Complete Yocto mirror with license table for TQMa6UL (2038-compliance)

- 264 license table entries with exact download URLs (224/264 resolved)
- Complete sources/ directory with all BitBake recipes
- Build configuration: tqma6ul-multi-mba6ulx, spaetzle (musl)
- Full traceability for the software release request (Softwarefreigabeantrag)
- GCC 13.4.0, Linux 6.6.102, U-Boot 2023.04, musl 1.2.4
- License distribution: GPL-2.0 (24), MIT (23), GPL-2.0+ (18), BSD-3 (16)
Siggi (OpenClaw Agent)
2026-03-01 20:58:18 +00:00
commit 16accb6b24
15086 changed files with 1292356 additions and 0 deletions


@@ -0,0 +1,630 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
#
# This bbclass is used for creating archive for:
# 1) original (or unpacked) source: ARCHIVER_MODE[src] = "original"
# 2) patched source: ARCHIVER_MODE[src] = "patched" (default)
# 3) configured source: ARCHIVER_MODE[src] = "configured"
# 4) source mirror: ARCHIVER_MODE[src] = "mirror"
# 5) The patches between do_unpack and do_patch:
# ARCHIVER_MODE[diff] = "1"
# And you can set the one that you'd like to exclude from the diff:
# ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
# 6) The environment data, similar to 'bitbake -e recipe':
# ARCHIVER_MODE[dumpdata] = "1"
# 7) The recipe (.bb and .inc): ARCHIVER_MODE[recipe] = "1"
# 8) Whether output the .src.rpm package:
# ARCHIVER_MODE[srpm] = "1"
# 9) Filter by license: recipes whose license matches
# COPYLEFT_LICENSE_INCLUDE will be included, and those matching
# COPYLEFT_LICENSE_EXCLUDE will be excluded.
# COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL*'
# COPYLEFT_LICENSE_EXCLUDE = 'CLOSED Proprietary'
# 10) The recipe type that will be archived:
# COPYLEFT_RECIPE_TYPES = 'target'
# 11) The source mirror mode:
# ARCHIVER_MODE[mirror] = "split" (default): Sources are split into
# per-recipe directories in a similar way to other archiver modes.
# Post-processing may be required to produce a single mirror directory.
# This does however allow inspection of duplicate sources and more
# intelligent handling.
# ARCHIVER_MODE[mirror] = "combined": All sources are placed into a single
# directory suitable for direct use as a mirror. Duplicate sources are
# ignored.
# 12) Source mirror exclusions:
# ARCHIVER_MIRROR_EXCLUDE is a list of prefixes to exclude from the mirror.
# This may be used for sources which you are already publishing yourself
# (e.g. if the URI starts with 'https://mysite.com/' and your mirror is
# going to be published to the same site). It may also be used to exclude
# local files (with the prefix 'file://') if these will be provided as part
# of an archive of the layers themselves.
#
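# Example (illustrative only): a local.conf snippet that produces a combined
# source mirror with this class might look like the following; the exact
# values depend on the build.
#
# INHERIT += "archiver"
# BB_GENERATE_MIRROR_TARBALLS = "1"
# ARCHIVER_MODE[src] = "mirror"
# ARCHIVER_MODE[mirror] = "combined"
# ARCHIVER_MIRROR_EXCLUDE = "file://"
#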
# Create archive for all the recipe types
COPYLEFT_RECIPE_TYPES ?= 'target native nativesdk cross crosssdk cross-canadian'
inherit copyleft_filter
ARCHIVER_MODE[srpm] ?= "0"
ARCHIVER_MODE[src] ?= "patched"
ARCHIVER_MODE[diff] ?= "0"
ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
ARCHIVER_MODE[dumpdata] ?= "0"
ARCHIVER_MODE[recipe] ?= "0"
ARCHIVER_MODE[mirror] ?= "split"
ARCHIVER_MODE[compression] ?= "xz"
DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
ARCHIVER_TOPDIR ?= "${WORKDIR}/archiver-sources"
ARCHIVER_ARCH = "${TARGET_SYS}"
ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${ARCHIVER_ARCH}/${PF}/"
ARCHIVER_RPMTOPDIR ?= "${WORKDIR}/deploy-sources-rpm"
ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${ARCHIVER_ARCH}/${PF}/"
ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
# When producing a combined mirror directory, allow duplicates for the case
# where multiple recipes use the same SRC_URI.
ARCHIVER_COMBINED_MIRRORDIR = "${ARCHIVER_TOPDIR}/mirror"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}/mirror"
do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"
# This is a convenience for the shell script to use it
def include_package(d, pn):
included, reason = copyleft_should_include(d)
if not included:
bb.debug(1, 'archiver: %s is excluded: %s' % (pn, reason))
return False
else:
bb.debug(1, 'archiver: %s is included: %s' % (pn, reason))
# glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
# so avoid archiving source here.
if pn.startswith('glibc-locale'):
return False
# We just archive gcc-source for all the gcc related recipes
if d.getVar('BPN') in ['gcc', 'libgcc'] \
and not pn.startswith('gcc-source'):
bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
return False
return True
python () {
pn = d.getVar('PN')
assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
if pn in assume_provided:
for p in d.getVar("PROVIDES").split():
if p != pn:
pn = p
break
if not include_package(d, pn):
return
# TARGET_SYS in ARCHIVER_ARCH will break the stamp for gcc-source in multiconfig
if pn.startswith('gcc-source'):
d.setVar('ARCHIVER_ARCH', "allarch")
def hasTask(task):
return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
ar_src = d.getVarFlag('ARCHIVER_MODE', 'src')
ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata')
ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe')
if ar_src == "original":
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_original' % pn)
# 'patched' and 'configured' invoke do_unpack_and_patch because
# do_ar_patched resp. do_ar_configured depend on it, but for 'original'
# we have to add it explicitly.
if d.getVarFlag('ARCHIVER_MODE', 'diff') == '1':
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_unpack_and_patch' % pn)
elif ar_src == "patched":
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn)
elif ar_src == "configured":
# We can't use "addtask do_ar_configured after do_configure" since it
# will cause the deptask of do_populate_sysroot to run no matter what
# archives we need, so we add the depends here.
# There is a corner case with "gcc-source-${PV}" recipes, they don't have
# the "do_configure" task, so we need to use "do_preconfigure"
if hasTask("do_preconfigure"):
d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_preconfigure' % pn)
elif hasTask("do_configure"):
d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn)
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_configured' % pn)
elif ar_src == "mirror":
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_mirror' % pn)
elif ar_src:
bb.fatal("Invalid ARCHIVER_MODE[src]: %s" % ar_src)
if ar_dumpdata == "1":
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_dumpdata' % pn)
if ar_recipe == "1":
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_recipe' % pn)
# Output the SRPM package
if d.getVarFlag('ARCHIVER_MODE', 'srpm') == "1" and d.getVar('PACKAGES'):
if "package_rpm" not in d.getVar('PACKAGE_CLASSES'):
bb.fatal("ARCHIVER_MODE[srpm] needs package_rpm in PACKAGE_CLASSES")
# Some recipes do not have any packaging tasks
if hasTask("do_package_write_rpm"):
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_package_write_rpm' % pn)
d.appendVarFlag('do_package_write_rpm', 'dirs', ' ${ARCHIVER_RPMTOPDIR}')
d.appendVarFlag('do_package_write_rpm', 'sstate-inputdirs', ' ${ARCHIVER_RPMTOPDIR}')
d.appendVarFlag('do_package_write_rpm', 'sstate-outputdirs', ' ${DEPLOY_DIR_SRC}')
if ar_dumpdata == "1":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_dumpdata' % pn)
if ar_recipe == "1":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_recipe' % pn)
if ar_src == "original":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_original' % pn)
elif ar_src == "patched":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_patched' % pn)
elif ar_src == "configured":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)
}
# Take all the sources for a recipe and put them in WORKDIR/archiver-work/.
# Files in SRC_URI are copied directly, anything that's a directory
# (e.g. git repositories) is "unpacked" and then put into a tarball.
python do_ar_original() {
import shutil, tempfile
if d.getVarFlag('ARCHIVER_MODE', 'src') != "original":
return
ar_outdir = d.getVar('ARCHIVER_OUTDIR')
bb.note('Archiving the original source...')
urls = d.getVar("SRC_URI").split()
# destsuffix (git fetcher) and subdir (everything else) are allowed to be
# absolute paths (for example, destsuffix=${S}/foobar).
# That messes with unpacking inside our tmpdir below, because the fetchers
# will then unpack in that directory and completely ignore the tmpdir.
# That breaks parallel tasks relying on ${S}, like do_compile.
#
# To solve this, we remove these parameters from all URLs.
# We do this even for relative paths because it makes the content of the
# archives more useful (no extra paths that are only used during
# compilation).
for i, url in enumerate(urls):
decoded = bb.fetch2.decodeurl(url)
for param in ('destsuffix', 'subdir'):
if param in decoded[5]:
del decoded[5][param]
encoded = bb.fetch2.encodeurl(decoded)
urls[i] = encoded
# Clear SRC_URI before calling bb.fetch2.Fetch() since SRC_URI is now in the
# variable "urls"; otherwise there might be errors like:
# The SRCREV_FORMAT variable must be set when multiple SCMs are used
ld = bb.data.createCopy(d)
ld.setVar('SRC_URI', '')
fetch = bb.fetch2.Fetch(urls, ld)
tarball_suffix = {}
for url in fetch.urls:
local = fetch.localpath(url).rstrip("/")
if os.path.isfile(local):
shutil.copy(local, ar_outdir)
elif os.path.isdir(local):
tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR'))
fetch.unpack(tmpdir, (url,))
# To handle recipes with more than one source, we add the "name"
# URL parameter as suffix. We treat it as an error when
# there's more than one URL without a name, or a name gets reused.
# This is an additional safety net, in practice the name has
# to be set when using the git fetcher, otherwise SRCREV cannot
# be set separately for each URL.
params = bb.fetch2.decodeurl(url)[5]
type = bb.fetch2.decodeurl(url)[0]
location = bb.fetch2.decodeurl(url)[2]
name = params.get('name', '')
if type.lower() == 'file':
name_tmp = location.rstrip("*").rstrip("/")
name = os.path.basename(name_tmp)
else:
if name in tarball_suffix:
if not name:
bb.fatal("Cannot determine archive names for original source because 'name' URL parameter is unset in more than one URL. Add it to at least one of these: %s %s" % (tarball_suffix[name], url))
else:
bb.fatal("Cannot determine archive names for original source because 'name=' URL parameter '%s' is used twice. Make it unique in: %s %s" % (tarball_suffix[name], url))
tarball_suffix[name] = url
create_tarball(d, tmpdir + '/.', name, ar_outdir)
# Emit patch series files for 'original'
bb.note('Writing patch series files...')
for patch in src_patches(d):
_, _, local, _, _, parm = bb.fetch.decodeurl(patch)
patchdir = parm.get('patchdir')
if patchdir:
series = os.path.join(ar_outdir, 'series.subdir.%s' % patchdir.replace('/', '_'))
else:
series = os.path.join(ar_outdir, 'series')
with open(series, 'a') as s:
s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
}
python do_ar_patched() {
if d.getVarFlag('ARCHIVER_MODE', 'src') != 'patched':
return
# Get the ARCHIVER_OUTDIR before we reset the WORKDIR
ar_outdir = d.getVar('ARCHIVER_OUTDIR')
if not is_work_shared(d):
ar_workdir = d.getVar('ARCHIVER_WORKDIR')
d.setVar('WORKDIR', ar_workdir)
bb.note('Archiving the patched source...')
create_tarball(d, d.getVar('S'), 'patched', ar_outdir)
}
python do_ar_configured() {
import shutil
# Forcibly expand the sysroot paths as we're about to change WORKDIR
d.setVar('STAGING_DIR_HOST', d.getVar('STAGING_DIR_HOST'))
d.setVar('STAGING_DIR_TARGET', d.getVar('STAGING_DIR_TARGET'))
d.setVar('RECIPE_SYSROOT', d.getVar('RECIPE_SYSROOT'))
d.setVar('RECIPE_SYSROOT_NATIVE', d.getVar('RECIPE_SYSROOT_NATIVE'))
ar_outdir = d.getVar('ARCHIVER_OUTDIR')
if d.getVarFlag('ARCHIVER_MODE', 'src') == 'configured':
bb.note('Archiving the configured source...')
pn = d.getVar('PN')
# "gcc-source-${PV}" recipes don't have "do_configure"
# task, so we need to run "do_preconfigure" instead
if pn.startswith("gcc-source-"):
d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR'))
bb.build.exec_func('do_preconfigure', d)
# libtool-native's do_configure will remove
# ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run
# do_configure; instead we archive the already configured ${S}.
# The kernel class functions require the source to be in work-shared, so we
# don't unpack, patch, or configure again; we just archive the already
# configured ${S}.
elif not (pn == 'libtool-native' or is_work_shared(d)):
def runTask(task):
prefuncs = d.getVarFlag(task, 'prefuncs') or ''
for func in prefuncs.split():
if func != "sysroot_cleansstate":
bb.build.exec_func(func, d)
bb.build.exec_func(task, d)
postfuncs = d.getVarFlag(task, 'postfuncs') or ''
for func in postfuncs.split():
if func != 'do_qa_configure':
bb.build.exec_func(func, d)
# Change the WORKDIR to make do_configure run in another dir.
d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR'))
preceeds = bb.build.preceedtask('do_configure', False, d)
for task in preceeds:
if task != 'do_patch' and task != 'do_prepare_recipe_sysroot':
runTask(task)
runTask('do_configure')
srcdir = d.getVar('S')
builddir = d.getVar('B')
if srcdir != builddir:
if os.path.exists(builddir):
oe.path.copytree(builddir, os.path.join(srcdir, \
'build.%s.ar_configured' % d.getVar('PF')))
create_tarball(d, srcdir, 'configured', ar_outdir)
}
python do_ar_mirror() {
import subprocess
src_uri = (d.getVar('SRC_URI') or '').split()
if len(src_uri) == 0:
return
dl_dir = d.getVar('DL_DIR')
mirror_exclusions = (d.getVar('ARCHIVER_MIRROR_EXCLUDE') or '').split()
mirror_mode = d.getVarFlag('ARCHIVER_MODE', 'mirror')
have_mirror_tarballs = d.getVar('BB_GENERATE_MIRROR_TARBALLS')
if mirror_mode == 'combined':
destdir = d.getVar('ARCHIVER_COMBINED_MIRRORDIR')
elif mirror_mode == 'split':
destdir = d.getVar('ARCHIVER_OUTDIR')
else:
bb.fatal('Invalid ARCHIVER_MODE[mirror]: %s' % (mirror_mode))
if not have_mirror_tarballs:
bb.fatal('Using `ARCHIVER_MODE[src] = "mirror"` depends on setting `BB_GENERATE_MIRROR_TARBALLS = "1"`')
def is_excluded(url):
for prefix in mirror_exclusions:
if url.startswith(prefix):
return True
return False
bb.note('Archiving the source as a mirror...')
bb.utils.mkdirhier(destdir)
fetcher = bb.fetch2.Fetch(src_uri, d)
for ud in fetcher.expanded_urldata():
if is_excluded(ud.url):
bb.note('Skipping excluded url: %s' % (ud.url))
continue
bb.note('Archiving url: %s' % (ud.url))
ud.setup_localpath(d)
localpath = None
# Check for mirror tarballs first. We will archive the first mirror
# tarball that we find as it's assumed that we just need one.
for mirror_fname in ud.mirrortarballs:
mirror_path = os.path.join(dl_dir, mirror_fname)
if os.path.exists(mirror_path):
bb.note('Found mirror tarball: %s' % (mirror_path))
localpath = mirror_path
break
if len(ud.mirrortarballs) and not localpath:
bb.warn('Mirror tarballs are listed for a source but none are present. ' \
'Falling back to original download.\n' \
'SRC_URI = %s' % (ud.url))
# Check original download
if not localpath:
bb.note('Using original download: %s' % (ud.localpath))
localpath = ud.localpath
if not localpath or not os.path.exists(localpath):
bb.fatal('Original download is missing for a source.\n' \
'SRC_URI = %s' % (ud.url))
# We now have an appropriate localpath
bb.note('Copying source mirror')
cmd = 'cp -fpPRH %s %s' % (localpath, destdir)
subprocess.check_call(cmd, shell=True)
}
def create_tarball(d, srcdir, suffix, ar_outdir):
"""
create the tarball from srcdir
"""
import subprocess
# Make sure we are only creating a single tarball for gcc sources
if (d.getVar('SRC_URI') == ""):
return
# For the kernel archive, srcdir may just be a link to the
# work-shared location. Use os.path.realpath to make sure
# that we archive the actual directory and not just the link.
srcdir = os.path.realpath(srcdir)
compression_method = d.getVarFlag('ARCHIVER_MODE', 'compression')
if compression_method == "xz":
compression_cmd = "xz %s" % d.getVar('XZ_DEFAULTS')
# To keep compatibility with ARCHIVER_MODE[compression]
elif compression_method == "gz":
compression_cmd = "gzip"
elif compression_method == "bz2":
compression_cmd = "bzip2"
else:
bb.fatal("Unsupported compression_method: %s" % compression_method)
bb.utils.mkdirhier(ar_outdir)
if suffix:
filename = '%s-%s.tar.%s' % (d.getVar('PF'), suffix, compression_method)
else:
filename = '%s.tar.%s' % (d.getVar('PF'), compression_method)
tarname = os.path.join(ar_outdir, filename)
bb.note('Creating %s' % tarname)
dirname = os.path.dirname(srcdir)
basename = os.path.basename(srcdir)
exclude = "--exclude=temp --exclude=patches --exclude='.pc'"
tar_cmd = "tar %s -cf - %s | %s > %s" % (exclude, basename, compression_cmd, tarname)
subprocess.check_call(tar_cmd, cwd=dirname, shell=True)
# creating .diff.gz between source.orig and source
def create_diff_gz(d, src_orig, src, ar_outdir):
import subprocess
if not os.path.isdir(src) or not os.path.isdir(src_orig):
return
# diff's --exclude can't exclude files by path, so we copy
# the patched source and remove the files that we'd like to
# exclude.
src_patched = src + '.patched'
oe.path.copyhardlinktree(src, src_patched)
for i in d.getVarFlag('ARCHIVER_MODE', 'diff-exclude').split():
bb.utils.remove(os.path.join(src_orig, i), recurse=True)
bb.utils.remove(os.path.join(src_patched, i), recurse=True)
dirname = os.path.dirname(src)
basename = os.path.basename(src)
bb.utils.mkdirhier(ar_outdir)
cwd = os.getcwd()
try:
os.chdir(dirname)
out_file = os.path.join(ar_outdir, '%s-diff.gz' % d.getVar('PF'))
diff_cmd = 'diff -Naur %s.orig %s.patched | gzip -c > %s' % (basename, basename, out_file)
subprocess.check_call(diff_cmd, shell=True)
bb.utils.remove(src_patched, recurse=True)
finally:
os.chdir(cwd)
def is_work_shared(d):
sharedworkdir = os.path.join(d.getVar('TMPDIR'), 'work-shared')
sourcedir = os.path.realpath(d.getVar('S'))
return sourcedir.startswith(sharedworkdir)
# Run do_unpack and do_patch
python do_unpack_and_patch() {
if d.getVarFlag('ARCHIVER_MODE', 'src') not in \
[ 'patched', 'configured'] and \
d.getVarFlag('ARCHIVER_MODE', 'diff') != '1':
return
ar_outdir = d.getVar('ARCHIVER_OUTDIR')
ar_workdir = d.getVar('ARCHIVER_WORKDIR')
ar_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
pn = d.getVar('PN')
# The kernel class functions require it to be on work-shared, so we don't change WORKDIR
if not is_work_shared(d):
# Change the WORKDIR to make do_unpack do_patch run in another dir.
d.setVar('WORKDIR', ar_workdir)
# Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
d.setVar('STAGING_DIR_NATIVE', ar_sysroot_native)
# Changing 'WORKDIR' also changes 'B'; create 'B' because some of the
# following tasks may require it to exist (for example, some recipes'
# do_patch expects 'B' to be present).
bb.utils.mkdirhier(d.getVar('B'))
bb.build.exec_func('do_unpack', d)
# Save the original source for creating the patches
if d.getVarFlag('ARCHIVER_MODE', 'diff') == '1':
src = d.getVar('S').rstrip('/')
src_orig = '%s.orig' % src
oe.path.copytree(src, src_orig)
if bb.data.inherits_class('dos2unix', d):
bb.build.exec_func('do_convert_crlf_to_lf', d)
# Make sure gcc and kernel sources are patched only once
if not (d.getVar('SRC_URI') == "" or is_work_shared(d)):
bb.build.exec_func('do_patch', d)
# Create the patches
if d.getVarFlag('ARCHIVER_MODE', 'diff') == '1':
bb.note('Creating diff gz...')
create_diff_gz(d, src_orig, src, ar_outdir)
bb.utils.remove(src_orig, recurse=True)
}
# BBINCLUDED is special (excluded from basehash signature
# calculation). Using it in a task signature can cause "basehash
# changed" errors.
#
# Depending on BBINCLUDED also causes do_ar_recipe to run again
# for unrelated changes, like adding or removing buildhistory.bbclass.
#
# For these reasons we ignore the dependency completely. The versioning
# of the output file ensures that we create it each time the recipe
# gets rebuilt, at least as long as a PR server is used. We also rely
# on that mechanism to catch changes in the file content, because the
# file content is not part of the task signature either.
do_ar_recipe[vardepsexclude] += "BBINCLUDED"
python do_ar_recipe () {
"""
archive the recipe, including .bb and .inc.
"""
import re
import shutil
require_re = re.compile( r"require\s+(.+)" )
include_re = re.compile( r"include\s+(.+)" )
bbfile = d.getVar('FILE')
outdir = os.path.join(d.getVar('WORKDIR'), \
'%s-recipe' % d.getVar('PF'))
bb.utils.mkdirhier(outdir)
shutil.copy(bbfile, outdir)
pn = d.getVar('PN')
bbappend_files = d.getVar('BBINCLUDED').split()
# If recipe name is aa, we need to match files like aa.bbappend and aa_1.1.bbappend
# Files like aa1.bbappend or aa1_1.1.bbappend must be excluded.
bbappend_re = re.compile( r".*/%s_[^/]*\.bbappend$" % re.escape(pn))
bbappend_re1 = re.compile( r".*/%s\.bbappend$" % re.escape(pn))
for file in bbappend_files:
if bbappend_re.match(file) or bbappend_re1.match(file):
shutil.copy(file, outdir)
dirname = os.path.dirname(bbfile)
bbpath = '%s:%s' % (dirname, d.getVar('BBPATH'))
f = open(bbfile, 'r')
for line in f.readlines():
incfile = None
if require_re.match(line):
incfile = require_re.match(line).group(1)
elif include_re.match(line):
incfile = include_re.match(line).group(1)
if incfile:
incfile = d.expand(incfile)
if incfile:
incfile = bb.utils.which(bbpath, incfile)
if incfile:
shutil.copy(incfile, outdir)
create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR'))
bb.utils.remove(outdir, recurse=True)
}
python do_dumpdata () {
"""
dump environment data to ${PF}-showdata.dump
"""
dumpfile = os.path.join(d.getVar('ARCHIVER_OUTDIR'), \
'%s-showdata.dump' % d.getVar('PF'))
bb.note('Dumping metadata into %s' % dumpfile)
with open(dumpfile, "w") as f:
# emit variables and shell functions
bb.data.emit_env(f, d, True)
# emit the metadata which isn't valid shell
for e in d.keys():
if d.getVarFlag(e, "python", False):
f.write("\npython %s () {\n%s}\n" % (e, d.getVar(e, False)))
}
SSTATETASKS += "do_deploy_archives"
do_deploy_archives () {
bbnote "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
}
python do_deploy_archives_setscene () {
sstate_setscene(d)
}
do_deploy_archives[dirs] = "${ARCHIVER_TOPDIR}"
do_deploy_archives[sstate-inputdirs] = "${ARCHIVER_TOPDIR}"
do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}"
addtask do_deploy_archives_setscene
addtask do_ar_original after do_unpack
addtask do_unpack_and_patch after do_patch do_preconfigure
addtask do_ar_patched after do_unpack_and_patch
addtask do_ar_configured after do_unpack_and_patch
addtask do_ar_mirror after do_fetch
addtask do_dumpdata
addtask do_ar_recipe
addtask do_deploy_archives
do_build[recrdeptask] += "do_deploy_archives"
do_rootfs[recrdeptask] += "do_deploy_archives"
do_populate_sdk[recrdeptask] += "do_deploy_archives"
python () {
# Add tasks in the correct order, specifically for linux-yocto to avoid race condition.
# sstatesig.py:sstate_rundepfilter has special support that excludes this dependency
# so that do_kernel_configme does not need to run again when do_unpack_and_patch
# gets added or removed (by adding or removing archiver.bbclass).
if bb.data.inherits_class('kernel-yocto', d):
bb.build.addtask('do_kernel_configme', 'do_configure', 'do_unpack_and_patch', d)
}

File diff suppressed because it is too large


@@ -0,0 +1,46 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# Summarize sstate usage at the end of the build
python buildstats_summary () {
import collections
import os.path
bsdir = e.data.expand("${BUILDSTATS_BASE}/${BUILDNAME}")
if not os.path.exists(bsdir):
return
sstatetasks = (e.data.getVar('SSTATETASKS') or '').split()
built = collections.defaultdict(lambda: [set(), set()])
for pf in os.listdir(bsdir):
taskdir = os.path.join(bsdir, pf)
if not os.path.isdir(taskdir):
continue
tasks = os.listdir(taskdir)
for t in sstatetasks:
no_sstate, sstate = built[t]
if t in tasks:
no_sstate.add(pf)
elif t + '_setscene' in tasks:
sstate.add(pf)
header_printed = False
for t in sstatetasks:
no_sstate, sstate = built[t]
if no_sstate | sstate:
if not header_printed:
header_printed = True
bb.note("Build completion summary:")
sstate_count = len(sstate)
no_sstate_count = len(no_sstate)
total_count = sstate_count + no_sstate_count
bb.note(" {0}: {1:.1f}% sstate reuse({2} setscene, {3} scratch)".format(
t, round(100 * sstate_count / total_count, 1), sstate_count, no_sstate_count))
}
addhandler buildstats_summary
buildstats_summary[eventmask] = "bb.event.BuildCompleted"


@@ -0,0 +1,81 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
#
# Usage:
# - Enable ccache
# Add the following line to a conffile such as conf/local.conf:
# INHERIT += "ccache"
#
# - Disable ccache for a recipe
# Add the following line to the recipe if it can't be built with ccache:
# CCACHE_DISABLE = '1'
#
# - Share ccache files between different builds
# Set CCACHE_TOP_DIR to a shared dir
# CCACHE_TOP_DIR = /path/to/shared_ccache/
#
# - To debug ccache
# export CCACHE_DEBUG = "1"
# export CCACHE_LOGFILE = "${CCACHE_DIR}/logfile.log"
# And also set PARALLEL_MAKE = "-j 1" to keep the log in order
#
# Set this to a shared location so that cache files can be shared between
# different builds.
CCACHE_TOP_DIR ?= "${TMPDIR}/ccache"
# ccache-native and cmake-native have a circular dependency
# that affects other native recipes, but not all.
# This allows ccache to be enabled for specific native recipes.
CCACHE_NATIVE_RECIPES_ALLOWED ?= ""
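# Example (illustrative only; the recipe name is just a placeholder):
# CCACHE_NATIVE_RECIPES_ALLOWED = "quilt-native"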
# ccache removes CCACHE_BASEDIR from file paths, so that hashes will be the same
# in different builds.
export CCACHE_BASEDIR ?= "${TMPDIR}"
# Used for sharing cache files after compiler is rebuilt
export CCACHE_COMPILERCHECK ?= "%compiler% -dumpspecs"
export CCACHE_CONFIGPATH ?= "${COREBASE}/meta/conf/ccache.conf"
export CCACHE_DIR ?= "${CCACHE_TOP_DIR}/${MULTIMACH_TARGET_SYS}/${PN}"
# Fixes errors like:
# ccache: error: Failed to create directory /run/user/0/ccache-tmp: Permission denied
export CCACHE_TEMPDIR ?= "${CCACHE_DIR}/tmp"
# We need to stop ccache considering the current directory or the
# debug-prefix-map target directory to be significant when calculating
# its hash. Without this the cache would be invalidated every time
# ${PV} or ${PR} change.
export CCACHE_NOHASHDIR ?= "1"
python() {
"""
Enable ccache for the recipe
"""
pn = d.getVar('PN')
if (pn in d.getVar('CCACHE_NATIVE_RECIPES_ALLOWED') or
not (bb.data.inherits_class("native", d) or
bb.utils.to_boolean(d.getVar('CCACHE_DISABLE')))):
d.appendVar('DEPENDS', ' ccache-native')
d.setVar('CCACHE', 'ccache ')
}
addtask cleanccache after do_clean
python do_cleanccache() {
import shutil
ccache_dir = d.getVar('CCACHE_DIR')
if os.path.exists(ccache_dir):
bb.note("Removing %s" % ccache_dir)
shutil.rmtree(ccache_dir)
else:
bb.note("%s doesn't exist" % ccache_dir)
}
addtask cleanall after do_cleanccache
do_cleanccache[nostamp] = "1"


@@ -0,0 +1,103 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
inherit terminal
python do_ccmake() {
import shutil
# copy current config for diffing
config = os.path.join(d.getVar("B"), "CMakeCache.txt")
if os.path.exists(config):
shutil.copy(config, config + ".orig")
oe_terminal(d.expand("ccmake ${OECMAKE_GENERATOR_ARGS} ${OECMAKE_SOURCEPATH} -Wno-dev"),
d.getVar("PN") + " - ccmake", d)
if os.path.exists(config) and os.path.exists(config + ".orig"):
if bb.utils.md5_file(config) != bb.utils.md5_file(config + ".orig"):
# the cmake class uses cmake --build, which will by default
# regenerate configuration, simply mark the compile step as tainted
# to ensure it is re-run
bb.note("Configuration changed, recompile will be forced")
bb.build.write_taint('do_compile', d)
}
do_ccmake[depends] += "cmake-native:do_populate_sysroot"
do_ccmake[nostamp] = "1"
do_ccmake[dirs] = "${B}"
addtask ccmake after do_configure
def cmake_parse_config_cache(path):
with open(path, "r") as f:
for i in f:
i = i.rstrip("\n")
if len(i) == 0 or i.startswith("//") or i.startswith("#"):
continue # empty or comment
key, value = i.split("=", 1)
key, keytype = key.split(":")
if keytype in ["INTERNAL", "STATIC"]:
continue # skip internal and static config options
yield key, keytype, value
def cmake_diff_config_vars(a, b):
removed, added = [], []
for ak, akt, av in a:
found = False
for bk, bkt, bv in b:
if bk == ak:
found = True
if bkt != akt or bv != av: # changed
removed.append((ak, akt, av))
added.append((bk, bkt, bv))
break
# remove any missing from b
if not found:
removed.append((ak, akt, av))
# add any missing from a
for bk, bkt, bv in b:
if not any(bk == ak for ak, akt, av in a):
added.append((bk, bkt, bv))
return removed, added
python do_ccmake_diffconfig() {
import shutil
config = os.path.join(d.getVar("B"), "CMakeCache.txt")
if os.path.exists(config) and os.path.exists(config + ".orig"):
if bb.utils.md5_file(config) != bb.utils.md5_file(config + ".orig"):
# scan the changed options
old = list(cmake_parse_config_cache(config + ".orig"))
new = list(cmake_parse_config_cache(config))
_, added = cmake_diff_config_vars(old, new)
if len(added) != 0:
with open(d.expand("${WORKDIR}/configuration.inc"), "w") as f:
f.write("EXTRA_OECMAKE += \" \\\n")
for k, kt, v in added:
escaped = v if " " not in v else "\"{0}\"".format(v)
f.write(" -D{0}:{1}={2} \\\n".format(k, kt, escaped))
f.write(" \"\n")
bb.plain("Configuration recipe fragment written to: {0}".format(d.expand("${WORKDIR}/configuration.inc")))
with open(d.expand("${WORKDIR}/site-file.cmake"), "w") as f:
for k, kt, v in added:
f.write("SET({0} \"{1}\" CACHE {2} \"\")\n".format(k, v, kt))
bb.plain("Configuration cmake fragment written to: {0}".format(d.expand("${WORKDIR}/site-file.cmake")))
# restore the original config
shutil.copy(config + ".orig", config)
else:
bb.plain("No configuration differences, skipping configuration fragment generation.")
else:
bb.fatal("No config files found. Did you run ccmake?")
}
do_ccmake_diffconfig[nostamp] = "1"
do_ccmake_diffconfig[dirs] = "${B}"
addtask ccmake_diffconfig
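#
# Example usage (illustrative): interactively adjust the CMake cache, then
# capture the differences as recipe fragments:
#
# bitbake -c ccmake <recipe>
# bitbake -c ccmake_diffconfig <recipe>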


@@ -0,0 +1,140 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
CHRPATH_BIN ?= "chrpath"
PREPROCESS_RELOCATE_DIRS ?= ""
def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False):
import subprocess, oe.qa
with oe.qa.ELFFile(fpath) as elf:
try:
elf.open()
except oe.qa.NotELFFileError:
return
try:
out = subprocess.check_output([cmd, "-l", fpath], universal_newlines=True)
except subprocess.CalledProcessError:
return
# Handle RUNPATH as well as RPATH
out = out.replace("RUNPATH=","RPATH=")
# Throw away everything other than the rpath list
curr_rpath = out.partition("RPATH=")[2]
#bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
rpaths = curr_rpath.strip().split(":")
new_rpaths = []
modified = False
for rpath in rpaths:
# If rpath is already dynamic copy it to new_rpath and continue
if rpath.find("$ORIGIN") != -1:
new_rpaths.append(rpath)
continue
rpath = os.path.normpath(rpath)
if baseprefix not in rpath and tmpdir not in rpath:
# Skip standard search paths
if rpath in ['/lib', '/usr/lib', '/lib64', '/usr/lib64']:
bb.warn("Skipping RPATH %s as it is a standard search path for %s" % (rpath, fpath))
modified = True
continue
new_rpaths.append(rpath)
continue
new_rpaths.append("$ORIGIN/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/"))))
modified = True
# if we have modified some rpaths call chrpath to update the binary
if modified:
if break_hardlinks:
bb.utils.break_hardlinks(fpath)
args = ":".join(new_rpaths)
#bb.note("Setting rpath for %s to %s" %(fpath, args))
try:
subprocess.check_output([cmd, "-r", args, fpath],
stderr=subprocess.PIPE, universal_newlines=True)
except subprocess.CalledProcessError as e:
bb.fatal("chrpath command failed with exit code %d:\n%s\n%s" % (e.returncode, e.stdout, e.stderr))
def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False):
import subprocess as sub
p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE, text=True)
out, err = p.communicate()
# If otool failed to run, skip this file; otherwise process stdout for results
if p.returncode != 0:
return
for l in out.split("\n"):
if "(compatibility" not in l:
continue
rpath = l.partition("(compatibility")[0].strip()
if baseprefix not in rpath:
continue
if break_hardlinks:
bb.utils.break_hardlinks(fpath)
newpath = "@loader_path/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/")))
p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, newpath, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
out, err = p.communicate()
def process_dir(rootdir, directory, d, break_hardlinks = False):
bb.debug(2, "Checking %s for binaries to process" % directory)
if not os.path.exists(directory):
return
import stat
rootdir = os.path.normpath(rootdir)
cmd = d.expand('${CHRPATH_BIN}')
tmpdir = os.path.normpath(d.getVar('TMPDIR', False))
baseprefix = os.path.normpath(d.expand('${base_prefix}'))
hostos = d.getVar("HOST_OS")
if "linux" in hostos:
process_file = process_file_linux
elif "darwin" in hostos:
process_file = process_file_darwin
else:
# Relocations not supported
return
dirs = os.listdir(directory)
for file in dirs:
fpath = directory + "/" + file
fpath = os.path.normpath(fpath)
if os.path.islink(fpath):
# Skip symlinks
continue
if os.path.isdir(fpath):
process_dir(rootdir, fpath, d, break_hardlinks = break_hardlinks)
else:
#bb.note("Testing %s for relocatability" % fpath)
# We need read and write permissions for chrpath, if we don't have
# them then set them temporarily. Take a copy of the files
# permissions so that we can restore them afterwards.
perms = os.stat(fpath)[stat.ST_MODE]
if os.access(fpath, os.W_OK|os.R_OK):
perms = None
else:
# Temporarily make the file writeable so we can chrpath it
os.chmod(fpath, perms|stat.S_IRWXU)
process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = break_hardlinks)
if perms:
os.chmod(fpath, perms)
def rpath_replace (path, d):
bindirs = d.expand("${bindir} ${sbindir} ${base_sbindir} ${base_bindir} ${libdir} ${base_libdir} ${libexecdir} ${PREPROCESS_RELOCATE_DIRS}").split()
for bindir in bindirs:
#bb.note ("Processing directory " + bindir)
directory = path + "/" + bindir
process_dir (path, directory, d)


@@ -0,0 +1,70 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# Deploy sources for recipes for compliance with copyleft-style licenses
# Defaults to using symlinks, as it's a quick operation, and one can easily
# follow the links when making use of the files (e.g. tar with the -h arg).
#
# vi:sts=4:sw=4:et
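#
# Example (illustrative): the deployed tree can be packed up with the symlinks
# dereferenced, e.g.:
#
# tar -chzf copyleft_sources.tar.gz -C ${DEPLOY_DIR} copyleft_sources
#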
inherit copyleft_filter
COPYLEFT_SOURCES_DIR ?= '${DEPLOY_DIR}/copyleft_sources'
python do_prepare_copyleft_sources () {
"""Populate a tree of the recipe sources and emit patch series files"""
import os.path
import shutil
p = d.getVar('P')
included, reason = copyleft_should_include(d)
if not included:
bb.debug(1, 'copyleft: %s is excluded: %s' % (p, reason))
return
else:
bb.debug(1, 'copyleft: %s is included: %s' % (p, reason))
sources_dir = d.getVar('COPYLEFT_SOURCES_DIR')
dl_dir = d.getVar('DL_DIR')
src_uri = d.getVar('SRC_URI').split()
fetch = bb.fetch2.Fetch(src_uri, d)
ud = fetch.ud
pf = d.getVar('PF')
dest = os.path.join(sources_dir, pf)
shutil.rmtree(dest, ignore_errors=True)
bb.utils.mkdirhier(dest)
for u in ud.values():
local = os.path.normpath(fetch.localpath(u.url))
if local.endswith('.bb'):
continue
elif local.endswith('/'):
local = local[:-1]
if u.mirrortarball:
tarball_path = os.path.join(dl_dir, u.mirrortarball)
if os.path.exists(tarball_path):
local = tarball_path
oe.path.symlink(local, os.path.join(dest, os.path.basename(local)), force=True)
patches = src_patches(d)
for patch in patches:
_, _, local, _, _, parm = bb.fetch.decodeurl(patch)
patchdir = parm.get('patchdir')
if patchdir:
series = os.path.join(dest, 'series.subdir.%s' % patchdir.replace('/', '_'))
else:
series = os.path.join(dest, 'series')
with open(series, 'a') as s:
s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
}
addtask prepare_copyleft_sources after do_fetch before do_build
do_prepare_copyleft_sources[dirs] = "${WORKDIR}"
do_build[recrdeptask] += 'do_prepare_copyleft_sources'


@@ -0,0 +1,83 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# Filter by license: copyleft_should_include returns True for recipes whose
# license matches COPYLEFT_LICENSE_INCLUDE, and False for those matching
# COPYLEFT_LICENSE_EXCLUDE.
#
# By default, includes all GPL and LGPL, and excludes CLOSED and Proprietary.
COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL* AGPL*'
COPYLEFT_LICENSE_INCLUDE[type] = 'list'
COPYLEFT_LICENSE_INCLUDE[doc] = 'Space separated list of globs which include licenses'
COPYLEFT_LICENSE_EXCLUDE ?= 'CLOSED Proprietary'
COPYLEFT_LICENSE_EXCLUDE[type] = 'list'
COPYLEFT_LICENSE_EXCLUDE[doc] = 'Space separated list of globs which exclude licenses'
COPYLEFT_RECIPE_TYPE ?= '${@copyleft_recipe_type(d)}'
COPYLEFT_RECIPE_TYPE[doc] = 'The "type" of the current recipe (e.g. target, native, cross)'
COPYLEFT_RECIPE_TYPES ?= 'target'
COPYLEFT_RECIPE_TYPES[type] = 'list'
COPYLEFT_RECIPE_TYPES[doc] = 'Space separated list of recipe types to include'
COPYLEFT_AVAILABLE_RECIPE_TYPES = 'target native nativesdk cross crosssdk cross-canadian'
COPYLEFT_AVAILABLE_RECIPE_TYPES[type] = 'list'
COPYLEFT_AVAILABLE_RECIPE_TYPES[doc] = 'Space separated list of available recipe types'
COPYLEFT_PN_INCLUDE ?= ''
COPYLEFT_PN_INCLUDE[type] = 'list'
COPYLEFT_PN_INCLUDE[doc] = 'Space separated list of recipe names to include'
COPYLEFT_PN_EXCLUDE ?= ''
COPYLEFT_PN_EXCLUDE[type] = 'list'
COPYLEFT_PN_EXCLUDE[doc] = 'Space separated list of recipe names to exclude'
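#
# Example (illustrative only; the values are placeholders, not defaults):
# limit source deployment to GPL-3.0-style licenses in target recipes, and
# always include one recipe by name:
#
# COPYLEFT_LICENSE_INCLUDE = 'GPL-3.0* LGPL-3.0*'
# COPYLEFT_RECIPE_TYPES = 'target'
# COPYLEFT_PN_INCLUDE = 'linux-yocto'
#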
def copyleft_recipe_type(d):
for recipe_type in oe.data.typed_value('COPYLEFT_AVAILABLE_RECIPE_TYPES', d):
if oe.utils.inherits(d, recipe_type):
return recipe_type
return 'target'
def copyleft_should_include(d):
"""
Determine if this recipe's sources should be deployed for compliance
"""
import ast
import oe.license
from fnmatch import fnmatchcase as fnmatch
recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE')
if recipe_type not in oe.data.typed_value('COPYLEFT_RECIPE_TYPES', d):
included, motive = False, 'recipe type "%s" is excluded' % recipe_type
else:
included, motive = False, 'recipe did not match anything'
include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d)
exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d)
try:
is_included, reason = oe.license.is_included(d.getVar('LICENSE'), include, exclude)
except oe.license.LicenseError as exc:
bb.fatal('%s: %s' % (d.getVar('PF'), exc))
else:
if is_included:
if reason:
included, motive = True, 'recipe has included licenses: %s' % ', '.join(reason)
else:
included, motive = False, 'recipe does not include a copyleft license'
else:
included, motive = False, 'recipe has excluded licenses: %s' % ', '.join(reason)
if any(fnmatch(d.getVar('PN'), name) \
for name in oe.data.typed_value('COPYLEFT_PN_INCLUDE', d)):
included, motive = True, 'recipe included by name'
if any(fnmatch(d.getVar('PN'), name) \
for name in oe.data.typed_value('COPYLEFT_PN_EXCLUDE', d)):
included, motive = False, 'recipe excluded by name'
return included, motive

File diff suppressed because it is too large


@@ -0,0 +1,8 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Include this class when you don't care what version of SPDX you get; it will
# be updated to the latest stable version that is supported
inherit create-spdx-2.2


@@ -0,0 +1,670 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# This class is used to check recipes against public CVEs.
#
# In order to use this class just inherit the class in the
# local.conf file and it will add the cve_check task for
# every recipe. The task can be used per recipe, per image,
# or using the special cases "world" and "universe". The
# cve_check task will print a warning for every unpatched
# CVE found and generate a file in the recipe WORKDIR/cve
# directory. If an image is built it will generate a report
# in DEPLOY_DIR_IMAGE for all the packages used.
#
# Example:
# bitbake -c cve_check openssl
# bitbake core-image-sato
# bitbake -k -c cve_check universe
#
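# To enable the class globally (illustrative local.conf line):
# INHERIT += "cve-check"
#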
# DISCLAIMER
#
# This class/tool is meant to be used as support and not
# the only method to check against CVEs. Running this tool
# doesn't guarantee your packages are free of CVEs.
# The product name that the CVE database uses defaults to BPN, but may need to
# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
CVE_PRODUCT ??= "${BPN}"
CVE_VERSION ??= "${PV}"
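# A vendor can also be given using the "vendor:product" form handled in
# check_cves() below (illustrative example, not a default):
# CVE_PRODUCT = "gnu:gcc"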
CVE_CHECK_DB_FILENAME ?= "nvdcve_2-2.db"
CVE_CHECK_DB_DIR ?= "${STAGING_DIR}/CVE_CHECK"
CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/${CVE_CHECK_DB_FILENAME}"
CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock"
CVE_CHECK_LOG ?= "${T}/cve.log"
CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve"
CVE_CHECK_SUMMARY_FILE_NAME ?= "cve-summary"
CVE_CHECK_SUMMARY_FILE ?= "${CVE_CHECK_SUMMARY_DIR}/${CVE_CHECK_SUMMARY_FILE_NAME}"
CVE_CHECK_SUMMARY_FILE_NAME_JSON = "cve-summary.json"
CVE_CHECK_SUMMARY_INDEX_PATH = "${CVE_CHECK_SUMMARY_DIR}/cve-summary-index.txt"
CVE_CHECK_LOG_JSON ?= "${T}/cve.json"
CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}"
CVE_CHECK_RECIPE_FILE_JSON ?= "${CVE_CHECK_DIR}/${PN}_cve.json"
CVE_CHECK_MANIFEST ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}.cve"
CVE_CHECK_MANIFEST_JSON_SUFFIX ?= "json"
CVE_CHECK_MANIFEST_JSON ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}.${CVE_CHECK_MANIFEST_JSON_SUFFIX}"
CVE_CHECK_COPY_FILES ??= "1"
CVE_CHECK_CREATE_MANIFEST ??= "1"
# Report Patched or Ignored CVEs
CVE_CHECK_REPORT_PATCHED ??= "1"
CVE_CHECK_SHOW_WARNINGS ??= "1"
# Provide text output
CVE_CHECK_FORMAT_TEXT ??= "1"
# Provide JSON output
CVE_CHECK_FORMAT_JSON ??= "1"
# Check for packages without CVEs (no issues or missing product name)
CVE_CHECK_COVERAGE ??= "1"
# Skip CVE Check for packages (PN)
CVE_CHECK_SKIP_RECIPE ?= ""
# Replace the NVD DB check status for a given CVE. Each CVE has to be mentioned
# separately with optional detail and description for this status.
#
# CVE_STATUS[CVE-1234-0001] = "not-applicable-platform: Issue only applies on Windows"
# CVE_STATUS[CVE-1234-0002] = "fixed-version: Fixed externally"
#
# Setting the same status and reason for multiple CVEs is possible
# via CVE_STATUS_GROUPS variable.
#
# CVE_STATUS_GROUPS = "CVE_STATUS_WIN CVE_STATUS_PATCHED"
#
# CVE_STATUS_WIN = "CVE-1234-0001 CVE-1234-0003"
# CVE_STATUS_WIN[status] = "not-applicable-platform: Issue only applies on Windows"
# CVE_STATUS_PATCHED = "CVE-1234-0002 CVE-1234-0004"
# CVE_STATUS_PATCHED[status] = "fixed-version: Fixed externally"
#
# All possible CVE statuses can be found in cve-check-map.conf
# CVE_CHECK_STATUSMAP[not-applicable-platform] = "Ignored"
# CVE_CHECK_STATUSMAP[fixed-version] = "Patched"
#
# CVE_CHECK_IGNORE is deprecated and CVE_STATUS has to be used instead.
# Keep CVE_CHECK_IGNORE until other layers migrate to new variables
CVE_CHECK_IGNORE ?= ""
# Layers to be excluded
CVE_CHECK_LAYER_EXCLUDELIST ??= ""
# Layers to be included
CVE_CHECK_LAYER_INCLUDELIST ??= ""
# set to "alphabetical" for version using single alphabetical character as increment release
CVE_VERSION_SUFFIX ??= ""
python () {
# Fallback all CVEs from CVE_CHECK_IGNORE to CVE_STATUS
cve_check_ignore = d.getVar("CVE_CHECK_IGNORE")
if cve_check_ignore:
bb.warn("CVE_CHECK_IGNORE is deprecated in favor of CVE_STATUS")
for cve in (d.getVar("CVE_CHECK_IGNORE") or "").split():
d.setVarFlag("CVE_STATUS", cve, "ignored")
# Process CVE_STATUS_GROUPS to set multiple statuses and optional detail or description at once
for cve_status_group in (d.getVar("CVE_STATUS_GROUPS") or "").split():
cve_group = d.getVar(cve_status_group)
if cve_group is not None:
for cve in cve_group.split():
d.setVarFlag("CVE_STATUS", cve, d.getVarFlag(cve_status_group, "status"))
else:
bb.warn("CVE_STATUS_GROUPS contains undefined variable %s" % cve_status_group)
}
def generate_json_report(d, out_path, link_path):
if os.path.exists(d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")):
import json
from oe.cve_check import cve_check_merge_jsons, update_symlinks
bb.note("Generating JSON CVE summary")
index_file = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
summary = {"version":"1", "package": []}
with open(index_file) as f:
filename = f.readline()
while filename:
with open(filename.rstrip()) as j:
data = json.load(j)
cve_check_merge_jsons(summary, data)
filename = f.readline()
summary["package"].sort(key=lambda d: d['name'])
with open(out_path, "w") as f:
json.dump(summary, f, indent=2)
update_symlinks(out_path, link_path)
python cve_save_summary_handler () {
import shutil
import datetime
from oe.cve_check import update_symlinks
cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
cve_summary_name = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME")
cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
bb.utils.mkdirhier(cvelogpath)
timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
cve_summary_file = os.path.join(cvelogpath, "%s-%s.txt" % (cve_summary_name, timestamp))
if os.path.exists(cve_tmp_file):
shutil.copyfile(cve_tmp_file, cve_summary_file)
cvefile_link = os.path.join(cvelogpath, cve_summary_name)
update_symlinks(cve_summary_file, cvefile_link)
bb.plain("Complete CVE report summary created at: %s" % cvefile_link)
if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
json_summary_link_name = os.path.join(cvelogpath, d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON"))
json_summary_name = os.path.join(cvelogpath, "%s-%s.json" % (cve_summary_name, timestamp))
generate_json_report(d, json_summary_name, json_summary_link_name)
bb.plain("Complete CVE JSON report summary created at: %s" % json_summary_link_name)
}
addhandler cve_save_summary_handler
cve_save_summary_handler[eventmask] = "bb.event.BuildCompleted"
python do_cve_check () {
"""
Check recipe for patched and unpatched CVEs
"""
from oe.cve_check import get_patched_cves
with bb.utils.fileslocked([d.getVar("CVE_CHECK_DB_FILE_LOCK")], shared=True):
if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
try:
patched_cves = get_patched_cves(d)
except FileNotFoundError:
bb.fatal("Failure in searching patches")
ignored, patched, unpatched, status = check_cves(d, patched_cves)
if patched or unpatched or (d.getVar("CVE_CHECK_COVERAGE") == "1" and status):
cve_data = get_cve_info(d, patched + unpatched + ignored)
cve_write_data(d, patched, unpatched, ignored, cve_data, status)
else:
bb.note("No CVE database found, skipping CVE check")
}
addtask cve_check before do_build
do_cve_check[depends] = "cve-update-nvd2-native:do_unpack"
do_cve_check[nostamp] = "1"
python cve_check_cleanup () {
"""
Delete the file used to gather all the CVE information.
"""
bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE"))
bb.utils.remove(e.data.getVar("CVE_CHECK_SUMMARY_INDEX_PATH"))
}
addhandler cve_check_cleanup
cve_check_cleanup[eventmask] = "bb.event.BuildCompleted"
python cve_check_write_rootfs_manifest () {
"""
Create CVE manifest when building an image
"""
import shutil
import json
from oe.rootfs import image_list_installed_packages
from oe.cve_check import cve_check_merge_jsons, update_symlinks
if d.getVar("CVE_CHECK_COPY_FILES") == "1":
deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
if os.path.exists(deploy_file):
bb.utils.remove(deploy_file)
deploy_file_json = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
if os.path.exists(deploy_file_json):
bb.utils.remove(deploy_file_json)
# Create a list of relevant recipies
recipies = set()
for pkg in list(image_list_installed_packages(d)):
pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
'runtime-reverse', pkg)
pkg_data = oe.packagedata.read_pkgdatafile(pkg_info)
recipies.add(pkg_data["PN"])
bb.note("Writing rootfs CVE manifest")
deploy_dir = d.getVar("IMGDEPLOYDIR")
link_name = d.getVar("IMAGE_LINK_NAME")
json_data = {"version":"1", "package": []}
text_data = ""
enable_json = d.getVar("CVE_CHECK_FORMAT_JSON") == "1"
enable_text = d.getVar("CVE_CHECK_FORMAT_TEXT") == "1"
save_pn = d.getVar("PN")
for pkg in recipies:
# To be able to use the CVE_CHECK_RECIPE_FILE variable we have to evaluate
# it with the different PN names set each time.
d.setVar("PN", pkg)
if enable_text:
pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE")
if os.path.exists(pkgfilepath):
with open(pkgfilepath) as pfile:
text_data += pfile.read()
if enable_json:
pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
if os.path.exists(pkgfilepath):
with open(pkgfilepath) as j:
data = json.load(j)
cve_check_merge_jsons(json_data, data)
d.setVar("PN", save_pn)
if enable_text:
manifest_name = d.getVar("CVE_CHECK_MANIFEST")
with open(manifest_name, "w") as f:
f.write(text_data)
if link_name:
link_path = os.path.join(deploy_dir, "%s.cve" % link_name)
update_symlinks(manifest_name, link_path)
bb.plain("Image CVE report stored in: %s" % manifest_name)
if enable_json:
manifest_name_suffix = d.getVar("CVE_CHECK_MANIFEST_JSON_SUFFIX")
manifest_name = d.getVar("CVE_CHECK_MANIFEST_JSON")
with open(manifest_name, "w") as f:
json.dump(json_data, f, indent=2)
if link_name:
link_path = os.path.join(deploy_dir, "%s.%s" % (link_name, manifest_name_suffix))
update_symlinks(manifest_name, link_path)
bb.plain("Image CVE JSON report stored in: %s" % manifest_name)
}
ROOTFS_POSTPROCESS_COMMAND:prepend = "${@'cve_check_write_rootfs_manifest ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
do_populate_sdk[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
def check_cves(d, patched_cves):
"""
Connect to the NVD database and find unpatched cves.
"""
from oe.cve_check import Version, convert_cve_version, decode_cve_status
pn = d.getVar("PN")
real_pv = d.getVar("PV")
suffix = d.getVar("CVE_VERSION_SUFFIX")
cves_unpatched = []
cves_ignored = []
cves_status = []
cves_in_recipe = False
# CVE_PRODUCT can contain more than one product (eg. curl/libcurl)
products = d.getVar("CVE_PRODUCT").split()
# If this has been unset then we're not scanning for CVEs here (for example, image recipes)
if not products:
return ([], [], [], [])
pv = d.getVar("CVE_VERSION").split("+git")[0]
# If the recipe has been skipped/ignored we return empty lists
if pn in d.getVar("CVE_CHECK_SKIP_RECIPE").split():
bb.note("Recipe has been skipped by cve-check")
return ([], [], [], [])
# Convert CVE_STATUS into ignored CVEs and check validity
cve_ignore = []
for cve in (d.getVarFlags("CVE_STATUS") or {}):
decoded_status, _, _ = decode_cve_status(d, cve)
if decoded_status == "Ignored":
cve_ignore.append(cve)
import sqlite3
db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
conn = sqlite3.connect(db_file, uri=True)
# For each of the known product names (e.g. curl has CPEs using curl and libcurl)...
for product in products:
cves_in_product = False
if ":" in product:
vendor, product = product.split(":", 1)
else:
vendor = "%"
# Find all relevant CVE IDs.
cve_cursor = conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor))
for cverow in cve_cursor:
cve = cverow[0]
if cve in cve_ignore:
bb.note("%s-%s ignores %s" % (product, pv, cve))
cves_ignored.append(cve)
continue
elif cve in patched_cves:
bb.note("%s has been patched" % (cve))
continue
# Write status once only for each product
if not cves_in_product:
cves_status.append([product, True])
cves_in_product = True
cves_in_recipe = True
vulnerable = False
ignored = False
product_cursor = conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor))
for row in product_cursor:
(_, _, _, version_start, operator_start, version_end, operator_end) = row
#bb.debug(2, "Evaluating row " + str(row))
if cve in cve_ignore:
ignored = True
version_start = convert_cve_version(version_start)
version_end = convert_cve_version(version_end)
if (operator_start == '=' and pv == version_start) or version_start == '-':
vulnerable = True
else:
if operator_start:
try:
vulnerable_start = (operator_start == '>=' and Version(pv,suffix) >= Version(version_start,suffix))
vulnerable_start |= (operator_start == '>' and Version(pv,suffix) > Version(version_start,suffix))
except:
bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_start, version_start, cve))
vulnerable_start = False
else:
vulnerable_start = False
if operator_end:
try:
vulnerable_end = (operator_end == '<=' and Version(pv,suffix) <= Version(version_end,suffix) )
vulnerable_end |= (operator_end == '<' and Version(pv,suffix) < Version(version_end,suffix) )
except:
bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_end, version_end, cve))
vulnerable_end = False
else:
vulnerable_end = False
if operator_start and operator_end:
vulnerable = vulnerable_start and vulnerable_end
else:
vulnerable = vulnerable_start or vulnerable_end
if vulnerable:
if ignored:
bb.note("%s is ignored in %s-%s" % (cve, pn, real_pv))
cves_ignored.append(cve)
else:
bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
cves_unpatched.append(cve)
break
product_cursor.close()
if not vulnerable:
bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve))
patched_cves.add(cve)
cve_cursor.close()
if not cves_in_product:
bb.note("No CVE records found for product %s, pn %s" % (product, pn))
cves_status.append([product, False])
conn.close()
diff_ignore = list(set(cve_ignore) - set(cves_ignored))
if diff_ignore:
oe.qa.handle_error("cve_status_not_in_db", "Found CVE (%s) with CVE_STATUS set that are not found in database for this component" % " ".join(diff_ignore), d)
if not cves_in_recipe:
bb.note("No CVE records for products in recipe %s" % (pn))
return (list(cves_ignored), list(patched_cves), cves_unpatched, cves_status)
def get_cve_info(d, cves):
"""
Get CVE information from the database.
"""
import sqlite3
cve_data = {}
db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
conn = sqlite3.connect(db_file, uri=True)
for cve in cves:
cursor = conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,))
for row in cursor:
cve_data[row[0]] = {}
cve_data[row[0]]["summary"] = row[1]
cve_data[row[0]]["scorev2"] = row[2]
cve_data[row[0]]["scorev3"] = row[3]
cve_data[row[0]]["scorev4"] = row[4]
cve_data[row[0]]["modified"] = row[5]
cve_data[row[0]]["vector"] = row[6]
cve_data[row[0]]["vectorString"] = row[7]
cursor.close()
conn.close()
return cve_data
def cve_write_data_text(d, patched, unpatched, ignored, cve_data):
"""
Write CVE information to WORKDIR, to CVE_CHECK_DIR, and to the
CVE manifest if enabled.
"""
from oe.cve_check import decode_cve_status
cve_file = d.getVar("CVE_CHECK_LOG")
fdir_name = d.getVar("FILE_DIRNAME")
layer = fdir_name.split("/")[-3]
include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
if exclude_layers and layer in exclude_layers:
return
if include_layers and layer not in include_layers:
return
# Early exit, the text format does not report packages without CVEs
if not patched+unpatched+ignored:
return
nvd_link = "https://nvd.nist.gov/vuln/detail/"
write_string = ""
unpatched_cves = []
bb.utils.mkdirhier(os.path.dirname(cve_file))
for cve in sorted(cve_data):
is_patched = cve in patched
is_ignored = cve in ignored
status = "Unpatched"
if (is_patched or is_ignored) and not report_all:
continue
if is_ignored:
status = "Ignored"
elif is_patched:
status = "Patched"
else:
# default value of status is Unpatched
unpatched_cves.append(cve)
write_string += "LAYER: %s\n" % layer
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
write_string += "CVE: %s\n" % cve
write_string += "CVE STATUS: %s\n" % status
_, detail, description = decode_cve_status(d, cve)
if detail:
write_string += "CVE DETAIL: %s\n" % detail
if description:
write_string += "CVE DESCRIPTION: %s\n" % description
write_string += "CVE SUMMARY: %s\n" % cve_data[cve]["summary"]
write_string += "CVSS v2 BASE SCORE: %s\n" % cve_data[cve]["scorev2"]
write_string += "CVSS v3 BASE SCORE: %s\n" % cve_data[cve]["scorev3"]
write_string += "CVSS v4 BASE SCORE: %s\n" % cve_data[cve]["scorev4"]
write_string += "VECTOR: %s\n" % cve_data[cve]["vector"]
write_string += "VECTORSTRING: %s\n" % cve_data[cve]["vectorString"]
write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve)
if unpatched_cves and d.getVar("CVE_CHECK_SHOW_WARNINGS") == "1":
bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file))
with open(cve_file, "w") as f:
bb.note("Writing file %s with CVE information" % cve_file)
f.write(write_string)
if d.getVar("CVE_CHECK_COPY_FILES") == "1":
deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
bb.utils.mkdirhier(os.path.dirname(deploy_file))
with open(deploy_file, "w") as f:
f.write(write_string)
if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
bb.utils.mkdirhier(cvelogpath)
with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
f.write("%s" % write_string)
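# Illustrative sketch of a single record in the text report assembled above;
# the package name, version and CVE identifier below are hypothetical:
#
#   LAYER: meta
#   PACKAGE NAME: openssl
#   PACKAGE VERSION: 3.1.4
#   CVE: CVE-2023-XXXX
#   CVE STATUS: Unpatched
#   CVE SUMMARY: ...
#   CVSS v2 BASE SCORE: 0.0
#   CVSS v3 BASE SCORE: 7.5
#   CVSS v4 BASE SCORE: 0.0
#   VECTOR: NETWORK
#   VECTORSTRING: CVSS:3.1/...
#   MORE INFORMATION: https://nvd.nist.gov/vuln/detail/CVE-2023-XXXX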
def cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file):
"""
Write CVE information in the JSON format to WORKDIR and to
CVE_CHECK_DIR. If the CVE manifest is enabled, write fragment
files that will be assembled at the end in cve_check_write_rootfs_manifest.
"""
import json
write_string = json.dumps(output, indent=2)
with open(direct_file, "w") as f:
bb.note("Writing file %s with CVE information" % direct_file)
f.write(write_string)
if d.getVar("CVE_CHECK_COPY_FILES") == "1":
bb.utils.mkdirhier(os.path.dirname(deploy_file))
with open(deploy_file, "w") as f:
f.write(write_string)
if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
index_path = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
bb.utils.mkdirhier(cvelogpath)
fragment_file = os.path.basename(deploy_file)
fragment_path = os.path.join(cvelogpath, fragment_file)
with open(fragment_path, "w") as f:
f.write(write_string)
with open(index_path, "a+") as f:
f.write("%s\n" % fragment_path)
def cve_write_data_json(d, patched, unpatched, ignored, cve_data, cve_status):
"""
Prepare CVE data for the JSON format, then write it.
"""
from oe.cve_check import decode_cve_status
output = {"version":"1", "package": []}
nvd_link = "https://nvd.nist.gov/vuln/detail/"
fdir_name = d.getVar("FILE_DIRNAME")
layer = fdir_name.split("/")[-3]
include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
if exclude_layers and layer in exclude_layers:
return
if include_layers and layer not in include_layers:
return
unpatched_cves = []
product_data = []
for s in cve_status:
p = {"product": s[0], "cvesInRecord": "Yes"}
if not s[1]:
p["cvesInRecord"] = "No"
product_data.append(p)
package_version = "%s%s" % (d.getVar("EXTENDPE"), d.getVar("PV"))
package_data = {
"name" : d.getVar("PN"),
"layer" : layer,
"version" : package_version,
"products": product_data
}
cve_list = []
for cve in sorted(cve_data):
is_patched = cve in patched
is_ignored = cve in ignored
status = "Unpatched"
if (is_patched or is_ignored) and not report_all:
continue
if is_ignored:
status = "Ignored"
elif is_patched:
status = "Patched"
else:
# default value of status is Unpatched
unpatched_cves.append(cve)
issue_link = "%s%s" % (nvd_link, cve)
cve_item = {
"id" : cve,
"summary" : cve_data[cve]["summary"],
"scorev2" : cve_data[cve]["scorev2"],
"scorev3" : cve_data[cve]["scorev3"],
"scorev4" : cve_data[cve]["scorev4"],
"vector" : cve_data[cve]["vector"],
"vectorString" : cve_data[cve]["vectorString"],
"status" : status,
"link": issue_link
}
_, detail, description = decode_cve_status(d, cve)
if detail:
cve_item["detail"] = detail
if description:
cve_item["description"] = description
cve_list.append(cve_item)
package_data["issue"] = cve_list
output["package"].append(package_data)
direct_file = d.getVar("CVE_CHECK_LOG_JSON")
deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
manifest_file = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON")
cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file)
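# Illustrative sketch of the JSON document produced by cve_write_data_json()
# above; the field names follow the code, while the package and CVE values
# are hypothetical:
#
# {
#   "version": "1",
#   "package": [
#     {
#       "name": "openssl",
#       "layer": "meta",
#       "version": "3.1.4",
#       "products": [ { "product": "openssl", "cvesInRecord": "Yes" } ],
#       "issue": [
#         { "id": "CVE-2023-XXXX", "summary": "...", "scorev2": "0.0",
#           "scorev3": "7.5", "scorev4": "0.0", "vector": "NETWORK",
#           "vectorString": "CVSS:3.1/...", "status": "Unpatched",
#           "link": "https://nvd.nist.gov/vuln/detail/CVE-2023-XXXX" }
#       ]
#     }
#   ]
# }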
def cve_write_data(d, patched, unpatched, ignored, cve_data, status):
"""
Write CVE data in each enabled format.
"""
if d.getVar("CVE_CHECK_FORMAT_TEXT") == "1":
cve_write_data_text(d, patched, unpatched, ignored, cve_data)
if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
cve_write_data_json(d, patched, unpatched, ignored, cve_data, status)

View File

@@ -0,0 +1,244 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# Development tool - source extraction helper class
#
# NOTE: this class is intended for use by devtool and should not be
# inherited manually.
#
# Copyright (C) 2014-2017 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
DEVTOOL_TEMPDIR ?= ""
DEVTOOL_PATCH_SRCDIR = "${DEVTOOL_TEMPDIR}/patchworkdir"
python() {
tempdir = d.getVar('DEVTOOL_TEMPDIR')
if not tempdir:
bb.fatal('devtool-source class is for internal use by devtool only')
# Make a subdir so we guard against WORKDIR==S
workdir = os.path.join(tempdir, 'workdir')
d.setVar('WORKDIR', workdir)
if not d.getVar('S').startswith(workdir):
# Usually a shared workdir recipe (kernel, gcc)
# Try to set a reasonable default
if bb.data.inherits_class('kernel', d):
d.setVar('S', '${WORKDIR}/source')
else:
d.setVar('S', '${WORKDIR}/%s' % os.path.basename(d.getVar('S')))
if bb.data.inherits_class('kernel', d):
# We don't want to move the source to STAGING_KERNEL_DIR here
d.setVar('STAGING_KERNEL_DIR', '${S}')
d.setVar('STAMPS_DIR', os.path.join(tempdir, 'stamps'))
d.setVar('T', os.path.join(tempdir, 'temp'))
# Hook in pre/postfuncs
is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
if is_kernel_yocto:
unpacktask = 'do_kernel_checkout'
d.appendVarFlag('do_configure', 'postfuncs', ' devtool_post_configure')
else:
unpacktask = 'do_unpack'
d.appendVarFlag(unpacktask, 'postfuncs', ' devtool_post_unpack')
d.prependVarFlag('do_patch', 'prefuncs', ' devtool_pre_patch')
d.appendVarFlag('do_patch', 'postfuncs', ' devtool_post_patch')
# NOTE: in order for the patch stuff to be fully functional,
# PATCHTOOL and PATCH_COMMIT_FUNCTIONS need to be set; we can't
# do that here because we can't guarantee the order of the anonymous
# functions, so it gets done in the bbappend we create.
}
python devtool_post_unpack() {
import oe.recipeutils
import shutil
sys.path.insert(0, os.path.join(d.getVar('COREBASE'), 'scripts', 'lib'))
import scriptutils
from devtool import setup_git_repo
tempdir = d.getVar('DEVTOOL_TEMPDIR')
workdir = d.getVar('WORKDIR')
srcsubdir = d.getVar('S')
def _move_file(src, dst):
"""Move a file. Creates all the directory components of destination path."""
dst_d = os.path.dirname(dst)
if dst_d:
bb.utils.mkdirhier(dst_d)
shutil.move(src, dst)
def _ls_tree(directory):
"""Recursive listing of files in a directory"""
ret = []
for root, dirs, files in os.walk(directory):
ret.extend([os.path.relpath(os.path.join(root, fname), directory) for
fname in files])
return ret
is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
# Move local source files into separate subdir
recipe_patches = [os.path.basename(patch) for patch in
oe.recipeutils.get_recipe_patches(d)]
local_files = oe.recipeutils.get_recipe_local_files(d)
if is_kernel_yocto:
for key in [f for f in local_files if f.endswith('scc')]:
with open(local_files[key], 'r') as sccfile:
for l in sccfile:
line = l.split()
if line and line[0] in ('kconf', 'patch'):
cfg = os.path.join(os.path.dirname(local_files[key]), line[-1])
if cfg not in local_files.values():
local_files[line[-1]] = cfg
shutil.copy2(cfg, workdir)
# Ignore local files with subdir={BP}
srcabspath = os.path.abspath(srcsubdir)
local_files = [fname for fname in local_files if
os.path.exists(os.path.join(workdir, fname)) and
(srcabspath == workdir or not
os.path.join(workdir, fname).startswith(srcabspath +
os.sep))]
if local_files:
for fname in local_files:
_move_file(os.path.join(workdir, fname),
os.path.join(tempdir, 'oe-local-files', fname))
with open(os.path.join(tempdir, 'oe-local-files', '.gitignore'),
'w') as f:
f.write('# Ignore local files, by default. Remove this file '
'if you want to commit the directory to Git\n*\n')
if srcsubdir == workdir:
# Find non-patch non-local sources that were "unpacked" to srctree
# directory
src_files = [fname for fname in _ls_tree(workdir) if
os.path.basename(fname) not in recipe_patches]
srcsubdir = d.getVar('DEVTOOL_PATCH_SRCDIR')
# Move source files to S
for path in src_files:
_move_file(os.path.join(workdir, path),
os.path.join(srcsubdir, path))
elif os.path.dirname(srcsubdir) != workdir:
# Handle if S is set to a subdirectory of the source
srcsubdir = os.path.join(workdir, os.path.relpath(srcsubdir, workdir).split(os.sep)[0])
scriptutils.git_convert_standalone_clone(srcsubdir)
# Make sure that srcsubdir exists
bb.utils.mkdirhier(srcsubdir)
if not os.listdir(srcsubdir):
bb.warn("No source unpacked to S - either the %s recipe "
"doesn't use any source or the correct source "
"directory could not be determined" % d.getVar('PN'))
devbranch = d.getVar('DEVTOOL_DEVBRANCH')
setup_git_repo(srcsubdir, d.getVar('PV'), devbranch, d=d)
(stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srcsubdir)
initial_rev = stdout.rstrip()
with open(os.path.join(tempdir, 'initial_rev'), 'w') as f:
f.write(initial_rev)
with open(os.path.join(tempdir, 'srcsubdir'), 'w') as f:
f.write(srcsubdir)
}
python devtool_pre_patch() {
if d.getVar('S') == d.getVar('WORKDIR'):
d.setVar('S', '${DEVTOOL_PATCH_SRCDIR}')
}
python devtool_post_patch() {
import shutil
tempdir = d.getVar('DEVTOOL_TEMPDIR')
with open(os.path.join(tempdir, 'srcsubdir'), 'r') as f:
srcsubdir = f.read()
with open(os.path.join(tempdir, 'initial_rev'), 'r') as f:
initial_rev = f.read()
def rm_patches():
patches_dir = os.path.join(srcsubdir, 'patches')
if os.path.exists(patches_dir):
shutil.rmtree(patches_dir)
# Restore any "patches" directory that was actually part of the source tree
try:
bb.process.run('git checkout -- patches', cwd=srcsubdir)
except bb.process.ExecutionError:
pass
extra_overrides = d.getVar('DEVTOOL_EXTRA_OVERRIDES')
if extra_overrides:
extra_overrides = set(extra_overrides.split(':'))
devbranch = d.getVar('DEVTOOL_DEVBRANCH')
default_overrides = d.getVar('OVERRIDES').split(':')
no_overrides = []
# First, we may have some overrides that are referred to in the recipe set in
# our configuration, so we need to make a branch that excludes those
for override in default_overrides:
if override not in extra_overrides:
no_overrides.append(override)
if default_overrides != no_overrides:
# Some overrides are active in the current configuration, so
# we need to create a branch where none of the overrides are active
bb.process.run('git checkout %s -b devtool-no-overrides' % initial_rev, cwd=srcsubdir)
# Run do_patch function with the override applied
localdata = bb.data.createCopy(d)
localdata.setVar('OVERRIDES', ':'.join(no_overrides))
localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides))
bb.build.exec_func('do_patch', localdata)
rm_patches()
# Now we need to reconcile the dev branch with the no-overrides one
# (otherwise we'd likely be left with identical commits that have different hashes)
bb.process.run('git checkout %s' % devbranch, cwd=srcsubdir)
bb.process.run('git rebase devtool-no-overrides', cwd=srcsubdir)
else:
bb.process.run('git checkout %s -b devtool-no-overrides' % devbranch, cwd=srcsubdir)
for override in extra_overrides:
localdata = bb.data.createCopy(d)
if override in default_overrides:
bb.process.run('git branch devtool-override-%s %s' % (override, devbranch), cwd=srcsubdir)
else:
# Reset back to the initial commit on a new branch
bb.process.run('git checkout %s -b devtool-override-%s' % (initial_rev, override), cwd=srcsubdir)
# Run do_patch function with the override applied
localdata.setVar('OVERRIDES', ':'.join(no_overrides + [override]))
localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides + [override]))
bb.build.exec_func('do_patch', localdata)
rm_patches()
# Now we need to reconcile the new branch with the no-overrides one
# (otherwise we'd likely be left with identical commits that have different hashes)
bb.process.run('git rebase devtool-no-overrides', cwd=srcsubdir)
bb.process.run('git checkout %s' % devbranch, cwd=srcsubdir)
bb.process.run('git tag -f devtool-patched', cwd=srcsubdir)
if os.path.exists(os.path.join(srcsubdir, '.gitmodules')):
bb.process.run('git submodule foreach --recursive "git tag -f devtool-patched"', cwd=srcsubdir)
}
python devtool_post_configure() {
import shutil
tempdir = d.getVar('DEVTOOL_TEMPDIR')
shutil.copy2(os.path.join(d.getVar('B'), '.config'), tempdir)
}

View File

@@ -0,0 +1,38 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# Turns certain DISTRO_FEATURES into overrides with the same
# name plus a df- prefix. Ensures that these special
# distro features remain set also for native and nativesdk
# recipes, so that these overrides can also be used there.
#
# This makes it simpler to write .bbappends that only change the
# task signatures of the recipe if the change is really enabled,
# for example with:
# do_install:append:df-my-feature () { ... }
# where "my-feature" is a DISTRO_FEATURE.
#
# The class is meant to be used in a layer.conf or distro
# .inc file with:
# INHERIT += "distrooverrides"
# DISTRO_FEATURES_OVERRIDES += "my-feature"
#
# Beware that this part of OVERRIDES changes during parsing, so usage
# of these overrides should be limited to .bb and .bbappend files,
# because then DISTRO_FEATURES is final.
DISTRO_FEATURES_OVERRIDES ?= ""
DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \
Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES."
DISTRO_FEATURES_FILTER_NATIVE:append = " ${DISTRO_FEATURES_OVERRIDES}"
DISTRO_FEATURES_FILTER_NATIVESDK:append = " ${DISTRO_FEATURES_OVERRIDES}"
# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task
# signature because of this line, then the task dependency on
# OVERRIDES itself should be fixed. Excluding these two variables
# with DISTROOVERRIDES[vardepsexclude] would just work around the problem.
DISTROOVERRIDES .= "${@ ''.join([':df-' + x for x in sorted(set(d.getVar('DISTRO_FEATURES_OVERRIDES').split()) & set((d.getVar('DISTRO_FEATURES') or '').split()))]) }"
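# Worked example (feature name as in the usage notes above): with
#   DISTRO_FEATURES_OVERRIDES = "my-feature"
# and "my-feature" present in DISTRO_FEATURES, the expression above appends
# ":df-my-feature" to DISTROOVERRIDES, enabling e.g. do_install:append:df-my-feature.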

View File

@@ -0,0 +1,274 @@
# Copyright (C) 2012 Linux Foundation
# Author: Richard Purdie
# Some code and influence taken from srctree.bbclass:
# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
#
# SPDX-License-Identifier: MIT
#
# externalsrc.bbclass enables use of an existing source tree, usually external to
# the build system to build a piece of software rather than the usual fetch/unpack/patch
# process.
#
# To use, add externalsrc to the global inherit and set EXTERNALSRC to point at the
# directory you want to use containing the sources e.g. from local.conf for a recipe
# called "myrecipe" you would do:
#
# INHERIT += "externalsrc"
# EXTERNALSRC:pn-myrecipe = "/path/to/my/source/tree"
#
# In order to make this class work for both target and native versions (or with
# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
# directory under the work directory (split source and build directories). This is
# the default, but the build directory can be set to the source directory if
# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
#
# EXTERNALSRC_BUILD:pn-myrecipe = "/path/to/my/source/tree"
#
SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
EXTERNALSRC_SYMLINKS ?= "oe-workdir:${WORKDIR} oe-logs:${T}"
python () {
externalsrc = d.getVar('EXTERNALSRC')
externalsrcbuild = d.getVar('EXTERNALSRC_BUILD')
if externalsrc and not externalsrc.startswith("/"):
bb.error("EXTERNALSRC must be an absolute path")
if externalsrcbuild and not externalsrcbuild.startswith("/"):
bb.error("EXTERNALSRC_BUILD must be an absolute path")
# If this is the base recipe and EXTERNALSRC is set for it or any of its
# derivatives, then enable BB_DONT_CACHE to force the recipe to always be
# re-parsed so that the file-checksums function for do_compile is run every
# time.
bpn = d.getVar('BPN')
classextend = (d.getVar('BBCLASSEXTEND') or '').split()
if bpn == d.getVar('PN') or not classextend:
if (externalsrc or
('native' in classextend and
d.getVar('EXTERNALSRC:pn-%s-native' % bpn)) or
('nativesdk' in classextend and
d.getVar('EXTERNALSRC:pn-nativesdk-%s' % bpn)) or
('cross' in classextend and
d.getVar('EXTERNALSRC:pn-%s-cross' % bpn))):
d.setVar('BB_DONT_CACHE', '1')
if externalsrc:
import oe.recipeutils
import oe.path
d.setVar('S', externalsrc)
if externalsrcbuild:
d.setVar('B', externalsrcbuild)
else:
d.setVar('B', '${WORKDIR}/${BPN}-${PV}')
bb.fetch.get_hashvalue(d)
local_srcuri = []
fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
for url in fetch.urls:
url_data = fetch.ud[url]
parm = url_data.parm
if url_data.type in ['file', 'npmsw', 'crate'] or parm.get('type') in ['kmeta', 'git-dependency']:
local_srcuri.append(url)
d.setVar('SRC_URI', ' '.join(local_srcuri))
# sstate is never going to work for external source trees, disable it
d.setVar('SSTATE_SKIP_CREATION', '1')
if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
d.setVar('CONFIGUREOPT_DEPTRACK', '')
tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
for task in tasks:
if os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
# Since configure will likely touch ${S}, ensure only we lock so one task has access at a time
d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
for v in d.keys():
cleandirs = d.getVarFlag(v, "cleandirs", False)
if cleandirs:
# We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
cleandirs = oe.recipeutils.split_var_value(cleandirs)
setvalue = False
for cleandir in cleandirs[:]:
if oe.path.is_path_parent(externalsrc, d.expand(cleandir)):
cleandirs.remove(cleandir)
setvalue = True
if setvalue:
d.setVarFlag(v, 'cleandirs', ' '.join(cleandirs))
fetch_tasks = ['do_fetch', 'do_unpack']
# If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
# Note that we cannot use d.appendVarFlag() here because deps is expected to be a list object, not a string
d.setVarFlag('do_configure', 'deps', (d.getVarFlag('do_configure', 'deps', False) or []) + ['do_unpack'])
d.setVarFlag('do_populate_lic', 'deps', (d.getVarFlag('do_populate_lic', 'deps', False) or []) + ['do_unpack'])
for task in d.getVar("SRCTREECOVEREDTASKS").split():
if local_srcuri and task in fetch_tasks:
continue
bb.build.deltask(task, d)
if task == 'do_unpack':
# The reproducible build create_source_date_epoch_stamp function must
# be run after the source is available and before the
# do_deploy_source_date_epoch task. In the normal case, it's attached
# to do_unpack as a postfuncs, but since we removed do_unpack (above)
# we need to move the function elsewhere. The easiest thing to do is
# move it into the prefuncs of the do_deploy_source_date_epoch task.
# This is safe, as externalsrc runs with the source already unpacked.
d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ')
d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
d.setVarFlag('do_configure', 'file-checksums', '${@srctree_configure_hash_files(d)}')
d.appendVarFlag('do_compile', 'prefuncs', ' fetcher_hashes_dummyfunc')
d.appendVarFlag('do_configure', 'prefuncs', ' fetcher_hashes_dummyfunc')
# We don't want the workdir to go away
d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN'))
bb.build.addtask('do_buildclean',
'do_clean' if d.getVar('S') == d.getVar('B') else None,
None, d)
# If B=S the same builddir is used even for different architectures.
# Thus, use a shared CONFIGURESTAMPFILE and STAMP directory so that
# change of do_configure task hash is correctly detected and stamps are
# invalidated if e.g. MACHINE changes.
if d.getVar('S') == d.getVar('B'):
configstamp = '${TMPDIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}/configure.sstate'
d.setVar('CONFIGURESTAMPFILE', configstamp)
d.setVar('STAMP', '${STAMPS_DIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}')
d.setVar('STAMPCLEAN', '${STAMPS_DIR}/work-shared/${PN}/*-*')
}
python externalsrc_configure_prefunc() {
s_dir = d.getVar('S')
# Create desired symlinks
symlinks = (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()
newlinks = []
for symlink in symlinks:
symsplit = symlink.split(':', 1)
lnkfile = os.path.join(s_dir, symsplit[0])
target = d.expand(symsplit[1])
if len(symsplit) > 1:
if os.path.islink(lnkfile):
# Link already exists, leave it if it points to the right location already
if os.readlink(lnkfile) == target:
continue
os.unlink(lnkfile)
elif os.path.exists(lnkfile):
# File/dir exists with same name as link, just leave it alone
continue
os.symlink(target, lnkfile)
newlinks.append(symsplit[0])
# Hide the symlinks from git
try:
git_exclude_file = os.path.join(s_dir, '.git/info/exclude')
if os.path.exists(git_exclude_file):
with open(git_exclude_file, 'r+') as efile:
elines = [line.rstrip('\n') for line in efile.readlines()]
for link in newlinks:
# Compare without trailing newlines so existing entries are detected
if link in elines or '/'+link in elines:
continue
efile.write('/' + link + '\n')
except IOError as ioe:
bb.note('Failed to hide EXTERNALSRC_SYMLINKS from git')
}
python externalsrc_compile_prefunc() {
# Make it obvious that this is happening, since forgetting about it could lead to much confusion
bb.plain('NOTE: %s: compiling from external source tree %s' % (d.getVar('PN'), d.getVar('EXTERNALSRC')))
}
do_buildclean[dirs] = "${S} ${B}"
do_buildclean[nostamp] = "1"
do_buildclean[doc] = "Call 'make clean' or equivalent in ${B}"
externalsrc_do_buildclean() {
if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
rm -f ${@' '.join([x.split(':')[0] for x in (d.getVar('EXTERNALSRC_SYMLINKS') or '').split()])}
if [ "${CLEANBROKEN}" != "1" ]; then
oe_runmake clean || die "make failed"
fi
else
bbnote "nothing to do - no makefile found"
fi
}
def srctree_hash_files(d, srcdir=None):
import shutil
import subprocess
import tempfile
import hashlib
s_dir = srcdir or d.getVar('EXTERNALSRC')
git_dir = None
try:
git_dir = os.path.join(s_dir,
subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
top_git_dir = os.path.join(d.getVar("TOPDIR"),
subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
if git_dir == top_git_dir:
git_dir = None
except subprocess.CalledProcessError:
pass
ret = " "
if git_dir is not None:
oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1-%s' % d.getVar('PN'))
with tempfile.NamedTemporaryFile(prefix='oe-devtool-index') as tmp_index:
# Clone index
shutil.copyfile(os.path.join(git_dir, 'index'), tmp_index.name)
# Update our custom index
env = os.environ.copy()
env['GIT_INDEX_FILE'] = tmp_index.name
subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
if os.path.exists(os.path.join(s_dir, ".gitmodules")) and os.path.getsize(os.path.join(s_dir, ".gitmodules")) > 0:
submodule_helper = subprocess.check_output(["git", "config", "--file", ".gitmodules", "--get-regexp", "path"], cwd=s_dir, env=env).decode("utf-8")
for line in submodule_helper.splitlines():
module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
if os.path.isdir(module_dir):
proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
proc.communicate()
proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, _ = proc.communicate()
git_sha1 += stdout.decode("utf-8")
sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
ret = oe_hash_file + ':True'
else:
ret = s_dir + '/*:True'
return ret
def srctree_configure_hash_files(d):
"""
Get the list of files that should trigger do_configure to re-execute,
based on the value of CONFIGURE_FILES
"""
import fnmatch
in_files = (d.getVar('CONFIGURE_FILES') or '').split()
out_items = []
search_files = []
for entry in in_files:
if entry.startswith('/'):
out_items.append('%s:%s' % (entry, os.path.exists(entry)))
else:
search_files.append(entry)
if search_files:
s_dir = d.getVar('EXTERNALSRC')
for root, _, files in os.walk(s_dir):
for p in search_files:
for f in fnmatch.filter(files, p):
out_items.append('%s:True' % os.path.join(root, f))
return ' '.join(out_items)
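# Illustrative sketch, assuming CONFIGURE_FILES contains "configure.ac" (as set
# by e.g. the autotools class) and the file exists in the external source tree:
# the walk above would emit an entry such as
#   /path/to/srctree/configure.ac:True
# which BitBake then uses as a file-checksum dependency for do_configure.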
EXPORT_FUNCTIONS do_buildclean

View File

@@ -0,0 +1,77 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# This bbclass is used for image level user/group configuration.
# Inherit this class if you want to make EXTRA_USERS_PARAMS effective.
# Below is an example showing how to use this functionality.
# IMAGE_CLASSES += "extrausers"
# EXTRA_USERS_PARAMS = "\
# useradd -p '' tester; \
# groupadd developers; \
# userdel nobody; \
# groupdel -g video; \
# groupmod -g 1020 developers; \
# usermod -s /bin/sh tester; \
# "
inherit useradd_base
PACKAGE_INSTALL:append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
# Image level user / group settings
ROOTFS_POSTPROCESS_COMMAND:append = " set_user_group"
# Image level user / group settings
set_user_group () {
user_group_settings="${EXTRA_USERS_PARAMS}"
export PSEUDO="${FAKEROOTENV} ${STAGING_DIR_NATIVE}${bindir}/pseudo"
setting=`echo $user_group_settings | cut -d ';' -f1`
remaining=`echo $user_group_settings | cut -d ';' -f2-`
while test "x$setting" != "x"; do
cmd=`echo $setting | cut -d ' ' -f1`
opts=`echo $setting | cut -d ' ' -f2-`
# Unlike useradd.bbclass, there's no file locking issue here, as
# this setting is actually a serial process, so we only retry once.
case $cmd in
useradd)
perform_useradd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
;;
groupadd)
perform_groupadd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
;;
userdel)
perform_userdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
;;
groupdel)
perform_groupdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
;;
usermod)
perform_usermod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
;;
passwd-expire)
perform_passwd_expire "${IMAGE_ROOTFS}" "$opts"
;;
groupmod)
perform_groupmod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
;;
*)
bbfatal "Invalid command in EXTRA_USERS_PARAMS: $cmd"
;;
esac
# Avoid infinite loop if the last parameter doesn't end with ';'
if [ "$setting" = "$remaining" ]; then
break
fi
# iterate to the next setting
setting=`echo $remaining | cut -d ';' -f1`
remaining=`echo $remaining | cut -d ';' -f2-`
done
}
USERADDEXTENSION ?= ""
inherit ${USERADDEXTENSION}

View File

@@ -0,0 +1,211 @@
#
# Copyright 2023 (C) Weidmueller GmbH & Co KG
# Author: Lukas Funke <lukas.funke@weidmueller.com>
#
# Handle Go vendor support for offline builds
#
# When importing Go modules, Go downloads the imported modules using
# a network (proxy) connection ahead of the compile stage. This contradicts
# the yocto build concept of fetching every source ahead of build-time
# and supporting offline builds.
#
# To support offline builds, we use Go 'vendoring': module dependencies are
# downloaded during the fetch-phase and unpacked into the modules 'vendor'
# folder. Additionally a manifest file is generated for the 'vendor' folder
#
inherit go-mod
def go_src_uri(repo, version, path=None, subdir=None, \
vcs='git', replaces=None, pathmajor=None):
destsuffix = "git/src/import/vendor.fetch"
module_path = repo if not path else path
src_uri = "{}://{};name={}".format(vcs, repo, module_path.replace('/', '.'))
src_uri += ";destsuffix={}/{}@{}".format(destsuffix, repo, version)
if vcs == "git":
src_uri += ";nobranch=1;protocol=https"
src_uri += ";go_module_path={}".format(module_path)
if replaces:
src_uri += ";go_module_replacement={}".format(replaces)
if subdir:
src_uri += ";go_subdir={}".format(subdir)
if pathmajor:
src_uri += ";go_pathmajor={}".format(pathmajor)
src_uri += ";is_go_dependency=1"
return src_uri
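# Illustrative example of the URI built above, using a hypothetical module
# "github.com/foo/bar" at version "v1.2.3" (no replaces/subdir/pathmajor):
#   go_src_uri("github.com/foo/bar", "v1.2.3") ->
#   git://github.com/foo/bar;name=github.com.foo.bar;destsuffix=git/src/import/vendor.fetch/github.com/foo/bar@v1.2.3;nobranch=1;protocol=https;go_module_path=github.com/foo/bar;is_go_dependency=1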
python do_vendor_unlink() {
go_import = d.getVar('GO_IMPORT')
source_dir = d.getVar('S')
linkname = os.path.join(source_dir, *['src', go_import, 'vendor'])
os.unlink(linkname)
}
addtask vendor_unlink before do_package after do_install
python do_go_vendor() {
import shutil
src_uri = (d.getVar('SRC_URI') or "").split()
if not src_uri:
bb.fatal("SRC_URI is empty")
default_destsuffix = "git/src/import/vendor.fetch"
fetcher = bb.fetch2.Fetch(src_uri, d)
go_import = d.getVar('GO_IMPORT')
source_dir = d.getVar('S')
linkname = os.path.join(source_dir, *['src', go_import, 'vendor'])
vendor_dir = os.path.join(source_dir, *['src', 'import', 'vendor'])
import_dir = os.path.join(source_dir, *['src', 'import', 'vendor.fetch'])
if os.path.exists(vendor_dir):
# Nothing to do except re-establish link to actual vendor folder
if not os.path.exists(linkname):
os.symlink(vendor_dir, linkname)
return
bb.utils.mkdirhier(vendor_dir)
modules = {}
for url in fetcher.urls:
srcuri = fetcher.ud[url].host + fetcher.ud[url].path
# Skip non Go module src uris
if not fetcher.ud[url].parm.get('is_go_dependency'):
continue
destsuffix = fetcher.ud[url].parm.get('destsuffix')
# We derive the module repo / version in the following manner (example):
#
# destsuffix = git/src/import/vendor.fetch/github.com/foo/bar@v1.2.3
# p = github.com/foo/bar@v1.2.3
# repo = github.com/foo/bar
# version = v1.2.3
p = destsuffix[len(default_destsuffix)+1:]
repo, version = p.split('@')
module_path = fetcher.ud[url].parm.get('go_module_path')
subdir = fetcher.ud[url].parm.get('go_subdir')
subdir = None if not subdir else subdir
pathMajor = fetcher.ud[url].parm.get('go_pathmajor')
pathMajor = None if not pathMajor else pathMajor.strip('/')
if not (repo, version) in modules:
modules[(repo, version)] = {
"repo_path": os.path.join(import_dir, p),
"module_path": module_path,
"subdir": subdir,
"pathMajor": pathMajor }
for module_key, module in modules.items():
# only take the version which is explicitly listed
# as a dependency in the go.mod
module_path = module['module_path']
rootdir = module['repo_path']
subdir = module['subdir']
pathMajor = module['pathMajor']
src = rootdir
if subdir:
src = os.path.join(rootdir, subdir)
# If the module is released at major version 2 or higher, the module
# path must end with a major version suffix like /v2.
# This may or may not be part of the subdirectory name
#
# https://go.dev/ref/mod#modules-overview
if pathMajor:
tmp = os.path.join(src, pathMajor)
# source directory including major version path may or may not exist
if os.path.exists(tmp):
src = tmp
dst = os.path.join(vendor_dir, module_path)
bb.debug(1, "cp %s --> %s" % (src, dst))
shutil.copytree(src, dst, symlinks=True, dirs_exist_ok=True, \
ignore=shutil.ignore_patterns(".git", \
"vendor", \
"*._test.go"))
# If the root directory has a LICENSE file but not the subdir
# we copy the root license to the sub module since the license
# applies to all modules in the repository
# see https://go.dev/ref/mod#vcs-license
if subdir:
rootdirLicense = os.path.join(rootdir, "LICENSE")
subdirLicense = os.path.join(src, "LICENSE")
if not os.path.exists(subdirLicense) and \
os.path.exists(rootdirLicense):
shutil.copy2(rootdirLicense, subdirLicense)
# Copy vendor manifest
modules_txt_src = os.path.join(d.getVar('WORKDIR'), "modules.txt")
bb.debug(1, "cp %s --> %s" % (modules_txt_src, vendor_dir))
shutil.copy2(modules_txt_src, vendor_dir)
# Clean up vendor dir
# We only require the modules in the modules_txt file
fetched_paths = set([os.path.relpath(x[0], vendor_dir) for x in os.walk(vendor_dir)])
# Remove toplevel dir
fetched_paths.remove('.')
vendored_paths = set()
replaced_paths = dict()
with open(modules_txt_src) as f:
for line in f:
if not line.startswith("#"):
line = line.strip()
vendored_paths.add(line)
# Add toplevel dirs into vendored dir, as we want to keep them
topdir = os.path.dirname(line)
while len(topdir):
if not topdir in vendored_paths:
vendored_paths.add(topdir)
topdir = os.path.dirname(topdir)
else:
replaced_module = line.split("=>")
if len(replaced_module) > 1:
# This module has been replaced, use a local path
# We parse the line, which has the pattern "# module-name [module-version] => local-path".
actual_path = replaced_module[1].strip()
vendored_name = replaced_module[0].split()[1]
bb.debug(1, "added vendored name %s for actual path %s" % (vendored_name, actual_path))
replaced_paths[vendored_name] = actual_path
for path in fetched_paths:
if path not in vendored_paths:
realpath = os.path.join(vendor_dir, path)
if os.path.exists(realpath):
shutil.rmtree(realpath)
for vendored_name, replaced_path in replaced_paths.items():
symlink_target = os.path.join(source_dir, *['src', go_import, replaced_path])
symlink_name = os.path.join(vendor_dir, vendored_name)
bb.debug(1, "vendored name %s, symlink name %s" % (vendored_name, symlink_name))
os.symlink(symlink_target, symlink_name)
# Create a symlink to the actual directory
os.symlink(vendor_dir, linkname)
}
addtask go_vendor before do_patch after do_unpack

View File

@@ -0,0 +1,461 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# Icecream distributed compiling support
#
# Stages directories with symlinks from gcc/g++ to icecc, for both
# native and cross compilers. Depending on each configure or compile,
# the directories are added at the head of the PATH list and ICECC_CXX
# and ICECC_CC are set.
#
# For the cross compiler, creates a tar.gz of our toolchain and sets
# ICECC_VERSION accordingly.
#
# The class now handles all 3 different compile 'stages' (i.e. native, cross-kernel and target), creating the
# necessary environment tar.gz file to be used by the remote machines.
# It also supports meta-toolchain generation.
#
# If ICECC_PATH is not set in local.conf then the class will try to locate it using 'bb.utils.which',
# but success is not guaranteed.
#
# If ICECC_ENV_EXEC is set in local.conf, then it should point to the icecc-create-env script provided by the user
# or the default one provided by icecc-create-env_0.1.bb will be used.
# (NOTE that this is a modified version of the needed script and *not the one that comes with icecream*).
#
# The user can specify that certain recipes, or recipes inheriting certain classes, should not use icecc to
# distribute compile jobs to remote machines but instead handle them locally, by defining ICECC_CLASS_DISABLE
# and ICECC_RECIPE_DISABLE with the appropriate values in local.conf. In addition, the user can force icecc to
# be enabled for recipes which set an empty PARALLEL_MAKE variable by defining ICECC_RECIPE_ENABLE.
#
#########################################################################################
# Error checking is kept to a minimum, so double check any parameters you pass to the class
#########################################################################################
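# A minimal local.conf sketch for enabling icecc globally (the class name is
# assumed to be "icecc"; the variable values below are examples, not defaults):
#   INHERIT += "icecc"
#   ICECC_PARALLEL_MAKE = "-j 24"
#   ICECC_RECIPE_DISABLE += "my-failing-recipe"
#   ICECC_RECIPE_ENABLE += "my-serial-recipe"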
BB_BASEHASH_IGNORE_VARS += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_RECIPE_DISABLE \
ICECC_CLASS_DISABLE ICECC_RECIPE_ENABLE ICECC_PATH ICECC_ENV_EXEC \
ICECC_CARET_WORKAROUND ICECC_CFLAGS ICECC_ENV_VERSION \
ICECC_DEBUG ICECC_LOGFILE ICECC_REPEAT_RATE ICECC_PREFERRED_HOST \
ICECC_CLANG_REMOTE_CPP ICECC_IGNORE_UNVERIFIED ICECC_TEST_SOCKET \
ICECC_ENV_DEBUG ICECC_REMOTE_CPP \
"
ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
HOSTTOOLS_NONFATAL += "icecc patchelf"
# This version can be incremented when changes are made to the environment that
# invalidate the version on the compile nodes. Changing it will cause a new
# environment to be created.
#
# A useful thing to do for testing icecream changes locally is to add a
# subversion in local.conf:
# ICECC_ENV_VERSION:append = "-my-ver-1"
ICECC_ENV_VERSION = "2"
# Default to disabling the caret workaround. If set to "1" in local.conf, icecc
# will locally recompile any files that have warnings, which can adversely
# affect performance.
#
# See: https://github.com/icecc/icecream/issues/190
export ICECC_CARET_WORKAROUND ??= "0"
export ICECC_REMOTE_CPP ??= "0"
ICECC_CFLAGS = ""
CFLAGS += "${ICECC_CFLAGS}"
CXXFLAGS += "${ICECC_CFLAGS}"
# Debug flags when generating environments
ICECC_ENV_DEBUG ??= ""
# The disable list contains recipes that cannot distribute compile
# tasks for one reason or another. When adding a new entry, please
# document why (how it failed) so that we can re-evaluate it later e.g. when
# there is a new version.
#
# libgcc-initial - fails with CPP sanity check error if host sysroot contains
# cross gcc built for another target tune/variant.
# pixman - prng_state: TLS reference mismatches non-TLS reference, possibly due to
# pragma omp threadprivate(prng_state).
# systemtap - _HelperSDT.c undefs macros and uses the identifiers in macros emitting
# inline assembly.
# target-sdk-provides-dummy - ${HOST_PREFIX} is empty which triggers the "NULL
# prefix" error.
ICECC_RECIPE_DISABLE += "\
libgcc-initial \
pixman \
systemtap \
target-sdk-provides-dummy \
"
# Classes that should not use icecc. When adding a new entry, please
# document why (how it failed) so that we can re-evaluate it later.
#
# image - images aren't compiling, but the testing framework for images captures
# PARALLEL_MAKE as part of the test environment. Many tests won't use
# icecream, but leaving the high level of parallelism can cause them to
# consume an unnecessary amount of resources.
ICECC_CLASS_DISABLE += "\
image \
"
def get_icecc_dep(d):
# INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
# we need that built is the responsibility of the patch function / class, not
# the application.
if not d.getVar('INHIBIT_DEFAULT_DEPS'):
return "icecc-create-env-native"
return ""
DEPENDS:prepend = "${@get_icecc_dep(d)} "
get_cross_kernel_cc[vardepsexclude] += "KERNEL_CC"
def get_cross_kernel_cc(bb,d):
if not icecc_is_kernel(bb, d):
return None
# evaluate the expression by the shell if necessary
kernel_cc = d.getVar('KERNEL_CC')
if '`' in kernel_cc or '$(' in kernel_cc:
import subprocess
kernel_cc = subprocess.check_output("echo %s" % kernel_cc, shell=True).decode("utf-8")[:-1]
kernel_cc = kernel_cc.replace('ccache', '').strip()
kernel_cc = kernel_cc.split(' ')[0]
kernel_cc = kernel_cc.strip()
return kernel_cc
def get_icecc(d):
return d.getVar('ICECC_PATH') or bb.utils.which(os.getenv("PATH"), "icecc")
def use_icecc(bb,d):
if d.getVar('ICECC_DISABLED') == "1":
# don't even try it, when explicitly disabled
return "no"
# allarch recipes don't use compiler
if icecc_is_allarch(bb, d):
return "no"
if icecc_is_cross_canadian(bb, d):
return "no"
pn = d.getVar('PN')
bpn = d.getVar('BPN')
# Enable/disable checks are made against BPN, because there is a good
# chance that if icecc should be skipped for a recipe, it should be skipped
# for all the variants of that recipe. PN is still checked in case a user
# specified a more specific recipe.
check_pn = set([pn, bpn])
class_disable = (d.getVar('ICECC_CLASS_DISABLE') or "").split()
for bbclass in class_disable:
if bb.data.inherits_class(bbclass, d):
bb.debug(1, "%s: bbclass %s found in disable, disable icecc" % (pn, bbclass))
return "no"
disabled_recipes = (d.getVar('ICECC_RECIPE_DISABLE') or "").split()
enabled_recipes = (d.getVar('ICECC_RECIPE_ENABLE') or "").split()
if check_pn & set(disabled_recipes):
bb.debug(1, "%s: found in disable list, disable icecc" % pn)
return "no"
if check_pn & set(enabled_recipes):
bb.debug(1, "%s: found in enabled recipes list, enable icecc" % pn)
return "yes"
if d.getVar('PARALLEL_MAKE') == "":
bb.debug(1, "%s: has empty PARALLEL_MAKE, disable icecc" % pn)
return "no"
return "yes"
def icecc_is_allarch(bb, d):
return d.getVar("PACKAGE_ARCH") == "all"
def icecc_is_kernel(bb, d):
return \
bb.data.inherits_class("kernel", d);
def icecc_is_native(bb, d):
return \
bb.data.inherits_class("cross", d) or \
bb.data.inherits_class("native", d);
def icecc_is_cross_canadian(bb, d):
return bb.data.inherits_class("cross-canadian", d)
def icecc_dir(bb, d):
return d.expand('${TMPDIR}/work-shared/ice')
# Don't pollute allarch signatures with TARGET_FPU
icecc_version[vardepsexclude] += "TARGET_FPU"
def icecc_version(bb, d):
if use_icecc(bb, d) == "no":
return ""
parallel = d.getVar('ICECC_PARALLEL_MAKE') or ""
if not d.getVar('PARALLEL_MAKE') == "" and parallel:
d.setVar("PARALLEL_MAKE", parallel)
# Disable showing the caret in the GCC compiler output if the workaround is
# disabled
if d.getVar('ICECC_CARET_WORKAROUND') == '0':
d.setVar('ICECC_CFLAGS', '-fno-diagnostics-show-caret')
if icecc_is_native(bb, d):
archive_name = "local-host-env"
elif d.expand('${HOST_PREFIX}') == "":
bb.fatal(d.expand("${PN}"), " NULL prefix")
else:
prefix = d.expand('${HOST_PREFIX}' )
distro = d.expand('${DISTRO}')
target_sys = d.expand('${TARGET_SYS}')
float = d.getVar('TARGET_FPU') or "hard"
archive_name = prefix + distro + "-" + target_sys + "-" + float
if icecc_is_kernel(bb, d):
archive_name += "-kernel"
import socket
ice_dir = icecc_dir(bb, d)
tar_file = os.path.join(ice_dir, "{archive}-{version}-@VERSION@-{hostname}.tar.gz".format(
archive=archive_name,
version=d.getVar('ICECC_ENV_VERSION'),
hostname=socket.gethostname()
))
return tar_file
def icecc_path(bb,d):
if use_icecc(bb, d) == "no":
# don't create unnecessary directories when icecc is disabled
return
staging = os.path.join(d.expand('${STAGING_BINDIR}'), "ice")
if icecc_is_kernel(bb, d):
staging += "-kernel"
return staging
def icecc_get_external_tool(bb, d, tool):
external_toolchain_bindir = d.expand('${EXTERNAL_TOOLCHAIN}${bindir_cross}')
target_prefix = d.expand('${TARGET_PREFIX}')
return os.path.join(external_toolchain_bindir, '%s%s' % (target_prefix, tool))
def icecc_get_tool_link(tool, d):
import subprocess
try:
return subprocess.check_output("readlink -f %s" % tool, shell=True).decode("utf-8")[:-1]
except subprocess.CalledProcessError as e:
bb.note("icecc: one of the tools probably disappeared during recipe parsing, cmd readlink -f %s returned %d:\n%s" % (tool, e.returncode, e.output.decode("utf-8")))
return tool
def icecc_get_path_tool(tool, d):
# This is a little ugly, but we want to make sure we add an actual
# compiler to the toolchain, not ccache. Some distros (e.g. Fedora)
# have ccache enabled by default using symlinks in PATH, meaning ccache
# would be found first when looking for the compiler.
paths = os.getenv("PATH").split(':')
while True:
p, hist = bb.utils.which(':'.join(paths), tool, history=True)
if not p or os.path.basename(icecc_get_tool_link(p, d)) != 'ccache':
return p
paths = paths[len(hist):]
return ""
# Don't pollute native signatures with target TUNE_PKGARCH through STAGING_BINDIR_TOOLCHAIN
icecc_get_tool[vardepsexclude] += "STAGING_BINDIR_TOOLCHAIN"
def icecc_get_tool(bb, d, tool):
if icecc_is_native(bb, d):
return icecc_get_path_tool(tool, d)
elif icecc_is_kernel(bb, d):
return icecc_get_path_tool(get_cross_kernel_cc(bb, d), d)
else:
ice_dir = d.expand('${STAGING_BINDIR_TOOLCHAIN}')
target_sys = d.expand('${TARGET_SYS}')
for p in ice_dir.split(':'):
tool_bin = os.path.join(p, "%s-%s" % (target_sys, tool))
if os.path.isfile(tool_bin):
return tool_bin
external_tool_bin = icecc_get_external_tool(bb, d, tool)
if os.path.isfile(external_tool_bin):
return external_tool_bin
return ""
def icecc_get_and_check_tool(bb, d, tool):
# Check that g++ or gcc is not a symbolic link to the icecc binary in
# PATH, or the icecc-create-env script will silently create an invalid
# compiler environment package.
t = icecc_get_tool(bb, d, tool)
if t:
link_path = icecc_get_tool_link(t, d)
if link_path == get_icecc(d):
bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, link_path))
return ""
else:
return t
else:
return t
wait_for_file() {
local TIME_ELAPSED=0
local FILE_TO_TEST=$1
local TIMEOUT=$2
until [ -f "$FILE_TO_TEST" ]
do
TIME_ELAPSED=$(expr $TIME_ELAPSED + 1)
if [ $TIME_ELAPSED -gt $TIMEOUT ]
then
return 1
fi
sleep 1
done
}
def set_icecc_env():
# dummy python version of set_icecc_env
return
set_icecc_env[vardepsexclude] += "KERNEL_CC"
set_icecc_env() {
if [ "${@use_icecc(bb, d)}" = "no" ]
then
return
fi
ICECC_VERSION="${@icecc_version(bb, d)}"
if [ "x${ICECC_VERSION}" = "x" ]
then
bbwarn "Cannot use icecc: could not get ICECC_VERSION"
return
fi
ICE_PATH="${@icecc_path(bb, d)}"
if [ "x${ICE_PATH}" = "x" ]
then
bbwarn "Cannot use icecc: could not get ICE_PATH"
return
fi
ICECC_BIN="${@get_icecc(d)}"
if [ -z "${ICECC_BIN}" ]; then
bbwarn "Cannot use icecc: icecc binary not found"
return
fi
if [ -z "$(which patchelf patchelf-uninative)" ]; then
bbwarn "Cannot use icecc: patchelf not found"
return
fi
ICECC_CC="${@icecc_get_and_check_tool(bb, d, "gcc")}"
ICECC_CXX="${@icecc_get_and_check_tool(bb, d, "g++")}"
# cannot use icecc_get_and_check_tool here because it assumes 'as' is used without the target_sys prefix
ICECC_WHICH_AS="${@bb.utils.which(os.getenv('PATH'), 'as')}"
if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
then
bbnote "Cannot use icecc: could not get ICECC_CC or ICECC_CXX"
return
fi
ICE_VERSION="$($ICECC_CC -dumpversion)"
ICECC_VERSION=$(echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g")
if [ ! -x "${ICECC_ENV_EXEC}" ]
then
bbwarn "Cannot use icecc: invalid ICECC_ENV_EXEC"
return
fi
# Create symlinks to icecc and wrapper-scripts in the recipe-sysroot directory
mkdir -p $ICE_PATH/symlinks
if [ -n "${KERNEL_CC}" ]; then
compilers="${@get_cross_kernel_cc(bb,d)}"
else
compilers="${HOST_PREFIX}gcc ${HOST_PREFIX}g++"
fi
for compiler in $compilers; do
ln -sf $ICECC_BIN $ICE_PATH/symlinks/$compiler
cat <<-__EOF__ > $ICE_PATH/$compiler
#!/bin/sh -e
export ICECC_VERSION=$ICECC_VERSION
export ICECC_CC=$ICECC_CC
export ICECC_CXX=$ICECC_CXX
$ICE_PATH/symlinks/$compiler "\$@"
__EOF__
chmod 775 $ICE_PATH/$compiler
done
ICECC_AS="$(${ICECC_CC} -print-prog-name=as)"
# for target recipes this should return something like:
# /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
# and just "as" for native; if it returns "as" in the current directory (for whatever reason), use "as" from PATH
if [ "$(dirname "${ICECC_AS}")" = "." ]
then
ICECC_AS="${ICECC_WHICH_AS}"
fi
if [ ! -f "${ICECC_VERSION}.done" ]
then
mkdir -p "$(dirname "${ICECC_VERSION}")"
# the ICECC_VERSION generation step must be locked by a mutex
# in order to prevent race conditions
if flock -n "${ICECC_VERSION}.lock" \
${ICECC_ENV_EXEC} ${ICECC_ENV_DEBUG} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
then
touch "${ICECC_VERSION}.done"
elif ! wait_for_file "${ICECC_VERSION}.done" 30
then
# locking failed, so we waited for ${ICECC_VERSION}.done to appear, but it never did
bbwarn "Timeout waiting for ${ICECC_VERSION}.done"
return
fi
fi
# Don't let ccache find the icecream compiler links that have been created, otherwise
# it can end up invoking icecream recursively.
export CCACHE_PATH="$PATH"
export CCACHE_DISABLE="1"
export PATH="$ICE_PATH:$PATH"
bbnote "Using icecc path: $ICE_PATH"
bbnote "Using icecc tarball: $ICECC_VERSION"
}
do_configure:prepend() {
set_icecc_env
}
do_compile:prepend() {
set_icecc_env
}
do_compile_kernelmodules:prepend() {
set_icecc_env
}
do_install:prepend() {
set_icecc_env
}
# Icecream is not (currently) supported in the extensible SDK
ICECC_SDK_HOST_TASK = "nativesdk-icecc-toolchain"
ICECC_SDK_HOST_TASK:task-populate-sdk-ext = ""
# Don't include icecream in uninative tarball
ICECC_SDK_HOST_TASK:pn-uninative-tarball = ""
# Add the toolchain scripts to the SDK
TOOLCHAIN_HOST_TASK:append = " ${ICECC_SDK_HOST_TASK}"
python () {
if d.getVar('ICECC_DISABLED') != "1":
for task in ['do_configure', 'do_compile', 'do_compile_kernelmodules', 'do_install']:
d.setVarFlag(task, 'network', '1')
}

View File

@@ -0,0 +1,81 @@
#
# Writes build information to the target filesystem at /etc/buildinfo
#
# Copyright (C) 2014 Intel Corporation
# Author: Alejandro Enedino Hernandez Samaniego <alejandro.hernandez@intel.com>
#
# SPDX-License-Identifier: MIT
#
# Usage: add INHERIT += "image-buildinfo" to your conf file
#
# Desired variables to display
IMAGE_BUILDINFO_VARS ?= "DISTRO DISTRO_VERSION"
# Desired location of the output file in the image.
IMAGE_BUILDINFO_FILE ??= "${sysconfdir}/buildinfo"
SDK_BUILDINFO_FILE ??= "/buildinfo"
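# Illustrative local.conf sketch; the extra variables appended below are
# examples only:
#   INHERIT += "image-buildinfo"
#   IMAGE_BUILDINFO_VARS:append = " MACHINE IMAGE_BASENAME"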
# From buildhistory.bbclass
def image_buildinfo_outputvars(vars, d):
vars = vars.split()
ret = ""
for var in vars:
value = d.getVar(var) or ""
if (d.getVarFlag(var, 'type') == "list"):
value = oe.utils.squashspaces(value)
ret += "%s = %s\n" % (var, value)
return ret.rstrip('\n')
# Returns layer revisions along with their respective status
def get_layer_revs(d):
revisions = oe.buildcfg.get_layer_revisions(d)
metadata_revs = ["%-17s = %s:%s%s" % (r[1], r[2], r[3], r[4]) for r in revisions]
return '\n'.join(metadata_revs)
def buildinfo_target(d):
# Get context
if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
# Single and list variables to be read
vars = (d.getVar("IMAGE_BUILDINFO_VARS") or "")
return image_buildinfo_outputvars(vars, d)
python buildinfo() {
if not d.getVar('IMAGE_BUILDINFO_FILE'):
return
destfile = d.expand('${BUILDINFODEST}${IMAGE_BUILDINFO_FILE}')
bb.utils.mkdirhier(os.path.dirname(destfile))
with open(destfile, 'w') as build:
build.writelines((
'''-----------------------
Build Configuration: |
-----------------------
''',
buildinfo_target(d),
'''
-----------------------
Layer Revisions: |
-----------------------
''',
get_layer_revs(d),
'''
'''
))
}
# Write build information to target filesystem
python buildinfo_image () {
d.setVar("BUILDINFODEST", "${IMAGE_ROOTFS}")
bb.build.exec_func("buildinfo", d)
}
python buildinfo_sdk () {
d.setVar("BUILDINFODEST", "${SDK_OUTPUT}/${SDKPATH}")
d.setVar("IMAGE_BUILDINFO_FILE", d.getVar("SDK_BUILDINFO_FILE"))
bb.build.exec_func("buildinfo", d)
}
IMAGE_PREPROCESS_COMMAND += "buildinfo_image"
POPULATE_SDK_PRE_TARGET_COMMAND += "buildinfo_sdk"

View File

@@ -0,0 +1,22 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
python mcextend_virtclass_handler () {
cls = e.data.getVar("BBEXTENDCURR")
variant = e.data.getVar("BBEXTENDVARIANT")
if cls != "mcextend" or not variant:
return
override = ":virtclass-mcextend-" + variant
e.data.setVar("PN", e.data.getVar("PN", False) + "-" + variant)
e.data.setVar("MCNAME", variant)
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
}
addhandler mcextend_virtclass_handler
mcextend_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
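# Typical usage sketch (the multiconfig name is hypothetical): a recipe opts in with
#   BBCLASSEXTEND = "mcextend:mymc"
# which, via the handler above, creates a "<PN>-mymc" variant with MCNAME = "mymc".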

View File

@@ -0,0 +1,10 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
METADATA_BRANCH := "${@oe.buildcfg.detect_branch(d)}"
METADATA_BRANCH[vardepvalue] = "${METADATA_BRANCH}"
METADATA_REVISION := "${@oe.buildcfg.detect_revision(d)}"
METADATA_REVISION[vardepvalue] = "${METADATA_REVISION}"

View File

@@ -0,0 +1,52 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
LOCALCOUNT_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv-localcount-exports.inc"
python migrate_localcount_handler () {
import bb.event
if not e.data:
return
pv = e.data.getVar('PV')
if not 'AUTOINC' in pv:
return
localcounts = bb.persist_data.persist('BB_URI_LOCALCOUNT', e.data)
pn = e.data.getVar('PN')
revs = localcounts.get_by_pattern('%%-%s_rev' % pn)
counts = localcounts.get_by_pattern('%%-%s_count' % pn)
if not revs or not counts:
return
if len(revs) != len(counts):
bb.warn("The number of revs and localcounts don't match in %s" % pn)
return
version = e.data.getVar('PRAUTOINX')
srcrev = bb.fetch2.get_srcrev(e.data)
base_ver = 'AUTOINC-%s' % version[:version.find(srcrev)]
pkgarch = e.data.getVar('PACKAGE_ARCH')
value = max(int(count) for count in counts)
if len(revs) == 1:
if srcrev != ('AUTOINC+%s' % revs[0]):
value += 1
else:
value += 1
bb.utils.mkdirhier(e.data.getVar('PRSERV_DUMPDIR'))
df = e.data.getVar('LOCALCOUNT_DUMPFILE')
flock = bb.utils.lockfile("%s.lock" % df)
with open(df, 'a') as fd:
fd.write('PRAUTO$%s$%s$%s = "%s"\n' %
(base_ver, pkgarch, srcrev, str(value)))
bb.utils.unlockfile(flock)
}
addhandler migrate_localcount_handler
migrate_localcount_handler[eventmask] = "bb.event.RecipeParsed"

View File

@@ -0,0 +1,249 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
python multilib_virtclass_handler () {
cls = d.getVar("BBEXTENDCURR")
variant = d.getVar("BBEXTENDVARIANT")
if cls != "multilib" or not variant:
return
localdata = bb.data.createCopy(d)
localdata.delVar('TMPDIR')
d.setVar('STAGING_KERNEL_DIR', localdata.getVar('STAGING_KERNEL_DIR'))
# There should only be one kernel in multilib configs
# We also skip multilib setup for module packages.
provides = (d.getVar("PROVIDES") or "").split()
non_ml_recipes = d.getVar('NON_MULTILIB_RECIPES').split()
bpn = d.getVar("BPN")
if ("virtual/kernel" in provides
or bb.data.inherits_class('module-base', d)
or bpn in non_ml_recipes):
raise bb.parse.SkipRecipe("We shouldn't have multilib variants for %s" % bpn)
save_var_name = d.getVar("MULTILIB_SAVE_VARNAME") or ""
for name in save_var_name.split():
val = d.getVar(name)
if val:
d.setVar(name + "_MULTILIB_ORIGINAL", val)
# This is nearly unnecessary, but dependencies on NON_MULTILIB_RECIPES don't work without it
d.setVar("SSTATE_ARCHS_TUNEPKG", "${@all_multilib_tune_values(d, 'TUNE_PKGARCH')}")
overrides = e.data.getVar("OVERRIDES", False)
pn = e.data.getVar("PN", False)
overrides = overrides.replace("pn-${PN}", "pn-${PN}:pn-" + pn)
d.setVar("OVERRIDES", overrides)
if bb.data.inherits_class('image', d):
d.setVar("MLPREFIX", variant + "-")
d.setVar("PN", variant + "-" + d.getVar("PN", False))
d.setVar('SDKTARGETSYSROOT', d.getVar('SDKTARGETSYSROOT'))
override = ":virtclass-multilib-" + variant
d.setVar("OVERRIDES", d.getVar("OVERRIDES", False) + override)
target_vendor = d.getVar("TARGET_VENDOR:" + "virtclass-multilib-" + variant, False)
if target_vendor:
d.setVar("TARGET_VENDOR", target_vendor)
return
if bb.data.inherits_class('cross-canadian', d):
# Multilib cross-canadian should use the same nativesdk sysroot without MLPREFIX
d.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot")
d.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot")
d.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot")
d.setVar("RECIPE_SYSROOT_MANIFEST_SUBDIR", "nativesdk-" + variant)
d.setVar("MLPREFIX", variant + "-")
override = ":virtclass-multilib-" + variant
d.setVar("OVERRIDES", d.getVar("OVERRIDES", False) + override)
return
if bb.data.inherits_class('native', d):
raise bb.parse.SkipRecipe("We can't extend native recipes")
if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d):
raise bb.parse.SkipRecipe("We can't extend nativesdk recipes")
if (bb.data.inherits_class('allarch', d)
and not d.getVar('MULTILIB_VARIANTS')
and not bb.data.inherits_class('packagegroup', d)):
raise bb.parse.SkipRecipe("Don't extend allarch recipes which are not packagegroups")
# Expand this since this won't work correctly once we set a multilib into place
d.setVar("ALL_MULTILIB_PACKAGE_ARCHS", d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
override = ":virtclass-multilib-" + variant
skip_msg = d.getVarFlag('SKIP_RECIPE', d.getVar('PN'))
if skip_msg:
pn_new = variant + "-" + d.getVar('PN')
if not d.getVarFlag('SKIP_RECIPE', pn_new):
d.setVarFlag('SKIP_RECIPE', pn_new, skip_msg)
d.setVar("MLPREFIX", variant + "-")
d.setVar("PN", variant + "-" + d.getVar("PN", False))
d.setVar("OVERRIDES", d.getVar("OVERRIDES", False) + override)
# Expand INCOMPATIBLE_LICENSE_EXCEPTIONS with multilib prefix
pkgs = d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS")
if pkgs:
for pkg in pkgs.split():
pkgs += " " + variant + "-" + pkg
d.setVar("INCOMPATIBLE_LICENSE_EXCEPTIONS", pkgs)
# DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
newtune = d.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + variant, False)
if newtune:
d.setVar("DEFAULTTUNE", newtune)
}
addhandler multilib_virtclass_handler
multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
python __anonymous () {
if bb.data.inherits_class('image', d):
# set rpm preferred file color for 32-bit multilib image
if d.getVar("SITEINFO_BITS") == "32":
d.setVar("RPM_PREFER_ELF_ARCH", "1")
variant = d.getVar("BBEXTENDVARIANT")
import oe.classextend
clsextend = oe.classextend.ClassExtender(variant, d)
clsextend.map_depends_variable("PACKAGE_INSTALL")
clsextend.map_depends_variable("LINGUAS_INSTALL")
clsextend.map_depends_variable("RDEPENDS")
pinstall = d.getVar("LINGUAS_INSTALL") + " " + d.getVar("PACKAGE_INSTALL")
d.setVar("PACKAGE_INSTALL", pinstall)
d.setVar("LINGUAS_INSTALL", "")
# FIXME, we need to map this to something, not delete it!
d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
bb.build.deltask('do_populate_sdk_ext', d)
return
}
python multilib_virtclass_handler_postkeyexp () {
cls = d.getVar("BBEXTENDCURR")
variant = d.getVar("BBEXTENDVARIANT")
if cls != "multilib" or not variant:
return
variant = d.getVar("BBEXTENDVARIANT")
import oe.classextend
clsextend = oe.classextend.ClassExtender(variant, d)
if bb.data.inherits_class('image', d):
return
clsextend.map_depends_variable("DEPENDS")
clsextend.map_depends_variable("PACKAGE_WRITE_DEPS")
clsextend.map_variable("PROVIDES")
if bb.data.inherits_class('cross-canadian', d):
return
clsextend.rename_packages()
clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split())
clsextend.map_packagevars()
clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
clsextend.map_variable("INITSCRIPT_PACKAGES")
clsextend.map_variable("USERADD_PACKAGES")
clsextend.map_variable("SYSTEMD_PACKAGES")
clsextend.map_variable("UPDATERCPN")
reset_alternative_priority(d)
}
addhandler multilib_virtclass_handler_postkeyexp
multilib_virtclass_handler_postkeyexp[eventmask] = "bb.event.RecipePostKeyExpansion"
def reset_alternative_priority(d):
if not bb.data.inherits_class('update-alternatives', d):
return
# There might be multiple multilibs at the same time, e.g., lib32 and
# lib64; each of them should have a different priority.
multilib_variants = d.getVar('MULTILIB_VARIANTS')
bbextendvariant = d.getVar('BBEXTENDVARIANT')
reset_gap = multilib_variants.split().index(bbextendvariant) + 1
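# e.g. with MULTILIB_VARIANTS = "lib32 lib64", a lib32 recipe lowers its
# priorities by 1 and a lib64 recipe by 2 (illustrative worked example).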
# ALTERNATIVE_PRIORITY = priority
alt_priority_recipe = d.getVar('ALTERNATIVE_PRIORITY')
# Reset ALTERNATIVE_PRIORITY when found
if alt_priority_recipe:
reset_priority = int(alt_priority_recipe) - reset_gap
bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY to %s' % (d.getVar('PN'), reset_priority))
d.setVar('ALTERNATIVE_PRIORITY', reset_priority)
handled_pkgs = []
for pkg in (d.getVar('PACKAGES') or "").split():
# ALTERNATIVE_PRIORITY_pkg = priority
alt_priority_pkg = d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg)
# Reset ALTERNATIVE_PRIORITY_pkg when found
if alt_priority_pkg:
reset_priority = int(alt_priority_pkg) - reset_gap
if not pkg in handled_pkgs:
handled_pkgs.append(pkg)
bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s to %s' % (pkg, pkg, reset_priority))
d.setVar('ALTERNATIVE_PRIORITY_%s' % pkg, reset_priority)
for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
# ALTERNATIVE_PRIORITY_pkg[tool] = priority
alt_priority_pkg_name = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name)
# ALTERNATIVE_PRIORITY[tool] = priority
alt_priority_name = d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name)
if alt_priority_pkg_name:
reset_priority = int(alt_priority_pkg_name) - reset_gap
bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s[%s] to %s' % (pkg, pkg, alt_name, reset_priority))
d.setVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name, reset_priority)
elif alt_priority_name:
reset_priority = int(alt_priority_name) - reset_gap
bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY[%s] to %s' % (pkg, alt_name, reset_priority))
d.setVarFlag('ALTERNATIVE_PRIORITY', alt_name, reset_priority)
PACKAGEFUNCS:append = " do_package_qa_multilib"
python do_package_qa_multilib() {
def check_mlprefix(pkg, var, mlprefix):
values = bb.utils.explode_deps(d.getVar('%s:%s' % (var, pkg)) or d.getVar(var) or "")
candidates = []
for i in values:
if i.startswith('virtual/'):
i = i[len('virtual/'):]
if (not (i.startswith(mlprefix) or i.startswith("kernel-") \
or ('cross-canadian' in i) or i.startswith("nativesdk-") \
or i.startswith("rtld") or i.startswith("/"))):
candidates.append(i)
if len(candidates) > 0:
msg = "%s package %s - suspicious values '%s' in %s" \
% (d.getVar('PN'), pkg, ' '.join(candidates), var)
oe.qa.handle_error("multilib", msg, d)
ml = d.getVar('MLPREFIX')
if not ml:
return
# exception for ${MLPREFIX}target-sdk-provides-dummy
if 'target-sdk-provides-dummy' in d.getVar('PN'):
return
packages = d.getVar('PACKAGES')
for pkg in packages.split():
check_mlprefix(pkg, 'RDEPENDS', ml)
check_mlprefix(pkg, 'RPROVIDES', ml)
check_mlprefix(pkg, 'RRECOMMENDS', ml)
check_mlprefix(pkg, 'RSUGGESTS', ml)
check_mlprefix(pkg, 'RREPLACES', ml)
check_mlprefix(pkg, 'RCONFLICTS', ml)
oe.qa.exit_if_errors(d)
}

View File

@@ -0,0 +1,229 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
def preferred_ml_updates(d):
# If any of PREFERRED_PROVIDER, PREFERRED_RPROVIDER, REQUIRED_VERSION
# or PREFERRED_VERSION are set, we need to mirror these variables in
# the multilib case.
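# e.g. with MULTILIBS = "multilib:lib32", a PREFERRED_VERSION_openssl setting
# is mirrored to PREFERRED_VERSION_lib32-openssl (illustrative example).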
multilibs = d.getVar('MULTILIBS') or ""
if not multilibs:
return
prefixes = []
for ext in multilibs.split():
eext = ext.split(':')
if len(eext) > 1 and eext[0] == 'multilib':
prefixes.append(eext[1])
required_versions = []
preferred_versions = []
providers = []
rproviders = []
for v in d.keys():
if v.startswith("REQUIRED_VERSION_"):
required_versions.append(v)
if v.startswith("PREFERRED_VERSION_"):
preferred_versions.append(v)
if v.startswith("PREFERRED_PROVIDER_"):
providers.append(v)
if v.startswith("PREFERRED_RPROVIDER_"):
rproviders.append(v)
def sort_versions(versions, keyword):
version_str = "_".join([keyword, "VERSION", ""])
for v in versions:
val = d.getVar(v, False)
pkg = v.replace(version_str, "")
if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
continue
if '-cross-' in pkg and '${' in pkg:
for p in prefixes:
localdata = bb.data.createCopy(d)
override = ":virtclass-multilib-" + p
localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
if "-canadian-" in pkg:
newtune = localdata.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + p, False)
if newtune:
localdata.setVar("DEFAULTTUNE", newtune)
newname = localdata.expand(v)
else:
newname = localdata.expand(v).replace(version_str, version_str + p + '-')
if newname != v:
newval = localdata.expand(val)
d.setVar(newname, newval)
# Avoid future variable key expansion
vexp = d.expand(v)
if v != vexp and d.getVar(v, False):
d.renameVar(v, vexp)
continue
for p in prefixes:
newname = version_str + p + "-" + pkg
if not d.getVar(newname, False):
d.setVar(newname, val)
sort_versions(required_versions, "REQUIRED")
sort_versions(preferred_versions, "PREFERRED")
for prov in providers:
val = d.getVar(prov, False)
pkg = prov.replace("PREFERRED_PROVIDER_", "")
if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
continue
if 'cross-canadian' in pkg:
for p in prefixes:
localdata = bb.data.createCopy(d)
override = ":virtclass-multilib-" + p
localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
newname = localdata.expand(prov)
if newname != prov:
newval = localdata.expand(val)
d.setVar(newname, newval)
# Avoid future variable key expansion
provexp = d.expand(prov)
if prov != provexp and d.getVar(prov, False):
d.renameVar(prov, provexp)
continue
virt = ""
if pkg.startswith("virtual/"):
pkg = pkg.replace("virtual/", "")
virt = "virtual/"
for p in prefixes:
newval = None
if pkg != "kernel":
newval = p + "-" + val
# implement variable keys
localdata = bb.data.createCopy(d)
override = ":virtclass-multilib-" + p
localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
newname = localdata.expand(prov)
if newname != prov and not d.getVar(newname, False):
d.setVar(newname, localdata.expand(newval))
# implement alternative multilib name
newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg)
if not d.getVar(newname, False) and newval != None:
d.setVar(newname, localdata.expand(newval))
# Avoid future variable key expansion
provexp = d.expand(prov)
if prov != provexp and d.getVar(prov, False):
d.renameVar(prov, provexp)
for prov in rproviders:
val = d.getVar(prov, False)
pkg = prov.replace("PREFERRED_RPROVIDER_", "")
for p in prefixes:
newval = p + "-" + val
# implement variable keys
localdata = bb.data.createCopy(d)
override = ":virtclass-multilib-" + p
localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
newname = localdata.expand(prov)
if newname != prov and not d.getVar(newname, False):
d.setVar(newname, localdata.expand(newval))
# implement alternative multilib name
newname = localdata.expand("PREFERRED_RPROVIDER_" + p + "-" + pkg)
if not d.getVar(newname, False) and newval != None:
d.setVar(newname, localdata.expand(newval))
# Avoid future variable key expansion
provexp = d.expand(prov)
if prov != provexp and d.getVar(prov, False):
d.renameVar(prov, provexp)
def translate_provide(prefix, prov):
# Really need to know if kernel modules class is inherited somehow
if prov == "lttng-modules":
return prov
if not prov.startswith("virtual/"):
return prefix + "-" + prov
if prov == "virtual/kernel":
return prov
prov = prov.replace("virtual/", "")
return "virtual/" + prefix + "-" + prov
mp = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
extramp = []
for p in mp:
if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
continue
for pref in prefixes:
extramp.append(translate_provide(pref, p))
d.setVar("BB_MULTI_PROVIDER_ALLOWED", " ".join(mp + extramp))
abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
extras = []
for p in prefixes:
for a in abisafe:
extras.append(p + "-" + a)
d.appendVar("SIGGEN_EXCLUDERECIPES_ABISAFE", " " + " ".join(extras))
siggen_exclude = (d.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
extras = []
for p in prefixes:
for a in siggen_exclude:
a1, a2 = a.split("->")
extras.append(translate_provide(p, a1) + "->" + translate_provide(p, a2))
d.appendVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", " " + " ".join(extras))
python multilib_virtclass_handler_vendor () {
for v in d.getVar("MULTILIB_VARIANTS").split():
if d.getVar("TARGET_VENDOR:virtclass-multilib-" + v, False) is None:
d.setVar("TARGET_VENDOR:virtclass-multilib-" + v, d.getVar("TARGET_VENDOR", False) + "ml" + v)
preferred_ml_updates(d)
}
addhandler multilib_virtclass_handler_vendor
multilib_virtclass_handler_vendor[eventmask] = "bb.event.ConfigParsed"
python multilib_virtclass_handler_global () {
variant = d.getVar("BBEXTENDVARIANT")
if variant:
return
non_ml_recipes = d.getVar('NON_MULTILIB_RECIPES').split()
if bb.data.inherits_class('kernel', d) or \
bb.data.inherits_class('module-base', d) or \
d.getVar('BPN') in non_ml_recipes:
# We need to avoid expanding KERNEL_VERSION which we can do by deleting it
# from a copy of the datastore
localdata = bb.data.createCopy(d)
localdata.delVar("KERNEL_VERSION")
localdata.delVar("KERNEL_VERSION_PKG_NAME")
variants = (d.getVar("MULTILIB_VARIANTS") or "").split()
import oe.classextend
clsextends = []
for variant in variants:
clsextends.append(oe.classextend.ClassExtender(variant, localdata))
# Process PROVIDES
origprovs = provs = localdata.getVar("PROVIDES") or ""
for clsextend in clsextends:
provs = provs + " " + clsextend.map_variable("PROVIDES", setvar=False)
d.setVar("PROVIDES", provs)
# Process RPROVIDES
origrprovs = rprovs = localdata.getVar("RPROVIDES") or ""
for clsextend in clsextends:
rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES", setvar=False)
if rprovs.strip():
d.setVar("RPROVIDES", rprovs)
# Process RPROVIDES:${PN}...
for pkg in (d.getVar("PACKAGES") or "").split():
origrprovs = rprovs = localdata.getVar("RPROVIDES:%s" % pkg) or ""
for clsextend in clsextends:
rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES:%s" % pkg, setvar=False)
rprovs = rprovs + " " + clsextend.extname + "-" + pkg
d.setVar("RPROVIDES:%s" % pkg, rprovs)
}
addhandler multilib_virtclass_handler_global
multilib_virtclass_handler_global[eventmask] = "bb.event.RecipeTaskPreProcess"

View File

@@ -0,0 +1,90 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
addtask lint before do_build
do_lint[nostamp] = "1"
python do_lint() {
pkgname = d.getVar("PN")
##############################
# Test that DESCRIPTION exists
#
description = d.getVar("DESCRIPTION", False)
if description[1:10] == '{SUMMARY}':
bb.warn("%s: DESCRIPTION is not set" % pkgname)
##############################
# Test that HOMEPAGE exists
#
homepage = d.getVar("HOMEPAGE", False)
if homepage == '':
bb.warn("%s: HOMEPAGE is not set" % pkgname)
elif not homepage.startswith("http://") and not homepage.startswith("https://"):
bb.warn("%s: HOMEPAGE doesn't start with http:// or https://" % pkgname)
##############################
# Test for valid SECTION
#
section = d.getVar("SECTION", False)
if section == '':
bb.warn("%s: SECTION is not set" % pkgname)
elif not section.islower():
bb.warn("%s: SECTION should only use lower case" % pkgname)
##############################
# Check that all patches have Signed-off-by and Upstream-Status
#
srcuri = d.getVar("SRC_URI", False).split()
fpaths = (d.getVar('FILESPATH') or '').split(':')
def findPatch(patchname):
for dir in fpaths:
# use os.path.join so entries in FILESPATH resolve correctly
patchpath = os.path.join(dir, patchname)
if os.path.exists(patchpath):
return patchpath
def findKey(path, key):
ret = True
f = open('%s' % path, mode = 'r')
line = f.readline()
while line:
if line.find(key) != -1:
ret = False
line = f.readline()
f.close()
return ret
def checkPN(pkgname, varname, str):
if str.find("{PN}") != -1:
bb.warn("%s: should use BPN instead of PN in %s" % (pkgname, varname))
if str.find("{P}") != -1:
bb.warn("%s: should use BP instead of P in %s" % (pkgname, varname))
length = len("file://")
for item in srcuri:
if item.startswith("file://"):
item = item[length:]
if item.endswith(".patch") or item.endswith(".diff"):
path = findPatch(item)
if findKey(path, "Signed-off-by"):
bb.warn("%s: %s doesn't have Signed-off-by" % (pkgname, item))
if findKey(path, "Upstream-Status"):
bb.warn("%s: %s doesn't have Upstream-Status" % (pkgname, item))
##############################
# Check for ${PN} or ${P} usage in SRC_URI or S
# Should use ${BPN} or ${BP} instead to avoid breaking multilib
#
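# e.g. prefer SRC_URI = "https://example.com/${BPN}-${PV}.tar.gz" (illustrative
# URL) over the same URI written with ${PN}.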
for s in srcuri:
if not s.startswith("file://"):
checkPN(pkgname, 'SRC_URI', s)
checkPN(pkgname, 'S', d.getVar('S', False))
}

View File

@@ -0,0 +1,22 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
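# Illustrative usage from local.conf (the URL below is a placeholder, not a
# real mirror):
#   INHERIT += "own-mirrors"
#   SOURCE_MIRROR_URL = "http://example.com/source-mirror"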
PREMIRRORS:prepend = " \
cvs://.*/.* ${SOURCE_MIRROR_URL} \
svn://.*/.* ${SOURCE_MIRROR_URL} \
git://.*/.* ${SOURCE_MIRROR_URL} \
gitsm://.*/.* ${SOURCE_MIRROR_URL} \
hg://.*/.* ${SOURCE_MIRROR_URL} \
bzr://.*/.* ${SOURCE_MIRROR_URL} \
p4://.*/.* ${SOURCE_MIRROR_URL} \
osc://.*/.* ${SOURCE_MIRROR_URL} \
https?://.*/.* ${SOURCE_MIRROR_URL} \
ftp://.*/.* ${SOURCE_MIRROR_URL} \
npm://.*/?.* ${SOURCE_MIRROR_URL} \
s3://.*/.* ${SOURCE_MIRROR_URL} \
crate://.*/.* ${SOURCE_MIRROR_URL} \
gs://.*/.* ${SOURCE_MIRROR_URL} \
"

View File

@@ -0,0 +1,65 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
PRSERV_DUMPOPT_VERSION = "${PRAUTOINX}"
PRSERV_DUMPOPT_PKGARCH = ""
PRSERV_DUMPOPT_CHECKSUM = ""
PRSERV_DUMPOPT_COL = "0"
PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
PRSERV_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv.inc"
python prexport_handler () {
import bb.event
if not e.data or bb.data.inherits_class('native', e.data) or \
bb.data.inherits_class('crosssdk', e.data):
return
if isinstance(e, bb.event.RecipeParsed):
import oe.prservice
#get all PR values for the current PRAUTOINX
ver = e.data.getVar('PRSERV_DUMPOPT_VERSION')
ver = ver.replace('%','-')
retval = oe.prservice.prserv_dump_db(e.data)
if not retval:
bb.fatal("prexport_handler: export failed!")
(metainfo, datainfo) = retval
if not datainfo:
bb.note("prexport_handler: No AUTOPR values found for %s" % ver)
return
oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
if 'AUTOINC' in ver:
import re
srcpv = bb.fetch2.get_srcrev(e.data)
base_ver = "AUTOINC-%s" % ver[:ver.find(srcpv)]
e.data.setVar('PRSERV_DUMPOPT_VERSION', base_ver)
retval = oe.prservice.prserv_dump_db(e.data)
if not retval:
bb.fatal("prexport_handler: export failed!")
(metainfo, datainfo) = retval
oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
elif isinstance(e, bb.event.ParseStarted):
import bb.utils
import oe.prservice
oe.prservice.prserv_check_avail(e.data)
#remove dumpfile
bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE'))
elif isinstance(e, bb.event.ParseCompleted):
import oe.prservice
#dump meta info of tables
d = e.data.createCopy()
d.setVar('PRSERV_DUMPOPT_COL', "1")
retval = oe.prservice.prserv_dump_db(d)
if not retval:
bb.error("prexport_handler: export failed!")
return
(metainfo, datainfo) = retval
oe.prservice.prserv_export_tofile(d, metainfo, None, True)
}
addhandler prexport_handler
prexport_handler[eventmask] = "bb.event.RecipeParsed bb.event.ParseStarted bb.event.ParseCompleted"

View File

@@ -0,0 +1,27 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
python primport_handler () {
import bb.event
if not e.data:
return
if isinstance(e, bb.event.ParseCompleted):
import oe.prservice
#import all exported AUTOPR values
imported = oe.prservice.prserv_import_db(e.data)
if imported is None:
bb.fatal("import failed!")
for (version, pkgarch, checksum, value) in imported:
bb.note("imported (%s,%s,%s,%d)" % (version, pkgarch, checksum, value))
elif isinstance(e, bb.event.ParseStarted):
import oe.prservice
oe.prservice.prserv_check_avail(e.data)
}
addhandler primport_handler
primport_handler[eventmask] = "bb.event.ParseCompleted bb.event.ParseStarted"

View File

@@ -0,0 +1,155 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
def __note(msg, d):
bb.note("%s: recipe_sanity: %s" % (d.getVar("P"), msg))
__recipe_sanity_badruntimevars = "RDEPENDS RPROVIDES RRECOMMENDS RCONFLICTS"
def bad_runtime_vars(cfgdata, d):
if bb.data.inherits_class("native", d) or \
bb.data.inherits_class("cross", d):
return
for var in d.getVar("__recipe_sanity_badruntimevars").split():
val = d.getVar(var, False)
if val and val != cfgdata.get(var):
__note("%s should be %s:${PN}" % (var, var), d)
__recipe_sanity_reqvars = "DESCRIPTION"
__recipe_sanity_reqdiffvars = ""
def req_vars(cfgdata, d):
for var in d.getVar("__recipe_sanity_reqvars").split():
if not d.getVar(var, False):
__note("%s should be set" % var, d)
for var in d.getVar("__recipe_sanity_reqdiffvars").split():
val = d.getVar(var, False)
cfgval = cfgdata.get(var)
if not val:
__note("%s should be set" % var, d)
elif val == cfgval:
__note("%s should be defined to something other than default (%s)" % (var, cfgval), d)
def var_renames_overwrite(cfgdata, d):
renames = d.getVar("__recipe_sanity_renames", False)
if renames:
for (key, newkey, oldvalue, newvalue) in renames:
if oldvalue != newvalue and oldvalue != cfgdata.get(newkey):
__note("rename of variable '%s' to '%s' overwrote existing value '%s' with '%s'." % (key, newkey, oldvalue, newvalue), d)
def incorrect_nonempty_PACKAGES(cfgdata, d):
if bb.data.inherits_class("native", d) or \
bb.data.inherits_class("cross", d):
if d.getVar("PACKAGES"):
return True
def can_use_autotools_base(cfgdata, d):
cfg = d.getVar("do_configure")
if not bb.data.inherits_class("autotools", d):
return False
for i in ["autoreconf"] + ["%s_do_configure" % cls for cls in ["gnomebase", "gnome", "e", "autotools", "efl", "gpephone", "openmoko", "openmoko2", "xfce", "xlibs"]]:
if cfg.find(i) != -1:
return False
for clsfile in d.getVar("__inherit_cache", False):
(base, _) = os.path.splitext(os.path.basename(clsfile))
if cfg.find("%s_do_configure" % base) != -1:
__note("autotools_base usage needs verification, spotted %s_do_configure" % base, d)
return True
def can_delete_FILESPATH(cfgdata, d):
expected = cfgdata.get("FILESPATH")
expectedpaths = d.expand(expected)
unexpanded = d.getVar("FILESPATH", False)
filespath = d.getVar("FILESPATH").split(":")
filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
for fp in filespath:
if not fp in expectedpaths:
# __note("Path %s in FILESPATH not in the expected paths %s" %
# (fp, expectedpaths), d)
return False
return expected != unexpanded
def can_delete_others(p, cfgdata, d):
for k in ["S", "PV", "PN", "DESCRIPTION", "DEPENDS",
"SECTION", "PACKAGES", "EXTRA_OECONF", "EXTRA_OEMAKE"]:
#for k in cfgdata:
unexpanded = d.getVar(k, False)
cfgunexpanded = cfgdata.get(k)
if not cfgunexpanded:
continue
try:
expanded = d.getVar(k)
cfgexpanded = d.expand(cfgunexpanded)
except bb.fetch.ParameterError:
continue
if unexpanded != cfgunexpanded and \
cfgexpanded == expanded:
__note("candidate for removal of %s" % k, d)
bb.debug(1, "%s: recipe_sanity: cfg's '%s' and d's '%s' both expand to %s" %
(p, cfgunexpanded, unexpanded, expanded))
python do_recipe_sanity () {
p = d.getVar("P")
p = "%s %s %s" % (d.getVar("PN"), d.getVar("PV"), d.getVar("PR"))
sanitychecks = [
(can_delete_FILESPATH, "candidate for removal of FILESPATH"),
#(can_use_autotools_base, "candidate for use of autotools_base"),
(incorrect_nonempty_PACKAGES, "native or cross recipe with non-empty PACKAGES"),
]
cfgdata = d.getVar("__recipe_sanity_cfgdata", False)
for (func, msg) in sanitychecks:
if func(cfgdata, d):
__note(msg, d)
can_delete_others(p, cfgdata, d)
var_renames_overwrite(cfgdata, d)
req_vars(cfgdata, d)
bad_runtime_vars(cfgdata, d)
}
do_recipe_sanity[nostamp] = "1"
addtask recipe_sanity
do_recipe_sanity_all[nostamp] = "1"
do_recipe_sanity_all[recrdeptask] = "do_recipe_sanity_all do_recipe_sanity"
do_recipe_sanity_all () {
:
}
addtask recipe_sanity_all after do_recipe_sanity
python recipe_sanity_eh () {
d = e.data
cfgdata = {}
for k in d.keys():
if not isinstance(d.getVar(k, False), bb.data_smart.DataSmart):
cfgdata[k] = d.getVar(k, False)
d.setVar("__recipe_sanity_cfgdata", cfgdata)
#d.setVar("__recipe_sanity_cfgdata", d)
# Sick, very sick..
from bb.data_smart import DataSmart
old = DataSmart.renameVar
def myrename(self, key, newkey):
oldvalue = self.getVar(newkey, 0)
old(self, key, newkey)
newvalue = self.getVar(newkey, 0)
if oldvalue:
renames = self.getVar("__recipe_sanity_renames", 0) or set()
renames.add((key, newkey, oldvalue, newvalue))
self.setVar("__recipe_sanity_renames", renames)
DataSmart.renameVar = myrename
}
addhandler recipe_sanity_eh
recipe_sanity_eh[eventmask] = "bb.event.ConfigParsed"

View File

@@ -0,0 +1,11 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
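# Converts absolute symlinks installed into ${D} to relative ones after
# do_install. Illustrative effect (paths are example values):
#   /usr/sbin/foo -> /usr/bin/foo-real   becomes   /usr/sbin/foo -> ../bin/foo-real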
do_install[postfuncs] += "install_relative_symlinks"
python install_relative_symlinks () {
oe.path.replace_absolute_symlinks(d.getVar('D'), d)
}

View File

@@ -0,0 +1,26 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
inherit chrpath
SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess relocatable_native_pcfiles"
python relocatable_binaries_preprocess() {
rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
}
relocatable_native_pcfiles() {
for dir in ${libdir}/pkgconfig ${datadir}/pkgconfig; do
files_template=${SYSROOT_DESTDIR}$dir/*.pc
# Expand to any files matching $files_template
files=$(echo $files_template)
# $files_template and $files will differ if any files were found
if [ "$files_template" != "$files" ]; then
rel=$(realpath -m --relative-to=$dir ${base_prefix})
sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" $files
fi
done
}

View File

@@ -0,0 +1,17 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# This class removes libtool .la files after do_install
REMOVE_LIBTOOL_LA ?= "1"
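# To keep the .la files for a particular recipe, set in that recipe
# (illustrative):
#   REMOVE_LIBTOOL_LA = "0"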
remove_libtool_la() {
if [ "${REMOVE_LIBTOOL_LA}" != "0" ]; then
find "${D}" -ignore_readdir_race -name "*.la" -delete
fi
}
do_install[postfuncs] += "remove_libtool_la"

View File

@@ -0,0 +1,158 @@
#
# Collects debug information in order to create error report files.
#
# Copyright (C) 2013 Intel Corporation
# Author: Andreea Brandusa Proca <andreea.b.proca@intel.com>
#
# SPDX-License-Identifier: MIT
#
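# Typically enabled from local.conf (illustrative):
#   INHERIT += "report-error"
# Failure details are then collected under ERR_REPORT_DIR and can be submitted
# with the send-error-report script mentioned at the end of a failed build.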
ERR_REPORT_DIR ?= "${LOG_DIR}/error-report"
def errorreport_getdata(e):
import codecs
logpath = e.data.getVar('ERR_REPORT_DIR')
datafile = os.path.join(logpath, "error-report.txt")
with codecs.open(datafile, 'r', 'utf-8') as f:
data = f.read()
return data
def errorreport_savedata(e, newdata, file):
import json
import codecs
logpath = e.data.getVar('ERR_REPORT_DIR')
datafile = os.path.join(logpath, file)
with codecs.open(datafile, 'w', 'utf-8') as f:
json.dump(newdata, f, indent=4, sort_keys=True)
return datafile
def get_conf_data(e, filename):
builddir = e.data.getVar('TOPDIR')
filepath = os.path.join(builddir, "conf", filename)
jsonstring = ""
if os.path.exists(filepath):
with open(filepath, 'r') as f:
for line in f.readlines():
if line.startswith("#") or len(line.strip()) == 0:
continue
else:
jsonstring=jsonstring + line
return jsonstring
def get_common_data(e):
data = {}
data['machine'] = e.data.getVar("MACHINE")
data['build_sys'] = e.data.getVar("BUILD_SYS")
data['distro'] = e.data.getVar("DISTRO")
data['target_sys'] = e.data.getVar("TARGET_SYS")
data['branch_commit'] = str(oe.buildcfg.detect_branch(e.data)) + ": " + str(oe.buildcfg.detect_revision(e.data))
data['bitbake_version'] = e.data.getVar("BB_VERSION")
data['layer_version'] = get_layers_branch_rev(e.data)
data['local_conf'] = get_conf_data(e, 'local.conf')
data['auto_conf'] = get_conf_data(e, 'auto.conf')
return data
python errorreport_handler () {
import json
import codecs
def nativelsb():
nativelsbstr = e.data.getVar("NATIVELSBSTRING")
# provide a bit more host info in case of uninative build
if e.data.getVar('UNINATIVE_URL') != 'unset':
return '/'.join([nativelsbstr, lsb_distro_identifier(e.data)])
return nativelsbstr
logpath = e.data.getVar('ERR_REPORT_DIR')
datafile = os.path.join(logpath, "error-report.txt")
if isinstance(e, bb.event.BuildStarted):
bb.utils.mkdirhier(logpath)
data = {}
data = get_common_data(e)
data['nativelsb'] = nativelsb()
data['failures'] = []
data['component'] = " ".join(e.getPkgs())
lock = bb.utils.lockfile(datafile + '.lock')
errorreport_savedata(e, data, "error-report.txt")
bb.utils.unlockfile(lock)
elif isinstance(e, bb.build.TaskFailed):
task = e.task
taskdata={}
log = e.data.getVar('BB_LOGFILE')
taskdata['package'] = e.data.expand("${PF}")
taskdata['task'] = task
if log:
try:
with codecs.open(log, encoding='utf-8') as logFile:
logdata = logFile.read()
# Replace host-specific paths so the logs are cleaner
for d in ("TOPDIR", "TMPDIR"):
s = e.data.getVar(d)
if s:
logdata = logdata.replace(s, d)
except:
logdata = "Unable to read log file"
else:
logdata = "No Log"
# server will refuse failures longer than param specified in project.settings.py
# MAX_UPLOAD_SIZE = "5242880"
# use lower value, because 650 chars can be spent in task, package, version
max_logdata_size = 5242000
# upload last max_logdata_size characters
if len(logdata) > max_logdata_size:
logdata = "..." + logdata[-max_logdata_size:]
taskdata['log'] = logdata
lock = bb.utils.lockfile(datafile + '.lock')
jsondata = json.loads(errorreport_getdata(e))
jsondata['failures'].append(taskdata)
errorreport_savedata(e, jsondata, "error-report.txt")
bb.utils.unlockfile(lock)
elif isinstance(e, bb.event.NoProvider):
bb.utils.mkdirhier(logpath)
data = {}
data = get_common_data(e)
data['nativelsb'] = nativelsb()
data['failures'] = []
data['component'] = str(e._item)
taskdata={}
taskdata['log'] = str(e)
taskdata['package'] = str(e._item)
taskdata['task'] = "Nothing provides " + "'" + str(e._item) + "'"
data['failures'].append(taskdata)
lock = bb.utils.lockfile(datafile + '.lock')
errorreport_savedata(e, data, "error-report.txt")
bb.utils.unlockfile(lock)
elif isinstance(e, bb.event.ParseError):
bb.utils.mkdirhier(logpath)
data = {}
data = get_common_data(e)
data['nativelsb'] = nativelsb()
data['failures'] = []
data['component'] = "parse"
taskdata={}
taskdata['log'] = str(e._msg)
taskdata['task'] = str(e._msg)
data['failures'].append(taskdata)
lock = bb.utils.lockfile(datafile + '.lock')
errorreport_savedata(e, data, "error-report.txt")
bb.utils.unlockfile(lock)
elif isinstance(e, bb.event.BuildCompleted):
lock = bb.utils.lockfile(datafile + '.lock')
jsondata = json.loads(errorreport_getdata(e))
bb.utils.unlockfile(lock)
failures = jsondata['failures']
if(len(failures) > 0):
filename = "error_report_" + e.data.getVar("BUILDNAME")+".txt"
datafile = errorreport_savedata(e, jsondata, filename)
bb.note("The errors for this build are stored in %s\nYou can send the errors to a reports server by running:\n send-error-report %s [-s server]" % (datafile, datafile))
bb.note("The contents of these logs will be posted in public if you use the above command with the default server. Please ensure you remove any identifying or proprietary information when prompted before sending.")
}
addhandler errorreport_handler
errorreport_handler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskFailed bb.event.NoProvider bb.event.ParseError"

View File

@@ -0,0 +1,197 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
#
# Removes source after build
#
# To use it add that line to conf/local.conf:
#
# INHERIT += "rm_work"
#
# To inhibit rm_work for some recipes, specify them in RM_WORK_EXCLUDE.
# For example, in conf/local.conf:
#
# RM_WORK_EXCLUDE += "icu-native icu busybox"
#
# Recipes can also configure which entries in their ${WORKDIR}
# are preserved besides temp, which already gets excluded by default
# because it contains logs:
# do_install:append () {
# echo "bar" >${WORKDIR}/foo
# }
# RM_WORK_EXCLUDE_ITEMS += "foo"
RM_WORK_EXCLUDE_ITEMS = "temp"
# Use the completion scheduler by default when rm_work is active
# to try and reduce disk usage
BB_SCHEDULER ?= "completion"
# Run the rm_work task in the idle scheduling class
BB_TASK_IONICE_LEVEL:task-rm_work = "3.0"
do_rm_work () {
# Force using the HOSTTOOLS 'rm' - otherwise the SYSROOT_NATIVE 'rm' can be selected depending on PATH
# Avoids race-condition accessing 'rm' when deleting WORKDIR folders at the end of this function
RM_BIN="$(PATH=${HOSTTOOLS_DIR} command -v rm)"
if [ -z "${RM_BIN}" ]; then
bbfatal "Binary 'rm' not found in HOSTTOOLS_DIR, cannot remove WORKDIR data."
fi
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
for p in ${RM_WORK_EXCLUDE}; do
if [ "$p" = "${PN}" ]; then
bbnote "rm_work: Skipping ${PN} since it is in RM_WORK_EXCLUDE"
exit 0
fi
done
# Need to add pseudo back or subsequent work in this workdir
# might fail since setscene may not rerun to recreate it
mkdir -p ${WORKDIR}/pseudo/
excludes='${RM_WORK_EXCLUDE_ITEMS}'
# Change normal stamps into setscene stamps as they better reflect the
# fact WORKDIR is now empty
# Also leave noexec stamps since setscene stamps don't cover them
STAMPDIR=`dirname ${STAMP}`
if test -d $STAMPDIR; then
cd $STAMPDIR
for i in `basename ${STAMP}`*
do
case $i in
*sigdata*|*sigbasedata*)
# Save/skip anything that looks like a signature data file.
;;
*do_image_complete_setscene*|*do_image_qa_setscene*)
# Ensure we don't 'stack' setscene extensions to these stamps with the sections below
;;
*do_image_complete*)
# Promote do_image_complete stamps to setscene versions (ahead of *do_image* below)
mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
;;
*do_image_qa*)
# Promote do_image_qa stamps to setscene versions (ahead of *do_image* below)
mv $i `echo $i | sed -e "s#do_image_qa#do_image_qa_setscene#"`
;;
*do_package_write*|*do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*|*do_build*)
;;
*do_addto_recipe_sysroot*)
# Preserve recipe-sysroot-native if do_addto_recipe_sysroot has been used
excludes="$excludes recipe-sysroot-native"
;;
*do_package|*do_package.*|*do_package_setscene.*)
# We remove do_package entirely, including any
# sstate version since otherwise we'd need to leave 'plaindirs' around
# such as 'packages' and 'packages-split' and these can be large. No end
# of chain tasks depend directly on do_package anymore.
"${RM_BIN}" -f -- $i;
;;
*_setscene*)
# Skip stamps which are already setscene versions
;;
*)
# For everything else: if suitable, promote the stamp to a setscene
# version, otherwise remove it
for j in ${SSTATETASKS} do_shared_workdir
do
case $i in
*$j|*$j.*)
mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
break
;;
esac
done
"${RM_BIN}" -f -- $i
esac
done
fi
cd ${WORKDIR}
for dir in *
do
# Retain only logs and other files in temp, safely ignore
# failures of removing pseudo folders on NFS2/3 server.
if [ $dir = 'pseudo' ]; then
"${RM_BIN}" -rf -- $dir 2> /dev/null || true
elif ! echo "$excludes" | grep -q -w "$dir"; then
"${RM_BIN}" -rf -- $dir
fi
done
}
do_rm_work[vardepsexclude] += "SSTATETASKS"
do_rm_work_all () {
:
}
do_rm_work_all[recrdeptask] = "do_rm_work"
do_rm_work_all[noexec] = "1"
addtask rm_work_all before do_build
do_populate_sdk[postfuncs] += "rm_work_populatesdk"
rm_work_populatesdk () {
:
}
rm_work_populatesdk[cleandirs] = "${WORKDIR}/sdk"
do_image_complete[postfuncs] += "rm_work_rootfs"
rm_work_rootfs () {
:
}
rm_work_rootfs[cleandirs] = "${WORKDIR}/rootfs"
# This task can be used instead of do_build to trigger building
# without also invoking do_rm_work. It only exists when rm_work.bbclass
# is active, otherwise do_build needs to be used.
#
# The intended usage is
# ${@ d.getVar('RM_WORK_BUILD_WITHOUT') or 'do_build'}
# in places that previously used just 'do_build'.
RM_WORK_BUILD_WITHOUT = "do_build_without_rm_work"
do_build_without_rm_work () {
:
}
do_build_without_rm_work[noexec] = "1"
# We have to add these tasks already now, because all tasks are
# meant to be defined before the RecipeTaskPreProcess event triggers.
# The inject_rm_work event handler then merely changes task dependencies.
addtask do_rm_work
addtask do_build_without_rm_work
addhandler inject_rm_work
inject_rm_work[eventmask] = "bb.event.RecipeTaskPreProcess"
python inject_rm_work() {
if bb.data.inherits_class('kernel', d):
d.appendVar("RM_WORK_EXCLUDE", ' ' + d.getVar("PN"))
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
excludes = (d.getVar("RM_WORK_EXCLUDE") or "").split()
pn = d.getVar("PN")
# Determine what do_build depends upon, without including do_build
# itself or our own special do_rm_work_all.
deps = sorted((set(bb.build.preceedtask('do_build', True, d))).difference(('do_build', 'do_rm_work_all')) or "")
# deps can be empty if do_build doesn't exist, e.g. *-initial recipes
if not deps:
deps = ["do_populate_sysroot", "do_populate_lic"]
if pn in excludes:
d.delVarFlag('rm_work_rootfs', 'cleandirs')
d.delVarFlag('rm_work_populatesdk', 'cleandirs')
else:
# Inject do_rm_work into the tasks of the current recipe such that do_build
# depends on it and that it runs after all other tasks that block do_build,
# i.e. after all work on the current recipe is done. The reason for taking
# this approach instead of making do_rm_work depend on do_build is that
# do_build inherits additional runtime dependencies on
# other recipes and thus will typically run much later than completion of
# work in the recipe itself.
# In practice, addtask() here merely updates the dependencies.
bb.build.addtask('do_rm_work', 'do_rm_work_all do_build', ' '.join(deps), d)
# Always update do_build_without_rm_work dependencies.
bb.build.addtask('do_build_without_rm_work', '', ' '.join(deps), d)
}

View File

@@ -0,0 +1,32 @@
# Author: Patrick Ohly <patrick.ohly@intel.com>
# Copyright: Copyright (C) 2015 Intel Corporation
#
# SPDX-License-Identifier: MIT
# This class is used like rm_work:
# INHERIT += "rm_work_and_downloads"
#
# In addition to removing local build directories of a recipe, it also
# removes the downloaded source. This is achieved by making the DL_DIR
# recipe-specific. While reducing disk usage, it increases network usage (for
# example, compiling the same source for target and host implies downloading
# the source twice).
#
# Because the "do_fetch" task does not get re-run after removing the downloaded
# sources, this class is also not suitable for incremental builds.
#
# Where it works well is in well-connected build environments with limited
# disk space (like TravisCI).
inherit rm_work
# This would ensure that the existing do_rm_work() removes the downloads,
# but does not work because some recipes have a circular dependency between
# WORKDIR and DL_DIR (via ${SRCPV}?).
# DL_DIR = "${WORKDIR}/downloads"
# Instead go up one level and remove ourself.
DL_DIR = "${BASE_WORKDIR}/${MULTIMACH_TARGET_SYS}/${PN}/downloads"
do_rm_work:append () {
rm -rf ${DL_DIR}
}

View File

@@ -0,0 +1,58 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# Class for generating signed IPK packages.
#
# Configuration variables used by this class:
# IPK_GPG_PASSPHRASE_FILE
# Path to a file containing the passphrase of the signing key.
# IPK_GPG_NAME
# Name of the key to sign with.
# IPK_GPG_BACKEND
# Optional variable for specifying the backend to use for signing.
# Currently the only available option is 'local', i.e. local signing
# on the build host.
# IPK_GPG_SIGNATURE_TYPE
# Optional variable for specifying the type of gpg signatures, can be:
# 1. ASCII armored (ASC), default if not set
# 2. Binary (BIN)
# GPG_BIN
# Optional variable for specifying the gpg binary/wrapper to use for
# signing.
# GPG_PATH
# Optional variable for specifying the gnupg "home" directory:
#
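# Illustrative configuration sketch (key name and passphrase path are
# placeholders, not defaults):
#   INHERIT += "sign_ipk"
#   IPK_GPG_NAME = "my-signing-key"
#   IPK_GPG_PASSPHRASE_FILE = "/path/to/passphrase"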
inherit sanity
IPK_SIGN_PACKAGES = '1'
IPK_GPG_BACKEND ?= 'local'
IPK_GPG_SIGNATURE_TYPE ?= 'ASC'
python () {
# Check configuration
for var in ('IPK_GPG_NAME', 'IPK_GPG_PASSPHRASE_FILE'):
if not d.getVar(var):
raise_sanity_error("You need to define %s in the config" % var, d)
sigtype = d.getVar("IPK_GPG_SIGNATURE_TYPE")
if sigtype.upper() != "ASC" and sigtype.upper() != "BIN":
raise_sanity_error("Bad value for IPK_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN" % sigtype)
}
def sign_ipk(d, ipk_to_sign):
from oe.gpg_sign import get_signer
bb.debug(1, 'Signing ipk: %s' % ipk_to_sign)
signer = get_signer(d, d.getVar('IPK_GPG_BACKEND'))
sig_type = d.getVar('IPK_GPG_SIGNATURE_TYPE')
is_ascii_sig = (sig_type.upper() != "BIN")
signer.detach_sign(ipk_to_sign,
d.getVar('IPK_GPG_NAME'),
d.getVar('IPK_GPG_PASSPHRASE_FILE'),
armor=is_ascii_sig)

View File

@@ -0,0 +1,53 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# Class for signing package feeds
#
# Related configuration variables that will be used after this class is
# inherited:
# PACKAGE_FEED_PASSPHRASE_FILE
# Path to a file containing the passphrase of the signing key.
# PACKAGE_FEED_GPG_NAME
# Name of the key to sign with. May be key id or key name.
# PACKAGE_FEED_GPG_BACKEND
# Optional variable for specifying the backend to use for signing.
# Currently the only available option is 'local', i.e. local signing
# on the build host.
# PACKAGE_FEED_GPG_SIGNATURE_TYPE
# Optional variable for specifying the type of gpg signature, can be:
# 1. ASCII armored (ASC), default if not set
# 2. Binary (BIN)
# This variable is only available for IPK feeds. It is ignored on
# other packaging backends.
# GPG_BIN
# Optional variable for specifying the gpg binary/wrapper to use for
# signing.
# GPG_PATH
# Optional variable for specifying the gnupg "home" directory:
#
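# Illustrative configuration sketch (key name and passphrase path are
# placeholders):
#   INHERIT += "sign_package_feed"
#   PACKAGE_FEED_GPG_NAME = "my-feed-key"
#   PACKAGE_FEED_GPG_PASSPHRASE_FILE = "/path/to/passphrase"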
inherit sanity
PACKAGE_FEED_SIGN = '1'
PACKAGE_FEED_GPG_BACKEND ?= 'local'
PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC'
PACKAGEINDEXDEPS += "gnupg-native:do_populate_sysroot"
# Make the feed signing key present in the rootfs
FEATURE_PACKAGES_package-management:append = " signing-keys-packagefeed"
python () {
# Check sanity of configuration
for var in ('PACKAGE_FEED_GPG_NAME', 'PACKAGE_FEED_GPG_PASSPHRASE_FILE'):
if not d.getVar(var):
raise_sanity_error("You need to define %s in the config" % var, d)
sigtype = d.getVar("PACKAGE_FEED_GPG_SIGNATURE_TYPE")
if sigtype.upper() != "ASC" and sigtype.upper() != "BIN":
raise_sanity_error("Bad value for PACKAGE_FEED_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN" % sigtype)
}
do_package_index[depends] += "signing-keys:do_deploy"
do_rootfs[depends] += "signing-keys:do_populate_sysroot gnupg-native:do_populate_sysroot"

View File

@@ -0,0 +1,78 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# Class for generating signed RPM packages.
#
# Configuration variables used by this class:
# RPM_GPG_PASSPHRASE
# The passphrase of the signing key.
# RPM_GPG_NAME
# Name of the key to sign with. May be key id or key name.
# RPM_GPG_BACKEND
# Optional variable for specifying the backend to use for signing.
# Currently the only available option is 'local', i.e. local signing
# on the build host.
# RPM_FILE_CHECKSUM_DIGEST
# Optional variable for specifying the algorithm for generating file
# checksum digest.
# RPM_FSK_PATH
# Optional variable for the file signing key.
# RPM_FSK_PASSWORD
# Optional variable for the file signing key password.
# GPG_BIN
# Optional variable for specifying the gpg binary/wrapper to use for
# signing.
# RPM_GPG_SIGN_CHUNK
# Optional variable indicating the number of packages used per gpg
# invocation
# GPG_PATH
# Optional variable for specifying the gnupg "home" directory:
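# Illustrative configuration sketch (key name and passphrase are placeholders):
#   INHERIT += "sign_rpm"
#   RPM_GPG_NAME = "my-signing-key"
#   RPM_GPG_PASSPHRASE = "secret-passphrase"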
inherit sanity
RPM_SIGN_PACKAGES='1'
RPM_SIGN_FILES ?= '0'
RPM_GPG_BACKEND ?= 'local'
# SHA-256 is used by default
RPM_FILE_CHECKSUM_DIGEST ?= '8'
RPM_GPG_SIGN_CHUNK ?= "${BB_NUMBER_THREADS}"
python () {
if d.getVar('RPM_GPG_PASSPHRASE_FILE'):
raise_sanity_error('RPM_GPG_PASSPHRASE_FILE is replaced by RPM_GPG_PASSPHRASE', d)
# Check configuration
for var in ('RPM_GPG_NAME', 'RPM_GPG_PASSPHRASE'):
if not d.getVar(var):
raise_sanity_error("You need to define %s in the config" % var, d)
if d.getVar('RPM_SIGN_FILES') == '1':
for var in ('RPM_FSK_PATH', 'RPM_FSK_PASSWORD'):
if not d.getVar(var):
raise_sanity_error("You need to define %s in the config" % var, d)
}
python sign_rpm () {
import glob
from oe.gpg_sign import get_signer
signer = get_signer(d, d.getVar('RPM_GPG_BACKEND'))
rpms = glob.glob(d.getVar('RPM_PKGWRITEDIR') + '/*')
signer.sign_rpms(rpms,
d.getVar('RPM_GPG_NAME'),
d.getVar('RPM_GPG_PASSPHRASE'),
d.getVar('RPM_FILE_CHECKSUM_DIGEST'),
int(d.getVar('RPM_GPG_SIGN_CHUNK')),
d.getVar('RPM_FSK_PATH'),
d.getVar('RPM_FSK_PASSWORD'))
}
sign_rpm[vardepsexclude] += "RPM_GPG_SIGN_CHUNK"
do_package_index[depends] += "signing-keys:do_deploy"
do_rootfs[depends] += "signing-keys:do_populate_sysroot"
PACKAGE_WRITE_DEPS += "gnupg-native"

View File

@@ -0,0 +1,39 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
python siteconfig_do_siteconfig () {
shared_state = sstate_state_fromvars(d)
if shared_state['task'] != 'populate_sysroot':
return
if not os.path.isdir(os.path.join(d.getVar('FILE_DIRNAME'), 'site_config')):
bb.debug(1, "No site_config directory, skipping do_siteconfig")
return
sstate_install(shared_state, d)
bb.build.exec_func('do_siteconfig_gencache', d)
sstate_clean(shared_state, d)
}
EXTRASITECONFIG ?= ""
siteconfig_do_siteconfig_gencache () {
mkdir -p ${WORKDIR}/site_config_${MACHINE}
gen-site-config ${FILE_DIRNAME}/site_config \
>${WORKDIR}/site_config_${MACHINE}/configure.ac
cd ${WORKDIR}/site_config_${MACHINE}
autoconf
rm -f ${BPN}_cache
CONFIG_SITE="" ${EXTRASITECONFIG} ./configure ${CONFIGUREOPTS} --cache-file ${BPN}_cache
sed -n -e "/ac_cv_c_bigendian/p" -e "/ac_cv_sizeof_/p" \
-e "/ac_cv_type_/p" -e "/ac_cv_header_/p" -e "/ac_cv_func_/p" \
< ${BPN}_cache > ${BPN}_config
mkdir -p ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
cp ${BPN}_config ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
}
do_populate_sysroot[sstate-interceptfuncs] += "do_siteconfig "
EXPORT_FUNCTIONS do_siteconfig do_siteconfig_gencache

View File

@@ -0,0 +1,115 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
OE_TERMINAL ?= 'auto'
OE_TERMINAL[type] = 'choice'
OE_TERMINAL[choices] = 'auto none \
${@oe_terminal_prioritized()}'
OE_TERMINAL_EXPORTS += 'EXTRA_OEMAKE CACHED_CONFIGUREVARS CONFIGUREOPTS EXTRA_OECONF'
OE_TERMINAL_EXPORTS[type] = 'list'
XAUTHORITY ?= "${HOME}/.Xauthority"
SHELL ?= "bash"
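# Illustrative override from local.conf (any terminal supported by oe.terminal
# may be named, e.g. tmux or screen):
#   OE_TERMINAL = "tmux"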
def oe_terminal_prioritized():
import oe.terminal
return " ".join(o.name for o in oe.terminal.prioritized())
def emit_terminal_func(command, envdata, d):
import bb.build
cmd_func = 'do_terminal'
envdata.setVar(cmd_func, 'exec ' + command)
envdata.setVarFlag(cmd_func, 'func', '1')
runfmt = d.getVar('BB_RUNFMT') or "run.{func}.{pid}"
runfile = runfmt.format(func=cmd_func, task=cmd_func, taskfunc=cmd_func, pid=os.getpid())
runfile = os.path.join(d.getVar('T'), runfile)
bb.utils.mkdirhier(os.path.dirname(runfile))
with open(runfile, 'w') as script:
# Override the shell shell_trap_code specifies.
# If our shell is bash, we might well face silent death.
script.write("#!/bin/bash\n")
script.write(bb.build.shell_trap_code())
bb.data.emit_func(cmd_func, script, envdata)
script.write(cmd_func)
script.write("\n")
os.chmod(runfile, 0o755)
return runfile
def oe_terminal(command, title, d):
import oe.data
import oe.terminal
envdata = bb.data.init()
for v in os.environ:
envdata.setVar(v, os.environ[v])
envdata.setVarFlag(v, 'export', '1')
for export in oe.data.typed_value('OE_TERMINAL_EXPORTS', d):
value = d.getVar(export)
if value is not None:
os.environ[export] = str(value)
envdata.setVar(export, str(value))
envdata.setVarFlag(export, 'export', '1')
if export == "PSEUDO_DISABLED":
if "PSEUDO_UNLOAD" in os.environ:
del os.environ["PSEUDO_UNLOAD"]
envdata.delVar("PSEUDO_UNLOAD")
# Add in all variables from the user's original environment which
# haven't subsequently been set/changed
origbbenv = d.getVar("BB_ORIGENV", False) or {}
for key in origbbenv:
if key in envdata:
continue
value = origbbenv.getVar(key)
if value is not None:
os.environ[key] = str(value)
envdata.setVar(key, str(value))
envdata.setVarFlag(key, 'export', '1')
# Use original PATH as a fallback
path = d.getVar('PATH') + ":" + origbbenv.getVar('PATH')
os.environ['PATH'] = path
envdata.setVar('PATH', path)
# A complex PS1 might need more escaping of chars.
# Let's not export PS1 instead.
envdata.delVar("PS1")
# Replace command with an executable wrapper script
command = emit_terminal_func(command, envdata, d)
terminal = oe.data.typed_value('OE_TERMINAL', d).lower()
if terminal == 'none':
bb.fatal('Devshell usage disabled with OE_TERMINAL')
elif terminal != 'auto':
try:
oe.terminal.spawn(terminal, command, title, None, d)
return
except oe.terminal.UnsupportedTerminal:
bb.warn('Unsupported terminal "%s", defaulting to "auto"' %
terminal)
except oe.terminal.ExecutionError as exc:
bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
try:
oe.terminal.spawn_preferred(command, title, None, d)
except oe.terminal.NoSupportedTerminals as nosup:
nosup.terms.remove("false")
cmds = '\n\t'.join(nosup.terms).replace("{command}",
"do_terminal").replace("{title}", title)
bb.fatal('No valid terminal found, unable to open devshell.\n' +
'Tried the following commands:\n\t%s' % cmds)
except oe.terminal.ExecutionError as exc:
bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
oe_terminal[vardepsexclude] = "BB_ORIGENV"

View File

@@ -0,0 +1,388 @@
#
# Toaster helper class
#
# Copyright (C) 2013 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
# This bbclass is designed to extract data used by OE-Core during the build process,
# for recording in the Toaster system.
# The data access is synchronous, preserving the build data integrity across
# different builds.
#
# The data is transferred through the event system, using the MetadataEvent objects.
#
# The model is to enable the datadump functions as postfuncs, and have the dump
# executed after the real taskfunc has been executed. This prevents the task signatures
# from changing depending on whether Toaster is enabled or not. Build performance is not affected if Toaster is not enabled.
#
# To enable, use INHERIT in local.conf:
#
# INHERIT += "toaster"
#
# Find and dump layer info when we got the layers parsed
python toaster_layerinfo_dumpdata() {
import subprocess
def _get_git_branch(layer_path):
branch = subprocess.Popen("git symbolic-ref HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0]
branch = branch.decode('utf-8')
branch = branch.replace('refs/heads/', '').rstrip()
return branch
def _get_git_revision(layer_path):
revision = subprocess.Popen("git rev-parse HEAD 2>/dev/null ", cwd=layer_path, shell=True, stdout=subprocess.PIPE).communicate()[0].rstrip()
return revision
def _get_url_map_name(layer_name):
""" Some layers have a different name on openembedded.org site,
this method returns the correct name to use in the URL
"""
url_name = layer_name
url_mapping = {'meta': 'openembedded-core'}
for key in url_mapping.keys():
if key == layer_name:
url_name = url_mapping[key]
return url_name
def _get_layer_version_information(layer_path):
layer_version_info = {}
layer_version_info['branch'] = _get_git_branch(layer_path)
layer_version_info['commit'] = _get_git_revision(layer_path)
layer_version_info['priority'] = 0
return layer_version_info
def _get_layer_dict(layer_path):
layer_info = {}
layer_name = layer_path.split('/')[-1]
layer_url = 'http://layers.openembedded.org/layerindex/layer/{layer}/'
layer_url_name = _get_url_map_name(layer_name)
layer_info['name'] = layer_url_name
layer_info['local_path'] = layer_path
layer_info['layer_index_url'] = layer_url.format(layer=layer_url_name)
layer_info['version'] = _get_layer_version_information(layer_path)
return layer_info
bblayers = e.data.getVar("BBLAYERS")
llayerinfo = {}
for layer in { l for l in bblayers.strip().split(" ") if len(l) }:
llayerinfo[layer] = _get_layer_dict(layer)
bb.event.fire(bb.event.MetadataEvent("LayerInfo", llayerinfo), e.data)
}
# Dump package file info data
def _toaster_load_pkgdatafile(dirpath, filepath):
import json
import re
pkgdata = {}
with open(os.path.join(dirpath, filepath), "r") as fin:
for line in fin:
try:
kn, kv = line.strip().split(": ", 1)
m = re.match(r"^PKG:([^A-Z:]*)", kn)
if m:
pkgdata['OPKGN'] = m.group(1)
kn = kn.split(":")[0]
pkgdata[kn] = kv
if kn.startswith('FILES_INFO'):
pkgdata[kn] = json.loads(kv)
except ValueError:
pass # ignore lines without valid key: value pairs
return pkgdata
def _toaster_dumpdata(pkgdatadir, d):
"""
Dumps the data about the packages created by a recipe
"""
# No need to try and dumpdata if the recipe isn't generating packages
if not d.getVar('PACKAGES'):
return
lpkgdata = {}
datadir = os.path.join(pkgdatadir, 'runtime')
# scan and send data for each generated package
if os.path.exists(datadir):
for datafile in os.listdir(datadir):
if not datafile.endswith('.packaged'):
lpkgdata = _toaster_load_pkgdatafile(datadir, datafile)
# Fire an event containing the pkg data
bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d)
python toaster_package_dumpdata() {
_toaster_dumpdata(d.getVar('PKGDESTWORK'), d)
}
python toaster_packagedata_dumpdata() {
# This path needs to match do_packagedata[sstate-inputdirs]
_toaster_dumpdata(os.path.join(d.getVar('WORKDIR'), 'pkgdata-pdata-input'), d)
}
# 2. Dump output image files information
python toaster_artifact_dumpdata() {
"""
Dump data about SDK variables
"""
event_data = {
"TOOLCHAIN_OUTPUTNAME": d.getVar("TOOLCHAIN_OUTPUTNAME")
}
bb.event.fire(bb.event.MetadataEvent("SDKArtifactInfo", event_data), d)
}
# collect list of buildstats files based on fired events; when the build completes, collect all stats and fire an event with collected data
python toaster_collect_task_stats() {
import bb.build
import bb.event
import bb.data
import bb.utils
import os
if not e.data.getVar('BUILDSTATS_BASE'):
return # if we don't have buildstats, we cannot collect stats
toaster_statlist_file = os.path.join(e.data.getVar('BUILDSTATS_BASE'), "toasterstatlist")
def stat_to_float(value):
return float(value.strip('% \n\r'))
def _append_read_list(v):
lock = bb.utils.lockfile(e.data.expand("${TOPDIR}/toaster.lock"), False, True)
with open(toaster_statlist_file, "a") as fout:
taskdir = e.data.expand("${BUILDSTATS_BASE}/${BUILDNAME}/${PF}")
fout.write("%s::%s::%s::%s\n" % (e.taskfile, e.taskname, os.path.join(taskdir, e.task), e.data.expand("${PN}")))
bb.utils.unlockfile(lock)
def _read_stats(filename):
# seconds
cpu_time_user = 0
cpu_time_system = 0
# bytes
disk_io_read = 0
disk_io_write = 0
started = 0
ended = 0
taskname = ''
statinfo = {}
with open(filename, 'r') as task_bs:
for line in task_bs.readlines():
k,v = line.strip().split(": ", 1)
statinfo[k] = v
if "Started" in statinfo:
started = stat_to_float(statinfo["Started"])
if "Ended" in statinfo:
ended = stat_to_float(statinfo["Ended"])
if "Child rusage ru_utime" in statinfo:
cpu_time_user = cpu_time_user + stat_to_float(statinfo["Child rusage ru_utime"])
if "Child rusage ru_stime" in statinfo:
cpu_time_system = cpu_time_system + stat_to_float(statinfo["Child rusage ru_stime"])
if "IO write_bytes" in statinfo:
write_bytes = int(statinfo["IO write_bytes"].strip('% \n\r'))
disk_io_write = disk_io_write + write_bytes
if "IO read_bytes" in statinfo:
read_bytes = int(statinfo["IO read_bytes"].strip('% \n\r'))
disk_io_read = disk_io_read + read_bytes
return {
'stat_file': filename,
'cpu_time_user': cpu_time_user,
'cpu_time_system': cpu_time_system,
'disk_io_read': disk_io_read,
'disk_io_write': disk_io_write,
'started': started,
'ended': ended
}
if isinstance(e, (bb.build.TaskSucceeded, bb.build.TaskFailed)):
_append_read_list(e)
pass
if isinstance(e, bb.event.BuildCompleted) and os.path.exists(toaster_statlist_file):
events = []
with open(toaster_statlist_file, "r") as fin:
for line in fin:
(taskfile, taskname, filename, recipename) = line.strip().split("::")
stats = _read_stats(filename)
events.append((taskfile, taskname, stats, recipename))
bb.event.fire(bb.event.MetadataEvent("BuildStatsList", events), e.data)
os.unlink(toaster_statlist_file)
}
# dump relevant build history data as an event when the build is completed
python toaster_buildhistory_dump() {
import re
BUILDHISTORY_DIR = e.data.expand("${TOPDIR}/buildhistory")
BUILDHISTORY_DIR_IMAGE_BASE = e.data.expand("%s/images/${MACHINE_ARCH}/${TCLIBC}/"% BUILDHISTORY_DIR)
pkgdata_dir = e.data.getVar("PKGDATA_DIR")
# scan the build targets for this build
images = {}
allpkgs = {}
files = {}
for target in e._pkgs:
target = target.split(':')[0] # strip ':<task>' suffix from the target
installed_img_path = e.data.expand(os.path.join(BUILDHISTORY_DIR_IMAGE_BASE, target))
if os.path.exists(installed_img_path):
images[target] = {}
files[target] = {}
files[target]['dirs'] = []
files[target]['syms'] = []
files[target]['files'] = []
with open("%s/installed-package-sizes.txt" % installed_img_path, "r") as fin:
for line in fin:
line = line.rstrip(";")
psize, punit, pname = line.split()
# this size is "installed-size" as it measures how much space it takes on disk
images[target][pname.strip()] = {'size':int(psize)*1024, 'depends' : []}
with open("%s/depends.dot" % installed_img_path, "r") as fin:
p = re.compile(r'\s*"(?P<name>[^"]+)"\s*->\s*"(?P<dep>[^"]+)"(?P<rec>.*?\[style=dotted\])?')
for line in fin:
m = p.match(line)
if not m:
continue
pname = m.group('name')
dependsname = m.group('dep')
deptype = 'recommends' if m.group('rec') else 'depends'
# If RPM is used for packaging, then there may be
# dependencies such as "/bin/sh", which will confuse
# _toaster_load_pkgdatafile() later on. While at it, ignore
# any dependencies that contain parentheses, e.g.,
# "libc.so.6(GLIBC_2.7)".
if dependsname.startswith('/') or '(' in dependsname:
continue
if not pname in images[target]:
images[target][pname] = {'size': 0, 'depends' : []}
if not dependsname in images[target]:
images[target][dependsname] = {'size': 0, 'depends' : []}
images[target][pname]['depends'].append((dependsname, deptype))
# files-in-image.txt is only generated if an image file is created,
# so the file entries ('syms', 'dirs', 'files') for a target will be
# empty for rootfs builds and other "image" tasks which don't
# produce image files
# (e.g. "bitbake core-image-minimal -c populate_sdk")
files_in_image_path = "%s/files-in-image.txt" % installed_img_path
if os.path.exists(files_in_image_path):
with open(files_in_image_path, "r") as fin:
for line in fin:
lc = [ x for x in line.strip().split(" ") if len(x) > 0 ]
if lc[0].startswith("l"):
files[target]['syms'].append(lc)
elif lc[0].startswith("d"):
files[target]['dirs'].append(lc)
else:
files[target]['files'].append(lc)
for pname in images[target]:
if not pname in allpkgs:
try:
pkgdata = _toaster_load_pkgdatafile("%s/runtime-reverse/" % pkgdata_dir, pname)
except IOError as err:
if err.errno == 2:
# We expect this e.g. for RRECOMMENDS that are unsatisfied at runtime
continue
else:
raise
allpkgs[pname] = pkgdata
data = { 'pkgdata' : allpkgs, 'imgdata' : images, 'filedata' : files }
bb.event.fire(bb.event.MetadataEvent("ImagePkgList", data), e.data)
}
# get list of artifacts from sstate manifest
python toaster_artifacts() {
if e.taskname in ["do_deploy", "do_image_complete", "do_populate_sdk", "do_populate_sdk_ext"]:
d2 = d.createCopy()
d2.setVar('FILE', e.taskfile)
# Use 'stamp-extra-info' if present, else use workaround
# to determine 'SSTATE_MANMACH'
extrainf = d2.getVarFlag(e.taskname, 'stamp-extra-info')
if extrainf:
d2.setVar('SSTATE_MANMACH', extrainf)
else:
if "do_populate_sdk" == e.taskname:
d2.setVar('SSTATE_MANMACH', d2.expand("${MACHINE}${SDKMACHINE}"))
else:
d2.setVar('SSTATE_MANMACH', d2.expand("${MACHINE}"))
manifest = oe.sstatesig.sstate_get_manifest_filename(e.taskname[3:], d2)[0]
if os.access(manifest, os.R_OK):
with open(manifest) as fmanifest:
artifacts = [fname.strip() for fname in fmanifest]
data = {"task": e.taskid, "artifacts": artifacts}
bb.event.fire(bb.event.MetadataEvent("TaskArtifacts", data), d2)
}
# set event handlers
addhandler toaster_layerinfo_dumpdata
toaster_layerinfo_dumpdata[eventmask] = "bb.event.TreeDataPreparationCompleted"
addhandler toaster_collect_task_stats
toaster_collect_task_stats[eventmask] = "bb.event.BuildCompleted bb.build.TaskSucceeded bb.build.TaskFailed"
addhandler toaster_buildhistory_dump
toaster_buildhistory_dump[eventmask] = "bb.event.BuildCompleted"
addhandler toaster_artifacts
toaster_artifacts[eventmask] = "bb.runqueue.runQueueTaskSkipped bb.runqueue.runQueueTaskCompleted"
do_packagedata_setscene[postfuncs] += "toaster_packagedata_dumpdata "
do_packagedata_setscene[vardepsexclude] += "toaster_packagedata_dumpdata "
do_package[postfuncs] += "toaster_package_dumpdata "
do_package[vardepsexclude] += "toaster_package_dumpdata "
#do_populate_sdk[postfuncs] += "toaster_artifact_dumpdata "
#do_populate_sdk[vardepsexclude] += "toaster_artifact_dumpdata "
#do_populate_sdk_ext[postfuncs] += "toaster_artifact_dumpdata "
#do_populate_sdk_ext[vardepsexclude] += "toaster_artifact_dumpdata "

View File

@@ -0,0 +1,18 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# Check types of bitbake configuration variables
#
# See oe.types for details.
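# For illustration (MY_FEATURE is a hypothetical variable, not defined here),
# a configuration file could declare a type flag roughly like:
#   MY_FEATURE[type] = "boolean"
# and this handler would then validate the assigned value once the
# configuration has been parsed.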
python check_types() {
import oe.types
for key in e.data.keys():
if e.data.getVarFlag(key, "type"):
oe.data.typed_value(key, e.data)
}
addhandler check_types
check_types[eventmask] = "bb.event.ConfigParsed"

View File

@@ -0,0 +1,313 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# In order to support a deterministic set of 'dynamic' users/groups,
# we need a function to reformat the params based on a static file
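# For illustration, a build typically enables enforcement of static IDs from a
# distro or local configuration roughly like this (the table file names are
# example values; if the variables are unset the defaults files/passwd and
# files/group are used, and paths are resolved via BBPATH):
#   USERADD_UID_TABLES = "files/static-passwd"
#   USERADD_GID_TABLES = "files/static-group"
#   USERADD_ERROR_DYNAMIC = "error"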
def update_useradd_static_config(d):
import itertools
import re
import errno
import oe.useradd
def list_extend(iterable, length, obj = None):
"""Ensure that iterable is the specified length by extending with obj
and return it as a list"""
return list(itertools.islice(itertools.chain(iterable, itertools.repeat(obj)), length))
def merge_files(file_list, exp_fields):
"""Read each passwd/group file in file_list, split each line and create
a dictionary with the user/group names as keys and the split lines as
values. If the user/group name already exists in the dictionary, then
update any fields in the list with the values from the new list (if they
are set)."""
id_table = dict()
for conf in file_list.split():
try:
with open(conf, "r") as f:
for line in f:
if line.startswith('#'):
continue
# Make sure there are always at least exp_fields
# elements in the field list. This allows trailing
# colons in the files to be left out.
fields = list_extend(line.rstrip().split(":"), exp_fields)
if fields[0] not in id_table:
id_table[fields[0]] = fields
else:
id_table[fields[0]] = list(map(lambda x, y: x or y, fields, id_table[fields[0]]))
except IOError as e:
if e.errno == errno.ENOENT:
pass
return id_table
def handle_missing_id(id, type, pkg, files, var, value):
# For backwards compatibility we accept "1" in addition to "error"
error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC')
msg = 'Recipe %s, package %s: %sname "%s" does not have a static ID defined.' % (d.getVar('PN'), pkg, type, id)
if files:
msg += " Add %s to one of these files: %s" % (id, files)
else:
msg += " %s file(s) not found in BBPATH: %s" % (var, value)
if error_dynamic == 'error' or error_dynamic == '1':
raise NotImplementedError(msg)
elif error_dynamic == 'warn':
bb.warn(msg)
elif error_dynamic == 'skip':
raise bb.parse.SkipRecipe(msg)
# Return a list of configuration files, based either on the given
# default (files/passwd for USERADD_UID_TABLES, files/group for
# USERADD_GID_TABLES) or on the contents of the corresponding variable.
# Paths are resolved via BBPATH.
def get_table_list(d, var, default):
files = []
bbpath = d.getVar('BBPATH')
tables = d.getVar(var)
if not tables:
tables = default
for conf_file in tables.split():
files.append(bb.utils.which(bbpath, conf_file))
return (' '.join(files), var, default)
# We parse and rewrite the useradd components
def rewrite_useradd(params, is_pkg):
parser = oe.useradd.build_useradd_parser()
newparams = []
users = None
for param in oe.useradd.split_commands(params):
try:
uaargs = parser.parse_args(oe.useradd.split_args(param))
except Exception as e:
bb.fatal("%s: Unable to parse arguments for USERADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all passwd files specified in USERADD_UID_TABLES or files/passwd
# Use the standard passwd layout:
# username:password:user_id:group_id:comment:home_directory:login_shell
#
# If a field is left blank, the original value will be used. The 'username'
# field is required.
#
# Note: we ignore the password field, as including even the hashed password
# in the useradd command may introduce a security hole. It's assumed that
# all new users get the default ('*' which prevents login) until the user is
# specifically configured by the system admin.
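# For illustration, an entry in such a static passwd table might look roughly
# like this (uid/gid 500 and the paths are made-up example values):
#   tester:*:500:500:Test user:/home/tester:/bin/sh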
if not users:
files, table_var, table_value = get_table_list(d, 'USERADD_UID_TABLES', 'files/passwd')
users = merge_files(files, 7)
type = 'system user' if uaargs.system else 'normal user'
if uaargs.LOGIN not in users:
handle_missing_id(uaargs.LOGIN, type, pkg, files, table_var, table_value)
newparams.append(param)
continue
field = users[uaargs.LOGIN]
if uaargs.uid and field[2] and (uaargs.uid != field[2]):
bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN'), uaargs.LOGIN, uaargs.uid, field[2]))
uaargs.uid = field[2] or uaargs.uid
# Determine the possible groupname
# Unless the group name (or gid) is specified, we assume that the LOGIN is the groupname
#
# By default the system has creation of the matching groups enabled
# So if the implicit username-group creation is on, then the implicit groupname (LOGIN)
# is used, and we disable the user_group option.
#
if uaargs.gid:
uaargs.groupname = uaargs.gid
elif uaargs.user_group is not False:
uaargs.groupname = uaargs.LOGIN
else:
uaargs.groupname = 'users'
uaargs.groupid = field[3] or uaargs.groupname
if uaargs.groupid and uaargs.gid != uaargs.groupid:
newgroup = None
if not uaargs.groupid.isdigit():
# We don't have a group number, so we have to add a name
bb.debug(1, "Adding group %s!" % uaargs.groupid)
newgroup = "%s %s" % (' --system' if uaargs.system else '', uaargs.groupid)
elif uaargs.groupname and not uaargs.groupname.isdigit():
# We have a group name and a group number to assign it to
bb.debug(1, "Adding group %s (gid %s)!" % (uaargs.groupname, uaargs.groupid))
newgroup = "-g %s %s" % (uaargs.groupid, uaargs.groupname)
else:
# We want to add a group, but we don't know its name... so we can't add the group...
# We have to assume the group has previously been added or we'll fail on the adduser...
# Note: specifying the actual gid is very rare in OE, usually the group name is specified.
bb.warn("%s: Changing gid for login %s to %s, verify configuration files!" % (d.getVar('PN'), uaargs.LOGIN, uaargs.groupid))
uaargs.gid = uaargs.groupid
uaargs.user_group = None
if newgroup and is_pkg:
groupadd = d.getVar("GROUPADD_PARAM:%s" % pkg)
if groupadd:
# Only add the group if not already specified
if not uaargs.groupname in groupadd:
d.setVar("GROUPADD_PARAM:%s" % pkg, "%s; %s" % (groupadd, newgroup))
else:
d.setVar("GROUPADD_PARAM:%s" % pkg, newgroup)
uaargs.comment = "'%s'" % field[4] if field[4] else uaargs.comment
uaargs.home_dir = field[5] or uaargs.home_dir
uaargs.shell = field[6] or uaargs.shell
# Should be an error if a specific option is set...
if not uaargs.uid or not uaargs.uid.isdigit() or not uaargs.gid:
handle_missing_id(uaargs.LOGIN, type, pkg, files, table_var, table_value)
# Reconstruct the args...
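# Note: the ['', ' --option ...'][condition] indexing below selects the empty
# string when the condition is False and the formatted option when it is True,
# i.e. each option is appended only if the parsed value is set.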
newparam = ['', ' --defaults'][uaargs.defaults]
newparam += ['', ' --base-dir %s' % uaargs.base_dir][uaargs.base_dir != None]
newparam += ['', ' --comment %s' % uaargs.comment][uaargs.comment != None]
newparam += ['', ' --home-dir %s' % uaargs.home_dir][uaargs.home_dir != None]
newparam += ['', ' --expiredate %s' % uaargs.expiredate][uaargs.expiredate != None]
newparam += ['', ' --inactive %s' % uaargs.inactive][uaargs.inactive != None]
newparam += ['', ' --gid %s' % uaargs.gid][uaargs.gid != None]
newparam += ['', ' --groups %s' % uaargs.groups][uaargs.groups != None]
newparam += ['', ' --skel %s' % uaargs.skel][uaargs.skel != None]
newparam += ['', ' --key %s' % uaargs.key][uaargs.key != None]
newparam += ['', ' --no-log-init'][uaargs.no_log_init]
newparam += ['', ' --create-home'][uaargs.create_home is True]
newparam += ['', ' --no-create-home'][uaargs.create_home is False]
newparam += ['', ' --no-user-group'][uaargs.user_group is False]
newparam += ['', ' --non-unique'][uaargs.non_unique]
if uaargs.password != None:
newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
newparam += ['', ' --system'][uaargs.system]
newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
newparam += ['', ' --uid %s' % uaargs.uid][uaargs.uid != None]
newparam += ['', ' --user-group'][uaargs.user_group is True]
newparam += ' %s' % uaargs.LOGIN
newparams.append(newparam)
return ";".join(newparams).strip()
# We parse and rewrite the groupadd components
def rewrite_groupadd(params, is_pkg):
parser = oe.useradd.build_groupadd_parser()
newparams = []
groups = None
for param in oe.useradd.split_commands(params):
try:
# If we're processing multiple lines, we could have left over values here...
gaargs = parser.parse_args(oe.useradd.split_args(param))
except Exception as e:
bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all group files specified in USERADD_GID_TABLES or files/group
# Use the standard group layout:
# groupname:password:group_id:group_members
#
# If a field is left blank, the original value will be used. The 'groupname' field
# is required.
#
# Note: as with the passwd file, the 'password' field is ignored
# Note: group_members is ignored, group members must be configured with the GROUPMEMS_PARAM
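# For illustration, an entry in such a static group table might look roughly
# like this (gid 500 is a made-up example value):
#   tester:*:500: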
if not groups:
files, table_var, table_value = get_table_list(d, 'USERADD_GID_TABLES', 'files/group')
groups = merge_files(files, 4)
type = 'system group' if gaargs.system else 'normal group'
if gaargs.GROUP not in groups:
handle_missing_id(gaargs.GROUP, type, pkg, files, table_var, table_value)
newparams.append(param)
continue
field = groups[gaargs.GROUP]
if field[2]:
if gaargs.gid and (gaargs.gid != field[2]):
bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN'), gaargs.GROUP, gaargs.gid, field[2]))
gaargs.gid = field[2]
if not gaargs.gid or not gaargs.gid.isdigit():
handle_missing_id(gaargs.GROUP, type, pkg, files, table_var, table_value)
# Reconstruct the args...
newparam = ['', ' --force'][gaargs.force]
newparam += ['', ' --gid %s' % gaargs.gid][gaargs.gid != None]
newparam += ['', ' --key %s' % gaargs.key][gaargs.key != None]
newparam += ['', ' --non-unique'][gaargs.non_unique]
if gaargs.password != None:
newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None]
newparam += ['', ' --system'][gaargs.system]
newparam += ' %s' % gaargs.GROUP
newparams.append(newparam)
return ";".join(newparams).strip()
# The parsing of the current recipe depends on the content of
# the files listed in USERADD_UID/GID_TABLES. We need to tell bitbake
# about that explicitly to trigger re-parsing and thus re-execution of
# this code when the files change.
bbpath = d.getVar('BBPATH')
for varname, default in (('USERADD_UID_TABLES', 'files/passwd'),
('USERADD_GID_TABLES', 'files/group')):
tables = d.getVar(varname)
if not tables:
tables = default
for conf_file in tables.split():
bb.parse.mark_dependency(d, bb.utils.which(bbpath, conf_file))
# Load and process the users and groups, rewriting the adduser/addgroup params
useradd_packages = d.getVar('USERADD_PACKAGES') or ""
for pkg in useradd_packages.split():
# Groupmems doesn't have anything we might want to change, so simply validating
# is a bit of a waste -- only process useradd/groupadd
useradd_param = d.getVar('USERADD_PARAM:%s' % pkg)
if useradd_param:
#bb.warn("Before: 'USERADD_PARAM:%s' - '%s'" % (pkg, useradd_param))
d.setVar('USERADD_PARAM:%s' % pkg, rewrite_useradd(useradd_param, True))
#bb.warn("After: 'USERADD_PARAM:%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM:%s' % pkg)))
groupadd_param = d.getVar('GROUPADD_PARAM:%s' % pkg)
if groupadd_param:
#bb.warn("Before: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, groupadd_param))
d.setVar('GROUPADD_PARAM:%s' % pkg, rewrite_groupadd(groupadd_param, True))
#bb.warn("After: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM:%s' % pkg)))
# Load and process extra users and groups, rewriting only adduser/addgroup params
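# For illustration, EXTRA_USERS_PARAMS as used with extrausers.bbclass
# typically looks roughly like this (names and ids are made-up examples):
#   EXTRA_USERS_PARAMS = "useradd -u 1200 tester; groupadd -g 880 testgroup;"
# Only the useradd/groupadd commands in it are rewritten below.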
pkg = d.getVar('PN')
extrausers = d.getVar('EXTRA_USERS_PARAMS') or ""
#bb.warn("Before: 'EXTRA_USERS_PARAMS' - '%s'" % (d.getVar('EXTRA_USERS_PARAMS')))
new_extrausers = []
for cmd in oe.useradd.split_commands(extrausers):
if re.match('''useradd (.*)''', cmd):
useradd_param = re.match('''useradd (.*)''', cmd).group(1)
useradd_param = rewrite_useradd(useradd_param, False)
cmd = 'useradd %s' % useradd_param
elif re.match('''groupadd (.*)''', cmd):
groupadd_param = re.match('''groupadd (.*)''', cmd).group(1)
groupadd_param = rewrite_groupadd(groupadd_param, False)
cmd = 'groupadd %s' % groupadd_param
new_extrausers.append(cmd)
new_extrausers.append('')
d.setVar('EXTRA_USERS_PARAMS', ';'.join(new_extrausers))
#bb.warn("After: 'EXTRA_USERS_PARAMS' - '%s'" % (d.getVar('EXTRA_USERS_PARAMS')))
python __anonymous() {
if not bb.data.inherits_class('nativesdk', d) \
and not bb.data.inherits_class('native', d):
try:
update_useradd_static_config(d)
except NotImplementedError as f:
bb.debug(1, "Skipping recipe %s: %s" % (d.getVar('PN'), f))
raise bb.parse.SkipRecipe(f)
}

View File

@@ -0,0 +1,290 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
inherit useradd_base
# base-passwd-cross provides the default passwd and group files in the
# target sysroot, and shadow -native and -sysroot provide the utilities
# and support files needed to add and modify user and group accounts
DEPENDS:append:class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
PACKAGE_WRITE_DEPS += "shadow-native"
# This preinstall function can be run in four different contexts:
#
# a) Before do_install
# b) At do_populate_sysroot_setscene when installing from sstate packages
# c) As the preinst script in the target package at do_rootfs time
# d) As the preinst script in the target package on device as a package upgrade
#
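# For illustration, a recipe inheriting this class typically sets something
# roughly like the following (the user/group names and options here are
# made-up examples):
#   USERADD_PACKAGES = "${PN}"
#   GROUPADD_PARAM:${PN} = "--system testgroup"
#   USERADD_PARAM:${PN} = "--system --shell /bin/false --no-create-home -g testgroup tester"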
useradd_preinst () {
OPT=""
SYSROOT=""
if test "x$D" != "x"; then
# Installing into a sysroot
SYSROOT="$D"
OPT="--root $D"
# Make sure login.defs is there; this is needed to make the debian package
# backend work correctly while doing rootfs.
# The problem here is that if /etc/login.defs is treated as a config file for
# the shadow package, then while performing preinsts for packages that depend
# on shadow, only /etc/login.defs.dpkg-new might be present in the root filesystem.
if [ ! -e $D${sysconfdir}/login.defs -a -e $D${sysconfdir}/login.defs.dpkg-new ]; then
cp $D${sysconfdir}/login.defs.dpkg-new $D${sysconfdir}/login.defs
fi
# user/group lookups should match useradd/groupadd --root
export PSEUDO_PASSWD="$SYSROOT"
fi
# If we're not doing a special SSTATE/SYSROOT install
# then set the values, otherwise use the environment
if test "x$UA_SYSROOT" = "x"; then
# Installing onto a target
# Add groups and users defined only for this package
GROUPADD_PARAM="${GROUPADD_PARAM}"
USERADD_PARAM="${USERADD_PARAM}"
GROUPMEMS_PARAM="${GROUPMEMS_PARAM}"
fi
# Perform group additions first, since user additions may depend
# on these groups existing
if test "x`echo $GROUPADD_PARAM | tr -d '[:space:]'`" != "x"; then
echo "Running groupadd commands..."
# Invoke multiple instances of groupadd for parameter lists
# separated by ';'
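# For example (group names and gid are made-up values), a value like
#   GROUPADD_PARAM = "-r netdev; -r -g 997 render"
# results in two separate groupadd invocations below.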
opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
while test "x$opts" != "x"; do
perform_groupadd "$SYSROOT" "$OPT $opts"
if test "x$opts" = "x$remaining"; then
break
fi
opts=`echo "$remaining" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
remaining=`echo "$remaining" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
done
fi
if test "x`echo $USERADD_PARAM | tr -d '[:space:]'`" != "x"; then
echo "Running useradd commands..."
# Invoke multiple instances of useradd for parameter lists
# separated by ';'
opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
while test "x$opts" != "x"; do
perform_useradd "$SYSROOT" "$OPT $opts"
if test "x$opts" = "x$remaining"; then
break
fi
opts=`echo "$remaining" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
remaining=`echo "$remaining" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
done
fi
if test "x`echo $GROUPMEMS_PARAM | tr -d '[:space:]'`" != "x"; then
echo "Running groupmems commands..."
# Invoke multiple instances of groupmems for parameter lists
# separated by ';'
opts=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
remaining=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
while test "x$opts" != "x"; do
perform_groupmems "$SYSROOT" "$OPT $opts"
if test "x$opts" = "x$remaining"; then
break
fi
opts=`echo "$remaining" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
remaining=`echo "$remaining" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
done
fi
}
useradd_sysroot () {
user_group_groupmems_add_sysroot user
}
groupadd_sysroot () {
user_group_groupmems_add_sysroot group
}
groupmemsadd_sysroot () {
user_group_groupmems_add_sysroot groupmems
}
user_group_groupmems_add_sysroot () {
# Pseudo may (do_prepare_recipe_sysroot) or may not (do_populate_sysroot_setscene) be running
# at this point, so we set the environment explicitly so that pseudo can be
# loaded if it is not already present.
# PSEUDO_SYSROOT can contain references to the build architecture and COMPONENT_DIR
# so needs the STAGING_FIXME below
export PSEUDO="${FAKEROOTENV} ${PSEUDO_SYSROOT}${bindir_native}/pseudo"
# Explicitly set $D since it isn't set to anything
# before do_prepare_recipe_sysroot
D=${STAGING_DIR_TARGET}
# base-passwd's postinst may not have run yet, in which case we'll get called again later; just exit.
# Beware that in some cases we might see the fake pseudo passwd here, in which case we also must
# exit.
if [ ! -f $D${sysconfdir}/passwd ] ||
grep -q this-is-the-pseudo-passwd $D${sysconfdir}/passwd; then
exit 0
fi
# It is also possible we may be in a recipe which doesn't have useradd dependencies and hence the
# useradd/groupadd tools are unavailable. If there is no dependency, we assume we don't want to
# create users in the sysroot
if ! command -v useradd; then
bbwarn "command useradd not found!"
exit 0
fi
# Add groups and users defined for all recipe packages
if test "$1" = "group"; then
GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
elif test "$1" = "user"; then
USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
elif test "$1" = "groupmems"; then
GROUPMEMS_PARAM="${@get_all_cmd_params(d, 'groupmems')}"
elif test "x$1" = "x"; then
bbwarn "missing type of passwd db action"
fi
# Tell the system to use the environment vars
UA_SYSROOT=1
useradd_preinst
}
# The export of PSEUDO in useradd_sysroot() above contains references to
# ${PSEUDO_SYSROOT} and ${PSEUDO_LOCALSTATEDIR}. Additionally, the logging
# shell functions use ${LOGFIFO}. These need to be handled when restoring
# postinst-useradd-${PN} from the sstate cache.
EXTRA_STAGING_FIXMES += "PSEUDO_SYSROOT PSEUDO_LOCALSTATEDIR LOGFIFO"
python useradd_sysroot_sstate () {
for type, sort_prefix in [("group", "01"), ("user", "02"), ("groupmems", "03")]:
scriptfile = None
task = d.getVar("BB_CURRENTTASK")
if task == "package_setscene":
bb.build.exec_func(type + "add_sysroot", d)
elif task == "prepare_recipe_sysroot":
# Used to update this recipe's own sysroot so the user/groups are available to do_install
# If do_populate_sysroot is triggered and we wrote the file here as well, there would be
# overlapping files. See usergrouptests.UserGroupTests.test_add_task_between_p_sysroot_and_package
scriptfile = d.expand("${RECIPE_SYSROOT}${bindir}/postinst-useradd-" + sort_prefix + type + "-${PN}-recipedebug")
bb.build.exec_func(type + "add_sysroot", d)
elif task == "populate_sysroot":
# Used when installed in dependent task sysroots
scriptfile = d.expand("${SYSROOT_DESTDIR}${bindir}/postinst-useradd-" + sort_prefix + type + "-${PN}")
if scriptfile:
bb.utils.mkdirhier(os.path.dirname(scriptfile))
with open(scriptfile, 'w') as script:
script.write("#!/bin/sh -e\n")
bb.data.emit_func(type + "add_sysroot", script, d)
script.write(type + "add_sysroot\n")
os.chmod(scriptfile, 0o755)
}
do_prepare_recipe_sysroot[postfuncs] += "${SYSROOTFUNC}"
SYSROOTFUNC:class-target = "useradd_sysroot_sstate"
SYSROOTFUNC = ""
SYSROOT_PREPROCESS_FUNCS += "${SYSROOTFUNC}"
SSTATEPREINSTFUNCS:append:class-target = " useradd_sysroot_sstate"
USERADD_DEPENDS ??= ""
DEPENDS += "${USERADD_DEPENDS}"
do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}"
USERADDSETSCENEDEPS:class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene ${@' '.join(['%s:do_populate_sysroot_setscene' % pkg for pkg in d.getVar("USERADD_DEPENDS").split()])}"
USERADDSETSCENEDEPS = ""
# Recipe parse-time sanity checks
def update_useradd_after_parse(d):
useradd_packages = d.getVar('USERADD_PACKAGES')
if not useradd_packages:
bb.fatal("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
for pkg in useradd_packages.split():
d.appendVarFlag("do_populate_sysroot", "vardeps", " USERADD_PARAM:%s GROUPADD_PARAM:%s GROUPMEMS_PARAM:%s" % (pkg, pkg, pkg))
if not d.getVar('USERADD_PARAM:%s' % pkg) and not d.getVar('GROUPADD_PARAM:%s' % pkg) and not d.getVar('GROUPMEMS_PARAM:%s' % pkg):
bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE', False), pkg))
python __anonymous() {
if not bb.data.inherits_class('nativesdk', d) \
and not bb.data.inherits_class('native', d):
update_useradd_after_parse(d)
}
# Return a single [GROUP|USER]ADD_PARAM formatted string which includes the
# [group|user]add parameters for all USERADD_PACKAGES in this recipe
def get_all_cmd_params(d, cmd_type):
import string
param_type = cmd_type.upper() + "_PARAM:%s"
params = []
useradd_packages = d.getVar('USERADD_PACKAGES') or ""
for pkg in useradd_packages.split():
param = d.getVar(param_type % pkg)
if param:
params.append(param.rstrip(" ;"))
return "; ".join(params)
# Adds the preinst script into generated packages
fakeroot python populate_packages:prepend () {
def update_useradd_package(pkg):
bb.debug(1, 'adding user/group calls to preinst for %s' % pkg)
"""
useradd preinst is appended here because pkg_preinst may be
required to execute on the target. Not doing so may cause
useradd preinst to be invoked twice, causing unwanted warnings.
"""
preinst = d.getVar('pkg_preinst:%s' % pkg) or d.getVar('pkg_preinst')
if not preinst:
preinst = '#!/bin/sh\n'
preinst += 'bbnote () {\n\techo "NOTE: $*"\n}\n'
preinst += 'bbwarn () {\n\techo "WARNING: $*"\n}\n'
preinst += 'bbfatal () {\n\techo "ERROR: $*"\n\texit 1\n}\n'
preinst += 'perform_groupadd () {\n%s}\n' % d.getVar('perform_groupadd')
preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd')
preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems')
preinst += d.getVar('useradd_preinst')
# Expand out the *_PARAM variables to the package specific versions
for rep in ["GROUPADD_PARAM", "USERADD_PARAM", "GROUPMEMS_PARAM"]:
val = d.getVar(rep + ":" + pkg) or ""
preinst = preinst.replace("${" + rep + "}", val)
d.setVar('pkg_preinst:%s' % pkg, preinst)
# RDEPENDS setup
rdepends = d.getVar("RDEPENDS:%s" % pkg) or ""
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-passwd'
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'shadow'
# base-files is where the default /etc/skel is packaged
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-files'
d.setVar("RDEPENDS:%s" % pkg, rdepends)
# Add the user/group preinstall scripts and RDEPENDS requirements
# to packages specified by USERADD_PACKAGES
if not bb.data.inherits_class('nativesdk', d) \
and not bb.data.inherits_class('native', d):
useradd_packages = d.getVar('USERADD_PACKAGES') or ""
for pkg in useradd_packages.split():
update_useradd_package(pkg)
}
# Use the following to extend the useradd with custom functions
USERADDEXTENSION ?= ""
inherit_defer ${USERADDEXTENSION}

View File

@@ -0,0 +1,171 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# This bbclass provides basic functionality for user/group settings.
# This bbclass is intended to be inherited by useradd.bbclass and
# extrausers.bbclass.
# The following functions all follow the same basic pattern:
# *) Perform necessary checks before invoking the actual command
# *) Invoke the actual command with flock
# *) Error out if an error occurs.
# Note that before invoking these functions, make sure the global variable
# PSEUDO is set up correctly.
perform_groupadd () {
local rootdir="$1"
local opts="$2"
bbnote "${PN}: Performing groupadd with [$opts]"
local groupname=`echo "$opts" | awk '{ print $NF }'`
local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
if test "x$group_exists" = "x"; then
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupadd \$opts\" || true
group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
if test "x$group_exists" = "x"; then
bbfatal "${PN}: groupadd command did not succeed."
fi
else
bbnote "${PN}: group $groupname already exists, not re-creating it"
fi
}
perform_useradd () {
local rootdir="$1"
local opts="$2"
bbnote "${PN}: Performing useradd with [$opts]"
local username=`echo "$opts" | awk '{ print $NF }'`
local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
if test "x$user_exists" = "x"; then
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO useradd \$opts\" || true
user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
if test "x$user_exists" = "x"; then
bbfatal "${PN}: useradd command did not succeed."
fi
else
bbnote "${PN}: user $username already exists, not re-creating it"
fi
}
perform_groupmems () {
local rootdir="$1"
local opts="$2"
bbnote "${PN}: Performing groupmems with [$opts]"
local groupname=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-g" || $i == "--group") print $(i+1) }'`
local username=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-a" || $i == "--add") print $(i+1) }'`
bbnote "${PN}: Running groupmems command with group $groupname and user $username"
local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*$" $rootdir/etc/group || true`"
if test "x$mem_exists" = "x"; then
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupmems \$opts\" || true
mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*$" $rootdir/etc/group || true`"
if test "x$mem_exists" = "x"; then
bbfatal "${PN}: groupmems command did not succeed."
fi
else
bbnote "${PN}: group $groupname already contains $username, not re-adding it"
fi
}
perform_groupdel () {
local rootdir="$1"
local opts="$2"
bbnote "${PN}: Performing groupdel with [$opts]"
local groupname=`echo "$opts" | awk '{ print $NF }'`
local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
if test "x$group_exists" != "x"; then
local awk_input='BEGIN {FS=":"}; $1=="'$groupname'" { print $3 }'
local groupid=`echo "$awk_input" | awk -f- $rootdir/etc/group`
local awk_check_users='BEGIN {FS=":"}; $4=="'$groupid'" {print $1}'
local other_users=`echo "$awk_check_users" | awk -f- $rootdir/etc/passwd`
if test "x$other_users" = "x"; then
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupdel \$opts\" || true
group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
if test "x$group_exists" != "x"; then
bbfatal "${PN}: groupdel command did not succeed."
fi
else
bbnote "${PN}: '$groupname' is primary group for users '$other_users', not removing it"
fi
else
bbnote "${PN}: group $groupname doesn't exist, not removing it"
fi
}
perform_userdel () {
local rootdir="$1"
local opts="$2"
bbnote "${PN}: Performing userdel with [$opts]"
local username=`echo "$opts" | awk '{ print $NF }'`
local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
if test "x$user_exists" != "x"; then
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO userdel \$opts\" || true
user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
if test "x$user_exists" != "x"; then
bbfatal "${PN}: userdel command did not succeed."
fi
else
bbnote "${PN}: user $username doesn't exist, not removing it"
fi
}
perform_groupmod () {
# Other than the return value of groupmod, there's no simple way to judge whether the command
# succeeded, so we temporarily disable the -e option
set +e
local rootdir="$1"
local opts="$2"
bbnote "${PN}: Performing groupmod with [$opts]"
local groupname=`echo "$opts" | awk '{ print $NF }'`
local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
if test "x$group_exists" != "x"; then
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupmod \$opts\"
if test $? != 0; then
bbwarn "${PN}: groupmod command did not succeed."
fi
else
bbwarn "${PN}: group $groupname doesn't exist, unable to modify it"
fi
set -e
}
perform_usermod () {
# For the same reason as in groupmod, temporarily disable the -e option
set +e
local rootdir="$1"
local opts="$2"
bbnote "${PN}: Performing usermod with [$opts]"
local username=`echo "$opts" | awk '{ print $NF }'`
local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
if test "x$user_exists" != "x"; then
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO usermod \$opts\"
if test $? != 0; then
bbfatal "${PN}: usermod command did not succeed."
fi
else
bbwarn "${PN}: user $username doesn't exist, unable to modify it"
fi
set -e
}
perform_passwd_expire () {
local rootdir="$1"
local opts="$2"
bbnote "${PN}: Performing equivalent of passwd --expire with [$opts]"
# Directly set sp_lstchg to 0 without using the passwd command: Only root can do that
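# For illustration (a made-up shadow entry), the sed below rewrites a line
# roughly like
#   tester:*:19000:0:99999:7:::
# into
#   tester:*:0:0:99999:7:::
# i.e. it forces the third (sp_lstchg) field to 0, which marks the password
# as expired.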
local username=`echo "$opts" | awk '{ print $NF }'`
local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
if test "x$user_exists" != "x"; then
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO sed --follow-symlinks -i \''s/^\('$username':[^:]*\):[^:]*:/\1:0:/'\' $rootdir/etc/shadow \" || true
local passwd_lastchanged="`grep "^$username:" $rootdir/etc/shadow | cut -d: -f3`"
if test "x$passwd_lastchanged" != "x0"; then
bbfatal "${PN}: passwd --expire operation did not succeed."
fi
else
bbnote "${PN}: user $username doesn't exist, not expiring its password"
fi
}

View File

@@ -0,0 +1,22 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
#
# This class is used by the yocto-check-layer script for additional per-recipe tests.
# The first test ensures that the layer has no recipes skipping the 'installed-vs-shipped' QA check.
#
WARN_QA:remove = "installed-vs-shipped"
ERROR_QA:append = " installed-vs-shipped"
python () {
packages = set((d.getVar('PACKAGES') or '').split())
for package in packages:
skip = set((d.getVar('INSANE_SKIP') or "").split() +
(d.getVar('INSANE_SKIP:' + package) or "").split())
if 'installed-vs-shipped' in skip:
oe.qa.handle_error("installed-vs-shipped", 'Package %s is skipping "installed-vs-shipped" QA test.' % package, d)
}