Complete Yocto mirror with license table for TQMa6UL (2038-compliance)
- 264 license table entries with exact download URLs (224/264 resolved) - Complete sources/ directory with all BitBake recipes - Build configuration: tqma6ul-multi-mba6ulx, spaetzle (musl) - Full traceability for Softwarefreigabeantrag - GCC 13.4.0, Linux 6.6.102, U-Boot 2023.04, musl 1.2.4 - License distribution: GPL-2.0 (24), MIT (23), GPL-2.0+ (18), BSD-3 (16)
This commit is contained in:
749
sources/poky/meta/classes-global/base.bbclass
Normal file
749
sources/poky/meta/classes-global/base.bbclass
Normal file
@@ -0,0 +1,749 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
BB_DEFAULT_TASK ?= "build"
|
||||
CLASSOVERRIDE ?= "class-target"
|
||||
|
||||
inherit patch
|
||||
inherit staging
|
||||
|
||||
inherit mirrors
|
||||
inherit utils
|
||||
inherit utility-tasks
|
||||
inherit logging
|
||||
|
||||
PACKAGECONFIG_CONFARGS ??= ""
|
||||
|
||||
inherit metadata_scm
|
||||
|
||||
def lsb_distro_identifier(d):
    """Return the host distribution identifier string.

    If LSB_DISTRO_ADJUST names a function in the global namespace, that
    function is passed to the identifier as a post-processing hook;
    otherwise no hook is used.
    """
    hook_name = d.getVar('LSB_DISTRO_ADJUST')
    hook = globals().get(hook_name) if hook_name else None
    return oe.lsb.distro_identifier(hook)
|
||||
|
||||
die() {
	# Abort the current task, logging all arguments as a fatal error.
	bbfatal_log "$*"
}
|
||||
|
||||
oe_runmake_call() {
	# Echo the exact make invocation to the task log, then run it with
	# the recipe's EXTRA_OEMAKE flags. Exit status is that of make.
	bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
	${MAKE} ${EXTRA_OEMAKE} "$@"
}
|
||||
|
||||
oe_runmake() {
	# Run make and turn any failure into a fatal task error.
	oe_runmake_call "$@" || die "oe_runmake failed"
}
|
||||
|
||||
|
||||
def get_base_dep(d):
    """Return the default toolchain/libc dependency string for a recipe.

    Recipes that bootstrap the toolchain set INHIBIT_DEFAULT_DEPS to
    suppress these dependencies; for them an empty string is returned.
    """
    inhibited = d.getVar('INHIBIT_DEFAULT_DEPS', False)
    return "" if inhibited else "${BASE_DEFAULT_DEPS}"
|
||||
|
||||
BASE_DEFAULT_DEPS = "virtual/${HOST_PREFIX}gcc virtual/${HOST_PREFIX}compilerlibs virtual/libc"
|
||||
|
||||
BASEDEPENDS = ""
|
||||
BASEDEPENDS:class-target = "${@get_base_dep(d)}"
|
||||
BASEDEPENDS:class-nativesdk = "${@get_base_dep(d)}"
|
||||
|
||||
DEPENDS:prepend="${BASEDEPENDS} "
|
||||
|
||||
FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
|
||||
# THISDIR only works properly with imediate expansion as it has to run
|
||||
# in the context of the location its used (:=)
|
||||
THISDIR = "${@os.path.dirname(d.getVar('FILE'))}"
|
||||
|
||||
def extra_path_elements(d):
    """Build a PATH prefix from EXTRANATIVEPATH entries.

    Each entry becomes "${STAGING_BINDIR_NATIVE}/<entry>:"; the result is
    the concatenation of all of them (empty string when the variable is
    unset), ready to prepend to PATH.
    """
    entries = (d.getVar('EXTRANATIVEPATH') or "").split()
    return "".join("${STAGING_BINDIR_NATIVE}/%s:" % entry for entry in entries)
|
||||
|
||||
PATH:prepend = "${@extra_path_elements(d)}"
|
||||
|
||||
def get_lic_checksum_file_list(d):
    # Build the do_fetch [file-checksums] contribution from LIC_FILES_CHKSUM.
    # Returns a space-separated list of "<path>:<exists>" entries for every
    # absolute file:// path in LIC_FILES_CHKSUM; malformed URLs are fatal.
    filelist = []
    lic_files = d.getVar("LIC_FILES_CHKSUM") or ''
    tmpdir = d.getVar("TMPDIR")
    s = d.getVar("S")
    b = d.getVar("B")
    workdir = d.getVar("WORKDIR")

    urls = lic_files.split()
    for url in urls:
        # We only care about items that are absolute paths since
        # any others should be covered by SRC_URI.
        try:
            (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
            if method != "file" or not path:
                raise bb.fetch.MalformedUrl(url)

            if path[0] == '/':
                # Paths inside the build tree change every build and would
                # cause pointless checksum/taskhash churn, so skip them.
                if path.startswith((tmpdir, s, b, workdir)):
                    continue
                filelist.append(path + ":" + str(os.path.exists(path)))
        except bb.fetch.MalformedUrl:
            bb.fatal(d.getVar('PN') + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
    return " ".join(filelist)
|
||||
|
||||
def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
    # Populate *dest* with symlinks to the host tools named in *toolsvar*
    # (HOSTTOOLS / HOSTTOOLS_NONFATAL), resolving each against the original
    # environment PATH captured in BB_ORIGENV. With fatal=True, any missing
    # tool aborts the build.
    tools = d.getVar(toolsvar).split()
    origbbenv = d.getVar("BB_ORIGENV", False)
    path = origbbenv.getVar("PATH")
    # Need to ignore our own scripts directories to avoid circular links
    for p in path.split(":"):
        if p.endswith("/scripts"):
            path = path.replace(p, "/ignoreme")
    bb.utils.mkdirhier(dest)
    notfound = []
    for tool in tools:
        desttool = os.path.join(dest, tool)
        if not os.path.exists(desttool):
            # clean up dead symlink
            if os.path.islink(desttool):
                os.unlink(desttool)
            srctool = bb.utils.which(path, tool, executable=True)
            # gcc/g++ may link to ccache on some hosts, e.g.,
            # /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
            # would return /usr/local/bin/ccache/gcc, but what we need is
            # /usr/bin/gcc, this code can check and fix that.
            if os.path.islink(srctool) and os.path.basename(os.readlink(srctool)) == 'ccache':
                srctool = bb.utils.which(path, tool, executable=True, direction=1)
            if srctool:
                os.symlink(srctool, desttool)
            else:
                notfound.append(tool)

    if notfound and fatal:
        bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n %s" % " ".join(notfound))
|
||||
|
||||
# We can't use vardepvalue against do_fetch directly since that would overwrite
# the other task dependencies so we use an indirect function.
python fetcher_hashes_dummyfunc() {
    # Intentionally empty: exists only to carry the [vardepvalue] flag below
    # so that fetcher hash changes re-trigger do_fetch.
    return
}
fetcher_hashes_dummyfunc[vardepvalue] = "${@bb.fetch.get_hashvalue(d)}"
|
||||
|
||||
addtask fetch
|
||||
do_fetch[dirs] = "${DL_DIR}"
|
||||
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
|
||||
do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
|
||||
do_fetch[prefuncs] += "fetcher_hashes_dummyfunc"
|
||||
do_fetch[network] = "1"
|
||||
python base_do_fetch() {
    # Download every source listed in SRC_URI via the bitbake fetcher.
    # A recipe without sources is a no-op; fetch failures are fatal.
    uris = (d.getVar('SRC_URI') or "").split()
    if not uris:
        return

    try:
        bb.fetch2.Fetch(uris, d).download()
    except bb.fetch2.BBFetchException as e:
        bb.fatal("Bitbake Fetcher Error: " + repr(e))
}
|
||||
|
||||
addtask unpack after do_fetch
|
||||
do_unpack[dirs] = "${WORKDIR}"
|
||||
|
||||
do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != os.path.normpath(d.getVar('WORKDIR')) else os.path.join('${S}', 'patches')}"
|
||||
|
||||
python base_do_unpack() {
    # Unpack all fetched sources from SRC_URI into WORKDIR.
    # A recipe without sources is a no-op; unpack failures are fatal.
    uris = (d.getVar('SRC_URI') or "").split()
    if not uris:
        return

    try:
        bb.fetch2.Fetch(uris, d).unpack(d.getVar('WORKDIR'))
    except bb.fetch2.BBFetchException as e:
        bb.fatal("Bitbake Fetcher Error: " + repr(e))
}
|
||||
|
||||
SSTATETASKS += "do_deploy_source_date_epoch"
|
||||
|
||||
do_deploy_source_date_epoch () {
	# Publish the recipe's SOURCE_DATE_EPOCH file into the sstate deploy
	# directory so setscene runs can restore it later.
	mkdir -p ${SDE_DEPLOYDIR}
	if [ ! -e ${SDE_FILE} ]; then
		echo "${SDE_FILE} not found!"
	else
		echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
		cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
	fi
}
|
||||
|
||||
python do_deploy_source_date_epoch_setscene () {
    # Setscene counterpart: restore the deployed SOURCE_DATE_EPOCH file from
    # sstate into SDE_FILE so later tasks see the same epoch as the original
    # build. Missing files are only logged at debug level.
    sstate_setscene(d)
    bb.utils.mkdirhier(d.getVar('SDE_DIR'))
    sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
    if os.path.exists(sde_file):
        target = d.getVar('SDE_FILE')
        bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
        bb.utils.rename(sde_file, target)
    else:
        bb.debug(1, "%s not found!" % sde_file)
}
|
||||
|
||||
do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
|
||||
do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
|
||||
addtask do_deploy_source_date_epoch_setscene
|
||||
addtask do_deploy_source_date_epoch before do_configure after do_patch
|
||||
|
||||
python create_source_date_epoch_stamp() {
    # Version: 1
    # Compute SOURCE_DATE_EPOCH from the unpacked sources in ${S} and
    # persist it to SDE_FILE for reproducible builds (runs as a do_unpack
    # postfunc).
    source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
    oe.reproducible.epochfile_write(source_date_epoch, d.getVar('SDE_FILE'), d)
}
|
||||
do_unpack[postfuncs] += "create_source_date_epoch_stamp"
|
||||
|
||||
def get_source_date_epoch_value(d):
    # Read back the SOURCE_DATE_EPOCH value persisted by
    # create_source_date_epoch_stamp() from SDE_FILE.
    return oe.reproducible.epochfile_read(d.getVar('SDE_FILE'), d)
|
||||
|
||||
def get_layers_branch_rev(d):
    # Return one 'NAME = "branch:rev"' status line per configured layer.
    # Scanning from the end, a line whose "= branch:rev" value equals the
    # value of the line below it is truncated to just the layer name, so
    # each run of identical values is only printed once (on its last line).
    revisions = oe.buildcfg.get_layer_revisions(d)
    layers_branch_rev = ["%-20s = \"%s:%s\"" % (r[1], r[2], r[3]) for r in revisions]
    i = len(layers_branch_rev)-1
    p1 = layers_branch_rev[i].find("=")
    s1 = layers_branch_rev[i][p1:]
    while i > 0:
        p2 = layers_branch_rev[i-1].find("=")
        s2= layers_branch_rev[i-1][p2:]
        if s1 == s2:
            # Same value as the line below: keep only the layer name here.
            layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
            i -= 1
        else:
            # Different value: this line becomes the new comparison anchor.
            i -= 1
            p1 = layers_branch_rev[i].find("=")
            s1= layers_branch_rev[i][p1:]
    return layers_branch_rev
|
||||
|
||||
|
||||
BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
|
||||
BUILDCFG_FUNCS[type] = "list"
|
||||
|
||||
def buildcfg_vars(d):
    """Yield one 'NAME = "value"' status line per set variable in BUILDCFG_VARS.

    Variables that are unset (None) are silently skipped; empty strings are
    still reported.
    """
    for name in oe.data.typed_value('BUILDCFG_VARS', d):
        current = d.getVar(name)
        if current is not None:
            yield '%-20s = "%s"' % (name, current)
|
||||
|
||||
def buildcfg_neededvars(d):
    """Abort the build if any variable named in BUILDCFG_NEEDEDVARS is unset
    or still carries the placeholder value 'INVALID'."""
    missing = []
    for name in oe.data.typed_value("BUILDCFG_NEEDEDVARS", d):
        value = d.getVar(name)
        # getVar yields a string or None here; empty/None/placeholder all count as unset.
        if value in (None, '', 'INVALID'):
            missing.append(name)

    if missing:
        bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(missing))
|
||||
|
||||
addhandler base_eventhandler
|
||||
base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.event.RecipeParsed"
|
||||
python base_eventhandler() {
    # Central event handler for build lifecycle events (mask set via the
    # [eventmask] flag above): records host/bitbake identification on
    # ConfigParsed, populates HOSTTOOLS_DIR, merges multiconfig signature
    # exclusions, prints the build configuration banner, de-duplicates
    # SDK/target PREFERRED_PROVIDERs, and skips non-preferred virtual/*
    # providers at RecipeParsed time.
    import bb.runqueue

    if isinstance(e, bb.event.ConfigParsed):
        if not d.getVar("NATIVELSBSTRING", False):
            d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
        d.setVar("ORIGNATIVELSBSTRING", d.getVar("NATIVELSBSTRING", False))
        d.setVar('BB_VERSION', bb.__version__)

    # There might be no bb.event.ConfigParsed event if bitbake server is
    # running, so check bb.event.BuildStarted too to make sure ${HOSTTOOLS_DIR}
    # exists.
    if isinstance(e, bb.event.ConfigParsed) or \
            (isinstance(e, bb.event.BuildStarted) and not os.path.exists(d.getVar('HOSTTOOLS_DIR'))):
        # Works with the line in layer.conf which changes PATH to point here
        setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
        setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)

    if isinstance(e, bb.event.MultiConfigParsed):
        # We need to expand SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS in each of the multiconfig data stores
        # own contexts so the variables get expanded correctly for that arch, then inject back into
        # the main data store.
        deps = []
        for config in e.mcdata:
            deps.append(e.mcdata[config].getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS"))
        deps = " ".join(deps)
        e.mcdata[''].setVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", deps)

    if isinstance(e, bb.event.BuildStarted):
        # Collect banner lines from each function listed in BUILDCFG_FUNCS
        # and print them under BUILDCFG_HEADER.
        localdata = bb.data.createCopy(d)
        statuslines = []
        for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
            g = globals()
            if func not in g:
                bb.warn("Build configuration function '%s' does not exist" % func)
            else:
                flines = g[func](localdata)
                if flines:
                    statuslines.extend(flines)

        statusheader = d.getVar('BUILDCFG_HEADER')
        if statusheader:
            bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))

    # This code is to silence warnings where the SDK variables overwrite the
    # target ones and we'd see duplicate key names overwriting each other
    # for various PREFERRED_PROVIDERS
    if isinstance(e, bb.event.RecipePreFinalise):
        if d.getVar("TARGET_PREFIX") == d.getVar("SDK_PREFIX"):
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")

    if isinstance(e, bb.event.RecipeParsed):
        #
        # If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set
        # skip parsing for all the other providers which will mean they get uninstalled from the
        # sysroot since they're now "unreachable". This makes switching virtual/kernel work in
        # particular.
        #
        pn = d.getVar('PN')
        source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
        if not source_mirror_fetch:
            provs = (d.getVar("PROVIDES") or "").split()
            multiprovidersallowed = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
            for p in provs:
                if p.startswith("virtual/") and p not in multiprovidersallowed:
                    profprov = d.getVar("PREFERRED_PROVIDER_" + p)
                    if profprov and pn != profprov:
                        raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
}
|
||||
|
||||
CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
|
||||
CLEANBROKEN = "0"
|
||||
|
||||
addtask configure after do_patch
|
||||
do_configure[dirs] = "${B}"
|
||||
base_do_configure() {
	# If the configure stamp no longer matches the current task hash, the
	# previous configuration is stale: run 'make clean' (unless the recipe
	# sets CLEANBROKEN=1) and remove generated libtool .la files.
	if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
		if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
			cd ${B}
			if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
				oe_runmake clean
			fi
			# -ignore_readdir_race does not work correctly with -delete;
			# use xargs to avoid spurious build failures
			find ${B} -ignore_readdir_race -name \*.la -type f -print0 | xargs -0 rm -f
		fi
	fi
	# Record this configuration's task hash for the next run's comparison.
	if [ -n "${CONFIGURESTAMPFILE}" ]; then
		mkdir -p `dirname ${CONFIGURESTAMPFILE}`
		echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
	fi
}
|
||||
|
||||
addtask compile after do_configure
|
||||
do_compile[dirs] = "${B}"
|
||||
base_do_compile() {
	# Default compile: run make when a makefile is present, otherwise
	# just note that there is nothing to build.
	if [ ! -e Makefile ] && [ ! -e makefile ] && [ ! -e GNUmakefile ]; then
		bbnote "nothing to compile"
	else
		oe_runmake || die "make failed"
	fi
}
|
||||
|
||||
addtask install after do_compile
|
||||
do_install[dirs] = "${B}"
|
||||
# Remove and re-create ${D} so that it is guaranteed to be empty
|
||||
do_install[cleandirs] = "${D}"
|
||||
|
||||
base_do_install() {
	# Default install is a no-op; recipes override do_install as needed.
	:
}
|
||||
|
||||
addtask build after do_populate_sysroot
|
||||
do_build[noexec] = "1"
|
||||
do_build[recrdeptask] += "do_deploy"
|
||||
do_build () {
	# Marker task (flagged [noexec] above) anchoring the default task graph.
	:
}
|
||||
|
||||
def set_packagetriplet(d):
    # Compute PKGTRIPLETS and PKGMLTRIPLETS: "<arch><vendor>-<os>" triplets
    # for the base configuration and, additionally, for every multilib
    # variant's override context.
    archs = []
    tos = []
    tvs = []

    archs.append(d.getVar("PACKAGE_ARCHS").split())
    tos.append(d.getVar("TARGET_OS"))
    tvs.append(d.getVar("TARGET_VENDOR"))

    def settriplet(d, varname, archs, tos, tvs):
        # Join each arch list with its matching vendor/os pair, then reverse
        # so the most specific triplet comes first.
        triplets = []
        for i in range(len(archs)):
            for arch in archs[i]:
                triplets.append(arch + tvs[i] + "-" + tos[i])
        triplets.reverse()
        d.setVar(varname, " ".join(triplets))

    settriplet(d, "PKGTRIPLETS", archs, tos, tvs)

    variants = d.getVar("MULTILIB_VARIANTS") or ""
    for item in variants.split():
        # Evaluate PACKAGE_ARCHS/TARGET_OS/TARGET_VENDOR under the multilib
        # override so each variant contributes its own triplets.
        localdata = bb.data.createCopy(d)
        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
        localdata.setVar("OVERRIDES", overrides)

        archs.append(localdata.getVar("PACKAGE_ARCHS").split())
        tos.append(localdata.getVar("TARGET_OS"))
        tvs.append(localdata.getVar("TARGET_VENDOR"))

    settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)
|
||||
|
||||
python () {
    # Anonymous function, run for every recipe after parsing. Applies global
    # policy: feature backfilling, S/B sanity checks, SKIP_RECIPE handling,
    # PACKAGECONFIG expansion, LICENSE checks, fakeroot wiring,
    # COMPATIBLE_MACHINE/COMPATIBLE_HOST filtering, incompatible-license
    # skipping, fetch/unpack tool dependencies and PACKAGE_ARCH adjustment.
    import string, re

    # Handle backfilling
    oe.utils.features_backfill("DISTRO_FEATURES", d)
    oe.utils.features_backfill("MACHINE_FEATURES", d)

    if d.getVar("S")[-1] == '/':
        bb.warn("Recipe %s sets S variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("S")))
    if d.getVar("B")[-1] == '/':
        bb.warn("Recipe %s sets B variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("B")))

    # Keep pseudo from tracking the source/build trees when they are not
    # the work directory itself.
    if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("S")):
        d.appendVar("PSEUDO_IGNORE_PATHS", ",${S}")
    if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("B")):
        d.appendVar("PSEUDO_IGNORE_PATHS", ",${B}")

    # To add a recipe to the skip list , set:
    # SKIP_RECIPE[pn] = "message"
    pn = d.getVar('PN')
    skip_msg = d.getVarFlag('SKIP_RECIPE', pn)
    if skip_msg:
        bb.debug(1, "Skipping %s %s" % (pn, skip_msg))
        raise bb.parse.SkipRecipe("Recipe will be skipped because: %s" % (skip_msg))

    # Handle PACKAGECONFIG
    #
    # These take the form:
    #
    # PACKAGECONFIG ??= "<default options>"
    # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends,foo_conflict_packageconfig"
    pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
    if pkgconfigflags:
        pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
        pn = d.getVar("PN")

        mlprefix = d.getVar("MLPREFIX")

        def expandFilter(appends, extension, prefix):
            # Rewrite dependency names for native/nativesdk/multilib
            # contexts, preserving -native and -cross- entries as-is.
            appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
            newappends = []
            for a in appends:
                if a.endswith("-native") or ("-cross-" in a):
                    newappends.append(a)
                elif a.startswith("virtual/"):
                    subs = a.split("/", 1)[1]
                    if subs.startswith(prefix):
                        newappends.append(a + extension)
                    else:
                        newappends.append("virtual/" + prefix + subs + extension)
                else:
                    if a.startswith(prefix):
                        newappends.append(a + extension)
                    else:
                        newappends.append(prefix + a + extension)
            return newappends

        def appendVar(varname, appends):
            # Append the (context-filtered) dependency list to *varname*.
            if not appends:
                return
            if varname.find("DEPENDS") != -1:
                if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d) :
                    appends = expandFilter(appends, "", "nativesdk-")
                elif bb.data.inherits_class('native', d):
                    appends = expandFilter(appends, "-native", "")
                elif mlprefix:
                    appends = expandFilter(appends, "", mlprefix)
            varname = d.expand(varname)
            d.appendVar(varname, " " + " ".join(appends))

        extradeps = []
        extrardeps = []
        extrarrecs = []
        extraconf = []
        for flag, flagval in sorted(pkgconfigflags.items()):
            items = flagval.split(",")
            num = len(items)
            if num > 6:
                bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend,conflict_packageconfig can be specified!"
                    % (d.getVar('PN'), flag))

            if flag in pkgconfig:
                # Enabled option: collect its deps/rdeps/rrecs and the
                # enable configure argument.
                if num >= 3 and items[2]:
                    extradeps.append(items[2])
                if num >= 4 and items[3]:
                    extrardeps.append(items[3])
                if num >= 5 and items[4]:
                    extrarrecs.append(items[4])
                if num >= 1 and items[0]:
                    extraconf.append(items[0])
            elif num >= 2 and items[1]:
                # Disabled option: only the disable configure argument.
                extraconf.append(items[1])

            if num >= 6 and items[5]:
                conflicts = set(items[5].split())
                invalid = conflicts.difference(set(pkgconfigflags.keys()))
                if invalid:
                    bb.error("%s: PACKAGECONFIG[%s] Invalid conflict package config%s '%s' specified."
                        % (d.getVar('PN'), flag, 's' if len(invalid) > 1 else '', ' '.join(invalid)))

                if flag in pkgconfig:
                    intersec = conflicts.intersection(set(pkgconfig))
                    if intersec:
                        bb.fatal("%s: PACKAGECONFIG[%s] Conflict package config%s '%s' set in PACKAGECONFIG."
                            % (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))

        appendVar('DEPENDS', extradeps)
        appendVar('RDEPENDS:${PN}', extrardeps)
        appendVar('RRECOMMENDS:${PN}', extrarrecs)
        appendVar('PACKAGECONFIG_CONFARGS', extraconf)

    pn = d.getVar('PN')
    license = d.getVar('LICENSE')
    if license == "INVALID" and pn != "defaultpkgname":
        bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)

    if bb.data.inherits_class('license', d):
        check_license_format(d)
        unmatched_license_flags = check_license_flags(d)
        if unmatched_license_flags:
            for unmatched in unmatched_license_flags:
                message = "Has a restricted license '%s' which is not listed in your LICENSE_FLAGS_ACCEPTED." % unmatched
                details = d.getVarFlag("LICENSE_FLAGS_DETAILS", unmatched)
                if details:
                    message += "\n" + details
                bb.debug(1, "Skipping %s: %s" % (pn, message))
                raise bb.parse.SkipRecipe(message)

    # If we're building a target package we need to use fakeroot (pseudo)
    # in order to capture permissions, owners, groups and special files
    if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
        d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_install', 'fakeroot', '1')
        d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_package', 'fakeroot', '1')
        d.setVarFlag('do_package_setscene', 'fakeroot', '1')
        d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_devshell', 'fakeroot', '1')
        d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')

    need_machine = d.getVar('COMPATIBLE_MACHINE')
    if need_machine and not d.getVar('PARSE_ALL_RECIPES', False):
        import re
        compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
        for m in compat_machines:
            if re.match(need_machine, m):
                break
        else:
            raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))

    source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False) or d.getVar('PARSE_ALL_RECIPES', False)
    if not source_mirror_fetch:
        need_host = d.getVar('COMPATIBLE_HOST')
        if need_host:
            import re
            this_host = d.getVar('HOST_SYS')
            if not re.match(need_host, this_host):
                raise bb.parse.SkipRecipe("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)

        bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()

        # License checks are skipped for toolchain/cross/SDK recipes.
        check_license = False if pn.startswith("nativesdk-") else True
        for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
              "-crosssdk-${SDK_SYS}", "-crosssdk-initial-${SDK_SYS}",
              "-cross-canadian-${TRANSLATED_TARGET_ARCH}"]:
            if pn.endswith(d.expand(t)):
                check_license = False
        if pn.startswith("gcc-source-"):
            check_license = False

        if check_license and bad_licenses:
            bad_licenses = expand_wildcard_licenses(d, bad_licenses)

            exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()

            for lic_exception in exceptions:
                if ":" in lic_exception:
                    lic_exception = lic_exception.split(":")[1]
                if lic_exception in oe.license.obsolete_license_list():
                    bb.fatal("Obsolete license %s used in INCOMPATIBLE_LICENSE_EXCEPTIONS" % lic_exception)

            pkgs = d.getVar('PACKAGES').split()
            skipped_pkgs = {}
            unskipped_pkgs = []
            for pkg in pkgs:
                remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)

                incompatible_lic = incompatible_license(d, remaining_bad_licenses, pkg)
                if incompatible_lic:
                    skipped_pkgs[pkg] = incompatible_lic
                else:
                    unskipped_pkgs.append(pkg)

            if unskipped_pkgs:
                # Some packages survive: only exclude the bad ones at rootfs time.
                for pkg in skipped_pkgs:
                    bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
                    d.setVar('_exclude_incompatible-' + pkg, ' '.join(skipped_pkgs[pkg]))
                for pkg in unskipped_pkgs:
                    bb.debug(1, "Including the package %s" % pkg)
            else:
                # Every package is incompatible: skip the whole recipe.
                incompatible_lic = incompatible_license(d, bad_licenses)
                for pkg in skipped_pkgs:
                    incompatible_lic += skipped_pkgs[pkg]
                incompatible_lic = sorted(list(set(incompatible_lic)))

                if incompatible_lic:
                    bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
                    raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))

    srcuri = d.getVar('SRC_URI')
    for uri_string in srcuri.split():
        uri = bb.fetch.URI(uri_string)
        # Also check downloadfilename as the URL path might not be useful for sniffing
        path = uri.params.get("downloadfilename", uri.path)

        # HTTP/FTP use the wget fetcher
        if uri.scheme in ("http", "https", "ftp"):
            d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')

        # Svn packages should DEPEND on subversion-native
        if uri.scheme == "svn":
            d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')

        # Git packages should DEPEND on git-native
        elif uri.scheme in ("git", "gitsm"):
            d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')

        # Mercurial packages should DEPEND on mercurial-native
        elif uri.scheme == "hg":
            d.appendVar("EXTRANATIVEPATH", ' python3-native ')
            d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot ca-certificates-native:do_populate_sysroot')

        # OSC packages should DEPEND on osc-native
        elif uri.scheme == "osc":
            d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')

        elif uri.scheme == "npm":
            d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')

        elif uri.scheme == "repo":
            d.appendVarFlag('do_fetch', 'depends', ' repo-native:do_populate_sysroot')

        # *.lz4 should DEPEND on lz4-native for unpacking
        if path.endswith('.lz4'):
            d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')

        # *.zst should DEPEND on zstd-native for unpacking
        elif path.endswith('.zst'):
            d.appendVarFlag('do_unpack', 'depends', ' zstd-native:do_populate_sysroot')

        # *.lz should DEPEND on lzip-native for unpacking
        elif path.endswith('.lz'):
            d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')

        # *.xz should DEPEND on xz-native for unpacking
        elif path.endswith('.xz') or path.endswith('.txz'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # .zip should DEPEND on unzip-native for unpacking
        elif path.endswith('.zip') or path.endswith('.jar'):
            d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')

        # Some rpm files may be compressed internally using xz (for example, rpms from Fedora)
        elif path.endswith('.rpm'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # *.deb should DEPEND on xz-native for unpacking
        elif path.endswith('.deb'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # *.7z should DEPEND on p7zip-native for unpacking
        elif path.endswith('.7z'):
            d.appendVarFlag('do_unpack', 'depends', ' p7zip-native:do_populate_sysroot')

    set_packagetriplet(d)

    # 'multimachine' handling
    mach_arch = d.getVar('MACHINE_ARCH')
    pkg_arch = d.getVar('PACKAGE_ARCH')

    if (pkg_arch == mach_arch):
        # Already machine specific - nothing further to do
        return

    #
    # We always try to scan SRC_URI for urls with machine overrides
    # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
    #
    override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH')
    if override != '0':
        paths = []
        fpaths = (d.getVar('FILESPATH') or '').split(':')
        machine = d.getVar('MACHINE')
        for p in fpaths:
            if os.path.basename(p) == machine and os.path.isdir(p):
                paths.append(p)

        if paths:
            for s in srcuri.split():
                if not s.startswith("file://"):
                    continue
                fetcher = bb.fetch2.Fetch([s], d)
                local = fetcher.localpath(s)
                for mp in paths:
                    if local.startswith(mp):
                        #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
                        d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
                        return

    packages = d.getVar('PACKAGES').split()
    for pkg in packages:
        pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg)

        # We could look for != PACKAGE_ARCH here but how to choose
        # if multiple differences are present?
        # Look through PACKAGE_ARCHS for the priority order?
        if pkgarch and pkgarch == mach_arch:
            d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
            bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN"))
}
|
||||
|
||||
addtask cleansstate after do_clean
|
||||
python do_cleansstate() {
    # Remove this recipe's shared-state (sstate) cache files.
    sstate_clean_cachefiles(d)
}
|
||||
addtask cleanall after do_cleansstate
|
||||
do_cleansstate[nostamp] = "1"
|
||||
|
||||
python do_cleanall() {
    # Remove every fetched source artifact referenced by SRC_URI.
    # A recipe without sources is a no-op; fetcher errors are fatal.
    uris = (d.getVar('SRC_URI') or "").split()
    if not uris:
        return

    try:
        bb.fetch2.Fetch(uris, d).clean()
    except bb.fetch2.BBFetchException as e:
        bb.fatal(str(e))
}
|
||||
do_cleanall[nostamp] = "1"
|
||||
|
||||
|
||||
EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install
|
||||
302
sources/poky/meta/classes-global/buildstats.bbclass
Normal file
302
sources/poky/meta/classes-global/buildstats.bbclass
Normal file
@@ -0,0 +1,302 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
|
||||
|
||||
################################################################################
|
||||
# Build statistics gathering.
|
||||
#
|
||||
# The CPU and Time gathering/tracking functions and bbevent inspiration
|
||||
# were written by Christopher Larson.
|
||||
#
|
||||
################################################################################
|
||||
|
||||
def get_buildprocess_cputime(pid):
    # Return the cumulative CPU time (in clock ticks) consumed by *pid*,
    # read from /proc/<pid>/stat (Linux-specific).
    with open("/proc/%d/stat" % pid, "r") as f:
        fields = f.readline().rstrip().split()
    # 13: utime, 14: stime, 15: cutime, 16: cstime
    # NOTE(review): the slice [13:16] sums utime+stime+cutime only; cstime
    # (field 16) is listed in the comment above but excluded by the slice.
    # Confirm whether that is intentional before changing it.
    return sum(int(field) for field in fields[13:16])
|
||||
|
||||
def get_process_cputime(pid):
    # Collect CPU-time and I/O statistics for *pid* plus rusage data for
    # the current process. Returns (stat-dict, io-dict, rusage-self,
    # rusage-children); values from /proc are kept as strings.
    import resource
    with open("/proc/%d/stat" % pid, "r") as f:
        fields = f.readline().rstrip().split()
    stats = {
        'utime'  : fields[13],
        'stime'  : fields[14],
        'cutime' : fields[15],
        'cstime' : fields[16],
    }
    iostats = {}
    if os.path.isfile("/proc/%d/io" % pid):
        with open("/proc/%d/io" % pid, "r") as f:
            while True:
                i = f.readline().strip()
                if not i:
                    break
                if not ":" in i:
                    # one more extra line is appended (empty or containing "0")
                    # most probably due to race condition in kernel while
                    # updating IO stats
                    break
                i = i.split(": ")
                iostats[i[0]] = i[1]
    resources = resource.getrusage(resource.RUSAGE_SELF)
    childres = resource.getrusage(resource.RUSAGE_CHILDREN)
    return stats, iostats, resources, childres
|
||||
|
||||
def get_cputime():
|
||||
with open("/proc/stat", "r") as f:
|
||||
fields = f.readline().rstrip().split()[1:]
|
||||
return sum(int(field) for field in fields)
|
||||
|
||||
def set_timedata(var, d, server_time):
|
||||
d.setVar(var, server_time)
|
||||
|
||||
def get_timedata(var, d, end_time):
|
||||
oldtime = d.getVar(var, False)
|
||||
if oldtime is None:
|
||||
return
|
||||
return end_time - oldtime
|
||||
|
||||
def set_buildtimedata(var, d):
|
||||
import time
|
||||
time = time.time()
|
||||
cputime = get_cputime()
|
||||
proctime = get_buildprocess_cputime(os.getpid())
|
||||
d.setVar(var, (time, cputime, proctime))
|
||||
|
||||
def get_buildtimedata(var, d):
|
||||
import time
|
||||
timedata = d.getVar(var, False)
|
||||
if timedata is None:
|
||||
return
|
||||
oldtime, oldcpu, oldproc = timedata
|
||||
procdiff = get_buildprocess_cputime(os.getpid()) - oldproc
|
||||
cpudiff = get_cputime() - oldcpu
|
||||
end_time = time.time()
|
||||
timediff = end_time - oldtime
|
||||
if cpudiff > 0:
|
||||
cpuperc = float(procdiff) * 100 / cpudiff
|
||||
else:
|
||||
cpuperc = None
|
||||
return timediff, cpuperc
|
||||
|
||||
def write_task_data(status, logfile, e, d):
|
||||
with open(os.path.join(logfile), "a") as f:
|
||||
elapsedtime = get_timedata("__timedata_task", d, e.time)
|
||||
if elapsedtime:
|
||||
f.write(d.expand("${PF}: %s\n" % e.task))
|
||||
f.write(d.expand("Elapsed time: %0.2f seconds\n" % elapsedtime))
|
||||
cpu, iostats, resources, childres = get_process_cputime(os.getpid())
|
||||
if cpu:
|
||||
f.write("utime: %s\n" % cpu['utime'])
|
||||
f.write("stime: %s\n" % cpu['stime'])
|
||||
f.write("cutime: %s\n" % cpu['cutime'])
|
||||
f.write("cstime: %s\n" % cpu['cstime'])
|
||||
for i in iostats:
|
||||
f.write("IO %s: %s\n" % (i, iostats[i]))
|
||||
rusages = ["ru_utime", "ru_stime", "ru_maxrss", "ru_minflt", "ru_majflt", "ru_inblock", "ru_oublock", "ru_nvcsw", "ru_nivcsw"]
|
||||
for i in rusages:
|
||||
f.write("rusage %s: %s\n" % (i, getattr(resources, i)))
|
||||
for i in rusages:
|
||||
f.write("Child rusage %s: %s\n" % (i, getattr(childres, i)))
|
||||
if status == "passed":
|
||||
f.write("Status: PASSED \n")
|
||||
else:
|
||||
f.write("Status: FAILED \n")
|
||||
f.write("Ended: %0.2f \n" % e.time)
|
||||
|
||||
def write_host_data(logfile, e, d, type):
|
||||
import subprocess, os, datetime
|
||||
# minimum time allowed for each command to run, in seconds
|
||||
time_threshold = 0.5
|
||||
limit = 10
|
||||
# the total number of commands
|
||||
num_cmds = 0
|
||||
msg = ""
|
||||
if type == "interval":
|
||||
# interval at which data will be logged
|
||||
interval = d.getVar("BB_HEARTBEAT_EVENT", False)
|
||||
if interval is None:
|
||||
bb.warn("buildstats: Collecting host data at intervals failed. Set BB_HEARTBEAT_EVENT=\"<interval>\" in conf/local.conf for the interval at which host data will be logged.")
|
||||
d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
|
||||
return
|
||||
interval = int(interval)
|
||||
cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_INTERVAL')
|
||||
msg = "Host Stats: Collecting data at %d second intervals.\n" % interval
|
||||
if cmds is None:
|
||||
d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
|
||||
bb.warn("buildstats: Collecting host data at intervals failed. Set BB_LOG_HOST_STAT_CMDS_INTERVAL=\"command1 ; command2 ; ... \" in conf/local.conf.")
|
||||
return
|
||||
if type == "failure":
|
||||
cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_FAILURE')
|
||||
msg = "Host Stats: Collecting data on failure.\n"
|
||||
msg += "Failed at task: " + e.task + "\n"
|
||||
if cmds is None:
|
||||
d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
|
||||
bb.warn("buildstats: Collecting host data on failure failed. Set BB_LOG_HOST_STAT_CMDS_FAILURE=\"command1 ; command2 ; ... \" in conf/local.conf.")
|
||||
return
|
||||
c_san = []
|
||||
for cmd in cmds.split(";"):
|
||||
if len(cmd) == 0:
|
||||
continue
|
||||
num_cmds += 1
|
||||
c_san.append(cmd)
|
||||
if num_cmds == 0:
|
||||
if type == "interval":
|
||||
d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
|
||||
if type == "failure":
|
||||
d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
|
||||
return
|
||||
|
||||
# return if the interval is not enough to run all commands within the specified BB_HEARTBEAT_EVENT interval
|
||||
if type == "interval":
|
||||
limit = interval / num_cmds
|
||||
if limit <= time_threshold:
|
||||
d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
|
||||
bb.warn("buildstats: Collecting host data failed. BB_HEARTBEAT_EVENT interval not enough to run the specified commands. Increase value of BB_HEARTBEAT_EVENT in conf/local.conf.")
|
||||
return
|
||||
|
||||
# set the environment variables
|
||||
path = d.getVar("PATH")
|
||||
opath = d.getVar("BB_ORIGENV", False).getVar("PATH")
|
||||
ospath = os.environ['PATH']
|
||||
os.environ['PATH'] = path + ":" + opath + ":" + ospath
|
||||
with open(logfile, "a") as f:
|
||||
f.write("Event Time: %f\nDate: %s\n" % (e.time, datetime.datetime.now()))
|
||||
f.write("%s" % msg)
|
||||
for c in c_san:
|
||||
try:
|
||||
output = subprocess.check_output(c.split(), stderr=subprocess.STDOUT, timeout=limit).decode('utf-8')
|
||||
except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as err:
|
||||
output = "Error running command: %s\n%s\n" % (c, err)
|
||||
f.write("%s\n%s\n" % (c, output))
|
||||
# reset the environment
|
||||
os.environ['PATH'] = ospath
|
||||
|
||||
python run_buildstats () {
|
||||
import bb.build
|
||||
import bb.event
|
||||
import time, subprocess, platform
|
||||
|
||||
bn = d.getVar('BUILDNAME')
|
||||
########################################################################
|
||||
# bitbake fires HeartbeatEvent even before a build has been
|
||||
# triggered, causing BUILDNAME to be None
|
||||
########################################################################
|
||||
if bn is not None:
|
||||
bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
|
||||
taskdir = os.path.join(bsdir, d.getVar('PF'))
|
||||
if isinstance(e, bb.event.HeartbeatEvent) and bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_INTERVAL")):
|
||||
bb.utils.mkdirhier(bsdir)
|
||||
write_host_data(os.path.join(bsdir, "host_stats_interval"), e, d, "interval")
|
||||
|
||||
if isinstance(e, bb.event.BuildStarted):
|
||||
########################################################################
|
||||
# If the kernel was not configured to provide I/O statistics, issue
|
||||
# a one time warning.
|
||||
########################################################################
|
||||
if not os.path.isfile("/proc/%d/io" % os.getpid()):
|
||||
bb.warn("The Linux kernel on your build host was not configured to provide process I/O statistics. (CONFIG_TASK_IO_ACCOUNTING is not set)")
|
||||
|
||||
########################################################################
|
||||
# at first pass make the buildstats hierarchy and then
|
||||
# set the buildname
|
||||
########################################################################
|
||||
bb.utils.mkdirhier(bsdir)
|
||||
set_buildtimedata("__timedata_build", d)
|
||||
build_time = os.path.join(bsdir, "build_stats")
|
||||
# write start of build into build_time
|
||||
with open(build_time, "a") as f:
|
||||
host_info = platform.uname()
|
||||
f.write("Host Info: ")
|
||||
for x in host_info:
|
||||
if x:
|
||||
f.write(x + " ")
|
||||
f.write("\n")
|
||||
f.write("Build Started: %0.2f \n" % d.getVar('__timedata_build', False)[0])
|
||||
|
||||
elif isinstance(e, bb.event.BuildCompleted):
|
||||
build_time = os.path.join(bsdir, "build_stats")
|
||||
with open(build_time, "a") as f:
|
||||
########################################################################
|
||||
# Write build statistics for the build
|
||||
########################################################################
|
||||
timedata = get_buildtimedata("__timedata_build", d)
|
||||
if timedata:
|
||||
time, cpu = timedata
|
||||
# write end of build and cpu used into build_time
|
||||
f.write("Elapsed time: %0.2f seconds \n" % (time))
|
||||
if cpu:
|
||||
f.write("CPU usage: %0.1f%% \n" % cpu)
|
||||
|
||||
if isinstance(e, bb.build.TaskStarted):
|
||||
set_timedata("__timedata_task", d, e.time)
|
||||
bb.utils.mkdirhier(taskdir)
|
||||
# write into the task event file the name and start time
|
||||
with open(os.path.join(taskdir, e.task), "a") as f:
|
||||
f.write("Event: %s \n" % bb.event.getName(e))
|
||||
f.write("Started: %0.2f \n" % e.time)
|
||||
|
||||
elif isinstance(e, bb.build.TaskSucceeded):
|
||||
write_task_data("passed", os.path.join(taskdir, e.task), e, d)
|
||||
if e.task == "do_rootfs":
|
||||
bs = os.path.join(bsdir, "build_stats")
|
||||
with open(bs, "a") as f:
|
||||
rootfs = d.getVar('IMAGE_ROOTFS')
|
||||
if os.path.isdir(rootfs):
|
||||
try:
|
||||
rootfs_size = subprocess.check_output(["du", "-sh", rootfs],
|
||||
stderr=subprocess.STDOUT).decode('utf-8')
|
||||
f.write("Uncompressed Rootfs size: %s" % rootfs_size)
|
||||
except subprocess.CalledProcessError as err:
|
||||
bb.warn("Failed to get rootfs size: %s" % err.output.decode('utf-8'))
|
||||
|
||||
elif isinstance(e, bb.build.TaskFailed):
|
||||
# Can have a failure before TaskStarted so need to mkdir here too
|
||||
bb.utils.mkdirhier(taskdir)
|
||||
write_task_data("failed", os.path.join(taskdir, e.task), e, d)
|
||||
########################################################################
|
||||
# Lets make things easier and tell people where the build failed in
|
||||
# build_status. We do this here because BuildCompleted triggers no
|
||||
# matter what the status of the build actually is
|
||||
########################################################################
|
||||
build_status = os.path.join(bsdir, "build_stats")
|
||||
with open(build_status, "a") as f:
|
||||
f.write(d.expand("Failed at: ${PF} at task: %s \n" % e.task))
|
||||
if bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_FAILURE")):
|
||||
write_host_data(os.path.join(bsdir, "host_stats_%s_failure" % e.task), e, d, "failure")
|
||||
}
|
||||
|
||||
addhandler run_buildstats
|
||||
run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.event.HeartbeatEvent bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
|
||||
|
||||
python runqueue_stats () {
|
||||
import buildstats
|
||||
from bb import event, runqueue
|
||||
# We should not record any samples before the first task has started,
|
||||
# because that's the first activity shown in the process chart.
|
||||
# Besides, at that point we are sure that the build variables
|
||||
# are available that we need to find the output directory.
|
||||
# The persistent SystemStats is stored in the datastore and
|
||||
# closed when the build is done.
|
||||
system_stats = d.getVar('_buildstats_system_stats', False)
|
||||
if not system_stats and isinstance(e, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted)):
|
||||
system_stats = buildstats.SystemStats(d)
|
||||
d.setVar('_buildstats_system_stats', system_stats)
|
||||
if system_stats:
|
||||
# Ensure that we sample at important events.
|
||||
done = isinstance(e, bb.event.BuildCompleted)
|
||||
if system_stats.sample(e, force=done):
|
||||
d.setVar('_buildstats_system_stats', system_stats)
|
||||
if done:
|
||||
system_stats.close()
|
||||
d.delVar('_buildstats_system_stats')
|
||||
}
|
||||
|
||||
addhandler runqueue_stats
|
||||
runqueue_stats[eventmask] = "bb.runqueue.sceneQueueTaskStarted bb.runqueue.runQueueTaskStarted bb.event.HeartbeatEvent bb.event.BuildCompleted bb.event.MonitorDiskEvent"
|
||||
141
sources/poky/meta/classes-global/debian.bbclass
Normal file
141
sources/poky/meta/classes-global/debian.bbclass
Normal file
@@ -0,0 +1,141 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Debian package renaming only occurs when a package is built
|
||||
# We therefore have to make sure we build all runtime packages
|
||||
# before building the current package to make the packages runtime
|
||||
# depends are correct
|
||||
#
|
||||
# Custom library package names can be defined setting
|
||||
# DEBIANNAME: + pkgname to the desired name.
|
||||
#
|
||||
# Better expressed as ensure all RDEPENDS package before we package
|
||||
# This means we can't have circular RDEPENDS/RRECOMMENDS
|
||||
|
||||
AUTO_LIBNAME_PKGS = "${PACKAGES}"
|
||||
|
||||
inherit package
|
||||
|
||||
python debian_package_name_hook () {
|
||||
import glob, copy, stat, errno, re, pathlib, subprocess
|
||||
|
||||
pkgdest = d.getVar("PKGDEST")
|
||||
packages = d.getVar('PACKAGES')
|
||||
so_re = re.compile(r"lib.*\.so")
|
||||
|
||||
def socrunch(s):
|
||||
s = s.lower().replace('_', '-')
|
||||
m = re.match(r"^(.*)(.)\.so\.(.*)$", s)
|
||||
if m is None:
|
||||
return None
|
||||
if m.group(2) in '0123456789':
|
||||
bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
|
||||
else:
|
||||
bin = m.group(1) + m.group(2) + m.group(3)
|
||||
dev = m.group(1) + m.group(2)
|
||||
return (bin, dev)
|
||||
|
||||
def isexec(path):
|
||||
try:
|
||||
s = os.stat(path)
|
||||
except (os.error, AttributeError):
|
||||
return 0
|
||||
return (s[stat.ST_MODE] & stat.S_IEXEC)
|
||||
|
||||
def add_rprovides(pkg, d):
|
||||
newpkg = d.getVar('PKG:' + pkg)
|
||||
if newpkg and newpkg != pkg:
|
||||
provs = (d.getVar('RPROVIDES:' + pkg) or "").split()
|
||||
if pkg not in provs:
|
||||
d.appendVar('RPROVIDES:' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
|
||||
|
||||
def auto_libname(packages, orig_pkg):
|
||||
p = lambda var: pathlib.PurePath(d.getVar(var))
|
||||
libdirs = (p("base_libdir"), p("libdir"))
|
||||
bindirs = (p("base_bindir"), p("base_sbindir"), p("bindir"), p("sbindir"))
|
||||
|
||||
sonames = []
|
||||
has_bins = 0
|
||||
has_libs = 0
|
||||
for f in pkgfiles[orig_pkg]:
|
||||
# This is .../packages-split/orig_pkg/
|
||||
pkgpath = pathlib.PurePath(pkgdest, orig_pkg)
|
||||
# Strip pkgpath off the full path to a file in the package, re-root
|
||||
# so it is absolute, and then get the parent directory of the file.
|
||||
path = pathlib.PurePath("/") / (pathlib.PurePath(f).relative_to(pkgpath).parent)
|
||||
if path in bindirs:
|
||||
has_bins = 1
|
||||
if path in libdirs:
|
||||
has_libs = 1
|
||||
if so_re.match(os.path.basename(f)):
|
||||
try:
|
||||
cmd = [d.expand("${TARGET_PREFIX}objdump"), "-p", f]
|
||||
output = subprocess.check_output(cmd).decode("utf-8")
|
||||
for m in re.finditer(r"\s+SONAME\s+([^\s]+)", output):
|
||||
if m.group(1) not in sonames:
|
||||
sonames.append(m.group(1))
|
||||
except subprocess.CalledProcessError:
|
||||
pass
|
||||
bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
|
||||
soname = None
|
||||
if len(sonames) == 1:
|
||||
soname = sonames[0]
|
||||
elif len(sonames) > 1:
|
||||
lead = d.getVar('LEAD_SONAME')
|
||||
if lead:
|
||||
r = re.compile(lead)
|
||||
filtered = []
|
||||
for s in sonames:
|
||||
if r.match(s):
|
||||
filtered.append(s)
|
||||
if len(filtered) == 1:
|
||||
soname = filtered[0]
|
||||
elif len(filtered) > 1:
|
||||
bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
|
||||
else:
|
||||
bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
|
||||
else:
|
||||
bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))
|
||||
|
||||
if has_libs and not has_bins and soname:
|
||||
soname_result = socrunch(soname)
|
||||
if soname_result:
|
||||
(pkgname, devname) = soname_result
|
||||
for pkg in packages.split():
|
||||
if (d.getVar('PKG:' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME:' + pkg, False)):
|
||||
add_rprovides(pkg, d)
|
||||
continue
|
||||
debian_pn = d.getVar('DEBIANNAME:' + pkg, False)
|
||||
if debian_pn:
|
||||
newpkg = debian_pn
|
||||
elif pkg == orig_pkg:
|
||||
newpkg = pkgname
|
||||
else:
|
||||
newpkg = pkg.replace(orig_pkg, devname, 1)
|
||||
mlpre=d.getVar('MLPREFIX')
|
||||
if mlpre:
|
||||
if not newpkg.find(mlpre) == 0:
|
||||
newpkg = mlpre + newpkg
|
||||
if newpkg != pkg:
|
||||
bb.note("debian: renaming %s to %s" % (pkg, newpkg))
|
||||
d.setVar('PKG:' + pkg, newpkg)
|
||||
add_rprovides(pkg, d)
|
||||
else:
|
||||
add_rprovides(orig_pkg, d)
|
||||
|
||||
# reversed sort is needed when some package is substring of another
|
||||
# ie in ncurses we get without reverse sort:
|
||||
# DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
|
||||
# and later
|
||||
# DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
|
||||
# so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
|
||||
for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS') or "").split(), reverse=True):
|
||||
auto_libname(packages, pkg)
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS package_name_hook
|
||||
|
||||
DEBIAN_NAMES = "1"
|
||||
164
sources/poky/meta/classes-global/devshell.bbclass
Normal file
164
sources/poky/meta/classes-global/devshell.bbclass
Normal file
@@ -0,0 +1,164 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit terminal
|
||||
|
||||
DEVSHELL = "${SHELL}"
|
||||
|
||||
python do_devshell () {
|
||||
if d.getVarFlag("do_devshell", "manualfakeroot"):
|
||||
d.prependVar("DEVSHELL", "pseudo ")
|
||||
fakeenv = d.getVar("FAKEROOTENV").split()
|
||||
for f in fakeenv:
|
||||
k = f.split("=")
|
||||
d.setVar(k[0], k[1])
|
||||
d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
|
||||
d.delVarFlag("do_devshell", "fakeroot")
|
||||
|
||||
oe_terminal(d.getVar('DEVSHELL'), 'OpenEmbedded Developer Shell', d)
|
||||
}
|
||||
|
||||
addtask devshell after do_patch do_prepare_recipe_sysroot
|
||||
|
||||
# The directory that the terminal starts in
|
||||
DEVSHELL_STARTDIR ?= "${S}"
|
||||
do_devshell[dirs] = "${DEVSHELL_STARTDIR}"
|
||||
do_devshell[nostamp] = "1"
|
||||
do_devshell[network] = "1"
|
||||
|
||||
# devshell and fakeroot/pseudo need careful handling since only the final
|
||||
# command should run under fakeroot emulation, any X connection should
|
||||
# be done as the normal user. We therfore carefully construct the envionment
|
||||
# manually
|
||||
python () {
|
||||
if d.getVarFlag("do_devshell", "fakeroot"):
|
||||
# We need to signal our code that we want fakeroot however we
|
||||
# can't manipulate the environment and variables here yet (see YOCTO #4795)
|
||||
d.setVarFlag("do_devshell", "manualfakeroot", "1")
|
||||
d.delVarFlag("do_devshell", "fakeroot")
|
||||
}
|
||||
|
||||
def pydevshell(d):
|
||||
|
||||
import code
|
||||
import select
|
||||
import signal
|
||||
import termios
|
||||
|
||||
m, s = os.openpty()
|
||||
sname = os.ttyname(s)
|
||||
|
||||
def noechoicanon(fd):
|
||||
old = termios.tcgetattr(fd)
|
||||
old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
|
||||
# &~ termios.ISIG
|
||||
termios.tcsetattr(fd, termios.TCSADRAIN, old)
|
||||
|
||||
# No echo or buffering over the pty
|
||||
noechoicanon(s)
|
||||
|
||||
pid = os.fork()
|
||||
if pid:
|
||||
os.close(m)
|
||||
oe_terminal("oepydevshell-internal.py %s %d" % (sname, pid), 'OpenEmbedded Developer PyShell', d)
|
||||
os._exit(0)
|
||||
else:
|
||||
os.close(s)
|
||||
|
||||
os.dup2(m, sys.stdin.fileno())
|
||||
os.dup2(m, sys.stdout.fileno())
|
||||
os.dup2(m, sys.stderr.fileno())
|
||||
|
||||
bb.utils.nonblockingfd(sys.stdout)
|
||||
bb.utils.nonblockingfd(sys.stderr)
|
||||
bb.utils.nonblockingfd(sys.stdin)
|
||||
|
||||
_context = {
|
||||
"os": os,
|
||||
"bb": bb,
|
||||
"time": time,
|
||||
"d": d,
|
||||
}
|
||||
|
||||
ps1 = "pydevshell> "
|
||||
ps2 = "... "
|
||||
buf = []
|
||||
more = False
|
||||
|
||||
i = code.InteractiveInterpreter(locals=_context)
|
||||
print("OE PyShell (PN = %s)\n" % d.getVar("PN"))
|
||||
|
||||
def prompt(more):
|
||||
if more:
|
||||
prompt = ps2
|
||||
else:
|
||||
prompt = ps1
|
||||
sys.stdout.write(prompt)
|
||||
sys.stdout.flush()
|
||||
|
||||
# Restore Ctrl+C since bitbake masks this
|
||||
def signal_handler(signal, frame):
|
||||
raise KeyboardInterrupt
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
|
||||
child = None
|
||||
|
||||
prompt(more)
|
||||
while True:
|
||||
try:
|
||||
try:
|
||||
(r, _, _) = select.select([sys.stdin], [], [], 1)
|
||||
if not r:
|
||||
continue
|
||||
line = sys.stdin.readline().strip()
|
||||
if not line:
|
||||
prompt(more)
|
||||
continue
|
||||
except EOFError as e:
|
||||
sys.stdout.write("\n")
|
||||
sys.stdout.flush()
|
||||
except (OSError, IOError) as e:
|
||||
if e.errno == 11:
|
||||
continue
|
||||
if e.errno == 5:
|
||||
return
|
||||
raise
|
||||
else:
|
||||
if not child:
|
||||
child = int(line)
|
||||
continue
|
||||
buf.append(line)
|
||||
source = "\n".join(buf)
|
||||
more = i.runsource(source, "<pyshell>")
|
||||
if not more:
|
||||
buf = []
|
||||
sys.stderr.flush()
|
||||
prompt(more)
|
||||
except KeyboardInterrupt:
|
||||
i.write("\nKeyboardInterrupt\n")
|
||||
buf = []
|
||||
more = False
|
||||
prompt(more)
|
||||
except SystemExit:
|
||||
# Easiest way to ensure everything exits
|
||||
os.kill(child, signal.SIGTERM)
|
||||
break
|
||||
|
||||
python do_pydevshell() {
|
||||
import signal
|
||||
|
||||
try:
|
||||
pydevshell(d)
|
||||
except SystemExit:
|
||||
# Stop the SIGTERM above causing an error exit code
|
||||
return
|
||||
finally:
|
||||
return
|
||||
}
|
||||
addtask pydevshell after do_patch
|
||||
|
||||
do_pydevshell[nostamp] = "1"
|
||||
do_pydevshell[network] = "1"
|
||||
1655
sources/poky/meta/classes-global/insane.bbclass
Normal file
1655
sources/poky/meta/classes-global/insane.bbclass
Normal file
File diff suppressed because it is too large
Load Diff
432
sources/poky/meta/classes-global/license.bbclass
Normal file
432
sources/poky/meta/classes-global/license.bbclass
Normal file
@@ -0,0 +1,432 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by
|
||||
# LIC_FILES_CHKSUM.
|
||||
# TODO:
|
||||
# - There is a real issue revolving around license naming standards.
|
||||
|
||||
LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
|
||||
LICSSTATEDIR = "${WORKDIR}/license-destdir/"
|
||||
|
||||
# Create extra package with license texts and add it to RRECOMMENDS:${PN}
|
||||
LICENSE_CREATE_PACKAGE[type] = "boolean"
|
||||
LICENSE_CREATE_PACKAGE ??= "0"
|
||||
LICENSE_PACKAGE_SUFFIX ??= "-lic"
|
||||
LICENSE_FILES_DIRECTORY ??= "${datadir}/licenses/"
|
||||
|
||||
LICENSE_DEPLOY_PATHCOMPONENT = "${SSTATE_PKGARCH}"
|
||||
LICENSE_DEPLOY_PATHCOMPONENT:class-cross = "native"
|
||||
LICENSE_DEPLOY_PATHCOMPONENT:class-native = "native"
|
||||
# Ensure the *value* of SSTATE_PKGARCH is captured as it is used in the output paths
|
||||
LICENSE_DEPLOY_PATHCOMPONENT[vardepvalue] += "${LICENSE_DEPLOY_PATHCOMPONENT}"
|
||||
|
||||
addtask populate_lic after do_patch before do_build
|
||||
do_populate_lic[dirs] = "${LICSSTATEDIR}/${LICENSE_DEPLOY_PATHCOMPONENT}/${PN}"
|
||||
do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
|
||||
|
||||
python do_populate_lic() {
|
||||
"""
|
||||
Populate LICENSE_DIRECTORY with licenses.
|
||||
"""
|
||||
lic_files_paths = find_license_files(d)
|
||||
|
||||
# The base directory we wrangle licenses to
|
||||
destdir = os.path.join(d.getVar('LICSSTATEDIR'), d.getVar('LICENSE_DEPLOY_PATHCOMPONENT'), d.getVar('PN'))
|
||||
copy_license_files(lic_files_paths, destdir)
|
||||
info = get_recipe_info(d)
|
||||
with open(os.path.join(destdir, "recipeinfo"), "w") as f:
|
||||
for key in sorted(info.keys()):
|
||||
f.write("%s: %s\n" % (key, info[key]))
|
||||
oe.qa.exit_if_errors(d)
|
||||
}
|
||||
|
||||
PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '') + ' ' + d.getVar('COREBASE') + '/meta/COPYING').split())}"
|
||||
# it would be better to copy them in do_install:append, but find_license_filesa is python
|
||||
python perform_packagecopy:prepend () {
|
||||
enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
|
||||
if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
|
||||
lic_files_paths = find_license_files(d)
|
||||
|
||||
# LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
|
||||
destdir = d.getVar('D') + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY'), d.getVar('PN'))
|
||||
copy_license_files(lic_files_paths, destdir)
|
||||
add_package_and_files(d)
|
||||
}
|
||||
perform_packagecopy[vardeps] += "LICENSE_CREATE_PACKAGE"
|
||||
|
||||
def get_recipe_info(d):
|
||||
info = {}
|
||||
info["PV"] = d.getVar("PV")
|
||||
info["PR"] = d.getVar("PR")
|
||||
info["LICENSE"] = d.getVar("LICENSE")
|
||||
return info
|
||||
|
||||
def add_package_and_files(d):
|
||||
packages = d.getVar('PACKAGES')
|
||||
files = d.getVar('LICENSE_FILES_DIRECTORY')
|
||||
pn = d.getVar('PN')
|
||||
pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX', False))
|
||||
if pn_lic in packages.split():
|
||||
bb.warn("%s package already existed in %s." % (pn_lic, pn))
|
||||
else:
|
||||
# first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
|
||||
d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
|
||||
d.setVar('FILES:' + pn_lic, files)
|
||||
|
||||
def copy_license_files(lic_files_paths, destdir):
|
||||
import shutil
|
||||
import errno
|
||||
|
||||
bb.utils.mkdirhier(destdir)
|
||||
for (basename, path, beginline, endline) in lic_files_paths:
|
||||
try:
|
||||
src = path
|
||||
dst = os.path.join(destdir, basename)
|
||||
if os.path.exists(dst):
|
||||
os.remove(dst)
|
||||
if os.path.islink(src):
|
||||
src = os.path.realpath(src)
|
||||
canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev) and beginline is None and endline is None
|
||||
if canlink:
|
||||
try:
|
||||
os.link(src, dst)
|
||||
except OSError as err:
|
||||
if err.errno == errno.EXDEV:
|
||||
# Copy license files if hardlink is not possible even if st_dev is the
|
||||
# same on source and destination (docker container with device-mapper?)
|
||||
canlink = False
|
||||
else:
|
||||
raise
|
||||
# Only chown if we did hardlink and we're running under pseudo
|
||||
if canlink and os.environ.get('PSEUDO_DISABLED') == '0':
|
||||
os.chown(dst,0,0)
|
||||
if not canlink:
|
||||
begin_idx = max(0, int(beginline) - 1) if beginline is not None else None
|
||||
end_idx = max(0, int(endline)) if endline is not None else None
|
||||
if begin_idx is None and end_idx is None:
|
||||
shutil.copyfile(src, dst)
|
||||
else:
|
||||
with open(src, 'rb') as src_f:
|
||||
with open(dst, 'wb') as dst_f:
|
||||
dst_f.write(b''.join(src_f.readlines()[begin_idx:end_idx]))
|
||||
|
||||
except Exception as e:
|
||||
bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
|
||||
|
||||
def find_license_files(d):
|
||||
"""
|
||||
Creates list of files used in LIC_FILES_CHKSUM and generic LICENSE files.
|
||||
"""
|
||||
import shutil
|
||||
import oe.license
|
||||
from collections import defaultdict, OrderedDict
|
||||
|
||||
# All the license files for the package
|
||||
lic_files = d.getVar('LIC_FILES_CHKSUM') or ""
|
||||
pn = d.getVar('PN')
|
||||
# The license files are located in S/LIC_FILE_CHECKSUM.
|
||||
srcdir = d.getVar('S')
|
||||
# Directory we store the generic licenses as set in the distro configuration
|
||||
generic_directory = d.getVar('COMMON_LICENSE_DIR')
|
||||
# List of basename, path tuples
|
||||
lic_files_paths = []
|
||||
# hash for keep track generic lics mappings
|
||||
non_generic_lics = {}
|
||||
# Entries from LIC_FILES_CHKSUM
|
||||
lic_chksums = {}
|
||||
license_source_dirs = []
|
||||
license_source_dirs.append(generic_directory)
|
||||
try:
|
||||
additional_lic_dirs = d.getVar('LICENSE_PATH').split()
|
||||
for lic_dir in additional_lic_dirs:
|
||||
license_source_dirs.append(lic_dir)
|
||||
except:
|
||||
pass
|
||||
|
||||
class FindVisitor(oe.license.LicenseVisitor):
|
||||
def visit_Str(self, node):
|
||||
#
|
||||
# Until I figure out what to do with
|
||||
# the two modifiers I support (or greater = +
|
||||
# and "with exceptions" being *
|
||||
# we'll just strip out the modifier and put
|
||||
# the base license.
|
||||
find_license(node.s.replace("+", "").replace("*", ""))
|
||||
self.generic_visit(node)
|
||||
|
||||
def visit_Constant(self, node):
|
||||
find_license(node.value.replace("+", "").replace("*", ""))
|
||||
self.generic_visit(node)
|
||||
|
||||
def find_license(license_type):
|
||||
try:
|
||||
bb.utils.mkdirhier(gen_lic_dest)
|
||||
except:
|
||||
pass
|
||||
spdx_generic = None
|
||||
license_source = None
|
||||
# If the generic does not exist we need to check to see if there is an SPDX mapping to it,
|
||||
# unless NO_GENERIC_LICENSE is set.
|
||||
for lic_dir in license_source_dirs:
|
||||
if not os.path.isfile(os.path.join(lic_dir, license_type)):
|
||||
if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
|
||||
# Great, there is an SPDXLICENSEMAP. We can copy!
|
||||
bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
|
||||
spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
|
||||
license_source = lic_dir
|
||||
break
|
||||
elif os.path.isfile(os.path.join(lic_dir, license_type)):
|
||||
spdx_generic = license_type
|
||||
license_source = lic_dir
|
||||
break
|
||||
|
||||
non_generic_lic = d.getVarFlag('NO_GENERIC_LICENSE', license_type)
|
||||
if spdx_generic and license_source:
|
||||
# we really should copy to generic_ + spdx_generic, however, that ends up messing the manifest
|
||||
# audit up. This should be fixed in emit_pkgdata (or, we actually got and fix all the recipes)
|
||||
|
||||
lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic),
|
||||
None, None))
|
||||
|
||||
# The user may attempt to use NO_GENERIC_LICENSE for a generic license which doesn't make sense
|
||||
# and should not be allowed, warn the user in this case.
|
||||
if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
|
||||
oe.qa.handle_error("license-no-generic",
|
||||
"%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type), d)
|
||||
|
||||
elif non_generic_lic and non_generic_lic in lic_chksums:
|
||||
# if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
|
||||
# of the package rather than the license_source_dirs.
|
||||
lic_files_paths.append(("generic_" + license_type,
|
||||
os.path.join(srcdir, non_generic_lic), None, None))
|
||||
non_generic_lics[non_generic_lic] = license_type
|
||||
else:
|
||||
# Explicitly avoid the CLOSED license because this isn't generic
|
||||
if license_type != 'CLOSED':
|
||||
# And here is where we warn people that their licenses are lousy
|
||||
oe.qa.handle_error("license-exists",
|
||||
"%s: No generic license file exists for: %s in any provider" % (pn, license_type), d)
|
||||
pass
|
||||
|
||||
if not generic_directory:
|
||||
bb.fatal("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
|
||||
|
||||
for url in lic_files.split():
|
||||
try:
|
||||
(method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
|
||||
if method != "file" or not path:
|
||||
raise bb.fetch.MalformedUrl()
|
||||
except bb.fetch.MalformedUrl:
|
||||
bb.fatal("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF'), url))
|
||||
# We want the license filename and path
|
||||
chksum = parm.get('md5', None)
|
||||
beginline = parm.get('beginline')
|
||||
endline = parm.get('endline')
|
||||
lic_chksums[path] = (chksum, beginline, endline)
|
||||
|
||||
v = FindVisitor()
|
||||
try:
|
||||
v.visit_string(d.getVar('LICENSE'))
|
||||
except oe.license.InvalidLicense as exc:
|
||||
bb.fatal('%s: %s' % (d.getVar('PF'), exc))
|
||||
except SyntaxError:
|
||||
oe.qa.handle_error("license-syntax",
|
||||
"%s: Failed to parse LICENSE: %s" % (d.getVar('PF'), d.getVar('LICENSE')), d)
|
||||
# Add files from LIC_FILES_CHKSUM to list of license files
|
||||
lic_chksum_paths = defaultdict(OrderedDict)
|
||||
for path, data in sorted(lic_chksums.items()):
|
||||
lic_chksum_paths[os.path.basename(path)][data] = (os.path.join(srcdir, path), data[1], data[2])
|
||||
for basename, files in lic_chksum_paths.items():
|
||||
if len(files) == 1:
|
||||
# Don't copy again a LICENSE already handled as non-generic
|
||||
if basename in non_generic_lics:
|
||||
continue
|
||||
data = list(files.values())[0]
|
||||
lic_files_paths.append(tuple([basename] + list(data)))
|
||||
else:
|
||||
# If there are multiple different license files with identical
|
||||
# basenames we rename them to <file>.0, <file>.1, ...
|
||||
for i, data in enumerate(files.values()):
|
||||
lic_files_paths.append(tuple(["%s.%d" % (basename, i)] + list(data)))
|
||||
|
||||
return lic_files_paths
|
||||
|
||||
def return_spdx(d, license):
    """Look up the SPDX identifier mapped to *license*.

    The mapping comes from the SPDXLICENSEMAP variable flags in the
    datastore *d*; None is returned when no mapping exists.
    """
    spdx_id = d.getVarFlag('SPDXLICENSEMAP', license)
    return spdx_id
|
||||
|
||||
def canonical_license(d, license):
    """Return the canonical (SPDX) form of *license* where one is known.

    e.g. GPLv3 becomes GPL-3.0-only. Licenses with no SPDXLICENSEMAP
    entry are returned unchanged.
    """
    mapped = d.getVarFlag('SPDXLICENSEMAP', license)
    if mapped:
        return mapped
    return license
|
||||
|
||||
def expand_wildcard_licenses(d, wildcard_licenses):
    """Expand the commonly used license wildcards into concrete SPDX names.

    Only the AGPL/GPL/LGPL 3.0 wildcards are supported; any other
    wildcard, or an obsolete license name, is a fatal error.
    """
    mapping = {
        "AGPL-3.0*" : ["AGPL-3.0-only", "AGPL-3.0-or-later"],
        "GPL-3.0*" : ["GPL-3.0-only", "GPL-3.0-or-later"],
        "LGPL-3.0*" : ["LGPL-3.0-only", "LGPL-3.0-or-later"],
    }
    licenses = set(wildcard_licenses)
    for wildcard, expansions in mapping.items():
        if wildcard in wildcard_licenses:
            licenses.discard(wildcard)
            licenses.update(expansions)

    # Anything still containing a wildcard, or naming a pre-SPDX license,
    # cannot be handled and aborts the build.
    for lic in licenses:
        if lic in oe.license.obsolete_license_list():
            bb.fatal("Error, %s is an obsolete license, please use an SPDX reference in INCOMPATIBLE_LICENSE" % lic)
        if "*" in lic:
            bb.fatal("Error, %s is an invalid license wildcard entry" % lic)

    return list(licenses)
|
||||
|
||||
def incompatible_license_contains(license, truevalue, falsevalue, d):
    """Return *truevalue* when *license* is listed (after canonicalisation
    and wildcard expansion) in INCOMPATIBLE_LICENSE, else *falsevalue*."""
    license = canonical_license(d, license)
    bad_licenses = expand_wildcard_licenses(
        d, (d.getVar('INCOMPATIBLE_LICENSE') or "").split())
    if license in bad_licenses:
        return truevalue
    return falsevalue
|
||||
|
||||
def incompatible_pkg_license(d, dont_want_licenses, license):
    """Return the sorted list of licenses in the *license* expression that
    clash with *dont_want_licenses* (canonical SPDX names)."""
    def choose_lic_set(a, b):
        # For an "or" between two license sets (as produced by
        # flattened_licenses), prefer the side whose licenses are all
        # acceptable; otherwise fall back to the second side.
        if all(oe.license.license_ok(canonical_license(d, lic),
                                     dont_want_licenses) for lic in a):
            return a
        return b

    try:
        licenses = oe.license.flattened_licenses(license, choose_lic_set)
    except oe.license.LicenseError as exc:
        bb.fatal('%s: %s' % (d.getVar('P'), exc))

    incompatible_lic = [
        canonical_license(d, lic) for lic in licenses
        if not oe.license.license_ok(canonical_license(d, lic),
                                     dont_want_licenses)
    ]
    return sorted(incompatible_lic)
|
||||
|
||||
def incompatible_license(d, dont_want_licenses, package=None):
    """
    Check whether the recipe (or the given *package*) carries only licenses
    that clash with *dont_want_licenses*. 'or' alternatives in the LICENSE
    expression are taken into account. *dont_want_licenses* should be
    passed as canonical (SPDX) names.
    """
    import oe.license
    license = None
    if package:
        license = d.getVar("LICENSE:%s" % package)
    if not license:
        # No per-package value set; fall back to the recipe-wide LICENSE.
        license = d.getVar('LICENSE')

    return incompatible_pkg_license(d, dont_want_licenses, license)
|
||||
|
||||
def check_license_flags(d):
    """
    Check the recipe's LICENSE_FLAGS against LICENSE_FLAGS_ACCEPTED.

    Returns the list of LICENSE_FLAGS not covered by the accept list
    (all of them when no accept list is set at all), or None when every
    flag is acceptable.
    """

    def license_flag_matches(flag, acceptlist, pn):
        """
        Return True when *flag* is covered by *acceptlist*, False otherwise.

        The flag is first tested with _${PN} appended — the normal case,
        where a simple flag like 'commercial' is matched by an accept-list
        entry such as 'commercial_foo'. Failing that, each '_'-separated
        prefix of the combined string is tried in turn.
        """
        flag_pn = "%s_%s" % (flag, pn)
        if flag_pn in acceptlist:
            return True

        fragments = flag_pn.split("_")
        fragments.pop()  # the full string was already tested above
        prefix = ""
        for fragment in fragments:
            prefix = fragment if not prefix else prefix + "_" + fragment
            if prefix in acceptlist:
                return True
        return False

    def all_license_flags_match(license_flags, acceptlist):
        """Return all unmatched flags, or None when every flag matches."""
        pn = d.getVar('PN')
        accepted = acceptlist.split()
        unmatched = [flag for flag in license_flags.split()
                     if not license_flag_matches(flag, accepted, pn)]
        return unmatched if unmatched else None

    license_flags = d.getVar('LICENSE_FLAGS')
    if not license_flags:
        # Nothing to check.
        return None
    acceptlist = d.getVar('LICENSE_FLAGS_ACCEPTED')
    if not acceptlist:
        # No accept list at all: every flag is unacceptable.
        return license_flags.split()
    return all_license_flags_match(license_flags, acceptlist)
|
||||
|
||||
def check_license_format(d):
    """
    Validate the structure of LICENSE: license names must be joined by
    valid license operators, with no bare whitespace between two names.
    Problems are reported through the 'license-format' QA check.
    """
    pn = d.getVar('PN')
    licenses = d.getVar('LICENSE')
    from oe.license import license_operator, license_operator_chars, license_pattern

    elements = [e for e in license_operator.split(licenses) if e.strip()]
    previous = None
    for element in elements:
        if license_pattern.match(element):
            # Two license names in a row means an operator is missing.
            if previous is not None and license_pattern.match(previous):
                oe.qa.handle_error('license-format',
                       '%s: LICENSE value "%s" has an invalid format - license names ' \
                       'must be separated by the following characters to indicate ' \
                       'the license selection: %s' %
                       (pn, licenses, license_operator_chars), d)
        elif not license_operator.match(element):
            oe.qa.handle_error('license-format',
                   '%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
                   'in the valid list of separators (%s)' %
                   (pn, licenses, element, license_operator_chars), d)
        previous = element
|
||||
|
||||
# do_populate_lic writes each recipe's license files into LICSSTATEDIR;
# register it with the sstate machinery so its output can be cached and
# restored into the shared license directory.
SSTATETASKS += "do_populate_lic"
do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"

# Image recipes additionally process licenses via license_image.bbclass.
IMAGE_CLASSES:append = " license_image"

# Setscene variant: restore do_populate_lic output from shared state
# instead of re-running the task.
python do_populate_lic_setscene () {
    sstate_setscene(d)
}
addtask do_populate_lic_setscene
|
||||
107
sources/poky/meta/classes-global/logging.bbclass
Normal file
107
sources/poky/meta/classes-global/logging.bbclass
Normal file
@@ -0,0 +1,107 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# The following logging mechanisms are to be used in bash functions of recipes.
|
||||
# They are intended to map one to one in intention and output format with the
|
||||
# python recipe logging functions of a similar naming convention: bb.plain(),
|
||||
# bb.note(), etc.
|
||||
|
||||
LOGFIFO = "${T}/fifo.${@os.getpid()}"
|
||||
|
||||
# Print the output exactly as it is passed in. Typically used for output of
|
||||
# tasks that should be seen on the console. Use sparingly.
|
||||
# Output: logs console
|
||||
bbplain() {
    if ! [ -p ${LOGFIFO} ] ; then
        # No logging fifo available (e.g. run outside a task); plain stdout.
        echo "$*"
    else
        printf "%b\0" "bbplain $*" > ${LOGFIFO}
    fi
}
|
||||
|
||||
# Notify the user of a noteworthy condition.
|
||||
# Output: logs
|
||||
bbnote() {
    if ! [ -p ${LOGFIFO} ] ; then
        # No logging fifo available; fall back to a plain NOTE line.
        echo "NOTE: $*"
    else
        printf "%b\0" "bbnote $*" > ${LOGFIFO}
    fi
}
|
||||
|
||||
# Print a warning to the log. Warnings are non-fatal, and do not
|
||||
# indicate a build failure.
|
||||
# Output: logs console
|
||||
bbwarn() {
    if ! [ -p ${LOGFIFO} ] ; then
        # No logging fifo available; fall back to a plain WARNING line.
        echo "WARNING: $*"
    else
        printf "%b\0" "bbwarn $*" > ${LOGFIFO}
    fi
}
|
||||
|
||||
# Print an error to the log. Errors are non-fatal in that the build can
|
||||
# continue, but they do indicate a build failure.
|
||||
# Output: logs console
|
||||
bberror() {
    if ! [ -p ${LOGFIFO} ] ; then
        # No logging fifo available; fall back to a plain ERROR line.
        echo "ERROR: $*"
    else
        printf "%b\0" "bberror $*" > ${LOGFIFO}
    fi
}
|
||||
|
||||
# Print a fatal error to the log. Fatal errors indicate build failure
|
||||
# and halt the build, exiting with an error code.
|
||||
# Output: logs console
|
||||
bbfatal() {
    if ! [ -p ${LOGFIFO} ] ; then
        # No logging fifo available; fall back to a plain ERROR line.
        echo "ERROR: $*"
    else
        printf "%b\0" "bbfatal $*" > ${LOGFIFO}
    fi
    # Fatal: abort the calling task with a failure status.
    exit 1
}
|
||||
|
||||
# Like bbfatal, except prevents the suppression of the error log by
|
||||
# bitbake's UI.
|
||||
# Output: logs console
|
||||
bbfatal_log() {
    if ! [ -p ${LOGFIFO} ] ; then
        # No logging fifo available; fall back to a plain ERROR line.
        echo "ERROR: $*"
    else
        printf "%b\0" "bbfatal_log $*" > ${LOGFIFO}
    fi
    # Fatal: abort the calling task with a failure status.
    exit 1
}
|
||||
|
||||
# Print debug messages. These are appropriate for progress checkpoint
|
||||
# messages to the logs. Depending on the debug log level, they may also
|
||||
# go to the console.
|
||||
# Output: logs console
|
||||
# Usage: bbdebug 1 "first level debug message"
|
||||
# bbdebug 2 "second level debug message"
|
||||
bbdebug() {
    USAGE='Usage: bbdebug [123] "message"'
    if [ $# -lt 2 ]; then
        bbfatal "$USAGE"
    fi

    # First argument is the debug level; it must be purely numeric.
    DBGLVL=$1; shift
    NONDIGITS=$(echo "$DBGLVL" | tr -d "[:digit:]")
    if [ "$NONDIGITS" ]; then
        bbfatal "$USAGE"
    fi

    # All debug output is printed to the logs
    if ! [ -p ${LOGFIFO} ] ; then
        echo "DEBUG: $*"
    else
        printf "%b\0" "bbdebug $DBGLVL $*" > ${LOGFIFO}
    fi
}
|
||||
|
||||
104
sources/poky/meta/classes-global/mirrors.bbclass
Normal file
104
sources/poky/meta/classes-global/mirrors.bbclass
Normal file
@@ -0,0 +1,104 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
MIRRORS += "\
|
||||
${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \
|
||||
${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \
|
||||
${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \
|
||||
${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \
|
||||
${GNU_MIRROR} https://mirrors.kernel.org/gnu \
|
||||
${KERNELORG_MIRROR} http://www.kernel.org/pub \
|
||||
${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \
|
||||
${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \
|
||||
${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \
|
||||
ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \
|
||||
ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \
|
||||
ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \
|
||||
ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \
|
||||
http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \
|
||||
http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \
|
||||
${APACHE_MIRROR} http://www.us.apache.org/dist \
|
||||
${APACHE_MIRROR} http://archive.apache.org/dist \
|
||||
http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \
|
||||
${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \
|
||||
${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \
|
||||
ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \
|
||||
ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \
|
||||
ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \
|
||||
cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
gitsm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
https?://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
cvs://.*/.* http://sources.openembedded.org/ \
|
||||
svn://.*/.* http://sources.openembedded.org/ \
|
||||
git://.*/.* http://sources.openembedded.org/ \
|
||||
gitsm://.*/.* http://sources.openembedded.org/ \
|
||||
hg://.*/.* http://sources.openembedded.org/ \
|
||||
bzr://.*/.* http://sources.openembedded.org/ \
|
||||
p4://.*/.* http://sources.openembedded.org/ \
|
||||
osc://.*/.* http://sources.openembedded.org/ \
|
||||
https?://.*/.* http://sources.openembedded.org/ \
|
||||
ftp://.*/.* http://sources.openembedded.org/ \
|
||||
npm://.*/?.* http://sources.openembedded.org/ \
|
||||
${CPAN_MIRROR} https://cpan.metacpan.org/ \
|
||||
https?://downloads.yoctoproject.org/releases/uninative/ https://mirrors.kernel.org/yocto/uninative/ \
|
||||
https?://downloads.yoctoproject.org/mirror/sources/ https://mirrors.kernel.org/yocto-sources/ \
|
||||
"
|
||||
|
||||
# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
|
||||
# where git native protocol fetches may fail due to local firewall rules, etc.
|
||||
|
||||
MIRRORS += "\
|
||||
git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \
|
||||
git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \
|
||||
git://git.infradead.org/.* git://git.infraroot.at/PATH;protocol=https \
|
||||
git://.*/.* git://HOST/PATH;protocol=https \
|
||||
git://.*/.* git://HOST/git/PATH;protocol=https \
|
||||
"
|
||||
|
||||
# Switch llvm, glibc and binutils recipes to use shallow clones as they're large and this
# improves user experience whilst allowing the flexibility of git urls in the recipes
BB_GIT_SHALLOW:pn-binutils = "1"
BB_GIT_SHALLOW:pn-binutils-cross-${TARGET_ARCH} = "1"
BB_GIT_SHALLOW:pn-binutils-cross-canadian-${TRANSLATED_TARGET_ARCH} = "1"
BB_GIT_SHALLOW:pn-binutils-cross-testsuite = "1"
BB_GIT_SHALLOW:pn-binutils-crosssdk-${SDK_SYS} = "1"
BB_GIT_SHALLOW:pn-binutils-native = "1"
BB_GIT_SHALLOW:pn-nativesdk-binutils = "1"

BB_GIT_SHALLOW:pn-cross-localedef-native = "1"
BB_GIT_SHALLOW:pn-glibc = "1"
BB_GIT_SHALLOW:pn-glibc-tests = "1"
# Try the Yocto source mirror before the upstream sourceware repositories
# for the glibc and binutils-gdb trees.
PREMIRRORS += "git://sourceware.org/git/glibc.git https://downloads.yoctoproject.org/mirror/sources/ \
               git://sourceware.org/git/binutils-gdb.git https://downloads.yoctoproject.org/mirror/sources/"

BB_GIT_SHALLOW:pn-llvm = "1"
BB_GIT_SHALLOW:pn-llvm-native = "1"
BB_GIT_SHALLOW:pn-nativesdk-llvm = "1"
|
||||
616
sources/poky/meta/classes-global/package.bbclass
Normal file
616
sources/poky/meta/classes-global/package.bbclass
Normal file
@@ -0,0 +1,616 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
#
|
||||
# Packaging process
|
||||
#
|
||||
# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS
|
||||
# Taking D and splitting it up into the packages listed in PACKAGES, placing the
|
||||
# resulting output in PKGDEST.
|
||||
#
|
||||
# There are the following default steps but PACKAGEFUNCS can be extended:
|
||||
#
|
||||
# a) package_convert_pr_autoinc - convert AUTOINC in PKGV to ${PRSERV_PV_AUTOINC}
|
||||
#
|
||||
# b) perform_packagecopy - Copy D into PKGD
|
||||
#
|
||||
# c) package_do_split_locales - Split out the locale files, updates FILES and PACKAGES
|
||||
#
|
||||
# d) split_and_strip_files - split the files into runtime and debug and strip them.
|
||||
# Debug files include debug info split, and associated sources that end up in -dbg packages
|
||||
#
|
||||
# e) fixup_perms - Fix up permissions in the package before we split it.
|
||||
#
|
||||
# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/<pkgname>
|
||||
# Also triggers the binary stripping code to put files in -dbg packages.
|
||||
#
|
||||
# g) package_do_filedeps - Collect perfile run-time dependency metadata
|
||||
# The data is stores in FILER{PROVIDES,DEPENDS}_file_pkg variables with
|
||||
# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
|
||||
#
|
||||
# h) package_do_shlibs - Look at the shared libraries generated and autotmatically add any
|
||||
# dependencies found. Also stores the package name so anyone else using this library
|
||||
# knows which package to depend on.
|
||||
#
|
||||
# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
|
||||
#
|
||||
# j) read_shlibdeps - Reads the stored shlibs information into the metadata
|
||||
#
|
||||
# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages
|
||||
#
|
||||
# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later
|
||||
# packaging steps
|
||||
|
||||
inherit packagedata
|
||||
inherit chrpath
|
||||
inherit package_pkgdata
|
||||
inherit insane
|
||||
|
||||
PKGD = "${WORKDIR}/package"
|
||||
PKGDEST = "${WORKDIR}/packages-split"
|
||||
|
||||
LOCALE_SECTION ?= ''
|
||||
|
||||
ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
|
||||
|
||||
# rpm is used for the per-file dependency identification
|
||||
# dwarfsrcfiles is used to determine the list of debug source files
|
||||
PACKAGE_DEPENDS += "rpm-native dwarfsrcfiles-native"
|
||||
|
||||
# If your postinstall can execute at rootfs creation time rather than on
|
||||
# target but depends on a native/cross tool in order to execute, you need to
|
||||
# list that tool in PACKAGE_WRITE_DEPS. Target package dependencies belong
|
||||
# in the package dependencies as normal, this is just for native/cross support
|
||||
# tools at rootfs build time.
|
||||
PACKAGE_WRITE_DEPS ??= ""
|
||||
|
||||
def legitimize_package_name(s):
    """Turn *s* into a valid package name (delegates to oe.package).

    NOTE(review): thin compatibility wrapper — presumably kept so existing
    recipes calling the bare function keep working; verify callers before
    removing.
    """
    return oe.package.legitimize_package_name(s)
|
||||
|
||||
def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False, summary=None):
    """
    Used in .bb files to split up dynamically generated subpackages of a
    given package, usually plugins or modules.

    Arguments:
    root           -- the path in which to search
    file_regex     -- regular expression to match searched files. Use
                      parentheses () to mark the part of this expression
                      that should be used to derive the module name (to be
                      substituted where %s is used in other function
                      arguments as noted below)
    output_pattern -- pattern to use for the package names. Must include %s.
    description    -- description to set for each package. Must include %s.
    postinst       -- postinstall script to use for all packages (as a
                      string)
    recursive      -- True to perform a recursive search - default False
    hook           -- a hook function to be called for every match. The
                      function will be called with the following arguments
                      (in the order listed):
                        f: full path to the file/directory match
                        pkg: the package name
                        file_regex: as above
                        output_pattern: as above
                        modulename: the module name derived using file_regex
    extra_depends  -- extra runtime dependencies (RDEPENDS) to be set for
                      all packages. The default value of None causes a
                      dependency on the main package (${PN}) - if you do
                      not want this, pass '' for this parameter.
    aux_files_pattern -- extra item(s) to be added to FILES for each
                      package. Can be a single string item or a list of
                      strings for multiple items. Must include %s.
    postrm         -- postrm script to use for all packages (as a string)
    allow_dirs     -- True allow directories to be matched - default False
    prepend        -- if True, prepend created packages to PACKAGES instead
                      of the default False which appends them
    match_path     -- match file_regex on the whole relative path to the
                      root rather than just the file name
    aux_files_pattern_verbatim -- extra item(s) to be added to FILES for
                      each package, using the actual derived module name
                      rather than converting it to something legal for a
                      package name. Can be a single string item or a list
                      of strings for multiple items. Must include %s.
    allow_links    -- True to allow symlinks to be matched - default False
    summary        -- Summary to set for each package. Must include %s;
                      defaults to description if not set.

    """
    # Hoisted out of the per-object loop below: the modules are needed for
    # every matched object, not once per iteration.
    import re, stat

    dvar = d.getVar('PKGD')
    root = d.expand(root)
    output_pattern = d.expand(output_pattern)
    extra_depends = d.expand(extra_depends)

    # If the root directory doesn't exist, don't error out later but silently do
    # no splitting.
    if not os.path.exists(dvar + root):
        return []

    # In a multilib build, package names and dependencies get the MLPREFIX
    # prepended so the split packages land in the right multilib variant.
    ml = d.getVar("MLPREFIX")
    if ml:
        if not output_pattern.startswith(ml):
            output_pattern = ml + output_pattern

        newdeps = []
        for dep in (extra_depends or "").split():
            if dep.startswith(ml):
                newdeps.append(dep)
            else:
                newdeps.append(ml + dep)
        if newdeps:
            extra_depends = " ".join(newdeps)


    packages = d.getVar('PACKAGES').split()
    split_packages = set()

    if postinst:
        postinst = '#!/bin/sh\n' + postinst + '\n'
    if postrm:
        postrm = '#!/bin/sh\n' + postrm + '\n'
    if not recursive:
        objs = os.listdir(dvar + root)
    else:
        # Collect paths relative to root so file_regex can match on them.
        objs = []
        for walkroot, dirs, files in os.walk(dvar + root):
            for file in files:
                relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
                if relpath:
                    objs.append(relpath)

    # None (as opposed to '') means "depend on the main package".
    if extra_depends is None:
        extra_depends = d.getVar("PN")

    if not summary:
        summary = description

    for o in sorted(objs):
        if match_path:
            m = re.match(file_regex, o)
        else:
            m = re.match(file_regex, os.path.basename(o))

        if not m:
            continue
        f = os.path.join(dvar + root, o)
        mode = os.lstat(f).st_mode
        if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
            continue
        on = oe.package.legitimize_package_name(m.group(1))
        pkg = output_pattern % on
        split_packages.add(pkg)
        if pkg not in packages:
            if prepend:
                packages = [pkg] + packages
            else:
                packages.append(pkg)
        oldfiles = d.getVar('FILES:' + pkg)
        newfile = os.path.join(root, o)
        # These names will be passed through glob() so if the filename actually
        # contains * or ? (rare, but possible) we need to handle that specially
        newfile = newfile.replace('*', '[*]')
        newfile = newfile.replace('?', '[?]')
        if not oldfiles:
            the_files = [newfile]
            if aux_files_pattern:
                if isinstance(aux_files_pattern, list):
                    for fp in aux_files_pattern:
                        the_files.append(fp % on)
                else:
                    the_files.append(aux_files_pattern % on)
            if aux_files_pattern_verbatim:
                if isinstance(aux_files_pattern_verbatim, list):
                    for fp in aux_files_pattern_verbatim:
                        the_files.append(fp % m.group(1))
                else:
                    the_files.append(aux_files_pattern_verbatim % m.group(1))
            d.setVar('FILES:' + pkg, " ".join(the_files))
        else:
            d.setVar('FILES:' + pkg, oldfiles + " " + newfile)
        if extra_depends != '':
            d.appendVar('RDEPENDS:' + pkg, ' ' + extra_depends)
        if not d.getVar('DESCRIPTION:' + pkg):
            d.setVar('DESCRIPTION:' + pkg, description % on)
        if not d.getVar('SUMMARY:' + pkg):
            d.setVar('SUMMARY:' + pkg, summary % on)
        if postinst:
            d.setVar('pkg_postinst:' + pkg, postinst)
        if postrm:
            d.setVar('pkg_postrm:' + pkg, postrm)
        if callable(hook):
            hook(f, pkg, file_regex, output_pattern, m.group(1))

    d.setVar('PACKAGES', ' '.join(packages))
    return list(split_packages)
|
||||
|
||||
# file(1) is used by the packaging code itself.
PACKAGE_DEPENDS += "file-native"

python () {
    # Turn PACKAGE_DEPENDS into do_package task dependencies; pull in
    # xz-native additionally when the minidebuginfo distro feature is
    # enabled.
    if d.getVar('PACKAGES') != '':
        deps = ""
        for dep in (d.getVar('PACKAGE_DEPENDS') or "").split():
            deps += " %s:do_populate_sysroot" % dep
        if bb.utils.contains('DISTRO_FEATURES', 'minidebuginfo', True, False, d):
            deps += ' xz-native:do_populate_sysroot'
        d.appendVarFlag('do_package', 'depends', deps)

        # shlibs requires any DEPENDS to have already packaged for the *.list files
        d.appendVarFlag('do_package', 'deptask', " do_packagedata")
}
|
||||
|
||||
|
||||
# Whether a PR service is configured; carried as an explicit variable so
# task signatures change when PR-service usage changes.
PRSERV_ACTIVE = "${@bool(d.getVar("PRSERV_HOST"))}"
PRSERV_ACTIVE[vardepvalue] = "${PRSERV_ACTIVE}"
package_get_auto_pr[vardepsexclude] = "BB_TASKDEPDATA"
package_get_auto_pr[vardeps] += "PRSERV_ACTIVE"
python package_get_auto_pr() {
    # Obtain PRAUTO (and the AUTOINC substitution value) either from a
    # lockdown export file or from a live PR service connection.
    import oe.prservice

    def get_do_package_hash(pn):
        # When not running as do_package itself, pull do_package's unihash
        # out of the task dependency data; returns None if do_package is
        # not among the dependencies.
        if d.getVar("BB_RUNTASK") != "do_package":
            taskdepdata = d.getVar("BB_TASKDEPDATA", False)
            for dep in taskdepdata:
                if taskdepdata[dep][1] == "do_package" and taskdepdata[dep][0] == pn:
                    return taskdepdata[dep][6]
        return None

    # Support per recipe PRSERV_HOST
    pn = d.getVar('PN')
    host = d.getVar("PRSERV_HOST_" + pn)
    if not (host is None):
        d.setVar("PRSERV_HOST", host)

    pkgv = d.getVar("PKGV")

    # PR Server not active, handle AUTOINC
    if not d.getVar('PRSERV_HOST'):
        d.setVar("PRSERV_PV_AUTOINC", "0")
        return

    auto_pr = None
    pv = d.getVar("PV")
    version = d.getVar("PRAUTOINX")
    pkgarch = d.getVar("PACKAGE_ARCH")
    checksum = get_do_package_hash(pn)

    # If do_package isn't in the dependencies, we can't get the checksum...
    if not checksum:
        bb.warn('Task %s requested do_package unihash, but it was not available.' % d.getVar('BB_RUNTASK'))
        #taskdepdata = d.getVar("BB_TASKDEPDATA", False)
        #for dep in taskdepdata:
        #    bb.warn('%s:%s = %s' % (taskdepdata[dep][0], taskdepdata[dep][1], taskdepdata[dep][6]))
        return

    if d.getVar('PRSERV_LOCKDOWN'):
        # Lockdown mode: PR values come from a previously exported file
        # rather than a live server.
        auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
        if auto_pr is None:
            bb.fatal("Can NOT get PRAUTO from lockdown exported file")
        d.setVar('PRAUTO',str(auto_pr))
        return

    try:
        conn = oe.prservice.prserv_make_conn(d)
        if conn is not None:
            if "AUTOINC" in pkgv:
                srcpv = bb.fetch2.get_srcrev(d)
                base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
                value = conn.getPR(base_ver, pkgarch, srcpv)
                d.setVar("PRSERV_PV_AUTOINC", str(value))

            auto_pr = conn.getPR(version, pkgarch, checksum)
            conn.close()
    except Exception as e:
        bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
    if auto_pr is None:
        bb.fatal("Can NOT get PRAUTO from remote PR service")
    d.setVar('PRAUTO',str(auto_pr))
}
|
||||
|
||||
#
|
||||
# Package functions suitable for inclusion in PACKAGEFUNCS
|
||||
#
|
||||
|
||||
python package_setup_pkgv() {
    # Finalise PKGV before packaging: append the SRCPV string to
    # "+"-style versions and substitute the PR-service AUTOINC
    # placeholder.
    pkgv = d.getVar("PKGV")
    # Expand SRCPV into PKGV if not present
    srcpv = bb.fetch.get_pkgv_string(d)
    if srcpv and "+" in pkgv:
        d.appendVar("PKGV", srcpv)
        pkgv = d.getVar("PKGV")

    # Adjust pkgv as necessary...
    if 'AUTOINC' in pkgv:
        d.setVar("PKGV", pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}"))
}
|
||||
|
||||
|
||||
python package_convert_pr_autoinc() {
    # Change PRSERV_PV_AUTOINC and EXTENDPRAUTO usage to special values
    # NOTE(review): the @...@ placeholder form is presumably substituted
    # back by later packaging steps — confirm against the package-write
    # tasks before relying on it.
    d.setVar('PRSERV_PV_AUTOINC', '@PRSERV_PV_AUTOINC@')
    d.setVar('EXTENDPRAUTO', '@EXTENDPRAUTO@')
}
|
||||
|
||||
# Base package name used when naming the split locale packages.
LOCALEBASEPN ??= "${PN}"
# Directories searched for locale data when splitting out locale packages.
LOCALE_PATHS ?= "${datadir}/locale"

python package_do_split_locales() {
    # Implementation lives in oe.package (meta/lib/oe/package.py).
    oe.package.split_locales(d)
}
|
||||
|
||||
# Seed ${PKGD} with a copy of ${D} so the packaging steps can modify files
# without touching do_install's output.
python perform_packagecopy () {
    import subprocess
    import shutil

    dest = d.getVar('D')
    dvar = d.getVar('PKGD')

    # Start by package population by taking a copy of the installed
    # files to operate on
    # Preserve sparse files and hard links
    cmd = 'tar --exclude=./sysroot-only -cf - -C %s -p -S . | tar -xf - -C %s' % (dest, dvar)
    subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)

    # replace RPATHs for the nativesdk binaries, to make them relocatable
    if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
        rpath_replace (dvar, d)
}
# PKGD is wiped and recreated on every run of this task.
perform_packagecopy[cleandirs] = "${PKGD}"
perform_packagecopy[dirs] = "${PKGD}"
|
||||
|
||||
python populate_packages () {
    # Split ${PKGD} into per-package trees under ${PKGDEST} according to
    # PACKAGES/FILES; implementation in oe.package.
    oe.package.populate_packages(d)
}
populate_packages[dirs] = "${D}"

python package_fixsymlinks () {
    # Delegates to oe.package.process_fixsymlinks over the collected
    # per-package file list.
    oe.package.process_fixsymlinks(pkgfiles, d)
}

python package_package_name_hook() {
    """
    A package_name_hook function can be used to rewrite the package names by
    changing PKG. For an example, see debian.bbclass.
    """
    pass
}

EXPORT_FUNCTIONS package_name_hook


# Working directory where per-package metadata is written before being
# shared via the sstate machinery.
PKGDESTWORK = "${WORKDIR}/pkgdata"

# Variables emitted into the pkgdata files for each package.
PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS RPROVIDES RRECOMMENDS RSUGGESTS RREPLACES RCONFLICTS SECTION PKG ALLOW_EMPTY FILES CONFFILES FILES_INFO PACKAGE_ADD_METADATA pkg_postinst pkg_postrm pkg_preinst pkg_prerm"

python emit_pkgdata() {
    import oe.packagedata
    oe.packagedata.emit_pkgdata(pkgfiles, d)
}
emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides ${PKGDESTWORK}/extended"

# Shell fragment appended to postinsts that need the dynamic linker cache
# refreshed; only acts on target ($D empty) where /sbin/ldconfig exists.
ldconfig_postinst_fragment() {
	if [ x"$D" = "x" ]; then
		if [ -x /sbin/ldconfig ]; then /sbin/ldconfig ; fi
	fi
}
|
||||
|
||||
# Command used to extract per-file RPM dependency information.
RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps --define '__font_provides %{nil}'"

# Collect per-file dependency/provides data for each split package.
python package_do_filedeps() {
    oe.package.process_filedeps(pkgfiles, d)
}

# Where shared-library provider maps are read from (all packages) and
# written to (this recipe's contribution).
SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"

# Scan packaged binaries for shared-library needs/provides and record
# them for automatic RDEPENDS generation.
python package_do_shlibs() {
    oe.package.process_shlibs(pkgfiles, d)
}

# Derive dependencies from packaged pkg-config (.pc) files.
python package_do_pkgconfig () {
    oe.package.process_pkgconfig(pkgfiles, d)
}
|
||||
|
||||
# Merge the library dependencies recorded by package_do_shlibs /
# package_do_pkgconfig into each package's RDEPENDS, preserving any
# version constraints already present there.
python read_shlibdeps () {
    pkglibdeps = oe.package.read_libdep_files(d)

    packages = d.getVar('PACKAGES').split()
    for pkg in packages:
        rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
        for dep in sorted(pkglibdeps[pkg]):
            # Add the dep if it's not already there, or if no comparison is set
            if dep not in rdepends:
                rdepends[dep] = []
            for v in pkglibdeps[pkg][dep]:
                if v not in rdepends[dep]:
                    rdepends[dep].append(v)
        # Re-serialize without commas so the value round-trips through
        # explode_dep_versions2 on later reads.
        d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
}
|
||||
|
||||
# Propagate dependency chains (e.g. -dev/-dbg relationships) across the
# split packages; implemented in oe.package.
python package_depchains() {
    oe.package.process_depchains(pkgfiles, d)
}

# Since bitbake can't determine which variables are accessed during package
# iteration, we need to list them here:
PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm pkg_postinst_ontarget INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE SKIP_FILEDEPS PRIVATE_LIBS PACKAGE_ADD_METADATA"
|
||||
|
||||
def gen_packagevar(d, pkgvars="PACKAGEVARS"):
    """Expand a list of variable names into per-package variants.

    For every name in the variable named by *pkgvars*, emit the plain
    name plus a ``NAME:<pkg>`` variant for each package in PACKAGES, so
    the task checksum covers all per-package overrides.  Returns a
    single space-separated string.
    """
    pkgs = (d.getVar("PACKAGES") or "").split()
    varnames = (d.getVar(pkgvars) or "").split()

    names = list(varnames)
    for pkg in pkgs:
        names.extend("%s:%s" % (varname, pkg) for varname in varnames)
        # Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package
        # for affected recipes.
        names.append('_exclude_incompatible-%s' % pkg)
    return " ".join(names)
|
||||
|
||||
|
||||
# Functions for setting up PKGD
PACKAGE_PREPROCESS_FUNCS ?= ""
# Functions which split PKGD up into separate packages
PACKAGESPLITFUNCS ?= " \
                package_do_split_locales \
                populate_packages"
# Functions which process metadata based on split packages
# (executed in this order by do_package once pkgfiles is populated)
PACKAGEFUNCS += " \
                package_fixsymlinks \
                package_name_hook \
                package_do_filedeps \
                package_do_shlibs \
                package_do_pkgconfig \
                read_shlibdeps \
                package_depchains \
                emit_pkgdata"
|
||||
|
||||
# Main packaging task: validates the setup, copies ${D} into ${PKGD},
# splits the tree into per-package directories under ${PKGDEST}, then
# runs the metadata-processing PACKAGEFUNCS and emits pkgdata.
python do_package () {
    # Change the following version to cause sstate to invalidate the package
    # cache. This is useful if an item this class depends on changes in a
    # way that the output of this class changes. rpmdeps is a good example
    # as any change to rpmdeps requires this to be rerun.
    # PACKAGE_BBCLASS_VERSION = "5"

    # Init cachedpath
    global cpath
    cpath = oe.cachedpath.CachedPath()

    ###########################################################################
    # Sanity test the setup
    ###########################################################################

    packages = (d.getVar('PACKAGES') or "").split()
    if len(packages) < 1:
        bb.debug(1, "No packages to build, skipping do_package")
        return

    workdir = d.getVar('WORKDIR')
    outdir = d.getVar('DEPLOY_DIR')
    dest = d.getVar('D')
    dvar = d.getVar('PKGD')
    pn = d.getVar('PN')

    if not workdir or not outdir or not dest or not dvar or not pn:
        msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
        oe.qa.handle_error("var-undefined", msg, d)
        return

    bb.build.exec_func("package_setup_pkgv", d)
    bb.build.exec_func("package_convert_pr_autoinc", d)

    # Check for conflict between renamed packages and existing ones
    # for each package in PACKAGES, check if it will be renamed to an existing one
    for p in packages:
        rename = d.getVar('PKG:%s' % p)
        if rename and rename in packages:
            bb.fatal('package "%s" is renamed to "%s" using PKG:%s, but package name already exists' % (p, rename, p))

    ###########################################################################
    # Optimisations
    ###########################################################################

    # Continually expanding complex expressions is inefficient, particularly
    # when we write to the datastore and invalidate the expansion cache. This
    # code pre-expands some frequently used variables

    def expandVar(x, d):
        d.setVar(x, d.getVar(x))

    for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
        expandVar(x, d)

    ###########################################################################
    # Setup PKGD (from D)
    ###########################################################################

    bb.build.exec_func("package_prepare_pkgdata", d)
    bb.build.exec_func("perform_packagecopy", d)
    for f in (d.getVar('PACKAGE_PREPROCESS_FUNCS') or '').split():
        bb.build.exec_func(f, d)
    oe.package.process_split_and_strip_files(d)
    oe.package.fixup_perms(d)

    ###########################################################################
    # Split up PKGD into PKGDEST
    ###########################################################################

    # Fresh cache: the strip/split step above changed files on disk.
    cpath = oe.cachedpath.CachedPath()

    for f in (d.getVar('PACKAGESPLITFUNCS') or '').split():
        bb.build.exec_func(f, d)

    ###########################################################################
    # Process PKGDEST
    ###########################################################################

    # Build global list of files in each split package
    # (pkgfiles is global so the PACKAGEFUNCS below can read it)
    global pkgfiles
    pkgfiles = {}
    packages = d.getVar('PACKAGES').split()
    pkgdest = d.getVar('PKGDEST')
    for pkg in packages:
        pkgfiles[pkg] = []
        for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
            for file in files:
                pkgfiles[pkg].append(walkroot + os.sep + file)

    for f in (d.getVar('PACKAGEFUNCS') or '').split():
        bb.build.exec_func(f, d)

    oe.qa.exit_if_errors(d)
}
|
||||
|
||||
do_package[dirs] = "${SHLIBSWORKDIR} ${D}"
# Task checksum must cover all hook functions plus every per-package
# variable variant (see gen_packagevar).
do_package[vardeps] += "${PACKAGE_PREPROCESS_FUNCS} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
addtask package after do_install

# do_package output is cached via shared state.
SSTATETASKS += "do_package"
do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
do_package_setscene[dirs] = "${STAGING_DIR}"

# Restore do_package output from shared state instead of rebuilding.
python do_package_setscene () {
    sstate_setscene(d)
}
addtask do_package_setscene
|
||||
|
||||
# Copy from PKGDESTWORK to tempdirectory as tempdirectory can be cleaned at both
# do_package_setscene and do_packagedata_setscene leading to races
python do_packagedata () {
    bb.build.exec_func("package_setup_pkgv", d)
    bb.build.exec_func("package_get_auto_pr", d)

    src = d.expand("${PKGDESTWORK}")
    dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
    # Hard links keep the copy cheap while isolating it from the races
    # described above.
    oe.path.copyhardlinktree(src, dest)

    bb.build.exec_func("packagedata_translate_pr_autoinc", d)
}
do_packagedata[cleandirs] += "${WORKDIR}/pkgdata-pdata-input"

# Translate the EXTENDPRAUTO and AUTOINC to the final values
# (the @...@ placeholders were substituted in by package_convert_pr_autoinc)
packagedata_translate_pr_autoinc() {
    find ${WORKDIR}/pkgdata-pdata-input -type f | xargs --no-run-if-empty \
        sed -e 's,@PRSERV_PV_AUTOINC@,${PRSERV_PV_AUTOINC},g' \
            -e 's,@EXTENDPRAUTO@,${EXTENDPRAUTO},g' -i
}
|
||||
|
||||
addtask packagedata before do_build after do_package

# Publish the translated pkgdata into the global PKGDATA_DIR via sstate.
SSTATETASKS += "do_packagedata"
do_packagedata[sstate-inputdirs] = "${WORKDIR}/pkgdata-pdata-input"
do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
do_packagedata[stamp-extra-info] = "${MACHINE_ARCH}"

# Restore do_packagedata output from shared state instead of rebuilding.
python do_packagedata_setscene () {
    sstate_setscene(d)
}
addtask do_packagedata_setscene
|
||||
|
||||
333
sources/poky/meta/classes-global/package_deb.bbclass
Normal file
333
sources/poky/meta/classes-global/package_deb.bbclass
Normal file
@@ -0,0 +1,333 @@
|
||||
#
|
||||
# Copyright 2006-2008 OpenedHand Ltd.
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit package

IMAGE_PKGTYPE ?= "deb"

# Tool invoked to assemble each .deb (see deb_write_pkg).
DPKG_BUILDCMD ??= "dpkg-deb"

# Debian architecture name derived from the OE target arch/tune.
DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'))}"
DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"

# Output directory for generated .deb files, fed into DEPLOY_DIR_DEB
# via the do_package_write_deb sstate mapping.
PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"

APTCONF_TARGET = "${WORKDIR}"

# Extra apt arguments; adds --no-install-recommends when requested.
APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
|
||||
|
||||
def debian_arch_map(arch, tune):
    """Map an OE target architecture (plus TUNE_FEATURES) to the
    corresponding Debian architecture name."""
    features = tune.split()

    if arch == "allarch":
        return "all"
    if arch in ("i586", "i686"):
        return "i386"
    if arch == "x86_64":
        return "x32" if "mx32" in features else "amd64"
    if arch.startswith("mips"):
        # Debian marks little-endian MIPS with an explicit "el" suffix.
        suffix = "" if "bigendian" in features else "el"
        if "n64" in features:
            return "mips64" + suffix
        if "n32" in features:
            return "mipsn32" + suffix
        return "mips" + suffix
    if arch == "powerpc":
        return arch + ("spe" if "spe" in features else "")
    if arch == "aarch64":
        return "arm64"
    if arch == "arm":
        return arch + ("hf" if "callconvention-hard" in features else "el")
    return arch
|
||||
|
||||
# Build one .deb per split package, in parallel via multiprocess_launch.
python do_package_deb () {
    packages = d.getVar('PACKAGES')
    if not packages:
        bb.debug(1, "PACKAGES not defined, nothing to package")
        return

    # New packages invalidate the deb index; remove its clean stamp.
    tmpdir = d.getVar('TMPDIR')
    if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
        os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))

    oe.utils.multiprocess_launch(deb_write_pkg, packages.split(), d, extraargs=(d,))
}
do_package_deb[vardeps] += "deb_write_pkg"
do_package_deb[vardepsexclude] = "BB_NUMBER_THREADS"
|
||||
|
||||
def deb_write_pkg(pkg, d):
    """Write a single .deb for split package *pkg*.

    Generates the DEBIAN/control file (fields, dependencies, maintainer
    scripts, conffiles) from the per-package datastore, then invokes
    DPKG_BUILDCMD to assemble the archive into PKGWRITEDIRDEB.
    Runs under a per-package lockfile; control dirs are cleaned up in
    the finally block even on failure.
    """
    import re, copy
    import textwrap
    import subprocess
    import collections
    import codecs

    outdir = d.getVar('PKGWRITEDIRDEB')
    pkgdest = d.getVar('PKGDEST')

    def cleanupcontrol(root):
        # Remove any stale CONTROL/DEBIAN metadata dirs under root.
        for p in ['CONTROL', 'DEBIAN']:
            p = os.path.join(root, p)
            if os.path.exists(p):
                bb.utils.prunedir(p)

    localdata = bb.data.createCopy(d)
    root = "%s/%s" % (pkgdest, pkg)

    lf = bb.utils.lockfile(root + ".lock")
    try:

        localdata.setVar('ROOT', '')
        localdata.setVar('ROOT_%s' % pkg, root)
        pkgname = localdata.getVar('PKG:%s' % pkg)
        if not pkgname:
            pkgname = pkg
        localdata.setVar('PKG', pkgname)

        # Make per-package overrides (FILES:pkg etc.) visible.
        localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)

        basedir = os.path.join(os.path.dirname(root))

        pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH'))
        bb.utils.mkdirhier(pkgoutdir)

        os.chdir(root)
        cleanupcontrol(root)
        from glob import glob
        g = glob('*')
        if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
            bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
            return

        controldir = os.path.join(root, 'DEBIAN')
        bb.utils.mkdirhier(controldir)
        os.chmod(controldir, 0o755)

        ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')

        # (format string, [variable names]) pairs for the control file.
        fields = []
        pe = d.getVar('PKGE')
        if pe and int(pe) > 0:
            fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
        else:
            fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
        fields.append(["Description: %s\n", ['DESCRIPTION']])
        fields.append(["Section: %s\n", ['SECTION']])
        fields.append(["Priority: %s\n", ['PRIORITY']])
        fields.append(["Maintainer: %s\n", ['MAINTAINER']])
        fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
        fields.append(["OE: %s\n", ['PN']])
        fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
        if d.getVar('HOMEPAGE'):
            fields.append(["Homepage: %s\n", ['HOMEPAGE']])

        # Package, Version, Maintainer, Description - mandatory
        # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional


        def pullData(l, d):
            # Resolve each variable name in l, raising KeyError if unset.
            l2 = []
            for i in l:
                data = d.getVar(i)
                if data is None:
                    raise KeyError(i)
                if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH') == 'all':
                    data = 'all'
                elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
                    # The params in deb package control don't allow character
                    # `_', so change the arch's `_' to `-'. Such as `x86_64'
                    # -->`x86-64'
                    data = data.replace('_', '-')
                l2.append(data)
            return l2

        ctrlfile.write("Package: %s\n" % pkgname)
        if d.getVar('PACKAGE_ARCH') == "all":
            ctrlfile.write("Multi-Arch: foreign\n")
        # check for required fields
        for (c, fs) in fields:
            # Special behavior for description...
            if 'DESCRIPTION' in fs:
                summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
                ctrlfile.write('Description: %s\n' % summary)
                description = localdata.getVar('DESCRIPTION') or "."
                description = textwrap.dedent(description).strip()
                if '\\n' in description:
                    # Manually indent
                    for t in description.split('\\n'):
                        ctrlfile.write(' %s\n' % (t.strip() or '.'))
                else:
                    # Auto indent
                    ctrlfile.write('%s\n' % textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))

            else:
                ctrlfile.write(c % tuple(pullData(fs, localdata)))

        # more fields

        custom_fields_chunk = oe.packagedata.get_package_additional_metadata("deb", localdata)
        if custom_fields_chunk:
            ctrlfile.write(custom_fields_chunk)
            ctrlfile.write("\n")

        oe.packagedata.mapping_rename_hook(localdata)

        def debian_cmp_remap(var):
            # dpkg does not allow for '(', ')' or ':' in a dependency name
            # Replace any instances of them with '__'
            #
            # In debian '>' and '<' do not mean what it appears they mean
            # '<' = less or equal
            # '>' = greater or equal
            # adjust these to the '<<' and '>>' equivalents
            # Also, "=" specifiers only work if they have the PR in, so 1.2.3 != 1.2.3-r0
            # so to avoid issues, map this to ">= 1.2.3 << 1.2.3.0"
            for dep in list(var.keys()):
                if '(' in dep or '/' in dep:
                    newdep = re.sub(r'[(:)/]', '__', dep)
                    if newdep.startswith("__"):
                        newdep = "A" + newdep
                    if newdep != dep:
                        var[newdep] = var[dep]
                        del var[dep]
            for dep in var:
                for i, v in enumerate(var[dep]):
                    if (v or "").startswith("< "):
                        var[dep][i] = var[dep][i].replace("< ", "<< ")
                    elif (v or "").startswith("> "):
                        var[dep][i] = var[dep][i].replace("> ", ">> ")
                    elif (v or "").startswith("= ") and "-r" not in v:
                        ver = var[dep][i].replace("= ", "")
                        var[dep][i] = var[dep][i].replace("= ", ">= ")
                        var[dep].append("<< " + ver + ".0")

        rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
        debian_cmp_remap(rdepends)
        for dep in list(rdepends.keys()):
            if dep == pkg:
                del rdepends[dep]
                continue
            if '*' in dep:
                del rdepends[dep]
        rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
        debian_cmp_remap(rrecommends)
        for dep in list(rrecommends.keys()):
            if '*' in dep:
                del rrecommends[dep]
        rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
        debian_cmp_remap(rsuggests)
        # Deliberately drop version information here, not wanted/supported by deb
        rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
        # Remove file paths if any from rprovides, debian does not support custom providers
        for key in list(rprovides.keys()):
            if key.startswith('/'):
                del rprovides[key]
        rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
        debian_cmp_remap(rprovides)
        rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
        debian_cmp_remap(rreplaces)
        rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
        debian_cmp_remap(rconflicts)
        if rdepends:
            ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
        if rsuggests:
            ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
        if rrecommends:
            ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
        if rprovides:
            ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
        if rreplaces:
            ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
        if rconflicts:
            ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
        ctrlfile.close()

        # Emit the maintainer scripts from pkg_preinst/pkg_postinst/etc.
        for script in ["preinst", "postinst", "prerm", "postrm"]:
            scriptvar = localdata.getVar('pkg_%s' % script)
            if not scriptvar:
                continue
            scriptvar = scriptvar.strip()
            scriptfile = open(os.path.join(controldir, script), 'w')

            if scriptvar.startswith("#!"):
                pos = scriptvar.find("\n") + 1
                scriptfile.write(scriptvar[:pos])
            else:
                pos = 0
                scriptfile.write("#!/bin/sh\n")

            # Prevent the prerm/postrm scripts from being run during an upgrade
            if script in ('prerm', 'postrm'):
                scriptfile.write('[ "$1" != "upgrade" ] || exit 0\n')

            scriptfile.write(scriptvar[pos:])
            scriptfile.write('\n')
            scriptfile.close()
            os.chmod(os.path.join(controldir, script), 0o755)

        # Record conffiles that actually exist in the package tree.
        conffiles_str = ' '.join(oe.package.get_conffiles(pkg, d))
        if conffiles_str:
            conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
            for f in conffiles_str.split():
                if os.path.exists(oe.path.join(root, f)):
                    conffiles.write('%s\n' % f)
            conffiles.close()

        os.chdir(basedir)
        subprocess.check_output("PATH=\"%s\" %s -b %s %s" % (localdata.getVar("PATH"), localdata.getVar("DPKG_BUILDCMD"),
                                                             root, pkgoutdir),
                                stderr=subprocess.STDOUT,
                                shell=True)

    finally:
        cleanupcontrol(root)
        bb.utils.unlockfile(lf)

# Otherwise allarch packages may change depending on override configuration
deb_write_pkg[vardepsexclude] = "OVERRIDES"
|
||||
|
||||
# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
# (gen_packagevar expands these to per-package variants for the task checksum).
# Note: the list previously contained "PKGV" twice; the duplicate is removed
# as gen_packagevar would simply emit redundant entries for it.
DEBEXTRAVARS = "PKGV PKGR DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE PACKAGE_ADD_METADATA_DEB"
do_package_write_deb[vardeps] += "${@gen_packagevar(d, 'DEBEXTRAVARS')}"

# Deploy the generated debs from the work dir into DEPLOY_DIR_DEB via sstate.
SSTATETASKS += "do_package_write_deb"
do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
|
||||
|
||||
# Setscene variant: restoring debs from sstate also invalidates the
# package index clean stamp, mirroring do_package_deb.
python do_package_write_deb_setscene () {
    tmpdir = d.getVar('TMPDIR')

    if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
        os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))

    sstate_setscene(d)
}
addtask do_package_write_deb_setscene

# Anonymous function: wire up the tool dependencies and fakeroot flag
# for do_package_write_deb when this recipe produces packages.
python () {
    if d.getVar('PACKAGES') != '':
        deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
        d.appendVarFlag('do_package_write_deb', 'depends', deps)
        d.setVarFlag('do_package_write_deb', 'fakeroot', "1")

        # Needed to ensure PKG_xxx renaming of dependency packages works
        d.setVarFlag('do_package_write_deb', 'deptask', "do_packagedata")
        d.setVarFlag('do_package_write_deb', 'rdeptask', "do_packagedata")
}

# Entry point task: reload the subpackage metadata, then build the debs.
python do_package_write_deb () {
    bb.build.exec_func("read_subpackage_metadata", d)
    bb.build.exec_func("do_package_deb", d)
}
do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_deb after do_packagedata do_package do_deploy_source_date_epoch before do_build
do_build[rdeptask] += "do_package_write_deb"

PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
|
||||
300
sources/poky/meta/classes-global/package_ipk.bbclass
Normal file
300
sources/poky/meta/classes-global/package_ipk.bbclass
Normal file
@@ -0,0 +1,300 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit package

IMAGE_PKGTYPE ?= "ipk"

# opkg configuration files generated into the work directory.
IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
IPKGCONF_SDK_TARGET = "${WORKDIR}/opkg-sdk-target.conf"

# Output directory for generated .ipk files.
PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"

# Program to be used to build opkg packages
OPKGBUILDCMD ??= 'opkg-build -Z zstd -a "${ZSTD_DEFAULTS}"'

OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "").strip() != ""]}"

OPKGLIBDIR ??= "${localstatedir}/lib"

# Build one .ipk per split package, in parallel via multiprocess_launch.
python do_package_ipk () {
    workdir = d.getVar('WORKDIR')
    outdir = d.getVar('PKGWRITEDIRIPK')
    tmpdir = d.getVar('TMPDIR')
    pkgdest = d.getVar('PKGDEST')
    if not workdir or not outdir or not tmpdir:
        bb.error("Variables incorrectly set, unable to package")
        return

    packages = d.getVar('PACKAGES')
    if not packages or packages == '':
        bb.debug(1, "No packages; nothing to do")
        return

    # We're about to add new packages so the index needs to be checked
    # so remove the appropriate stamp file.
    if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
        os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))

    oe.utils.multiprocess_launch(ipk_write_pkg, packages.split(), d, extraargs=(d,))
}
do_package_ipk[vardeps] += "ipk_write_pkg"
do_package_ipk[vardepsexclude] = "BB_NUMBER_THREADS"

# FILE isn't included by default but we want the recipe to change if basename() changes
IPK_RECIPE_FILE = "${@os.path.basename(d.getVar('FILE'))}"
IPK_RECIPE_FILE[vardepvalue] = "${IPK_RECIPE_FILE}"
|
||||
|
||||
def ipk_write_pkg(pkg, d):
|
||||
import re, copy
|
||||
import subprocess
|
||||
import textwrap
|
||||
import collections
|
||||
import glob
|
||||
|
||||
def cleanupcontrol(root):
|
||||
for p in ['CONTROL', 'DEBIAN']:
|
||||
p = os.path.join(root, p)
|
||||
if os.path.exists(p):
|
||||
bb.utils.prunedir(p)
|
||||
|
||||
outdir = d.getVar('PKGWRITEDIRIPK')
|
||||
pkgdest = d.getVar('PKGDEST')
|
||||
recipesource = d.getVar('IPK_RECIPE_FILE')
|
||||
|
||||
localdata = bb.data.createCopy(d)
|
||||
root = "%s/%s" % (pkgdest, pkg)
|
||||
|
||||
lf = bb.utils.lockfile(root + ".lock")
|
||||
try:
|
||||
localdata.setVar('ROOT', '')
|
||||
localdata.setVar('ROOT_%s' % pkg, root)
|
||||
pkgname = localdata.getVar('PKG:%s' % pkg)
|
||||
if not pkgname:
|
||||
pkgname = pkg
|
||||
localdata.setVar('PKG', pkgname)
|
||||
|
||||
localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
|
||||
|
||||
basedir = os.path.join(os.path.dirname(root))
|
||||
arch = localdata.getVar('PACKAGE_ARCH')
|
||||
|
||||
if localdata.getVar('IPK_HIERARCHICAL_FEED', False) == "1":
|
||||
# Spread packages across subdirectories so each isn't too crowded
|
||||
if pkgname.startswith('lib'):
|
||||
pkg_prefix = 'lib' + pkgname[3]
|
||||
else:
|
||||
pkg_prefix = pkgname[0]
|
||||
|
||||
# Keep -dbg, -dev, -doc, -staticdev, -locale and -locale-* packages
|
||||
# together. These package suffixes are taken from the definitions of
|
||||
# PACKAGES and PACKAGES_DYNAMIC in meta/conf/bitbake.conf
|
||||
if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
|
||||
pkg_subdir = pkgname[:-4]
|
||||
elif pkgname.endswith('-staticdev'):
|
||||
pkg_subdir = pkgname[:-10]
|
||||
elif pkgname.endswith('-locale'):
|
||||
pkg_subdir = pkgname[:-7]
|
||||
elif '-locale-' in pkgname:
|
||||
pkg_subdir = pkgname[:pkgname.find('-locale-')]
|
||||
else:
|
||||
pkg_subdir = pkgname
|
||||
|
||||
pkgoutdir = "%s/%s/%s/%s" % (outdir, arch, pkg_prefix, pkg_subdir)
|
||||
else:
|
||||
pkgoutdir = "%s/%s" % (outdir, arch)
|
||||
|
||||
bb.utils.mkdirhier(pkgoutdir)
|
||||
os.chdir(root)
|
||||
cleanupcontrol(root)
|
||||
g = glob.glob('*')
|
||||
if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
|
||||
bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
|
||||
return
|
||||
|
||||
controldir = os.path.join(root, 'CONTROL')
|
||||
bb.utils.mkdirhier(controldir)
|
||||
ctrlfile = open(os.path.join(controldir, 'control'), 'w')
|
||||
|
||||
fields = []
|
||||
pe = d.getVar('PKGE')
|
||||
if pe and int(pe) > 0:
|
||||
fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
|
||||
else:
|
||||
fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
|
||||
fields.append(["Description: %s\n", ['DESCRIPTION']])
|
||||
fields.append(["Section: %s\n", ['SECTION']])
|
||||
fields.append(["Priority: %s\n", ['PRIORITY']])
|
||||
fields.append(["Maintainer: %s\n", ['MAINTAINER']])
|
||||
fields.append(["License: %s\n", ['LICENSE']])
|
||||
fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
|
||||
fields.append(["OE: %s\n", ['PN']])
|
||||
if d.getVar('HOMEPAGE'):
|
||||
fields.append(["Homepage: %s\n", ['HOMEPAGE']])
|
||||
|
||||
def pullData(l, d):
|
||||
l2 = []
|
||||
for i in l:
|
||||
l2.append(d.getVar(i))
|
||||
return l2
|
||||
|
||||
ctrlfile.write("Package: %s\n" % pkgname)
|
||||
# check for required fields
|
||||
for (c, fs) in fields:
|
||||
for f in fs:
|
||||
if localdata.getVar(f, False) is None:
|
||||
raise KeyError(f)
|
||||
# Special behavior for description...
|
||||
if 'DESCRIPTION' in fs:
|
||||
summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
|
||||
ctrlfile.write('Description: %s\n' % summary)
|
||||
description = localdata.getVar('DESCRIPTION') or "."
|
||||
description = textwrap.dedent(description).strip()
|
||||
if '\\n' in description:
|
||||
# Manually indent: multiline description includes a leading space
|
||||
for t in description.split('\\n'):
|
||||
ctrlfile.write(' %s\n' % (t.strip() or ' .'))
|
||||
else:
|
||||
# Auto indent
|
||||
ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
|
||||
else:
|
||||
ctrlfile.write(c % tuple(pullData(fs, localdata)))
|
||||
|
||||
custom_fields_chunk = oe.packagedata.get_package_additional_metadata("ipk", localdata)
|
||||
if custom_fields_chunk is not None:
|
||||
ctrlfile.write(custom_fields_chunk)
|
||||
ctrlfile.write("\n")
|
||||
|
||||
oe.packagedata.mapping_rename_hook(localdata)
|
||||
|
||||
def debian_cmp_remap(var):
|
||||
# In debian '>' and '<' do not mean what it appears they mean
|
||||
# '<' = less or equal
|
||||
# '>' = greater or equal
|
||||
# adjust these to the '<<' and '>>' equivalents
|
||||
# Also, "=" specifiers only work if they have the PR in, so 1.2.3 != 1.2.3-r0
|
||||
# so to avoid issues, map this to ">= 1.2.3 << 1.2.3.0"
|
||||
for dep in var:
|
||||
for i, v in enumerate(var[dep]):
|
||||
if (v or "").startswith("< "):
|
||||
var[dep][i] = var[dep][i].replace("< ", "<< ")
|
||||
elif (v or "").startswith("> "):
|
||||
var[dep][i] = var[dep][i].replace("> ", ">> ")
|
||||
elif (v or "").startswith("= ") and "-r" not in v:
|
||||
ver = var[dep][i].replace("= ", "")
|
||||
var[dep][i] = var[dep][i].replace("= ", ">= ")
|
||||
var[dep].append("<< " + ver + ".0")
|
||||
|
||||
rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
|
||||
debian_cmp_remap(rdepends)
|
||||
rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
|
||||
debian_cmp_remap(rrecommends)
|
||||
rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
|
||||
debian_cmp_remap(rsuggests)
|
||||
# Deliberately drop version information here, not wanted/supported by ipk
|
||||
rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
|
||||
rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
|
||||
debian_cmp_remap(rprovides)
|
||||
rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
|
||||
debian_cmp_remap(rreplaces)
|
||||
rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
|
||||
debian_cmp_remap(rconflicts)
|
||||
|
||||
if rdepends:
|
||||
ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
|
||||
if rsuggests:
|
||||
ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
|
||||
if rrecommends:
|
||||
ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
|
||||
if rprovides:
|
||||
ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
|
||||
if rreplaces:
|
||||
ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
|
||||
if rconflicts:
|
||||
ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
|
||||
ctrlfile.write("Source: %s\n" % recipesource)
|
||||
ctrlfile.close()
|
||||
|
||||
for script in ["preinst", "postinst", "prerm", "postrm"]:
|
||||
scriptvar = localdata.getVar('pkg_%s' % script)
|
||||
if not scriptvar:
|
||||
continue
|
||||
scriptfile = open(os.path.join(controldir, script), 'w')
|
||||
scriptfile.write(scriptvar)
|
||||
scriptfile.close()
|
||||
os.chmod(os.path.join(controldir, script), 0o755)
|
||||
|
||||
conffiles_str = ' '.join(oe.package.get_conffiles(pkg, d))
|
||||
if conffiles_str:
|
||||
conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
|
||||
for f in conffiles_str.split():
|
||||
if os.path.exists(oe.path.join(root, f)):
|
||||
conffiles.write('%s\n' % f)
|
||||
conffiles.close()
|
||||
|
||||
os.chdir(basedir)
|
||||
subprocess.check_output("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH"),
|
||||
d.getVar("OPKGBUILDCMD"), pkg, pkgoutdir),
|
||||
stderr=subprocess.STDOUT,
|
||||
shell=True)
|
||||
|
||||
if d.getVar('IPK_SIGN_PACKAGES') == '1':
|
||||
ipkver = "%s-%s" % (localdata.getVar('PKGV'), localdata.getVar('PKGR'))
|
||||
ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, localdata.getVar('PACKAGE_ARCH'))
|
||||
sign_ipk(d, ipk_to_sign)
|
||||
|
||||
finally:
|
||||
cleanupcontrol(root)
|
||||
bb.utils.unlockfile(lf)
|
||||
|
||||
# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
IPKEXTRAVARS = "PRIORITY MAINTAINER PACKAGE_ARCH HOMEPAGE PACKAGE_ADD_METADATA_IPK"
# Expand the per-package forms of IPKEXTRAVARS into the task signature so
# changing e.g. MAINTAINER:<pkg> reruns ipk_write_pkg.
ipk_write_pkg[vardeps] += "${@gen_packagevar(d, 'IPKEXTRAVARS')}"

# Otherwise allarch packages may change depending on override configuration
ipk_write_pkg[vardepsexclude] = "OVERRIDES"


# Make do_package_write_ipk sstate-accelerated: outputs are staged in
# PKGWRITEDIRIPK and published to DEPLOY_DIR_IPK on restore.
SSTATETASKS += "do_package_write_ipk"
do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
|
||||
|
||||
# Setscene variant: restore deployed ipks from sstate instead of rebuilding.
python do_package_write_ipk_setscene () {
    tmpdir = d.getVar('TMPDIR')

    # New packages restored from sstate make any existing ipk package index
    # stale, so drop the "index is clean" stamp to force it to be rebuilt.
    if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
        os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))

    sstate_setscene(d)
}
addtask do_package_write_ipk_setscene
|
||||
|
||||
# Anonymous function: wire up do_package_write_ipk's build-time dependencies
# and flags when this recipe actually produces packages.
python () {
    if d.getVar('PACKAGES') != '':
        # Tools needed to assemble ipks (opkg-build, pseudo, zstd compressor).
        deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot zstd-native:do_populate_sysroot'
        d.appendVarFlag('do_package_write_ipk', 'depends', deps)
        d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")

    # Needed to ensure PKG_xxx renaming of dependency packages works
    d.setVarFlag('do_package_write_ipk', 'deptask', "do_packagedata")
    d.setVarFlag('do_package_write_ipk', 'rdeptask', "do_packagedata")
}
|
||||
|
||||
# Main task: load the split-package metadata, then build the ipk archives.
python do_package_write_ipk () {
    bb.build.exec_func("read_subpackage_metadata", d)
    bb.build.exec_func("do_package_ipk", d)
}
do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
# Anything listed in PACKAGE_WRITE_DEPS must be staged before packaging.
do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_ipk after do_packagedata do_package do_deploy_source_date_epoch before do_build
do_build[rdeptask] += "do_package_write_ipk"

# Tools required when (re)generating the ipk package feed index.
PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
|
||||
173
sources/poky/meta/classes-global/package_pkgdata.bbclass
Normal file
173
sources/poky/meta/classes-global/package_pkgdata.bbclass
Normal file
@@ -0,0 +1,173 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
WORKDIR_PKGDATA = "${WORKDIR}/pkgdata-sysroot"

def package_populate_pkgdata_dir(pkgdatadir, d):
    """Populate pkgdatadir from every packagedata sstate manifest.

    Walks the manifests for the machine arch, the extra package archs
    (most specific first) and allarch, copying each listed file or
    directory out of PKGDATA_DIR into the recipe-local pkgdata sysroot.
    """
    import glob

    postinsts = []
    seendirs = set()
    stagingdir = d.getVar("PKGDATA_DIR")

    # Machine arch, then extra archs from most to least specific, then allarch.
    pkgarchs = ['${MACHINE_ARCH}']
    pkgarchs.extend(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
    pkgarchs.append('allarch')

    bb.utils.mkdirhier(pkgdatadir)
    for arch in pkgarchs:
        pattern = d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.packagedata" % arch)
        for manifest in glob.glob(pattern):
            with open(manifest, "r") as f:
                for entry in f:
                    entry = entry.strip()
                    dest = entry.replace(stagingdir, "")
                    if entry.endswith("/"):
                        staging_copydir(entry, pkgdatadir, dest, seendirs)
                    else:
                        # A later manifest may list a file already copied;
                        # first copy wins.
                        try:
                            staging_copyfile(entry, pkgdatadir, dest, postinsts, seendirs)
                        except FileExistsError:
                            pass
|
||||
|
||||
# Build a recipe-local pkgdata sysroot (WORKDIR_PKGDATA) containing the
# packagedata of only those dependencies this task should be able to "see",
# mirroring exactly what would be restored from sstate.
python package_prepare_pkgdata() {
    import copy
    import glob

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    mytaskname = d.getVar("BB_RUNTASK")
    # Treat the setscene variant as the real task when locating ourselves.
    if mytaskname.endswith("_setscene"):
        mytaskname = mytaskname.replace("_setscene", "")
    workdir = d.getVar("WORKDIR")
    pn = d.getVar("PN")
    stagingdir = d.getVar("PKGDATA_DIR")
    pkgdatadir = d.getVar("WORKDIR_PKGDATA")

    # Detect bitbake -b usage
    nodeps = d.getVar("BB_LIMITEDDEPS") or False
    if nodeps:
        # With no dependency data available, fall back to copying everything.
        # NOTE(review): this name does not match the helper defined above
        # (package_populate_pkgdata_dir) — confirm a staging_-prefixed variant
        # exists elsewhere, otherwise this path raises NameError.
        staging_package_populate_pkgdata_dir(pkgdatadir, d)
        return

    # Locate our own task entry in the task dependency data.
    start = None
    configuredeps = []
    for dep in taskdepdata:
        data = taskdepdata[dep]
        if data[1] == mytaskname and data[0] == pn:
            start = dep
            break
    if start is None:
        bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")

    # We need to figure out which sysroot files we need to expose to this task.
    # This needs to match what would get restored from sstate, which is controlled
    # ultimately by calls from bitbake to setscene_depvalid().
    # That function expects a setscene dependency tree. We build a dependency tree
    # condensed to inter-sstate task dependencies, similar to that used by setscene
    # tasks. We can then call into setscene_depvalid() and decide
    # which dependencies we can "see" and should expose in the recipe specific sysroot.
    setscenedeps = copy.deepcopy(taskdepdata)

    start = set([start])

    sstatetasks = d.getVar("SSTATETASKS").split()
    # Add recipe specific tasks referenced by setscene_depvalid()
    sstatetasks.append("do_stash_locale")

    # If start is an sstate task (like do_package) we need to add in its direct dependencies
    # else the code below won't recurse into them.
    for dep in set(start):
        for dep2 in setscenedeps[dep][3]:
            start.add(dep2)
        start.remove(dep)

    # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
    # (delete every non-sstate task, splicing its dependencies into its parents).
    for dep in taskdepdata:
        data = setscenedeps[dep]
        if data[1] not in sstatetasks:
            for dep2 in setscenedeps:
                data2 = setscenedeps[dep2]
                if dep in data2[3]:
                    data2[3].update(setscenedeps[dep][3])
                    data2[3].remove(dep)
            if dep in start:
                start.update(setscenedeps[dep][3])
                start.remove(dep)
            del setscenedeps[dep]

    # Remove circular references
    for dep in setscenedeps:
        if dep in setscenedeps[dep][3]:
            setscenedeps[dep][3].remove(dep)

    # Direct dependencies should be present and can be depended upon
    for dep in set(start):
        if setscenedeps[dep][1] == "do_packagedata":
            if dep not in configuredeps:
                configuredeps.append(dep)

    msgbuf = []
    # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
    # for ones that would be restored from sstate.
    done = list(start)
    next = list(start)
    while next:
        new = []
        for dep in next:
            data = setscenedeps[dep]
            for datadep in data[3]:
                if datadep in done:
                    continue
                taskdeps = {}
                taskdeps[dep] = setscenedeps[dep][:2]
                taskdeps[datadep] = setscenedeps[datadep][:2]
                # retval truthy => sstate would NOT restore this dep for us.
                retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
                done.append(datadep)
                new.append(datadep)
                if retval:
                    msgbuf.append("Skipping setscene dependency %s" % datadep)
                    continue
                if datadep not in configuredeps and setscenedeps[datadep][1] == "do_packagedata":
                    configuredeps.append(datadep)
                    msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
                else:
                    msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
        next = new

    # This logging is too verbose for day to day use sadly
    #bb.debug(2, "\n".join(msgbuf))

    # Copy the packagedata manifest contents of every visible dependency into
    # the recipe-local pkgdata sysroot.
    seendirs = set()
    postinsts = []
    multilibs = {}
    manifests = {}

    msg_adding = []

    for dep in configuredeps:
        c = setscenedeps[dep][0]
        msg_adding.append(c)

        manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "packagedata", d, multilibs)
        destsysroot = pkgdatadir

        if manifest:
            targetdir = destsysroot
            with open(manifest, "r") as f:
                manifests[dep] = manifest
                for l in f:
                    l = l.strip()
                    dest = targetdir + l.replace(stagingdir, "")
                    if l.endswith("/"):
                        staging_copydir(l, targetdir, dest, seendirs)
                        continue
                    staging_copyfile(l, targetdir, dest, postinsts, seendirs)

    bb.note("Installed into pkgdata-sysroot: %s" % str(msg_adding))

}
package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA SSTATETASKS"
|
||||
|
||||
|
||||
793
sources/poky/meta/classes-global/package_rpm.bbclass
Normal file
793
sources/poky/meta/classes-global/package_rpm.bbclass
Normal file
@@ -0,0 +1,793 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit package

IMAGE_PKGTYPE ?= "rpm"

# Host tools used to build and query rpm packages.
RPM = "rpm"
RPMBUILD = "rpmbuild"
# rpmbuild payload compression mode: zstd ("w<level>T<threads>.zstdio"),
# level 3, with ZSTD_THREADS worker threads.
RPMBUILD_COMPMODE ?= "${@'w3T%d.zstdio' % int(d.getVar('ZSTD_THREADS'))}"

# Staging area for generated rpms before deployment.
PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"

# Maintaining the perfile dependencies has significant overhead when writing the
# packages. When set, this value merges them for efficiency.
MERGEPERFILEDEPS = "1"
|
||||
|
||||
# Filter dependencies based on a provided function.
def filter_deps(var, f):
    """Return the dependency string with only entries for which f(dep) is true.

    Order and any version constraints of the surviving entries are preserved.
    """
    import collections

    exploded = bb.utils.explode_dep_versions2(var)
    kept = collections.OrderedDict(
        (dep, constraints) for dep, constraints in exploded.items() if f(dep)
    )
    return bb.utils.join_deps(kept, commasep=False)
|
||||
|
||||
# Filter out absolute paths (typically /bin/sh and /usr/bin/env) and any perl
# dependencies for nativesdk packages.
def filter_nativesdk_deps(srcname, var):
    """For nativesdk- recipes, strip path and perl deps from var; else pass through."""
    if not var or not srcname.startswith("nativesdk-"):
        return var

    def keep(dep):
        # Drop absolute paths, the bare perl package and perl(...) provides.
        return not (dep.startswith('/') or dep == 'perl' or dep.startswith('perl('))

    return filter_deps(var, keep)
|
||||
|
||||
# Construct per file dependencies file
def write_rpm_perfiledata(srcname, d):
    """Write the per-file RPM dependency helper scripts for this recipe.

    Generates two executable python scripts, <srcname>.requires and
    <srcname>.provides in WORKDIR, each embedding a dict mapping packaged
    file paths to their dependency strings. rpmbuild invokes them to answer
    per-file Requires/Provides queries. Returns (requires_path, provides_path).
    """
    workdir = d.getVar('WORKDIR')
    packages = d.getVar('PACKAGES')
    pkgd = d.getVar('PKGD')

    def dump_filerdeps(varname, outfile, d):
        # Emit a small self-contained script: a deps table plus a stdin loop
        # that prints the entry for each file path rpmbuild asks about.
        outfile.write("#!/usr/bin/env python3\n\n")
        outfile.write("# Dependency table\n")
        outfile.write('deps = {\n')
        for pkg in packages.split():
            dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
            dependsflist = (d.getVar(dependsflist_key) or "")
            for dfile in dependsflist.split():
                key = "FILE" + varname + ":" + dfile + ":" + pkg
                deps = filter_nativesdk_deps(srcname, d.getVar(key) or "")
                depends_dict = bb.utils.explode_dep_versions(deps)
                # Undo the escaping applied when the filename was stored as
                # part of a variable name.
                file = dfile.replace("@underscore@", "_")
                file = file.replace("@closebrace@", "]")
                file = file.replace("@openbrace@", "[")
                file = file.replace("@tab@", "\t")
                file = file.replace("@space@", " ")
                file = file.replace("@at@", "@")
                outfile.write('"' + pkgd + file + '" : "')
                for dep in depends_dict:
                    ver = depends_dict[dep]
                    if dep and ver:
                        # RPM version constraints are written without parens.
                        ver = ver.replace("(", "")
                        ver = ver.replace(")", "")
                        outfile.write(dep + " " + ver + " ")
                    else:
                        outfile.write(dep + " ")
                outfile.write('",\n')
        outfile.write('}\n\n')
        outfile.write("import sys\n")
        outfile.write("while 1:\n")
        outfile.write("\tline = sys.stdin.readline().strip()\n")
        outfile.write("\tif not line:\n")
        outfile.write("\t\tsys.exit(0)\n")
        outfile.write("\tif line in deps:\n")
        outfile.write("\t\tprint(deps[line] + '\\n')\n")

    # OE-core dependencies a.k.a. RPM requires
    outdepends = workdir + "/" + srcname + ".requires"
    # Use context managers so the handles are closed even if writing fails.
    with open(outdepends, 'w') as dependsfile:
        dump_filerdeps('RDEPENDS', dependsfile, d)
    os.chmod(outdepends, 0o755)

    # OE-core / RPM Provides
    outprovides = workdir + "/" + srcname + ".provides"
    with open(outprovides, 'w') as providesfile:
        dump_filerdeps('RPROVIDES', providesfile, d)
    os.chmod(outprovides, 0o755)

    return (outdepends, outprovides)
|
||||
|
||||
|
||||
python write_specfile () {
|
||||
import oe.packagedata
|
||||
import os,pwd,grp,stat
|
||||
|
||||
# append information for logs and patches to %prep
|
||||
def add_prep(d, spec_files_bottom):
|
||||
if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
|
||||
spec_files_bottom.append('%%prep -n %s' % d.getVar('PN'))
|
||||
spec_files_bottom.append('%s' % "echo \"include logs and patches, Please check them in SOURCES\"")
|
||||
spec_files_bottom.append('')
|
||||
|
||||
# append the name of tarball to key word 'SOURCE' in xxx.spec.
|
||||
def tail_source(d):
|
||||
if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
|
||||
ar_outdir = d.getVar('ARCHIVER_OUTDIR')
|
||||
if not os.path.exists(ar_outdir):
|
||||
return
|
||||
source_list = os.listdir(ar_outdir)
|
||||
source_number = 0
|
||||
for source in source_list:
|
||||
# do_deploy_archives may have already run (from sstate) meaning a .src.rpm may already
|
||||
# exist in ARCHIVER_OUTDIR so skip if present.
|
||||
if source.endswith(".src.rpm"):
|
||||
continue
|
||||
# The rpmbuild doesn't need the root permission, but it needs
|
||||
# to know the file's user and group name, the only user and
|
||||
# group in fakeroot is "root" when working in fakeroot.
|
||||
f = os.path.join(ar_outdir, source)
|
||||
os.chown(f, 0, 0)
|
||||
spec_preamble_top.append('Source%s: %s' % (source_number, source))
|
||||
source_number += 1
|
||||
|
||||
# In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
|
||||
# This format is similar to OE, however there are restrictions on the
|
||||
# characters that can be in a field. In the Version field, "-"
|
||||
# characters are not allowed. "-" is allowed in the Release field.
|
||||
#
|
||||
# We translate the "-" in the version to a "+", by loading the PKGV
|
||||
# from the dependent recipe, replacing the - with a +, and then using
|
||||
# that value to do a replace inside of this recipe's dependencies.
|
||||
# This preserves the "-" separator between the version and release, as
|
||||
# well as any "-" characters inside of the release field.
|
||||
#
|
||||
# All of this has to happen BEFORE the mapping_rename_hook as
|
||||
# after renaming we cannot look up the dependencies in the packagedata
|
||||
# store.
|
||||
def translate_vers(varname, d):
|
||||
depends = d.getVar(varname)
|
||||
if depends:
|
||||
depends_dict = bb.utils.explode_dep_versions2(depends)
|
||||
newdeps_dict = {}
|
||||
for dep in depends_dict:
|
||||
verlist = []
|
||||
for ver in depends_dict[dep]:
|
||||
if '-' in ver:
|
||||
subd = oe.packagedata.read_subpkgdata_dict(dep, d)
|
||||
if 'PKGV' in subd:
|
||||
pv = subd['PV']
|
||||
pkgv = subd['PKGV']
|
||||
reppv = pkgv.replace('-', '+')
|
||||
if ver.startswith(pv):
|
||||
ver = ver.replace(pv, reppv)
|
||||
ver = ver.replace(pkgv, reppv)
|
||||
if 'PKGR' in subd:
|
||||
# Make sure PKGR rather than PR in ver
|
||||
pr = '-' + subd['PR']
|
||||
pkgr = '-' + subd['PKGR']
|
||||
if pkgr not in ver:
|
||||
ver = ver.replace(pr, pkgr)
|
||||
verlist.append(ver)
|
||||
else:
|
||||
verlist.append(ver)
|
||||
newdeps_dict[dep] = verlist
|
||||
depends = bb.utils.join_deps(newdeps_dict)
|
||||
d.setVar(varname, depends.strip())
|
||||
|
||||
# We need to change the style the dependency from BB to RPM
|
||||
# This needs to happen AFTER the mapping_rename_hook
|
||||
def print_deps(variable, tag, array, d):
|
||||
depends = variable
|
||||
if depends:
|
||||
depends_dict = bb.utils.explode_dep_versions2(depends)
|
||||
for dep in depends_dict:
|
||||
for ver in depends_dict[dep]:
|
||||
ver = ver.replace('(', '')
|
||||
ver = ver.replace(')', '')
|
||||
array.append("%s: %s %s" % (tag, dep, ver))
|
||||
if not len(depends_dict[dep]):
|
||||
array.append("%s: %s" % (tag, dep))
|
||||
|
||||
def walk_files(walkpath, target, conffiles, dirfiles):
|
||||
# We can race against the ipk/deb backends which create CONTROL or DEBIAN directories
|
||||
# when packaging. We just ignore these files which are created in
|
||||
# packages-split/ and not package/
|
||||
# We have the odd situation where the CONTROL/DEBIAN directory can be removed in the middle of
|
||||
# of the walk, the isdir() test would then fail and the walk code would assume its a file
|
||||
# hence we check for the names in files too.
|
||||
for rootpath, dirs, files in os.walk(walkpath):
|
||||
def get_attr(path):
|
||||
stat_f = os.stat(rootpath + "/" + path, follow_symlinks=False)
|
||||
mode = stat.S_IMODE(stat_f.st_mode)
|
||||
try:
|
||||
owner = pwd.getpwuid(stat_f.st_uid).pw_name
|
||||
except Exception as e:
|
||||
filename = d.getVar('RECIPE_SYSROOT') + '/etc/passwd'
|
||||
if os.path.exists(filename):
|
||||
bb.error("Content of /etc/passwd in sysroot:\n{}".format(
|
||||
open(filename).read()))
|
||||
else:
|
||||
bb.error("File {} doesn't exist in sysroot!".format(filename))
|
||||
raise e
|
||||
try:
|
||||
group = grp.getgrgid(stat_f.st_gid).gr_name
|
||||
except Exception as e:
|
||||
filename = d.getVar("RECIPE_SYSROOT") +"/etc/group"
|
||||
if os.path.exists(filename):
|
||||
bb.error("Content of /etc/group in sysroot:\n{}".format(
|
||||
open(filename).read()))
|
||||
else:
|
||||
bb.error("File {} doesn't exists in sysroot!".format(filename))
|
||||
raise e
|
||||
return "%attr({:o},{},{}) ".format(mode, owner, group)
|
||||
|
||||
def escape_chars(p):
|
||||
return p.replace("%", "%%").replace("\\", "\\\\").replace('"', '\\"')
|
||||
|
||||
path = rootpath.replace(walkpath, "")
|
||||
if path.endswith("DEBIAN") or path.endswith("CONTROL"):
|
||||
continue
|
||||
|
||||
# Treat all symlinks to directories as normal files.
|
||||
# os.walk() lists them as directories.
|
||||
def move_to_files(dir):
|
||||
if os.path.islink(os.path.join(rootpath, dir)):
|
||||
files.append(dir)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
dirs[:] = [dir for dir in dirs if not move_to_files(dir)]
|
||||
|
||||
# Directory handling can happen in two ways, either DIRFILES is not set at all
|
||||
# in which case we fall back to the older behaviour of packages owning all their
|
||||
# directories
|
||||
if dirfiles is None:
|
||||
for dir in dirs:
|
||||
if dir == "CONTROL" or dir == "DEBIAN":
|
||||
continue
|
||||
p = path + '/' + dir
|
||||
# All packages own the directories their files are in...
|
||||
target.append(get_attr(dir) + '%dir "' + escape_chars(p) + '"')
|
||||
elif path:
|
||||
# packages own only empty directories or explict directory.
|
||||
# This will prevent the overlapping of security permission.
|
||||
attr = get_attr(path)
|
||||
if (not files and not dirs) or path in dirfiles:
|
||||
target.append(attr + '%dir "' + escape_chars(path) + '"')
|
||||
|
||||
for file in files:
|
||||
if file == "CONTROL" or file == "DEBIAN":
|
||||
continue
|
||||
attr = get_attr(file)
|
||||
p = path + '/' + file
|
||||
if conffiles.count(p):
|
||||
target.append(attr + '%config "' + escape_chars(p) + '"')
|
||||
else:
|
||||
target.append(attr + '"' + escape_chars(p) + '"')
|
||||
|
||||
# Prevent the prerm/postrm scripts from being run during an upgrade
|
||||
def wrap_uninstall(scriptvar):
|
||||
scr = scriptvar.strip()
|
||||
if scr.startswith("#!"):
|
||||
pos = scr.find("\n") + 1
|
||||
else:
|
||||
pos = 0
|
||||
scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'
|
||||
return scr
|
||||
|
||||
def get_perfile(varname, pkg, d):
|
||||
deps = []
|
||||
dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
|
||||
dependsflist = (d.getVar(dependsflist_key) or "")
|
||||
for dfile in dependsflist.split():
|
||||
key = "FILE" + varname + ":" + dfile + ":" + pkg
|
||||
depends = d.getVar(key)
|
||||
if depends:
|
||||
deps.append(depends)
|
||||
return " ".join(deps)
|
||||
|
||||
def append_description(spec_preamble, text):
|
||||
"""
|
||||
Add the description to the spec file.
|
||||
"""
|
||||
import textwrap
|
||||
dedent_text = textwrap.dedent(text).strip()
|
||||
# Bitbake saves "\n" as "\\n"
|
||||
if '\\n' in dedent_text:
|
||||
for t in dedent_text.split('\\n'):
|
||||
spec_preamble.append(t.strip())
|
||||
else:
|
||||
spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
|
||||
|
||||
packages = d.getVar('PACKAGES')
|
||||
if not packages or packages == '':
|
||||
bb.debug(1, "No packages; nothing to do")
|
||||
return
|
||||
|
||||
pkgdest = d.getVar('PKGDEST')
|
||||
if not pkgdest:
|
||||
bb.fatal("No PKGDEST")
|
||||
|
||||
outspecfile = d.getVar('OUTSPECFILE')
|
||||
if not outspecfile:
|
||||
bb.fatal("No OUTSPECFILE")
|
||||
|
||||
# Construct the SPEC file...
|
||||
srcname = d.getVar('PN')
|
||||
localdata = bb.data.createCopy(d)
|
||||
localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + srcname)
|
||||
srcsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
|
||||
srcversion = localdata.getVar('PKGV').replace('-', '+')
|
||||
srcrelease = localdata.getVar('PKGR')
|
||||
srcepoch = (localdata.getVar('PKGE') or "")
|
||||
srclicense = localdata.getVar('LICENSE')
|
||||
srcsection = localdata.getVar('SECTION')
|
||||
srcmaintainer = localdata.getVar('MAINTAINER')
|
||||
srchomepage = localdata.getVar('HOMEPAGE')
|
||||
srcdescription = localdata.getVar('DESCRIPTION') or "."
|
||||
srccustomtagschunk = oe.packagedata.get_package_additional_metadata("rpm", localdata)
|
||||
|
||||
srcdepends = d.getVar('DEPENDS')
|
||||
srcrdepends = ""
|
||||
srcrrecommends = ""
|
||||
srcrsuggests = ""
|
||||
srcrprovides = ""
|
||||
srcrreplaces = ""
|
||||
srcrconflicts = ""
|
||||
srcrobsoletes = ""
|
||||
|
||||
srcrpreinst = []
|
||||
srcrpostinst = []
|
||||
srcrprerm = []
|
||||
srcrpostrm = []
|
||||
|
||||
spec_preamble_top = []
|
||||
spec_preamble_bottom = []
|
||||
|
||||
spec_scriptlets_top = []
|
||||
spec_scriptlets_bottom = []
|
||||
|
||||
spec_files_top = []
|
||||
spec_files_bottom = []
|
||||
|
||||
perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
|
||||
extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA") or "0") == "1"
|
||||
|
||||
for pkg in packages.split():
|
||||
localdata = bb.data.createCopy(d)
|
||||
|
||||
root = "%s/%s" % (pkgdest, pkg)
|
||||
|
||||
localdata.setVar('ROOT', '')
|
||||
localdata.setVar('ROOT_%s' % pkg, root)
|
||||
pkgname = localdata.getVar('PKG:%s' % pkg)
|
||||
if not pkgname:
|
||||
pkgname = pkg
|
||||
localdata.setVar('PKG', pkgname)
|
||||
|
||||
localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
|
||||
|
||||
conffiles = oe.package.get_conffiles(pkg, d)
|
||||
dirfiles = localdata.getVar('DIRFILES')
|
||||
if dirfiles is not None:
|
||||
dirfiles = dirfiles.split()
|
||||
|
||||
splitname = pkgname
|
||||
|
||||
splitsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
|
||||
splitversion = (localdata.getVar('PKGV') or "").replace('-', '+')
|
||||
splitrelease = (localdata.getVar('PKGR') or "")
|
||||
splitepoch = (localdata.getVar('PKGE') or "")
|
||||
splitlicense = (localdata.getVar('LICENSE') or "")
|
||||
splitsection = (localdata.getVar('SECTION') or "")
|
||||
splitdescription = (localdata.getVar('DESCRIPTION') or ".")
|
||||
splitcustomtagschunk = oe.packagedata.get_package_additional_metadata("rpm", localdata)
|
||||
|
||||
translate_vers('RDEPENDS', localdata)
|
||||
translate_vers('RRECOMMENDS', localdata)
|
||||
translate_vers('RSUGGESTS', localdata)
|
||||
translate_vers('RPROVIDES', localdata)
|
||||
translate_vers('RREPLACES', localdata)
|
||||
translate_vers('RCONFLICTS', localdata)
|
||||
|
||||
# Map the dependencies into their final form
|
||||
oe.packagedata.mapping_rename_hook(localdata)
|
||||
|
||||
splitrdepends = localdata.getVar('RDEPENDS') or ""
|
||||
splitrrecommends = localdata.getVar('RRECOMMENDS') or ""
|
||||
splitrsuggests = localdata.getVar('RSUGGESTS') or ""
|
||||
splitrprovides = localdata.getVar('RPROVIDES') or ""
|
||||
splitrreplaces = localdata.getVar('RREPLACES') or ""
|
||||
splitrconflicts = localdata.getVar('RCONFLICTS') or ""
|
||||
splitrobsoletes = ""
|
||||
|
||||
splitrpreinst = localdata.getVar('pkg_preinst')
|
||||
splitrpostinst = localdata.getVar('pkg_postinst')
|
||||
splitrprerm = localdata.getVar('pkg_prerm')
|
||||
splitrpostrm = localdata.getVar('pkg_postrm')
|
||||
|
||||
|
||||
if not perfiledeps:
|
||||
# Add in summary of per file dependencies
|
||||
splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
|
||||
splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
|
||||
|
||||
splitrdepends = filter_nativesdk_deps(srcname, splitrdepends)
|
||||
|
||||
# Gather special src/first package data
|
||||
if srcname == splitname:
|
||||
archiving = d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and \
|
||||
bb.data.inherits_class('archiver', d)
|
||||
if archiving and srclicense != splitlicense:
|
||||
bb.warn("The SRPM produced may not have the correct overall source license in the License tag. This is due to the LICENSE for the primary package and SRPM conflicting.")
|
||||
|
||||
srclicense = splitlicense
|
||||
srcrdepends = splitrdepends
|
||||
srcrrecommends = splitrrecommends
|
||||
srcrsuggests = splitrsuggests
|
||||
srcrprovides = splitrprovides
|
||||
srcrreplaces = splitrreplaces
|
||||
srcrconflicts = splitrconflicts
|
||||
|
||||
srcrpreinst = splitrpreinst
|
||||
srcrpostinst = splitrpostinst
|
||||
srcrprerm = splitrprerm
|
||||
srcrpostrm = splitrpostrm
|
||||
|
||||
file_list = []
|
||||
walk_files(root, file_list, conffiles, dirfiles)
|
||||
if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
|
||||
bb.note("Not creating empty RPM package for %s" % splitname)
|
||||
else:
|
||||
spec_files_top.append('%files')
|
||||
if extra_pkgdata:
|
||||
package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
|
||||
spec_files_top.append('%defattr(-,-,-,-)')
|
||||
if file_list:
|
||||
bb.note("Creating RPM package for %s" % splitname)
|
||||
spec_files_top.extend(file_list)
|
||||
else:
|
||||
bb.note("Creating empty RPM package for %s" % splitname)
|
||||
spec_files_top.append('')
|
||||
continue
|
||||
|
||||
# Process subpackage data
|
||||
spec_preamble_bottom.append('%%package -n %s' % splitname)
|
||||
spec_preamble_bottom.append('Summary: %s' % splitsummary)
|
||||
if srcversion != splitversion:
|
||||
spec_preamble_bottom.append('Version: %s' % splitversion)
|
||||
if srcrelease != splitrelease:
|
||||
spec_preamble_bottom.append('Release: %s' % splitrelease)
|
||||
if srcepoch != splitepoch:
|
||||
spec_preamble_bottom.append('Epoch: %s' % splitepoch)
|
||||
spec_preamble_bottom.append('License: %s' % splitlicense)
|
||||
spec_preamble_bottom.append('Group: %s' % splitsection)
|
||||
|
||||
if srccustomtagschunk != splitcustomtagschunk:
|
||||
spec_preamble_bottom.append(splitcustomtagschunk)
|
||||
|
||||
# Replaces == Obsoletes && Provides
|
||||
robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes)
|
||||
rprovides = bb.utils.explode_dep_versions2(splitrprovides)
|
||||
rreplaces = bb.utils.explode_dep_versions2(splitrreplaces)
|
||||
for dep in rreplaces:
|
||||
if dep not in robsoletes:
|
||||
robsoletes[dep] = rreplaces[dep]
|
||||
if dep not in rprovides:
|
||||
rprovides[dep] = rreplaces[dep]
|
||||
splitrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
|
||||
splitrprovides = bb.utils.join_deps(rprovides, commasep=False)
|
||||
|
||||
print_deps(splitrdepends, "Requires", spec_preamble_bottom, d)
|
||||
if splitrpreinst:
|
||||
print_deps(splitrdepends, "Requires(pre)", spec_preamble_bottom, d)
|
||||
if splitrpostinst:
|
||||
print_deps(splitrdepends, "Requires(post)", spec_preamble_bottom, d)
|
||||
if splitrprerm:
|
||||
print_deps(splitrdepends, "Requires(preun)", spec_preamble_bottom, d)
|
||||
if splitrpostrm:
|
||||
print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
|
||||
|
||||
print_deps(splitrrecommends, "Recommends", spec_preamble_bottom, d)
|
||||
print_deps(splitrsuggests, "Suggests", spec_preamble_bottom, d)
|
||||
print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
|
||||
print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
|
||||
print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
|
||||
|
||||
spec_preamble_bottom.append('')
|
||||
|
||||
spec_preamble_bottom.append('%%description -n %s' % splitname)
|
||||
append_description(spec_preamble_bottom, splitdescription)
|
||||
|
||||
spec_preamble_bottom.append('')
|
||||
|
||||
# Now process scriptlets
|
||||
if splitrpreinst:
|
||||
spec_scriptlets_bottom.append('%%pre -n %s' % splitname)
|
||||
spec_scriptlets_bottom.append('# %s - preinst' % splitname)
|
||||
spec_scriptlets_bottom.append(splitrpreinst)
|
||||
spec_scriptlets_bottom.append('')
|
||||
if splitrpostinst:
|
||||
spec_scriptlets_bottom.append('%%post -n %s' % splitname)
|
||||
spec_scriptlets_bottom.append('# %s - postinst' % splitname)
|
||||
spec_scriptlets_bottom.append(splitrpostinst)
|
||||
spec_scriptlets_bottom.append('')
|
||||
if splitrprerm:
|
||||
spec_scriptlets_bottom.append('%%preun -n %s' % splitname)
|
||||
spec_scriptlets_bottom.append('# %s - prerm' % splitname)
|
||||
scriptvar = wrap_uninstall(splitrprerm)
|
||||
spec_scriptlets_bottom.append(scriptvar)
|
||||
spec_scriptlets_bottom.append('')
|
||||
if splitrpostrm:
|
||||
spec_scriptlets_bottom.append('%%postun -n %s' % splitname)
|
||||
spec_scriptlets_bottom.append('# %s - postrm' % splitname)
|
||||
scriptvar = wrap_uninstall(splitrpostrm)
|
||||
spec_scriptlets_bottom.append(scriptvar)
|
||||
spec_scriptlets_bottom.append('')
|
||||
|
||||
# Now process files
|
||||
file_list = []
|
||||
walk_files(root, file_list, conffiles, dirfiles)
|
||||
if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
|
||||
bb.note("Not creating empty RPM package for %s" % splitname)
|
||||
else:
|
||||
spec_files_bottom.append('%%files -n %s' % splitname)
|
||||
if extra_pkgdata:
|
||||
package_rpm_extra_pkgdata(splitname, spec_files_bottom, localdata)
|
||||
spec_files_bottom.append('%defattr(-,-,-,-)')
|
||||
if file_list:
|
||||
bb.note("Creating RPM package for %s" % splitname)
|
||||
spec_files_bottom.extend(file_list)
|
||||
else:
|
||||
bb.note("Creating empty RPM package for %s" % splitname)
|
||||
spec_files_bottom.append('')
|
||||
|
||||
del localdata
|
||||
|
||||
add_prep(d, spec_files_bottom)
|
||||
spec_preamble_top.append('Summary: %s' % srcsummary)
|
||||
spec_preamble_top.append('Name: %s' % srcname)
|
||||
spec_preamble_top.append('Version: %s' % srcversion)
|
||||
spec_preamble_top.append('Release: %s' % srcrelease)
|
||||
if srcepoch and srcepoch.strip() != "":
|
||||
spec_preamble_top.append('Epoch: %s' % srcepoch)
|
||||
spec_preamble_top.append('License: %s' % srclicense)
|
||||
spec_preamble_top.append('Group: %s' % srcsection)
|
||||
spec_preamble_top.append('Packager: %s' % srcmaintainer)
|
||||
if srchomepage:
|
||||
spec_preamble_top.append('URL: %s' % srchomepage)
|
||||
if srccustomtagschunk:
|
||||
spec_preamble_top.append(srccustomtagschunk)
|
||||
tail_source(d)
|
||||
|
||||
# Replaces == Obsoletes && Provides
|
||||
robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes)
|
||||
rprovides = bb.utils.explode_dep_versions2(srcrprovides)
|
||||
rreplaces = bb.utils.explode_dep_versions2(srcrreplaces)
|
||||
for dep in rreplaces:
|
||||
if dep not in robsoletes:
|
||||
robsoletes[dep] = rreplaces[dep]
|
||||
if dep not in rprovides:
|
||||
rprovides[dep] = rreplaces[dep]
|
||||
srcrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
|
||||
srcrprovides = bb.utils.join_deps(rprovides, commasep=False)
|
||||
|
||||
print_deps(srcdepends, "BuildRequires", spec_preamble_top, d)
|
||||
print_deps(srcrdepends, "Requires", spec_preamble_top, d)
|
||||
if srcrpreinst:
|
||||
print_deps(srcrdepends, "Requires(pre)", spec_preamble_top, d)
|
||||
if srcrpostinst:
|
||||
print_deps(srcrdepends, "Requires(post)", spec_preamble_top, d)
|
||||
if srcrprerm:
|
||||
print_deps(srcrdepends, "Requires(preun)", spec_preamble_top, d)
|
||||
if srcrpostrm:
|
||||
print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
|
||||
|
||||
print_deps(srcrrecommends, "Recommends", spec_preamble_top, d)
|
||||
print_deps(srcrsuggests, "Suggests", spec_preamble_top, d)
|
||||
print_deps(srcrprovides, "Provides", spec_preamble_top, d)
|
||||
print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
|
||||
print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
|
||||
|
||||
spec_preamble_top.append('')
|
||||
|
||||
spec_preamble_top.append('%description')
|
||||
append_description(spec_preamble_top, srcdescription)
|
||||
|
||||
spec_preamble_top.append('')
|
||||
|
||||
if srcrpreinst:
|
||||
spec_scriptlets_top.append('%pre')
|
||||
spec_scriptlets_top.append('# %s - preinst' % srcname)
|
||||
spec_scriptlets_top.append(srcrpreinst)
|
||||
spec_scriptlets_top.append('')
|
||||
if srcrpostinst:
|
||||
spec_scriptlets_top.append('%post')
|
||||
spec_scriptlets_top.append('# %s - postinst' % srcname)
|
||||
spec_scriptlets_top.append(srcrpostinst)
|
||||
spec_scriptlets_top.append('')
|
||||
if srcrprerm:
|
||||
spec_scriptlets_top.append('%preun')
|
||||
spec_scriptlets_top.append('# %s - prerm' % srcname)
|
||||
scriptvar = wrap_uninstall(srcrprerm)
|
||||
spec_scriptlets_top.append(scriptvar)
|
||||
spec_scriptlets_top.append('')
|
||||
if srcrpostrm:
|
||||
spec_scriptlets_top.append('%postun')
|
||||
spec_scriptlets_top.append('# %s - postrm' % srcname)
|
||||
scriptvar = wrap_uninstall(srcrpostrm)
|
||||
spec_scriptlets_top.append(scriptvar)
|
||||
spec_scriptlets_top.append('')
|
||||
|
||||
# Write the SPEC file
|
||||
specfile = open(outspecfile, 'w')
|
||||
|
||||
# RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
|
||||
# of the generated spec file
|
||||
external_preamble = d.getVar("RPMSPEC_PREAMBLE")
|
||||
if external_preamble:
|
||||
specfile.write(external_preamble + "\n")
|
||||
|
||||
for line in spec_preamble_top:
|
||||
specfile.write(line + "\n")
|
||||
|
||||
for line in spec_preamble_bottom:
|
||||
specfile.write(line + "\n")
|
||||
|
||||
for line in spec_scriptlets_top:
|
||||
specfile.write(line + "\n")
|
||||
|
||||
for line in spec_scriptlets_bottom:
|
||||
specfile.write(line + "\n")
|
||||
|
||||
for line in spec_files_top:
|
||||
specfile.write(line + "\n")
|
||||
|
||||
for line in spec_files_bottom:
|
||||
specfile.write(line + "\n")
|
||||
|
||||
specfile.close()
|
||||
}
|
||||
# OVERRIDES must not affect the spec-file task signature, otherwise allarch
# packages may change depending on override configuration
write_specfile[vardepsexclude] = "OVERRIDES"

# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
RPMEXTRAVARS = "PACKAGE_ADD_METADATA_RPM"
write_specfile[vardeps] += "${@gen_packagevar(d, 'RPMEXTRAVARS')}"
|
||||
|
||||
python do_package_rpm () {
    # Build the binary RPMs (and optionally a source RPM) for this recipe:
    # write the spec file, set up per-file dependency helpers, then invoke
    # rpmbuild with a fully pinned macro environment for reproducibility.
    workdir = d.getVar('WORKDIR')
    tmpdir = d.getVar('TMPDIR')
    pkgd = d.getVar('PKGD')
    if not workdir or not pkgd or not tmpdir:
        bb.error("Variables incorrectly set, unable to package")
        return

    packages = d.getVar('PACKAGES')
    if not packages or packages == '':
        bb.debug(1, "No packages; nothing to do")
        return

    # Construct the spec file...
    # If the spec file already exist, and has not been stored into
    # pseudo's files.db, it maybe cause rpmbuild src.rpm fail,
    # so remove it before doing rpmbuild src.rpm.
    srcname = d.getVar('PN')
    outspecfile = workdir + "/" + srcname + ".spec"
    if os.path.isfile(outspecfile):
        os.remove(outspecfile)
    d.setVar('OUTSPECFILE', outspecfile)
    bb.build.exec_func('write_specfile', d)

    # MERGEPERFILEDEPS unset/"0" means per-file dependency scripts are used.
    perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
    if perfiledeps:
        outdepends, outprovides = write_rpm_perfiledata(srcname, d)

    # Setup the rpmbuild arguments...
    rpmbuild = d.getVar('RPMBUILD')
    rpmbuild_compmode = d.getVar('RPMBUILD_COMPMODE')
    rpmbuild_extra_params = d.getVar('RPMBUILD_EXTRA_PARAMS') or ""

    # Too many places in dnf stack assume that arch-independent packages are "noarch".
    # Let's not fight against this.
    package_arch = (d.getVar('PACKAGE_ARCH') or "").replace("-", "_")
    if package_arch == "all":
        package_arch = "noarch"

    d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
    pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
    d.setVar('RPM_PKGWRITEDIR', pkgwritedir)
    bb.debug(1, 'PKGWRITEDIR: %s' % d.getVar('RPM_PKGWRITEDIR'))
    pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-linux')
    bb.utils.mkdirhier(pkgwritedir)
    os.chmod(pkgwritedir, 0o755)

    # Assemble the rpmbuild command line; macros are pinned so the output is
    # reproducible and independent of the host rpm configuration.
    cmd = rpmbuild
    cmd = cmd + " --noclean --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
    cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
    cmd = cmd + " --define '_builddir " + d.getVar('B') + "'"
    cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
    cmd = cmd + " --define '_use_internal_dependency_generator 0'"
    cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
    cmd = cmd + " --define '_build_id_links none'"
    cmd = cmd + " --define '_smp_ncpus_max 4'"
    cmd = cmd + " --define '_source_payload %s'" % rpmbuild_compmode
    cmd = cmd + " --define '_binary_payload %s'" % rpmbuild_compmode
    cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
    cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
    cmd = cmd + " --define '_buildhost reproducible'"
    cmd = cmd + " --define '__font_provides %{nil}'"
    if perfiledeps:
        # Use our own generated per-file dependency scripts
        cmd = cmd + " --define '__find_requires " + outdepends + "'"
        cmd = cmd + " --define '__find_provides " + outprovides + "'"
    else:
        cmd = cmd + " --define '__find_requires %{nil}'"
        cmd = cmd + " --define '__find_provides %{nil}'"
    cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
    cmd = cmd + " --define 'debug_package %{nil}'"
    cmd = cmd + " --define '_tmppath " + workdir + "'"
    cmd = cmd + " --define '_use_weak_usergroup_deps 1'"
    # Deliberately bogus so rpmbuild never reads the host passwd/group files
    cmd = cmd + " --define '_passwd_path " + "/completely/bogus/path" + "'"
    cmd = cmd + " --define '_group_path " + "/completely/bogus/path" + "'"
    cmd = cmd + rpmbuild_extra_params
    if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
        cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR') + "'"
        cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_RPMOUTDIR') + "'"
        cmdsrpm = cmdsrpm + " -bs " + outspecfile
        # Build the .src.rpm
        d.setVar('SBUILDSPEC', cmdsrpm + "\n")
        d.setVarFlag('SBUILDSPEC', 'func', '1')
        bb.build.exec_func('SBUILDSPEC', d)
    cmd = cmd + " -bb " + outspecfile

    # rpm 4 creates various empty directories in _topdir, let's clean them up
    cleanupcmd = "rm -rf %s/BUILDROOT %s/SOURCES %s/SPECS %s/SRPMS" % (workdir, workdir, workdir, workdir)

    # Build the rpm package!
    d.setVar('BUILDSPEC', cmd + "\n" + cleanupcmd + "\n")
    d.setVarFlag('BUILDSPEC', 'func', '1')
    bb.build.exec_func('BUILDSPEC', d)

    if d.getVar('RPM_SIGN_PACKAGES') == '1':
        bb.build.exec_func("sign_rpm", d)
}
|
||||
|
||||
python () {
    # Wire up do_package_write_rpm only when this recipe produces packages:
    # it needs rpm-native plus a fakeroot environment to create the files
    # with the right ownership.
    if d.getVar('PACKAGES') != '':
        deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
        d.appendVarFlag('do_package_write_rpm', 'depends', deps)
        d.setVarFlag('do_package_write_rpm', 'fakeroot', '1')

        # Needed to ensure PKG_xxx renaming of dependency packages works
        d.setVarFlag('do_package_write_rpm', 'deptask', "do_packagedata")
        d.setVarFlag('do_package_write_rpm', 'rdeptask', "do_packagedata")
}
|
||||
|
||||
# Make do_package_write_rpm sstate-cacheable: its output directory is
# captured as input and restored into the RPM deploy directory.
SSTATETASKS += "do_package_write_rpm"
do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}"
# Take a shared lock, we can write multiple packages at the same time...
# but we need to stop the rootfs/solver from running while we do...
do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock"

python do_package_write_rpm_setscene () {
    # Restore do_package_write_rpm output from shared state instead of rebuilding.
    sstate_setscene(d)
}
addtask do_package_write_rpm_setscene
|
||||
|
||||
python do_package_write_rpm () {
    # Pull in the per-package metadata written by do_packagedata, then
    # generate the RPMs themselves.
    bb.build.exec_func("read_subpackage_metadata", d)
    bb.build.exec_func("do_package_rpm", d)
}
|
||||
|
||||
do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_rpm after do_packagedata do_package do_deploy_source_date_epoch before do_build
do_build[rdeptask] += "do_package_write_rpm"

# Tools needed when generating/refreshing an RPM package feed index
PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
|
||||
40
sources/poky/meta/classes-global/packagedata.bbclass
Normal file
40
sources/poky/meta/classes-global/packagedata.bbclass
Normal file
@@ -0,0 +1,40 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
python read_subpackage_metadata () {
    # Load the pkgdata written by do_packagedata back into the datastore,
    # sanity-checking that the identity variables (PN/PE/PV/PR) have not
    # changed since the data was written.
    import oe.packagedata

    vars = {
        "PN" : d.getVar('PN'),
        "PE" : d.getVar('PE'),
        "PV" : d.getVar('PV'),
        "PR" : d.getVar('PR'),
    }

    data = oe.packagedata.read_pkgdata(vars["PN"], d)

    for key in data.keys():
        d.setVar(key, data[key])

    for pkg in d.getVar('PACKAGES').split():
        sdata = oe.packagedata.read_subpkgdata(pkg, d)
        for key in sdata.keys():
            if key in vars:
                # Identity variables must match what was recorded; a mismatch
                # means another recipe wrote this package or stale pkgdata.
                if sdata[key] != vars[key]:
                    if key == "PN":
                        bb.fatal("Recipe %s is trying to create package %s which was already written by recipe %s. This will cause corruption, please resolve this and only provide the package from one recipe or the other or only build one of the recipes." % (vars[key], pkg, sdata[key]))
                    bb.fatal("Recipe %s is trying to change %s from '%s' to '%s'. This will cause do_package_write_* failures since the incorrect data will be used and they will be unable to find the right workdir." % (vars["PN"], key, vars[key], sdata[key]))
                continue
            #
            # If we set unsuffixed variables here there is a chance they could clobber override versions
            # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION:<pkgname>
            # We therefore don't clobber for the unsuffixed variable versions
            #
            if key.endswith(":" + pkg):
                d.setVar(key, sdata[key])
            else:
                d.setVar(key, sdata[key], parsing=True)
}
|
||||
169
sources/poky/meta/classes-global/patch.bbclass
Normal file
169
sources/poky/meta/classes-global/patch.bbclass
Normal file
@@ -0,0 +1,169 @@
|
||||
# Copyright (C) 2006 OpenedHand LTD
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
# Point to an empty file so any user's custom settings don't break things
QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"

# Build-time dependency on the selected patch tool
PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"

# There is a bug in patch 2.7.3 and earlier where index lines
# in patches can change file modes when they shouldn't:
# http://git.savannah.gnu.org/cgit/patch.git/patch/?id=82b800c9552a088a241457948219d25ce0a407a4
# This leaks into debug sources in particular. Add the dependency
# to target recipes to avoid this problem until we can rely on 2.7.4 or later.
PATCHDEPENDENCY:append:class-target = " patch-replacement-native:do_populate_sysroot"

# Author identity used for commits made when PATCHTOOL = "git"
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"

inherit terminal
|
||||
|
||||
python () {
    # When patches are managed as git commits, every task between do_unpack
    # and do_patch must commit its tree changes so the repository stays clean.
    if d.getVar('PATCHTOOL') == 'git' and d.getVar('PATCH_COMMIT_FUNCTIONS') == '1':
        extratasks = bb.build.tasksbetween('do_unpack', 'do_patch', d)
        try:
            extratasks.remove('do_unpack')
        except ValueError:
            # For some recipes do_unpack doesn't exist, ignore it
            pass

        d.appendVarFlag('do_patch', 'prefuncs', ' patch_task_patch_prefunc')
        for task in extratasks:
            d.appendVarFlag(task, 'postfuncs', ' patch_task_postfunc')
}
|
||||
|
||||
python patch_task_patch_prefunc() {
    # Prefunc for do_patch: verify that S really lives in a git repository
    # (when it is under WORKDIR) and note any pre-existing quilt patches/
    # directory so the postfunc can restore it later.
    srcsubdir = d.getVar('S')

    workdir = os.path.abspath(d.getVar('WORKDIR'))
    testsrcdir = os.path.abspath(srcsubdir)
    if (testsrcdir + os.sep).startswith(workdir + os.sep):
        # Double-check that either workdir or S or some directory in-between is a git repository
        found = False
        while testsrcdir != workdir:
            if os.path.exists(os.path.join(testsrcdir, '.git')):
                found = True
                break
            if testsrcdir == workdir:
                break
            testsrcdir = os.path.dirname(testsrcdir)
        if not found:
            bb.fatal('PATCHTOOL = "git" set for source tree that is not a git repository. Refusing to continue as that may result in commits being made in your metadata repository.')

    patchdir = os.path.join(srcsubdir, 'patches')
    if os.path.exists(patchdir):
        if os.listdir(patchdir):
            # Remember that a non-empty patches/ dir pre-existed so
            # patch_task_postfunc can restore it after do_patch.
            d.setVar('PATCH_HAS_PATCHES_DIR', '1')
        else:
            os.rmdir(patchdir)
}
|
||||
|
||||
python patch_task_postfunc() {
    # Postfunc for task functions between do_unpack and do_patch: commit any
    # changes the task made to the source tree so the git history stays clean
    # for subsequent tasks.
    import oe.patch
    import shutil
    func = d.getVar('BB_RUNTASK')
    srcsubdir = d.getVar('S')

    if os.path.exists(srcsubdir):
        if func == 'do_patch':
            # do_patch is special: drop the quilt patches/ dir (restoring it
            # from git if the prefunc recorded that it pre-existed).
            haspatches = (d.getVar('PATCH_HAS_PATCHES_DIR') == '1')
            patchdir = os.path.join(srcsubdir, 'patches')
            if os.path.exists(patchdir):
                shutil.rmtree(patchdir)
                if haspatches:
                    stdout, _ = bb.process.run('git status --porcelain patches', cwd=srcsubdir)
                    if stdout:
                        bb.process.run('git checkout patches', cwd=srcsubdir)
        stdout, _ = bb.process.run('git status --porcelain .', cwd=srcsubdir)
        if stdout:
            oe.patch.GitApplyTree.commitIgnored("Add changes from %s" % func, dir=srcsubdir, files=['.'], d=d)
}
|
||||
|
||||
def src_patches(d, all=False, expand=True):
    """Return the patch entries from SRC_URI; thin wrapper around oe.patch."""
    import oe.patch

    entries = oe.patch.src_patches(d, all, expand)
    return entries
|
||||
|
||||
def should_apply(parm, d):
    """Determine if we should apply the given patch"""
    import oe.patch

    verdict = oe.patch.should_apply(parm, d)
    return verdict
|
||||
|
||||
# DATE/SRCDATE change every build; keep them out of the signature
should_apply[vardepsexclude] = "DATE SRCDATE"
|
||||
|
||||
python patch_do_patch() {
    # Apply all patches from SRC_URI to the source tree using the tool
    # selected by PATCHTOOL, resolving failures via PATCHRESOLVE.
    import oe.patch

    # Map PATCHTOOL names to the implementation classes.
    patchsetmap = {
        "patch": oe.patch.PatchTree,
        "quilt": oe.patch.QuiltTree,
        "git": oe.patch.GitApplyTree,
    }

    cls = patchsetmap[d.getVar('PATCHTOOL') or 'quilt']

    # Map PATCHRESOLVE names to the conflict-resolution strategies.
    resolvermap = {
        "noop": oe.patch.NOOPResolver,
        "user": oe.patch.UserResolver,
    }

    rcls = resolvermap[d.getVar('PATCHRESOLVE') or 'user']

    # One (patchset, resolver) pair per target directory.
    classes = {}

    s = d.getVar('S')

    os.putenv('PATH', d.getVar('PATH'))

    # We must use one TMPDIR per process so that the "patch" processes
    # don't generate the same temp file name.

    import tempfile
    process_tmpdir = tempfile.mkdtemp()
    os.environ['TMPDIR'] = process_tmpdir

    for patch in src_patches(d):
        _, _, local, _, _, parm = bb.fetch.decodeurl(patch)

        if "patchdir" in parm:
            patchdir = parm["patchdir"]
            if not os.path.isabs(patchdir):
                patchdir = os.path.join(s, patchdir)
            if not os.path.isdir(patchdir):
                bb.fatal("Target directory '%s' not found, patchdir '%s' is incorrect in patch file '%s'" %
                                (patchdir, parm["patchdir"], parm['patchname']))
        else:
            patchdir = s

        if not patchdir in classes:
            patchset = cls(patchdir, d)
            resolver = rcls(patchset, oe_terminal)
            classes[patchdir] = (patchset, resolver)
            # Start each directory from a clean patch state.
            patchset.Clean()
        else:
            patchset, resolver = classes[patchdir]

        bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
        try:
            patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
        except Exception as exc:
            # Remove the private TMPDIR before aborting the build.
            bb.utils.remove(process_tmpdir, True)
            bb.fatal("Importing patch '%s' with striplevel '%s'\n%s" % (parm['patchname'], parm['striplevel'], repr(exc).replace("\\n", "\n")))
        try:
            resolver.Resolve()
        except bb.BBHandledException as e:
            bb.utils.remove(process_tmpdir, True)
            bb.fatal("Applying patch '%s' on target directory '%s'\n%s" % (parm['patchname'], patchdir, repr(e).replace("\\n", "\n")))

    bb.utils.remove(process_tmpdir, True)
    del os.environ['TMPDIR']
}
|
||||
# PATCHRESOLVE only affects interactive conflict handling, not the output
patch_do_patch[vardepsexclude] = "PATCHRESOLVE"

addtask patch after do_unpack
do_patch[dirs] = "${WORKDIR}"
do_patch[depends] = "${PATCHDEPENDENCY}"

EXPORT_FUNCTIONS do_patch
|
||||
1062
sources/poky/meta/classes-global/sanity.bbclass
Normal file
1062
sources/poky/meta/classes-global/sanity.bbclass
Normal file
File diff suppressed because it is too large
Load Diff
1377
sources/poky/meta/classes-global/sstate.bbclass
Normal file
1377
sources/poky/meta/classes-global/sstate.bbclass
Normal file
File diff suppressed because it is too large
Load Diff
695
sources/poky/meta/classes-global/staging.bbclass
Normal file
695
sources/poky/meta/classes-global/staging.bbclass
Normal file
@@ -0,0 +1,695 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# These directories will be staged in the sysroot
SYSROOT_DIRS = " \
    ${includedir} \
    ${libdir} \
    ${base_libdir} \
    ${nonarch_base_libdir} \
    ${datadir} \
    /sysroot-only \
"

# These directories are also staged in the sysroot when they contain files that
# are usable on the build system
SYSROOT_DIRS_NATIVE = " \
    ${bindir} \
    ${sbindir} \
    ${base_bindir} \
    ${base_sbindir} \
    ${libexecdir} \
    ${sysconfdir} \
    ${localstatedir} \
"
SYSROOT_DIRS:append:class-native = " ${SYSROOT_DIRS_NATIVE}"
SYSROOT_DIRS:append:class-cross = " ${SYSROOT_DIRS_NATIVE}"
SYSROOT_DIRS:append:class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"

# These directories will not be staged in the sysroot
SYSROOT_DIRS_IGNORE = " \
    ${mandir} \
    ${docdir} \
    ${infodir} \
    ${datadir}/X11/locale \
    ${datadir}/applications \
    ${datadir}/bash-completion \
    ${datadir}/fonts \
    ${datadir}/gtk-doc/html \
    ${datadir}/installed-tests \
    ${datadir}/locale \
    ${datadir}/pixmaps \
    ${datadir}/terminfo \
    ${libdir}/${BPN}/ptest \
"
|
||||
|
||||
sysroot_stage_dir() {
    # Stage the contents of directory $1 into $2 (created if needed),
    # hardlinking files via cpio pass-through mode (-p -d -l -u).
    src="$1"
    dest="$2"
    # if the src doesn't exist don't do anything
    if [ ! -d "$src" ]; then
        return
    fi

    mkdir -p "$dest"
    rdest=$(realpath --relative-to="$src" "$dest")
    (
        # Quote path expansions so directories containing spaces or glob
        # characters are handled correctly (previously unquoted).
        cd "$src"
        find . -print0 | cpio --null -pdlu "$rdest"
    )
}
|
||||
|
||||
sysroot_stage_dirs() {
    # Stage every directory listed in SYSROOT_DIRS from $1 into $2, then
    # prune the SYSROOT_DIRS_IGNORE subtrees from the result.
    from="$1"
    to="$2"

    for dir in ${SYSROOT_DIRS}; do
        sysroot_stage_dir "$from$dir" "$to$dir"
    done

    # Remove directories we do not care about
    for dir in ${SYSROOT_DIRS_IGNORE}; do
        rm -rf "$to$dir"
    done
}

sysroot_stage_all() {
    # Stage the install output (${D}) into the sysroot staging area.
    sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
}
|
||||
|
||||
python sysroot_strip () {
    # Strip executables/libraries in the staged sysroot unless
    # INHIBIT_SYSROOT_STRIP is set to a true value.
    inhibit_sysroot = d.getVar('INHIBIT_SYSROOT_STRIP')
    if inhibit_sysroot and oe.types.boolean(inhibit_sysroot):
        return

    dstdir = d.getVar('SYSROOT_DESTDIR')
    pn = d.getVar('PN')
    libdir = d.getVar("libdir")
    base_libdir = d.getVar("base_libdir")
    # Skip the already-stripped QA complaint if the recipe opted out of it.
    qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split()
    strip_cmd = d.getVar("STRIP")

    max_process = oe.utils.get_bb_number_threads(d)
    oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, max_process,
                           qa_already_stripped=qa_already_stripped)
}
|
||||
|
||||
do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"

addtask populate_sysroot after do_install

# Optional recipe-provided hooks run after staging/stripping
SYSROOT_PREPROCESS_FUNCS ?= ""
SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
|
||||
|
||||
python do_populate_sysroot () {
    # Stage and strip this recipe's output, run any preprocess hooks, and
    # record which virtual targets this recipe provides via marker files.
    # SYSROOT 'version' 2
    bb.build.exec_func("sysroot_stage_all", d)
    bb.build.exec_func("sysroot_strip", d)
    for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
        bb.build.exec_func(f, d)
    pn = d.getVar("PN")
    multiprov = d.getVar("BB_MULTI_PROVIDER_ALLOWED").split()
    provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
    bb.utils.mkdirhier(provdir)
    for p in d.getVar("PROVIDES").split():
        # Providers that are allowed to be multiply provided get no marker.
        if p in multiprov:
            continue
        p = p.replace("/", "_")
        with open(provdir + p, "w") as f:
            f.write(pn)
}
|
||||
|
||||
do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
do_populate_sysroot[vardepsexclude] += "BB_MULTI_PROVIDER_ALLOWED"

# Target/nativesdk recipes need binutils (for stripping) in the sysroot
POPULATESYSROOTDEPS = ""
POPULATESYSROOTDEPS:class-target = "virtual/${HOST_PREFIX}binutils:do_populate_sysroot"
POPULATESYSROOTDEPS:class-nativesdk = "virtual/${HOST_PREFIX}binutils:do_populate_sysroot"
do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"

# Make do_populate_sysroot sstate-cacheable; output lands in COMPONENTS_DIR
SSTATETASKS += "do_populate_sysroot"
do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
do_populate_sysroot[sstate-fixmedir] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"

python do_populate_sysroot_setscene () {
    # Restore do_populate_sysroot output from shared state.
    sstate_setscene(d)
}
addtask do_populate_sysroot_setscene
|
||||
|
||||
def staging_copyfile(c, target, dest, postinsts, seendirs):
    """Stage a single manifest entry *c* into the sysroot at *dest*.

    Symlinks are recreated; regular files are hardlinked, falling back to a
    copy when the link crosses filesystems.  Postinst scripts are collected
    into *postinsts* and created parent directories are cached in *seendirs*.
    Returns *dest*.
    """
    import errno

    parent = os.path.dirname(dest)
    if parent not in seendirs:
        bb.utils.mkdirhier(parent)
        seendirs.add(parent)

    # Remember postinst scripts so the caller can run them afterwards.
    if "/usr/bin/postinst-" in c:
        postinsts.append(dest)

    if not os.path.islink(c):
        # Regular file: try a hardlink first, copy across devices.
        try:
            os.link(c, dest)
        except OSError as exc:
            if exc.errno != errno.EXDEV:
                raise
            bb.utils.copyfile(c, dest)
        return dest

    # Symlink: recreate it, tolerating an identical pre-existing link.
    linkto = os.readlink(c)
    if os.path.lexists(dest):
        if not os.path.islink(dest):
            raise OSError(errno.EEXIST, "Link %s already exists as a file" % dest, dest)
        if os.readlink(dest) == linkto:
            return dest
        raise OSError(errno.EEXIST, "Link %s already exists to a different location? (%s vs %s)" % (dest, os.readlink(dest), linkto), dest)
    os.symlink(linkto, dest)
    return dest
|
||||
|
||||
def staging_copydir(c, target, dest, seendirs):
    """Ensure directory *dest* exists, creating it at most once per run."""
    if dest in seendirs:
        return
    bb.utils.mkdirhier(dest)
    seendirs.add(dest)
|
||||
|
||||
def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
    """Rewrite FIXMESTAGINGDIR*/FIXME_* placeholders inside the staged files
    listed in *fixme* so they point at this recipe's sysroot locations."""
    import subprocess

    if not fixme:
        return
    # The first sed turns the manifest-relative paths into absolute paths
    # under 'target'; xargs/sed -i then substitutes the placeholders in-place.
    cmd = "sed -e 's:^[^/]*/:%s/:g' %s | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (target, " ".join(fixme), recipesysroot, recipesysrootnative)
    for fixmevar in ['PSEUDO_SYSROOT', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
        fixme_path = d.getVar(fixmevar)
        cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
    bb.debug(2, cmd)
    subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
|
||||
|
||||
|
||||
def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
    """Populate a sysroot directly from the sstate manifests (bitbake -b mode).

    Walks the populate_sysroot manifests matching the relevant package
    architectures, links/copies each listed file into *targetsysroot* or
    *nativesysroot* (per *native*), rewrites fixme placeholders and runs any
    staged postinst scripts.
    """
    import glob
    import subprocess
    import errno

    fixme = []
    postinsts = []
    seendirs = set()
    stagingdir = d.getVar("STAGING_DIR")
    if native:
        pkgarchs = ['${BUILD_ARCH}', '${BUILD_ARCH}_*']
        targetdir = nativesysroot
    else:
        # Most-specific arch last in PACKAGE_EXTRA_ARCHS, so reverse it.
        pkgarchs = ['${MACHINE_ARCH}']
        pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
        pkgarchs.append('allarch')
        targetdir = targetsysroot

    bb.utils.mkdirhier(targetdir)
    for pkgarch in pkgarchs:
        for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.populate_sysroot" % pkgarch)):
            if manifest.endswith("-initial.populate_sysroot"):
                # skip libgcc-initial due to file overlap
                continue
            if not native and (manifest.endswith("-native.populate_sysroot") or "nativesdk-" in manifest):
                continue
            if native and not (manifest.endswith("-native.populate_sysroot") or manifest.endswith("-cross.populate_sysroot") or "-cross-" in manifest):
                continue
            tmanifest = targetdir + "/" + os.path.basename(manifest)
            if os.path.exists(tmanifest):
                # Manifest already installed into this sysroot; skip it.
                continue
            try:
                os.link(manifest, tmanifest)
            except OSError as err:
                if err.errno == errno.EXDEV:
                    bb.utils.copyfile(manifest, tmanifest)
                else:
                    raise
            with open(manifest, "r") as f:
                for l in f:
                    l = l.strip()
                    if l.endswith("/fixmepath"):
                        # Defer placeholder rewriting until all files staged.
                        fixme.append(l)
                        continue
                    if l.endswith("/fixmepath.cmd"):
                        continue
                    dest = l.replace(stagingdir, "")
                    dest = targetdir + "/" + "/".join(dest.split("/")[3:])
                    if l.endswith("/"):
                        staging_copydir(l, targetdir, dest, seendirs)
                        continue
                    try:
                        staging_copyfile(l, targetdir, dest, postinsts, seendirs)
                    except FileExistsError:
                        continue

    staging_processfixme(fixme, targetdir, targetsysroot, nativesysroot, d)
    for p in sorted(postinsts):
        bb.note("Running postinst {}, output:\n{}".format(p, subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)))
|
||||
|
||||
#
|
||||
# Manifests here are complicated. The main sysroot area has the unpacked sstate
|
||||
# which is unrelocated and tracked by the main sstate manifests. Each recipe
|
||||
# specific sysroot has manifests for each dependency that is installed there.
|
||||
# The task hash is used to tell whether the data needs to be reinstalled. We
|
||||
# use a symlink to point to the currently installed hash. There is also a
|
||||
# "complete" stamp file which is used to mark if installation completed. If
|
||||
# something fails (e.g. a postinst), this won't get written and we would
|
||||
# remove and reinstall the dependency. This also means partially installed
|
||||
# dependencies should get cleaned up correctly.
|
||||
#
|
||||
|
||||
python extend_recipe_sysroot() {
    # Build this recipe's private sysroot: walk BB_TASKDEPDATA, decide (via
    # setscene_depvalid) which do_populate_sysroot outputs sstate would have
    # restored, then install/remove/refresh them under a per-sysroot lock.
    # Per-dependency symlinks plus ".complete" markers in installeddeps/ record
    # what is present so stale or partial installs are cleaned up on rerun.
    import copy
    import subprocess
    import errno
    import collections
    import glob

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    mytaskname = d.getVar("BB_RUNTASK")
    if mytaskname.endswith("_setscene"):
        mytaskname = mytaskname.replace("_setscene", "")
    workdir = d.getVar("WORKDIR")
    #bb.warn(str(taskdepdata))
    pn = d.getVar("PN")
    stagingdir = d.getVar("STAGING_DIR")
    sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
    # only needed by multilib cross-canadian since it redefines RECIPE_SYSROOT
    manifestprefix = d.getVar("RECIPE_SYSROOT_MANIFEST_SUBDIR")
    if manifestprefix:
        sharedmanifests = sharedmanifests + "/" + manifestprefix
    recipesysroot = d.getVar("RECIPE_SYSROOT")
    recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")

    # Detect bitbake -b usage
    # (no dependency data available, so populate from everything staged)
    nodeps = d.getVar("BB_LIMITEDDEPS") or False
    if nodeps:
        lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
        staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, True, d)
        staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, False, d)
        bb.utils.unlockfile(lock)
        return

    start = None
    configuredeps = []
    owntaskdeps = []
    # Locate our own task in the dependency data and remember this recipe's
    # other tasks (written to the index file as "TaskDeps:" later).
    for dep in taskdepdata:
        data = taskdepdata[dep]
        if data[1] == mytaskname and data[0] == pn:
            start = dep
        elif data[0] == pn:
            owntaskdeps.append(data[1])
    if start is None:
        bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")

    # We need to figure out which sysroot files we need to expose to this task.
    # This needs to match what would get restored from sstate, which is controlled
    # ultimately by calls from bitbake to setscene_depvalid().
    # That function expects a setscene dependency tree. We build a dependency tree
    # condensed to inter-sstate task dependencies, similar to that used by setscene
    # tasks. We can then call into setscene_depvalid() and decide
    # which dependencies we can "see" and should expose in the recipe specific sysroot.
    setscenedeps = copy.deepcopy(taskdepdata)

    start = set([start])

    sstatetasks = d.getVar("SSTATETASKS").split()
    # Add recipe specific tasks referenced by setscene_depvalid()
    sstatetasks.append("do_stash_locale")
    sstatetasks.append("do_deploy")

    # Debug helper: render a dependency tree as readable text (see the
    # commented-out bb.note() calls below).
    def print_dep_tree(deptree):
        data = ""
        for dep in deptree:
            deps = " " + "\n ".join(deptree[dep][3]) + "\n"
            data = data + "%s:\n %s\n %s\n%s %s\n %s\n" % (deptree[dep][0], deptree[dep][1], deptree[dep][2], deps, deptree[dep][4], deptree[dep][5])
        return data

    #bb.note("Full dep tree is:\n%s" % print_dep_tree(taskdepdata))

    #bb.note(" start2 is %s" % str(start))

    # If start is an sstate task (like do_package) we need to add in its direct dependencies
    # else the code below won't recurse into them.
    for dep in set(start):
        for dep2 in setscenedeps[dep][3]:
            start.add(dep2)
        start.remove(dep)

    #bb.note(" start3 is %s" % str(start))

    # Create collapsed do_populate_sysroot -> do_populate_sysroot tree:
    # splice out every non-sstate task, reconnecting its parents to its
    # children so only inter-sstate edges remain.
    for dep in taskdepdata:
        data = setscenedeps[dep]
        if data[1] not in sstatetasks:
            for dep2 in setscenedeps:
                data2 = setscenedeps[dep2]
                if dep in data2[3]:
                    data2[3].update(setscenedeps[dep][3])
                    data2[3].remove(dep)
            if dep in start:
                start.update(setscenedeps[dep][3])
                start.remove(dep)
            del setscenedeps[dep]

    # Remove circular references
    for dep in setscenedeps:
        if dep in setscenedeps[dep][3]:
            setscenedeps[dep][3].remove(dep)

    #bb.note("Computed dep tree is:\n%s" % print_dep_tree(setscenedeps))
    #bb.note(" start is %s" % str(start))

    # Direct dependencies should be present and can be depended upon
    for dep in sorted(set(start)):
        if setscenedeps[dep][1] == "do_populate_sysroot":
            if dep not in configuredeps:
                configuredeps.append(dep)
    bb.note("Direct dependencies are %s" % str(configuredeps))
    #bb.note(" or %s" % str(start))

    msgbuf = []
    # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
    # for ones that would be restored from sstate.
    done = list(start)
    next = list(start)
    while next:
        new = []
        for dep in next:
            data = setscenedeps[dep]
            for datadep in data[3]:
                if datadep in done:
                    continue
                taskdeps = {}
                taskdeps[dep] = setscenedeps[dep][:2]
                taskdeps[datadep] = setscenedeps[datadep][:2]
                retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
                if retval:
                    msgbuf.append("Skipping setscene dependency %s for installation into the sysroot" % datadep)
                    continue
                done.append(datadep)
                new.append(datadep)
                if datadep not in configuredeps and setscenedeps[datadep][1] == "do_populate_sysroot":
                    configuredeps.append(datadep)
                    msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
                else:
                    msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
        next = new

    # This logging is too verbose for day to day use sadly
    #bb.debug(2, "\n".join(msgbuf))

    depdir = recipesysrootnative + "/installeddeps"
    bb.utils.mkdirhier(depdir)
    bb.utils.mkdirhier(sharedmanifests)

    lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")

    fixme = {}
    seendirs = set()
    postinsts = []
    multilibs = {}
    manifests = {}
    # All files that we're going to be installing, to find conflicts.
    fileset = {}

    # Sweep for dependencies whose manifest has vanished (symlink now dangles)
    # and remove their files; remember what was removed to invalidate stamps.
    invalidate_tasks = set()
    for f in os.listdir(depdir):
        removed = []
        if not f.endswith(".complete"):
            continue
        f = depdir + "/" + f
        if os.path.islink(f) and not os.path.exists(f):
            bb.note("%s no longer exists, removing from sysroot" % f)
            lnk = os.readlink(f.replace(".complete", ""))
            sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
            os.unlink(f)
            os.unlink(f.replace(".complete", ""))
            removed.append(os.path.basename(f.replace(".complete", "")))

        # If we've removed files from the sysroot above, the task that installed them may still
        # have a stamp file present for the task. This is probably invalid right now but may become
        # valid again if the user were to change configuration back for example. Since we've removed
        # the files a task might need, remove the stamp file too to force it to rerun.
        # YOCTO #14790
        if removed:
            for i in glob.glob(depdir + "/index.*"):
                if i.endswith("." + mytaskname):
                    continue
                with open(i, "r") as f:
                    for l in f:
                        if l.startswith("TaskDeps:"):
                            continue
                        l = l.strip()
                        if l in removed:
                            invalidate_tasks.add(i.rsplit(".", 1)[1])
                            break
    for t in invalidate_tasks:
        bb.note("Invalidating stamps for task %s" % t)
        bb.build.clean_stamp(t, d)

    installed = []
    for dep in configuredeps:
        c = setscenedeps[dep][0]
        if mytaskname in ["do_sdk_depends", "do_populate_sdk_ext"] and c.endswith("-initial"):
            bb.note("Skipping initial setscene dependency %s for installation into the sysroot" % c)
            continue
        installed.append(c)

    # We want to remove anything which this task previously installed but is no longer a dependency
    taskindex = depdir + "/" + "index." + mytaskname
    if os.path.exists(taskindex):
        potential = []
        with open(taskindex, "r") as f:
            for l in f:
                l = l.strip()
                if l not in installed:
                    fl = depdir + "/" + l
                    if not os.path.exists(fl):
                        # Was likely already uninstalled
                        continue
                    potential.append(l)
        # We need to ensure no other task needs this dependency. We hold the sysroot
        # lock so we can search the indexes to check
        if potential:
            for i in glob.glob(depdir + "/index.*"):
                if i.endswith("." + mytaskname):
                    continue
                with open(i, "r") as f:
                    for l in f:
                        if l.startswith("TaskDeps:"):
                            prevtasks = l.split()[1:]
                            if mytaskname in prevtasks:
                                # We're a dependency of this task so we can clear items out the sysroot
                                break
                        l = l.strip()
                        if l in potential:
                            potential.remove(l)
        for l in potential:
            fl = depdir + "/" + l
            bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
            lnk = os.readlink(fl)
            sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
            os.unlink(fl)
            os.unlink(fl + ".complete")

    msg_exists = []
    msg_adding = []

    # Handle all removals first since files may move between recipes
    for dep in configuredeps:
        c = setscenedeps[dep][0]
        if c not in installed:
            continue
        taskhash = setscenedeps[dep][5]
        taskmanifest = depdir + "/" + c + "." + taskhash

        if os.path.exists(depdir + "/" + c):
            lnk = os.readlink(depdir + "/" + c)
            if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
                continue
            else:
                bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
                sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
                os.unlink(depdir + "/" + c)
                if os.path.lexists(depdir + "/" + c + ".complete"):
                    os.unlink(depdir + "/" + c + ".complete")
        elif os.path.lexists(depdir + "/" + c):
            os.unlink(depdir + "/" + c)

    binfiles = {}
    # Now handle installs
    for dep in sorted(configuredeps):
        c = setscenedeps[dep][0]
        if c not in installed:
            continue
        taskhash = setscenedeps[dep][5]
        taskmanifest = depdir + "/" + c + "." + taskhash

        if os.path.exists(depdir + "/" + c):
            lnk = os.readlink(depdir + "/" + c)
            if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
                msg_exists.append(c)
                continue

        msg_adding.append(c)

        # Symlink first; the ".complete" marker is only created at the very
        # end, so an interrupted install is detected as stale next time.
        os.symlink(c + "." + taskhash, depdir + "/" + c)

        manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "populate_sysroot", d, multilibs)
        if d2 is not d:
            # If we don't do this, the recipe sysroot will be placed in the wrong WORKDIR for multilibs
            # We need a consistent WORKDIR for the image
            d2.setVar("WORKDIR", d.getVar("WORKDIR"))
        destsysroot = d2.getVar("RECIPE_SYSROOT")
        # We put allarch recipes into the default sysroot
        if manifest and "allarch" in manifest:
            destsysroot = d.getVar("RECIPE_SYSROOT")

        native = False
        if c.endswith("-native") or "-cross-" in c or "-crosssdk" in c:
            native = True

        if manifest:
            newmanifest = collections.OrderedDict()
            targetdir = destsysroot
            if native:
                targetdir = recipesysrootnative
            if targetdir not in fixme:
                fixme[targetdir] = []
            fm = fixme[targetdir]

            with open(manifest, "r") as f:
                manifests[dep] = manifest
                for l in f:
                    l = l.strip()
                    if l.endswith("/fixmepath"):
                        fm.append(l)
                        continue
                    if l.endswith("/fixmepath.cmd"):
                        continue
                    dest = l.replace(stagingdir, "")
                    dest = "/" + "/".join(dest.split("/")[3:])
                    newmanifest[l] = targetdir + dest

                    # Check if files have already been installed by another
                    # recipe and abort if they have, explaining what recipes are
                    # conflicting.
                    hashname = targetdir + dest
                    if not hashname.endswith("/"):
                        if hashname in fileset:
                            bb.fatal("The file %s is installed by both %s and %s, aborting" % (dest, c, fileset[hashname]))
                        else:
                            fileset[hashname] = c

            # Having multiple identical manifests in each sysroot eats diskspace so
            # create a shared pool of them and hardlink if we can.
            # We create the manifest in advance so that if something fails during installation,
            # or the build is interrupted, subsequent execution can clean up.
            sharedm = sharedmanifests + "/" + os.path.basename(taskmanifest)
            if not os.path.exists(sharedm):
                smlock = bb.utils.lockfile(sharedm + ".lock")
                # Can race here. You'd think it just means we may not end up with all copies hardlinked to each other
                # but python can lose file handles so we need to do this under a lock.
                if not os.path.exists(sharedm):
                    with open(sharedm, 'w') as m:
                        for l in newmanifest:
                            dest = newmanifest[l]
                            m.write(dest.replace(workdir + "/", "") + "\n")
                bb.utils.unlockfile(smlock)
            try:
                os.link(sharedm, taskmanifest)
            except OSError as err:
                # EXDEV: shared pool and sysroot on different filesystems, so
                # hardlinking is impossible - fall back to a plain copy.
                if err.errno == errno.EXDEV:
                    bb.utils.copyfile(sharedm, taskmanifest)
                else:
                    raise
            # Finally actually install the files
            for l in newmanifest:
                dest = newmanifest[l]
                if l.endswith("/"):
                    staging_copydir(l, targetdir, dest, seendirs)
                    continue
                if "/bin/" in l or "/sbin/" in l:
                    # defer /*bin/* files until last in case they need libs
                    binfiles[l] = (targetdir, dest)
                else:
                    staging_copyfile(l, targetdir, dest, postinsts, seendirs)

    # Handle deferred binfiles
    for l in binfiles:
        (targetdir, dest) = binfiles[l]
        staging_copyfile(l, targetdir, dest, postinsts, seendirs)

    bb.note("Installed into sysroot: %s" % str(msg_adding))
    bb.note("Skipping as already exists in sysroot: %s" % str(msg_exists))

    for f in fixme:
        staging_processfixme(fixme[f], f, recipesysroot, recipesysrootnative, d)

    for p in sorted(postinsts):
        bb.note("Running postinst {}, output:\n{}".format(p, subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)))

    # Mark every successfully installed dependency as complete.
    for dep in manifests:
        c = setscenedeps[dep][0]
        os.symlink(manifests[dep], depdir + "/" + c + ".complete")

    with open(taskindex, "w") as f:
        f.write("TaskDeps: " + " ".join(owntaskdeps) + "\n")
        for l in sorted(installed):
            f.write(l + "\n")

    bb.utils.unlockfile(lock)
}
|
||||
# Architecture/OS variables only affect which manifests are selected, not the
# install logic itself; BB_TASKDEPDATA changes on every rebuild and must not
# invalidate the task hash.
extend_recipe_sysroot[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"

# Pull in the do_populate_sysroot output of every build-time dependency.
do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
python do_prepare_recipe_sysroot () {
    # Thin task wrapper so extend_recipe_sysroot runs as its own schedulable task.
    bb.build.exec_func("extend_recipe_sysroot", d)
}
addtask do_prepare_recipe_sysroot before do_configure after do_fetch
|
||||
|
||||
python staging_taskhandler() {
    # Event handler (bb.event.RecipeTaskPreProcess): ensure any task that
    # consumes sysroot contents runs extend_recipe_sysroot first, except
    # do_prepare_recipe_sysroot itself which calls it as its body.
    # 'e' is the event object injected by BitBake for the handler.
    bbtasks = e.tasklist
    for task in bbtasks:
        deps = d.getVarFlag(task, "depends")
        if task != 'do_prepare_recipe_sysroot' and (task == "do_configure" or (deps and "populate_sysroot" in deps)):
            d.prependVarFlag(task, "prefuncs", "extend_recipe_sysroot ")
}
staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
addhandler staging_taskhandler
|
||||
|
||||
|
||||
#
|
||||
# Target build output, stored in do_populate_sysroot or do_package can depend
|
||||
# not only upon direct dependencies but also indirect ones. A good example is
|
||||
# linux-libc-headers. The toolchain depends on this but most target recipes do
|
||||
# not. There are some headers which are not used by the toolchain build and do
|
||||
# not change the toolchain task output, hence the task hashes can change without
|
||||
# changing the sysroot output of that recipe yet they can influence others.
|
||||
#
|
||||
# A specific example is rtc.h which can change rtcwake.c in util-linux but is not
|
||||
# used in the glibc or gcc build. To account for this, we need to account for the
|
||||
# populate_sysroot hashes in the task output hashes.
|
||||
#
|
||||
python target_add_sysroot_deps () {
    # Mix the do_populate_sysroot task hashes of target dependencies into the
    # hash-equivalence signature data for do_populate_sysroot/do_package so
    # that indirect sysroot changes (see comment above) propagate correctly.
    current_task = "do_" + d.getVar("BB_CURRENTTASK")
    if current_task not in ["do_populate_sysroot", "do_package"]:
        return

    pn = d.getVar("PN")
    if pn.endswith("-native"):
        return

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    deps = {}
    # Collect target-class populate_sysroot dependencies only (skip native,
    # initial and cross recipes and ourselves); dep[6] is the unihash.
    for dep in taskdepdata.values():
        if dep[1] == "do_populate_sysroot" and not dep[0].endswith(("-native", "-initial")) and "-cross-" not in dep[0] and dep[0] != pn:
            deps[dep[0]] = dep[6]

    # Sorted so the extra signature data is deterministic.
    d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
}
SSTATECREATEFUNCS += "target_add_sysroot_deps"
|
||||
|
||||
181
sources/poky/meta/classes-global/uninative.bbclass
Normal file
181
sources/poky/meta/classes-global/uninative.bbclass
Normal file
@@ -0,0 +1,181 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}${@bb.utils.contains('BUILD_ARCH', 'ppc64le', 'ld64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'riscv64', 'ld-linux-riscv64-lp64d.so.1', '', d)}"
|
||||
UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
|
||||
|
||||
UNINATIVE_URL ?= "unset"
|
||||
UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc-${UNINATIVE_VERSION}.tar.xz"
|
||||
# Example checksums
|
||||
#UNINATIVE_CHECKSUM[aarch64] = "dead"
|
||||
#UNINATIVE_CHECKSUM[i686] = "dead"
|
||||
#UNINATIVE_CHECKSUM[x86_64] = "dead"
|
||||
UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
|
||||
|
||||
# Enabling uninative will change the following variables so they need to go the parsing ignored variables list to prevent multiple recipe parsing
|
||||
BB_HASHCONFIG_IGNORE_VARS += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
|
||||
|
||||
addhandler uninative_event_fetchloader
|
||||
uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
|
||||
|
||||
addhandler uninative_event_enable
|
||||
uninative_event_enable[eventmask] = "bb.event.ConfigParsed"
|
||||
|
||||
python uninative_event_fetchloader() {
    """
    This event fires on the parent and will try to fetch the tarball if the
    loader isn't already present.
    """

    # Per-architecture checksum is mandatory; it names the download subdir
    # and verifies the tarball.
    chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH"))
    if not chksum:
        bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH"))

    loader = d.getVar("UNINATIVE_LOADER")
    loaderchksum = loader + ".chksum"
    # Fast path: loader already unpacked with the expected checksum.
    if os.path.exists(loader) and os.path.exists(loaderchksum):
        with open(loaderchksum, "r") as f:
            readchksum = f.read().strip()
        if readchksum == chksum:
            if "uninative" not in d.getVar("SSTATEPOSTUNPACKFUNCS"):
                enable_uninative(d)
            return

    import subprocess
    try:
        # Save and restore cwd as Fetch.download() does a chdir()
        olddir = os.getcwd()

        tarball = d.getVar("UNINATIVE_TARBALL")
        tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
        tarballpath = os.path.join(tarballdir, tarball)

        if not os.path.exists(tarballpath + ".done"):
            bb.utils.mkdirhier(tarballdir)
            if d.getVar("UNINATIVE_URL") == "unset":
                bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")

            localdata = bb.data.createCopy(d)
            localdata.setVar('FILESPATH', "")
            localdata.setVar('DL_DIR', tarballdir)
            # Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work
            # and we can't easily put 'chksum' into the url path from a url parameter with
            # the current fetcher url handling
            premirrors = bb.fetch2.mirror_from_string(localdata.getVar("PREMIRRORS"))
            for line in premirrors:
                try:
                    (find, replace) = line
                except ValueError:
                    continue
                if find.startswith("http"):
                    localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} %s/uninative/%s/${UNINATIVE_TARBALL}" % (replace, chksum))

            srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
            bb.note("Fetching uninative binary shim %s (will check PREMIRRORS first)" % srcuri)

            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
            fetcher.download()
            localpath = fetcher.localpath(srcuri)
            if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath):
                # Follow the symlink behavior from the bitbake fetch2.
                # This will cover the case where an existing symlink is broken
                # as well as if there are two processes trying to create it
                # at the same time.
                if os.path.islink(tarballpath):
                    # Broken symbolic link
                    os.unlink(tarballpath)

                # Deal with two processes trying to make symlink at once
                try:
                    os.symlink(localpath, tarballpath)
                except FileExistsError:
                    pass

        # ldd output is "ldd (Ubuntu GLIBC 2.23-0ubuntu10) 2.23", extract last option from first line
        glibcver = subprocess.check_output(["ldd", "--version"]).decode('utf-8').split('\n')[0].split()[-1]
        if bb.utils.vercmp_string(d.getVar("UNINATIVE_MAXGLIBCVERSION"), glibcver) < 0:
            raise RuntimeError("Your host glibc version (%s) is newer than that in uninative (%s). Disabling uninative so that sstate is not corrupted." % (glibcver, d.getVar("UNINATIVE_MAXGLIBCVERSION")))

        # Unpack the shim and relocate its loader/libc to the staging location.
        cmd = d.expand("\
mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; \
cd ${UNINATIVE_STAGING_DIR}-uninative; \
tar -xJf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; \
${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux \
${UNINATIVE_LOADER} \
${UNINATIVE_LOADER} \
${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so*" % chksum)
        subprocess.check_output(cmd, shell=True)

        # Record the checksum last so a partial unpack is retried next time.
        with open(loaderchksum, "w") as f:
            f.write(chksum)

        enable_uninative(d)

    except RuntimeError as e:
        bb.warn(str(e))
    except bb.fetch2.BBFetchException as exc:
        bb.warn("Disabling uninative as unable to fetch uninative tarball: %s" % str(exc))
        bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
    except subprocess.CalledProcessError as exc:
        bb.warn("Disabling uninative as unable to install uninative tarball: %s" % str(exc))
        bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
    finally:
        os.chdir(olddir)
}
|
||||
|
||||
python uninative_event_enable() {
    """
    This event handler is called in the workers and is responsible for setting
    up uninative if a loader is found.
    """
    enable_uninative(d)
}
|
||||
|
||||
def enable_uninative(d):
    """Activate the uninative toolchain shim if its loader has been unpacked.

    Rewrites NATIVELSBSTRING so native sstate becomes distro-independent,
    registers uninative_changeinterp to patch unpacked sstate binaries, and
    forces newly linked native binaries to use the uninative dynamic linker.
    The vardepvalueexclude/vardepsexclude flags keep these (host-specific)
    additions out of task signatures.
    """
    loader = d.getVar("UNINATIVE_LOADER")
    if os.path.exists(loader):
        bb.debug(2, "Enabling uninative")
        d.setVar("NATIVELSBSTRING", "universal%s" % oe.utils.host_gcc_version(d))
        d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp")
        d.appendVarFlag("SSTATEPOSTUNPACKFUNCS", "vardepvalueexclude", "| uninative_changeinterp")
        d.appendVar("BUILD_LDFLAGS", " -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER} -pthread")
        d.appendVarFlag("BUILD_LDFLAGS", "vardepvalueexclude", "| -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER} -pthread")
        d.appendVarFlag("BUILD_LDFLAGS", "vardepsexclude", "UNINATIVE_LOADER")
        d.prependVar("PATH", "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
|
||||
|
||||
python uninative_changeinterp () {
    # Post-sstate-unpack hook: rewrite the ELF interpreter of every dynamic
    # executable in the unpacked native/cross sstate to the uninative loader
    # so binaries built on other hosts run on this one.
    import subprocess
    import stat
    import oe.qa

    # Only native/cross/crosssdk output contains host-executable binaries.
    if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)):
        return

    sstateinst = d.getVar('SSTATE_INSTDIR')
    for walkroot, dirs, files in os.walk(sstateinst):
        for file in files:
            # Shared libraries have no interpreter; skip them by name.
            if file.endswith(".so") or ".so." in file:
                continue
            f = os.path.join(walkroot, file)
            if os.path.islink(f):
                continue
            s = os.stat(f)
            # Only candidates with any execute bit set can be executables.
            if not ((s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH)):
                continue
            elf = oe.qa.ELFFile(f)
            try:
                elf.open()
            except oe.qa.NotELFFileError:
                continue
            if not elf.isDynamic():
                continue

            # patchelf needs write permission; restore the original mode after.
            os.chmod(f, s[stat.ST_MODE] | stat.S_IWUSR)
            subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
            os.chmod(f, s[stat.ST_MODE])
}
|
||||
60
sources/poky/meta/classes-global/utility-tasks.bbclass
Normal file
60
sources/poky/meta/classes-global/utility-tasks.bbclass
Normal file
@@ -0,0 +1,60 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
addtask listtasks
do_listtasks[nostamp] = "1"
python do_listtasks() {
    # List every defined task with its [doc] description, aligned in columns.
    taskdescs = {}
    maxlen = 0
    for e in d.keys():
        if d.getVarFlag(e, 'task'):
            maxlen = max(maxlen, len(e))
            if e.endswith('_setscene'):
                # Setscene variants reuse the doc of the base task name.
                desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc') or '')
            else:
                desc = d.getVarFlag(e, 'doc') or ''
            taskdescs[e] = desc

    tasks = sorted(taskdescs.keys())
    for taskname in tasks:
        bb.plain("%s %s" % (taskname.ljust(maxlen), taskdescs[taskname]))
}
|
||||
|
||||
# Extra shell/python functions to run during do_clean (recipe hook point).
CLEANFUNCS ?= ""

# Redirect logs for clean so they survive removal of WORKDIR below.
T:task-clean = "${LOG_DIR}/cleanlogs/${PN}"
addtask clean
do_clean[nostamp] = "1"
python do_clean() {
    """clear the build and temp directories"""
    dir = d.expand("${WORKDIR}")
    bb.note("Removing " + dir)
    oe.path.remove(dir)

    # Glob removes all stamp files/variants for this recipe.
    dir = "%s.*" % d.getVar('STAMP')
    bb.note("Removing " + dir)
    oe.path.remove(dir)

    for f in (d.getVar('CLEANFUNCS') or '').split():
        bb.build.exec_func(f, d)
}
|
||||
|
||||
addtask checkuri
do_checkuri[nostamp] = "1"
do_checkuri[network] = "1"
python do_checkuri() {
    """Validate that every SRC_URI entry is reachable upstream.

    Runs the fetcher's checkstatus() (HEAD-style probe, no download) and
    aborts the build with the fetcher's error on any failure.
    """
    src_uri = (d.getVar('SRC_URI') or "").split()
    # Idiomatic emptiness test instead of len(...) == 0.
    if not src_uri:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.checkstatus()
    except bb.fetch2.BBFetchException as e:
        bb.fatal(str(e))
}
|
||||
|
||||
|
||||
369
sources/poky/meta/classes-global/utils.bbclass
Normal file
369
sources/poky/meta/classes-global/utils.bbclass
Normal file
@@ -0,0 +1,369 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
oe_soinstall() {
	# Purpose: Install shared library file and
	# create the necessary links
	# Example: oe_soinstall libfoo.so.1.2.3 ${D}${libdir}
	libname=`basename $1`
	case "$libname" in
	*.so)
		# Fixed error-message typo ("must haved versioned").
		bbfatal "oe_soinstall: Shared library must have a versioned filename (e.g. libfoo.so.1.2.3)"
		;;
	esac
	install -m 755 $1 $2/$libname
	# Extract the SONAME from the ELF dynamic section to name the runtime link.
	sonamelink=`${READELF} -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
	# Quoted so the test is well-formed even if readelf output is unexpected.
	if [ -z "$sonamelink" ]; then
		bbfatal "oe_soinstall: $libname is missing ELF tag 'SONAME'."
	fi
	solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
	ln -sf $libname $2/$sonamelink
	ln -sf $libname $2/$solink
}
|
||||
|
||||
oe_libinstall() {
	# Purpose: Install a library, in all its forms
	# Example
	#
	# oe_libinstall libltdl ${STAGING_LIBDIR}/
	# oe_libinstall -C src/libblah libblah ${D}/${libdir}/
	#
	# Options: -C dir  change into dir first
	#          -s      silent (don't echo commands)
	#          -a      require/install the static archive
	#          -so     require a shared library to be found
	dir=""
	libtool=""
	silent=""
	require_static=""
	require_shared=""
	while [ "$#" -gt 0 ]; do
		case "$1" in
		-C)
			shift
			dir="$1"
			;;
		-s)
			silent=1
			;;
		-a)
			require_static=1
			;;
		-so)
			require_shared=1
			;;
		-*)
			bbfatal "oe_libinstall: unknown option: $1"
			;;
		*)
			break;
			;;
		esac
		shift
	done

	libname="$1"
	shift
	destpath="$1"
	if [ -z "$destpath" ]; then
		bbfatal "oe_libinstall: no destination path specified"
	fi

	# Echo-then-run helper honouring the -s flag.
	__runcmd () {
		if [ -z "$silent" ]; then
			echo >&2 "oe_libinstall: $*"
		fi
		$*
	}

	if [ -z "$dir" ]; then
		dir=`pwd`
	fi

	dotlai=$libname.lai

	# Sanity check that the libname.lai is unique
	number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
	if [ $number_of_files -gt 1 ]; then
		bbfatal "oe_libinstall: $dotlai is not unique in $dir"
	fi


	dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
	olddir=`pwd`
	__runcmd cd $dir

	lafile=$libname.la

	# If such file doesn't exist, try to cut version suffix
	if [ ! -f "$lafile" ]; then
		libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
		# BUGFIX: was "lafile1=$libname.la", which re-tested the original
		# (already known missing) name so the fallback never triggered.
		lafile1=$libname1.la
		if [ -f "$lafile1" ]; then
			libname=$libname1
			lafile=$lafile1
		fi
	fi

	if [ -f "$lafile" ]; then
		# libtool archive
		eval `cat $lafile|grep "^library_names="`
		libtool=1
	else
		library_names="$libname.so* $libname.dll.a $libname.*.dylib"
	fi

	__runcmd install -d $destpath/
	dota=$libname.a
	if [ -f "$dota" -o -n "$require_static" ]; then
		rm -f $destpath/$dota
		__runcmd install -m 0644 $dota $destpath/
	fi
	if [ -f "$dotlai" -a -n "$libtool" ]; then
		rm -f $destpath/$libname.la
		__runcmd install -m 0644 $dotlai $destpath/$libname.la
	fi

	for name in $library_names; do
		files=`eval echo $name`
		for f in $files; do
			if [ ! -e "$f" ]; then
				if [ -n "$libtool" ]; then
					bbfatal "oe_libinstall: $dir/$f not found."
				fi
			elif [ -L "$f" ]; then
				# Preserve symlinks as symlinks.
				__runcmd cp -P "$f" $destpath/
			elif [ ! -L "$f" ]; then
				libfile="$f"
				rm -f $destpath/$libfile
				__runcmd install -m 0755 $libfile $destpath/
			fi
		done
	done

	if [ -z "$libfile" ]; then
		if [ -n "$require_shared" ]; then
			bbfatal "oe_libinstall: unable to locate shared library"
		fi
	elif [ -z "$libtool" ]; then
		# special case hack for non-libtool .so.#.#.# links
		baselibfile=`basename "$libfile"`
		if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
			sonamelink=`${READELF} -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
			solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
			if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
				__runcmd ln -sf $baselibfile $destpath/$sonamelink
			fi
			__runcmd ln -sf $baselibfile $destpath/$solink
		fi
	fi

	__runcmd cd "$olddir"
}
|
||||
|
||||
create_cmdline_wrapper () {
	# Create a wrapper script where commandline options are needed
	#
	# These are useful to work around relocation issues, by passing extra options
	# to a program
	#
	# Usage: create_cmdline_wrapper FILENAME <extra-options>

	cmd=$1
	shift

	echo "Generating wrapper script for $cmd"

	# The real binary is moved aside; the wrapper takes its place.
	mv $cmd $cmd.real
	cmdname=`basename $cmd`
	dirname=`dirname $cmd`
	cmdoptions=$@
	if [ "${base_prefix}" != "" ]; then
		# Rewrite absolute prefix paths in the options to be relative to the
		# wrapper's own (runtime-resolved) location so relocation works.
		relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
		cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
	fi
	cat <<END >$cmd
#!/bin/bash
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
exec -a \$realdir/$cmdname \$realdir/$cmdname.real $cmdoptions "\$@"
END
	chmod +x $cmd
}
|
||||
|
||||
create_cmdline_shebang_wrapper () {
	# Create a wrapper script where commandline options are needed
	#
	# These are useful to work around shebang relocation issues, where shebangs are too
	# long or have arguments in them, thus preventing them from using the /usr/bin/env
	# shebang
	#
	# Usage: create_cmdline_shebang_wrapper FILENAME <extra-options>

	cmd=$1
	shift

	echo "Generating wrapper script for $cmd"

	# Strip #! and get remaining interpreter + arg
	argument="$(sed -ne 's/^#! *//p;q' $cmd)"
	# strip the shebang from the real script as we do not want it to be usable anyway
	tail -n +2 $cmd > $cmd.real
	chown --reference=$cmd $cmd.real
	chmod --reference=$cmd $cmd.real
	rm -f $cmd
	cmdname=$(basename $cmd)
	dirname=$(dirname $cmd)
	cmdoptions=$@
	if [ "${base_prefix}" != "" ]; then
		# Rewrite absolute prefix paths relative to the wrapper's runtime
		# location so the wrapper survives relocation.
		relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
		cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
	fi
	cat <<END >$cmd
#!/usr/bin/env bash
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
exec -a \$realdir/$cmdname $argument \$realdir/$cmdname.real $cmdoptions "\$@"
END
	chmod +x $cmd
}
|
||||
|
||||
create_wrapper () {
	# Create a wrapper script where extra environment variables are needed
	#
	# These are useful to work around relocation issues, by setting environment
	# variables which point to paths in the filesystem.
	#
	# Usage: create_wrapper FILENAME [[VAR=VALUE]..]

	cmd=$1
	shift

	echo "Generating wrapper script for $cmd"

	# The real binary is moved aside; the wrapper takes its place.
	mv $cmd $cmd.real
	cmdname=`basename $cmd`
	dirname=`dirname $cmd`
	exportstring=$@
	if [ "${base_prefix}" != "" ]; then
		# Rewrite absolute prefix paths in VAR=VALUE pairs relative to the
		# wrapper's runtime location so the wrapper survives relocation.
		relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
		exportstring=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
	fi
	cat <<END >$cmd
#!/bin/bash
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
export $exportstring
exec -a "\$0" \$realdir/$cmdname.real "\$@"
END
	chmod +x $cmd
}
|
||||
|
||||
# Copy files/directories from $1 to $2 but using hardlinks
# (preserve symlinks)
hardlinkdir () {
	from=$1
	to=$2
	# cpio pass-through mode: -p copy, -d make directories, -l hardlink
	# instead of copying, -u overwrite unconditionally; --null pairs with
	# find -print0 to handle filenames containing whitespace.
	(cd $from; find . -print0 | cpio --null -pdlu $to)
}
|
||||
|
||||
|
||||
def check_app_exists(app, d):
    """
    Return True if the given application can be found on PATH.

    app: command string (may contain BitBake variable references and
         arguments; only the first word after expansion is checked).
    d:   the BitBake datastore used for expansion and to read PATH.
    """
    # Only the executable name matters, so drop any arguments.
    app = d.expand(app).split()[0].strip()
    path = d.getVar('PATH')
    return bool(bb.utils.which(path, app))
|
||||
|
||||
def explode_deps(s):
    """Split a dependency string (with optional version constraints) into a
    list of dependency names; thin convenience wrapper around bb.utils."""
    return bb.utils.explode_deps(s)
|
||||
|
||||
def base_set_filespath(path, d):
    """
    Build the FILESPATH search string.

    path: list of base directories to search for recipe files.
    d:    the BitBake datastore (FILESEXTRAPATHS, FILESOVERRIDES are read).

    Returns a ":"-joined list of <dir>/<override> combinations, ordered so
    that more specific overrides are searched first.
    """
    filespath = []
    extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
    # Remove default flag which was used for checking
    extrapaths = extrapaths.replace("__default:", "")
    # Don't prepend empty strings to the path list
    if extrapaths != "":
        path = extrapaths.split(":") + path
    # The ":" ensures we have an 'empty' override
    overrides = (":" + (d.getVar("FILESOVERRIDES") or "")).split(":")
    # Reverse so the last (most specific) override is searched first.
    overrides.reverse()
    for o in overrides:
        for p in path:
            if p != "":
                filespath.append(os.path.join(p, o))
    return ":".join(filespath)
|
||||
|
||||
def extend_variants(d, var, extend, delim=':'):
    """Return a string of all bb class extend variants for the given extend.

    d:      the BitBake datastore.
    var:    variable name holding space-separated "<extend><delim><variant>"
            entries (e.g. BBCLASSEXTEND-style values).
    extend: the extend name to filter on (e.g. "multilib").
    delim:  separator between extend name and variant within an entry.
    """
    variants = []
    whole = d.getVar(var) or ""
    for ext in whole.split():
        eext = ext.split(delim)
        # Only entries of the form "<extend><delim><variant>" match.
        if len(eext) > 1 and eext[0] == extend:
            variants.append(eext[1])
    return " ".join(variants)
|
||||
|
||||
def multilib_pkg_extend(d, pkg):
    """
    Expand a package name with its multilib-prefixed counterparts.

    Returns "pkg" unchanged when no MULTILIB_VARIANTS are configured,
    otherwise "pkg <variant1>-pkg <variant2>-pkg ..." as a single
    space-separated string.
    """
    variants = (d.getVar("MULTILIB_VARIANTS") or "").split()
    if not variants:
        return pkg
    pkgs = pkg
    for v in variants:
        pkgs = pkgs + " " + v + "-" + pkg
    return pkgs
|
||||
|
||||
def get_multilib_datastore(variant, d):
    """Return a datastore configured for the given multilib variant
    (or the non-multilib configuration when variant is "");
    thin wrapper around oe.utils."""
    return oe.utils.get_multilib_datastore(variant, d)
|
||||
|
||||
def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
    """Return a string of all ${var} in all multilib tune configurations.

    d:          the BitBake datastore.
    var:        variable name to collect across variants.
    unique:     drop duplicate values while preserving first-seen order.
    need_split: split each variant's value on delim before collecting.
    delim:      separator used both for splitting and implicitly (space)
                when joining the result.
    """
    values = []
    variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
    for variant in variants:
        localdata = get_multilib_datastore(variant, d)
        # We need WORKDIR to be consistent with the original datastore
        localdata.setVar("WORKDIR", d.getVar("WORKDIR"))
        value = localdata.getVar(var) or ""
        if value != "":
            if need_split:
                for item in value.split(delim):
                    values.append(item)
            else:
                values.append(value)
    if unique:
        # we do this to keep order as much as possible
        ret = []
        for value in values:
            if not value in ret:
                ret.append(value)
    else:
        ret = values
    return " ".join(ret)
|
||||
|
||||
def all_multilib_tune_list(vars, d):
    """
    Return a list of ${VAR} for each variable VAR in vars from each
    multilib tune configuration.
    Is safe to be called from a multilib recipe/context as it can
    figure out the original tune and remove the multilib overrides.
    """
    values = {}
    for v in vars:
        values[v] = []
    values['ml'] = ['']

    variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
    for item in variants:
        localdata = get_multilib_datastore(item, d)
        # Collect every requested variable for this variant. Previously only
        # values[v] for the loop variable leaked from the initialisation loop
        # above was appended, so with multiple vars all but the last stayed
        # empty (and an empty vars list raised NameError).
        for v in vars:
            values[v].append(localdata.getVar(v))
        values['ml'].append(item)
    return values
all_multilib_tune_list[vardepsexclude] = "OVERRIDES"
|
||||
|
||||
# If the user hasn't set up their name/email, set some defaults
|
||||
check_git_config() {
|
||||
if ! git config user.email > /dev/null ; then
|
||||
git config --local user.email "${PATCH_GIT_USER_EMAIL}"
|
||||
fi
|
||||
if ! git config user.name > /dev/null ; then
|
||||
git config --local user.name "${PATCH_GIT_USER_NAME}"
|
||||
fi
|
||||
}
|
||||
Reference in New Issue
Block a user