Complete Yocto mirror with license table for TQMa6UL (2038-compliance)
- 264 license table entries with exact download URLs (224/264 resolved) - Complete sources/ directory with all BitBake recipes - Build configuration: tqma6ul-multi-mba6ulx, spaetzle (musl) - Full traceability for Softwarefreigabeantrag - GCC 13.4.0, Linux 6.6.102, U-Boot 2023.04, musl 1.2.4 - License distribution: GPL-2.0 (24), MIT (23), GPL-2.0+ (18), BSD-3 (16)
This commit is contained in:
17
sources/poky/meta/COPYING.MIT
Normal file
17
sources/poky/meta/COPYING.MIT
Normal file
@@ -0,0 +1,17 @@
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
749
sources/poky/meta/classes-global/base.bbclass
Normal file
749
sources/poky/meta/classes-global/base.bbclass
Normal file
@@ -0,0 +1,749 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Task run when none is named on the bitbake command line.
BB_DEFAULT_TASK ?= "build"
# Which recipe variant is being built (overridden to class-native etc.).
CLASSOVERRIDE ?= "class-target"

inherit patch
inherit staging

inherit mirrors
inherit utils
inherit utility-tasks
inherit logging

# Configure arguments collected from enabled PACKAGECONFIG options;
# consumed by the autotools/cmake/meson classes.
PACKAGECONFIG_CONFARGS ??= ""

inherit metadata_scm
|
||||
|
||||
def lsb_distro_identifier(d):
    """Return the host distribution identifier string.

    LSB_DISTRO_ADJUST may name a global function used to post-process the
    raw identifier; an unknown name is silently ignored, matching the
    best-effort nature of host identification.
    """
    adjust_name = d.getVar('LSB_DISTRO_ADJUST')
    adjust_func = globals().get(adjust_name) if adjust_name else None
    return oe.lsb.distro_identifier(adjust_func)
|
||||
|
||||
# Abort the current task, logging the message via bbfatal_log.
die() {
	bbfatal_log "$*"
}

# Invoke make with EXTRA_OEMAKE, echoing the exact command line first
# so it appears in the task log.
oe_runmake_call() {
	bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
	${MAKE} ${EXTRA_OEMAKE} "$@"
}

# Invoke make and turn any failure into a fatal task error.
oe_runmake() {
	oe_runmake_call "$@" || die "oe_runmake failed"
}
|
||||
|
||||
|
||||
def get_base_dep(d):
    """Return the implicit toolchain/libc DEPENDS for a recipe.

    Recipes that set INHIBIT_DEFAULT_DEPS (e.g. ones with no compiled
    content) receive no implicit dependencies.
    """
    inhibited = d.getVar('INHIBIT_DEFAULT_DEPS', False)
    return "" if inhibited else "${BASE_DEFAULT_DEPS}"
|
||||
|
||||
# The implicit dependencies every target/nativesdk recipe gets unless it
# sets INHIBIT_DEFAULT_DEPS (see get_base_dep above).
BASE_DEFAULT_DEPS = "virtual/${HOST_PREFIX}gcc virtual/${HOST_PREFIX}compilerlibs virtual/libc"

BASEDEPENDS = ""
BASEDEPENDS:class-target = "${@get_base_dep(d)}"
BASEDEPENDS:class-nativesdk = "${@get_base_dep(d)}"

DEPENDS:prepend="${BASEDEPENDS} "

# Default search path for files fetched with file:// URLs.
FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
# THISDIR only works properly with immediate expansion as it has to run
# in the context of the location its used (:=)
THISDIR = "${@os.path.dirname(d.getVar('FILE'))}"
|
||||
|
||||
def extra_path_elements(d):
    """Build a colon-terminated PATH prefix from EXTRANATIVEPATH.

    Each entry in EXTRANATIVEPATH becomes a subdirectory of the native
    staging bindir; the resulting string is prepended to PATH.
    """
    entries = (d.getVar('EXTRANATIVEPATH') or "").split()
    return "".join("${STAGING_BINDIR_NATIVE}/" + entry + ":" for entry in entries)
|
||||
|
||||
# Put any EXTRANATIVEPATH staging directories at the front of the task PATH.
PATH:prepend = "${@extra_path_elements(d)}"
|
||||
|
||||
def get_lic_checksum_file_list(d):
    """Return a space-separated file-checksum list for LIC_FILES_CHKSUM.

    Only absolute file:// paths that live outside TMPDIR/S/B/WORKDIR are
    returned (paths inside those are already covered by SRC_URI checksums).
    Each entry has the form "<path>:<exists>" so that a file appearing or
    disappearing also perturbs the do_fetch task hash. A malformed entry
    is a fatal parse error.
    """
    filelist = []
    lic_files = d.getVar("LIC_FILES_CHKSUM") or ''
    tmpdir = d.getVar("TMPDIR")
    s = d.getVar("S")
    b = d.getVar("B")
    workdir = d.getVar("WORKDIR")

    urls = lic_files.split()
    for url in urls:
        # We only care about items that are absolute paths since
        # any others should be covered by SRC_URI.
        try:
            (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
            if method != "file" or not path:
                raise bb.fetch.MalformedUrl(url)

            if path[0] == '/':
                if path.startswith((tmpdir, s, b, workdir)):
                    # Generated during the build -- not an input to checksum.
                    continue
                filelist.append(path + ":" + str(os.path.exists(path)))
        except bb.fetch.MalformedUrl:
            bb.fatal(d.getVar('PN') + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
    return " ".join(filelist)
|
||||
|
||||
def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
    """Populate *dest* with symlinks to the host tools listed in *toolsvar*.

    Tools are located on the original (pre-bitbake) PATH taken from
    BB_ORIGENV. Existing links are kept; dead links are replaced. When
    *fatal* is True, any tool that cannot be found aborts the build.
    """
    tools = d.getVar(toolsvar).split()
    origbbenv = d.getVar("BB_ORIGENV", False)
    path = origbbenv.getVar("PATH")
    # Need to ignore our own scripts directories to avoid circular links
    for p in path.split(":"):
        if p.endswith("/scripts"):
            path = path.replace(p, "/ignoreme")
    bb.utils.mkdirhier(dest)
    notfound = []
    for tool in tools:
        desttool = os.path.join(dest, tool)
        if not os.path.exists(desttool):
            # clean up dead symlink
            if os.path.islink(desttool):
                os.unlink(desttool)
            srctool = bb.utils.which(path, tool, executable=True)
            # gcc/g++ may link to ccache on some hosts, e.g.,
            # /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
            # would return /usr/local/bin/ccache/gcc, but what we need is
            # /usr/bin/gcc, this code can check and fix that.
            if os.path.islink(srctool) and os.path.basename(os.readlink(srctool)) == 'ccache':
                srctool = bb.utils.which(path, tool, executable=True, direction=1)
            if srctool:
                os.symlink(srctool, desttool)
            else:
                notfound.append(tool)

    if notfound and fatal:
        bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n %s" % " ".join(notfound))
|
||||
|
||||
# We can't use vardepvalue against do_fetch directly since that would overwrite
# the other task dependencies so we use an indirect function.
python fetcher_hashes_dummyfunc() {
    return
}
# The fetcher hash value becomes part of do_fetch's signature via this
# no-op prefunc, so changing upstream revisions retriggers fetching.
fetcher_hashes_dummyfunc[vardepvalue] = "${@bb.fetch.get_hashvalue(d)}"

addtask fetch
do_fetch[dirs] = "${DL_DIR}"
# Checksum both the SRC_URI local files and any LIC_FILES_CHKSUM files.
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
do_fetch[prefuncs] += "fetcher_hashes_dummyfunc"
do_fetch[network] = "1"
|
||||
python base_do_fetch() {
|
||||
|
||||
src_uri = (d.getVar('SRC_URI') or "").split()
|
||||
if not src_uri:
|
||||
return
|
||||
|
||||
try:
|
||||
fetcher = bb.fetch2.Fetch(src_uri, d)
|
||||
fetcher.download()
|
||||
except bb.fetch2.BBFetchException as e:
|
||||
bb.fatal("Bitbake Fetcher Error: " + repr(e))
|
||||
}
|
||||
|
||||
addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"

# Wipe S before unpacking unless S is WORKDIR itself, in which case only
# the patches/ subdirectory is cleaned.
do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != os.path.normpath(d.getVar('WORKDIR')) else os.path.join('${S}', 'patches')}"
|
||||
|
||||
python base_do_unpack() {
|
||||
src_uri = (d.getVar('SRC_URI') or "").split()
|
||||
if not src_uri:
|
||||
return
|
||||
|
||||
try:
|
||||
fetcher = bb.fetch2.Fetch(src_uri, d)
|
||||
fetcher.unpack(d.getVar('WORKDIR'))
|
||||
except bb.fetch2.BBFetchException as e:
|
||||
bb.fatal("Bitbake Fetcher Error: " + repr(e))
|
||||
}
|
||||
|
||||
SSTATETASKS += "do_deploy_source_date_epoch"

# Copy the recorded SOURCE_DATE_EPOCH file into the sstate deploy
# directory so it can be restored from shared state on other builds.
do_deploy_source_date_epoch () {
	mkdir -p ${SDE_DEPLOYDIR}
	if [ -e ${SDE_FILE} ]; then
		echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
		cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
	else
		echo "${SDE_FILE} not found!"
	fi
}
|
||||
|
||||
python do_deploy_source_date_epoch_setscene () {
    # Restore the SOURCE_DATE_EPOCH file from shared state: move the
    # deployed copy back to SDE_FILE if present.
    sstate_setscene(d)
    bb.utils.mkdirhier(d.getVar('SDE_DIR'))
    sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
    if os.path.exists(sde_file):
        target = d.getVar('SDE_FILE')
        bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
        bb.utils.rename(sde_file, target)
    else:
        bb.debug(1, "%s not found!" % sde_file)
}
|
||||
|
||||
do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
addtask do_deploy_source_date_epoch_setscene
# SDE must be deployed before configure so reproducible timestamps apply.
addtask do_deploy_source_date_epoch before do_configure after do_patch
|
||||
|
||||
python create_source_date_epoch_stamp() {
    # Version: 1
    # Derive SOURCE_DATE_EPOCH from the unpacked sources and persist it
    # to SDE_FILE for later tasks (runs as a do_unpack postfunc).
    source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
    oe.reproducible.epochfile_write(source_date_epoch, d.getVar('SDE_FILE'), d)
}
do_unpack[postfuncs] += "create_source_date_epoch_stamp"
|
||||
|
||||
def get_source_date_epoch_value(d):
    # Read back the SOURCE_DATE_EPOCH recorded at do_unpack time.
    return oe.reproducible.epochfile_read(d.getVar('SDE_FILE'), d)
|
||||
|
||||
def get_layers_branch_rev(d):
    """Return 'layer = "branch:revision"' lines for the build banner.

    Adjacent entries that share the same '= "branch:revision"' suffix only
    keep it on the last of the run; the loop walks the list bottom-up and
    blanks the duplicated suffix on earlier entries.
    """
    revisions = oe.buildcfg.get_layer_revisions(d)
    layers_branch_rev = ["%-20s = \"%s:%s\"" % (r[1], r[2], r[3]) for r in revisions]
    # s1 holds the '= "branch:rev"' suffix of the entry below the cursor.
    i = len(layers_branch_rev)-1
    p1 = layers_branch_rev[i].find("=")
    s1 = layers_branch_rev[i][p1:]
    while i > 0:
        p2 = layers_branch_rev[i-1].find("=")
        s2= layers_branch_rev[i-1][p2:]
        if s1 == s2:
            # Same branch:rev as the entry below -- drop the duplicate text.
            layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
            i -= 1
        else:
            i -= 1
            p1 = layers_branch_rev[i].find("=")
            s1= layers_branch_rev[i][p1:]
    return layers_branch_rev
|
||||
|
||||
|
||||
# Functions producing the build-configuration summary printed at BuildStarted.
BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
BUILDCFG_FUNCS[type] = "list"
|
||||
|
||||
def buildcfg_vars(d):
    """Yield 'NAME = "value"' banner lines for each set BUILDCFG_VARS entry."""
    for name in oe.data.typed_value('BUILDCFG_VARS', d):
        current = d.getVar(name)
        if current is None:
            continue
        yield '%-20s = "%s"' % (name, current)
|
||||
|
||||
def buildcfg_neededvars(d):
    """Abort the build if any BUILDCFG_NEEDEDVARS variable is unset or INVALID."""
    missing = []
    for name in oe.data.typed_value("BUILDCFG_NEEDEDVARS", d):
        value = d.getVar(name)
        if not value or value == 'INVALID':
            missing.append(name)

    if missing:
        bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(missing))
|
||||
|
||||
addhandler base_eventhandler
base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.event.RecipeParsed"
python base_eventhandler() {
    # Central event hook: records host/bitbake identity at ConfigParsed,
    # maintains HOSTTOOLS_DIR, merges multiconfig signature excludes,
    # prints the build banner, and enforces PREFERRED_PROVIDER choices.
    import bb.runqueue

    if isinstance(e, bb.event.ConfigParsed):
        if not d.getVar("NATIVELSBSTRING", False):
            d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
        d.setVar("ORIGNATIVELSBSTRING", d.getVar("NATIVELSBSTRING", False))
        d.setVar('BB_VERSION', bb.__version__)

    # There might be no bb.event.ConfigParsed event if bitbake server is
    # running, so check bb.event.BuildStarted too to make sure ${HOSTTOOLS_DIR}
    # exists.
    if isinstance(e, bb.event.ConfigParsed) or \
            (isinstance(e, bb.event.BuildStarted) and not os.path.exists(d.getVar('HOSTTOOLS_DIR'))):
        # Works with the line in layer.conf which changes PATH to point here
        setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
        setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)

    if isinstance(e, bb.event.MultiConfigParsed):
        # We need to expand SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS in each of the multiconfig data stores
        # own contexts so the variables get expanded correctly for that arch, then inject back into
        # the main data store.
        deps = []
        for config in e.mcdata:
            deps.append(e.mcdata[config].getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS"))
        deps = " ".join(deps)
        e.mcdata[''].setVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", deps)

    if isinstance(e, bb.event.BuildStarted):
        # Assemble and print the build configuration banner from the
        # functions listed in BUILDCFG_FUNCS.
        localdata = bb.data.createCopy(d)
        statuslines = []
        for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
            g = globals()
            if func not in g:
                bb.warn("Build configuration function '%s' does not exist" % func)
            else:
                flines = g[func](localdata)
                if flines:
                    statuslines.extend(flines)

        statusheader = d.getVar('BUILDCFG_HEADER')
        if statusheader:
            bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))

    # This code is to silence warnings where the SDK variables overwrite the
    # target ones and we'd see duplicate key names overwriting each other
    # for various PREFERRED_PROVIDERS
    if isinstance(e, bb.event.RecipePreFinalise):
        if d.getVar("TARGET_PREFIX") == d.getVar("SDK_PREFIX"):
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")

    if isinstance(e, bb.event.RecipeParsed):
        #
        # If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set
        # skip parsing for all the other providers which will mean they get uninstalled from the
        # sysroot since they're now "unreachable". This makes switching virtual/kernel work in
        # particular.
        #
        pn = d.getVar('PN')
        source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
        if not source_mirror_fetch:
            provs = (d.getVar("PROVIDES") or "").split()
            multiprovidersallowed = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
            for p in provs:
                if p.startswith("virtual/") and p not in multiprovidersallowed:
                    profprov = d.getVar("PREFERRED_PROVIDER_" + p)
                    if profprov and pn != profprov:
                        raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
}
|
||||
|
||||
# Stamp recording the task hash of the last successful configure; used to
# detect when a reconfigure (and pre-clean) is needed.
CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
# Set to "1" by recipes whose "make clean" is broken, to skip the pre-clean.
CLEANBROKEN = "0"

addtask configure after do_patch
do_configure[dirs] = "${B}"
base_do_configure() {
	if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
		if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
			cd ${B}
			if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
				oe_runmake clean
			fi
			# -ignore_readdir_race does not work correctly with -delete;
			# use xargs to avoid spurious build failures
			find ${B} -ignore_readdir_race -name \*.la -type f -print0 | xargs -0 rm -f
		fi
	fi
	if [ -n "${CONFIGURESTAMPFILE}" ]; then
		mkdir -p `dirname ${CONFIGURESTAMPFILE}`
		echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
	fi
}
|
||||
|
||||
addtask compile after do_configure
do_compile[dirs] = "${B}"
# Default compile: run make if any makefile exists, otherwise do nothing.
base_do_compile() {
	if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
		oe_runmake || die "make failed"
	else
		bbnote "nothing to compile"
	fi
}
|
||||
|
||||
addtask install after do_compile
do_install[dirs] = "${B}"
# Remove and re-create ${D} so that it is guaranteed to be empty
do_install[cleandirs] = "${D}"

# Default install is a no-op; recipes override do_install as needed.
base_do_install() {
	:
}
|
||||
|
||||
addtask build after do_populate_sysroot
# do_build is only an anchor task collecting dependencies; it runs nothing.
do_build[noexec] = "1"
do_build[recrdeptask] += "do_deploy"
do_build () {
	:
}
|
||||
|
||||
def set_packagetriplet(d):
    """Compute PKGTRIPLETS and PKGMLTRIPLETS.

    A triplet is "<arch><vendor>-<os>". PKGTRIPLETS covers the base
    configuration; PKGMLTRIPLETS additionally includes every multilib
    variant's architectures, gathered from per-variant override copies of
    the datastore. Lists are reversed so the most specific arch comes first.
    """
    archs = []
    tos = []
    tvs = []

    archs.append(d.getVar("PACKAGE_ARCHS").split())
    tos.append(d.getVar("TARGET_OS"))
    tvs.append(d.getVar("TARGET_VENDOR"))

    # Join each arch list with its matching vendor/os and store the result.
    def settriplet(d, varname, archs, tos, tvs):
        triplets = []
        for i in range(len(archs)):
            for arch in archs[i]:
                triplets.append(arch + tvs[i] + "-" + tos[i])
        triplets.reverse()
        d.setVar(varname, " ".join(triplets))

    settriplet(d, "PKGTRIPLETS", archs, tos, tvs)

    variants = d.getVar("MULTILIB_VARIANTS") or ""
    for item in variants.split():
        localdata = bb.data.createCopy(d)
        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
        localdata.setVar("OVERRIDES", overrides)

        archs.append(localdata.getVar("PACKAGE_ARCHS").split())
        tos.append(localdata.getVar("TARGET_OS"))
        tvs.append(localdata.getVar("TARGET_VENDOR"))

    settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)
|
||||
|
||||
# Anonymous function run at recipe parse time: validates S/B/LICENSE,
# expands PACKAGECONFIG, applies COMPATIBLE_MACHINE/HOST and
# INCOMPATIBLE_LICENSE policy, and wires fetch/unpack tool dependencies
# based on SRC_URI schemes and file suffixes.
python () {
    import string, re

    # Handle backfilling
    oe.utils.features_backfill("DISTRO_FEATURES", d)
    oe.utils.features_backfill("MACHINE_FEATURES", d)

    if d.getVar("S")[-1] == '/':
        bb.warn("Recipe %s sets S variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("S")))
    if d.getVar("B")[-1] == '/':
        bb.warn("Recipe %s sets B variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("B")))

    # Keep pseudo from tracking the source/build trees when they are
    # separate from WORKDIR.
    if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("S")):
        d.appendVar("PSEUDO_IGNORE_PATHS", ",${S}")
    if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("B")):
        d.appendVar("PSEUDO_IGNORE_PATHS", ",${B}")

    # To add a recipe to the skip list , set:
    #   SKIP_RECIPE[pn] = "message"
    pn = d.getVar('PN')
    skip_msg = d.getVarFlag('SKIP_RECIPE', pn)
    if skip_msg:
        bb.debug(1, "Skipping %s %s" % (pn, skip_msg))
        raise bb.parse.SkipRecipe("Recipe will be skipped because: %s" % (skip_msg))

    # Handle PACKAGECONFIG
    #
    # These take the form:
    #
    # PACKAGECONFIG ??= "<default options>"
    # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends,foo_conflict_packageconfig"
    pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
    if pkgconfigflags:
        pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
        pn = d.getVar("PN")

        mlprefix = d.getVar("MLPREFIX")

        # Rewrite dependency names for native/nativesdk/multilib contexts.
        def expandFilter(appends, extension, prefix):
            appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
            newappends = []
            for a in appends:
                if a.endswith("-native") or ("-cross-" in a):
                    newappends.append(a)
                elif a.startswith("virtual/"):
                    subs = a.split("/", 1)[1]
                    if subs.startswith(prefix):
                        newappends.append(a + extension)
                    else:
                        newappends.append("virtual/" + prefix + subs + extension)
                else:
                    if a.startswith(prefix):
                        newappends.append(a + extension)
                    else:
                        newappends.append(prefix + a + extension)
            return newappends

        # Append values to a variable, mangling DEPENDS-style names first.
        def appendVar(varname, appends):
            if not appends:
                return
            if varname.find("DEPENDS") != -1:
                if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d) :
                    appends = expandFilter(appends, "", "nativesdk-")
                elif bb.data.inherits_class('native', d):
                    appends = expandFilter(appends, "-native", "")
                elif mlprefix:
                    appends = expandFilter(appends, "", mlprefix)
            varname = d.expand(varname)
            d.appendVar(varname, " " + " ".join(appends))

        extradeps = []
        extrardeps = []
        extrarrecs = []
        extraconf = []
        for flag, flagval in sorted(pkgconfigflags.items()):
            items = flagval.split(",")
            num = len(items)
            if num > 6:
                bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend,conflict_packageconfig can be specified!"
                    % (d.getVar('PN'), flag))

            if flag in pkgconfig:
                # Enabled option: collect its deps/rdeps/rrecs and the
                # enable configure argument.
                if num >= 3 and items[2]:
                    extradeps.append(items[2])
                if num >= 4 and items[3]:
                    extrardeps.append(items[3])
                if num >= 5 and items[4]:
                    extrarrecs.append(items[4])
                if num >= 1 and items[0]:
                    extraconf.append(items[0])
            elif num >= 2 and items[1]:
                # Disabled option: only the disable configure argument.
                extraconf.append(items[1])

            if num >= 6 and items[5]:
                # Validate declared conflicts between PACKAGECONFIG options.
                conflicts = set(items[5].split())
                invalid = conflicts.difference(set(pkgconfigflags.keys()))
                if invalid:
                    bb.error("%s: PACKAGECONFIG[%s] Invalid conflict package config%s '%s' specified."
                        % (d.getVar('PN'), flag, 's' if len(invalid) > 1 else '', ' '.join(invalid)))

                if flag in pkgconfig:
                    intersec = conflicts.intersection(set(pkgconfig))
                    if intersec:
                        bb.fatal("%s: PACKAGECONFIG[%s] Conflict package config%s '%s' set in PACKAGECONFIG."
                            % (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))

        appendVar('DEPENDS', extradeps)
        appendVar('RDEPENDS:${PN}', extrardeps)
        appendVar('RRECOMMENDS:${PN}', extrarrecs)
        appendVar('PACKAGECONFIG_CONFARGS', extraconf)

    pn = d.getVar('PN')
    license = d.getVar('LICENSE')
    if license == "INVALID" and pn != "defaultpkgname":
        bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)

    if bb.data.inherits_class('license', d):
        check_license_format(d)
        unmatched_license_flags = check_license_flags(d)
        if unmatched_license_flags:
            for unmatched in unmatched_license_flags:
                message = "Has a restricted license '%s' which is not listed in your LICENSE_FLAGS_ACCEPTED." % unmatched
                details = d.getVarFlag("LICENSE_FLAGS_DETAILS", unmatched)
                if details:
                    message += "\n" + details
            bb.debug(1, "Skipping %s: %s" % (pn, message))
            raise bb.parse.SkipRecipe(message)

    # If we're building a target package we need to use fakeroot (pseudo)
    # in order to capture permissions, owners, groups and special files
    if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
        d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_install', 'fakeroot', '1')
        d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_package', 'fakeroot', '1')
        d.setVarFlag('do_package_setscene', 'fakeroot', '1')
        d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_devshell', 'fakeroot', '1')
        d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')

    need_machine = d.getVar('COMPATIBLE_MACHINE')
    if need_machine and not d.getVar('PARSE_ALL_RECIPES', False):
        import re
        compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
        for m in compat_machines:
            if re.match(need_machine, m):
                break
        else:
            raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))

    source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False) or d.getVar('PARSE_ALL_RECIPES', False)
    if not source_mirror_fetch:
        need_host = d.getVar('COMPATIBLE_HOST')
        if need_host:
            import re
            this_host = d.getVar('HOST_SYS')
            if not re.match(need_host, this_host):
                raise bb.parse.SkipRecipe("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)

        bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()

        # Toolchain/native pieces are exempt from license blacklisting.
        check_license = False if pn.startswith("nativesdk-") else True
        for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
              "-crosssdk-${SDK_SYS}", "-crosssdk-initial-${SDK_SYS}",
              "-cross-canadian-${TRANSLATED_TARGET_ARCH}"]:
            if pn.endswith(d.expand(t)):
                check_license = False
        if pn.startswith("gcc-source-"):
            check_license = False

        if check_license and bad_licenses:
            bad_licenses = expand_wildcard_licenses(d, bad_licenses)

            exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()

            for lic_exception in exceptions:
                if ":" in lic_exception:
                    lic_exception = lic_exception.split(":")[1]
                if lic_exception in oe.license.obsolete_license_list():
                    bb.fatal("Obsolete license %s used in INCOMPATIBLE_LICENSE_EXCEPTIONS" % lic_exception)

            # Partition packages into license-compatible and incompatible.
            pkgs = d.getVar('PACKAGES').split()
            skipped_pkgs = {}
            unskipped_pkgs = []
            for pkg in pkgs:
                remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)

                incompatible_lic = incompatible_license(d, remaining_bad_licenses, pkg)
                if incompatible_lic:
                    skipped_pkgs[pkg] = incompatible_lic
                else:
                    unskipped_pkgs.append(pkg)

            if unskipped_pkgs:
                # Some packages survive: exclude only the bad ones at rootfs.
                for pkg in skipped_pkgs:
                    bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
                    d.setVar('_exclude_incompatible-' + pkg, ' '.join(skipped_pkgs[pkg]))
                for pkg in unskipped_pkgs:
                    bb.debug(1, "Including the package %s" % pkg)
            else:
                # Everything is incompatible: skip the whole recipe.
                incompatible_lic = incompatible_license(d, bad_licenses)
                for pkg in skipped_pkgs:
                    incompatible_lic += skipped_pkgs[pkg]
                incompatible_lic = sorted(list(set(incompatible_lic)))

                if incompatible_lic:
                    bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
                    raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))

    srcuri = d.getVar('SRC_URI')
    for uri_string in srcuri.split():
        uri = bb.fetch.URI(uri_string)
        # Also check downloadfilename as the URL path might not be useful for sniffing
        path = uri.params.get("downloadfilename", uri.path)

        # HTTP/FTP use the wget fetcher
        if uri.scheme in ("http", "https", "ftp"):
            d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')

        # Svn packages should DEPEND on subversion-native
        if uri.scheme == "svn":
            d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')

        # Git packages should DEPEND on git-native
        elif uri.scheme in ("git", "gitsm"):
            d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')

        # Mercurial packages should DEPEND on mercurial-native
        elif uri.scheme == "hg":
            d.appendVar("EXTRANATIVEPATH", ' python3-native ')
            d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot ca-certificates-native:do_populate_sysroot')

        # OSC packages should DEPEND on osc-native
        elif uri.scheme == "osc":
            d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')

        elif uri.scheme == "npm":
            d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')

        elif uri.scheme == "repo":
            d.appendVarFlag('do_fetch', 'depends', ' repo-native:do_populate_sysroot')

        # *.lz4 should DEPEND on lz4-native for unpacking
        if path.endswith('.lz4'):
            d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')

        # *.zst should DEPEND on zstd-native for unpacking
        elif path.endswith('.zst'):
            d.appendVarFlag('do_unpack', 'depends', ' zstd-native:do_populate_sysroot')

        # *.lz should DEPEND on lzip-native for unpacking
        elif path.endswith('.lz'):
            d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')

        # *.xz should DEPEND on xz-native for unpacking
        elif path.endswith('.xz') or path.endswith('.txz'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # .zip should DEPEND on unzip-native for unpacking
        elif path.endswith('.zip') or path.endswith('.jar'):
            d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')

        # Some rpm files may be compressed internally using xz (for example, rpms from Fedora)
        elif path.endswith('.rpm'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # *.deb should DEPEND on xz-native for unpacking
        elif path.endswith('.deb'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # *.7z should DEPEND on p7zip-native for unpacking
        elif path.endswith('.7z'):
            d.appendVarFlag('do_unpack', 'depends', ' p7zip-native:do_populate_sysroot')

    set_packagetriplet(d)

    # 'multimachine' handling
    mach_arch = d.getVar('MACHINE_ARCH')
    pkg_arch = d.getVar('PACKAGE_ARCH')

    if (pkg_arch == mach_arch):
        # Already machine specific - nothing further to do
        return

    #
    # We always try to scan SRC_URI for urls with machine overrides
    # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
    #
    override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH')
    if override != '0':
        paths = []
        fpaths = (d.getVar('FILESPATH') or '').split(':')
        machine = d.getVar('MACHINE')
        for p in fpaths:
            if os.path.basename(p) == machine and os.path.isdir(p):
                paths.append(p)

        if paths:
            for s in srcuri.split():
                if not s.startswith("file://"):
                    continue
                fetcher = bb.fetch2.Fetch([s], d)
                local = fetcher.localpath(s)
                for mp in paths:
                    if local.startswith(mp):
                        #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
                        d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
                        return

    packages = d.getVar('PACKAGES').split()
    for pkg in packages:
        pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg)

        # We could look for != PACKAGE_ARCH here but how to choose
        # if multiple differences are present?
        # Look through PACKAGE_ARCHS for the priority order?
        if pkgarch and pkgarch == mach_arch:
            d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
            bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN"))
}
|
||||
|
||||
addtask cleansstate after do_clean
# Remove this recipe's shared-state cache files.
python do_cleansstate() {
    sstate_clean_cachefiles(d)
}
addtask cleanall after do_cleansstate
do_cleansstate[nostamp] = "1"
|
||||
|
||||
python do_cleanall() {
|
||||
src_uri = (d.getVar('SRC_URI') or "").split()
|
||||
if not src_uri:
|
||||
return
|
||||
|
||||
try:
|
||||
fetcher = bb.fetch2.Fetch(src_uri, d)
|
||||
fetcher.clean()
|
||||
except bb.fetch2.BBFetchException as e:
|
||||
bb.fatal(str(e))
|
||||
}
|
||||
do_cleanall[nostamp] = "1"
|
||||
|
||||
|
||||
# Expose the base_* implementations as the default task bodies.
EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install
|
||||
302
sources/poky/meta/classes-global/buildstats.bbclass
Normal file
302
sources/poky/meta/classes-global/buildstats.bbclass
Normal file
@@ -0,0 +1,302 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
|
||||
|
||||
################################################################################
|
||||
# Build statistics gathering.
|
||||
#
|
||||
# The CPU and Time gathering/tracking functions and bbevent inspiration
|
||||
# were written by Christopher Larson.
|
||||
#
|
||||
################################################################################
|
||||
|
||||
def get_buildprocess_cputime(pid):
    """Return the CPU time, in clock ticks, used by process *pid* and its
    reaped children.

    Reads /proc/<pid>/stat; after splitting, the 0-based fields 13-16 are
    utime, stime, cutime and cstime (see proc(5)).
    """
    with open("/proc/%d/stat" % pid, "r") as f:
        fields = f.readline().rstrip().split()
    # 13: utime, 14: stime, 15: cutime, 16: cstime.
    # The slice upper bound must be 17 so cstime (index 16) is included;
    # the previous [13:16] slice silently dropped it despite the comment.
    return sum(int(field) for field in fields[13:17])
|
||||
|
||||
def get_process_cputime(pid):
    """Collect CPU, IO and rusage statistics for process *pid*.

    Returns a 4-tuple:
      - dict of utime/stime/cutime/cstime (as strings) from /proc/<pid>/stat,
      - dict of /proc/<pid>/io counters (empty if the file is unavailable),
      - getrusage() record for the current process,
      - getrusage() record for its children.
    """
    import resource

    with open("/proc/%d/stat" % pid, "r") as statfile:
        stat_fields = statfile.readline().rstrip().split()
    # 0-based fields 13-16 of /proc/<pid>/stat: utime, stime, cutime, cstime.
    stats = dict(zip(('utime', 'stime', 'cutime', 'cstime'), stat_fields[13:17]))

    iostats = {}
    iopath = "/proc/%d/io" % pid
    if os.path.isfile(iopath):
        with open(iopath, "r") as iofile:
            for raw in iofile:
                line = raw.strip()
                if not line or ":" not in line:
                    # An extra trailing line (empty or "0") can appear, most
                    # probably due to a race in the kernel while updating the
                    # IO stats; stop parsing at that point.
                    break
                parts = line.split(": ")
                iostats[parts[0]] = parts[1]

    own_usage = resource.getrusage(resource.RUSAGE_SELF)
    child_usage = resource.getrusage(resource.RUSAGE_CHILDREN)
    return stats, iostats, own_usage, child_usage
|
||||
|
||||
def get_cputime():
    """Return the total of all jiffy counters on the aggregate "cpu" line of
    /proc/stat, i.e. system-wide CPU time consumed since boot."""
    with open("/proc/stat", "r") as statfile:
        cpu_fields = statfile.readline().rstrip().split()[1:]
    total = 0
    for field in cpu_fields:
        total += int(field)
    return total
|
||||
|
||||
def set_timedata(var, d, server_time):
    # Stash the server-side timestamp of the current event in the datastore
    # so a later get_timedata() call can compute the elapsed time.
    d.setVar(var, server_time)
|
||||
|
||||
def get_timedata(var, d, end_time):
    """Return the time elapsed between the timestamp stored in datastore
    variable *var* (see set_timedata) and *end_time*, or None if no
    timestamp was recorded."""
    start_time = d.getVar(var, False)
    if start_time is None:
        return None
    return end_time - start_time
|
||||
|
||||
def set_buildtimedata(var, d):
    """Store a build-start snapshot in datastore variable *var*.

    The snapshot is a (wallclock, system CPU time, bitbake process CPU time)
    tuple; get_buildtimedata() later diffs against it to report the build's
    elapsed time and CPU usage percentage.
    """
    import time
    # Use a distinct local name: the previous code rebound the name "time"
    # to the float returned by time.time(), shadowing the module it had
    # just imported.
    now = time.time()
    cputime = get_cputime()
    proctime = get_buildprocess_cputime(os.getpid())
    d.setVar(var, (now, cputime, proctime))
|
||||
|
||||
def get_buildtimedata(var, d):
    """Diff the current time/CPU counters against the snapshot stored by
    set_buildtimedata() in datastore variable *var*.

    Returns (elapsed_seconds, cpu_percent) where cpu_percent is None when no
    system CPU time was consumed; returns None if no snapshot exists.
    """
    import time
    snapshot = d.getVar(var, False)
    if snapshot is None:
        return None
    start_time, start_cpu, start_proc = snapshot
    proc_used = get_buildprocess_cputime(os.getpid()) - start_proc
    cpu_used = get_cputime() - start_cpu
    elapsed = time.time() - start_time
    # Express the bitbake process's CPU time as a share of the whole system's.
    cpuperc = float(proc_used) * 100 / cpu_used if cpu_used > 0 else None
    return elapsed, cpuperc
|
||||
|
||||
def write_task_data(status, logfile, e, d):
    """Append per-task buildstats to *logfile*.

    status is "passed" or "failed"; e is the TaskSucceeded/TaskFailed event.
    Records elapsed time (relative to the TaskStarted timestamp stashed under
    __timedata_task), /proc CPU fields, /proc IO counters and getrusage data.
    """
    with open(os.path.join(logfile), "a") as f:
        elapsedtime = get_timedata("__timedata_task", d, e.time)
        if elapsedtime:
            f.write(d.expand("${PF}: %s\n" % e.task))
            f.write(d.expand("Elapsed time: %0.2f seconds\n" % elapsedtime))
            cpu, iostats, resources, childres = get_process_cputime(os.getpid())
            if cpu:
                f.write("utime: %s\n" % cpu['utime'])
                f.write("stime: %s\n" % cpu['stime'])
                f.write("cutime: %s\n" % cpu['cutime'])
                f.write("cstime: %s\n" % cpu['cstime'])
            for i in iostats:
                f.write("IO %s: %s\n" % (i, iostats[i]))
            rusages = ["ru_utime", "ru_stime", "ru_maxrss", "ru_minflt", "ru_majflt", "ru_inblock", "ru_oublock", "ru_nvcsw", "ru_nivcsw"]
            for i in rusages:
                f.write("rusage %s: %s\n" % (i, getattr(resources, i)))
            for i in rusages:
                f.write("Child rusage %s: %s\n" % (i, getattr(childres, i)))
        if status == "passed":
            f.write("Status: PASSED \n")
        else:
            f.write("Status: FAILED \n")
        f.write("Ended: %0.2f \n" % e.time)
|
||||
|
||||
def write_host_data(logfile, e, d, type):
    """Run the configured host-monitoring commands and append their output
    to *logfile*.

    type is "interval" (commands from BB_LOG_HOST_STAT_CMDS_INTERVAL, driven
    by heartbeat events) or "failure" (BB_LOG_HOST_STAT_CMDS_FAILURE, run when
    a task fails).  On misconfiguration the corresponding
    BB_LOG_HOST_STAT_ON_* switch is set to "0" so the warning is only issued
    once per build.
    """
    import subprocess, os, datetime
    # minimum time allowed for each command to run, in seconds
    time_threshold = 0.5
    # per-command timeout in seconds; recomputed for interval mode below
    limit = 10
    # the total number of commands
    num_cmds = 0
    msg = ""
    if type == "interval":
        # interval at which data will be logged
        interval = d.getVar("BB_HEARTBEAT_EVENT", False)
        if interval is None:
            bb.warn("buildstats: Collecting host data at intervals failed. Set BB_HEARTBEAT_EVENT=\"<interval>\" in conf/local.conf for the interval at which host data will be logged.")
            d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
            return
        interval = int(interval)
        cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_INTERVAL')
        msg = "Host Stats: Collecting data at %d second intervals.\n" % interval
        if cmds is None:
            d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
            bb.warn("buildstats: Collecting host data at intervals failed. Set BB_LOG_HOST_STAT_CMDS_INTERVAL=\"command1 ; command2 ; ... \" in conf/local.conf.")
            return
    if type == "failure":
        cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_FAILURE')
        msg = "Host Stats: Collecting data on failure.\n"
        msg += "Failed at task: " + e.task + "\n"
        if cmds is None:
            d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
            bb.warn("buildstats: Collecting host data on failure failed. Set BB_LOG_HOST_STAT_CMDS_FAILURE=\"command1 ; command2 ; ... \" in conf/local.conf.")
            return
    # Sanitize the command list: drop empty entries from the ';'-separated
    # configuration string.
    c_san = []
    for cmd in cmds.split(";"):
        if len(cmd) == 0:
            continue
        num_cmds += 1
        c_san.append(cmd)
    if num_cmds == 0:
        if type == "interval":
            d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
        if type == "failure":
            d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
        return

    # return if the interval is not enough to run all commands within the specified BB_HEARTBEAT_EVENT interval
    if type == "interval":
        limit = interval / num_cmds
        if limit <= time_threshold:
            d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
            bb.warn("buildstats: Collecting host data failed. BB_HEARTBEAT_EVENT interval not enough to run the specified commands. Increase value of BB_HEARTBEAT_EVENT in conf/local.conf.")
            return

    # set the environment variables: combine bitbake's PATH, the original
    # (pre-bitbake) environment's PATH and the current process PATH so the
    # monitoring commands can be found.
    path = d.getVar("PATH")
    opath = d.getVar("BB_ORIGENV", False).getVar("PATH")
    ospath = os.environ['PATH']
    os.environ['PATH'] = path + ":" + opath + ":" + ospath
    with open(logfile, "a") as f:
        f.write("Event Time: %f\nDate: %s\n" % (e.time, datetime.datetime.now()))
        f.write("%s" % msg)
        for c in c_san:
            try:
                output = subprocess.check_output(c.split(), stderr=subprocess.STDOUT, timeout=limit).decode('utf-8')
            except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as err:
                # Record the failure in the log rather than aborting the build.
                output = "Error running command: %s\n%s\n" % (c, err)
            f.write("%s\n%s\n" % (c, output))
    # reset the environment
    os.environ['PATH'] = ospath
|
||||
|
||||
# Central buildstats event handler: records build-level timing/CPU data,
# per-task timing and rusage, rootfs size, and host statistics, all under
# ${BUILDSTATS_BASE}/${BUILDNAME}/.
python run_buildstats () {
    import bb.build
    import bb.event
    import time, subprocess, platform

    bn = d.getVar('BUILDNAME')
    ########################################################################
    # bitbake fires HeartbeatEvent even before a build has been
    # triggered, causing BUILDNAME to be None
    ########################################################################
    if bn is not None:
        bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
        taskdir = os.path.join(bsdir, d.getVar('PF'))
        if isinstance(e, bb.event.HeartbeatEvent) and bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_INTERVAL")):
            bb.utils.mkdirhier(bsdir)
            write_host_data(os.path.join(bsdir, "host_stats_interval"), e, d, "interval")

        if isinstance(e, bb.event.BuildStarted):
            ########################################################################
            # If the kernel was not configured to provide I/O statistics, issue
            # a one time warning.
            ########################################################################
            if not os.path.isfile("/proc/%d/io" % os.getpid()):
                bb.warn("The Linux kernel on your build host was not configured to provide process I/O statistics. (CONFIG_TASK_IO_ACCOUNTING is not set)")

            ########################################################################
            # at first pass make the buildstats hierarchy and then
            # set the buildname
            ########################################################################
            bb.utils.mkdirhier(bsdir)
            set_buildtimedata("__timedata_build", d)
            build_time = os.path.join(bsdir, "build_stats")
            # write start of build into build_time
            with open(build_time, "a") as f:
                host_info = platform.uname()
                f.write("Host Info: ")
                for x in host_info:
                    if x:
                        f.write(x + " ")
                f.write("\n")
                f.write("Build Started: %0.2f \n" % d.getVar('__timedata_build', False)[0])

        elif isinstance(e, bb.event.BuildCompleted):
            build_time = os.path.join(bsdir, "build_stats")
            with open(build_time, "a") as f:
                ########################################################################
                # Write build statistics for the build
                ########################################################################
                timedata = get_buildtimedata("__timedata_build", d)
                if timedata:
                    time, cpu = timedata
                    # write end of build and cpu used into build_time
                    f.write("Elapsed time: %0.2f seconds \n" % (time))
                    if cpu:
                        f.write("CPU usage: %0.1f%% \n" % cpu)

        if isinstance(e, bb.build.TaskStarted):
            set_timedata("__timedata_task", d, e.time)
            bb.utils.mkdirhier(taskdir)
            # write into the task event file the name and start time
            with open(os.path.join(taskdir, e.task), "a") as f:
                f.write("Event: %s \n" % bb.event.getName(e))
                f.write("Started: %0.2f \n" % e.time)

        elif isinstance(e, bb.build.TaskSucceeded):
            write_task_data("passed", os.path.join(taskdir, e.task), e, d)
            if e.task == "do_rootfs":
                # Additionally record the uncompressed rootfs size after a
                # successful do_rootfs.
                bs = os.path.join(bsdir, "build_stats")
                with open(bs, "a") as f:
                    rootfs = d.getVar('IMAGE_ROOTFS')
                    if os.path.isdir(rootfs):
                        try:
                            rootfs_size = subprocess.check_output(["du", "-sh", rootfs],
                                stderr=subprocess.STDOUT).decode('utf-8')
                            f.write("Uncompressed Rootfs size: %s" % rootfs_size)
                        except subprocess.CalledProcessError as err:
                            bb.warn("Failed to get rootfs size: %s" % err.output.decode('utf-8'))

        elif isinstance(e, bb.build.TaskFailed):
            # Can have a failure before TaskStarted so need to mkdir here too
            bb.utils.mkdirhier(taskdir)
            write_task_data("failed", os.path.join(taskdir, e.task), e, d)
            ########################################################################
            # Lets make things easier and tell people where the build failed in
            # build_status. We do this here because BuildCompleted triggers no
            # matter what the status of the build actually is
            ########################################################################
            build_status = os.path.join(bsdir, "build_stats")
            with open(build_status, "a") as f:
                f.write(d.expand("Failed at: ${PF} at task: %s \n" % e.task))
                if bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_FAILURE")):
                    write_host_data(os.path.join(bsdir, "host_stats_%s_failure" % e.task), e, d, "failure")
}

addhandler run_buildstats
run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.event.HeartbeatEvent bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
|
||||
|
||||
# Sample whole-system statistics over the lifetime of the runqueue using the
# buildstats helper module, keeping one persistent SystemStats instance in
# the datastore between events.
python runqueue_stats () {
    import buildstats
    from bb import event, runqueue
    # We should not record any samples before the first task has started,
    # because that's the first activity shown in the process chart.
    # Besides, at that point we are sure that the build variables
    # are available that we need to find the output directory.
    # The persistent SystemStats is stored in the datastore and
    # closed when the build is done.
    system_stats = d.getVar('_buildstats_system_stats', False)
    if not system_stats and isinstance(e, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted)):
        system_stats = buildstats.SystemStats(d)
        d.setVar('_buildstats_system_stats', system_stats)
    if system_stats:
        # Ensure that we sample at important events.
        done = isinstance(e, bb.event.BuildCompleted)
        if system_stats.sample(e, force=done):
            d.setVar('_buildstats_system_stats', system_stats)
        if done:
            system_stats.close()
            d.delVar('_buildstats_system_stats')
}

addhandler runqueue_stats
runqueue_stats[eventmask] = "bb.runqueue.sceneQueueTaskStarted bb.runqueue.runQueueTaskStarted bb.event.HeartbeatEvent bb.event.BuildCompleted bb.event.MonitorDiskEvent"
|
||||
141
sources/poky/meta/classes-global/debian.bbclass
Normal file
141
sources/poky/meta/classes-global/debian.bbclass
Normal file
@@ -0,0 +1,141 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Debian package renaming only occurs when a package is built
|
||||
# We therefore have to make sure we build all runtime packages
|
||||
# before building the current package to make the packages runtime
|
||||
# depends are correct
|
||||
#
|
||||
# Custom library package names can be defined setting
|
||||
# DEBIANNAME: + pkgname to the desired name.
|
||||
#
|
||||
# Better expressed as ensure all RDEPENDS package before we package
|
||||
# This means we can't have circular RDEPENDS/RRECOMMENDS
|
||||
|
||||
AUTO_LIBNAME_PKGS = "${PACKAGES}"
|
||||
|
||||
inherit package
|
||||
|
||||
# Rename library packages to Debian-style names (e.g. "foo" -> "libfoo2")
# based on the SONAME(s) found in the packaged shared objects.  Packages that
# are renamed gain an RPROVIDES on their original name so dependencies keep
# resolving.  NOTE(review): relies on the global "pkgfiles" mapping injected
# by the packaging code — not defined in this file.
python debian_package_name_hook () {
    import glob, copy, stat, errno, re, pathlib, subprocess

    pkgdest = d.getVar("PKGDEST")
    packages = d.getVar('PACKAGES')
    so_re = re.compile(r"lib.*\.so")

    def socrunch(s):
        # Turn a SONAME like "libfoo.so.2" into Debian-style
        # (binary-package, dev-package) name stems, or None if it doesn't
        # look like a versioned shared object.
        s = s.lower().replace('_', '-')
        m = re.match(r"^(.*)(.)\.so\.(.*)$", s)
        if m is None:
            return None
        if m.group(2) in '0123456789':
            bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
        else:
            bin = m.group(1) + m.group(2) + m.group(3)
        dev = m.group(1) + m.group(2)
        return (bin, dev)

    def isexec(path):
        # Return the executable bit of path's mode (0 on stat failure).
        # NOTE(review): defined but not referenced in this chunk.
        try:
            s = os.stat(path)
        except (os.error, AttributeError):
            return 0
        return (s[stat.ST_MODE] & stat.S_IEXEC)

    def add_rprovides(pkg, d):
        # If pkg was renamed (PKG:pkg set), keep the original name resolvable
        # via RPROVIDES with a version pin.
        newpkg = d.getVar('PKG:' + pkg)
        if newpkg and newpkg != pkg:
            provs = (d.getVar('RPROVIDES:' + pkg) or "").split()
            if pkg not in provs:
                d.appendVar('RPROVIDES:' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")

    def auto_libname(packages, orig_pkg):
        # Inspect orig_pkg's files: detect whether it ships binaries and/or
        # libraries and collect SONAMEs via ${TARGET_PREFIX}objdump -p.
        p = lambda var: pathlib.PurePath(d.getVar(var))
        libdirs = (p("base_libdir"), p("libdir"))
        bindirs = (p("base_bindir"), p("base_sbindir"), p("bindir"), p("sbindir"))

        sonames = []
        has_bins = 0
        has_libs = 0
        for f in pkgfiles[orig_pkg]:
            # This is .../packages-split/orig_pkg/
            pkgpath = pathlib.PurePath(pkgdest, orig_pkg)
            # Strip pkgpath off the full path to a file in the package, re-root
            # so it is absolute, and then get the parent directory of the file.
            path = pathlib.PurePath("/") / (pathlib.PurePath(f).relative_to(pkgpath).parent)
            if path in bindirs:
                has_bins = 1
            if path in libdirs:
                has_libs = 1
                if so_re.match(os.path.basename(f)):
                    try:
                        cmd = [d.expand("${TARGET_PREFIX}objdump"), "-p", f]
                        output = subprocess.check_output(cmd).decode("utf-8")
                        for m in re.finditer(r"\s+SONAME\s+([^\s]+)", output):
                            if m.group(1) not in sonames:
                                sonames.append(m.group(1))
                    except subprocess.CalledProcessError:
                        pass
        bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
        soname = None
        if len(sonames) == 1:
            soname = sonames[0]
        elif len(sonames) > 1:
            # Multiple SONAMEs: LEAD_SONAME (a regex) may pick the canonical one.
            lead = d.getVar('LEAD_SONAME')
            if lead:
                r = re.compile(lead)
                filtered = []
                for s in sonames:
                    if r.match(s):
                        filtered.append(s)
                if len(filtered) == 1:
                    soname = filtered[0]
                elif len(filtered) > 1:
                    bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
                else:
                    bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
            else:
                bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))

        # Only rename pure library packages (no binaries) with a resolved SONAME.
        if has_libs and not has_bins and soname:
            soname_result = socrunch(soname)
            if soname_result:
                (pkgname, devname) = soname_result
                for pkg in packages.split():
                    # Respect explicit PKG renames and DEBIAN_NOAUTONAME opt-outs.
                    if (d.getVar('PKG:' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME:' + pkg, False)):
                        add_rprovides(pkg, d)
                        continue
                    debian_pn = d.getVar('DEBIANNAME:' + pkg, False)
                    if debian_pn:
                        newpkg = debian_pn
                    elif pkg == orig_pkg:
                        newpkg = pkgname
                    else:
                        newpkg = pkg.replace(orig_pkg, devname, 1)
                    mlpre=d.getVar('MLPREFIX')
                    if mlpre:
                        if not newpkg.find(mlpre) == 0:
                            newpkg = mlpre + newpkg
                    if newpkg != pkg:
                        bb.note("debian: renaming %s to %s" % (pkg, newpkg))
                        d.setVar('PKG:' + pkg, newpkg)
                        add_rprovides(pkg, d)
        else:
            add_rprovides(orig_pkg, d)

    # reversed sort is needed when some package is substring of another
    # ie in ncurses we get without reverse sort:
    # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
    # and later
    # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
    # so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
    for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS') or "").split(), reverse=True):
        auto_libname(packages, pkg)
}
|
||||
|
||||
EXPORT_FUNCTIONS package_name_hook
|
||||
|
||||
DEBIAN_NAMES = "1"
|
||||
164
sources/poky/meta/classes-global/devshell.bbclass
Normal file
164
sources/poky/meta/classes-global/devshell.bbclass
Normal file
@@ -0,0 +1,164 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit terminal
|
||||
|
||||
DEVSHELL = "${SHELL}"
|
||||
|
||||
# Open an interactive shell inside the recipe's build environment.
python do_devshell () {
    # If fakeroot was requested, the anonymous python below converted the
    # flag to "manualfakeroot"; build the pseudo environment by hand here:
    # prefix the shell with "pseudo" and export each FAKEROOTENV variable
    # into the terminal session.
    if d.getVarFlag("do_devshell", "manualfakeroot"):
        d.prependVar("DEVSHELL", "pseudo ")
        fakeenv = d.getVar("FAKEROOTENV").split()
        for f in fakeenv:
            k = f.split("=")
            d.setVar(k[0], k[1])
            d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
        d.delVarFlag("do_devshell", "fakeroot")

    oe_terminal(d.getVar('DEVSHELL'), 'OpenEmbedded Developer Shell', d)
}
|
||||
|
||||
addtask devshell after do_patch do_prepare_recipe_sysroot
|
||||
|
||||
# The directory that the terminal starts in
|
||||
DEVSHELL_STARTDIR ?= "${S}"
|
||||
do_devshell[dirs] = "${DEVSHELL_STARTDIR}"
|
||||
do_devshell[nostamp] = "1"
|
||||
do_devshell[network] = "1"
|
||||
|
||||
# devshell and fakeroot/pseudo need careful handling since only the final
|
||||
# command should run under fakeroot emulation, any X connection should
|
||||
# be done as the normal user. We therfore carefully construct the envionment
|
||||
# manually
|
||||
# Anonymous python: translate a "fakeroot" flag on do_devshell into the
# custom "manualfakeroot" marker handled inside do_devshell itself, so only
# the final shell command runs under pseudo (any X connection must be made
# as the normal user).
python () {
    if d.getVarFlag("do_devshell", "fakeroot"):
        # We need to signal our code that we want fakeroot however we
        # can't manipulate the environment and variables here yet (see YOCTO #4795)
        d.setVarFlag("do_devshell", "manualfakeroot", "1")
        d.delVarFlag("do_devshell", "fakeroot")
}
|
||||
|
||||
def pydevshell(d):
    """Run an interactive Python shell (REPL) with the BitBake datastore in
    scope.

    Forks: the parent runs the terminal helper (oepydevshell-internal.py)
    attached to the slave side of a pty; the child re-wires its stdio onto
    the master side and runs a code.InteractiveInterpreter loop.  The first
    line received from the helper is its pid, kept so SystemExit can SIGTERM
    it on shutdown.  NOTE(review): os/sys/time/bb come from the globals
    BitBake injects into class functions — they are not imported here.
    """

    import code
    import select
    import signal
    import termios

    m, s = os.openpty()
    sname = os.ttyname(s)

    def noechoicanon(fd):
        # Disable echo and canonical (line-buffered) mode on fd.
        old = termios.tcgetattr(fd)
        old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
        # &~ termios.ISIG
        termios.tcsetattr(fd, termios.TCSADRAIN, old)

    # No echo or buffering over the pty
    noechoicanon(s)

    pid = os.fork()
    if pid:
        # Parent: hand the slave pty to the terminal helper, then exit
        # without running any Python cleanup (hence os._exit).
        os.close(m)
        oe_terminal("oepydevshell-internal.py %s %d" % (sname, pid), 'OpenEmbedded Developer PyShell', d)
        os._exit(0)
    else:
        # Child: talk to the helper through the master side of the pty.
        os.close(s)

        os.dup2(m, sys.stdin.fileno())
        os.dup2(m, sys.stdout.fileno())
        os.dup2(m, sys.stderr.fileno())

        bb.utils.nonblockingfd(sys.stdout)
        bb.utils.nonblockingfd(sys.stderr)
        bb.utils.nonblockingfd(sys.stdin)

        # Names pre-seeded into the interpreter's namespace.
        _context = {
            "os": os,
            "bb": bb,
            "time": time,
            "d": d,
        }

        ps1 = "pydevshell> "
        ps2 = "... "
        buf = []
        more = False

        i = code.InteractiveInterpreter(locals=_context)
        print("OE PyShell (PN = %s)\n" % d.getVar("PN"))

        def prompt(more):
            # Primary or continuation prompt depending on whether the
            # interpreter is waiting for more input.
            if more:
                prompt = ps2
            else:
                prompt = ps1
            sys.stdout.write(prompt)
            sys.stdout.flush()

        # Restore Ctrl+C since bitbake masks this
        def signal_handler(signal, frame):
            raise KeyboardInterrupt
        signal.signal(signal.SIGINT, signal_handler)

        # pid of the terminal helper; sent as the first input line.
        child = None

        prompt(more)
        while True:
            try:
                try:
                    (r, _, _) = select.select([sys.stdin], [], [], 1)
                    if not r:
                        continue
                    line = sys.stdin.readline().strip()
                    if not line:
                        prompt(more)
                        continue
                except EOFError as e:
                    sys.stdout.write("\n")
                    sys.stdout.flush()
                except (OSError, IOError) as e:
                    # EAGAIN: nothing to read yet; EIO: pty closed -> done.
                    if e.errno == 11:
                        continue
                    if e.errno == 5:
                        return
                    raise
                else:
                    if not child:
                        child = int(line)
                        continue
                    buf.append(line)
                    source = "\n".join(buf)
                    more = i.runsource(source, "<pyshell>")
                    if not more:
                        buf = []
                        sys.stderr.flush()
                    prompt(more)
            except KeyboardInterrupt:
                i.write("\nKeyboardInterrupt\n")
                buf = []
                more = False
                prompt(more)
            except SystemExit:
                # Easiest way to ensure everything exits
                os.kill(child, signal.SIGTERM)
                break
|
||||
|
||||
# Run the interactive python devshell, tolerating the SystemExit raised when
# the shell is torn down via SIGTERM.
python do_pydevshell() {
    try:
        pydevshell(d)
    except SystemExit:
        # Stop the SIGTERM above causing an error exit code
        return
    # NOTE: the previous "finally: return" swallowed *every* in-flight
    # exception (including real errors and KeyboardInterrupt), masking
    # failures; only SystemExit should be suppressed.  The unused
    # "import signal" was dropped as well.
}
|
||||
addtask pydevshell after do_patch
|
||||
|
||||
do_pydevshell[nostamp] = "1"
|
||||
do_pydevshell[network] = "1"
|
||||
1655
sources/poky/meta/classes-global/insane.bbclass
Normal file
1655
sources/poky/meta/classes-global/insane.bbclass
Normal file
File diff suppressed because it is too large
Load Diff
432
sources/poky/meta/classes-global/license.bbclass
Normal file
432
sources/poky/meta/classes-global/license.bbclass
Normal file
@@ -0,0 +1,432 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by
|
||||
# LIC_FILES_CHKSUM.
|
||||
# TODO:
|
||||
# - There is a real issue revolving around license naming standards.
|
||||
|
||||
LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
|
||||
LICSSTATEDIR = "${WORKDIR}/license-destdir/"
|
||||
|
||||
# Create extra package with license texts and add it to RRECOMMENDS:${PN}
|
||||
LICENSE_CREATE_PACKAGE[type] = "boolean"
|
||||
LICENSE_CREATE_PACKAGE ??= "0"
|
||||
LICENSE_PACKAGE_SUFFIX ??= "-lic"
|
||||
LICENSE_FILES_DIRECTORY ??= "${datadir}/licenses/"
|
||||
|
||||
LICENSE_DEPLOY_PATHCOMPONENT = "${SSTATE_PKGARCH}"
|
||||
LICENSE_DEPLOY_PATHCOMPONENT:class-cross = "native"
|
||||
LICENSE_DEPLOY_PATHCOMPONENT:class-native = "native"
|
||||
# Ensure the *value* of SSTATE_PKGARCH is captured as it is used in the output paths
|
||||
LICENSE_DEPLOY_PATHCOMPONENT[vardepvalue] += "${LICENSE_DEPLOY_PATHCOMPONENT}"
|
||||
|
||||
addtask populate_lic after do_patch before do_build
|
||||
do_populate_lic[dirs] = "${LICSSTATEDIR}/${LICENSE_DEPLOY_PATHCOMPONENT}/${PN}"
|
||||
do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
|
||||
|
||||
python do_populate_lic() {
    """
    Populate LICENSE_DIRECTORY with licenses.
    """
    # Collect every license file referenced by LIC_FILES_CHKSUM plus the
    # matching generic license texts.
    lic_files_paths = find_license_files(d)

    # The base directory we wrangle licenses to
    destdir = os.path.join(d.getVar('LICSSTATEDIR'), d.getVar('LICENSE_DEPLOY_PATHCOMPONENT'), d.getVar('PN'))
    copy_license_files(lic_files_paths, destdir)
    # Record recipe metadata (PV/PR/LICENSE) next to the license texts so the
    # deployed tree is self-describing.
    info = get_recipe_info(d)
    with open(os.path.join(destdir, "recipeinfo"), "w") as f:
        for key in sorted(info.keys()):
            f.write("%s: %s\n" % (key, info[key]))
    # Fail the task now if any QA errors were queued while finding licenses.
    oe.qa.exit_if_errors(d)
}
|
||||
|
||||
PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '') + ' ' + d.getVar('COREBASE') + '/meta/COPYING').split())}"
|
||||
# it would be better to copy them in do_install:append, but find_license_filesa is python
|
||||
# it would be better to copy them in do_install:append, but find_license_filesa is python
# When LICENSE_CREATE_PACKAGE is enabled for target recipes, stage the
# license texts into ${D} so they end up in the ${PN}-lic package.
python perform_packagecopy:prepend () {
    enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
    if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
        lic_files_paths = find_license_files(d)

        # LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
        destdir = d.getVar('D') + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY'), d.getVar('PN'))
        copy_license_files(lic_files_paths, destdir)
        add_package_and_files(d)
}
perform_packagecopy[vardeps] += "LICENSE_CREATE_PACKAGE"
|
||||
|
||||
def get_recipe_info(d):
    """Return the recipe metadata (PV, PR, LICENSE) recorded alongside the
    deployed license files as a dict."""
    return {
        "PV": d.getVar("PV"),
        "PR": d.getVar("PR"),
        "LICENSE": d.getVar("LICENSE"),
    }
|
||||
|
||||
def add_package_and_files(d):
    """Register a ${PN}${LICENSE_PACKAGE_SUFFIX} package owning the staged
    license files, prepending it to PACKAGES; warn if it already exists."""
    packages = d.getVar('PACKAGES')
    license_files = d.getVar('LICENSE_FILES_DIRECTORY')
    pn = d.getVar('PN')
    lic_pkg = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX', False))
    if lic_pkg in packages.split():
        bb.warn("%s package already existed in %s." % (lic_pkg, pn))
        return
    # first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
    d.setVar('PACKAGES', "%s %s" % (lic_pkg, packages))
    d.setVar('FILES:' + lic_pkg, license_files)
|
||||
|
||||
def copy_license_files(lic_files_paths, destdir):
    """Copy (or hardlink) each license file into *destdir*.

    lic_files_paths is a list of (basename, path, beginline, endline) tuples;
    beginline/endline, when not None, select a 1-based line range of the
    source file.  Failures are reported via bb.warn rather than aborting.
    """
    import shutil
    import errno

    bb.utils.mkdirhier(destdir)
    for (basename, path, beginline, endline) in lic_files_paths:
        try:
            src = path
            dst = os.path.join(destdir, basename)
            if os.path.exists(dst):
                os.remove(dst)
            if os.path.islink(src):
                src = os.path.realpath(src)
            # Hardlink only whole files, on the same filesystem, where we
            # have write access to the source.
            canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev) and beginline is None and endline is None
            if canlink:
                try:
                    os.link(src, dst)
                except OSError as err:
                    if err.errno == errno.EXDEV:
                        # Copy license files if hardlink is not possible even if st_dev is the
                        # same on source and destination (docker container with device-mapper?)
                        canlink = False
                    else:
                        raise
                # Only chown if we did hardlink and we're running under pseudo
                if canlink and os.environ.get('PSEUDO_DISABLED') == '0':
                    os.chown(dst,0,0)
            if not canlink:
                # Convert the 1-based inclusive line range into 0-based
                # Python slice indices; None means "whole file".
                begin_idx = max(0, int(beginline) - 1) if beginline is not None else None
                end_idx = max(0, int(endline)) if endline is not None else None
                if begin_idx is None and end_idx is None:
                    shutil.copyfile(src, dst)
                else:
                    with open(src, 'rb') as src_f:
                        with open(dst, 'wb') as dst_f:
                            dst_f.write(b''.join(src_f.readlines()[begin_idx:end_idx]))

        except Exception as e:
            bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
|
||||
|
||||
def find_license_files(d):
    """
    Creates list of files used in LIC_FILES_CHKSUM and generic LICENSE files.

    Returns a list of (basename, path, beginline, endline) tuples: one
    "generic_<license>" entry per license name found in LICENSE, plus one
    entry per file listed in LIC_FILES_CHKSUM.
    """
    import shutil  # NOTE(review): shutil appears unused in this function
    import oe.license
    from collections import defaultdict, OrderedDict

    # All the license files for the package
    lic_files = d.getVar('LIC_FILES_CHKSUM') or ""
    pn = d.getVar('PN')
    # The license files are located in S/LIC_FILE_CHECKSUM.
    srcdir = d.getVar('S')
    # Directory we store the generic licenses as set in the distro configuration
    generic_directory = d.getVar('COMMON_LICENSE_DIR')
    # List of basename, path tuples
    lic_files_paths = []
    # hash for keep track generic lics mappings
    non_generic_lics = {}
    # Entries from LIC_FILES_CHKSUM
    lic_chksums = {}
    # Directories searched for generic license texts: COMMON_LICENSE_DIR
    # first, then any extra directories from LICENSE_PATH.
    license_source_dirs = []
    license_source_dirs.append(generic_directory)
    try:
        additional_lic_dirs = d.getVar('LICENSE_PATH').split()
        for lic_dir in additional_lic_dirs:
            license_source_dirs.append(lic_dir)
    except:
        # LICENSE_PATH unset: .split() on None raises; treat as "no extras".
        pass

    class FindVisitor(oe.license.LicenseVisitor):
        def visit_Str(self, node):
            #
            # Until I figure out what to do with
            # the two modifiers I support (or greater = +
            # and "with exceptions" being *
            # we'll just strip out the modifier and put
            # the base license.
            find_license(node.s.replace("+", "").replace("*", ""))
            self.generic_visit(node)

        def visit_Constant(self, node):
            # Newer Python AST delivers string literals as ast.Constant
            # rather than ast.Str; handle both identically.
            find_license(node.value.replace("+", "").replace("*", ""))
            self.generic_visit(node)

    def find_license(license_type):
        # Resolve where the generic text for license_type lives and record it
        # in lic_files_paths (closure over the lists above).
        # NOTE(review): gen_lic_dest is not defined anywhere in this scope;
        # the bare except masks the resulting NameError, so this mkdirhier
        # looks like dead code — confirm before relying on it.
        try:
            bb.utils.mkdirhier(gen_lic_dest)
        except:
            pass
        spdx_generic = None
        license_source = None
        # If the generic does not exist we need to check to see if there is an SPDX mapping to it,
        # unless NO_GENERIC_LICENSE is set.
        for lic_dir in license_source_dirs:
            if not os.path.isfile(os.path.join(lic_dir, license_type)):
                if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
                    # Great, there is an SPDXLICENSEMAP. We can copy!
                    bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
                    spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
                    license_source = lic_dir
                    break
            elif os.path.isfile(os.path.join(lic_dir, license_type)):
                spdx_generic = license_type
                license_source = lic_dir
                break

        non_generic_lic = d.getVarFlag('NO_GENERIC_LICENSE', license_type)
        if spdx_generic and license_source:
            # we really should copy to generic_ + spdx_generic, however, that ends up messing the manifest
            # audit up. This should be fixed in emit_pkgdata (or, we actually got and fix all the recipes)

            lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic),
                                    None, None))

            # The user may attempt to use NO_GENERIC_LICENSE for a generic license which doesn't make sense
            # and should not be allowed, warn the user in this case.
            if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
                oe.qa.handle_error("license-no-generic",
                    "%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type), d)

        elif non_generic_lic and non_generic_lic in lic_chksums:
            # if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
            # of the package rather than the license_source_dirs.
            lic_files_paths.append(("generic_" + license_type,
                                    os.path.join(srcdir, non_generic_lic), None, None))
            non_generic_lics[non_generic_lic] = license_type
        else:
            # Explicitly avoid the CLOSED license because this isn't generic
            if license_type != 'CLOSED':
                # And here is where we warn people that their licenses are lousy
                oe.qa.handle_error("license-exists",
                    "%s: No generic license file exists for: %s in any provider" % (pn, license_type), d)
            pass

    if not generic_directory:
        bb.fatal("COMMON_LICENSE_DIR is unset. Please set this in your distro config")

    # Parse each LIC_FILES_CHKSUM entry (file:// URL) into lic_chksums,
    # keyed by file path.
    for url in lic_files.split():
        try:
            (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
            if method != "file" or not path:
                raise bb.fetch.MalformedUrl()
        except bb.fetch.MalformedUrl:
            bb.fatal("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF'), url))
        # We want the license filename and path
        chksum = parm.get('md5', None)
        beginline = parm.get('beginline')
        endline = parm.get('endline')
        lic_chksums[path] = (chksum, beginline, endline)

    # Walk the LICENSE expression; find_license() fires for every license name.
    v = FindVisitor()
    try:
        v.visit_string(d.getVar('LICENSE'))
    except oe.license.InvalidLicense as exc:
        bb.fatal('%s: %s' % (d.getVar('PF'), exc))
    except SyntaxError:
        oe.qa.handle_error("license-syntax",
            "%s: Failed to parse LICENSE: %s" % (d.getVar('PF'), d.getVar('LICENSE')), d)
    # Add files from LIC_FILES_CHKSUM to list of license files
    lic_chksum_paths = defaultdict(OrderedDict)
    for path, data in sorted(lic_chksums.items()):
        lic_chksum_paths[os.path.basename(path)][data] = (os.path.join(srcdir, path), data[1], data[2])
    for basename, files in lic_chksum_paths.items():
        if len(files) == 1:
            # Don't copy again a LICENSE already handled as non-generic
            if basename in non_generic_lics:
                continue
            data = list(files.values())[0]
            lic_files_paths.append(tuple([basename] + list(data)))
        else:
            # If there are multiple different license files with identical
            # basenames we rename them to <file>.0, <file>.1, ...
            for i, data in enumerate(files.values()):
                lic_files_paths.append(tuple(["%s.%d" % (basename, i)] + list(data)))

    return lic_files_paths
|
||||
|
||||
def return_spdx(d, license):
    """
    This function returns the spdx mapping of a license if it exists.

    The mapping is read from the SPDXLICENSEMAP varflags; None is returned
    when no mapping is defined for *license*.
    """
    mapped = d.getVarFlag('SPDXLICENSEMAP', license)
    return mapped
|
||||
|
||||
def canonical_license(d, license):
    """
    Return the canonical (SPDX) form of the license if available (so GPLv3
    becomes GPL-3.0-only) or the passed license if there is no canonical form.
    """
    spdx = d.getVarFlag('SPDXLICENSEMAP', license)
    if spdx:
        return spdx
    return license
|
||||
|
||||
def expand_wildcard_licenses(d, wildcard_licenses):
    """
    There are some common wildcard values users may want to use. Support them
    here.

    Only the three *GPL-3.0 wildcards below are expanded; any other entry
    containing '*' or any obsolete license name is a fatal error.
    """
    expansions = {
        "AGPL-3.0*": ["AGPL-3.0-only", "AGPL-3.0-or-later"],
        "GPL-3.0*": ["GPL-3.0-only", "GPL-3.0-or-later"],
        "LGPL-3.0*": ["LGPL-3.0-only", "LGPL-3.0-or-later"],
    }
    licenses = set(wildcard_licenses)
    for pattern, replacements in expansions.items():
        if pattern in wildcard_licenses:
            licenses.discard(pattern)
            licenses.update(replacements)

    for lic in licenses:
        if lic in oe.license.obsolete_license_list():
            bb.fatal("Error, %s is an obsolete license, please use an SPDX reference in INCOMPATIBLE_LICENSE" % lic)
        if "*" in lic:
            bb.fatal("Error, %s is an invalid license wildcard entry" % lic)

    return list(licenses)
|
||||
|
||||
def incompatible_license_contains(license, truevalue, falsevalue, d):
    """
    Return *truevalue* when *license* (canonicalised to its SPDX form)
    appears in the expanded INCOMPATIBLE_LICENSE list, *falsevalue*
    otherwise.
    """
    lic = canonical_license(d, license)
    incompatible = expand_wildcard_licenses(
        d, (d.getVar('INCOMPATIBLE_LICENSE') or "").split())
    if lic in incompatible:
        return truevalue
    return falsevalue
|
||||
|
||||
def incompatible_pkg_license(d, dont_want_licenses, license):
    """
    Return the sorted list of canonical licenses from the *license*
    expression that are not allowed by *dont_want_licenses*. "Or"
    alternatives are resolved by picking a fully-acceptable branch when
    one exists.
    """
    # Handles an "or" of two license sets from flattened_licenses(): take
    # the first set when every member is acceptable, the second otherwise.
    def pick_alternative(a, b):
        if all(oe.license.license_ok(canonical_license(d, lic),
                                     dont_want_licenses) for lic in a):
            return a
        return b

    try:
        flattened = oe.license.flattened_licenses(license, pick_alternative)
    except oe.license.LicenseError as exc:
        bb.fatal('%s: %s' % (d.getVar('P'), exc))

    canonicals = (canonical_license(d, lic) for lic in flattened)
    incompatible_lic = [lic for lic in canonicals
                        if not oe.license.license_ok(lic, dont_want_licenses)]
    return sorted(incompatible_lic)
|
||||
|
||||
def incompatible_license(d, dont_want_licenses, package=None):
    """
    This function checks if a recipe has only incompatible licenses. It also
    take into consideration 'or' operand. dont_want_licenses should be passed
    as canonical (SPDX) names.

    When *package* is given, the per-package LICENSE:<package> value is
    consulted first, falling back to the recipe-wide LICENSE.
    """
    import oe.license
    license = (d.getVar("LICENSE:%s" % package) if package else None) \
              or d.getVar('LICENSE')
    return incompatible_pkg_license(d, dont_want_licenses, license)
|
||||
|
||||
def check_license_flags(d):
    """
    This function checks if a recipe has any LICENSE_FLAGS that
    aren't acceptable.

    If it does, it returns the all LICENSE_FLAGS missing from the list
    of acceptable license flags, or all of the LICENSE_FLAGS if there
    is no list of acceptable flags.

    If everything is is acceptable, it returns None.
    """

    def flag_accepted(flag, accepted, pn):
        """
        Return True if *flag* matches an entry in *accepted*, False if not.

        The flag is first tested with _${PN} appended — the normal case
        where LICENSE_FLAGS is a simple string like 'commercial' and the
        user accepts it as e.g. 'commercial_foo'. If that full string does
        not match, successively longer '_'-joined prefixes of it are tried
        until a match is found or the fragments run out.
        """
        candidate = "%s_%s" % (flag, pn)
        if candidate in accepted:
            return True

        fragments = candidate.split("_")
        fragments.pop()  # the full string was already tested above
        prefix = ""
        for fragment in fragments:
            prefix = fragment if not prefix else prefix + "_" + fragment
            if prefix in accepted:
                return True
        return False

    license_flags = d.getVar('LICENSE_FLAGS')
    if not license_flags:
        return None

    acceptlist = d.getVar('LICENSE_FLAGS_ACCEPTED')
    if not acceptlist:
        # No accept list at all: every flag is unaccepted.
        return license_flags.split()

    pn = d.getVar('PN')
    accepted = acceptlist.split()
    unmatched = [flag for flag in license_flags.split()
                 if not flag_accepted(flag, accepted, pn)]
    return unmatched or None
|
||||
|
||||
def check_license_format(d):
    """
    This function checks if LICENSE is well defined,
    Validate operators in LICENSES.
    No spaces are allowed between LICENSES.

    Two problems are reported via oe.qa.handle_error: two license names
    with no operator between them, and a separator that is not one of the
    valid operator characters.
    """
    pn = d.getVar('PN')
    licenses = d.getVar('LICENSE')
    from oe.license import license_operator, license_operator_chars, license_pattern

    elements = [e for e in license_operator.split(licenses) if e.strip()]
    previous_was_license = False
    for element in elements:
        if license_pattern.match(element):
            # Two consecutive license names means the separator was bare
            # whitespace, which is not a valid operator.
            if previous_was_license:
                oe.qa.handle_error('license-format',
                    '%s: LICENSE value "%s" has an invalid format - license names ' \
                    'must be separated by the following characters to indicate ' \
                    'the license selection: %s' %
                    (pn, licenses, license_operator_chars), d)
            previous_was_license = True
        else:
            if not license_operator.match(element):
                oe.qa.handle_error('license-format',
                    '%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
                    'in the valid list of separators (%s)' %
                    (pn, licenses, element, license_operator_chars), d)
            previous_was_license = False
|
||||
|
||||
# Share the collected license files through shared state: do_populate_lic
# stages into LICSSTATEDIR and sstate deploys the result to LICENSE_DIRECTORY.
SSTATETASKS += "do_populate_lic"
do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"

# Image recipes additionally inherit license_image (image-level license handling).
IMAGE_CLASSES:append = " license_image"

# Setscene variant so do_populate_lic output can be restored from sstate
# instead of re-running the task.
python do_populate_lic_setscene () {
    sstate_setscene(d)
}
addtask do_populate_lic_setscene
|
||||
107
sources/poky/meta/classes-global/logging.bbclass
Normal file
107
sources/poky/meta/classes-global/logging.bbclass
Normal file
@@ -0,0 +1,107 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# The following logging mechanisms are to be used in bash functions of recipes.
|
||||
# They are intended to map one to one in intention and output format with the
|
||||
# python recipe logging functions of a similar naming convention: bb.plain(),
|
||||
# bb.note(), etc.
|
||||
|
||||
LOGFIFO = "${T}/fifo.${@os.getpid()}"
|
||||
|
||||
# Print the output exactly as it is passed in. Typically used for output of
|
||||
# tasks that should be seen on the console. Use sparingly.
|
||||
# Output: logs console
|
||||
bbplain() {
    # Inside a task, T/fifo.<pid> is a named pipe read by the BitBake server;
    # send the message through it so the UI sees it. Otherwise plain echo.
    if [ -p ${LOGFIFO} ] ; then
        printf "%b\0" "bbplain $*" > ${LOGFIFO}
    else
        echo "$*"
    fi
}
|
||||
|
||||
# Notify the user of a noteworthy condition.
|
||||
# Output: logs
|
||||
bbnote() {
    # Route through the logging FIFO when available (task context),
    # falling back to a NOTE:-prefixed echo.
    if [ -p ${LOGFIFO} ] ; then
        printf "%b\0" "bbnote $*" > ${LOGFIFO}
    else
        echo "NOTE: $*"
    fi
}
|
||||
|
||||
# Print a warning to the log. Warnings are non-fatal, and do not
|
||||
# indicate a build failure.
|
||||
# Output: logs console
|
||||
bbwarn() {
    # Route through the logging FIFO when available (task context),
    # falling back to a WARNING:-prefixed echo.
    if [ -p ${LOGFIFO} ] ; then
        printf "%b\0" "bbwarn $*" > ${LOGFIFO}
    else
        echo "WARNING: $*"
    fi
}
|
||||
|
||||
# Print an error to the log. Errors are non-fatal in that the build can
|
||||
# continue, but they do indicate a build failure.
|
||||
# Output: logs console
|
||||
bberror() {
    # Route through the logging FIFO when available (task context),
    # falling back to an ERROR:-prefixed echo. Does not exit.
    if [ -p ${LOGFIFO} ] ; then
        printf "%b\0" "bberror $*" > ${LOGFIFO}
    else
        echo "ERROR: $*"
    fi
}
|
||||
|
||||
# Print a fatal error to the log. Fatal errors indicate build failure
|
||||
# and halt the build, exiting with an error code.
|
||||
# Output: logs console
|
||||
bbfatal() {
    # Log the fatal error (via the FIFO when available), then terminate the
    # task with a failing exit code.
    if [ -p ${LOGFIFO} ] ; then
        printf "%b\0" "bbfatal $*" > ${LOGFIFO}
    else
        echo "ERROR: $*"
    fi
    exit 1
}
|
||||
|
||||
# Like bbfatal, except prevents the suppression of the error log by
|
||||
# bitbake's UI.
|
||||
# Output: logs console
|
||||
bbfatal_log() {
    # Same as bbfatal, but the distinct "bbfatal_log" FIFO tag prevents
    # bitbake's UI from suppressing the error log.
    if [ -p ${LOGFIFO} ] ; then
        printf "%b\0" "bbfatal_log $*" > ${LOGFIFO}
    else
        echo "ERROR: $*"
    fi
    exit 1
}
|
||||
|
||||
# Print debug messages. These are appropriate for progress checkpoint
|
||||
# messages to the logs. Depending on the debug log level, they may also
|
||||
# go to the console.
|
||||
# Output: logs console
|
||||
# Usage: bbdebug 1 "first level debug message"
|
||||
# bbdebug 2 "second level debug message"
|
||||
bbdebug() {
    # Usage: bbdebug <level> "message" — level must be a positive integer.
    USAGE='Usage: bbdebug [123] "message"'
    if [ $# -lt 2 ]; then
        bbfatal "$USAGE"
    fi

    # Strip off the debug level and ensure it is an integer
    DBGLVL=$1; shift
    NONDIGITS=$(echo "$DBGLVL" | tr -d "[:digit:]")
    if [ "$NONDIGITS" ]; then
        bbfatal "$USAGE"
    fi

    # All debug output is printed to the logs
    if [ -p ${LOGFIFO} ] ; then
        printf "%b\0" "bbdebug $DBGLVL $*" > ${LOGFIFO}
    else
        echo "DEBUG: $*"
    fi
}
|
||||
|
||||
104
sources/poky/meta/classes-global/mirrors.bbclass
Normal file
104
sources/poky/meta/classes-global/mirrors.bbclass
Normal file
@@ -0,0 +1,104 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
MIRRORS += "\
|
||||
${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \
|
||||
${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \
|
||||
${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \
|
||||
${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \
|
||||
${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \
|
||||
${GNU_MIRROR} https://mirrors.kernel.org/gnu \
|
||||
${KERNELORG_MIRROR} http://www.kernel.org/pub \
|
||||
${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \
|
||||
${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \
|
||||
${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \
|
||||
ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \
|
||||
ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \
|
||||
ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \
|
||||
ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \
|
||||
http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \
|
||||
http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \
|
||||
${APACHE_MIRROR} http://www.us.apache.org/dist \
|
||||
${APACHE_MIRROR} http://archive.apache.org/dist \
|
||||
http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \
|
||||
${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \
|
||||
${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \
|
||||
ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \
|
||||
ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \
|
||||
ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \
|
||||
cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
gitsm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
https?://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \
|
||||
cvs://.*/.* http://sources.openembedded.org/ \
|
||||
svn://.*/.* http://sources.openembedded.org/ \
|
||||
git://.*/.* http://sources.openembedded.org/ \
|
||||
gitsm://.*/.* http://sources.openembedded.org/ \
|
||||
hg://.*/.* http://sources.openembedded.org/ \
|
||||
bzr://.*/.* http://sources.openembedded.org/ \
|
||||
p4://.*/.* http://sources.openembedded.org/ \
|
||||
osc://.*/.* http://sources.openembedded.org/ \
|
||||
https?://.*/.* http://sources.openembedded.org/ \
|
||||
ftp://.*/.* http://sources.openembedded.org/ \
|
||||
npm://.*/?.* http://sources.openembedded.org/ \
|
||||
${CPAN_MIRROR} https://cpan.metacpan.org/ \
|
||||
https?://downloads.yoctoproject.org/releases/uninative/ https://mirrors.kernel.org/yocto/uninative/ \
|
||||
https?://downloads.yoctoproject.org/mirror/sources/ https://mirrors.kernel.org/yocto-sources/ \
|
||||
"
|
||||
|
||||
# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
|
||||
# where git native protocol fetches may fail due to local firewall rules, etc.
|
||||
|
||||
MIRRORS += "\
|
||||
git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \
|
||||
git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \
|
||||
git://git.infradead.org/.* git://git.infraroot.at/PATH;protocol=https \
|
||||
git://.*/.* git://HOST/PATH;protocol=https \
|
||||
git://.*/.* git://HOST/git/PATH;protocol=https \
|
||||
"
|
||||
|
||||
# Switch llvm, glibc and binutils recipes to use shallow clones as they're large and this
# improves user experience whilst allowing the flexibility of git urls in the recipes
BB_GIT_SHALLOW:pn-binutils = "1"
BB_GIT_SHALLOW:pn-binutils-cross-${TARGET_ARCH} = "1"
BB_GIT_SHALLOW:pn-binutils-cross-canadian-${TRANSLATED_TARGET_ARCH} = "1"
BB_GIT_SHALLOW:pn-binutils-cross-testsuite = "1"
BB_GIT_SHALLOW:pn-binutils-crosssdk-${SDK_SYS} = "1"
BB_GIT_SHALLOW:pn-binutils-native = "1"
BB_GIT_SHALLOW:pn-nativesdk-binutils = "1"

BB_GIT_SHALLOW:pn-cross-localedef-native = "1"
BB_GIT_SHALLOW:pn-glibc = "1"
BB_GIT_SHALLOW:pn-glibc-tests = "1"
# Prefer the Yocto source mirror for the glibc/binutils-gdb git repos before
# hitting sourceware.org directly.
PREMIRRORS += "git://sourceware.org/git/glibc.git https://downloads.yoctoproject.org/mirror/sources/ \
               git://sourceware.org/git/binutils-gdb.git https://downloads.yoctoproject.org/mirror/sources/"

BB_GIT_SHALLOW:pn-llvm = "1"
BB_GIT_SHALLOW:pn-llvm-native = "1"
BB_GIT_SHALLOW:pn-nativesdk-llvm = "1"
|
||||
616
sources/poky/meta/classes-global/package.bbclass
Normal file
616
sources/poky/meta/classes-global/package.bbclass
Normal file
@@ -0,0 +1,616 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
#
|
||||
# Packaging process
|
||||
#
|
||||
# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS
|
||||
# Taking D and splitting it up into the packages listed in PACKAGES, placing the
|
||||
# resulting output in PKGDEST.
|
||||
#
|
||||
# There are the following default steps but PACKAGEFUNCS can be extended:
|
||||
#
|
||||
# a) package_convert_pr_autoinc - convert AUTOINC in PKGV to ${PRSERV_PV_AUTOINC}
|
||||
#
|
||||
# b) perform_packagecopy - Copy D into PKGD
|
||||
#
|
||||
# c) package_do_split_locales - Split out the locale files, updates FILES and PACKAGES
|
||||
#
|
||||
# d) split_and_strip_files - split the files into runtime and debug and strip them.
|
||||
# Debug files include debug info split, and associated sources that end up in -dbg packages
|
||||
#
|
||||
# e) fixup_perms - Fix up permissions in the package before we split it.
|
||||
#
|
||||
# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/<pkgname>
|
||||
# Also triggers the binary stripping code to put files in -dbg packages.
|
||||
#
|
||||
# g) package_do_filedeps - Collect perfile run-time dependency metadata
|
||||
# The data is stores in FILER{PROVIDES,DEPENDS}_file_pkg variables with
|
||||
# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
|
||||
#
|
||||
# h) package_do_shlibs - Look at the shared libraries generated and autotmatically add any
|
||||
# dependencies found. Also stores the package name so anyone else using this library
|
||||
# knows which package to depend on.
|
||||
#
|
||||
# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
|
||||
#
|
||||
# j) read_shlibdeps - Reads the stored shlibs information into the metadata
|
||||
#
|
||||
# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages
|
||||
#
|
||||
# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later
|
||||
# packaging steps
|
||||
|
||||
inherit packagedata
|
||||
inherit chrpath
|
||||
inherit package_pkgdata
|
||||
inherit insane
|
||||
|
||||
PKGD = "${WORKDIR}/package"
|
||||
PKGDEST = "${WORKDIR}/packages-split"
|
||||
|
||||
LOCALE_SECTION ?= ''
|
||||
|
||||
ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
|
||||
|
||||
# rpm is used for the per-file dependency identification
|
||||
# dwarfsrcfiles is used to determine the list of debug source files
|
||||
PACKAGE_DEPENDS += "rpm-native dwarfsrcfiles-native"
|
||||
|
||||
# If your postinstall can execute at rootfs creation time rather than on
|
||||
# target but depends on a native/cross tool in order to execute, you need to
|
||||
# list that tool in PACKAGE_WRITE_DEPS. Target package dependencies belong
|
||||
# in the package dependencies as normal, this is just for native/cross support
|
||||
# tools at rootfs build time.
|
||||
PACKAGE_WRITE_DEPS ??= ""
|
||||
|
||||
def legitimize_package_name(s):
    """
    Delegate to oe.package.legitimize_package_name.

    Presumably retained as a compatibility shim so callers of the old
    bbclass-level name keep working — verify before removing.
    """
    return oe.package.legitimize_package_name(s)
|
||||
|
||||
def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False, summary=None):
    """
    Used in .bb files to split up dynamically generated subpackages of a
    given package, usually plugins or modules.

    Arguments:
    root           -- the path in which to search
    file_regex     -- regular expression to match searched files. Use
                      parentheses () to mark the part of this expression
                      that should be used to derive the module name (to be
                      substituted where %s is used in other function
                      arguments as noted below)
    output_pattern -- pattern to use for the package names. Must include %s.
    description    -- description to set for each package. Must include %s.
    postinst       -- postinstall script to use for all packages (as a
                      string)
    recursive      -- True to perform a recursive search - default False
    hook           -- a hook function to be called for every match. The
                      function will be called with the following arguments
                      (in the order listed):
                        f: full path to the file/directory match
                        pkg: the package name
                        file_regex: as above
                        output_pattern: as above
                        modulename: the module name derived using file_regex
    extra_depends  -- extra runtime dependencies (RDEPENDS) to be set for
                      all packages. The default value of None causes a
                      dependency on the main package (${PN}) - if you do
                      not want this, pass '' for this parameter.
    aux_files_pattern -- extra item(s) to be added to FILES for each
                      package. Can be a single string item or a list of
                      strings for multiple items. Must include %s.
    postrm         -- postrm script to use for all packages (as a string)
    allow_dirs     -- True allow directories to be matched - default False
    prepend        -- if True, prepend created packages to PACKAGES instead
                      of the default False which appends them
    match_path     -- match file_regex on the whole relative path to the
                      root rather than just the file name
    aux_files_pattern_verbatim -- extra item(s) to be added to FILES for
                      each package, using the actual derived module name
                      rather than converting it to something legal for a
                      package name. Can be a single string item or a list
                      of strings for multiple items. Must include %s.
    allow_links    -- True to allow symlinks to be matched - default False
    summary        -- Summary to set for each package. Must include %s;
                      defaults to description if not set.

    Returns the list of package names created (may be empty).
    """

    dvar = d.getVar('PKGD')
    root = d.expand(root)
    output_pattern = d.expand(output_pattern)
    extra_depends = d.expand(extra_depends)

    # If the root directory doesn't exist, don't error out later but silently do
    # no splitting.
    if not os.path.exists(dvar + root):
        return []

    # In a multilib build, prefix the generated package names and the extra
    # runtime dependencies with MLPREFIX.
    ml = d.getVar("MLPREFIX")
    if ml:
        if not output_pattern.startswith(ml):
            output_pattern = ml + output_pattern

        newdeps = []
        for dep in (extra_depends or "").split():
            if dep.startswith(ml):
                newdeps.append(dep)
            else:
                newdeps.append(ml + dep)
        if newdeps:
            extra_depends = " ".join(newdeps)

    packages = d.getVar('PACKAGES').split()
    split_packages = set()

    if postinst:
        postinst = '#!/bin/sh\n' + postinst + '\n'
    if postrm:
        postrm = '#!/bin/sh\n' + postrm + '\n'
    # Collect candidate paths, relative to dvar + root.
    if not recursive:
        objs = os.listdir(dvar + root)
    else:
        objs = []
        for walkroot, dirs, files in os.walk(dvar + root):
            for file in files:
                relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
                if relpath:
                    objs.append(relpath)

    # None (the default) means "depend on the main package"; '' disables.
    if extra_depends == None:
        extra_depends = d.getVar("PN")

    if not summary:
        summary = description

    for o in sorted(objs):
        import re, stat
        if match_path:
            m = re.match(file_regex, o)
        else:
            m = re.match(file_regex, os.path.basename(o))

        if not m:
            continue
        f = os.path.join(dvar + root, o)
        mode = os.lstat(f).st_mode
        # Skip anything that is not a regular file unless links/dirs were
        # explicitly allowed.
        if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
            continue
        on = oe.package.legitimize_package_name(m.group(1))
        pkg = output_pattern % on
        split_packages.add(pkg)
        if not pkg in packages:
            if prepend:
                packages = [pkg] + packages
            else:
                packages.append(pkg)
        oldfiles = d.getVar('FILES:' + pkg)
        newfile = os.path.join(root, o)
        # These names will be passed through glob() so if the filename actually
        # contains * or ? (rare, but possible) we need to handle that specially
        newfile = newfile.replace('*', '[*]')
        newfile = newfile.replace('?', '[?]')
        if not oldfiles:
            # First file for this package: build FILES from scratch,
            # including any aux patterns.
            the_files = [newfile]
            if aux_files_pattern:
                if type(aux_files_pattern) is list:
                    for fp in aux_files_pattern:
                        the_files.append(fp % on)
                else:
                    the_files.append(aux_files_pattern % on)
            if aux_files_pattern_verbatim:
                if type(aux_files_pattern_verbatim) is list:
                    for fp in aux_files_pattern_verbatim:
                        the_files.append(fp % m.group(1))
                else:
                    the_files.append(aux_files_pattern_verbatim % m.group(1))
            d.setVar('FILES:' + pkg, " ".join(the_files))
        else:
            d.setVar('FILES:' + pkg, oldfiles + " " + newfile)
        if extra_depends != '':
            d.appendVar('RDEPENDS:' + pkg, ' ' + extra_depends)
        # Only fill in DESCRIPTION/SUMMARY if the recipe has not set them.
        if not d.getVar('DESCRIPTION:' + pkg):
            d.setVar('DESCRIPTION:' + pkg, description % on)
        if not d.getVar('SUMMARY:' + pkg):
            d.setVar('SUMMARY:' + pkg, summary % on)
        if postinst:
            d.setVar('pkg_postinst:' + pkg, postinst)
        if postrm:
            d.setVar('pkg_postrm:' + pkg, postrm)
        if callable(hook):
            hook(f, pkg, file_regex, output_pattern, m.group(1))

    d.setVar('PACKAGES', ' '.join(packages))
    return list(split_packages)
|
||||
|
||||
PACKAGE_DEPENDS += "file-native"
|
||||
|
||||
# Anonymous function: wire up do_package's build-time dependencies at parse
# time (skipped entirely when PACKAGES is empty).
python () {
    if d.getVar('PACKAGES') != '':
        deps = ""
        for dep in (d.getVar('PACKAGE_DEPENDS') or "").split():
            deps += " %s:do_populate_sysroot" % dep
        # xz-native is additionally required when the minidebuginfo distro
        # feature is enabled.
        if bb.utils.contains('DISTRO_FEATURES', 'minidebuginfo', True, False, d):
            deps += ' xz-native:do_populate_sysroot'
        d.appendVarFlag('do_package', 'depends', deps)

        # shlibs requires any DEPENDS to have already packaged for the *.list files
        d.appendVarFlag('do_package', 'deptask', " do_packagedata")
}
|
||||
|
||||
|
||||
# Whether a PR server is configured; vardepvalue pins task signatures to the
# boolean value rather than to the PRSERV_HOST string itself.
PRSERV_ACTIVE = "${@bool(d.getVar("PRSERV_HOST"))}"
PRSERV_ACTIVE[vardepvalue] = "${PRSERV_ACTIVE}"
# BB_TASKDEPDATA varies between builds, so exclude it from the signature and
# depend on the stable PRSERV_ACTIVE flag instead.
package_get_auto_pr[vardepsexclude] = "BB_TASKDEPDATA"
package_get_auto_pr[vardeps] += "PRSERV_ACTIVE"
|
||||
python package_get_auto_pr() {
|
||||
import oe.prservice
|
||||
|
||||
def get_do_package_hash(pn):
|
||||
if d.getVar("BB_RUNTASK") != "do_package":
|
||||
taskdepdata = d.getVar("BB_TASKDEPDATA", False)
|
||||
for dep in taskdepdata:
|
||||
if taskdepdata[dep][1] == "do_package" and taskdepdata[dep][0] == pn:
|
||||
return taskdepdata[dep][6]
|
||||
return None
|
||||
|
||||
# Support per recipe PRSERV_HOST
|
||||
pn = d.getVar('PN')
|
||||
host = d.getVar("PRSERV_HOST_" + pn)
|
||||
if not (host is None):
|
||||
d.setVar("PRSERV_HOST", host)
|
||||
|
||||
pkgv = d.getVar("PKGV")
|
||||
|
||||
# PR Server not active, handle AUTOINC
|
||||
if not d.getVar('PRSERV_HOST'):
|
||||
d.setVar("PRSERV_PV_AUTOINC", "0")
|
||||
return
|
||||
|
||||
auto_pr = None
|
||||
pv = d.getVar("PV")
|
||||
version = d.getVar("PRAUTOINX")
|
||||
pkgarch = d.getVar("PACKAGE_ARCH")
|
||||
checksum = get_do_package_hash(pn)
|
||||
|
||||
# If do_package isn't in the dependencies, we can't get the checksum...
|
||||
if not checksum:
|
||||
bb.warn('Task %s requested do_package unihash, but it was not available.' % d.getVar('BB_RUNTASK'))
|
||||
#taskdepdata = d.getVar("BB_TASKDEPDATA", False)
|
||||
#for dep in taskdepdata:
|
||||
# bb.warn('%s:%s = %s' % (taskdepdata[dep][0], taskdepdata[dep][1], taskdepdata[dep][6]))
|
||||
return
|
||||
|
||||
if d.getVar('PRSERV_LOCKDOWN'):
|
||||
auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
|
||||
if auto_pr is None:
|
||||
bb.fatal("Can NOT get PRAUTO from lockdown exported file")
|
||||
d.setVar('PRAUTO',str(auto_pr))
|
||||
return
|
||||
|
||||
try:
|
||||
conn = oe.prservice.prserv_make_conn(d)
|
||||
if conn is not None:
|
||||
if "AUTOINC" in pkgv:
|
||||
srcpv = bb.fetch2.get_srcrev(d)
|
||||
base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
|
||||
value = conn.getPR(base_ver, pkgarch, srcpv)
|
||||
d.setVar("PRSERV_PV_AUTOINC", str(value))
|
||||
|
||||
auto_pr = conn.getPR(version, pkgarch, checksum)
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
|
||||
if auto_pr is None:
|
||||
bb.fatal("Can NOT get PRAUTO from remote PR service")
|
||||
d.setVar('PRAUTO',str(auto_pr))
|
||||
}
|
||||
|
||||
#
|
||||
# Package functions suitable for inclusion in PACKAGEFUNCS
|
||||
#
|
||||
|
||||
# Finalize PKGV: append the fetcher's source-revision string when PKGV still
# carries a '+' placeholder, then substitute AUTOINC with the value the PR
# service will provide.
python package_setup_pkgv() {
    pkgv = d.getVar("PKGV")
    # Expand SRCPV into PKGV if not present
    srcpv = bb.fetch.get_pkgv_string(d)
    if srcpv and "+" in pkgv:
        d.appendVar("PKGV", srcpv)
        pkgv = d.getVar("PKGV")

    # Adjust pkgv as necessary...
    if 'AUTOINC' in pkgv:
        d.setVar("PKGV", pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}"))
}
|
||||
|
||||
|
||||
# Replace PRSERV_PV_AUTOINC and EXTENDPRAUTO with fixed @NAME@ placeholder
# markers; packagedata written with these markers is rewritten to the final
# values later by packagedata_translate_pr_autoinc.
python package_convert_pr_autoinc() {
    for name in ('PRSERV_PV_AUTOINC', 'EXTENDPRAUTO'):
        d.setVar(name, '@%s@' % name)
}
|
||||
|
||||
# Base package name used when naming the generated locale packages.
LOCALEBASEPN ??= "${PN}"
# Directories searched for locale data to split out.
LOCALE_PATHS ?= "${datadir}/locale"

# Split locale files out of the main package (implementation in oe.package).
python package_do_split_locales() {
    oe.package.split_locales(d)
}
|
||||
|
||||
# Copy the installed tree from D into PKGD so packaging can modify files
# without disturbing the do_install output.
python perform_packagecopy () {
    import subprocess
    import shutil

    dest = d.getVar('D')
    dvar = d.getVar('PKGD')

    # Start by package population by taking a copy of the installed
    # files to operate on
    # Preserve sparse files and hard links
    cmd = 'tar --exclude=./sysroot-only -cf - -C %s -p -S . | tar -xf - -C %s' % (dest, dvar)
    subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)

    # replace RPATHs for the nativesdk binaries, to make them relocatable
    if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
        rpath_replace (dvar, d)
}
# PKGD is wiped and recreated on every run so stale files never leak in.
perform_packagecopy[cleandirs] = "${PKGD}"
perform_packagecopy[dirs] = "${PKGD}"
|
||||
|
||||
# Split the PKGD tree into per-package trees under PKGDEST
# (implementation in oe.package).
python populate_packages () {
    oe.package.populate_packages(d)
}
populate_packages[dirs] = "${D}"

# Resolve dangling symlinks between split packages (implementation in
# oe.package); uses the global pkgfiles map built by do_package.
python package_fixsymlinks () {
    oe.package.process_fixsymlinks(pkgfiles, d)
}

python package_package_name_hook() {
    """
    A package_name_hook function can be used to rewrite the package names by
    changing PKG. For an example, see debian.bbclass.
    """
    pass
}

# Allow other classes to override the hook as <prefix>_package_name_hook.
EXPORT_FUNCTIONS package_name_hook
|
||||
|
||||
|
||||
# Staging area for the per-package metadata emitted by emit_pkgdata.
PKGDESTWORK = "${WORKDIR}/pkgdata"

# Variables recorded into pkgdata for each package.
PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS RPROVIDES RRECOMMENDS RSUGGESTS RREPLACES RCONFLICTS SECTION PKG ALLOW_EMPTY FILES CONFFILES FILES_INFO PACKAGE_ADD_METADATA pkg_postinst pkg_postrm pkg_preinst pkg_prerm"

# Write the pkgdata files describing each split package (implementation in
# oe.packagedata).
python emit_pkgdata() {
    import oe.packagedata
    oe.packagedata.emit_pkgdata(pkgfiles, d)
}
emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides ${PKGDESTWORK}/extended"

# Shell snippet appended to postinsts that need the runtime linker cache
# refreshed; only runs on target ($D empty), never inside the sysroot.
ldconfig_postinst_fragment() {
	if [ x"$D" = "x" ]; then
		if [ -x /sbin/ldconfig ]; then /sbin/ldconfig ; fi
	fi
}

# Command used to extract per-file dependency information.
RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps --define '__font_provides %{nil}'"

# Collect per-file provides/requires for each package (implementation in
# oe.package).
python package_do_filedeps() {
    oe.package.process_filedeps(pkgfiles, d)
}

SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"

# Record shared-library provides/needs for each package (implementation in
# oe.package).
python package_do_shlibs() {
    oe.package.process_shlibs(pkgfiles, d)
}

# Record pkg-config provides/requires for each package (implementation in
# oe.package).
python package_do_pkgconfig () {
    oe.package.process_pkgconfig(pkgfiles, d)
}
|
||||
|
||||
# Fold the library dependencies recorded by the shlibs/pkgconfig scans into
# each package's RDEPENDS, keeping any entries and version constraints that
# are already present.
python read_shlibdeps () {
    pkglibdeps = oe.package.read_libdep_files(d)

    for pkg in d.getVar('PACKAGES').split():
        rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
        libdeps = pkglibdeps[pkg]
        for dep in sorted(libdeps):
            # Add the dep if it's not already there, or if no comparison is set
            constraints = rdepends.setdefault(dep, [])
            for ver in libdeps[dep]:
                if ver not in constraints:
                    constraints.append(ver)
        d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
}
|
||||
|
||||
# Propagate dependencies to derived packages such as -dev/-dbg chains
# (implementation in oe.package).
python package_depchains() {
    oe.package.process_depchains(pkgfiles, d)
}

# Since bitbake can't determine which variables are accessed during package
# iteration, we need to list them here:
PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm pkg_postinst_ontarget INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE SKIP_FILEDEPS PRIVATE_LIBS PACKAGE_ADD_METADATA"
|
||||
|
||||
def gen_packagevar(d, pkgvars="PACKAGEVARS"):
    """Return the space-separated variable list for a task's [vardeps].

    The result contains every name listed in the *pkgvars* variable, followed
    by its per-package override form (``VAR:pkg``) for each package in
    PACKAGES, plus one ``_exclude_incompatible-<pkg>`` entry per package.
    """
    pkgs = (d.getVar("PACKAGES") or "").split()
    watched = (d.getVar(pkgvars) or "").split()

    names = list(watched)
    for pkg in pkgs:
        names.extend("%s:%s" % (var, pkg) for var in watched)

        # Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package for
        # affected recipes.
        names.append('_exclude_incompatible-%s' % pkg)
    return " ".join(names)
|
||||
|
||||
|
||||
# Functions for setting up PKGD
PACKAGE_PREPROCESS_FUNCS ?= ""
# Functions which split PKGD up into separate packages
PACKAGESPLITFUNCS ?= " \
                package_do_split_locales \
                populate_packages"
# Functions which process metadata based on split packages
PACKAGEFUNCS += " \
                package_fixsymlinks \
                package_name_hook \
                package_do_filedeps \
                package_do_shlibs \
                package_do_pkgconfig \
                read_shlibdeps \
                package_depchains \
                emit_pkgdata"
|
||||
|
||||
# Main packaging task: sanity-check the setup, copy D into PKGD, split it
# into per-package trees under PKGDEST, then run the metadata-processing
# function lists over the result.
python do_package () {
    # Change the following version to cause sstate to invalidate the package
    # cache. This is useful if an item this class depends on changes in a
    # way that the output of this class changes. rpmdeps is a good example
    # as any change to rpmdeps requires this to be rerun.
    # PACKAGE_BBCLASS_VERSION = "5"

    # Init cachedpath
    global cpath
    cpath = oe.cachedpath.CachedPath()

    ###########################################################################
    # Sanity test the setup
    ###########################################################################

    packages = (d.getVar('PACKAGES') or "").split()
    if len(packages) < 1:
        bb.debug(1, "No packages to build, skipping do_package")
        return

    workdir = d.getVar('WORKDIR')
    outdir = d.getVar('DEPLOY_DIR')
    dest = d.getVar('D')
    dvar = d.getVar('PKGD')
    pn = d.getVar('PN')

    if not workdir or not outdir or not dest or not dvar or not pn:
        msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
        oe.qa.handle_error("var-undefined", msg, d)
        return

    bb.build.exec_func("package_setup_pkgv", d)
    bb.build.exec_func("package_convert_pr_autoinc", d)

    # Check for conflict between renamed packages and existing ones
    # for each package in PACKAGES, check if it will be renamed to an existing one
    for p in packages:
        rename = d.getVar('PKG:%s' % p)
        if rename and rename in packages:
            bb.fatal('package "%s" is renamed to "%s" using PKG:%s, but package name already exists' % (p, rename, p))

    ###########################################################################
    # Optimisations
    ###########################################################################

    # Continually expanding complex expressions is inefficient, particularly
    # when we write to the datastore and invalidate the expansion cache. This
    # code pre-expands some frequently used variables

    def expandVar(x, d):
        d.setVar(x, d.getVar(x))

    for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
        expandVar(x, d)

    ###########################################################################
    # Setup PKGD (from D)
    ###########################################################################

    bb.build.exec_func("package_prepare_pkgdata", d)
    bb.build.exec_func("perform_packagecopy", d)
    for f in (d.getVar('PACKAGE_PREPROCESS_FUNCS') or '').split():
        bb.build.exec_func(f, d)
    oe.package.process_split_and_strip_files(d)
    oe.package.fixup_perms(d)

    ###########################################################################
    # Split up PKGD into PKGDEST
    ###########################################################################

    cpath = oe.cachedpath.CachedPath()

    for f in (d.getVar('PACKAGESPLITFUNCS') or '').split():
        bb.build.exec_func(f, d)

    ###########################################################################
    # Process PKGDEST
    ###########################################################################

    # Build global list of files in each split package
    global pkgfiles
    pkgfiles = {}
    packages = d.getVar('PACKAGES').split()
    pkgdest = d.getVar('PKGDEST')
    for pkg in packages:
        pkgfiles[pkg] = []
        for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
            for file in files:
                pkgfiles[pkg].append(walkroot + os.sep + file)

    for f in (d.getVar('PACKAGEFUNCS') or '').split():
        bb.build.exec_func(f, d)

    oe.qa.exit_if_errors(d)
}
|
||||
|
||||
do_package[dirs] = "${SHLIBSWORKDIR} ${D}"
# The task signature must change when any of the function lists or the
# per-package variables they read change.
do_package[vardeps] += "${PACKAGE_PREPROCESS_FUNCS} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
addtask package after do_install

SSTATETASKS += "do_package"
do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
do_package_setscene[dirs] = "${STAGING_DIR}"

# Restore do_package output from shared state instead of re-running it.
python do_package_setscene () {
    sstate_setscene(d)
}
addtask do_package_setscene

# Copy from PKGDESTWORK to tempdirectory as tempdirectory can be cleaned at both
# do_package_setscene and do_packagedata_setscene leading to races
python do_packagedata () {
    bb.build.exec_func("package_setup_pkgv", d)
    bb.build.exec_func("package_get_auto_pr", d)

    src = d.expand("${PKGDESTWORK}")
    dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
    oe.path.copyhardlinktree(src, dest)

    bb.build.exec_func("packagedata_translate_pr_autoinc", d)
}
do_packagedata[cleandirs] += "${WORKDIR}/pkgdata-pdata-input"

# Translate the EXTENDPRAUTO and AUTOINC to the final values
packagedata_translate_pr_autoinc() {
    find ${WORKDIR}/pkgdata-pdata-input -type f | xargs --no-run-if-empty \
        sed -e 's,@PRSERV_PV_AUTOINC@,${PRSERV_PV_AUTOINC},g' \
            -e 's,@EXTENDPRAUTO@,${EXTENDPRAUTO},g' -i
}

addtask packagedata before do_build after do_package

SSTATETASKS += "do_packagedata"
do_packagedata[sstate-inputdirs] = "${WORKDIR}/pkgdata-pdata-input"
do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
do_packagedata[stamp-extra-info] = "${MACHINE_ARCH}"

# Restore do_packagedata output from shared state.
python do_packagedata_setscene () {
    sstate_setscene(d)
}
addtask do_packagedata_setscene
|
||||
|
||||
333
sources/poky/meta/classes-global/package_deb.bbclass
Normal file
333
sources/poky/meta/classes-global/package_deb.bbclass
Normal file
@@ -0,0 +1,333 @@
|
||||
#
# Copyright 2006-2008 OpenedHand Ltd.
#
# SPDX-License-Identifier: MIT
#

inherit package

IMAGE_PKGTYPE ?= "deb"

# Command used to build each .deb archive; overridable by the distro.
DPKG_BUILDCMD ??= "dpkg-deb"

# Debian architecture name derived from TARGET_ARCH/TUNE_FEATURES; pinned
# via vardepvalue to its expanded value.
DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'))}"
DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"

# Output directory for the generated .deb files.
PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"

APTCONF_TARGET = "${WORKDIR}"

# Pass --no-install-recommends to apt when NO_RECOMMENDATIONS is set.
APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
||||
def debian_arch_map(arch, tune):
    """Map an OE TARGET_ARCH plus TUNE_FEATURES to the Debian arch name."""
    features = set(tune.split())

    # Straight one-to-one renames.
    direct = {"allarch": "all", "i586": "i386", "i686": "i386", "aarch64": "arm64"}
    if arch in direct:
        return direct[arch]

    if arch == "x86_64":
        # The x32 ABI uses 32-bit pointers on the 64-bit ISA.
        return "x32" if "mx32" in features else "amd64"

    if arch.startswith("mips"):
        endian = "" if "bigendian" in features else "el"
        if "n64" in features:
            return "mips64" + endian
        if "n32" in features:
            return "mipsn32" + endian
        return "mips" + endian

    if arch == "powerpc":
        return arch + ("spe" if "spe" in features else "")

    if arch == "arm":
        return arch + ("hf" if "callconvention-hard" in features else "el")

    # Anything else already matches the Debian name.
    return arch
|
||||
|
||||
# Build a .deb for every split package, fanning the work out across cores.
python do_package_deb () {
    packages = d.getVar('PACKAGES')
    if not packages:
        bb.debug(1, "PACKAGES not defined, nothing to package")
        return

    tmpdir = d.getVar('TMPDIR')
    # New packages invalidate the feed index; drop the "index is clean" stamp.
    if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
        os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))

    oe.utils.multiprocess_launch(deb_write_pkg, packages.split(), d, extraargs=(d,))
}
# deb_write_pkg does the real work, so its body must be in the signature;
# the thread count must not be.
do_package_deb[vardeps] += "deb_write_pkg"
do_package_deb[vardepsexclude] = "BB_NUMBER_THREADS"
|
||||
|
||||
def deb_write_pkg(pkg, d):
    """Write one binary .deb for split package *pkg*.

    Invoked (via oe.utils.multiprocess_launch) from do_package_deb: builds
    the DEBIAN/control metadata, maintainer scripts and conffiles list inside
    the package's PKGDEST tree, then runs ${DPKG_BUILDCMD} to produce the
    archive under ${PKGWRITEDIRDEB}/<arch>/.
    """
    import re, copy
    import textwrap
    import subprocess
    import collections
    import codecs

    outdir = d.getVar('PKGWRITEDIRDEB')
    pkgdest = d.getVar('PKGDEST')

    # Remove any stale packaging control directories from a previous run.
    def cleanupcontrol(root):
        for p in ['CONTROL', 'DEBIAN']:
            p = os.path.join(root, p)
            if os.path.exists(p):
                bb.utils.prunedir(p)

    localdata = bb.data.createCopy(d)
    root = "%s/%s" % (pkgdest, pkg)

    # Serialise access to this package tree across worker processes.
    lf = bb.utils.lockfile(root + ".lock")
    try:

        localdata.setVar('ROOT', '')
        localdata.setVar('ROOT_%s' % pkg, root)
        pkgname = localdata.getVar('PKG:%s' % pkg)
        if not pkgname:
            pkgname = pkg
        localdata.setVar('PKG', pkgname)

        # Make per-package overrides (VAR:<pkg>) take effect in localdata.
        localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)

        basedir = os.path.join(os.path.dirname(root))

        pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH'))
        bb.utils.mkdirhier(pkgoutdir)

        os.chdir(root)
        cleanupcontrol(root)
        from glob import glob
        g = glob('*')
        if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
            bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
            return

        controldir = os.path.join(root, 'DEBIAN')
        bb.utils.mkdirhier(controldir)
        os.chmod(controldir, 0o755)

        ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')

        # (format string, [variable names]) pairs for the control file.
        fields = []
        pe = d.getVar('PKGE')
        if pe and int(pe) > 0:
            fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
        else:
            fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
        fields.append(["Description: %s\n", ['DESCRIPTION']])
        fields.append(["Section: %s\n", ['SECTION']])
        fields.append(["Priority: %s\n", ['PRIORITY']])
        fields.append(["Maintainer: %s\n", ['MAINTAINER']])
        fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
        fields.append(["OE: %s\n", ['PN']])
        fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
        if d.getVar('HOMEPAGE'):
            fields.append(["Homepage: %s\n", ['HOMEPAGE']])

        # Package, Version, Maintainer, Description - mandatory
        # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional

        # Resolve each variable name; raises KeyError when one is unset so
        # the caller sees a hard failure for a missing mandatory field.
        def pullData(l, d):
            l2 = []
            for i in l:
                data = d.getVar(i)
                if data is None:
                    raise KeyError(i)
                if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH') == 'all':
                    data = 'all'
                elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
                    # The params in deb package control don't allow character
                    # `_', so change the arch's `_' to `-'. Such as `x86_64'
                    # -->`x86-64'
                    data = data.replace('_', '-')
                l2.append(data)
            return l2

        ctrlfile.write("Package: %s\n" % pkgname)
        if d.getVar('PACKAGE_ARCH') == "all":
            ctrlfile.write("Multi-Arch: foreign\n")
        # check for required fields
        for (c, fs) in fields:
            # Special behavior for description...
            if 'DESCRIPTION' in fs:
                summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
                ctrlfile.write('Description: %s\n' % summary)
                description = localdata.getVar('DESCRIPTION') or "."
                description = textwrap.dedent(description).strip()
                if '\\n' in description:
                    # Manually indent
                    for t in description.split('\\n'):
                        ctrlfile.write(' %s\n' % (t.strip() or '.'))
                else:
                    # Auto indent
                    ctrlfile.write('%s\n' % textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))

            else:
                ctrlfile.write(c % tuple(pullData(fs, localdata)))

        # more fields

        custom_fields_chunk = oe.packagedata.get_package_additional_metadata("deb", localdata)
        if custom_fields_chunk:
            ctrlfile.write(custom_fields_chunk)
        ctrlfile.write("\n")

        oe.packagedata.mapping_rename_hook(localdata)

        def debian_cmp_remap(var):
            # dpkg does not allow for '(', ')' or ':' in a dependency name
            # Replace any instances of them with '__'
            #
            # In debian '>' and '<' do not mean what it appears they mean
            # '<' = less or equal
            # '>' = greater or equal
            # adjust these to the '<<' and '>>' equivalents
            # Also, "=" specifiers only work if they have the PR in, so 1.2.3 != 1.2.3-r0
            # so to avoid issues, map this to ">= 1.2.3 << 1.2.3.0"
            for dep in list(var.keys()):
                if '(' in dep or '/' in dep:
                    newdep = re.sub(r'[(:)/]', '__', dep)
                    if newdep.startswith("__"):
                        newdep = "A" + newdep
                    if newdep != dep:
                        var[newdep] = var[dep]
                        del var[dep]
            for dep in var:
                for i, v in enumerate(var[dep]):
                    if (v or "").startswith("< "):
                        var[dep][i] = var[dep][i].replace("< ", "<< ")
                    elif (v or "").startswith("> "):
                        var[dep][i] = var[dep][i].replace("> ", ">> ")
                    elif (v or "").startswith("= ") and "-r" not in v:
                        ver = var[dep][i].replace("= ", "")
                        var[dep][i] = var[dep][i].replace("= ", ">= ")
                        var[dep].append("<< " + ver + ".0")

        rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
        debian_cmp_remap(rdepends)
        for dep in list(rdepends.keys()):
            if dep == pkg:
                # A package must not depend on itself.
                del rdepends[dep]
                continue
            if '*' in dep:
                del rdepends[dep]
        rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
        debian_cmp_remap(rrecommends)
        for dep in list(rrecommends.keys()):
            if '*' in dep:
                del rrecommends[dep]
        rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
        debian_cmp_remap(rsuggests)
        # Deliberately drop version information here, not wanted/supported by deb
        rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
        # Remove file paths if any from rprovides, debian does not support custom providers
        for key in list(rprovides.keys()):
            if key.startswith('/'):
                del rprovides[key]
        rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
        debian_cmp_remap(rprovides)
        rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
        debian_cmp_remap(rreplaces)
        rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
        debian_cmp_remap(rconflicts)
        if rdepends:
            ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
        if rsuggests:
            ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
        if rrecommends:
            ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
        if rprovides:
            ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
        if rreplaces:
            ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
        if rconflicts:
            ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
        ctrlfile.close()

        # Emit the maintainer scripts from pkg_preinst/postinst/prerm/postrm.
        for script in ["preinst", "postinst", "prerm", "postrm"]:
            scriptvar = localdata.getVar('pkg_%s' % script)
            if not scriptvar:
                continue
            scriptvar = scriptvar.strip()
            scriptfile = open(os.path.join(controldir, script), 'w')

            if scriptvar.startswith("#!"):
                # Keep the script's own interpreter line.
                pos = scriptvar.find("\n") + 1
                scriptfile.write(scriptvar[:pos])
            else:
                pos = 0
                scriptfile.write("#!/bin/sh\n")

            # Prevent the prerm/postrm scripts from being run during an upgrade
            if script in ('prerm', 'postrm'):
                scriptfile.write('[ "$1" != "upgrade" ] || exit 0\n')

            scriptfile.write(scriptvar[pos:])
            scriptfile.write('\n')
            scriptfile.close()
            os.chmod(os.path.join(controldir, script), 0o755)

        # List only the conffiles that actually exist in the package tree.
        conffiles_str = ' '.join(oe.package.get_conffiles(pkg, d))
        if conffiles_str:
            conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
            for f in conffiles_str.split():
                if os.path.exists(oe.path.join(root, f)):
                    conffiles.write('%s\n' % f)
            conffiles.close()

        os.chdir(basedir)
        subprocess.check_output("PATH=\"%s\" %s -b %s %s" % (localdata.getVar("PATH"), localdata.getVar("DPKG_BUILDCMD"),
                                                             root, pkgoutdir),
                                stderr=subprocess.STDOUT,
                                shell=True)

    finally:
        cleanupcontrol(root)
        bb.utils.unlockfile(lf)
|
||||
|
||||
# Otherwise allarch packages may change depending on override configuration
deb_write_pkg[vardepsexclude] = "OVERRIDES"

# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
# (the original list carried "PKGV" twice; the duplicate has been dropped —
# gen_packagevar emits each name verbatim, so the repeat only bloated the
# vardeps string).
DEBEXTRAVARS = "PKGV PKGR DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE PACKAGE_ADD_METADATA_DEB"
do_package_write_deb[vardeps] += "${@gen_packagevar(d, 'DEBEXTRAVARS')}"
|
||||
|
||||
SSTATETASKS += "do_package_write_deb"
do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"

# Restore .debs from shared state; restored packages also invalidate the
# feed index, so drop the "index is clean" stamp first.
python do_package_write_deb_setscene () {
    tmpdir = d.getVar('TMPDIR')

    if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
        os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))

    sstate_setscene(d)
}
addtask do_package_write_deb_setscene

# Parse-time hook: only recipes that produce packages need the dpkg tooling
# and fakeroot environment.
python () {
    if d.getVar('PACKAGES') != '':
        deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
        d.appendVarFlag('do_package_write_deb', 'depends', deps)
        d.setVarFlag('do_package_write_deb', 'fakeroot', "1")

        # Needed to ensure PKG_xxx renaming of dependency packages works
        d.setVarFlag('do_package_write_deb', 'deptask', "do_packagedata")
        d.setVarFlag('do_package_write_deb', 'rdeptask', "do_packagedata")
}

# Re-read the pkgdata for this recipe, then build the .deb archives.
python do_package_write_deb () {
    bb.build.exec_func("read_subpackage_metadata", d)
    bb.build.exec_func("do_package_deb", d)
}
do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_deb after do_packagedata do_package do_deploy_source_date_epoch before do_build
do_build[rdeptask] += "do_package_write_deb"

PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
|
||||
300
sources/poky/meta/classes-global/package_ipk.bbclass
Normal file
300
sources/poky/meta/classes-global/package_ipk.bbclass
Normal file
@@ -0,0 +1,300 @@
|
||||
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

inherit package

IMAGE_PKGTYPE ?= "ipk"

# opkg configuration files for target, SDK host and SDK target use.
IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
IPKGCONF_SDK_TARGET = "${WORKDIR}/opkg-sdk-target.conf"

# Output directory for the generated .ipk files.
PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"

# Program to be used to build opkg packages
OPKGBUILDCMD ??= 'opkg-build -Z zstd -a "${ZSTD_DEFAULTS}"'

OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "").strip() != ""]}"

OPKGLIBDIR ??= "${localstatedir}/lib"
|
||||
|
||||
# Build an .ipk for every split package, fanning the work out across cores.
python do_package_ipk () {
    workdir = d.getVar('WORKDIR')
    outdir = d.getVar('PKGWRITEDIRIPK')
    tmpdir = d.getVar('TMPDIR')
    pkgdest = d.getVar('PKGDEST')
    if not workdir or not outdir or not tmpdir:
        bb.error("Variables incorrectly set, unable to package")
        return

    packages = d.getVar('PACKAGES')
    if not packages or packages == '':
        bb.debug(1, "No packages; nothing to do")
        return

    # We're about to add new packages so the index needs to be checked
    # so remove the appropriate stamp file.
    if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
        os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))

    oe.utils.multiprocess_launch(ipk_write_pkg, packages.split(), d, extraargs=(d,))
}
# ipk_write_pkg does the real work, so its body must be in the signature;
# the thread count must not be.
do_package_ipk[vardeps] += "ipk_write_pkg"
do_package_ipk[vardepsexclude] = "BB_NUMBER_THREADS"

# FILE isn't included by default but we want the recipe to change if basename() changes
IPK_RECIPE_FILE = "${@os.path.basename(d.getVar('FILE'))}"
IPK_RECIPE_FILE[vardepvalue] = "${IPK_RECIPE_FILE}"
|
||||
|
||||
def ipk_write_pkg(pkg, d):
|
||||
import re, copy
|
||||
import subprocess
|
||||
import textwrap
|
||||
import collections
|
||||
import glob
|
||||
|
||||
def cleanupcontrol(root):
|
||||
for p in ['CONTROL', 'DEBIAN']:
|
||||
p = os.path.join(root, p)
|
||||
if os.path.exists(p):
|
||||
bb.utils.prunedir(p)
|
||||
|
||||
outdir = d.getVar('PKGWRITEDIRIPK')
|
||||
pkgdest = d.getVar('PKGDEST')
|
||||
recipesource = d.getVar('IPK_RECIPE_FILE')
|
||||
|
||||
localdata = bb.data.createCopy(d)
|
||||
root = "%s/%s" % (pkgdest, pkg)
|
||||
|
||||
lf = bb.utils.lockfile(root + ".lock")
|
||||
try:
|
||||
localdata.setVar('ROOT', '')
|
||||
localdata.setVar('ROOT_%s' % pkg, root)
|
||||
pkgname = localdata.getVar('PKG:%s' % pkg)
|
||||
if not pkgname:
|
||||
pkgname = pkg
|
||||
localdata.setVar('PKG', pkgname)
|
||||
|
||||
localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
|
||||
|
||||
basedir = os.path.join(os.path.dirname(root))
|
||||
arch = localdata.getVar('PACKAGE_ARCH')
|
||||
|
||||
if localdata.getVar('IPK_HIERARCHICAL_FEED', False) == "1":
|
||||
# Spread packages across subdirectories so each isn't too crowded
|
||||
if pkgname.startswith('lib'):
|
||||
pkg_prefix = 'lib' + pkgname[3]
|
||||
else:
|
||||
pkg_prefix = pkgname[0]
|
||||
|
||||
# Keep -dbg, -dev, -doc, -staticdev, -locale and -locale-* packages
|
||||
# together. These package suffixes are taken from the definitions of
|
||||
# PACKAGES and PACKAGES_DYNAMIC in meta/conf/bitbake.conf
|
||||
if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
|
||||
pkg_subdir = pkgname[:-4]
|
||||
elif pkgname.endswith('-staticdev'):
|
||||
pkg_subdir = pkgname[:-10]
|
||||
elif pkgname.endswith('-locale'):
|
||||
pkg_subdir = pkgname[:-7]
|
||||
elif '-locale-' in pkgname:
|
||||
pkg_subdir = pkgname[:pkgname.find('-locale-')]
|
||||
else:
|
||||
pkg_subdir = pkgname
|
||||
|
||||
pkgoutdir = "%s/%s/%s/%s" % (outdir, arch, pkg_prefix, pkg_subdir)
|
||||
else:
|
||||
pkgoutdir = "%s/%s" % (outdir, arch)
|
||||
|
||||
bb.utils.mkdirhier(pkgoutdir)
|
||||
os.chdir(root)
|
||||
cleanupcontrol(root)
|
||||
g = glob.glob('*')
|
||||
if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
|
||||
bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
|
||||
return
|
||||
|
||||
controldir = os.path.join(root, 'CONTROL')
|
||||
bb.utils.mkdirhier(controldir)
|
||||
ctrlfile = open(os.path.join(controldir, 'control'), 'w')
|
||||
|
||||
fields = []
|
||||
pe = d.getVar('PKGE')
|
||||
if pe and int(pe) > 0:
|
||||
fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
|
||||
else:
|
||||
fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
|
||||
fields.append(["Description: %s\n", ['DESCRIPTION']])
|
||||
fields.append(["Section: %s\n", ['SECTION']])
|
||||
fields.append(["Priority: %s\n", ['PRIORITY']])
|
||||
fields.append(["Maintainer: %s\n", ['MAINTAINER']])
|
||||
fields.append(["License: %s\n", ['LICENSE']])
|
||||
fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
|
||||
fields.append(["OE: %s\n", ['PN']])
|
||||
if d.getVar('HOMEPAGE'):
|
||||
fields.append(["Homepage: %s\n", ['HOMEPAGE']])
|
||||
|
||||
def pullData(l, d):
|
||||
l2 = []
|
||||
for i in l:
|
||||
l2.append(d.getVar(i))
|
||||
return l2
|
||||
|
||||
ctrlfile.write("Package: %s\n" % pkgname)
|
||||
# check for required fields
|
||||
for (c, fs) in fields:
|
||||
for f in fs:
|
||||
if localdata.getVar(f, False) is None:
|
||||
raise KeyError(f)
|
||||
# Special behavior for description...
|
||||
if 'DESCRIPTION' in fs:
|
||||
summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
|
||||
ctrlfile.write('Description: %s\n' % summary)
|
||||
description = localdata.getVar('DESCRIPTION') or "."
|
||||
description = textwrap.dedent(description).strip()
|
||||
if '\\n' in description:
|
||||
# Manually indent: multiline description includes a leading space
|
||||
for t in description.split('\\n'):
|
||||
ctrlfile.write(' %s\n' % (t.strip() or ' .'))
|
||||
else:
|
||||
# Auto indent
|
||||
ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
|
||||
else:
|
||||
ctrlfile.write(c % tuple(pullData(fs, localdata)))
|
||||
|
||||
custom_fields_chunk = oe.packagedata.get_package_additional_metadata("ipk", localdata)
|
||||
if custom_fields_chunk is not None:
|
||||
ctrlfile.write(custom_fields_chunk)
|
||||
ctrlfile.write("\n")
|
||||
|
||||
oe.packagedata.mapping_rename_hook(localdata)
|
||||
|
||||
def debian_cmp_remap(var):
|
||||
# In debian '>' and '<' do not mean what it appears they mean
|
||||
# '<' = less or equal
|
||||
# '>' = greater or equal
|
||||
# adjust these to the '<<' and '>>' equivalents
|
||||
# Also, "=" specifiers only work if they have the PR in, so 1.2.3 != 1.2.3-r0
|
||||
# so to avoid issues, map this to ">= 1.2.3 << 1.2.3.0"
|
||||
for dep in var:
|
||||
for i, v in enumerate(var[dep]):
|
||||
if (v or "").startswith("< "):
|
||||
var[dep][i] = var[dep][i].replace("< ", "<< ")
|
||||
elif (v or "").startswith("> "):
|
||||
var[dep][i] = var[dep][i].replace("> ", ">> ")
|
||||
elif (v or "").startswith("= ") and "-r" not in v:
|
||||
ver = var[dep][i].replace("= ", "")
|
||||
var[dep][i] = var[dep][i].replace("= ", ">= ")
|
||||
var[dep].append("<< " + ver + ".0")
|
||||
|
||||
rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
|
||||
debian_cmp_remap(rdepends)
|
||||
rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
|
||||
debian_cmp_remap(rrecommends)
|
||||
rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
|
||||
debian_cmp_remap(rsuggests)
|
||||
# Deliberately drop version information here, not wanted/supported by ipk
|
||||
rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
|
||||
rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
|
||||
debian_cmp_remap(rprovides)
|
||||
rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
|
||||
debian_cmp_remap(rreplaces)
|
||||
rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
|
||||
debian_cmp_remap(rconflicts)
|
||||
|
||||
if rdepends:
|
||||
ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
|
||||
if rsuggests:
|
||||
ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
|
||||
if rrecommends:
|
||||
ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
|
||||
if rprovides:
|
||||
ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
|
||||
if rreplaces:
|
||||
ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
|
||||
if rconflicts:
|
||||
ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
|
||||
ctrlfile.write("Source: %s\n" % recipesource)
|
||||
ctrlfile.close()
|
||||
|
||||
for script in ["preinst", "postinst", "prerm", "postrm"]:
|
||||
scriptvar = localdata.getVar('pkg_%s' % script)
|
||||
if not scriptvar:
|
||||
continue
|
||||
scriptfile = open(os.path.join(controldir, script), 'w')
|
||||
scriptfile.write(scriptvar)
|
||||
scriptfile.close()
|
||||
os.chmod(os.path.join(controldir, script), 0o755)
|
||||
|
||||
conffiles_str = ' '.join(oe.package.get_conffiles(pkg, d))
|
||||
if conffiles_str:
|
||||
conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
|
||||
for f in conffiles_str.split():
|
||||
if os.path.exists(oe.path.join(root, f)):
|
||||
conffiles.write('%s\n' % f)
|
||||
conffiles.close()
|
||||
|
||||
os.chdir(basedir)
|
||||
subprocess.check_output("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH"),
|
||||
d.getVar("OPKGBUILDCMD"), pkg, pkgoutdir),
|
||||
stderr=subprocess.STDOUT,
|
||||
shell=True)
|
||||
|
||||
if d.getVar('IPK_SIGN_PACKAGES') == '1':
|
||||
ipkver = "%s-%s" % (localdata.getVar('PKGV'), localdata.getVar('PKGR'))
|
||||
ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, localdata.getVar('PACKAGE_ARCH'))
|
||||
sign_ipk(d, ipk_to_sign)
|
||||
|
||||
finally:
|
||||
cleanupcontrol(root)
|
||||
bb.utils.unlockfile(lf)
|
||||
|
||||
# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
IPKEXTRAVARS = "PRIORITY MAINTAINER PACKAGE_ARCH HOMEPAGE PACKAGE_ADD_METADATA_IPK"
# Expand the per-package variants of the variables above into the task
# signature so that changing any of them re-runs ipk_write_pkg.
ipk_write_pkg[vardeps] += "${@gen_packagevar(d, 'IPKEXTRAVARS')}"

# Otherwise allarch packages may change depending on override configuration
ipk_write_pkg[vardepsexclude] = "OVERRIDES"


# Make do_package_write_ipk a shared-state task: ipks are written to
# PKGWRITEDIRIPK and published into DEPLOY_DIR_IPK when restored from sstate.
SSTATETASKS += "do_package_write_ipk"
do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
||||
python do_package_write_ipk_setscene () {
    # Restoring ipks from sstate invalidates any previously generated feed
    # index, so drop the "index is clean" stamp if it exists before restoring.
    stamp = os.path.join(d.getVar('TMPDIR'), "stamps", "IPK_PACKAGE_INDEX_CLEAN")
    if os.access(stamp, os.R_OK):
        os.unlink(stamp)

    sstate_setscene(d)
}
addtask do_package_write_ipk_setscene
|
||||
|
||||
python () {
    # Only recipes that actually produce packages need the packaging tools
    # and the packagedata of their (runtime) dependencies.
    if d.getVar('PACKAGES') == '':
        return
    task = 'do_package_write_ipk'
    d.appendVarFlag(task, 'depends', ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot zstd-native:do_populate_sysroot')
    d.setVarFlag(task, 'fakeroot', "1")

    # Needed to ensure PKG_xxx renaming of dependency packages works
    d.setVarFlag(task, 'deptask', "do_packagedata")
    d.setVarFlag(task, 'rdeptask', "do_packagedata")
}
|
||||
|
||||
python do_package_write_ipk () {
    # Load the per-subpackage metadata produced by do_packagedata, then
    # generate the ipk packages themselves.
    for func in ("read_subpackage_metadata", "do_package_ipk"):
        bb.build.exec_func(func, d)
}
|
||||
# Run in (and wipe before each run) the sstate-managed staging directory.
do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
# Additional host tools a recipe can request via PACKAGE_WRITE_DEPS.
do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_ipk after do_packagedata do_package do_deploy_source_date_epoch before do_build
do_build[rdeptask] += "do_package_write_ipk"

# Host tools needed when generating the ipk package feed index.
PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
|
||||
173
sources/poky/meta/classes-global/package_pkgdata.bbclass
Normal file
173
sources/poky/meta/classes-global/package_pkgdata.bbclass
Normal file
@@ -0,0 +1,173 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Recipe-local pkgdata sysroot, populated by package_prepare_pkgdata below.
WORKDIR_PKGDATA = "${WORKDIR}/pkgdata-sysroot"
|
||||
|
||||
def package_populate_pkgdata_dir(pkgdatadir, d):
    """Replay every applicable packagedata sstate manifest into pkgdatadir.

    Fallback used when no task dependency data is available (bitbake -b):
    copies pkgdata for all package architectures that could apply to this
    machine into the recipe-local pkgdata sysroot.
    """
    import glob

    copied_postinsts = []
    created_dirs = set()
    stagingdir = d.getVar("PKGDATA_DIR")

    # Machine arch, then the extra archs in reverse order, then allarch.
    archs = ['${MACHINE_ARCH}']
    archs.extend(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
    archs.append('allarch')

    bb.utils.mkdirhier(pkgdatadir)
    for arch in archs:
        pattern = d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.packagedata" % arch)
        for manifest in glob.glob(pattern):
            with open(manifest, "r") as f:
                for entry in f:
                    entry = entry.strip()
                    dest = entry.replace(stagingdir, "")
                    if entry.endswith("/"):
                        created = staging_copydir(entry, pkgdatadir, dest, created_dirs)
                    else:
                        try:
                            staging_copyfile(entry, pkgdatadir, dest, copied_postinsts, created_dirs)
                        except FileExistsError:
                            # Same file listed by more than one manifest; first copy wins.
                            continue
|
||||
|
||||
python package_prepare_pkgdata() {
    # Populate ${WORKDIR_PKGDATA} with the pkgdata of exactly those
    # dependencies this task would have had restored from sstate, so the
    # packaging code only "sees" data it can legitimately depend upon.
    import copy
    import glob

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    mytaskname = d.getVar("BB_RUNTASK")
    if mytaskname.endswith("_setscene"):
        mytaskname = mytaskname.replace("_setscene", "")
    workdir = d.getVar("WORKDIR")
    pn = d.getVar("PN")
    stagingdir = d.getVar("PKGDATA_DIR")
    pkgdatadir = d.getVar("WORKDIR_PKGDATA")

    # Detect bitbake -b usage
    nodeps = d.getVar("BB_LIMITEDDEPS") or False
    if nodeps:
        # With bitbake -b there is no dependency data to filter on, so copy
        # pkgdata for every applicable architecture.
        # Bug fix: this must call package_populate_pkgdata_dir() (defined
        # above in this class); the previous "staging_"-prefixed name is not
        # defined anywhere and raised NameError whenever this path ran.
        package_populate_pkgdata_dir(pkgdatadir, d)
        return

    # Locate our own task entry in the dependency data.
    start = None
    configuredeps = []
    for dep in taskdepdata:
        data = taskdepdata[dep]
        if data[1] == mytaskname and data[0] == pn:
            start = dep
            break
    if start is None:
        bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")

    # We need to figure out which sysroot files we need to expose to this task.
    # This needs to match what would get restored from sstate, which is controlled
    # ultimately by calls from bitbake to setscene_depvalid().
    # That function expects a setscene dependency tree. We build a dependency tree
    # condensed to inter-sstate task dependencies, similar to that used by setscene
    # tasks. We can then call into setscene_depvalid() and decide
    # which dependencies we can "see" and should expose in the recipe specific sysroot.
    setscenedeps = copy.deepcopy(taskdepdata)

    start = set([start])

    sstatetasks = d.getVar("SSTATETASKS").split()
    # Add recipe specific tasks referenced by setscene_depvalid()
    sstatetasks.append("do_stash_locale")

    # If start is an sstate task (like do_package) we need to add in its direct dependencies
    # else the code below won't recurse into them.
    for dep in set(start):
        for dep2 in setscenedeps[dep][3]:
            start.add(dep2)
        start.remove(dep)

    # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
    for dep in taskdepdata:
        data = setscenedeps[dep]
        if data[1] not in sstatetasks:
            for dep2 in setscenedeps:
                data2 = setscenedeps[dep2]
                if dep in data2[3]:
                    data2[3].update(setscenedeps[dep][3])
                    data2[3].remove(dep)
            if dep in start:
                start.update(setscenedeps[dep][3])
                start.remove(dep)
            del setscenedeps[dep]

    # Remove circular references
    for dep in setscenedeps:
        if dep in setscenedeps[dep][3]:
            setscenedeps[dep][3].remove(dep)

    # Direct dependencies should be present and can be depended upon
    for dep in set(start):
        if setscenedeps[dep][1] == "do_packagedata":
            if dep not in configuredeps:
                configuredeps.append(dep)

    msgbuf = []
    # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
    # for ones that would be restored from sstate.
    done = list(start)
    next = list(start)
    while next:
        new = []
        for dep in next:
            data = setscenedeps[dep]
            for datadep in data[3]:
                if datadep in done:
                    continue
                taskdeps = {}
                taskdeps[dep] = setscenedeps[dep][:2]
                taskdeps[datadep] = setscenedeps[datadep][:2]
                retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
                done.append(datadep)
                new.append(datadep)
                if retval:
                    msgbuf.append("Skipping setscene dependency %s" % datadep)
                    continue
                if datadep not in configuredeps and setscenedeps[datadep][1] == "do_packagedata":
                    configuredeps.append(datadep)
                    msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
                else:
                    msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
        next = new

    # This logging is too verbose for day to day use sadly
    #bb.debug(2, "\n".join(msgbuf))

    seendirs = set()
    postinsts = []
    multilibs = {}
    manifests = {}

    msg_adding = []

    # Replay the packagedata sstate manifest of each visible dependency into
    # the recipe-local pkgdata directory.
    for dep in configuredeps:
        c = setscenedeps[dep][0]
        msg_adding.append(c)

        manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "packagedata", d, multilibs)
        destsysroot = pkgdatadir

        if manifest:
            targetdir = destsysroot
            with open(manifest, "r") as f:
                manifests[dep] = manifest
                for l in f:
                    l = l.strip()
                    dest = targetdir + l.replace(stagingdir, "")
                    if l.endswith("/"):
                        staging_copydir(l, targetdir, dest, seendirs)
                        continue
                    staging_copyfile(l, targetdir, dest, postinsts, seendirs)

    bb.note("Installed into pkgdata-sysroot: %s" % str(msg_adding))

}
|
||||
# Start from a clean pkgdata sysroot every run.
package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
# These only affect which files get copied, not the packaged output, so keep
# them out of the task signature.
package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA SSTATETASKS"
|
||||
|
||||
|
||||
793
sources/poky/meta/classes-global/package_rpm.bbclass
Normal file
793
sources/poky/meta/classes-global/package_rpm.bbclass
Normal file
@@ -0,0 +1,793 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit package

IMAGE_PKGTYPE ?= "rpm"

# Host rpm/rpmbuild binaries used to create the packages.
RPM = "rpm"
RPMBUILD = "rpmbuild"
# rpmbuild payload compression mode: write ('w'), zstd level 3, using
# ZSTD_THREADS threads ('T<n>'), zstdio backend.
RPMBUILD_COMPMODE ?= "${@'w3T%d.zstdio' % int(d.getVar('ZSTD_THREADS'))}"

# Staging area the finished rpms are written into.
PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"

# Maintaining the perfile dependencies has significant overhead when writing the
# packages. When set, this value merges them for efficiency.
MERGEPERFILEDEPS = "1"
|
||||
|
||||
def filter_deps(var, f):
    """Filter a dependency string through predicate *f*.

    *var* is a BitBake-style dependency string; the result contains only the
    entries whose dependency name satisfies f(name), with version constraints
    and ordering preserved.
    """
    import collections

    exploded = bb.utils.explode_dep_versions2(var)
    kept = collections.OrderedDict(
        (name, constraints) for name, constraints in exploded.items() if f(name)
    )
    return bb.utils.join_deps(kept, commasep=False)
|
||||
|
||||
def filter_nativesdk_deps(srcname, var):
    """For nativesdk- recipes, strip unwanted entries from dependency string *var*.

    Removes absolute paths (typically /bin/sh and /usr/bin/env) and any perl
    dependencies. Non-nativesdk recipes (and empty strings) pass through
    unchanged.
    """
    if not var or not srcname.startswith("nativesdk-"):
        return var

    def keep(dep):
        # Reject absolute paths, bare "perl" and "perl(...)" style deps.
        return not (dep.startswith('/') or dep == 'perl' or dep.startswith('perl('))

    return filter_deps(var, keep)
|
||||
|
||||
# Construct per file dependencies file
def write_rpm_perfiledata(srcname, d):
    # Writes two executable python scripts, <srcname>.requires and
    # <srcname>.provides, into WORKDIR and returns their paths. Each script
    # embeds a table mapping packaged file paths to their per-file
    # RDEPENDS/RPROVIDES and, when executed, answers lookups read from stdin
    # (presumably consumed as rpmbuild external dependency generators --
    # confirm against the spec-file generation code).
    workdir = d.getVar('WORKDIR')
    packages = d.getVar('PACKAGES')
    pkgd = d.getVar('PKGD')

    # Emit one lookup script for the given variable family
    # ('RDEPENDS' or 'RPROVIDES').
    def dump_filerdeps(varname, outfile, d):
        outfile.write("#!/usr/bin/env python3\n\n")
        outfile.write("# Dependency table\n")
        outfile.write('deps = {\n')
        for pkg in packages.split():
            # FILE<var>FLIST:<pkg> lists the files that carry per-file deps.
            dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
            dependsflist = (d.getVar(dependsflist_key) or "")
            for dfile in dependsflist.split():
                key = "FILE" + varname + ":" + dfile + ":" + pkg
                deps = filter_nativesdk_deps(srcname, d.getVar(key) or "")
                depends_dict = bb.utils.explode_dep_versions(deps)
                # Translate the @token@ placeholders back into the literal
                # characters that are not valid inside variable names.
                file = dfile.replace("@underscore@", "_")
                file = file.replace("@closebrace@", "]")
                file = file.replace("@openbrace@", "[")
                file = file.replace("@tab@", "\t")
                file = file.replace("@space@", " ")
                file = file.replace("@at@", "@")
                outfile.write('"' + pkgd + file + '" : "')
                for dep in depends_dict:
                    ver = depends_dict[dep]
                    if dep and ver:
                        # Drop the parentheses from "(>= 1.2)" style constraints.
                        ver = ver.replace("(", "")
                        ver = ver.replace(")", "")
                        outfile.write(dep + " " + ver + " ")
                    else:
                        outfile.write(dep + " ")
                outfile.write('",\n')
        outfile.write('}\n\n')
        # Script trailer: read file paths from stdin, print matching deps,
        # exit on the first empty line.
        outfile.write("import sys\n")
        outfile.write("while 1:\n")
        outfile.write("\tline = sys.stdin.readline().strip()\n")
        outfile.write("\tif not line:\n")
        outfile.write("\t\tsys.exit(0)\n")
        outfile.write("\tif line in deps:\n")
        outfile.write("\t\tprint(deps[line] + '\\n')\n")

    # OE-core dependencies a.k.a. RPM requires
    outdepends = workdir + "/" + srcname + ".requires"

    dependsfile = open(outdepends, 'w')

    dump_filerdeps('RDEPENDS', dependsfile, d)

    dependsfile.close()
    # Mark executable: the generated script is run, not just read.
    os.chmod(outdepends, 0o755)

    # OE-core / RPM Provides
    outprovides = workdir + "/" + srcname + ".provides"

    providesfile = open(outprovides, 'w')

    dump_filerdeps('RPROVIDES', providesfile, d)

    providesfile.close()
    os.chmod(outprovides, 0o755)

    return (outdepends, outprovides)
|
||||
|
||||
|
||||
python write_specfile () {
|
||||
import oe.packagedata
|
||||
import os,pwd,grp,stat
|
||||
|
||||
# append information for logs and patches to %prep
|
||||
def add_prep(d, spec_files_bottom):
|
||||
if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
|
||||
spec_files_bottom.append('%%prep -n %s' % d.getVar('PN'))
|
||||
spec_files_bottom.append('%s' % "echo \"include logs and patches, Please check them in SOURCES\"")
|
||||
spec_files_bottom.append('')
|
||||
|
||||
# append the name of tarball to key word 'SOURCE' in xxx.spec.
|
||||
def tail_source(d):
|
||||
if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
|
||||
ar_outdir = d.getVar('ARCHIVER_OUTDIR')
|
||||
if not os.path.exists(ar_outdir):
|
||||
return
|
||||
source_list = os.listdir(ar_outdir)
|
||||
source_number = 0
|
||||
for source in source_list:
|
||||
# do_deploy_archives may have already run (from sstate) meaning a .src.rpm may already
|
||||
# exist in ARCHIVER_OUTDIR so skip if present.
|
||||
if source.endswith(".src.rpm"):
|
||||
continue
|
||||
# The rpmbuild doesn't need the root permission, but it needs
|
||||
# to know the file's user and group name, the only user and
|
||||
# group in fakeroot is "root" when working in fakeroot.
|
||||
f = os.path.join(ar_outdir, source)
|
||||
os.chown(f, 0, 0)
|
||||
spec_preamble_top.append('Source%s: %s' % (source_number, source))
|
||||
source_number += 1
|
||||
|
||||
# In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
|
||||
# This format is similar to OE, however there are restrictions on the
|
||||
# characters that can be in a field. In the Version field, "-"
|
||||
# characters are not allowed. "-" is allowed in the Release field.
|
||||
#
|
||||
# We translate the "-" in the version to a "+", by loading the PKGV
|
||||
# from the dependent recipe, replacing the - with a +, and then using
|
||||
# that value to do a replace inside of this recipe's dependencies.
|
||||
# This preserves the "-" separator between the version and release, as
|
||||
# well as any "-" characters inside of the release field.
|
||||
#
|
||||
# All of this has to happen BEFORE the mapping_rename_hook as
|
||||
# after renaming we cannot look up the dependencies in the packagedata
|
||||
# store.
|
||||
def translate_vers(varname, d):
|
||||
depends = d.getVar(varname)
|
||||
if depends:
|
||||
depends_dict = bb.utils.explode_dep_versions2(depends)
|
||||
newdeps_dict = {}
|
||||
for dep in depends_dict:
|
||||
verlist = []
|
||||
for ver in depends_dict[dep]:
|
||||
if '-' in ver:
|
||||
subd = oe.packagedata.read_subpkgdata_dict(dep, d)
|
||||
if 'PKGV' in subd:
|
||||
pv = subd['PV']
|
||||
pkgv = subd['PKGV']
|
||||
reppv = pkgv.replace('-', '+')
|
||||
if ver.startswith(pv):
|
||||
ver = ver.replace(pv, reppv)
|
||||
ver = ver.replace(pkgv, reppv)
|
||||
if 'PKGR' in subd:
|
||||
# Make sure PKGR rather than PR in ver
|
||||
pr = '-' + subd['PR']
|
||||
pkgr = '-' + subd['PKGR']
|
||||
if pkgr not in ver:
|
||||
ver = ver.replace(pr, pkgr)
|
||||
verlist.append(ver)
|
||||
else:
|
||||
verlist.append(ver)
|
||||
newdeps_dict[dep] = verlist
|
||||
depends = bb.utils.join_deps(newdeps_dict)
|
||||
d.setVar(varname, depends.strip())
|
||||
|
||||
# We need to change the style the dependency from BB to RPM
|
||||
# This needs to happen AFTER the mapping_rename_hook
|
||||
def print_deps(variable, tag, array, d):
|
||||
depends = variable
|
||||
if depends:
|
||||
depends_dict = bb.utils.explode_dep_versions2(depends)
|
||||
for dep in depends_dict:
|
||||
for ver in depends_dict[dep]:
|
||||
ver = ver.replace('(', '')
|
||||
ver = ver.replace(')', '')
|
||||
array.append("%s: %s %s" % (tag, dep, ver))
|
||||
if not len(depends_dict[dep]):
|
||||
array.append("%s: %s" % (tag, dep))
|
||||
|
||||
def walk_files(walkpath, target, conffiles, dirfiles):
|
||||
# We can race against the ipk/deb backends which create CONTROL or DEBIAN directories
|
||||
# when packaging. We just ignore these files which are created in
|
||||
# packages-split/ and not package/
|
||||
# We have the odd situation where the CONTROL/DEBIAN directory can be removed in the middle of
|
||||
# of the walk, the isdir() test would then fail and the walk code would assume its a file
|
||||
# hence we check for the names in files too.
|
||||
for rootpath, dirs, files in os.walk(walkpath):
|
||||
def get_attr(path):
|
||||
stat_f = os.stat(rootpath + "/" + path, follow_symlinks=False)
|
||||
mode = stat.S_IMODE(stat_f.st_mode)
|
||||
try:
|
||||
owner = pwd.getpwuid(stat_f.st_uid).pw_name
|
||||
except Exception as e:
|
||||
filename = d.getVar('RECIPE_SYSROOT') + '/etc/passwd'
|
||||
if os.path.exists(filename):
|
||||
bb.error("Content of /etc/passwd in sysroot:\n{}".format(
|
||||
open(filename).read()))
|
||||
else:
|
||||
bb.error("File {} doesn't exist in sysroot!".format(filename))
|
||||
raise e
|
||||
try:
|
||||
group = grp.getgrgid(stat_f.st_gid).gr_name
|
||||
except Exception as e:
|
||||
filename = d.getVar("RECIPE_SYSROOT") +"/etc/group"
|
||||
if os.path.exists(filename):
|
||||
bb.error("Content of /etc/group in sysroot:\n{}".format(
|
||||
open(filename).read()))
|
||||
else:
|
||||
bb.error("File {} doesn't exists in sysroot!".format(filename))
|
||||
raise e
|
||||
return "%attr({:o},{},{}) ".format(mode, owner, group)
|
||||
|
||||
def escape_chars(p):
|
||||
return p.replace("%", "%%").replace("\\", "\\\\").replace('"', '\\"')
|
||||
|
||||
path = rootpath.replace(walkpath, "")
|
||||
if path.endswith("DEBIAN") or path.endswith("CONTROL"):
|
||||
continue
|
||||
|
||||
# Treat all symlinks to directories as normal files.
|
||||
# os.walk() lists them as directories.
|
||||
def move_to_files(dir):
|
||||
if os.path.islink(os.path.join(rootpath, dir)):
|
||||
files.append(dir)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
dirs[:] = [dir for dir in dirs if not move_to_files(dir)]
|
||||
|
||||
# Directory handling can happen in two ways, either DIRFILES is not set at all
|
||||
# in which case we fall back to the older behaviour of packages owning all their
|
||||
# directories
|
||||
if dirfiles is None:
|
||||
for dir in dirs:
|
||||
if dir == "CONTROL" or dir == "DEBIAN":
|
||||
continue
|
||||
p = path + '/' + dir
|
||||
# All packages own the directories their files are in...
|
||||
target.append(get_attr(dir) + '%dir "' + escape_chars(p) + '"')
|
||||
elif path:
|
||||
# packages own only empty directories or explict directory.
|
||||
# This will prevent the overlapping of security permission.
|
||||
attr = get_attr(path)
|
||||
if (not files and not dirs) or path in dirfiles:
|
||||
target.append(attr + '%dir "' + escape_chars(path) + '"')
|
||||
|
||||
for file in files:
|
||||
if file == "CONTROL" or file == "DEBIAN":
|
||||
continue
|
||||
attr = get_attr(file)
|
||||
p = path + '/' + file
|
||||
if conffiles.count(p):
|
||||
target.append(attr + '%config "' + escape_chars(p) + '"')
|
||||
else:
|
||||
target.append(attr + '"' + escape_chars(p) + '"')
|
||||
|
||||
# Prevent the prerm/postrm scripts from being run during an upgrade
|
||||
def wrap_uninstall(scriptvar):
|
||||
scr = scriptvar.strip()
|
||||
if scr.startswith("#!"):
|
||||
pos = scr.find("\n") + 1
|
||||
else:
|
||||
pos = 0
|
||||
scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'
|
||||
return scr
|
||||
|
||||
def get_perfile(varname, pkg, d):
|
||||
deps = []
|
||||
dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
|
||||
dependsflist = (d.getVar(dependsflist_key) or "")
|
||||
for dfile in dependsflist.split():
|
||||
key = "FILE" + varname + ":" + dfile + ":" + pkg
|
||||
depends = d.getVar(key)
|
||||
if depends:
|
||||
deps.append(depends)
|
||||
return " ".join(deps)
|
||||
|
||||
def append_description(spec_preamble, text):
|
||||
"""
|
||||
Add the description to the spec file.
|
||||
"""
|
||||
import textwrap
|
||||
dedent_text = textwrap.dedent(text).strip()
|
||||
# Bitbake saves "\n" as "\\n"
|
||||
if '\\n' in dedent_text:
|
||||
for t in dedent_text.split('\\n'):
|
||||
spec_preamble.append(t.strip())
|
||||
else:
|
||||
spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
|
||||
|
||||
packages = d.getVar('PACKAGES')
|
||||
if not packages or packages == '':
|
||||
bb.debug(1, "No packages; nothing to do")
|
||||
return
|
||||
|
||||
pkgdest = d.getVar('PKGDEST')
|
||||
if not pkgdest:
|
||||
bb.fatal("No PKGDEST")
|
||||
|
||||
outspecfile = d.getVar('OUTSPECFILE')
|
||||
if not outspecfile:
|
||||
bb.fatal("No OUTSPECFILE")
|
||||
|
||||
# Construct the SPEC file...
|
||||
srcname = d.getVar('PN')
|
||||
localdata = bb.data.createCopy(d)
|
||||
localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + srcname)
|
||||
srcsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
|
||||
srcversion = localdata.getVar('PKGV').replace('-', '+')
|
||||
srcrelease = localdata.getVar('PKGR')
|
||||
srcepoch = (localdata.getVar('PKGE') or "")
|
||||
srclicense = localdata.getVar('LICENSE')
|
||||
srcsection = localdata.getVar('SECTION')
|
||||
srcmaintainer = localdata.getVar('MAINTAINER')
|
||||
srchomepage = localdata.getVar('HOMEPAGE')
|
||||
srcdescription = localdata.getVar('DESCRIPTION') or "."
|
||||
srccustomtagschunk = oe.packagedata.get_package_additional_metadata("rpm", localdata)
|
||||
|
||||
srcdepends = d.getVar('DEPENDS')
|
||||
srcrdepends = ""
|
||||
srcrrecommends = ""
|
||||
srcrsuggests = ""
|
||||
srcrprovides = ""
|
||||
srcrreplaces = ""
|
||||
srcrconflicts = ""
|
||||
srcrobsoletes = ""
|
||||
|
||||
srcrpreinst = []
|
||||
srcrpostinst = []
|
||||
srcrprerm = []
|
||||
srcrpostrm = []
|
||||
|
||||
spec_preamble_top = []
|
||||
spec_preamble_bottom = []
|
||||
|
||||
spec_scriptlets_top = []
|
||||
spec_scriptlets_bottom = []
|
||||
|
||||
spec_files_top = []
|
||||
spec_files_bottom = []
|
||||
|
||||
perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
|
||||
extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA") or "0") == "1"
|
||||
|
||||
for pkg in packages.split():
|
||||
localdata = bb.data.createCopy(d)
|
||||
|
||||
root = "%s/%s" % (pkgdest, pkg)
|
||||
|
||||
localdata.setVar('ROOT', '')
|
||||
localdata.setVar('ROOT_%s' % pkg, root)
|
||||
pkgname = localdata.getVar('PKG:%s' % pkg)
|
||||
if not pkgname:
|
||||
pkgname = pkg
|
||||
localdata.setVar('PKG', pkgname)
|
||||
|
||||
localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
|
||||
|
||||
conffiles = oe.package.get_conffiles(pkg, d)
|
||||
dirfiles = localdata.getVar('DIRFILES')
|
||||
if dirfiles is not None:
|
||||
dirfiles = dirfiles.split()
|
||||
|
||||
splitname = pkgname
|
||||
|
||||
splitsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
|
||||
splitversion = (localdata.getVar('PKGV') or "").replace('-', '+')
|
||||
splitrelease = (localdata.getVar('PKGR') or "")
|
||||
splitepoch = (localdata.getVar('PKGE') or "")
|
||||
splitlicense = (localdata.getVar('LICENSE') or "")
|
||||
splitsection = (localdata.getVar('SECTION') or "")
|
||||
splitdescription = (localdata.getVar('DESCRIPTION') or ".")
|
||||
splitcustomtagschunk = oe.packagedata.get_package_additional_metadata("rpm", localdata)
|
||||
|
||||
translate_vers('RDEPENDS', localdata)
|
||||
translate_vers('RRECOMMENDS', localdata)
|
||||
translate_vers('RSUGGESTS', localdata)
|
||||
translate_vers('RPROVIDES', localdata)
|
||||
translate_vers('RREPLACES', localdata)
|
||||
translate_vers('RCONFLICTS', localdata)
|
||||
|
||||
# Map the dependencies into their final form
|
||||
oe.packagedata.mapping_rename_hook(localdata)
|
||||
|
||||
splitrdepends = localdata.getVar('RDEPENDS') or ""
|
||||
splitrrecommends = localdata.getVar('RRECOMMENDS') or ""
|
||||
splitrsuggests = localdata.getVar('RSUGGESTS') or ""
|
||||
splitrprovides = localdata.getVar('RPROVIDES') or ""
|
||||
splitrreplaces = localdata.getVar('RREPLACES') or ""
|
||||
splitrconflicts = localdata.getVar('RCONFLICTS') or ""
|
||||
splitrobsoletes = ""
|
||||
|
||||
splitrpreinst = localdata.getVar('pkg_preinst')
|
||||
splitrpostinst = localdata.getVar('pkg_postinst')
|
||||
splitrprerm = localdata.getVar('pkg_prerm')
|
||||
splitrpostrm = localdata.getVar('pkg_postrm')
|
||||
|
||||
|
||||
if not perfiledeps:
|
||||
# Add in summary of per file dependencies
|
||||
splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
|
||||
splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
|
||||
|
||||
splitrdepends = filter_nativesdk_deps(srcname, splitrdepends)
|
||||
|
||||
# Gather special src/first package data
|
||||
if srcname == splitname:
|
||||
archiving = d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and \
|
||||
bb.data.inherits_class('archiver', d)
|
||||
if archiving and srclicense != splitlicense:
|
||||
bb.warn("The SRPM produced may not have the correct overall source license in the License tag. This is due to the LICENSE for the primary package and SRPM conflicting.")
|
||||
|
||||
srclicense = splitlicense
|
||||
srcrdepends = splitrdepends
|
||||
srcrrecommends = splitrrecommends
|
||||
srcrsuggests = splitrsuggests
|
||||
srcrprovides = splitrprovides
|
||||
srcrreplaces = splitrreplaces
|
||||
srcrconflicts = splitrconflicts
|
||||
|
||||
srcrpreinst = splitrpreinst
|
||||
srcrpostinst = splitrpostinst
|
||||
srcrprerm = splitrprerm
|
||||
srcrpostrm = splitrpostrm
|
||||
|
||||
file_list = []
|
||||
walk_files(root, file_list, conffiles, dirfiles)
|
||||
if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
|
||||
bb.note("Not creating empty RPM package for %s" % splitname)
|
||||
else:
|
||||
spec_files_top.append('%files')
|
||||
if extra_pkgdata:
|
||||
package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
|
||||
spec_files_top.append('%defattr(-,-,-,-)')
|
||||
if file_list:
|
||||
bb.note("Creating RPM package for %s" % splitname)
|
||||
spec_files_top.extend(file_list)
|
||||
else:
|
||||
bb.note("Creating empty RPM package for %s" % splitname)
|
||||
spec_files_top.append('')
|
||||
continue
|
||||
|
||||
# Process subpackage data
|
||||
spec_preamble_bottom.append('%%package -n %s' % splitname)
|
||||
spec_preamble_bottom.append('Summary: %s' % splitsummary)
|
||||
if srcversion != splitversion:
|
||||
spec_preamble_bottom.append('Version: %s' % splitversion)
|
||||
if srcrelease != splitrelease:
|
||||
spec_preamble_bottom.append('Release: %s' % splitrelease)
|
||||
if srcepoch != splitepoch:
|
||||
spec_preamble_bottom.append('Epoch: %s' % splitepoch)
|
||||
spec_preamble_bottom.append('License: %s' % splitlicense)
|
||||
spec_preamble_bottom.append('Group: %s' % splitsection)
|
||||
|
||||
if srccustomtagschunk != splitcustomtagschunk:
|
||||
spec_preamble_bottom.append(splitcustomtagschunk)
|
||||
|
||||
# Replaces == Obsoletes && Provides
|
||||
robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes)
|
||||
rprovides = bb.utils.explode_dep_versions2(splitrprovides)
|
||||
rreplaces = bb.utils.explode_dep_versions2(splitrreplaces)
|
||||
for dep in rreplaces:
|
||||
if dep not in robsoletes:
|
||||
robsoletes[dep] = rreplaces[dep]
|
||||
if dep not in rprovides:
|
||||
rprovides[dep] = rreplaces[dep]
|
||||
splitrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
|
||||
splitrprovides = bb.utils.join_deps(rprovides, commasep=False)
|
||||
|
||||
print_deps(splitrdepends, "Requires", spec_preamble_bottom, d)
|
||||
if splitrpreinst:
|
||||
print_deps(splitrdepends, "Requires(pre)", spec_preamble_bottom, d)
|
||||
if splitrpostinst:
|
||||
print_deps(splitrdepends, "Requires(post)", spec_preamble_bottom, d)
|
||||
if splitrprerm:
|
||||
print_deps(splitrdepends, "Requires(preun)", spec_preamble_bottom, d)
|
||||
if splitrpostrm:
|
||||
print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
|
||||
|
||||
print_deps(splitrrecommends, "Recommends", spec_preamble_bottom, d)
|
||||
print_deps(splitrsuggests, "Suggests", spec_preamble_bottom, d)
|
||||
print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
|
||||
print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
|
||||
print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
|
||||
|
||||
spec_preamble_bottom.append('')
|
||||
|
||||
spec_preamble_bottom.append('%%description -n %s' % splitname)
|
||||
append_description(spec_preamble_bottom, splitdescription)
|
||||
|
||||
spec_preamble_bottom.append('')
|
||||
|
||||
# Now process scriptlets
|
||||
if splitrpreinst:
|
||||
spec_scriptlets_bottom.append('%%pre -n %s' % splitname)
|
||||
spec_scriptlets_bottom.append('# %s - preinst' % splitname)
|
||||
spec_scriptlets_bottom.append(splitrpreinst)
|
||||
spec_scriptlets_bottom.append('')
|
||||
if splitrpostinst:
|
||||
spec_scriptlets_bottom.append('%%post -n %s' % splitname)
|
||||
spec_scriptlets_bottom.append('# %s - postinst' % splitname)
|
||||
spec_scriptlets_bottom.append(splitrpostinst)
|
||||
spec_scriptlets_bottom.append('')
|
||||
if splitrprerm:
|
||||
spec_scriptlets_bottom.append('%%preun -n %s' % splitname)
|
||||
spec_scriptlets_bottom.append('# %s - prerm' % splitname)
|
||||
scriptvar = wrap_uninstall(splitrprerm)
|
||||
spec_scriptlets_bottom.append(scriptvar)
|
||||
spec_scriptlets_bottom.append('')
|
||||
if splitrpostrm:
|
||||
spec_scriptlets_bottom.append('%%postun -n %s' % splitname)
|
||||
spec_scriptlets_bottom.append('# %s - postrm' % splitname)
|
||||
scriptvar = wrap_uninstall(splitrpostrm)
|
||||
spec_scriptlets_bottom.append(scriptvar)
|
||||
spec_scriptlets_bottom.append('')
|
||||
|
||||
# Now process files
|
||||
file_list = []
|
||||
walk_files(root, file_list, conffiles, dirfiles)
|
||||
if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
|
||||
bb.note("Not creating empty RPM package for %s" % splitname)
|
||||
else:
|
||||
spec_files_bottom.append('%%files -n %s' % splitname)
|
||||
if extra_pkgdata:
|
||||
package_rpm_extra_pkgdata(splitname, spec_files_bottom, localdata)
|
||||
spec_files_bottom.append('%defattr(-,-,-,-)')
|
||||
if file_list:
|
||||
bb.note("Creating RPM package for %s" % splitname)
|
||||
spec_files_bottom.extend(file_list)
|
||||
else:
|
||||
bb.note("Creating empty RPM package for %s" % splitname)
|
||||
spec_files_bottom.append('')
|
||||
|
||||
del localdata
|
||||
|
||||
add_prep(d, spec_files_bottom)
|
||||
spec_preamble_top.append('Summary: %s' % srcsummary)
|
||||
spec_preamble_top.append('Name: %s' % srcname)
|
||||
spec_preamble_top.append('Version: %s' % srcversion)
|
||||
spec_preamble_top.append('Release: %s' % srcrelease)
|
||||
if srcepoch and srcepoch.strip() != "":
|
||||
spec_preamble_top.append('Epoch: %s' % srcepoch)
|
||||
spec_preamble_top.append('License: %s' % srclicense)
|
||||
spec_preamble_top.append('Group: %s' % srcsection)
|
||||
spec_preamble_top.append('Packager: %s' % srcmaintainer)
|
||||
if srchomepage:
|
||||
spec_preamble_top.append('URL: %s' % srchomepage)
|
||||
if srccustomtagschunk:
|
||||
spec_preamble_top.append(srccustomtagschunk)
|
||||
tail_source(d)
|
||||
|
||||
# Replaces == Obsoletes && Provides
|
||||
robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes)
|
||||
rprovides = bb.utils.explode_dep_versions2(srcrprovides)
|
||||
rreplaces = bb.utils.explode_dep_versions2(srcrreplaces)
|
||||
for dep in rreplaces:
|
||||
if dep not in robsoletes:
|
||||
robsoletes[dep] = rreplaces[dep]
|
||||
if dep not in rprovides:
|
||||
rprovides[dep] = rreplaces[dep]
|
||||
srcrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
|
||||
srcrprovides = bb.utils.join_deps(rprovides, commasep=False)
|
||||
|
||||
print_deps(srcdepends, "BuildRequires", spec_preamble_top, d)
|
||||
print_deps(srcrdepends, "Requires", spec_preamble_top, d)
|
||||
if srcrpreinst:
|
||||
print_deps(srcrdepends, "Requires(pre)", spec_preamble_top, d)
|
||||
if srcrpostinst:
|
||||
print_deps(srcrdepends, "Requires(post)", spec_preamble_top, d)
|
||||
if srcrprerm:
|
||||
print_deps(srcrdepends, "Requires(preun)", spec_preamble_top, d)
|
||||
if srcrpostrm:
|
||||
print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
|
||||
|
||||
print_deps(srcrrecommends, "Recommends", spec_preamble_top, d)
|
||||
print_deps(srcrsuggests, "Suggests", spec_preamble_top, d)
|
||||
print_deps(srcrprovides, "Provides", spec_preamble_top, d)
|
||||
print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
|
||||
print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
|
||||
|
||||
spec_preamble_top.append('')
|
||||
|
||||
spec_preamble_top.append('%description')
|
||||
append_description(spec_preamble_top, srcdescription)
|
||||
|
||||
spec_preamble_top.append('')
|
||||
|
||||
if srcrpreinst:
|
||||
spec_scriptlets_top.append('%pre')
|
||||
spec_scriptlets_top.append('# %s - preinst' % srcname)
|
||||
spec_scriptlets_top.append(srcrpreinst)
|
||||
spec_scriptlets_top.append('')
|
||||
if srcrpostinst:
|
||||
spec_scriptlets_top.append('%post')
|
||||
spec_scriptlets_top.append('# %s - postinst' % srcname)
|
||||
spec_scriptlets_top.append(srcrpostinst)
|
||||
spec_scriptlets_top.append('')
|
||||
if srcrprerm:
|
||||
spec_scriptlets_top.append('%preun')
|
||||
spec_scriptlets_top.append('# %s - prerm' % srcname)
|
||||
scriptvar = wrap_uninstall(srcrprerm)
|
||||
spec_scriptlets_top.append(scriptvar)
|
||||
spec_scriptlets_top.append('')
|
||||
if srcrpostrm:
|
||||
spec_scriptlets_top.append('%postun')
|
||||
spec_scriptlets_top.append('# %s - postrm' % srcname)
|
||||
scriptvar = wrap_uninstall(srcrpostrm)
|
||||
spec_scriptlets_top.append(scriptvar)
|
||||
spec_scriptlets_top.append('')
|
||||
|
||||
# Write the SPEC file
|
||||
specfile = open(outspecfile, 'w')
|
||||
|
||||
# RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
|
||||
# of the generated spec file
|
||||
external_preamble = d.getVar("RPMSPEC_PREAMBLE")
|
||||
if external_preamble:
|
||||
specfile.write(external_preamble + "\n")
|
||||
|
||||
for line in spec_preamble_top:
|
||||
specfile.write(line + "\n")
|
||||
|
||||
for line in spec_preamble_bottom:
|
||||
specfile.write(line + "\n")
|
||||
|
||||
for line in spec_scriptlets_top:
|
||||
specfile.write(line + "\n")
|
||||
|
||||
for line in spec_scriptlets_bottom:
|
||||
specfile.write(line + "\n")
|
||||
|
||||
for line in spec_files_top:
|
||||
specfile.write(line + "\n")
|
||||
|
||||
for line in spec_files_bottom:
|
||||
specfile.write(line + "\n")
|
||||
|
||||
specfile.close()
|
||||
}
|
||||
# Exclude OVERRIDES from the task signature of write_specfile:
# otherwise allarch packages may change depending on override configuration.
write_specfile[vardepsexclude] = "OVERRIDES"

# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
RPMEXTRAVARS = "PACKAGE_ADD_METADATA_RPM"
write_specfile[vardeps] += "${@gen_packagevar(d, 'RPMEXTRAVARS')}"
|
||||
|
||||
# Generate the spec file (via write_specfile) and run rpmbuild to produce
# the binary RPMs — and, when the archiver class requests it, the source RPM.
python do_package_rpm () {
    workdir = d.getVar('WORKDIR')
    tmpdir = d.getVar('TMPDIR')
    pkgd = d.getVar('PKGD')
    if not workdir or not pkgd or not tmpdir:
        bb.error("Variables incorrectly set, unable to package")
        return

    packages = d.getVar('PACKAGES')
    if not packages or packages == '':
        bb.debug(1, "No packages; nothing to do")
        return

    # Construct the spec file...
    # If the spec file already exist, and has not been stored into
    # pseudo's files.db, it maybe cause rpmbuild src.rpm fail,
    # so remove it before doing rpmbuild src.rpm.
    srcname = d.getVar('PN')
    outspecfile = workdir + "/" + srcname + ".spec"
    if os.path.isfile(outspecfile):
        os.remove(outspecfile)
    d.setVar('OUTSPECFILE', outspecfile)
    bb.build.exec_func('write_specfile', d)

    # Per-file dependency generation is the default; MERGEPERFILEDEPS = "1"
    # disables it and falls back to rpm's %{nil} find-requires/provides below.
    perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
    if perfiledeps:
        outdepends, outprovides = write_rpm_perfiledata(srcname, d)

    # Setup the rpmbuild arguments...
    rpmbuild = d.getVar('RPMBUILD')
    rpmbuild_compmode = d.getVar('RPMBUILD_COMPMODE')
    rpmbuild_extra_params = d.getVar('RPMBUILD_EXTRA_PARAMS') or ""

    # Too many places in dnf stack assume that arch-independent packages are "noarch".
    # Let's not fight against this.
    package_arch = (d.getVar('PACKAGE_ARCH') or "").replace("-", "_")
    if package_arch == "all":
        package_arch = "noarch"

    d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
    pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
    d.setVar('RPM_PKGWRITEDIR', pkgwritedir)
    bb.debug(1, 'PKGWRITEDIR: %s' % d.getVar('RPM_PKGWRITEDIR'))
    pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-linux')
    bb.utils.mkdirhier(pkgwritedir)
    os.chmod(pkgwritedir, 0o755)

    # Assemble the rpmbuild command line one --define at a time.
    cmd = rpmbuild
    cmd = cmd + " --noclean --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
    cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
    cmd = cmd + " --define '_builddir " + d.getVar('B') + "'"
    cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
    cmd = cmd + " --define '_use_internal_dependency_generator 0'"
    cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
    cmd = cmd + " --define '_build_id_links none'"
    cmd = cmd + " --define '_smp_ncpus_max 4'"
    cmd = cmd + " --define '_source_payload %s'" % rpmbuild_compmode
    cmd = cmd + " --define '_binary_payload %s'" % rpmbuild_compmode
    # Reproducible-build settings: clamp timestamps to SOURCE_DATE_EPOCH and
    # pin the recorded build host.
    cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
    cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
    cmd = cmd + " --define '_buildhost reproducible'"
    cmd = cmd + " --define '__font_provides %{nil}'"
    if perfiledeps:
        cmd = cmd + " --define '__find_requires " + outdepends + "'"
        cmd = cmd + " --define '__find_provides " + outprovides + "'"
    else:
        cmd = cmd + " --define '__find_requires %{nil}'"
        cmd = cmd + " --define '__find_provides %{nil}'"
    cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
    cmd = cmd + " --define 'debug_package %{nil}'"
    cmd = cmd + " --define '_tmppath " + workdir + "'"
    cmd = cmd + " --define '_use_weak_usergroup_deps 1'"
    # Point passwd/group lookups at a bogus path so host users/groups cannot
    # leak into the generated packages.
    cmd = cmd + " --define '_passwd_path " + "/completely/bogus/path" + "'"
    cmd = cmd + " --define '_group_path " + "/completely/bogus/path" + "'"
    cmd = cmd + rpmbuild_extra_params
    if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
        cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR') + "'"
        cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_RPMOUTDIR') + "'"
        cmdsrpm = cmdsrpm + " -bs " + outspecfile
        # Build the .src.rpm
        d.setVar('SBUILDSPEC', cmdsrpm + "\n")
        d.setVarFlag('SBUILDSPEC', 'func', '1')
        bb.build.exec_func('SBUILDSPEC', d)
    cmd = cmd + " -bb " + outspecfile

    # rpm 4 creates various empty directories in _topdir, let's clean them up
    cleanupcmd = "rm -rf %s/BUILDROOT %s/SOURCES %s/SPECS %s/SRPMS" % (workdir, workdir, workdir, workdir)

    # Build the rpm package!
    d.setVar('BUILDSPEC', cmd + "\n" + cleanupcmd + "\n")
    d.setVarFlag('BUILDSPEC', 'func', '1')
    bb.build.exec_func('BUILDSPEC', d)

    if d.getVar('RPM_SIGN_PACKAGES') == '1':
        bb.build.exec_func("sign_rpm", d)
}
|
||||
|
||||
# Anonymous function: wire up do_package_write_rpm's dependencies and flags
# at parse time, but only for recipes that actually produce packages.
python () {
    if d.getVar('PACKAGES') != '':
        deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
        d.appendVarFlag('do_package_write_rpm', 'depends', deps)
        d.setVarFlag('do_package_write_rpm', 'fakeroot', '1')

        # Needed to ensure PKG_xxx renaming of dependency packages works
        d.setVarFlag('do_package_write_rpm', 'deptask', "do_packagedata")
        d.setVarFlag('do_package_write_rpm', 'rdeptask', "do_packagedata")
}
|
||||
|
||||
# Register do_package_write_rpm with shared state: its output directory is
# staged into DEPLOY_DIR_RPM when restored from sstate.
SSTATETASKS += "do_package_write_rpm"
do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}"
# Take a shared lock, we can write multiple packages at the same time...
# but we need to stop the rootfs/solver from running while we do...
do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock"
|
||||
|
||||
# Setscene variant: restore the packaged RPMs from shared state instead of
# rebuilding them.
python do_package_write_rpm_setscene () {
    sstate_setscene(d)
}
addtask do_package_write_rpm_setscene
|
||||
|
||||
# Main packaging task body: load the per-package metadata written by
# do_packagedata, then run the RPM generation in do_package_rpm.
python do_package_write_rpm () {
    bb.build.exec_func("read_subpackage_metadata", d)
    bb.build.exec_func("do_package_rpm", d)
}
|
||||
|
||||
# Task wiring: run inside (and clean) the RPM write directory, depend on the
# sysroots of any extra package-write tools, and hook into the build graph.
do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
addtask package_write_rpm after do_packagedata do_package do_deploy_source_date_epoch before do_build
do_build[rdeptask] += "do_package_write_rpm"

# Tools required to build the package feed index for RPM.
PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
|
||||
40
sources/poky/meta/classes-global/packagedata.bbclass
Normal file
40
sources/poky/meta/classes-global/packagedata.bbclass
Normal file
@@ -0,0 +1,40 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Load the pkgdata previously written by do_packagedata back into the
# datastore, cross-checking that PN/PE/PV/PR still match what was recorded.
# A mismatch means stale or conflicting pkgdata and is fatal.
python read_subpackage_metadata () {
    import oe.packagedata

    # Identity of this recipe as it should appear in the recorded pkgdata.
    vars = {
        "PN" : d.getVar('PN'),
        "PE" : d.getVar('PE'),
        "PV" : d.getVar('PV'),
        "PR" : d.getVar('PR'),
    }

    data = oe.packagedata.read_pkgdata(vars["PN"], d)

    for key in data.keys():
        d.setVar(key, data[key])

    for pkg in d.getVar('PACKAGES').split():
        sdata = oe.packagedata.read_subpkgdata(pkg, d)
        for key in sdata.keys():
            if key in vars:
                if sdata[key] != vars[key]:
                    if key == "PN":
                        bb.fatal("Recipe %s is trying to create package %s which was already written by recipe %s. This will cause corruption, please resolve this and only provide the package from one recipe or the other or only build one of the recipes." % (vars[key], pkg, sdata[key]))
                    bb.fatal("Recipe %s is trying to change %s from '%s' to '%s'. This will cause do_package_write_* failures since the incorrect data will be used and they will be unable to find the right workdir." % (vars["PN"], key, vars[key], sdata[key]))
                continue
            #
            # If we set unsuffixed variables here there is a chance they could clobber override versions
            # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION:<pkgname>
            # We therefore don't clobber for the unsuffixed variable versions
            #
            if key.endswith(":" + pkg):
                d.setVar(key, sdata[key])
            else:
                d.setVar(key, sdata[key], parsing=True)
}
|
||||
169
sources/poky/meta/classes-global/patch.bbclass
Normal file
169
sources/poky/meta/classes-global/patch.bbclass
Normal file
@@ -0,0 +1,169 @@
|
||||
# Copyright (C) 2006 OpenedHand LTD
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
# Point to an empty file so any user's custom settings don't break things
QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"

# Build dependency on whichever patch tool the recipe selects.
PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"

# There is a bug in patch 2.7.3 and earlier where index lines
# in patches can change file modes when they shouldn't:
# http://git.savannah.gnu.org/cgit/patch.git/patch/?id=82b800c9552a088a241457948219d25ce0a407a4
# This leaks into debug sources in particular. Add the dependency
# to target recipes to avoid this problem until we can rely on 2.7.4 or later.
PATCHDEPENDENCY:append:class-target = " patch-replacement-native:do_populate_sysroot"

# Author identity — presumably used for commits when PATCHTOOL = "git";
# confirm against oe.patch.GitApplyTree.
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"

inherit terminal
|
||||
# Anonymous function: when patches are applied with git and commit tracking is
# enabled, attach pre/post hooks to every task between do_unpack and do_patch
# so local modifications get committed into the source tree's git history.
python () {
    if d.getVar('PATCHTOOL') == 'git' and d.getVar('PATCH_COMMIT_FUNCTIONS') == '1':
        extratasks = bb.build.tasksbetween('do_unpack', 'do_patch', d)
        try:
            extratasks.remove('do_unpack')
        except ValueError:
            # For some recipes do_unpack doesn't exist, ignore it
            pass

        d.appendVarFlag('do_patch', 'prefuncs', ' patch_task_patch_prefunc')
        for task in extratasks:
            d.appendVarFlag(task, 'postfuncs', ' patch_task_postfunc')
}
|
||||
|
||||
python patch_task_patch_prefunc() {
    # Prefunc for do_patch: sanity-check the git setup and stash state about
    # any pre-existing 'patches' directory in ${S}.
    srcsubdir = d.getVar('S')

    workdir = os.path.abspath(d.getVar('WORKDIR'))
    testsrcdir = os.path.abspath(srcsubdir)
    if (testsrcdir + os.sep).startswith(workdir + os.sep):
        # Double-check that either workdir or S or some directory in-between is a git repository
        found = False
        while testsrcdir != workdir:
            if os.path.exists(os.path.join(testsrcdir, '.git')):
                found = True
                break
            if testsrcdir == workdir:
                break
            testsrcdir = os.path.dirname(testsrcdir)
        if not found:
            bb.fatal('PATCHTOOL = "git" set for source tree that is not a git repository. Refusing to continue as that may result in commits being made in your metadata repository.')

    # Record whether ${S}/patches already has content; an empty leftover
    # directory is simply removed.
    patchdir = os.path.join(srcsubdir, 'patches')
    if os.path.exists(patchdir):
        if os.listdir(patchdir):
            d.setVar('PATCH_HAS_PATCHES_DIR', '1')
        else:
            os.rmdir(patchdir)
}
|
||||
|
||||
python patch_task_postfunc() {
    # Postfunc for task functions between do_unpack and do_patch (attached via
    # 'postfuncs' by the anonymous python above): commit any changes the task
    # made to the source tree so the git history stays consistent.
    import oe.patch
    import shutil
    func = d.getVar('BB_RUNTASK')
    srcsubdir = d.getVar('S')

    if os.path.exists(srcsubdir):
        if func == 'do_patch':
            # Remove quilt's 'patches' dir; restore it from git if it existed
            # before patching (see patch_task_patch_prefunc).
            haspatches = (d.getVar('PATCH_HAS_PATCHES_DIR') == '1')
            patchdir = os.path.join(srcsubdir, 'patches')
            if os.path.exists(patchdir):
                shutil.rmtree(patchdir)
                if haspatches:
                    stdout, _ = bb.process.run('git status --porcelain patches', cwd=srcsubdir)
                    if stdout:
                        bb.process.run('git checkout patches', cwd=srcsubdir)
        # Commit any remaining working-tree changes attributed to this task.
        stdout, _ = bb.process.run('git status --porcelain .', cwd=srcsubdir)
        if stdout:
            oe.patch.GitApplyTree.commitIgnored("Add changes from %s" % func, dir=srcsubdir, files=['.'], d=d)
}
|
||||
|
||||
def src_patches(d, all=False, expand=True):
    """Return the list of patch entries from SRC_URI (delegates to oe.patch)."""
    import oe.patch
    return oe.patch.src_patches(d, all, expand)

def should_apply(parm, d):
    """Determine if we should apply the given patch"""
    import oe.patch
    return oe.patch.should_apply(parm, d)

# Applicability may be date-based; keep DATE/SRCDATE out of the signature.
should_apply[vardepsexclude] = "DATE SRCDATE"
|
||||
# Apply all patches from SRC_URI to the source tree using the tool selected
# by PATCHTOOL (patch/quilt/git), resolving failures per PATCHRESOLVE.
python patch_do_patch() {
    import oe.patch

    # Map PATCHTOOL value to the PatchSet implementation that drives it.
    patchsetmap = {
        "patch": oe.patch.PatchTree,
        "quilt": oe.patch.QuiltTree,
        "git": oe.patch.GitApplyTree,
    }

    cls = patchsetmap[d.getVar('PATCHTOOL') or 'quilt']

    # Map PATCHRESOLVE to the conflict-resolution strategy.
    resolvermap = {
        "noop": oe.patch.NOOPResolver,
        "user": oe.patch.UserResolver,
    }

    rcls = resolvermap[d.getVar('PATCHRESOLVE') or 'user']

    # One (patchset, resolver) pair per target directory.
    classes = {}

    s = d.getVar('S')

    os.putenv('PATH', d.getVar('PATH'))

    # We must use one TMPDIR per process so that the "patch" processes
    # don't generate the same temp file name.

    import tempfile
    process_tmpdir = tempfile.mkdtemp()
    os.environ['TMPDIR'] = process_tmpdir

    for patch in src_patches(d):
        _, _, local, _, _, parm = bb.fetch.decodeurl(patch)

        # Honour a per-patch 'patchdir' parameter; relative paths are
        # resolved against ${S}.
        if "patchdir" in parm:
            patchdir = parm["patchdir"]
            if not os.path.isabs(patchdir):
                patchdir = os.path.join(s, patchdir)
            if not os.path.isdir(patchdir):
                bb.fatal("Target directory '%s' not found, patchdir '%s' is incorrect in patch file '%s'" %
                        (patchdir, parm["patchdir"], parm['patchname']))
        else:
            patchdir = s

        if not patchdir in classes:
            patchset = cls(patchdir, d)
            resolver = rcls(patchset, oe_terminal)
            classes[patchdir] = (patchset, resolver)
            patchset.Clean()
        else:
            patchset, resolver = classes[patchdir]

        bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
        try:
            patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
        except Exception as exc:
            bb.utils.remove(process_tmpdir, True)
            bb.fatal("Importing patch '%s' with striplevel '%s'\n%s" % (parm['patchname'], parm['striplevel'], repr(exc).replace("\\n", "\n")))
        try:
            resolver.Resolve()
        except bb.BBHandledException as e:
            bb.utils.remove(process_tmpdir, True)
            bb.fatal("Applying patch '%s' on target directory '%s'\n%s" % (parm['patchname'], patchdir, repr(e).replace("\\n", "\n")))

    bb.utils.remove(process_tmpdir, True)
    del os.environ['TMPDIR']
}
# PATCHRESOLVE only affects interactive conflict handling, not output.
patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
|
||||
|
||||
# Schedule patching directly after unpack and export the implementation so
# inheriting classes may override do_patch.
addtask patch after do_unpack
do_patch[dirs] = "${WORKDIR}"
do_patch[depends] = "${PATCHDEPENDENCY}"

EXPORT_FUNCTIONS do_patch
|
||||
1062
sources/poky/meta/classes-global/sanity.bbclass
Normal file
1062
sources/poky/meta/classes-global/sanity.bbclass
Normal file
File diff suppressed because it is too large
Load Diff
1377
sources/poky/meta/classes-global/sstate.bbclass
Normal file
1377
sources/poky/meta/classes-global/sstate.bbclass
Normal file
File diff suppressed because it is too large
Load Diff
695
sources/poky/meta/classes-global/staging.bbclass
Normal file
695
sources/poky/meta/classes-global/staging.bbclass
Normal file
@@ -0,0 +1,695 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# These directories will be staged in the sysroot
SYSROOT_DIRS = " \
    ${includedir} \
    ${libdir} \
    ${base_libdir} \
    ${nonarch_base_libdir} \
    ${datadir} \
    /sysroot-only \
"

# These directories are also staged in the sysroot when they contain files that
# are usable on the build system
SYSROOT_DIRS_NATIVE = " \
    ${bindir} \
    ${sbindir} \
    ${base_bindir} \
    ${base_sbindir} \
    ${libexecdir} \
    ${sysconfdir} \
    ${localstatedir} \
"
SYSROOT_DIRS:append:class-native = " ${SYSROOT_DIRS_NATIVE}"
SYSROOT_DIRS:append:class-cross = " ${SYSROOT_DIRS_NATIVE}"
SYSROOT_DIRS:append:class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"

# These directories will not be staged in the sysroot
SYSROOT_DIRS_IGNORE = " \
    ${mandir} \
    ${docdir} \
    ${infodir} \
    ${datadir}/X11/locale \
    ${datadir}/applications \
    ${datadir}/bash-completion \
    ${datadir}/fonts \
    ${datadir}/gtk-doc/html \
    ${datadir}/installed-tests \
    ${datadir}/locale \
    ${datadir}/pixmaps \
    ${datadir}/terminfo \
    ${libdir}/${BPN}/ptest \
"
|
||||
# Stage one directory tree into the sysroot destination, hard-linking files
# where possible (cpio -pdlu).
sysroot_stage_dir() {
    src="$1"
    dest="$2"
    # if the src doesn't exist don't do anything
    if [ ! -d "$src" ]; then
        return
    fi

    mkdir -p "$dest"
    # cpio takes a destination relative to the cwd, so compute it.
    rdest=$(realpath --relative-to="$src" "$dest")
    (
        cd $src
        find . -print0 | cpio --null -pdlu $rdest
    )
}
|
||||
|
||||
# Stage every SYSROOT_DIRS entry from $1 into $2, then prune anything listed
# in SYSROOT_DIRS_IGNORE from the result.
sysroot_stage_dirs() {
    from="$1"
    to="$2"

    for dir in ${SYSROOT_DIRS}; do
        sysroot_stage_dir "$from$dir" "$to$dir"
    done

    # Remove directories we do not care about
    for dir in ${SYSROOT_DIRS_IGNORE}; do
        rm -rf "$to$dir"
    done
}
|
||||
|
||||
# Default staging entry point: copy the install output (${D}) into the
# sysroot destination directory.
sysroot_stage_all() {
    sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
}
|
||||
|
||||
# Strip executables in the staged sysroot unless INHIBIT_SYSROOT_STRIP is set.
python sysroot_strip () {
    inhibit_sysroot = d.getVar('INHIBIT_SYSROOT_STRIP')
    if inhibit_sysroot and oe.types.boolean(inhibit_sysroot):
        return

    dstdir = d.getVar('SYSROOT_DESTDIR')
    pn = d.getVar('PN')
    libdir = d.getVar("libdir")
    base_libdir = d.getVar("base_libdir")
    # Suppress the QA re-strip warning when the recipe declares its binaries
    # are already stripped.
    qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split()
    strip_cmd = d.getVar("STRIP")

    max_process = oe.utils.get_bb_number_threads(d)
    oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, max_process,
                           qa_already_stripped=qa_already_stripped)
}
|
||||
|
||||
do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"

addtask populate_sysroot after do_install

# Optional per-recipe functions run over the staged sysroot before packing.
SYSROOT_PREPROCESS_FUNCS ?= ""
SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
|
||||
|
||||
# Stage, strip and post-process the sysroot output, then record which
# virtual/... providers this recipe satisfies as marker files.
python do_populate_sysroot () {
    # SYSROOT 'version' 2
    bb.build.exec_func("sysroot_stage_all", d)
    bb.build.exec_func("sysroot_strip", d)
    for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
        bb.build.exec_func(f, d)
    pn = d.getVar("PN")
    multiprov = d.getVar("BB_MULTI_PROVIDER_ALLOWED").split()
    provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
    bb.utils.mkdirhier(provdir)
    for p in d.getVar("PROVIDES").split():
        # Multi-provider-allowed names are skipped: several recipes may
        # legitimately provide them at once.
        if p in multiprov:
            continue
        p = p.replace("/", "_")
        with open(provdir + p, "w") as f:
            f.write(pn)
}
|
||||
|
||||
do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
do_populate_sysroot[vardepsexclude] += "BB_MULTI_PROVIDER_ALLOWED"

# Target/SDK builds need binutils in the sysroot for stripping.
POPULATESYSROOTDEPS = ""
POPULATESYSROOTDEPS:class-target = "virtual/${HOST_PREFIX}binutils:do_populate_sysroot"
POPULATESYSROOTDEPS:class-nativesdk = "virtual/${HOST_PREFIX}binutils:do_populate_sysroot"
do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"

# Shared-state wiring: the staged sysroot is relocated into the per-arch
# components directory when restored from sstate.
SSTATETASKS += "do_populate_sysroot"
do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
do_populate_sysroot[sstate-fixmedir] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"

python do_populate_sysroot_setscene () {
    sstate_setscene(d)
}
addtask do_populate_sysroot_setscene
|
||||
|
||||
def staging_copyfile(c, target, dest, postinsts, seendirs):
    """Stage one manifest entry *c* at *dest*.

    Symlinks are recreated (erroring if *dest* already points elsewhere);
    regular files are hard-linked, falling back to a copy across
    filesystems. Postinst scripts are collected into *postinsts* and
    created parent directories are tracked in *seendirs*. Returns *dest*.
    """
    import errno

    parent = os.path.dirname(dest)
    if parent not in seendirs:
        bb.utils.mkdirhier(parent)
        seendirs.add(parent)

    if "/usr/bin/postinst-" in c:
        postinsts.append(dest)

    if not os.path.islink(c):
        # Regular file: prefer a hard link; EXDEV means a filesystem
        # boundary, so fall back to copying.
        try:
            os.link(c, dest)
        except OSError as exc:
            if exc.errno != errno.EXDEV:
                raise
            bb.utils.copyfile(c, dest)
        return dest

    # Symlink: reproduce it, tolerating an identical pre-existing link.
    link_target = os.readlink(c)
    if os.path.lexists(dest):
        if not os.path.islink(dest):
            raise OSError(errno.EEXIST, "Link %s already exists as a file" % dest, dest)
        if os.readlink(dest) == link_target:
            return dest
        raise OSError(errno.EEXIST, "Link %s already exists to a different location? (%s vs %s)" % (dest, os.readlink(dest), link_target), dest)
    os.symlink(link_target, dest)
    return dest
||||
|
||||
def staging_copydir(c, target, dest, seendirs):
    """Ensure directory *dest* exists, creating it at most once per run.

    *seendirs* memoizes directories already created so repeated manifest
    entries do not re-invoke mkdirhier.
    """
    if dest in seendirs:
        return
    bb.utils.mkdirhier(dest)
    seendirs.add(dest)
|
||||
|
||||
def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
    """Rewrite relocation placeholders inside the files listed in *fixme*.

    Each entry in *fixme* is a path (relative under the staging area) to a
    'fixmepath' file; sed replaces the FIXMESTAGINGDIR*/FIXME_<var> markers
    with the concrete sysroot and tool paths for this recipe. No-op when
    *fixme* is empty.
    """
    import subprocess

    if not fixme:
        return
    # First sed rewrites the leading path component to *target*; the piped
    # xargs/sed then substitutes the staging placeholders in-place.
    cmd = "sed -e 's:^[^/]*/:%s/:g' %s | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (target, " ".join(fixme), recipesysroot, recipesysrootnative)
    for fixmevar in ['PSEUDO_SYSROOT', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
        fixme_path = d.getVar(fixmevar)
        cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
    bb.debug(2, cmd)
    subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
|
||||
|
||||
|
||||
def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
    """Populate a recipe sysroot directly from sstate manifests.

    Used on the 'bitbake -b' (no-dependency) path: walks the populate_sysroot
    manifests for the relevant package architectures, links/copies every listed
    file into *targetsysroot* or *nativesysroot* (selected by *native*), runs
    the fixme relocation pass and finally executes any collected postinsts.
    """
    import glob
    import subprocess
    import errno

    fixme = []
    postinsts = []
    seendirs = set()
    stagingdir = d.getVar("STAGING_DIR")
    # Pick the manifest architectures and destination for native vs target.
    if native:
        pkgarchs = ['${BUILD_ARCH}', '${BUILD_ARCH}_*']
        targetdir = nativesysroot
    else:
        pkgarchs = ['${MACHINE_ARCH}']
        pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
        pkgarchs.append('allarch')
        targetdir = targetsysroot

    bb.utils.mkdirhier(targetdir)
    for pkgarch in pkgarchs:
        for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.populate_sysroot" % pkgarch)):
            if manifest.endswith("-initial.populate_sysroot"):
                # skip libgcc-initial due to file overlap
                continue
            # Keep native manifests out of the target sysroot and vice versa.
            if not native and (manifest.endswith("-native.populate_sysroot") or "nativesdk-" in manifest):
                continue
            if native and not (manifest.endswith("-native.populate_sysroot") or manifest.endswith("-cross.populate_sysroot") or "-cross-" in manifest):
                continue
            tmanifest = targetdir + "/" + os.path.basename(manifest)
            # A manifest copy in the sysroot marks it as already installed.
            if os.path.exists(tmanifest):
                continue
            try:
                os.link(manifest, tmanifest)
            except OSError as err:
                if err.errno == errno.EXDEV:
                    bb.utils.copyfile(manifest, tmanifest)
                else:
                    raise
            with open(manifest, "r") as f:
                for l in f:
                    l = l.strip()
                    # fixmepath files are relocated later; their .cmd helpers
                    # are not staged at all.
                    if l.endswith("/fixmepath"):
                        fixme.append(l)
                        continue
                    if l.endswith("/fixmepath.cmd"):
                        continue
                    dest = l.replace(stagingdir, "")
                    dest = targetdir + "/" + "/".join(dest.split("/")[3:])
                    if l.endswith("/"):
                        staging_copydir(l, targetdir, dest, seendirs)
                        continue
                    try:
                        staging_copyfile(l, targetdir, dest, postinsts, seendirs)
                    except FileExistsError:
                        continue

    staging_processfixme(fixme, targetdir, targetsysroot, nativesysroot, d)
    for p in sorted(postinsts):
        bb.note("Running postinst {}, output:\n{}".format(p, subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)))
|
||||
|
||||
#
|
||||
# Manifests here are complicated. The main sysroot area has the unpacked sstate
|
||||
# which is unrelocated and tracked by the main sstate manifests. Each recipe
|
||||
# specific sysroot has manifests for each dependency that is installed there.
|
||||
# The task hash is used to tell whether the data needs to be reinstalled. We
|
||||
# use a symlink to point to the currently installed hash. There is also a
|
||||
# "complete" stamp file which is used to mark if installation completed. If
|
||||
# something fails (e.g. a postinst), this won't get written and we would
|
||||
# remove and reinstall the dependency. This also means partially installed
|
||||
# dependencies should get cleaned up correctly.
|
||||
#
|
||||
|
||||
python extend_recipe_sysroot() {
    # Populate this recipe's private sysroot (RECIPE_SYSROOT /
    # RECIPE_SYSROOT_NATIVE) from the do_populate_sysroot output of its
    # dependencies, using the per-dependency manifests under
    # <depdir>/installeddeps to track what is installed and at which taskhash.
    # Runs under the per-recipe sysroot lock.
    import copy
    import subprocess
    import errno
    import collections
    import glob

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    mytaskname = d.getVar("BB_RUNTASK")
    if mytaskname.endswith("_setscene"):
        mytaskname = mytaskname.replace("_setscene", "")
    workdir = d.getVar("WORKDIR")
    #bb.warn(str(taskdepdata))
    pn = d.getVar("PN")
    stagingdir = d.getVar("STAGING_DIR")
    sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
    # only needed by multilib cross-canadian since it redefines RECIPE_SYSROOT
    manifestprefix = d.getVar("RECIPE_SYSROOT_MANIFEST_SUBDIR")
    if manifestprefix:
        sharedmanifests = sharedmanifests + "/" + manifestprefix
    recipesysroot = d.getVar("RECIPE_SYSROOT")
    recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")

    # Detect bitbake -b usage
    # With limited dependency data there is no task graph to walk, so just
    # install everything available and return early.
    nodeps = d.getVar("BB_LIMITEDDEPS") or False
    if nodeps:
        lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
        staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, True, d)
        staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, False, d)
        bb.utils.unlockfile(lock)
        return

    start = None
    configuredeps = []
    owntaskdeps = []
    # Locate our own task in the dependency data and collect the names of
    # the other tasks belonging to this recipe (written to the index later).
    for dep in taskdepdata:
        data = taskdepdata[dep]
        if data[1] == mytaskname and data[0] == pn:
            start = dep
        elif data[0] == pn:
            owntaskdeps.append(data[1])
    if start is None:
        bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")

    # We need to figure out which sysroot files we need to expose to this task.
    # This needs to match what would get restored from sstate, which is controlled
    # ultimately by calls from bitbake to setscene_depvalid().
    # That function expects a setscene dependency tree. We build a dependency tree
    # condensed to inter-sstate task dependencies, similar to that used by setscene
    # tasks. We can then call into setscene_depvalid() and decide
    # which dependencies we can "see" and should expose in the recipe specific sysroot.
    setscenedeps = copy.deepcopy(taskdepdata)

    start = set([start])

    sstatetasks = d.getVar("SSTATETASKS").split()
    # Add recipe specific tasks referenced by setscene_depvalid()
    sstatetasks.append("do_stash_locale")
    sstatetasks.append("do_deploy")

    def print_dep_tree(deptree):
        # Debug helper: render the dependency tree in a readable form.
        data = ""
        for dep in deptree:
            deps = " " + "\n ".join(deptree[dep][3]) + "\n"
            data = data + "%s:\n %s\n %s\n%s %s\n %s\n" % (deptree[dep][0], deptree[dep][1], deptree[dep][2], deps, deptree[dep][4], deptree[dep][5])
        return data

    #bb.note("Full dep tree is:\n%s" % print_dep_tree(taskdepdata))

    #bb.note(" start2 is %s" % str(start))

    # If start is an sstate task (like do_package) we need to add in its direct dependencies
    # else the code below won't recurse into them.
    for dep in set(start):
        for dep2 in setscenedeps[dep][3]:
            start.add(dep2)
        start.remove(dep)

    #bb.note(" start3 is %s" % str(start))

    # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
    # Non-sstate tasks are removed and their dependencies re-attached to
    # whichever tasks depended on them.
    for dep in taskdepdata:
        data = setscenedeps[dep]
        if data[1] not in sstatetasks:
            for dep2 in setscenedeps:
                data2 = setscenedeps[dep2]
                if dep in data2[3]:
                    data2[3].update(setscenedeps[dep][3])
                    data2[3].remove(dep)
            if dep in start:
                start.update(setscenedeps[dep][3])
                start.remove(dep)
            del setscenedeps[dep]

    # Remove circular references
    for dep in setscenedeps:
        if dep in setscenedeps[dep][3]:
            setscenedeps[dep][3].remove(dep)

    #bb.note("Computed dep tree is:\n%s" % print_dep_tree(setscenedeps))
    #bb.note(" start is %s" % str(start))

    # Direct dependencies should be present and can be depended upon
    for dep in sorted(set(start)):
        if setscenedeps[dep][1] == "do_populate_sysroot":
            if dep not in configuredeps:
                configuredeps.append(dep)
    bb.note("Direct dependencies are %s" % str(configuredeps))
    #bb.note(" or %s" % str(start))

    msgbuf = []
    # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
    # for ones that would be restored from sstate.
    done = list(start)
    next = list(start)
    while next:
        new = []
        for dep in next:
            data = setscenedeps[dep]
            for datadep in data[3]:
                if datadep in done:
                    continue
                taskdeps = {}
                taskdeps[dep] = setscenedeps[dep][:2]
                taskdeps[datadep] = setscenedeps[datadep][:2]
                retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
                if retval:
                    msgbuf.append("Skipping setscene dependency %s for installation into the sysroot" % datadep)
                    continue
                done.append(datadep)
                new.append(datadep)
                if datadep not in configuredeps and setscenedeps[datadep][1] == "do_populate_sysroot":
                    configuredeps.append(datadep)
                    msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
                else:
                    msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
        next = new

    # This logging is too verbose for day to day use sadly
    #bb.debug(2, "\n".join(msgbuf))

    depdir = recipesysrootnative + "/installeddeps"
    bb.utils.mkdirhier(depdir)
    bb.utils.mkdirhier(sharedmanifests)

    lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")

    fixme = {}
    seendirs = set()
    postinsts = []
    multilibs = {}
    manifests = {}
    # All files that we're going to be installing, to find conflicts.
    fileset = {}

    # Garbage-collect installed dependencies whose manifest symlink target
    # has disappeared (e.g. cleaned sstate), and remember what was removed.
    invalidate_tasks = set()
    for f in os.listdir(depdir):
        removed = []
        if not f.endswith(".complete"):
            continue
        f = depdir + "/" + f
        if os.path.islink(f) and not os.path.exists(f):
            bb.note("%s no longer exists, removing from sysroot" % f)
            lnk = os.readlink(f.replace(".complete", ""))
            sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
            os.unlink(f)
            os.unlink(f.replace(".complete", ""))
            removed.append(os.path.basename(f.replace(".complete", "")))

        # If we've removed files from the sysroot above, the task that installed them may still
        # have a stamp file present for the task. This is probably invalid right now but may become
        # valid again if the user were to change configuration back for example. Since we've removed
        # the files a task might need, remove the stamp file too to force it to rerun.
        # YOCTO #14790
        if removed:
            for i in glob.glob(depdir + "/index.*"):
                if i.endswith("." + mytaskname):
                    continue
                with open(i, "r") as f:
                    for l in f:
                        if l.startswith("TaskDeps:"):
                            continue
                        l = l.strip()
                        if l in removed:
                            invalidate_tasks.add(i.rsplit(".", 1)[1])
                            break
    for t in invalidate_tasks:
        bb.note("Invalidating stamps for task %s" % t)
        bb.build.clean_stamp(t, d)

    installed = []
    for dep in configuredeps:
        c = setscenedeps[dep][0]
        if mytaskname in ["do_sdk_depends", "do_populate_sdk_ext"] and c.endswith("-initial"):
            bb.note("Skipping initial setscene dependency %s for installation into the sysroot" % c)
            continue
        installed.append(c)

    # We want to remove anything which this task previously installed but is no longer a dependency
    taskindex = depdir + "/" + "index." + mytaskname
    if os.path.exists(taskindex):
        potential = []
        with open(taskindex, "r") as f:
            for l in f:
                l = l.strip()
                if l not in installed:
                    fl = depdir + "/" + l
                    if not os.path.exists(fl):
                        # Was likely already uninstalled
                        continue
                    potential.append(l)
        # We need to ensure no other task needs this dependency. We hold the sysroot
        # lock so we can search the indexes to check
        if potential:
            for i in glob.glob(depdir + "/index.*"):
                if i.endswith("." + mytaskname):
                    continue
                with open(i, "r") as f:
                    for l in f:
                        if l.startswith("TaskDeps:"):
                            prevtasks = l.split()[1:]
                            if mytaskname in prevtasks:
                                # We're a dependency of this task so we can clear items out the sysroot
                                break
                        l = l.strip()
                        if l in potential:
                            potential.remove(l)
        for l in potential:
            fl = depdir + "/" + l
            bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
            lnk = os.readlink(fl)
            sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
            os.unlink(fl)
            os.unlink(fl + ".complete")

    msg_exists = []
    msg_adding = []

    # Handle all removals first since files may move between recipes
    for dep in configuredeps:
        c = setscenedeps[dep][0]
        if c not in installed:
            continue
        taskhash = setscenedeps[dep][5]
        taskmanifest = depdir + "/" + c + "." + taskhash

        if os.path.exists(depdir + "/" + c):
            lnk = os.readlink(depdir + "/" + c)
            if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
                continue
            else:
                bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
                sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
                os.unlink(depdir + "/" + c)
                if os.path.lexists(depdir + "/" + c + ".complete"):
                    os.unlink(depdir + "/" + c + ".complete")
        elif os.path.lexists(depdir + "/" + c):
            os.unlink(depdir + "/" + c)

    binfiles = {}
    # Now handle installs
    for dep in sorted(configuredeps):
        c = setscenedeps[dep][0]
        if c not in installed:
            continue
        taskhash = setscenedeps[dep][5]
        taskmanifest = depdir + "/" + c + "." + taskhash

        if os.path.exists(depdir + "/" + c):
            lnk = os.readlink(depdir + "/" + c)
            if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
                msg_exists.append(c)
                continue

        msg_adding.append(c)

        # Symlink records which taskhash is (being) installed for this dep.
        os.symlink(c + "." + taskhash, depdir + "/" + c)

        manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "populate_sysroot", d, multilibs)
        if d2 is not d:
            # If we don't do this, the recipe sysroot will be placed in the wrong WORKDIR for multilibs
            # We need a consistent WORKDIR for the image
            d2.setVar("WORKDIR", d.getVar("WORKDIR"))
        destsysroot = d2.getVar("RECIPE_SYSROOT")
        # We put allarch recipes into the default sysroot
        if manifest and "allarch" in manifest:
            destsysroot = d.getVar("RECIPE_SYSROOT")

        native = False
        if c.endswith("-native") or "-cross-" in c or "-crosssdk" in c:
            native = True

        if manifest:
            newmanifest = collections.OrderedDict()
            targetdir = destsysroot
            if native:
                targetdir = recipesysrootnative
            if targetdir not in fixme:
                fixme[targetdir] = []
            fm = fixme[targetdir]

            with open(manifest, "r") as f:
                manifests[dep] = manifest
                for l in f:
                    l = l.strip()
                    if l.endswith("/fixmepath"):
                        fm.append(l)
                        continue
                    if l.endswith("/fixmepath.cmd"):
                        continue
                    dest = l.replace(stagingdir, "")
                    dest = "/" + "/".join(dest.split("/")[3:])
                    newmanifest[l] = targetdir + dest

                    # Check if files have already been installed by another
                    # recipe and abort if they have, explaining what recipes are
                    # conflicting.
                    hashname = targetdir + dest
                    if not hashname.endswith("/"):
                        if hashname in fileset:
                            bb.fatal("The file %s is installed by both %s and %s, aborting" % (dest, c, fileset[hashname]))
                        else:
                            fileset[hashname] = c

            # Having multiple identical manifests in each sysroot eats diskspace so
            # create a shared pool of them and hardlink if we can.
            # We create the manifest in advance so that if something fails during installation,
            # or the build is interrupted, subsequent execution can cleanup.
            sharedm = sharedmanifests + "/" + os.path.basename(taskmanifest)
            if not os.path.exists(sharedm):
                smlock = bb.utils.lockfile(sharedm + ".lock")
                # Can race here. You'd think it just means we may not end up with all copies hardlinked to each other
                # but python can lose file handles so we need to do this under a lock.
                if not os.path.exists(sharedm):
                    with open(sharedm, 'w') as m:
                        for l in newmanifest:
                            dest = newmanifest[l]
                            m.write(dest.replace(workdir + "/", "") + "\n")
                bb.utils.unlockfile(smlock)
            try:
                os.link(sharedm, taskmanifest)
            except OSError as err:
                # EXDEV: shared pool on another filesystem, fall back to a copy.
                if err.errno == errno.EXDEV:
                    bb.utils.copyfile(sharedm, taskmanifest)
                else:
                    raise
            # Finally actually install the files
            for l in newmanifest:
                dest = newmanifest[l]
                if l.endswith("/"):
                    staging_copydir(l, targetdir, dest, seendirs)
                    continue
                if "/bin/" in l or "/sbin/" in l:
                    # defer /*bin/* files until last in case they need libs
                    binfiles[l] = (targetdir, dest)
                else:
                    staging_copyfile(l, targetdir, dest, postinsts, seendirs)

    # Handle deferred binfiles
    for l in binfiles:
        (targetdir, dest) = binfiles[l]
        staging_copyfile(l, targetdir, dest, postinsts, seendirs)

    bb.note("Installed into sysroot: %s" % str(msg_adding))
    bb.note("Skipping as already exists in sysroot: %s" % str(msg_exists))

    for f in fixme:
        staging_processfixme(fixme[f], f, recipesysroot, recipesysrootnative, d)

    for p in sorted(postinsts):
        bb.note("Running postinst {}, output:\n{}".format(p, subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)))

    # Only written once the postinsts have run cleanly: marks installation of
    # the dependency as complete.
    for dep in manifests:
        c = setscenedeps[dep][0]
        os.symlink(manifests[dep], depdir + "/" + c + ".complete")

    with open(taskindex, "w") as f:
        f.write("TaskDeps: " + " ".join(owntaskdeps) + "\n")
        for l in sorted(installed):
            f.write(l + "\n")

    bb.utils.unlockfile(lock)
}
|
||||
# These variables either vary between otherwise-equivalent builds or (like
# BB_TASKDEPDATA) change every run, so keep them out of the task signature.
extend_recipe_sysroot[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"

do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
python do_prepare_recipe_sysroot () {
    # Thin task wrapper: the real work lives in extend_recipe_sysroot so that
    # other tasks can also run it as a prefunc (see staging_taskhandler).
    bb.build.exec_func("extend_recipe_sysroot", d)
}
addtask do_prepare_recipe_sysroot before do_configure after do_fetch
|
||||
|
||||
python staging_taskhandler() {
    # Event handler: make sure any task that consumes sysroot content
    # (do_configure, or anything depending on a populate_sysroot task) runs
    # extend_recipe_sysroot first, except do_prepare_recipe_sysroot itself
    # which already executes it as its body.
    bbtasks = e.tasklist
    for task in bbtasks:
        deps = d.getVarFlag(task, "depends")
        if task != 'do_prepare_recipe_sysroot' and (task == "do_configure" or (deps and "populate_sysroot" in deps)):
            d.prependVarFlag(task, "prefuncs", "extend_recipe_sysroot ")
}
staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
addhandler staging_taskhandler
|
||||
|
||||
|
||||
#
|
||||
# Target build output, stored in do_populate_sysroot or do_package can depend
|
||||
# not only upon direct dependencies but also indirect ones. A good example is
|
||||
# linux-libc-headers. The toolchain depends on this but most target recipes do
|
||||
# not. There are some headers which are not used by the toolchain build and do
|
||||
# not change the toolchain task output, hence the task hashes can change without
|
||||
# changing the sysroot output of that recipe yet they can influence others.
|
||||
#
|
||||
# A specific example is rtc.h which can change rtcwake.c in util-linux but is not
|
||||
# used in the glibc or gcc build. To account for this, we need to account for the
|
||||
# populate_sysroot hashes in the task output hashes.
|
||||
#
|
||||
python target_add_sysroot_deps () {
|
||||
current_task = "do_" + d.getVar("BB_CURRENTTASK")
|
||||
if current_task not in ["do_populate_sysroot", "do_package"]:
|
||||
return
|
||||
|
||||
pn = d.getVar("PN")
|
||||
if pn.endswith("-native"):
|
||||
return
|
||||
|
||||
taskdepdata = d.getVar("BB_TASKDEPDATA", False)
|
||||
deps = {}
|
||||
for dep in taskdepdata.values():
|
||||
if dep[1] == "do_populate_sysroot" and not dep[0].endswith(("-native", "-initial")) and "-cross-" not in dep[0] and dep[0] != pn:
|
||||
deps[dep[0]] = dep[6]
|
||||
|
||||
d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
|
||||
}
|
||||
SSTATECREATEFUNCS += "target_add_sysroot_deps"
|
||||
|
||||
181
sources/poky/meta/classes-global/uninative.bbclass
Normal file
181
sources/poky/meta/classes-global/uninative.bbclass
Normal file
@@ -0,0 +1,181 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}${@bb.utils.contains('BUILD_ARCH', 'ppc64le', 'ld64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'riscv64', 'ld-linux-riscv64-lp64d.so.1', '', d)}"
|
||||
UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
|
||||
|
||||
UNINATIVE_URL ?= "unset"
|
||||
UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc-${UNINATIVE_VERSION}.tar.xz"
|
||||
# Example checksums
|
||||
#UNINATIVE_CHECKSUM[aarch64] = "dead"
|
||||
#UNINATIVE_CHECKSUM[i686] = "dead"
|
||||
#UNINATIVE_CHECKSUM[x86_64] = "dead"
|
||||
UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
|
||||
|
||||
# Enabling uninative will change the following variables so they need to go the parsing ignored variables list to prevent multiple recipe parsing
|
||||
BB_HASHCONFIG_IGNORE_VARS += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
|
||||
|
||||
addhandler uninative_event_fetchloader
|
||||
uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
|
||||
|
||||
addhandler uninative_event_enable
|
||||
uninative_event_enable[eventmask] = "bb.event.ConfigParsed"
|
||||
|
||||
python uninative_event_fetchloader() {
    """
    This event fires on the parent and will try to fetch the tarball if the
    loader isn't already present.
    """

    chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH"))
    if not chksum:
        bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH"))

    loader = d.getVar("UNINATIVE_LOADER")
    loaderchksum = loader + ".chksum"
    # Fast path: loader already unpacked and its recorded checksum matches the
    # configured one — just (re-)enable uninative and return.
    if os.path.exists(loader) and os.path.exists(loaderchksum):
        with open(loaderchksum, "r") as f:
            readchksum = f.read().strip()
        if readchksum == chksum:
            if "uninative" not in d.getVar("SSTATEPOSTUNPACKFUNCS"):
                enable_uninative(d)
            return

    import subprocess
    try:
        # Save and restore cwd as Fetch.download() does a chdir()
        olddir = os.getcwd()

        tarball = d.getVar("UNINATIVE_TARBALL")
        tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
        tarballpath = os.path.join(tarballdir, tarball)

        if not os.path.exists(tarballpath + ".done"):
            bb.utils.mkdirhier(tarballdir)
            if d.getVar("UNINATIVE_URL") == "unset":
                bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")

            localdata = bb.data.createCopy(d)
            localdata.setVar('FILESPATH', "")
            localdata.setVar('DL_DIR', tarballdir)
            # Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work
            # and we can't easily put 'chksum' into the url path from a url parameter with
            # the current fetcher url handling
            premirrors = bb.fetch2.mirror_from_string(localdata.getVar("PREMIRRORS"))
            for line in premirrors:
                try:
                    (find, replace) = line
                except ValueError:
                    continue
                if find.startswith("http"):
                    localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} %s/uninative/%s/${UNINATIVE_TARBALL}" % (replace, chksum))

            srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
            bb.note("Fetching uninative binary shim %s (will check PREMIRRORS first)" % srcuri)

            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
            fetcher.download()
            localpath = fetcher.localpath(srcuri)
            if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath):
                # Follow the symlink behavior from the bitbake fetch2.
                # This will cover the case where an existing symlink is broken
                # as well as if there are two processes trying to create it
                # at the same time.
                if os.path.islink(tarballpath):
                    # Broken symbolic link
                    os.unlink(tarballpath)

                # Deal with two processes trying to make symlink at once
                try:
                    os.symlink(localpath, tarballpath)
                except FileExistsError:
                    pass

        # ldd output is "ldd (Ubuntu GLIBC 2.23-0ubuntu10) 2.23", extract last option from first line
        glibcver = subprocess.check_output(["ldd", "--version"]).decode('utf-8').split('\n')[0].split()[-1]
        if bb.utils.vercmp_string(d.getVar("UNINATIVE_MAXGLIBCVERSION"), glibcver) < 0:
            raise RuntimeError("Your host glibc version (%s) is newer than that in uninative (%s). Disabling uninative so that sstate is not corrupted." % (glibcver, d.getVar("UNINATIVE_MAXGLIBCVERSION")))

        # Unpack the tarball and relocate the shim loader/patchelf/libc so
        # they run from the uninative staging directory.
        cmd = d.expand("\
mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; \
cd ${UNINATIVE_STAGING_DIR}-uninative; \
tar -xJf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; \
${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux \
${UNINATIVE_LOADER} \
${UNINATIVE_LOADER} \
${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so*" % chksum)
        subprocess.check_output(cmd, shell=True)

        # Record the checksum only after successful unpack/relocation so a
        # failed attempt is retried next time.
        with open(loaderchksum, "w") as f:
            f.write(chksum)

        enable_uninative(d)

    except RuntimeError as e:
        bb.warn(str(e))
    except bb.fetch2.BBFetchException as exc:
        bb.warn("Disabling uninative as unable to fetch uninative tarball: %s" % str(exc))
        bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
    except subprocess.CalledProcessError as exc:
        bb.warn("Disabling uninative as unable to install uninative tarball: %s" % str(exc))
        bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
    finally:
        os.chdir(olddir)
}
|
||||
|
||||
python uninative_event_enable() {
    """
    This event handler is called in the workers and is responsible for setting
    up uninative if a loader is found.
    """
    enable_uninative(d)
}
|
||||
|
||||
def enable_uninative(d):
    # Switch the build over to the uninative loader if it has been unpacked:
    # use a "universal" NATIVELSBSTRING so native sstate is shared across
    # distros, patch unpacked native binaries (uninative_changeinterp), and
    # link new native binaries against the uninative dynamic linker.
    # The vardepvalueexclude flags keep these changes out of task signatures.
    loader = d.getVar("UNINATIVE_LOADER")
    if os.path.exists(loader):
        bb.debug(2, "Enabling uninative")
        d.setVar("NATIVELSBSTRING", "universal%s" % oe.utils.host_gcc_version(d))
        d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp")
        d.appendVarFlag("SSTATEPOSTUNPACKFUNCS", "vardepvalueexclude", "| uninative_changeinterp")
        d.appendVar("BUILD_LDFLAGS", " -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER} -pthread")
        d.appendVarFlag("BUILD_LDFLAGS", "vardepvalueexclude", "| -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER} -pthread")
        d.appendVarFlag("BUILD_LDFLAGS", "vardepsexclude", "UNINATIVE_LOADER")
        d.prependVar("PATH", "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
|
||||
|
||||
python uninative_changeinterp () {
    # sstate post-unpack hook: rewrite the ELF interpreter of every dynamic
    # executable in unpacked native/cross sstate so it uses the uninative
    # loader instead of whatever host loader it was originally linked against.
    import subprocess
    import stat
    import oe.qa

    if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)):
        return

    sstateinst = d.getVar('SSTATE_INSTDIR')
    for walkroot, dirs, files in os.walk(sstateinst):
        for file in files:
            # Shared objects keep their own interpreter-free format; skip them.
            if file.endswith(".so") or ".so." in file:
                continue
            f = os.path.join(walkroot, file)
            if os.path.islink(f):
                continue
            s = os.stat(f)
            # Only candidates with at least one execute bit are interesting.
            if not ((s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH)):
                continue
            elf = oe.qa.ELFFile(f)
            try:
                elf.open()
            except oe.qa.NotELFFileError:
                continue
            if not elf.isDynamic():
                continue

            # patchelf needs write permission; restore the original mode after.
            os.chmod(f, s[stat.ST_MODE] | stat.S_IWUSR)
            subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
            os.chmod(f, s[stat.ST_MODE])
}
|
||||
60
sources/poky/meta/classes-global/utility-tasks.bbclass
Normal file
60
sources/poky/meta/classes-global/utility-tasks.bbclass
Normal file
@@ -0,0 +1,60 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
addtask listtasks
do_listtasks[nostamp] = "1"
python do_listtasks() {
    # List every task defined for this recipe together with its 'doc' varflag,
    # aligned in a column for readability.
    taskdescs = {}
    maxlen = 0
    for e in d.keys():
        if d.getVarFlag(e, 'task'):
            maxlen = max(maxlen, len(e))
            # Setscene variants carry no doc flag of their own; reuse the
            # base task's description.
            if e.endswith('_setscene'):
                desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc') or '')
            else:
                desc = d.getVarFlag(e, 'doc') or ''
            taskdescs[e] = desc

    tasks = sorted(taskdescs.keys())
    for taskname in tasks:
        bb.plain("%s %s" % (taskname.ljust(maxlen), taskdescs[taskname]))
}
|
||||
|
||||
CLEANFUNCS ?= ""
|
||||
|
||||
T:task-clean = "${LOG_DIR}/cleanlogs/${PN}"
|
||||
addtask clean
do_clean[nostamp] = "1"
python do_clean() {
    """clear the build and temp directories"""
    dir = d.expand("${WORKDIR}")
    bb.note("Removing " + dir)
    oe.path.remove(dir)

    # Stamp files share the STAMP prefix plus a task suffix; the glob catches
    # all of them.
    dir = "%s.*" % d.getVar('STAMP')
    bb.note("Removing " + dir)
    oe.path.remove(dir)

    # Allow recipes/classes to hook extra cleanup via CLEANFUNCS.
    for f in (d.getVar('CLEANFUNCS') or '').split():
        bb.build.exec_func(f, d)
}
|
||||
|
||||
addtask checkuri
|
||||
do_checkuri[nostamp] = "1"
|
||||
do_checkuri[network] = "1"
|
||||
python do_checkuri() {
|
||||
src_uri = (d.getVar('SRC_URI') or "").split()
|
||||
if len(src_uri) == 0:
|
||||
return
|
||||
|
||||
try:
|
||||
fetcher = bb.fetch2.Fetch(src_uri, d)
|
||||
fetcher.checkstatus()
|
||||
except bb.fetch2.BBFetchException as e:
|
||||
bb.fatal(str(e))
|
||||
}
|
||||
|
||||
|
||||
369
sources/poky/meta/classes-global/utils.bbclass
Normal file
369
sources/poky/meta/classes-global/utils.bbclass
Normal file
@@ -0,0 +1,369 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
oe_soinstall() {
	# Purpose: Install shared library file and
	#          create the necessary links (SONAME link and .so dev link)
	# Example: oe_soinstall libfoo.so.1.2.3 ${D}${libdir}
	libname=`basename $1`
	case "$libname" in
	*.so)
		bbfatal "oe_soinstall: Shared library must have a versioned filename (e.g. libfoo.so.1.2.3)"
		;;
	esac
	install -m 755 $1 $2/$libname
	# Pull the DT_SONAME tag out of the ELF dynamic section.
	sonamelink=`${READELF} -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
	# Quote the expansion so an empty/odd soname still yields a valid test.
	if [ -z "$sonamelink" ]; then
		bbfatal "oe_soinstall: $libname is missing ELF tag 'SONAME'."
	fi
	solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
	ln -sf $libname $2/$sonamelink
	ln -sf $libname $2/$solink
}
|
||||
|
||||
oe_libinstall() {
	# Purpose: Install a library, in all its forms
	# Example
	#
	# oe_libinstall libltdl ${STAGING_LIBDIR}/
	# oe_libinstall -C src/libblah libblah ${D}/${libdir}/
	#
	# Options: -C dir  change into dir first
	#          -s      silent (do not echo commands)
	#          -a      require/install the static archive
	#          -so     require a shared library to be found
	dir=""
	libtool=""
	silent=""
	require_static=""
	require_shared=""
	while [ "$#" -gt 0 ]; do
		case "$1" in
		-C)
			shift
			dir="$1"
			;;
		-s)
			silent=1
			;;
		-a)
			require_static=1
			;;
		-so)
			require_shared=1
			;;
		-*)
			bbfatal "oe_libinstall: unknown option: $1"
			;;
		*)
			break;
			;;
		esac
		shift
	done

	libname="$1"
	shift
	destpath="$1"
	if [ -z "$destpath" ]; then
		bbfatal "oe_libinstall: no destination path specified"
	fi

	__runcmd () {
		if [ -z "$silent" ]; then
			echo >&2 "oe_libinstall: $*"
		fi
		$*
	}

	if [ -z "$dir" ]; then
		dir=`pwd`
	fi

	dotlai=$libname.lai

	# Sanity check that the libname.lai is unique
	number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
	if [ $number_of_files -gt 1 ]; then
		bbfatal "oe_libinstall: $dotlai is not unique in $dir"
	fi


	dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
	olddir=`pwd`
	__runcmd cd $dir

	lafile=$libname.la

	# If such file doesn't exist, try to cut version suffix
	# Bug fix: the fallback must test the *stripped* name ($libname1.la);
	# testing $libname.la again could never succeed since we just
	# established it does not exist.
	if [ ! -f "$lafile" ]; then
		libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
		lafile1=$libname1.la
		if [ -f "$lafile1" ]; then
			libname=$libname1
			lafile=$lafile1
		fi
	fi

	if [ -f "$lafile" ]; then
		# libtool archive
		eval `cat $lafile|grep "^library_names="`
		libtool=1
	else
		library_names="$libname.so* $libname.dll.a $libname.*.dylib"
	fi

	__runcmd install -d $destpath/
	dota=$libname.a
	if [ -f "$dota" -o -n "$require_static" ]; then
		rm -f $destpath/$dota
		__runcmd install -m 0644 $dota $destpath/
	fi
	if [ -f "$dotlai" -a -n "$libtool" ]; then
		rm -f $destpath/$libname.la
		__runcmd install -m 0644 $dotlai $destpath/$libname.la
	fi

	for name in $library_names; do
		files=`eval echo $name`
		for f in $files; do
			if [ ! -e "$f" ]; then
				if [ -n "$libtool" ]; then
					bbfatal "oe_libinstall: $dir/$f not found."
				fi
			elif [ -L "$f" ]; then
				__runcmd cp -P "$f" $destpath/
			elif [ ! -L "$f" ]; then
				libfile="$f"
				rm -f $destpath/$libfile
				__runcmd install -m 0755 $libfile $destpath/
			fi
		done
	done

	if [ -z "$libfile" ]; then
		if [ -n "$require_shared" ]; then
			bbfatal "oe_libinstall: unable to locate shared library"
		fi
	elif [ -z "$libtool" ]; then
		# special case hack for non-libtool .so.#.#.# links
		baselibfile=`basename "$libfile"`
		if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
			sonamelink=`${READELF} -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
			solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
			if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
				__runcmd ln -sf $baselibfile $destpath/$sonamelink
			fi
			__runcmd ln -sf $baselibfile $destpath/$solink
		fi
	fi

	__runcmd cd "$olddir"
}
|
||||
|
||||
create_cmdline_wrapper () {
	# Create a wrapper script where commandline options are needed
	#
	# These are useful to work around relocation issues, by passing extra options
	# to a program
	#
	# Usage: create_cmdline_wrapper FILENAME <extra-options>

	cmd=$1
	shift

	echo "Generating wrapper script for $cmd"

	# The real binary is renamed aside; the wrapper takes its place.
	mv $cmd $cmd.real
	cmdname=`basename $cmd`
	dirname=`dirname $cmd`
	cmdoptions=$@
	if [ "${base_prefix}" != "" ]; then
		relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
		# Rewrite absolute prefix paths into ones relative to $realdir,
		# which the wrapper computes at run time.
		cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
	fi
	cat <<END >$cmd
#!/bin/bash
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
exec -a \$realdir/$cmdname \$realdir/$cmdname.real $cmdoptions "\$@"
END
	chmod +x $cmd
}
|
||||
|
||||
create_cmdline_shebang_wrapper () {
	# Create a wrapper script where commandline options are needed
	#
	# These are useful to work around shebang relocation issues, where shebangs are too
	# long or have arguments in them, thus preventing them from using the /usr/bin/env
	# shebang
	#
	# Usage: create_cmdline_wrapper FILENAME <extra-options>

	cmd=$1
	shift

	echo "Generating wrapper script for $cmd"

	# Strip #! and get remaining interpreter + arg
	argument="$(sed -ne 's/^#! *//p;q' $cmd)"
	# strip the shebang from the real script as we do not want it to be usable anyway
	tail -n +2 $cmd > $cmd.real
	# Preserve ownership and permissions of the original script.
	chown --reference=$cmd $cmd.real
	chmod --reference=$cmd $cmd.real
	rm -f $cmd
	cmdname=$(basename $cmd)
	dirname=$(dirname $cmd)
	cmdoptions=$@
	if [ "${base_prefix}" != "" ]; then
		relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
		# Rewrite absolute prefix paths into ones relative to $realdir,
		# which the wrapper computes at run time.
		cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
	fi
	cat <<END >$cmd
#!/usr/bin/env bash
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
exec -a \$realdir/$cmdname $argument \$realdir/$cmdname.real $cmdoptions "\$@"
END
	chmod +x $cmd
}
|
||||
|
||||
create_wrapper () {
	# Create a wrapper script where extra environment variables are needed
	#
	# These are useful to work around relocation issues, by setting environment
	# variables which point to paths in the filesystem.
	#
	# Usage: create_wrapper FILENAME [[VAR=VALUE]..]

	cmd=$1
	shift

	echo "Generating wrapper script for $cmd"

	# The real binary is renamed aside; the wrapper takes its place.
	mv $cmd $cmd.real
	cmdname=`basename $cmd`
	dirname=`dirname $cmd`
	exportstring=$@
	if [ "${base_prefix}" != "" ]; then
		relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
		# Rewrite absolute prefix paths into ones relative to $realdir,
		# which the wrapper computes at run time.
		exportstring=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
	fi
	cat <<END >$cmd
#!/bin/bash
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
export $exportstring
exec -a "\$0" \$realdir/$cmdname.real "\$@"
END
	chmod +x $cmd
}
|
||||
|
||||
# Mirror a directory tree from $1 into $2, hardlinking regular files
# (symlinks are preserved as symlinks, not followed).
hardlinkdir () {
    srcdir=$1
    destdir=$2
    # cpio -pdl creates hardlinks in pass-through mode; -u overwrites,
    # --null pairs with find -print0 to survive unusual file names.
    (cd $srcdir; find . -print0 | cpio --null -pdlu $destdir)
}
|
||||
|
||||
|
||||
def check_app_exists(app, d):
    """Return True when the (expanded) program *app* can be found on PATH.

    Only the first whitespace-separated word of the expansion is checked,
    so values with arguments ("gcc -m32") still work.
    """
    executable = d.expand(app).split()[0].strip()
    search_path = d.getVar('PATH')
    return bool(bb.utils.which(search_path, executable))
|
||||
|
||||
def explode_deps(s):
    # Thin compatibility wrapper: split a dependency string (possibly with
    # version constraints) into a list; see bb.utils.explode_deps for details.
    return bb.utils.explode_deps(s)
|
||||
|
||||
def base_set_filespath(path, d):
    """Build the FILESPATH search list.

    Prepends any FILESEXTRAPATHS entries to *path*, then expands every
    directory with each FILESOVERRIDES override (most specific first,
    plain directory last) and returns the result as a colon-joined string.
    """
    extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
    # Remove default flag which was used for checking
    extrapaths = extrapaths.replace("__default:", "")
    # Don't prepend empty strings to the path list
    if extrapaths != "":
        path = extrapaths.split(":") + path
    # The ":" ensures we have an 'empty' override
    overrides = (":" + (d.getVar("FILESOVERRIDES") or "")).split(":")
    overrides.reverse()
    return ":".join(
        os.path.join(p, o)
        for o in overrides
        for p in path
        if p != ""
    )
|
||||
|
||||
def extend_variants(d, var, extend, delim=':'):
    """Return a string of all bb class extend variants for the given extend"""
    matched = []
    for entry in (d.getVar(var) or "").split():
        fields = entry.split(delim)
        # Keep only entries of the form "<extend><delim><variant>"
        if len(fields) > 1 and fields[0] == extend:
            matched.append(fields[1])
    return " ".join(matched)
|
||||
|
||||
def multilib_pkg_extend(d, pkg):
    """Return *pkg* plus a "<variant>-<pkg>" entry per multilib variant.

    With no MULTILIB_VARIANTS configured, the package name is returned
    unchanged.
    """
    variants = (d.getVar("MULTILIB_VARIANTS") or "").split()
    if not variants:
        return pkg
    return " ".join([pkg] + ["%s-%s" % (variant, pkg) for variant in variants])
|
||||
|
||||
def get_multilib_datastore(variant, d):
    # Delegates to oe.utils: returns a datastore configured for the given
    # multilib variant ('' selects the default/base configuration).
    return oe.utils.get_multilib_datastore(variant, d)
|
||||
|
||||
def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
    """Return a string of all ${var} in all multilib tune configuration"""
    collected = []
    for variant in (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']:
        localdata = get_multilib_datastore(variant, d)
        # We need WORKDIR to be consistent with the original datastore
        localdata.setVar("WORKDIR", d.getVar("WORKDIR"))
        value = localdata.getVar(var) or ""
        if value == "":
            continue
        if need_split:
            collected.extend(value.split(delim))
        else:
            collected.append(value)
    if unique:
        # Deduplicate while keeping first-seen order as much as possible
        deduped = []
        for value in collected:
            if value not in deduped:
                deduped.append(value)
        collected = deduped
    return " ".join(collected)
|
||||
|
||||
def all_multilib_tune_list(vars, d):
    """
    Return a list of ${VAR} for each variable VAR in vars from each
    multilib tune configuration.
    Is safe to be called from a multilib recipe/context as it can
    figure out the original tune and remove the multilib overrides.
    """
    values = {}
    for v in vars:
        values[v] = []
    values['ml'] = ['']

    variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
    for item in variants:
        localdata = get_multilib_datastore(item, d)
        # Collect EVERY requested variable for this variant. The previous
        # code appended only values[v], where v was the variable leaked from
        # the initialisation loop above, so all other vars stayed empty —
        # contradicting the documented contract.
        for v in vars:
            values[v].append(localdata.getVar(v))
        values['ml'].append(item)
    return values
|
||||
all_multilib_tune_list[vardepsexclude] = "OVERRIDES"
|
||||
|
||||
# If the user hasn't set up their name/email, set some defaults
|
||||
check_git_config() {
    # Guarantee a usable git identity: when the user has not configured
    # name/email, fall back to the defaults supplied by the patch machinery
    # so git commands run during patching never abort.
    git config user.email > /dev/null || git config --local user.email "${PATCH_GIT_USER_EMAIL}"
    git config user.name > /dev/null || git config --local user.name "${PATCH_GIT_USER_NAME}"
}
|
||||
71
sources/poky/meta/classes-recipe/allarch.bbclass
Normal file
71
sources/poky/meta/classes-recipe/allarch.bbclass
Normal file
@@ -0,0 +1,71 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
#
|
||||
# This class is used for architecture independent recipes/data files (usually scripts)
|
||||
#
|
||||
|
||||
python allarch_package_arch_handler () {
    # Native/nativesdk/crosssdk builds must keep their real architecture,
    # so never force them to "all".
    if bb.data.inherits_class("native", d) or bb.data.inherits_class("nativesdk", d) \
        or bb.data.inherits_class("crosssdk", d):
        return

    # With multilib variants configured, forcing "all" would clash with
    # per-variant packaging, so only set PACKAGE_ARCH when none exist.
    variants = d.getVar("MULTILIB_VARIANTS")
    if not variants:
        d.setVar("PACKAGE_ARCH", "all" )
}
|
||||
|
||||
addhandler allarch_package_arch_handler
|
||||
allarch_package_arch_handler[eventmask] = "bb.event.RecipePreFinalise"
|
||||
|
||||
python () {
    # Allow this class to be included but overridden - only set
    # the values if we're still "all" package arch.
    if d.getVar("PACKAGE_ARCH") == "all":
        # No need for virtual/libc or a cross compiler
        d.setVar("INHIBIT_DEFAULT_DEPS","1")

        # Set these to a common set of values, we shouldn't be using them other than for WORKDIR directory
        # naming anyway
        d.setVar("baselib", "lib")
        d.setVar("TARGET_ARCH", "allarch")
        d.setVar("TARGET_OS", "linux")
        d.setVar("TARGET_CC_ARCH", "none")
        d.setVar("TARGET_LD_ARCH", "none")
        d.setVar("TARGET_AS_ARCH", "none")
        d.setVar("TARGET_FPU", "")
        d.setVar("TARGET_PREFIX", "")
        # Expand PACKAGE_EXTRA_ARCHS since the staging code needs this
        # (this removes any dependencies from the hash perspective)
        d.setVar("PACKAGE_EXTRA_ARCHS", d.getVar("PACKAGE_EXTRA_ARCHS"))
        d.setVar("SDK_ARCH", "none")
        d.setVar("SDK_CC_ARCH", "none")
        d.setVar("TARGET_CPPFLAGS", "none")
        d.setVar("TARGET_CFLAGS", "none")
        d.setVar("TARGET_CXXFLAGS", "none")
        d.setVar("TARGET_LDFLAGS", "none")
        d.setVar("POPULATESYSROOTDEPS", "")

        # Avoid this being unnecessarily different due to nuances of
        # the target machine that aren't important for "all" arch
        # packages.
        d.setVar("LDFLAGS", "")

        # No need to do shared library processing or debug symbol handling
        d.setVar("EXCLUDE_FROM_SHLIBS", "1")
        d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
        d.setVar("INHIBIT_PACKAGE_STRIP", "1")

        # These multilib values shouldn't change allarch packages so exclude them
        d.appendVarFlag("emit_pkgdata", "vardepsexclude", " MULTILIB_VARIANTS")
        d.appendVarFlag("write_specfile", "vardepsexclude", " MULTILIBS")
        d.appendVarFlag("do_package", "vardepsexclude", " package_do_shlibs")

        # Replace the qemu wrapper helper with a stub that always fails:
        # architecture-independent packages never need qemu usermode emulation.
        d.setVar("qemu_wrapper_cmdline", "def qemu_wrapper_cmdline(data, rootfs_path, library_paths):\n return 'false'")
    elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
        # packagegroup recipes are expected to inherit allarch BEFORE
        # packagegroup; getting here means PACKAGE_ARCH was changed too late.
        bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
}
|
||||
|
||||
11
sources/poky/meta/classes-recipe/autotools-brokensep.bbclass
Normal file
11
sources/poky/meta/classes-recipe/autotools-brokensep.bbclass
Normal file
@@ -0,0 +1,11 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Autotools class for recipes where separate build dir doesn't work
|
||||
# Ideally we should fix software so it does work. Standard autotools supports
|
||||
# this.
|
||||
inherit autotools
|
||||
B = "${S}"
|
||||
261
sources/poky/meta/classes-recipe/autotools.bbclass
Normal file
261
sources/poky/meta/classes-recipe/autotools.bbclass
Normal file
@@ -0,0 +1,261 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
def get_autotools_dep(d):
    """Compute the DEPENDS fragment required to autoreconf this recipe.

    Returns '' when autotools deps are inhibited or when the recipe IS one
    of the autotools themselves (avoiding self-dependency cycles).
    """
    if d.getVar('INHIBIT_AUTOTOOLS_DEPS'):
        return ''

    pn = d.getVar('PN')
    # autoconf/automake cannot depend on themselves
    if pn in ('autoconf-native', 'automake-native'):
        return ''

    deps = 'autoconf-native automake-native '
    if pn not in ('libtool', 'libtool-native') and not pn.endswith("libtool-cross"):
        deps += 'libtool-native '
        # Target builds additionally need the cross libtool wrapper
        if not bb.data.inherits_class('native', d) \
                and not bb.data.inherits_class('nativesdk', d) \
                and not bb.data.inherits_class('cross', d) \
                and not d.getVar('INHIBIT_DEFAULT_DEPS'):
            deps += 'libtool-cross '

    return deps
|
||||
|
||||
|
||||
DEPENDS:prepend = "${@get_autotools_dep(d)} "
|
||||
|
||||
inherit siteinfo
|
||||
|
||||
# Space separated list of shell scripts with variables defined to supply test
|
||||
# results for autoconf tests we cannot run at build time.
|
||||
# The value of this variable is filled in in a prefunc because it depends on
|
||||
# the contents of the sysroot.
|
||||
export CONFIG_SITE
|
||||
|
||||
acpaths ?= "default"
|
||||
EXTRA_AUTORECONF += "--exclude=autopoint"
|
||||
|
||||
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
|
||||
|
||||
# When building tools for use at build-time it's recommended for the build
|
||||
# system to use these variables when cross-compiling.
|
||||
# https://www.gnu.org/software/autoconf-archive/ax_prog_cc_for_build.html
|
||||
# https://stackoverflow.com/questions/24201260/autotools-cross-compilation-and-generated-sources/24208587#24208587
|
||||
export CPP_FOR_BUILD = "${BUILD_CPP}"
|
||||
export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}"
|
||||
|
||||
export CC_FOR_BUILD = "${BUILD_CC}"
|
||||
export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}"
|
||||
|
||||
export CXX_FOR_BUILD = "${BUILD_CXX}"
|
||||
export CXXFLAGS_FOR_BUILD="${BUILD_CXXFLAGS}"
|
||||
|
||||
export LD_FOR_BUILD = "${BUILD_LD}"
|
||||
export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"
|
||||
|
||||
def append_libtool_sysroot(d):
    """Return the libtool sysroot configure option for sysrooted builds."""
    # Native builds are not sysrooted, so no option is supplied there.
    if bb.data.inherits_class('native', d):
        return ""
    return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
|
||||
|
||||
CONFIGUREOPTS = " --build=${BUILD_SYS} \
|
||||
--host=${HOST_SYS} \
|
||||
--target=${TARGET_SYS} \
|
||||
--prefix=${prefix} \
|
||||
--exec_prefix=${exec_prefix} \
|
||||
--bindir=${bindir} \
|
||||
--sbindir=${sbindir} \
|
||||
--libexecdir=${libexecdir} \
|
||||
--datadir=${datadir} \
|
||||
--sysconfdir=${sysconfdir} \
|
||||
--sharedstatedir=${sharedstatedir} \
|
||||
--localstatedir=${localstatedir} \
|
||||
--libdir=${libdir} \
|
||||
--includedir=${includedir} \
|
||||
--oldincludedir=${includedir} \
|
||||
--infodir=${infodir} \
|
||||
--mandir=${mandir} \
|
||||
--disable-silent-rules \
|
||||
${CONFIGUREOPT_DEPTRACK} \
|
||||
${@append_libtool_sysroot(d)}"
|
||||
CONFIGUREOPT_DEPTRACK ?= "--disable-dependency-tracking"
|
||||
|
||||
CACHED_CONFIGUREVARS ?= ""
|
||||
|
||||
AUTOTOOLS_SCRIPT_PATH ?= "${S}"
|
||||
CONFIGURE_SCRIPT ?= "${AUTOTOOLS_SCRIPT_PATH}/configure"
|
||||
|
||||
AUTOTOOLS_AUXDIR ?= "${AUTOTOOLS_SCRIPT_PATH}"
|
||||
|
||||
oe_runconf () {
    # Run the configure script with the standard CONFIGUREOPTS plus any
    # EXTRA_OECONF and caller-supplied arguments.
    # Use relative path to avoid buildpaths in files
    cfgscript_name="`basename ${CONFIGURE_SCRIPT}`"
    cfgscript=`python3 -c "import os; print(os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.'))"`/$cfgscript_name
    if [ -x "$cfgscript" ] ; then
        bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
        # CACHED_CONFIGUREVARS precede the script so they act as cached
        # autoconf test results for checks that cannot run when cross-compiling.
        if ! CONFIG_SHELL=${CONFIG_SHELL-/bin/bash} ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
            bbnote "The following config.log files may provide further information."
            bbnote `find ${B} -ignore_readdir_race -type f -name config.log`
            bbfatal_log "configure failed"
        fi
    else
        bbfatal "no configure script found at $cfgscript"
    fi
}
|
||||
|
||||
CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
|
||||
|
||||
autotools_preconfigure() {
    # If the stored configure-task hash differs from the current one, the
    # previous configure output is stale; clean it before reconfiguring.
    if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
        if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
            if [ "${S}" != "${B}" ]; then
                # Separate build dir: safe to wipe it entirely.
                echo "Previously configured separate build directory detected, cleaning ${B}"
                rm -rf ${B}
                mkdir -p ${B}
            else
                # At least remove the .la files since automake won't automatically
                # regenerate them even if CFLAGS/LDFLAGS are different
                cd ${S}
                if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
                    oe_runmake clean
                fi
                find ${S} -ignore_readdir_race -name \*.la -delete
            fi
        fi
    fi
}
|
||||
|
||||
autotools_postconfigure(){
    # Record the configure task hash so autotools_preconfigure can detect a
    # stale build tree the next time configure runs.
    if [ -n "${CONFIGURESTAMPFILE}" ]; then
        stampdir=`dirname ${CONFIGURESTAMPFILE}`
        mkdir -p $stampdir
        echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
    fi
}
|
||||
|
||||
EXTRACONFFUNCS ??= ""
|
||||
|
||||
EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
|
||||
|
||||
do_configure[prefuncs] += "autotools_preconfigure autotools_aclocals ${EXTRACONFFUNCS}"
|
||||
do_compile[prefuncs] += "autotools_aclocals"
|
||||
do_install[prefuncs] += "autotools_aclocals"
|
||||
do_configure[postfuncs] += "autotools_postconfigure"
|
||||
|
||||
ACLOCALDIR = "${STAGING_DATADIR}/aclocal"
|
||||
ACLOCALEXTRAPATH = ""
|
||||
ACLOCALEXTRAPATH:class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
|
||||
ACLOCALEXTRAPATH:class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
|
||||
|
||||
python autotools_aclocals () {
    # Gather the site files applicable to this configuration and export
    # them via CONFIG_SITE so configure picks up pre-seeded test results.
    sitefiles, searched = siteinfo_get_files(d, sysrootcache=True)
    d.setVar("CONFIG_SITE", " ".join(sitefiles))
}
|
||||
|
||||
do_configure[file-checksums] += "${@' '.join(siteinfo_get_files(d, sysrootcache=False)[1])}"
|
||||
|
||||
CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in *.m4 Makefile.am"
|
||||
|
||||
autotools_do_configure() {
    # WARNING: gross hack follows:
    # An autotools built package generally needs these scripts, however only
    # automake or libtoolize actually install the current versions of them.
    # This is a problem in builds that do not use libtool or automake, in the case
    # where we -need- the latest version of these scripts. e.g. running a build
    # for a package whose autotools are old, on an x86_64 machine, which the old
    # config.sub does not support. Work around this by installing them manually
    # regardless.

    PRUNE_M4=""

    # Force configure to be regenerated from configure.ac/configure.in.
    for ac in `find ${S} -ignore_readdir_race -name configure.in -o -name configure.ac`; do
        rm -f `dirname $ac`/configure
    done
    if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then
        olddir=`pwd`
        cd ${AUTOTOOLS_SCRIPT_PATH}
        mkdir -p ${ACLOCALDIR}
        ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
        if [ x"${acpaths}" = xdefault ]; then
            # Collect every directory (up to two levels deep) containing
            # .m4 files, excluding the generated aclocal.m4/acinclude.m4.
            acpaths=
            for i in `find ${AUTOTOOLS_SCRIPT_PATH} -ignore_readdir_race -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
                grep -v 'acinclude.m4' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
                acpaths="$acpaths -I $i"
            done
        else
            acpaths="${acpaths}"
        fi
        acpaths="$acpaths ${ACLOCALEXTRAPATH}"
        # Extract the automake major.minor version to locate its acdir.
        AUTOV=`automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'`
        automake --version
        echo "AUTOV is $AUTOV"
        if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
            ACLOCAL="$ACLOCAL --automake-acdir=${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
        fi
        # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
        # like it was auto-generated. Work around this by blowing it away
        # by hand, unless the package specifically asked not to run aclocal.
        if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
            rm -f aclocal.m4
        fi
        if [ -e configure.in ]; then
            CONFIGURE_AC=configure.in
        else
            CONFIGURE_AC=configure.ac
        fi
        if grep -q "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC; then
            if grep -q "sed.*POTFILES" $CONFIGURE_AC; then
                : do nothing -- we still have an old unmodified configure.ac
            else
                bbnote Executing glib-gettextize --force --copy
                echo "no" | glib-gettextize --force --copy
            fi
        elif [ "${BPN}" != "gettext" ] && grep -q "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC; then
            # We'd call gettextize here if it wasn't so broken...
            cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
            if [ -d ${S}/po/ ]; then
                cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
                if [ ! -e ${S}/po/remove-potcdate.sin ]; then
                    cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
                fi
            fi
            # Drop the recipe's bundled gettext macros; the staged ones win.
            PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
        fi
        mkdir -p m4

        for i in $PRUNE_M4; do
            find ${S} -ignore_readdir_race -name $i -delete
        done

        bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
        ACLOCAL="$ACLOCAL" autoreconf -Wcross -Wno-obsolete --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
        cd $olddir
    fi
    if [ -e ${CONFIGURE_SCRIPT} ]; then
        oe_runconf
    else
        bbnote "nothing to configure"
    fi
}
|
||||
|
||||
autotools_do_compile() {
    # Build with the generated Makefiles; oe_runmake supplies PARALLEL_MAKE
    # and standard make arguments.
    oe_runmake
}
|
||||
|
||||
autotools_do_install() {
    # Stage the build output into the package root ${D}.
    oe_runmake 'DESTDIR=${D}' install
    # The GNU info directory index is regenerated on the target, so a
    # build-time copy is just noise; drop it when present.
    [ ! -e "${D}${infodir}/dir" ] || rm -f ${D}${infodir}/dir
}
|
||||
|
||||
inherit siteconfig
|
||||
|
||||
EXPORT_FUNCTIONS do_configure do_compile do_install
|
||||
|
||||
B = "${WORKDIR}/build"
|
||||
144
sources/poky/meta/classes-recipe/baremetal-image.bbclass
Normal file
144
sources/poky/meta/classes-recipe/baremetal-image.bbclass
Normal file
@@ -0,0 +1,144 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Baremetal image class
|
||||
#
|
||||
# This class is meant to be inherited by recipes for baremetal/RTOS applications
|
||||
# It contains code that would be used by all of them, every recipe just needs to
|
||||
# override certain variables.
|
||||
#
|
||||
# For scalability purposes, code within this class focuses on the "image" wiring
|
||||
# to satisfy the OpenEmbedded image creation and testing infrastructure.
|
||||
#
|
||||
# See meta-skeleton for a working example.
|
||||
|
||||
|
||||
# Toolchain should be baremetal or newlib based.
|
||||
# TCLIBC="baremetal" or TCLIBC="newlib"
|
||||
COMPATIBLE_HOST:libc-musl:class-target = "null"
|
||||
COMPATIBLE_HOST:libc-glibc:class-target = "null"
|
||||
|
||||
|
||||
inherit rootfs-postcommands
|
||||
|
||||
# Set some defaults, but these should be overridden by each recipe if required
|
||||
IMGDEPLOYDIR ?= "${WORKDIR}/deploy-${PN}-image-complete"
|
||||
BAREMETAL_BINNAME ?= "hello_baremetal_${MACHINE}"
|
||||
IMAGE_LINK_NAME ?= "baremetal-helloworld-image-${MACHINE}"
|
||||
IMAGE_NAME_SUFFIX ?= ""
|
||||
|
||||
do_rootfs[dirs] = "${IMGDEPLOYDIR} ${DEPLOY_DIR_IMAGE}"
|
||||
|
||||
do_image(){
    # Copy the firmware binaries produced by do_install into the image
    # deploy staging area under the canonical image link name.
    install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.bin
    install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.elf
}
|
||||
|
||||
do_image_complete(){
    # Intentionally a no-op: the task exists purely so the sstate machinery
    # and task ordering (do_image -> do_image_complete -> do_build) have a
    # hook point.
    :
}
|
||||
|
||||
python do_rootfs(){
    from oe.utils import execute_pre_post_process
    from pathlib import Path

    # Write empty manifest file to satisfy test infrastructure
    deploy_dir = d.getVar('IMGDEPLOYDIR')
    link_name = d.getVar('IMAGE_LINK_NAME')
    manifest_name = d.getVar('IMAGE_MANIFEST')

    Path(manifest_name).touch()
    if os.path.exists(manifest_name) and link_name:
        manifest_link = deploy_dir + "/" + link_name + ".manifest"
        if manifest_link != manifest_name:
            # Refresh the symlink so it always points at the current manifest.
            if os.path.lexists(manifest_link):
                os.remove(manifest_link)
            os.symlink(os.path.basename(manifest_name), manifest_link)
    # A lot of postprocess commands assume the existence of rootfs/etc
    sysconfdir = d.getVar("IMAGE_ROOTFS") + d.getVar('sysconfdir')
    bb.utils.mkdirhier(sysconfdir)

    execute_pre_post_process(d, d.getVar('ROOTFS_POSTPROCESS_COMMAND'))
}
|
||||
|
||||
|
||||
# Assure binaries, manifest and qemubootconf are populated on DEPLOY_DIR_IMAGE
|
||||
do_image_complete[dirs] = "${TOPDIR}"
|
||||
SSTATETASKS += "do_image_complete"
|
||||
SSTATE_SKIP_CREATION:task-image-complete = '1'
|
||||
do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
|
||||
do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
|
||||
do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
|
||||
addtask do_image_complete after do_image before do_build
|
||||
|
||||
python do_image_complete_setscene () {
    # Restore do_image_complete output from shared state instead of
    # rebuilding it.
    sstate_setscene(d)
}
|
||||
addtask do_image_complete_setscene
|
||||
|
||||
# QEMU generic Baremetal/RTOS parameters
|
||||
QB_DEFAULT_KERNEL ?= "${IMAGE_LINK_NAME}.bin"
|
||||
QB_MEM ?= "-m 256"
|
||||
QB_DEFAULT_FSTYPE ?= "bin"
|
||||
QB_DTB ?= ""
|
||||
QB_OPT_APPEND:append = " -nographic"
|
||||
|
||||
# QEMU x86 requires an .elf kernel to boot rather than a .bin
|
||||
QB_DEFAULT_KERNEL:qemux86 ?= "${IMAGE_LINK_NAME}.elf"
|
||||
# QEMU x86-64 refuses to boot from -kernel, needs a multiboot compatible image
|
||||
QB_DEFAULT_FSTYPE:qemux86-64 ?= "iso"
|
||||
|
||||
# RISC-V tunes set the BIOS, unset, and instruct QEMU to
|
||||
# ignore the BIOS and boot from -kernel
|
||||
QB_DEFAULT_BIOS:qemuriscv64 = ""
|
||||
QB_DEFAULT_BIOS:qemuriscv32 = ""
|
||||
QB_OPT_APPEND:append:qemuriscv64 = " -bios none"
|
||||
QB_OPT_APPEND:append:qemuriscv32 = " -bios none"
|
||||
|
||||
|
||||
# Use the medium-any code model for the RISC-V 64 bit implementation,
|
||||
# since medlow can only access addresses below 0x80000000 and RAM
|
||||
# starts at 0x80000000 on RISC-V 64
|
||||
# Keep RISC-V 32 using -mcmodel=medlow (symbols lie between -2GB:2GB)
|
||||
CFLAGS:append:qemuriscv64 = " -mcmodel=medany"
|
||||
|
||||
|
||||
## Emulate image.bbclass
|
||||
# Handle inherits of any of the image classes we need
|
||||
IMAGE_CLASSES ??= ""
|
||||
IMGCLASSES = " ${IMAGE_CLASSES}"
|
||||
inherit_defer ${IMGCLASSES}
|
||||
# Set defaults to satisfy IMAGE_FEATURES check
|
||||
IMAGE_FEATURES ?= ""
|
||||
IMAGE_FEATURES[type] = "list"
|
||||
IMAGE_FEATURES[validitems] += ""
|
||||
|
||||
|
||||
# This next part is necessary to trick the build system into thinking
|
||||
# its building an image recipe so it generates the qemuboot.conf
|
||||
addtask do_rootfs before do_image after do_install
|
||||
addtask do_image after do_rootfs before do_image_complete
|
||||
addtask do_image_complete after do_image before do_build
|
||||
inherit qemuboot
|
||||
|
||||
# Based on image.bbclass to make sure we build qemu
|
||||
python(){
    # do_addto_recipe_sysroot doesn't exist for all recipes, but we need it to have
    # /usr/bin on recipe-sysroot (qemu) populated.
    # The do_addto_recipe_sysroot dependency is coming from EXTRA_IMAGEDEPENDS now,
    # we just need to add the logic to add its dependency to do_image.
    def extraimage_getdepends(task):
        # Build a "recipe:task" dependency string for every qemu-related
        # entry in EXTRA_IMAGEDEPENDS; entries that already name a task
        # are passed through unchanged.
        deps = ""
        for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
            # Make sure we only add it for qemu
            if 'qemu' in dep:
                if ":" in dep:
                    deps += " %s " % (dep)
                else:
                    deps += " %s:%s" % (dep, task)
        return deps
    d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot'))
}
|
||||
13
sources/poky/meta/classes-recipe/bash-completion.bbclass
Normal file
13
sources/poky/meta/classes-recipe/bash-completion.bbclass
Normal file
@@ -0,0 +1,13 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
DEPENDS:append:class-target = " bash-completion"
|
||||
|
||||
PACKAGES += "${PN}-bash-completion"
|
||||
|
||||
FILES:${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
|
||||
|
||||
RDEPENDS:${PN}-bash-completion = "bash-completion"
|
||||
42
sources/poky/meta/classes-recipe/bin_package.bbclass
Normal file
42
sources/poky/meta/classes-recipe/bin_package.bbclass
Normal file
@@ -0,0 +1,42 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Common variable and task for the binary package recipe.
|
||||
# Basic principle:
|
||||
# * The files have been unpacked to ${S} by base.bbclass
|
||||
# * Skip do_configure and do_compile
|
||||
# * Use do_install to install the files to ${D}
|
||||
#
|
||||
# Note:
|
||||
# The "subdir" parameter in the SRC_URI is useful when the input package
|
||||
# is rpm, ipk, deb and so on, for example:
|
||||
#
|
||||
# SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
|
||||
#
|
||||
# Then the files would be unpacked to ${WORKDIR}/foo-1.0, otherwise
|
||||
# they would be in ${WORKDIR}.
|
||||
#
|
||||
|
||||
# Skip the unwanted steps
|
||||
do_configure[noexec] = "1"
|
||||
do_compile[noexec] = "1"
|
||||
|
||||
# Install the files to ${D}
|
||||
bin_package_do_install () {
    # Do it carefully: refuse to run if the unpacked source dir is missing
    # or empty, since that means SRC_URI did not unpack into ${S}.
    [ -d "${S}" ] || exit 1
    if [ -z "$(ls -A ${S})" ]; then
        bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
    fi
    cd ${S}
    install -d ${D}${base_prefix}
    # tar-to-tar pipe copies everything while preserving permissions (-p)
    # and skipping quilt bookkeeping directories.
    tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
        | tar --no-same-owner -xpf - -C ${D}${base_prefix}
}
|
||||
|
||||
FILES:${PN} = "/"
|
||||
|
||||
EXPORT_FUNCTIONS do_install
|
||||
36
sources/poky/meta/classes-recipe/binconfig-disabled.bbclass
Normal file
36
sources/poky/meta/classes-recipe/binconfig-disabled.bbclass
Normal file
@@ -0,0 +1,36 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
#
|
||||
# Class to disable binconfig files instead of installing them
|
||||
#
|
||||
|
||||
# The list of scripts which should be disabled.
|
||||
BINCONFIG ?= ""
|
||||
|
||||
FILES:${PN}-dev += "${bindir}/*-config"
|
||||
|
||||
do_install:append () {
    # Replace each listed binconfig script with a stub that fails loudly.
    for x in ${BINCONFIG}; do
        # Make the disabled script emit invalid parameters for those configure
        # scripts which call it without checking the return code.
        echo "#!/bin/sh" > ${D}$x
        echo "echo 'ERROR: $x should not be used, use an alternative such as pkg-config' >&2" >> ${D}$x
        echo "echo '--should-not-have-used-$x'" >> ${D}$x
        echo "exit 1" >> ${D}$x
        chmod +x ${D}$x
    done
}
|
||||
|
||||
SYSROOT_PREPROCESS_FUNCS += "binconfig_disabled_sysroot_preprocess"
|
||||
|
||||
binconfig_disabled_sysroot_preprocess () {
    # Stage the disabled stub scripts into the sysroot cross-scripts dir so
    # dependent recipes that invoke them at configure time fail loudly too.
    for x in ${BINCONFIG}; do
        # (removed unused "configname=`basename $x`" — the variable was
        # computed but never referenced)
        install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
        install ${D}$x ${SYSROOT_DESTDIR}${bindir_crossscripts}
    done
}
|
||||
60
sources/poky/meta/classes-recipe/binconfig.bbclass
Normal file
60
sources/poky/meta/classes-recipe/binconfig.bbclass
Normal file
@@ -0,0 +1,60 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
FILES:${PN}-dev += "${bindir}/*-config"
|
||||
|
||||
# The namespaces can clash here hence the two step replace
|
||||
def get_binconfig_mangle(d):
    """Build a sed argument string rewriting sysroot paths in *-config scripts.

    Two-step replace via OE* placeholder tokens because the source and target
    path namespaces can overlap; non-native builds map target paths to the
    staging equivalents, native builds pass through untouched.
    """
    s = "-e ''"
    if not bb.data.inherits_class('native', d):
        # Values may be assigned quoted or unquoted; capture the optional quote.
        optional_quote = r"\(\"\?\)"
        # Step 1: target paths -> placeholder tokens
        s += " -e 's:=%s${base_libdir}:=\\1OEBASELIBDIR:;'" % optional_quote
        s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
        s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
        s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
        s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
        s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
        s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
        s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
        s += " -e 's:-L${WORKDIR}:-LOELIBDIR:'"
        s += " -e 's:-I${WORKDIR}:-IOEINCDIR:'"
        # Step 2: placeholder tokens -> staging paths
        s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'"
        s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
        s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
        s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
        s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
        s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
    # Allow recipes to append extra mangle expressions of their own.
    if d.getVar("OE_BINCONFIG_EXTRA_MANGLE", False):
        s += d.getVar("OE_BINCONFIG_EXTRA_MANGLE")

    return s
|
||||
|
||||
BINCONFIG_GLOB ?= "*-config"
|
||||
|
||||
PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
|
||||
|
||||
binconfig_package_preprocess () {
    # Map staged (sysroot) paths in packaged *-config scripts back to plain
    # target paths so the scripts are correct when run on the device.
    for config in `find ${PKGD} -type f -name '${BINCONFIG_GLOB}'`; do
        sed -i \
            -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
            -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
            -e 's:${STAGING_INCDIR}:${includedir}:g;' \
            -e 's:${STAGING_DATADIR}:${datadir}:' \
            -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
            $config
    done
}
|
||||
|
||||
SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
|
||||
|
||||
binconfig_sysroot_preprocess () {
    # Install mangled copies of any *-config scripts found in ${S} and ${B}
    # into the cross-scripts dir so dependent recipes see staging paths.
    for config in `find ${S} -type f -name '${BINCONFIG_GLOB}'` `find ${B} -type f -name '${BINCONFIG_GLOB}'`; do
        configname=`basename $config`
        install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
        sed ${@get_binconfig_mangle(d)} $config > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
        chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
    done
}
|
||||
@@ -0,0 +1,79 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
##
|
||||
## Purpose:
|
||||
## This class is used to update the list of crates in SRC_URI
|
||||
## by reading Cargo.lock in the source tree.
|
||||
##
|
||||
## See meta/recipes-devtools/python/python3-bcrypt_*.bb for an example
|
||||
##
|
||||
## To perform the update: bitbake -c update_crates recipe-name
|
||||
|
||||
addtask do_update_crates after do_patch
|
||||
do_update_crates[depends] = "python3-native:do_populate_sysroot"
|
||||
do_update_crates[nostamp] = "1"
|
||||
do_update_crates[doc] = "Update the recipe by reading Cargo.lock and write in ${THISDIR}/${BPN}-crates.inc"
|
||||
|
||||
# The directory where to search for Cargo.lock files
|
||||
CARGO_LOCK_SRC_DIR ??= "${S}"
|
||||
|
||||
do_update_crates() {
    # Regenerate ${BPN}-crates.inc from every Cargo.lock found below
    # CARGO_LOCK_SRC_DIR, producing crate:// SRC_URI entries plus sha256sums.
    TARGET_FILE="${THISDIR}/${BPN}-crates.inc"

    nativepython3 - <<EOF

def get_crates(f):
    import tomllib
    c_list = '# from %s' % os.path.relpath(f, '${CARGO_LOCK_SRC_DIR}')
    c_list += '\nSRC_URI += " \\\'
    crates = tomllib.load(open(f, 'rb'))

    # Build a list with crates info that have crates.io in the source
    crates_candidates = list(filter(lambda c: 'crates.io' in c.get('source', ''), crates['package']))

    if not crates_candidates:
        raise ValueError("Unable to find any candidate crates that use crates.io")

    # Update crates uri and their checksum, to avoid name clashing on the checksum
    # we need to rename crates with name and version to have a unique key
    cksum_list = ''
    for c in crates_candidates:
        rename = "%s-%s" % (c['name'], c['version'])
        c_list += '\n    crate://crates.io/%s/%s \\\' % (c['name'], c['version'])
        if 'checksum' in c:
            cksum_list += '\nSRC_URI[%s.sha256sum] = "%s"' % (rename, c['checksum'])

    c_list += '\n"\n'
    c_list += cksum_list
    c_list += '\n'
    return c_list

import os
crates = "# Autogenerated with 'bitbake -c update_crates ${PN}'\n\n"
found = False
for root, dirs, files in os.walk('${CARGO_LOCK_SRC_DIR}'):
    # ignore git and patches directories
    if root.startswith(os.path.join('${CARGO_LOCK_SRC_DIR}', '.pc')):
        continue
    if root.startswith(os.path.join('${CARGO_LOCK_SRC_DIR}', '.git')):
        continue
    for file in files:
        if file == 'Cargo.lock':
            try:
                cargo_lock_path = os.path.join(root, file)
                crates += get_crates(os.path.join(root, file))
            except Exception as e:
                raise ValueError("Cannot parse '%s'" % cargo_lock_path) from e
            else:
                # Only mark success when the lock file parsed cleanly.
                found = True
if not found:
    raise ValueError("Unable to find any Cargo.lock in ${CARGO_LOCK_SRC_DIR}")
open("${TARGET_FILE}", 'w').write(crates)
EOF

    bbnote "Successfully update crates inside '${TARGET_FILE}'"
}
|
||||
93
sources/poky/meta/classes-recipe/cargo.bbclass
Normal file
93
sources/poky/meta/classes-recipe/cargo.bbclass
Normal file
@@ -0,0 +1,93 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
##
|
||||
## Purpose:
|
||||
## This class is used by any recipes that are built using
|
||||
## Cargo.
|
||||
|
||||
inherit cargo_common
|
||||
inherit rust-target-config
|
||||
|
||||
# the binary we will use
|
||||
CARGO = "cargo"
|
||||
|
||||
# We need cargo to compile for the target
|
||||
BASEDEPENDS:append = " cargo-native"
|
||||
|
||||
# Ensure we get the right rust variant
|
||||
DEPENDS:append:class-target = " rust-native ${RUSTLIB_DEP}"
|
||||
DEPENDS:append:class-nativesdk = " rust-native ${RUSTLIB_DEP}"
|
||||
DEPENDS:append:class-native = " rust-native"
|
||||
|
||||
# Enable build separation
|
||||
B = "${WORKDIR}/build"
|
||||
|
||||
# In case something fails in the build process, give a bit more feedback on
|
||||
# where the issue occured
|
||||
export RUST_BACKTRACE = "1"
|
||||
|
||||
RUSTFLAGS ??= ""
|
||||
BUILD_MODE = "${@['--release', ''][d.getVar('DEBUG_BUILD') == '1']}"
|
||||
# --frozen flag will prevent network access (which is required since only
|
||||
# the do_fetch step is authorized to access network)
|
||||
# and will require an up to date Cargo.lock file.
|
||||
# This force the package being built to already ship a Cargo.lock, in the end
|
||||
# this is what we want, at least, for reproducibility of the build.
|
||||
CARGO_BUILD_FLAGS = "-v --frozen --target ${RUST_HOST_SYS} ${BUILD_MODE} --manifest-path=${CARGO_MANIFEST_PATH}"
|
||||
|
||||
# This is based on the content of CARGO_BUILD_FLAGS and generally will need to
|
||||
# change if CARGO_BUILD_FLAGS changes.
|
||||
BUILD_DIR = "${@['release', 'debug'][d.getVar('DEBUG_BUILD') == '1']}"
|
||||
CARGO_TARGET_SUBDIR="${RUST_HOST_SYS}/${BUILD_DIR}"
|
||||
oe_cargo_build () {
|
||||
export RUSTFLAGS="${RUSTFLAGS}"
|
||||
bbnote "Using rust targets from ${RUST_TARGET_PATH}"
|
||||
bbnote "cargo = $(which ${CARGO})"
|
||||
bbnote "${CARGO} build ${CARGO_BUILD_FLAGS} $@"
|
||||
"${CARGO}" build ${CARGO_BUILD_FLAGS} "$@"
|
||||
}
|
||||
|
||||
do_compile[progress] = "outof:\s+(\d+)/(\d+)"
|
||||
cargo_do_compile () {
|
||||
oe_cargo_build
|
||||
}
|
||||
|
||||
cargo_do_install () {
|
||||
local have_installed=false
|
||||
for tgt in "${B}/target/${CARGO_TARGET_SUBDIR}/"*; do
|
||||
case $tgt in
|
||||
*.so|*.rlib)
|
||||
install -d "${D}${rustlibdir}"
|
||||
install -m755 "$tgt" "${D}${rustlibdir}"
|
||||
have_installed=true
|
||||
;;
|
||||
*examples)
|
||||
if [ -d "$tgt" ]; then
|
||||
for example in "$tgt/"*; do
|
||||
if [ -f "$example" ] && [ -x "$example" ]; then
|
||||
install -d "${D}${bindir}"
|
||||
install -m755 "$example" "${D}${bindir}"
|
||||
have_installed=true
|
||||
fi
|
||||
done
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
if [ -f "$tgt" ] && [ -x "$tgt" ]; then
|
||||
install -d "${D}${bindir}"
|
||||
install -m755 "$tgt" "${D}${bindir}"
|
||||
have_installed=true
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
done
|
||||
if ! $have_installed; then
|
||||
die "Did not find anything to install"
|
||||
fi
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_compile do_install
|
||||
41
sources/poky/meta/classes-recipe/cargo_c.bbclass
Normal file
41
sources/poky/meta/classes-recipe/cargo_c.bbclass
Normal file
@@ -0,0 +1,41 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
##
|
||||
## Purpose:
|
||||
## This class is used by any recipes that want to compile a C ABI compatible
|
||||
## library with header and pkg config file
|
||||
|
||||
inherit cargo pkgconfig
|
||||
|
||||
# the binaries we will use
|
||||
CARGO_C_BUILD = "cargo-cbuild"
|
||||
CARGO_C_INSTALL = "cargo-cinstall"
|
||||
|
||||
# We need cargo-c to compile for the target
|
||||
BASEDEPENDS:append = " cargo-c-native"
|
||||
|
||||
do_compile[progress] = "outof:\s+(\d+)/(\d+)"
|
||||
cargo_c_do_compile() {
|
||||
oe_cargo_fix_env
|
||||
export RUSTFLAGS="${RUSTFLAGS}"
|
||||
bbnote "Using rust targets from ${RUST_TARGET_PATH}"
|
||||
bbnote "cargo-cbuild = $(which ${CARGO_C_BUILD})"
|
||||
bbnote "${CARGO_C_BUILD} cbuild ${CARGO_BUILD_FLAGS}"
|
||||
"${CARGO_C_BUILD}" cbuild ${CARGO_BUILD_FLAGS}
|
||||
}
|
||||
|
||||
cargo_c_do_install() {
|
||||
oe_cargo_fix_env
|
||||
export RUSTFLAGS="${RUSTFLAGS}"
|
||||
bbnote "cargo-cinstall = $(which ${CARGO_C_INSTALL})"
|
||||
"${CARGO_C_INSTALL}" cinstall ${CARGO_BUILD_FLAGS} \
|
||||
--destdir ${D} \
|
||||
--prefix ${prefix} \
|
||||
--library-type cdylib
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_compile do_install
|
||||
238
sources/poky/meta/classes-recipe/cargo_common.bbclass
Normal file
238
sources/poky/meta/classes-recipe/cargo_common.bbclass
Normal file
@@ -0,0 +1,238 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
##
|
||||
## Purpose:
|
||||
## This class is to support building with cargo. It
|
||||
## must be different than cargo.bbclass because Rust
|
||||
## now builds with Cargo but cannot use cargo.bbclass
|
||||
## due to dependencies and assumptions in cargo.bbclass
|
||||
## that Rust & Cargo are already installed. So this
|
||||
## is used by cargo.bbclass and Rust
|
||||
##
|
||||
|
||||
# add crate fetch support
|
||||
inherit rust-common
|
||||
|
||||
# Where we download our registry and dependencies to
|
||||
export CARGO_HOME = "${WORKDIR}/cargo_home"
|
||||
|
||||
# The pkg-config-rs library used by cargo build scripts disables itself when
|
||||
# cross compiling unless this is defined. We set up pkg-config appropriately
|
||||
# for cross compilation, so tell it we know better than it.
|
||||
export PKG_CONFIG_ALLOW_CROSS = "1"
|
||||
|
||||
# Don't instruct cargo to use crates downloaded by bitbake. Some rust packages,
|
||||
# for example the rust compiler itself, come with their own vendored sources.
|
||||
# Specifying two [source.crates-io] will not work.
|
||||
CARGO_DISABLE_BITBAKE_VENDORING ??= "0"
|
||||
|
||||
# Used by libstd-rs to point to the vendor dir included in rustc src
|
||||
CARGO_VENDORING_DIRECTORY ??= "${CARGO_HOME}/bitbake"
|
||||
|
||||
# The directory of the Cargo.toml relative to the root directory, per default
|
||||
# assume there's a Cargo.toml directly in the root directory
|
||||
CARGO_SRC_DIR ??= ""
|
||||
|
||||
# The actual path to the Cargo.toml
|
||||
CARGO_MANIFEST_PATH ??= "${S}/${CARGO_SRC_DIR}/Cargo.toml"
|
||||
|
||||
# Path to Cargo.lock
|
||||
CARGO_LOCK_PATH ??= "${@ os.path.join(os.path.dirname(d.getVar('CARGO_MANIFEST_PATH')), 'Cargo.lock')}"
|
||||
|
||||
CARGO_RUST_TARGET_CCLD ??= "${RUST_TARGET_CCLD}"
|
||||
cargo_common_do_configure () {
|
||||
mkdir -p ${CARGO_HOME}/bitbake
|
||||
|
||||
cat <<- EOF > ${CARGO_HOME}/config
|
||||
# EXTRA_OECARGO_PATHS
|
||||
paths = [
|
||||
$(for p in ${EXTRA_OECARGO_PATHS}; do echo \"$p\",; done)
|
||||
]
|
||||
EOF
|
||||
|
||||
cat <<- EOF >> ${CARGO_HOME}/config
|
||||
|
||||
# Local mirror vendored by bitbake
|
||||
[source.bitbake]
|
||||
directory = "${CARGO_VENDORING_DIRECTORY}"
|
||||
EOF
|
||||
|
||||
if [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
|
||||
cat <<- EOF >> ${CARGO_HOME}/config
|
||||
|
||||
[source.crates-io]
|
||||
replace-with = "bitbake"
|
||||
local-registry = "/nonexistent"
|
||||
EOF
|
||||
fi
|
||||
|
||||
cat <<- EOF >> ${CARGO_HOME}/config
|
||||
|
||||
[http]
|
||||
# Multiplexing can't be enabled because http2 can't be enabled
|
||||
# in curl-native without dependency loops
|
||||
multiplexing = false
|
||||
|
||||
# Ignore the hard coded and incorrect path to certificates
|
||||
cainfo = "${STAGING_ETCDIR_NATIVE}/ssl/certs/ca-certificates.crt"
|
||||
|
||||
EOF
|
||||
|
||||
cat <<- EOF >> ${CARGO_HOME}/config
|
||||
|
||||
# HOST_SYS
|
||||
[target.${RUST_HOST_SYS}]
|
||||
linker = "${CARGO_RUST_TARGET_CCLD}"
|
||||
EOF
|
||||
|
||||
if [ "${RUST_HOST_SYS}" != "${RUST_BUILD_SYS}" ]; then
|
||||
cat <<- EOF >> ${CARGO_HOME}/config
|
||||
|
||||
# BUILD_SYS
|
||||
[target.${RUST_BUILD_SYS}]
|
||||
linker = "${RUST_BUILD_CCLD}"
|
||||
EOF
|
||||
fi
|
||||
|
||||
if [ "${RUST_TARGET_SYS}" != "${RUST_BUILD_SYS}" -a "${RUST_TARGET_SYS}" != "${RUST_HOST_SYS}" ]; then
|
||||
cat <<- EOF >> ${CARGO_HOME}/config
|
||||
|
||||
# TARGET_SYS
|
||||
[target.${RUST_TARGET_SYS}]
|
||||
linker = "${RUST_TARGET_CCLD}"
|
||||
EOF
|
||||
fi
|
||||
|
||||
# Put build output in build directory preferred by bitbake instead of
|
||||
# inside source directory unless they are the same
|
||||
if [ "${B}" != "${S}" ]; then
|
||||
cat <<- EOF >> ${CARGO_HOME}/config
|
||||
|
||||
[build]
|
||||
# Use out of tree build destination to avoid polluting the source tree
|
||||
target-dir = "${B}/target"
|
||||
EOF
|
||||
fi
|
||||
|
||||
cat <<- EOF >> ${CARGO_HOME}/config
|
||||
|
||||
[term]
|
||||
progress.when = 'always'
|
||||
progress.width = 80
|
||||
EOF
|
||||
}
|
||||
|
||||
python cargo_common_do_patch_paths() {
|
||||
import shutil
|
||||
|
||||
cargo_config = os.path.join(d.getVar("CARGO_HOME"), "config")
|
||||
if not os.path.exists(cargo_config):
|
||||
return
|
||||
|
||||
src_uri = (d.getVar('SRC_URI') or "").split()
|
||||
if len(src_uri) == 0:
|
||||
return
|
||||
|
||||
patches = dict()
|
||||
workdir = d.getVar('WORKDIR')
|
||||
fetcher = bb.fetch2.Fetch(src_uri, d)
|
||||
for url in fetcher.urls:
|
||||
ud = fetcher.ud[url]
|
||||
if ud.type == 'git':
|
||||
name = ud.parm.get('name')
|
||||
destsuffix = ud.parm.get('destsuffix')
|
||||
if name is not None and destsuffix is not None:
|
||||
if ud.user:
|
||||
repo = '%s://%s@%s%s' % (ud.proto, ud.user, ud.host, ud.path)
|
||||
else:
|
||||
repo = '%s://%s%s' % (ud.proto, ud.host, ud.path)
|
||||
path = '%s = { path = "%s" }' % (name, os.path.join(workdir, destsuffix))
|
||||
patches.setdefault(repo, []).append(path)
|
||||
|
||||
with open(cargo_config, "a+") as config:
|
||||
for k, v in patches.items():
|
||||
print('\n[patch."%s"]' % k, file=config)
|
||||
for name in v:
|
||||
print(name, file=config)
|
||||
|
||||
if not patches:
|
||||
return
|
||||
|
||||
# Cargo.lock file is needed for to be sure that artifacts
|
||||
# downloaded by the fetch steps are those expected by the
|
||||
# project and that the possible patches are correctly applied.
|
||||
# Moreover since we do not want any modification
|
||||
# of this file (for reproducibility purpose), we prevent it by
|
||||
# using --frozen flag (in CARGO_BUILD_FLAGS) and raise a clear error
|
||||
# here is better than letting cargo tell (in case the file is missing)
|
||||
# "Cargo.lock should be modified but --frozen was given"
|
||||
|
||||
lockfile = d.getVar("CARGO_LOCK_PATH")
|
||||
if not os.path.exists(lockfile):
|
||||
bb.fatal(f"{lockfile} file doesn't exist")
|
||||
|
||||
# There are patched files and so Cargo.lock should be modified but we use
|
||||
# --frozen so let's handle that modifications here.
|
||||
#
|
||||
# Note that a "better" (more elegant ?) would have been to use cargo update for
|
||||
# patched packages:
|
||||
# cargo update --offline -p package_1 -p package_2
|
||||
# But this is not possible since it requires that cargo local git db
|
||||
# to be populated and this is not the case as we fetch git repo ourself.
|
||||
|
||||
lockfile_orig = lockfile + ".orig"
|
||||
if not os.path.exists(lockfile_orig):
|
||||
shutil.copy(lockfile, lockfile_orig)
|
||||
|
||||
newlines = []
|
||||
with open(lockfile_orig, "r") as f:
|
||||
for line in f.readlines():
|
||||
if not line.startswith("source = \"git"):
|
||||
newlines.append(line)
|
||||
|
||||
with open(lockfile, "w") as f:
|
||||
f.writelines(newlines)
|
||||
}
|
||||
do_configure[postfuncs] += "cargo_common_do_patch_paths"
|
||||
|
||||
do_compile:prepend () {
|
||||
oe_cargo_fix_env
|
||||
}
|
||||
|
||||
oe_cargo_fix_env () {
|
||||
export CC="${RUST_TARGET_CC}"
|
||||
export CXX="${RUST_TARGET_CXX}"
|
||||
export CFLAGS="${CFLAGS}"
|
||||
export CXXFLAGS="${CXXFLAGS}"
|
||||
export AR="${AR}"
|
||||
export TARGET_CC="${RUST_TARGET_CC}"
|
||||
export TARGET_CXX="${RUST_TARGET_CXX}"
|
||||
export TARGET_CFLAGS="${CFLAGS}"
|
||||
export TARGET_CXXFLAGS="${CXXFLAGS}"
|
||||
export TARGET_AR="${AR}"
|
||||
export HOST_CC="${RUST_BUILD_CC}"
|
||||
export HOST_CXX="${RUST_BUILD_CXX}"
|
||||
export HOST_CFLAGS="${BUILD_CFLAGS}"
|
||||
export HOST_CXXFLAGS="${BUILD_CXXFLAGS}"
|
||||
export HOST_AR="${BUILD_AR}"
|
||||
}
|
||||
|
||||
EXTRA_OECARGO_PATHS ??= ""
|
||||
|
||||
EXPORT_FUNCTIONS do_configure
|
||||
|
||||
# The culprit for this setting is the libc crate,
|
||||
# which as of Jun 2023 calls directly into 32 bit time functions in glibc,
|
||||
# bypassing all of glibc provisions to choose the right Y2038-safe functions. As
|
||||
# rust components statically link with that crate, pretty much everything
|
||||
# is affected, and so there's no point trying to have recipe-specific
|
||||
# INSANE_SKIP entries.
|
||||
#
|
||||
# Upstream ticket and PR:
|
||||
# https://github.com/rust-lang/libc/issues/3223
|
||||
# https://github.com/rust-lang/libc/pull/3175
|
||||
INSANE_SKIP:append = " 32bit-time"
|
||||
32
sources/poky/meta/classes-recipe/cmake-qemu.bbclass
Normal file
32
sources/poky/meta/classes-recipe/cmake-qemu.bbclass
Normal file
@@ -0,0 +1,32 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
# Not all platforms are supported by Qemu. Using qemu-user therefore
|
||||
# involves a certain risk, which is also the reason why this feature
|
||||
# is not part of the main cmake class by default.
|
||||
#
|
||||
# One use case is the execution of cross-compiled unit tests with CTest
|
||||
# on the build machine. If CMAKE_EXEWRAPPER_ENABLED is configured,
|
||||
# cmake --build --target test
|
||||
# works transparently with qemu-user. If the cmake project is developed
|
||||
# with this use case in mind this works very nicely also out of an IDE
|
||||
# configured to use cmake-native for cross compiling.
|
||||
|
||||
inherit qemu cmake
|
||||
|
||||
DEPENDS:append:class-target = "${@' qemu-native' if bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', True, False, d) else ''}"
|
||||
|
||||
cmake_do_generate_toolchain_file:append:class-target() {
|
||||
if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true', 'false', d)}; then
|
||||
# Write out a qemu wrapper that will be used as exe_wrapper so that cmake
|
||||
# can run target helper binaries through that. This also allows to execute ctest.
|
||||
qemu_binary="${@qemu_wrapper_cmdline(d, '${STAGING_DIR_HOST}', ['${STAGING_DIR_HOST}/${libdir}','${STAGING_DIR_HOST}/${base_libdir}'])}"
|
||||
echo "#!/bin/sh" > "${WORKDIR}/cmake-qemuwrapper"
|
||||
echo "$qemu_binary \"\$@\"" >> "${WORKDIR}/cmake-qemuwrapper"
|
||||
chmod +x "${WORKDIR}/cmake-qemuwrapper"
|
||||
echo "set( CMAKE_CROSSCOMPILING_EMULATOR ${WORKDIR}/cmake-qemuwrapper)" \
|
||||
>> ${WORKDIR}/toolchain.cmake
|
||||
fi
|
||||
}
|
||||
249
sources/poky/meta/classes-recipe/cmake.bbclass
Normal file
249
sources/poky/meta/classes-recipe/cmake.bbclass
Normal file
@@ -0,0 +1,249 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Path to the CMake file to process.
|
||||
OECMAKE_SOURCEPATH ??= "${S}"
|
||||
|
||||
DEPENDS:prepend = "cmake-native "
|
||||
B = "${WORKDIR}/build"
|
||||
|
||||
# What CMake generator to use.
|
||||
# The supported options are "Unix Makefiles" or "Ninja".
|
||||
OECMAKE_GENERATOR ?= "Ninja"
|
||||
|
||||
python() {
|
||||
generator = d.getVar("OECMAKE_GENERATOR")
|
||||
if "Unix Makefiles" in generator:
|
||||
args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=" + d.getVar("MAKE")
|
||||
d.setVar("OECMAKE_GENERATOR_ARGS", args)
|
||||
d.setVarFlag("do_compile", "progress", "percent")
|
||||
elif "Ninja" in generator:
|
||||
args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=ninja"
|
||||
d.appendVar("DEPENDS", " ninja-native")
|
||||
d.setVar("OECMAKE_GENERATOR_ARGS", args)
|
||||
d.setVarFlag("do_compile", "progress", r"outof:^\[(\d+)/(\d+)\]\s+")
|
||||
else:
|
||||
bb.fatal("Unknown CMake Generator %s" % generator)
|
||||
}
|
||||
OECMAKE_AR ?= "${AR}"
|
||||
|
||||
# Compiler flags
|
||||
OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
|
||||
OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS}"
|
||||
OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG"
|
||||
OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG"
|
||||
OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
|
||||
OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
|
||||
|
||||
def oecmake_map_compiler(compiler, d):
|
||||
args = d.getVar(compiler).split()
|
||||
if args[0] == "ccache":
|
||||
return args[1], args[0]
|
||||
return args[0], ""
|
||||
|
||||
# C/C++ Compiler (without cpu arch/tune arguments)
|
||||
OECMAKE_C_COMPILER ?= "${@oecmake_map_compiler('CC', d)[0]}"
|
||||
OECMAKE_C_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CC', d)[1]}"
|
||||
OECMAKE_CXX_COMPILER ?= "${@oecmake_map_compiler('CXX', d)[0]}"
|
||||
OECMAKE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CXX', d)[1]}"
|
||||
|
||||
# clear compiler vars for allarch to avoid sig hash difference
|
||||
OECMAKE_C_COMPILER:allarch = ""
|
||||
OECMAKE_C_COMPILER_LAUNCHER:allarch = ""
|
||||
OECMAKE_CXX_COMPILER:allarch = ""
|
||||
OECMAKE_CXX_COMPILER_LAUNCHER:allarch = ""
|
||||
|
||||
OECMAKE_RPATH ?= ""
|
||||
OECMAKE_PERLNATIVE_DIR ??= ""
|
||||
OECMAKE_EXTRA_ROOT_PATH ?= ""
|
||||
|
||||
OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "ONLY"
|
||||
|
||||
EXTRA_OECMAKE:append = " ${PACKAGECONFIG_CONFARGS}"
|
||||
|
||||
export CMAKE_BUILD_PARALLEL_LEVEL
|
||||
CMAKE_BUILD_PARALLEL_LEVEL:task-compile = "${@oe.utils.parallel_make(d, False)}"
|
||||
CMAKE_BUILD_PARALLEL_LEVEL:task-install = "${@oe.utils.parallel_make(d, True)}"
|
||||
CMAKE_BUILD_PARALLEL_LEVEL:task-compile-ptest-base = "${@oe.utils.parallel_make(d, False)}"
|
||||
CMAKE_BUILD_PARALLEL_LEVEL:task-install-ptest-base = "${@oe.utils.parallel_make(d, True)}"
|
||||
|
||||
OECMAKE_TARGET_COMPILE ?= "all"
|
||||
OECMAKE_TARGET_INSTALL ?= "install"
|
||||
|
||||
def map_host_os_to_system_name(host_os):
|
||||
if host_os.startswith('darwin'):
|
||||
return 'Darwin'
|
||||
if host_os.startswith('mingw'):
|
||||
return 'Windows'
|
||||
if host_os.startswith('linux'):
|
||||
return 'Linux'
|
||||
return host_os
|
||||
|
||||
# CMake expects target architectures in the format of uname(2),
|
||||
# which do not always match TARGET_ARCH, so all the necessary
|
||||
# conversions should happen here.
|
||||
def map_host_arch_to_uname_arch(host_arch):
|
||||
if host_arch == "powerpc":
|
||||
return "ppc"
|
||||
if host_arch == "powerpc64le":
|
||||
return "ppc64le"
|
||||
if host_arch == "powerpc64":
|
||||
return "ppc64"
|
||||
return host_arch
|
||||
|
||||
|
||||
cmake_do_generate_toolchain_file() {
|
||||
if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
|
||||
cmake_crosscompiling="set( CMAKE_CROSSCOMPILING FALSE )"
|
||||
else
|
||||
cmake_sysroot="set( CMAKE_SYSROOT \"${RECIPE_SYSROOT}\" )"
|
||||
fi
|
||||
|
||||
cat > ${WORKDIR}/toolchain.cmake <<EOF
|
||||
# CMake system name must be something like "Linux".
|
||||
# This is important for cross-compiling.
|
||||
$cmake_crosscompiling
|
||||
set( CMAKE_SYSTEM_NAME ${@map_host_os_to_system_name(d.getVar('HOST_OS'))} )
|
||||
set( CMAKE_SYSTEM_PROCESSOR ${@map_host_arch_to_uname_arch(d.getVar('HOST_ARCH'))} )
|
||||
set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
|
||||
set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
|
||||
set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} )
|
||||
set( CMAKE_CXX_COMPILER_LAUNCHER ${OECMAKE_CXX_COMPILER_LAUNCHER} )
|
||||
set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
|
||||
find_program( CMAKE_AR ${OECMAKE_AR} DOC "Archiver" REQUIRED )
|
||||
|
||||
set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
|
||||
set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
|
||||
set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
|
||||
set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional CFLAGS for release" )
|
||||
set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "Additional CXXFLAGS for release" )
|
||||
set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional ASM FLAGS for release" )
|
||||
set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
|
||||
set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
|
||||
|
||||
# only search in the paths provided so cmake doesnt pick
|
||||
# up libraries and tools from the native build machine
|
||||
set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN} ${HOSTTOOLS_DIR})
|
||||
set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
|
||||
set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ${OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM} )
|
||||
set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
|
||||
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
|
||||
set( CMAKE_PROGRAM_PATH "/" )
|
||||
|
||||
$cmake_sysroot
|
||||
|
||||
# Use qt.conf settings
|
||||
set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
|
||||
|
||||
# We need to set the rpath to the correct directory as cmake does not provide any
|
||||
# directory as rpath by default
|
||||
set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
|
||||
|
||||
# Use RPATHs relative to build directory for reproducibility
|
||||
set( CMAKE_BUILD_RPATH_USE_ORIGIN ON )
|
||||
|
||||
# Use our cmake modules
|
||||
list(APPEND CMAKE_MODULE_PATH "${STAGING_DATADIR}/cmake/Modules/")
|
||||
|
||||
# add for non /usr/lib libdir, e.g. /usr/lib64
|
||||
set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
|
||||
|
||||
# add include dir to implicit includes in case it differs from /usr/include
|
||||
list(APPEND CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
|
||||
list(APPEND CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
addtask generate_toolchain_file after do_patch before do_configure
|
||||
|
||||
CONFIGURE_FILES = "CMakeLists.txt *.cmake"
|
||||
|
||||
do_configure[cleandirs] = "${@d.getVar('B') if d.getVar('S') != d.getVar('B') else ''}"
|
||||
|
||||
OECMAKE_ARGS = "\
|
||||
-DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
|
||||
-DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix') + '/')} \
|
||||
-DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \
|
||||
-DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \
|
||||
-DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
|
||||
-DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d. getVar('prefix') + '/')} \
|
||||
-DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
|
||||
-DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \
|
||||
-DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \
|
||||
-DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix') + '/')} \
|
||||
-DPYTHON_EXECUTABLE:PATH=${PYTHON} \
|
||||
-DPython_EXECUTABLE:PATH=${PYTHON} \
|
||||
-DPython3_EXECUTABLE:PATH=${PYTHON} \
|
||||
-DLIB_SUFFIX=${@d.getVar('baselib').replace('lib', '')} \
|
||||
-DCMAKE_INSTALL_SO_NO_EXE=0 \
|
||||
-DCMAKE_TOOLCHAIN_FILE:FILEPATH=${WORKDIR}/toolchain.cmake \
|
||||
-DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
|
||||
-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON \
|
||||
-DFETCHCONTENT_FULLY_DISCONNECTED=ON \
|
||||
-DCMAKE_EXPORT_COMPILE_COMMANDS:BOOL=ON \
|
||||
"
|
||||
|
||||
cmake_do_configure() {
|
||||
if [ "${OECMAKE_BUILDPATH}" ]; then
|
||||
bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
|
||||
fi
|
||||
|
||||
if [ "${S}" = "${B}" ]; then
|
||||
find ${B} -name CMakeFiles -or -name Makefile -or -name cmake_install.cmake -or -name CMakeCache.txt -delete
|
||||
fi
|
||||
|
||||
# Just like autotools cmake can use a site file to cache result that need generated binaries to run
|
||||
if [ -e ${WORKDIR}/site-file.cmake ] ; then
|
||||
oecmake_sitefile="-C ${WORKDIR}/site-file.cmake"
|
||||
else
|
||||
oecmake_sitefile=
|
||||
fi
|
||||
|
||||
cmake \
|
||||
${OECMAKE_GENERATOR_ARGS} \
|
||||
$oecmake_sitefile \
|
||||
${OECMAKE_SOURCEPATH} \
|
||||
${OECMAKE_ARGS} \
|
||||
${EXTRA_OECMAKE} \
|
||||
-Wno-dev
|
||||
}
|
||||
|
||||
# To disable verbose cmake logs for a given recipe or globally config metadata e.g. local.conf
|
||||
# add following
|
||||
#
|
||||
# CMAKE_VERBOSE = ""
|
||||
#
|
||||
|
||||
CMAKE_VERBOSE ??= "VERBOSE=1"
|
||||
|
||||
# Then run do_compile again
|
||||
cmake_runcmake_build() {
|
||||
bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
|
||||
eval ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
|
||||
}
|
||||
|
||||
# Install an already-generated project binary tree. Not checking the compile
|
||||
# dependencies again is particularly important for SDK use cases.
|
||||
cmake_runcmake_install() {
|
||||
bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --install '${B}'
|
||||
eval ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --install '${B}'
|
||||
}
|
||||
|
||||
cmake_do_compile() {
|
||||
cmake_runcmake_build --target ${OECMAKE_TARGET_COMPILE}
|
||||
}
|
||||
|
||||
cmake_do_install() {
|
||||
if [ "${OECMAKE_TARGET_INSTALL}" = "install" ]; then
|
||||
DESTDIR='${D}' cmake_runcmake_install
|
||||
else
|
||||
# Legacy path which supports also custom install targets
|
||||
DESTDIR='${D}' cmake_runcmake_build --target ${OECMAKE_TARGET_INSTALL}
|
||||
fi
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
|
||||
114
sources/poky/meta/classes-recipe/cml1.bbclass
Normal file
114
sources/poky/meta/classes-recipe/cml1.bbclass
Normal file
@@ -0,0 +1,114 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# returns all the elements from the src uri that are .cfg files
|
||||
def find_cfgs(d):
|
||||
sources=src_patches(d, True)
|
||||
sources_list=[]
|
||||
for s in sources:
|
||||
if s.endswith('.cfg'):
|
||||
sources_list.append(s)
|
||||
|
||||
return sources_list
|
||||
|
||||
cml1_do_configure() {
|
||||
set -e
|
||||
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
|
||||
yes '' | oe_runmake oldconfig
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_configure
|
||||
|
||||
inherit terminal
|
||||
|
||||
OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS TERMINFO CROSS_CURSES_LIB CROSS_CURSES_INC"
|
||||
HOST_EXTRACFLAGS = "${BUILD_CFLAGS} ${BUILD_LDFLAGS}"
|
||||
HOSTLDFLAGS = "${BUILD_LDFLAGS}"
|
||||
CROSS_CURSES_LIB = "-lncurses -ltinfo"
|
||||
CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
|
||||
TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
|
||||
|
||||
KCONFIG_CONFIG_COMMAND ??= "menuconfig"
|
||||
KCONFIG_CONFIG_ENABLE_MENUCONFIG ??= "true"
|
||||
KCONFIG_CONFIG_ROOTDIR ??= "${B}"
|
||||
python do_menuconfig() {
|
||||
import shutil
|
||||
|
||||
if not bb.utils.to_boolean(d.getVar("KCONFIG_CONFIG_ENABLE_MENUCONFIG")):
|
||||
bb.fatal("do_menuconfig is disabled, please check KCONFIG_CONFIG_ENABLE_MENUCONFIG variable.")
|
||||
return
|
||||
|
||||
config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
|
||||
configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
|
||||
|
||||
try:
|
||||
mtime = os.path.getmtime(config)
|
||||
shutil.copy(config, configorig)
|
||||
except OSError:
|
||||
mtime = 0
|
||||
|
||||
# setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overriden to pkg-config-native)
|
||||
d.setVar("PKG_CONFIG_DIR", "${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig")
|
||||
d.setVar("PKG_CONFIG_PATH", "${PKG_CONFIG_DIR}:${STAGING_DATADIR_NATIVE}/pkgconfig")
|
||||
d.setVar("PKG_CONFIG_LIBDIR", "${PKG_CONFIG_DIR}")
|
||||
d.setVarFlag("PKG_CONFIG_SYSROOT_DIR", "unexport", "1")
|
||||
# ensure that environment variables are overwritten with this tasks 'd' values
|
||||
d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
|
||||
|
||||
oe_terminal("sh -c 'make %s; if [ \\$? -ne 0 ]; then echo \"Command failed.\"; printf \"Press any key to continue... \"; read r; fi'" % d.getVar('KCONFIG_CONFIG_COMMAND'),
|
||||
d.getVar('PN') + ' Configuration', d)
|
||||
|
||||
try:
|
||||
newmtime = os.path.getmtime(config)
|
||||
except OSError:
|
||||
newmtime = 0
|
||||
|
||||
if newmtime > mtime:
|
||||
bb.plain("Changed configuration saved at:\n %s\nRecompile will be forced" % config)
|
||||
bb.build.write_taint('do_compile', d)
|
||||
}
|
||||
do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
|
||||
do_menuconfig[nostamp] = "1"
|
||||
do_menuconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
|
||||
addtask menuconfig after do_configure
|
||||
|
||||
python do_diffconfig() {
|
||||
import shutil
|
||||
import subprocess
|
||||
|
||||
workdir = d.getVar('WORKDIR')
|
||||
fragment = workdir + '/fragment.cfg'
|
||||
configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
|
||||
config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
|
||||
|
||||
try:
|
||||
md5newconfig = bb.utils.md5_file(configorig)
|
||||
md5config = bb.utils.md5_file(config)
|
||||
isdiff = md5newconfig != md5config
|
||||
except IOError as e:
|
||||
bb.fatal("No config files found. Did you do menuconfig ?\n%s" % e)
|
||||
|
||||
if isdiff:
|
||||
statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment
|
||||
# No need to check the exit code as we know it's going to be
|
||||
# non-zero, but that's what we expect.
|
||||
subprocess.call(statement, shell=True)
|
||||
|
||||
bb.plain("Config fragment has been dumped into:\n %s" % fragment)
|
||||
else:
|
||||
if os.path.exists(fragment):
|
||||
os.unlink(fragment)
|
||||
}
|
||||
|
||||
do_diffconfig[nostamp] = "1"
|
||||
do_diffconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
|
||||
addtask diffconfig
|
||||
|
||||
do_showconfig() {
|
||||
bbplain "Config file written to ${KCONFIG_CONFIG_ROOTDIR}/.config"
|
||||
}
|
||||
do_showconfig[nostamp] = "1"
|
||||
addtask showconfig after do_configure
|
||||
269
sources/poky/meta/classes-recipe/compress_doc.bbclass
Normal file
269
sources/poky/meta/classes-recipe/compress_doc.bbclass
Normal file
@@ -0,0 +1,269 @@
|
||||
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

# Compress man pages in ${mandir} and info pages in ${infodir}
#
# 1. The doc will be compressed to gz format by default.
#
# 2. It will automatically correct the compressed doc which is not
# in ${DOC_COMPRESS} but in ${DOC_COMPRESS_LIST} to the format
# of ${DOC_COMPRESS} policy
#
# 3. It is easy to add a new type compression by editing
# local.conf, such as:
# DOC_COMPRESS_LIST:append = ' abc'
# DOC_COMPRESS = 'abc'
# DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***'
# DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***'

# All supported compression policy
DOC_COMPRESS_LIST ?= "gz xz bz2"

# Compression policy, must be one of ${DOC_COMPRESS_LIST}
DOC_COMPRESS ?= "gz"

# Compression shell command
# (gzip -n omits the timestamp so the output stays reproducible)
DOC_COMPRESS_CMD[gz] ?= 'gzip -v -9 -n'
DOC_COMPRESS_CMD[bz2] ?= "bzip2 -v -9"
DOC_COMPRESS_CMD[xz] ?= "xz -v"

# Decompression shell command
DOC_DECOMPRESS_CMD[gz] ?= 'gunzip -v'
DOC_DECOMPRESS_CMD[bz2] ?= "bunzip2 -v"
DOC_DECOMPRESS_CMD[xz] ?= "unxz -v"

# Hook the re-compression into packaging, before files are split into
# individual packages.
PACKAGE_PREPROCESS_FUNCS += "package_do_compress_doc compress_doc_updatealternatives"
# Packaging hook: normalise every man/info page under PKGD to the single
# DOC_COMPRESS format (decompress foreign formats first, then compress).
python package_do_compress_doc() {
    compress_mode = d.getVar('DOC_COMPRESS')
    compress_list = (d.getVar('DOC_COMPRESS_LIST') or '').split()
    if compress_mode not in compress_list:
        bb.fatal('Compression policy %s not supported (not listed in %s)\n' % (compress_mode, compress_list))

    dvar = d.getVar('PKGD')
    # Shell command templates keyed by format name (varflags on the
    # DOC_COMPRESS_CMD / DOC_DECOMPRESS_CMD variables).
    compress_cmds = {}
    decompress_cmds = {}
    for mode in compress_list:
        compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode)
        decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode)

    mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir"))
    if os.path.exists(mandir):
        # Decompress doc files which format is not compress_mode
        decompress_doc(mandir, compress_mode, decompress_cmds)
        compress_doc(mandir, compress_mode, compress_cmds)

    infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir"))
    if os.path.exists(infodir):
        # Decompress doc files which format is not compress_mode
        decompress_doc(infodir, compress_mode, decompress_cmds)
        compress_doc(infodir, compress_mode, compress_cmds)
}
def _get_compress_format(file, compress_format_list):
|
||||
for compress_format in compress_format_list:
|
||||
compress_suffix = '.' + compress_format
|
||||
if file.endswith(compress_suffix):
|
||||
return compress_format
|
||||
|
||||
return ''
|
||||
|
||||
# Collect hardlinks to dict, each element in dict lists hardlinks
|
||||
# which points to the same doc file.
|
||||
# {hardlink10: [hardlink11, hardlink12],,,}
|
||||
# The hardlink10, hardlink11 and hardlink12 are the same file.
|
||||
def _collect_hardlink(hardlink_dict, file):
|
||||
for hardlink in hardlink_dict:
|
||||
# Add to the existed hardlink
|
||||
if os.path.samefile(hardlink, file):
|
||||
hardlink_dict[hardlink].append(file)
|
||||
return hardlink_dict
|
||||
|
||||
hardlink_dict[file] = []
|
||||
return hardlink_dict
|
||||
|
||||
def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False):
    """(De)compress each hardlink group collected by _collect_hardlink().

    Only the group's key file is run through the shell command; the other
    names in the group are re-created with os.link() so all names keep
    sharing one inode instead of the data being (de)compressed N times.
    """
    import subprocess
    for target in hardlink_dict:
        if decompress:
            # Pick the decompressor matching the file's current suffix.
            compress_format = _get_compress_format(target, shell_cmds.keys())
            cmd = "%s -f %s" % (shell_cmds[compress_format], target)
            bb.note('decompress hardlink %s' % target)
        else:
            cmd = "%s -f %s" % (shell_cmds[compress_mode], target)
            bb.note('compress hardlink %s' % target)
        (retval, output) = subprocess.getstatusoutput(cmd)
        if retval:
            # Best-effort: warn and stop rather than failing the task.
            bb.warn("de/compress file failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
            return

        for hardlink_dup in hardlink_dict[target]:
            if decompress:
                # Remove compress suffix
                compress_suffix = '.' + compress_format
                new_hardlink = hardlink_dup[:-len(compress_suffix)]
                new_target = target[:-len(compress_suffix)]
            else:
                # Append compress suffix
                compress_suffix = '.' + compress_mode
                new_hardlink = hardlink_dup + compress_suffix
                new_target = target + compress_suffix

            bb.note('hardlink %s-->%s' % (new_hardlink, new_target))
            if not os.path.exists(new_hardlink):
                os.link(new_target, new_hardlink)
            if os.path.exists(hardlink_dup):
                os.unlink(hardlink_dup)
def _process_symlink(file, compress_format, decompress=False):
    # Re-point a doc symlink after its target changed name: append the
    # '.<format>' suffix when compressing, strip it when decompressing,
    # then remove the old link.
    suffix = '.' + compress_format
    source = os.readlink(file)
    if decompress:
        new_linkname = file[:-len(suffix)]
        new_source = source[:-len(suffix)]
    else:
        new_linkname = file + suffix
        new_source = source + suffix

    bb.note('symlink %s-->%s' % (new_linkname, new_source))
    if not os.path.exists(new_linkname):
        os.symlink(new_source, new_linkname)

    os.unlink(file)
def _is_info(file):
|
||||
flags = '.info .info-'.split()
|
||||
for flag in flags:
|
||||
if flag in os.path.basename(file):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def _is_man(file):
|
||||
import re
|
||||
|
||||
# It refers MANSECT-var in man(1.6g)'s man.config
|
||||
# ".1:.1p:.8:.2:.3:.3p:.4:.5:.6:.7:.9:.0p:.tcl:.n:.l:.p:.o"
|
||||
# Not start with '.', and contain the above colon-seperate element
|
||||
p = re.compile(r'[^\.]+\.([1-9lnop]|0p|tcl)')
|
||||
if p.search(file):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def _is_compress_doc(file, compress_format_list):
    # Report whether *file* is a compressed man/info page.  Returns
    # (True, format) on a match, (False, '') otherwise.
    fmt = _get_compress_format(file, compress_format_list)
    suffix = '.' + fmt
    if not file.endswith(suffix):
        return False, ''
    stripped = file[:-len(suffix)]
    if _is_info(stripped) or _is_man(stripped):
        return True, fmt
    return False, ''
def compress_doc(topdir, compress_mode, compress_cmds):
    """Walk *topdir* and compress every man/info page to *compress_mode*.

    Symlinks are re-pointed, hardlink groups are collected and compressed
    once per inode after the walk, and plain files are piped through the
    configured shell command.
    """
    import subprocess
    hardlink_dict = {}
    for root, dirs, files in os.walk(topdir):
        for f in files:
            file = os.path.join(root, f)
            if os.path.isdir(file):
                continue

            if _is_info(file) or _is_man(file):
                # Symlink
                if os.path.islink(file):
                    _process_symlink(file, compress_mode)
                # Hardlink
                elif os.lstat(file).st_nlink > 1:
                    _collect_hardlink(hardlink_dict, file)
                # Normal file
                elif os.path.isfile(file):
                    cmd = "%s %s" % (compress_cmds[compress_mode], file)
                    (retval, output) = subprocess.getstatusoutput(cmd)
                    if retval:
                        # Best-effort: warn and skip this file on failure.
                        bb.warn("compress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
                        continue
                    bb.note('compress file %s' % file)

    # Hardlink groups are processed after the walk so each inode is
    # compressed exactly once.
    _process_hardlink(hardlink_dict, compress_mode, compress_cmds)
# Decompress doc files which format is not compress_mode
def decompress_doc(topdir, compress_mode, decompress_cmds):
    """Walk *topdir* and decompress doc files whose compression format
    differs from *compress_mode*, so compress_doc() can then re-compress
    everything uniformly."""
    import subprocess
    hardlink_dict = {}
    decompress = True
    for root, dirs, files in os.walk(topdir):
        for f in files:
            file = os.path.join(root, f)
            if os.path.isdir(file):
                continue

            res, compress_format = _is_compress_doc(file, decompress_cmds.keys())
            # Decompress files which format is not compress_mode
            if res and compress_mode!=compress_format:
                # Symlink
                if os.path.islink(file):
                    _process_symlink(file, compress_format, decompress)
                # Hardlink
                elif os.lstat(file).st_nlink > 1:
                    _collect_hardlink(hardlink_dict, file)
                # Normal file
                elif os.path.isfile(file):
                    cmd = "%s %s" % (decompress_cmds[compress_format], file)
                    (retval, output) = subprocess.getstatusoutput(cmd)
                    if retval:
                        # Best-effort: warn and skip this file on failure.
                        bb.warn("decompress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
                        continue
                    bb.note('decompress file %s' % file)

    _process_hardlink(hardlink_dict, compress_mode, decompress_cmds, decompress)
# When a man/info page is also an update-alternatives link, the alternative
# name, link name and target must all gain the compression suffix, otherwise
# the alternative would point at a file that no longer exists.
python compress_doc_updatealternatives () {
    if not bb.data.inherits_class('update-alternatives', d):
        return

    mandir = d.getVar("mandir")
    infodir = d.getVar("infodir")
    compress_mode = d.getVar('DOC_COMPRESS')
    for pkg in (d.getVar('PACKAGES') or "").split():
        old_names = (d.getVar('ALTERNATIVE:%s' % pkg) or "").split()
        new_names = []
        for old_name in old_names:
            old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name)
            # Resolve the target with the same precedence the
            # update-alternatives class uses: per-package flag, global flag,
            # per-package variable, global variable, then the link name.
            old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name) or \
                d.getVarFlag('ALTERNATIVE_TARGET', old_name) or \
                d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
                d.getVar('ALTERNATIVE_TARGET') or \
                old_link
            # Sometimes old_target is specified as relative to the link name.
            old_target = os.path.join(os.path.dirname(old_link), old_target)

            # The updatealternatives used for compress doc
            if mandir in old_target or infodir in old_target:
                new_name = old_name + '.' + compress_mode
                new_link = old_link + '.' + compress_mode
                new_target = old_target + '.' + compress_mode
                d.delVarFlag('ALTERNATIVE_LINK_NAME', old_name)
                d.setVarFlag('ALTERNATIVE_LINK_NAME', new_name, new_link)
                if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name):
                    d.delVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name)
                    d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, new_name, new_target)
                elif d.getVarFlag('ALTERNATIVE_TARGET', old_name):
                    d.delVarFlag('ALTERNATIVE_TARGET', old_name)
                    d.setVarFlag('ALTERNATIVE_TARGET', new_name, new_target)
                elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg):
                    d.setVar('ALTERNATIVE_TARGET_%s' % pkg, new_target)
                elif d.getVar('ALTERNATIVE_TARGET'):
                    d.setVar('ALTERNATIVE_TARGET', new_target)

                new_names.append(new_name)

        if new_names:
            d.setVar('ALTERNATIVE:%s' % pkg, ' '.join(new_names))
}
86
sources/poky/meta/classes-recipe/core-image.bbclass
Normal file
86
sources/poky/meta/classes-recipe/core-image.bbclass
Normal file
@@ -0,0 +1,86 @@
|
||||
# Common code for generating core reference images
|
||||
#
|
||||
# Copyright (C) 2007-2011 Linux Foundation
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
# IMAGE_FEATURES control content of the core reference images
|
||||
#
|
||||
# By default we install packagegroup-core-boot and packagegroup-base-extended packages;
|
||||
# this gives us working (console only) rootfs.
|
||||
#
|
||||
# Available IMAGE_FEATURES:
|
||||
#
|
||||
# - weston - Weston Wayland compositor
|
||||
# - x11 - X server
|
||||
# - x11-base - X server with minimal environment
|
||||
# - x11-sato - OpenedHand Sato environment
|
||||
# - tools-debug - debugging tools
|
||||
# - eclipse-debug - Eclipse remote debugging support
|
||||
# - tools-profile - profiling tools
|
||||
# - tools-testapps - tools usable to make some device tests
|
||||
# - tools-sdk - SDK (C/C++ compiler, autotools, etc.)
|
||||
# - nfs-server - NFS server
|
||||
# - nfs-client - NFS client
|
||||
# - ssh-server-dropbear - SSH server (dropbear)
|
||||
# - ssh-server-openssh - SSH server (openssh)
|
||||
# - hwcodecs - Install hardware acceleration codecs
|
||||
# - package-management - installs package management tools and preserves the package manager database
|
||||
# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
|
||||
# - empty-root-password
|
||||
# - allow-empty-password
|
||||
# - allow-root-login
|
||||
# - post-install-logging
|
||||
# - serial-autologin-root - with 'empty-root-password': autologin 'root' on the serial console
|
||||
# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
|
||||
# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
|
||||
# - lic-pkgs - license packages for all installed packages in the rootfs, requires
|
||||
# LICENSE_CREATE_PACKAGE="1" to be set when building packages too
|
||||
# - doc-pkgs - documentation packages for all installed packages in the rootfs
|
||||
# - bash-completion-pkgs - bash-completion packages for recipes using bash-completion bbclass
|
||||
# - ptest-pkgs - ptest packages for all ptest-enabled recipes
|
||||
# - read-only-rootfs - tweaks an image to support read-only rootfs
|
||||
# - stateless-rootfs - systemctl-native not run, image populated by systemd at runtime
|
||||
# - splash - bootup splash screen
|
||||
#
|
||||
FEATURE_PACKAGES_weston = "packagegroup-core-weston"
|
||||
FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
|
||||
FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
|
||||
FEATURE_PACKAGES_x11-sato = "packagegroup-core-x11-sato"
|
||||
FEATURE_PACKAGES_tools-debug = "packagegroup-core-tools-debug"
|
||||
FEATURE_PACKAGES_eclipse-debug = "packagegroup-core-eclipse-debug"
|
||||
FEATURE_PACKAGES_tools-profile = "packagegroup-core-tools-profile"
|
||||
FEATURE_PACKAGES_tools-testapps = "packagegroup-core-tools-testapps"
|
||||
FEATURE_PACKAGES_tools-sdk = "packagegroup-core-sdk packagegroup-core-standalone-sdk-target"
|
||||
FEATURE_PACKAGES_nfs-server = "packagegroup-core-nfs-server"
|
||||
FEATURE_PACKAGES_nfs-client = "packagegroup-core-nfs-client"
|
||||
FEATURE_PACKAGES_ssh-server-dropbear = "packagegroup-core-ssh-dropbear"
|
||||
FEATURE_PACKAGES_ssh-server-openssh = "packagegroup-core-ssh-openssh"
|
||||
FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}"
|
||||
|
||||
|
||||
# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2'
|
||||
# Including image feature foo would replace the image features bar1 and bar2
|
||||
IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
|
||||
# Do not install openssh complementary packages if either packagegroup-core-ssh-dropbear or dropbear
|
||||
# is installed, to avoid openssh-dropbear conflict
|
||||
# see [Yocto #14858] for more information
|
||||
PACKAGE_EXCLUDE_COMPLEMENTARY:append = "${@bb.utils.contains_any('PACKAGE_INSTALL', 'packagegroup-core-ssh-dropbear dropbear', ' openssh', '' , d)}"
|
||||
|
||||
# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2'
|
||||
# An error exception would be raised if both image features foo and bar1(or bar2) are included
|
||||
|
||||
MACHINE_HWCODECS ??= ""
|
||||
|
||||
CORE_IMAGE_BASE_INSTALL = '\
|
||||
packagegroup-core-boot \
|
||||
packagegroup-base-extended \
|
||||
\
|
||||
${CORE_IMAGE_EXTRA_INSTALL} \
|
||||
'
|
||||
|
||||
CORE_IMAGE_EXTRA_INSTALL ?= ""
|
||||
|
||||
IMAGE_INSTALL ?= "${CORE_IMAGE_BASE_INSTALL}"
|
||||
|
||||
inherit image
|
||||
33
sources/poky/meta/classes-recipe/cpan-base.bbclass
Normal file
33
sources/poky/meta/classes-recipe/cpan-base.bbclass
Normal file
@@ -0,0 +1,33 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
#
|
||||
# cpan-base providers various perl related information needed for building
|
||||
# cpan modules
|
||||
#
|
||||
FILES:${PN} += "${libdir}/perl5 ${datadir}/perl5"
|
||||
|
||||
DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
|
||||
RDEPENDS:${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
|
||||
|
||||
inherit perl-version
|
||||
|
||||
def is_target(d):
    # "yes" when building for the target, "no" for a native (build-host)
    # recipe; consumed by perl via the PERLCONFIGTARGET environment.
    return "no" if bb.data.inherits_class('native', d) else "yes"
PERLLIBDIRS = "${libdir}/perl5"
|
||||
PERLLIBDIRS:class-native = "${libdir}/perl5"
|
||||
|
||||
def cpan_upstream_check_pattern(d):
    # Derive an UPSTREAM_CHECK_REGEX from the first metacpan.org entry in
    # SRC_URI: take the URI's basename and substitute the current version
    # (PV) with a (?P<pver>...) capture group.
    for uri in (d.getVar('SRC_URI') or '').split(' '):
        if not uri.startswith("https://cpan.metacpan.org"):
            continue
        basename = uri.split('/')[-1]
        return basename.replace(d.getVar('PV'), r'(?P<pver>\d+.\d+)')
    return ''
UPSTREAM_CHECK_REGEX ?= "${@cpan_upstream_check_pattern(d)}"
|
||||
71
sources/poky/meta/classes-recipe/cpan.bbclass
Normal file
71
sources/poky/meta/classes-recipe/cpan.bbclass
Normal file
@@ -0,0 +1,71 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
#
|
||||
# This is for perl modules that use the old Makefile.PL build system
|
||||
#
|
||||
inherit cpan-base perlnative
|
||||
|
||||
EXTRA_CPANFLAGS ?= ""
|
||||
EXTRA_PERLFLAGS ?= ""
|
||||
|
||||
# Env var which tells perl if it should use host (no) or target (yes) settings
|
||||
export PERLCONFIGTARGET = "${@is_target(d)}"
|
||||
|
||||
# Env var which tells perl where the perl include files are
|
||||
export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}/CORE"
|
||||
export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}"
|
||||
export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
|
||||
export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
|
||||
export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
|
||||
|
||||
cpan_do_configure () {
|
||||
yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor NO_PERLLOCAL=1 NO_PACKLIST=1 PERL=$(which perl) ${EXTRA_CPANFLAGS}
|
||||
|
||||
# Makefile.PLs can exit with success without generating a
|
||||
# Makefile, e.g. in cases of missing configure time
|
||||
# dependencies. This is considered a best practice by
|
||||
# cpantesters.org. See:
|
||||
# * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
|
||||
# * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
|
||||
[ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL"
|
||||
|
||||
if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
|
||||
. ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh
|
||||
# Use find since there can be a Makefile generated for each Makefile.PL
|
||||
for f in `find -name Makefile.PL`; do
|
||||
f2=`echo $f | sed -e 's/.PL//'`
|
||||
test -f $f2 || continue
|
||||
sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \
|
||||
-e 's/perl.real/perl/' \
|
||||
-e "s|^\(CCFLAGS =.*\)|\1 ${CFLAGS}|" \
|
||||
$f2
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
do_configure:append:class-target() {
|
||||
find . -name Makefile | xargs sed -E -i \
|
||||
-e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
|
||||
}
|
||||
|
||||
do_configure:append:class-nativesdk() {
|
||||
find . -name Makefile | xargs sed -E -i \
|
||||
-e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
|
||||
}
|
||||
|
||||
cpan_do_compile () {
|
||||
oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}"
|
||||
}
|
||||
|
||||
cpan_do_install () {
|
||||
oe_runmake DESTDIR="${D}" install_vendor
|
||||
for PERLSCRIPT in `grep -rIEl '#! *${bindir}/perl-native.*/perl' ${D}`; do
|
||||
sed -i -e 's|${bindir}/perl-native.*/perl|/usr/bin/env nativeperl|' $PERLSCRIPT
|
||||
done
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_configure do_compile do_install
|
||||
47
sources/poky/meta/classes-recipe/cpan_build.bbclass
Normal file
47
sources/poky/meta/classes-recipe/cpan_build.bbclass
Normal file
@@ -0,0 +1,47 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
#
|
||||
# This is for perl modules that use the new Build.PL build system
|
||||
#
|
||||
inherit cpan-base perlnative
|
||||
|
||||
EXTRA_CPAN_BUILD_FLAGS ?= ""
|
||||
|
||||
# Env var which tells perl if it should use host (no) or target (yes) settings
|
||||
export PERLCONFIGTARGET = "${@is_target(d)}"
|
||||
export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
|
||||
export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
|
||||
export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
|
||||
export LD = "${CCLD}"
|
||||
|
||||
cpan_build_do_configure () {
|
||||
if [ "${@is_target(d)}" = "yes" ]; then
|
||||
# build for target
|
||||
. ${STAGING_LIBDIR}/perl5/config.sh
|
||||
fi
|
||||
|
||||
perl Build.PL --installdirs vendor --destdir ${D} \
|
||||
${EXTRA_CPAN_BUILD_FLAGS}
|
||||
|
||||
# Build.PLs can exit with success without generating a
|
||||
# Build, e.g. in cases of missing configure time
|
||||
# dependencies. This is considered a best practice by
|
||||
# cpantesters.org. See:
|
||||
# * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
|
||||
# * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
|
||||
[ -e Build ] || bbfatal "No Build was generated by Build.PL"
|
||||
}
|
||||
|
||||
cpan_build_do_compile () {
|
||||
perl Build --perl "${bindir}/perl" verbose=1
|
||||
}
|
||||
|
||||
cpan_build_do_install () {
|
||||
perl Build install --destdir ${D}
|
||||
}
|
||||
|
||||
EXPORT_FUNCTIONS do_configure do_compile do_install
|
||||
200
sources/poky/meta/classes-recipe/cross-canadian.bbclass
Normal file
200
sources/poky/meta/classes-recipe/cross-canadian.bbclass
Normal file
@@ -0,0 +1,200 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
# NOTE - When using this class the user is responsible for ensuring that
|
||||
# TRANSLATED_TARGET_ARCH is added into PN. This ensures that if the TARGET_ARCH
|
||||
# is changed, another nativesdk xxx-canadian-cross can be installed
|
||||
#
|
||||
|
||||
|
||||
# SDK packages are built either explicitly by the user,
|
||||
# or indirectly via dependency. No need to be in 'world'.
|
||||
EXCLUDE_FROM_WORLD = "1"
|
||||
NATIVESDKLIBC ?= "libc-glibc"
|
||||
LIBCOVERRIDE = ":${NATIVESDKLIBC}"
|
||||
CLASSOVERRIDE = "class-cross-canadian"
|
||||
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
|
||||
|
||||
#
|
||||
# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
|
||||
#
|
||||
PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
|
||||
BASECANADIANEXTRAOS ?= "linux-musl"
|
||||
CANADIANEXTRAOS = "${BASECANADIANEXTRAOS}"
|
||||
CANADIANEXTRAVENDOR = ""
|
||||
MODIFYTOS ??= "1"
|
||||
python () {
|
||||
archs = d.getVar('PACKAGE_ARCHS').split()
|
||||
sdkarchs = []
|
||||
for arch in archs:
|
||||
sdkarchs.append(arch + '-${SDKPKGSUFFIX}')
|
||||
d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
|
||||
|
||||
# Allow the following code segment to be disabled, e.g. meta-environment
|
||||
if d.getVar("MODIFYTOS") != "1":
|
||||
return
|
||||
|
||||
if d.getVar("TCLIBC") in [ 'baremetal', 'newlib' ]:
|
||||
return
|
||||
|
||||
tos = d.getVar("TARGET_OS")
|
||||
tos_known = ["mingw32"]
|
||||
extralibcs = [""]
|
||||
if "musl" in d.getVar("BASECANADIANEXTRAOS"):
|
||||
extralibcs.append("musl")
|
||||
if "android" in tos:
|
||||
extralibcs.append("android")
|
||||
for variant in ["", "spe", "x32", "eabi", "n32", "_ilp32"]:
|
||||
for libc in extralibcs:
|
||||
entry = "linux"
|
||||
if variant and libc:
|
||||
entry = entry + "-" + libc + variant
|
||||
elif variant:
|
||||
entry = entry + "-gnu" + variant
|
||||
elif libc:
|
||||
entry = entry + "-" + libc
|
||||
tos_known.append(entry)
|
||||
if tos not in tos_known:
|
||||
bb.fatal("Building cross-candian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS"))
|
||||
|
||||
for n in ["PROVIDES", "DEPENDS"]:
|
||||
d.setVar(n, d.getVar(n))
|
||||
d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN"))
|
||||
for prefix in ["AR", "AS", "DLLTOOL", "CC", "CXX", "GCC", "LD", "LIPO", "NM", "OBJDUMP", "RANLIB", "STRIP", "WINDRES"]:
|
||||
n = prefix + "_FOR_TARGET"
|
||||
d.setVar(n, d.getVar(n))
|
||||
# This is a bit ugly. We need to zero LIBC/ABI extension which will change TARGET_OS
|
||||
# however we need the old value in some variables. We expand those here first.
|
||||
tarch = d.getVar("TARGET_ARCH")
|
||||
if tarch == "x86_64":
|
||||
d.setVar("LIBCEXTENSION", "")
|
||||
d.setVar("ABIEXTENSION", "")
|
||||
d.appendVar("CANADIANEXTRAOS", " linux-gnux32")
|
||||
for extraos in d.getVar("BASECANADIANEXTRAOS").split():
|
||||
d.appendVar("CANADIANEXTRAOS", " " + extraos + "x32")
|
||||
elif tarch == "powerpc":
|
||||
# PowerPC can build "linux" and "linux-gnuspe"
|
||||
d.setVar("LIBCEXTENSION", "")
|
||||
d.setVar("ABIEXTENSION", "")
|
||||
d.appendVar("CANADIANEXTRAOS", " linux-gnuspe")
|
||||
for extraos in d.getVar("BASECANADIANEXTRAOS").split():
|
||||
d.appendVar("CANADIANEXTRAOS", " " + extraos + "spe")
|
||||
elif tarch == "mips64":
|
||||
d.appendVar("CANADIANEXTRAOS", " linux-gnun32")
|
||||
for extraos in d.getVar("BASECANADIANEXTRAOS").split():
|
||||
d.appendVar("CANADIANEXTRAOS", " " + extraos + "n32")
|
||||
if tarch == "arm" or tarch == "armeb":
|
||||
d.appendVar("CANADIANEXTRAOS", " linux-gnueabi linux-musleabi")
|
||||
d.setVar("TARGET_OS", "linux-gnueabi")
|
||||
else:
|
||||
d.setVar("TARGET_OS", "linux")
|
||||
|
||||
# Also need to handle multilib target vendors
|
||||
vendors = d.getVar("CANADIANEXTRAVENDOR")
|
||||
if not vendors:
|
||||
vendors = all_multilib_tune_values(d, 'TARGET_VENDOR')
|
||||
origvendor = d.getVar("TARGET_VENDOR_MULTILIB_ORIGINAL")
|
||||
if origvendor:
|
||||
d.setVar("TARGET_VENDOR", origvendor)
|
||||
if origvendor not in vendors.split():
|
||||
vendors = origvendor + " " + vendors
|
||||
d.setVar("CANADIANEXTRAVENDOR", vendors)
|
||||
}
|
||||
MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
|
||||
|
||||
INHIBIT_DEFAULT_DEPS = "1"
|
||||
|
||||
STAGING_DIR_HOST = "${RECIPE_SYSROOT}"
|
||||
|
||||
TOOLCHAIN_OPTIONS = " --sysroot=${RECIPE_SYSROOT}"
|
||||
|
||||
PATH:append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
|
||||
PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
|
||||
|
||||
HOST_ARCH = "${SDK_ARCH}"
|
||||
HOST_VENDOR = "${SDK_VENDOR}"
|
||||
HOST_OS = "${SDK_OS}"
|
||||
HOST_PREFIX = "${SDK_PREFIX}"
|
||||
HOST_CC_ARCH = "${SDK_CC_ARCH}"
|
||||
HOST_LD_ARCH = "${SDK_LD_ARCH}"
|
||||
HOST_AS_ARCH = "${SDK_AS_ARCH}"
|
||||
|
||||
#assign DPKG_ARCH
|
||||
DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH'), '')}"
|
||||
|
||||
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
|
||||
CFLAGS = "${BUILDSDK_CFLAGS}"
|
||||
CXXFLAGS = "${BUILDSDK_CFLAGS}"
|
||||
LDFLAGS = "${BUILDSDK_LDFLAGS} \
|
||||
-Wl,-rpath-link,${STAGING_LIBDIR}/.. \
|
||||
-Wl,-rpath,${libdir}/.. "
|
||||
|
||||
#
|
||||
# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
|
||||
# binaries
|
||||
#
|
||||
DEPENDS:append = " chrpath-replacement-native"
|
||||
EXTRANATIVEPATH += "chrpath-native"
|
||||
|
||||
# Path mangling needed by the cross packaging
|
||||
# Note that we use := here to ensure that libdir and includedir are
|
||||
# target paths.
|
||||
target_base_prefix := "${base_prefix}"
|
||||
target_prefix := "${prefix}"
|
||||
target_exec_prefix := "${exec_prefix}"
|
||||
target_base_libdir = "${target_base_prefix}/${baselib}"
|
||||
target_libdir = "${target_exec_prefix}/${baselib}"
|
||||
target_includedir := "${includedir}"
|
||||
|
||||
# Change to place files in SDKPATH
|
||||
base_prefix = "${SDKPATHNATIVE}"
|
||||
prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
|
||||
exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
|
||||
bindir = "${exec_prefix}/bin/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
|
||||
sbindir = "${bindir}"
|
||||
base_bindir = "${bindir}"
|
||||
base_sbindir = "${bindir}"
|
||||
libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
|
||||
libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
|
||||
|
||||
FILES:${PN} = "${prefix}"
|
||||
|
||||
export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${exec_prefix}/lib/pkgconfig"
|
||||
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
|
||||
|
||||
do_populate_sysroot[stamp-extra-info] = ""
|
||||
do_packagedata[stamp-extra-info] = ""
|
||||
|
||||
USE_NLS = "${SDKUSE_NLS}"
|
||||
|
||||
# We have to us TARGET_ARCH but we care about the absolute value
|
||||
# and not any particular tune that is enabled.
|
||||
TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
|
||||
|
||||
PKGDATA_DIR = "${PKGDATA_DIR_SDK}"
|
||||
# If MLPREFIX is set by multilib code, shlibs
|
||||
# points to the wrong place so force it
|
||||
SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
|
||||
SHLIBSWORKDIR = "${PKGDATA_DIR}/nativesdk-shlibs2"
|
||||
|
||||
cross_canadian_bindirlinks () {
|
||||
for i in linux ${CANADIANEXTRAOS}
|
||||
do
|
||||
for v in ${CANADIANEXTRAVENDOR}
|
||||
do
|
||||
d=${D}${bindir}/../${TARGET_ARCH}$v-$i
|
||||
if [ -d $d ];
|
||||
then
|
||||
continue
|
||||
fi
|
||||
install -d $d
|
||||
for j in `ls ${D}${bindir}`
|
||||
do
|
||||
p=${TARGET_ARCH}$v-$i-`echo $j | sed -e s,${TARGET_PREFIX},,`
|
||||
ln -s ../${TARGET_SYS}/$j $d/$p
|
||||
done
|
||||
done
|
||||
done
|
||||
}
|
||||
103
sources/poky/meta/classes-recipe/cross.bbclass
Normal file
103
sources/poky/meta/classes-recipe/cross.bbclass
Normal file
@@ -0,0 +1,103 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit relocatable
|
||||
|
||||
# Cross packages are built indirectly via dependency,
|
||||
# no need for them to be a direct target of 'world'
|
||||
EXCLUDE_FROM_WORLD = "1"
|
||||
|
||||
CLASSOVERRIDE = "class-cross"
|
||||
PACKAGES = ""
|
||||
PACKAGES_DYNAMIC = ""
|
||||
PACKAGES_DYNAMIC:class-native = ""
|
||||
|
||||
HOST_ARCH = "${BUILD_ARCH}"
|
||||
HOST_VENDOR = "${BUILD_VENDOR}"
|
||||
HOST_OS = "${BUILD_OS}"
|
||||
HOST_PREFIX = "${BUILD_PREFIX}"
|
||||
HOST_CC_ARCH = "${BUILD_CC_ARCH}"
|
||||
HOST_LD_ARCH = "${BUILD_LD_ARCH}"
|
||||
HOST_AS_ARCH = "${BUILD_AS_ARCH}"
|
||||
|
||||
# No strip sysroot when DEBUG_BUILD is enabled
|
||||
INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
|
||||
|
||||
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
|
||||
|
||||
STAGING_DIR_HOST = "${RECIPE_SYSROOT_NATIVE}"
|
||||
|
||||
PACKAGE_ARCH = "${BUILD_ARCH}"
|
||||
|
||||
MULTIMACH_TARGET_SYS = "${BUILD_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
|
||||
|
||||
export PKG_CONFIG_DIR = "${exec_prefix}/lib/pkgconfig"
|
||||
export PKG_CONFIG_SYSROOT_DIR = ""
|
||||
|
||||
TARGET_CPPFLAGS = ""
|
||||
TARGET_CFLAGS = ""
|
||||
TARGET_CXXFLAGS = ""
|
||||
TARGET_LDFLAGS = ""
|
||||
|
||||
CPPFLAGS = "${BUILD_CPPFLAGS}"
|
||||
CFLAGS = "${BUILD_CFLAGS}"
|
||||
CXXFLAGS = "${BUILD_CFLAGS}"
|
||||
LDFLAGS = "${BUILD_LDFLAGS}"
|
||||
|
||||
TOOLCHAIN_OPTIONS = ""
|
||||
|
||||
# This class encodes staging paths into its scripts data so can only be
|
||||
# reused if we manipulate the paths.
|
||||
SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
|
||||
|
||||
# Path mangling needed by the cross packaging
|
||||
# Note that we use := here to ensure that libdir and includedir are
|
||||
# target paths.
|
||||
target_base_prefix := "${root_prefix}"
|
||||
target_prefix := "${prefix}"
|
||||
target_exec_prefix := "${exec_prefix}"
|
||||
target_base_libdir = "${target_base_prefix}/${baselib}"
|
||||
target_libdir = "${target_exec_prefix}/${baselib}"
|
||||
target_includedir := "${includedir}"
|
||||
|
||||
# Overrides for paths
|
||||
CROSS_TARGET_SYS_DIR = "${TARGET_SYS}"
|
||||
prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
|
||||
base_prefix = "${STAGING_DIR_NATIVE}"
|
||||
exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
|
||||
bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}"
|
||||
sbindir = "${bindir}"
|
||||
base_bindir = "${bindir}"
|
||||
base_sbindir = "${bindir}"
|
||||
libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
|
||||
libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
|
||||
|
||||
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
|
||||
do_packagedata[stamp-extra-info] = ""
|
||||
|
||||
USE_NLS = "no"
|
||||
|
||||
export CC = "${BUILD_CC}"
|
||||
export CXX = "${BUILD_CXX}"
|
||||
export FC = "${BUILD_FC}"
|
||||
export CPP = "${BUILD_CPP}"
|
||||
export LD = "${BUILD_LD}"
|
||||
export CCLD = "${BUILD_CCLD}"
|
||||
export AR = "${BUILD_AR}"
|
||||
export AS = "${BUILD_AS}"
|
||||
export RANLIB = "${BUILD_RANLIB}"
|
||||
export STRIP = "${BUILD_STRIP}"
|
||||
export NM = "${BUILD_NM}"
|
||||
|
||||
inherit nopackages
|
||||
|
||||
python do_addto_recipe_sysroot () {
    # Run the staging helper that populates this recipe's individual
    # sysroot from its dependencies' do_populate_sysroot output
    # (extend_recipe_sysroot is defined in staging.bbclass).
    bb.build.exec_func("extend_recipe_sysroot", d)
}
|
||||
addtask addto_recipe_sysroot after do_populate_sysroot
|
||||
do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
|
||||
|
||||
PATH:prepend = "${COREBASE}/scripts/cross-intercept:"
|
||||
57
sources/poky/meta/classes-recipe/crosssdk.bbclass
Normal file
57
sources/poky/meta/classes-recipe/crosssdk.bbclass
Normal file
@@ -0,0 +1,57 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit cross
|
||||
|
||||
CLASSOVERRIDE = "class-crosssdk"
|
||||
NATIVESDKLIBC ?= "libc-glibc"
|
||||
LIBCOVERRIDE = ":${NATIVESDKLIBC}"
|
||||
MACHINEOVERRIDES = ""
|
||||
PACKAGE_ARCH = "${SDK_ARCH}"
|
||||
|
||||
python () {
    # crosssdk tools run on the build host but target the SDK machine,
    # so package them under the SDK architecture.
    # set TUNE_PKGARCH to SDK_ARCH
    d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH'))
    # Set features here to prevent appends and distro features backfill
    # from modifying nativesdk distro features
    features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
    filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
    d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
}
|
||||
|
||||
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
|
||||
|
||||
# This class encodes staging paths into its scripts data so can only be
|
||||
# reused if we manipulate the paths.
|
||||
SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
|
||||
|
||||
TARGET_ARCH = "${SDK_ARCH}"
|
||||
TARGET_VENDOR = "${SDK_VENDOR}"
|
||||
TARGET_OS = "${SDK_OS}"
|
||||
TARGET_PREFIX = "${SDK_PREFIX}"
|
||||
TARGET_CC_ARCH = "${SDK_CC_ARCH}"
|
||||
TARGET_LD_ARCH = "${SDK_LD_ARCH}"
|
||||
TARGET_AS_ARCH = "${SDK_AS_ARCH}"
|
||||
TARGET_CPPFLAGS = ""
|
||||
TARGET_CFLAGS = ""
|
||||
TARGET_CXXFLAGS = ""
|
||||
TARGET_LDFLAGS = ""
|
||||
TARGET_FPU = ""
|
||||
|
||||
|
||||
target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
|
||||
target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
|
||||
target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
|
||||
target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
|
||||
target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
|
||||
baselib = "lib"
|
||||
|
||||
do_packagedata[stamp-extra-info] = ""
|
||||
|
||||
# Need to force this to ensure consistency across architectures
|
||||
EXTRA_OECONF_GCC_FLOAT = ""
|
||||
|
||||
USE_NLS = "no"
|
||||
18
sources/poky/meta/classes-recipe/deploy.bbclass
Normal file
18
sources/poky/meta/classes-recipe/deploy.bbclass
Normal file
@@ -0,0 +1,18 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
DEPLOYDIR = "${WORKDIR}/deploy-${PN}"
|
||||
SSTATETASKS += "do_deploy"
|
||||
do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
|
||||
do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
|
||||
|
||||
python do_deploy_setscene () {
    # Setscene variant of do_deploy: restore the deploy output from
    # shared state instead of re-running the task.
    sstate_setscene(d)
}
|
||||
addtask do_deploy_setscene
|
||||
do_deploy[dirs] = "${B}"
|
||||
do_deploy[cleandirs] = "${DEPLOYDIR}"
|
||||
do_deploy[stamp-extra-info] = "${MACHINE_ARCH}"
|
||||
159
sources/poky/meta/classes-recipe/devicetree.bbclass
Normal file
159
sources/poky/meta/classes-recipe/devicetree.bbclass
Normal file
@@ -0,0 +1,159 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# This bbclass implements device tree compilation for user provided device tree
|
||||
# sources. The compilation of the device tree sources is the same as the kernel
|
||||
# device tree compilation process, this includes being able to include sources
|
||||
# from the kernel such as soc dtsi files or header files such as gpio.h. In
|
||||
# addition to device trees this bbclass also handles compilation of device tree
|
||||
# overlays.
|
||||
#
|
||||
# The output of this class behaves similar to how kernel-devicetree.bbclass
|
||||
# operates in that the output files are installed into /boot/devicetree.
|
||||
# However this class on purpose separates the deployed device trees into the
|
||||
# 'devicetree' subdirectory. This prevents clashes with the kernel-devicetree
|
||||
# output. Additionally the device trees are populated into the sysroot for
|
||||
# access via the sysroot from within other recipes.
|
||||
|
||||
SECTION ?= "bsp"
|
||||
|
||||
# The default inclusion of kernel device tree includes and headers means that
|
||||
# device trees built with them are at least GPL-2.0-only (and in some cases dual
|
||||
# licensed). Default to GPL-2.0-only if the recipe does not specify a license.
|
||||
LICENSE ?= "GPL-2.0-only"
|
||||
LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0-only;md5=801f80980d171dd6425610833a22dbe6"
|
||||
|
||||
INHIBIT_DEFAULT_DEPS = "1"
|
||||
DEPENDS += "dtc-native"
|
||||
|
||||
inherit deploy kernel-arch
|
||||
|
||||
COMPATIBLE_MACHINE ?= "^$"
|
||||
|
||||
PROVIDES = "virtual/dtb"
|
||||
|
||||
PACKAGE_ARCH = "${MACHINE_ARCH}"
|
||||
|
||||
SYSROOT_DIRS += "/boot/devicetree"
|
||||
FILES:${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
|
||||
|
||||
S = "${WORKDIR}"
|
||||
B = "${WORKDIR}/build"
|
||||
|
||||
# Default kernel includes, these represent what are normally used for in-kernel
|
||||
# sources.
|
||||
KERNEL_INCLUDE ??= " \
|
||||
${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts \
|
||||
${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts/* \
|
||||
${STAGING_KERNEL_DIR}/scripts/dtc/include-prefixes \
|
||||
"
|
||||
|
||||
DT_INCLUDE[doc] = "Search paths to be made available to both the device tree compiler and preprocessor for inclusion."
|
||||
DT_INCLUDE ?= "${DT_FILES_PATH} ${KERNEL_INCLUDE}"
|
||||
DT_FILES_PATH[doc] = "Path to the directory containing dts files to build. Defaults to source directory."
|
||||
DT_FILES_PATH ?= "${S}"
|
||||
DT_FILES[doc] = "Space-separated list of dts or dtb files (relative to DT_FILES_PATH) to build. If empty, all dts files are built."
|
||||
DT_FILES ?= ""
|
||||
|
||||
DT_PADDING_SIZE[doc] = "Size of padding on the device tree blob, used as extra space typically for additional properties during boot."
|
||||
DT_PADDING_SIZE ??= "0x3000"
|
||||
DT_RESERVED_MAP[doc] = "Number of reserved map entries."
|
||||
DT_RESERVED_MAP ??= "8"
|
||||
DT_BOOT_CPU[doc] = "The boot cpu, defaults to 0"
|
||||
DT_BOOT_CPU ??= "0"
|
||||
|
||||
DTC_FLAGS ?= "-R ${DT_RESERVED_MAP} -b ${DT_BOOT_CPU}"
|
||||
DTC_PPFLAGS ?= "-nostdinc -undef -D__DTS__ -x assembler-with-cpp"
|
||||
DTC_BFLAGS ?= "-p ${DT_PADDING_SIZE} -@"
|
||||
DTC_OFLAGS ?= "-p 0 -@ -H epapr"
|
||||
|
||||
python () {
    # Kernel includes (dtsi/headers) require a configured kernel tree.
    if d.getVar("KERNEL_INCLUDE"):
        # auto add dependency on kernel tree, but only if kernel include paths
        # are specified.
        d.appendVarFlag("do_compile", "depends", " virtual/kernel:do_configure")
}
|
||||
|
||||
def expand_includes(varname, d):
    """Expand the glob patterns listed in variable *varname* into the set
    of matching directories (non-directory matches are discarded, since
    only directories are usable as include search paths)."""
    import glob
    patterns = (d.getVar(varname) or "").split()
    return {match
            for pattern in patterns
            for match in glob.glob(pattern)
            if os.path.isdir(match)}
|
||||
|
||||
def devicetree_source_is_overlay(path):
    """Return True if the device tree source at *path* is an overlay.

    Overlays are detected by the presence of a line beginning with the
    "/plugin/;" directive.
    """
    with open(path, "r") as source:
        return any(line.startswith("/plugin/;") for line in source)
|
||||
|
||||
def devicetree_compile(dtspath, includes, d):
    """Preprocess and compile one device tree source file.

    Runs the C preprocessor (BUILD_CPP) over *dtspath* with *includes* as
    search paths, then invokes dtc on the preprocessed output. Overlay
    sources (containing "/plugin/;") are compiled to <name>.dtbo with
    DTC_OFLAGS; regular sources to <name>.dtb with DTC_BFLAGS. Output is
    written to the current working directory. Raises CalledProcessError
    if either tool fails (check=True).
    """
    import subprocess
    dts = os.path.basename(dtspath)
    dtname = os.path.splitext(dts)[0]
    bb.note("Processing {0} [{1}]".format(dtname, dts))

    # preprocess
    ppargs = d.getVar("BUILD_CPP").split()
    ppargs += (d.getVar("DTC_PPFLAGS") or "").split()
    for i in includes:
        ppargs.append("-I{0}".format(i))
    ppargs += ["-o", "{0}.pp".format(dts), dtspath]
    bb.note("Running {0}".format(" ".join(ppargs)))
    subprocess.run(ppargs, check = True)

    # determine if the file is an overlay or not (using the preprocessed file)
    isoverlay = devicetree_source_is_overlay("{0}.pp".format(dts))

    # compile
    dtcargs = ["dtc"] + (d.getVar("DTC_FLAGS") or "").split()
    if isoverlay:
        dtcargs += (d.getVar("DTC_OFLAGS") or "").split()
    else:
        dtcargs += (d.getVar("DTC_BFLAGS") or "").split()
    for i in includes:
        dtcargs += ["-i", i]
    dtcargs += ["-o", "{0}.{1}".format(dtname, "dtbo" if isoverlay else "dtb")]
    dtcargs += ["-I", "dts", "-O", "dtb", "{0}.pp".format(dts)]
    bb.note("Running {0}".format(" ".join(dtcargs)))
    subprocess.run(dtcargs, check = True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
|
||||
python devicetree_do_compile() {
    # Compile every requested device tree source found in DT_FILES_PATH.
    import re
    includes = expand_includes("DT_INCLUDE", d)
    # DT_FILES may name .dtb/.dtbo outputs; map them back to the .dts
    # sources they are built from.
    dtfiles = d.getVar("DT_FILES").split()
    dtfiles = [ re.sub(r"\.dtbo?$", ".dts", dtfile) for dtfile in dtfiles ]
    listpath = d.getVar("DT_FILES_PATH")
    # When DT_FILES is empty, fall back to every file in DT_FILES_PATH.
    for dts in dtfiles or os.listdir(listpath):
        dtspath = os.path.join(listpath, dts)
        try:
            if not(os.path.isfile(dtspath)) or not(dts.endswith(".dts") or devicetree_source_is_overlay(dtspath)):
                continue # skip non-.dts files and non-overlay files
        except:
            continue # skip if can't determine if overlay
        devicetree_compile(dtspath, includes, d)
}
|
||||
|
||||
devicetree_do_install() {
    # Install every compiled blob into /boot/devicetree — kept separate
    # from kernel-devicetree output to avoid filename clashes.
    for DTB_FILE in `ls *.dtb *.dtbo`; do
        install -Dm 0644 ${B}/${DTB_FILE} ${D}/boot/devicetree/${DTB_FILE}
    done
}
|
||||
|
||||
devicetree_do_deploy() {
    # Stage every compiled blob into the deploy directory under devicetree/.
    for DTB_FILE in `ls *.dtb *.dtbo`; do
        install -Dm 0644 ${B}/${DTB_FILE} ${DEPLOYDIR}/devicetree/${DTB_FILE}
    done
}
|
||||
addtask deploy before do_build after do_install
|
||||
|
||||
EXPORT_FUNCTIONS do_compile do_install do_deploy
|
||||
|
||||
61
sources/poky/meta/classes-recipe/devupstream.bbclass
Normal file
61
sources/poky/meta/classes-recipe/devupstream.bbclass
Normal file
@@ -0,0 +1,61 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Class for use in BBCLASSEXTEND to make it easier to have a single recipe that
|
||||
# can build both stable tarballs and snapshots from upstream source
|
||||
# repositories.
|
||||
#
|
||||
# Usage:
|
||||
# BBCLASSEXTEND = "devupstream:target"
|
||||
# SRC_URI:class-devupstream = "git://git.example.com/example;branch=master"
|
||||
# SRCREV:class-devupstream = "abcdef"
|
||||
#
|
||||
# If the first entry in SRC_URI is a git: URL then S is rewritten to
|
||||
# WORKDIR/git.
|
||||
#
|
||||
# There are a few caveats that remain to be solved:
|
||||
# - You can't build native or nativesdk recipes using for example
|
||||
# devupstream:native, you can only build target recipes.
|
||||
# - If the fetcher requires native tools (such as subversion-native) then
|
||||
# bitbake won't be able to add them automatically.
|
||||
|
||||
python devupstream_virtclass_handler () {
    # Event handler (RecipePreFinalise) implementing the devupstream
    # BBCLASSEXTEND variant: rewrites SRC_URI/S/PV/PN so one recipe can
    # build both a stable release and an upstream snapshot.
    # Do nothing if this is inherited, as it's for BBCLASSEXTEND
    if "devupstream" not in (d.getVar('BBCLASSEXTEND') or ""):
        bb.error("Don't inherit devupstream, use BBCLASSEXTEND")
        return

    variant = d.getVar("BBEXTENDVARIANT")
    if variant not in ("target", "native"):
        bb.error("Unsupported variant %s. Pass the variant when using devupstream, for example devupstream:target" % variant)
        return

    # Development releases are never preferred by default
    d.setVar("DEFAULT_PREFERENCE", "-1")

    src_uri = d.getVar("SRC_URI:class-devupstream") or d.getVar("SRC_URI")
    uri = bb.fetch2.URI(src_uri.split()[0])

    # git fetches unpack into ${WORKDIR}/git unless the recipe set S itself.
    if uri.scheme == "git" and not d.getVar("S:class-devupstream"):
        d.setVar("S", "${WORKDIR}/git")

    # Modify the PV if the recipe hasn't already overridden it
    pv = d.getVar("PV")
    proto_marker = "+" + uri.scheme
    if proto_marker not in pv and not d.getVar("PV:class-devupstream"):
        d.setVar("PV", pv + proto_marker)

    if variant == "native":
        pn = d.getVar("PN")
        d.setVar("PN", "%s-native" % (pn))
        fn = d.getVar("FILE")
        bb.parse.BBHandler.inherit("native", fn, 0, d)

    d.appendVar("CLASSOVERRIDE", ":class-devupstream")
}
|
||||
|
||||
addhandler devupstream_virtclass_handler
|
||||
devupstream_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
|
||||
@@ -0,0 +1,13 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Temporarily provide fallback to the old name of the class
|
||||
|
||||
python __anonymous() {
    # Deprecation shim: warn, then fall through to features_check (inherited
    # below) so existing users keep working.
    bb.warn("distro_features_check.bbclass is deprecated, please use features_check.bbclass instead")
}
|
||||
|
||||
inherit features_check
|
||||
20
sources/poky/meta/classes-recipe/dos2unix.bbclass
Normal file
20
sources/poky/meta/classes-recipe/dos2unix.bbclass
Normal file
@@ -0,0 +1,20 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Class for use to convert all CRLF line terminators to LF
|
||||
# provided that some projects are being developed/maintained
|
||||
# on Windows so they have different line terminators(CRLF) vs
|
||||
# on Linux(LF), which can cause annoying patching errors during
|
||||
# git push/checkout processes.
|
||||
|
||||
do_convert_crlf_to_lf[depends] += "dos2unix-native:do_populate_sysroot"
|
||||
|
||||
# Convert CRLF line terminators to LF
|
||||
do_convert_crlf_to_lf () {
    # Normalize every file in ${S} to LF line endings; scheduled after
    # do_unpack and before do_patch so patches apply cleanly.
    find ${S} -type f -exec dos2unix {} \;
}
|
||||
|
||||
addtask convert_crlf_to_lf after do_unpack before do_patch
|
||||
57
sources/poky/meta/classes-recipe/features_check.bbclass
Normal file
57
sources/poky/meta/classes-recipe/features_check.bbclass
Normal file
@@ -0,0 +1,57 @@
|
||||
# Allow checking of required and conflicting features
|
||||
#
|
||||
# xxx = [DISTRO,MACHINE,COMBINED,IMAGE]
|
||||
#
|
||||
# ANY_OF_xxx_FEATURES: ensure at least one item on this list is included
|
||||
# in xxx_FEATURES.
|
||||
# REQUIRED_xxx_FEATURES: ensure every item on this list is included
|
||||
# in xxx_FEATURES.
|
||||
# CONFLICT_xxx_FEATURES: ensure no item in this list is included in
|
||||
# xxx_FEATURES.
|
||||
#
|
||||
# Copyright 2019 (C) Texas Instruments Inc.
|
||||
# Copyright 2013 (C) O.S. Systems Software LTDA.
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
|
||||
python () {
    # Skip this recipe unless the ANY_OF_/REQUIRED_/CONFLICT_ feature
    # constraints against DISTRO/MACHINE/COMBINED/IMAGE _FEATURES hold.
    if d.getVar('PARSE_ALL_RECIPES', False):
        return

    unused = True

    for kind in ['DISTRO', 'MACHINE', 'COMBINED', 'IMAGE']:
        # Skip this kind entirely when none of its three constraint
        # variables is set (plain or via overrides).
        if d.getVar('ANY_OF_' + kind + '_FEATURES') is None and not d.hasOverrides('ANY_OF_' + kind + '_FEATURES') and \
           d.getVar('REQUIRED_' + kind + '_FEATURES') is None and not d.hasOverrides('REQUIRED_' + kind + '_FEATURES') and \
           d.getVar('CONFLICT_' + kind + '_FEATURES') is None and not d.hasOverrides('CONFLICT_' + kind + '_FEATURES'):
            continue

        unused = False

        # Assume at least one var is set.
        features = set((d.getVar(kind + '_FEATURES') or '').split())

        any_of_features = set((d.getVar('ANY_OF_' + kind + '_FEATURES') or '').split())
        if any_of_features:
            if set.isdisjoint(any_of_features, features):
                raise bb.parse.SkipRecipe("one of '%s' needs to be in %s_FEATURES"
                    % (' '.join(any_of_features), kind))

        required_features = set((d.getVar('REQUIRED_' + kind + '_FEATURES') or '').split())
        if required_features:
            missing = set.difference(required_features, features)
            if missing:
                raise bb.parse.SkipRecipe("missing required %s feature%s '%s' (not in %s_FEATURES)"
                    % (kind.lower(), 's' if len(missing) > 1 else '', ' '.join(missing), kind))

        conflict_features = set((d.getVar('CONFLICT_' + kind + '_FEATURES') or '').split())
        if conflict_features:
            conflicts = set.intersection(conflict_features, features)
            if conflicts:
                raise bb.parse.SkipRecipe("conflicting %s feature%s '%s' (in %s_FEATURES)"
                    % (kind.lower(), 's' if len(conflicts) > 1 else '', ' '.join(conflicts), kind))

    if unused:
        bb.warn("Recipe inherits features_check but doesn't use it")
}
|
||||
64
sources/poky/meta/classes-recipe/fontcache.bbclass
Normal file
64
sources/poky/meta/classes-recipe/fontcache.bbclass
Normal file
@@ -0,0 +1,64 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
#
|
||||
# This class will generate the proper postinst/postrm scriptlets for font
|
||||
# packages.
|
||||
#
|
||||
|
||||
PACKAGE_WRITE_DEPS += "qemu-native"
|
||||
inherit qemu
|
||||
|
||||
FONT_PACKAGES ??= "${PN}"
|
||||
FONT_PACKAGES:class-native = ""
|
||||
FONT_EXTRA_RDEPENDS ?= "${MLPREFIX}fontconfig-utils"
|
||||
FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
|
||||
FONTCONFIG_CACHE_PARAMS ?= "-v"
|
||||
# You can change this to e.g. FC_DEBUG=16 to debug fc-cache issues,
|
||||
# something has to be set, because qemuwrapper is using this variable after -E
|
||||
# multiple variables aren't allowed because for qemu they are separated
|
||||
# by comma and in -n "$D" case they should be separated by space
|
||||
FONTCONFIG_CACHE_ENV ?= "FC_DEBUG=1"
|
||||
fontcache_common() {
    # Rebuild the fontconfig cache. At rootfs-construction time ($D set)
    # defer to the qemu-backed postinst intercept; on target run fc-cache
    # directly.
    if [ -n "$D" ] ; then
        $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} \
            'bindir="${bindir}"' \
            'libdir="${libdir}"' \
            'libexecdir="${libexecdir}"' \
            'base_libdir="${base_libdir}"' \
            'fontconfigcachedir="${FONTCONFIG_CACHE_DIR}"' \
            'fontconfigcacheparams="${FONTCONFIG_CACHE_PARAMS}"' \
            'fontconfigcacheenv="${FONTCONFIG_CACHE_ENV}"'
    else
        ${FONTCONFIG_CACHE_ENV} fc-cache ${FONTCONFIG_CACHE_PARAMS}
    fi
}
|
||||
|
||||
python () {
    # Attach FONT_EXTRA_RDEPENDS to the runtime dependencies of every
    # declared font package.
    font_pkgs = d.getVar('FONT_PACKAGES').split()
    extra_rdeps = d.getVar("FONT_EXTRA_RDEPENDS")
    if extra_rdeps:
        for pkg in font_pkgs:
            d.appendVar('RDEPENDS:' + pkg, ' ' + extra_rdeps)
}
|
||||
|
||||
python add_fontcache_postinsts() {
    # Append the fontcache_common refresh scriptlet to each font package's
    # postinst and postrm, creating the scripts when absent.
    for pkg in d.getVar('FONT_PACKAGES').split():
        bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
        postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
        if not postinst:
            postinst = '#!/bin/sh\n'
        postinst += d.getVar('fontcache_common')
        d.setVar('pkg_postinst:%s' % pkg, postinst)

        postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
        if not postrm:
            postrm = '#!/bin/sh\n'
        postrm += d.getVar('fontcache_common')
        d.setVar('pkg_postrm:%s' % pkg, postrm)
}
|
||||
|
||||
PACKAGEFUNCS =+ "add_fontcache_postinsts"
|
||||
30
sources/poky/meta/classes-recipe/fs-uuid.bbclass
Normal file
30
sources/poky/meta/classes-recipe/fs-uuid.bbclass
Normal file
@@ -0,0 +1,30 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Extract UUID from ${ROOTFS}, which must have been built
|
||||
# by the time that this function gets called. Only works
|
||||
# on ext file systems and depends on tune2fs.
|
||||
def get_rootfs_uuid(d):
    """Return the filesystem UUID of ${ROOTFS} as reported by tune2fs.

    Only works on ext filesystems; ${ROOTFS} must already have been built
    when this runs. Fails the build (bb.fatal) if tune2fs reports no
    'Filesystem UUID:' line.
    """
    import subprocess
    rootfs = d.getVar('ROOTFS')
    output = subprocess.check_output(['tune2fs', '-l', rootfs], text=True)
    for line in output.split('\n'):
        if line.startswith('Filesystem UUID:'):
            uuid = line.split()[-1]
            bb.note('UUID of %s: %s' % (rootfs, uuid))
            return uuid
    bb.fatal('Could not determine filesystem UUID of %s' % rootfs)
|
||||
|
||||
# Replace the special <<uuid-of-rootfs>> inside a string (like the
|
||||
# root= APPEND string in a syslinux.cfg or systemd-boot entry) with the
|
||||
# actual UUID of the rootfs. Does nothing if the special string
|
||||
# is not used.
|
||||
def replace_rootfs_uuid(d, string):
    """Substitute the literal '<<uuid-of-rootfs>>' placeholder in *string*
    with the actual UUID of ${ROOTFS}. When the placeholder does not occur,
    *string* is returned unchanged and the rootfs is never inspected."""
    placeholder = '<<uuid-of-rootfs>>'
    if placeholder not in string:
        return string
    return string.replace(placeholder, get_rootfs_uuid(d))
|
||||
77
sources/poky/meta/classes-recipe/gconf.bbclass
Normal file
77
sources/poky/meta/classes-recipe/gconf.bbclass
Normal file
@@ -0,0 +1,77 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
DEPENDS += "gconf"
|
||||
PACKAGE_WRITE_DEPS += "gconf-native"
|
||||
|
||||
# These are for when gconftool is used natively and the prefix isn't necessarily
|
||||
# the sysroot. TODO: replicate the postinst logic for -native packages going
|
||||
# into sysroot as they won't be running their own install-time schema
|
||||
# registration (disabled below) nor the postinst script (as they don't happen).
|
||||
export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults"
|
||||
export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2"
|
||||
|
||||
# Disable install-time schema registration as we're a packaging system so this
|
||||
# happens in the postinst script, not at install time. Set both the configure
|
||||
# script option and the traditional environment variable just to make sure.
|
||||
EXTRA_OECONF += "--disable-schemas-install"
|
||||
export GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL = "1"
|
||||
|
||||
gconf_postinst() {
    # Register the package's GConf schemas via gconftool-2. During image
    # construction ($D set) point the config source at the staged defaults
    # database instead of the host's.
    if [ "x$D" != "x" ]; then
        export GCONF_CONFIG_SOURCE="xml::$D${sysconfdir}/gconf/gconf.xml.defaults"
    else
        export GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source`
    fi

    SCHEMA_LOCATION=$D/etc/gconf/schemas
    for SCHEMA in ${SCHEMA_FILES}; do
        if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
            HOME=$D/root gconftool-2 \
                --makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
        fi
    done
}
|
||||
|
||||
gconf_prerm() {
    # Unregister the package's GConf schemas before removal (runs on
    # target only, so paths are absolute).
    SCHEMA_LOCATION=/etc/gconf/schemas
    for SCHEMA in ${SCHEMA_FILES}; do
        if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
            HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
                gconftool-2 \
                --makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
        fi
    done
}
|
||||
|
||||
python populate_packages:append () {
    # For every package shipping *.schemas under /etc/gconf/schemas, attach
    # postinst/prerm scriptlets that (un)register the schemas and add a
    # runtime dependency on gconf.
    import re
    packages = d.getVar('PACKAGES').split()
    pkgdest = d.getVar('PKGDEST')

    for pkg in packages:
        schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
        schemas = []
        schema_re = re.compile(r".*\.schemas$")
        if os.path.exists(schema_dir):
            for f in os.listdir(schema_dir):
                if schema_re.match(f):
                    schemas.append(f)
        if schemas != []:
            bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
            # SCHEMA_FILES is consumed by the gconf_postinst/gconf_prerm
            # scriptlets appended below.
            d.setVar('SCHEMA_FILES', " ".join(schemas))
            postinst = d.getVar('pkg_postinst:%s' % pkg)
            if not postinst:
                postinst = '#!/bin/sh\n'
            postinst += d.getVar('gconf_postinst')
            d.setVar('pkg_postinst:%s' % pkg, postinst)
            prerm = d.getVar('pkg_prerm:%s' % pkg)
            if not prerm:
                prerm = '#!/bin/sh\n'
            prerm += d.getVar('gconf_prerm')
            d.setVar('pkg_prerm:%s' % pkg, prerm)
            d.appendVar("RDEPENDS:%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
}
|
||||
28
sources/poky/meta/classes-recipe/gettext.bbclass
Normal file
28
sources/poky/meta/classes-recipe/gettext.bbclass
Normal file
@@ -0,0 +1,28 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
def gettext_dependencies(d):
    """Return the gettext build dependency this recipe needs.

    Recipes inhibiting default dependencies (other than cross-canadian
    ones) get none; NLS-disabled builds only need the minimal native
    gettext; everything else gets the full native gettext.
    """
    if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
        return ""
    nls_disabled = d.getVar('USE_NLS') == 'no'
    return "gettext-minimal-native" if nls_disabled else "gettext-native"
|
||||
|
||||
def gettext_oeconf(d):
    """Return the configure switch controlling native language support."""
    if d.getVar('USE_NLS') == 'no':
        return '--disable-nls'
    # NLS is also disabled when default dependencies are inhibited (gettext
    # would not be available), except for cross-canadian recipes.
    if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
        return '--disable-nls'
    return "--enable-nls"
|
||||
|
||||
BASEDEPENDS:append = " ${@gettext_dependencies(d)}"
|
||||
EXTRA_OECONF:append = " ${@gettext_oeconf(d)}"
|
||||
|
||||
# Without this, msgfmt from gettext-native will not find ITS files
|
||||
# provided by target recipes (for example, polkit.its).
|
||||
GETTEXTDATADIRS:append:class-target = ":${STAGING_DATADIR}/gettext"
|
||||
export GETTEXTDATADIRS
|
||||
32
sources/poky/meta/classes-recipe/gi-docgen.bbclass
Normal file
32
sources/poky/meta/classes-recipe/gi-docgen.bbclass
Normal file
@@ -0,0 +1,32 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# gi-docgen is a new gnome documentation generator, which
|
||||
# seems to be a successor to gtk-doc:
|
||||
# https://gitlab.gnome.org/GNOME/gi-docgen
|
||||
|
||||
# True if api-documentation and gobject-introspection-data are in DISTRO_FEATURES,
|
||||
# and qemu-user is in MACHINE_FEATURES, False otherwise.
|
||||
GIDOCGEN_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation gobject-introspection-data', \
|
||||
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
|
||||
|
||||
# When building native recipes, disable gi-docgen, as it is not necessary,
|
||||
# pulls in additional dependencies, and makes build times longer
|
||||
GIDOCGEN_ENABLED:class-native = "False"
|
||||
GIDOCGEN_ENABLED:class-nativesdk = "False"
|
||||
|
||||
# meson: default option name to enable/disable gi-docgen. This matches most
|
||||
# projects' configuration. In doubts - check meson_options.txt in project's
|
||||
# source path.
|
||||
GIDOCGEN_MESON_OPTION ?= 'gtk_doc'
|
||||
GIDOCGEN_MESON_ENABLE_FLAG ?= 'true'
|
||||
GIDOCGEN_MESON_DISABLE_FLAG ?= 'false'
|
||||
|
||||
# Auto enable/disable based on GIDOCGEN_ENABLED
|
||||
EXTRA_OEMESON:prepend = "-D${GIDOCGEN_MESON_OPTION}=${@bb.utils.contains('GIDOCGEN_ENABLED', 'True', '${GIDOCGEN_MESON_ENABLE_FLAG}', '${GIDOCGEN_MESON_DISABLE_FLAG}', d)} "
|
||||
|
||||
DEPENDS:append = "${@' gi-docgen-native gi-docgen' if d.getVar('GIDOCGEN_ENABLED') == 'True' else ''}"
|
||||
|
||||
44
sources/poky/meta/classes-recipe/gio-module-cache.bbclass
Normal file
44
sources/poky/meta/classes-recipe/gio-module-cache.bbclass
Normal file
@@ -0,0 +1,44 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
PACKAGE_WRITE_DEPS += "qemu-native"
|
||||
inherit qemu
|
||||
|
||||
GIO_MODULE_PACKAGES ??= "${PN}"
|
||||
|
||||
gio_module_cache_common() {
    # Refresh the GIO module cache: via the qemu-backed postinst intercept
    # during image construction ($D set), or directly with gio-querymodules
    # on target.
    if [ "x$D" != "x" ]; then
        $INTERCEPT_DIR/postinst_intercept update_gio_module_cache ${PKG} \
                mlprefix=${MLPREFIX} \
                binprefix=${MLPREFIX} \
                libdir=${libdir} \
                libexecdir=${libexecdir} \
                base_libdir=${base_libdir} \
                bindir=${bindir}
    else
        ${libexecdir}/${MLPREFIX}gio-querymodules ${libdir}/gio/modules/
    fi
}
|
||||
|
||||
python populate_packages:append () {
    # Attach the GIO module cache refresh scriptlet to each module
    # package's postinst and postrm, creating the scripts when absent.
    packages = d.getVar('GIO_MODULE_PACKAGES').split()

    for pkg in packages:
        bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg)

        postinst = d.getVar('pkg_postinst:%s' % pkg)
        if not postinst:
            postinst = '#!/bin/sh\n'
        postinst += d.getVar('gio_module_cache_common')
        d.setVar('pkg_postinst:%s' % pkg, postinst)

        postrm = d.getVar('pkg_postrm:%s' % pkg)
        if not postrm:
            postrm = '#!/bin/sh\n'
        postrm += d.getVar('gio_module_cache_common')
        d.setVar('pkg_postrm:%s' % pkg, postrm)
}
|
||||
|
||||
3
sources/poky/meta/classes-recipe/github-releases.bbclass
Normal file
3
sources/poky/meta/classes-recipe/github-releases.bbclass
Normal file
@@ -0,0 +1,3 @@
|
||||
GITHUB_BASE_URI ?= "https://github.com/${BPN}/${BPN}/releases/"
|
||||
UPSTREAM_CHECK_URI ?= "${GITHUB_BASE_URI}"
|
||||
UPSTREAM_CHECK_REGEX ?= "releases/tag/v?(?P<pver>\d+(\.\d+)+)"
|
||||
38
sources/poky/meta/classes-recipe/gnomebase.bbclass
Normal file
38
sources/poky/meta/classes-recipe/gnomebase.bbclass
Normal file
@@ -0,0 +1,38 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
def gnome_verdir(v):
    """Return the GNOME mirror directory for version *v*: the version with
    its final component dropped, or *v* itself when there is only one
    component (e.g. "3.28.1" -> "3.28", "40" -> "40")."""
    components = v.split(".")
    parent = ".".join(components[:-1])
    return parent if parent else v
|
||||
|
||||
|
||||
GNOME_COMPRESS_TYPE ?= "xz"
|
||||
SECTION ?= "x11/gnome"
|
||||
GNOMEBN ?= "${BPN}"
|
||||
SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
|
||||
|
||||
FILES:${PN} += "${datadir}/application-registry \
|
||||
${datadir}/mime-info \
|
||||
${datadir}/mime/packages \
|
||||
${datadir}/mime/application \
|
||||
${datadir}/gnome-2.0 \
|
||||
${datadir}/polkit* \
|
||||
${datadir}/GConf \
|
||||
${datadir}/glib-2.0/schemas \
|
||||
${datadir}/appdata \
|
||||
${datadir}/icons \
|
||||
"
|
||||
|
||||
FILES:${PN}-doc += "${datadir}/devhelp"
|
||||
|
||||
GNOMEBASEBUILDCLASS ??= "meson"
|
||||
inherit pkgconfig
|
||||
inherit_defer ${GNOMEBASEBUILDCLASS}
|
||||
|
||||
do_install:append() {
|
||||
rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
|
||||
rm -rf ${D}${localstatedir}/scrollkeeper/*
|
||||
rm -f ${D}${datadir}/applications/*.cache
|
||||
}
|
||||
30
sources/poky/meta/classes-recipe/go-mod.bbclass
Normal file
30
sources/poky/meta/classes-recipe/go-mod.bbclass
Normal file
@@ -0,0 +1,30 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Handle Go Modules support
|
||||
#
|
||||
# When using Go Modules, the current working directory MUST be at or below
|
||||
# the location of the 'go.mod' file when the go tool is used, and there is no
|
||||
# way to tell it to look elsewhere. It will automatically look upwards for the
|
||||
# file, but not downwards.
|
||||
#
|
||||
# To support this use case, we provide the `GO_WORKDIR` variable, which defaults
|
||||
# to `GO_IMPORT` but allows for easy override.
|
||||
#
|
||||
# Copyright 2020 (C) O.S. Systems Software LTDA.
|
||||
|
||||
# The '-modcacherw' option ensures we have write access to the cached objects so
|
||||
# we avoid errors during clean task as well as when removing the TMPDIR.
|
||||
GOBUILDFLAGS:append = " -modcacherw"
|
||||
|
||||
inherit go
|
||||
|
||||
GO_WORKDIR ?= "${GO_IMPORT}"
|
||||
do_compile[dirs] += "${B}/src/${GO_WORKDIR}"
|
||||
|
||||
export GOMODCACHE = "${B}/.mod"
|
||||
|
||||
do_compile[cleandirs] += "${B}/.mod"
|
||||
60
sources/poky/meta/classes-recipe/go-ptest.bbclass
Normal file
60
sources/poky/meta/classes-recipe/go-ptest.bbclass
Normal file
@@ -0,0 +1,60 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit go ptest
|
||||
|
||||
do_compile_ptest_base() {
    # Build the test binary for every Go package in the source tree and
    # record each produced *.test file in ${B}/.go_compiled_tests.list
    # for do_install_ptest_base to pick up.
    export TMPDIR="${GOTMPDIR}"
    rm -f ${B}/.go_compiled_tests.list
    go_list_package_tests | while read pkg; do
        cd ${B}/src/$pkg
        ${GO} test ${GOPTESTBUILDFLAGS} $pkg
        find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \
            sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list
    done
    do_compile_ptest
}
|
||||
|
||||
do_compile_ptest_base[dirs] =+ "${GOTMPDIR}"
|
||||
|
||||
go_make_ptest_wrapper() {
	# Emit the header of the on-target run-ptest harness. The heredoc body
	# is the target script, so backslash escapes (\$, \\1) are resolved on
	# the target, not at build time — do not "simplify" them.
	# run_test runs one compiled test binary from its own directory, tags
	# PASS/SKIP/FAIL lines with the test's path, and preserves the binary's
	# exit status via the fd 3/4 plumbing.
	cat >${D}${PTEST_PATH}/run-ptest <<EOF
#!/bin/sh
RC=0
run_test() (
    cd "\$1"
    ((((./\$2 ${GOPTESTFLAGS}; echo \$? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)\$,\\1: \$1/\$2," >&4) 3>&1) | (read rc; exit \$rc)) 4>&1
    exit \$?)
EOF

}
|
||||
|
||||
do_install_ptest_base() {
	# Nothing to install if do_compile_ptest_base recorded no test binaries.
	test -f "${B}/.go_compiled_tests.list" || exit 0
	install -d ${D}${PTEST_PATH}
	go_stage_testdata
	go_make_ptest_wrapper
	havetests=""
	# Install each compiled test binary under PTEST_PATH, mirroring its
	# package-relative directory, and append an invocation to run-ptest.
	while read test; do
		testdir=`dirname $test`
		testprog=`basename $test`
		install -d ${D}${PTEST_PATH}/$testdir
		install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
		echo "run_test $testdir $testprog || RC=1" >> ${D}${PTEST_PATH}/run-ptest
		havetests="yes"
	done < ${B}/.go_compiled_tests.list
	if [ -n "$havetests" ]; then
		echo "exit \$RC" >> ${D}${PTEST_PATH}/run-ptest
		chmod +x ${D}${PTEST_PATH}/run-ptest
	else
		# No tests were actually compiled; don't ship an empty ptest dir.
		rm -rf ${D}${PTEST_PATH}
	fi
	do_install_ptest
	chown -R root:root ${D}${PTEST_PATH}
}
|
||||
|
||||
INSANE_SKIP:${PN}-ptest += "ldflags"
|
||||
|
||||
169
sources/poky/meta/classes-recipe/go.bbclass
Normal file
169
sources/poky/meta/classes-recipe/go.bbclass
Normal file
@@ -0,0 +1,169 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit goarch
|
||||
inherit linuxloader
|
||||
|
||||
GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}"
|
||||
|
||||
export GODEBUG = "gocachehash=1"
|
||||
|
||||
GOROOT:class-native = "${STAGING_LIBDIR_NATIVE}/go"
|
||||
GOROOT:class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
|
||||
GOROOT = "${STAGING_LIBDIR}/go"
|
||||
export GOROOT
|
||||
export GOROOT_FINAL = "${libdir}/go"
|
||||
export GOCACHE = "${B}/.cache"
|
||||
|
||||
export GOARCH = "${TARGET_GOARCH}"
|
||||
export GOOS = "${TARGET_GOOS}"
|
||||
export GOHOSTARCH="${BUILD_GOARCH}"
|
||||
export GOHOSTOS="${BUILD_GOOS}"
|
||||
|
||||
GOARM[export] = "0"
|
||||
GOARM:arm:class-target = "${TARGET_GOARM}"
|
||||
GOARM:arm:class-target[export] = "1"
|
||||
|
||||
GO386[export] = "0"
|
||||
GO386:x86:class-target = "${TARGET_GO386}"
|
||||
GO386:x86:class-target[export] = "1"
|
||||
|
||||
GOMIPS[export] = "0"
|
||||
GOMIPS:mips:class-target = "${TARGET_GOMIPS}"
|
||||
GOMIPS:mips:class-target[export] = "1"
|
||||
|
||||
DEPENDS_GOLANG:class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
|
||||
DEPENDS_GOLANG:class-native = "go-native"
|
||||
DEPENDS_GOLANG:class-nativesdk = "virtual/${TARGET_PREFIX}go virtual/${TARGET_PREFIX}go-runtime"
|
||||
|
||||
DEPENDS:append = " ${DEPENDS_GOLANG}"
|
||||
|
||||
GO_LINKSHARED ?= "${@'-linkshared' if d.getVar('GO_DYNLINK') else ''}"
|
||||
GO_RPATH_LINK = "${@'-Wl,-rpath-link=${STAGING_DIR_TARGET}${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
|
||||
GO_RPATH = "${@'-r ${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
|
||||
GO_RPATH:class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
|
||||
GO_RPATH_LINK:class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
|
||||
GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS}"
|
||||
GO_LINKMODE ?= ""
|
||||
GO_EXTRA_LDFLAGS ?= ""
|
||||
GO_LINUXLOADER ?= "-I ${@get_linuxloader(d)}"
|
||||
# Use system loader. If uninative is used, the uninative loader will be patched automatically
|
||||
GO_LINUXLOADER:class-native = ""
|
||||
GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} ${GO_LINUXLOADER} ${GO_EXTRA_LDFLAGS} -extldflags '${GO_EXTLDFLAGS}'"'
|
||||
export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -trimpath"
|
||||
export GOPATH_OMIT_IN_ACTIONID ?= "1"
|
||||
export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c"
|
||||
export GOPTESTFLAGS ?= ""
|
||||
GOBUILDFLAGS:prepend:task-compile = "${GO_PARALLEL_BUILD} "
|
||||
|
||||
export GO = "${HOST_PREFIX}go"
|
||||
GOTOOLDIR = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go/pkg/tool/${BUILD_GOTUPLE}"
|
||||
GOTOOLDIR:class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
|
||||
export GOTOOLDIR
|
||||
|
||||
export CGO_ENABLED ?= "1"
|
||||
export CGO_CFLAGS ?= "${CFLAGS}"
|
||||
export CGO_CPPFLAGS ?= "${CPPFLAGS}"
|
||||
export CGO_CXXFLAGS ?= "${CXXFLAGS}"
|
||||
export CGO_LDFLAGS ?= "${LDFLAGS}"
|
||||
|
||||
GO_INSTALL ?= "${GO_IMPORT}/..."
|
||||
GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
|
||||
|
||||
B = "${WORKDIR}/build"
|
||||
export GOPATH = "${B}"
|
||||
export GOENV = "off"
|
||||
export GOPROXY ??= "https://proxy.golang.org,direct"
|
||||
export GOTMPDIR ?= "${WORKDIR}/build-tmp"
|
||||
GOTMPDIR[vardepvalue] = ""
|
||||
|
||||
python go_do_unpack() {
    # Fetcher-driven unpack that forces git checkouts into the GOPATH layout
    # the go tool expects: ${WORKDIR}/<basename of S>/src/${GO_IMPORT}.
    src_uri = (d.getVar('SRC_URI') or "").split()
    if len(src_uri) == 0:
        return

    fetcher = bb.fetch2.Fetch(src_uri, d)
    for url in fetcher.urls:
        if fetcher.ud[url].type == 'git':
            # Only override destsuffix when the recipe didn't set one itself.
            if fetcher.ud[url].parm.get('destsuffix') is None:
                s_dirname = os.path.basename(d.getVar('S'))
                # Trailing '/' keeps the fetcher treating this as a directory.
                fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src', d.getVar('GO_IMPORT')) + '/'
    fetcher.unpack(d.getVar('WORKDIR'))
}
|
||||
|
||||
go_list_packages() {
	# List the import paths selected by GO_INSTALL, minus anything matching
	# the GO_INSTALL_FILTEROUT regex (vendored packages by default).
	# 'grep -E' replaces the obsolescent 'egrep', which GNU grep >= 3.8
	# flags with a deprecation warning on stderr.
	${GO} list -f '{{.ImportPath}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
		grep -E -v '${GO_INSTALL_FILTEROUT}'
}
|
||||
|
||||
go_list_package_tests() {
	# List the import paths (from the GO_INSTALL selection) that declare at
	# least one test file; '\[\]$' filters packages with an empty TestGoFiles
	# slice. 'grep -E' replaces the obsolescent 'egrep' (deprecated since
	# GNU grep 3.8, which warns on stderr).
	${GO} list -f '{{.ImportPath}} {{.TestGoFiles}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
		grep -v '\[\]$' | \
		grep -E -v '${GO_INSTALL_FILTEROUT}' | \
		awk '{ print $1 }'
}
|
||||
|
||||
go_do_configure() {
	# The go tool wants sources under ${B}/src (GOPATH layout); a symlink
	# into the unpacked source tree is all the "configure" step needs.
	ln -snf ${S}/src ${B}/
}
|
||||
do_configure[dirs] =+ "${GOTMPDIR}"
|
||||
|
||||
go_do_compile() {
	export TMPDIR="${GOTMPDIR}"
	if [ -n "${GO_INSTALL}" ]; then
		if [ -n "${GO_LINKSHARED}" ]; then
			# When dynamic linking is enabled, do a plain build first so the
			# shared build below can reuse compiled packages; the statically
			# linked binaries it produced are discarded.
			${GO} install ${GOBUILDFLAGS} `go_list_packages`
			rm -rf ${B}/bin
		fi
		${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} `go_list_packages`
	fi
}
|
||||
do_compile[dirs] =+ "${GOTMPDIR}"
|
||||
do_compile[cleandirs] = "${B}/bin ${B}/pkg"
|
||||
|
||||
go_do_install() {
	# Stage the source tree (minus VCS metadata, compiled test binaries and
	# testdata) into ${libdir}/go/src for ${PN}-dev, and the compiled
	# package archives into ${libdir}/go/pkg for ${PN}-staticdev.
	install -d ${D}${libdir}/go/src/${GO_IMPORT}
	tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \
		tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
	tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
		tar -C ${D}${libdir}/go --no-same-owner -xf -

	# Ship any produced executables in ${bindir} (GO_BUILD_BINDIR differs
	# between native and cross builds).
	if ls ${B}/${GO_BUILD_BINDIR}/* >/dev/null 2>/dev/null ; then
		install -d ${D}${bindir}
		install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
	fi
}
|
||||
|
||||
go_stage_testdata() {
	# Copy every non-vendored 'testdata' directory from the source tree into
	# the ptest install area, preserving the package-relative layout so the
	# compiled tests find their fixtures.
	oldwd="$PWD"
	cd ${S}/src
	find ${GO_IMPORT} -depth -type d -name testdata | while read d; do
		# Skip fixtures belonging to vendored dependencies.
		if echo "$d" | grep -q '/vendor/'; then
			continue
		fi
		parent=`dirname $d`
		install -d ${D}${PTEST_PATH}/$parent
		cp --preserve=mode,timestamps -R $d ${D}${PTEST_PATH}/$parent/
	done
	cd "$oldwd"
}
|
||||
|
||||
EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
|
||||
|
||||
FILES:${PN}-dev = "${libdir}/go/src"
|
||||
FILES:${PN}-staticdev = "${libdir}/go/pkg"
|
||||
|
||||
INSANE_SKIP:${PN} += "ldflags"
|
||||
|
||||
# Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips
|
||||
# doesn't support -buildmode=pie, so skip the QA checking for mips/rv32 and its
|
||||
# variants.
|
||||
python() {
    # -buildmode=pie satisfies the "textrel" QA check, but the mips and
    # riscv32 Go ports don't support it, so skip the check there instead.
    if 'mips' in d.getVar('TARGET_ARCH') or 'riscv32' in d.getVar('TARGET_ARCH'):
        d.appendVar('INSANE_SKIP:%s' % d.getVar('PN'), " textrel")
    else:
        d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
}
|
||||
102
sources/poky/meta/classes-recipe/goarch.bbclass
Normal file
102
sources/poky/meta/classes-recipe/goarch.bbclass
Normal file
@@ -0,0 +1,102 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
BUILD_GOOS = "${@go_map_os(d.getVar('BUILD_OS'), d)}"
|
||||
BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH'), d)}"
|
||||
BUILD_GOTUPLE = "${BUILD_GOOS}_${BUILD_GOARCH}"
|
||||
HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS'), d)}"
|
||||
HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}"
|
||||
HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d)}"
|
||||
HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
|
||||
HOST_GOMIPS = "${@go_map_mips(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
|
||||
HOST_GOARM:class-native = "7"
|
||||
HOST_GO386:class-native = "sse2"
|
||||
HOST_GOMIPS:class-native = "hardfloat"
|
||||
HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
|
||||
TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}"
|
||||
TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}"
|
||||
TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d)}"
|
||||
TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
|
||||
TARGET_GOMIPS = "${@go_map_mips(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
|
||||
TARGET_GOARM:class-native = "7"
|
||||
TARGET_GO386:class-native = "sse2"
|
||||
TARGET_GOMIPS:class-native = "hardfloat"
|
||||
TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
|
||||
GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}"
|
||||
|
||||
# Use the MACHINEOVERRIDES to map ARM CPU architecture passed to GO via GOARM.
|
||||
# This is combined with *_ARCH to set HOST_GOARM and TARGET_GOARM.
|
||||
BASE_GOARM = ''
|
||||
BASE_GOARM:armv7ve = '7'
|
||||
BASE_GOARM:armv7a = '7'
|
||||
BASE_GOARM:armv6 = '6'
|
||||
BASE_GOARM:armv5 = '5'
|
||||
|
||||
# Go supports dynamic linking on a limited set of architectures.
|
||||
# See the supportsDynlink function in go/src/cmd/compile/internal/gc/main.go
|
||||
GO_DYNLINK = ""
|
||||
GO_DYNLINK:arm ?= "1"
|
||||
GO_DYNLINK:aarch64 ?= "1"
|
||||
GO_DYNLINK:x86 ?= "1"
|
||||
GO_DYNLINK:x86-64 ?= "1"
|
||||
GO_DYNLINK:powerpc64 ?= "1"
|
||||
GO_DYNLINK:powerpc64le ?= "1"
|
||||
GO_DYNLINK:class-native ?= ""
|
||||
GO_DYNLINK:class-nativesdk = ""
|
||||
|
||||
# define here because everybody inherits this class
|
||||
#
|
||||
COMPATIBLE_HOST:linux-gnux32 = "null"
|
||||
COMPATIBLE_HOST:linux-muslx32 = "null"
|
||||
COMPATIBLE_HOST:powerpc = "null"
|
||||
COMPATIBLE_HOST:powerpc64 = "null"
|
||||
COMPATIBLE_HOST:mipsarchn32 = "null"
|
||||
COMPATIBLE_HOST:riscv32 = "null"
|
||||
|
||||
ARM_INSTRUCTION_SET:armv4 = "arm"
|
||||
ARM_INSTRUCTION_SET:armv5 = "arm"
|
||||
ARM_INSTRUCTION_SET:armv6 = "arm"
|
||||
|
||||
TUNE_CCARGS:remove = "-march=mips32r2"
|
||||
SECURITY_NOPIE_CFLAGS ??= ""
|
||||
|
||||
# go can't be built with ccache:
|
||||
# gcc: fatal error: no input files
|
||||
CCACHE_DISABLE ?= "1"
|
||||
|
||||
def go_map_arch(a, d):
    """Translate the OE architecture name *a* into Go's GOARCH value.

    Skips the recipe (bb.parse.SkipRecipe) when Go has no port for the CPU.
    """
    mapped = oe.go.map_arch(a)
    if mapped:
        return mapped
    raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a)
|
||||
|
||||
def go_map_arm(a, d):
    """Return the GOARM level for ARM targets, '' for everything else.

    The level itself comes from BASE_GOARM, set via machine overrides.
    """
    if not a.startswith("arm"):
        return ''
    return d.getVar('BASE_GOARM')
|
||||
|
||||
def go_map_386(a, f, d):
    """Return the GO386 floating-point mode for ia32 targets, '' otherwise.

    Only tunes known to guarantee SSE2 (core2/corei7) get the fast path;
    everything else falls back to software floating point.
    """
    import re
    if not re.match('i.86', a):
        return ''
    has_sse2 = ('core2' in f) or ('corei7' in f)
    return 'sse2' if has_sse2 else 'softfloat'
|
||||
|
||||
def go_map_mips(a, f, d):
    """Return the GOMIPS float ABI for 32-bit MIPS targets, '' otherwise.

    'hardfloat' is selected only when the tune features advertise an FPU
    ('fpu-hard' in TUNE_FEATURES).
    """
    # Fix: dropped the unused 'import re' the original carried over from
    # go_map_386 — this function does no regex matching.
    if a in ('mips', 'mipsel'):
        if 'fpu-hard' in f:
            return 'hardfloat'
        return 'softfloat'
    return ''
|
||||
|
||||
def go_map_os(o, d):
    """Collapse any linux-* TARGET_OS (e.g. linux-gnueabi) to plain 'linux';
    other OS strings pass through unchanged."""
    return 'linux' if o.startswith('linux') else o
|
||||
@@ -0,0 +1,13 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# This variable is set to True if gobject-introspection-data is in
|
||||
# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
|
||||
#
|
||||
# It should be used in recipes to determine whether introspection data should be built,
|
||||
# so that qemu use can be avoided when necessary.
|
||||
GI_DATA_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \
|
||||
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
|
||||
@@ -0,0 +1,64 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Inherit this class in recipes to enable building their introspection files
|
||||
|
||||
# python3native is inherited to prevent introspection tools being run with
|
||||
# host's python 3 (they need to be run with native python 3)
|
||||
#
|
||||
# This also sets up autoconf-based recipes to build introspection data (or not),
|
||||
# depending on distro and machine features (see gobject-introspection-data class).
|
||||
inherit python3native gobject-introspection-data
|
||||
|
||||
# meson: default option name to enable/disable introspection. This matches most
|
||||
# project's configuration. In doubts - check meson_options.txt in project's
|
||||
# source path.
|
||||
GIR_MESON_OPTION ?= 'introspection'
|
||||
GIR_MESON_ENABLE_FLAG ?= 'true'
|
||||
GIR_MESON_DISABLE_FLAG ?= 'false'
|
||||
|
||||
# Define g-i options such that they can be disabled completely when GIR_MESON_OPTION is empty
|
||||
GIRMESONTARGET = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} "
|
||||
GIRMESONBUILD = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
|
||||
# Auto enable/disable based on GI_DATA_ENABLED
|
||||
EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
|
||||
EXTRA_OEMESON:prepend:class-target = "${@['', '${GIRMESONTARGET}'][d.getVar('GIR_MESON_OPTION') != '']}"
|
||||
# When building native recipes, disable introspection, as it is not necessary,
|
||||
# pulls in additional dependencies, and makes build times longer
|
||||
EXTRA_OECONF:prepend:class-native = "--disable-introspection "
|
||||
EXTRA_OECONF:prepend:class-nativesdk = "--disable-introspection "
|
||||
EXTRA_OEMESON:prepend:class-native = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}"
|
||||
EXTRA_OEMESON:prepend:class-nativesdk = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}"
|
||||
|
||||
# Generating introspection data depends on a combination of native and target
|
||||
# introspection tools, and qemu to run the target tools.
|
||||
DEPENDS:append:class-target = " ${@bb.utils.contains('GI_DATA_ENABLED', 'True', 'gobject-introspection qemu-native', '', d)}"
|
||||
|
||||
# Even when introspection is disabled, the gobject-introspection package is still needed for m4 macros.
|
||||
DEPENDS:append = " gobject-introspection-native"
|
||||
|
||||
# This is used by introspection tools to find .gir includes
|
||||
export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
|
||||
|
||||
do_configure:prepend:class-target () {
|
||||
# introspection.m4 pre-packaged with upstream tarballs does not yet
|
||||
# have our fixes
|
||||
mkdir -p ${S}/m4
|
||||
cp ${STAGING_DIR_NATIVE}/${datadir}/aclocal/introspection.m4 ${S}/m4
|
||||
}
|
||||
|
||||
do_compile:prepend() {
|
||||
# This prevents g-ir-scanner from writing cache data to $HOME
|
||||
export GI_SCANNER_DISABLE_CACHE=1
|
||||
}
|
||||
|
||||
# .typelib files are needed at runtime and so they go to the main package (so
|
||||
# they'll be together with libraries they support).
|
||||
FILES:${PN}:append = " ${libdir}/girepository-*/*.typelib"
|
||||
|
||||
# .gir files go to dev package, as they're needed for developing (but not for
|
||||
# running) things that depends on introspection.
|
||||
FILES:${PN}-dev:append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
|
||||
122
sources/poky/meta/classes-recipe/grub-efi-cfg.bbclass
Normal file
122
sources/poky/meta/classes-recipe/grub-efi-cfg.bbclass
Normal file
@@ -0,0 +1,122 @@
|
||||
# grub-efi.bbclass
|
||||
# Copyright (c) 2011, Intel Corporation.
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
# Provide grub-efi specific functions for building bootable images.
|
||||
|
||||
# External variables
|
||||
# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
|
||||
# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
|
||||
# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
|
||||
# ${LABELS} - a list of targets for the automatic config
|
||||
# ${APPEND} - an override list of append strings for each label
|
||||
# ${GRUB_OPTS} - additional options to add to the config, ';' delimited # (optional)
|
||||
# ${GRUB_TIMEOUT} - timeout before executing the deault label (optional)
|
||||
# ${GRUB_ROOT} - grub's root device.
|
||||
|
||||
GRUB_SERIAL ?= "console=ttyS0,115200"
|
||||
GRUB_CFG_VM = "${S}/grub_vm.cfg"
|
||||
GRUB_CFG_LIVE = "${S}/grub_live.cfg"
|
||||
GRUB_TIMEOUT ?= "10"
|
||||
#FIXME: build this from the machine config
|
||||
GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
|
||||
|
||||
GRUB_ROOT ?= "${ROOT}"
|
||||
APPEND ?= ""
|
||||
|
||||
# Uses MACHINE specific KERNEL_IMAGETYPE
|
||||
PACKAGE_ARCH = "${MACHINE_ARCH}"
|
||||
|
||||
# Need UUID utility code.
|
||||
inherit fs-uuid
|
||||
|
||||
python build_efi_cfg() {
    # Generate the grub-efi configuration file (GRUB_CFG) with one menuentry
    # per label in LABELS, optionally duplicated for graphics and serial
    # consoles when GRUB_GFXSERIAL is "1".
    #
    # Fixes over the original: removed the unused 'import sys', and the
    # config file is now managed by a 'with' block so the handle is closed
    # even when bb.fatal() raises partway through generation.
    workdir = d.getVar('WORKDIR')
    if not workdir:
        bb.error("WORKDIR not defined, unable to package")
        return

    gfxserial = d.getVar('GRUB_GFXSERIAL') or ""

    labels = d.getVar('LABELS')
    if not labels:
        bb.debug(1, "LABELS not defined, nothing to do")
        return

    if labels == []:
        bb.debug(1, "No labels, nothing to do")
        return

    cfile = d.getVar('GRUB_CFG')
    if not cfile:
        bb.fatal('Unable to read GRUB_CFG')

    try:
        cfgfile = open(cfile, 'w')
    except OSError:
        bb.fatal('Unable to open %s' % cfile)

    with cfgfile:
        cfgfile.write('# Automatically created by OE\n')

        # GRUB_OPTS is a ';'-delimited list of raw config lines.
        opts = d.getVar('GRUB_OPTS')
        if opts:
            for opt in opts.split(';'):
                cfgfile.write('%s\n' % opt)

        # The first label is the default boot target.
        cfgfile.write('default=%s\n' % (labels.split()[0]))

        timeout = d.getVar('GRUB_TIMEOUT')
        if timeout:
            cfgfile.write('timeout=%s\n' % timeout)
        else:
            cfgfile.write('timeout=50\n')

        root = d.getVar('GRUB_ROOT')
        if not root:
            bb.fatal('GRUB_ROOT not defined')

        if gfxserial == "1":
            btypes = [ [ " graphics console", "" ],
                       [ " serial console", d.getVar('GRUB_SERIAL') or "" ] ]
        else:
            btypes = [ [ "", "" ] ]

        for label in labels.split():
            localdata = d.createCopy()

            overrides = localdata.getVar('OVERRIDES')
            if not overrides:
                bb.fatal('OVERRIDES not defined')

            # Enable per-label variable overrides, e.g. APPEND:grub_install.
            localdata.setVar('OVERRIDES', 'grub_' + label + ':' + overrides)

            for btype in btypes:
                cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
                lb = label
                if label == "install":
                    lb = "install-efi"
                kernel = localdata.getVar('KERNEL_IMAGETYPE')
                cfgfile.write('linux /%s LABEL=%s' % (kernel, lb))

                cfgfile.write(' %s' % replace_rootfs_uuid(d, root))

                append = localdata.getVar('APPEND')
                initrd = localdata.getVar('INITRD')

                if append:
                    append = replace_rootfs_uuid(d, append)
                    cfgfile.write(' %s' % (append))

                cfgfile.write(' %s' % btype[1])
                cfgfile.write('\n')

                if initrd:
                    cfgfile.write('initrd /initrd')
                cfgfile.write('\n}\n')
}
|
||||
build_efi_cfg[vardepsexclude] += "OVERRIDES"
|
||||
14
sources/poky/meta/classes-recipe/grub-efi.bbclass
Normal file
14
sources/poky/meta/classes-recipe/grub-efi.bbclass
Normal file
@@ -0,0 +1,14 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit grub-efi-cfg
|
||||
require conf/image-uefi.conf
|
||||
|
||||
efi_populate() {
	# Populate the EFI system partition area ($1 is the destination root)
	# with the common EFI payload, then drop in the generated grub.cfg.
	# NOTE(review): assumes efi_populate_common sets ${DEST} — defined by
	# the image-uefi configuration pulled in via 'require' above; confirm.
	efi_populate_common "$1" grub-efi

	install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
}
|
||||
48
sources/poky/meta/classes-recipe/gsettings.bbclass
Normal file
48
sources/poky/meta/classes-recipe/gsettings.bbclass
Normal file
@@ -0,0 +1,48 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# A bbclass to handle installed GSettings (glib) schemas, updated the compiled
|
||||
# form on package install and remove.
|
||||
#
|
||||
# The compiled schemas are platform-agnostic, so we can depend on
|
||||
# glib-2.0-native for the native tool and run the postinst script when the
|
||||
# rootfs builds to save a little time on first boot.
|
||||
|
||||
# TODO use a trigger so that this runs once per package operation run
|
||||
|
||||
GSETTINGS_PACKAGE ?= "${PN}"
|
||||
|
||||
python __anonymous() {
    # Wire up the dependencies for the package carrying GSettings schemas:
    # glib-2.0-native compiles schemas at rootfs-assembly time, and
    # glib-2.0-utils provides glib-compile-schemas on the target.
    pkg = d.getVar("GSETTINGS_PACKAGE")
    if pkg:
        d.appendVar("PACKAGE_WRITE_DEPS", " glib-2.0-native")
        d.appendVar("RDEPENDS:" + pkg, " ${MLPREFIX}glib-2.0-utils")
        d.appendVar("FILES:" + pkg, " ${datadir}/glib-2.0/schemas")
}
|
||||
|
||||
gsettings_postinstrm () {
	# Shared postinst/postrm body: (re)compile the installed schema files.
	# $D is non-empty during image construction and empty on-device.
	glib-compile-schemas $D${datadir}/glib-2.0/schemas
}
|
||||
|
||||
python populate_packages:append () {
    # Append the schema-compilation snippet to the package's postinst and
    # postrm scriptlets so the compiled schema cache stays current across
    # package install and removal.
    pkg = d.getVar('GSETTINGS_PACKAGE')
    if pkg:
        bb.note("adding gsettings postinst scripts to %s" % pkg)

        # Fall back to the generic pkg_postinst when no per-package one exists.
        postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
        if not postinst:
            postinst = '#!/bin/sh\n'
        postinst += d.getVar('gsettings_postinstrm')
        d.setVar('pkg_postinst:%s' % pkg, postinst)

        bb.note("adding gsettings postrm scripts to %s" % pkg)

        postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
        if not postrm:
            postrm = '#!/bin/sh\n'
        postrm += d.getVar('gsettings_postinstrm')
        d.setVar('pkg_postrm:%s' % pkg, postrm)
}
|
||||
72
sources/poky/meta/classes-recipe/gtk-doc.bbclass
Normal file
72
sources/poky/meta/classes-recipe/gtk-doc.bbclass
Normal file
@@ -0,0 +1,72 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Helper class to pull in the right gtk-doc dependencies and configure
|
||||
# gtk-doc to enable or disable documentation building (which requries the
|
||||
# use of usermode qemu).
|
||||
|
||||
# This variable is set to True if api-documentation is in
|
||||
# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
|
||||
#
|
||||
# It should be used in recipes to determine whether gtk-doc based documentation should be built,
|
||||
# so that qemu use can be avoided when necessary.
|
||||
GTKDOC_ENABLED:class-native = "False"
|
||||
GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \
|
||||
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
|
||||
|
||||
# meson: default option name to enable/disable gtk-doc. This matches most
|
||||
# project's configuration. In doubts - check meson_options.txt in project's
|
||||
# source path.
|
||||
GTKDOC_MESON_OPTION ?= 'docs'
|
||||
GTKDOC_MESON_ENABLE_FLAG ?= 'true'
|
||||
GTKDOC_MESON_DISABLE_FLAG ?= 'false'
|
||||
|
||||
# Auto enable/disable based on GTKDOC_ENABLED
|
||||
EXTRA_OECONF:prepend = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
|
||||
'--disable-gtk-doc', d)} "
|
||||
EXTRA_OEMESON:prepend = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
|
||||
|
||||
# Even though gtkdoc is disabled on -native, gtk-doc package is still
|
||||
# needed for m4 macros.
|
||||
DEPENDS:append = " gtk-doc-native"
|
||||
|
||||
export STAGING_DIR_HOST
|
||||
|
||||
inherit python3native pkgconfig qemu
|
||||
DEPENDS:append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
|
||||
|
||||
do_compile:prepend:class-target () {
|
||||
if [ ${GTKDOC_ENABLED} = True ]; then
|
||||
# Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
|
||||
# can run target helper binaries through that.
|
||||
qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['\\$GIR_EXTRA_LIBS_PATH','$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
|
||||
cat > ${B}/gtkdoc-qemuwrapper << EOF
|
||||
#!/bin/sh
|
||||
# Use a modules directory which doesn't exist so we don't load random things
|
||||
# which may then get deleted (or their dependencies) and potentially segfault
|
||||
export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
|
||||
|
||||
GIR_EXTRA_LIBS_PATH=\`find ${B} -name *.so -printf "%h\n"|sort|uniq| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
|
||||
GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
|
||||
|
||||
# meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
|
||||
unset LD_LIBRARY_PATH
|
||||
|
||||
if [ -d ".libs" ]; then
|
||||
$qemu_binary ".libs/\$@"
|
||||
else
|
||||
$qemu_binary "\$@"
|
||||
fi
|
||||
|
||||
if [ \$? -ne 0 ]; then
|
||||
echo "If the above error message is about missing .so libraries, then setting up GIR_EXTRA_LIBS_PATH in the recipe should help."
|
||||
echo "(typically like this: GIR_EXTRA_LIBS_PATH=\"$""{B}/something/.libs\" )"
|
||||
exit 1
|
||||
fi
|
||||
EOF
|
||||
chmod +x ${B}/gtkdoc-qemuwrapper
|
||||
fi
|
||||
}
|
||||
95
sources/poky/meta/classes-recipe/gtk-icon-cache.bbclass
Normal file
95
sources/poky/meta/classes-recipe/gtk-icon-cache.bbclass
Normal file
@@ -0,0 +1,95 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
FILES:${PN} += "${datadir}/icons/hicolor"
|
||||
|
||||
GTKIC_VERSION ??= '3'
|
||||
|
||||
GTKPN = "${@ 'gtk4' if d.getVar('GTKIC_VERSION') == '4' else 'gtk+3' }"
|
||||
GTKIC_CMD = "${@ 'gtk4-update-icon-cache' if d.getVar('GTKIC_VERSION') == '4' else 'gtk-update-icon-cache-3.0' }"
|
||||
|
||||
#gtk+3/gtk4 require GTK3DISTROFEATURES, DEPENDS on it make all the
|
||||
#recipes inherit this class require GTK3DISTROFEATURES
|
||||
inherit features_check
|
||||
ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}"
|
||||
|
||||
DEPENDS +=" ${@ '' if d.getVar('BPN') == 'hicolor-icon-theme' else 'hicolor-icon-theme' } \
|
||||
${@ '' if d.getVar('BPN') == 'gdk-pixbuf' else 'gdk-pixbuf' } \
|
||||
${@ '' if d.getVar('BPN') == d.getVar('GTKPN') else d.getVar('GTKPN') } \
|
||||
${GTKPN}-native \
|
||||
"
|
||||
|
||||
PACKAGE_WRITE_DEPS += "${GTKPN}-native gdk-pixbuf-native"
|
||||
|
||||
gtk_icon_cache_postinst() {
|
||||
if [ "x$D" != "x" ]; then
|
||||
$INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
|
||||
mlprefix=${MLPREFIX} \
|
||||
libdir_native=${libdir_native}
|
||||
else
|
||||
|
||||
# Update the pixbuf loaders in case they haven't been registered yet
|
||||
${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
|
||||
|
||||
for icondir in /usr/share/icons/* ; do
|
||||
if [ -d $icondir ] ; then
|
||||
${GTKIC_CMD} -fqt $icondir
|
||||
fi
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
gtk_icon_cache_postrm() {
|
||||
if [ "x$D" != "x" ]; then
|
||||
$INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
|
||||
mlprefix=${MLPREFIX} \
|
||||
libdir=${libdir}
|
||||
else
|
||||
for icondir in /usr/share/icons/* ; do
|
||||
if [ -d $icondir ] ; then
|
||||
${GTKIC_CMD} -qt $icondir
|
||||
fi
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
python populate_packages:append () {
    # For every package that ships icons under ${datadir}/icons, add the
    # runtime dependencies needed by the icon-cache scriptlets and append
    # the postinst/postrm snippets this class defines.
    packages = d.getVar('PACKAGES').split()
    pkgdest = d.getVar('PKGDEST')

    for pkg in packages:
        # Only packages that actually install an icons directory matter.
        icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir'))
        if not os.path.exists(icon_dir):
            continue

        bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
        rdepends = ' ' + d.getVar('MLPREFIX', False) + "hicolor-icon-theme"
        d.appendVar('RDEPENDS:%s' % pkg, rdepends)

        # gtk_icon_cache_postinst depends on gdk-pixbuf and gtk+3/gtk4
        bb.note("adding gdk-pixbuf dependency to %s" % pkg)
        rdepends = ' ' + d.getVar('MLPREFIX', False) + "gdk-pixbuf"
        d.appendVar('RDEPENDS:%s' % pkg, rdepends)

        bb.note("adding %s dependency to %s" % (d.getVar('GTKPN'), pkg))
        rdepends = ' ' + d.getVar('MLPREFIX', False) + d.getVar('GTKPN')
        d.appendVar('RDEPENDS:%s' % pkg, rdepends)

        bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)

        postinst = d.getVar('pkg_postinst:%s' % pkg)
        if not postinst:
            postinst = '#!/bin/sh\n'
        postinst += d.getVar('gtk_icon_cache_postinst')
        d.setVar('pkg_postinst:%s' % pkg, postinst)

        postrm = d.getVar('pkg_postrm:%s' % pkg)
        if not postrm:
            postrm = '#!/bin/sh\n'
        postrm += d.getVar('gtk_icon_cache_postrm')
        d.setVar('pkg_postrm:%s' % pkg, postrm)
}
|
||||
|
||||
82
sources/poky/meta/classes-recipe/gtk-immodules-cache.bbclass
Normal file
82
sources/poky/meta/classes-recipe/gtk-immodules-cache.bbclass
Normal file
@@ -0,0 +1,82 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# This class will update the inputmethod module cache for virtual keyboards
|
||||
#
|
||||
# Usage: Set GTKIMMODULES_PACKAGES to the packages that needs to update the inputmethod modules
|
||||
|
||||
PACKAGE_WRITE_DEPS += "qemu-native"
|
||||
|
||||
inherit qemu
|
||||
|
||||
GTKIMMODULES_PACKAGES ?= "${PN}"
|
||||
|
||||
gtk_immodule_cache_postinst() {
|
||||
if [ "x$D" != "x" ]; then
|
||||
$INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
|
||||
mlprefix=${MLPREFIX} \
|
||||
binprefix=${MLPREFIX} \
|
||||
libdir=${libdir} \
|
||||
libexecdir=${libexecdir} \
|
||||
base_libdir=${base_libdir} \
|
||||
bindir=${bindir}
|
||||
else
|
||||
if [ ! -z `which gtk-query-immodules-2.0` ]; then
|
||||
gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
|
||||
fi
|
||||
if [ ! -z `which gtk-query-immodules-3.0` ]; then
|
||||
mkdir -p ${libdir}/gtk-3.0/3.0.0
|
||||
gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
gtk_immodule_cache_postrm() {
|
||||
if [ "x$D" != "x" ]; then
|
||||
$INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
|
||||
mlprefix=${MLPREFIX} \
|
||||
binprefix=${MLPREFIX} \
|
||||
libdir=${libdir} \
|
||||
libexecdir=${libexecdir} \
|
||||
base_libdir=${base_libdir} \
|
||||
bindir=${bindir}
|
||||
else
|
||||
if [ ! -z `which gtk-query-immodules-2.0` ]; then
|
||||
gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
|
||||
fi
|
||||
if [ ! -z `which gtk-query-immodules-3.0` ]; then
|
||||
gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
python populate_packages:append () {
|
||||
gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES').split()
|
||||
|
||||
for pkg in gtkimmodules_pkgs:
|
||||
bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
|
||||
|
||||
postinst = d.getVar('pkg_postinst:%s' % pkg)
|
||||
if not postinst:
|
||||
postinst = '#!/bin/sh\n'
|
||||
postinst += d.getVar('gtk_immodule_cache_postinst')
|
||||
d.setVar('pkg_postinst:%s' % pkg, postinst)
|
||||
|
||||
postrm = d.getVar('pkg_postrm:%s' % pkg)
|
||||
if not postrm:
|
||||
postrm = '#!/bin/sh\n'
|
||||
postrm += d.getVar('gtk_immodule_cache_postrm')
|
||||
d.setVar('pkg_postrm:%s' % pkg, postrm)
|
||||
}
|
||||
|
||||
python __anonymous() {
    # Parse-time sanity check: target recipes inheriting this class must
    # declare GTKIMMODULES_PACKAGES; native and cross builds are exempt.
    if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
        gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES', False)
        if not gtkimmodules_check:
            bb_filename = d.getVar('FILE', False)
            bb.fatal("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
}
|
||||
|
||||
@@ -0,0 +1,41 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
##################################################################
|
||||
# Specific image creation and rootfs population info.
|
||||
##################################################################
|
||||
|
||||
IMAGE_BASENAME ?= "${PN}"
|
||||
IMAGE_VERSION_SUFFIX ?= "-${DATETIME}"
|
||||
IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME SOURCE_DATE_EPOCH"
|
||||
IMAGE_NAME ?= "${IMAGE_LINK_NAME}${IMAGE_VERSION_SUFFIX}"
|
||||
IMAGE_LINK_NAME ?= "${IMAGE_BASENAME}${IMAGE_MACHINE_SUFFIX}${IMAGE_NAME_SUFFIX}"
|
||||
|
||||
# This needs to stay in sync with IMAGE_LINK_NAME, but with INITRAMFS_IMAGE instead of IMAGE_BASENAME
|
||||
# and without ${IMAGE_NAME_SUFFIX} which all initramfs images should set to empty
|
||||
INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}${IMAGE_MACHINE_SUFFIX}', ''][d.getVar('INITRAMFS_IMAGE') == '']}"
|
||||
|
||||
# The default DEPLOY_DIR_IMAGE is ${MACHINE} directory:
|
||||
# meta/conf/bitbake.conf:DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR}/images/${MACHINE}"
|
||||
# so many people find it unnecessary to include this suffix to every image
|
||||
# stored there, but other people often fetch various images for different
|
||||
# MACHINEs to the same downloads directory and then the suffix is very helpful
|
||||
# add separate variable for projects to decide which scheme works best for them
|
||||
# without understanding the IMAGE_NAME/IMAGE_LINK_NAME structure.
|
||||
IMAGE_MACHINE_SUFFIX ??= "-${MACHINE}"
|
||||
|
||||
# IMAGE_NAME is the base name for everything produced when building images.
|
||||
# The actual image that contains the rootfs has an additional suffix (.rootfs
|
||||
# by default) followed by additional suffices which describe the format (.ext4,
|
||||
# .ext4.xz, etc.).
|
||||
IMAGE_NAME_SUFFIX ??= ".rootfs"
|
||||
|
||||
python () {
    # For deployable recipes, replace the volatile ${DATETIME} default with a
    # timestamp derived from SOURCE_DATE_EPOCH so image names are
    # reproducible.  The vardepvalue flag is cleared so the substituted
    # timestamp does not perturb task signatures.
    if bb.data.inherits_class('deploy', d) and d.getVar("IMAGE_VERSION_SUFFIX") == "-${DATETIME}":
        import datetime
        d.setVar("IMAGE_VERSION_SUFFIX", "-" + datetime.datetime.fromtimestamp(int(d.getVar("SOURCE_DATE_EPOCH")), datetime.timezone.utc).strftime('%Y%m%d%H%M%S'))
        d.setVarFlag("IMAGE_VERSION_SUFFIX", "vardepvalue", "")
}
|
||||
15
sources/poky/meta/classes-recipe/image-combined-dbg.bbclass
Normal file
15
sources/poky/meta/classes-recipe/image-combined-dbg.bbclass
Normal file
@@ -0,0 +1,15 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
IMAGE_PREPROCESS_COMMAND:append = " combine_dbg_image"
|
||||
|
||||
# When a companion debug filesystem is being generated, overlay the regular
# target rootfs into the -dbg tree so it is directly usable for debugging
# (binaries and their debug data side by side).
combine_dbg_image () {
	if [ "${IMAGE_GEN_DEBUGFS}" = "1" -a -e ${IMAGE_ROOTFS}-dbg ]; then
		# copy target files into -dbg rootfs, so it can be used for
		# debug purposes directly
		tar -C ${IMAGE_ROOTFS} -cf - . | tar -C ${IMAGE_ROOTFS}-dbg -xf -
	fi
}
|
||||
27
sources/poky/meta/classes-recipe/image-container.bbclass
Normal file
27
sources/poky/meta/classes-recipe/image-container.bbclass
Normal file
@@ -0,0 +1,27 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
ROOTFS_BOOTSTRAP_INSTALL = ""
|
||||
IMAGE_TYPES_MASKED += "container"
|
||||
IMAGE_TYPEDEP:container = "tar.bz2"
|
||||
|
||||
python __anonymous() {
    # Building a container image against a real kernel is usually wasted
    # build time; skip the recipe with an explanatory message unless the
    # user selected linux-dummy or explicitly opted out via
    # IMAGE_CONTAINER_NO_DUMMY.
    if "container" in d.getVar("IMAGE_FSTYPES") and \
       d.getVar("IMAGE_CONTAINER_NO_DUMMY") != "1" and \
       "linux-dummy" not in d.getVar("PREFERRED_PROVIDER_virtual/kernel"):
        msg = '"container" is in IMAGE_FSTYPES, but ' \
              'PREFERRED_PROVIDER_virtual/kernel is not "linux-dummy". ' \
              'Unless a particular kernel is needed, using linux-dummy will ' \
              'prevent a kernel from being built, which can reduce ' \
              'build times. If you don\'t want to use "linux-dummy", set ' \
              '"IMAGE_CONTAINER_NO_DUMMY" to "1".'

        # Raising skip recipe was Paul's clever idea. It causes the error to
        # only be shown for the recipes actually requested to build, rather
        # than bb.fatal which would appear for all recipes inheriting the
        # class.
        raise bb.parse.SkipRecipe(msg)
}
|
||||
264
sources/poky/meta/classes-recipe/image-live.bbclass
Normal file
264
sources/poky/meta/classes-recipe/image-live.bbclass
Normal file
@@ -0,0 +1,264 @@
|
||||
# Copyright (C) 2004, Advanced Micro Devices, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
# Creates a bootable image using syslinux, your kernel and an optional
|
||||
# initrd
|
||||
|
||||
#
|
||||
# End result is two things:
|
||||
#
|
||||
# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
|
||||
# an initrd and a rootfs image. These can be written to harddisks directly and
|
||||
# also booted on USB flash disks (write them there with dd).
|
||||
#
|
||||
# 2. A CD .iso image
|
||||
|
||||
# Boot process is that the initrd will boot and process which label was selected
|
||||
# in syslinux. Actions based on the label are then performed (e.g. installing to
|
||||
# an hdd)
|
||||
|
||||
# External variables (also used by syslinux.bbclass)
|
||||
# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
|
||||
# ${HDDIMG_ID} - FAT image volume-id
|
||||
# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
|
||||
|
||||
inherit live-vm-common image-artifact-names
|
||||
|
||||
do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
|
||||
mtools-native:do_populate_sysroot \
|
||||
cdrtools-native:do_populate_sysroot \
|
||||
virtual/kernel:do_deploy \
|
||||
${MLPREFIX}syslinux:do_populate_sysroot \
|
||||
syslinux-native:do_populate_sysroot \
|
||||
${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_').split('.')[0]) if d.getVar('ROOTFS') else ''} \
|
||||
"
|
||||
|
||||
|
||||
LABELS_LIVE ?= "boot install"
|
||||
ROOT_LIVE ?= "root=/dev/ram0"
|
||||
INITRD_IMAGE_LIVE ?= "${MLPREFIX}core-image-minimal-initramfs"
|
||||
INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}${IMAGE_MACHINE_SUFFIX}.${@d.getVar('INITRAMFS_FSTYPES').split()[0]}"
|
||||
|
||||
LIVE_ROOTFS_TYPE ?= "ext4"
|
||||
ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
|
||||
|
||||
IMAGE_TYPEDEP:live = "${LIVE_ROOTFS_TYPE}"
|
||||
IMAGE_TYPEDEP:iso = "${LIVE_ROOTFS_TYPE}"
|
||||
IMAGE_TYPEDEP:hddimg = "${LIVE_ROOTFS_TYPE}"
|
||||
IMAGE_TYPES_MASKED += "live hddimg iso"
|
||||
|
||||
python() {
    # An image cannot serve as its own live initrd; otherwise make the
    # bootimg task wait until the initrd image has been fully built.
    initrd_image = d.getVar('INITRD_IMAGE_LIVE')
    if d.getVar('IMAGE_BASENAME') == initrd_image:
        bb.error('INITRD_IMAGE_LIVE %s cannot use image live, hddimg or iso.' % initrd_image)
        bb.fatal('Check IMAGE_FSTYPES and INITRAMFS_FSTYPES settings.')
    if initrd_image:
        d.appendVarFlag('do_bootimg', 'depends', ' %s:do_image_complete' % initrd_image)
}
|
||||
|
||||
HDDDIR = "${S}/hddimg"
|
||||
ISODIR = "${S}/iso"
|
||||
EFIIMGDIR = "${S}/efi_img"
|
||||
COMPACT_ISODIR = "${S}/iso.z"
|
||||
|
||||
ISOLINUXDIR ?= "/isolinux"
|
||||
ISO_BOOTIMG = "isolinux/isolinux.bin"
|
||||
ISO_BOOTCAT = "isolinux/boot.cat"
|
||||
MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table"
|
||||
|
||||
BOOTIMG_VOLUME_ID ?= "boot"
|
||||
BOOTIMG_EXTRA_SPACE ?= "512"
|
||||
|
||||
# Stage the kernel (via populate_kernel from live-vm-common) plus the
# optional rootfs image into the directory given as $1 (HDDDIR or ISODIR).
populate_live() {
	populate_kernel $1
	if [ -s "${ROOTFS}" ]; then
		install -m 0644 ${ROOTFS} $1/rootfs.img
	fi
}
|
||||
|
||||
# Build a bootable .iso in IMGDEPLOYDIR when the "live" or "iso" image type
# is enabled.  Validates the initrd images, stages boot files for PCBIOS
# and/or EFI, runs mkisofs with the matching El Torito options and finally
# post-processes the image with isohybrid so it also boots from USB media.
build_iso() {
	# Only create an ISO if we have an INITRD and the live or iso image type was selected
	if [ -z "${INITRD}" ] || [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso', '1', '0', d)}" != "1" ]; then
		bbnote "ISO image will not be created."
		return
	fi
	# ${INITRD} is a list of multiple filesystem images
	for fs in ${INITRD}
	do
		if [ ! -s "$fs" ]; then
			bbwarn "ISO image will not be created. $fs is invalid."
			return
		fi
	done

	populate_live ${ISODIR}

	if [ "${PCBIOS}" = "1" ]; then
		syslinux_iso_populate ${ISODIR}
	fi
	if [ "${EFI}" = "1" ]; then
		efi_iso_populate ${ISODIR}
		build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img
	fi

	# EFI only
	if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then
		# Work around bug in isohybrid where it requires isolinux.bin
		# in the boot catalog, even though it is not used
		mkdir -p ${ISODIR}/${ISOLINUXDIR}
		install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
	fi

	# We used to have support for zisofs; this is a relic of that
	mkisofs_compress_opts="-r"

	# Check the size of ${ISODIR}/rootfs.img, use mkisofs -iso-level 3
	# when it exceeds 3.8GB, the specification is 4G - 1 bytes, we need
	# leave a few space for other files.
	mkisofs_iso_level=""

	if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
		rootfs_img_size=`stat -c '%s' ${ISODIR}/rootfs.img`
		# 4080218931 = 3.8 * 1024 * 1024 * 1024
		if [ $rootfs_img_size -gt 4080218931 ]; then
			bbnote "${ISODIR}/rootfs.img execeeds 3.8GB, using '-iso-level 3' for mkisofs"
			mkisofs_iso_level="-iso-level 3"
		fi
	fi

	if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
		# PCBIOS only media
		mkisofs -V ${BOOTIMG_VOLUME_ID} \
			-o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \
			-b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
			$mkisofs_compress_opts \
			${MKISOFS_OPTIONS} $mkisofs_iso_level ${ISODIR}
	else
		# EFI only OR EFI+PCBIOS
		mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
			-o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \
			-b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
			$mkisofs_compress_opts ${MKISOFS_OPTIONS} $mkisofs_iso_level \
			-eltorito-alt-boot -eltorito-platform efi \
			-b efi.img -no-emul-boot \
			${ISODIR}
		isohybrid_args="-u"
	fi

	isohybrid $isohybrid_args ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso
}
|
||||
|
||||
# Create a FAT filesystem image at $2 containing the directory tree rooted
# at $1.  The image is sized from the payload plus an estimate of the FAT
# metadata overhead, padded by BOOTIMG_EXTRA_SPACE (in 1K blocks).
build_fat_img() {
	FATSOURCEDIR=$1
	FATIMG=$2

	# Calculate the size required for the final image including the
	# data and filesystem overhead.
	# Sectors: 512 bytes
	# Blocks: 1024 bytes

	# Determine the sector count just for the data
	SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)

	# Account for the filesystem overhead. This includes directory
	# entries in the clusters as well as the FAT itself.
	# Assumptions:
	#	FAT32 (12 or 16 may be selected by mkdosfs, but the extra
	#	padding will be minimal on those smaller images and not
	#	worth the logic here to calculate the smaller FAT sizes)
	#	< 16 entries per directory
	#	8.3 filenames only

	# 32 bytes per dir entry
	DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
	# 32 bytes for every end-of-directory dir entry
	DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
	# 4 bytes per FAT entry per sector of data
	FAT_BYTES=$(expr $SECTORS \* 4)
	# 4 bytes per FAT entry per end-of-cluster list
	FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))

	# Use a ceiling function to determine FS overhead in sectors
	DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
	# There are two FATs on the image
	FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
	SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))

	# Determine the final size in blocks accounting for some padding
	BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})

	# mkdosfs will sometimes use FAT16 when it is not appropriate,
	# resulting in a boot failure from SYSLINUX. Use FAT32 for
	# images larger than 512MB, otherwise let mkdosfs decide.
	if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
		FATSIZE="-F 32"
	fi

	# mkdosfs will fail if ${FATIMG} exists. Since we are creating a
	# new image, it is safe to delete any previous image.
	if [ -e ${FATIMG} ]; then
		rm ${FATIMG}
	fi

	if [ -z "${HDDIMG_ID}" ]; then
		mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
			${BLOCKS}
	else
		# Honour the caller-supplied FAT volume id.
		mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
			${BLOCKS} -i ${HDDIMG_ID}
	fi

	# Copy FATSOURCEDIR recursively into the image file directly
	mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
}
|
||||
|
||||
# Assemble the bootable FAT ".hddimg" artifact when "live" or "hddimg" is in
# IMAGE_FSTYPES: stage kernel + rootfs, add the firmware-specific boot
# files, pack the tree with build_fat_img and install the syslinux boot
# sector for PCBIOS targets.
build_hddimg() {
	# Create an HDD image
	if [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live hddimg', '1', '0', d)}" = "1" ] ; then
		populate_live ${HDDDIR}

		if [ "${PCBIOS}" = "1" ]; then
			syslinux_hddimg_populate ${HDDDIR}
		fi
		if [ "${EFI}" = "1" ]; then
			efi_hddimg_populate ${HDDDIR}
		fi

		# Check the size of ${HDDDIR}/rootfs.img, error out if it
		# exceeds 4GB, it is the single file's max size of FAT fs.
		if [ -f ${HDDDIR}/rootfs.img ]; then
			rootfs_img_size=`stat -c '%s' ${HDDDIR}/rootfs.img`
			max_size=`expr 4 \* 1024 \* 1024 \* 1024`
			if [ $rootfs_img_size -ge $max_size ]; then
				bberror "${HDDDIR}/rootfs.img rootfs size is greather than or equal to 4GB,"
				bberror "and this doesn't work on a FAT filesystem. You can either:"
				bberror "1) Reduce the size of rootfs.img, or,"
				bbfatal "2) Use wic, vmdk,vhd, vhdx or vdi instead of hddimg\n"
			fi
		fi

		build_fat_img ${HDDDIR} ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg

		if [ "${PCBIOS}" = "1" ]; then
			syslinux_hddimg_install
		fi

		chmod 644 ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
	fi
}
|
||||
|
||||
# Top-level live-image task: generate boot configuration for whichever
# firmware types are enabled (PCBIOS syslinux and/or EFI), then build the
# hddimg and iso artifacts and symlink them into the deploy directory.
python do_bootimg() {
    set_live_vm_vars(d, 'LIVE')
    if d.getVar("PCBIOS") == "1":
        bb.build.exec_func('build_syslinux_cfg', d)
    if d.getVar("EFI") == "1":
        bb.build.exec_func('build_efi_cfg', d)
    # build_hddimg/build_iso are no-ops unless the matching type is enabled.
    bb.build.exec_func('build_hddimg', d)
    bb.build.exec_func('build_iso', d)
    bb.build.exec_func('create_symlinks', d)
}
|
||||
do_bootimg[subimages] = "hddimg iso"
|
||||
|
||||
addtask bootimg before do_image_complete after do_rootfs
|
||||
@@ -0,0 +1,29 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Gather existing and candidate postinst intercepts from BBPATH
|
||||
POSTINST_INTERCEPTS_DIR ?= "${COREBASE}/scripts/postinst-intercepts"
|
||||
POSTINST_INTERCEPTS_PATHS ?= "${@':'.join('%s/postinst-intercepts' % p for p in '${BBPATH}'.split(':'))}:${POSTINST_INTERCEPTS_DIR}"
|
||||
|
||||
# Gather the effective set of postinst-intercept scripts before the recipe
# is finalised, honouring FILESOVERRIDES so more specific override
# directories shadow generic ones.  Records both the chosen scripts and a
# checksum string so dependent tasks re-run when any candidate script
# appears, disappears or is shadowed.
# (Cleanup: removed the unused 'intercepts' dict and the dead
# 'search_paths = []' initialisation present in the original.)
python find_intercepts() {
    paths = d.getVar('POSTINST_INTERCEPTS_PATHS').split(':')
    # The leading ':' yields an empty first override, i.e. the plain
    # directory itself is always searched.
    overrides = (':' + d.getVar('FILESOVERRIDES')).split(':') + ['']
    search_paths = [os.path.join(p, op) for p in paths for op in overrides]
    searched = oe.path.which_wild('*', ':'.join(search_paths), candidates=True)

    files, chksums = [], []
    for pathname, candidates in searched:
        if os.path.isfile(pathname):
            files.append(pathname)
            chksums.append('%s:True' % pathname)
            # Shadowed candidates are tracked as absent so their
            # (re)appearance also perturbs the checksum.
            chksums.extend('%s:False' % c for c in candidates[:-1])

    d.setVar('POSTINST_INTERCEPT_CHECKSUMS', ' '.join(chksums))
    d.setVar('POSTINST_INTERCEPTS', ' '.join(files))
}
|
||||
find_intercepts[eventmask] += "bb.event.RecipePreFinalise"
|
||||
addhandler find_intercepts
|
||||
678
sources/poky/meta/classes-recipe/image.bbclass
Normal file
678
sources/poky/meta/classes-recipe/image.bbclass
Normal file
@@ -0,0 +1,678 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
IMAGE_CLASSES ??= ""
|
||||
|
||||
# rootfs bootstrap install
|
||||
# warning - image-container resets this
|
||||
ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts"
|
||||
|
||||
# Handle inherits of any of the image classes we need
|
||||
IMGCLASSES = "rootfs_${IMAGE_PKGTYPE} image_types ${IMAGE_CLASSES}"
|
||||
# Only Linux SDKs support populate_sdk_ext, fall back to populate_sdk_base
|
||||
# in the non-Linux SDK_OS case, such as mingw32
|
||||
inherit populate_sdk_base
|
||||
IMGCLASSES += "${@['', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
|
||||
IMGCLASSES += "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso hddimg', 'image-live', '', d)}"
|
||||
IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-container', '', d)}"
|
||||
IMGCLASSES += "image_types_wic"
|
||||
IMGCLASSES += "rootfs-postcommands"
|
||||
IMGCLASSES += "image-postinst-intercepts"
|
||||
IMGCLASSES += "overlayfs-etc"
|
||||
inherit_defer ${IMGCLASSES}
|
||||
|
||||
TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
|
||||
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
|
||||
POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks"
|
||||
|
||||
LICENSE ?= "MIT"
|
||||
PACKAGES = ""
|
||||
DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native"
|
||||
RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}"
|
||||
RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
|
||||
PATH:prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
|
||||
|
||||
INHIBIT_DEFAULT_DEPS = "1"
|
||||
|
||||
# IMAGE_FEATURES may contain any available package group
|
||||
IMAGE_FEATURES ?= ""
|
||||
IMAGE_FEATURES[type] = "list"
|
||||
IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login serial-autologin-root post-install-logging overlayfs-etc"
|
||||
|
||||
# Generate companion debugfs?
|
||||
IMAGE_GEN_DEBUGFS ?= "0"
|
||||
|
||||
# These packages will be installed as additional into debug rootfs
|
||||
IMAGE_INSTALL_DEBUGFS ?= ""
|
||||
|
||||
# These packages will be removed from a read-only rootfs after all other
|
||||
# packages have been installed
|
||||
ROOTFS_RO_UNNEEDED ??= "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}"
|
||||
|
||||
# packages to install from features
|
||||
FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
|
||||
FEATURE_INSTALL[vardepvalue] = "${FEATURE_INSTALL}"
|
||||
FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
|
||||
FEATURE_INSTALL_OPTIONAL[vardepvalue] = "${FEATURE_INSTALL_OPTIONAL}"
|
||||
|
||||
# Define some very basic feature package groups
|
||||
FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}"
|
||||
SPLASH ?= "${@bb.utils.contains("MACHINE_FEATURES", "screen", "psplash", "", d)}"
|
||||
FEATURE_PACKAGES_splash = "${SPLASH}"
|
||||
|
||||
IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
|
||||
|
||||
def check_image_features(d):
    """Abort recipe parsing if IMAGE_FEATURES contains an unknown feature.

    Valid features come from the IMAGE_FEATURES[validitems] flag, the keys
    of COMPLEMENTARY_GLOB, and any FEATURE_PACKAGES_<name> variable.
    """
    prefix = "FEATURE_PACKAGES_"
    valid = (d.getVarFlag('IMAGE_FEATURES', 'validitems') or "").split()
    valid.extend(d.getVarFlags('COMPLEMENTARY_GLOB').keys())
    valid.extend(var[len(prefix):] for var in d if var.startswith(prefix))
    valid.sort()

    for feature in set(oe.data.typed_value('IMAGE_FEATURES', d)):
        if feature in valid:
            continue
        # Point the user at the variable they actually set.
        if bb.utils.contains('EXTRA_IMAGE_FEATURES', feature, True, False, d):
            raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES (added via EXTRA_IMAGE_FEATURES) is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid)))
        else:
            raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid)))
|
||||
|
||||
IMAGE_INSTALL ?= ""
|
||||
IMAGE_INSTALL[type] = "list"
|
||||
export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}"
|
||||
PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
|
||||
|
||||
IMGDEPLOYDIR = "${WORKDIR}/deploy-${PN}-image-complete"
|
||||
|
||||
# Images are generally built explicitly, do not need to be part of world.
|
||||
EXCLUDE_FROM_WORLD = "1"
|
||||
|
||||
USE_DEVFS ?= "1"
|
||||
USE_DEPMOD ?= "1"
|
||||
|
||||
PID = "${@os.getpid()}"
|
||||
|
||||
PACKAGE_ARCH = "${MACHINE_ARCH}"
|
||||
SSTATE_ARCHS_TUNEPKG = "${@all_multilib_tune_values(d, 'TUNE_PKGARCH')}"
|
||||
|
||||
LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
|
||||
LDCONFIGDEPEND:libc-musl = ""
|
||||
|
||||
# This is needed to have depmod data in PKGDATA_DIR,
|
||||
# but if you're building small initramfs image
|
||||
# e.g. to include it in your kernel, you probably
|
||||
# don't want this dependency, which is causing dependency loop
|
||||
KERNELDEPMODDEPEND ?= "virtual/kernel:do_packagedata"
|
||||
|
||||
do_rootfs[depends] += " \
|
||||
makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND} \
|
||||
virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot \
|
||||
${KERNELDEPMODDEPEND} \
|
||||
"
|
||||
do_rootfs[recrdeptask] += "do_packagedata"
|
||||
|
||||
def rootfs_command_variables(d):
    """Return the names of all pre/post-processing command-list variables
    whose contents must be tracked as task dependencies (see the anonymous
    python fragment that sets their 'vardeps' flags)."""
    return [
        'ROOTFS_POSTPROCESS_COMMAND',
        'ROOTFS_PREPROCESS_COMMAND',
        'ROOTFS_POSTINSTALL_COMMAND',
        'ROOTFS_POSTUNINSTALL_COMMAND',
        'OPKG_PREPROCESS_COMMANDS',
        'OPKG_POSTPROCESS_COMMANDS',
        'IMAGE_POSTPROCESS_COMMAND',
        'IMAGE_PREPROCESS_COMMAND',
        'RPM_PREPROCESS_COMMANDS',
        'RPM_POSTPROCESS_COMMANDS',
        'DEB_PREPROCESS_COMMANDS',
        'DEB_POSTPROCESS_COMMANDS',
    ]
|
||||
|
||||
python () {
    # Make the *contents* of each rootfs command variable part of the task
    # signature, so editing a command causes the affected tasks to re-run.
    for command_var in rootfs_command_variables(d):
        d.setVarFlag(command_var, 'vardeps', d.getVar(command_var))
}
|
||||
|
||||
def rootfs_variables(d):
    """Return a space-separated string of all variable names that must be
    folded into the do_rootfs task signature, combining the static list
    below with the command variables and oe.rootfs' own dependencies."""
    from oe.rootfs import variable_depends
    variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
                 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', 'IMAGE_LOCALES_ARCHIVE',
                 'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
                 'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
                 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
    variables.extend(rootfs_command_variables(d))
    variables.extend(variable_depends(d))
    return " ".join(variables)
|
||||
|
||||
do_rootfs[vardeps] += "${@rootfs_variables(d)}"
|
||||
|
||||
# This is needed to have kernel image in DEPLOY_DIR.
|
||||
# This follows many common usecases and user expectations.
|
||||
# But if you are building an image which doesn't need the kernel image at all,
|
||||
# you can unset this variable manually.
|
||||
KERNEL_DEPLOY_DEPEND ?= "virtual/kernel:do_deploy"
|
||||
do_build[depends] += "${KERNEL_DEPLOY_DEPEND}"
|
||||
|
||||
|
||||
python () {
    # Wire up EXTRA_IMAGEDEPENDS / image-type dependencies and normalise
    # IMAGE_FEATURES (apply REPLACES, reject CONFLICTS) at parse time.

    def extraimage_getdepends(task):
        # Map each EXTRA_IMAGEDEPENDS entry to "<recipe>:<task>", leaving
        # entries that already name an explicit task untouched.
        deps = ""
        for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
            if ":" in dep:
                deps += " %s " % (dep)
            else:
                deps += " %s:%s" % (dep, task)
        return deps

    d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot'))

    deps = " " + imagetypes_getdepends(d)
    d.appendVarFlag('do_rootfs', 'depends', deps)

    #process IMAGE_FEATURES, we must do this before runtime_mapping_rename
    #Check for replaces image features
    features = set(oe.data.typed_value('IMAGE_FEATURES', d))
    remain_features = features.copy()
    for feature in features:
        replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature) or "").split())
        remain_features -= replaces

    #Check for conflict image features
    for feature in remain_features:
        conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature) or "").split())
        temp = conflicts & remain_features
        if temp:
            bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN'), feature, ' '.join(list(temp))))

    # Store the normalised feature set back (sorted for determinism).
    d.setVar('IMAGE_FEATURES', ' '.join(sorted(list(remain_features))))

    check_image_features(d)
}
|
||||
|
||||
IMAGE_POSTPROCESS_COMMAND ?= ""
|
||||
|
||||
IMAGE_LINGUAS ??= ""
|
||||
|
||||
LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
|
||||
|
||||
# per default create a locale archive
|
||||
IMAGE_LOCALES_ARCHIVE ?= '1'
|
||||
|
||||
# Prefer image, but use the fallback files for lookups if the image ones
|
||||
# aren't yet available.
|
||||
PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
|
||||
|
||||
PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/intercept_scripts,${WORKDIR}/oe-rootfs-repo,${WORKDIR}/sstate-build-image_complete"
|
||||
|
||||
PACKAGE_EXCLUDE ??= ""
|
||||
PACKAGE_EXCLUDE[type] = "list"
|
||||
|
||||
# do_rootfs: construct the root filesystem from the resolved package lists.
# Applies PACKAGE_EXCLUDE filtering, multilib package-name remapping and the
# package manifest, then delegates to oe.rootfs.create_rootfs().
fakeroot python do_rootfs () {
    from oe.rootfs import create_rootfs
    from oe.manifest import create_manifest
    import oe.packagedata

    # Capture task log output so rootfs failures can be post-processed.
    logger = d.getVar('BB_TASK_LOGGER', False)
    if logger:
        logcatcher = bb.utils.LogCatcher()
        logger.addHandler(logcatcher)
    else:
        logcatcher = None

    # NOTE: if you add, remove or significantly refactor the stages of this
    # process then you should recalculate the weightings here. This is quite
    # easy to do - just change the MultiStageProgressReporter line temporarily
    # to pass debug=True as the last parameter and you'll get a printout of
    # the weightings as well as a map to the lines where next_stage() was
    # called. Of course this isn't critical, but it helps to keep the progress
    # reporting accurate.
    stage_weights = [1, 203, 354, 186, 65, 4228, 1, 353, 49, 330, 382, 23, 1]
    progress_reporter = bb.progress.MultiStageProgressReporter(d, stage_weights)
    progress_reporter.next_stage()

    # Handle package exclusions
    excl_pkgs = d.getVar("PACKAGE_EXCLUDE").split()
    inst_pkgs = d.getVar("PACKAGE_INSTALL").split()
    inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY").split()

    # Preserve the pre-exclusion install list for later inspection.
    d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
    d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))

    for pkg in excl_pkgs:
        if pkg in inst_pkgs:
            bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
            inst_pkgs.remove(pkg)

        if pkg in inst_attempt_pkgs:
            # Fix: report the ATTEMPTONLY list here; the original message
            # wrongly interpolated inst_pkgs.
            bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_attempt_pkgs))
            inst_attempt_pkgs.remove(pkg)

    d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
    d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))

    # Ensure we handle package name remapping
    # We have to delay the runtime_mapping_rename until just before rootfs runs
    # otherwise, the multilib renaming could step in and squash any fixups that
    # may have occurred.
    pn = d.getVar('PN')
    oe.packagedata.runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
    oe.packagedata.runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
    oe.packagedata.runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)

    # Generate the initial manifest
    create_manifest(d)

    progress_reporter.next_stage()

    # generate rootfs
    d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
    create_rootfs(d, progress_reporter=progress_reporter, logcatcher=logcatcher)

    progress_reporter.finish()
}
|
||||
do_rootfs[dirs] = "${TOPDIR}"
|
||||
do_rootfs[cleandirs] += "${IMAGE_ROOTFS} ${IMGDEPLOYDIR} ${S}"
|
||||
do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
|
||||
addtask rootfs after do_prepare_recipe_sysroot
|
||||
|
||||
fakeroot python do_image () {
    # Run the IMAGE_PREPROCESS_COMMAND hooks before any image types are
    # generated from the rootfs.
    from oe.utils import execute_pre_post_process

    d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
    execute_pre_post_process(d, d.getVar("IMAGE_PREPROCESS_COMMAND"))
}
|
||||
do_image[dirs] = "${TOPDIR}"
|
||||
addtask do_image after do_rootfs
|
||||
|
||||
fakeroot python do_image_complete () {
    # Run the IMAGE_POSTPROCESS_COMMAND hooks once all image types have
    # been generated.
    from oe.utils import execute_pre_post_process

    execute_pre_post_process(d, d.getVar("IMAGE_POSTPROCESS_COMMAND"))
}
|
||||
do_image_complete[dirs] = "${TOPDIR}"
|
||||
SSTATETASKS += "do_image_complete"
|
||||
SSTATE_SKIP_CREATION:task-image-complete = '1'
|
||||
do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
|
||||
do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
|
||||
do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
|
||||
addtask do_image_complete after do_image before do_build
|
||||
python do_image_complete_setscene () {
    # Restore do_image_complete output from shared state instead of rebuilding.
    sstate_setscene(d)
}
|
||||
addtask do_image_complete_setscene
|
||||
|
||||
# Add image-level QA/sanity checks to IMAGE_QA_COMMANDS
|
||||
#
|
||||
# IMAGE_QA_COMMANDS += " \
|
||||
# image_check_everything_ok \
|
||||
# "
|
||||
#
|
||||
# This task runs all functions in IMAGE_QA_COMMANDS after the rootfs
|
||||
# construction has completed in order to validate the resulting image.
|
||||
#
|
||||
# The functions should use ${IMAGE_ROOTFS} to find the unpacked rootfs
|
||||
# directory, which if QA passes will be the basis for the images.
|
||||
#
|
||||
# The functions are expected to call oe.qa.handle_error() to report any
|
||||
# problems.
|
||||
fakeroot python do_image_qa () {
    # Execute every QA function listed in IMAGE_QA_COMMANDS against the
    # finished rootfs, then fail the task if any of them recorded errors.
    for qa_func in (d.getVar('IMAGE_QA_COMMANDS') or '').split():
        bb.build.exec_func(qa_func, d)

    oe.qa.exit_if_errors(d)
}
|
||||
addtask do_image_qa after do_rootfs before do_image
|
||||
|
||||
SSTATETASKS += "do_image_qa"
|
||||
SSTATE_SKIP_CREATION:task-image-qa = '1'
|
||||
do_image_qa[sstate-inputdirs] = ""
|
||||
do_image_qa[sstate-outputdirs] = ""
|
||||
python do_image_qa_setscene () {
    # Restore do_image_qa results from shared state instead of re-running QA.
    sstate_setscene(d)
}
|
||||
addtask do_image_qa_setscene
|
||||
|
||||
def setup_debugfs_variables(d):
    """Switch the datastore over to building the '-dbg' debug filesystem.

    Suffixes the rootfs path and artifact names with '-dbg' so the debugfs
    output cannot collide with the regular image, flags the build via
    IMAGE_BUILDING_DEBUGFS and, when set, substitutes the debugfs-specific
    filesystem type list.
    """
    d.appendVar('IMAGE_ROOTFS', '-dbg')
    link_name = d.getVar('IMAGE_LINK_NAME')
    if link_name:
        d.appendVar('IMAGE_LINK_NAME', '-dbg')
    d.appendVar('IMAGE_NAME', '-dbg')
    d.setVar('IMAGE_BUILDING_DEBUGFS', 'true')
    # The debugfs image may request its own set of filesystem types.
    debug_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS')
    if debug_fstypes:
        d.setVar('IMAGE_FSTYPES', debug_fstypes)
|
||||
|
||||
# Task prefunc for the debugfs_* image tasks: redirect the datastore to the
# '-dbg' debug filesystem variables before the image command runs.
python setup_debugfs () {
    setup_debugfs_variables(d)
}
|
||||
|
||||
# Anonymous metadata function: dynamically generate one do_image_<type>
# task for every configured image type (and its debugfs_ variant),
# including the chain of conversion/compression commands and the task
# ordering between dependent types.
python () {
    vardeps = set()
    # We allow CONVERSIONTYPES to have duplicates. That avoids breaking
    # derived distros when OE-core or some other layer independently adds
    # the same type. There is still only one command for each type, but
    # presumably the commands will do the same when the type is the same,
    # even when added in different places.
    #
    # Without de-duplication, gen_conversion_cmds() below
    # would create the same compression command multiple times.
    ctypes = set(d.getVar('CONVERSIONTYPES').split())
    old_overrides = d.getVar('OVERRIDES', False)

    # Strip (possibly stacked) conversion suffixes, e.g. "ext4.gz" -> "ext4".
    def _image_base_type(type):
        basetype = type
        for ctype in ctypes:
            if type.endswith("." + ctype):
                basetype = type[:-len("." + ctype)]
                break

        if basetype != type:
            # New base type itself might be generated by a conversion command.
            basetype = _image_base_type(basetype)

        return basetype

    basetypes = {}
    alltypes = d.getVar('IMAGE_FSTYPES').split()
    typedeps = {}

    if d.getVar('IMAGE_GEN_DEBUGFS') == "1":
        debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS').split()
        for t in debugfs_fstypes:
            alltypes.append("debugfs_" + t)

    # Register type t in basetypes/typedeps, recursing into its
    # IMAGE_TYPEDEP dependencies so intermediate types are built too.
    def _add_type(t):
        baset = _image_base_type(t)
        input_t = t
        if baset not in basetypes:
            basetypes[baset]= []
        if t not in basetypes[baset]:
            basetypes[baset].append(t)
        debug = ""
        if t.startswith("debugfs_"):
            t = t[8:]
            debug = "debugfs_"
        deps = (d.getVar('IMAGE_TYPEDEP:' + t) or "").split()
        vardeps.add('IMAGE_TYPEDEP:' + t)
        if baset not in typedeps:
            typedeps[baset] = set()
        deps = [debug + dep for dep in deps]
        for dep in deps:
            if dep not in alltypes:
                alltypes.append(dep)
            _add_type(dep)
            basedep = _image_base_type(dep)
            typedeps[baset].add(basedep)

        if baset != input_t:
            _add_type(baset)

    for t in alltypes[:]:
        _add_type(t)

    d.appendVarFlag('do_image', 'vardeps', ' '.join(vardeps))

    # Masked types are skipped in both their plain and debugfs_ forms.
    maskedtypes = (d.getVar('IMAGE_TYPES_MASKED') or "").split()
    maskedtypes = [dbg + t for t in maskedtypes for dbg in ("", "debugfs_")]

    for t in basetypes:
        vardeps = set()
        cmds = []
        subimages = []
        realt = t

        if t in maskedtypes:
            continue

        localdata = bb.data.createCopy(d)
        debug = ""
        if t.startswith("debugfs_"):
            setup_debugfs_variables(localdata)
            debug = "setup_debugfs "
            realt = t[8:]
        localdata.setVar('OVERRIDES', '%s:%s' % (realt, old_overrides))
        localdata.setVar('type', realt)
        # Delete DATETIME so we don't expand any references to it now
        # This means the task's hash can be stable rather than having hardcoded
        # date/time values. It will get expanded at execution time.
        # Similarly TMPDIR since otherwise we see QA stamp comparison problems
        # Expand PV else it can trigger get_srcrev which can fail due to these variables being unset
        localdata.setVar('PV', d.getVar('PV'))
        localdata.delVar('DATETIME')
        localdata.delVar('DATE')
        localdata.delVar('TMPDIR')
        localdata.delVar('IMAGE_VERSION_SUFFIX')
        vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude') or '').split()
        for dep in vardepsexclude:
            localdata.delVar(dep)

        image_cmd = localdata.getVar("IMAGE_CMD")
        vardeps.add('IMAGE_CMD:' + realt)
        if image_cmd:
            cmds.append("\t" + image_cmd)
        else:
            bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t)
        cmds.append(localdata.expand("\tcd ${IMGDEPLOYDIR}"))

        # Since a copy of IMAGE_CMD:xxx will be inlined within do_image_xxx,
        # prevent a redundant copy of IMAGE_CMD:xxx being emitted as a function.
        d.delVarFlag('IMAGE_CMD:' + realt, 'func')

        rm_tmp_images = set()
        # Emit the conversion command chain needed to produce bt, recursing
        # first so the input of each conversion is generated before it is used.
        def gen_conversion_cmds(bt):
            for ctype in sorted(ctypes):
                if bt.endswith("." + ctype):
                    type = bt[0:-len(ctype) - 1]
                    if type.startswith("debugfs_"):
                        type = type[8:]
                    # Create input image first.
                    gen_conversion_cmds(type)
                    localdata.setVar('type', type)
                    cmd = "\t" + localdata.getVar("CONVERSION_CMD:" + ctype)
                    if cmd not in cmds:
                        cmds.append(cmd)
                    vardeps.add('CONVERSION_CMD:' + ctype)
                    subimage = type + "." + ctype
                    if subimage not in subimages:
                        subimages.append(subimage)
                    if type not in alltypes:
                        rm_tmp_images.add(localdata.expand("${IMAGE_NAME}.${type}"))

        for bt in basetypes[t]:
            gen_conversion_cmds(bt)

        localdata.setVar('type', realt)
        if t not in alltypes:
            rm_tmp_images.add(localdata.expand("${IMAGE_NAME}.${type}"))
        else:
            subimages.append(realt)

        # Clean up after applying all conversion commands. Some of them might
        # use the same input, therefore we cannot delete sooner without applying
        # some complex dependency analysis.
        for image in sorted(rm_tmp_images):
            cmds.append("\trm " + image)

        after = 'do_image'
        for dep in typedeps[t]:
            after += ' do_image_%s' % dep.replace("-", "_").replace(".", "_")

        task = "do_image_%s" % t.replace("-", "_").replace(".", "_")

        d.setVar(task, '\n'.join(cmds))
        d.setVarFlag(task, 'func', '1')
        d.setVarFlag(task, 'fakeroot', '1')

        d.appendVarFlag(task, 'prefuncs', ' ' + debug + ' set_image_size')
        d.prependVarFlag(task, 'postfuncs', 'create_symlinks ')
        d.appendVarFlag(task, 'subimages', ' ' + ' '.join(subimages))
        d.appendVarFlag(task, 'vardeps', ' ' + ' '.join(vardeps))
        d.appendVarFlag(task, 'vardepsexclude', ' DATETIME DATE ' + ' '.join(vardepsexclude))

        bb.debug(2, "Adding task %s before %s, after %s" % (task, 'do_image_complete', after))
        bb.build.addtask(task, 'do_image_complete', after, d)
}
|
||||
|
||||
#
|
||||
# Compute the rootfs size
|
||||
#
|
||||
def get_rootfs_size(d):
    """
    Work out the size (in KiB) the rootfs image should be created with.

    The measured rootfs directory size is scaled by IMAGE_OVERHEAD_FACTOR,
    raised to at least IMAGE_ROOTFS_SIZE, padded with
    IMAGE_ROOTFS_EXTRA_SPACE, rounded up and aligned to
    IMAGE_ROOTFS_ALIGNMENT. The result is then checked against
    IMAGE_ROOTFS_MAXSIZE and, for initramfs images, INITRAMFS_MAXSIZE
    (exceeding either is a fatal build error).
    """
    import subprocess, oe.utils

    rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT'))
    overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR'))
    rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE'))
    # IMAGE_ROOTFS_EXTRA_SPACE may be an arithmetic expression, hence eval()
    rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE'))
    rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE')
    image_fstypes = d.getVar('IMAGE_FSTYPES') or ''
    initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES') or ''
    initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE')

    size_kb = oe.utils.directory_size(d.getVar("IMAGE_ROOTFS")) / 1024

    # Scale by the overhead factor, then apply the requested minimum size
    # plus the fixed extra space.
    base_size = size_kb * overhead_factor
    bb.debug(1, '%f = %d * %f' % (base_size, size_kb, overhead_factor))
    padded_size = max(base_size, rootfs_req_size) + rootfs_extra_space
    bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (padded_size, base_size, rootfs_req_size, max(base_size, rootfs_req_size), rootfs_extra_space))

    # Round up to a whole number of KiB.
    base_size = int(padded_size) if padded_size == int(padded_size) else int(padded_size + 1)
    bb.debug(1, '%f = int(%f)' % (base_size, padded_size))

    # Align upwards to the configured alignment.
    unaligned_size = base_size
    base_size = (base_size + rootfs_alignment - 1) // rootfs_alignment * rootfs_alignment
    bb.debug(1, '%d = aligned(%d)' % (base_size, unaligned_size))

    # Do not check image size of the debugfs image. This is not supposed
    # to be deployed, etc. so it doesn't make sense to limit the size
    # of the debug.
    if (d.getVar('IMAGE_BUILDING_DEBUGFS') or "") == "true":
        bb.debug(1, 'returning debugfs size %d' % (base_size))
        return base_size

    # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
    if rootfs_maxsize:
        rootfs_maxsize_int = int(rootfs_maxsize)
        if base_size > rootfs_maxsize_int:
            bb.fatal("The rootfs size %d(K) exceeds IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
                (base_size, rootfs_maxsize_int))

    # Check the initramfs size against INITRAMFS_MAXSIZE (if set)
    if image_fstypes != '' and image_fstypes == initramfs_fstypes and initramfs_maxsize:
        initramfs_maxsize_int = int(initramfs_maxsize)
        if base_size > initramfs_maxsize_int:
            bb.error("The initramfs size %d(K) exceeds INITRAMFS_MAXSIZE: %d(K)" % \
                (base_size, initramfs_maxsize_int))
            bb.error("You can set INITRAMFS_MAXSIZE a larger value. Usually, it should")
            bb.fatal("be less than 1/2 of ram size, or you may fail to boot it.\n")

    bb.debug(1, 'returning %d' % (base_size))
    return base_size
|
||||
|
||||
# Compute the target rootfs size once and export it so the generated
# image commands (and wic) can consume ${ROOTFS_SIZE}.
python set_image_size () {
    size = get_rootfs_size(d)
    d.setVar('ROOTFS_SIZE', str(size))
    d.setVarFlag('ROOTFS_SIZE', 'export', '1')
}
|
||||
|
||||
#
|
||||
# Create symlinks to the newly created image
|
||||
#
|
||||
# Create ${IMAGE_LINK_NAME}.<type> symlinks in the deploy directory for
# every sub-image produced by the current do_image_<type> task.
python create_symlinks() {

    deploy_dir = d.getVar('IMGDEPLOYDIR')
    img_name = d.getVar('IMAGE_NAME')
    link_name = d.getVar('IMAGE_LINK_NAME')
    manifest_name = d.getVar('IMAGE_MANIFEST')
    taskname = d.getVar("BB_CURRENTTASK")
    subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()

    # No IMAGE_LINK_NAME configured means no symlinks are wanted.
    if not link_name:
        return
    for suffix in subimages:
        target = img_name + "." + suffix
        link = os.path.join(deploy_dir, link_name + "." + suffix)
        if not os.path.exists(os.path.join(deploy_dir, target)):
            bb.note("Skipping symlink, source does not exist: %s -> %s" % (link, target))
            continue
        bb.note("Creating symlink: %s -> %s" % (link, target))
        # Replace a stale link from an earlier build before re-creating it.
        if os.path.islink(link):
            os.remove(link)
        os.symlink(target, link)
}
|
||||
|
||||
MULTILIBRE_ALLOW_REP += "${base_bindir} ${base_sbindir} ${bindir} ${sbindir} ${libexecdir} ${sysconfdir} ${nonarch_base_libdir}/udev /lib/modules/[^/]*/modules.*"
|
||||
MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
|
||||
MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
|
||||
|
||||
do_fetch[noexec] = "1"
|
||||
do_unpack[noexec] = "1"
|
||||
do_patch[noexec] = "1"
|
||||
do_configure[noexec] = "1"
|
||||
do_compile[noexec] = "1"
|
||||
do_install[noexec] = "1"
|
||||
deltask do_populate_lic
|
||||
deltask do_populate_sysroot
|
||||
do_package[noexec] = "1"
|
||||
deltask do_package_qa
|
||||
deltask do_packagedata
|
||||
deltask do_package_write_ipk
|
||||
deltask do_package_write_deb
|
||||
deltask do_package_write_rpm
|
||||
|
||||
# Prepare the root links to point to the /usr counterparts.
|
||||
# Prepare the root links to point to the /usr counterparts (usrmerge).
# $1 is the root directory to operate on.
create_merged_usr_symlinks() {
    root="$1"
    install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
    ln -rs $root${base_bindir} $root/bin
    ln -rs $root${base_sbindir} $root/sbin
    ln -rs $root${base_libdir} $root/${baselib}

    # /lib may additionally live in a non-architecture-specific directory
    if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
        install -d $root${nonarch_base_libdir}
        ln -rs $root${nonarch_base_libdir} $root/lib
    fi

    # create base links for multilibs
    variants="${@d.getVar('MULTILIB_VARIANTS')}"
    for mldir in $variants; do
        install -d $root${exec_prefix}/$mldir
        ln -rs $root${exec_prefix}/$mldir $root/$mldir
    done
}
|
||||
|
||||
# usrmerge: create the top-level /bin, /sbin, /lib links in the image rootfs.
create_merged_usr_symlinks_rootfs() {
    create_merged_usr_symlinks ${IMAGE_ROOTFS}
}
|
||||
|
||||
# usrmerge: create the top-level links in the SDK target sysroot.
create_merged_usr_symlinks_sdk() {
    create_merged_usr_symlinks ${SDK_OUTPUT}${SDKTARGETSYSROOT}
}
|
||||
|
||||
ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs', '',d)}"
|
||||
POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk', '',d)}"
|
||||
|
||||
# Give every file in the image a deterministic mtime so the final image is
# reproducible. The timestamp can be preset via REPRODUCIBLE_TIMESTAMP_ROOTFS.
reproducible_final_image_task () {
    if [ -z "$REPRODUCIBLE_TIMESTAMP_ROOTFS" ]; then
        # Prefer the commit timestamp of the metadata checkout ...
        REPRODUCIBLE_TIMESTAMP_ROOTFS=$(git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null) || true
        if [ -z "$REPRODUCIBLE_TIMESTAMP_ROOTFS" ]; then
            # ... falling back to the mtime of bitbake.conf when not a git tree.
            REPRODUCIBLE_TIMESTAMP_ROOTFS=$(stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")})
        fi
    fi
    # Set mtime of all files to a reproducible value
    bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
    find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS
}
|
||||
|
||||
# Apply the systemd preset policy (enable-only) to the image rootfs, but
# only when systemd is actually installed in the image.
systemd_preset_all () {
    if [ ! -e ${IMAGE_ROOTFS}${root_prefix}/lib/systemd/systemd ]; then
        return
    fi
    systemctl --root="${IMAGE_ROOTFS}" --preset-mode=enable-only preset-all
}
|
||||
|
||||
IMAGE_PREPROCESS_COMMAND:append = " ${@ 'systemd_preset_all' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task "
|
||||
|
||||
CVE_PRODUCT = ""
|
||||
391
sources/poky/meta/classes-recipe/image_types.bbclass
Normal file
391
sources/poky/meta/classes-recipe/image_types.bbclass
Normal file
@@ -0,0 +1,391 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# The default aligment of the size of the rootfs is set to 1KiB. In case
|
||||
# you're using the SD card emulation of a QEMU system simulator you may
|
||||
# set this value to 2048 (2MiB alignment).
|
||||
IMAGE_ROOTFS_ALIGNMENT ?= "1"
|
||||
|
||||
def imagetypes_getdepends(d):
    """
    Return the task dependencies needed to build all configured image
    types (IMAGE_FSTYPES plus IMAGE_FSTYPES_DEBUGFS), i.e. the native
    tools behind every conversion suffix in use, as a sorted
    space-separated "<recipe>:<task>" string.
    """
    def record_deps(depstr, deps):
        # Entries may name a bare recipe; default the task if none given.
        for entry in (depstr or "").split():
            if ":" not in entry:
                entry += ":do_populate_sysroot"
            deps.add(entry)

    # Take a type in the form of foo.bar.car and split it into the items
    # needed for the image deps "foo", and the conversion deps ["bar", "car"]
    def split_types(typestring):
        parts = typestring.split(".")
        return parts[0], parts[1:]

    fstypes = set((d.getVar('IMAGE_FSTYPES') or "").split())
    fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS') or "").split())

    deprecated = set()
    deps = set()
    for typestring in fstypes:
        basetype, conversions = split_types(typestring)

        if d.getVar("IMAGE_DEPENDS_%s" % basetype) is not None:
            deprecated.add("IMAGE_DEPENDS_%s" % basetype)

        # Type dependencies can introduce further conversion suffixes.
        for typedep in (d.getVar("IMAGE_TYPEDEP:%s" % basetype) or "").split():
            depbase, deprest = split_types(typedep)
            conversions += deprest

            if d.getVar("IMAGE_DEPENDS_%s" % depbase) is not None:
                deprecated.add("IMAGE_DEPENDS_%s" % depbase)

        for ctype in conversions:
            record_deps(d.getVar("CONVERSION_DEPENDS_%s" % ctype), deps)
            record_deps(d.getVar("COMPRESS_DEPENDS_%s" % ctype), deps)

    if deprecated:
        bb.fatal('Deprecated variable(s) found: "%s". '
                 'Use do_image_<type>[depends] += "<recipe>:<task>" instead' % ', '.join(deprecated))

    # Sorted so the returned string is deterministic
    return " ".join(sorted(deps))
|
||||
|
||||
XZ_COMPRESSION_LEVEL ?= "-6"
|
||||
XZ_INTEGRITY_CHECK ?= "crc32"
|
||||
|
||||
ZIP_COMPRESSION_LEVEL ?= "-9"
|
||||
|
||||
7ZIP_COMPRESSION_LEVEL ?= "9"
|
||||
7ZIP_COMPRESSION_METHOD ?= "BZip2"
|
||||
7ZIP_EXTENSION ?= "7z"
|
||||
|
||||
JFFS2_SUM_EXTRA_ARGS ?= ""
|
||||
IMAGE_CMD:jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}.jffs2 ${EXTRA_IMAGECMD}"
|
||||
|
||||
IMAGE_CMD:cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}.cramfs ${EXTRA_IMAGECMD}"
|
||||
|
||||
# Create an ext2/ext3/ext4 filesystem image from ${IMAGE_ROOTFS}.
# $1 is the filesystem type; any remaining arguments go to mkfs.
oe_mkext234fs () {
    fstype=$1
    extra_imagecmd=""

    if [ $# -gt 1 ]; then
        shift
        extra_imagecmd=$@
    fi

    # If generating an empty image the size of the sparse block should be large
    # enough to allocate an ext4 filesystem using 4096 bytes per inode, this is
    # about 60K, so dd needs a minimum count of 60, with bs=1024 (bytes per IO)
    local COUNT="0"
    local MIN_COUNT="60"
    if [ $ROOTFS_SIZE -lt $MIN_COUNT ]; then
        COUNT="$MIN_COUNT"
    fi
    # Create a sparse image block
    bbdebug 1 Executing "dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024"
    dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
    bbdebug 1 "Actual Rootfs size: `du -s ${IMAGE_ROOTFS}`"
    bbdebug 1 "Actual Partition size: `stat -c '%s' ${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype`"
    bbdebug 1 Executing "mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype -d ${IMAGE_ROOTFS}"
    mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype -d ${IMAGE_ROOTFS}
    # Error codes 0-3 indicate successful operation of fsck (no errors or errors corrected)
    fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}.$fstype || [ $? -le 3 ]
}
|
||||
|
||||
IMAGE_CMD:ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
|
||||
IMAGE_CMD:ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
|
||||
IMAGE_CMD:ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
|
||||
|
||||
MIN_BTRFS_SIZE ?= "16384"
|
||||
# Create a btrfs image from ${IMAGE_ROOTFS}. btrfs cannot cope with very
# small filesystems, so the target size is raised to MIN_BTRFS_SIZE (KiB)
# when the computed ROOTFS_SIZE is below it.
IMAGE_CMD:btrfs () {
    size=${ROOTFS_SIZE}
    if [ ${size} -lt ${MIN_BTRFS_SIZE} ] ; then
        size=${MIN_BTRFS_SIZE}
        bbwarn "Rootfs size is too small for BTRFS. Filesystem will be extended to ${size}K"
    fi
    # Sparse file of the final size, populated in place with mkfs.btrfs -r
    dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}.btrfs seek=${size} count=0 bs=1024
    mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}.btrfs
}
|
||||
|
||||
# Build a squashfs image. $1 selects the compressor ('' = mksquashfs
# default); the remaining arguments are passed through to mksquashfs.
oe_mksquashfs () {
    local comp=$1; shift
    local extra_imagecmd="$@"

    # zstd-compressed images conventionally use the short "zst" suffix.
    case "$comp" in
        zstd) suffix="zst" ;;
    esac

    # Use the bitbake reproducible timestamp instead of the hardcoded squashfs one
    export SOURCE_DATE_EPOCH=$(stat -c '%Y' ${IMAGE_ROOTFS})
    mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}.squashfs${comp:+-}${suffix:-$comp} -noappend ${comp:+-comp }$comp $extra_imagecmd
}
|
||||
IMAGE_CMD:squashfs = "oe_mksquashfs '' ${EXTRA_IMAGECMD}"
|
||||
IMAGE_CMD:squashfs-xz = "oe_mksquashfs xz ${EXTRA_IMAGECMD}"
|
||||
IMAGE_CMD:squashfs-lzo = "oe_mksquashfs lzo ${EXTRA_IMAGECMD}"
|
||||
IMAGE_CMD:squashfs-lz4 = "oe_mksquashfs lz4 ${EXTRA_IMAGECMD}"
|
||||
IMAGE_CMD:squashfs-zst = "oe_mksquashfs zstd ${EXTRA_IMAGECMD}"
|
||||
|
||||
IMAGE_CMD:erofs = "mkfs.erofs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}.erofs ${IMAGE_ROOTFS}"
|
||||
IMAGE_CMD:erofs-lz4 = "mkfs.erofs -zlz4 ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}.erofs-lz4 ${IMAGE_ROOTFS}"
|
||||
IMAGE_CMD:erofs-lz4hc = "mkfs.erofs -zlz4hc ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}.erofs-lz4hc ${IMAGE_ROOTFS}"
|
||||
|
||||
# Note that vfat can't handle all types of files that a real linux file system
|
||||
# can (e.g. device files, symlinks, etc.) and therefore it not suitable for all
|
||||
# use cases
|
||||
# Create a FAT filesystem of ROOTFS_SIZE KiB and copy the rootfs contents
# into it via mtools. Any arguments are passed through to mkfs.vfat.
oe_mkvfatfs () {
    mkfs.vfat $@ -C ${IMGDEPLOYDIR}/${IMAGE_NAME}.vfat ${ROOTFS_SIZE}
    mcopy -i "${IMGDEPLOYDIR}/${IMAGE_NAME}.vfat" -vsmpQ ${IMAGE_ROOTFS}/* ::/
}
|
||||
|
||||
IMAGE_CMD:vfat = "oe_mkvfatfs ${EXTRA_IMAGECMD}"
|
||||
|
||||
IMAGE_CMD_TAR ?= "tar"
|
||||
# ignore return code 1 "file changed as we read it" as other tasks(e.g. do_image_wic) may be hardlinking rootfs
|
||||
IMAGE_CMD:tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
|
||||
|
||||
do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
|
||||
# Create a newc-format cpio archive of the rootfs. For the real (non-dbg)
# image an /init entry is appended when the rootfs does not already
# provide one, so the archive is usable as an initramfs.
IMAGE_CMD:cpio () {
    (cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}.cpio)
    # We only need the /init symlink if we're building the real
    # image. The -dbg image doesn't need it! By being clever
    # about this we also avoid 'touch' below failing, as it
    # might be trying to touch /sbin/init on the host since both
    # the normal and the -dbg image share the same WORKDIR
    if [ "${IMAGE_BUILDING_DEBUGFS}" != "true" ]; then
        if [ ! -L ${IMAGE_ROOTFS}/init ] && [ ! -e ${IMAGE_ROOTFS}/init ]; then
            if [ -L ${IMAGE_ROOTFS}/sbin/init ] || [ -e ${IMAGE_ROOTFS}/sbin/init ]; then
                # Point /init at the rootfs' own /sbin/init, mirroring its mtime.
                ln -sf /sbin/init ${WORKDIR}/cpio_append/init
                touch -h -r ${IMAGE_ROOTFS}/sbin/init ${WORKDIR}/cpio_append/init
            else
                touch -r ${IMAGE_ROOTFS} ${WORKDIR}/cpio_append/init
            fi
            (cd ${WORKDIR}/cpio_append && echo ./init | cpio --reproducible -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}.cpio)
        fi
    fi
}
|
||||
|
||||
UBI_VOLNAME ?= "${MACHINE}-rootfs"
|
||||
UBI_VOLTYPE ?= "dynamic"
|
||||
UBI_IMGTYPE ?= "ubifs"
|
||||
|
||||
# Emit the ubinize configuration file for an (optionally named) UBI volume.
# $1 is the volume-name suffix: "" for the default volume or "_<name>".
write_ubi_config() {
    local vname="$1"

    cat <<EOF > ubinize${vname}-${IMAGE_NAME}.cfg
[ubifs]
mode=ubi
image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}.${UBI_IMGTYPE}
vol_id=0
vol_type=${UBI_VOLTYPE}
vol_name=${UBI_VOLNAME}
vol_flags=autoresize
EOF
}
|
||||
|
||||
# Create a UBIFS image and wrap it into a UBI volume.
#   $1: mkfs.ubifs arguments
#   $2: ubinize arguments
#   $3: optional volume name, appended to artifact names as "_<name>"
multiubi_mkfs() {
    local mkubifs_args="$1"
    local ubinize_args="$2"

    # Added prompt error message for ubi and ubifs image creation.
    if [ -z "$mkubifs_args" ] || [ -z "$ubinize_args" ]; then
        bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
    fi

    local vname=""
    if [ -n "$3" ]; then
        vname="_$3"
    fi
    write_ubi_config "${vname}"

    if [ -n "$vname" ]; then
        mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}.ubifs ${mkubifs_args}
    fi
    ubinize -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg

    # Cleanup cfg file
    mv ubinize${vname}-${IMAGE_NAME}.cfg ${IMGDEPLOYDIR}/

    # Create own symlinks for 'named' volumes
    if [ -n "$vname" ]; then
        cd ${IMGDEPLOYDIR}
        for ext in ubifs ubi; do
            if [ -e ${IMAGE_NAME}${vname}.$ext ]; then
                ln -sf ${IMAGE_NAME}${vname}.$ext \
                ${IMAGE_LINK_NAME}${vname}.$ext
            fi
        done
        cd -
    fi
}
|
||||
|
||||
MULTIUBI_ARGS = "MKUBIFS_ARGS UBINIZE_ARGS"
|
||||
|
||||
# Build one ubi/ubifs image per entry in MULTIUBI_BUILD, each configured
# via its own MKUBIFS_ARGS_<name> / UBINIZE_ARGS_<name> variables. The
# ${@...} expansion materialises those bitbake variables as shell
# assignments at parse time.
IMAGE_CMD:multiubi () {
    ${@' '.join(['%s_%s="%s";' % (arg, name, d.getVar('%s_%s' % (arg, name))) for arg in d.getVar('MULTIUBI_ARGS').split() for name in d.getVar('MULTIUBI_BUILD').split()])}
    # Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name>
    for name in ${MULTIUBI_BUILD}; do
        eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\"
        eval local ubinize_args=\"\$UBINIZE_ARGS_${name}\"

        multiubi_mkfs "${mkubifs_args}" "${ubinize_args}" "${name}"
    done
}
|
||||
|
||||
# Build a single UBI image using the global MKUBIFS_ARGS / UBINIZE_ARGS.
IMAGE_CMD:ubi () {
    multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}"
}
|
||||
IMAGE_TYPEDEP:ubi = "${UBI_IMGTYPE}"
|
||||
|
||||
IMAGE_CMD:ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.ubifs ${MKUBIFS_ARGS}"
|
||||
|
||||
MIN_F2FS_SIZE ?= "524288"
|
||||
# Create an F2FS image: sparse file, mkfs.f2fs, then populate with sload.f2fs.
IMAGE_CMD:f2fs () {
    # We need to add additional smarts here for devices smaller than 1.5G
    # Need to scale appropriately between 40M -> 1.5G as the "overprovision
    # ratio" goes down as the device gets bigger (70% -> 4.5%), below about
    # 500M the standard IMAGE_OVERHEAD_FACTOR does not work, so add additional
    # space here when under 500M
    size=${ROOTFS_SIZE}
    if [ ${size} -lt ${MIN_F2FS_SIZE} ] ; then
        size=${MIN_F2FS_SIZE}
        bbwarn "Rootfs size is too small for F2FS. Filesystem will be extended to ${size}K"
    fi
    dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}.f2fs seek=${size} count=0 bs=1024
    mkfs.f2fs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}.f2fs
    sload.f2fs -f ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}.f2fs
}
|
||||
|
||||
EXTRA_IMAGECMD = ""
|
||||
|
||||
inherit siteinfo kernel-arch image-artifact-names
|
||||
|
||||
JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
|
||||
JFFS2_ERASEBLOCK ?= "0x40000"
|
||||
EXTRA_IMAGECMD:jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
|
||||
|
||||
# Change these if you want default mkfs behavior (i.e. create minimal inode number)
|
||||
EXTRA_IMAGECMD:ext2 ?= "-i 4096"
|
||||
EXTRA_IMAGECMD:ext3 ?= "-i 4096"
|
||||
EXTRA_IMAGECMD:ext4 ?= "-i 4096"
|
||||
EXTRA_IMAGECMD:btrfs ?= "-n 4096 --shrink"
|
||||
EXTRA_IMAGECMD:f2fs ?= ""
|
||||
|
||||
# If a specific FAT size is needed, set it here (e.g. "-F 32"/"-F 16"/"-F 12")
|
||||
# otherwise mkfs.vfat will automatically pick one.
|
||||
EXTRA_IMAGECMD:vfat ?= ""
|
||||
|
||||
do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
|
||||
do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
|
||||
do_image_cramfs[depends] += "util-linux-native:do_populate_sysroot"
|
||||
do_image_ext2[depends] += "e2fsprogs-native:do_populate_sysroot"
|
||||
do_image_ext3[depends] += "e2fsprogs-native:do_populate_sysroot"
|
||||
do_image_ext4[depends] += "e2fsprogs-native:do_populate_sysroot"
|
||||
do_image_btrfs[depends] += "btrfs-tools-native:do_populate_sysroot"
|
||||
do_image_squashfs[depends] += "squashfs-tools-native:do_populate_sysroot"
|
||||
do_image_squashfs_xz[depends] += "squashfs-tools-native:do_populate_sysroot"
|
||||
do_image_squashfs_lzo[depends] += "squashfs-tools-native:do_populate_sysroot"
|
||||
do_image_squashfs_lz4[depends] += "squashfs-tools-native:do_populate_sysroot"
|
||||
do_image_squashfs_zst[depends] += "squashfs-tools-native:do_populate_sysroot"
|
||||
do_image_ubi[depends] += "mtd-utils-native:do_populate_sysroot"
|
||||
do_image_ubifs[depends] += "mtd-utils-native:do_populate_sysroot"
|
||||
do_image_multiubi[depends] += "mtd-utils-native:do_populate_sysroot"
|
||||
do_image_f2fs[depends] += "f2fs-tools-native:do_populate_sysroot"
|
||||
do_image_erofs[depends] += "erofs-utils-native:do_populate_sysroot"
|
||||
do_image_erofs_lz4[depends] += "erofs-utils-native:do_populate_sysroot"
|
||||
do_image_erofs_lz4hc[depends] += "erofs-utils-native:do_populate_sysroot"
|
||||
do_image_vfat[depends] += "dosfstools-native:do_populate_sysroot mtools-native:do_populate_sysroot"
|
||||
|
||||
# This variable is available to request which values are suitable for IMAGE_FSTYPES
|
||||
IMAGE_TYPES = " \
|
||||
jffs2 jffs2.sum \
|
||||
cramfs \
|
||||
ext2 ext2.gz ext2.bz2 ext2.lzma \
|
||||
ext3 ext3.gz \
|
||||
ext4 ext4.gz \
|
||||
btrfs \
|
||||
vfat \
|
||||
squashfs squashfs-xz squashfs-lzo squashfs-lz4 squashfs-zst \
|
||||
ubi ubifs multiubi \
|
||||
tar tar.gz tar.bz2 tar.xz tar.lz4 tar.zst \
|
||||
cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 cpio.zst \
|
||||
wic wic.gz wic.bz2 wic.lzma wic.zst \
|
||||
container \
|
||||
f2fs \
|
||||
erofs erofs-lz4 erofs-lz4hc \
|
||||
"
|
||||
# These image types are x86 specific as they need syslinux
|
||||
IMAGE_TYPES:append:x86 = " hddimg iso"
|
||||
IMAGE_TYPES:append:x86-64 = " hddimg iso"
|
||||
|
||||
# Compression is a special case of conversion. The old variable
|
||||
# names are still supported for backward-compatibility. When defining
|
||||
# new compression or conversion commands, use CONVERSIONTYPES and
|
||||
# CONVERSION_CMD/DEPENDS.
|
||||
COMPRESSIONTYPES ?= ""
|
||||
|
||||
CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip 7zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vhd vhdx vdi qcow2 base64 gzsync zsync ${COMPRESSIONTYPES}"
|
||||
CONVERSION_CMD:lzma = "lzma -k -f -7 ${IMAGE_NAME}.${type}"
|
||||
CONVERSION_CMD:gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.gz"
|
||||
CONVERSION_CMD:bz2 = "pbzip2 -f -k ${IMAGE_NAME}.${type}"
|
||||
CONVERSION_CMD:xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.xz"
|
||||
CONVERSION_CMD:lz4 = "lz4 -f -9 -z -l ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.lz4"
|
||||
CONVERSION_CMD:lzo = "lzop -f -9 ${IMAGE_NAME}.${type}"
|
||||
CONVERSION_CMD:zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}.${type}.zip ${IMAGE_NAME}.${type}"
|
||||
CONVERSION_CMD:7zip = "7za a -mx=${7ZIP_COMPRESSION_LEVEL} -mm=${7ZIP_COMPRESSION_METHOD} ${IMAGE_NAME}.${type}.${7ZIP_EXTENSION} ${IMAGE_NAME}.${type}"
|
||||
CONVERSION_CMD:zst = "zstd -f -k -c ${ZSTD_DEFAULTS} ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.zst"
|
||||
CONVERSION_CMD:sum = "sumtool -i ${IMAGE_NAME}.${type} -o ${IMAGE_NAME}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
|
||||
CONVERSION_CMD:md5sum = "md5sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.md5sum"
|
||||
CONVERSION_CMD:sha1sum = "sha1sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha1sum"
|
||||
CONVERSION_CMD:sha224sum = "sha224sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha224sum"
|
||||
CONVERSION_CMD:sha256sum = "sha256sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha256sum"
|
||||
CONVERSION_CMD:sha384sum = "sha384sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha384sum"
|
||||
CONVERSION_CMD:sha512sum = "sha512sum ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.sha512sum"
|
||||
CONVERSION_CMD:bmap = "bmaptool create ${IMAGE_NAME}.${type} -o ${IMAGE_NAME}.${type}.bmap"
|
||||
CONVERSION_CMD:u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.u-boot"
|
||||
CONVERSION_CMD:vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.vmdk"
|
||||
CONVERSION_CMD:vhdx = "qemu-img convert -O vhdx -o subformat=dynamic ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.vhdx"
|
||||
CONVERSION_CMD:vhd = "qemu-img convert -O vpc -o subformat=fixed ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.vhd"
|
||||
CONVERSION_CMD:vdi = "qemu-img convert -O vdi ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.vdi"
|
||||
CONVERSION_CMD:qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}.${type} ${IMAGE_NAME}.${type}.qcow2"
|
||||
CONVERSION_CMD:base64 = "base64 ${IMAGE_NAME}.${type} > ${IMAGE_NAME}.${type}.base64"
|
||||
CONVERSION_CMD:zsync = "zsyncmake_curl ${IMAGE_NAME}.${type}"
|
||||
CONVERSION_CMD:gzsync = "zsyncmake_curl -z ${IMAGE_NAME}.${type}"
|
||||
CONVERSION_DEPENDS_lzma = "xz-native"
|
||||
CONVERSION_DEPENDS_gz = "pigz-native"
|
||||
CONVERSION_DEPENDS_bz2 = "pbzip2-native"
|
||||
CONVERSION_DEPENDS_xz = "xz-native"
|
||||
CONVERSION_DEPENDS_lz4 = "lz4-native"
|
||||
CONVERSION_DEPENDS_lzo = "lzop-native"
|
||||
CONVERSION_DEPENDS_zip = "zip-native"
|
||||
CONVERSION_DEPENDS_7zip = "p7zip-native"
|
||||
CONVERSION_DEPENDS_zst = "zstd-native"
|
||||
CONVERSION_DEPENDS_sum = "mtd-utils-native"
|
||||
CONVERSION_DEPENDS_bmap = "bmaptool-native"
|
||||
CONVERSION_DEPENDS_u-boot = "u-boot-tools-native"
|
||||
CONVERSION_DEPENDS_vmdk = "qemu-system-native"
|
||||
CONVERSION_DEPENDS_vdi = "qemu-system-native"
|
||||
CONVERSION_DEPENDS_qcow2 = "qemu-system-native"
|
||||
CONVERSION_DEPENDS_base64 = "coreutils-native"
|
||||
CONVERSION_DEPENDS_vhdx = "qemu-system-native"
|
||||
CONVERSION_DEPENDS_vhd = "qemu-system-native"
|
||||
CONVERSION_DEPENDS_zsync = "zsync-curl-native"
|
||||
CONVERSION_DEPENDS_gzsync = "zsync-curl-native"
|
||||
|
||||
RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
|
||||
RUNNABLE_MACHINE_PATTERNS ?= "qemu"
|
||||
|
||||
DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
|
||||
|
||||
# The IMAGE_TYPES_MASKED variable will be used to mask out from the IMAGE_FSTYPES,
|
||||
# images that will not be built at do_rootfs time: vmdk, vhd, vhdx, vdi, qcow2, hddimg, iso, etc.
|
||||
IMAGE_TYPES_MASKED ?= ""
|
||||
|
||||
# bmap requires python3 to be in the PATH
|
||||
EXTRANATIVEPATH += "${@'python3-native' if d.getVar('IMAGE_FSTYPES').find('.bmap') else ''}"
|
||||
207
sources/poky/meta/classes-recipe/image_types_wic.bbclass
Normal file
207
sources/poky/meta/classes-recipe/image_types_wic.bbclass
Normal file
@@ -0,0 +1,207 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# The WICVARS variable is used to define list of bitbake variables used in wic code
|
||||
# variables from this list is written to <image>.env file
|
||||
WICVARS ?= "\
|
||||
APPEND \
|
||||
ASSUME_PROVIDED \
|
||||
BBLAYERS \
|
||||
DEPLOY_DIR_IMAGE \
|
||||
FAKEROOTCMD \
|
||||
HOSTTOOLS_DIR \
|
||||
IMAGE_BASENAME \
|
||||
IMAGE_BOOT_FILES \
|
||||
IMAGE_EFI_BOOT_FILES \
|
||||
IMAGE_LINK_NAME \
|
||||
IMAGE_ROOTFS \
|
||||
IMGDEPLOYDIR \
|
||||
INITRAMFS_FSTYPES \
|
||||
INITRAMFS_IMAGE \
|
||||
INITRAMFS_IMAGE_BUNDLE \
|
||||
INITRAMFS_LINK_NAME \
|
||||
INITRD \
|
||||
INITRD_LIVE \
|
||||
ISODIR \
|
||||
KERNEL_IMAGETYPE \
|
||||
MACHINE \
|
||||
PSEUDO_IGNORE_PATHS \
|
||||
RECIPE_SYSROOT_NATIVE \
|
||||
ROOTFS_SIZE \
|
||||
STAGING_DATADIR \
|
||||
STAGING_DIR \
|
||||
STAGING_DIR_HOST \
|
||||
STAGING_LIBDIR \
|
||||
TARGET_SYS \
|
||||
"
|
||||
|
||||
inherit_defer ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
|
||||
|
||||
WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
|
||||
WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
|
||||
WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/wic' % p for p in '${BBPATH}'.split(':'))}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}"
|
||||
WKS_FULL_PATH = "${@wks_search(d.getVar('WKS_FILES').split(), d.getVar('WKS_SEARCH_PATH')) or ''}"
|
||||
|
||||
def wks_search(files, search_path):
    """Return the first kickstart file from *files* that can be located.

    Absolute candidates are accepted as-is when they exist on disk;
    relative names are resolved against the colon-separated
    *search_path*.  Returns None when nothing matches.
    """
    for candidate in files:
        if os.path.isabs(candidate):
            if os.path.exists(candidate):
                return candidate
            # Absolute but missing: move on to the next candidate.
            continue
        found = bb.utils.which(search_path, candidate)
        if found:
            return found
|
||||
|
||||
WIC_CREATE_EXTRA_ARGS ?= ""
|
||||
|
||||
IMAGE_CMD:wic () {
|
||||
out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
|
||||
build_wic="${WORKDIR}/build-wic"
|
||||
tmp_wic="${WORKDIR}/tmp-wic"
|
||||
wks="${WKS_FULL_PATH}"
|
||||
if [ -e "$tmp_wic" ]; then
|
||||
# Ensure we don't have any junk leftover from a previously interrupted
|
||||
# do_image_wic execution
|
||||
rm -rf "$tmp_wic"
|
||||
fi
|
||||
if [ -z "$wks" ]; then
|
||||
bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
|
||||
fi
|
||||
BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS}
|
||||
|
||||
# look to see if the user specifies a custom imager
|
||||
IMAGER=direct
|
||||
eval set -- "${WIC_CREATE_EXTRA_ARGS} --"
|
||||
while [ 1 ]; do
|
||||
case "$1" in
|
||||
--imager|-i)
|
||||
shift
|
||||
IMAGER=$1
|
||||
;;
|
||||
--)
|
||||
shift
|
||||
break
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
mv "$build_wic/$(basename "${wks%.wks}")"*.${IMAGER} "$out.wic"
|
||||
}
|
||||
IMAGE_CMD:wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
|
||||
do_image_wic[cleandirs] = "${WORKDIR}/build-wic"
|
||||
|
||||
PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/build-wic"
|
||||
|
||||
# Rebuild when the wks file or vars in WICVARS change
|
||||
USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
|
||||
WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
|
||||
do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}"
|
||||
do_image_wic[depends] += "${@' '.join('%s-native:do_populate_sysroot' % r for r in ('parted', 'gptfdisk', 'dosfstools', 'mtools'))}"
|
||||
|
||||
# We ensure all artifacts are deployed (e.g. virtual/bootloader)
|
||||
do_image_wic[recrdeptask] += "do_deploy"
|
||||
do_image_wic[deptask] += "do_image_complete"
|
||||
|
||||
WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
|
||||
WKS_FILE_DEPENDS_DEFAULT += "bmaptool-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native erofs-utils-native"
|
||||
# Unified kernel images need objcopy
|
||||
WKS_FILE_DEPENDS_DEFAULT += "virtual/${TARGET_PREFIX}binutils"
|
||||
WKS_FILE_DEPENDS_BOOTLOADERS = ""
|
||||
WKS_FILE_DEPENDS_BOOTLOADERS:x86 = "syslinux grub-efi systemd-boot os-release"
|
||||
WKS_FILE_DEPENDS_BOOTLOADERS:x86-64 = "syslinux grub-efi systemd-boot os-release"
|
||||
WKS_FILE_DEPENDS_BOOTLOADERS:x86-x32 = "syslinux grub-efi"
|
||||
|
||||
WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}"
|
||||
|
||||
DEPENDS += "${@ '${WKS_FILE_DEPENDS}' if d.getVar('USING_WIC') else '' }"
|
||||
|
||||
python do_write_wks_template () {
    """Write out expanded template contents to WKS_FULL_PATH.

    Strips any ${VAR} references that survived bitbake expansion
    (i.e. undefined variables), writes the result to WKS_FULL_PATH and
    copies the finalized file into IMGDEPLOYDIR for standalone wic use.
    """
    import re

    template_body = d.getVar('_WKS_TEMPLATE')

    # Remove any remnant variable references left behind by the expansion
    # due to undefined variables.  Substitute until a fixed point so that
    # nested references are fully stripped as well.
    expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}")
    while True:
        new_body = expand_var_regexp.sub('', template_body)
        if new_body == template_body:
            break
        template_body = new_body

    wks_file = d.getVar('WKS_FULL_PATH')
    # NOTE: the context manager closes the file; the previous explicit
    # f.close() inside the with-block was redundant and has been removed.
    with open(wks_file, 'w') as f:
        f.write(template_body)
    # Copy the finalized wks file to the deploy directory for later use
    depdir = d.getVar('IMGDEPLOYDIR')
    basename = d.getVar('IMAGE_BASENAME')
    bb.utils.copyfile(wks_file, "%s/%s" % (depdir, basename + '-' + os.path.basename(wks_file)))
}
|
||||
|
||||
do_flush_pseudodb() {
|
||||
${FAKEROOTENV} ${FAKEROOTCMD} -S
|
||||
}
|
||||
|
||||
python () {
|
||||
if d.getVar('USING_WIC'):
|
||||
wks_file_u = d.getVar('WKS_FULL_PATH', False)
|
||||
wks_file = d.expand(wks_file_u)
|
||||
base, ext = os.path.splitext(wks_file)
|
||||
if ext == '.in' and os.path.exists(wks_file):
|
||||
wks_out_file = os.path.join(d.getVar('WORKDIR'), os.path.basename(base))
|
||||
d.setVar('WKS_FULL_PATH', wks_out_file)
|
||||
d.setVar('WKS_TEMPLATE_PATH', wks_file_u)
|
||||
d.setVar('WKS_FILE_CHECKSUM', '${WKS_TEMPLATE_PATH}:True')
|
||||
|
||||
# We need to re-parse each time the file changes, and bitbake
|
||||
# needs to be told about that explicitly.
|
||||
bb.parse.mark_dependency(d, wks_file)
|
||||
|
||||
try:
|
||||
with open(wks_file, 'r') as f:
|
||||
body = f.read()
|
||||
except (IOError, OSError) as exc:
|
||||
pass
|
||||
else:
|
||||
# Previously, I used expandWithRefs to get the dependency list
|
||||
# and add it to WICVARS, but there's no point re-parsing the
|
||||
# file in process_wks_template as well, so just put it in
|
||||
# a variable and let the metadata deal with the deps.
|
||||
d.setVar('_WKS_TEMPLATE', body)
|
||||
bb.build.addtask('do_write_wks_template', 'do_image_wic', 'do_image', d)
|
||||
bb.build.addtask('do_image_wic', 'do_image_complete', None, d)
|
||||
}
|
||||
|
||||
#
|
||||
# Write environment variables used by wic
|
||||
# to tmp/sysroots/<machine>/imgdata/<image>.env
|
||||
#
|
||||
python do_rootfs_wicenv () {
    """Write the variables listed in WICVARS to <image>.env.

    The .env file is placed under STAGING_DIR/<machine>/imgdata/ and a
    copy is deployed to IMGDEPLOYDIR for later use with standalone wic.
    Does nothing when WICVARS is unset or empty.
    """
    wicvars = d.getVar('WICVARS')
    if not wicvars:
        return

    stdir = d.getVar('STAGING_DIR')
    outdir = os.path.join(stdir, d.getVar('MACHINE'), 'imgdata')
    bb.utils.mkdirhier(outdir)
    basename = d.getVar('IMAGE_BASENAME')
    # NOTE: the context manager closes the file; the previous explicit
    # envf.close() inside the with-block was redundant and has been removed.
    with open(os.path.join(outdir, basename) + '.env', 'w') as envf:
        for var in wicvars.split():
            value = d.getVar(var)
            # Skip unset/empty variables so the env file stays minimal.
            if value:
                envf.write('%s="%s"\n' % (var, value.strip()))
    # Copy .env file to deploy directory for later use with stand alone wic
    depdir = d.getVar('IMGDEPLOYDIR')
    bb.utils.copyfile(os.path.join(outdir, basename) + '.env', os.path.join(depdir, basename) + '.env')
}
|
||||
addtask do_flush_pseudodb after do_rootfs before do_image do_image_qa
|
||||
addtask do_rootfs_wicenv after do_image before do_image_wic
|
||||
do_rootfs_wicenv[vardeps] += "${WICVARS}"
|
||||
do_rootfs_wicenv[prefuncs] = 'set_image_size'
|
||||
86
sources/poky/meta/classes-recipe/kernel-arch.bbclass
Normal file
86
sources/poky/meta/classes-recipe/kernel-arch.bbclass
Normal file
@@ -0,0 +1,86 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
#
|
||||
# set the ARCH environment variable for kernel compilation (including
|
||||
# modules). return value must match one of the architecture directories
|
||||
# in the kernel source "arch" directory
|
||||
#
|
||||
|
||||
valid_archs = "alpha cris ia64 \
|
||||
i386 x86 \
|
||||
m68knommu m68k ppc powerpc powerpc64 ppc64 \
|
||||
sparc sparc64 \
|
||||
arm aarch64 \
|
||||
m32r mips \
|
||||
sh sh64 um h8300 \
|
||||
parisc s390 v850 \
|
||||
avr32 blackfin \
|
||||
loongarch64 \
|
||||
microblaze \
|
||||
nios2 arc riscv xtensa"
|
||||
|
||||
def map_kernel_arch(a, d):
|
||||
import re
|
||||
|
||||
valid_archs = d.getVar('valid_archs').split()
|
||||
|
||||
if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
|
||||
elif re.match('arceb$', a): return 'arc'
|
||||
elif re.match('armeb$', a): return 'arm'
|
||||
elif re.match('aarch64$', a): return 'arm64'
|
||||
elif re.match('aarch64_be$', a): return 'arm64'
|
||||
elif re.match('aarch64_ilp32$', a): return 'arm64'
|
||||
elif re.match('aarch64_be_ilp32$', a): return 'arm64'
|
||||
elif re.match('loongarch(32|64|)$', a): return 'loongarch'
|
||||
elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a): return 'mips'
|
||||
elif re.match('mcf', a): return 'm68k'
|
||||
elif re.match('riscv(32|64|)(eb|)$', a): return 'riscv'
|
||||
elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
|
||||
elif re.match('sh(3|4)$', a): return 'sh'
|
||||
elif re.match('bfin', a): return 'blackfin'
|
||||
elif re.match('microblazee[bl]', a): return 'microblaze'
|
||||
elif a in valid_archs: return a
|
||||
else:
|
||||
if not d.getVar("TARGET_OS").startswith("linux"):
|
||||
return a
|
||||
bb.error("cannot map '%s' to a linux kernel architecture" % a)
|
||||
|
||||
export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH'), d)}"
|
||||
|
||||
def map_uboot_arch(a, d):
    """Translate a kernel ARCH value into U-Boot's architecture name."""
    import re

    # First matching (regex, name) pair wins; everything else passes through.
    for pattern, uboot_arch in (('p(pc|owerpc)(|64)', 'ppc'), ('i.86$', 'x86')):
        if re.match(pattern, a):
            return uboot_arch
    return a
|
||||
|
||||
export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH'), d)}"
|
||||
|
||||
# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture
|
||||
# specific options necessary for building the kernel and modules.
|
||||
TARGET_CC_KERNEL_ARCH ?= ""
|
||||
HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
|
||||
TARGET_LD_KERNEL_ARCH ?= ""
|
||||
HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
|
||||
TARGET_AR_KERNEL_ARCH ?= ""
|
||||
HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
|
||||
TARGET_OBJCOPY_KERNEL_ARCH ?= ""
|
||||
HOST_OBJCOPY_KERNEL_ARCH ?= "${TARGET_OBJCOPY_KERNEL_ARCH}"
|
||||
|
||||
KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} \
|
||||
-fuse-ld=bfd ${DEBUG_PREFIX_MAP} \
|
||||
-fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} \
|
||||
-fmacro-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} \
|
||||
-fdebug-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH} \
|
||||
-fmacro-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH} \
|
||||
"
|
||||
KERNEL_LD = "${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
|
||||
KERNEL_AR = "${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
|
||||
KERNEL_OBJCOPY = "${HOST_PREFIX}objcopy ${HOST_OBJCOPY_KERNEL_ARCH}"
|
||||
# Code in package.py can't handle options on KERNEL_STRIP
|
||||
KERNEL_STRIP = "${HOST_PREFIX}strip"
|
||||
TOOLCHAIN ?= "gcc"
|
||||
@@ -0,0 +1,37 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
##################################################################
|
||||
# Specific kernel creation info
|
||||
# for recipes/bbclasses which need to reuse some of the kernel
|
||||
# artifacts, but aren't kernel recipes themselves
|
||||
##################################################################
|
||||
|
||||
inherit image-artifact-names
|
||||
|
||||
KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}${IMAGE_MACHINE_SUFFIX}${IMAGE_VERSION_SUFFIX}"
|
||||
KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}"
|
||||
KERNEL_ARTIFACT_BIN_EXT ?= ".bin"
|
||||
|
||||
KERNEL_IMAGE_NAME ?= "${KERNEL_ARTIFACT_NAME}"
|
||||
KERNEL_IMAGE_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
|
||||
KERNEL_IMAGE_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
|
||||
KERNEL_IMAGETYPE_SYMLINK ?= "1"
|
||||
|
||||
KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}"
|
||||
KERNEL_DTB_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
|
||||
KERNEL_DTB_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
|
||||
|
||||
KERNEL_FIT_NAME ?= "${KERNEL_ARTIFACT_NAME}"
|
||||
KERNEL_FIT_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
|
||||
KERNEL_FIT_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
|
||||
|
||||
MODULE_TARBALL_NAME ?= "${KERNEL_ARTIFACT_NAME}"
|
||||
MODULE_TARBALL_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
|
||||
MODULE_TARBALL_DEPLOY ?= "1"
|
||||
|
||||
INITRAMFS_NAME ?= "initramfs-${KERNEL_ARTIFACT_NAME}"
|
||||
INITRAMFS_LINK_NAME ?= "initramfs-${KERNEL_ARTIFACT_LINK_NAME}"
|
||||
139
sources/poky/meta/classes-recipe/kernel-devicetree.bbclass
Normal file
139
sources/poky/meta/classes-recipe/kernel-devicetree.bbclass
Normal file
@@ -0,0 +1,139 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Support for device tree generation
|
||||
python () {
    # Register the devicetree package(s), unless packaging is disabled
    # for this recipe via the nopackages class.
    if bb.data.inherits_class('nopackages', d):
        return
    d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-devicetree")
    if d.getVar('KERNEL_DEVICETREE_BUNDLE') == '1':
        d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle")
}
|
||||
|
||||
# recursively search for devicetree files
|
||||
FILES:${KERNEL_PACKAGE_NAME}-devicetree = " \
|
||||
/${KERNEL_DTBDEST}/**/*.dtb \
|
||||
/${KERNEL_DTBDEST}/**/*.dtbo \
|
||||
"
|
||||
|
||||
FILES:${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
|
||||
|
||||
# Generate kernel+devicetree bundle
|
||||
KERNEL_DEVICETREE_BUNDLE ?= "0"
|
||||
|
||||
# dtc flags passed via DTC_FLAGS env variable
|
||||
KERNEL_DTC_FLAGS ?= ""
|
||||
|
||||
# Normalize a KERNEL_DEVICETREE entry to a bare dtb file name.
# Entries that mistakenly carry a full dts path are reduced to the
# corresponding dtb basename, with a warning.
# Fix: the warning message contained a duplicated word ("the the").
normalize_dtb () {
	dtb="$1"
	if echo $dtb | grep -q '/dts/'; then
		bbwarn "$dtb contains the full path to the dts file, but only the dtb name should be used."
		dtb=`basename $dtb | sed 's,\.dts$,.dtb,g'`
	fi
	echo "$dtb"
}
|
||||
|
||||
# Resolve a dtb name to its location inside the kernel build tree:
# prefer arch/<ARCH>/boot/dts/, fall back to arch/<ARCH>/boot/.
get_real_dtb_path_in_kernel () {
	dtb="$1"
	dtb_path="${B}/arch/${ARCH}/boot/dts/$dtb"
	[ -e "$dtb_path" ] || dtb_path="${B}/arch/${ARCH}/boot/$dtb"
	echo "$dtb_path"
}
|
||||
|
||||
do_configure:append() {
|
||||
if [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
|
||||
if echo ${KERNEL_IMAGETYPE_FOR_MAKE} | grep -q 'zImage'; then
|
||||
case "${ARCH}" in
|
||||
"arm")
|
||||
config="${B}/.config"
|
||||
if ! grep -q 'CONFIG_ARM_APPENDED_DTB=y' $config; then
|
||||
bbwarn 'CONFIG_ARM_APPENDED_DTB is NOT enabled in the kernel. Enabling it to allow the kernel to boot with the Device Tree appended!'
|
||||
sed -i "/CONFIG_ARM_APPENDED_DTB[ =]/d" $config
|
||||
echo "CONFIG_ARM_APPENDED_DTB=y" >> $config
|
||||
echo "# CONFIG_ARM_ATAG_DTB_COMPAT is not set" >> $config
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
bberror "KERNEL_DEVICETREE_BUNDLE is not supported for ${ARCH}. Currently it is only supported for 'ARM'."
|
||||
esac
|
||||
else
|
||||
bberror 'The KERNEL_DEVICETREE_BUNDLE requires the KERNEL_IMAGETYPE to contain zImage.'
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
do_compile:append() {
|
||||
if [ -n "${KERNEL_DTC_FLAGS}" ]; then
|
||||
export DTC_FLAGS="${KERNEL_DTC_FLAGS}"
|
||||
fi
|
||||
|
||||
for dtbf in ${KERNEL_DEVICETREE}; do
|
||||
dtb=`normalize_dtb "$dtbf"`
|
||||
oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" STRIP="${KERNEL_STRIP}" ${KERNEL_EXTRA_ARGS}
|
||||
done
|
||||
}
|
||||
|
||||
do_install:append() {
|
||||
install -d ${D}/${KERNEL_DTBDEST}
|
||||
for dtbf in ${KERNEL_DEVICETREE}; do
|
||||
dtb=`normalize_dtb "$dtbf"`
|
||||
dtb_path=`get_real_dtb_path_in_kernel "$dtb"`
|
||||
if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then
|
||||
dtb_ext=${dtb##*.}
|
||||
dtb_base_name=`basename $dtb .$dtb_ext`
|
||||
dtb=$dtb_base_name.$dtb_ext
|
||||
fi
|
||||
install -Dm 0644 $dtb_path ${D}/${KERNEL_DTBDEST}/$dtb
|
||||
done
|
||||
}
|
||||
|
||||
do_deploy:append() {
|
||||
for dtbf in ${KERNEL_DEVICETREE}; do
|
||||
dtb=`normalize_dtb "$dtbf"`
|
||||
dtb_ext=${dtb##*.}
|
||||
dtb_base_name=`basename $dtb .$dtb_ext`
|
||||
install -d $deployDir
|
||||
if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then
|
||||
dtb=$dtb_base_name.$dtb_ext
|
||||
fi
|
||||
install -m 0644 ${D}/${KERNEL_DTBDEST}/$dtb $deployDir/$dtb_base_name.$dtb_ext
|
||||
if [ -n "${KERNEL_DTB_NAME}" ] ; then
|
||||
ln -sf $dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
|
||||
fi
|
||||
if [ -n "${KERNEL_DTB_LINK_NAME}" ] ; then
|
||||
ln -sf $dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
|
||||
fi
|
||||
for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
|
||||
if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
|
||||
cat ${D}/${KERNEL_IMAGEDEST}/$type \
|
||||
$deployDir/$dtb_base_name.$dtb_ext \
|
||||
> $deployDir/$type-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT}
|
||||
if [ -n "${KERNEL_DTB_NAME}" ]; then
|
||||
ln -sf $type-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} \
|
||||
$deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
|
||||
fi
|
||||
if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
|
||||
ln -sf $type-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} \
|
||||
$deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
|
||||
fi
|
||||
if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
|
||||
cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
|
||||
$deployDir/$dtb_base_name.$dtb_ext \
|
||||
> $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT}
|
||||
if [ -n "${KERNEL_DTB_NAME}" ]; then
|
||||
ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} \
|
||||
$deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
|
||||
fi
|
||||
if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
|
||||
ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name.$dtb_ext${KERNEL_DTB_BIN_EXT} \
|
||||
$deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
done
|
||||
done
|
||||
}
|
||||
830
sources/poky/meta/classes-recipe/kernel-fitimage.bbclass
Normal file
830
sources/poky/meta/classes-recipe/kernel-fitimage.bbclass
Normal file
@@ -0,0 +1,830 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit kernel-uboot kernel-artifact-names uboot-config
|
||||
require conf/image-fitimage.conf
|
||||
|
||||
def get_fit_replacement_type(d):
|
||||
kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
|
||||
replacementtype = ""
|
||||
if 'fitImage' in kerneltypes.split():
|
||||
uarch = d.getVar("UBOOT_ARCH")
|
||||
if uarch == "arm64":
|
||||
replacementtype = "Image"
|
||||
elif uarch == "riscv":
|
||||
replacementtype = "Image"
|
||||
elif uarch == "mips":
|
||||
replacementtype = "vmlinuz.bin"
|
||||
elif uarch == "x86":
|
||||
replacementtype = "bzImage"
|
||||
elif uarch == "microblaze":
|
||||
replacementtype = "linux.bin"
|
||||
else:
|
||||
replacementtype = "zImage"
|
||||
return replacementtype
|
||||
|
||||
KERNEL_IMAGETYPE_REPLACEMENT ?= "${@get_fit_replacement_type(d)}"
|
||||
DEPENDS:append = " ${@'u-boot-tools-native dtc-native' if 'fitImage' in (d.getVar('KERNEL_IMAGETYPES') or '').split() else ''}"
|
||||
|
||||
python __anonymous () {
|
||||
# Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
|
||||
# to kernel.bbclass . We have to override it, since we pack zImage
|
||||
# (at least for now) into the fitImage .
|
||||
typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
|
||||
if 'fitImage' in typeformake.split():
|
||||
d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('fitImage', d.getVar('KERNEL_IMAGETYPE_REPLACEMENT')))
|
||||
|
||||
image = d.getVar('INITRAMFS_IMAGE')
|
||||
if image:
|
||||
d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
|
||||
|
||||
ubootenv = d.getVar('UBOOT_ENV')
|
||||
if ubootenv:
|
||||
d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/bootloader:do_populate_sysroot')
|
||||
|
||||
#check if there are any dtb providers
|
||||
providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb")
|
||||
if providerdtb:
|
||||
d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/dtb:do_populate_sysroot')
|
||||
d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' virtual/dtb:do_populate_sysroot')
|
||||
d.setVar('EXTERNAL_KERNEL_DEVICETREE', "${RECIPE_SYSROOT}/boot/devicetree")
|
||||
}
|
||||
|
||||
#
|
||||
# Emit the fitImage ITS header
|
||||
#
|
||||
# $1 ... .its filename
|
||||
fitimage_emit_fit_header() {
|
||||
cat << EOF >> $1
|
||||
/dts-v1/;
|
||||
|
||||
/ {
|
||||
description = "${FIT_DESC}";
|
||||
#address-cells = <${FIT_ADDRESS_CELLS}>;
|
||||
EOF
|
||||
}
|
||||
|
||||
#
|
||||
# Emit the fitImage section bits
|
||||
#
|
||||
# $1 ... .its filename
|
||||
# $2 ... Section bit type: imagestart - image section start
|
||||
# confstart - configuration section start
|
||||
# sectend - section end
|
||||
# fitend - fitimage end
|
||||
#
|
||||
fitimage_emit_section_maint() {
|
||||
case $2 in
|
||||
imagestart)
|
||||
cat << EOF >> $1
|
||||
|
||||
images {
|
||||
EOF
|
||||
;;
|
||||
confstart)
|
||||
cat << EOF >> $1
|
||||
|
||||
configurations {
|
||||
EOF
|
||||
;;
|
||||
sectend)
|
||||
cat << EOF >> $1
|
||||
};
|
||||
EOF
|
||||
;;
|
||||
fitend)
|
||||
cat << EOF >> $1
|
||||
};
|
||||
EOF
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
#
|
||||
# Emit the fitImage ITS kernel section
|
||||
#
|
||||
# $1 ... .its filename
|
||||
# $2 ... Image counter
|
||||
# $3 ... Path to kernel image
|
||||
# $4 ... Compression type
|
||||
fitimage_emit_section_kernel() {
|
||||
|
||||
kernel_csum="${FIT_HASH_ALG}"
|
||||
kernel_sign_algo="${FIT_SIGN_ALG}"
|
||||
kernel_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
|
||||
|
||||
ENTRYPOINT="${UBOOT_ENTRYPOINT}"
|
||||
if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
|
||||
ENTRYPOINT=`${HOST_PREFIX}nm vmlinux | \
|
||||
awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
|
||||
fi
|
||||
|
||||
cat << EOF >> $1
|
||||
kernel-$2 {
|
||||
description = "Linux kernel";
|
||||
data = /incbin/("$3");
|
||||
type = "${UBOOT_MKIMAGE_KERNEL_TYPE}";
|
||||
arch = "${UBOOT_ARCH}";
|
||||
os = "linux";
|
||||
compression = "$4";
|
||||
load = <${UBOOT_LOADADDRESS}>;
|
||||
entry = <$ENTRYPOINT>;
|
||||
hash-1 {
|
||||
algo = "$kernel_csum";
|
||||
};
|
||||
};
|
||||
EOF
|
||||
|
||||
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$kernel_sign_keyname" ] ; then
|
||||
sed -i '$ d' $1
|
||||
cat << EOF >> $1
|
||||
signature-1 {
|
||||
algo = "$kernel_csum,$kernel_sign_algo";
|
||||
key-name-hint = "$kernel_sign_keyname";
|
||||
};
|
||||
};
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
#
|
||||
# Emit the fitImage ITS DTB section
|
||||
#
|
||||
# $1 ... .its filename
|
||||
# $2 ... Image counter
|
||||
# $3 ... Path to DTB image
|
||||
fitimage_emit_section_dtb() {
|
||||
|
||||
dtb_csum="${FIT_HASH_ALG}"
|
||||
dtb_sign_algo="${FIT_SIGN_ALG}"
|
||||
dtb_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
|
||||
|
||||
dtb_loadline=""
|
||||
dtb_ext=${DTB##*.}
|
||||
if [ "${dtb_ext}" = "dtbo" ]; then
|
||||
if [ -n "${UBOOT_DTBO_LOADADDRESS}" ]; then
|
||||
dtb_loadline="load = <${UBOOT_DTBO_LOADADDRESS}>;"
|
||||
fi
|
||||
elif [ -n "${UBOOT_DTB_LOADADDRESS}" ]; then
|
||||
dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
|
||||
fi
|
||||
cat << EOF >> $1
|
||||
fdt-$2 {
|
||||
description = "Flattened Device Tree blob";
|
||||
data = /incbin/("$3");
|
||||
type = "flat_dt";
|
||||
arch = "${UBOOT_ARCH}";
|
||||
compression = "none";
|
||||
$dtb_loadline
|
||||
hash-1 {
|
||||
algo = "$dtb_csum";
|
||||
};
|
||||
};
|
||||
EOF
|
||||
|
||||
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$dtb_sign_keyname" ] ; then
|
||||
sed -i '$ d' $1
|
||||
cat << EOF >> $1
|
||||
signature-1 {
|
||||
algo = "$dtb_csum,$dtb_sign_algo";
|
||||
key-name-hint = "$dtb_sign_keyname";
|
||||
};
|
||||
};
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
#
|
||||
# Emit the fitImage ITS u-boot script section
|
||||
#
|
||||
# $1 ... .its filename
|
||||
# $2 ... Image counter
|
||||
# $3 ... Path to boot script image
|
||||
fitimage_emit_section_boot_script() {
|
||||
|
||||
bootscr_csum="${FIT_HASH_ALG}"
|
||||
bootscr_sign_algo="${FIT_SIGN_ALG}"
|
||||
bootscr_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
|
||||
|
||||
cat << EOF >> $1
|
||||
bootscr-$2 {
|
||||
description = "U-boot script";
|
||||
data = /incbin/("$3");
|
||||
type = "script";
|
||||
arch = "${UBOOT_ARCH}";
|
||||
compression = "none";
|
||||
hash-1 {
|
||||
algo = "$bootscr_csum";
|
||||
};
|
||||
};
|
||||
EOF
|
||||
|
||||
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$bootscr_sign_keyname" ] ; then
|
||||
sed -i '$ d' $1
|
||||
cat << EOF >> $1
|
||||
signature-1 {
|
||||
algo = "$bootscr_csum,$bootscr_sign_algo";
|
||||
key-name-hint = "$bootscr_sign_keyname";
|
||||
};
|
||||
};
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
#
|
||||
# Emit the fitImage ITS setup section
|
||||
#
|
||||
# $1 ... .its filename
|
||||
# $2 ... Image counter
|
||||
# $3 ... Path to setup image
|
||||
fitimage_emit_section_setup() {
|
||||
|
||||
setup_csum="${FIT_HASH_ALG}"
|
||||
|
||||
cat << EOF >> $1
|
||||
setup-$2 {
|
||||
description = "Linux setup.bin";
|
||||
data = /incbin/("$3");
|
||||
type = "x86_setup";
|
||||
arch = "${UBOOT_ARCH}";
|
||||
os = "linux";
|
||||
compression = "none";
|
||||
load = <0x00090000>;
|
||||
entry = <0x00090000>;
|
||||
hash-1 {
|
||||
algo = "$setup_csum";
|
||||
};
|
||||
};
|
||||
EOF
|
||||
}
|
||||
|
||||
#
|
||||
# Emit the fitImage ITS ramdisk section
|
||||
#
|
||||
# $1 ... .its filename
|
||||
# $2 ... Image counter
|
||||
# $3 ... Path to ramdisk image
|
||||
fitimage_emit_section_ramdisk() {
|
||||
|
||||
ramdisk_csum="${FIT_HASH_ALG}"
|
||||
ramdisk_sign_algo="${FIT_SIGN_ALG}"
|
||||
ramdisk_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
|
||||
ramdisk_loadline=""
|
||||
ramdisk_entryline=""
|
||||
|
||||
if [ -n "${UBOOT_RD_LOADADDRESS}" ]; then
|
||||
ramdisk_loadline="load = <${UBOOT_RD_LOADADDRESS}>;"
|
||||
fi
|
||||
if [ -n "${UBOOT_RD_ENTRYPOINT}" ]; then
|
||||
ramdisk_entryline="entry = <${UBOOT_RD_ENTRYPOINT}>;"
|
||||
fi
|
||||
|
||||
cat << EOF >> $1
|
||||
ramdisk-$2 {
|
||||
description = "${INITRAMFS_IMAGE}";
|
||||
data = /incbin/("$3");
|
||||
type = "ramdisk";
|
||||
arch = "${UBOOT_ARCH}";
|
||||
os = "linux";
|
||||
compression = "none";
|
||||
$ramdisk_loadline
|
||||
$ramdisk_entryline
|
||||
hash-1 {
|
||||
algo = "$ramdisk_csum";
|
||||
};
|
||||
};
|
||||
EOF
|
||||
|
||||
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$ramdisk_sign_keyname" ] ; then
|
||||
sed -i '$ d' $1
|
||||
cat << EOF >> $1
|
||||
signature-1 {
|
||||
algo = "$ramdisk_csum,$ramdisk_sign_algo";
|
||||
key-name-hint = "$ramdisk_sign_keyname";
|
||||
};
|
||||
};
|
||||
EOF
|
||||
fi
|
||||
}
|
||||
|
||||
#
|
||||
# echoes symlink destination if it points below directory
|
||||
#
|
||||
# $1 ... file that's a potential symlink
|
||||
# $2 ... expected parent directory
|
||||
# Print the relative target of symlink $1 inside directory $2, but only
# when that target stays below $2; print nothing otherwise.
symlink_points_below() {
	link="$2/$1"
	base="$2"

	[ -L "$link" ] || return 0

	rel="$(realpath --relative-to=$base $link)"
	# A target escaping the directory starts with "../".
	case "$rel" in
	../*)
		return 0
		;;
	esac

	echo "$rel"
}
|
||||
|
||||
#
|
||||
# Emit the fitImage ITS configuration section
|
||||
#
|
||||
# $1 ... .its filename
|
||||
# $2 ... Linux kernel ID
|
||||
# $3 ... DTB image name
|
||||
# $4 ... ramdisk ID
|
||||
# $5 ... u-boot script ID
|
||||
# $6 ... config ID
|
||||
# $7 ... default flag
|
||||
# $8 ... default DTB image name
|
||||
fitimage_emit_section_config() {
|
||||
|
||||
conf_csum="${FIT_HASH_ALG}"
|
||||
conf_sign_algo="${FIT_SIGN_ALG}"
|
||||
conf_padding_algo="${FIT_PAD_ALG}"
|
||||
if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
|
||||
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
|
||||
fi
|
||||
|
||||
its_file="$1"
|
||||
kernel_id="$2"
|
||||
dtb_image="$3"
|
||||
ramdisk_id="$4"
|
||||
bootscr_id="$5"
|
||||
config_id="$6"
|
||||
default_flag="$7"
|
||||
default_dtb_image="$8"
|
||||
|
||||
# Test if we have any DTBs at all
|
||||
sep=""
|
||||
conf_desc=""
|
||||
conf_node="${FIT_CONF_PREFIX}"
|
||||
kernel_line=""
|
||||
fdt_line=""
|
||||
ramdisk_line=""
|
||||
bootscr_line=""
|
||||
setup_line=""
|
||||
default_line=""
|
||||
compatible_line=""
|
||||
|
||||
dtb_image_sect=$(symlink_points_below $dtb_image "${EXTERNAL_KERNEL_DEVICETREE}")
|
||||
if [ -z "$dtb_image_sect" ]; then
|
||||
dtb_image_sect=$dtb_image
|
||||
fi
|
||||
|
||||
dtb_path="${EXTERNAL_KERNEL_DEVICETREE}/${dtb_image_sect}"
|
||||
if [ -f "$dtb_path" ] || [ -L "$dtb_path" ]; then
|
||||
compat=$(fdtget -t s "$dtb_path" / compatible | sed 's/ /", "/g')
|
||||
if [ -n "$compat" ]; then
|
||||
compatible_line="compatible = \"$compat\";"
|
||||
fi
|
||||
fi
|
||||
|
||||
dtb_image=$(echo $dtb_image | tr '/' '_')
|
||||
dtb_image_sect=$(echo "${dtb_image_sect}" | tr '/' '_')
|
||||
|
||||
# conf node name is selected based on dtb ID if it is present,
|
||||
# otherwise its selected based on kernel ID
|
||||
if [ -n "$dtb_image" ]; then
|
||||
conf_node=$conf_node$dtb_image
|
||||
else
|
||||
conf_node=$conf_node$kernel_id
|
||||
fi
|
||||
|
||||
if [ -n "$kernel_id" ]; then
|
||||
conf_desc="Linux kernel"
|
||||
sep=", "
|
||||
kernel_line="kernel = \"kernel-$kernel_id\";"
|
||||
fi
|
||||
|
||||
if [ -n "$dtb_image" ]; then
|
||||
conf_desc="$conf_desc${sep}FDT blob"
|
||||
sep=", "
|
||||
fdt_line="fdt = \"fdt-$dtb_image_sect\";"
|
||||
fi
|
||||
|
||||
if [ -n "$ramdisk_id" ]; then
|
||||
conf_desc="$conf_desc${sep}ramdisk"
|
||||
sep=", "
|
||||
ramdisk_line="ramdisk = \"ramdisk-$ramdisk_id\";"
|
||||
fi
|
||||
|
||||
if [ -n "$bootscr_id" ]; then
|
||||
conf_desc="$conf_desc${sep}u-boot script"
|
||||
sep=", "
|
||||
bootscr_line="bootscr = \"bootscr-$bootscr_id\";"
|
||||
fi
|
||||
|
||||
if [ -n "$config_id" ]; then
|
||||
conf_desc="$conf_desc${sep}setup"
|
||||
setup_line="setup = \"setup-$config_id\";"
|
||||
fi
|
||||
|
||||
if [ "$default_flag" = "1" ]; then
|
||||
# default node is selected based on dtb ID if it is present,
|
||||
# otherwise its selected based on kernel ID
|
||||
if [ -n "$dtb_image" ]; then
|
||||
# Select default node as user specified dtb when
|
||||
# multiple dtb exists.
|
||||
if [ -n "$default_dtb_image" ]; then
|
||||
default_line="default = \"${FIT_CONF_PREFIX}$default_dtb_image\";"
|
||||
else
|
||||
default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
|
||||
fi
|
||||
else
|
||||
default_line="default = \"${FIT_CONF_PREFIX}$kernel_id\";"
|
||||
fi
|
||||
fi
|
||||
|
||||
cat << EOF >> $its_file
|
||||
$default_line
|
||||
$conf_node {
|
||||
description = "$default_flag $conf_desc";
|
||||
$compatible_line
|
||||
$kernel_line
|
||||
$fdt_line
|
||||
$ramdisk_line
|
||||
$bootscr_line
|
||||
$setup_line
|
||||
hash-1 {
|
||||
algo = "$conf_csum";
|
||||
};
|
||||
EOF
|
||||
|
||||
if [ -n "$conf_sign_keyname" ] ; then
|
||||
|
||||
sign_line="sign-images = "
|
||||
sep=""
|
||||
|
||||
if [ -n "$kernel_id" ]; then
|
||||
sign_line="$sign_line${sep}\"kernel\""
|
||||
sep=", "
|
||||
fi
|
||||
|
||||
if [ -n "$dtb_image" ]; then
|
||||
sign_line="$sign_line${sep}\"fdt\""
|
||||
sep=", "
|
||||
fi
|
||||
|
||||
if [ -n "$ramdisk_id" ]; then
|
||||
sign_line="$sign_line${sep}\"ramdisk\""
|
||||
sep=", "
|
||||
fi
|
||||
|
||||
if [ -n "$bootscr_id" ]; then
|
||||
sign_line="$sign_line${sep}\"bootscr\""
|
||||
sep=", "
|
||||
fi
|
||||
|
||||
if [ -n "$config_id" ]; then
|
||||
sign_line="$sign_line${sep}\"setup\""
|
||||
fi
|
||||
|
||||
sign_line="$sign_line;"
|
||||
|
||||
cat << EOF >> $its_file
|
||||
signature-1 {
|
||||
algo = "$conf_csum,$conf_sign_algo";
|
||||
key-name-hint = "$conf_sign_keyname";
|
||||
padding = "$conf_padding_algo";
|
||||
$sign_line
|
||||
};
|
||||
EOF
|
||||
fi
|
||||
|
||||
cat << EOF >> $its_file
|
||||
};
|
||||
EOF
|
||||
}
|
||||
|
||||
#
|
||||
# Assemble fitImage
|
||||
#
|
||||
# $1 ... .its filename
|
||||
# $2 ... fitImage name
|
||||
# $3 ... include ramdisk
|
||||
fitimage_assemble() {
|
||||
kernelcount=1
|
||||
dtbcount=""
|
||||
DTBS=""
|
||||
ramdiskcount=$3
|
||||
setupcount=""
|
||||
bootscr_id=""
|
||||
default_dtb_image=""
|
||||
rm -f $1 arch/${ARCH}/boot/$2
|
||||
|
||||
if [ -n "${UBOOT_SIGN_IMG_KEYNAME}" -a "${UBOOT_SIGN_KEYNAME}" = "${UBOOT_SIGN_IMG_KEYNAME}" ]; then
|
||||
bbfatal "Keys used to sign images and configuration nodes must be different."
|
||||
fi
|
||||
|
||||
fitimage_emit_fit_header $1
|
||||
|
||||
#
|
||||
# Step 1: Prepare a kernel image section.
|
||||
#
|
||||
fitimage_emit_section_maint $1 imagestart
|
||||
|
||||
uboot_prep_kimage
|
||||
fitimage_emit_section_kernel $1 $kernelcount linux.bin "$linux_comp"
|
||||
|
||||
#
|
||||
# Step 2: Prepare a DTB image section
|
||||
#
|
||||
|
||||
if [ -n "${KERNEL_DEVICETREE}" ]; then
|
||||
dtbcount=1
|
||||
for DTB in ${KERNEL_DEVICETREE}; do
|
||||
if echo $DTB | grep -q '/dts/'; then
|
||||
bbwarn "$DTB contains the full path to the the dts file, but only the dtb name should be used."
|
||||
DTB=`basename $DTB | sed 's,\.dts$,.dtb,g'`
|
||||
fi
|
||||
|
||||
# Skip ${DTB} if it's also provided in ${EXTERNAL_KERNEL_DEVICETREE}
|
||||
if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ] && [ -s ${EXTERNAL_KERNEL_DEVICETREE}/${DTB} ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
DTB_PATH="${KERNEL_OUTPUT_DIR}/dts/$DTB"
|
||||
if [ ! -e "$DTB_PATH" ]; then
|
||||
DTB_PATH="${KERNEL_OUTPUT_DIR}/$DTB"
|
||||
fi
|
||||
|
||||
# Strip off the path component from the filename
|
||||
if "${@'false' if oe.types.boolean(d.getVar('KERNEL_DTBVENDORED')) else 'true'}"; then
|
||||
DTB=`basename $DTB`
|
||||
fi
|
||||
|
||||
# Set the default dtb image if it exists in the devicetree.
|
||||
if [ ${FIT_CONF_DEFAULT_DTB} = $DTB ];then
|
||||
default_dtb_image=$(echo "$DTB" | tr '/' '_')
|
||||
fi
|
||||
|
||||
DTB=$(echo "$DTB" | tr '/' '_')
|
||||
|
||||
# Skip DTB if we've picked it up previously
|
||||
echo "$DTBS" | tr ' ' '\n' | grep -xq "$DTB" && continue
|
||||
|
||||
DTBS="$DTBS $DTB"
|
||||
DTB=$(echo $DTB | tr '/' '_')
|
||||
fitimage_emit_section_dtb $1 $DTB $DTB_PATH
|
||||
done
|
||||
fi
|
||||
|
||||
if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
|
||||
dtbcount=1
|
||||
for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" -name '*.dtb' -printf '%P\n' | sort) \
|
||||
$(find "${EXTERNAL_KERNEL_DEVICETREE}" -name '*.dtbo' -printf '%P\n' | sort); do
|
||||
# Set the default dtb image if it exists in the devicetree.
|
||||
if [ ${FIT_CONF_DEFAULT_DTB} = $DTB ];then
|
||||
default_dtb_image=$(echo "$DTB" | tr '/' '_')
|
||||
fi
|
||||
|
||||
DTB=$(echo "$DTB" | tr '/' '_')
|
||||
|
||||
# Skip DTB/DTBO if we've picked it up previously
|
||||
echo "$DTBS" | tr ' ' '\n' | grep -xq "$DTB" && continue
|
||||
|
||||
DTBS="$DTBS $DTB"
|
||||
|
||||
# Also skip if a symlink. We'll later have each config section point at it
|
||||
[ $(symlink_points_below $DTB "${EXTERNAL_KERNEL_DEVICETREE}") ] && continue
|
||||
|
||||
DTB=$(echo $DTB | tr '/' '_')
|
||||
fitimage_emit_section_dtb $1 $DTB "${EXTERNAL_KERNEL_DEVICETREE}/$DTB"
|
||||
done
|
||||
fi
|
||||
|
||||
if [ -n "${FIT_CONF_DEFAULT_DTB}" ] && [ -z $default_dtb_image ]; then
|
||||
bbwarn "${FIT_CONF_DEFAULT_DTB} is not available in the list of device trees."
|
||||
fi
|
||||
|
||||
#
|
||||
# Step 3: Prepare a u-boot script section
|
||||
#
|
||||
|
||||
if [ -n "${UBOOT_ENV}" ] && [ -d "${STAGING_DIR_HOST}/boot" ]; then
|
||||
if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then
|
||||
cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B}
|
||||
bootscr_id="${UBOOT_ENV_BINARY}"
|
||||
fitimage_emit_section_boot_script $1 "$bootscr_id" ${UBOOT_ENV_BINARY}
|
||||
else
|
||||
bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found."
|
||||
fi
|
||||
fi
|
||||
|
||||
#
|
||||
# Step 4: Prepare a setup section. (For x86)
|
||||
#
|
||||
if [ -e ${KERNEL_OUTPUT_DIR}/setup.bin ]; then
|
||||
setupcount=1
|
||||
fitimage_emit_section_setup $1 $setupcount ${KERNEL_OUTPUT_DIR}/setup.bin
|
||||
fi
|
||||
|
||||
#
|
||||
# Step 5: Prepare a ramdisk section.
|
||||
#
|
||||
if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
|
||||
# Find and use the first initramfs image archive type we find
|
||||
found=
|
||||
for img in ${FIT_SUPPORTED_INITRAMFS_FSTYPES}; do
|
||||
initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img"
|
||||
if [ -e "$initramfs_path" ]; then
|
||||
bbnote "Found initramfs image: $initramfs_path"
|
||||
found=true
|
||||
fitimage_emit_section_ramdisk $1 "$ramdiskcount" "$initramfs_path"
|
||||
break
|
||||
else
|
||||
bbnote "Did not find initramfs image: $initramfs_path"
|
||||
fi
|
||||
done
|
||||
|
||||
if [ -z "$found" ]; then
|
||||
bbfatal "Could not find a valid initramfs type for ${INITRAMFS_IMAGE_NAME}, the supported types are: ${FIT_SUPPORTED_INITRAMFS_FSTYPES}"
|
||||
fi
|
||||
fi
|
||||
|
||||
fitimage_emit_section_maint $1 sectend
|
||||
|
||||
# Force the first Kernel and DTB in the default config
|
||||
kernelcount=1
|
||||
if [ -n "$dtbcount" ]; then
|
||||
dtbcount=1
|
||||
fi
|
||||
|
||||
#
|
||||
# Step 6: Prepare a configurations section
|
||||
#
|
||||
fitimage_emit_section_maint $1 confstart
|
||||
|
||||
# kernel-fitimage.bbclass currently only supports a single kernel (no less or
|
||||
# more) to be added to the FIT image along with 0 or more device trees and
|
||||
# 0 or 1 ramdisk.
|
||||
# It is also possible to include an initramfs bundle (kernel and rootfs in one binary)
|
||||
# When the initramfs bundle is used ramdisk is disabled.
|
||||
# If a device tree is to be part of the FIT image, then select
|
||||
# the default configuration to be used is based on the dtbcount. If there is
|
||||
# no dtb present than select the default configuation to be based on
|
||||
# the kernelcount.
|
||||
if [ -n "$DTBS" ]; then
|
||||
i=1
|
||||
for DTB in ${DTBS}; do
|
||||
dtb_ext=${DTB##*.}
|
||||
if [ "$dtb_ext" = "dtbo" ]; then
|
||||
fitimage_emit_section_config $1 "" "$DTB" "" "$bootscr_id" "" "`expr $i = $dtbcount`" "$default_dtb_image"
|
||||
else
|
||||
fitimage_emit_section_config $1 $kernelcount "$DTB" "$ramdiskcount" "$bootscr_id" "$setupcount" "`expr $i = $dtbcount`" "$default_dtb_image"
|
||||
fi
|
||||
i=`expr $i + 1`
|
||||
done
|
||||
else
|
||||
defaultconfigcount=1
|
||||
fitimage_emit_section_config $1 $kernelcount "" "$ramdiskcount" "$bootscr_id" "$setupcount" $defaultconfigcount "$default_dtb_image"
|
||||
fi
|
||||
|
||||
fitimage_emit_section_maint $1 sectend
|
||||
|
||||
fitimage_emit_section_maint $1 fitend
|
||||
|
||||
#
|
||||
# Step 7: Assemble the image
|
||||
#
|
||||
${UBOOT_MKIMAGE} \
|
||||
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
|
||||
-f $1 \
|
||||
${KERNEL_OUTPUT_DIR}/$2
|
||||
|
||||
#
|
||||
# Step 8: Sign the image
|
||||
#
|
||||
if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
|
||||
${UBOOT_MKIMAGE_SIGN} \
|
||||
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
|
||||
-F -k "${UBOOT_SIGN_KEYDIR}" \
|
||||
-r ${KERNEL_OUTPUT_DIR}/$2 \
|
||||
${UBOOT_MKIMAGE_SIGN_ARGS}
|
||||
fi
|
||||
}
|
||||
|
||||
do_assemble_fitimage() {
	# Only act when "fitImage" is one of the requested kernel image types.
	echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" || return 0

	cd ${B}
	fitimage_assemble fit-image.its fitImage-none ""

	# For the non-bundled case, expose the plain image under the
	# canonical "fitImage" name as well.
	if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
		ln -sf fitImage-none ${B}/${KERNEL_OUTPUT_DIR}/fitImage
	fi
}
|
||||
|
||||
addtask assemble_fitimage before do_install after do_compile
|
||||
|
||||
SYSROOT_DIRS:append = " /sysroot-only"
|
||||
do_install:append() {
	# When signing is enabled, stage the unsigned/plain fitImage into the
	# sysroot-only area so dependent recipes (e.g. U-Boot) can consume it.
	if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
		[ "${UBOOT_SIGN_ENABLE}" = "1" ]; then
		install -D ${B}/${KERNEL_OUTPUT_DIR}/fitImage-none ${D}/sysroot-only/fitImage
	fi
}
|
||||
|
||||
do_assemble_fitimage_initramfs() {
	# Nothing to do unless fitImage is requested AND an initramfs is configured.
	echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" || return 0
	test -n "${INITRAMFS_IMAGE}" || return 0

	cd ${B}
	if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
		# Bundled: the initramfs is already inside the kernel image; the
		# bundle becomes the canonical fitImage.
		fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-bundle ""
		ln -sf fitImage-bundle ${B}/${KERNEL_OUTPUT_DIR}/fitImage
	else
		# Separate ramdisk section ("1" enables it in fitimage_assemble).
		fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
	fi
}
|
||||
|
||||
addtask assemble_fitimage_initramfs before do_deploy after do_bundle_initramfs
|
||||
|
||||
# Generate the RSA key pairs (private key + self-signed certificate) used to
# sign fitImage configuration nodes and image nodes. Keys are only created
# when both UBOOT_SIGN_ENABLE and FIT_GENERATE_KEYS are "1", and only if
# they do not already exist in UBOOT_SIGN_KEYDIR.
do_kernel_generate_rsa_keys() {
	if [ "${UBOOT_SIGN_ENABLE}" = "0" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
		bbwarn "FIT_GENERATE_KEYS is set to 1 even though UBOOT_SIGN_ENABLE is set to 0. The keys will not be generated as they won't be used."
	fi

	if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then

		# Generate keys to sign configuration nodes, only if they don't already exist
		if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key ] || \
			[ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt ]; then

			# make directory if it does not already exist
			mkdir -p "${UBOOT_SIGN_KEYDIR}"

			bbnote "Generating RSA private key for signing fitImage"
			openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
				"${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
				"${FIT_SIGN_NUMBITS}"

			bbnote "Generating certificate for signing fitImage"
			openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
				-key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
				-out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt
		fi

		# Generate keys to sign image nodes, only if they don't already exist
		if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key ] || \
			[ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt ]; then

			# make directory if it does not already exist
			mkdir -p "${UBOOT_SIGN_KEYDIR}"

			bbnote "Generating RSA private key for signing fitImage"
			openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
				"${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
				"${FIT_SIGN_NUMBITS}"

			bbnote "Generating certificate for signing fitImage"
			openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
				-key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
				-out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt
		fi
	fi
}
|
||||
|
||||
addtask kernel_generate_rsa_keys before do_assemble_fitimage after do_compile
|
||||
|
||||
# DATETIME would otherwise be folded into the task signature and defeat sstate.
kernel_do_deploy[vardepsexclude] = "DATETIME"
# Deploy the fitImage artifacts (.its source files, linux.bin, and the
# initramfs variant) into $deployDir, with versioned names plus stable
# KERNEL_FIT_LINK_NAME symlinks.
kernel_do_deploy:append() {
	# Update deploy directory
	if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then

		if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
			bbnote "Copying fit-image.its source file..."
			install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
			if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
				ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
			fi

			bbnote "Copying linux.bin file..."
			install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}
			if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
				ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
			fi
		fi

		if [ -n "${INITRAMFS_IMAGE}" ]; then
			bbnote "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
			install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
			if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
				ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
			fi

			# The bundled fitImage is deployed by the regular kernel image
			# handling; only the non-bundled variant needs copying here.
			if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
				bbnote "Copying fitImage-${INITRAMFS_IMAGE} file..."
				install -m 0644 ${B}/${KERNEL_OUTPUT_DIR}/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}"
				if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
					ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
				fi
			fi
		fi
	fi
}
|
||||
111
sources/poky/meta/classes-recipe/kernel-grub.bbclass
Normal file
111
sources/poky/meta/classes-recipe/kernel-grub.bbclass
Normal file
@@ -0,0 +1,111 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
#
|
||||
# While installing a rpm to update kernel on a deployed target, it will update
|
||||
# the boot area and the boot menu with the kernel as the priority but allow
|
||||
# you to fall back to the original kernel as well.
|
||||
#
|
||||
# - In kernel-image's preinstall scriptlet, it backs up original kernel to avoid
|
||||
# probable confliction with the new one.
|
||||
#
|
||||
# - In kernel-image's postinstall scriptlet, it modifies grub's config file to
|
||||
# updates the new kernel as the boot priority.
|
||||
#
|
||||
|
||||
# For each kernel image type, register pre/postinstall scriptlets on the
# kernel-image-<type> package that keep GRUB's config (grub.cfg or the
# legacy menu.list) consistent across an on-target kernel upgrade.
# The scriptlet templates below contain the literal token KERNEL_IMAGETYPE,
# which is substituted per image type at the bottom of this block.
python __anonymous () {
    import re

    # Preinstall: back up / rename an existing kernel so the incoming one
    # does not clash with it, and point the bootloader entry at the backup.
    preinst = '''
	# Parsing confliction
	[ -f "$D/boot/grub/menu.list" ] && grubcfg="$D/boot/grub/menu.list"
	[ -f "$D/boot/grub/grub.cfg" ] && grubcfg="$D/boot/grub/grub.cfg"
	if [ -n "$grubcfg" ]; then
		# Dereference symlink to avoid confliction with new kernel name.
		if grep -q "/KERNEL_IMAGETYPE \+root=" $grubcfg; then
			if [ -L "$D/boot/KERNEL_IMAGETYPE" ]; then
				kimage=`realpath $D/boot/KERNEL_IMAGETYPE 2>/dev/null`
				if [ -f "$D$kimage" ]; then
					sed -i "s:KERNEL_IMAGETYPE \+root=:${kimage##*/} root=:" $grubcfg
				fi
			fi
		fi

		# Rename old kernel if it conflicts with new kernel name.
		if grep -q "/KERNEL_IMAGETYPE-${KERNEL_VERSION} \+root=" $grubcfg; then
			if [ -f "$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}" ]; then
				timestamp=`date +%s`
				kimage="$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}-$timestamp-back"
				sed -i "s:KERNEL_IMAGETYPE-${KERNEL_VERSION} \+root=:${kimage##*/} root=:" $grubcfg
				mv "$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}" "$kimage"
			fi
		fi
	fi
'''

    # Postinstall: prepend a menu entry for the new kernel while keeping the
    # previous entries so the old kernel remains a fallback.
    postinst = '''
get_new_grub_cfg() {
	grubcfg="$1"
	old_image="$2"
	title="Update KERNEL_IMAGETYPE-${KERNEL_VERSION}-${PV}"
	if [ "${grubcfg##*/}" = "grub.cfg" ]; then
		rootfs=`grep " *linux \+[^ ]\+ \+root=" $grubcfg -m 1 | \
			 sed "s#${old_image}#${old_image%/*}/KERNEL_IMAGETYPE-${KERNEL_VERSION}#"`

		echo "menuentry \"$title\" {"
		echo "    set root=(hd0,1)"
		echo "$rootfs"
		echo "}"
	elif [ "${grubcfg##*/}" = "menu.list" ]; then
		rootfs=`grep "kernel \+[^ ]\+ \+root=" $grubcfg -m 1 | \
			 sed "s#${old_image}#${old_image%/*}/KERNEL_IMAGETYPE-${KERNEL_VERSION}#"`

		echo "default 0"
		echo "timeout 30"
		echo "title $title"
		echo "root  (hd0,0)"
		echo "$rootfs"
	fi
}

get_old_grub_cfg() {
	grubcfg="$1"
	if [ "${grubcfg##*/}" = "grub.cfg" ]; then
		cat "$grubcfg"
	elif [ "${grubcfg##*/}" = "menu.list" ]; then
		sed -e '/^default/d' -e '/^timeout/d' "$grubcfg"
	fi
}

if [ -f "$D/boot/grub/grub.cfg" ]; then
	grubcfg="$D/boot/grub/grub.cfg"
	old_image=`grep ' *linux \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
elif [ -f "$D/boot/grub/menu.list" ]; then
	grubcfg="$D/boot/grub/menu.list"
	old_image=`grep '^kernel \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
fi

# Don't update grubcfg at first install while old bzImage doesn't exist.
if [ -f "$D/boot/${old_image##*/}" ]; then
	grubcfgtmp="$grubcfg.tmp"
	get_new_grub_cfg "$grubcfg" "$old_image" > $grubcfgtmp
	get_old_grub_cfg "$grubcfg" >> $grubcfgtmp
	mv $grubcfgtmp $grubcfg
	echo "Caution! Update kernel may affect kernel-module!"
fi
'''

    imagetypes = d.getVar('KERNEL_IMAGETYPES')
    # .gz variants install under the uncompressed name, so strip the suffix.
    imagetypes = re.sub(r'\.gz$', '', imagetypes)

    # Instantiate the templates for every image type and attach them to the
    # corresponding kernel-image-<type> package.
    for type in imagetypes.split():
        typelower = type.lower()
        preinst_append = preinst.replace('KERNEL_IMAGETYPE', type)
        postinst_prepend = postinst.replace('KERNEL_IMAGETYPE', type)
        d.setVar('pkg_preinst:kernel-image-' + typelower + ':append', preinst_append)
        d.setVar('pkg_postinst:kernel-image-' + typelower + ':prepend', postinst_prepend)
}
|
||||
|
||||
188
sources/poky/meta/classes-recipe/kernel-module-split.bbclass
Normal file
188
sources/poky/meta/classes-recipe/kernel-module-split.bbclass
Normal file
@@ -0,0 +1,188 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Postinstall for module packages: refresh module dependency data.
# $D is set when installing into an image under construction.
pkg_postinst:modules () {
	if [ -z "$D" ]; then
		# On-target install: rebuild modules.dep for the running setup.
		depmod -a ${KERNEL_VERSION}
	else
		# image.bbclass will call depmodwrapper after everything is installed,
		# no need to do it here as well
		:
	fi
}
|
||||
|
||||
# Post-removal for module packages: regenerate module dependency data so
# stale entries for the removed modules disappear.
pkg_postrm:modules () {
	if [ -z "$D" ]; then
		# On-target removal.
		depmod -a ${KERNEL_VERSION}
	else
		# Image build: use the cross-safe wrapper rooted at $D.
		depmodwrapper -a -b $D ${KERNEL_VERSION} ${KERNEL_PACKAGE_NAME}
	fi
}
|
||||
|
||||
# Scriptlet fragment appended to a module package's postinst when the module
# is listed in KERNEL_MODULE_AUTOLOAD; %s is filled with the module name(s)
# by split_kernel_module_packages. Only loads on-target ($D empty).
autoload_postinst_fragment() {
	if [ x"$D" = "x" ]; then
		modprobe %s || true
	fi
}
|
||||
|
||||
PACKAGE_WRITE_DEPS += "kmod-native depmodwrapper-cross"
|
||||
|
||||
modulesloaddir ??= "${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '${nonarch_libdir}', '${sysconfdir}', d)}/modules-load.d"
|
||||
modprobedir ??= "${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '${nonarch_base_libdir}', '${sysconfdir}', d)}/modprobe.d"
|
||||
|
||||
KERNEL_SPLIT_MODULES ?= "1"
|
||||
PACKAGESPLITFUNCS =+ "split_kernel_module_packages"
|
||||
|
||||
KERNEL_MODULES_META_PACKAGE ?= "${@ d.getVar("KERNEL_PACKAGE_NAME") or "kernel" }-modules"
|
||||
|
||||
KERNEL_MODULE_PACKAGE_PREFIX ?= ""
|
||||
KERNEL_MODULE_PACKAGE_SUFFIX ?= "-${KERNEL_VERSION}"
|
||||
KERNEL_MODULE_PROVIDE_VIRTUAL ?= "1"
|
||||
|
||||
python split_kernel_module_packages () {
|
||||
import re
|
||||
|
||||
modinfoexp = re.compile("([^=]+)=(.*)")
|
||||
|
||||
def extract_modinfo(file):
|
||||
import tempfile, subprocess
|
||||
tempfile.tempdir = d.getVar("WORKDIR")
|
||||
compressed = re.match( r'.*\.(gz|xz|zst)$', file)
|
||||
tf = tempfile.mkstemp()
|
||||
tmpfile = tf[1]
|
||||
if compressed:
|
||||
tmpkofile = tmpfile + ".ko"
|
||||
if compressed.group(1) == 'gz':
|
||||
cmd = "gunzip -dc %s > %s" % (file, tmpkofile)
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
elif compressed.group(1) == 'xz':
|
||||
cmd = "xz -dc %s > %s" % (file, tmpkofile)
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
elif compressed.group(1) == 'zst':
|
||||
cmd = "zstd -dc %s > %s" % (file, tmpkofile)
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
else:
|
||||
msg = "Cannot decompress '%s'" % file
|
||||
raise msg
|
||||
cmd = "%s -j .modinfo -O binary %s %s" % (d.getVar("OBJCOPY"), tmpkofile, tmpfile)
|
||||
else:
|
||||
cmd = "%s -j .modinfo -O binary %s %s" % (d.getVar("OBJCOPY"), file, tmpfile)
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
# errors='replace': Some old kernel versions contain invalid utf-8 characters in mod descriptions (like 0xf6, 'ö')
|
||||
with open(tmpfile, errors='replace') as f:
|
||||
l = f.read().split("\000")
|
||||
os.close(tf[0])
|
||||
os.unlink(tmpfile)
|
||||
if compressed:
|
||||
os.unlink(tmpkofile)
|
||||
vals = {}
|
||||
for i in l:
|
||||
m = modinfoexp.match(i)
|
||||
if not m:
|
||||
continue
|
||||
vals[m.group(1)] = m.group(2)
|
||||
return vals
|
||||
|
||||
def frob_metadata(file, pkg, pattern, format, basename):
|
||||
vals = extract_modinfo(file)
|
||||
|
||||
dvar = d.getVar('PKGD')
|
||||
|
||||
# If autoloading is requested, output ${modulesloaddir}/<name>.conf and append
|
||||
# appropriate modprobe commands to the postinst
|
||||
autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD") or "").split()
|
||||
autoload = d.getVar('module_autoload_%s' % basename)
|
||||
if autoload and autoload == basename:
|
||||
bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename)
|
||||
if autoload and basename not in autoloadlist:
|
||||
bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename))
|
||||
if basename in autoloadlist:
|
||||
conf = '%s/%s.conf' % (d.getVar('modulesloaddir'), basename)
|
||||
name = '%s%s' % (dvar, conf)
|
||||
os.makedirs(os.path.dirname(name), exist_ok=True)
|
||||
with open(name, 'w') as f:
|
||||
if autoload:
|
||||
for m in autoload.split():
|
||||
f.write('%s\n' % m)
|
||||
else:
|
||||
f.write('%s\n' % basename)
|
||||
conf2append = ' %s' % conf
|
||||
d.appendVar('FILES:%s' % pkg, conf2append)
|
||||
d.appendVar('CONFFILES:%s' % pkg, conf2append)
|
||||
postinst = d.getVar('pkg_postinst:%s' % pkg)
|
||||
if not postinst:
|
||||
bb.fatal("pkg_postinst:%s not defined" % pkg)
|
||||
postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename)
|
||||
d.setVar('pkg_postinst:%s' % pkg, postinst)
|
||||
|
||||
# Write out any modconf fragment
|
||||
modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()
|
||||
modconf = d.getVar('module_conf_%s' % basename)
|
||||
if modconf and basename in modconflist:
|
||||
conf = '%s/%s.conf' % (d.getVar('modprobedir'), basename)
|
||||
name = '%s%s' % (dvar, conf)
|
||||
os.makedirs(os.path.dirname(name), exist_ok=True)
|
||||
with open(name, 'w') as f:
|
||||
f.write("%s\n" % modconf)
|
||||
conf2append = ' %s' % conf
|
||||
d.appendVar('FILES:%s' % pkg, conf2append)
|
||||
d.appendVar('CONFFILES:%s' % pkg, conf2append)
|
||||
|
||||
elif modconf:
|
||||
bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
|
||||
|
||||
if "description" in vals:
|
||||
old_desc = d.getVar('DESCRIPTION:' + pkg) or ""
|
||||
d.setVar('DESCRIPTION:' + pkg, old_desc + "; " + vals["description"])
|
||||
|
||||
rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
|
||||
modinfo_deps = []
|
||||
if "depends" in vals and vals["depends"] != "":
|
||||
for dep in vals["depends"].split(","):
|
||||
on = legitimize_package_name(dep)
|
||||
dependency_pkg = format % on
|
||||
modinfo_deps.append(dependency_pkg)
|
||||
for dep in modinfo_deps:
|
||||
if not dep in rdepends:
|
||||
rdepends[dep] = []
|
||||
d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
|
||||
|
||||
# Avoid automatic -dev recommendations for modules ending with -dev.
|
||||
d.setVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs', 1)
|
||||
|
||||
# Provide virtual package without postfix
|
||||
providevirt = d.getVar('KERNEL_MODULE_PROVIDE_VIRTUAL')
|
||||
if providevirt == "1":
|
||||
postfix = format.split('%s')[1]
|
||||
d.setVar('RPROVIDES:' + pkg, pkg.replace(postfix, ''))
|
||||
|
||||
kernel_package_name = d.getVar("KERNEL_PACKAGE_NAME") or "kernel"
|
||||
kernel_version = d.getVar("KERNEL_VERSION")
|
||||
|
||||
metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
|
||||
splitmods = d.getVar('KERNEL_SPLIT_MODULES')
|
||||
postinst = d.getVar('pkg_postinst:modules')
|
||||
postrm = d.getVar('pkg_postrm:modules')
|
||||
|
||||
if splitmods != '1':
|
||||
d.appendVar('FILES:' + metapkg, '%s %s %s/modules' %
|
||||
(d.getVar('modulesloaddir'), d.getVar('modprobedir'), d.getVar("nonarch_base_libdir")))
|
||||
d.appendVar('pkg_postinst:%s' % metapkg, postinst)
|
||||
d.prependVar('pkg_postrm:%s' % metapkg, postrm);
|
||||
return
|
||||
|
||||
module_regex = r'^(.*)\.k?o(?:\.(gz|xz|zst))?$'
|
||||
|
||||
module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
|
||||
module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
|
||||
module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix
|
||||
|
||||
modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version))
|
||||
if modules:
|
||||
d.appendVar('RDEPENDS:' + metapkg, ' '+' '.join(modules))
|
||||
}
|
||||
|
||||
do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()))}'
|
||||
49
sources/poky/meta/classes-recipe/kernel-uboot.bbclass
Normal file
49
sources/poky/meta/classes-recipe/kernel-uboot.bbclass
Normal file
@@ -0,0 +1,49 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# fitImage kernel compression algorithm
|
||||
FIT_KERNEL_COMP_ALG ?= "gzip"
|
||||
FIT_KERNEL_COMP_ALG_EXTENSION ?= ".gz"
|
||||
|
||||
# Kernel image type passed to mkimage (i.e. kernel kernel_noload...)
|
||||
UBOOT_MKIMAGE_KERNEL_TYPE ?= "kernel"
|
||||
|
||||
# Produce ./linux.bin (the kernel payload for mkimage/FIT) from the kernel
# build tree, compressing it per FIT_KERNEL_COMP_ALG when the raw vmlinux is
# used. Sets $linux_comp to the compression name for the caller.
uboot_prep_kimage() {
	if [ -e arch/${ARCH}/boot/compressed/vmlinux ]; then
		# Self-extracting compressed kernel: use as-is, no extra compression.
		vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
		linux_suffix=""
		linux_comp="none"
	elif [ -e arch/${ARCH}/boot/vmlinuz.bin ]; then
		# Pre-built vmlinuz.bin: hard-link it into place, no objcopy needed.
		rm -f linux.bin
		cp -l arch/${ARCH}/boot/vmlinuz.bin linux.bin
		vmlinux_path=""
		linux_suffix=""
		linux_comp="none"
	else
		vmlinux_path="vmlinux"
		# Use vmlinux.initramfs for linux.bin when INITRAMFS_IMAGE_BUNDLE set
		# As per the implementation in kernel.bbclass.
		# See do_bundle_initramfs function
		if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ] && [ -e vmlinux.initramfs ]; then
			vmlinux_path="vmlinux.initramfs"
		fi
		linux_suffix="${FIT_KERNEL_COMP_ALG_EXTENSION}"
		linux_comp="${FIT_KERNEL_COMP_ALG}"
	fi

	# Strip ELF metadata down to a raw binary when we start from vmlinux.
	[ -n "${vmlinux_path}" ] && ${KERNEL_OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin

	if [ "${linux_comp}" != "none" ] ; then
		if [ "${linux_comp}" = "gzip" ] ; then
			gzip -9 linux.bin
		elif [ "${linux_comp}" = "lzo" ] ; then
			lzop -9 linux.bin
		fi
		# Normalize the compressed output back to the plain linux.bin name.
		mv -f "linux.bin${linux_suffix}" linux.bin
	fi

	echo "${linux_comp}"
}
|
||||
41
sources/poky/meta/classes-recipe/kernel-uimage.bbclass
Normal file
41
sources/poky/meta/classes-recipe/kernel-uimage.bbclass
Normal file
@@ -0,0 +1,41 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit kernel-uboot
|
||||
|
||||
# When uImage is requested: pull in u-boot-tools-native and, unless
# KEEPUIMAGE == "yes", take uImage creation away from the kernel's own build
# system and generate it ourselves via do_uboot_mkimage.
python __anonymous () {
    if "uImage" in d.getVar('KERNEL_IMAGETYPES'):
        depends = d.getVar("DEPENDS")
        depends = "%s u-boot-tools-native" % depends
        d.setVar("DEPENDS", depends)

        # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
        # to kernel.bbclass . We override the variable here, since we need
        # to build uImage using the kernel build system if and only if
        # KEEPUIMAGE == yes. Otherwise, we pack compressed vmlinux into
        # the uImage .
        if d.getVar("KEEPUIMAGE") != 'yes':
            typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
            if "uImage" in typeformake.split():
                d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('uImage', 'vmlinux'))

            # Enable building of uImage with mkimage
            bb.build.addtask('do_uboot_mkimage', 'do_install', 'do_kernel_link_images', d)
}
|
||||
|
||||
do_uboot_mkimage[dirs] += "${B}"
# Wrap the prepared kernel payload (linux.bin from uboot_prep_kimage) into a
# legacy U-Boot uImage with uboot-mkimage.
do_uboot_mkimage() {
	uboot_prep_kimage

	ENTRYPOINT=${UBOOT_ENTRYPOINT}
	# Optionally resolve the entry point from a symbol in vmlinux instead
	# of the fixed UBOOT_ENTRYPOINT address.
	if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
		ENTRYPOINT=`${HOST_PREFIX}nm ${B}/vmlinux | \
			awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
	fi

	uboot-mkimage -A ${UBOOT_ARCH} -O linux -T ${UBOOT_MKIMAGE_KERNEL_TYPE} -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage
	rm -f linux.bin
}
|
||||
756
sources/poky/meta/classes-recipe/kernel-yocto.bbclass
Normal file
756
sources/poky/meta/classes-recipe/kernel-yocto.bbclass
Normal file
@@ -0,0 +1,756 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# remove tasks that modify the source tree in case externalsrc is inherited
|
||||
SRCTREECOVEREDTASKS += "do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
|
||||
PATCH_GIT_USER_EMAIL ?= "kernel-yocto@oe"
|
||||
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
|
||||
|
||||
# The distro or local.conf should set this, but if nobody cares...
|
||||
LINUX_KERNEL_TYPE ??= "standard"
|
||||
|
||||
# KMETA ?= ""
|
||||
KBRANCH ?= "master"
|
||||
KMACHINE ?= "${MACHINE}"
|
||||
SRCREV_FORMAT ?= "meta_machine"
|
||||
|
||||
# LEVELS:
|
||||
# 0: no reporting
|
||||
# 1: report options that are specified, but not in the final config
|
||||
# 2: report options that are not hardware related, but set by a BSP
|
||||
KCONF_AUDIT_LEVEL ?= "1"
|
||||
KCONF_BSP_AUDIT_LEVEL ?= "0"
|
||||
KMETA_AUDIT ?= "yes"
|
||||
KMETA_AUDIT_WERROR ?= ""
|
||||
|
||||
# returns local (absolute) path names for all valid patches in the
# src_uri
def find_patches(d,subdir):
    """Collect the local paths of SRC_URI patches, filtered by patchdir.

    With a truthy *subdir*, only patches whose 'patchdir' parameter equals
    it are returned; otherwise only patches without any 'patchdir' (those
    with one need special processing later and are skipped here).
    """
    selected = []
    for entry in src_patches(d):
        _, _, local, _, _, parm = bb.fetch.decodeurl(entry)
        patchdir = parm.get("patchdir", '')
        if subdir:
            if patchdir == subdir:
                selected.append(local)
        elif not patchdir:
            selected.append(local)
    return selected
|
||||
|
||||
# returns all the elements from the src uri that are .scc files
|
||||
def find_sccs(d):
    """Return the SRC_URI elements that are .scc/.cfg fragments or defconfigs."""
    matched = []
    for entry in src_patches(d, True):
        stem, suffix = os.path.splitext(os.path.basename(entry))
        # keep kernel meta-data fragments and anything that looks like a
        # defconfig (matched on the basename, extension ignored)
        if suffix in (".scc", ".cfg"):
            matched.append(entry)
        elif stem and 'defconfig' in stem:
            matched.append(entry)

    return matched
|
||||
|
||||
# check the SRC_URI for "kmeta" type'd git repositories. Return the name of
|
||||
# the repository as it will be found in WORKDIR
|
||||
# check the SRC_URI for "kmeta" type'd git repositories. Return the name of
# the repository as it will be found in WORKDIR
def find_kernel_feature_dirs(d):
    """Return the destsuffix directories of all type=kmeta SRC_URI entries.

    Fix over the previous version: ``destdir`` was only assigned when the
    URL carried a ``destsuffix`` parameter, so a kmeta URL without one
    raised UnboundLocalError (first URL) or silently reused the stale
    destdir of an earlier URL.  Such entries are now skipped instead.
    """
    feature_dirs = []
    fetch = bb.fetch2.Fetch([], d)
    for url in fetch.urls:
        parm = fetch.ud[url].parm
        if parm.get("type") == "kmeta":
            # a kmeta repo is only usable if we know where it unpacks to
            destdir = parm.get("destsuffix")
            if destdir:
                feature_dirs.append(destdir)

    return feature_dirs
|
||||
|
||||
# find the master/machine source branch. In the same way that the fetcher proceses
|
||||
# git repositories in the SRC_URI we take the first repo found, first branch.
|
||||
# find the master/machine source branch. In the same way that the fetcher
# processes git repositories in the SRC_URI we take the first repo found,
# first branch.
def get_machine_branch(d, default):
    """Return the first branch of the first non-kmeta SRC_URI repository,
    or *default* when no such entry declares a branch."""
    fetcher = bb.fetch2.Fetch([], d)
    for url in fetcher.urls:
        params = fetcher.ud[url].parm
        if "branch" not in params:
            continue
        # kmeta repositories carry meta-data, not kernel source: skip them
        if params.get("type") != "kmeta":
            return params.get("branch").split(',')[0]

    return default
|
||||
|
||||
# returns a list of all directories that are on FILESEXTRAPATHS (and
|
||||
# hence available to the build) that contain .scc or .cfg files
|
||||
# returns a list of all directories that are on FILESEXTRAPATHS (and
# hence available to the build) that contain .scc or .cfg files
def get_dirs_with_fragments(d):
    """Return a space-separated "path:exists" summary of FILESEXTRAPATHS.

    Used as a file-checksum input so do_kernel_metadata re-runs when an
    extra-files directory appears or disappears.
    """
    raw = d.getVar("FILESEXTRAPATHS") or ""
    # Remove default flag which was used for checking
    raw = raw.replace("__default:", "")
    annotated = []
    for entry in raw.split(":"):
        # NOTE(review): the guard only suppresses duplicates recorded as
        # existing ("path:True"); preserved as-is from the original.
        if entry + ":True" not in annotated:
            annotated.append(entry + ":" + str(os.path.exists(entry)))

    return " ".join(annotated)
|
||||
|
||||
# Process the kernel meta-data (kern-tools) for this machine/kernel-type.
# Runs in two modes, selected by $1: "patch" (default) prepares and applies
# kernel-meta patches; "config" generates the configuration queue.
do_kernel_metadata() {
	set +e

	if [ -n "$1" ]; then
		mode="$1"
	else
		mode="patch"
	fi

	cd ${S}
	export KMETA=${KMETA}

	bbnote "do_kernel_metadata: for summary/debug, set KCONF_AUDIT_LEVEL > 0"

	# if kernel tools are available in-tree, they are preferred
	# and are placed on the path before any external tools. Unless
	# the external tools flag is set, in that case we do nothing.
	if [ -f "${S}/scripts/util/configme" ]; then
		if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then
			PATH=${S}/scripts/util:${PATH}
		fi
	fi

	# In a similar manner to the kernel itself:
	#
	#   defconfig: $(obj)/conf
	#   ifeq ($(KBUILD_DEFCONFIG),)
	#   $< --defconfig $(Kconfig)
	#   else
	#   @echo "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
	#   $(Q)$< --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
	#   endif
	#
	# If a defconfig is specified via the KBUILD_DEFCONFIG variable, we copy it
	# from the source tree, into a common location and normalized "defconfig" name,
	# where the rest of the process will include and incoroporate it into the build
	#
	# If the fetcher has already placed a defconfig in WORKDIR (from the SRC_URI),
	# we don't overwrite it, but instead warn the user that SRC_URI defconfigs take
	# precendence.
	#
	if [ -n "${KBUILD_DEFCONFIG}" ]; then
		if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then
			if [ -f "${WORKDIR}/defconfig" ]; then
				# If the two defconfig's are different, warn that we overwrote the
				# one already placed in WORKDIR
				cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
				if [ $? -ne 0 ]; then
					bbdebug 1 "detected SRC_URI or unpatched defconfig in WORKDIR. ${KBUILD_DEFCONFIG} copied over it"
				fi
				cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
			else
				cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
			fi
			in_tree_defconfig="${WORKDIR}/defconfig"
		else
			bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree (${S}/arch/${ARCH}/configs/)"
		fi
	fi

	if [ "$mode" = "patch" ]; then
		# was anyone trying to patch the kernel meta data ?, we need to do
		# this here, since the scc commands migrate the .cfg fragments to the
		# kernel source tree, where they'll be used later.
		check_git_config
		patches="${@" ".join(find_patches(d,'kernel-meta'))}"
		if [ -n "$patches" ]; then
			(
				cd ${WORKDIR}/kernel-meta

				# take the SRC_URI patches, and create a series file
				# this is required to support some better processing
				# of issues with the patches
				rm -f series
				for p in $patches; do
					cp $p .
					echo "$(basename $p)" >> series
				done

				# process the series with kgit-s2q, which is what is
				# handling the rest of the kernel. This allows us
				# more flexibility for handling failures or advanced
				# mergeing functinoality
				message=$(kgit-s2q --gen -v --patches ${WORKDIR}/kernel-meta 2>&1)
				if [ $? -ne 0 ]; then
					# setup to try the patch again
					kgit-s2q --prev
					bberror "Problem applying patches to: ${WORKDIR}/kernel-meta"
					bbfatal_log "\n($message)"
				fi
			)
		fi
	fi

	sccs_from_src_uri="${@" ".join(find_sccs(d))}"
	patches="${@" ".join(find_patches(d,''))}"
	feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"

	# a quick check to make sure we don't have duplicate defconfigs If
	# there's a defconfig in the SRC_URI, did we also have one from the
	# KBUILD_DEFCONFIG processing above ?
	src_uri_defconfig=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") != 0) { print $0 }' RS=' ')
	# drop and defconfig's from the src_uri variable, we captured it just above here if it existed
	sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") == 0) { print $0 }' RS=' ')

	if [ -n "$in_tree_defconfig" ]; then
		sccs_defconfig=$in_tree_defconfig
		if [ -n "$src_uri_defconfig" ]; then
			bbwarn "[NOTE]: defconfig was supplied both via KBUILD_DEFCONFIG and SRC_URI. Dropping SRC_URI entry $src_uri_defconfig"
		fi
	else
		# if we didn't have an in-tree one, make our defconfig the one
		# from the src_uri. Note: there may not have been one from the
		# src_uri, so this can be an empty variable.
		sccs_defconfig=$src_uri_defconfig
	fi
	sccs="$sccs_from_src_uri"

	# check for feature directories/repos/branches that were part of the
	# SRC_URI. If they were supplied, we convert them into include directives
	# for the update part of the process
	for f in ${feat_dirs}; do
		if [ -d "${WORKDIR}/$f/kernel-meta" ]; then
			includes="$includes -I${WORKDIR}/$f/kernel-meta"
		elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
			includes="$includes -I${WORKDIR}/../oe-local-files/$f"
		elif [ -d "${WORKDIR}/$f" ]; then
			includes="$includes -I${WORKDIR}/$f"
		fi
	done
	for s in ${sccs} ${patches}; do
		sdir=$(dirname $s)
		includes="$includes -I${sdir}"
		# if a SRC_URI passed patch or .scc has a subdir of "kernel-meta",
		# then we add it to the search path
		if [ -d "${sdir}/kernel-meta" ]; then
			includes="$includes -I${sdir}/kernel-meta"
		fi
	done

	# expand kernel features into their full path equivalents
	bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE})
	if [ -z "$bsp_definition" ]; then
		if [ -z "$sccs_defconfig" ]; then
			bbfatal_log "Could not locate BSP definition for ${KMACHINE}/${LINUX_KERNEL_TYPE} and no defconfig was provided"
		fi
	else
		# if the bsp definition has "define KMETA_EXTERNAL_BSP t",
		# then we need to set a flag that will instruct the next
		# steps to use the BSP as both configuration and patches.
		grep -q KMETA_EXTERNAL_BSP $bsp_definition
		if [ $? -eq 0 ]; then
			KMETA_EXTERNAL_BSPS="t"
		fi
	fi
	meta_dir=$(kgit --meta)

	# validate KERNEL_FEATURES entries against the include search path and
	# collect the resolvable ones
	KERNEL_FEATURES_FINAL=""
	if [ -n "${KERNEL_FEATURES}" ]; then
		for feature in ${KERNEL_FEATURES}; do
			feature_found=f
			for d in $includes; do
				path_to_check=$(echo $d | sed 's/^-I//')
				if [ "$feature_found" = "f" ] && [ -e "$path_to_check/$feature" ]; then
					feature_found=t
				fi
			done
			if [ "$feature_found" = "f" ]; then
				if [ -n "${KERNEL_DANGLING_FEATURES_WARN_ONLY}" ]; then
					bbwarn "Feature '$feature' not found, but KERNEL_DANGLING_FEATURES_WARN_ONLY is set"
					bbwarn "This may cause runtime issues, dropping feature and allowing configuration to continue"
				else
					bberror "Feature '$feature' not found, this will cause configuration failures."
					bberror "Check the SRC_URI for meta-data repositories or directories that may be missing"
					bbfatal_log "Set KERNEL_DANGLING_FEATURES_WARN_ONLY to ignore this issue"
				fi
			else
				KERNEL_FEATURES_FINAL="$KERNEL_FEATURES_FINAL $feature"
			fi
		done
	fi

	if [ "$mode" = "config" ]; then
		# run1: pull all the configuration fragments, no matter where they come from
		elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
		if [ -n "${elements}" ]; then
			echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
			scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches $KERNEL_FEATURES_FINAL
			if [ $? -ne 0 ]; then
				bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
			fi
		fi
	fi

	# if KMETA_EXTERNAL_BSPS has been set, or it has been detected from
	# the bsp definition, then we inject the bsp_definition into the
	# patch phase below. we'll piggy back on the sccs variable.
	if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
		sccs="${bsp_definition} ${sccs}"
	fi

	if [ "$mode" = "patch" ]; then
		# run2: only generate patches for elements that have been passed on the SRC_URI
		elements="`echo -n ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
		if [ -n "${elements}" ]; then
			scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} $KERNEL_FEATURES_FINAL
			if [ $? -ne 0 ]; then
				bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
			fi
		fi
	fi

	# optional summary of what was gathered, controlled by KCONF_AUDIT_LEVEL
	if [ ${KCONF_AUDIT_LEVEL} -gt 0 ]; then
		bbnote "kernel meta data summary for ${KMACHINE} (${LINUX_KERNEL_TYPE}):"
		bbnote "======================================================================"
		if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
			bbnote "Non kernel-cache (external) bsp"
		fi
		bbnote "BSP entry point / definition: $bsp_definition"
		if [ -n "$in_tree_defconfig" ]; then
			bbnote "KBUILD_DEFCONFIG: ${KBUILD_DEFCONFIG}"
		fi
		bbnote "Fragments from SRC_URI: $sccs_from_src_uri"
		bbnote "KERNEL_FEATURES: $KERNEL_FEATURES_FINAL"
		bbnote "Final scc/cfg list: $sccs_defconfig $bsp_definition $sccs $KERNEL_FEATURES_FINAL"
	fi

	set -e
}
|
||||
|
||||
# Apply the generated patch queue to the kernel tree with kgit-s2q, then
# perform any branch merges requested via a merge.queue.
do_patch() {
	set +e
	cd ${S}

	check_git_config
	meta_dir=$(kgit --meta)
	# the scc-generated patch.queue becomes the series kgit-s2q consumes
	(cd ${meta_dir}; ln -sf patch.queue series)
	if [ -f "${meta_dir}/series" ]; then
		kgit_extra_args=""
		if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
			kgit_extra_args="--commit-sha author"
		fi
		kgit-s2q --gen -v $kgit_extra_args --patches .kernel-meta/
		if [ $? -ne 0 ]; then
			bberror "Could not apply patches for ${KMACHINE}."
			bbfatal_log "Patch failures can be resolved in the linux source directory ${S})"
		fi
	fi

	if [ -f "${meta_dir}/merge.queue" ]; then
		# we need to merge all these branches
		for b in $(cat ${meta_dir}/merge.queue); do
			git show-ref --verify --quiet refs/heads/${b}
			if [ $? -eq 0 ]; then
				bbnote "Merging branch ${b}"
				git merge -q --no-ff -m "Merge branch ${b}" ${b}
			else
				bbfatal "branch ${b} does not exist, cannot merge"
			fi
		done
	fi

	set -e
}
|
||||
|
||||
# Ensure ${S} is a usable git work tree: relocate/clone a fetched git repo
# into ${S}, or turn a plain source tree into a fresh git repository.
do_kernel_checkout() {
	set +e

	source_dir=`echo ${S} | sed 's%/$%%'`
	source_workdir="${WORKDIR}/git"
	if [ -d "${WORKDIR}/git/" ]; then
		# case: git repository
		# if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
		if [ "${source_dir}" != "${source_workdir}" ]; then
			if [ -d "${source_workdir}/.git" ]; then
				# regular git repository with .git
				rm -rf ${S}
				mv ${WORKDIR}/git ${S}
			else
				# create source for bare cloned git repository
				git clone ${WORKDIR}/git ${S}
				rm -rf ${WORKDIR}/git
			fi
		fi
		cd ${S}

		# convert any remote branches to local tracking ones
		for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
			b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
			git show-ref --quiet --verify -- "refs/heads/$b"
			if [ $? -ne 0 ]; then
				git branch $b $i > /dev/null
			fi
		done

		# Create a working tree copy of the kernel by checking out a branch
		machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"

		# checkout and clobber any unimportant files
		git checkout -f ${machine_branch}
	else
		# case: we have no git repository at all.
		# To support low bandwidth options for building the kernel, we'll just
		# convert the tree to a git repo and let the rest of the process work unchanged

		# if ${S} hasn't been set to the proper subdirectory a default of "linux" is
		# used, but we can't initialize that empty directory. So check it and throw a
		# clear error

		cd ${S}
		if [ ! -f "Makefile" ]; then
			bberror "S is not set to the linux source directory. Check "
			bbfatal "the recipe and set S to the proper extracted subdirectory"
		fi
		rm -f .gitignore
		git init
		check_git_config
		git add .
		git commit -q -n -m "baseline commit: creating repo for ${PN}-${PV}"
		git clean -d -f
	fi

	set -e
}
|
||||
do_kernel_checkout[dirs] = "${S} ${WORKDIR}"

# Task ordering: checkout -> validate_branches -> kernel_metadata -> patch.
addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc
addtask kernel_metadata after do_validate_branches do_unpack before do_patch
do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
# re-run the meta-data task when FILESEXTRAPATHS directories come or go
do_kernel_metadata[file-checksums] = " ${@get_dirs_with_fragments(d)}"
do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"

# toolchain + tools needed by merge_config.sh / Kconfig processing
do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot"
do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}gcc:do_populate_sysroot"
do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot"
do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot"
do_kernel_configme[dirs] += "${S} ${B}"
|
||||
# Generate ${B}/.config by running do_kernel_metadata in "config" mode and
# merging the resulting fragment queue with merge_config.sh.
do_kernel_configme() {
	do_kernel_metadata config

	# translate the kconfig_mode into something that merge_config.sh
	# understands
	case ${KCONFIG_MODE} in
		*allnoconfig)
			config_flags="-n"
			;;
		*alldefconfig)
			config_flags=""
			;;
		*)
			if [ -f ${WORKDIR}/defconfig ]; then
				config_flags="-n"
			fi
			;;
	esac

	cd ${S}

	meta_dir=$(kgit --meta)
	configs="$(scc --configs -o ${meta_dir})"
	if [ $? -ne 0 ]; then
		bberror "${configs}"
		bbfatal_log "Could not find configuration queue (${meta_dir}/config.queue)"
	fi

	# run the merge with the cross toolchain exported; full log is captured
	# for the failure diagnostics below
	CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" STRIP="${KERNEL_STRIP}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
	if [ $? -ne 0 -o ! -f ${B}/.config ]; then
		bberror "Could not generate a .config for ${KMACHINE}-${LINUX_KERNEL_TYPE}"
		if [ ${KCONF_AUDIT_LEVEL} -gt 1 ]; then
			bbfatal_log "`cat ${meta_dir}/cfg/merge_config_build.log`"
		else
			bbfatal_log "Details can be found at: ${S}/${meta_dir}/cfg/merge_config_build.log"
		fi
	fi

	if [ ! -z "${LINUX_VERSION_EXTENSION}" ]; then
		echo "# Global settings from linux recipe" >> ${B}/.config
		echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
	fi
}
|
||||
|
||||
addtask kernel_configme before do_configure after do_patch
# do_config_analysis is an on-demand task (not in the default chain)
addtask config_analysis

do_config_analysis[depends] = "virtual/kernel:do_configure"
do_config_analysis[depends] += "kern-tools-native:do_populate_sysroot"

# default output locations for the audit/analysis reports below
CONFIG_AUDIT_FILE ?= "${WORKDIR}/config-audit.txt"
CONFIG_ANALYSIS_FILE ?= "${WORKDIR}/config-analysis.txt"
|
||||
|
||||
# On-demand task: run symbol_why.py against ${B}/.config to produce a
# "blame" analysis and a summary audit, either for the whole config or for
# specific symbols listed in CONFIG_ANALYSIS.  Results are written to
# CONFIG_ANALYSIS_FILE / CONFIG_AUDIT_FILE (prefixed with the symbol name
# when one was requested) and surfaced via bb.warn.
python do_config_analysis() {
    import re, string, sys, subprocess

    s = d.getVar('S')

    # environment for the in-tree kern-tools scripts
    env = os.environ.copy()
    env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
    env['LD'] = d.getVar('KERNEL_LD')
    env['CC'] = d.getVar('KERNEL_CC')
    env['OBJCOPY'] = d.getVar('KERNEL_OBJCOPY')
    env['STRIP'] = d.getVar('KERNEL_STRIP')
    env['ARCH'] = d.getVar('ARCH')
    env['srctree'] = s

    # read specific symbols from the kernel recipe or from local.conf
    # i.e.: CONFIG_ANALYSIS:pn-linux-yocto-dev = 'NF_CONNTRACK LOCALVERSION'
    config = d.getVar( 'CONFIG_ANALYSIS' )
    if not config:
        # single empty entry: analyse the configuration as a whole
        config = [ "" ]
    else:
        config = config.split()

    for c in config:
        for action in ["analysis","audit"]:
            if action == "analysis":
                try:
                    analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--blame', c], cwd=s, env=env ).decode('utf-8')
                except subprocess.CalledProcessError as e:
                    bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))

                outfile = d.getVar( 'CONFIG_ANALYSIS_FILE' )

            if action == "audit":
                try:
                    analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--summary', '--extended', '--sanity', c], cwd=s, env=env ).decode('utf-8')
                except subprocess.CalledProcessError as e:
                    bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))

                outfile = d.getVar( 'CONFIG_AUDIT_FILE' )

            if c:
                # per-symbol request: prefix the report name with the symbol
                outdir = os.path.dirname( outfile )
                outname = os.path.basename( outfile )
                outfile = outdir + '/'+ c + '-' + outname

            if config and os.path.isfile(outfile):
                os.remove(outfile)

            with open(outfile, 'w+') as f:
                f.write( analysis )

            bb.warn( "Configuration {} executed, see: {} for details".format(action,outfile ))
            if c:
                bb.warn( analysis )
}
|
||||
|
||||
# Audit the final kernel configuration against the requested fragments.
# Three report categories are produced under <kmeta>/cfg/: mismatches
# (requested options missing from .config), invalid fragment elements, and
# redefined options.  Visibility is controlled by KCONF_AUDIT_LEVEL and
# KCONF_BSP_AUDIT_LEVEL; KMETA_AUDIT_WERROR promotes warnings to fatal.
python do_kernel_configcheck() {
    import re, string, sys, subprocess

    s = d.getVar('S')

    # if KMETA isn't set globally by a recipe using this routine, use kgit to
    # locate or create the meta directory. Otherwise, kconf_check is not
    # passed a valid meta-series for processing
    kmeta = d.getVar("KMETA")
    if not kmeta or not os.path.exists('{}/{}'.format(s,kmeta)):
        kmeta = subprocess.check_output(['kgit', '--meta'], cwd=d.getVar('S')).decode('utf-8').rstrip()

    # environment for the in-tree kern-tools scripts
    env = os.environ.copy()
    env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
    env['LD'] = d.getVar('KERNEL_LD')
    env['CC'] = d.getVar('KERNEL_CC')
    env['OBJCOPY'] = d.getVar('KERNEL_OBJCOPY')
    env['STRIP'] = d.getVar('KERNEL_STRIP')
    env['ARCH'] = d.getVar('ARCH')
    env['srctree'] = s

    try:
        configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8')
    except subprocess.CalledProcessError as e:
        bb.fatal( "Cannot gather config fragments for audit: %s" % e.output.decode("utf-8") )

    config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0)
    bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0)
    kmeta_audit_werror = d.getVar("KMETA_AUDIT_WERROR") or ""
    warnings_detected = False

    # if config check visibility is "1", that's the lowest level of audit. So
    # we add the --classify option to the run, since classification will
    # streamline the output to only report options that could be boot issues,
    # or are otherwise required for proper operation.
    extra_params = ""
    if config_check_visibility == 1:
        extra_params = "--classify"

    # category #1: mismatches
    try:
        analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--mismatches', extra_params], cwd=s, env=env ).decode('utf-8')
    except subprocess.CalledProcessError as e:
        bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))

    if analysis:
        outfile = "{}/{}/cfg/mismatch.txt".format( s, kmeta )
        if os.path.isfile(outfile):
            os.remove(outfile)
        with open(outfile, 'w+') as f:
            f.write( analysis )

        if config_check_visibility and os.stat(outfile).st_size > 0:
            with open (outfile, "r") as myfile:
                results = myfile.read()
                bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
                warnings_detected = True

    # category #2: invalid fragment elements
    extra_params = ""
    if bsp_check_visibility > 1:
        extra_params = "--strict"
    try:
        analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--invalid', extra_params], cwd=s, env=env ).decode('utf-8')
    except subprocess.CalledProcessError as e:
        bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))

    if analysis:
        outfile = "{}/{}/cfg/invalid.txt".format(s,kmeta)
        if os.path.isfile(outfile):
            os.remove(outfile)
        with open(outfile, 'w+') as f:
            f.write( analysis )

        if bsp_check_visibility and os.stat(outfile).st_size > 0:
            with open (outfile, "r") as myfile:
                results = myfile.read()
                bb.warn( "[kernel config]: This BSP contains fragments with warnings:\n\n%s" % results)
                warnings_detected = True

    # category #3: redefined options (this is pretty verbose and is debug only)
    try:
        analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--sanity'], cwd=s, env=env ).decode('utf-8')
    except subprocess.CalledProcessError as e:
        bb.fatal( "config analysis failed when running '%s': %s" % (" ".join(e.cmd), e.output.decode('utf-8')))

    if analysis:
        outfile = "{}/{}/cfg/redefinition.txt".format(s,kmeta)
        if os.path.isfile(outfile):
            os.remove(outfile)
        with open(outfile, 'w+') as f:
            f.write( analysis )

        # if the audit level is greater than two, we report if a fragment has overriden
        # a value from a base fragment. This is really only used for new kernel introduction
        if bsp_check_visibility > 2 and os.stat(outfile).st_size > 0:
            with open (outfile, "r") as myfile:
                results = myfile.read()
                bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results)
                warnings_detected = True

    if warnings_detected and kmeta_audit_werror:
        bb.fatal( "configuration warnings detected, werror is set, promoting to fatal" )
}
|
||||
|
||||
# Ensure that the branches (BSP and meta) are on the locations specified by
|
||||
# their SRCREV values. If they are NOT on the right commits, the branches
|
||||
# are corrected to the proper commit.
|
||||
# Ensure that the branches (BSP and meta) are on the locations specified by
# their SRCREV values. If they are NOT on the right commits, the branches
# are corrected to the proper commit.
do_validate_branches() {
	set +e
	cd ${S}

	machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
	machine_srcrev="${SRCREV_machine}"

	# if SRCREV is AUTOREV it shows up as AUTOINC there's nothing to
	# check and we can exit early
	if [ "${machine_srcrev}" = "AUTOINC" ]; then
		linux_yocto_dev='${@oe.utils.conditional("PREFERRED_PROVIDER_virtual/kernel", "linux-yocto-dev", "1", "", d)}'
		if [ -n "$linux_yocto_dev" ]; then
			git checkout -q -f ${machine_branch}
			# read the kernel version directly from the source Makefile
			ver=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
			patchlevel=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
			sublevel=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
			kver="$ver.$patchlevel"
			bbnote "dev kernel: performing version -> branch -> SRCREV validation"
			bbnote "dev kernel: recipe version ${LINUX_VERSION}, src version: $kver"
			echo "${LINUX_VERSION}" | grep -q $kver
			if [ $? -ne 0 ]; then
				# recipe and source versions disagree: look for a
				# version-prefixed branch (v<version>/<branch>) instead
				version="$(echo ${LINUX_VERSION} | sed 's/\+.*$//g')"
				versioned_branch="v$version/$machine_branch"

				machine_branch=$versioned_branch
				force_srcrev="$(git rev-parse $machine_branch 2> /dev/null)"
				if [ $? -ne 0 ]; then
					bbfatal "kernel version mismatch detected, and no valid branch $machine_branch detected"
				fi

				bbnote "dev kernel: adjusting branch to $machine_branch, srcrev to: $force_srcrev"
			fi
		else
			bbnote "SRCREV validation is not required for AUTOREV"
		fi
	elif [ "${machine_srcrev}" = "" ]; then
		if [ "${SRCREV}" != "AUTOINC" ] && [ "${SRCREV}" != "INVALID" ]; then
			# SRCREV_machine_<MACHINE> was not set. This means that a custom recipe
			# that doesn't use the SRCREV_FORMAT "machine_meta" is being built. In
			# this case, we need to reset to the give SRCREV before heading to patching
			bbnote "custom recipe is being built, forcing SRCREV to ${SRCREV}"
			force_srcrev="${SRCREV}"
		fi
	else
		git cat-file -t ${machine_srcrev} > /dev/null
		if [ $? -ne 0 ]; then
			bberror "${machine_srcrev} is not a valid commit ID."
			bbfatal_log "The kernel source tree may be out of sync"
		fi
		force_srcrev=${machine_srcrev}
	fi

	git checkout -q -f ${machine_branch}
	if [ -n "${force_srcrev}" ]; then
		# see if the branch we are about to patch has been properly reset to the defined
		# SRCREV .. if not, we reset it.
		branch_head=`git rev-parse HEAD`
		if [ "${force_srcrev}" != "${branch_head}" ]; then
			current_branch=`git rev-parse --abbrev-ref HEAD`
			git branch "$current_branch-orig"
			git reset --hard ${force_srcrev}
			# We've checked out HEAD, make sure we cleanup kgit-s2q fence post check
			# so the patches are applied as expected otherwise no patching
			# would be done in some corner cases.
			kgit-s2q --clean
		fi
	fi

	set -e
}
|
||||
|
||||
# keep out-of-tree builds working inside devshell terminals
OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
KBUILD_OUTPUT = "${B}"
|
||||
|
||||
# Anonymous function: adjust task ordering at parse time.
python () {
    # If diffconfig is available, ensure it runs after kernel_configme
    if 'do_diffconfig' in d:
        bb.build.addtask('do_diffconfig', None, 'do_kernel_configme', d)

    externalsrc = d.getVar('EXTERNALSRC')
    if externalsrc:
        # If we deltask do_patch, do_kernel_configme is left without
        # dependencies and runs too early
        d.setVarFlag('do_kernel_configme', 'deps', (d.getVarFlag('do_kernel_configme', 'deps', False) or []) + ['do_unpack'])
}
|
||||
|
||||
# extra tasks
addtask kernel_version_sanity_check after do_kernel_metadata do_kernel_checkout before do_compile
addtask validate_branches before do_patch after do_kernel_checkout
addtask kernel_configcheck after do_configure before do_compile
|
||||
877
sources/poky/meta/classes-recipe/kernel.bbclass
Normal file
877
sources/poky/meta/classes-recipe/kernel.bbclass
Normal file
@@ -0,0 +1,877 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit linux-kernel-base kernel-module-split

COMPATIBLE_HOST = ".*-linux"

KERNEL_PACKAGE_NAME ??= "kernel"
# alternate kernels deploy under a subdirectory named after the package
KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"

PROVIDES += "virtual/kernel"
DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native bison-native"
# compressors only needed when the matching initramfs type is requested
DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lzo", "lzop-native", "", d)}"
DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}"
DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.zst", "zstd-native", "", d)}"
PACKAGE_WRITE_DEPS += "depmodwrapper-cross"

do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot gzip-native:do_populate_sysroot"
do_clean[depends] += "make-mod-scripts:do_clean"

CVE_PRODUCT ?= "linux_kernel"

# shared source / out-of-tree build layout
S = "${STAGING_KERNEL_DIR}"
B = "${WORKDIR}/build"
KBUILD_OUTPUT = "${B}"
OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"

# we include gcc above, we dont need virtual/libc
INHIBIT_DEFAULT_DEPS = "1"

KERNEL_IMAGETYPE ?= "zImage"
INITRAMFS_IMAGE ?= ""
INITRAMFS_TASK ?= ""
INITRAMFS_IMAGE_BUNDLE ?= ""
INITRAMFS_DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR_IMAGE}"
INITRAMFS_MULTICONFIG ?= ""

# KERNEL_VERSION is extracted from source code. It is evaluated as
# None for the first parsing, since the code has not been fetched.
# After the code is fetched, it will be evaluated as real version
# number and cause kernel to be rebuilt. To avoid this, make
# KERNEL_VERSION_NAME and KERNEL_VERSION_PKG_NAME depend on
# LINUX_VERSION which is a constant.
KERNEL_VERSION_NAME = "${@d.getVar('KERNEL_VERSION') or ""}"
KERNEL_VERSION_NAME[vardepvalue] = "${LINUX_VERSION}"
KERNEL_VERSION_PKG_NAME = "${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
KERNEL_VERSION_PKG_NAME[vardepvalue] = "${LINUX_VERSION}"
|
||||
|
||||
python __anonymous () {
    # Parse-time setup: derive staging dirs, image-type lists and the
    # per-image-type packaging metadata for this kernel recipe.
    pn = d.getVar("PN")
    kpn = d.getVar("KERNEL_PACKAGE_NAME")

    # XXX Remove this after bug 11905 is resolved
    # FILES:${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
    if kpn == pn:
        bb.warn("Some packages (E.g. *-dev) might be missing due to "
                "bug 11905 (variable KERNEL_PACKAGE_NAME == PN)")

    # The default kernel recipe builds in a shared location defined by
    # bitbake/distro confs: STAGING_KERNEL_DIR and STAGING_KERNEL_BUILDDIR.
    # Set these variables to directories under ${WORKDIR} in alternate
    # kernel recipes (I.e. where KERNEL_PACKAGE_NAME != kernel) so that they
    # may build in parallel with the default kernel without clobbering.
    if kpn != "kernel":
        workdir = d.getVar("WORKDIR")
        sourceDir = os.path.join(workdir, 'kernel-source')
        artifactsDir = os.path.join(workdir, 'kernel-build-artifacts')
        d.setVar("STAGING_KERNEL_DIR", sourceDir)
        d.setVar("STAGING_KERNEL_BUILDDIR", artifactsDir)

    # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
    type = d.getVar('KERNEL_IMAGETYPE') or ""
    alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
    types = d.getVar('KERNEL_IMAGETYPES') or ""
    if type not in types.split():
        types = (type + ' ' + types).strip()
    if alttype not in types.split():
        types = (alttype + ' ' + types).strip()
    d.setVar('KERNEL_IMAGETYPES', types)

    # KERNEL_IMAGETYPES may contain a mixture of image types supported directly
    # by the kernel build system and types which are created by post-processing
    # the output of the kernel build system (e.g. compressing vmlinux ->
    # vmlinux.gz in kernel_do_transform_kernel()).
    # KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
    # directly by the kernel build system.
    if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
        typeformake = set()
        for type in types.split():
            if type == 'vmlinux.gz':
                type = 'vmlinux'
            typeformake.add(type)

        d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', ' '.join(sorted(typeformake)))

    kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
    imagedest = d.getVar('KERNEL_IMAGEDEST')

    # Emit one <kname>-image-<type> package per requested image type, with
    # versioned package names and runtime dependencies wired up.
    for type in types.split():
        if bb.data.inherits_class('nopackages', d):
            continue
        typelower = type.lower()
        d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
        d.setVar('FILES:' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
        d.appendVar('RDEPENDS:%s-image' % kname, ' %s-image-%s (= ${EXTENDPKGV})' % (kname, typelower))
        splitmods = d.getVar("KERNEL_SPLIT_MODULES")
        if splitmods != '1':
            # Modules ship as one combined <kname>-modules package.
            d.appendVar('RDEPENDS:%s-image' % kname, ' %s-modules (= ${EXTENDPKGV})' % kname)
            d.appendVar('RDEPENDS:%s-image-%s' % (kname, typelower), ' %s-modules-${KERNEL_VERSION_PKG_NAME} (= ${EXTENDPKGV})' % kname)
            d.setVar('PKG:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
            d.appendVar('RPROVIDES:%s-modules' % kname, ' %s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)

        d.setVar('PKG:%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
        d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')

        if d.getVar('KERNEL_IMAGETYPE_SYMLINK') == '1':
            # Maintain an unversioned symlink next to the versioned image,
            # falling back to a copy when the target fs lacks symlinks.
            d.prependVar('pkg_postinst:%s-image-%s' % (kname,typelower), """set +e
if [ -n "$D" ]; then
	ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
else
	ln -sf %s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
	if [ $? -ne 0 ]; then
		echo "Filesystem on ${KERNEL_IMAGEDEST}/ doesn't support symlinks, falling back to copied image (%s)."
		install -m 0644 ${KERNEL_IMAGEDEST}/%s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s
	fi
fi
set -e
""" % (type, type, type, type, type, type, type))
            d.setVar('pkg_postrm:%s-image-%s' % (kname,typelower), """set +e
if [ -f "${KERNEL_IMAGEDEST}/%s" -o -L "${KERNEL_IMAGEDEST}/%s" ]; then
	rm -f ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
fi
set -e
""" % (type, type, type))


    image = d.getVar('INITRAMFS_IMAGE')
    # If the INITRAMFS_IMAGE is set but the INITRAMFS_IMAGE_BUNDLE is set to 0,
    # the do_bundle_initramfs does nothing, but the INITRAMFS_IMAGE is built
    # standalone for use by wic and other tools.
    if image:
        if d.getVar('INITRAMFS_MULTICONFIG'):
            d.appendVarFlag('do_bundle_initramfs', 'mcdepends', ' mc::${INITRAMFS_MULTICONFIG}:${INITRAMFS_IMAGE}:do_image_complete')
        else:
            d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
    if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')):
        bb.build.addtask('do_transform_bundled_initramfs', 'do_deploy', 'do_bundle_initramfs', d)

    # NOTE: setting INITRAMFS_TASK is for backward compatibility
    # The preferred method is to set INITRAMFS_IMAGE, because
    # this INITRAMFS_TASK has circular dependency problems
    # if the initramfs requires kernel modules
    image_task = d.getVar('INITRAMFS_TASK')
    if image_task:
        d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
}
|
||||
|
||||
# Here we pull in all various kernel image types which we support.
#
# In case you're wondering why kernel.bbclass inherits the other image
# types instead of the other way around, the reason for that is to
# maintain compatibility with various currently existing meta-layers.
# By pulling in the various kernel image types here, we retain the
# original behavior of kernel.bbclass, so no meta-layers should get
# broken.
#
# KERNEL_CLASSES by default pulls in kernel-uimage.bbclass, since this
# used to be the default behavior when only uImage was supported. This
# variable can be appended by users who implement support for new kernel
# image types.

KERNEL_CLASSES ?= " kernel-uimage "
inherit_defer ${KERNEL_CLASSES}

# Old style kernels may set ${S} = ${WORKDIR}/git for example
# We need to move these over to STAGING_KERNEL_DIR. We can't just
# create the symlink in advance as the git fetcher can't cope with
# the symlink.
do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
|
||||
python do_symlink_kernsrc () {
    # Make ${S} and STAGING_KERNEL_DIR refer to the same tree: move the
    # unpacked sources into the shared staging location and leave a symlink
    # behind at ${S} (or, with EXTERNALSRC, point the staging dir at ${S}).
    src = d.getVar("S")
    staging = d.getVar("STAGING_KERNEL_DIR")
    if src == staging:
        return
    bb.utils.mkdirhier(staging)
    bb.utils.remove(staging, recurse=True)
    if src[-1] == '/':
        # os.symlink(staging, src) would treat a trailing-slash path as a
        # directory name and fail, so drop the slash.
        src = src[:-1]
    if d.getVar("EXTERNALSRC"):
        # With EXTERNALSRC the source tree is never wiped, so a symlink
        # into it is safe.
        os.symlink(src, staging)
    else:
        import shutil
        shutil.move(src, staging)
        os.symlink(staging, src)
}
|
||||
# do_patch is normally ordered before do_configure, but
# externalsrc.bbclass deletes do_patch, breaking the dependency of
# do_configure on do_symlink_kernsrc.
addtask symlink_kernsrc before do_patch do_configure after do_unpack
|
||||
|
||||
inherit kernel-arch deploy

# Packages produced on the fly at packaging time (modules, per-type images,
# split-out firmware blobs).
PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-module-.*"
PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-image-.*"
PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-firmware-.*"

# Environment expected by the kernel's Makefiles.
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"

KERNEL_RELEASE ?= "${KERNEL_VERSION}"

# The directory where built kernel lies in the kernel tree
KERNEL_OUTPUT_DIR ?= "arch/${ARCH}/boot"
KERNEL_IMAGEDEST ?= "boot"
KERNEL_DTBDEST ?= "${KERNEL_IMAGEDEST}"
KERNEL_DTBVENDORED ?= "0"

#
# configuration
#
export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE") or "ttyS0"}"

KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}"

# kernels are generally machine specific
PACKAGE_ARCH = "${MACHINE_ARCH}"

# U-Boot support
UBOOT_ENTRYPOINT ?= "20008000"
UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"

# Some Linux kernel configurations need additional parameters on the command line
KERNEL_EXTRA_ARGS ?= ""

EXTRA_OEMAKE += ' CC="${KERNEL_CC}" LD="${KERNEL_LD}" OBJCOPY="${KERNEL_OBJCOPY}" STRIP="${KERNEL_STRIP}"'
EXTRA_OEMAKE += ' HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}"'
EXTRA_OEMAKE += ' HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}"'
# Only for newer kernels (5.19+), native pkg-config variables are set for older kernels when building kernel and modules
EXTRA_OEMAKE += ' HOSTPKG_CONFIG="pkg-config-native"'

KERNEL_ALT_IMAGETYPE ??= ""
|
||||
|
||||
copy_initramfs() {
	# Copy the deployed initramfs cpio archive into ${B}/usr and decompress
	# it so the kernel's CONFIG_INITRAMFS_SOURCE can point at the plain .cpio.
	echo "Copying initramfs into ./usr ..."
	# In case the directory is not created yet from the first pass compile:
	mkdir -p ${B}/usr
	# Find and use the first initramfs image archive type we find
	rm -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
	for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst; do
		if [ -e "${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
			cp ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
			case $img in
			*gz)
				echo "gzip decompressing image"
				gunzip -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
				break
				;;
			*lz4)
				echo "lz4 decompressing image"
				lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
				break
				;;
			*lzo)
				echo "lzo decompressing image"
				lzop -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
				break
				;;
			*lzma)
				echo "lzma decompressing image"
				lzma -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
				break
				;;
			*xz)
				echo "xz decompressing image"
				xz -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
				break
				;;
			*zst)
				echo "zst decompressing image"
				zstd -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
				break
				;;
			esac
			# Plain .cpio needs no decompression; stop at the first match.
			break
		fi
	done
	# Verify that the above loop found an initramfs, fail otherwise
	# (fixed the unbalanced "{...)" bracket in the error message).
	[ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio{.gz|.lz4|.lzo|.lzma|.xz|.zst} for bundling; INITRAMFS_IMAGE_NAME might be wrong."
}
|
||||
|
||||
do_bundle_initramfs () {
	# Second-pass kernel build that links the initramfs into the kernel
	# image. Only active when INITRAMFS_IMAGE is set and
	# INITRAMFS_IMAGE_BUNDLE equals "1"; otherwise a no-op.
	if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
		echo "Creating a kernel image with a bundled initramfs..."
		copy_initramfs
		# Backing up kernel image relies on its type(regular file or symbolic link)
		tmp_path=""
		for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
			if [ -h ${KERNEL_OUTPUT_DIR}/$imageType ] ; then
				linkpath=`readlink -n ${KERNEL_OUTPUT_DIR}/$imageType`
				realpath=`readlink -fn ${KERNEL_OUTPUT_DIR}/$imageType`
				mv -f $realpath $realpath.bak
				tmp_path=$tmp_path" "$imageType"#"$linkpath"#"$realpath
			elif [ -f ${KERNEL_OUTPUT_DIR}/$imageType ]; then
				mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.bak
				tmp_path=$tmp_path" "$imageType"##"
			fi
		done
		use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
		kernel_do_compile
		# Restoring kernel image. Each tmp_path entry has the form
		# "type#linkpath#realpath"; the link fields are empty for
		# regular files.
		for tp in $tmp_path ; do
			imageType=`echo $tp|cut -d "#" -f 1`
			linkpath=`echo $tp|cut -d "#" -f 2`
			realpath=`echo $tp|cut -d "#" -f 3`
			if [ -n "$realpath" ]; then
				mv -f $realpath $realpath.initramfs
				mv -f $realpath.bak $realpath
				ln -sf $linkpath.initramfs ${B}/${KERNEL_OUTPUT_DIR}/$imageType.initramfs
			else
				mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.initramfs
				mv -f ${KERNEL_OUTPUT_DIR}/$imageType.bak ${KERNEL_OUTPUT_DIR}/$imageType
			fi
		done
	fi
}
do_bundle_initramfs[dirs] = "${B}"
|
||||
|
||||
kernel_do_transform_bundled_initramfs() {
	# The kernel build never produces vmlinux.gz itself, so create the
	# initramfs-bundled variant here by compressing the bundled vmlinux.
	if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
		gzip -9cn < ${KERNEL_OUTPUT_DIR}/vmlinux.initramfs > ${KERNEL_OUTPUT_DIR}/vmlinux.gz.initramfs
	fi
}
do_transform_bundled_initramfs[dirs] = "${B}"
|
||||
|
||||
python do_package:prepend () {
    # Packaging must strip kernel binaries with the kernel toolchain's strip.
    kernel_strip = d.getVar('KERNEL_STRIP').strip()
    d.setVar('STRIP', kernel_strip)
}
|
||||
|
||||
python do_devshell:prepend () {
    # A devshell in the kernel tree must not inherit the target LDFLAGS.
    os.environ["LDFLAGS"] = ''
}
|
||||
|
||||
addtask bundle_initramfs after do_install before do_deploy

# Set to "1" to embed real build timestamps (breaks build reproducibility).
KERNEL_DEBUG_TIMESTAMPS ??= "0"
|
||||
|
||||
kernel_do_compile() {
	# Build every image type in KERNEL_IMAGETYPE_FOR_MAKE, using a
	# reproducible build timestamp unless KERNEL_DEBUG_TIMESTAMPS == "1".
	unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE

	# setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
	export PKG_CONFIG_DIR="${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig"
	export PKG_CONFIG_PATH="$PKG_CONFIG_DIR:${STAGING_DATADIR_NATIVE}/pkgconfig"
	export PKG_CONFIG_LIBDIR="$PKG_CONFIG_DIR"
	export PKG_CONFIG_SYSROOT_DIR=""

	if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
		# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
		# be set....
		if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
			# The source directory is not necessarily a git repository, so we
			# specify the git-dir to ensure that git does not query a
			# repository in any parent directory.
			SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
		fi

		ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
		export KBUILD_BUILD_TIMESTAMP="$ts"
		export KCONFIG_NOTIMESTAMP=1
		bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
	else
		ts=`LC_ALL=C date`
		export KBUILD_BUILD_TIMESTAMP="$ts"
		bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
	fi
	# The $use_alternate_initrd is only set from
	# do_bundle_initramfs() This variable is specifically for the
	# case where we are making a second pass at the kernel
	# compilation and we want to force the kernel build to use a
	# different initramfs image. The way to do that in the kernel
	# is to specify:
	# make ...args... CONFIG_INITRAMFS_SOURCE=some_other_initramfs.cpio
	if [ "$use_alternate_initrd" = "" ] && [ "${INITRAMFS_TASK}" != "" ] ; then
		# The old style way of copying a prebuilt image and building it
		# is turned on via INITRAMFS_TASK != ""
		copy_initramfs
		use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
	fi
	for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
		oe_runmake ${PARALLEL_MAKE} ${typeformake} ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
	done
}
|
||||
|
||||
kernel_do_transform_kernel() {
	# vmlinux.gz is not produced by the kernel build system itself, so
	# generate it here from the freshly compiled vmlinux.
	if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
		mkdir -p "${KERNEL_OUTPUT_DIR}"
		gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
	fi
}
do_transform_kernel[dirs] = "${B}"
addtask transform_kernel after do_compile before do_install
|
||||
|
||||
do_compile_kernelmodules() {
	# Build the kernel modules (if CONFIG_MODULES=y) with the same
	# reproducible-timestamp environment as kernel_do_compile, then stage
	# Module.symvers and module.lds for out-of-tree module builds.
	unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE

	# setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
	export PKG_CONFIG_DIR="${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig"
	export PKG_CONFIG_PATH="$PKG_CONFIG_DIR:${STAGING_DATADIR_NATIVE}/pkgconfig"
	export PKG_CONFIG_LIBDIR="$PKG_CONFIG_DIR"
	export PKG_CONFIG_SYSROOT_DIR=""

	if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
		# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
		# be set....
		if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
			# The source directory is not necessarily a git repository, so we
			# specify the git-dir to ensure that git does not query a
			# repository in any parent directory.
			SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
		fi

		ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
		export KBUILD_BUILD_TIMESTAMP="$ts"
		export KCONFIG_NOTIMESTAMP=1
		bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
	else
		ts=`LC_ALL=C date`
		export KBUILD_BUILD_TIMESTAMP="$ts"
		bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
	fi
	if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
		oe_runmake -C ${B} ${PARALLEL_MAKE} modules ${KERNEL_EXTRA_ARGS}

		# Module.symvers gets updated during the
		# building of the kernel modules. We need to
		# update this in the shared workdir since some
		# external kernel modules has a dependency on
		# other kernel modules and will look at this
		# file to do symbol lookups
		cp ${B}/Module.symvers ${STAGING_KERNEL_BUILDDIR}/
		# 5.10+ kernels have module.lds that we need to copy for external module builds
		if [ -e "${B}/scripts/module.lds" ]; then
			install -Dm 0644 ${B}/scripts/module.lds ${STAGING_KERNEL_BUILDDIR}/scripts/module.lds
		fi
	else
		bbnote "no modules to compile"
	fi
}
addtask compile_kernelmodules after do_compile before do_strip
|
||||
|
||||
kernel_do_install() {
	#
	# First install the modules
	#
	unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
	if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
		oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
		# build/source symlinks point back into the build tree; drop them.
		rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
		rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
		# Remove empty module directories to prevent QA issues
		[ -d "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel" ] && find "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel" -type d -empty -delete
	else
		bbnote "no modules to install"
	fi

	#
	# Install various kernel output (zImage, map file, config, module support files)
	#
	install -d ${D}/${KERNEL_IMAGEDEST}

	#
	# When including an initramfs bundle inside a FIT image, the fitImage is created after the install task
	# by do_assemble_fitimage_initramfs.
	# This happens after the generation of the initramfs bundle (done by do_bundle_initramfs).
	# So, at the level of the install task we should not try to install the fitImage. fitImage is still not
	# generated yet.
	# After the generation of the fitImage, the deploy task copies the fitImage from the build directory to
	# the deploy folder.
	#

	for imageType in ${KERNEL_IMAGETYPES} ; do
		if [ $imageType != "fitImage" ] || [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then
			install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION}
		fi
	done

	install -m 0644 System.map ${D}/${KERNEL_IMAGEDEST}/System.map-${KERNEL_VERSION}
	install -m 0644 .config ${D}/${KERNEL_IMAGEDEST}/config-${KERNEL_VERSION}
	install -m 0644 vmlinux ${D}/${KERNEL_IMAGEDEST}/vmlinux-${KERNEL_VERSION}
	# Module.symvers only exists when modules were built; it is optional.
	! [ -e Module.symvers ] || install -m 0644 Module.symvers ${D}/${KERNEL_IMAGEDEST}/Module.symvers-${KERNEL_VERSION}
}
|
||||
|
||||
# Must be run no earlier than after do_kernel_checkout or else Makefile won't be in ${S}/Makefile
do_kernel_version_sanity_check() {
	# Verify that PV matches the version the kernel's top-level Makefile
	# will report at runtime; skip when KERNEL_VERSION_SANITY_SKIP = "1".
	if [ "x${KERNEL_VERSION_SANITY_SKIP}" = "x1" ]; then
		exit 0
	fi

	# The Makefile determines the kernel version shown at runtime
	# Don't use KERNEL_VERSION because the headers it grabs the version from aren't generated until do_compile
	VERSION=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
	PATCHLEVEL=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
	SUBLEVEL=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
	EXTRAVERSION=$(grep "^EXTRAVERSION =" ${S}/Makefile | sed s/.*=\ *//)

	# Build a string for regex and a plain version string
	reg="^${VERSION}\.${PATCHLEVEL}"
	vers="${VERSION}.${PATCHLEVEL}"
	if [ -n "${SUBLEVEL}" ]; then
		# Ignoring a SUBLEVEL of zero is fine
		if [ "${SUBLEVEL}" = "0" ]; then
			reg="${reg}(\.${SUBLEVEL})?"
		else
			reg="${reg}\.${SUBLEVEL}"
			vers="${vers}.${SUBLEVEL}"
		fi
	fi
	vers="${vers}${EXTRAVERSION}"
	reg="${reg}${EXTRAVERSION}"

	# Quote the command substitution so a multi-word grep result cannot
	# break the test expression.
	if [ -z "$(echo ${PV} | grep -E "${reg}")" ]; then
		bbfatal "Package Version (${PV}) does not match the version of the kernel being built (${vers}). Please update the PV variable to match the kernel source or set KERNEL_VERSION_SANITY_SKIP=\"1\" in your recipe."
	fi
	exit 0
}
|
||||
|
||||
addtask shared_workdir after do_compile before do_compile_kernelmodules
addtask shared_workdir_setscene

do_shared_workdir_setscene () {
	# The shared workdir cannot be restored from sstate; fail so the real
	# do_shared_workdir always runs.
	exit 1
}
|
||||
|
||||
emit_depmod_pkgdata() {
	# Record the kernel ABI version and System.map in PKGDESTWORK so the
	# depmod machinery can pick them up at rootfs-construction time.
	install -d ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/
	echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/${KERNEL_PACKAGE_NAME}-abiversion
	cp ${B}/System.map ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/System.map-${KERNEL_VERSION}
}

PACKAGEFUNCS += "emit_depmod_pkgdata"
|
||||
|
||||
do_shared_workdir[cleandirs] += " ${STAGING_KERNEL_BUILDDIR}"
do_shared_workdir () {
	# Populate STAGING_KERNEL_BUILDDIR with the build artifacts that
	# external module and tooling recipes need from this kernel build.
	cd ${B}

	kerneldir=${STAGING_KERNEL_BUILDDIR}
	install -d $kerneldir

	#
	# Store the kernel version in sysroots for module-base.bbclass
	#

	echo "${KERNEL_VERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-abiversion
	echo "${KERNEL_LOCALVERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-localversion

	# Copy files required for module builds
	cp System.map $kerneldir/System.map-${KERNEL_VERSION}
	! [ -e Module.symvers ] || cp Module.symvers $kerneldir/
	cp .config $kerneldir/
	mkdir -p $kerneldir/include/config
	cp include/config/kernel.release $kerneldir/include/config/kernel.release
	if [ -e certs/signing_key.x509 ]; then
		# The signing_key.* files are stored in the certs/ dir in
		# newer Linux kernels
		mkdir -p $kerneldir/certs
		cp certs/signing_key.* $kerneldir/certs/
	elif [ -e signing_key.priv ]; then
		cp signing_key.* $kerneldir/
	fi

	# We can also copy over all the generated files and avoid special cases
	# like version.h, but we've opted to keep this small until file creep starts
	# to happen
	if [ -e include/linux/version.h ]; then
		mkdir -p $kerneldir/include/linux
		cp include/linux/version.h $kerneldir/include/linux/version.h
	fi

	# As of Linux kernel version 3.0.1, the clean target removes
	# arch/powerpc/lib/crtsavres.o which is present in
	# KBUILD_LDFLAGS_MODULE, making it required to build external modules.
	if [ ${ARCH} = "powerpc" ]; then
		if [ -e arch/powerpc/lib/crtsavres.o ]; then
			mkdir -p $kerneldir/arch/powerpc/lib/
			cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
		fi
	fi

	if [ -d include/generated ]; then
		mkdir -p $kerneldir/include/generated/
		cp -fR include/generated/* $kerneldir/include/generated/
	fi

	if [ -d arch/${ARCH}/include/generated ]; then
		mkdir -p $kerneldir/arch/${ARCH}/include/generated/
		cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
	fi

	if (grep -q -i -e '^CONFIG_UNWINDER_ORC=y$' $kerneldir/.config); then
		# With CONFIG_UNWINDER_ORC (the default in 4.14), objtool is required for
		# out-of-tree modules to be able to generate object files.
		if [ -x tools/objtool/objtool ]; then
			mkdir -p ${kerneldir}/tools/objtool
			cp tools/objtool/objtool ${kerneldir}/tools/objtool/
		fi
	fi

	# When building with CONFIG_MODVERSIONS=y and CONFIG_RANDSTRUCT=y we need
	# to copy the build assets generated for the randstruct seed to
	# STAGING_KERNEL_BUILDDIR, otherwise the out-of-tree modules build will
	# generate those assets which will result in a different
	# RANDSTRUCT_HASHED_SEED
	if [ -d scripts/basic ]; then
		mkdir -p ${kerneldir}/scripts
		cp -r scripts/basic ${kerneldir}/scripts
	fi

	if [ -d scripts/gcc-plugins ]; then
		mkdir -p ${kerneldir}/scripts
		cp -r scripts/gcc-plugins ${kerneldir}/scripts
	fi

}
|
||||
|
||||
# We don't need to stage anything, not the modules/firmware since those would clash with linux-firmware
SYSROOT_DIRS = ""

# Resolve any new config symbols non-interactively; oldnoconfig is the
# fallback spelling used by older kernels.
KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} O=${B} olddefconfig || oe_runmake -C ${S} O=${B} oldnoconfig"
|
||||
|
||||
python check_oldest_kernel() {
    # Warn when the kernel being built is older than OLDEST_KERNEL, the
    # minimum version the toolchain's C library was configured to support.
    oldest = d.getVar('OLDEST_KERNEL')
    version = d.getVar('KERNEL_VERSION')
    libc = d.getVar('TCLIBC')
    if libc == 'glibc':
        # Compare against the plain upstream version: drop any local
        # suffix such as "-yocto-standard".
        version = version.split('-', 1)[0]
    if oldest and version and bb.utils.vercmp_string(version, oldest) < 0:
        bb.warn('%s: OLDEST_KERNEL is "%s" but the version of the kernel you are building is "%s" - therefore %s as built may not be compatible with this kernel. Either set OLDEST_KERNEL to an older version, or build a newer kernel.' % (d.getVar('PN'), oldest, version, libc))
}

check_oldest_kernel[vardepsexclude] += "OLDEST_KERNEL KERNEL_VERSION"
do_compile[postfuncs] += "check_oldest_kernel"
|
||||
|
||||
KERNEL_LOCALVERSION ??= ""

# 6.3+ requires the variable LOCALVERSION to be set to not get a "+" in
# the local version. Having it empty means nothing will be added, and any
# value will be appended to the local kernel version. This replaces the
# use of .scmversion file for setting a localversion without using
# the CONFIG_LOCALVERSION option.
#
# Note: This class saves the value of localversion to a file
# so other recipes like make-mod-scripts can restore it via the
# helper function get_kernellocalversion_file
export LOCALVERSION="${KERNEL_LOCALVERSION}"
|
||||
|
||||
kernel_do_configure() {
	# Produce ${B}/.config: seed it from an in-tree .config or the WORKDIR
	# defconfig, then let KERNEL_CONFIG_COMMAND resolve any new symbols.
	# fixes extra + in /lib/modules/2.6.37+
	# $ scripts/setlocalversion . => +
	# $ make kernelversion => 2.6.37
	# $ make kernelrelease => 2.6.37+
	# See kernel-arch.bbclass for post v6.3 removal of the extra
	# + in localversion. .scmversion is no longer used, and the
	# variable LOCALVERSION must be used
	if [ ! -e ${B}/.scmversion -a ! -e ${S}/.scmversion ]; then
		echo ${KERNEL_LOCALVERSION} > ${B}/.scmversion
		echo ${KERNEL_LOCALVERSION} > ${S}/.scmversion
	fi

	# For out-of-tree builds, migrate a .config left in the source tree.
	if [ "${S}" != "${B}" ] && [ -f "${S}/.config" ] && [ ! -f "${B}/.config" ]; then
		mv "${S}/.config" "${B}/.config"
	fi

	# Copy defconfig to .config if .config does not exist. This allows
	# recipes to manage the .config themselves in do_configure:prepend().
	if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
		cp "${WORKDIR}/defconfig" "${B}/.config"
	fi

	${KERNEL_CONFIG_COMMAND}
}
|
||||
|
||||
do_savedefconfig() {
	# Developer convenience task: write a minimal defconfig for the
	# current configuration into ${B}.
	bbplain "Saving defconfig to:\n${B}/defconfig"
	oe_runmake -C ${B} savedefconfig
}
do_savedefconfig[nostamp] = "1"
addtask savedefconfig after do_configure
|
||||
|
||||
inherit cml1 pkgconfig

# Need LD, HOSTLDFLAGS and more for config operations
KCONFIG_CONFIG_COMMAND:append = " ${EXTRA_OEMAKE}"

EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
|
||||
|
||||
# kernel-base becomes kernel-${KERNEL_VERSION}
# kernel-image becomes kernel-image-${KERNEL_VERSION}
PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules ${KERNEL_PACKAGE_NAME}-dbg"
FILES:${PN} = ""
FILES:${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo"
FILES:${KERNEL_PACKAGE_NAME}-image = ""
FILES:${KERNEL_PACKAGE_NAME}-dev = "/${KERNEL_IMAGEDEST}/System.map* /${KERNEL_IMAGEDEST}/Module.symvers* /${KERNEL_IMAGEDEST}/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
FILES:${KERNEL_PACKAGE_NAME}-vmlinux = "/${KERNEL_IMAGEDEST}/vmlinux-${KERNEL_VERSION_NAME}"
FILES:${KERNEL_PACKAGE_NAME}-modules = ""
FILES:${KERNEL_PACKAGE_NAME}-dbg = "/usr/lib/debug /usr/src/debug"
RDEPENDS:${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base (= ${EXTENDPKGV})"
# Allow machines to override this dependency if kernel image files are
# not wanted in images as standard
RRECOMMENDS:${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image (= ${EXTENDPKGV})"
PKG:${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
RDEPENDS:${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux (= ${EXTENDPKGV})', '', d)}"
PKG:${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
RPROVIDES:${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
ALLOW_EMPTY:${KERNEL_PACKAGE_NAME} = "1"
ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-base = "1"
ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-image = "1"
ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-modules = "1"
DESCRIPTION:${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
|
||||
|
||||
pkg_postinst:${KERNEL_PACKAGE_NAME}-base () {
	# Ensure the modules directory exists, then regenerate module
	# dependency data: through depmodwrapper when populating an image
	# ($D set), plain depmod when running on the target itself.
	if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
		mkdir -p $D/lib/modules/${KERNEL_VERSION}
	fi
	if [ -n "$D" ]; then
		depmodwrapper -a -b $D ${KERNEL_VERSION} ${KERNEL_PACKAGE_NAME}
	else
		depmod -a ${KERNEL_VERSION}
	fi
}
|
||||
|
||||
PACKAGESPLITFUNCS =+ "split_kernel_packages"

python split_kernel_packages () {
    # Give every firmware blob shipped under the firmware directory its own
    # ${KERNEL_PACKAGE_NAME}-firmware-<name> package.
    do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex=r'^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
}
|
||||
|
||||
# Many scripts want to look in arch/$arch/boot for the bootable
# image. This poses a problem for vmlinux and vmlinuz based
# booting. This task arranges to have vmlinux and vmlinuz appear
# in the normalized directory location.
do_kernel_link_images() {
	if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
		mkdir ${B}/arch/${ARCH}/boot
	fi
	cd ${B}/arch/${ARCH}/boot
	# vmlinux is always produced; the other variants only when present.
	ln -sf ../../../vmlinux
	for variant in vmlinuz vmlinuz.bin vmlinux.64; do
		if [ -f ../../../$variant ]; then
			ln -sf ../../../$variant
		fi
	done
}
addtask kernel_link_images after do_compile before do_strip
|
||||
|
||||
python do_strip() {
    # When KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, strip those extra ELF
    # sections from the built vmlinux into a vmlinux.stripped sibling file.
    import shutil

    strip = d.getVar('KERNEL_STRIP')
    extra_sections = d.getVar('KERNEL_IMAGE_STRIP_EXTRA_SECTIONS')
    kernel_image = d.getVar('B') + "/" + d.getVar('KERNEL_OUTPUT_DIR') + "/vmlinux"

    # NOTE(review): the substring check uses KERNEL_IMAGEDEST although
    # kernel_image is built from KERNEL_OUTPUT_DIR; it only matches when the
    # two share a trailing path component — confirm this is intended.
    if (extra_sections and kernel_image.find(d.getVar('KERNEL_IMAGEDEST') + '/vmlinux') != -1):
        kernel_image_stripped = kernel_image + ".stripped"
        shutil.copy2(kernel_image, kernel_image_stripped)
        oe.package.runstrip((kernel_image_stripped, 8, strip, extra_sections))
        bb.debug(1, "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections: " + \
            extra_sections)
}
do_strip[dirs] = "${B}"

addtask strip before do_sizecheck after do_kernel_link_images
|
||||
|
||||
# Support checking the kernel size since some kernels need to reside in partitions
# with a fixed length or there is a limit in transferring the kernel to memory.
# If more than one image type is enabled, warn on any that don't fit but only fail
# if none fit.
do_sizecheck() {
    if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
        # KERNEL_IMAGE_MAXSIZE must be a plain integer (unit: KiB).
        invalid=`echo ${KERNEL_IMAGE_MAXSIZE} | sed 's/[0-9]//g'`
        if [ -n "$invalid" ]; then
            die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integer (The unit is Kbytes)"
        fi
        at_least_one_fits=
        for imageType in ${KERNEL_IMAGETYPES} ; do
            size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'`
            if [ $size -gt ${KERNEL_IMAGE_MAXSIZE} ]; then
                bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
            else
                at_least_one_fits=y
            fi
        done
        if [ -z "$at_least_one_fits" ]; then
            die "All kernel images are too big for your device. Please reduce the size of the kernel by making more of it modular."
        fi
    fi
}
do_sizecheck[dirs] = "${B}"

addtask sizecheck before do_install after do_strip
|
||||
|
||||
inherit kernel-artifact-names

# Deploy kernel images (and optionally a module tarball and bundled
# initramfs images) into DEPLOYDIR, creating convenience symlinks.
kernel_do_deploy() {
    deployDir="${DEPLOYDIR}"
    if [ -n "${KERNEL_DEPLOYSUBDIR}" ]; then
        deployDir="${DEPLOYDIR}/${KERNEL_DEPLOYSUBDIR}"
        mkdir "$deployDir"
    fi

    for imageType in ${KERNEL_IMAGETYPES} ; do
        baseName=$imageType-${KERNEL_IMAGE_NAME}

        # Prefer the section-stripped variant produced by do_strip when it
        # exists and is non-empty.
        if [ -s ${KERNEL_OUTPUT_DIR}/$imageType.stripped ] ; then
            install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.stripped $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT}
        else
            install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT}
        fi
        if [ -n "${KERNEL_IMAGE_LINK_NAME}" ] ; then
            ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${KERNEL_IMAGE_LINK_NAME}${KERNEL_IMAGE_BIN_EXT}
        fi
        if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
            ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType
        fi
    done

    # Package the staged /lib/modules tree as a reproducible tarball
    # (sorted names, clamped mtimes, root ownership) when modules are built.
    if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
        mkdir -p ${D}${root_prefix}/lib
        if [ -n "${SOURCE_DATE_EPOCH}" ]; then
            TAR_ARGS="--sort=name --clamp-mtime --mtime=@${SOURCE_DATE_EPOCH}"
        else
            TAR_ARGS=""
        fi
        TAR_ARGS="$TAR_ARGS --owner=0 --group=0"
        tar $TAR_ARGS -cv -C ${D}${root_prefix} lib | gzip -9n > $deployDir/modules-${MODULE_TARBALL_NAME}.tgz

        if [ -n "${MODULE_TARBALL_LINK_NAME}" ] ; then
            ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
        fi
    fi

    # Deploy initramfs-bundled images; fitImage bundles are handled
    # elsewhere, so they are skipped here.
    if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
        for imageType in ${KERNEL_IMAGETYPES} ; do
            if [ "$imageType" = "fitImage" ] ; then
                continue
            fi
            initramfsBaseName=$imageType-${INITRAMFS_NAME}
            install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName${KERNEL_IMAGE_BIN_EXT}
            if [ -n "${INITRAMFS_LINK_NAME}" ] ; then
                ln -sf $initramfsBaseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${INITRAMFS_LINK_NAME}${KERNEL_IMAGE_BIN_EXT}
            fi
        done
    fi
}

# We deploy to filenames that include PKGV and PKGR, read the saved data to
# ensure we get the right values for both
do_deploy[prefuncs] += "read_subpackage_metadata"

addtask deploy after do_populate_sysroot do_packagedata

EXPORT_FUNCTIONS do_deploy

# Add using Device Tree support
inherit kernel-devicetree
|
||||
# ===========================================================================
# File: sources/poky/meta/classes-recipe/kernelsrc.bbclass (new file, 17 lines)
# ===========================================================================
|
||||
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

# Build directly out of the shared kernel source tree instead of fetching a
# source archive; fetch/unpack/patch are therefore disabled and replaced by a
# dependency on the kernel's shared workdir.
S = "${STAGING_KERNEL_DIR}"
deltask do_fetch
deltask do_unpack
do_patch[depends] += "virtual/kernel:do_shared_workdir"
do_patch[noexec] = "1"
do_package[depends] += "virtual/kernel:do_populate_sysroot"
# Version strings are read from the kernel's build directory artifacts.
KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
LOCAL_VERSION = "${@get_kernellocalversion_file("${STAGING_KERNEL_BUILDDIR}")}"

inherit linux-kernel-base
|
||||
|
||||
# ===========================================================================
# File: sources/poky/meta/classes-recipe/lib_package.bbclass (new file, 12 lines)
# ===========================================================================
|
||||
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
#
# ${PN}-bin is defined in bitbake.conf
#
# We need to allow the other packages to be greedy with what they
# want out of /usr/bin and /usr/sbin before ${PN}-bin gets greedy.
#
PACKAGE_BEFORE_PN = "${PN}-bin"
|
||||
# ===========================================================================
# File: sources/poky/meta/classes-recipe/libc-package.bbclass (new file, 392 lines)
# ===========================================================================
|
||||
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

#
# This class knows how to package up [e]glibc. It is shared since prebuilt binary
# toolchains may need packaging and it's pointless to duplicate this code.
#
# Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of:
# "compile" - Use QEMU to generate the binary locale files
# "precompiled" - The binary locale files are pregenerated and already present
# "ondevice" - The device will build the locale files upon first boot through the postinst

GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"

# When "1", each LC_* category gets its own binary-localedata package.
GLIBC_SPLIT_LC_PACKAGES ?= "0"
|
||||
|
||||
python __anonymous () {
    # Parse-time hook: if binary locale generation is enabled and the target
    # architecture matches one of the BINARY_LOCALE_ARCHES regexps, add the
    # required native tool to DEPENDS and switch the locale mode to "compile".
    enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION")

    pn = d.getVar("PN")
    # -initial (bootstrap) variants never generate binary locales.
    if pn.endswith("-initial"):
        enabled = False

    if enabled and int(enabled):
        import re

        target_arch = d.getVar("TARGET_ARCH")
        binary_arches = d.getVar("BINARY_LOCALE_ARCHES") or ""
        use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or ""

        # BINARY_LOCALE_ARCHES is a space-separated list of regexps matched
        # against TARGET_ARCH.
        for regexp in binary_arches.split(" "):
            r = re.compile(regexp)

            if r.match(target_arch):
                depends = d.getVar("DEPENDS")
                if use_cross_localedef == "1" :
                    depends = "%s cross-localedef-native" % depends
                else:
                    depends = "%s qemu-native" % depends
                d.setVar("DEPENDS", depends)
                d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
                break
}
|
||||
|
||||
# try to fix disable charsets/locales/locale-code compile fail
PACKAGE_NO_GCONV ?= "0"

OVERRIDES:append = ":${TARGET_ARCH}-${TARGET_OS}"

# Template for per-locale package postinst: the three %s slots are filled in
# with (locale, charmap, locale-name) by output_locale_source() below.
locale_base_postinst_ontarget() {
    mkdir ${libdir}/locale
    localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
}

# Matching postrm template: remove the locale from the archive again.
locale_base_postrm() {
    #!/bin/sh
    localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s %s
}

# Source tree the locale data is copied out of (defaults to the package
# staging dir).
LOCALETREESRC ?= "${PKGD}"
|
||||
|
||||
# Build a minimal chroot-like tree (${WORKDIR}/locale-tree) containing the
# i18n data, the target loader/libc and localedef, so binary locales can be
# generated under QEMU or with cross-localedef.
do_prep_locale_tree() {
    treedir=${WORKDIR}/locale-tree
    rm -rf $treedir
    mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${localedir}
    tar -cf - -C ${LOCALETREESRC}${datadir} -p i18n | tar -xf - -C $treedir/${datadir}
    # unzip to avoid parsing errors
    for i in $treedir/${datadir}/i18n/charmaps/*gz; do
        gunzip $i
    done
    # The extract pattern "./l*.so*" is carefully selected so that it will
    # match ld*.so and lib*.so*, but not any files in the gconv directory
    # (if it exists). This makes sure we only unpack the files we need.
    # This is important in case usrmerge is set in DISTRO_FEATURES, which
    # means ${base_libdir} == ${libdir}.
    tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir} --wildcards './l*.so*'
    if [ -f ${STAGING_LIBDIR_NATIVE}/libgcc_s.* ]; then
        tar -cf - -C ${STAGING_LIBDIR_NATIVE} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
    fi
    install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
}
|
||||
|
||||
# Copy the generated binary locales out of the locale tree back into ${PKGD}
# so they end up in the packages.
do_collect_bins_from_locale_tree() {
    treedir=${WORKDIR}/locale-tree

    parent=$(dirname ${localedir})
    mkdir -p ${PKGD}/$parent
    tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent

    # Finalize tree by changing all duplicate files into hard links
    cross-localedef-hardlink -c -v ${WORKDIR}/locale-tree
}
|
||||
|
||||
inherit qemu

python package_do_split_gconvs () {
    # Split glibc's gconv modules, charmaps and locale sources into individual
    # packages, then emit one locale-base-<name> package per locale to be
    # generated — either as binary locale data (compiled here via
    # cross-localedef/QEMU, or precompiled) or as postinst-driven on-device
    # generation, depending on GLIBC_INTERNAL_USE_BINARY_LOCALE.
    import re
    if (d.getVar('PACKAGE_NO_GCONV') == '1'):
        bb.note("package requested not splitting gconvs")
        return

    if not d.getVar('PACKAGES'):
        return

    mlprefix = d.getVar("MLPREFIX") or ""

    bpn = d.getVar('BPN')
    libdir = d.getVar('libdir')
    if not libdir:
        bb.error("libdir not defined")
        return
    datadir = d.getVar('datadir')
    if not datadir:
        bb.error("datadir not defined")
        return

    gconv_libdir = oe.path.join(libdir, "gconv")
    charmap_dir = oe.path.join(datadir, "i18n", "charmaps")
    locales_dir = oe.path.join(datadir, "i18n", "locales")
    binary_locales_dir = d.getVar('localedir')

    # do_split_packages hook: scan a gconv module's source for 'copy'/'include'
    # directives and turn them into RDEPENDS between gconv packages.
    def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
        deps = []
        f = open(fn, "rb")
        c_re = re.compile(r'^copy "(.*)"')
        i_re = re.compile(r'^include "(\w+)".*')
        for l in f.readlines():
            l = l.decode("latin-1")
            m = c_re.match(l) or i_re.match(l)
            if m:
                dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1)))
                if not dp in deps:
                    deps.append(dp)
        f.close()
        if deps != []:
            d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
        # Non-glibc providers (e.g. prebuilt toolchains) still RPROVIDE the
        # canonical glibc-* package names.
        if bpn != 'glibc':
            d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))

    do_split_packages(d, gconv_libdir, file_regex=r'^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
        description='gconv module for character set %s', hook=calc_gconv_deps, \
        extra_depends=bpn+'-gconv')

    # Same dependency scan for charmap files.
    def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
        deps = []
        f = open(fn, "rb")
        c_re = re.compile(r'^copy "(.*)"')
        i_re = re.compile(r'^include "(\w+)".*')
        for l in f.readlines():
            l = l.decode("latin-1")
            m = c_re.match(l) or i_re.match(l)
            if m:
                dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1)))
                if not dp in deps:
                    deps.append(dp)
        f.close()
        if deps != []:
            d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
        if bpn != 'glibc':
            d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))

    do_split_packages(d, charmap_dir, file_regex=r'^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
        description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')

    # Same dependency scan for locale source files.
    def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
        deps = []
        f = open(fn, "rb")
        c_re = re.compile(r'^copy "(.*)"')
        i_re = re.compile(r'^include "(\w+)".*')
        for l in f.readlines():
            l = l.decode("latin-1")
            m = c_re.match(l) or i_re.match(l)
            if m:
                dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1))
                if not dp in deps:
                    deps.append(dp)
        f.close()
        if deps != []:
            d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
        if bpn != 'glibc':
            d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))

    do_split_packages(d, locales_dir, file_regex=r'(.*)', output_pattern=bpn+'-localedata-%s', \
        description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
    d.setVar('PACKAGES', d.getVar('PACKAGES', False) + ' ' + d.getVar('MLPREFIX', False) + bpn + '-gconv')

    use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE")

    dot_re = re.compile(r"(.*)\.(.*)")

    # Read in supported locales and associated encodings
    supported = {}
    with open(oe.path.join(d.getVar('WORKDIR'), "SUPPORTED")) as f:
        for line in f.readlines():
            try:
                locale, charset = line.rstrip().split()
            except ValueError:
                continue
            supported[locale] = charset

    # GLIBC_GENERATE_LOCALES var specifies which locales to be generated. empty or "all" means all locales
    to_generate = d.getVar('GLIBC_GENERATE_LOCALES')
    if not to_generate or to_generate == 'all':
        to_generate = sorted(supported.keys())
    else:
        to_generate = to_generate.split()
        for locale in to_generate:
            if locale not in supported:
                if '.' in locale:
                    charset = locale.split('.')[1]
                else:
                    charset = 'UTF-8'
                bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset))
                supported[locale] = charset

    # "ondevice" path: wire the postinst/postrm templates up with the
    # concrete locale/charmap names so localedef runs on first boot.
    def output_locale_source(name, pkgname, locale, encoding):
        d.setVar('RDEPENDS:%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
            (mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
        d.setVar('pkg_postinst_ontarget:%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \
            % (locale, encoding, locale))
        d.setVar('pkg_postrm:%s' % pkgname, d.getVar('locale_base_postrm') % \
            (locale, encoding, locale))

    # Binary path: make the locale-base package depend on the generated
    # binary-localedata package (created per LC_* category when split).
    def output_locale_binary_rdepends(name, pkgname, locale, encoding):
        dep = legitimize_package_name('%s-binary-localedata-%s' % (bpn, name))
        lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
        if lcsplit and int(lcsplit):
            d.appendVar('PACKAGES', ' ' + dep)
            d.setVar('ALLOW_EMPTY:%s' % dep, '1')
        d.setVar('RDEPENDS:%s' % pkgname, mlprefix + dep)

    # Accumulates "output file -> generation command", later written into a
    # Makefile so locales can be generated in parallel.
    commands = {}

    def output_locale_binary(name, pkgname, locale, encoding):
        treedir = oe.path.join(d.getVar("WORKDIR"), "locale-tree")
        ldlibdir = oe.path.join(treedir, d.getVar("base_libdir"))
        path = d.getVar("PATH")
        i18npath = oe.path.join(treedir, datadir, "i18n")
        gconvpath = oe.path.join(treedir, "iconvdata")
        outputpath = oe.path.join(treedir, binary_locales_dir)

        use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or "0"
        if use_cross_localedef == "1":
            target_arch = d.getVar('TARGET_ARCH')
            # Per-arch endianness/alignment flags for cross-localedef.
            locale_arch_options = { \
                "arc":     " --uint32-align=4 --little-endian ", \
                "arceb":   " --uint32-align=4 --big-endian ",    \
                "arm":     " --uint32-align=4 --little-endian ", \
                "armeb":   " --uint32-align=4 --big-endian ",    \
                "aarch64": " --uint32-align=4 --little-endian ",    \
                "aarch64_be": " --uint32-align=4 --big-endian ",    \
                "sh4":     " --uint32-align=4 --big-endian ",    \
                "powerpc": " --uint32-align=4 --big-endian ",    \
                "powerpc64": " --uint32-align=4 --big-endian ",  \
                "powerpc64le": " --uint32-align=4 --little-endian ",  \
                "mips":    " --uint32-align=4 --big-endian ",    \
                "mipsisa32r6":    " --uint32-align=4 --big-endian ",    \
                "mips64":  " --uint32-align=4 --big-endian ",    \
                "mipsisa64r6":  " --uint32-align=4 --big-endian ",    \
                "mipsel":  " --uint32-align=4 --little-endian ", \
                "mipsisa32r6el":  " --uint32-align=4 --little-endian ", \
                "mips64el":" --uint32-align=4 --little-endian ", \
                "mipsisa64r6el":" --uint32-align=4 --little-endian ", \
                "riscv64": " --uint32-align=4 --little-endian ", \
                "riscv32": " --uint32-align=4 --little-endian ", \
                "i586":    " --uint32-align=4 --little-endian ", \
                "i686":    " --uint32-align=4 --little-endian ", \
                "x86_64":  " --uint32-align=4 --little-endian ", \
                "loongarch64":  " --uint32-align=4 --little-endian " }

            if target_arch in locale_arch_options:
                localedef_opts = locale_arch_options[target_arch]
            else:
                bb.error("locale_arch_options not found for target_arch=" + target_arch)
                bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")

            localedef_opts += " --force --no-hard-links --no-archive --prefix=%s \
                --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s --no-warnings=ascii" \
                % (treedir, treedir, datadir, locale, encoding, outputpath, name)

            cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \
                (path, i18npath, gconvpath, localedef_opts)
        else: # earlier slower qemu way
            qemu = qemu_target_binary(d)
            localedef_opts = "--force --no-hard-links --no-archive --prefix=%s \
                --inputfile=%s/i18n/locales/%s --charmap=%s %s" \
                % (treedir, datadir, locale, encoding, name)

            qemu_options = d.getVar('QEMU_OPTIONS')

            cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
                -E LD_LIBRARY_PATH=%s %s %s${base_bindir}/localedef %s" % \
                (path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)

        commands["%s/%s" % (outputpath, name)] = cmd

        bb.note("generating locale %s (%s)" % (locale, encoding))

    # Define the per-locale package and dispatch to the appropriate
    # generation strategy.
    def output_locale(name, locale, encoding):
        pkgname = d.getVar('MLPREFIX', False) + 'locale-base-' + legitimize_package_name(name)
        d.setVar('ALLOW_EMPTY:%s' % pkgname, '1')
        d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES')))
        rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
        m = re.match(r"(.*)_(.*)", name)
        if m:
            rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
        d.setVar('RPROVIDES:%s' % pkgname, rprovides)

        if use_bin == "compile":
            output_locale_binary_rdepends(name, pkgname, locale, encoding)
            output_locale_binary(name, pkgname, locale, encoding)
        elif use_bin == "precompiled":
            output_locale_binary_rdepends(name, pkgname, locale, encoding)
        else:
            output_locale_source(name, pkgname, locale, encoding)

    if use_bin == "compile":
        bb.note("preparing tree for binary locale generation")
        bb.build.exec_func("do_prep_locale_tree", d)

    utf8_only = int(d.getVar('LOCALE_UTF8_ONLY') or 0)
    utf8_is_default = int(d.getVar('LOCALE_UTF8_IS_DEFAULT') or 0)

    encodings = {}
    for locale in to_generate:
        charset = supported[locale]
        if utf8_only and charset != 'UTF-8':
            continue

        m = dot_re.match(locale)
        if m:
            base = m.group(1)
        else:
            base = locale

        # Non-precompiled locales may be renamed so that the default
        # (non-suffixed) encoding is always UTF-8, i.e., instead of en_US and
        # en_US.UTF-8, we have en_US and en_US.ISO-8859-1. This implicitly
        # contradicts SUPPORTED.
        if use_bin == "precompiled" or not utf8_is_default:
            output_locale(locale, base, charset)
        else:
            if charset == 'UTF-8':
                output_locale(base, base, charset)
            else:
                output_locale('%s.%s' % (base, charset), base, charset)

    # Hook for the split below: attach each LC_* sub-package to its locale's
    # meta package via RDEPENDS.
    def metapkg_hook(file, pkg, pattern, format, basename):
        name = basename.split('/', 1)[0]
        metapkg = legitimize_package_name('%s-binary-localedata-%s' % (mlprefix+bpn, name))
        d.appendVar('RDEPENDS:%s' % metapkg, ' ' + pkg)

    if use_bin == "compile":
        # Write all generation commands into a Makefile and run them through
        # oe_runmake so PARALLEL_MAKE applies; progress is scraped from the
        # echoed 'Progress i/total' lines.
        makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
        with open(makefile, "w") as m:
            m.write("all: %s\n\n" % " ".join(commands.keys()))
            total = len(commands)
            for i, (maketarget, makerecipe) in enumerate(commands.items()):
                m.write(maketarget + ":\n")
                m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
                m.write("\t" + makerecipe + "\n\n")
        d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
        d.setVarFlag("oe_runmake", "progress", r"outof:Progress\s(\d+)/(\d+)")
        bb.note("Executing binary locale generation makefile")
        bb.build.exec_func("oe_runmake", d)
        bb.note("collecting binary locales from locale tree")
        bb.build.exec_func("do_collect_bins_from_locale_tree", d)

    if use_bin in ('compile', 'precompiled'):
        lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
        if lcsplit and int(lcsplit):
            do_split_packages(d, binary_locales_dir, file_regex=r'^(.*/LC_\w+)', \
                output_pattern=bpn+'-binary-localedata-%s', \
                description='binary locale definition for %s', recursive=True,
                hook=metapkg_hook, extra_depends='', allow_dirs=True, match_path=True)
        else:
            do_split_packages(d, binary_locales_dir, file_regex=r'(.*)', \
                output_pattern=bpn+'-binary-localedata-%s', \
                description='binary locale definition for %s', extra_depends='', allow_dirs=True)
    else:
        bb.note("generation of binary locales disabled. this may break i18n!")

}
|
||||
|
||||
# We want to do this indirection so that we can safely 'return'
|
||||
# from the called function even though we're prepending
|
||||
# We want to do this indirection so that we can safely 'return'
# from the called function even though we're prepending
python populate_packages:prepend () {
    bb.build.exec_func('package_do_split_gconvs', d)
}
|
||||
# ===========================================================================
# File: sources/poky/meta/classes-recipe/license_image.bbclass (new file, 317 lines)
# ===========================================================================
|
||||
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

ROOTFS_LICENSE_DIR = "${IMAGE_ROOTFS}/usr/share/common-licenses"

# This requires LICENSE_CREATE_PACKAGE=1 to work too
COMPLEMENTARY_GLOB[lic-pkgs] = "*-lic"

python() {
    # Parse-time sanity check: 'lic-pkgs' only works when -lic packages are
    # actually being generated.
    if not oe.data.typed_value('LICENSE_CREATE_PACKAGE', d):
        features = set(oe.data.typed_value('IMAGE_FEATURES', d))
        if 'lic-pkgs' in features:
            bb.error("'lic-pkgs' in IMAGE_FEATURES but LICENSE_CREATE_PACKAGE not enabled to generate -lic packages")
}
|
||||
|
||||
python write_package_manifest() {
    # Write the list of packages installed in the image to
    # ${LICENSE_DIRECTORY}/${SSTATE_PKGARCH}/${IMAGE_NAME}/package.manifest.
    # Get list of installed packages
    license_image_dir = d.expand('${LICENSE_DIRECTORY}/${SSTATE_PKGARCH}/${IMAGE_NAME}')
    bb.utils.mkdirhier(license_image_dir)
    from oe.rootfs import image_list_installed_packages
    from oe.utils import format_pkg_list

    pkgs = image_list_installed_packages(d)
    output = format_pkg_list(pkgs)
    with open(os.path.join(license_image_dir, 'package.manifest'), "w+") as package_manifest:
        package_manifest.write(output)
}
|
||||
|
||||
python license_create_manifest() {
    # Collect per-package license metadata from pkgdata for every package
    # installed in the image and write the rootfs license.manifest.
    import oe.packagedata
    from oe.rootfs import image_list_installed_packages

    # Nothing to do when images are assembled from pre-built feeds.
    build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS')
    if build_images_from_feeds == "1":
        return 0

    pkg_dic = {}
    for pkg in sorted(image_list_installed_packages(d)):
        pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
                                'runtime-reverse', pkg)
        # runtime-reverse entries are symlinks back to the real package name.
        pkg_name = os.path.basename(os.readlink(pkg_info))

        pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
        # Fall back to the per-package LICENSE:<pkg> key when no global
        # LICENSE key exists in the pkgdata file.
        if not "LICENSE" in pkg_dic[pkg_name].keys():
            pkg_lic_name = "LICENSE:" + pkg_name
            pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]

    rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
                        d.getVar('SSTATE_PKGARCH'), d.getVar('IMAGE_NAME'), 'license.manifest')
    write_license_files(d, rootfs_license_manifest, pkg_dic, rootfs=True)
}
|
||||
|
||||
def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
    """
    Write a license manifest for the packages in pkg_dic and, optionally,
    copy the manifest and per-package license files into the rootfs.

    d                -- the BitBake datastore
    license_manifest -- path of the manifest file to write
    pkg_dic          -- dict of package name -> pkgdata dict (entries with an
                        "IMAGE_MANIFEST" key are formatted as image-manifest
                        records instead of rootfs records)
    rootfs           -- when True, honour COPY_LIC_MANIFEST / COPY_LIC_DIRS
                        and populate ROOTFS_LICENSE_DIR

    Packages carrying an INCOMPATIBLE_LICENSE (after applying
    INCOMPATIBLE_LICENSE_EXCEPTIONS) cause a fatal error.
    """
    import re
    import stat

    bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
    bad_licenses = expand_wildcard_licenses(d, bad_licenses)
    # Search sstate package architectures most-specific first.
    pkgarchs = d.getVar("SSTATE_ARCHS").split()
    pkgarchs.reverse()

    exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()
    with open(license_manifest, "w") as license_file:
        for pkg in sorted(pkg_dic):
            remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)
            incompatible_licenses = incompatible_pkg_license(d, remaining_bad_licenses, pkg_dic[pkg]["LICENSE"])
            if incompatible_licenses:
                bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(incompatible_licenses)))
            else:
                # Incompatible but explicitly excepted: include it, but raise
                # a QA warning so the exception is visible.
                incompatible_licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
                if incompatible_licenses:
                    oe.qa.handle_error('license-incompatible', "Including %s with incompatible license(s) %s into the image, because it has been allowed by exception list." %(pkg, ' '.join(incompatible_licenses)), d)
            try:
                (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
                    oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
                    remaining_bad_licenses, canonical_license, d)
            except oe.license.LicenseError as exc:
                bb.fatal('%s: %s' % (d.getVar('P'), exc))

            if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
                # Rootfs manifest
                license_file.write("PACKAGE NAME: %s\n" % pkg)
                license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"])
                license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
                license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"])

                # If the package doesn't contain any file, that is, its size is 0, the license
                # isn't relevant as far as the final image is concerned. So doing license check
                # doesn't make much sense, skip it.
                if pkg_dic[pkg]["PKGSIZE:%s" % pkg] == "0":
                    continue
            else:
                # Image manifest
                license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
                license_file.write("VERSION: %s\n" % pkg_dic[pkg]["PV"])
                license_file.write("LICENSE: %s\n" % pkg_dic[pkg]["LICENSE"])
                license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])

            for lic in pkg_dic[pkg]["LICENSES"]:
                for pkgarch in pkgarchs:
                    lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY'),
                                    pkgarch,
                                    pkg_dic[pkg]["PN"], "generic_%s" %
                                    re.sub(r'\+', '', lic))
                    if os.path.exists(lic_file):
                        break
                # Explicitly skip the CLOSED license because it has no
                # generic license text.
                if lic == "CLOSED":
                    continue

                if not os.path.exists(lic_file):
                    oe.qa.handle_error('license-file-missing',
                                "The license listed %s was not in the "\
                                "licenses collected for recipe %s"
                                % (lic, pkg_dic[pkg]["PN"]), d)
    oe.qa.exit_if_errors(d)

    # Two options here:
    # - Just copy the manifest
    # - Copy the manifest and the license directories
    # With both options set we see a .5 M increase in core-image-minimal
    copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
    copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
    if rootfs and copy_lic_manifest == "1":
        rootfs_license_dir = d.getVar('ROOTFS_LICENSE_DIR')
        bb.utils.mkdirhier(rootfs_license_dir)
        rootfs_license_manifest = os.path.join(rootfs_license_dir,
                                os.path.split(license_manifest)[1])
        if not os.path.exists(rootfs_license_manifest):
            oe.path.copyhardlink(license_manifest, rootfs_license_manifest)

        if copy_lic_dirs == "1":
            for pkg in sorted(pkg_dic):
                pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
                bb.utils.mkdirhier(pkg_rootfs_license_dir)
                for pkgarch in pkgarchs:
                    pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
                                                pkgarch, pkg_dic[pkg]["PN"])
                    if os.path.exists(pkg_license_dir):
                        break
                if not os.path.exists(pkg_license_dir ):
                    bb.fatal("Couldn't find license information for dependency %s" % pkg)

                pkg_manifest_licenses = [canonical_license(d, lic) \
                        for lic in pkg_dic[pkg]["LICENSES"]]

                licenses = os.listdir(pkg_license_dir)
                for lic in licenses:
                    pkg_license = os.path.join(pkg_license_dir, lic)
                    pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)

                    if re.match(r"^generic_.*$", lic):
                        generic_lic = canonical_license(d,
                                re.search(r"^generic_(.*)$", lic).group(1))

                        # Do not copy generic license into package if isn't
                        # declared into LICENSES of the package.
                        if not re.sub(r'\+$', '', generic_lic) in \
                                [re.sub(r'\+', '', lic) for lic in \
                                pkg_manifest_licenses]:
                            continue

                        if oe.license.license_ok(generic_lic,
                                bad_licenses) == False:
                            continue

                        # Make sure we use only canonical name for the license file
                        generic_lic_file = "generic_%s" % generic_lic
                        rootfs_license = os.path.join(rootfs_license_dir, generic_lic_file)
                        if not os.path.exists(rootfs_license):
                            oe.path.copyhardlink(pkg_license, rootfs_license)

                        # Per-package entry is just a symlink to the shared
                        # generic license file.
                        if not os.path.exists(pkg_rootfs_license):
                            os.symlink(os.path.join('..', generic_lic_file), pkg_rootfs_license)
                    else:
                        if (oe.license.license_ok(canonical_license(d,
                                lic), bad_licenses) == False or
                                os.path.exists(pkg_rootfs_license)):
                            continue

                        oe.path.copyhardlink(pkg_license, pkg_rootfs_license)
            # Fixup file ownership and permissions
            for walkroot, dirs, files in os.walk(rootfs_license_dir):
                for f in files:
                    p = os.path.join(walkroot, f)
                    os.lchown(p, 0, 0)
                    if not os.path.islink(p):
                        os.chmod(p, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
                for dir in dirs:
                    p = os.path.join(walkroot, dir)
                    os.lchown(p, 0, 0)
                    os.chmod(p, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)

write_license_files[vardepsexclude] = "SSTATE_ARCHS"
|
||||
|
||||
def license_deployed_manifest(d):
    """
    Write the license manifest for the deployed recipes.

    Deployed recipes typically include the bootloader and any extra
    files needed to boot the target; these never enter the rootfs and
    therefore need their own manifest.
    """
    lic_dir = d.getVar("LICENSE_DIRECTORY")
    # Probe newest arch first when looking for per-arch recipeinfo data.
    pkgarchs = list(reversed(d.getVar("SSTATE_ARCHS").split()))

    deployed = get_deployed_dependencies(d)
    manifest = {}
    for dep, sstate_manifest in deployed.items():
        entry = {
            # Mark the entry so the writer treats it as part of the
            # image manifest rather than a package manifest.
            "IMAGE_MANIFEST": True,
            "PN": dep,
            "FILES": " ".join(get_deployed_files(sstate_manifest)),
        }

        # recipeinfo lives under a per-arch directory; try each arch
        # until an existing file is found.
        for pkgarch in pkgarchs:
            licfile = os.path.join(lic_dir, pkgarch, dep, "recipeinfo")
            if os.path.exists(licfile):
                break
        if not os.path.exists(licfile):
            bb.fatal("Couldn't find license information for dependency %s" % dep)

        with open(licfile, "r") as f:
            for line in f.readlines():
                key, val = line.split(": ", 1)
                entry[key] = val[:-1]  # drop the trailing newline
        manifest[dep] = entry

    lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'), d.getVar('SSTATE_PKGARCH'),
                                    d.getVar('IMAGE_NAME'))
    bb.utils.mkdirhier(lic_manifest_dir)
    image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
    write_license_files(d, image_license_manifest, manifest, rootfs=False)

    link_name = d.getVar('IMAGE_LINK_NAME')
    if link_name:
        symlink_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'), d.getVar('SSTATE_PKGARCH'),
                                   link_name)
        # Drop any stale symlink from a previous build before recreating it.
        if os.path.islink(symlink_dir):
            os.unlink(symlink_dir)
        if lic_manifest_dir != symlink_dir:
            os.symlink(lic_manifest_dir, symlink_dir)
|
||||
|
||||
license_deployed_manifest[vardepsexclude] = "SSTATE_ARCHS"
|
||||
|
||||
def get_deployed_dependencies(d):
    """
    Return a {recipe: sstate manifest path} mapping for every image
    dependency that deployed files (i.e. has a do_deploy manifest).
    """
    # All task-level dependencies of the current (rootfs) task, minus
    # native tools and the image recipe itself.
    taskdata = d.getVar("BB_TASKDEPDATA", False)
    pn = d.getVar("PN")
    candidates = set()
    for dep in taskdata.values():
        name = dep[0]
        if not name.endswith("-native") and name != pn:
            candidates.add(name)

    # A recipe deployed something iff manifest-<arch>-<recipe>.deploy
    # exists under SSTATE_MANIFESTS. The manifest file name embeds the
    # arch, and we are not in the recipe's context here, so every known
    # arch has to be probed.
    manifest_dir = d.getVar("SSTATE_MANIFESTS")
    archs = set(d.getVar("SSTATE_ARCHS").split())
    deploy = {}
    for name in candidates:
        for arch in archs:
            candidate = os.path.join(manifest_dir,
                                     "manifest-%s-%s.deploy" % (arch, name))
            if os.path.exists(candidate):
                deploy[name] = candidate
                break

    return deploy
|
||||
get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA SSTATE_ARCHS"
|
||||
|
||||
def get_deployed_files(man_file):
    """
    Return the base names of the files listed in an sstate deploy
    manifest, skipping symlinks and directories.
    """
    excluded_files = []
    deployed = []
    with open(man_file, "r") as manifest:
        entries = manifest.read().splitlines()
    for entry in entries:
        # Only regular (non-link) files count as deployed artifacts.
        if os.path.islink(entry) or os.path.isdir(entry):
            continue
        base = os.path.basename(entry)
        if base not in excluded_files:
            deployed.append(base)
    return deployed
|
||||
|
||||
# Generate the package and license manifests before any other rootfs
# post-processing runs.
ROOTFS_POSTPROCESS_COMMAND:prepend = "write_package_manifest license_create_manifest "
# The rootfs needs license data for everything in the image, so run
# do_populate_lic across the whole dependency tree first.
do_rootfs[recrdeptask] += "do_populate_lic"

# Write the manifest covering deployed (non-rootfs) artifacts such as
# the bootloader, and fail the build if QA errors were raised.
python do_populate_lic_deploy() {
    license_deployed_manifest(d)
    oe.qa.exit_if_errors(d)
}

addtask populate_lic_deploy before do_build after do_image_complete
do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy"
|
||||
|
||||
# QA check: every symlink below the rootfs license directory must
# resolve; a dangling link means a license file was never copied in.
python license_qa_dead_symlink() {
    import os

    for root, dirs, files in os.walk(d.getVar('ROOTFS_LICENSE_DIR')):
        for file in files:
            full_path = root + "/" + file
            # os.path.exists() follows the link, so "is a link but the
            # target does not exist" identifies a dead symlink.
            if os.path.islink(full_path) and not os.path.exists(full_path):
                bb.error("broken symlink: " + full_path)
}
IMAGE_QA_COMMANDS += "license_qa_dead_symlink"
|
||||
31
sources/poky/meta/classes-recipe/linux-dummy.bbclass
Normal file
31
sources/poky/meta/classes-recipe/linux-dummy.bbclass
Normal file
@@ -0,0 +1,31 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# When linux-dummy provides virtual/kernel no real kernel recipe runs,
# so synthesize the (empty) packages a kernel normally provides, using
# the same package naming as kernel.bbclass.
python __anonymous () {
    if d.getVar('PREFERRED_PROVIDER_virtual/kernel') == 'linux-dummy':
        # copy part codes from kernel.bbclass
        kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"

        # set an empty package of kernel-devicetree
        d.appendVar('PACKAGES', ' %s-devicetree' % kname)
        d.setVar('ALLOW_EMPTY:%s-devicetree' % kname, '1')

        # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
        type = d.getVar('KERNEL_IMAGETYPE') or ""
        alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
        types = d.getVar('KERNEL_IMAGETYPES') or ""
        if type not in types.split():
            types = (type + ' ' + types).strip()
        if alttype not in types.split():
            types = (alttype + ' ' + types).strip()

        # set empty packages of kernel-image-*
        for type in types.split():
            typelower = type.lower()
            d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
            d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
}
|
||||
|
||||
62
sources/poky/meta/classes-recipe/linux-kernel-base.bbclass
Normal file
62
sources/poky/meta/classes-recipe/linux-kernel-base.bbclass
Normal file
@@ -0,0 +1,62 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# parse kernel ABI version out of <linux/version.h>
def get_kernelversion_headers(p):
    """
    Extract the kernel version (UTS_RELEASE) from the built kernel
    headers under *p*, returning it as a string, or None when no
    header exists or none defines UTS_RELEASE.
    """
    import re

    # utsrelease.h moved in 2.6.33-rc1; fall back through the historic
    # locations, oldest layout last.
    fn = p + '/include/linux/utsrelease.h'
    if not os.path.isfile(fn):
        # after 2.6.33-rc1
        fn = p + '/include/generated/utsrelease.h'
        if not os.path.isfile(fn):
            fn = p + '/include/linux/version.h'

    try:
        # with-statement guarantees the handle is closed even if the
        # read raises (the original open/readlines/close leaked it).
        with open(fn, 'r') as f:
            lines = f.readlines()
    except IOError:
        return None

    r = re.compile(r"#define UTS_RELEASE \"(.*)\"")
    for s in lines:
        m = r.match(s)
        if m:
            return m.group(1)
    return None
|
||||
|
||||
|
||||
def get_kernelversion_file(p):
    """
    Return the kernel ABI version recorded by the kernel build in the
    kernel-abiversion file under *p*, or None when that file is
    unreadable.
    """
    try:
        with open(p + '/kernel-abiversion', 'r') as f:
            return f.readlines()[0].strip()
    except IOError:
        return None
|
||||
|
||||
def get_kernellocalversion_file(p):
    """
    Return the kernel LOCALVERSION recorded by the kernel build in the
    kernel-localversion file under *p*, or "" when that file is
    unreadable.
    """
    fn = p + '/kernel-localversion'

    # Both paths return from inside the try/except; the duplicate
    # trailing `return ""` the original carried was unreachable and
    # has been removed.
    try:
        with open(fn, 'r') as f:
            return f.readlines()[0].strip()
    except IOError:
        return ""
|
||||
|
||||
def linux_module_packages(s, d):
    """
    Map a space-separated list of kernel module names to the matching
    kernel-module-* package names (lowercased, '_' -> '-', '@' -> '+').
    """
    suffix = ""
    names = []
    for module in s.split():
        cleaned = module.lower().replace('_', '-').replace('@', '+')
        names.append("kernel-module-%s%s" % (cleaned, suffix))
    return " ".join(names)
|
||||
|
||||
export KBUILD_BUILD_VERSION = "1"
|
||||
export KBUILD_BUILD_USER ?= "oe-user"
|
||||
export KBUILD_BUILD_HOST ?= "oe-host"
|
||||
|
||||
# that's all
|
||||
|
||||
84
sources/poky/meta/classes-recipe/linuxloader.bbclass
Normal file
84
sources/poky/meta/classes-recipe/linuxloader.bbclass
Normal file
@@ -0,0 +1,84 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
def get_musl_loader_arch(d):
    """
    Return the architecture component of the musl dynamic loader name
    for TARGET_ARCH, or "NotSupported" for unknown architectures.

    The returned strings deliberately keep BitBake ${...} expressions
    unexpanded; they are substituted later where the value is used.
    """
    import re

    targetarch = d.getVar("TARGET_ARCH")
    # Ordered (predicate, loader-arch) table; the first hit wins,
    # mirroring the if/elif chain this replaces.
    table = [
        (lambda a: a.startswith("microblaze"),
         "microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el', d)}"),
        (lambda a: a.startswith("mips"),
         "mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"),
        (lambda a: a == "powerpc",
         "powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"),
        (lambda a: a.startswith("powerpc64"),
         "powerpc64${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'le', d)}"),
        (lambda a: a == "x86_64", "x86_64"),
        (lambda a: re.search("i.86", a), "i386"),
        (lambda a: a.startswith("arm"),
         "arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}"),
        (lambda a: a.startswith("aarch64"),
         "aarch64${ARMPKGSFX_ENDIAN_64}"),
        (lambda a: a.startswith("riscv64"),
         "riscv64${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"),
        (lambda a: a.startswith("riscv32"),
         "riscv32${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"),
    ]
    for matches, ldso_arch in table:
        if matches(targetarch):
            return ldso_arch
    return "NotSupported"
|
||||
|
||||
def get_musl_loader(d):
    """
    Return the absolute path of the musl dynamic loader for the
    current TARGET_ARCH, e.g. /lib/ld-musl-x86_64.so.1.
    """
    # The `import re` the original carried here was unused (the regex
    # work happens in get_musl_loader_arch) and has been removed.
    return "/lib/ld-musl-" + get_musl_loader_arch(d) + ".so.1"
|
||||
|
||||
def get_glibc_loader(d):
    """
    Return the path of the glibc dynamic loader for TARGET_ARCH
    (expressed with ${base_libdir} left unexpanded), or "NotSupported"
    for unknown architectures.
    """
    import re

    targetarch = d.getVar("TARGET_ARCH")
    # Guard-clause form of the original if/elif chain; order matters
    # (exact matches are tested before their startswith() prefixes).
    if targetarch in ["powerpc", "microblaze"]:
        return "${base_libdir}/ld.so.1"
    if targetarch in ["mipsisa32r6el", "mipsisa32r6", "mipsisa64r6el", "mipsisa64r6"]:
        return "${base_libdir}/ld-linux-mipsn8.so.1"
    if targetarch.startswith("mips"):
        return "${base_libdir}/ld.so.1"
    if targetarch.startswith("loongarch64"):
        return "${base_libdir}/ld-linux-loongarch-lp64d.so.1"
    if targetarch == "powerpc64le":
        return "${base_libdir}/ld64.so.2"
    if targetarch == "powerpc64":
        return "${base_libdir}/ld64.so.1"
    if targetarch == "x86_64":
        return "${base_libdir}/ld-linux-x86-64.so.2"
    if re.search("i.86", targetarch):
        return "${base_libdir}/ld-linux.so.2"
    if targetarch == "arm":
        return "${base_libdir}/ld-linux${@['', '-armhf'][d.getVar('TARGET_FPU') == 'hard']}.so.3"
    if targetarch.startswith("aarch64"):
        return "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
    if targetarch.startswith("riscv64"):
        return "${base_libdir}/ld-linux-riscv64-lp64${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
    if targetarch.startswith("riscv32"):
        return "${base_libdir}/ld-linux-riscv32-ilp32${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
    return "NotSupported"
|
||||
|
||||
def get_linuxloader(d):
    """
    Return the dynamic loader path for the libc selected via
    OVERRIDES, or "NotSupported" for baremetal targets.
    """
    overrides = d.getVar("OVERRIDES").split(":")

    if "libc-baremetal" in overrides:
        return "NotSupported"

    # Pick the helper matching the configured C library.
    loader_for = get_musl_loader if "libc-musl" in overrides else get_glibc_loader
    return loader_for(d)
|
||||
|
||||
get_linuxloader[vardepvalue] = "${@get_linuxloader(d)}"
|
||||
get_musl_loader[vardepvalue] = "${@get_musl_loader(d)}"
|
||||
get_musl_loader_arch[vardepvalue] = "${@get_musl_loader_arch(d)}"
|
||||
get_glibc_loader[vardepvalue] = "${@get_glibc_loader(d)}"
|
||||
100
sources/poky/meta/classes-recipe/live-vm-common.bbclass
Normal file
100
sources/poky/meta/classes-recipe/live-vm-common.bbclass
Normal file
@@ -0,0 +1,100 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Some variables are shared between the live and vm image types; this
# helper resolves them from their suffixed variants.
def set_live_vm_vars(d, suffix):
    """
    For each shared boot variable, copy the suffixed variant (e.g.
    GRUB_CFG_<suffix>) into the plain name, warning when the plain
    name is already set directly.
    """
    for base in ('GRUB_CFG', 'SYSLINUX_CFG', 'ROOT', 'LABELS', 'INITRD'):
        suffixed = '%s_%s' % (base, suffix)
        if d.getVar(base):
            bb.warn('Found potential conflicted var %s, please use %s rather than %s' %
                    (base, suffixed, base))
            continue
        value = d.getVar(suffixed)
        if value:
            d.setVar(base, value)
|
||||
|
||||
|
||||
EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
|
||||
EFI_PROVIDER ?= "grub-efi"
|
||||
EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
|
||||
|
||||
MKDOSFS_EXTRAOPTS ??= "-S 512"
|
||||
|
||||
# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it
# does not contain "efi". This keeps legacy supported by default when
# neither feature is specified.
def pcbios(d):
    """Return "1" when legacy BIOS boot support should be included,
    "0" otherwise."""
    if bb.utils.contains("MACHINE_FEATURES", "pcbios", True, False, d):
        return "1"
    # No explicit pcbios feature: legacy is on unless EFI is requested.
    return bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d)
|
||||
|
||||
PCBIOS = "${@pcbios(d)}"
|
||||
PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS') == '1']}"
|
||||
|
||||
# efi_populate_common DEST BOOTLOADER
# Install the bootloader's EFI boot image into DEST/EFIDIR and write a
# startup.nsh so the EFI shell launches it automatically.
efi_populate_common() {
    # DEST must be the root of the image so that EFIDIR is not
    # nested under a top level directory.
    DEST=$1

    install -d ${DEST}${EFIDIR}

    install -m 0644 ${DEPLOY_DIR_IMAGE}/$2-${EFI_BOOT_IMAGE} ${DEST}${EFIDIR}/${EFI_BOOT_IMAGE}
    # startup.nsh wants backslash path separators
    EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
    printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${DEST}/startup.nsh
}
|
||||
|
||||
# Populate EFI files for an ISO image: run the normal EFI populate into
# the ISO tree, then stage a copy under EFIIMGDIR from which the
# separate efi.img boot image is built.
efi_iso_populate() {
    iso_dir=$1
    efi_populate $iso_dir
    # Build a EFI directory to create efi.img
    mkdir -p ${EFIIMGDIR}/${EFIDIR}
    cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
    cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}

    # startup.nsh wants backslash path separators
    EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
    printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${EFIIMGDIR}/startup.nsh

    # the initrd is optional
    if [ -f "$iso_dir/initrd" ] ; then
        cp $iso_dir/initrd ${EFIIMGDIR}
    fi
}
|
||||
|
||||
# For hddimg images the plain EFI populate is sufficient.
efi_hddimg_populate() {
    efi_populate $1
}
|
||||
|
||||
inherit_defer ${EFI_CLASS}
|
||||
inherit_defer ${PCBIOS_CLASS}
|
||||
|
||||
# populate_kernel DEST
# Copy the kernel image into DEST and assemble DEST/initrd by
# concatenating the filesystem images listed in INITRD.
populate_kernel() {
    dest=$1
    install -d $dest

    # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
    bbnote "Trying to install ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} as $dest/${KERNEL_IMAGETYPE}"
    if [ -e ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} ]; then
        install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} $dest/${KERNEL_IMAGETYPE}
    else
        bbwarn "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} doesn't exist"
    fi

    # initrd is made of concatenation of multiple filesystem images
    if [ -n "${INITRD}" ]; then
        rm -f $dest/initrd
        for fs in ${INITRD}
        do
            # -s: each component must exist and be non-empty
            if [ -s "$fs" ]; then
                cat $fs >> $dest/initrd
            else
                bbfatal "$fs is invalid. initrd image creation failed."
            fi
        done
        chmod 0644 $dest/initrd
    fi
}
|
||||
|
||||
41
sources/poky/meta/classes-recipe/manpages.bbclass
Normal file
41
sources/poky/meta/classes-recipe/manpages.bbclass
Normal file
@@ -0,0 +1,41 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
# Inherit this class to enable or disable building and installation of manpages
|
||||
# depending on whether 'api-documentation' is in DISTRO_FEATURES. Such building
|
||||
# tends to pull in the entire XML stack and other tools, so it's not enabled
|
||||
# by default.
|
||||
PACKAGECONFIG:append:class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
|
||||
|
||||
inherit qemu
|
||||
|
||||
# usually manual files are packaged to ${PN}-doc except man-pages
|
||||
MAN_PKG ?= "${PN}-doc"
|
||||
|
||||
# only add man-db to RDEPENDS when manual files are built and installed
|
||||
RDEPENDS:${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
|
||||
|
||||
# Rebuild the man-db index after installing manual pages. At image
# build time ($D set) the work is delegated to a postinst intercept
# (or deferred to first boot without qemu usermode); on target it runs
# mandb directly.
pkg_postinst:${MAN_PKG}:append () {
	# only update manual page index caches when manual files are built and installed
	if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
		if test -n "$D"; then
			if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true', 'false', d)}; then
				$INTERCEPT_DIR/postinst_intercept update_mandb ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} bindir=${bindir} sysconfdir=${sysconfdir} mandir=${mandir}
			else
				$INTERCEPT_DIR/postinst_intercept delay_to_first_boot ${PKG} mlprefix=${MLPREFIX}
			fi
		else
			mandb -q
		fi
	fi
}
|
||||
|
||||
# Refresh the man-db index after manual pages are removed.
pkg_postrm:${MAN_PKG}:append () {
	# only update manual page index caches when manual files are built and installed
	if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
		mandb -q
	fi
}
|
||||
59
sources/poky/meta/classes-recipe/meson-routines.bbclass
Normal file
59
sources/poky/meta/classes-recipe/meson-routines.bbclass
Normal file
@@ -0,0 +1,59 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit siteinfo
|
||||
|
||||
def meson_array(var, d):
    """
    Render a whitespace-split BitBake variable as a Meson value:
    a quoted string for a single element, a list literal otherwise.
    """
    items = d.getVar(var).split()
    if len(items) == 1:
        return repr(items[0])
    return repr(items)
|
||||
|
||||
# Map our ARCH values to what Meson expects:
# http://mesonbuild.com/Reference-tables.html#cpu-families
def meson_cpu_family(var, d):
    """
    Translate the architecture stored in BitBake variable *var* into a
    Meson cpu_family name.
    """
    import re
    arch = d.getVar(var)
    # Exact renames; anything unlisted falls through unchanged. The
    # original chain also mapped 'loongarch64' to itself, which was
    # redundant and has been folded into the default.
    renames = {
        'powerpc': 'ppc',
        'powerpc64': 'ppc64',
        'powerpc64le': 'ppc64',
        'armeb': 'arm',
        'aarch64_be': 'aarch64',
        'mipsel': 'mips',
        'mips64el': 'mips64',
        'microblazeel': 'microblaze',
    }
    if arch in renames:
        return renames[arch]
    # All ia32 variants (i386..i686) are plain "x86" to Meson.
    if re.match(r"i[3-6]86", arch):
        return "x86"
    return arch
|
||||
|
||||
# Map our OS values to what Meson expects:
# https://mesonbuild.com/Reference-tables.html#operating-system-names
def meson_operating_system(var, d):
    """
    Translate the OS stored in BitBake variable *var* into a Meson
    operating-system name.
    """
    host_os = d.getVar(var)
    if "mingw" in host_os:
        return "windows"
    # substring match so e.g. 'linux-gnueabi' also maps to 'linux'
    if "linux" in host_os:
        return "linux"
    return host_os
|
||||
|
||||
def meson_endian(prefix, d):
    """
    Return "little" or "big" for the <prefix>_ARCH/<prefix>_OS machine,
    based on siteinfo data; hard-fail when siteinfo does not record the
    machine's endianness.
    """
    arch = d.getVar(prefix + "_ARCH")
    os = d.getVar(prefix + "_OS")
    sitedata = siteinfo_data_for_machine(arch, os, d)
    if "endian-little" in sitedata:
        return "little"
    if "endian-big" in sitedata:
        return "big"
    bb.fatal("Cannot determine endianism for %s-%s" % (arch, os))
|
||||
181
sources/poky/meta/classes-recipe/meson.bbclass
Normal file
181
sources/poky/meta/classes-recipe/meson.bbclass
Normal file
@@ -0,0 +1,181 @@
|
||||
#
|
||||
# Copyright OpenEmbedded Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
|
||||
inherit python3native meson-routines qemu
|
||||
|
||||
DEPENDS:append = " meson-native ninja-native"
|
||||
|
||||
EXEWRAPPER_ENABLED:class-native = "False"
|
||||
EXEWRAPPER_ENABLED:class-nativesdk = "False"
|
||||
EXEWRAPPER_ENABLED ?= "${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d)}"
|
||||
DEPENDS:append = "${@' qemu-native' if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ''}"
|
||||
|
||||
# As Meson enforces out-of-tree builds we can just use cleandirs
|
||||
B = "${WORKDIR}/build"
|
||||
do_configure[cleandirs] = "${B}"
|
||||
|
||||
# Where the meson.build build configuration is
|
||||
MESON_SOURCEPATH = "${S}"
|
||||
|
||||
# The target to build in do_compile. If unset the default targets are built.
|
||||
MESON_TARGET ?= ""
|
||||
|
||||
def noprefix(var, d):
    """
    Return BitBake variable *var* with the leading "${prefix}/"
    stripped, e.g. "/usr/bin" -> "bin" when prefix is "/usr".
    """
    value = d.getVar(var)
    prefix = d.getVar('prefix') + '/'
    return value.replace(prefix, '', 1)
|
||||
|
||||
MESON_BUILDTYPE ?= "${@oe.utils.vartrue('DEBUG_BUILD', 'debug', 'plain', d)}"
|
||||
MESON_BUILDTYPE[vardeps] += "DEBUG_BUILD"
|
||||
MESONOPTS = " --prefix ${prefix} \
|
||||
--buildtype ${MESON_BUILDTYPE} \
|
||||
--bindir ${@noprefix('bindir', d)} \
|
||||
--sbindir ${@noprefix('sbindir', d)} \
|
||||
--datadir ${@noprefix('datadir', d)} \
|
||||
--libdir ${@noprefix('libdir', d)} \
|
||||
--libexecdir ${@noprefix('libexecdir', d)} \
|
||||
--includedir ${@noprefix('includedir', d)} \
|
||||
--mandir ${@noprefix('mandir', d)} \
|
||||
--infodir ${@noprefix('infodir', d)} \
|
||||
--sysconfdir ${sysconfdir} \
|
||||
--localstatedir ${localstatedir} \
|
||||
--sharedstatedir ${sharedstatedir} \
|
||||
--wrap-mode nodownload \
|
||||
--native-file ${WORKDIR}/meson.native"
|
||||
|
||||
EXTRA_OEMESON:append = " ${PACKAGECONFIG_CONFARGS}"
|
||||
|
||||
MESON_CROSS_FILE = ""
|
||||
MESON_CROSS_FILE:class-target = "--cross-file ${WORKDIR}/meson.cross"
|
||||
MESON_CROSS_FILE:class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
|
||||
|
||||
# Needed to set up qemu wrapper below
|
||||
export STAGING_DIR_HOST
|
||||
|
||||
def rust_tool(d, target_var):
    """
    Return the `rust = [...]` binaries line for a Meson machine file,
    or "" when no Rust compiler is configured.
    """
    rustc = d.getVar('RUSTC')
    if not rustc:
        return ""
    cmd = [rustc, "--target", d.getVar(target_var)]
    cmd += d.getVar("RUSTFLAGS").split()
    return "rust = %s" % repr(cmd)
|
||||
|
||||
addtask write_config before do_configure
do_write_config[vardeps] += "CC CXX AR NM STRIP READELF OBJCOPY CFLAGS CXXFLAGS LDFLAGS RUSTC RUSTFLAGS EXEWRAPPER_ENABLED"
# Emit the Meson cross file (describing the target toolchain) and the
# native file (describing the build-host toolchain) that meson setup
# consumes. Heredoc content is generated verbatim into the files.
do_write_config() {
    # This needs to be Py to split the args into single-element lists
    cat >${WORKDIR}/meson.cross <<EOF
[binaries]
c = ${@meson_array('CC', d)}
cpp = ${@meson_array('CXX', d)}
cython = 'cython3'
ar = ${@meson_array('AR', d)}
nm = ${@meson_array('NM', d)}
strip = ${@meson_array('STRIP', d)}
readelf = ${@meson_array('READELF', d)}
objcopy = ${@meson_array('OBJCOPY', d)}
pkg-config = 'pkg-config'
llvm-config = 'llvm-config'
cups-config = 'cups-config'
g-ir-scanner = '${STAGING_BINDIR}/g-ir-scanner-wrapper'
g-ir-compiler = '${STAGING_BINDIR}/g-ir-compiler-wrapper'
${@rust_tool(d, "RUST_HOST_SYS")}
${@"exe_wrapper = '${WORKDIR}/meson-qemuwrapper'" if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ""}

[built-in options]
c_args = ${@meson_array('CFLAGS', d)}
c_link_args = ${@meson_array('LDFLAGS', d)}
cpp_args = ${@meson_array('CXXFLAGS', d)}
cpp_link_args = ${@meson_array('LDFLAGS', d)}

[properties]
needs_exe_wrapper = true
sys_root = '${STAGING_DIR_HOST}'

[host_machine]
system = '${@meson_operating_system('HOST_OS', d)}'
cpu_family = '${@meson_cpu_family('HOST_ARCH', d)}'
cpu = '${HOST_ARCH}'
endian = '${@meson_endian('HOST', d)}'

[target_machine]
system = '${@meson_operating_system('TARGET_OS', d)}'
cpu_family = '${@meson_cpu_family('TARGET_ARCH', d)}'
cpu = '${TARGET_ARCH}'
endian = '${@meson_endian('TARGET', d)}'
EOF

    # Native file: the BUILD_* toolchain used for build-host helpers.
    cat >${WORKDIR}/meson.native <<EOF
[binaries]
c = ${@meson_array('BUILD_CC', d)}
cpp = ${@meson_array('BUILD_CXX', d)}
cython = 'cython3'
ar = ${@meson_array('BUILD_AR', d)}
nm = ${@meson_array('BUILD_NM', d)}
strip = ${@meson_array('BUILD_STRIP', d)}
readelf = ${@meson_array('BUILD_READELF', d)}
objcopy = ${@meson_array('BUILD_OBJCOPY', d)}
llvm-config = '${STAGING_BINDIR_NATIVE}/llvm-config'
pkg-config = 'pkg-config-native'
${@rust_tool(d, "RUST_BUILD_SYS")}

[built-in options]
c_args = ${@meson_array('BUILD_CFLAGS', d)}
c_link_args = ${@meson_array('BUILD_LDFLAGS', d)}
cpp_args = ${@meson_array('BUILD_CXXFLAGS', d)}
cpp_link_args = ${@meson_array('BUILD_LDFLAGS', d)}
EOF
}
|
||||
|
||||
# For target builds, additionally generate the qemu usermode wrapper
# that Meson uses as exe_wrapper to execute target helper binaries.
do_write_config:append:class-target() {
    # Write out a qemu wrapper that will be used as exe_wrapper so that meson
    # can run target helper binaries through that.
    qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
    cat > ${WORKDIR}/meson-qemuwrapper << EOF
#!/bin/sh
# Use a modules directory which doesn't exist so we don't load random things
# which may then get deleted (or their dependencies) and potentially segfault
export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy

# meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
unset LD_LIBRARY_PATH

$qemu_binary "\$@"
EOF
    chmod +x ${WORKDIR}/meson-qemuwrapper
}
|
||||
|
||||
# Tell externalsrc that changes to this file require a reconfigure
CONFIGURE_FILES = "meson.build"

# Run meson setup with the generated cross/native files; any failure
# aborts the configure step.
meson_do_configure() {
    # Meson requires this to be 'bfd, 'lld' or 'gold' from 0.53 onwards
    # https://github.com/mesonbuild/meson/commit/ef9aeb188ea2bc7353e59916c18901cde90fa2b3
    unset LD

    bbnote Executing meson ${EXTRA_OEMESON}...
    if ! meson setup ${MESONOPTS} "${MESON_SOURCEPATH}" "${B}" ${MESON_CROSS_FILE} ${EXTRA_OEMESON}; then
        bbfatal_log meson failed
    fi
}
|
||||
|
||||
# Post-configure QA: warn when Meson fell back to a default for a
# cross property that should have been supplied in the cross file.
python meson_do_qa_configure() {
    import re
    warn_re = re.compile(r"^WARNING: Cross property (.+) is using default value (.+)$", re.MULTILINE)
    with open(d.expand("${B}/meson-logs/meson-log.txt")) as logfile:
        log = logfile.read()
    for (prop, value) in warn_re.findall(log):
        bb.warn("Meson cross property %s used without explicit assignment, defaulting to %s" % (prop, value))
}
do_configure[postfuncs] += "meson_do_qa_configure"
|
||||
|
||||
# Derive task progress from ninja's "[x/y]" output lines.
do_compile[progress] = "outof:^\[(\d+)/(\d+)\]\s+"
meson_do_compile() {
    meson compile -v ${PARALLEL_MAKE} ${MESON_TARGET}
}

meson_do_install() {
    # --no-rebuild: everything was already built in do_compile
    meson install --destdir ${D} --no-rebuild
}

EXPORT_FUNCTIONS do_configure do_compile do_install
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user