Complete Yocto mirror with license table for TQMa6UL (2038-compliance)

- 264 license table entries with exact download URLs (224/264 resolved)
- Complete sources/ directory with all BitBake recipes
- Build configuration: tqma6ul-multi-mba6ulx, spaetzle (musl)
- Full traceability for Softwarefreigabeantrag
- GCC 13.4.0, Linux 6.6.102, U-Boot 2023.04, musl 1.2.4
- License distribution: GPL-2.0 (24), MIT (23), GPL-2.0+ (18), BSD-3 (16)
This commit is contained in:
Siggi (OpenClaw Agent)
2026-03-01 20:58:18 +00:00
commit 16accb6b24
15086 changed files with 1292356 additions and 0 deletions

View File

@@ -0,0 +1,12 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from pkgutil import extend_path
# Make this directory an extensible namespace package so that other layers
# can contribute additional submodules to the same package.
__path__ = extend_path(__path__, __name__)

# Submodule names imported automatically into this namespace by the build
# system's module-loading machinery (NOTE(review): consumed outside this
# file -- confirm against the importer's handling of BBIMPORTS).
BBIMPORTS = ["data", "path", "utils", "types", "package", "packagedata", \
             "packagegroup", "sstatesig", "lsb", "cachedpath", "license", \
             "qa", "reproducible", "rust", "buildcfg", "go"]

View File

@@ -0,0 +1,79 @@
import os
import subprocess
import bb.process
def detect_revision(d):
    """Return the git revision of the core metadata checkout."""
    return get_metadata_git_revision(get_scmbasepath(d))
def detect_branch(d):
    """Return the git branch of the core metadata checkout."""
    return get_metadata_git_branch(get_scmbasepath(d))
def get_scmbasepath(d):
    """Return the path of the core 'meta' layer, i.e. COREBASE/meta."""
    corebase = d.getVar('COREBASE')
    return os.path.join(corebase, 'meta')
def get_metadata_git_branch(path):
    """Return the abbreviated name of the branch checked out at *path*,
    or '<unknown>' if it cannot be determined."""
    try:
        output, _ = bb.process.run('git rev-parse --abbrev-ref HEAD', cwd=path)
    except bb.process.ExecutionError:
        return '<unknown>'
    return output.strip()
def get_metadata_git_revision(path):
    """Return the commit hash of HEAD at *path*, or '<unknown>' on error."""
    try:
        output, _ = bb.process.run('git rev-parse HEAD', cwd=path)
    except bb.process.ExecutionError:
        return '<unknown>'
    return output.strip()
def get_metadata_git_toplevel(path):
    """Return the top-level directory of the git checkout containing *path*,
    or an empty string if *path* is not inside a git checkout."""
    try:
        output, _ = bb.process.run('git rev-parse --show-toplevel', cwd=path)
    except bb.process.ExecutionError:
        return ""
    return output.strip()
def get_metadata_git_remotes(path):
    """Return the list of git remote names configured at *path*
    (an empty list on error)."""
    try:
        output, _ = bb.process.run('git remote', cwd=path)
    except bb.process.ExecutionError:
        return []
    return output.split()
def get_metadata_git_remote_url(path, remote):
    """Return the fetch URL of *remote* at *path*, or "" on error."""
    try:
        output, _ = bb.process.run('git remote get-url %s' % remote, cwd=path)
    except bb.process.ExecutionError:
        return ""
    return output.strip()
def get_metadata_git_describe(path):
    """Return `git describe --tags` output for *path*, or "" on error."""
    try:
        output, _ = bb.process.run('git describe --tags', cwd=path)
    except bb.process.ExecutionError:
        return ""
    return output.strip()
def is_layer_modified(path):
    """Return " -- modified" if the git checkout at *path* has uncommitted
    changes (unstaged or staged), or "" if it is clean.

    Any error -- path is not a git repository, git is not installed, the
    path is unreadable -- is deliberately treated as "modified" rather than
    raised, matching the best-effort nature of the build summary.
    """
    # PSEUDO_UNLOAD stops pseudo (fakeroot) from intercepting git's syscalls.
    env = dict(os.environ, PSEUDO_UNLOAD="1")
    try:
        # Run git directly with cwd=path instead of interpolating the path
        # into a shell string: the previous `cd %s; ...` form broke on (and
        # was injectable via) paths containing spaces or shell metacharacters.
        for args in (["git", "diff", "--quiet", "--no-ext-diff"],
                     ["git", "diff", "--quiet", "--no-ext-diff", "--cached"]):
            subprocess.check_output(args, cwd=path, env=env,
                                    stderr=subprocess.STDOUT)
        return ""
    except (subprocess.CalledProcessError, OSError):
        # Silently treat errors as "modified", without checking for the
        # (expected) return code 1 in a modified git repo: e.g. git exits
        # with 128/129 when the path isn't a git repo at all, and OSError
        # covers a missing git binary or an unusable path.
        return " -- modified"
def get_layer_revisions(d):
    """Return a (path, name, branch, revision, modified-marker) tuple for
    every layer listed in BBLAYERS."""
    layers = (d.getVar("BBLAYERS") or "").split()
    return [
        (layer,
         os.path.basename(layer),
         get_metadata_git_branch(layer).strip(),
         get_metadata_git_revision(layer),
         is_layer_modified(layer))
        for layer in layers
    ]

View File

@@ -0,0 +1,723 @@
# Report significant differences in the buildhistory repository since a specific revision
#
# Copyright (C) 2012-2013, 2016-2017 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Note: requires GitPython 0.3.1+
#
# You can use this from the command line by running scripts/buildhistory-diff
#
import sys
import os.path
import difflib
import git
import re
import shlex
import hashlib
import collections
import bb.utils
import bb.tinfoil
# How to display fields
# Variables whose values are space-separated lists; differences are shown as
# added/removed items rather than as a raw string diff.
list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
# List variables where a change in item *order* alone is worth reporting.
list_order_fields = ['PACKAGES']
# Runtime package fields that fall back to a base variable when unset
# (e.g. PKGV defaults to PV); used to substitute "<value> [default]".
defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
# Fields compared numerically and reported with a percentage change.
numeric_fields = ['PKGSIZE', 'IMAGESIZE']
# Fields to monitor
monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG']
# Version fields: only monitored when explicitly requested or set.
ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
# Percentage change to alert for numeric fields
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
# ANSI escape sequences used in change output; populated by init_colours(),
# empty strings (no colour) by default.
colours = {
    'colour_default': '',
    'colour_add': '',
    'colour_remove': '',
}
def init_colours(use_colours):
    """Initialise the module-level colour table.

    With use_colours=True the table holds ANSI escape sequences (green for
    additions, red for removals); otherwise all entries are empty strings.
    """
    global colours
    if use_colours:
        colours = {
            'colour_default': '\033[0m',
            'colour_add': '\033[1;32m',
            'colour_remove': '\033[1;31m',
        }
    else:
        colours = dict.fromkeys(
            ('colour_default', 'colour_add', 'colour_remove'), '')
class ChangeRecord:
    """One reported difference in a buildhistory file.

    Holds the repository-relative file path, the field that changed, its old
    and new values, and whether the field is one of the monitored ones.
    str() renders a human-readable (optionally colourised) description.
    """
    def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
        self.path = path
        self.fieldname = fieldname
        self.oldvalue = oldvalue
        self.newvalue = newvalue
        self.monitored = monitored
        # Optional list of FileChange objects, set by the caller for
        # file-list style changes (then oldvalue/newvalue may be None).
        self.filechanges = None

    def __str__(self):
        return self._str_internal(True)

    def _str_internal(self, outer):
        """Render this change as text; *outer* selects whether the file path
        prefix is included."""
        if outer:
            if '/image-files/' in self.path:
                prefix = '%s: ' % self.path.split('/image-files/')[0]
            else:
                prefix = '%s: ' % self.path
        else:
            prefix = ''

        def pkglist_combine(depver):
            # Flatten a {package: version-constraint} dict into
            # "pkg (constraint)" / "pkg" display strings.
            pkglist = []
            for k,v in depver.items():
                if v:
                    pkglist.append("%s (%s)" % (k,v))
                else:
                    pkglist.append(k)
            return pkglist

        def detect_renamed_dirs(aitems, bitems):
            # Report a directory present only in the old list and one present
            # only in the new list as a single rename when they contain
            # exactly the same file names, instead of many removals/additions.
            adirs = set(map(os.path.dirname, aitems))
            bdirs = set(map(os.path.dirname, bitems))
            files_ab = [(name, sorted(os.path.basename(item) for item in aitems if os.path.dirname(item) == name)) \
                        for name in adirs - bdirs]
            files_ba = [(name, sorted(os.path.basename(item) for item in bitems if os.path.dirname(item) == name)) \
                        for name in bdirs - adirs]
            renamed_dirs = []
            for dir1, files1 in files_ab:
                rename = False
                for dir2, files2 in files_ba:
                    if files1 == files2 and not rename:
                        renamed_dirs.append((dir1,dir2))
                        # Make sure that we don't use this (dir, files) pair again.
                        files_ba.remove((dir2,files2))
                        # If a dir has already been found to have a rename, stop and go no further.
                        rename = True
            # remove files that belong to renamed dirs from aitems and bitems
            for dir1, dir2 in renamed_dirs:
                aitems = [item for item in aitems if os.path.dirname(item) not in (dir1, dir2)]
                bitems = [item for item in bitems if os.path.dirname(item) not in (dir1, dir2)]
            return renamed_dirs, aitems, bitems

        if self.fieldname in list_fields or self.fieldname in list_order_fields:
            renamed_dirs = []
            changed_order = False
            if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                # Dependency-style fields: compare with version awareness.
                (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
                aitems = pkglist_combine(depvera)
                bitems = pkglist_combine(depverb)
            else:
                if self.fieldname == 'FILELIST':
                    # shlex.split handles quoted names containing spaces.
                    aitems = shlex.split(self.oldvalue)
                    bitems = shlex.split(self.newvalue)
                    renamed_dirs, aitems, bitems = detect_renamed_dirs(aitems, bitems)
                else:
                    aitems = self.oldvalue.split()
                    bitems = self.newvalue.split()

            removed = list(set(aitems) - set(bitems))
            added = list(set(bitems) - set(aitems))

            if not removed and not added and self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                # Same items on both sides: check whether only the order changed.
                depvera = bb.utils.explode_dep_versions2(self.oldvalue, sort=False)
                depverb = bb.utils.explode_dep_versions2(self.newvalue, sort=False)
                for i, j in zip(depvera.items(), depverb.items()):
                    if i[0] != j[0]:
                        changed_order = True
                        break

            lines = []
            if renamed_dirs:
                for dfrom, dto in renamed_dirs:
                    lines.append('directory renamed {colour_remove}{}{colour_default} -> {colour_add}{}{colour_default}'.format(dfrom, dto, **colours))
            if removed or added:
                if removed and not bitems:
                    lines.append('removed all items "{colour_remove}{}{colour_default}"'.format(' '.join(removed), **colours))
                else:
                    if removed:
                        lines.append('removed "{colour_remove}{value}{colour_default}"'.format(value=' '.join(removed), **colours))
                    if added:
                        lines.append('added "{colour_add}{value}{colour_default}"'.format(value=' '.join(added), **colours))
            else:
                lines.append('changed order')

            if not (removed or added or changed_order):
                out = ''
            else:
                out = '%s: %s' % (self.fieldname, ', '.join(lines))

        elif self.fieldname in numeric_fields:
            # Numeric fields: report old -> new with a signed percentage change.
            aval = int(self.oldvalue or 0)
            bval = int(self.newvalue or 0)
            if aval != 0:
                percentchg = ((bval - aval) / float(aval)) * 100
            else:
                percentchg = 100
            out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default} ({}{:.0f}%)'.format(self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg, **colours)
        elif self.fieldname in defaultval_map:
            out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default}'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
            if self.fieldname == 'PKG' and '[default]' in self.newvalue:
                out += ' - may indicate debian renaming failure'
        elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
            # Package maintainer scripts: show a unified diff of the body.
            if self.oldvalue and self.newvalue:
                out = '%s changed:\n ' % self.fieldname
            elif self.newvalue:
                out = '%s added:\n ' % self.fieldname
            elif self.oldvalue:
                out = '%s cleared:\n ' % self.fieldname
            alines = self.oldvalue.splitlines()
            blines = self.newvalue.splitlines()
            diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
            # [2:] drops the '---'/'+++' header lines from the diff output.
            out += '\n '.join(list(diff)[2:])
            out += '\n --'
        elif self.fieldname in img_monitor_files or '/image-files/' in self.path or self.fieldname == "sysroot":
            if self.filechanges or (self.oldvalue and self.newvalue):
                fieldname = self.fieldname
                if '/image-files/' in self.path:
                    fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
                    out = 'Changes to %s:\n ' % fieldname
                else:
                    if outer:
                        prefix = 'Changes to %s ' % self.path
                    out = '(%s):\n ' % self.fieldname
                if self.filechanges:
                    out += '\n '.join(['%s' % i for i in self.filechanges])
                else:
                    alines = self.oldvalue.splitlines()
                    blines = self.newvalue.splitlines()
                    diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
                    out += '\n '.join(list(diff))
                    out += '\n --'
            else:
                out = ''
        else:
            # Fallback: plain old -> new string change.
            out = '{} changed from "{colour_remove}{}{colour_default}" to "{colour_add}{}{colour_default}"'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)

        return '%s%s' % (prefix, out) if out else ''
class FileChange:
    """A single change to one file in a file listing.

    The changetype_* constants classify the change; oldvalue/newvalue carry
    type letters, permission strings, owner/group strings or link targets
    depending on the change type.  str() renders a one-line description.
    """
    changetype_add = 'A'
    changetype_remove = 'R'
    changetype_type = 'T'
    changetype_perms = 'P'
    changetype_ownergroup = 'O'
    changetype_link = 'L'
    changetype_move = 'M'

    # `ls -l` type letter -> human-readable file type name.
    _FTYPE_NAMES = {
        '-': 'file',
        'd': 'directory',
        'l': 'symlink',
        'c': 'char device',
        'b': 'block device',
        'p': 'fifo',
        's': 'socket',
    }

    def __init__(self, path, changetype, oldvalue = None, newvalue = None):
        self.path = path
        self.changetype = changetype
        self.oldvalue = oldvalue
        self.newvalue = newvalue

    def _ftype_str(self, ftype):
        """Translate an `ls`-style type letter into a readable name."""
        try:
            return self._FTYPE_NAMES[ftype]
        except KeyError:
            return 'unknown (%s)' % ftype

    def __str__(self):
        ctype = self.changetype
        if ctype == self.changetype_add:
            return '%s was added' % self.path
        if ctype == self.changetype_remove:
            return '%s was removed' % self.path
        if ctype == self.changetype_type:
            return '%s changed type from %s to %s' % (
                self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
        if ctype == self.changetype_perms:
            return '%s changed permissions from %s to %s' % (
                self.path, self.oldvalue, self.newvalue)
        if ctype == self.changetype_ownergroup:
            return '%s changed owner/group from %s to %s' % (
                self.path, self.oldvalue, self.newvalue)
        if ctype == self.changetype_link:
            return '%s changed symlink target from %s to %s' % (
                self.path, self.oldvalue, self.newvalue)
        if ctype == self.changetype_move:
            return '%s moved to %s' % (self.path, self.oldvalue)
        return '%s changed (unknown)' % self.path
def blob_to_dict(blob):
    """Parse a git blob of "NAME = value" lines into a dict.

    Lines without an '=' are ignored; keys and values are stripped of
    surrounding whitespace.  Only the first '=' on a line separates key
    from value.
    """
    text = blob.data_stream.read().decode('utf-8')
    adict = {}
    for line in text.splitlines():
        key, sep, value = line.partition('=')
        if sep:
            adict[key.strip()] = value.strip()
    return adict
def file_list_to_dict(lines):
    """Convert `ls -l`-style listing lines into a dict keyed by path.

    Each value is [perms, owner, group] (plus the link target for symlink
    entries).  The leading '.' of the path column is stripped.
    """
    adict = {}
    for line in lines:
        # Leave the last few fields intact so we handle file names containing spaces
        fields = line.split(None, 4)
        # Grab the path and remove the leading .
        path = fields[4][1:].strip()
        if ' -> ' in path:
            # Symlink entries carry their target after ' -> '.
            target = path.split(' -> ')[1]
            path = path.split(' -> ')[0]
            adict[path] = fields[0:3] + [target]
        else:
            adict[path] = fields[0:3]
    return adict
# Translation table mapping every ASCII digit to 'X'; used to compare file
# paths while ignoring embedded numbers (e.g. kernel version components).
numeric_removal = str.maketrans('0123456789', 'XXXXXXXXXX')
def compare_file_lists(alines, blines, compare_ownership=True):
    """Compare two `ls -l`-style file listings and return FileChange records.

    For paths present in both listings, reports type, permission, owner/group
    (unless compare_ownership is False) and symlink-target changes.  Matching
    additions and removals -- including whole directory trees whose paths
    differ only in embedded numbers -- are collapsed into 'move' records;
    whatever remains is reported as plain additions and removals.
    """
    adict = file_list_to_dict(alines)
    bdict = file_list_to_dict(blines)
    filechanges = []
    additions = []
    removals = []
    for path, splitv in adict.items():
        newsplitv = bdict.pop(path, None)
        if newsplitv:
            # Check type
            oldvalue = splitv[0][0]
            newvalue = newsplitv[0][0]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
            # Check permissions
            oldvalue = splitv[0][1:]
            newvalue = newsplitv[0][1:]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
            if compare_ownership:
                # Check owner/group
                oldvalue = '%s/%s' % (splitv[1], splitv[2])
                newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
                if oldvalue != newvalue:
                    filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
            # Check symlink target
            if newsplitv[0][0] == 'l':
                if len(splitv) > 3:
                    oldvalue = splitv[3]
                else:
                    oldvalue = None
                newvalue = newsplitv[3]
                if oldvalue != newvalue:
                    filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
        else:
            removals.append(path)

    # Whatever is left over has been added
    for path in bdict:
        additions.append(path)

    # Rather than print additions and removals, its nicer to print file 'moves'
    # where names or paths are similar.
    # Key removals by their path with all digits masked to 'X', so that paths
    # differing only in version numbers compare equal.
    revmap_remove = {}
    for removal in removals:
        translated = removal.translate(numeric_removal)
        if translated not in revmap_remove:
            revmap_remove[translated] = []
        revmap_remove[translated].append(removal)
    #
    # We want to detect renames of large trees of files like
    # /lib/modules/5.4.40-yocto-standard to /lib/modules/5.4.43-yocto-standard
    #
    renames = {}
    for addition in additions.copy():
        if addition not in additions:
            # Already consumed by an earlier directory-rename match below.
            continue
        translated = addition.translate(numeric_removal)
        if translated in revmap_remove:
            if len(revmap_remove[translated]) != 1:
                # Ambiguous (several removals mask to the same pattern):
                # leave as plain addition/removal.
                continue
            removal = revmap_remove[translated][0]
            commondir = addition.split("/")
            commondir2 = removal.split("/")
            # Find the first path component where the two paths diverge.
            # NOTE(review): 'idx' is assigned but never read -- the joins
            # below use the loop variable 'i' instead; confirm intended.
            idx = None
            for i in range(len(commondir)):
                if commondir[i] != commondir2[i]:
                    idx = i
                    break
            commondir = "/".join(commondir[:i+1])
            commondir2 = "/".join(commondir2[:i+1])
            # If the common parent is in one dict and not the other its likely a rename
            # so iterate through those files and process as such
            if commondir2 not in bdict and commondir not in adict:
                if commondir not in renames:
                    renames[commondir] = commondir2
                for addition2 in additions.copy():
                    if addition2.startswith(commondir):
                        removal2 = addition2.replace(commondir, commondir2)
                        if removal2 in removals:
                            additions.remove(addition2)
                            removals.remove(removal2)
                continue
            filechanges.append(FileChange(removal, FileChange.changetype_move, addition))
            if addition in additions:
                additions.remove(addition)
            if removal in removals:
                removals.remove(removal)

    # Emit one move record per renamed directory tree.
    for rename in renames:
        filechanges.append(FileChange(renames[rename], FileChange.changetype_move, rename))

    for addition in additions:
        filechanges.append(FileChange(addition, FileChange.changetype_add))
    for removal in removals:
        filechanges.append(FileChange(removal, FileChange.changetype_remove))

    return filechanges
def compare_lists(alines, blines):
    """Return FileChange records for lines removed from *alines* and lines
    added in *blines* (set comparison; order is not significant)."""
    removed = set(alines) - set(blines)
    added = set(blines) - set(alines)
    filechanges = [FileChange(pkg, FileChange.changetype_remove)
                   for pkg in removed]
    filechanges.extend(FileChange(pkg, FileChange.changetype_add)
                       for pkg in added)
    return filechanges
def compare_pkg_lists(astr, bstr):
    """Compare two dependency-list strings with version awareness.

    Returns (depvera, depverb): the exploded {package: [constraints]} dicts
    for each side, with entries dropped where the only difference is that a
    '>=' or '=' version constraint increased -- version bumps are expected
    and not worth reporting.  A version *decrease* is kept.
    """
    depvera = bb.utils.explode_dep_versions2(astr)
    depverb = bb.utils.explode_dep_versions2(bstr)

    # Strip out changes where the version has increased
    remove = []
    for k in depvera:
        if k in depverb:
            dva = depvera[k]
            dvb = depverb[k]
            if dva and dvb and len(dva) == len(dvb):
                # Since length is the same, sort so that prefixes (e.g. >=) will line up
                dva.sort()
                dvb.sort()
                removeit = True
                for dvai, dvbi in zip(dva, dvb):
                    if dvai != dvbi:
                        aiprefix = dvai.split(' ')[0]
                        biprefix = dvbi.split(' ')[0]
                        if aiprefix == biprefix and aiprefix in ['>=', '=']:
                            # Same operator: keep the entry only if the old
                            # version is greater than the new (a decrease).
                            if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
                                removeit = False
                                break
                        else:
                            # Operator changed (or incomparable): report it.
                            removeit = False
                            break
                if removeit:
                    remove.append(k)
    for k in remove:
        depvera.pop(k)
        depverb.pop(k)

    return (depvera, depverb)
def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
    """Compare two "NAME = value" git blobs for the package/image at *path*.

    Returns a list of ChangeRecord objects.  Unless report_all is set,
    uninteresting changes are filtered out: numeric deltas below the alert
    threshold, dependency changes that are only version increases, expected
    FILELIST churn in -dbg/-src packages, and single-step PKGR bumps.
    """
    adict = blob_to_dict(ablob)
    bdict = blob_to_dict(bblob)

    pkgname = os.path.basename(path)

    # Fallbacks for PKG/PKGE when the mapped base variable is unset too.
    defaultvals = {}
    defaultvals['PKG'] = pkgname
    defaultvals['PKGE'] = '0'

    changes = []
    # Always consider the defaultval_map keys so defaults get substituted
    # even when the field is absent on both sides.
    keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
    for key in keys:
        astr = adict.get(key, '')
        bstr = bdict.get(key, '')
        if key in ver_monitor_fields:
            # Version fields are monitored only on request or when set.
            monitored = report_ver or astr or bstr
        else:
            monitored = key in monitor_fields
        mapped_key = defaultval_map.get(key, '')
        if mapped_key:
            # Substitute the base variable's value, marked '[default]'.
            if not astr:
                astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
            if not bstr:
                bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))
        if astr != bstr:
            if (not report_all) and key in numeric_fields:
                aval = int(astr or 0)
                bval = int(bstr or 0)
                if aval != 0:
                    percentchg = ((bval - aval) / float(aval)) * 100
                else:
                    percentchg = 100
                # Ignore numeric changes below the alert threshold.
                if abs(percentchg) < monitor_numeric_threshold:
                    continue
            elif (not report_all) and key in list_fields:
                # Debug/source packages gaining files is expected; skip.
                if key == "FILELIST" and (path.endswith("-dbg") or path.endswith("-src")) and bstr.strip() != '':
                    continue
                if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                    (depvera, depverb) = compare_pkg_lists(astr, bstr)
                    if depvera == depverb:
                        continue
                if key == 'FILELIST':
                    alist = shlex.split(astr)
                    blist = shlex.split(bstr)
                else:
                    alist = astr.split()
                    blist = bstr.split()
                alist.sort()
                blist.sort()
                # We don't care about the removal of self-dependencies
                if pkgname in alist and not pkgname in blist:
                    alist.remove(pkgname)
                # Order-insensitive comparison: nothing really changed.
                if ' '.join(alist) == ' '.join(blist):
                    continue
            if key == 'PKGR' and not report_all:
                vers = []
                # strip leading 'r' and dots
                for ver in (astr.split()[0], bstr.split()[0]):
                    if ver.startswith('r'):
                        ver = ver[1:]
                    vers.append(ver.replace('.', ''))
                maxlen = max(len(vers[0]), len(vers[1]))
                try:
                    # pad with '0' and convert to int
                    vers = [int(ver.ljust(maxlen, '0')) for ver in vers]
                except ValueError:
                    pass
                else:
                    # skip decrements and increments
                    if abs(vers[0] - vers[1]) == 1:
                        continue
            chg = ChangeRecord(path, key, astr, bstr, monitored)
            changes.append(chg)
    return changes
def compare_siglists(a_blob, b_blob, taskdiff=False):
    """Compare two siglist.txt blobs and describe task signature changes.

    With taskdiff=True, tinfoil and the on-disk sigdata files are used to
    explain *why* each signature changed (deduplicating identical
    explanations across tasks); otherwise only old/new hashes are listed.
    Returns a single multi-line report string ending with a summary line.
    """
    # FIXME collapse down a recipe's tasks?
    alines = a_blob.data_stream.read().decode('utf-8').splitlines()
    blines = b_blob.data_stream.read().decode('utf-8').splitlines()
    keys = []
    pnmap = {}
    def readsigs(lines):
        # Each line is "<pn>.<task> <field> <sighash> ..."; build a
        # {task-key: hash} dict while recording key order in `keys` and a
        # second-column -> recipe-name mapping in `pnmap`.
        sigs = {}
        for line in lines:
            linesplit = line.split()
            if len(linesplit) > 2:
                sigs[linesplit[0]] = linesplit[2]
                if not linesplit[0] in keys:
                    keys.append(linesplit[0])
                pnmap[linesplit[1]] = linesplit[0].rsplit('.', 1)[0]
        return sigs
    adict = readsigs(alines)
    bdict = readsigs(blines)
    out = []

    changecount = 0
    addcount = 0
    removecount = 0
    if taskdiff:
        with bb.tinfoil.Tinfoil() as tinfoil:
            tinfoil.prepare(config_only=True)

            changes = collections.OrderedDict()

            def compare_hashfiles(pn, taskname, hash1, hash2):
                # Locate the sigdata files for both hashes and diff them,
                # accumulating deduplicated explanation lines in `changes`.
                hashes = [hash1, hash2]
                hashfiles = bb.siggen.find_siginfo(pn, taskname, hashes, tinfoil.config_data)

                if not taskname:
                    # Called recursively with a combined "<pn>.<task>" key.
                    (pn, taskname) = pn.rsplit('.', 1)
                    pn = pnmap.get(pn, pn)
                desc = '%s.%s' % (pn, taskname)

                if len(hashfiles) == 0:
                    out.append("Unable to find matching sigdata for %s with hashes %s or %s" % (desc, hash1, hash2))
                elif not hash1 in hashfiles:
                    out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash1))
                elif not hash2 in hashfiles:
                    out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash2))
                else:
                    out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb, collapsed=True)
                    for line in out2:
                        # Key each explanation line by its SHA-256 so that the
                        # same underlying change across tasks is merged.
                        m = hashlib.sha256()
                        m.update(line.encode('utf-8'))
                        entry = changes.get(m.hexdigest(), (line, []))
                        if desc not in entry[1]:
                            changes[m.hexdigest()] = (line, entry[1] + [desc])

            # Define recursion callback
            def recursecb(key, hash1, hash2):
                compare_hashfiles(key, None, hash1, hash2)
                return []

            for key in keys:
                siga = adict.get(key, None)
                sigb = bdict.get(key, None)
                if siga is not None and sigb is not None and siga != sigb:
                    changecount += 1
                    (pn, taskname) = key.rsplit('.', 1)
                    compare_hashfiles(pn, taskname, siga, sigb)
                elif siga is None:
                    addcount += 1
                elif sigb is None:
                    removecount += 1
        # Render the deduplicated explanations, naming the affected tasks.
        for key, item in changes.items():
            line, tasks = item
            if len(tasks) == 1:
                desc = tasks[0]
            elif len(tasks) == 2:
                desc = '%s and %s' % (tasks[0], tasks[1])
            else:
                desc = '%s and %d others' % (tasks[-1], len(tasks)-1)
            out.append('%s: %s' % (desc, line))
    else:
        for key in keys:
            siga = adict.get(key, None)
            sigb = bdict.get(key, None)
            if siga is not None and sigb is not None and siga != sigb:
                out.append('%s changed from %s to %s' % (key, siga, sigb))
                changecount += 1
            elif siga is None:
                out.append('%s was added' % key)
                addcount += 1
            elif sigb is None:
                out.append('%s was removed' % key)
                removecount += 1
    out.append('Summary: %d tasks added, %d tasks removed, %d tasks modified (%.1f%%)' % (addcount, removecount, changecount, (changecount / float(len(bdict)) * 100)))
    return '\n'.join(out)
def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False,
        sigs=False, sigsdiff=False, exclude_path=None):
    """Compute the significant changes between two revisions of a
    buildhistory git repository.

    Walks the git diff between revision1 and revision2, dispatching each
    modified/added/deleted file to the appropriate comparison helper.
    With sigs/sigsdiff only siglist.txt is compared.  Returns a list of
    ChangeRecord objects (or report strings in signature mode), filtered
    to monitored fields unless report_all is set; exclude_path filters
    file-level changes whose path starts with any of the given prefixes.
    """
    repo = git.Repo(repopath)
    assert repo.bare == False
    commit = repo.commit(revision1)
    diff = commit.diff(revision2)

    changes = []

    if sigs or sigsdiff:
        # Signature mode: compare only the task-signature list.
        for d in diff.iter_change_type('M'):
            if d.a_blob.path == 'siglist.txt':
                changes.append(compare_siglists(d.a_blob, d.b_blob, taskdiff=sigsdiff))
        return changes

    # Modified files.
    for d in diff.iter_change_type('M'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename == 'latest':
                # Per-package variable dump.
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif filename.startswith('latest.'):
                # Package script (latest.pkg_postinst etc.): raw content diff.
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
                changes.append(chg)
            elif filename == 'sysroot':
                # Sysroot file listings: ownership is not meaningful here.
                alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
                blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
                filechanges = compare_file_lists(alines,blines, compare_ownership=False)
                if filechanges:
                    chg = ChangeRecord(path, filename, None, None, True)
                    chg.filechanges = filechanges
                    changes.append(chg)

        elif path.startswith('images/'):
            filename = os.path.basename(d.a_blob.path)
            if filename in img_monitor_files:
                if filename == 'files-in-image.txt':
                    alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
                    blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
                    filechanges = compare_file_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                elif filename == 'installed-package-names.txt':
                    alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
                    blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
                    filechanges = compare_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                else:
                    chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
                    changes.append(chg)
            elif filename == 'image-info.txt':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif '/image-files/' in path:
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
                changes.append(chg)

    # Look for added preinst/postinst/prerm/postrm
    # (without reporting newly added recipes)
    addedpkgs = []
    addedchanges = []
    for d in diff.iter_change_type('A'):
        path = os.path.dirname(d.b_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.b_blob.path)
            if filename == 'latest':
                addedpkgs.append(path)
            elif filename.startswith('latest.'):
                # [7:] strips the 'latest.' prefix, leaving the script name.
                chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read().decode('utf-8'), True)
                addedchanges.append(chg)
    for chg in addedchanges:
        found = False
        for pkg in addedpkgs:
            if chg.path.startswith(pkg):
                found = True
                break
        if not found:
            changes.append(chg)

    # Look for cleared preinst/postinst/prerm/postrm
    for d in diff.iter_change_type('D'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename != 'latest' and filename.startswith('latest.'):
                chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read().decode('utf-8'), '', True)
                changes.append(chg)

    # filter out unwanted paths
    if exclude_path:
        for chg in changes:
            if chg.filechanges:
                fchgs = []
                for fchg in chg.filechanges:
                    # for-else: keep the entry only if no exclude prefix matched.
                    for epath in exclude_path:
                        if fchg.path.startswith(epath):
                            break
                    else:
                        fchgs.append(fchg)
                chg.filechanges = fchgs

    if report_all:
        return changes
    else:
        return [chg for chg in changes if chg.monitored]

View File

@@ -0,0 +1,237 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Based on standard python library functions but avoid
# repeated stat calls. Its assumed the files will not change from under us
# so we can cache stat calls.
#
import os
import errno
import stat as statmod
class CachedPath(object):
    """os.path-style helpers that cache stat()/lstat() results.

    Based on the standard library functions but avoids repeated stat calls,
    on the assumption that the files will not change underneath us during
    the lifetime of the cache.  Failed stats are cached as False rather
    than raising.
    """
    def __init__(self):
        # normalized path -> os.stat result, or False if stat failed
        self.statcache = {}
        # normalized path -> os.lstat result, or False if lstat failed
        self.lstatcache = {}
        # raw path -> normalized path
        self.normpathcache = {}
        return

    def updatecache(self, x):
        """Invalidate any cached stat/lstat entries for path *x*."""
        x = self.normpath(x)
        if x in self.statcache:
            del self.statcache[x]
        if x in self.lstatcache:
            del self.lstatcache[x]

    def normpath(self, path):
        """os.path.normpath with memoisation."""
        if path in self.normpathcache:
            return self.normpathcache[path]

        newpath = os.path.normpath(path)
        self.normpathcache[path] = newpath
        return newpath

    def _callstat(self, path):
        # stat *path* (already normalized), caching the result; False on failure.
        if path in self.statcache:
            return self.statcache[path]
        try:
            st = os.stat(path)
            self.statcache[path] = st
            return st
        except os.error:
            self.statcache[path] = False
            return False

    # We might as well call lstat and then only
    # call stat as well in the symbolic link case
    # since this turns out to be much more optimal
    # in real world usage of this cache
    def callstat(self, path):
        """Cached os.stat() (follows symlinks); returns False on failure."""
        path = self.normpath(path)
        self.calllstat(path)
        return self.statcache[path]

    def calllstat(self, path):
        """Cached os.lstat(); also primes the stat cache for non-symlinks."""
        path = self.normpath(path)
        if path in self.lstatcache:
            return self.lstatcache[path]
        #bb.error("LStatpath:" + path)
        try:
            lst = os.lstat(path)
            self.lstatcache[path] = lst
            if not statmod.S_ISLNK(lst.st_mode):
                # Not a symlink, so stat() would return the same result.
                self.statcache[path] = lst
            else:
                self._callstat(path)
            return lst
        except (os.error, AttributeError):
            self.lstatcache[path] = False
            self.statcache[path] = False
            return False

    # This follows symbolic links, so both islink() and isdir() can be true
    # for the same path on systems that support symlinks
    def isfile(self, path):
        """Test whether a path is a regular file"""
        st = self.callstat(path)
        if not st:
            return False
        return statmod.S_ISREG(st.st_mode)

    # Is a path a directory?
    # This follows symbolic links, so both islink() and isdir()
    # can be true for the same path on systems that support symlinks
    def isdir(self, s):
        """Return true if the pathname refers to an existing directory."""
        st = self.callstat(s)
        if not st:
            return False
        return statmod.S_ISDIR(st.st_mode)

    def islink(self, path):
        """Test whether a path is a symbolic link"""
        st = self.calllstat(path)
        if not st:
            return False
        return statmod.S_ISLNK(st.st_mode)

    # Does a path exist?
    # This is false for dangling symbolic links on systems that support them.
    def exists(self, path):
        """Test whether a path exists. Returns False for broken symbolic links"""
        if self.callstat(path):
            return True
        return False

    def lexists(self, path):
        """Test whether a path exists. Returns True for broken symbolic links"""
        if self.calllstat(path):
            return True
        return False

    def stat(self, path):
        # Cached drop-in for os.stat(); returns False instead of raising.
        return self.callstat(path)

    def lstat(self, path):
        # Cached drop-in for os.lstat(); returns False instead of raising.
        return self.calllstat(path)

    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        """Cached equivalent of os.walk()."""
        # Matches os.walk, not os.path.walk()
        # We may not have read permission for top, in which case we can't
        # get a list of the files the directory contains. os.path.walk
        # always suppressed the exception then, rather than blow up for a
        # minor reason when (say) a thousand readable directories are still
        # left to visit. That logic is copied here.
        try:
            names = os.listdir(top)
        except os.error as err:
            if onerror is not None:
                onerror(err)
            return

        dirs, nondirs = [], []
        for name in names:
            if self.isdir(os.path.join(top, name)):
                dirs.append(name)
            else:
                nondirs.append(name)

        if topdown:
            yield top, dirs, nondirs
        for name in dirs:
            new_path = os.path.join(top, name)
            if followlinks or not self.islink(new_path):
                for x in self.walk(new_path, topdown, onerror, followlinks):
                    yield x
        if not topdown:
            yield top, dirs, nondirs

    ## realpath() related functions
    def __is_path_below(self, file, root):
        # True if *file* lies inside *root* (root must end with os.sep).
        return (file + os.path.sep).startswith(root)

    def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
        """Calculates real path of symlink 'start' + 'rel_path' below
        'root'; no part of 'start' below 'root' must contain symlinks. """
        have_dir = True
        for d in rel_path.split(os.path.sep):
            if not have_dir and not assume_dir:
                raise OSError(errno.ENOENT, "no such directory %s" % start)

            if d == os.path.pardir: # '..'
                if len(start) >= len(root):
                    # do not follow '..' before root
                    start = os.path.dirname(start)
                else:
                    # emit warning?
                    pass
            else:
                (start, have_dir) = self.__realpath(os.path.join(start, d),
                                                    root, loop_cnt, assume_dir)

            assert(self.__is_path_below(start, root))

        return start

    def __realpath(self, file, root, loop_cnt, assume_dir):
        # Resolve symlinks in *file* itself (not its parents), staying below
        # *root*; loop_cnt bounds the number of link hops (ELOOP when spent).
        # Returns (resolved-path, is-directory).
        while self.islink(file) and len(file) >= len(root):
            if loop_cnt == 0:
                raise OSError(errno.ELOOP, file)

            loop_cnt -= 1
            target = os.path.normpath(os.readlink(file))

            if not os.path.isabs(target):
                tdir = os.path.dirname(file)
                assert(self.__is_path_below(tdir, root))
            else:
                # Absolute link targets are re-rooted below *root*.
                tdir = root
            file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)

        try:
            is_dir = self.isdir(file)
        except:
            is_dir = False

        return (file, is_dir)

    def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
        """ Returns the canonical path of 'file' with assuming a
        toplevel 'root' directory. When 'use_physdir' is set, all
        preceding path components of 'file' will be resolved first;
        this flag should be set unless it is guaranteed that there is
        no symlink in the path. When 'assume_dir' is not set, missing
        path components will raise an ENOENT error"""
        root = os.path.normpath(root)
        file = os.path.normpath(file)

        if not root.endswith(os.path.sep):
            # letting root end with '/' makes some things easier
            root = root + os.path.sep

        if not self.__is_path_below(file, root):
            raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

        try:
            if use_physdir:
                file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
            else:
                file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
        except OSError as e:
            if e.errno == errno.ELOOP:
                # make ELOOP more readable; without catching it, there will
                # be printed a backtrace with 100s of OSError exceptions
                # else
                raise OSError(errno.ELOOP,
                              "too much recursions while resolving '%s'; loop in '%s'" %
                              (file, e.strerror))
            raise

        return file

View File

@@ -0,0 +1,159 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import collections
def get_packages(d):
    """Expand PACKAGES via the registered extender class.

    Reads the saved original list from PACKAGES_NONML and asks the
    EXTENDERCLASS instance to rename each entry for the current variant.
    """
    extender = d.getVar("EXTENDERCLASS")
    return extender.rename_packages_internal(d.getVar("PACKAGES_NONML"))
def get_depends(varprefix, d):
    """Expand a renamed dependency variable ('<varprefix>_NONML') via the
    registered EXTENDERCLASS instance."""
    return d.getVar("EXTENDERCLASS").map_depends_variable("%s_NONML" % varprefix)
class ClassExtender(object):
    """Rename recipes, packages and dependency variables for a prefixed
    variant build (e.g. multilib).

    'extname' is the variant prefix without the trailing dash.  The
    instance registers itself in the datastore as EXTENDERCLASS so the
    module-level get_packages()/get_depends() helpers can call back into
    it when the rewritten variables are expanded later.
    """
    def __init__(self, extname, d):
        # extname: variant prefix; d: the recipe datastore
        self.extname = extname
        self.d = d
        # list of [original_name, extended_name] pairs filled by
        # rename_packages()/rename_packages_internal()
        self.pkgs_mapping = []
        self.d.setVar("EXTENDERCLASS", self)

    def extend_name(self, name):
        """Return 'name' with the variant prefix applied.

        Names which must never be renamed pass through unchanged:
        kernel artefacts, the runtime linker, crosssdk recipes, absolute
        paths, unexpanded ${...} references and names already carrying
        the prefix.  'virtual/' providers get the prefix inserted after
        the 'virtual/' namespace.
        """
        if name.startswith("kernel-") or name == "virtual/kernel":
            return name
        if name.startswith("rtld"):
            return name
        if name.endswith("-crosssdk"):
            return name
        if name.endswith("-" + self.extname):
            # strip a trailing '-<extname>' so the prefix form is used instead
            name = name.replace("-" + self.extname, "")
        if name.startswith("virtual/"):
            # Assume large numbers of dashes means a triplet is present and we don't need to convert
            if name.count("-") >= 3 and name.endswith(("-go", "-binutils", "-gcc", "-g++")):
                return name
            subs = name.split("/", 1)[1]
            if not subs.startswith(self.extname):
                return "virtual/" + self.extname + "-" + subs
            return name
        if name.startswith("/") or (name.startswith("${") and name.endswith("}")):
            return name
        if not name.startswith(self.extname):
            return self.extname + "-" + name
        return name

    def map_variable(self, varname, setvar = True):
        """Apply extend_name() to every space-separated entry of 'varname'.

        Returns the rewritten string; when 'setvar' is True the variable
        is updated in the datastore as well.
        """
        var = self.d.getVar(varname)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_regexp_variable(self, varname, setvar = True):
        """Like map_variable(), but entries may be anchored regexps: a
        leading '^' is kept and the prefix inserted right after it."""
        var = self.d.getVar(varname)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            if v.startswith("^" + self.extname):
                # already prefixed
                newvar.append(v)
            elif v.startswith("^"):
                newvar.append("^" + self.extname + "-" + v[1:])
            else:
                newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_depends(self, dep):
        """Map a single dependency name, leaving native/cross/SDK
        dependencies and already-prefixed multilib variants untouched."""
        if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep):
            return dep
        else:
            # Do not extend deps that already carry a multilib prefix
            var = self.d.getVar("MULTILIB_VARIANTS")
            if var:
                var = var.split()
                for v in var:
                    if dep.startswith(v):
                        return dep
            return self.extend_name(dep)

    def map_depends_variable(self, varname, suffix = ""):
        """Rewrite a dependency variable (optionally the ':<suffix>'
        package override) through map_depends().

        The original value is preserved under '<varname>_NONML' and the
        variable is replaced by a deferred get_depends() expansion so the
        mapping happens after final expansion.
        """
        # We need to preserve EXTENDPKGV so it can be expanded correctly later
        if suffix:
            varname = varname + ":" + suffix
        orig = self.d.getVar("EXTENDPKGV", False)
        # temporarily make ${EXTENDPKGV} expand to a literal marker so it
        # survives explode/join and can be restored below
        self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
        deps = self.d.getVar(varname)
        if not deps:
            self.d.setVar("EXTENDPKGV", orig)
            return
        deps = bb.utils.explode_dep_versions2(deps)
        newdeps = collections.OrderedDict()
        for dep in deps:
            newdeps[self.map_depends(dep)] = deps[dep]
        if not varname.endswith("_NONML"):
            self.d.renameVar(varname, varname + "_NONML")
            self.d.setVar(varname, "${@oe.classextend.get_depends('%s', d)}" % varname)
            self.d.appendVarFlag(varname, "vardeps", " " + varname + "_NONML")
        ret = bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}")
        self.d.setVar("EXTENDPKGV", orig)
        return ret

    def map_packagevars(self):
        """Rewrite all per-package dependency variables, plus the
        recipe-wide ones (empty pkg suffix)."""
        for pkg in (self.d.getVar("PACKAGES").split() + [""]):
            self.map_depends_variable("RDEPENDS", pkg)
            self.map_depends_variable("RRECOMMENDS", pkg)
            self.map_depends_variable("RSUGGESTS", pkg)
            self.map_depends_variable("RPROVIDES", pkg)
            self.map_depends_variable("RREPLACES", pkg)
            self.map_depends_variable("RCONFLICTS", pkg)
            self.map_depends_variable("PKG", pkg)

    def rename_packages(self):
        """Record the old->new package name mapping and defer the actual
        PACKAGES rewrite to get_packages() at expansion time."""
        for pkg in (self.d.getVar("PACKAGES") or "").split():
            if pkg.startswith(self.extname):
                self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
                continue
            self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
        self.d.renameVar("PACKAGES", "PACKAGES_NONML")
        self.d.setVar("PACKAGES", "${@oe.classextend.get_packages(d)}")

    def rename_packages_internal(self, pkgs):
        """Expand 'pkgs' and rebuild pkgs_mapping; returns the space-joined
        list of extended package names (called from get_packages())."""
        self.pkgs_mapping = []
        for pkg in (self.d.expand(pkgs) or "").split():
            if pkg.startswith(self.extname):
                self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
                continue
            self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
        return " ".join([row[1] for row in self.pkgs_mapping])

    def rename_package_variables(self, variables):
        """Rename every '<var>:<oldpkg>' override to '<var>:<newpkg>' for
        all recorded package mappings; unexpanded ${...} names are skipped."""
        for pkg_mapping in self.pkgs_mapping:
            if pkg_mapping[0].startswith("${") and pkg_mapping[0].endswith("}"):
                continue
            for subs in variables:
                self.d.renameVar("%s:%s" % (subs, pkg_mapping[0]), "%s:%s" % (subs, pkg_mapping[1]))
class NativesdkClassExtender(ClassExtender):
    """ClassExtender variant for nativesdk: native, cross and crosssdk
    dependencies pass through unchanged instead of being prefixed."""

    def map_depends(self, dep):
        # Anything already prefixed, or belonging to the native/cross
        # tool domain, keeps its name.
        passthrough = (
            dep.startswith(self.extname)
            or dep.endswith(("-native", "-native-runtime"))
            or 'nativesdk-' in dep
            or '-cross-' in dep
            or '-crosssdk-' in dep
        )
        if passthrough:
            return dep
        return self.extend_name(dep)

View File

@@ -0,0 +1,49 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
class ClassRegistryMeta(type):
    """Give each ClassRegistry their own registry"""
    def __init__(cls, name, bases, attrs):
        cls.registry = {}
        type.__init__(cls, name, bases, attrs)


class ClassRegistry(type, metaclass=ClassRegistryMeta):
    """Maintain a registry of classes, indexed by name.

    Note that this implementation requires that the names be unique, as it uses
    a dictionary to hold the classes by name.

    The name in the registry can be overridden via the 'name' attribute of the
    class, and the 'priority' attribute controls priority. The prioritized()
    method returns the registered classes in priority order.

    Subclasses of ClassRegistry may define an 'implemented' property to exert
    control over whether the class will be added to the registry (e.g. to keep
    abstract base classes out of the registry)."""
    priority = 0

    def __init__(cls, name, bases, attrs):
        super(ClassRegistry, cls).__init__(name, bases, attrs)
        # Classes may opt out by setting 'implemented' to a false value.
        try:
            if not cls.implemented:
                return
        except AttributeError:
            pass

        # Default the registry key to the class name.
        try:
            cls.name
        except AttributeError:
            cls.name = name
        cls.registry[cls.name] = cls

    @classmethod
    def prioritized(tcls):
        """Return registered classes sorted by (priority, name), highest first."""
        return sorted(list(tcls.registry.values()),
                      key=lambda v: (v.priority, v.name), reverse=True)

    def unregister(cls):
        """Remove this class from the registry.

        Iterate over a snapshot of the keys: deleting from the dict while
        iterating its live key view raises RuntimeError in Python 3.
        """
        for key in list(cls.registry.keys()):
            if cls.registry[key] is cls:
                del cls.registry[key]

View File

@@ -0,0 +1,293 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
# This class should provide easy access to the different aspects of the
# buildsystem such as layers, bitbake location, etc.
#
# SDK_LAYERS_EXCLUDE: Layers which will be excluded from SDK layers.
# SDK_LAYERS_EXCLUDE_PATTERN: The simiar to SDK_LAYERS_EXCLUDE, this supports
# python regular expression, use space as separator,
# e.g.: ".*-downloads closed-.*"
#
import stat
import shutil
def _smart_copy(src, dest):
import subprocess
# smart_copy will choose the correct function depending on whether the
# source is a file or a directory.
mode = os.stat(src).st_mode
if stat.S_ISDIR(mode):
bb.utils.mkdirhier(dest)
cmd = "tar --exclude='.git' --exclude='__pycache__' --xattrs --xattrs-include='*' -cf - -C %s -p . \
| tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
else:
shutil.copyfile(src, dest)
shutil.copymode(src, dest)
class BuildSystem(object):
    """Provide easy access to the build system (layers, bitbake location)
    and copy it into an extensible-SDK output tree.

    Layer exclusion is controlled by SDK_LAYERS_EXCLUDE (exact paths) and
    SDK_LAYERS_EXCLUDE_PATTERN (space-separated regular expressions).
    """
    def __init__(self, context, d):
        # context: human-readable description used in log messages
        self.d = d
        self.context = context
        self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS').split()]
        self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE') or "").split()
        self.layers_exclude_pattern = d.getVar('SDK_LAYERS_EXCLUDE_PATTERN')

    def copy_bitbake_and_layers(self, destdir, workspace_name=None):
        """Copy bitbake and all configured layers into 'destdir'.

        A devtool workspace layer is renamed to 'workspace_name' (made
        unique if needed) or excluded when no name is given.  Returns a
        (copied_corebase, layers_copied) tuple of paths relative to
        'destdir'; meta-skeleton is copied but not reported.
        """
        import re
        # Copy in all metadata layers + bitbake (as repositories)
        copied_corebase = None
        layers_copied = []
        bb.utils.mkdirhier(destdir)
        layers = list(self.layerdirs)

        corebase = os.path.abspath(self.d.getVar('COREBASE'))
        layers.append(corebase)

        # The bitbake build system uses the meta-skeleton layer as a layout
        # for common recipies, e.g: the recipetool script to create kernel recipies
        # Add the meta-skeleton layer to be included as part of the eSDK installation
        layers.append(os.path.join(corebase, 'meta-skeleton'))

        # Exclude layers
        for layer_exclude in self.layers_exclude:
            if layer_exclude in layers:
                bb.note('Excluded %s from sdk layers since it is in SDK_LAYERS_EXCLUDE' % layer_exclude)
                layers.remove(layer_exclude)

        if self.layers_exclude_pattern:
            # iterate over a copy so removal doesn't disturb the loop
            layers_cp = layers[:]
            for pattern in self.layers_exclude_pattern.split():
                for layer in layers_cp:
                    if re.match(pattern, layer):
                        bb.note('Excluded %s from sdk layers since matched SDK_LAYERS_EXCLUDE_PATTERN' % layer)
                        layers.remove(layer)

        # Pick a workspace layer name that doesn't clash with an existing layer
        workspace_newname = workspace_name
        if workspace_newname:
            layernames = [os.path.basename(layer) for layer in layers]
            extranum = 0
            while workspace_newname in layernames:
                extranum += 1
                workspace_newname = '%s-%d' % (workspace_name, extranum)

        corebase_files = self.d.getVar('COREBASE_FILES').split()
        corebase_files = [corebase + '/' +x for x in corebase_files]
        # Make sure bitbake goes in
        bitbake_dir = bb.__file__.rsplit('/', 3)[0]
        corebase_files.append(bitbake_dir)

        for layer in layers:
            layerconf = os.path.join(layer, 'conf', 'layer.conf')
            layernewname = os.path.basename(layer)
            workspace = False
            if os.path.exists(layerconf):
                with open(layerconf, 'r') as f:
                    # devtool marks its workspace layer with this header line
                    if f.readline().startswith("# ### workspace layer auto-generated by devtool ###"):
                        if workspace_newname:
                            layernewname = workspace_newname
                            workspace = True
                        else:
                            bb.plain("NOTE: Excluding local workspace layer %s from %s" % (layer, self.context))
                            continue

            # If the layer was already under corebase, leave it there
            # since layers such as meta have issues when moved.
            layerdestpath = destdir
            if corebase == os.path.dirname(layer):
                layerdestpath += '/' + os.path.basename(corebase)
            # If the layer is located somewhere under the same parent directory
            # as corebase we keep the layer structure.
            elif os.path.commonpath([layer, corebase]) == os.path.dirname(corebase):
                layer_relative = os.path.relpath(layer, os.path.dirname(corebase))
                if os.path.dirname(layer_relative) != layernewname:
                    layerdestpath += '/' + os.path.dirname(layer_relative)

            layerdestpath += '/' + layernewname

            layer_relative = os.path.relpath(layerdestpath,
                                             destdir)
            # Treat corebase as special since it typically will contain
            # build directories or other custom items.
            if corebase == layer:
                copied_corebase = layer_relative
                bb.utils.mkdirhier(layerdestpath)
                # only COREBASE_FILES (plus bitbake) are copied, not the
                # whole corebase tree
                for f in corebase_files:
                    f_basename = os.path.basename(f)
                    destname = os.path.join(layerdestpath, f_basename)
                    _smart_copy(f, destname)
            else:
                layers_copied.append(layer_relative)

                if os.path.exists(os.path.join(layerdestpath, 'conf/layer.conf')):
                    bb.note("Skipping layer %s, already handled" % layer)
                else:
                    _smart_copy(layer, layerdestpath)

                if workspace:
                    # Make some adjustments original workspace layer
                    # Drop sources (recipe tasks will be locked, so we don't need them)
                    srcdir = os.path.join(layerdestpath, 'sources')
                    if os.path.isdir(srcdir):
                        shutil.rmtree(srcdir)
                    # Drop all bbappends except the one for the image the SDK is being built for
                    # (because of externalsrc, the workspace bbappends will interfere with the
                    # locked signatures if present, and we don't need them anyway)
                    image_bbappend = os.path.splitext(os.path.basename(self.d.getVar('FILE')))[0] + '.bbappend'
                    appenddir = os.path.join(layerdestpath, 'appends')
                    if os.path.isdir(appenddir):
                        for fn in os.listdir(appenddir):
                            if fn == image_bbappend:
                                continue
                            else:
                                os.remove(os.path.join(appenddir, fn))
                    # Drop README
                    readme = os.path.join(layerdestpath, 'README')
                    if os.path.exists(readme):
                        os.remove(readme)
                    # Filter out comments in layer.conf and change layer name
                    layerconf = os.path.join(layerdestpath, 'conf', 'layer.conf')
                    with open(layerconf, 'r') as f:
                        origlines = f.readlines()
                    with open(layerconf, 'w') as f:
                        for line in origlines:
                            if line.startswith('#'):
                                continue
                            line = line.replace('workspacelayer', workspace_newname)
                            f.write(line)

        # meta-skeleton layer is added as part of the build system
        # but not as a layer included in the build, therefore it is
        # not reported to the function caller.
        for layer in layers_copied:
            if layer.endswith('/meta-skeleton'):
                layers_copied.remove(layer)
                break

        return copied_corebase, layers_copied
def generate_locked_sigs(sigfile, d):
    """Dump locked task signatures for everything in BB_TASKDEPDATA to 'sigfile'."""
    bb.utils.mkdirhier(os.path.dirname(sigfile))
    taskdepdata = d.getVar('BB_TASKDEPDATA', False)
    # each dependency entry is (taskname at [1], fn/pn at [2], ...)
    tasks = []
    for entry in taskdepdata.values():
        tasks.append('%s:%s' % (entry[2], entry[1]))
    bb.parse.siggen.dump_lockedsigs(sigfile, tasks)
def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, onlynative, pruned_output):
    """Copy a locked-signatures file, dropping unwanted entries.

    Signature lines (continuation lines ending in '\\') inside
    SIGGEN_LOCKEDSIGS* assignments are written out only when neither
    their task nor their target is excluded; when 'onlynative' is set,
    only 'nativesdk' targets are kept.  All other lines pass through.
    """
    with open(lockedsigs, 'r') as infile:
        bb.utils.mkdirhier(os.path.dirname(pruned_output))
        with open(pruned_output, 'w') as f:
            # invalue is True while inside a SIGGEN_LOCKEDSIGS value
            invalue = False
            for line in infile:
                if invalue:
                    if line.endswith('\\\n'):
                        # signature entry: "<target>:<task>:<hash> \"
                        splitval = line.strip().split(':')
                        if not splitval[1] in excluded_tasks and not splitval[0] in excluded_targets:
                            if onlynative:
                                if 'nativesdk' in splitval[0]:
                                    f.write(line)
                            else:
                                f.write(line)
                    else:
                        # closing line of the assignment
                        f.write(line)
                        invalue = False
                elif line.startswith('SIGGEN_LOCKEDSIGS'):
                    invalue = True
                    f.write(line)
def merge_lockedsigs(copy_tasks, lockedsigs_main, lockedsigs_extra, merged_output, copy_output=None):
    """Merge two locked-signatures files.

    Entries from 'lockedsigs_extra' not already present in
    'lockedsigs_main' are merged in (grouped per SIGGEN_LOCKEDSIGS_t-*
    architecture).  Entries whose task is in 'copy_tasks' (or all new
    entries when copy_tasks is empty) are additionally written to
    'copy_output' when given; the full merge goes to 'merged_output'.
    """
    # merged: arch -> list of signature lines; arch_order preserves
    # first-seen ordering for the output file
    merged = {}
    arch_order = []
    with open(lockedsigs_main, 'r') as f:
        invalue = None
        for line in f:
            if invalue:
                if line.endswith('\\\n'):
                    merged[invalue].append(line)
                else:
                    invalue = None
            elif line.startswith('SIGGEN_LOCKEDSIGS_t-'):
                # strip the 'SIGGEN_LOCKEDSIGS_' prefix to get the type name
                invalue = line[18:].split('=', 1)[0].rstrip()
                merged[invalue] = []
                arch_order.append(invalue)

    with open(lockedsigs_extra, 'r') as f:
        invalue = None
        tocopy = {}
        for line in f:
            if invalue:
                if line.endswith('\\\n'):
                    if not line in merged[invalue]:
                        target, task = line.strip().split(':')[:2]
                        if not copy_tasks or task in copy_tasks:
                            tocopy[invalue].append(line)
                        merged[invalue].append(line)
                else:
                    invalue = None
            elif line.startswith('SIGGEN_LOCKEDSIGS_t-'):
                invalue = line[18:].split('=', 1)[0].rstrip()
                if not invalue in merged:
                    merged[invalue] = []
                    arch_order.append(invalue)
                tocopy[invalue] = []

    def write_sigs_file(fn, types, sigs):
        # emit one SIGGEN_LOCKEDSIGS_<type> assignment per non-empty type,
        # plus the SIGGEN_LOCKEDSIGS_TYPES index
        fulltypes = []
        bb.utils.mkdirhier(os.path.dirname(fn))
        with open(fn, 'w') as f:
            for typename in types:
                lines = sigs[typename]
                if lines:
                    f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % typename)
                    for line in lines:
                        f.write(line)
                    f.write(' "\n')
                    fulltypes.append(typename)
            f.write('SIGGEN_LOCKEDSIGS_TYPES = "%s"\n' % ' '.join(fulltypes))

    if copy_output:
        write_sigs_file(copy_output, list(tocopy.keys()), tocopy)
    if merged_output:
        write_sigs_file(merged_output, arch_order, merged)
def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cache, d, fixedlsbstring="", filterfile=None):
    """Populate 'output_sstate_cache' with the sstate objects referenced
    by 'lockedsigs', using the gen-lockedsig-cache helper script.

    When 'fixedlsbstring' is given and differs from NATIVELSBSTRING, the
    native cache subdirectory is renamed to that fixed string so the SDK
    is host-distribution independent.
    """
    import shutil
    bb.note('Generating sstate-cache...')

    nativelsbstring = d.getVar('NATIVELSBSTRING')
    bb.process.run("PYTHONDONTWRITEBYTECODE=1 gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
    if fixedlsbstring and nativelsbstring != fixedlsbstring:
        nativedir = output_sstate_cache + '/' + nativelsbstring
        if os.path.isdir(nativedir):
            destdir = os.path.join(output_sstate_cache, fixedlsbstring)
            # move every file over, merging with anything already there
            for root, _, files in os.walk(nativedir):
                for fn in files:
                    src = os.path.join(root, fn)
                    dest = os.path.join(destdir, os.path.relpath(src, nativedir))
                    if os.path.exists(dest):
                        # Already exists, and it'll be the same file, so just delete it
                        os.unlink(src)
                    else:
                        bb.utils.mkdirhier(os.path.dirname(dest))
                        shutil.move(src, dest)
def check_sstate_task_list(d, targets, filteroutfile, cmdprefix='', cwd=None, logfile=None):
    """Run oe-check-sstate for 'targets', writing the task list to
    'filteroutfile'.

    The command runs in a cleaned copy of the original environment:
    BUILDDIR/BBPATH are dropped and bitbake's own bin directory is
    removed from PATH so the invoked tools come from the host.
    """
    import subprocess

    bb.note('Generating sstate task list...')

    if not cwd:
        cwd = os.getcwd()
    if logfile:
        logparam = '-l %s' % logfile
    else:
        logparam = ''
    cmd = "%sPYTHONDONTWRITEBYTECODE=1 BB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
    env = dict(d.getVar('BB_ORIGENV', False))
    env.pop('BUILDDIR', '')
    env.pop('BBPATH', '')
    # remove any bitbake/bin entries from PATH
    pathitems = env['PATH'].split(':')
    env['PATH'] = ':'.join([item for item in pathitems if not item.endswith('/bitbake/bin')])
    bb.process.run(cmd, stderr=subprocess.STDOUT, env=env, cwd=cwd, executable='/bin/bash')

View File

@@ -0,0 +1,245 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
import collections
import re
import itertools
import functools
_Version = collections.namedtuple(
    "_Version", ["release", "patch_l", "pre_l", "pre_v"]
)


@functools.total_ordering
class Version():
    """A comparable upstream version string.

    'suffix' selects how a trailing letter/patch component is parsed:
    "alphabetical" treats a single trailing letter as a patch level,
    "patch" understands "p<N>"/"patch<N>", anything else ignores such
    suffixes.  Pre-release markers (rc/alpha/beta/pre/preview/dev) sort
    before the corresponding final release.
    """

    def __init__(self, version, suffix=None):
        known_suffixes = ["alphabetical", "patch"]
        suffix_kind = str(suffix)

        if suffix_kind == "alphabetical":
            version_pattern =  r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(?P<patch_l>[a-z]))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
        elif suffix_kind == "patch":
            version_pattern =  r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(p|patch)(?P<patch_l>[0-9]+))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
        else:
            version_pattern =  r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""

        matcher = re.compile(r"^\s*" + version_pattern + r"\s*$", re.VERBOSE | re.IGNORECASE)
        match = matcher.search(version)
        if not match:
            raise Exception("Invalid version: '{0}'".format(version))

        release_tuple = tuple(
            int(part) for part in match.group("release").replace("-", ".").split(".")
        )
        if suffix_kind in known_suffixes and match.group("patch_l"):
            patch_level = match.group("patch_l")
        else:
            patch_level = ""

        self._version = _Version(
            release=release_tuple,
            patch_l=patch_level,
            pre_l=match.group("pre_l"),
            pre_v=match.group("pre_v")
        )
        self._key = _cmpkey(
            release_tuple,
            patch_level,
            match.group("pre_l"),
            match.group("pre_v")
        )

    def __eq__(self, other):
        if isinstance(other, Version):
            return self._key == other._key
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, Version):
            return self._key > other._key
        return NotImplemented


def _cmpkey(release, patch_l, pre_l, pre_v):
    """Build the comparison key: trailing zeros in the release are not
    significant (1.0 == 1.0.0) and a missing pre-release sorts highest."""
    trimmed = list(release)
    while trimmed and trimmed[-1] == 0:
        trimmed.pop()
    _release = tuple(trimmed)

    _patch = patch_l.upper()

    if pre_l is None and pre_v is None:
        # final release: newer than any pre-release of the same version
        _pre = float('inf')
    else:
        _pre = float(pre_v) if pre_v else float('-inf')
    return _release, _patch, _pre
def get_patched_cves(d):
    """
    Get patches that solve CVEs using the "CVE: " tag.
    """

    import re
    import oe.patch

    # matches one or more CVE IDs after a "CVE: " tag inside patch contents
    cve_match = re.compile(r"CVE:( CVE-\d{4}-\d+)+")

    # Matches the last "CVE-YYYY-ID" in the file name, also if written
    # in lowercase. Possible to have multiple CVE IDs in a single
    # file name, but only the last one will be detected from the file name.
    # However, patch files contents addressing multiple CVE IDs are supported
    # (cve_match regular expression)
    cve_file_name_match = re.compile(r".*(CVE-\d{4}-\d+)", re.IGNORECASE)

    patched_cves = set()
    patches = oe.patch.src_patches(d)
    bb.debug(2, "Scanning %d patches for CVEs" % len(patches))
    for url in patches:
        patch_file = bb.fetch.decodeurl(url)[2]

        # Check patch file name for CVE ID
        fname_match = cve_file_name_match.search(patch_file)
        if fname_match:
            cve = fname_match.group(1).upper()
            patched_cves.add(cve)
            bb.debug(2, "Found %s from patch file name %s" % (cve, patch_file))

        # Remote patches won't be present and compressed patches won't be
        # unpacked, so say we're not scanning them
        if not os.path.isfile(patch_file):
            bb.note("%s is remote or compressed, not scanning content" % patch_file)
            continue

        with open(patch_file, "r", encoding="utf-8") as f:
            try:
                patch_text = f.read()
            except UnicodeDecodeError:
                # retry with a legacy encoding before giving up
                bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
                        " trying with iso8859-1" %  patch_file)
                f.close()
                with open(patch_file, "r", encoding="iso8859-1") as f:
                    patch_text = f.read()

        # Search for one or more "CVE: " lines
        text_match = False
        for match in cve_match.finditer(patch_text):
            # Get only the CVEs without the "CVE: " tag
            cves = patch_text[match.start()+5:match.end()]
            for cve in cves.split():
                bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
                patched_cves.add(cve)
                text_match = True

        if not fname_match and not text_match:
            bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)

    # Search for additional patched CVEs declared via CVE_STATUS flags
    for cve in (d.getVarFlags("CVE_STATUS") or {}):
        decoded_status, _, _ = decode_cve_status(d, cve)
        if decoded_status == "Patched":
            bb.debug(2, "CVE %s is additionally patched" % cve)
            patched_cves.add(cve)

    return patched_cves
def get_cpe_ids(cve_product, version):
    """
    Get list of CPE identifiers for the given product and version
    """
    # drop any "+git..." suffix from the recipe version
    version = version.split("+git")[0]

    cpe_ids = []
    for product in cve_product.split():
        # CVE_PRODUCT in recipes may include vendor information for CPE
        # identifiers ("vendor:product"). If not, use wildcard for vendor.
        prefix, sep, rest = product.partition(":")
        vendor = prefix if sep else "*"
        prod = rest if sep else product
        cpe_ids.append('cpe:2.3:*:{}:{}:{}:*:*:*:*:*:*:*'.format(vendor, prod, version))

    return cpe_ids
def cve_check_merge_jsons(output, data):
    """
    Merge the data in the "package" property to the main data file
    output
    """
    if output["version"] != data["version"]:
        bb.error("Version mismatch when merging JSON outputs")
        return

    new_package = data["package"][0]
    for existing in output["package"]:
        if existing["name"] == new_package["name"]:
            bb.error("Error adding the same package %s twice" % existing["name"])
            return

    output["package"].append(new_package)
def update_symlinks(target_path, link_path):
    """
    Update a symbolic link link_path to point to target_path.
    Remove the link and recreate it if exist and is different.
    """
    if link_path == target_path or not os.path.exists(target_path):
        return
    # a dangling or fresh link resolves to a non-existent path and is
    # simply (re)created; an existing resolvable link is replaced
    if os.path.exists(os.path.realpath(link_path)):
        os.remove(link_path)
    os.symlink(os.path.basename(target_path), link_path)
def convert_cve_version(version):
    """
    This function converts from CVE format to Yocto version format.
    eg 8.3_p1 -> 8.3p1, 6.2_rc1 -> 6.2-rc1

    Unless it is redefined using CVE_VERSION in the recipe,
    cve_check uses the version in the name of the recipe (${PV})
    to check vulnerabilities against a CVE in the database downloaded from NVD.

    When the version has an update, i.e.
    "p1" in OpenSSH 8.3p1,
    "-rc1" in linux kernel 6.2-rc1,
    the database stores the version as version_update (8.3_p1, 6.2_rc1).
    Therefore, we must transform this version before comparing to the
    recipe version.

    In this case, the parameter of the function is 8.3_p1.
    If the version uses the Release Candidate format, "rc",
    this function replaces the '_' by '-'.
    If the version uses the Update format, "p",
    this function removes the '_' completely.
    """
    import re

    match = re.match('^([0-9.]+)_((p|rc)[0-9]+)$', version)
    if match is None:
        return version

    base, update, kind = match.group(1), match.group(2), match.group(3)
    # rc suffixes keep a separator, p suffixes are glued on directly
    separator = '-' if kind == "rc" else ''
    return base + separator + update
def decode_cve_status(d, cve):
    """
    Convert CVE_STATUS into status, detail and description.
    """
    raw_status = d.getVarFlag("CVE_STATUS", cve)
    if not raw_status:
        return ("", "", "")

    # format is "<detail>: <free-form description>"
    detail, _, description = raw_status.partition(':')
    description = description.strip()

    status_mapping = d.getVarFlag("CVE_CHECK_STATUSMAP", detail)
    if status_mapping is None:
        bb.warn('Invalid detail "%s" for CVE_STATUS[%s] = "%s", fallback to Unpatched' % (detail, cve, raw_status))
        status_mapping = "Unpatched"

    return (status_mapping, detail, description)

View File

@@ -0,0 +1,53 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import json
import oe.maketype
def typed_value(key, d):
    """Construct a value for the specified metadata variable, using its flags
    to determine the type and parameters for construction."""
    var_type = d.getVarFlag(key, 'type')
    raw_flags = d.getVarFlags(key)
    if raw_flags is None:
        flags = {}
    else:
        # expand every flag value before handing it to the constructor
        flags = {flag: d.expand(value) for flag, value in raw_flags.items()}

    try:
        return oe.maketype.create(d.getVar(key) or '', var_type, **flags)
    except (TypeError, ValueError) as exc:
        bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
def export2json(d, json_file, expand=True, searchString="",replaceString=""):
    """Dump the datastore variables to 'json_file' as sorted, indented JSON.

    Internal (_*), bitbake (BB*, B_pn*), task (do_*) and function
    variables are skipped; 'searchString' occurrences in each value are
    replaced by 'replaceString'.
    """
    data2export = {}

    def _exportable(key):
        # filter out internal/bitbake/task/function variables
        if key.startswith(("_", "BB", "B_pn", "do_")):
            return False
        return not d.getVarFlag(key, "func")

    for key in filter(_exportable, d.keys()):
        try:
            data2export[key] = d.getVar(key, expand).replace(searchString, replaceString)
        except bb.data_smart.ExpansionError:
            data2export[key] = ''
        except AttributeError:
            # getVar returned None: skip the key entirely
            pass

    with open(json_file, "w") as f:
        json.dump(data2export, f, skipkeys=True, indent=4, sort_keys=True)

View File

@@ -0,0 +1,314 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
def create_socket(url, d):
    """Open 'url' for reading, exporting the datastore's proxy settings first."""
    # 'import urllib' alone does not guarantee the 'urllib.request'
    # submodule is loaded; import it explicitly.
    import urllib.request
    from bb.utils import export_proxies

    export_proxies(d)
    return urllib.request.urlopen(url)
def get_links_from_url(url, d):
    "Return all the href links found on the web location"
    from bs4 import BeautifulSoup, SoupStrainer

    page = create_socket(url, d)
    soup = BeautifulSoup(page, "html.parser", parse_only=SoupStrainer("a"))
    # collect every anchor's href, stripped of surrounding slashes
    return [anchor['href'].strip('/') for anchor in soup.find_all('a', href=True)]
def find_latest_numeric_release(url, d):
    "Find the latest listed numeric release on the given url"
    # 'highest' avoids shadowing the builtin max(); the bare 'except:'
    # that previously swallowed everything is narrowed to ValueError
    highest = 0
    higheststr = ""
    for link in get_links_from_url(url, d):
        try:
            # TODO use bb.utils.vercmp_string_op()
            release = float(link)
        except ValueError:
            # non-numeric links (README, parent dir, ...) are not releases
            release = 0
        if release > highest:
            highest = release
            higheststr = link
    return higheststr
def is_src_rpm(name):
    """Return True when 'name' points at a source RPM (.src.rpm) file."""
    return name.endswith(".src.rpm")
def package_name_from_srpm(srpm):
    "Strip out the package name from the src.rpm filename"

    # e.g. ca-certificates-2016.2.7-1.0.fc24.src.rpm -> ca-certificates:
    # drop the extension, then peel off the trailing "-version-release"
    stem = srpm.replace(".src.rpm", "")
    name, _version, _release = stem.rsplit("-", 2)
    return name
def get_source_package_list_from_url(url, section, d):
    "Return a sectioned list of package names from a URL list"

    bb.note("Reading %s: %s" % (url, section))
    links = get_links_from_url(url, d)
    # keep only .src.rpm links, reduce them to package names, and tag
    # each name with the section
    srpm_names = (package_name_from_srpm(link) for link in links if is_src_rpm(link))
    return {name + ":" + section for name in srpm_names}
def get_source_package_list_from_url_by_letter(url, section, d):
    """Collect packages from per-letter subfolders (a-z, 0-9) of 'url'."""
    import string
    from urllib.error import HTTPError

    packages = set()
    for letter in string.ascii_lowercase + string.digits:
        # Not all subfolders may exist, so silently handle 404
        try:
            packages |= get_source_package_list_from_url("%s/%s" % (url, letter), section, d)
        except HTTPError as e:
            if e.code != 404:
                raise
    return packages
def get_latest_released_fedora_source_package_list(d):
    "Returns list of all the name os packages in the latest fedora distro"
    base = "http://archive.fedoraproject.org/pub/fedora/linux"
    latest = find_latest_numeric_release(base + "/releases/", d)

    package_names = get_source_package_list_from_url_by_letter(
        base + "/releases/%s/Everything/source/tree/Packages/" % latest, "main", d)
    package_names |= get_source_package_list_from_url_by_letter(
        base + "/updates/%s/SRPMS/" % latest, "updates", d)
    return latest, package_names
def get_latest_released_opensuse_source_package_list(d):
    "Returns list of all the name os packages in the latest opensuse distro"
    leap_base = "http://download.opensuse.org/source/distribution/leap"
    latest = find_latest_numeric_release(leap_base, d)

    package_names = get_source_package_list_from_url(
        "%s/%s/repo/oss/suse/src/" % (leap_base, latest), "main", d)
    package_names |= get_source_package_list_from_url(
        "http://download.opensuse.org/update/leap/%s/oss/src/" % latest, "updates", d)
    return latest, package_names
def get_latest_released_clear_source_package_list(d):
    """Return (release, packages) for the newest Clear Linux release."""
    base = "https://download.clearlinux.org/releases/"
    latest = find_latest_numeric_release(base, d)
    packages = get_source_package_list_from_url(
        "%s%s/clear/source/SRPMS/" % (base, latest), "main", d)
    return latest, packages
def find_latest_debian_release(url, d):
    "Find the latest listed debian release on the given url"

    releases = [link.replace("Debian", "")
                for link in get_links_from_url(url, d)
                if link.startswith("Debian")]
    releases.sort()
    # was a bare 'except:' around releases[-1]; only an empty list can
    # legitimately fail here, so test for it explicitly
    if releases:
        return releases[-1]
    return "_NotFound_"
def get_debian_style_source_package_list(url, section, d):
    "Return the list of package-names stored in the debian style Sources.gz file"
    import gzip

    packages = set()
    source_index = gzip.open(create_socket(url, d), mode="rt")
    for line in source_index:
        if not line.startswith("Package:"):
            continue
        name = line.split(":", 1)[1].strip()
        packages.add(name + ":" + section)
    return packages
def get_latest_released_debian_source_package_list(d):
    "Returns list of all the name of packages in the latest debian distro"
    latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/", d)
    # NOTE: the package lists always come from 'stable'; only the release
    # name is taken from the newest "Debian*" link
    main_url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
    updates_url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
    package_names = get_debian_style_source_package_list(main_url, "main", d)
    package_names |= get_debian_style_source_package_list(updates_url, "updates", d)
    return latest, package_names
def find_latest_ubuntu_release(url, d):
    """
    Find the latest listed Ubuntu release on the given ubuntu/dists/ URL.

    To avoid matching development releases look for distributions that have
    updates, so the resulting distro could be any supported release.
    """
    url += "?C=M;O=D" # Descending Sort by Last Modified
    for link in get_links_from_url(url, d):
        if "-updates" in link:
            return link.replace("-updates", "")
    return "_NotFound_"
def get_latest_released_ubuntu_source_package_list(d):
    "Returns list of all the name os packages in the latest ubuntu distro"
    latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/", d)
    main_url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
    updates_url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
    package_names = get_debian_style_source_package_list(main_url, "main", d)
    package_names |= get_debian_style_source_package_list(updates_url, "updates", d)
    return latest, package_names
def create_distro_packages_list(distro_check_dir, d):
    """Fetch the latest source-package lists of the reference distros and
    write one '<Distro>-<release>' file per distro under
    '<distro_check_dir>/package_lists'.
    """
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    bb.utils.remove(pkglst_dir, True)
    bb.utils.mkdirhier(pkglst_dir)

    per_distro_functions = (
                            ("Debian", get_latest_released_debian_source_package_list),
                            ("Ubuntu", get_latest_released_ubuntu_source_package_list),
                            ("Fedora", get_latest_released_fedora_source_package_list),
                            ("openSUSE", get_latest_released_opensuse_source_package_list),
                            ("Clear", get_latest_released_clear_source_package_list),
                           )

    for name, fetcher_func in per_distro_functions:
        try:
            release, package_list = fetcher_func(d)
        except Exception as e:
            bb.warn("Cannot fetch packages for %s: %s" % (name, e))
            # BUGFIX: without this 'continue', a failed fetch fell through
            # and used an undefined (or stale from the previous iteration)
            # release/package_list below
            continue
        bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
        if len(package_list) == 0:
            bb.error("Didn't fetch any packages for %s %s" % (name, release))

        package_list_file = os.path.join(pkglst_dir, name + "-" + release)
        with open(package_list_file, 'w') as f:
            for pkg in sorted(package_list):
                f.write(pkg + "\n")
def update_distro_data(distro_check_dir, datetime, d):
    """
    If distro packages list data is old then rebuild it.
    The operations has to be protected by a lock so that
    only one thread performes it at a time.
    """
    if not os.path.isdir (distro_check_dir):
        try:
            bb.note ("Making new directory: %s" % distro_check_dir)
            os.makedirs (distro_check_dir)
        except OSError:
            raise Exception('Unable to create directory %s' % (distro_check_dir))


    datetime_file = os.path.join(distro_check_dir, "build_datetime")
    saved_datetime = "_invalid_"
    import fcntl

    # Open the timestamp file first; the original code wrapped this in
    # the same try whose 'finally' unlocked/closed 'f', so a failing
    # open() raised a NameError on the unbound 'f' and masked the real
    # error.
    try:
        if not os.path.exists(datetime_file):
            open(datetime_file, 'w+').close() # touch the file so that the next open won't fail
        f = open(datetime_file, "r+")
    except OSError as e:
        raise Exception('Unable to open timestamp: %s' % e)

    try:
        fcntl.lockf(f, fcntl.LOCK_EX)
        saved_datetime = f.read()
        # compare only the date part (YYYYMMDD); regenerate once per day
        if saved_datetime[0:8] != datetime[0:8]:
            bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
            bb.note("Regenerating distro package lists")
            create_distro_packages_list(distro_check_dir, d)
            f.seek(0)
            f.write(datetime)
    except OSError as e:
        raise Exception('Unable to open timestamp: %s' % e)
    finally:
        fcntl.lockf(f, fcntl.LOCK_UN)
        f.close()
def compare_in_distro_packages_list(distro_check_dir, d):
    # Look this recipe up in the cached per-distro package lists and return a
    # list of "Distro-section" strings for every distro that ships a package
    # with a matching name (or a configured DISTRO_PN_ALIAS alias).
    if not os.path.isdir(distro_check_dir):
        raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")
    localdata = bb.data.createCopy(d)
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    matching_distros = []
    pn = recipe_name = d.getVar('PN')
    bb.note("Checking: %s" % pn)
    # Strip common recipe-name decorations (-native, nativesdk-, -cross,
    # -initial) so the bare upstream name is compared against the distro
    # lists, and extend OVERRIDES so DISTRO_PN_ALIAS is read for that name.
    if pn.find("-native") != -1:
        pnstripped = pn.split("-native")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
        recipe_name = pnstripped[0]
    if pn.startswith("nativesdk-"):
        pnstripped = pn.split("nativesdk-")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES'))
        recipe_name = pnstripped[1]
    if pn.find("-cross") != -1:
        pnstripped = pn.split("-cross")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
        recipe_name = pnstripped[0]
    if pn.find("-initial") != -1:
        pnstripped = pn.split("-initial")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
        recipe_name = pnstripped[0]
    bb.note("Recipe: %s" % recipe_name)
    # Alias tokens naming one of these pseudo-distros are copied straight into
    # the result instead of being matched against package lists.
    # NOTE(review): a bare token that is neither here nor of the form "a=b"
    # raises KeyError below -- presumably treated as a configuration error.
    distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})
    tmp = localdata.getVar('DISTRO_PN_ALIAS') or ""
    for str in tmp.split():  # NOTE(review): 'str' shadows the builtin
        if str and str.find("=") == -1 and distro_exceptions[str]:
            matching_distros.append(str)
    # "distro=alias" tokens map a distro name to the package name it uses.
    distro_pn_aliases = {}
    for str in tmp.split():
        if "=" in str:
            (dist, pn_alias) = str.split('=')
            distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
    # Each cached list file is named "<Distro>-<release>" and contains
    # "pkg:section" lines; record at most one match per distro.
    for file in os.listdir(pkglst_dir):  # NOTE(review): 'file' shadows a builtin name
        (distro, distro_release) = file.split("-")
        f = open(os.path.join(pkglst_dir, file), "r")
        for line in f:
            (pkg, section) = line.split(":")
            if distro.lower() in distro_pn_aliases:
                pn = distro_pn_aliases[distro.lower()]
            else:
                pn = recipe_name
            if pn == pkg:
                matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
                f.close()
                break
        f.close()
    # All DISTRO_PN_ALIAS tokens are appended verbatim at the end as well.
    for item in tmp.split():
        matching_distros.append(item)
    bb.note("Matching: %s" % matching_distros)
    return matching_distros
def create_log_file(d, logname):
    """Create a timestamped log file under LOG_DIR and point a stable
    symlink (named logname) at it; records the path in LOG_FILE and
    returns it."""
    logpath = d.getVar('LOG_DIR')
    bb.utils.mkdirhier(logpath)
    base, suffix = os.path.splitext(logname)
    logfile = os.path.join(logpath, "%s.%s%s" % (base, d.getVar('DATETIME'), suffix))
    if not os.path.exists(logfile):
        linkpath = os.path.join(logpath, logname)
        # Replace any stale symlink from a previous build.
        if os.path.exists(linkpath):
            os.remove(linkpath)
        open(logfile, 'w+').close()
        os.symlink(logfile, linkpath)
    d.setVar('LOG_FILE', logfile)
    return logfile
def save_distro_check_result(result, datetime, result_file, d):
    """Append this recipe's distro-check result as one CSV line
    ("PN,item,item,...") to result_file, serialising concurrent
    writers with an fcntl lock."""
    pn = d.getVar('PN')
    logdir = d.getVar('LOG_DIR')
    if not logdir:
        bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
        return
    bb.utils.mkdirhier(logdir)

    line = ",".join([pn] + list(result))
    import fcntl
    # 'with' guarantees the descriptor is closed even if the write fails
    # (it previously leaked on error); the lock is released in any case.
    with open(result_file, "a") as f:
        fcntl.lockf(f, fcntl.LOCK_EX)
        try:
            f.seek(0, os.SEEK_END) # seek to the end of file
            f.write(line + "\n")
        finally:
            fcntl.lockf(f, fcntl.LOCK_UN)

View File

@@ -0,0 +1,145 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
def machine_dict(d):
    """Return the table of known ELF targets, keyed by TARGET_OS then
    TARGET_ARCH.  Each value is a tuple:
    (ELF machine, OSABI, ABIVERSION, little-endian?, bitness)."""
    #           TARGET_OS  TARGET_ARCH   MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
    machdata = {
            "darwin9" : {
                        "arm" :       (40,     0,    0,          True,          32),
                      },
            "eabi" : {
                        "arm" :       (40,     0,    0,          True,          32),
                      },
            "elf" : {
                        "aarch64" :   (183,    0,    0,          True,          64),
                        "aarch64_be" :(183,    0,    0,          False,         64),
                        "i586" :      (3,      0,    0,          True,          32),
                        "i686" :      (3,      0,    0,          True,          32),
                        "x86_64":     (62,     0,    0,          True,          64),
                        "epiphany":   (4643,   0,    0,          True,          32),
                        "lm32":       (138,    0,    0,          False,         32),
                        "loongarch64":(258,    0,    0,          True,          64),
                        "mips":       ( 8,     0,    0,          False,         32),
                        "mipsel":     ( 8,     0,    0,          True,          32),
                        "microblaze": (189,    0,    0,          False,         32),
                        "microblazeel":(189,   0,    0,          True,          32),
                        "powerpc":    (20,     0,    0,          False,         32),
                        "riscv32":    (243,    0,    0,          True,          32),
                        "riscv64":    (243,    0,    0,          True,          64),
                      },
            "linux" : {
                        "aarch64" :   (183,    0,    0,          True,          64),
                        "aarch64_be" :(183,    0,    0,          False,         64),
                        "arm" :       (40,    97,    0,          True,          32),
                        "armeb":      (40,    97,    0,          False,         32),
                        "powerpc":    (20,     0,    0,          False,         32),
                        "powerpc64":  (21,     0,    0,          False,         64),
                        "powerpc64le": (21,    0,    0,          True,          64),
                        "i386":       ( 3,     0,    0,          True,          32),
                        "i486":       ( 3,     0,    0,          True,          32),
                        "i586":       ( 3,     0,    0,          True,          32),
                        "i686":       ( 3,     0,    0,          True,          32),
                        "x86_64":     (62,     0,    0,          True,          64),
                        "ia64":       (50,     0,    0,          True,          64),
                        "alpha":      (36902,  0,    0,          True,          64),
                        "hppa":       (15,     3,    0,          False,         32),
                        "loongarch64":(258,    0,    0,          True,          64),
                        "m68k":       ( 4,     0,    0,          False,         32),
                        "mips":       ( 8,     0,    0,          False,         32),
                        "mipsel":     ( 8,     0,    0,          True,          32),
                        "mips64":     ( 8,     0,    0,          False,         64),
                        "mips64el":   ( 8,     0,    0,          True,          64),
                        "mipsisa32r6":   ( 8,  0,    0,          False,         32),
                        "mipsisa32r6el": ( 8,  0,    0,          True,          32),
                        "mipsisa64r6":   ( 8,  0,    0,          False,         64),
                        "mipsisa64r6el": ( 8,  0,    0,          True,          64),
                        "nios2":      (113,    0,    0,          True,          32),
                        "riscv32":    (243,    0,    0,          True,          32),
                        "riscv64":    (243,    0,    0,          True,          64),
                        "s390":       (22,     0,    0,          False,         32),
                        "sh4":        (42,     0,    0,          True,          32),
                        "sparc":      ( 2,     0,    0,          False,         32),
                        "microblaze": (189,    0,    0,          False,         32),
                        "microblazeel":(189,   0,    0,          True,          32),
                      },
            "linux-android" : {
                        "aarch64" :   (183,    0,    0,          True,          64),
                        "i686":       ( 3,     0,    0,          True,          32),
                        "x86_64":     (62,     0,    0,          True,          64),
                      },
            "linux-androideabi" : {
                        "arm" :       (40,    97,    0,          True,          32),
                      },
            "linux-musl" : {
                        "aarch64" :   (183,    0,    0,          True,          64),
                        "aarch64_be" :(183,    0,    0,          False,         64),
                        "arm" :       ( 40,   97,    0,          True,          32),
                        "armeb":      ( 40,   97,    0,          False,         32),
                        "powerpc":    ( 20,    0,    0,          False,         32),
                        "powerpc64":  ( 21,    0,    0,          False,         64),
                        "powerpc64le": (21,    0,    0,          True,          64),
                        "i386":       ( 3,     0,    0,          True,          32),
                        "i486":       ( 3,     0,    0,          True,          32),
                        "i586":       ( 3,     0,    0,          True,          32),
                        "i686":       ( 3,     0,    0,          True,          32),
                        "x86_64":     ( 62,    0,    0,          True,          64),
                        "mips":       ( 8,     0,    0,          False,         32),
                        "mipsel":     ( 8,     0,    0,          True,          32),
                        "mips64":     ( 8,     0,    0,          False,         64),
                        "mips64el":   ( 8,     0,    0,          True,          64),
                        "microblaze": (189,    0,    0,          False,         32),
                        "microblazeel":(189,   0,    0,          True,          32),
                        "riscv32":    (243,    0,    0,          True,          32),
                        "riscv64":    (243,    0,    0,          True,          64),
                        "sh4":        ( 42,    0,    0,          True,          32),
                      },
            "uclinux-uclibc" : {
                        "bfin":       ( 106,   0,    0,          True,          32),
                      },
            "linux-gnueabi" : {
                        "arm" :       (40,     0,    0,          True,          32),
                        "armeb" :     (40,     0,    0,          False,         32),
                      },
            "linux-musleabi" : {
                        "arm" :       (40,     0,    0,          True,          32),
                        "armeb" :     (40,     0,    0,          False,         32),
                      },
            "linux-gnuspe" : {
                        "powerpc":    (20,     0,    0,          False,         32),
                      },
            "linux-muslspe" : {
                        "powerpc":    (20,     0,    0,          False,         32),
                      },
            "linux-gnu" : {
                        "powerpc":    (20,     0,    0,          False,         32),
                        "sh4":        (42,     0,    0,          True,          32),
                      },
            "linux-gnu_ilp32" : {
                        "aarch64" :   (183,    0,    0,          True,          32),
                      },
            "linux-gnux32" : {
                        "x86_64":     (62,     0,    0,          True,          32),
                      },
            "linux-muslx32" : {
                        "x86_64":     (62,     0,    0,          True,          32),
                      },
            "linux-gnun32" : {
                        "mips64":       ( 8,   0,    0,          False,         32),
                        "mips64el":     ( 8,   0,    0,          True,          32),
                        "mipsisa64r6":  ( 8,   0,    0,          False,         32),
                        "mipsisa64r6el":( 8,   0,    0,          True,          32),
                      },
        }

    # Add in any extra user supplied data which may come from a BSP layer, removing the
    # need to always change this class directly
    # (Fixed: the 'or None' previously sat *inside* the getVar() call, where it
    # was a confusing no-op applied to the literal variable name.)
    extra_machdata = (d and d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS") or "").split()
    for m in extra_machdata:
        call = m + "(machdata, d)"
        locs = { "machdata" : machdata, "d" : d}
        machdata = bb.utils.better_eval(call, locs)

    return machdata

View File

@@ -0,0 +1,34 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
import re
def map_arch(a):
    """Map an OE TARGET_ARCH value to the corresponding Go GOARCH name.

    Returns '' for architectures Go does not support."""
    # Architectures with a one-to-one name mapping.
    exact = {
        'x86_64': 'amd64',
        'mips': 'mips',
        'mipsel': 'mipsle',
        'riscv64': 'riscv64',
        'loongarch64': 'loong64',
    }
    if a in exact:
        return exact[a]
    # Prefix patterns, in priority order (e.g. mips64el before mips64).
    patterns = (
        (r'i.86', '386'),
        (r'arm', 'arm'),
        (r'aarch64', 'arm64'),
        (r'mips64el', 'mips64le'),
        (r'mips64', 'mips64'),
        (r'p(pc|owerpc)64le', 'ppc64le'),
        (r'p(pc|owerpc)64', 'ppc64'),
    )
    for pattern, goarch in patterns:
        if re.match(pattern, a):
            return goarch
    return ''

View File

@@ -0,0 +1,160 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Helper module for GPG signing"""
import bb
import os
import shlex
import subprocess
import tempfile
class LocalSigner(object):
    """Class for handling local (on the build host) signing"""

    def __init__(self, d):
        # Prefer an explicitly configured binary, fall back to PATH lookup.
        self.gpg_bin = d.getVar('GPG_BIN') or \
                  bb.utils.which(os.getenv('PATH'), 'gpg')
        self.gpg_cmd = [self.gpg_bin]
        self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent")
        # Without this we see "Cannot allocate memory" errors when running processes in parallel
        # It needs to be set for any gpg command since any agent launched can stick around in memory
        # and this parameter must be set.
        if self.gpg_agent_bin:
            self.gpg_cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)]
        self.gpg_path = d.getVar('GPG_PATH')
        self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
        self.gpg_version = self.get_gpg_version()

    def export_pubkey(self, output_file, keyid, armor=True):
        """Export GPG public key to a file"""
        cmd = self.gpg_cmd + ["--no-permission-warning", "--batch", "--yes", "--export", "-o", output_file]
        if self.gpg_path:
            cmd += ["--homedir", self.gpg_path]
        if armor:
            cmd += ["--armor"]
        cmd += [keyid]
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)

    def sign_rpms(self, files, keyid, passphrase, digest, sign_chunk, fsk=None, fsk_password=None):
        """Sign RPM files in chunks of sign_chunk files per rpmsign call."""
        cmd = self.rpm_bin + " --addsign --define '_gpg_name %s'  " % keyid
        gpg_args = '--no-permission-warning --batch --passphrase=%s --agent-program=%s|--auto-expand-secmem' % (passphrase, self.gpg_agent_bin)
        if self.gpg_version > (2,1,):
            gpg_args += ' --pinentry-mode=loopback'
        cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
        cmd += "--define '_binary_filedigest_algorithm %s' " % digest
        if self.gpg_bin:
            cmd += "--define '__gpg %s' " % self.gpg_bin
        if self.gpg_path:
            cmd += "--define '_gpg_path %s' " % self.gpg_path
        if fsk:
            cmd += "--signfiles --fskpath %s " % fsk
            if fsk_password:
                cmd += "--define '_file_signing_key_password %s' " % fsk_password

        # Sign in chunks
        for i in range(0, len(files), sign_chunk):
            subprocess.check_output(shlex.split(cmd + ' '.join(files[i:i+sign_chunk])), stderr=subprocess.STDOUT)

    def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True, output_suffix=None, use_sha256=False):
        """Create a detached signature of a file"""

        if passphrase_file and passphrase:
            # Fixed typo in the message ("of" -> "or").
            raise Exception("You should use either passphrase_file or passphrase, not both")

        cmd = self.gpg_cmd + ['--detach-sign', '--no-permission-warning', '--batch',
                              '--no-tty', '--yes', '--passphrase-fd', '0', '-u', keyid]

        if self.gpg_path:
            cmd += ['--homedir', self.gpg_path]
        if armor:
            cmd += ['--armor']
        if use_sha256:
            cmd += ['--digest-algo', "SHA256"]

        #gpg > 2.1 supports password pipes only through the loopback interface
        #gpg < 2.1 errors out if given unknown parameters
        if self.gpg_version > (2,1,):
            cmd += ['--pinentry-mode', 'loopback']

        try:
            if passphrase_file:
                with open(passphrase_file) as fobj:
                    passphrase = fobj.readline()

            if not output_suffix:
                output_suffix = 'asc' if armor else 'sig'
            output_file = input_file + "." + output_suffix
            # Sign into a temporary file first so a failed run never leaves a
            # partial signature at the final path.
            with tempfile.TemporaryDirectory(dir=os.path.dirname(output_file)) as tmp_dir:
                tmp_file = os.path.join(tmp_dir, os.path.basename(output_file))
                cmd += ['-o', tmp_file]

                cmd += [input_file]

                job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
                (_, stderr) = job.communicate(passphrase.encode("utf-8"))

                if job.returncode:
                    bb.fatal("GPG exited with code %d: %s" % (job.returncode, stderr.decode("utf-8")))

                os.rename(tmp_file, output_file)
        except IOError as e:
            bb.error("IO error (%s): %s" % (e.errno, e.strerror))
            raise Exception("Failed to sign '%s'" % input_file)
        except OSError as e:
            bb.error("OS error (%s): %s" % (e.errno, e.strerror))
            # Fixed missing closing quote in the message.
            raise Exception("Failed to sign '%s'" % input_file)

    def get_gpg_version(self):
        """Return the gpg version as a tuple of ints"""
        try:
            cmd = self.gpg_cmd + ["--version", "--no-permission-warning"]
            ver_str = subprocess.check_output(cmd).split()[2].decode("utf-8")
            # Drop any "-beta" style suffix before splitting on dots.
            return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
        except subprocess.CalledProcessError as e:
            bb.fatal("Could not get gpg version: %s" % e)

    def verify(self, sig_file, valid_sigs = ''):
        """Verify signature; if valid_sigs is given, at least one GOODSIG key
        must be in that (whitespace-separated) list."""
        cmd = self.gpg_cmd + ["--verify", "--no-permission-warning", "--status-fd", "1"]
        if self.gpg_path:
            cmd += ["--homedir", self.gpg_path]

        cmd += [sig_file]
        status = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Valid if any key matches if unspecified
        if not valid_sigs:
            ret = False if status.returncode else True
            return ret

        import re
        goodsigs = []
        sigre = re.compile(r'^\[GNUPG:\] GOODSIG (\S+)\s(.*)$')
        for l in status.stdout.decode("utf-8").splitlines():
            s = sigre.match(l)
            if s:
                goodsigs += [s.group(1)]

        for sig in valid_sigs.split():
            if sig in goodsigs:
                return True
        if len(goodsigs):
            bb.warn('No accepted signatures found. Good signatures found: %s.' % ' '.join(goodsigs))
        return False
def get_signer(d, backend):
    """Get signer object for the specified backend"""
    # Use local signing by default
    if backend != 'local':
        bb.fatal("Unsupported signing backend '%s'" % backend)
    return LocalSigner(d)

View File

@@ -0,0 +1,261 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Code for parsing OpenEmbedded license strings"""
import ast
import re
from fnmatch import fnmatchcase as fnmatch
def license_ok(license, dont_want_licenses):
    """ Return False if License exist in dont_want_licenses else True """
    # fnmatch allows the unwanted list to contain glob patterns like 'GPL*'.
    return not any(fnmatch(license, dwl) for dwl in dont_want_licenses)
def obsolete_license_list():
    """Return the old-style license names that have been superseded by
    proper SPDX identifiers."""
    obsolete = []
    obsolete += ["AGPL-3", "AGPL-3+", "AGPLv3", "AGPLv3+", "AGPLv3.0", "AGPLv3.0+", "AGPL-3.0", "AGPL-3.0+", "BSD-0-Clause"]
    obsolete += ["GPL-1", "GPL-1+", "GPLv1", "GPLv1+", "GPLv1.0", "GPLv1.0+", "GPL-1.0", "GPL-1.0+", "GPL-2", "GPL-2+", "GPLv2"]
    obsolete += ["GPLv2+", "GPLv2.0", "GPLv2.0+", "GPL-2.0", "GPL-2.0+", "GPL-3", "GPL-3+", "GPLv3", "GPLv3+", "GPLv3.0", "GPLv3.0+"]
    obsolete += ["GPL-3.0", "GPL-3.0+", "LGPLv2", "LGPLv2+", "LGPLv2.0", "LGPLv2.0+", "LGPL-2.0", "LGPL-2.0+", "LGPL2.1", "LGPL2.1+"]
    obsolete += ["LGPLv2.1", "LGPLv2.1+", "LGPL-2.1", "LGPL-2.1+", "LGPLv3", "LGPLv3+", "LGPL-3.0", "LGPL-3.0+", "MPL-1", "MPLv1"]
    obsolete += ["MPLv1.1", "MPLv2", "MIT-X", "MIT-style", "openssl", "PSF", "PSFv2", "Python-2", "Apachev2", "Apache-2", "Artisticv1"]
    obsolete += ["Artistic-1", "AFL-2", "AFL-1", "AFLv2", "AFLv1", "CDDLv1", "CDDL-1", "EPLv1.0", "FreeType", "Nauman"]
    obsolete += ["tcl", "vim", "SGIv1"]
    return obsolete
class LicenseError(Exception):
    """Base class for all license-parsing errors raised by this module."""
    pass
class LicenseSyntaxError(LicenseError):
    """Raised when a license string cannot be parsed as an expression."""
    def __init__(self, licensestr, exc):
        super().__init__()
        # Keep both the offending string and the underlying parser error.
        self.licensestr = licensestr
        self.exc = exc

    def __str__(self):
        return "error in '%s': %s" % (self.licensestr, self.exc)
class InvalidLicense(LicenseError):
    """Raised when a license token contains characters outside the
    allowed set."""
    def __init__(self, license):
        super().__init__()
        self.license = license

    def __str__(self):
        return "invalid characters in license '%s'" % self.license
# Characters that act as operators/separators in a license expression.
license_operator_chars = '&|() '
# Split pattern that keeps the operator tokens in the resulting list.
license_operator = re.compile(r'([' + license_operator_chars + '])')
# A bare license name: alphanumerics plus '.', '+', '_', '-' (whole token).
license_pattern = re.compile(r'[a-zA-Z0-9.+_\-]+$')
class LicenseVisitor(ast.NodeVisitor):
    """Get elements based on OpenEmbedded license strings"""
    def get_elements(self, licensestr):
        # Tokenize the license string: license names get quoted so the token
        # stream parses as a Python expression, and an implicit '&' is
        # inserted between two adjacent license names.
        new_elements = []
        elements = list([x for x in license_operator.split(licensestr) if x.strip()])
        for pos, element in enumerate(elements):
            if license_pattern.match(element):
                if pos > 0 and license_pattern.match(elements[pos-1]):
                    new_elements.append('&')
                element = '"' + element + '"'
            elif not license_operator.match(element):
                raise InvalidLicense(element)
            new_elements.append(element)
        return new_elements

    def visit_elements(self, elements):
        """Syntax tree visitor which can accept elements previously generated with
        OpenEmbedded license string"""
        # (These descriptions were previously bare strings floating between
        # the methods; they are now attached as proper docstrings.)
        self.visit(ast.parse(' '.join(elements)))

    def visit_string(self, licensestr):
        """Syntax tree visitor which can accept OpenEmbedded license strings"""
        self.visit_elements(self.get_elements(licensestr))
class FlattenVisitor(LicenseVisitor):
    """Flatten a license tree (parsed from a string) by selecting one of each
    set of OR options, in the way the user specifies"""
    def __init__(self, choose_licenses):
        # choose_licenses: callable(left_list, right_list) -> chosen list,
        # invoked for every '|' node in the parsed expression.
        self.choose_licenses = choose_licenses
        self.licenses = []
        LicenseVisitor.__init__(self)
    # visit_Str handles legacy ast.Str nodes; visit_Constant handles the
    # string constants produced by current Python versions.
    def visit_Str(self, node):
        self.licenses.append(node.s)
    def visit_Constant(self, node):
        self.licenses.append(node.value)
    def visit_BinOp(self, node):
        # '|' marks alternative licenses: flatten both sides independently
        # and let the user-supplied callback pick; '&' simply descends.
        if isinstance(node.op, ast.BitOr):
            left = FlattenVisitor(self.choose_licenses)
            left.visit(node.left)
            right = FlattenVisitor(self.choose_licenses)
            right.visit(node.right)
            selected = self.choose_licenses(left.licenses, right.licenses)
            self.licenses.extend(selected)
        else:
            self.generic_visit(node)
def flattened_licenses(licensestr, choose_licenses):
    """Given a license string and choose_licenses function, return a flat list of licenses"""
    visitor = FlattenVisitor(choose_licenses)
    try:
        visitor.visit_string(licensestr)
    except SyntaxError as err:
        raise LicenseSyntaxError(licensestr, err)
    return visitor.licenses
def is_included(licensestr, include_licenses=None, exclude_licenses=None):
    """Given a license string, a list of licenses to include and a list of
    licenses to exclude, determine if the license string matches the include
    list and does not match the exclude list.

    Returns a tuple holding the boolean state and a list of the applicable
    licenses that were excluded if state is False, or the licenses that were
    included if the state is True."""
    if not include_licenses:
        include_licenses = ['*']
    if not exclude_licenses:
        exclude_licenses = []

    def include_license(lic):
        return any(fnmatch(lic, pattern) for pattern in include_licenses)

    def exclude_license(lic):
        return any(fnmatch(lic, pattern) for pattern in exclude_licenses)

    def choose_licenses(alpha, beta):
        """Select the option in an OR which is the 'best' (has the most
        included licenses and no excluded licenses)."""
        # The penalty 1000 is arbitrary, just expected to be much larger
        # than the number of licenses actually specified: any excluded
        # license makes the weight negative, otherwise the option with the
        # most included licenses wins.
        def weight(lics):
            score = sum(1 for lic in lics if include_license(lic))
            if any(exclude_license(lic) for lic in lics):
                score -= 1000
            return score

        return alpha if weight(alpha) >= weight(beta) else beta

    licenses = flattened_licenses(licensestr, choose_licenses)
    excluded = [lic for lic in licenses if exclude_license(lic)]
    if excluded:
        return False, excluded
    return True, [lic for lic in licenses if include_license(lic)]
class ManifestVisitor(LicenseVisitor):
    """Walk license tree (parsed from a string) removing the incompatible
    licenses specified"""
    def __init__(self, dont_want_licenses, canonical_license, d):
        self._dont_want_licenses = dont_want_licenses
        self._canonical_license = canonical_license
        self._d = d
        # Operator/bracket tokens seen since the last accepted license; they
        # are only flushed into the output when a kept license follows.
        self._operators = []
        self.licenses = []
        self.licensestr = ''
        LicenseVisitor.__init__(self)
    def visit(self, node):
        # The caller rewrites '(' / ')' to '[' / ']' so groupings arrive as
        # ast.List nodes ('[') and their ast.Load context closes them (']').
        if isinstance(node, ast.Str):
            lic = node.s
            if license_ok(self._canonical_license(self._d, lic),
                    self._dont_want_licenses) == True:
                # Flush pending operators: brackets are emitted as-is, while
                # consecutive '&'/'|' collapse to the most recent one so that
                # operators whose operand was dropped do not accumulate.
                if self._operators:
                    ops = []
                    for op in self._operators:
                        if op == '[':
                            ops.append(op)
                        elif op == ']':
                            ops.append(op)
                        else:
                            if not ops:
                                ops.append(op)
                            elif ops[-1] in ['[', ']']:
                                ops.append(op)
                            else:
                                ops[-1] = op
                    for op in ops:
                        if op == '[' or op == ']':
                            self.licensestr += op
                        elif self.licenses:
                            # Only emit a binary operator if something was
                            # already written before it.
                            self.licensestr += ' ' + op + ' '
                    self._operators = []
                self.licensestr += lic
                self.licenses.append(lic)
        elif isinstance(node, ast.BitAnd):
            self._operators.append("&")
        elif isinstance(node, ast.BitOr):
            self._operators.append("|")
        elif isinstance(node, ast.List):
            self._operators.append("[")
        elif isinstance(node, ast.Load):
            # Closing bracket goes straight to the output string.
            self.licensestr += "]"
        self.generic_visit(node)
def manifest_licenses(licensestr, dont_want_licenses, canonical_license, d):
    """Given a license string and dont_want_licenses list,
    return license string filtered and a list of licenses"""
    visitor = ManifestVisitor(dont_want_licenses, canonical_license, d)
    try:
        # The visitor handles groupings as ast List/Load nodes, so swap
        # parentheses for brackets before parsing (and back afterwards).
        tokens = []
        for element in visitor.get_elements(licensestr):
            if element == '(':
                tokens.append('[')
            elif element == ')':
                tokens.append(']')
            else:
                tokens.append(element)
        visitor.visit_elements(tokens)
    except SyntaxError as err:
        raise LicenseSyntaxError(licensestr, err)
    visitor.licensestr = visitor.licensestr.replace('[', '(').replace(']', ')')
    return (visitor.licensestr, visitor.licenses)
class ListVisitor(LicenseVisitor):
    """Record all different licenses found in the license string"""
    def __init__(self):
        # Collected license names; a set, so deduplicated and unordered.
        self.licenses = set()
    # visit_Str covers legacy ast.Str nodes; visit_Constant covers the
    # string constants produced by current Python versions.
    def visit_Str(self, node):
        self.licenses.add(node.s)
    def visit_Constant(self, node):
        self.licenses.add(node.value)
def list_licenses(licensestr):
    """Simply get a list of all licenses mentioned in a license string.
    Binary operators are not applied or taken into account in any way"""
    collector = ListVisitor()
    try:
        collector.visit_string(licensestr)
    except SyntaxError as err:
        raise LicenseSyntaxError(licensestr, err)
    return collector.licenses
def apply_pkg_license_exception(pkg, bad_licenses, exceptions):
    """Return remaining bad licenses after removing any package exceptions"""
    remaining = []
    for lic in bad_licenses:
        # An exception entry has the form "<pkg>:<license>".
        if "%s:%s" % (pkg, lic) not in exceptions:
            remaining.append(lic)
    return remaining

View File

@@ -0,0 +1,123 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
def get_os_release():
    """Get all key-value pairs from /etc/os-release as a dict"""
    from collections import OrderedDict
    data = OrderedDict()
    if os.path.exists('/etc/os-release'):
        with open('/etc/os-release') as f:
            for line in f:
                stripped = line.rstrip()
                # Ignore lines without a KEY=value separator.
                if '=' not in stripped:
                    continue
                key, val = stripped.split('=', 1)
                data[key.strip()] = val.strip('"')
    return data
def release_dict_osr():
    """ Populate a dict with pertinent values from /etc/os-release """
    os_release = get_os_release()
    data = {}
    for src, dst in (('ID', 'DISTRIB_ID'), ('VERSION_ID', 'DISTRIB_RELEASE')):
        if src in os_release:
            data[dst] = os_release[src]
    return data
def release_dict_lsb():
    """ Return the output of lsb_release -ir as a dictionary """
    from subprocess import PIPE

    try:
        output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
    except bb.process.CmdError:
        return {}

    lsb_map = { 'Distributor ID': 'DISTRIB_ID',
                'Release': 'DISTRIB_RELEASE'}

    data = {}
    for line in output.splitlines():
        # Some lsb_release versions emit a stray "-e " prefix; strip it.
        if line.startswith("-e"):
            line = line[3:]
        fields = line.split(":\t", 1)
        if len(fields) != 2:
            continue
        key, value = fields
        if key in lsb_map:
            data[lsb_map[key]] = value

    # Both the ID and the release are required for the result to be usable.
    if len(data) != 2:
        return None

    return data
def release_dict_file():
    """ Try to gather release information manually when other methods fail """
    data = {}
    try:
        if os.path.exists('/etc/lsb-release'):
            data = {}
            with open('/etc/lsb-release') as f:
                for line in f:
                    key, value = line.split("=", 1)
                    data[key] = value.strip()
        elif os.path.exists('/etc/redhat-release'):
            with open('/etc/redhat-release') as f:
                distro = f.readline().strip()
            import re
            match = re.match(r'(.*) release (.*) \((.*)\)', distro)
            data = {}
            if match:
                data['DISTRIB_ID'] = match.group(1)
                data['DISTRIB_RELEASE'] = match.group(2)
        elif os.path.exists('/etc/SuSE-release'):
            data = {'DISTRIB_ID': 'SUSE LINUX'}
            with open('/etc/SuSE-release') as f:
                for line in f:
                    if line.startswith('VERSION = '):
                        data['DISTRIB_RELEASE'] = line[10:].rstrip()
                        break
    except IOError:
        return {}
    return data
def distro_identifier(adjust_hook=None):
    """Return a distro identifier string based upon lsb_release -ri,
    with optional adjustment via a hook"""
    import re

    # Preference order: /etc/os-release, `lsb_release -ir`, then the various
    # per-distro release files.
    distro_data = (release_dict_osr()
                   or release_dict_lsb()
                   or release_dict_file())

    distro_id = distro_data.get('DISTRIB_ID', '')
    release = distro_data.get('DISTRIB_RELEASE', '')

    if adjust_hook:
        distro_id, release = adjust_hook(distro_id, release)
    if not distro_id:
        return "unknown"
    # Filter out any non-alphanumerics and convert to lowercase
    distro_id = re.sub(r'\W', '', distro_id).lower()

    id_str = '{0}-{1}'.format(distro_id, release) if release else distro_id
    return id_str.replace(' ','-').replace('/','-')

View File

@@ -0,0 +1,107 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""OpenEmbedded variable typing support
Types are defined in the metadata by name, using the 'type' flag on a
variable. Other flags may be utilized in the construction of the types. See
the arguments of the type's factory for details.
"""
import inspect
import oe.types as types
from collections.abc import Callable
# Registry mapping type name -> factory callable, populated by register().
available_types = {}
class MissingFlag(TypeError):
    """A particular flag is required to construct the type, but has not been
    provided."""
    def __init__(self, flag, type):
        super().__init__()
        self.flag = flag
        self.type = type

    def __str__(self):
        return "Type '%s' requires flag '%s'" % (self.type, self.flag)
def factory(var_type):
    """Return the factory for a specified type."""
    if var_type is None:
        raise TypeError("No type specified. Valid types: %s" %
                        ', '.join(available_types))
    if var_type in available_types:
        return available_types[var_type]
    raise TypeError("Invalid type '%s':\n Valid types: %s" %
                    (var_type, ', '.join(available_types)))
def create(value, var_type, **flags):
    """Create an object of the specified type, given the specified flags and
    string value."""
    obj = factory(var_type)
    objflags = {}
    for flag in obj.flags:
        if flag in flags:
            objflags[flag] = flags[flag]
        elif flag not in obj.optflags:
            # A required flag is absent.
            raise MissingFlag(flag, var_type)

    return obj(value, **objflags)
def get_callable_args(obj):
    """Grab all but the first argument of the specified callable, returning
    the list, as well as a list of which of the arguments have default
    values."""
    # For a class, inspect its constructor.
    if type(obj) is type:
        obj = obj.__init__

    params = inspect.signature(obj).parameters
    args = list(params)
    defaults = [n for n, p in params.items()
                if p.default != inspect.Parameter.empty]

    flaglist = []
    if args:
        if len(args) > 1 and args[0] == 'self':
            args = args[1:]
        flaglist.extend(args)

    optional = set()
    if defaults:
        # Defaulted parameters are always the trailing ones.
        optional |= set(flaglist[-len(defaults):])
    return flaglist, optional
def factory_setup(name, obj):
    """Prepare a factory for use: attach its flag metadata (flags/optflags)
    derived from its call signature, and a default name."""
    args, optional = get_callable_args(obj)
    # The first argument is the value itself; the rest are flags.
    extra_args = args[1:]
    if extra_args:
        # (Fixed: the original also reassigned 'optional' to itself here in
        # a confusing tuple assignment.)
        obj.flags = extra_args
        obj.optflags = set(optional)
    else:
        obj.flags = obj.optflags = ()

    if not hasattr(obj, 'name'):
        obj.name = name
def register(name, factory):
    """Register a type, given its name and a factory callable.

    Determines the required and optional flags from the factory's
    arguments."""
    factory_setup(name, factory)
    # factory_setup only sets .name when absent, so a factory may carry its
    # own name; index the registry by that, not by the argument.
    available_types[factory.name] = factory
# Register all our included types
for name in dir(types):
    # Skip private attributes of the oe.types module.
    if name.startswith('_'):
        continue
    obj = getattr(types, name)
    # Only callables can act as type factories; skip plain data attributes.
    if not isinstance(obj, Callable):
        continue
    register(name, obj)

View File

@@ -0,0 +1,206 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from abc import ABCMeta, abstractmethod
import os
import re
import bb
class Manifest(object, metaclass=ABCMeta):
    """
    This is an abstract class. Do not instantiate this directly.
    """
    # Package-type tags used in the "<type>,<pkg>" manifest format below.
    PKG_TYPE_MUST_INSTALL = "mip"
    PKG_TYPE_MULTILIB = "mlp"
    PKG_TYPE_LANGUAGE = "lgp"
    PKG_TYPE_ATTEMPT_ONLY = "aop"
    # Manifest flavours; each reads its package lists from different vars.
    MANIFEST_TYPE_IMAGE = "image"
    MANIFEST_TYPE_SDK_HOST = "sdk_host"
    MANIFEST_TYPE_SDK_TARGET = "sdk_target"
    # Per manifest type: which BitBake variable supplies the packages for
    # each package-type tag.
    var_maps = {
        MANIFEST_TYPE_IMAGE: {
            "PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL,
            "PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY,
            "LINGUAS_INSTALL": PKG_TYPE_LANGUAGE
        },
        MANIFEST_TYPE_SDK_HOST: {
            "TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL,
            "TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
        },
        MANIFEST_TYPE_SDK_TARGET: {
            "TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL,
            "TOOLCHAIN_TARGET_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
        }
    }
    # Order in which package types are handed to the package manager.
    INSTALL_ORDER = [
        PKG_TYPE_LANGUAGE,
        PKG_TYPE_MUST_INSTALL,
        PKG_TYPE_ATTEMPT_ONLY,
        PKG_TYPE_MULTILIB
    ]
    initial_manifest_file_header = \
        "# This file was generated automatically and contains the packages\n" \
        "# passed on to the package manager in order to create the rootfs.\n\n" \
        "# Format:\n" \
        "# <package_type>,<package_name>\n" \
        "# where:\n" \
        "# <package_type> can be:\n" \
        "# 'mip' = must install package\n" \
        "# 'aop' = attempt only package\n" \
        "# 'mlp' = multilib package\n" \
        "# 'lgp' = language package\n\n"
    def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE):
        self.d = d
        self.manifest_type = manifest_type
        # Default location: SDK_DIR for SDK manifests, WORKDIR for image ones.
        if manifest_dir is None:
            if manifest_type != self.MANIFEST_TYPE_IMAGE:
                self.manifest_dir = self.d.getVar('SDK_DIR')
            else:
                self.manifest_dir = self.d.getVar('WORKDIR')
        else:
            self.manifest_dir = manifest_dir
        bb.utils.mkdirhier(self.manifest_dir)
        self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type)
        self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type)
        self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type)
        # packages in the following vars will be split in 'must install' and
        # 'multilib'
        self.vars_to_split = ["PACKAGE_INSTALL",
                              "TOOLCHAIN_HOST_TASK",
                              "TOOLCHAIN_TARGET_TASK"]
    # NOTE(review): the bare strings preceding several methods below are
    # class-level statements, not docstrings of those methods; kept as-is.
    """
    This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk).
    This will be used for testing until the class is implemented properly!
    """
    def _create_dummy_initial(self):
        image_rootfs = self.d.getVar('IMAGE_ROOTFS')
        pkg_list = dict()
        # NOTE(review): find() > 0 misses a match at offset 0; presumably
        # IMAGE_ROOTFS always carries a leading path so this never matters.
        if image_rootfs.find("core-image-sato-sdk") > 0:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
                "packagegroup-core-x11-sato-games packagegroup-base-extended " \
                "packagegroup-core-x11-sato packagegroup-core-x11-base " \
                "packagegroup-core-sdk packagegroup-core-tools-debug " \
                "packagegroup-core-boot packagegroup-core-tools-testapps " \
                "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \
                "apt packagegroup-core-tools-profile psplash " \
                "packagegroup-core-standalone-sdk-target " \
                "packagegroup-core-ssh-openssh dpkg kernel-dev"
            pkg_list[self.PKG_TYPE_LANGUAGE] = \
                "locale-base-en-us locale-base-en-gb"
        elif image_rootfs.find("core-image-sato") > 0:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
                "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \
                "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \
                "packagegroup-core-x11-sato packagegroup-core-boot"
            pkg_list['lgp'] = \
                "locale-base-en-us locale-base-en-gb"
        elif image_rootfs.find("core-image-minimal") > 0:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = "packagegroup-core-boot"
        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)
            for pkg_type in pkg_list:
                for pkg in pkg_list[pkg_type].split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))
    """
    This will create the initial manifest which will be used by Rootfs class to
    generate the rootfs
    """
    @abstractmethod
    def create_initial(self):
        pass
    """
    This creates the manifest after everything has been installed.
    """
    @abstractmethod
    def create_final(self):
        pass
    """
    This creates the manifest after the package in initial manifest has been
    dummy installed. It lists all *to be installed* packages. There is no real
    installation, just a test.
    """
    @abstractmethod
    def create_full(self, pm):
        pass
    """
    The following function parses an initial manifest and returns a dictionary
    object with the must install, attempt only, multilib and language packages.
    """
    def parse_initial_manifest(self):
        pkgs = dict()
        with open(self.initial_manifest) as manifest:
            for line in manifest.read().split('\n'):
                comment = re.match("^#.*", line)
                # Accept only known package-type tags.
                pattern = "^(%s|%s|%s|%s),(.*)$" % \
                    (self.PKG_TYPE_MUST_INSTALL,
                     self.PKG_TYPE_ATTEMPT_ONLY,
                     self.PKG_TYPE_MULTILIB,
                     self.PKG_TYPE_LANGUAGE)
                pkg = re.match(pattern, line)
                if comment is not None:
                    continue
                if pkg is not None:
                    pkg_type = pkg.group(1)
                    pkg_name = pkg.group(2)
                    if not pkg_type in pkgs:
                        pkgs[pkg_type] = [pkg_name]
                    else:
                        pkgs[pkg_type].append(pkg_name)
        return pkgs
    '''
    This following function parses a full manifest and return a list
    object with packages.
    '''
    def parse_full_manifest(self):
        installed_pkgs = list()
        if not os.path.exists(self.full_manifest):
            bb.note('full manifest not exist')
            return installed_pkgs
        with open(self.full_manifest, 'r') as manifest:
            for pkg in manifest.read().split('\n'):
                installed_pkgs.append(pkg.strip())
        return installed_pkgs
def create_manifest(d, final_manifest=False, manifest_dir=None,
                    manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
    """
    Instantiate the IMAGE_PKGTYPE-specific PkgManifest backend and write
    either the final manifest (final_manifest=True) or the initial one.
    """
    import importlib

    backend_module = 'oe.package_manager.' + d.getVar('IMAGE_PKGTYPE') + '.manifest'
    manifest = importlib.import_module(backend_module).PkgManifest(d, manifest_dir, manifest_type)

    if final_manifest:
        manifest.create_final()
    else:
        manifest.create_initial()
if __name__ == "__main__":
pass

View File

@@ -0,0 +1,175 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
import bb
import json
import os
import subprocess
# Characters that never need percent-encoding in npm registry URLs.
_ALWAYS_SAFE = frozenset('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                         'abcdefghijklmnopqrstuvwxyz'
                         '0123456789'
                         '_.-~()')

# Sentinel marking manifest keys that may simply be omitted when absent.
MISSING_OK = object()

# Upstream registry whose URL layout the local mirror mimics.
REGISTRY = "https://registry.npmjs.org"

# we can not use urllib.parse here because npm expects lowercase
# hex-chars but urllib generates uppercase ones
def uri_quote(s, safe = '/'):
    """Percent-encode *s* with lowercase hex, leaving _ALWAYS_SAFE and
    *safe* characters untouched."""
    allowed = _ALWAYS_SAFE.union(safe)
    return "".join(c if c in allowed else '%%%02x' % ord(c) for c in s)
class PackageJson:
    """Read-only accessor around a parsed package.json dictionary."""

    def __init__(self, spec):
        # spec: dict parsed from a package.json file
        self.__spec = spec

    @property
    def name(self):
        return self.__spec['name']

    @property
    def version(self):
        return self.__spec['version']

    @property
    def empty_manifest(self):
        """Skeleton registry manifest for this package with no versions yet."""
        return {
            'name': self.name,
            'description': self.__spec.get('description', ''),
            'versions': {},
        }

    def base_filename(self):
        """Filesystem-safe base name for this package's manifest files."""
        return uri_quote(self.name, safe = '@')

    def as_manifest_entry(self, tarball_uri):
        """Build the per-version registry manifest entry, pointing its
        'dist' tarball at *tarball_uri*.  Raises Exception when a required
        key ('name' or 'version') is missing from the spec."""
        res = {}

        ## NOTE: 'npm install' requires more than basic meta information;
        ## e.g. it takes 'bin' from this manifest entry but not the actual
        ## 'package.json'
        for (idx, dflt) in [('name', None),
                            ('description', ""),
                            ('version', None),
                            ('bin', MISSING_OK),
                            ('man', MISSING_OK),
                            ('scripts', MISSING_OK),
                            ('directories', MISSING_OK),
                            ('dependencies', MISSING_OK),
                            ('devDependencies', MISSING_OK),
                            ('optionalDependencies', MISSING_OK),
                            ('license', "unknown")]:
            if idx in self.__spec:
                res[idx] = self.__spec[idx]
            elif dflt is MISSING_OK:
                # MISSING_OK is an identity sentinel; compare with 'is',
                # not '==' (the old code relied on object equality falling
                # back to identity).
                pass
            elif dflt is not None:
                res[idx] = dflt
            else:
                raise Exception("%s-%s: missing key %s" % (self.name,
                                                           self.version,
                                                           idx))

        res['dist'] = {
            'tarball': tarball_uri,
        }

        return res
class ManifestImpl:
    """Load/save the JSON metadata file ("<base>.meta") for one npm package."""

    def __init__(self, base_fname, spec):
        # base_fname: path prefix of the metadata file; spec: PackageJson
        self.__base = base_fname
        self.__spec = spec

    @property
    def filename(self):
        """Path of the on-disk metadata file."""
        return self.__base + ".meta"

    def load(self):
        """Return the stored metadata; fall back to an empty manifest when
        the file cannot be opened or read."""
        try:
            with open(self.filename, "r") as f:
                return json.load(f)
        except IOError:
            return self.__spec.empty_manifest

    def save(self, meta):
        """Persist *meta* as pretty-printed JSON."""
        with open(self.filename, "w") as f:
            json.dump(meta, f, indent = 2)
class Manifest:
    """
    Context manager serializing access to one package's manifest: entering
    acquires "<base>.lock" and yields a ManifestImpl; leaving releases the
    lock again.
    """

    def __init__(self, base_fname, spec):
        self.__base = base_fname
        self.__spec = spec
        self.__lockf = None
        self.__impl = None

    def __enter__(self):
        lock_path = self.__base + ".lock"
        self.__lockf = bb.utils.lockfile(lock_path)
        self.__impl = ManifestImpl(self.__base, self.__spec)
        return self.__impl

    def __exit__(self, exc_type, exc_val, exc_tb):
        bb.utils.unlockfile(self.__lockf)
class NpmCache:
    """Thin wrapper around the 'oe-npm-cache' helper for one cache dir."""

    def __init__(self, cache):
        self.__cache = cache

    @property
    def path(self):
        """Directory holding the nodejs-style cache."""
        return self.__cache

    def run(self, type, key, fname):
        """Insert *fname* into the cache under *key* as entry kind *type*."""
        cmd = ['oe-npm-cache', self.__cache, type, key, fname]
        subprocess.run(cmd, check = True)
class NpmRegistry:
    """
    Maintain a local npm registry mirror: per-package manifest files under
    *path* plus a nodejs-compatible cache under *cache*, both fed through
    add_pkg().
    """

    def __init__(self, path, cache):
        self.__path = path
        self.__cache = NpmCache(cache + '/_cacache')
        bb.utils.mkdirhier(self.__path)
        bb.utils.mkdirhier(self.__cache.path)

    @staticmethod
    ## This function is critical and must match nodejs expectations
    def _meta_uri(spec):
        return REGISTRY + '/' + uri_quote(spec.name, safe = '@')

    @staticmethod
    ## Exact return value does not matter; just make it look like a
    ## usual registry url
    def _tarball_uri(spec):
        return '%s/%s/-/%s-%s.tgz' % (REGISTRY,
                                      uri_quote(spec.name, safe = '@'),
                                      uri_quote(spec.name, safe = '@/'),
                                      spec.version)

    def add_pkg(self, tarball, pkg_json):
        """Register *tarball* (with its parsed *pkg_json* metadata) in the
        manifest and in the nodejs cache."""
        pkg_json = PackageJson(pkg_json)
        base = os.path.join(self.__path, pkg_json.base_filename())

        with Manifest(base, pkg_json) as manifest:
            meta = manifest.load()
            tarball_uri = self._tarball_uri(pkg_json)
            meta['versions'][pkg_json.version] = pkg_json.as_manifest_entry(tarball_uri)
            manifest.save(meta)

            ## Cache entries are a little bit dependent on the nodejs
            ## version; version specific cache implementation must
            ## mitigate differences
            self.__cache.run('meta', self._meta_uri(pkg_json), manifest.filename)
            self.__cache.run('tgz', tarball_uri, tarball)

View File

@@ -0,0 +1,54 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
# This file contains common functions for overlayfs and its QA check
# this function is based on https://github.com/systemd/systemd/blob/main/src/basic/unit-name.c
def escapeSystemdUnitName(path):
    """
    Convert a filesystem path to a systemd unit name: strip surrounding
    slashes, turn '/' into '-', and hex-escape '-' and '\\' characters.
    """
    # systemd escapes '-' as \x2d and '\' as \x5c (its ASCII code).  The
    # previous table mapped '\' to "\x5d", which is ']' — not a backslash.
    escapeMap = {
        '/': '-',
        '-': "\\x2d",
        '\\': "\\x5c"
    }
    return "".join([escapeMap.get(c, c) for c in path.strip('/')])
def strForBash(s):
    """Double every backslash so *s* survives bash string interpretation."""
    return "\\\\".join(s.split("\\"))
def allOverlaysUnitName(d):
    """Name of the umbrella systemd service covering all overlay mounts."""
    suffix = '-overlays.service'
    return d.getVar('PN') + suffix
def mountUnitName(unit):
    """systemd .mount unit name for the mount path *unit*."""
    escaped = escapeSystemdUnitName(unit)
    return escaped + '.mount'
def helperUnitName(unit):
    """systemd service unit name for the upper-dir creation helper of *unit*."""
    escaped = escapeSystemdUnitName(unit)
    return escaped + '-create-upper-dir.service'
def unitFileList(d):
    """
    Build the list of systemd unit files required for the configured
    overlayfs setup: one .mount unit and one upper-dir helper service per
    writable path, plus the umbrella "-overlays" service.  Missing machine
    configuration is fatal.
    """
    unitList = []
    mountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")
    if not mountPoints:
        bb.fatal("A recipe uses overlayfs class but there is no OVERLAYFS_MOUNT_POINT set in your MACHINE configuration")

    # check that we have required mount points set first
    for requiredPoint in d.getVarFlags('OVERLAYFS_WRITABLE_PATHS'):
        if requiredPoint not in mountPoints:
            bb.fatal("Missing required mount point for OVERLAYFS_MOUNT_POINT[%s] in your MACHINE configuration" % requiredPoint)

    for mountPoint in mountPoints:
        writablePaths = d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint)
        if not writablePaths:
            # NOTE(review): bb.debug is given the flag name as an extra
            # positional argument rather than %-formatted into the message;
            # kept as-is to preserve the exact logging behaviour.
            bb.debug(1, "No mount points defined for %s flag, don't add to file list", mountPoint)
            continue
        for path in writablePaths.split():
            unitList.append(mountUnitName(path))
            unitList.append(helperUnitName(path))

    unitList.append(allOverlaysUnitName(d))
    return unitList

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,563 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from abc import ABCMeta, abstractmethod
import os
import glob
import subprocess
import shutil
import re
import collections
import bb
import tempfile
import oe.utils
import oe.path
import string
from oe.gpg_sign import get_signer
import hashlib
import fnmatch
# this can be used by all PM backends to create the index files in parallel
# this can be used by all PM backends to create the index files in parallel
def create_index(arg):
    """Run one index-creation shell command and log whatever it prints
    (worker function for oe.utils.multiprocess_launch)."""
    bb.note("Executing '%s' ..." % arg)
    output = subprocess.check_output(arg, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
    if output:
        bb.note(output)
def opkg_query(cmd_output):
    """
    Parse the output of the package manager (opkg info / dpkg-query style)
    into a dictionary keyed by package name.  Each value holds the fields
    "arch", "ver", "filename", "deps" (Depends plus Recommends, the latter
    tagged " [REC]"), "pkgarch" and "provs".  Used when packages are in
    deb or ipk format.
    """
    # strips version constraints such as " (>= 1.0)" from relations
    version_constraint = re.compile(r' \([=<>]* [^ )]*\)')

    packages = dict()
    name = ""
    arch = ""
    version = ""
    fname = ""
    pkg_arch = ""
    depends = []
    provides = []

    # the sentinel '' guarantees the final stanza gets flushed
    for raw in cmd_output.splitlines() + ['']:
        raw = raw.rstrip()
        if ':' in raw:
            if raw.startswith("Package: "):
                name = raw.split(": ")[1]
            elif raw.startswith("Architecture: "):
                arch = raw.split(": ")[1]
            elif raw.startswith("Version: "):
                version = raw.split(": ")[1]
            elif raw.startswith("File: ") or raw.startswith("Filename:"):
                fname = raw.split(": ")[1]
                if "/" in fname:
                    fname = os.path.basename(fname)
            elif raw.startswith("Depends: "):
                for entry in version_constraint.sub('', raw.split(": ")[1]).split(", "):
                    depends.append(entry)
            elif raw.startswith("Recommends: "):
                for entry in version_constraint.sub('', raw.split(": ")[1]).split(", "):
                    depends.append("%s [REC]" % entry)
            elif raw.startswith("PackageArch: "):
                pkg_arch = raw.split(": ")[1]
            elif raw.startswith("Provides: "):
                for entry in version_constraint.sub('', raw.split(": ")[1]).split(", "):
                    provides.append(entry)
        # When there is a blank line save the package information
        elif not raw:
            # IPK doesn't include the filename
            if not fname:
                fname = "%s_%s_%s.ipk" % (name, version, arch)
            if name:
                packages[name] = {"arch":arch, "ver":version,
                        "filename":fname, "deps": depends, "pkgarch":pkg_arch, "provs": provides}
            name = ""
            arch = ""
            version = ""
            fname = ""
            pkg_arch = ""
            depends = []
            provides = []

    return packages
def failed_postinsts_abort(pkgs, log_path):
    """
    Abort the build because the postinstall scriptlets of *pkgs* failed and
    were not deferred to first boot; *log_path* is the log file holding the
    failure details (the fatal message text is user-facing and kept verbatim).
    """
    bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
then please place them into pkg_postinst_ontarget:${PN} ().
Deferring to first boot via 'exit 1' is no longer supported.
Details of the failure are in %s.""" %(pkgs, log_path))
def generate_locale_archive(d, rootfs, target_arch, localedir):
    """
    Pack each locale subdirectory of *localedir* into a single
    locale-archive via cross-localedef.

    d           -- BitBake datastore (unused in the body; kept for API parity)
    rootfs      -- target rootfs path (unused in the body)
    target_arch -- TARGET_ARCH value, selects endianness/alignment flags
    localedir   -- directory containing one subdirectory per compiled locale

    Fatal when *target_arch* has no entry in the options table below.
    """
    # Pretty sure we don't need this for locale archive generation but
    # keeping it to be safe...
    locale_arch_options = { \
        "arc": ["--uint32-align=4", "--little-endian"],
        "arceb": ["--uint32-align=4", "--big-endian"],
        "arm": ["--uint32-align=4", "--little-endian"],
        "armeb": ["--uint32-align=4", "--big-endian"],
        "aarch64": ["--uint32-align=4", "--little-endian"],
        "aarch64_be": ["--uint32-align=4", "--big-endian"],
        "sh4": ["--uint32-align=4", "--big-endian"],
        "powerpc": ["--uint32-align=4", "--big-endian"],
        "powerpc64": ["--uint32-align=4", "--big-endian"],
        "powerpc64le": ["--uint32-align=4", "--little-endian"],
        "mips": ["--uint32-align=4", "--big-endian"],
        "mipsisa32r6": ["--uint32-align=4", "--big-endian"],
        "mips64": ["--uint32-align=4", "--big-endian"],
        "mipsisa64r6": ["--uint32-align=4", "--big-endian"],
        "mipsel": ["--uint32-align=4", "--little-endian"],
        "mipsisa32r6el": ["--uint32-align=4", "--little-endian"],
        "mips64el": ["--uint32-align=4", "--little-endian"],
        "mipsisa64r6el": ["--uint32-align=4", "--little-endian"],
        "riscv64": ["--uint32-align=4", "--little-endian"],
        "riscv32": ["--uint32-align=4", "--little-endian"],
        "i586": ["--uint32-align=4", "--little-endian"],
        "i686": ["--uint32-align=4", "--little-endian"],
        "x86_64": ["--uint32-align=4", "--little-endian"],
        "loongarch64": ["--uint32-align=4", "--little-endian"]
    }
    if target_arch in locale_arch_options:
        arch_options = locale_arch_options[target_arch]
    else:
        bb.error("locale_arch_options not found for target_arch=" + target_arch)
        bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")

    # Need to set this so cross-localedef knows where the archive is
    env = dict(os.environ)
    env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")

    for name in sorted(os.listdir(localedir)):
        path = os.path.join(localedir, name)
        if os.path.isdir(path):
            cmd = ["cross-localedef", "--verbose"]
            cmd += arch_options
            cmd += ["--add-to-archive", path]
            subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
class Indexer(object, metaclass=ABCMeta):
    """Abstract base for the per-backend package index writers."""

    def __init__(self, d, deploy_dir):
        # d: BitBake datastore; deploy_dir: feed directory to index
        self.d = d
        self.deploy_dir = deploy_dir

    @abstractmethod
    def write_index(self):
        """Generate the index files for self.deploy_dir."""
class PkgsList(object, metaclass=ABCMeta):
    """Abstract base for backends that enumerate packages in a rootfs."""

    def __init__(self, d, rootfs_dir):
        # d: BitBake datastore; rootfs_dir: root filesystem to inspect
        self.d = d
        self.rootfs_dir = rootfs_dir

    @abstractmethod
    def list_pkgs(self):
        """Return the installed-package information for self.rootfs_dir."""
class PackageManager(object, metaclass=ABCMeta):
    """
    This is an abstract class. Do not instantiate this directly.

    Shared machinery for the rpm/ipk/deb backends: postinstall-intercept
    handling, complementary package installation, locale archiving and
    deploy-directory locking.
    """
    def __init__(self, d, target_rootfs):
        # d: BitBake datastore; target_rootfs: rootfs this manager populates
        self.d = d
        self.target_rootfs = target_rootfs
        self.deploy_dir = None
        self.deploy_lock = None
        self._initialize_intercepts()

    def _initialize_intercepts(self):
        """Collect the postinstall intercept scripts into a per-rootfs dir."""
        bb.note("Initializing intercept dir for %s" % self.target_rootfs)
        # As there might be more than one instance of PackageManager operating at the same time
        # we need to isolate the intercept_scripts directories from each other,
        # hence the ugly hash digest in dir name.
        self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" %
                                           (hashlib.sha256(self.target_rootfs.encode()).hexdigest()))

        postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split()
        if not postinst_intercepts:
            postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH")
            if not postinst_intercepts_path:
                postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts")
            postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path)

        bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts))
        bb.utils.remove(self.intercepts_dir, True)
        bb.utils.mkdirhier(self.intercepts_dir)
        for intercept in postinst_intercepts:
            shutil.copy(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))

    @abstractmethod
    def _handle_intercept_failure(self, failed_script):
        # Backend-specific reaction to an intercept that must be deferred
        # (e.g. flag the affected packages for on-target configuration).
        pass

    def _postpone_to_first_boot(self, postinst_intercept_hook):
        """Defer an intercept hook: read its '##PKGS:' header line and pass
        the listed packages to the backend's failure handler."""
        with open(postinst_intercept_hook) as intercept:
            registered_pkgs = None
            for line in intercept.read().split("\n"):
                m = re.match(r"^##PKGS:(.*)", line)
                if m is not None:
                    registered_pkgs = m.group(1).strip()
                    break

        if registered_pkgs is not None:
            bb.note("If an image is being built, the postinstalls for the following packages "
                    "will be postponed for first boot: %s" %
                    registered_pkgs)

            # call the backend dependent handler
            self._handle_intercept_failure(registered_pkgs)

    def run_intercepts(self, populate_sdk=None):
        """
        Execute every collected intercept script against the rootfs.

        populate_sdk -- None for image builds, or 'host'/'target' when
        building an SDK.  On failure the outcome (fatal error, note, or
        postponement to first boot) depends on this mode and on whether the
        failure was caused by missing qemu usermode support.
        """
        intercepts_dir = self.intercepts_dir

        bb.note("Running intercept scripts:")
        # NOTE(review): mutates the process-global os.environ for the child
        # scripts; the values persist after this call returns.
        os.environ['D'] = self.target_rootfs
        os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
        for script in os.listdir(intercepts_dir):
            script_full = os.path.join(intercepts_dir, script)
            if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
                continue

            # we do not want to run any multilib variant of this
            if script.startswith("delay_to_first_boot"):
                self._postpone_to_first_boot(script_full)
                continue

            if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
                bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
                        % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                continue

            bb.note("> Executing %s intercept ..." % script)

            try:
                output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
                if output: bb.note(output.decode("utf-8"))
            except subprocess.CalledProcessError as e:
                bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
                if populate_sdk == 'host':
                    bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                elif populate_sdk == 'target':
                    if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
                        bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
                                % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                    else:
                        bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                else:
                    # image build: qemu-less failures fall back to first boot
                    if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
                        bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
                                % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                        self._postpone_to_first_boot(script_full)
                    else:
                        bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))

    @abstractmethod
    def update(self):
        """
        Update the package manager package database.
        """
        pass

    @abstractmethod
    def install(self, pkgs, attempt_only=False, hard_depends_only=False):
        """
        Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
        True, installation failures are ignored.
        """
        pass

    @abstractmethod
    def remove(self, pkgs, with_dependencies=True):
        """
        Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
        is False, then any dependencies are left in place.
        """
        pass

    @abstractmethod
    def write_index(self):
        """
        This function creates the index files
        """
        pass

    @abstractmethod
    def remove_packaging_data(self):
        # Remove the backend's on-disk package database from the rootfs.
        pass

    @abstractmethod
    def list_installed(self):
        # Return a dict of installed packages (the ipk/deb backends use the
        # shape produced by opkg_query).
        pass

    @abstractmethod
    def extract(self, pkg):
        """
        Returns the path to a tmpdir where resides the contents of a package.

        Deleting the tmpdir is responsibility of the caller.
        """
        pass

    @abstractmethod
    def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
        """
        Add remote package feeds into repository manager configuration. The parameters
        for the feeds are set by feed_uris, feed_base_paths and feed_archs.
        See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
        for their description.
        """
        pass

    def install_glob(self, globs, sdk=False):
        """
        Install all packages that match a glob.
        """
        # TODO don't have sdk here but have a property on the superclass
        # (and respect in install_complementary)
        if sdk:
            pkgdatadir = self.d.getVar("PKGDATA_DIR_SDK")
        else:
            pkgdatadir = self.d.getVar("PKGDATA_DIR")

        try:
            bb.note("Installing globbed packages...")
            cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
            bb.note('Running %s' % cmd)
            # NOTE(review): Popen never raises CalledProcessError, so the
            # except branch below looks unreachable as written — confirm
            # whether check_output was intended here.
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = proc.communicate()
            if stderr: bb.note(stderr.decode("utf-8"))
            pkgs = stdout.decode("utf-8")
            self.install(pkgs.split(), attempt_only=True)
        except subprocess.CalledProcessError as e:
            # Return code 1 means no packages matched
            if e.returncode != 1:
                bb.fatal("Could not compute globbed packages list. Command "
                         "'%s' returned %d:\n%s" %
                         (' '.join(cmd), e.returncode, e.output.decode("utf-8")))

    def install_complementary(self, globs=None):
        """
        Install complementary packages based upon the list of currently installed
        packages e.g. locales, *-dev, *-dbg, etc. Note: every backend needs to
        call this function explicitly after the normal package installation.
        """
        if globs is None:
            globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
            split_linguas = set()

            # IMAGE_LINGUAS entries are added both verbatim and with any
            # "-<region>" suffix stripped.
            for translation in self.d.getVar('IMAGE_LINGUAS').split():
                split_linguas.add(translation)
                split_linguas.add(translation.split('-')[0])

            split_linguas = sorted(split_linguas)

            for lang in split_linguas:
                globs += " *-locale-%s" % lang
                for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split():
                    globs += (" " + complementary_linguas) % lang

        if globs:
            # we need to write the list of installed packages to a file because the
            # oe-pkgdata-util reads it from a file
            with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
                pkgs = self.list_installed()

                provided_pkgs = set()
                for pkg in pkgs.values():
                    provided_pkgs |= set(pkg.get('provs', []))

                output = oe.utils.format_pkg_list(pkgs, "arch")
                installed_pkgs.write(output)
                installed_pkgs.flush()

                cmd = ["oe-pkgdata-util",
                       "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
                       globs]
                exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
                if exclude:
                    cmd.extend(['--exclude=' + '|'.join(exclude.split())])
                try:
                    bb.note('Running %s' % cmd)
                    # NOTE(review): same Popen/CalledProcessError mismatch as
                    # in install_glob — the except branch appears unreachable.
                    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    stdout, stderr = proc.communicate()
                    if stderr: bb.note(stderr.decode("utf-8"))
                    complementary_pkgs = stdout.decode("utf-8")
                    complementary_pkgs = set(complementary_pkgs.split())
                    # Anything already provided by an installed package is
                    # skipped rather than installed a second time.
                    skip_pkgs = sorted(complementary_pkgs & provided_pkgs)
                    install_pkgs = sorted(complementary_pkgs - provided_pkgs)
                    bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % (
                        ' '.join(install_pkgs),
                        ' '.join(skip_pkgs)))
                    self.install(install_pkgs, hard_depends_only=True)
                except subprocess.CalledProcessError as e:
                    bb.fatal("Could not compute complementary packages list. Command "
                             "'%s' returned %d:\n%s" %
                             (' '.join(cmd), e.returncode, e.output.decode("utf-8")))

        if self.d.getVar('IMAGE_LOCALES_ARCHIVE') == '1':
            target_arch = self.d.getVar('TARGET_ARCH')
            localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
            if os.path.exists(localedir) and os.listdir(localedir):
                generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
                # And now delete the binary locales
                self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)

    def deploy_dir_lock(self):
        """Acquire the deploy-directory lock; deploy_dir must be set first."""
        if self.deploy_dir is None:
            raise RuntimeError("deploy_dir is not set!")

        lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")

        self.deploy_lock = bb.utils.lockfile(lock_file_name)

    def deploy_dir_unlock(self):
        """Release the deploy-directory lock if held; no-op otherwise."""
        if self.deploy_lock is None:
            return

        bb.utils.unlockfile(self.deploy_lock)

        self.deploy_lock = None

    def construct_uris(self, uris, base_paths):
        """
        Construct URIs based on the following pattern: uri/base_path where 'uri'
        and 'base_path' correspond to each element of the corresponding array
        argument leading to len(uris) x len(base_paths) elements on the returned
        array
        """
        def _append(arr1, arr2, sep='/'):
            # join every arr1 element with every arr2 element, normalizing
            # the separator at the seam; arr1 is returned unchanged when
            # arr2 is empty
            res = []
            narr1 = [a.rstrip(sep) for a in arr1]
            narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
            for a1 in narr1:
                if arr2:
                    for a2 in narr2:
                        res.append("%s%s%s" % (a1, sep, a2))
                else:
                    res.append(a1)
            return res
        return _append(uris, base_paths)
def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies, include_self=False):
    """
    Go through our do_package_write_X dependencies and hardlink the packages we depend
    upon into the repo directory. This prevents us seeing other packages that may
    have been built that we don't depend upon and also packages for architectures we don't
    support.

    d                    -- BitBake datastore
    subrepo_dir          -- destination repo directory (recreated from scratch)
    deploydir            -- DEPLOY_DIR_* source containing the built packages
    taskname             -- package-write task to follow, e.g. "package_write_deb"
    filterbydependencies -- when False, symlink every arch dir instead of filtering
    include_self         -- also pick up this recipe's own package-write task
    """
    import errno

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    mytaskname = d.getVar("BB_RUNTASK")
    pn = d.getVar("PN")
    seendirs = set()
    multilibs = {}

    bb.utils.remove(subrepo_dir, recurse=True)
    bb.utils.mkdirhier(subrepo_dir)

    # Detect bitbake -b usage
    nodeps = d.getVar("BB_LIMITEDDEPS") or False

    if nodeps or not filterbydependencies:
        # No dependency data to filter with: expose every arch dir wholesale.
        for arch in d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").split() + d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").replace("-", "_").split():
            target = os.path.join(deploydir + "/" + arch)
            if os.path.exists(target):
                oe.path.symlink(target, subrepo_dir + "/" + arch, True)
        return

    # Locate our own task entry in BB_TASKDEPDATA as the traversal root.
    start = None
    for dep in taskdepdata:
        data = taskdepdata[dep]
        if data[1] == mytaskname and data[0] == pn:
            start = dep
            break
    if start is None:
        bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
    pkgdeps = set()
    start = [start]
    if include_self:
        seen = set()
    else:
        seen = set(start)
    # Support direct dependencies (do_rootfs -> do_package_write_X)
    # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X)
    while start:
        next = []
        for dep2 in start:
            for dep in taskdepdata[dep2][3]:
                if include_self or taskdepdata[dep][0] != pn:
                    if "do_" + taskname in dep:
                        pkgdeps.add(dep)
                elif dep not in seen:
                    # same-PN dependency: keep walking through it
                    next.append(dep)
                    seen.add(dep)
        start = next

    for dep in pkgdeps:
        c = taskdepdata[dep][0]
        manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
        if not manifest:
            bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
        if not os.path.exists(manifest):
            continue
        with open(manifest, "r") as f:
            for l in f:
                l = l.strip()
                deploydir = os.path.normpath(deploydir)
                if bb.data.inherits_class('packagefeed-stability', d):
                    dest = l.replace(deploydir + "-prediff", "")
                else:
                    dest = l.replace(deploydir, "")
                dest = subrepo_dir + dest
                if l.endswith("/"):
                    if dest not in seendirs:
                        bb.utils.mkdirhier(dest)
                        seendirs.add(dest)
                    continue
                # Try to hardlink the file, copy if that fails
                destdir = os.path.dirname(dest)
                if destdir not in seendirs:
                    bb.utils.mkdirhier(destdir)
                    seendirs.add(destdir)
                try:
                    os.link(l, dest)
                except OSError as err:
                    if err.errno == errno.EXDEV:
                        # cross-device link: fall back to a plain copy
                        bb.utils.copyfile(l, dest)
                    else:
                        raise
def generate_index_files(d):
    """
    Write package-feed index files for every backend enabled in
    PACKAGE_CLASSES whose deploy directory exists; a non-None result from
    an indexer is treated as an error message and is fatal.
    """
    from oe.package_manager.rpm import RpmSubdirIndexer
    from oe.package_manager.ipk import OpkgIndexer
    from oe.package_manager.deb import DpkgIndexer

    backends = {
        "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
        "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
        "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
    }

    result = None
    for backend in d.getVar('PACKAGE_CLASSES').replace("package_", "").split():
        if backend not in backends:
            continue

        indexer_cls, deploy_dir = backends[backend]
        if os.path.exists(deploy_dir):
            result = indexer_cls(d, deploy_dir).write_index()

            if result is not None:
                bb.fatal(result)

View File

@@ -0,0 +1,522 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import re
import subprocess
from oe.package_manager import *
class DpkgIndexer(Indexer):
    """Generate apt-ftparchive Packages/Release indexes for a deb feed."""

    def _create_configs(self):
        """Create the minimal apt configuration tree (preferences, empty
        sources.list, and an apt.conf derived from the staged sample)."""
        bb.utils.mkdirhier(self.apt_conf_dir)
        bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
        bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
        bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))

        # both files only need to exist; their content is irrelevant here
        with open(os.path.join(self.apt_conf_dir, "preferences"),
                "w") as prefs_file:
            pass
        with open(os.path.join(self.apt_conf_dir, "sources.list"),
                "w+") as sources_file:
            pass

        with open(self.apt_conf_file, "w") as apt_conf:
            with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
                "apt", "apt.conf.sample")) as apt_conf_sample:
                for line in apt_conf_sample.read().split("\n"):
                    # substitute the sample's placeholders with our paths
                    line = re.sub(r"#ROOTFS#", "/dev/null", line)
                    line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
                    apt_conf.write(line + "\n")

    def write_index(self):
        """Run apt-ftparchive per architecture directory (in parallel) and
        optionally detach-sign the Release files."""
        self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
                "apt-ftparchive")
        self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
        self._create_configs()

        os.environ['APT_CONFIG'] = self.apt_conf_file

        # build the arch list: PACKAGE_ARCHS, then SDK and multilib archs
        # that are not already present
        pkg_archs = self.d.getVar('PACKAGE_ARCHS')
        if pkg_archs is not None:
            arch_list = pkg_archs.split()
        sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
        if sdk_pkg_archs is not None:
            for a in sdk_pkg_archs.split():
                if a not in pkg_archs:
                    arch_list.append(a)

        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
        arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)

        apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
        gzip = bb.utils.which(os.getenv('PATH'), "gzip")

        index_cmds = []
        deb_dirs_found = False
        index_sign_files = set()
        for arch in arch_list:
            arch_dir = os.path.join(self.deploy_dir, arch)
            if not os.path.isdir(arch_dir):
                continue

            # PSEUDO_UNLOAD keeps pseudo from interfering with apt-ftparchive
            cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)

            cmd += "%s -fcn Packages > Packages.gz;" % gzip

            release_file = os.path.join(arch_dir, "Release")
            index_sign_files.add(release_file)

            with open(release_file, "w+") as release:
                release.write("Label: %s\n" % arch)

            cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive

            index_cmds.append(cmd)

            deb_dirs_found = True

        if not deb_dirs_found:
            bb.note("There are no packages in %s" % self.deploy_dir)
            return

        oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
        if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
            signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
        else:
            signer = None
        if signer:
            for f in index_sign_files:
                signer.detach_sign(f,
                                   self.d.getVar('PACKAGE_FEED_GPG_NAME'),
                                   self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
                                   output_suffix="gpg",
                                   use_sha256=True)
class PMPkgsList(PkgsList):
    """Enumerate packages installed in a dpkg-managed target rootfs."""

    def list_pkgs(self):
        """Query the rootfs dpkg database and return the opkg_query dict."""
        dpkg_query = bb.utils.which(os.getenv('PATH'), "dpkg-query")
        cmd = [dpkg_query,
               "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
               "-W",
               "-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\nProvides: ${Provides}\n\n"]

        try:
            cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))

        return opkg_query(cmd_output)
class OpkgDpkgPM(PackageManager):
    """Shared implementation pieces for the Opkg and Dpkg backends."""

    def __init__(self, d, target_rootfs):
        """
        This is an abstract class. Do not instantiate this directly.
        """
        super(OpkgDpkgPM, self).__init__(d, target_rootfs)

    def package_info(self, pkg, cmd):
        """
        Returns a dictionary with the package info.

        This method extracts the common parts for Opkg and Dpkg

        pkg -- package name (unused here; *cmd* is expected to query it)
        cmd -- full shell command string producing opkg/dpkg-style output
        """
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to list available packages. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
        return opkg_query(output)

    def extract(self, pkg, pkg_info):
        """
        Returns the path to a tmpdir where resides the contents of a package.

        Deleting the tmpdir is responsibility of the caller.

        This method extracts the common parts for Opkg and Dpkg
        """
        ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
        tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
        pkg_path = pkg_info[pkg]["filepath"]

        if not os.path.isfile(pkg_path):
            bb.fatal("Unable to extract package for '%s'."
                     "File %s doesn't exists" % (pkg, pkg_path))

        tmp_dir = tempfile.mkdtemp()
        current_dir = os.getcwd()
        # NOTE(review): chdir is only restored on the success path below;
        # bb.fatal exits the process on failure, but any other exception
        # would leave the cwd changed — confirm this is acceptable.
        os.chdir(tmp_dir)
        # NOTE(review): assumes the archive's data member is xz-compressed
        # (and control.tar is gzip'd, see cleanup below) — packages built
        # with other compressors would not extract. TODO confirm.
        data_tar = 'data.tar.xz'

        try:
            cmd = [ar_cmd, 'x', pkg_path]
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            cmd = [tar_cmd, 'xf', data_tar]
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.utils.remove(tmp_dir, recurse=True)
            bb.fatal("Unable to extract %s package. Command '%s' "
                     "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
        except OSError as e:
            bb.utils.remove(tmp_dir, recurse=True)
            bb.fatal("Unable to extract %s package. Command '%s' "
                     "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))

        bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
        bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
        bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
        os.chdir(current_dir)

        return tmp_dir

    def _handle_intercept_failure(self, registered_pkgs):
        # flag the packages as only "unpacked" in the backend's status
        # data so their configuration is retried later
        self.mark_packages("unpacked", registered_pkgs.split())
class DpkgPM(OpkgDpkgPM):
    """apt/dpkg backed PackageManager used to assemble a deb based rootfs.

    Drives the host apt-get/apt-cache/dpkg binaries against a private apt
    configuration directory (self.apt_conf_dir) so that packages from the
    local deb deploy directory can be installed into target_rootfs.
    """
    def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True):
        """
        d -- BitBake datastore
        target_rootfs -- directory the packages get installed into
        archs -- space separated list of supported package architectures
        base_archs -- architecture(s) written as apt's "Architecture"
        apt_conf_dir -- optional override for the generated apt config dir
        deb_repo_workdir -- WORKDIR subdirectory used as the local deb repo
        filterbydependencies -- restrict the local repo to the packages
            (and their dependencies) actually needed
        """
        super(DpkgPM, self).__init__(d, target_rootfs)
        self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir)

        create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies)

        if apt_conf_dir is None:
            self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
        else:
            self.apt_conf_dir = apt_conf_dir
        self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
        self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
        self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")

        self.apt_args = d.getVar("APT_ARGS")

        # All configured archs plus any multilib package archs, kept in
        # order and without duplicates.
        self.all_arch_list = archs.split()
        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
        self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)

        self._create_configs(archs, base_archs)

        self.indexer = DpkgIndexer(self.d, self.deploy_dir)

    def mark_packages(self, status_tag, packages=None):
        """
        This function will change a package's status in /var/lib/dpkg/status file.
        If 'packages' is None then the new_status will be applied to all
        packages
        """
        status_file = self.target_rootfs + "/var/lib/dpkg/status"

        with open(status_file, "r") as sf:
            with open(status_file + ".tmp", "w+") as tmp_sf:
                if packages is None:
                    # Append the tag to every package currently marked
                    # "unpacked" or "installed".
                    tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                                        r"Package: \1\n\2Status: \3%s" % status_tag,
                                        sf.read()))
                else:
                    if not isinstance(packages, list):
                        raise TypeError("'packages' should be a list object")

                    status = sf.read()
                    for pkg in packages:
                        status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
                                        r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
                                        status)

                    tmp_sf.write(status)

        bb.utils.rename(status_file + ".tmp", status_file)

    def run_pre_post_installs(self, package_name=None):
        """
        Run the pre/post installs for package "package_name". If package_name is
        None, then run all pre/post install scriptlets.
        """
        info_dir = self.target_rootfs + "/var/lib/dpkg/info"
        ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
        control_scripts = [
                ControlScript(".preinst", "Preinstall", "install"),
                ControlScript(".postinst", "Postinstall", "configure")]
        status_file = self.target_rootfs + "/var/lib/dpkg/status"
        installed_pkgs = []

        with open(status_file, "r") as status:
            for line in status.read().split('\n'):
                m = re.match(r"^Package: (.*)", line)
                if m is not None:
                    installed_pkgs.append(m.group(1))

        # NOTE(review): when package_name is given this only verifies it is
        # installed -- the loop below still runs the scriptlets of every
        # installed package, not just package_name's. Confirm intended.
        if package_name is not None and package_name not in installed_pkgs:
            return

        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['INTERCEPT_DIR'] = self.intercepts_dir
        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')

        for pkg_name in installed_pkgs:
            for control_script in control_scripts:
                p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
                if os.path.exists(p_full):
                    try:
                        bb.note("Executing %s for package: %s ..." %
                                 (control_script.name.lower(), pkg_name))
                        output = subprocess.check_output([p_full, control_script.argument],
                                stderr=subprocess.STDOUT).decode("utf-8")
                        bb.note(output)
                    except subprocess.CalledProcessError as e:
                        bb.warn("%s for package %s failed with %d:\n%s" %
                                (control_script.name, pkg_name, e.returncode,
                                 e.output.decode("utf-8")))
                        failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))

    def update(self):
        """Run 'apt-get update' against the local repo; abort on failure."""
        os.environ['APT_CONFIG'] = self.apt_conf_file

        self.deploy_dir_lock()

        cmd = "%s update" % self.apt_get_cmd

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            # Release the deploy directory lock before aborting, otherwise
            # bb.fatal() raises and leaves a stale lock behind (matches the
            # behaviour of OpkgPM.update()).
            self.deploy_dir_unlock()
            bb.fatal("Unable to update the package index files. Command '%s' "
                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))

        self.deploy_dir_unlock()

    def install(self, pkgs, attempt_only=False, hard_depends_only=False):
        """Install *pkgs* into the rootfs with apt-get.

        attempt_only -- installation failures are warnings, not fatal
        hard_depends_only -- skip recommended packages
        """
        if attempt_only and len(pkgs) == 0:
            return

        os.environ['APT_CONFIG'] = self.apt_conf_file

        extra_args = ""
        if hard_depends_only:
            extra_args = "--no-install-recommends"

        cmd = "%s %s install --allow-downgrades --allow-remove-essential --allow-change-held-packages --allow-unauthenticated --no-remove %s %s" % \
              (self.apt_get_cmd, self.apt_args, extra_args, ' '.join(pkgs))

        try:
            bb.note("Installing the following packages: %s" % ' '.join(pkgs))
            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
            bb.note(output.decode("utf-8"))
        except subprocess.CalledProcessError as e:
            report = bb.warn if attempt_only else bb.fatal
            report("Unable to install packages. "
                   "Command '%s' returned %d:\n%s" %
                   (cmd, e.returncode, e.output.decode("utf-8")))

        # rename *.dpkg-new files/dirs: dpkg leaves conffiles it could not
        # activate under their temporary name.
        for root, dirs, files in os.walk(self.target_rootfs):
            for dir in dirs:
                new_dir = re.sub(r"\.dpkg-new", "", dir)
                if dir != new_dir:
                    bb.utils.rename(os.path.join(root, dir),
                                    os.path.join(root, new_dir))

            for file in files:
                new_file = re.sub(r"\.dpkg-new", "", file)
                if file != new_file:
                    bb.utils.rename(os.path.join(root, file),
                                    os.path.join(root, new_file))

    def remove(self, pkgs, with_dependencies=True):
        """Remove *pkgs* from the rootfs.

        with_dependencies -- use 'apt-get purge' (resolves dependents);
        otherwise force-remove just the named packages with 'dpkg -P'.
        """
        if not pkgs:
            return

        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['INTERCEPT_DIR'] = self.intercepts_dir

        if with_dependencies:
            os.environ['APT_CONFIG'] = self.apt_conf_file
            cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
        else:
            cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
                  " -P --force-depends %s" % \
                  (bb.utils.which(os.getenv('PATH'), "dpkg"),
                   self.target_rootfs, self.target_rootfs, ' '.join(pkgs))

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to remove packages. Command '%s' "
                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))

    def write_index(self):
        """Regenerate the apt package index under the deploy dir lock."""
        self.deploy_dir_lock()

        result = self.indexer.write_index()

        self.deploy_dir_unlock()

        if result is not None:
            bb.fatal(result)

    def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
        """Write /etc/apt/sources.list entries for remote package feeds.

        No-op when no feed URIs are configured or the rootfs has no
        /etc/apt directory. When feed_archs is None, only architectures
        actually present in the deploy dir are listed.
        """
        if feed_uris == "":
            return

        sources_conf = os.path.join("%s/etc/apt/sources.list"
                                    % self.target_rootfs)
        if not os.path.exists(os.path.dirname(sources_conf)):
            return

        arch_list = []

        if feed_archs is None:
            for arch in self.all_arch_list:
                if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                    continue
                arch_list.append(arch)
        else:
            arch_list = feed_archs.split()

        feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())

        with open(sources_conf, "w+") as sources_file:
            for uri in feed_uris:
                if arch_list:
                    for arch in arch_list:
                        bb.note('Adding dpkg channel at (%s)' % uri)
                        sources_file.write("deb [trusted=yes] %s/%s ./\n" %
                                           (uri, arch))
                else:
                    bb.note('Adding dpkg channel at (%s)' % uri)
                    sources_file.write("deb [trusted=yes] %s ./\n" % uri)

    def _create_configs(self, archs, base_archs):
        """Generate the private apt configuration: 'preferences' (per-arch
        pinning plus PACKAGE_EXCLUDE blocking), 'sources.list' pointing at
        the local repo, 'apt.conf' from the staged sample, and an initial
        dpkg database skeleton inside the rootfs."""
        base_archs = re.sub(r"_", r"-", base_archs)

        if os.path.exists(self.apt_conf_dir):
            bb.utils.remove(self.apt_conf_dir, True)

        bb.utils.mkdirhier(self.apt_conf_dir)
        bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
        bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
        bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")

        arch_list = []
        for arch in self.all_arch_list:
            if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                continue
            arch_list.append(arch)

        with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
            # Later archs get a higher pin priority.
            priority = 801
            for arch in arch_list:
                prefs_file.write(
                    "Package: *\n"
                    "Pin: release l=%s\n"
                    "Pin-Priority: %d\n\n" % (arch, priority))

                priority += 5

            # Pin-Priority -1 prevents excluded packages from ever being
            # installed.
            pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
            for pkg in pkg_exclude.split():
                prefs_file.write(
                    "Package: %s\n"
                    "Pin: release *\n"
                    "Pin-Priority: -1\n\n" % pkg)

        arch_list.reverse()

        with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
            for arch in arch_list:
                sources_file.write("deb [trusted=yes] file:%s/ ./\n" %
                                   os.path.join(self.deploy_dir, arch))

        # Fold in the dpkg arch of every multilib variant.
        base_arch_list = base_archs.split()
        multilib_variants = self.d.getVar("MULTILIB_VARIANTS")
        for variant in multilib_variants.split():
            localdata = bb.data.createCopy(self.d)
            variant_tune = localdata.getVar("DEFAULTTUNE:virtclass-multilib-" + variant, False)
            localdata.setVar("DEFAULTTUNE", variant_tune)
            variant_arch = localdata.getVar("DPKG_ARCH")
            if variant_arch not in base_arch_list:
                base_arch_list.append(variant_arch)

        with open(self.apt_conf_file, "w+") as apt_conf:
            with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
                for line in apt_conf_sample.read().split("\n"):
                    match_arch = re.match(r" Architecture \".*\";$", line)
                    architectures = ""
                    if match_arch:
                        # Replace the sample's Architecture line with the
                        # full multilib-aware list plus the primary arch.
                        for base_arch in base_arch_list:
                            architectures += "\"%s\";" % base_arch
                        apt_conf.write(" Architectures {%s};\n" % architectures)
                        apt_conf.write(" Architecture \"%s\";\n" % base_archs)
                    else:
                        line = re.sub(r"#ROOTFS#", self.target_rootfs, line)
                        line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
                        apt_conf.write(line + "\n")

        target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
        bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
        bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))

        if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
            open(os.path.join(target_dpkg_dir, "status"), "w+").close()
        if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
            open(os.path.join(target_dpkg_dir, "available"), "w+").close()

    def remove_packaging_data(self):
        """Strip package manager state from the finished rootfs."""
        # NOTE(review): lower-case 'opkglibdir' is what this backend reads
        # here (unlike OpkgPM's 'OPKGLIBDIR') -- confirm the variable is set.
        bb.utils.remove(self.target_rootfs + self.d.getVar('opkglibdir'), True)
        bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)

    def fix_broken_dependencies(self):
        """Run 'apt-get -f install' to repair unmet dependencies."""
        os.environ['APT_CONFIG'] = self.apt_conf_file

        cmd = "%s %s --allow-unauthenticated -f install" % (self.apt_get_cmd, self.apt_args)

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot fix broken dependencies. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))

    def list_installed(self):
        """Return the dictionary of packages installed in the rootfs."""
        return PMPkgsList(self.d, self.target_rootfs).list_pkgs()

    def package_info(self, pkg):
        """
        Returns a dictionary with the package info.
        """
        cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
        pkg_info = super(DpkgPM, self).package_info(pkg, cmd)

        pkg_arch = pkg_info[pkg]["pkgarch"]
        pkg_filename = pkg_info[pkg]["filename"]
        pkg_info[pkg]["filepath"] = \
            os.path.join(self.deploy_dir, pkg_arch, pkg_filename)

        return pkg_info

    def extract(self, pkg):
        """
        Returns the path to a tmpdir where resides the contents of a package.

        Deleting the tmpdir is responsibility of the caller.
        """
        pkg_info = self.package_info(pkg)
        if not pkg_info:
            bb.fatal("Unable to get information for package '%s' while "
                     "trying to extract the package." % pkg)

        tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info)
        # NOTE(review): the base extract may already have removed the data
        # tarball; removing a non-existent path is harmless here.
        bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))

        return tmp_dir

View File

@@ -0,0 +1,28 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from oe.manifest import Manifest
class PkgManifest(Manifest):
def create_initial(self):
with open(self.initial_manifest, "w+") as manifest:
manifest.write(self.initial_manifest_file_header)
for var in self.var_maps[self.manifest_type]:
pkg_list = self.d.getVar(var)
if pkg_list is None:
continue
for pkg in pkg_list.split():
manifest.write("%s,%s\n" %
(self.var_maps[self.manifest_type][var], pkg))
def create_final(self):
pass
def create_full(self, pm):
pass

View File

@@ -0,0 +1,212 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import re
import shutil
from oe.rootfs import Rootfs
from oe.manifest import Manifest
from oe.utils import execute_pre_post_process
from oe.package_manager.deb.manifest import PkgManifest
from oe.package_manager.deb import DpkgPM
class DpkgOpkgRootfs(Rootfs):
    """Common rootfs logic shared by the dpkg and opkg backends.

    Provides handling of delayed postinstall scripts: parsing the package
    status file for packages still 'unpacked', resolving the order their
    postinsts must run in, and saving those scripts into the image so
    they can run on first boot.
    """
    def __init__(self, d, progress_reporter=None, logcatcher=None):
        super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)

    def _get_pkgs_postinsts(self, status_file):
        """Return {package: [dependencies]} for every package listed in
        *status_file* whose status is still 'unpacked' (i.e. its postinst
        has not run). Dependencies on packages without a pending postinst
        are filtered out of each list."""
        def _get_pkg_depends_list(pkg_depends):
            # filter version requirements like libc (>= 1.1)
            pkg_depends_list = []
            for dep in pkg_depends.split(', '):
                m_dep = re.match(r"^(.*) \(.*\)$", dep)
                if m_dep:
                    dep = m_dep.group(1)
                pkg_depends_list.append(dep)
            return pkg_depends_list

        pkgs = {}
        pkg_name = ""
        pkg_status_match = False
        pkg_depends = ""

        with open(status_file) as status:
            data = status.read()

        for line in data.split('\n'):
            m_pkg = re.match(r"^Package: (.*)", line)
            m_status = re.match(r"^Status:.*unpacked", line)
            m_depends = re.match(r"^Depends: (.*)", line)

            # Only one of m_pkg, m_status or m_depends is not None at a time.
            # If m_pkg is not None, we started a new package stanza.
            if m_pkg is not None:
                # Get Package name and reset the per-package state.
                pkg_name = m_pkg.group(1)
                pkg_status_match = False
                pkg_depends = ""
            elif m_status is not None:
                # New status matched
                pkg_status_match = True
            elif m_depends is not None:
                # New depends matched
                pkg_depends = m_depends.group(1)
            else:
                pass

            # Record (and keep refreshing) the dependency list of the
            # current package once its 'unpacked' status has been seen.
            if pkg_name and pkg_status_match:
                pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)

        # remove package dependencies not in postinsts
        pkg_names = list(pkgs.keys())
        for pkg_name in pkg_names:
            deps = pkgs[pkg_name][:]

            for d in deps:
                if d not in pkg_names:
                    pkgs[pkg_name].remove(d)

        return pkgs

    def _get_delayed_postinsts_common(self, status_file):
        """Return the list of packages with pending postinsts, ordered so
        that each package's dependencies come before it, or None when
        nothing is pending. Raises RuntimeError on a dependency cycle."""
        def _dep_resolve(graph, node, resolved, seen):
            seen.append(node)
            for edge in graph[node]:
                if edge not in resolved:
                    if edge in seen:
                        raise RuntimeError("Packages %s and %s have " \
                                "a circular dependency in postinsts scripts." \
                                % (node, edge))
                    _dep_resolve(graph, edge, resolved, seen)

            resolved.append(node)

        pkg_list = []

        pkgs = None
        if not self.d.getVar('PACKAGE_INSTALL').strip():
            bb.note("Building empty image")
        else:
            pkgs = self._get_pkgs_postinsts(status_file)
        if pkgs:
            # Use a synthetic root node depending on everything so a single
            # resolution pass orders the whole graph.
            root = "__packagegroup_postinst__"
            pkgs[root] = list(pkgs.keys())
            _dep_resolve(pkgs, root, pkg_list, [])
            pkg_list.remove(root)

        if not pkg_list:
            return None

        return pkg_list

    def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
        """Copy delayed postinst scripts into *dst_postinst_dir*, numbered
        in dependency order, unless the image keeps its package manager
        (then the PM runs them itself on first boot)."""
        if bb.utils.contains("IMAGE_FEATURES", "package-management",
                         True, False, self.d):
            return
        num = 0
        for p in self._get_delayed_postinsts():
            bb.utils.mkdirhier(dst_postinst_dir)

            if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
                shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
                            os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))

            num += 1
class PkgRootfs(DpkgOpkgRootfs):
    """deb/apt backed rootfs assembly.

    Wires a DpkgPM package manager to the generic DpkgOpkgRootfs logic
    and implements the image creation sequence.
    """
    def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
        super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
        # apt errors appear as "E: ..." lines in the log; "Unmet
        # dependencies" is expected (fix_broken_dependencies handles it).
        self.log_check_regex = '^E:'
        self.log_check_expected_regexes = \
        [
            "^E: Unmet dependencies."
        ]

        # Start from a clean rootfs every time.
        bb.utils.remove(self.image_rootfs, True)
        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
        self.manifest = PkgManifest(d, manifest_dir)
        self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
                         d.getVar('PACKAGE_ARCHS'),
                         d.getVar('DPKG_ARCH'))

    def _create(self):
        """Populate the rootfs: index the local repo, run the pre-process
        commands, install everything from the initial manifest, install
        complementary packages, run postinsts and post-process commands.
        Progress reporter stages are advanced in lock-step with the
        equivalent rpm/ipk backends."""
        pkgs_to_install = self.manifest.parse_initial_manifest()
        deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
        deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')

        alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
        bb.utils.mkdirhier(alt_dir)

        # update PM index files
        self.pm.write_index()

        execute_pre_post_process(self.d, deb_pre_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()
            # Don't support incremental, so skip that
            self.progress_reporter.next_stage()

        self.pm.update()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        # Install per package type (e.g. must-install before attempt-only);
        # the indexed [False, True][...] selects attempt_only for the
        # PKG_TYPE_ATTEMPT_ONLY group.
        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                self.pm.install(pkgs_to_install[pkg_type],
                                [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
                self.pm.fix_broken_dependencies()

        if self.progress_reporter:
            # Don't support attemptonly, so skip that
            self.progress_reporter.next_stage()
            self.progress_reporter.next_stage()

        self.pm.install_complementary()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self._setup_dbg_rootfs(['/var/lib/dpkg'])

        self.pm.fix_broken_dependencies()

        self.pm.mark_packages("installed")

        self.pm.run_pre_post_installs()

        execute_pre_post_process(self.d, deb_post_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

    @staticmethod
    def _depends_list():
        # Variables whose values this rootfs backend depends on (used for
        # task signature/dependency tracking).
        return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']

    def _get_delayed_postinsts(self):
        # Pending postinsts are derived from the dpkg status file.
        status_file = self.image_rootfs + "/var/lib/dpkg/status"
        return self._get_delayed_postinsts_common(status_file)

    def _save_postinsts(self):
        # Save pending postinsts so they can run on first boot.
        dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
        src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
        return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)

    def _log_check(self):
        self._log_check_warn()
        self._log_check_error()

    def _cleanup(self):
        # Nothing to clean up for the deb backend.
        pass

View File

@@ -0,0 +1,107 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import glob
import shutil
from oe.utils import execute_pre_post_process
from oe.sdk import Sdk
from oe.manifest import Manifest
from oe.package_manager.deb import DpkgPM
from oe.package_manager.deb.manifest import PkgManifest
class PkgSdk(Sdk):
    """deb/apt backed SDK assembly.

    Uses two DpkgPM instances: one for the target sysroot and one for
    the nativesdk host sysroot, each with its own apt configuration.
    """
    def __init__(self, d, manifest_dir=None):
        super(PkgSdk, self).__init__(d, manifest_dir)

        self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
        self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")


        self.target_manifest = PkgManifest(d, self.manifest_dir,
                                            Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = PkgManifest(d, self.manifest_dir,
                                          Manifest.MANIFEST_TYPE_SDK_HOST)

        # The extensible SDK uses its own repo workdir.
        deb_repo_workdir = "oe-sdk-repo"
        if "sdk_ext" in d.getVar("BB_RUNTASK"):
            deb_repo_workdir = "oe-sdk-ext-repo"

        self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
                                self.d.getVar("PACKAGE_ARCHS"),
                                self.d.getVar("DPKG_ARCH"),
                                self.target_conf_dir,
                                deb_repo_workdir=deb_repo_workdir)

        self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
                              self.d.getVar("SDK_PACKAGE_ARCHS"),
                              self.d.getVar("DEB_SDK_ARCH"),
                              self.host_conf_dir,
                              deb_repo_workdir=deb_repo_workdir)

    def _copy_apt_dir_to(self, dst_dir):
        """Replace *dst_dir* with a copy of the staged native apt config."""
        staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")

        self.remove(dst_dir, True)

        shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)

    def _populate_sysroot(self, pm, manifest):
        """Install all packages from *manifest* into *pm*'s sysroot,
        honouring the install order (attempt-only group last)."""
        pkgs_to_install = manifest.parse_initial_manifest()

        pm.write_index()
        pm.update()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                pm.install(pkgs_to_install[pkg_type],
                           [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

    def _populate(self):
        """Populate target and host sysroots, run intercepts and the
        POPULATE_SDK_* pre/post commands, then optionally strip packaging
        data and relocate the native dpkg state into the SDK layout."""
        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))

        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))

        self.target_pm.run_pre_post_installs()

        # Target intercepts must run with the nativesdk-intercept scripts
        # first on PATH; restore the environment afterwards.
        env_bkp = os.environ.copy()
        os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
                             os.pathsep + os.environ["PATH"]

        self.target_pm.run_intercepts(populate_sdk='target')
        os.environ.update(env_bkp)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))

        self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))

        if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
            self.target_pm.remove_packaging_data()

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)
        self.install_locales(self.host_pm)

        self.host_pm.run_pre_post_installs()

        self.host_pm.run_intercepts(populate_sdk='host')

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))

        self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
                                           "etc", "apt"))

        if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
            self.host_pm.remove_packaging_data()

        # Move the host dpkg state under the SDK's native path and drop the
        # now-empty top-level /var tree.
        native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
                                             "var", "lib", "dpkg")
        self.mkdirhier(native_dpkg_state_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
            self.movefile(f, native_dpkg_state_dir)
        self.remove(os.path.join(self.sdk_output, "var"), True)

View File

@@ -0,0 +1,515 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import glob
import re
import shutil
import subprocess
from oe.package_manager import *
class OpkgIndexer(Indexer):
    """Generates (and optionally GPG-signs) opkg Packages index files
    for every architecture subdirectory of the ipk deploy dir."""

    def write_index(self):
        """Run opkg-make-index for each arch dir in parallel; sign the
        resulting Packages files when PACKAGE_FEED_SIGN is enabled."""
        index_tool = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
        extra_params = self.d.getVar('OPKG_MAKE_INDEX_EXTRA_PARAMS') or ""

        signer = None
        if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
            signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))

        # opkg-make-index expects a top-level Packages file to exist.
        top_index = os.path.join(self.deploy_dir, "Packages")
        if not os.path.exists(top_index):
            open(top_index, "w").close()

        index_cmds = set()
        index_sign_files = set()
        for arch_var in ("ALL_MULTILIB_PACKAGE_ARCHS", "SDK_PACKAGE_ARCHS"):
            archs = self.d.getVar(arch_var)
            if archs is None:
                continue

            for arch in archs.split():
                pkgs_dir = os.path.join(self.deploy_dir, arch)
                if not os.path.isdir(pkgs_dir):
                    continue

                pkgs_file = os.path.join(pkgs_dir, "Packages")
                if not os.path.exists(pkgs_file):
                    open(pkgs_file, "w").close()

                index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s %s' %
                               (index_tool, pkgs_file, pkgs_file, pkgs_dir, extra_params))
                index_sign_files.add(pkgs_file)

        if not index_cmds:
            bb.note("There are no packages in %s!" % self.deploy_dir)
            return

        oe.utils.multiprocess_launch(create_index, index_cmds, self.d)

        if signer:
            feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
            is_ascii_sig = (feed_sig_type.upper() != "BIN")
            for pkgs_file in index_sign_files:
                signer.detach_sign(pkgs_file,
                                   self.d.getVar('PACKAGE_FEED_GPG_NAME'),
                                   self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
                                   armor=is_ascii_sig)
class PMPkgsList(PkgsList):
    """Queries the set of packages installed in an opkg-managed rootfs."""

    def __init__(self, d, rootfs_dir):
        super(PMPkgsList, self).__init__(d, rootfs_dir)
        config_file = d.getVar("IPKGCONF_TARGET")

        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
        self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
        self.opkg_args += self.d.getVar("OPKG_ARGS")

    def list_pkgs(self, format=None):
        """Return the installed-package dictionary parsed from the output
        of 'opkg status'."""
        cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)

        # opkg can exit successfully while still printing a
        # "Collected errors:" report to stderr. If that leaked into
        # stdout, parsing would randomly fail later on, so the two
        # streams are captured separately and any stderr output is
        # treated as fatal.
        proc = subprocess.run(cmd, shell=True,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        cmd_output = proc.stdout.decode("utf-8")
        cmd_stderr = proc.stderr.decode("utf-8")
        if proc.returncode or cmd_stderr:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d and stderr:\n%s" % (cmd, proc.returncode, cmd_stderr))

        return opkg_query(cmd_output)
class OpkgDpkgPM(PackageManager):
    """Abstract base with the package query/extract logic shared by the
    opkg and dpkg package managers. Do not instantiate directly."""
    def __init__(self, d, target_rootfs):
        """
        This is an abstract class. Do not instantiate this directly.
        """
        super(OpkgDpkgPM, self).__init__(d, target_rootfs)

    def package_info(self, pkg, cmd):
        """
        Returns a dictionary with the package info.

        This method extracts the common parts for Opkg and Dpkg
        """
        proc = subprocess.run(cmd, capture_output=True, encoding="utf-8", shell=True)
        if proc.returncode:
            bb.fatal("Unable to list available packages. Command '%s' "
                     "returned %d:\n%s" % (cmd, proc.returncode, proc.stderr))
        elif proc.stderr:
            bb.note("Command '%s' returned stderr: %s" % (cmd, proc.stderr))

        return opkg_query(proc.stdout)

    def extract(self, pkg, pkg_info):
        """
        Returns the path to a tmpdir where resides the contents of a package.

        Deleting the tmpdir is responsibility of the caller.

        This method extracts the common parts for Opkg and Dpkg
        """
        ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
        tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
        pkg_path = pkg_info[pkg]["filepath"]

        if not os.path.isfile(pkg_path):
            bb.fatal("Unable to extract package for '%s'. "
                     "File %s doesn't exist" % (pkg, pkg_path))

        tmp_dir = tempfile.mkdtemp()
        current_dir = os.getcwd()
        os.chdir(tmp_dir)

        try:
            cmd = [ar_cmd, 'x', pkg_path]
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            data_tar = glob.glob("data.tar.*")
            if len(data_tar) != 1:
                # bb.fatal() joins its arguments, so the message must be
                # %-formatted here (passing extra args logging-style would
                # raise a TypeError on the list argument).
                bb.fatal("Unable to extract %s package. Failed to identify "
                         "data tarball (found tarballs '%s')." %
                         (pkg_path, data_tar))
            data_tar = data_tar[0]
            cmd = [tar_cmd, 'xf', data_tar]
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.utils.remove(tmp_dir, recurse=True)
            bb.fatal("Unable to extract %s package. Command '%s' "
                     "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
        except OSError as e:
            bb.utils.remove(tmp_dir, recurse=True)
            bb.fatal("Unable to extract %s package. Command '%s' "
                     "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
        finally:
            # Restore the working directory even when bb.fatal() raises out
            # of one of the handlers above; previously a failure left the
            # process chdir'ed into the (removed) tmp_dir.
            os.chdir(current_dir)

        bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
        bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
        # NOTE(review): assumes a gzip-compressed control tarball; other
        # control.tar.* compressions would be left behind.
        bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
        bb.utils.remove(os.path.join(tmp_dir, data_tar))

        return tmp_dir

    def _handle_intercept_failure(self, registered_pkgs):
        # Intercept scripts could not run on the host; mark the affected
        # packages "unpacked" so their scriptlets run on first boot instead.
        self.mark_packages("unpacked", registered_pkgs.split())
class OpkgPM(OpkgDpkgPM):
    def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True):
        """
        d -- BitBake datastore
        target_rootfs -- directory the packages get installed into
        config_file -- path of the opkg config file to generate/use
        archs -- space separated list of supported package architectures
        task_name -- used to namespace the saved opkg state under ${T}
        ipk_repo_workdir -- WORKDIR subdirectory used as the local ipk repo
        filterbydependencies -- restrict the local repo to needed packages
        prepare_index -- create the local repo dir from the deploy dir
        """
        super(OpkgPM, self).__init__(d, target_rootfs)

        self.config_file = config_file
        self.pkg_archs = archs
        self.task_name = task_name

        self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir)
        self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
        self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
        self.opkg_args += self.d.getVar("OPKG_ARGS")

        if prepare_index:
            create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"), "package_write_ipk", filterbydependencies)

        # opkg state directory inside the target rootfs.
        self.opkg_dir = oe.path.join(target_rootfs, self.d.getVar('OPKGLIBDIR'), "opkg")
        bb.utils.mkdirhier(self.opkg_dir)

        self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
        if not os.path.exists(self.d.expand('${T}/saved')):
            bb.utils.mkdirhier(self.d.expand('${T}/saved'))

        # BUILD_IMAGES_FROM_FEEDS selects remote feeds over the local repo.
        self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
        if self.from_feeds:
            self._create_custom_config()
        else:
            self._create_config()

        self.indexer = OpkgIndexer(self.d, self.deploy_dir)
def mark_packages(self, status_tag, packages=None):
"""
This function will change a package's status in /var/lib/opkg/status file.
If 'packages' is None then the new_status will be applied to all
packages
"""
status_file = os.path.join(self.opkg_dir, "status")
with open(status_file, "r") as sf:
with open(status_file + ".tmp", "w+") as tmp_sf:
if packages is None:
tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
r"Package: \1\n\2Status: \3%s" % status_tag,
sf.read()))
else:
if type(packages).__name__ != "list":
raise TypeError("'packages' should be a list object")
status = sf.read()
for pkg in packages:
status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
status)
tmp_sf.write(status)
bb.utils.rename(status_file + ".tmp", status_file)
    def _create_custom_config(self):
        """Generate an opkg config for building the image from remote feeds
        (BUILD_IMAGES_FROM_FEEDS) instead of the local deploy directory."""
        bb.note("Building from feeds activated!")

        with open(self.config_file, "w+") as config_file:
            # Declare the supported archs with increasing priority.
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            # IPK_FEED_URIS entries have the form "<name>##<uri>".
            for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
                feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)

                if feed_match is not None:
                    feed_name = feed_match.group(1)
                    feed_uri = feed_match.group(2)

                    bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))

                    config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))

            """
            Allow to use package deploy directory contents as quick devel-testing
            feed. This creates individual feed configs for each arch subdir of those
            specified as compatible for the current machine.
            NOTE: Development-helper feature, NOT a full-fledged feed.
            """
            if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
                for arch in self.pkg_archs.split():
                    cfg_file_name = oe.path.join(self.target_rootfs,
                                                 self.d.getVar("sysconfdir"),
                                                 "opkg",
                                                 "local-%s-feed.conf" % arch)

                    with open(cfg_file_name, "w+") as cfg_file:
                        cfg_file.write("src/gz local-%s %s/%s" %
                                       (arch,
                                        self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
                                        arch))

                        if self.d.getVar('OPKGLIBDIR') != '/var/lib':
                            # There is no command line option for this anymore, we need to add
                            # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
                            # the default value of "/var/lib" as defined in opkg:
                            # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
                            # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
                            # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
                            cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
                            cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
                            cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
def _create_config(self):
with open(self.config_file, "w+") as config_file:
priority = 1
for arch in self.pkg_archs.split():
config_file.write("arch %s %d\n" % (arch, priority))
priority += 5
config_file.write("src oe file:%s\n" % self.deploy_dir)
for arch in self.pkg_archs.split():
pkgs_dir = os.path.join(self.deploy_dir, arch)
if os.path.isdir(pkgs_dir):
config_file.write("src oe-%s file:%s\n" %
(arch, pkgs_dir))
if self.d.getVar('OPKGLIBDIR') != '/var/lib':
# There is no command line option for this anymore, we need to add
# info_dir and status_file to config file, if OPKGLIBDIR doesn't have
# the default value of "/var/lib" as defined in opkg:
# libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
# libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
# libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
if feed_uris == "":
return
rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
% self.target_rootfs)
os.makedirs('%s/etc/opkg' % self.target_rootfs, exist_ok=True)
feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
with open(rootfs_config, "w+") as config_file:
uri_iterator = 0
for uri in feed_uris:
if archs:
for arch in archs:
if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
continue
bb.note('Adding opkg feed url-%s-%d (%s)' %
(arch, uri_iterator, uri))
config_file.write("src/gz uri-%s-%d %s/%s\n" %
(arch, uri_iterator, uri, arch))
else:
bb.note('Adding opkg feed url-%d (%s)' %
(uri_iterator, uri))
config_file.write("src/gz uri-%d %s\n" %
(uri_iterator, uri))
uri_iterator += 1
def update(self):
self.deploy_dir_lock()
cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
self.deploy_dir_unlock()
bb.fatal("Unable to update the package index files. Command '%s' "
"returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
self.deploy_dir_unlock()
def install(self, pkgs, attempt_only=False, hard_depends_only=False):
if not pkgs:
return
cmd = "%s %s" % (self.opkg_cmd, self.opkg_args)
for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split():
cmd += " --add-exclude %s" % exclude
for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split():
cmd += " --add-ignore-recommends %s" % bad_recommendation
if hard_depends_only:
cmd += " --no-install-recommends"
cmd += " install "
cmd += " ".join(pkgs)
os.environ['D'] = self.target_rootfs
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['INTERCEPT_DIR'] = self.intercepts_dir
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
try:
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
bb.note(cmd)
output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
bb.note(output)
failed_pkgs = []
for line in output.split('\n'):
if line.endswith("configuration required on target."):
bb.warn(line)
failed_pkgs.append(line.split(".")[0])
if failed_pkgs:
failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
except subprocess.CalledProcessError as e:
(bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
"Command '%s' returned %d:\n%s" %
(cmd, e.returncode, e.output.decode("utf-8")))
def remove(self, pkgs, with_dependencies=True):
    """Remove the listed packages from the target rootfs.

    When with_dependencies is set, packages depending on the removed ones
    are force-removed too; otherwise dependency checks are bypassed.
    """
    if not pkgs:
        return

    if with_dependencies:
        removal_cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
    else:
        removal_cmd = "%s %s --force-depends remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))

    try:
        bb.note(removal_cmd)
        removal_output = subprocess.check_output(removal_cmd.split(),
                                                 stderr=subprocess.STDOUT).decode("utf-8")
        bb.note(removal_output)
    except subprocess.CalledProcessError as e:
        bb.fatal("Unable to remove packages. Command '%s' "
                 "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
def write_index(self):
    """Rebuild the ipk feed index while holding the deploy-dir lock."""
    self.deploy_dir_lock()
    index_result = self.indexer.write_index()
    self.deploy_dir_unlock()
    # A non-None result is the indexer's error message.
    if index_result is not None:
        bb.fatal(index_result)
def remove_packaging_data(self):
    """Delete opkg's state directory and its download cache from the rootfs."""
    opkg_cache = oe.path.join(self.target_rootfs,
                              self.d.getVar("localstatedir"), "cache", "opkg")
    bb.utils.remove(self.opkg_dir, True)
    bb.utils.remove(opkg_cache, True)
def remove_lists(self):
    """Drop the downloaded feed lists unless we are installing from feeds."""
    if self.from_feeds:
        return
    bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True)
def list_installed(self):
    """Return a dict describing every package installed in the target rootfs."""
    pkgs_list = PMPkgsList(self.d, self.target_rootfs)
    return pkgs_list.list_pkgs()
def dummy_install(self, pkgs):
    """Perform a '--noaction' install of *pkgs* and return opkg's stdout.

    Used to predict the full package set an install would produce (e.g.
    for full-manifest generation) without touching the real target
    rootfs.  Returns None when *pkgs* is empty.
    """
    if len(pkgs) == 0:
        return

    # Create a temporary dir to use as the opkg root for the dummy
    # installation, so the real rootfs is left untouched.
    temp_rootfs = self.d.expand('${T}/opkg')
    opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
    # os.path.join discards earlier components when a later one is
    # absolute, so strip the leading slash first.
    if opkg_lib_dir[0] == "/":
        opkg_lib_dir = opkg_lib_dir[1:]
    temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg')
    bb.utils.mkdirhier(temp_opkg_dir)

    opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
    opkg_args += self.d.getVar("OPKG_ARGS")

    # Refresh the package index for the temporary root before resolving.
    cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
    try:
        subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        bb.fatal("Unable to update. Command '%s' "
                 "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))

    # Dummy installation: --noaction resolves and reports but installs nothing.
    cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
                                            opkg_args,
                                            ' '.join(pkgs))
    proc = subprocess.run(cmd, capture_output=True, encoding="utf-8", shell=True)
    if proc.returncode:
        bb.fatal("Unable to dummy install packages. Command '%s' "
                 "returned %d:\n%s" % (cmd, proc.returncode, proc.stderr))
    elif proc.stderr:
        bb.note("Command '%s' returned stderr: %s" % (cmd, proc.stderr))

    bb.utils.remove(temp_rootfs, True)

    return proc.stdout
def backup_packaging_data(self):
    """Snapshot opkg's state dir so incremental ipk image builds can reuse it."""
    # Replace any stale snapshot from a previous run.
    if os.path.exists(self.saved_opkg_dir):
        bb.utils.remove(self.saved_opkg_dir, True)
    shutil.copytree(self.opkg_dir, self.saved_opkg_dir, symlinks=True)
def recover_packaging_data(self):
    """Restore the opkg state dir saved by backup_packaging_data(), if any."""
    if not os.path.exists(self.saved_opkg_dir):
        return
    if os.path.exists(self.opkg_dir):
        bb.utils.remove(self.opkg_dir, True)
    bb.note('Recover packaging data')
    shutil.copytree(self.saved_opkg_dir, self.opkg_dir, symlinks=True)
def package_info(self, pkg):
    """Return the info dictionary for *pkg*, augmented with the path to
    its .ipk file under the deploy directory ('filepath' key)."""
    cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
    info = super(OpkgPM, self).package_info(pkg, cmd)

    arch = info[pkg]["arch"]
    filename = info[pkg]["filename"]
    info[pkg]["filepath"] = os.path.join(self.deploy_dir, arch, filename)

    return info
def extract(self, pkg):
    """Unpack *pkg* into a temporary directory and return its path.

    Deleting the returned tmpdir is the caller's responsibility.
    """
    info = self.package_info(pkg)
    if not info:
        bb.fatal("Unable to get information for package '%s' while "
                 "trying to extract the package." % pkg)
    return super(OpkgPM, self).extract(pkg, info)

View File

@@ -0,0 +1,76 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from oe.manifest import Manifest
import re
class PkgManifest(Manifest):
    """
    Returns a dictionary object with mip and mlp packages.
    """
    def _split_multilib(self, pkg_list):
        """Split *pkg_list* into must-install and multilib groups.

        A package is classified as multilib when its name starts with one
        of the configured MULTILIB_VARIANTS prefixes.  Returns a dict
        mapping package type to a space-separated package string.
        """
        pkgs = dict()
        # MULTILIB_VARIANTS does not change per package; look it up once.
        ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()

        for pkg in pkg_list.split():
            pkg_type = self.PKG_TYPE_MUST_INSTALL

            for ml_variant in ml_variants:
                if pkg.startswith(ml_variant + '-'):
                    pkg_type = self.PKG_TYPE_MULTILIB

            if pkg_type not in pkgs:
                pkgs[pkg_type] = pkg
            else:
                pkgs[pkg_type] += " " + pkg

        return pkgs

    def create_initial(self):
        """Write the initial manifest: one 'type,package' line per package."""
        pkgs = dict()

        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for var in self.var_maps[self.manifest_type]:
                if var in self.vars_to_split:
                    split_pkgs = self._split_multilib(self.d.getVar(var))
                    if split_pkgs is not None:
                        pkgs.update(split_pkgs)
                else:
                    pkg_list = self.d.getVar(var)
                    if pkg_list is not None:
                        pkgs[self.var_maps[self.manifest_type][var]] = pkg_list

            for pkg_type in sorted(pkgs):
                for pkg in sorted(pkgs[pkg_type].split()):
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    def create_final(self):
        # The ipk backend has no final-manifest step.
        pass

    def create_full(self, pm):
        """Create the full manifest from a dummy install of the initial set.

        Asks the package manager for a '--noaction' install of everything
        in the initial manifest and records each package that would be
        pulled in.
        """
        # NOTE(review): relies on 'os' being importable in this module;
        # confirm it is imported at the top of the file.
        if not os.path.exists(self.initial_manifest):
            self.create_initial()

        initial_manifest = self.parse_initial_manifest()
        pkgs_to_install = list()
        for pkg_type in initial_manifest:
            pkgs_to_install += initial_manifest[pkg_type]
        if len(pkgs_to_install) == 0:
            return

        output = pm.dummy_install(pkgs_to_install)

        with open(self.full_manifest, 'w+') as manifest:
            pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
            for line in set(output.split('\n')):
                m = pkg_re.match(line)
                if m:
                    manifest.write(m.group(1) + '\n')

        return

View File

@@ -0,0 +1,352 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import re
import filecmp
import shutil
from oe.rootfs import Rootfs
from oe.manifest import Manifest
from oe.utils import execute_pre_post_process
from oe.package_manager.ipk.manifest import PkgManifest
from oe.package_manager.ipk import OpkgPM
class DpkgOpkgRootfs(Rootfs):
    """Shared rootfs logic for the dpkg and opkg backends.

    Both package formats record package state in a 'status' file; this
    class provides the common handling of postinstall scripts that were
    delayed because they must run on the target.
    """
    def __init__(self, d, progress_reporter=None, logcatcher=None):
        super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)

    def _get_pkgs_postinsts(self, status_file):
        """Parse *status_file* and return {pkg: [deps]} for every package
        whose status matched 'unpacked' (postinst not yet run).

        Dependencies are pruned to packages that themselves appear in the
        result, so the mapping forms a dependency graph over postinsts.
        """
        def _get_pkg_depends_list(pkg_depends):
            pkg_depends_list = []
            # filter version requirements like libc (>= 1.1)
            for dep in pkg_depends.split(', '):
                m_dep = re.match(r"^(.*) \(.*\)$", dep)
                if m_dep:
                    dep = m_dep.group(1)
                pkg_depends_list.append(dep)
            return pkg_depends_list

        pkgs = {}
        pkg_name = ""
        pkg_status_match = False
        pkg_depends = ""

        with open(status_file) as status:
            data = status.read()
            status.close()
            for line in data.split('\n'):
                m_pkg = re.match(r"^Package: (.*)", line)
                m_status = re.match(r"^Status:.*unpacked", line)
                m_depends = re.match(r"^Depends: (.*)", line)

                # Only one of m_pkg, m_status or m_depends is not None at a time.
                # If m_pkg is not None, we started a new package stanza.
                if m_pkg is not None:
                    # Get the package name
                    pkg_name = m_pkg.group(1)
                    # Make sure we reset the per-package state
                    pkg_status_match = False
                    pkg_depends = ""
                elif m_status is not None:
                    # New status matched
                    pkg_status_match = True
                elif m_depends is not None:
                    # New depends matched
                    pkg_depends = m_depends.group(1)
                else:
                    pass

                # Record the package once its 'unpacked' status has been
                # seen; re-recording on later lines just refreshes deps.
                if "" != pkg_name and pkg_status_match:
                    pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
                else:
                    # Not enough information yet
                    pass

        # remove package dependencies not in postinsts
        pkg_names = list(pkgs.keys())
        for pkg_name in pkg_names:
            deps = pkgs[pkg_name][:]

            for d in deps:
                if d not in pkg_names:
                    pkgs[pkg_name].remove(d)

        return pkgs

    def _get_delayed_postinsts_common(self, status_file):
        """Return postinst-pending packages in dependency order, or None.

        Depth-first resolves the graph from _get_pkgs_postinsts so a
        package's postinst runs only after those of its dependencies.
        Raises RuntimeError on a circular postinst dependency.
        """
        def _dep_resolve(graph, node, resolved, seen):
            seen.append(node)
            for edge in graph[node]:
                if edge not in resolved:
                    if edge in seen:
                        raise RuntimeError("Packages %s and %s have " \
                                "a circular dependency in postinsts scripts." \
                                % (node, edge))
                    _dep_resolve(graph, edge, resolved, seen)

            resolved.append(node)

        pkg_list = []

        pkgs = None
        if not self.d.getVar('PACKAGE_INSTALL').strip():
            bb.note("Building empty image")
        else:
            pkgs = self._get_pkgs_postinsts(status_file)
        if pkgs:
            # Synthetic root node depending on everything, so a single
            # resolve pass orders the whole graph.
            root = "__packagegroup_postinst__"
            pkgs[root] = list(pkgs.keys())
            _dep_resolve(pkgs, root, pkg_list, [])
            pkg_list.remove(root)

        if len(pkg_list) == 0:
            return None

        return pkg_list

    def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
        """Copy delayed postinst scripts to *dst_postinst_dir*, prefixed
        with a 3-digit counter that preserves execution order.

        Skipped entirely when package management remains in the image,
        since the package manager will run the postinsts on first boot.
        """
        if bb.utils.contains("IMAGE_FEATURES", "package-management",
                         True, False, self.d):
            return

        num = 0
        for p in self._get_delayed_postinsts():
            bb.utils.mkdirhier(dst_postinst_dir)
            if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
                shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
                            os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))

            num += 1
class PkgRootfs(DpkgOpkgRootfs):
    """Rootfs construction backend for ipk/opkg based images."""

    def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
        super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
        self.log_check_regex = '(exit 1|Collected errors)'

        self.manifest = PkgManifest(d, manifest_dir)
        self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
        self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")

        self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
        if self._remove_old_rootfs():
            # Start from scratch: incremental generation is disabled or
            # relevant configuration changed since the last build.
            bb.utils.remove(self.image_rootfs, True)
            self.pm = OpkgPM(d,
                             self.image_rootfs,
                             self.opkg_conf,
                             self.pkg_archs)
        else:
            # Incremental build: reuse the previous rootfs and restore
            # the opkg state saved by the last run.
            self.pm = OpkgPM(d,
                             self.image_rootfs,
                             self.opkg_conf,
                             self.pkg_archs)
            self.pm.recover_packaging_data()

        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)

    def _file_equal(self, key, f1, f2):
        """Return True when the two files installed under the same path
        *key* have identical contents (i.e. the duplication is harmless)."""
        return filecmp.cmp(f1, f2)

    def _multilib_sanity_test(self, dirs):
        """Check the per-variant scratch rootfs against the real rootfs.

        Files installed at the same path by different multilib variants
        must either be byte-identical or match MULTILIBRE_ALLOW_REP;
        anything else aborts the build.

        This function was reused from the old implementation.
        See commit: "image.bbclass: Added variables for multilib support."
        by Lianhao Lu.
        """
        allow_replace = "|".join((self.d.getVar("MULTILIBRE_ALLOW_REP") or "").split())
        # str.join() always returns a string, so no None fallback is needed.
        allow_rep = re.compile(re.sub(r"\|$", r"", allow_replace))
        error_prompt = "Multilib check error:"

        files = {}
        for dir in dirs:
            for root, subfolders, subfiles in os.walk(dir):
                for file in subfiles:
                    item = os.path.join(root, file)
                    key = str(os.path.join("/", os.path.relpath(item, dir)))

                    valid = True
                    if key in files:
                        # Check whether this path is allowed to be replaced.
                        if allow_rep.match(key):
                            valid = True
                        else:
                            if os.path.exists(files[key]) and \
                               os.path.exists(item) and \
                               not self._file_equal(key, files[key], item):
                                valid = False
                                bb.fatal("%s duplicate files %s %s is not the same\n" %
                                         (error_prompt, item, files[key]))

                    # Passed the check; remember where this path came from.
                    if valid:
                        files[key] = item

    def _multilib_test_install(self, pkgs):
        """Install *pkgs* into per-variant scratch rootfs and sanity-check
        them against the real rootfs for conflicting files."""
        ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
        bb.utils.mkdirhier(ml_temp)

        dirs = [self.image_rootfs]

        for variant in self.d.getVar("MULTILIB_VARIANTS").split():
            ml_target_rootfs = os.path.join(ml_temp, variant)

            bb.utils.remove(ml_target_rootfs, True)

            ml_opkg_conf = os.path.join(ml_temp,
                                        variant + "-" + os.path.basename(self.opkg_conf))

            ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False)

            ml_pm.update()
            ml_pm.install(pkgs)

            dirs.append(ml_target_rootfs)

        self._multilib_sanity_test(dirs)

    def _remove_extra_packages(self, pkgs_initial_install):
        """With incremental ipk image generation enabled, remove packages
        that were in the previous image's full manifest but are no longer
        in the current one."""
        if self.inc_opkg_image_gen == "1":
            # Parse full manifest from the previous image creation session.
            old_full_manifest = self.manifest.parse_full_manifest()

            # Create the full manifest for the current session; the old
            # one is replaced by the new one.
            self.manifest.create_full(self.pm)

            # Parse the full manifest of the current session.
            new_full_manifest = self.manifest.parse_full_manifest()

            pkg_to_remove = [pkg for pkg in old_full_manifest
                             if pkg not in new_full_manifest]

            if pkg_to_remove:
                bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
                self.pm.remove(pkg_to_remove)

    def _remove_old_rootfs(self):
        """Decide whether the previous rootfs must be discarded.

        Returns True when incremental generation is off, or when any of
        PACKAGE_EXCLUDE, NO_RECOMMENDATIONS or BAD_RECOMMENDATIONS has
        changed since the last build (the old image cannot be reused then).
        """
        if self.inc_opkg_image_gen != "1":
            return True

        vars_list_file = self.d.expand('${T}/vars_list')

        old_vars_list = ""
        if os.path.exists(vars_list_file):
            # Context managers ensure the file handles are not leaked
            # (the previous implementation left them open).
            with open(vars_list_file, 'r') as f:
                old_vars_list = f.read()

        new_vars_list = '%s:%s:%s\n' % \
                ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
                 (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
                 (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
        with open(vars_list_file, 'w') as f:
            f.write(new_vars_list)

        return old_vars_list != new_vars_list

    def _create(self):
        """Build the rootfs: refresh the feed, install the manifest
        packages (with a multilib sanity pass), run complementary installs
        and the configured OPKG_{PRE,POST}PROCESS_COMMANDS."""
        pkgs_to_install = self.manifest.parse_initial_manifest()
        opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
        opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')

        # update PM index files
        self.pm.write_index()

        execute_pre_post_process(self.d, opkg_pre_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()
            # Steps are a bit different in order, skip next
            self.progress_reporter.next_stage()

        self.pm.update()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        if self.inc_opkg_image_gen == "1":
            self._remove_extra_packages(pkgs_to_install)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                # For multilib, we perform a sanity test before the final
                # install; on failure it calls bb.fatal() and the
                # installation stops.
                if pkg_type == Manifest.PKG_TYPE_MULTILIB:
                    self._multilib_test_install(pkgs_to_install[pkg_type])

                # attempt_only is simply whether this group is the
                # attempt-only package type.
                self.pm.install(pkgs_to_install[pkg_type],
                                pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self.pm.install_complementary()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
        opkg_dir = os.path.join(opkg_lib_dir, 'opkg')

        self._setup_dbg_rootfs([opkg_dir])

        execute_pre_post_process(self.d, opkg_post_process_cmds)

        if self.inc_opkg_image_gen == "1":
            self.pm.backup_packaging_data()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

    @staticmethod
    def _depends_list():
        return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR']

    def _get_delayed_postinsts(self):
        status_file = os.path.join(self.image_rootfs,
                                   self.d.getVar('OPKGLIBDIR').strip('/'),
                                   "opkg", "status")
        return self._get_delayed_postinsts_common(status_file)

    def _save_postinsts(self):
        dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
        src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
        return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)

    def _log_check(self):
        self._log_check_warn()
        self._log_check_error()

    def _cleanup(self):
        self.pm.remove_lists()

View File

@@ -0,0 +1,113 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import glob
import shutil
from oe.utils import execute_pre_post_process
from oe.sdk import Sdk
from oe.package_manager.ipk.manifest import PkgManifest
from oe.manifest import Manifest
from oe.package_manager.ipk import OpkgPM
class PkgSdk(Sdk):
    """SDK assembly backend for the ipk package format.

    Populates the SDK target and host sysroots from ipk feeds and ships
    the opkg configuration and state inside the resulting SDK.
    """
    def __init__(self, d, manifest_dir=None):
        super(PkgSdk, self).__init__(d, manifest_dir)

        # In sdk_list_installed_packages the call to opkg is hardcoded to
        # always use IPKGCONF_TARGET and there's no exposed API to change this
        # so simply override IPKGCONF_TARGET to use this separated config file.
        ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK_TARGET")
        d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target)

        self.target_conf = self.d.getVar("IPKGCONF_TARGET")
        self.host_conf = self.d.getVar("IPKGCONF_SDK")

        self.target_manifest = PkgManifest(d, self.manifest_dir,
                                           Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = PkgManifest(d, self.manifest_dir,
                                         Manifest.MANIFEST_TYPE_SDK_HOST)

        # The extensible SDK uses its own repo workdir.
        ipk_repo_workdir = "oe-sdk-repo"
        if "sdk_ext" in d.getVar("BB_RUNTASK"):
            ipk_repo_workdir = "oe-sdk-ext-repo"

        self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
                                self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"),
                                ipk_repo_workdir=ipk_repo_workdir)

        self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
                              self.d.getVar("SDK_PACKAGE_ARCHS"),
                              ipk_repo_workdir=ipk_repo_workdir)

    def _populate_sysroot(self, pm, manifest):
        """Install every package listed in *manifest* into *pm*'s rootfs,
        honouring the package-type install order."""
        pkgs_to_install = manifest.parse_initial_manifest()

        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
            pm.write_index()

        pm.update()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                # Index selects attempt_only=True for the attempt-only group.
                pm.install(pkgs_to_install[pkg_type],
                           [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

    def _populate(self):
        """Populate both sysroots, run intercepts and the configured
        pre/post commands, then install the opkg configs and move the
        opkg state into the nativesdk prefix."""
        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))

        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))

        # Run the target intercepts with the nativesdk wrapper scripts
        # first in PATH, then restore the saved environment.
        env_bkp = os.environ.copy()
        os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
                             os.pathsep + os.environ["PATH"]

        self.target_pm.run_intercepts(populate_sdk='target')
        os.environ.update(env_bkp)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))

        if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
            self.target_pm.remove_packaging_data()
        else:
            self.target_pm.remove_lists()

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)
        self.install_locales(self.host_pm)

        self.host_pm.run_intercepts(populate_sdk='host')

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))

        if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
            self.host_pm.remove_packaging_data()
        else:
            self.host_pm.remove_lists()

        # Ship the opkg config files inside the SDK so opkg is usable there.
        target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
        host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)

        self.mkdirhier(target_sysconfdir)
        shutil.copy(self.target_conf, target_sysconfdir)
        os.chmod(os.path.join(target_sysconfdir,
                              os.path.basename(self.target_conf)), 0o644)

        self.mkdirhier(host_sysconfdir)
        shutil.copy(self.host_conf, host_sysconfdir)
        os.chmod(os.path.join(host_sysconfdir,
                              os.path.basename(self.host_conf)), 0o644)

        # Move the opkg state from var/lib/opkg into the nativesdk prefix.
        native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
                                             self.d.getVar('localstatedir_nativesdk').strip('/'),
                                             "lib", "opkg")
        self.mkdirhier(native_opkg_state_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
            self.movefile(f, native_opkg_state_dir)

        self.remove(os.path.join(self.sdk_output, "var"), True)

View File

@@ -0,0 +1,422 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import shutil
import subprocess
from oe.package_manager import *
class RpmIndexer(Indexer):
    """Generate a createrepo_c package index, detach-signing repomd.xml
    when PACKAGE_FEED_SIGN is enabled."""

    def write_index(self):
        self.do_write_index(self.deploy_dir)

    def do_write_index(self, deploy_dir):
        sign_feed = self.d.getVar('PACKAGE_FEED_SIGN') == '1'
        feed_signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND')) if sign_feed else None

        createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
        result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir))
        if result:
            bb.fatal(result)

        # Sign the repo metadata so clients can verify the feed.
        if feed_signer:
            sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
            ascii_armor = (sig_type.upper() != "BIN")
            feed_signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'),
                                    self.d.getVar('PACKAGE_FEED_GPG_NAME'),
                                    self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
                                    armor=ascii_armor)
class RpmSubdirIndexer(RpmIndexer):
    """Index the top-level feed directory and every per-arch subdirectory."""

    def write_index(self):
        bb.note("Generating package index for %s" %(self.deploy_dir))
        # Remove the existing repodata to ensure that we re-generate it no matter what
        bb.utils.remove(os.path.join(self.deploy_dir, "repodata"), recurse=True)
        self.do_write_index(self.deploy_dir)
        for root, dirnames, _ in os.walk(self.deploy_dir):
            # Only the immediate children of the deploy dir get their own index.
            if not os.path.samefile(self.deploy_dir, root):
                continue
            for subdir in dirnames:
                if subdir == 'repodata':
                    continue
                subdir_path = oe.path.join(self.deploy_dir, subdir)
                bb.note("Generating package index for %s" %(subdir_path))
                self.do_write_index(subdir_path)
class PMPkgsList(PkgsList):
    """List installed packages by querying the rpm database via RpmPM."""

    def list_pkgs(self):
        pm = RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False)
        return pm.list_installed()
class RpmPM(PackageManager):
def __init__(self,
             d,
             target_rootfs,
             target_vendor,
             task_name='target',
             arch_var=None,
             os_var=None,
             rpm_repo_workdir="oe-rootfs-repo",
             filterbydependencies=True,
             needfeed=True):
    """Package manager backend driving dnf/rpm against *target_rootfs*.

    d             -- BitBake datastore
    target_rootfs -- root directory packages are installed into
    target_vendor -- target vendor string
    task_name     -- 'target' or 'host'; selects the primary architecture
    arch_var      -- datastore variable naming the allowed package archs
                     (defaults to ALL_MULTILIB_PACKAGE_ARCHS)
    os_var        -- unused; kept for interface compatibility
    rpm_repo_workdir     -- WORKDIR subdirectory for the local rpm repo
    filterbydependencies -- passed through to create_packages_dir
    needfeed      -- when True, set up the local rpm repository
    """
    super(RpmPM, self).__init__(d, target_rootfs)
    self.target_vendor = target_vendor
    self.task_name = task_name
    # rpm architecture names use '_' where OE uses '-'.
    # (Fixed: compare against None with 'is', not '=='.)
    if arch_var is None:
        self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-", "_")
    else:
        self.archs = self.d.getVar(arch_var).replace("-", "_")
    if task_name == "host":
        self.primary_arch = self.d.getVar('SDK_ARCH')
    else:
        self.primary_arch = self.d.getVar('MACHINE_ARCH')

    if needfeed:
        self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
        create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)

    self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
    if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
        bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
    # State directories (relative to the rootfs) that backup/recovery
    # snapshot for incremental image generation.
    self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
    self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
                                           self.task_name)
    if not os.path.exists(self.d.expand('${T}/saved')):
        bb.utils.mkdirhier(self.d.expand('${T}/saved'))
def _configure_dnf(self):
    """Seed /etc/dnf inside the target rootfs: the architecture list,
    the release version and an empty dnf.conf."""
    # libsolv handles 'noarch' internally; 'any'/'all' are dropped with it.
    arch_list = [a for a in reversed(self.archs.split())
                 if a not in ["any", "all", "noarch"]]
    # This prevents accidental matching against libsolv's built-in policies.
    if len(arch_list) <= 1:
        arch_list = arch_list + ["bogusarch"]
    # This architecture needs to be upfront so that packages using it are
    # properly prioritized.
    arch_list = ["sdk_provides_dummy_target"] + arch_list

    confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
    bb.utils.mkdirhier(confdir)
    with open(confdir + "arch", 'w') as f:
        f.write(":".join(arch_list))

    distro_codename = self.d.getVar('DISTRO_CODENAME')
    with open(confdir + "releasever", 'w') as f:
        f.write(distro_codename if distro_codename is not None else '')

    with open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w') as f:
        f.write("")
def _configure_rpm(self):
    """Write rpm's platform/rpmrc/macros config into the target rootfs
    and import the package-signing key when signing is enabled."""
    # We need to configure rpm to use our primary package architecture as the installation architecture,
    # and to make it compatible with other package architectures that we use.
    # Otherwise it will refuse to proceed with packages installation.
    platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
    rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
    bb.utils.mkdirhier(platformconfdir)
    with open(platformconfdir + "platform", 'w') as f:
        f.write("%s-pc-linux" % self.primary_arch)
    with open(rpmrcconfdir + "rpmrc", 'w') as f:
        # self.archs is a space-separated string; fall back to the
        # primary arch when it is empty.
        f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
        f.write("buildarch_compat: %s: noarch\n" % self.primary_arch)

    with open(platformconfdir + "macros", 'w') as f:
        f.write("%_transaction_color 7\n")
    if self.d.getVar('RPM_PREFER_ELF_ARCH'):
        with open(platformconfdir + "macros", 'a') as f:
            f.write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))

    if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
        # Export the signing public key and import it into the target's
        # rpm keyring via 'rpmkeys --import'.
        signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
        pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
        signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
        rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
        cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Importing GPG key failed. Command '%s' "
                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
def create_configs(self):
    """Write both the dnf and the rpm configuration into the rootfs."""
    self._configure_dnf()
    self._configure_rpm()
def write_index(self):
    """Regenerate the local rpm repository index under the feed lock."""
    lock_path = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
    lock = bb.utils.lockfile(lock_path, False)
    indexer = RpmIndexer(self.d, self.rpm_repo_dir)
    indexer.write_index()
    bb.utils.unlockfile(lock)
def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
    """Write .repo files for remote package feeds into the target rootfs.

    feed_uris       -- space-separated list of feed base URIs
    feed_base_paths -- base paths combined with each URI
    feed_archs      -- optional space-separated arch list; when given,
                       one repo section per arch subdirectory is written
    """
    from urllib.parse import urlparse

    if feed_uris == "":
        return

    gpg_opts = ''
    if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
        gpg_opts += 'repo_gpgcheck=1\n'
        gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))
    if self.d.getVar('RPM_SIGN_PACKAGES') != '1':
        gpg_opts += 'gpgcheck=0\n'

    bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
    remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
    for uri in remote_uris:
        repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
        if feed_archs is not None:
            # One section per arch, opened in append mode so all archs
            # accumulate in the same .repo file.
            for arch in feed_archs.split():
                repo_uri = uri + "/" + arch
                repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
                repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
                with open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a') as f:
                    f.write("[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
        else:
            repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
            repo_uri = uri
            with open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w') as f:
                f.write("[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
def _prepare_pkg_transaction(self):
os.environ['D'] = self.target_rootfs
os.environ['OFFLINE_ROOT'] = self.target_rootfs
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
os.environ['INTERCEPT_DIR'] = self.intercepts_dir
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
def install(self, pkgs, attempt_only=False, hard_depends_only=False):
    """Install *pkgs* into the target rootfs via dnf.

    attempt_only      -- pass --skip-broken so unresolvable packages are
                         skipped instead of failing the transaction
    hard_depends_only -- disable weak dependencies (install_weak_deps)
    Aborts the build if any POSTIN scriptlet fails.
    """
    if len(pkgs) == 0:
        return
    self._prepare_pkg_transaction()

    bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
    package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
    exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])

    output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
                              (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
                              (["--setopt=install_weak_deps=False"] if (hard_depends_only or self.d.getVar('NO_RECOMMENDATIONS') == "1") else []) +
                              (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
                              ["install"] +
                              pkgs)
    # dnf reports scriptlet failures in its output rather than via the
    # exit code, so scan the output for them explicitly.
    failed_scriptlets_pkgnames = collections.OrderedDict()
    for line in output.splitlines():
        if line.startswith("Error: Systemctl"):
            bb.error(line)

        if line.startswith("Error in POSTIN scriptlet in rpm package"):
            failed_scriptlets_pkgnames[line.split()[-1]] = True

    if len(failed_scriptlets_pkgnames) > 0:
        failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
def remove(self, pkgs, with_dependencies=True):
    """Remove *pkgs*: via dnf when dependent packages should go too,
    otherwise via a bare 'rpm -e --nodeps'."""
    if not pkgs:
        return

    self._prepare_pkg_transaction()

    if with_dependencies:
        self._invoke_dnf(["remove"] + pkgs)
        return

    rpm_bin = bb.utils.which(os.getenv('PATH'), "rpm")
    rpm_args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs]
    try:
        bb.note("Running %s" % ' '.join([rpm_bin] + rpm_args + pkgs))
        removal_output = subprocess.check_output([rpm_bin] + rpm_args + pkgs,
                                                 stderr=subprocess.STDOUT).decode("utf-8")
        bb.note(removal_output)
    except subprocess.CalledProcessError as e:
        bb.fatal("Could not invoke rpm. Command "
                 "'%s' returned %d:\n%s" % (' '.join([rpm_bin] + rpm_args + pkgs), e.returncode, e.output.decode("utf-8")))
def upgrade(self):
    """Upgrade every installed package via dnf."""
    self._prepare_pkg_transaction()
    self._invoke_dnf(["upgrade"])
def autoremove(self):
    """Remove packages that were only installed as dependencies."""
    self._prepare_pkg_transaction()
    self._invoke_dnf(["autoremove"])
def remove_packaging_data(self):
    """Purge dnf caches and all rpm/dnf state directories from the rootfs."""
    self._invoke_dnf(["clean", "all"])
    for state_dir in self.packaging_data_dirs:
        bb.utils.remove(oe.path.join(self.target_rootfs, state_dir), True)
def backup_packaging_data(self):
    """Snapshot the rpm/dnf state dirs for incremental rpm image generation."""
    # Drop any stale snapshot first.
    if os.path.exists(self.saved_packaging_data):
        bb.utils.remove(self.saved_packaging_data, True)
    for entry in self.packaging_data_dirs:
        src = oe.path.join(self.target_rootfs, entry)
        dst = oe.path.join(self.saved_packaging_data, entry)
        if os.path.isdir(src):
            shutil.copytree(src, dst, symlinks=True)
        elif os.path.isfile(src):
            shutil.copy2(src, dst)
def recovery_packaging_data(self):
    """Restore the rpm/dnf state dirs saved by backup_packaging_data()."""
    if not os.path.exists(self.saved_packaging_data):
        return
    for entry in self.packaging_data_dirs:
        dst = oe.path.join(self.target_rootfs, entry)
        if os.path.exists(dst):
            bb.utils.remove(dst, True)

        src = oe.path.join(self.saved_packaging_data, entry)
        if os.path.isdir(src):
            shutil.copytree(src, dst, symlinks=True)
        elif os.path.isfile(src):
            shutil.copy2(src, dst)
def list_installed(self):
    """Query the rpm database and return installed packages as
    {name: {"arch": ..., "ver": ..., "filename": ..., "deps": [...]}}.

    Recommendations are folded into "deps" tagged with ' [REC]'.
    """
    output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
                              print_output = False)
    packages = {}
    current_package = None
    current_deps = None
    # Line-oriented state machine over the custom queryformat output.
    current_state = "initial"
    for line in output.splitlines():
        if line.startswith("Package:"):
            package_info = line.split(" ")[1:]
            current_package = package_info[0]
            package_arch = package_info[1]
            package_version = package_info[2]
            package_rpm = package_info[3]
            packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm}
            current_deps = []
        elif line.startswith("Dependencies:"):
            current_state = "dependencies"
        elif line.startswith("Recommendations"):
            current_state = "recommendations"
        elif line.startswith("DependenciesEndHere:"):
            # End of a package record: commit the accumulated deps.
            current_state = "initial"
            packages[current_package]["deps"] = current_deps
        elif len(line) > 0:
            if current_state == "dependencies":
                current_deps.append(line)
            elif current_state == "recommendations":
                current_deps.append("%s [REC]" % line)

    return packages
def update(self):
    """Force-refresh dnf's metadata cache for all configured repos."""
    self._invoke_dnf(["makecache", "--refresh"])
def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
    """Run dnf against the target rootfs and return its combined output.

    dnf_args     -- arguments appended after the standard option set
    fatal        -- on failure, abort the build (bb.fatal) instead of
                    just logging a note
    print_output -- include dnf's output in the log messages
    On failure (when fatal=False) the captured output is still returned.
    """
    os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs

    dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
    standard_dnf_args = ["-v", "--rpmverbosity=info", "-y",
                         "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
                         "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
                         "--installroot=%s" % (self.target_rootfs),
                         "--setopt=logdir=%s" % (self.d.getVar('T'))
                        ]
    # rpm_repo_dir is only set when the local feed was created
    # (needfeed=True in __init__).
    if hasattr(self, "rpm_repo_dir"):
        standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir))
    cmd = [dnf_cmd] + standard_dnf_args + dnf_args
    bb.note('Running %s' % ' '.join(cmd))
    try:
        output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
        if print_output:
            bb.debug(1, output)
        return output
    except subprocess.CalledProcessError as e:
        # Index selects bb.fatal when fatal=True, bb.note otherwise.
        if print_output:
            (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
                 "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
        else:
            (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
                 "'%s' returned %d:" % (' '.join(cmd), e.returncode))
        return e.output.decode("utf-8")
def dump_install_solution(self, pkgs):
    """Persist *pkgs* (space separated) to the solution manifest and
    hand the list back unchanged."""
    with open(self.solution_manifest, 'w') as manifest:
        manifest.write(" ".join(pkgs))
    return pkgs
def load_old_install_solution(self):
    """Return the package list saved by dump_install_solution(), or []
    when no solution manifest exists yet."""
    if not os.path.exists(self.solution_manifest):
        return []
    with open(self.solution_manifest, 'r') as manifest:
        return manifest.read().split()
def _script_num_prefix(self, path):
    """Return the next free numeric prefix for a saved postinstall script.

    Scripts saved in *path* are named "<num>-<pkg>".  The result is one
    greater than the highest prefix found; 99 seeds the set so newly saved
    scriptlets always sort after the default ones (result is >= 100).
    """
    numbers = set()
    numbers.add(99)
    for f in os.listdir(path):
        # Fix: the original called int() unconditionally and raised
        # ValueError on any file without a numeric prefix (e.g. a stray
        # editor backup); skip such entries instead.
        prefix = f.split("-")[0]
        if prefix.isdigit():
            numbers.add(int(prefix))
    return max(numbers) + 1
def save_rpmpostinst(self, pkg):
    """Query *pkg*'s %post scriptlet from the rpm database of the target
    rootfs and save it, executable, under ${sysconfdir}/rpm-postinsts so
    it can be replayed later (e.g. on first boot)."""
    bb.note("Saving postinstall script of %s" % (pkg))
    cmd = bb.utils.which(os.getenv('PATH'), "rpm")
    args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]

    try:
        output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8")
    except subprocess.CalledProcessError as e:
        bb.fatal("Could not invoke rpm. Command "
             "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))

    # may need to prepend #!/bin/sh to output

    target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
    bb.utils.mkdirhier(target_path)
    # scripts carry a numeric prefix so they replay in the order saved
    num = self._script_num_prefix(target_path)
    saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
    with open(saved_script_name, 'w') as f:
        f.write(output)
    os.chmod(saved_script_name, 0o755)
def _handle_intercept_failure(self, registered_pkgs):
    """Fallback for failed postinstall intercepts: keep every affected
    package's %post scriptlet in ${sysconfdir}/rpm-postinsts for later
    (e.g. first-boot) execution."""
    postinst_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
    bb.utils.mkdirhier(postinst_dir)
    for pkg in registered_pkgs.split():
        self.save_rpmpostinst(pkg)
def extract(self, pkg):
    """Locate *pkg* via "dnf repoquery --location" and unpack its contents
    (rpm2cpio | cpio) into a fresh temporary directory, whose path is
    returned.  The caller is responsible for removing that directory."""
    # the package URL is the last line of the (verbose) repoquery output
    output = self._invoke_dnf(["repoquery", "--location", pkg])
    pkg_name = output.splitlines()[-1]
    if not pkg_name.endswith(".rpm"):
        bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
    # Strip file: prefix
    pkg_path = pkg_name[5:]

    cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
    rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")

    if not os.path.isfile(pkg_path):
        bb.fatal("Unable to extract package for '%s'."
                 "File %s doesn't exists" % (pkg, pkg_path))

    tmp_dir = tempfile.mkdtemp()
    current_dir = os.getcwd()
    # cpio extracts into the current working directory
    os.chdir(tmp_dir)

    try:
        cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd)
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        # clean up the temp dir before aborting
        bb.utils.remove(tmp_dir, recurse=True)
        bb.fatal("Unable to extract %s package. Command '%s' "
                 "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
    except OSError as e:
        bb.utils.remove(tmp_dir, recurse=True)
        bb.fatal("Unable to extract %s package. Command '%s' "
                 "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))

    bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
    os.chdir(current_dir)

    return tmp_dir

View File

@@ -0,0 +1,56 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from oe.manifest import Manifest
class PkgManifest(Manifest):
    """
    Returns a dictionary object with mip and mlp packages.
    """
    def _split_multilib(self, pkg_list):
        """Split *pkg_list* into must-install and multilib package strings,
        keyed by package type (values are space-separated names)."""
        pkgs = dict()
        for pkg in pkg_list.split():
            ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
            if any(pkg.startswith(variant + '-') for variant in ml_variants):
                pkg_type = self.PKG_TYPE_MULTILIB
            else:
                pkg_type = self.PKG_TYPE_MUST_INSTALL
            if pkg_type in pkgs:
                pkgs[pkg_type] += " " + pkg
            else:
                pkgs[pkg_type] = pkg
        return pkgs

    def create_initial(self):
        """Write the initial manifest: header plus one "<type>,<pkg>" line
        per package, collected from the variables in var_maps."""
        pkgs = dict()
        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)
            for var in self.var_maps[self.manifest_type]:
                if var in self.vars_to_split:
                    split_pkgs = self._split_multilib(self.d.getVar(var))
                    if split_pkgs is not None:
                        pkgs.update(split_pkgs)
                else:
                    pkg_list = self.d.getVar(var)
                    if pkg_list is not None:
                        pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
            for pkg_type, pkg_names in pkgs.items():
                for pkg in pkg_names.split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    def create_final(self):
        """Final manifests are not used by the rpm backend."""
        pass

    def create_full(self, pm):
        """Full manifests are not used by the rpm backend."""
        pass

View File

@@ -0,0 +1,150 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from oe.rootfs import Rootfs
from oe.manifest import Manifest
from oe.utils import execute_pre_post_process
from oe.package_manager.rpm.manifest import PkgManifest
from oe.package_manager.rpm import RpmPM
class PkgRootfs(Rootfs):
    """Rootfs construction backend for rpm/dnf-based images."""
    def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
        super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
        # Patterns that mark a package-manager log line as an error.
        self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\
                               r'|exit 1|ERROR: |Error: |Error |ERROR '\
                               r'|Failed |Failed: |Failed$|Failed\(\d+\):)'
        self.manifest = PkgManifest(d, manifest_dir)
        self.pm = RpmPM(d,
                        d.getVar('IMAGE_ROOTFS'),
                        self.d.getVar('TARGET_VENDOR')
                        )
        # INC_RPM_IMAGE_GEN == "1" enables incremental image generation:
        # reuse the previous rootfs and restore saved packaging data
        # instead of starting from an empty tree.
        self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
        if self.inc_rpm_image_gen != "1":
            bb.utils.remove(self.image_rootfs, True)
        else:
            self.pm.recovery_packaging_data()
        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)

        self.pm.create_configs()

    '''
    While rpm incremental image generation is enabled, it will remove the
    unneeded pkgs by comparing the new install solution manifest and the
    old installed manifest.
    '''
    def _create_incremental(self, pkgs_initial_install):
        # No-op unless incremental mode is on: upgrade in place, then remove
        # packages that dropped out of the new install solution.
        if self.inc_rpm_image_gen == "1":

            pkgs_to_install = list()
            for pkg_type in pkgs_initial_install:
                pkgs_to_install += pkgs_initial_install[pkg_type]

            installed_manifest = self.pm.load_old_install_solution()
            solution_manifest = self.pm.dump_install_solution(pkgs_to_install)

            pkg_to_remove = list()
            for pkg in installed_manifest:
                if pkg not in solution_manifest:
                    pkg_to_remove.append(pkg)

            self.pm.update()

            bb.note('incremental update -- upgrade packages in place ')
            self.pm.upgrade()
            if pkg_to_remove != []:
                bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
                self.pm.remove(pkg_to_remove)

            self.pm.autoremove()

    def _create(self):
        """Install the initial manifest into the rootfs, running the
        RPM_{PRE,POST}PROCESS_COMMANDS hooks around the installation."""
        pkgs_to_install = self.manifest.parse_initial_manifest()
        rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
        rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')

        # update PM index files
        self.pm.write_index()

        execute_pre_post_process(self.d, rpm_pre_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        if self.inc_rpm_image_gen == "1":
            self._create_incremental(pkgs_to_install)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self.pm.update()

        # Separate best-effort (ATTEMPT_ONLY) packages from required ones.
        pkgs = []
        pkgs_attempt = []
        for pkg_type in pkgs_to_install:
            if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
                pkgs_attempt += pkgs_to_install[pkg_type]
            else:
                pkgs += pkgs_to_install[pkg_type]

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self.pm.install(pkgs)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        # second install pass with attempt_only=True: failures are tolerated
        self.pm.install(pkgs_attempt, True)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self.pm.install_complementary()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self._setup_dbg_rootfs(['/etc/rpm', '/etc/rpmrc', '/etc/dnf', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])

        execute_pre_post_process(self.d, rpm_post_process_cmds)

        if self.inc_rpm_image_gen == "1":
            # preserve packaging data for the next incremental run
            self.pm.backup_packaging_data()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

    @staticmethod
    def _depends_list():
        # Variables this backend's output depends on.
        return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
                'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']

    def _get_delayed_postinsts(self):
        # Scriptlets deferred to first boot are saved under
        # ${sysconfdir}/rpm-postinsts by the package manager.
        postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
        if os.path.isdir(postinst_dir):
            files = os.listdir(postinst_dir)
            for f in files:
                bb.note('Delayed package scriptlet: %s' % f)
            return files

        return None

    def _save_postinsts(self):
        # this is just a stub. For RPM, the failed postinstalls are
        # already saved in /etc/rpm-postinsts
        pass

    def _log_check(self):
        self._log_check_warn()
        self._log_check_error()

    def _cleanup(self):
        # Drop dnf caches when the finished image keeps its package manager.
        if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d):
            self.pm._invoke_dnf(["clean", "all"])

View File

@@ -0,0 +1,122 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import glob
from oe.utils import execute_pre_post_process
from oe.sdk import Sdk
from oe.manifest import Manifest
from oe.package_manager.rpm.manifest import PkgManifest
from oe.package_manager.rpm import RpmPM
class PkgSdk(Sdk):
    """SDK population backend for rpm/dnf-based builds: installs the target
    and host (nativesdk) sysroots and relocates the host rpm/dnf state."""
    def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"):
        super(PkgSdk, self).__init__(d, manifest_dir)

        self.target_manifest = PkgManifest(d, self.manifest_dir,
                                           Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = PkgManifest(d, self.manifest_dir,
                                         Manifest.MANIFEST_TYPE_SDK_HOST)
        # NOTE(review): the rpm_workdir parameter appears unused — this local
        # default shadows it and only BB_RUNTASK picks the repo dir; confirm.
        rpm_repo_workdir = "oe-sdk-repo"
        if "sdk_ext" in d.getVar("BB_RUNTASK"):
            rpm_repo_workdir = "oe-sdk-ext-repo"

        self.target_pm = RpmPM(d,
                               self.sdk_target_sysroot,
                               self.d.getVar('TARGET_VENDOR'),
                               'target',
                               rpm_repo_workdir=rpm_repo_workdir
                               )

        self.host_pm = RpmPM(d,
                             self.sdk_host_sysroot,
                             self.d.getVar('SDK_VENDOR'),
                             'host',
                             "SDK_PACKAGE_ARCHS",
                             "SDK_OS",
                             rpm_repo_workdir=rpm_repo_workdir
                             )

    def _populate_sysroot(self, pm, manifest):
        # Install everything from *manifest* into *pm*'s root: required
        # packages first, then best-effort (attempt-only) ones.
        pkgs_to_install = manifest.parse_initial_manifest()

        pm.create_configs()
        pm.write_index()
        pm.update()

        pkgs = []
        pkgs_attempt = []
        for pkg_type in pkgs_to_install:
            if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
                pkgs_attempt += pkgs_to_install[pkg_type]
            else:
                pkgs += pkgs_to_install[pkg_type]

        pm.install(pkgs)

        pm.install(pkgs_attempt, True)

    def _populate(self):
        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))

        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))

        # Run the postinst intercepts with the nativesdk wrapper scripts
        # first in PATH, then restore the saved environment.
        env_bkp = os.environ.copy()
        os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
                             os.pathsep + os.environ["PATH"]

        self.target_pm.run_intercepts(populate_sdk='target')
        os.environ.update(env_bkp)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))

        if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
            self.target_pm.remove_packaging_data()

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)
        self.install_locales(self.host_pm)

        self.host_pm.run_intercepts(populate_sdk='host')

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))

        if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
            self.host_pm.remove_packaging_data()

        # Move host RPM library data
        native_rpm_state_dir = os.path.join(self.sdk_output,
                                            self.sdk_native_path,
                                            self.d.getVar('localstatedir_nativesdk').strip('/'),
                                            "lib",
                                            "rpm"
                                            )
        self.mkdirhier(native_rpm_state_dir)
        for f in glob.glob(os.path.join(self.sdk_output,
                                        "var",
                                        "lib",
                                        "rpm",
                                        "*")):
            self.movefile(f, native_rpm_state_dir)

        self.remove(os.path.join(self.sdk_output, "var"), True)

        # Move host sysconfig data
        native_sysconf_dir = os.path.join(self.sdk_output,
                                          self.sdk_native_path,
                                          self.d.getVar('sysconfdir',
                                                        True).strip('/'),
                                          )
        self.mkdirhier(native_sysconf_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
            self.movefile(f, native_sysconf_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
            self.mkdirhier(native_sysconf_dir + "/dnf")
            self.movefile(f, native_sysconf_dir + "/dnf")
        self.remove(os.path.join(self.sdk_output, "etc"), True)

View File

@@ -0,0 +1,366 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import codecs
import os
import json
import bb.compress.zstd
import oe.path
from glob import glob
def packaged(pkg, d):
    """Return True when *pkg* has been packaged (its ".packaged" stamp
    exists next to the runtime pkgdata file and is readable)."""
    stamp = get_subpkgedata_fn(pkg, d) + '.packaged'
    return os.access(stamp, os.R_OK)
def read_pkgdatafile(fn):
    """Parse a pkgdata file of "KEY: value" lines into a dict.

    Values are unicode-escape decoded; a missing or unreadable file
    yields an empty dict.  Lines that do not match the pattern are
    silently skipped.
    """
    pkgdata = {}
    if not os.access(fn, os.R_OK):
        return pkgdata
    import re
    decoder = codecs.getdecoder("unicode_escape")
    line_re = re.compile(r"(^.+?):\s+(.*)")
    with open(fn, 'r') as f:
        for line in f:
            match = line_re.match(line)
            if match:
                pkgdata[match.group(1)] = decoder(match.group(2))[0]
    return pkgdata
def get_subpkgedata_fn(pkg, d):
    """Return the path of the runtime pkgdata file for *pkg*."""
    return d.expand('${PKGDATA_DIR}/runtime/' + pkg)
def has_subpkgdata(pkg, d):
    """Return True when a readable runtime pkgdata file exists for *pkg*."""
    pkgdata_file = get_subpkgedata_fn(pkg, d)
    return os.access(pkgdata_file, os.R_OK)
def read_subpkgdata(pkg, d):
    """Parse and return the runtime pkgdata of *pkg* as a dict."""
    pkgdata_file = get_subpkgedata_fn(pkg, d)
    return read_pkgdatafile(pkgdata_file)
def has_pkgdata(pn, d):
    """Return True when recipe *pn* has a readable top-level pkgdata file."""
    pkgdata_file = d.expand('${PKGDATA_DIR}/%s' % pn)
    return os.access(pkgdata_file, os.R_OK)
def read_pkgdata(pn, d):
    """Parse and return the top-level pkgdata of recipe *pn* as a dict."""
    pkgdata_file = d.expand('${PKGDATA_DIR}/%s' % pn)
    return read_pkgdatafile(pkgdata_file)
#
# Collapse FOO:pkg variables into FOO
#
def read_subpkgdata_dict(pkg, d):
    """Return *pkg*'s runtime pkgdata with the ":<pkg>" suffix collapsed
    out of the variable names (FOO:pkg -> FOO)."""
    collapsed = {}
    data = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
    suffix = ":" + pkg
    for var in data:
        stripped = var.replace(suffix, "")
        # when both FOO and FOO:pkg exist, the package-specific one wins
        if stripped == var and var + suffix in data:
            continue
        collapsed[stripped] = data[var]
    return collapsed
def read_subpkgdata_extended(pkg, d):
    """Return the extended (zstd-compressed JSON) pkgdata for *pkg*,
    or None when no extended pkgdata file exists.

    Decompression parallelism follows BB_NUMBER_THREADS.
    """
    # json and bb.compress.zstd are already imported at module level;
    # the redundant function-local re-imports were removed.
    fn = d.expand("${PKGDATA_DIR}/extended/%s.json.zstd" % pkg)
    try:
        num_threads = int(d.getVar("BB_NUMBER_THREADS"))
        with bb.compress.zstd.open(fn, "rt", encoding="utf-8", num_threads=num_threads) as f:
            return json.load(f)
    except FileNotFoundError:
        return None
def _pkgmap(d):
    """Return a dictionary mapping package to recipe name."""
    mapping = {}
    pkgdatadir = d.getVar("PKGDATA_DIR")
    try:
        entries = os.listdir(pkgdatadir)
    except OSError:
        bb.warn("No files in %s?" % pkgdatadir)
        entries = []

    # only top-level regular files are per-recipe pkgdata files
    recipe_files = (e for e in entries
                    if not os.path.isdir(os.path.join(pkgdatadir, e)))
    for pn in recipe_files:
        try:
            pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
        except OSError:
            continue
        for pkg in (pkgdata.get("PACKAGES") or "").split():
            mapping[pkg] = pn

    return mapping
def pkgmap(d):
    """Return a dictionary mapping package to recipe name.
    Cache the mapping in the metadata"""
    cached = d.getVar("__pkgmap_data", False)
    if cached is None:
        cached = _pkgmap(d)
        d.setVar("__pkgmap_data", cached)
    return cached
def recipename(pkg, d):
    """Return the recipe name for the given binary package name."""
    mapping = pkgmap(d)
    return mapping.get(pkg)
def foreach_runtime_provider_pkgdata(d, rdep, include_rdep=False):
    """Yield (provider, pkgdata) for every package that runtime-provides
    *rdep*, in sorted order; optionally consider *rdep* itself too."""
    pkgdata_dir = d.getVar("PKGDATA_DIR")
    candidates = set()
    try:
        candidates.update(os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdep)))
    except OSError:
        # no rprovides directory for this dependency
        pass

    if include_rdep:
        candidates.add(rdep)

    for provider in sorted(candidates):
        yield provider, read_subpkgdata(provider, d)
def get_package_mapping(pkg, basepkg, d, depversions=None):
    """Map *pkg* to its final (possibly renamed) package name as recorded
    in pkgdata (the PKG:<pkg> entry); return *pkg* unchanged when no
    rename applies.

    depversions is the version-constraint list of the dependency being
    rewritten; an empty list enables the RPROVIDES short-circuit below.
    """
    import oe.packagedata

    data = oe.packagedata.read_subpkgdata(pkg, d)
    key = "PKG:%s" % pkg

    if key in data:
        if bb.data.inherits_class('allarch', d) and bb.data.inherits_class('packagegroup', d) and pkg != data[key]:
            bb.error("An allarch packagegroup shouldn't depend on packages which are dynamically renamed (%s to %s)" % (pkg, data[key]))
        # Have to avoid undoing the write_extra_pkgs(global_variants...)
        if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
            and data[key] == basepkg:
            return pkg

        if depversions == []:
            # Avoid returning a mapping if the renamed package rprovides its original name
            rprovkey = "RPROVIDES:%s" % pkg
            if rprovkey in data:
                if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
                    bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
                    return pkg

        # Do map to rewritten package name
        return data[key]

    return pkg
def get_package_additional_metadata(pkg_type, d):
    """Return extra metadata lines for packages of *pkg_type* (e.g. "rpm"),
    read from PACKAGE_ADD_METADATA_<TYPE> with a fallback to plain
    PACKAGE_ADD_METADATA.  Returns None when neither variable is set."""
    base_key = "PACKAGE_ADD_METADATA"
    for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key):
        if d.getVar(key, False) is None:
            continue
        # interpret the variable as a list, split on "\n" unless a
        # separator flag was already configured
        d.setVarFlag(key, "type", "list")
        if d.getVarFlag(key, "separator") is None:
            d.setVarFlag(key, "separator", "\\n")
        metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
        return "\n".join(metadata_fields).strip()
def runtime_mapping_rename(varname, pkg, d):
    """Rewrite every dependency in d[varname] through get_package_mapping()
    so renamed packages (e.g. via debian.bbclass) are referenced by their
    final names."""
    deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
    renamed = {}
    for dep, versions in deps.items():
        mapped = get_package_mapping(dep, pkg, d, versions)
        if mapped != dep:
            bb.note("package name mapping done: %s -> %s" % (dep, mapped))
        renamed[mapped] = versions
    d.setVar(varname, bb.utils.join_deps(renamed, commasep=False))
def emit_pkgdata(pkgfiles, d):
    """Write out pkgdata for every package of the current recipe:
    ${PKGDESTWORK}/<pn>, per-package runtime files, zstd-compressed
    extended data, and the rprovides / reverse-lookup symlinks used by
    other tasks.  pkgfiles maps package name -> list of shipped files."""
    def process_postinst_on_target(pkg, mlprefix):
        # Fold pkg_postinst_ontarget into pkg_postinst, prefixed by a shell
        # fragment that defers execution to first boot when run at rootfs
        # time ($D set).
        pkgval = d.getVar('PKG:%s' % pkg)
        if pkgval is None:
            pkgval = pkg

        defer_fragment = """
if [ -n "$D" ]; then
    $INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
    exit 0
fi
""" % (pkgval, mlprefix)

        postinst = d.getVar('pkg_postinst:%s' % pkg)
        postinst_ontarget = d.getVar('pkg_postinst_ontarget:%s' % pkg)

        if postinst_ontarget:
            bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
            if not postinst:
                postinst = '#!/bin/sh\n'
            postinst += defer_fragment
            postinst += postinst_ontarget
            d.setVar('pkg_postinst:%s' % pkg, postinst)

    def add_set_e_to_scriptlets(pkg):
        # Make every scriptlet abort on the first failing command by
        # injecting "set -e" (after the shebang when one is present).
        for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
            scriptlet = d.getVar('%s:%s' % (scriptlet_name, pkg))
            if scriptlet:
                scriptlet_split = scriptlet.split('\n')
                if scriptlet_split[0].startswith("#!"):
                    scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
                else:
                    scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
            d.setVar('%s:%s' % (scriptlet_name, pkg), scriptlet)

    def write_if_exists(f, pkg, var):
        # Emit "VAR:pkg: value" (or plain "VAR: value" as fallback) and
        # return the value that was found, if any.
        def encode(str):
            import codecs
            c = codecs.getencoder("unicode_escape")
            return c(str)[0].decode("latin1")

        val = d.getVar('%s:%s' % (var, pkg))
        if val:
            f.write('%s:%s: %s\n' % (var, pkg, encode(val)))
            return val
        val = d.getVar('%s' % (var))
        if val:
            f.write('%s: %s\n' % (var, encode(val)))
        return val

    def write_extra_pkgs(variants, pn, packages, pkgdatadir):
        # Record variant-prefixed PACKAGES lists for this recipe.
        for variant in variants:
            with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
                fd.write("PACKAGES: %s\n" % ' '.join(
                    map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))

    def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
        # Record the PKG mapping for each variant-prefixed package name.
        for variant in variants:
            for pkg in packages.split():
                ml_pkg = "%s-%s" % (variant, pkg)
                subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
                with open(subdata_file, 'w') as fd:
                    fd.write("PKG:%s: %s" % (ml_pkg, pkg))

    packages = d.getVar('PACKAGES')
    pkgdest = d.getVar('PKGDEST')
    pkgdatadir = d.getVar('PKGDESTWORK')
    data_file = pkgdatadir + d.expand("/${PN}")
    with open(data_file, 'w') as fd:
        fd.write("PACKAGES: %s\n" % packages)

    pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []

    pn = d.getVar('PN')
    global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
    variants = (d.getVar('MULTILIB_VARIANTS') or "").split()

    if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
        write_extra_pkgs(variants, pn, packages, pkgdatadir)

    if bb.data.inherits_class('allarch', d) and not variants \
        and not bb.data.inherits_class('packagegroup', d):
        write_extra_pkgs(global_variants, pn, packages, pkgdatadir)

    workdir = d.getVar('WORKDIR')

    for pkg in packages.split():
        pkgval = d.getVar('PKG:%s' % pkg)
        if pkgval is None:
            pkgval = pkg
            d.setVar('PKG:%s' % pkg, pkg)

        extended_data = {
            "files_info": {}
        }

        pkgdestpkg = os.path.join(pkgdest, pkg)
        files = {}
        files_extra = {}
        total_size = 0
        seen = set()
        for f in pkgfiles[pkg]:
            fpath = os.sep + os.path.relpath(f, pkgdestpkg)

            fstat = os.lstat(f)
            files[fpath] = fstat.st_size

            extended_data["files_info"].setdefault(fpath, {})
            extended_data["files_info"][fpath]['size'] = fstat.st_size

            # count hardlinked content only once towards PKGSIZE
            if fstat.st_ino not in seen:
                seen.add(fstat.st_ino)
                total_size += fstat.st_size

            if fpath in pkgdebugsource:
                extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
                del pkgdebugsource[fpath]

        d.setVar('FILES_INFO:' + pkg , json.dumps(files, sort_keys=True))

        process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
        add_set_e_to_scriptlets(pkg)

        subdata_file = pkgdatadir + "/runtime/%s" % pkg
        with open(subdata_file, 'w') as sf:
            for var in (d.getVar('PKGDATA_VARS') or "").split():
                val = write_if_exists(sf, pkg, var)

            write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
            for dfile in sorted((d.getVar('FILERPROVIDESFLIST:' + pkg) or "").split()):
                write_if_exists(sf, pkg, 'FILERPROVIDES:' + dfile)

            write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
            for dfile in sorted((d.getVar('FILERDEPENDSFLIST:' + pkg) or "").split()):
                write_if_exists(sf, pkg, 'FILERDEPENDS:' + dfile)

            sf.write('%s:%s: %d\n' % ('PKGSIZE', pkg, total_size))

        subdata_extended_file = pkgdatadir + "/extended/%s.json.zstd" % pkg
        num_threads = int(d.getVar("BB_NUMBER_THREADS"))
        with bb.compress.zstd.open(subdata_extended_file, "wt", encoding="utf-8", num_threads=num_threads) as f:
            json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))

        # Symlinks needed for rprovides lookup
        rprov = d.getVar('RPROVIDES:%s' % pkg) or d.getVar('RPROVIDES')
        if rprov:
            for p in bb.utils.explode_deps(rprov):
                subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
                bb.utils.mkdirhier(os.path.dirname(subdata_sym))
                oe.path.relsymlink(subdata_file, subdata_sym, True)

        allow_empty = d.getVar('ALLOW_EMPTY:%s' % pkg)
        if not allow_empty:
            allow_empty = d.getVar('ALLOW_EMPTY')
        root = "%s/%s" % (pkgdest, pkg)
        os.chdir(root)
        g = glob('*')
        # only non-empty (or explicitly ALLOW_EMPTY) packages get the
        # reverse-lookup symlink and the ".packaged" stamp
        if g or allow_empty == "1":
            # Symlinks needed for reverse lookups (from the final package name)
            subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
            oe.path.relsymlink(subdata_file, subdata_sym, True)

            packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
            open(packagedfile, 'w').close()

    if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
        write_extra_runtime_pkgs(variants, packages, pkgdatadir)

    if bb.data.inherits_class('allarch', d) and not variants \
        and not bb.data.inherits_class('packagegroup', d):
        write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
def mapping_rename_hook(d):
    """
    Rewrite variables to account for package renaming in things
    like debian.bbclass or manual PKG variable name changes
    """
    pkg = d.getVar("PKG")
    for varname in ("RDEPENDS", "RRECOMMENDS", "RSUGGESTS"):
        oe.packagedata.runtime_mapping_rename(varname, pkg, d)

View File

@@ -0,0 +1,36 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import itertools
def is_optional(feature, d):
    """Return True when FEATURE_PACKAGES_<feature> carries the "optional" flag."""
    flag = d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional")
    return bool(flag)
def packages(features, d):
    """Yield each package listed in FEATURE_PACKAGES_<feature> for the
    given features, in order."""
    for feature in features:
        feature_pkgs = d.getVar("FEATURE_PACKAGES_%s" % feature) or ""
        yield from feature_pkgs.split()
def required_packages(features, d):
    """Return an iterator over the packages of all non-optional features."""
    required = [feature for feature in features if not is_optional(feature, d)]
    return packages(required, d)
def optional_packages(features, d):
    """Return an iterator over the packages of all optional features."""
    optional = [feature for feature in features if is_optional(feature, d)]
    return packages(optional, d)
def active_packages(features, d):
    """Chain required then optional feature packages into one iterator."""
    parts = (required_packages(features, d), optional_packages(features, d))
    return itertools.chain(*parts)
def active_recipes(features, d):
    """Yield the recipe name of every active feature package that has one."""
    import oe.packagedata

    for pkg in active_packages(features, d):
        pn = oe.packagedata.recipename(pkg, d)
        if pn:
            yield pn

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,349 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import errno
import glob
import shutil
import subprocess
import os.path
def join(*paths):
    """Like os.path.join but doesn't treat absolute RHS specially"""
    joined = "/".join(paths)
    return os.path.normpath(joined)
def relative(src, dest):
    """Return a relative path from src to dest.

    >>> relative("/usr/bin", "/tmp/foo/bar")
    '../../tmp/foo/bar'
    >>> relative("/usr/bin", "/usr/lib")
    '../lib'
    >>> relative("/tmp", "/tmp/foo/bar")
    'foo/bar'
    """
    return os.path.relpath(dest, src)
def make_relative_symlink(path):
    """ Convert an absolute symlink to a relative one """
    if not os.path.islink(path):
        return
    link = os.readlink(path)
    if not os.path.isabs(link):
        return

    # find the common ancestor directory
    ancestor = path
    depth = 0
    while ancestor and not link.startswith(ancestor):
        # walk up one directory level per iteration
        ancestor = ancestor.rpartition('/')[0]
        depth += 1

    if not ancestor:
        print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
        return

    base = link.partition(ancestor)[2].strip('/')
    # depth includes the link's own basename, hence "> 1" rather than "> 0"
    while depth > 1:
        base = "../" + base
        depth -= 1

    # replace the absolute link in place
    os.remove(path)
    os.symlink(base, path)
def replace_absolute_symlinks(basedir, d):
    """
    Walk basedir looking for absolute symlinks and replacing them with relative ones.
    The absolute links are assumed to be relative to basedir
    (compared to make_relative_symlink above which tries to compute common ancestors
    using pattern matching instead)
    """
    for walkroot, dirs, files in os.walk(basedir):
        for file in files + dirs:
            path = os.path.join(walkroot, file)
            if not os.path.islink(path):
                continue
            link = os.readlink(path)
            if not os.path.isabs(link):
                continue
            # directory containing the link, expressed relative to basedir
            walkdir = os.path.dirname(path.rpartition(basedir)[2])
            base = os.path.relpath(link, walkdir)
            bb.debug(2, "Replacing absolute path %s with relative path %s" % (link, base))
            os.remove(path)
            os.symlink(base, path)
def format_display(path, metadata):
    """ Prepare a path for display to the user. """
    # Show the path relative to TOPDIR only when that form is not longer.
    rel = os.path.relpath(path, metadata.getVar("TOPDIR"))
    return rel if len(rel) <= len(path) else path
def copytree(src, dst):
    """Copy the tree at *src* into *dst* (created if necessary) via a tar
    pipe, preserving permissions, sparseness, xattrs and intra-tree
    hardlinks."""
    # We could use something like shutil.copytree here but it turns out to
    # to be slow. It takes twice as long copying to an empty directory.
    # If dst already has contents performance can be 15 time slower
    # This way we also preserve hardlinks between files in the tree.

    bb.utils.mkdirhier(dst)
    cmd = "tar --xattrs --xattrs-include='*' -cf - -S -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
    subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
def copyhardlinktree(src, dst):
    """Make a tree of hard links when possible, otherwise copy."""
    bb.utils.mkdirhier(dst)
    if os.path.isdir(src) and not len(os.listdir(src)):
        # nothing to copy
        return

    # Probe whether hardlinking from src to dst actually works; it fails
    # e.g. across filesystem boundaries.
    canhard = False
    testfile = None
    for root, dirs, files in os.walk(src):
        if len(files):
            testfile = os.path.join(root, files[0])
            break

    if testfile is not None:
        try:
            os.link(testfile, os.path.join(dst, 'testfile'))
            os.unlink(os.path.join(dst, 'testfile'))
            canhard = True
        except Exception as e:
            bb.debug(2, "Hardlink test failed with " + str(e))

    if (canhard):
        # Need to copy directories only with tar first since cp will error if two
        # writers try and create a directory at the same time
        cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -S -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst)
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        source = ''
        if os.path.isdir(src):
            # list hidden entries explicitly; a bare "*" misses dotfiles
            if len(glob.glob('%s/.??*' % src)) > 0:
                source = './.??* '
            if len(glob.glob('%s/**' % src)) > 0:
                source += './*'
            s_dir = src
        else:
            source = src
            s_dir = os.getcwd()
        # cp -l creates hardlinks instead of copying file contents
        cmd = 'cp -afl --preserve=xattr %s %s' % (source, os.path.realpath(dst))
        subprocess.check_output(cmd, shell=True, cwd=s_dir, stderr=subprocess.STDOUT)
    else:
        copytree(src, dst)
def copyhardlink(src, dst):
    """Hard-link *src* to *dst*; fall back to a plain copy when linking
    fails (e.g. when the two paths live on different filesystems)."""
    try:
        os.link(src, dst)
    except OSError:
        shutil.copy(src, dst)
def remove(path, recurse=True):
    """
    Equivalent to rm -f or rm -rf
    NOTE: be careful about passing paths that may contain filenames with
    wildcards in them (as opposed to passing an actual wildcarded path) -
    since we use glob.glob() to expand the path. Filenames containing
    square brackets are particularly problematic since the they may not
    actually expand to match the original filename.
    """
    for match in glob.glob(path):
        try:
            os.unlink(match)
        except OSError as exc:
            if exc.errno == errno.EISDIR and recurse:
                shutil.rmtree(match)
            elif exc.errno != errno.ENOENT:
                raise
def symlink(source, destination, force=False):
    """Create a symbolic link"""
    try:
        if force:
            remove(destination)
        os.symlink(source, destination)
    except OSError as e:
        # Tolerate an already-existing link that points at the same target.
        already_ok = (e.errno == errno.EEXIST and
                      os.readlink(destination) == source)
        if not already_ok:
            raise
def relsymlink(target, name, force=False):
    """Create symlink *name* pointing at *target* via a relative path."""
    rel = os.path.relpath(target, os.path.dirname(name))
    symlink(rel, name, force=force)
def find(dir, **walkoptions):
    """ Given a directory, recurses into that directory,
    returning all files as absolute paths. """
    for root, _dirs, filenames in os.walk(dir, **walkoptions):
        for name in filenames:
            yield os.path.join(root, name)
## realpath() related functions
def __is_path_below(file, root):
    """True when *file* lies at or below *root* (callers pass *root* with a
    trailing separator — see realpath())."""
    candidate = file + os.path.sep
    return candidate.startswith(root)
def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
    """Calculates real path of symlink 'start' + 'rel_path' below
    'root'; no part of 'start' below 'root' must contain symlinks. """
    have_dir = True
    for d in rel_path.split(os.path.sep):
        if not have_dir and not assume_dir:
            # the previous component resolved to a non-directory: cannot descend
            raise OSError(errno.ENOENT, "no such directory %s" % start)

        if d == os.path.pardir: # '..'
            if len(start) >= len(root):
                # do not follow '..' before root
                start = os.path.dirname(start)
            else:
                # emit warning?
                pass
        else:
            (start, have_dir) = __realpath(os.path.join(start, d),
                                           root, loop_cnt, assume_dir)

        assert(__is_path_below(start, root))

    return start
def __realpath(file, root, loop_cnt, assume_dir):
    """Resolve symlinks in *file* (which lies below *root*), following at
    most *loop_cnt* links and treating *root* as the filesystem root for
    absolute link targets.

    Returns (resolved_path, is_dir).  Raises OSError(ELOOP) when the link
    chain exceeds *loop_cnt*.
    """
    while os.path.islink(file) and len(file) >= len(root):
        if loop_cnt == 0:
            raise OSError(errno.ELOOP, file)

        loop_cnt -= 1
        target = os.path.normpath(os.readlink(file))

        if not os.path.isabs(target):
            tdir = os.path.dirname(file)
            assert(__is_path_below(tdir, root))
        else:
            # absolute link targets are interpreted relative to *root*
            tdir = root

        file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)

    try:
        is_dir = os.path.isdir(file)
    except OSError:
        # Fix: was a bare "except" assigning the undefined name "false",
        # which raised NameError instead of reporting a non-directory.
        is_dir = False

    return (file, is_dir)
def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
    """ Returns the canonical path of 'file' with assuming a
    toplevel 'root' directory. When 'use_physdir' is set, all
    preceding path components of 'file' will be resolved first;
    this flag should be set unless it is guaranteed that there is
    no symlink in the path. When 'assume_dir' is not set, missing
    path components will raise an ENOENT error"""
    root = os.path.normpath(root)
    file = os.path.normpath(file)

    if not root.endswith(os.path.sep):
        # letting root end with '/' makes some things easier
        root = root + os.path.sep

    if not __is_path_below(file, root):
        raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

    try:
        if use_physdir:
            # resolve component by component, starting below root
            file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
        else:
            file = __realpath(file, root, loop_cnt, assume_dir)[0]
    except OSError as e:
        if e.errno == errno.ELOOP:
            # make ELOOP more readable; without catching it, there will
            # be printed a backtrace with 100s of OSError exceptions
            # else
            raise OSError(errno.ELOOP,
                          "too much recursions while resolving '%s'; loop in '%s'" %
                          (file, e.strerror))

        raise

    return file
def is_path_parent(possible_parent, *paths):
    """
    Return True if a path is the parent of another, False otherwise.
    Multiple paths to test can be specified in which case all
    specified test paths must be under the parent in order to
    return True.
    """
    def _abs_with_sep(pth):
        # normalize to an absolute path with a trailing separator so that
        # prefix comparison cannot match partial component names
        pth_abs = os.path.abspath(pth)
        if not pth_abs.endswith(os.sep):
            pth_abs += os.sep
        return pth_abs

    if not paths:
        return False
    parent = _abs_with_sep(possible_parent)
    return all(_abs_with_sep(p).startswith(parent) for p in paths)
def which_wild(pathname, path=None, mode=os.F_OK, *, reverse=False, candidates=False):
    """Search a search path for pathname, supporting wildcards.

    Return all paths in the specific search path matching the wildcard pattern
    in pathname, returning only the first encountered for each file. If
    candidates is True, information on all potential candidate paths are
    included.
    """
    search_dirs = (path or os.environ.get('PATH', os.defpath)).split(':')
    if reverse:
        search_dirs.reverse()

    seen = set()
    found = []
    for index, directory in enumerate(search_dirs):
        if not os.path.isabs(directory):
            directory = os.path.abspath(directory)
        for hit in sorted(glob.glob(os.path.join(directory, pathname))):
            if not os.access(hit, mode):
                continue
            rel = os.path.relpath(hit, directory)
            if rel in seen:
                continue
            seen.add(rel)
            if candidates:
                # record every search-path location the file could occupy
                found.append((hit, [os.path.join(p, rel) for p in search_dirs[:index + 1]]))
            else:
                found.append(hit)
    return found
def canonicalize(paths, sep=','):
    """Given a string with paths (separated by commas by default), expand
    each path using os.path.realpath() and return the resulting paths as a
    string (separated using the same separator as the original string).
    """
    # Ignore paths containing "$" as they are assumed to be unexpanded bitbake
    # variables. Normally they would be ignored, e.g., when passing the paths
    # through the shell they would expand to empty strings. However, when they
    # are passed through os.path.realpath(), it will cause them to be prefixed
    # with the absolute path to the current directory and thus not be empty
    # anymore.
    #
    # Also maintain trailing slashes, as the paths may actually be used as
    # prefixes in string compares later on, where the slashes then are important.
    resolved = []
    for entry in (paths or '').split(sep):
        if '$' in entry:
            continue
        suffix = '/' if entry.endswith('/') else ''
        resolved.append(os.path.realpath(entry) + suffix)
    return sep.join(resolved)

View File

@@ -0,0 +1,127 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
def prserv_make_conn(d, check = False):
    """Connect to the PR service named by PRSERV_HOST ("host:port").

    With check=True the service is also pinged. Any failure is fatal via
    bb.fatal(). Returns the open connection object.
    """
    import prserv.serv
    host_params = [part for part in (d.getVar("PRSERV_HOST") or '').split(':') if part]
    conn = None
    try:
        conn = prserv.serv.connect(host_params[0], int(host_params[1]))
        if check and not conn.ping():
            raise Exception('service not available')
    except Exception as exc:
        bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))
    return conn
def prserv_dump_db(d):
    """Export the remote PR service database, honouring the
    PRSERV_DUMPOPT_{VERSION,PKGARCH,CHECKSUM,COL} filters.

    Returns the exported data, or None when no network PR service is
    configured or the connection could not be made.
    """
    if not d.getVar('PRSERV_HOST'):
        bb.error("Not using network based PR service")
        return None
    conn = prserv_make_conn(d)
    if conn is None:
        bb.error("Making connection failed to remote PR service")
        return None
    #dump db
    dump_version = d.getVar('PRSERV_DUMPOPT_VERSION')
    dump_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH')
    dump_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM')
    dump_col = d.getVar('PRSERV_DUMPOPT_COL') == "1"
    exported = conn.export(dump_version, dump_pkgarch, dump_checksum, dump_col)
    conn.close()
    return exported
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
    """Import PRAUTO$<version>$<pkgarch>$<checksum> datastore entries into the
    remote PR service, optionally filtered on any of the three fields.

    Returns the list of (version, pkgarch, checksum, value) tuples actually
    imported, or None if no PR service is configured/reachable.
    """
    if not d.getVar('PRSERV_HOST'):
        bb.error("Not using network based PR service")
        return None
    conn = prserv_make_conn(d)
    if conn is None:
        bb.error("Making connection failed to remote PR service")
        return None
    #get the entry values
    imported = []
    prefix = "PRAUTO$"
    for v in d.keys():
        if v.startswith(prefix):
            (remain, sep, checksum) = v.rpartition('$')
            (remain, sep, pkgarch) = remain.rpartition('$')
            (remain, sep, version) = remain.rpartition('$')
            if (remain + '$' != prefix) or \
               (filter_version and filter_version != version) or \
               (filter_pkgarch and filter_pkgarch != pkgarch) or \
               (filter_checksum and filter_checksum != checksum):
                continue
            try:
                value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum))
            except BaseException as exc:
                # Fix: bb.debug() takes the log level as its first argument;
                # the original passed the message in its place.
                bb.debug(1, "Not valid value of %s:%s" % (v, str(exc)))
                continue
            ret = conn.importone(version, pkgarch, checksum, value)
            if ret != value:
                bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version, pkgarch, checksum, value, ret))
            else:
                imported.append((version, pkgarch, checksum, value))
    conn.close()
    return imported
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
    """Append a PR database dump to PRSERV_DUMPFILE (held under a lock file).

    Writes optional table metadata, an optional PRSERV_LOCKDOWN marker, one
    PRAUTO$... line per data row and, unless nomax, a PRAUTO_<ver>_<arch>
    line carrying the highest value seen per package architecture.
    """
    import bb.utils
    # Make sure the dump directory exists before opening the output file.
    bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR'))
    dumpfile = d.getVar('PRSERV_DUMPFILE')
    #write data
    with open(dumpfile, "a") as f, bb.utils.fileslocked(["%s.lock" % dumpfile]):
        if metainfo:
            #dump column info
            f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver'])
            f.write("#Table: %s\n" % metainfo['tbl_name'])
            f.write("#Columns:\n")
            f.write("#name \t type \t notn \t dflt \t pk\n")
            f.write("#----------\t --------\t --------\t --------\t ----\n")
            for col in metainfo['col_info']:
                f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
                        (col['name'], col['type'], col['notnull'],
                         col['dflt_value'], col['pk']))
            f.write("\n")

        if lockdown:
            f.write("PRSERV_LOCKDOWN = \"1\"\n\n")

        if datainfo:
            # Track, per pkgarch, the index of the row with the largest value.
            max_idx = {}
            for i, entry in enumerate(datainfo):
                arch = entry['pkgarch']
                if arch not in max_idx or entry['value'] > datainfo[max_idx[arch]]['value']:
                    max_idx[arch] = i
                f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
                        (str(entry['version']), arch, str(entry['checksum']), str(entry['value'])))
            if not nomax:
                for arch in max_idx:
                    best = datainfo[max_idx[arch]]
                    f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(best['version']), str(best['pkgarch']), str(best['value'])))
def prserv_check_avail(d):
    """Validate PRSERV_HOST ("host:port") and check the PR service answers.

    A malformed value is fatal; otherwise a checked connection is opened and
    closed (prserv_make_conn is itself fatal on connect/ping failure).
    """
    host_params = [part for part in (d.getVar("PRSERV_HOST") or '').split(':') if part]
    try:
        if len(host_params) != 2:
            raise TypeError
        else:
            int(host_params[1])
    except (TypeError, ValueError):
        # Fix: int() raises ValueError for a non-numeric port (e.g.
        # "host:abc"); the original only caught TypeError, so that case
        # escaped as a raw traceback instead of this clean error.
        bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
    else:
        conn = prserv_make_conn(d, True)
        conn.close()

View File

@@ -0,0 +1,238 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os, struct, mmap
class NotELFFileError(Exception):
    """Raised when a file inspected by ELFFile is not a valid ELF object."""
    pass
class ELFFile:
    """Minimal parser for the header of an ELF file, backed by an mmap.

    Only the fields the QA checks need are decoded: class (32/64 bit),
    endianness, version, OS ABI, machine, and the program headers for
    isDynamic(). Use as a context manager so the mmap is closed
    deterministically; call open() before querying anything.
    """
    # Byte offsets/indices into the ELF identification block (e_ident).
    EI_NIDENT = 16
    EI_CLASS = 4
    EI_DATA = 5
    EI_VERSION = 6
    EI_OSABI = 7
    EI_ABIVERSION = 8
    # Byte offset of the e_machine field in the ELF header.
    E_MACHINE = 0x12
    # possible values for EI_CLASS
    ELFCLASSNONE = 0
    ELFCLASS32 = 1
    ELFCLASS64 = 2
    # possible value for EI_VERSION
    EV_CURRENT = 1
    # possible values for EI_DATA
    EI_DATA_NONE = 0
    EI_DATA_LSB = 1
    EI_DATA_MSB = 2
    # Program header type of the .interp segment (dynamic linker path).
    PT_INTERP = 3
    def my_assert(self, expectation, result):
        # Raise NotELFFileError unless the two values compare equal.
        if not expectation == result:
            #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
            raise NotELFFileError("%s is not an ELF" % self.name)
    def __init__(self, name):
        self.name = name            # path of the file to inspect
        self.objdump_output = {}    # cache of objdump output, keyed by argument
        self.data = None            # mmap of the file, populated by open()
    # Context Manager functions to close the mmap explicitly
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def close(self):
        # Safe to call even if open() was never run or failed.
        if self.data:
            self.data.close()
    def open(self):
        """Map the file read-only and validate/decode its ELF header.

        Raises NotELFFileError if the file is empty, too short, lacks the
        ELF magic, or has an unsupported class/version/endianness.
        """
        with open(self.name, "rb") as f:
            try:
                self.data = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            except ValueError:
                # This means the file is empty
                raise NotELFFileError("%s is empty" % self.name)
        # Check the file has the minimum number of ELF table entries
        if len(self.data) < ELFFile.EI_NIDENT + 4:
            raise NotELFFileError("%s is not an ELF" % self.name)
        # ELF header magic: 0x7f 'E' 'L' 'F'
        self.my_assert(self.data[0], 0x7f)
        self.my_assert(self.data[1], ord('E'))
        self.my_assert(self.data[2], ord('L'))
        self.my_assert(self.data[3], ord('F'))
        if self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS32:
            self.bits = 32
        elif self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS64:
            self.bits = 64
        else:
            # Not 32-bit or 64.. lets assert
            raise NotELFFileError("ELF but not 32 or 64 bit.")
        self.my_assert(self.data[ELFFile.EI_VERSION], ELFFile.EV_CURRENT)
        self.endian = self.data[ELFFile.EI_DATA]
        if self.endian not in (ELFFile.EI_DATA_LSB, ELFFile.EI_DATA_MSB):
            raise NotELFFileError("Unexpected EI_DATA %x" % self.endian)
    def osAbi(self):
        # Raw EI_OSABI byte (0 = System V, 3 = Linux, ...).
        return self.data[ELFFile.EI_OSABI]
    def abiVersion(self):
        return self.data[ELFFile.EI_ABIVERSION]
    def abiSize(self):
        # 32 or 64, as decoded from EI_CLASS in open().
        return self.bits
    def isLittleEndian(self):
        return self.endian == ELFFile.EI_DATA_LSB
    def isBigEndian(self):
        return self.endian == ELFFile.EI_DATA_MSB
    def getStructEndian(self):
        # struct module byte-order prefix matching the file's endianness.
        return {ELFFile.EI_DATA_LSB: "<",
                ELFFile.EI_DATA_MSB: ">"}[self.endian]
    def getShort(self, offset):
        # Read an unsigned 16-bit value at the given byte offset.
        return struct.unpack_from(self.getStructEndian() + "H", self.data, offset)[0]
    def getWord(self, offset):
        # Read a signed 32-bit value at the given byte offset.
        return struct.unpack_from(self.getStructEndian() + "i", self.data, offset)[0]
    def isDynamic(self):
        """
        Return True if there is a .interp segment (therefore dynamically
        linked), otherwise False (statically linked).
        """
        # e_phoff / e_phentsize / e_phnum at their 32- or 64-bit offsets.
        # NOTE(review): on 64-bit ELFs e_phoff is 8 bytes but getWord() reads
        # only 4 at offset 0x20; this appears to assume the program headers
        # sit within the first 4 GiB and would misread big-endian 64-bit
        # files — confirm before relying on it for such binaries.
        offset = self.getWord(self.bits == 32 and 0x1C or 0x20)
        size = self.getShort(self.bits == 32 and 0x2A or 0x36)
        count = self.getShort(self.bits == 32 and 0x2C or 0x38)
        for i in range(0, count):
            p_type = self.getWord(offset + i * size)
            if p_type == ELFFile.PT_INTERP:
                return True
        return False
    def machine(self):
        """
        We know the endian stored in self.endian and we
        know the position
        """
        return self.getShort(ELFFile.E_MACHINE)
    def set_objdump(self, cmd, output):
        # Pre-seed the objdump cache (lets callers/tests inject output
        # without shelling out).
        self.objdump_output[cmd] = output
    def run_objdump(self, cmd, d):
        """Run ${OBJDUMP} with *cmd* on this file, caching and returning its
        stdout; returns "" (after a note) on any failure."""
        import bb.process
        import sys
        if cmd in self.objdump_output:
            return self.objdump_output[cmd]
        objdump = d.getVar('OBJDUMP')
        env = os.environ.copy()
        env["LC_ALL"] = "C"
        env["PATH"] = d.getVar('PATH')
        try:
            bb.note("%s %s %s" % (objdump, cmd, self.name))
            self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0]
            return self.objdump_output[cmd]
        except Exception as e:
            bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
            return ""
def elf_machine_to_string(machine):
    """
    Return the name of a given ELF e_machine field or the hex value as a string
    if it isn't recognised.
    """
    try:
        return {
            0x00: "Unset",
            0x02: "SPARC",
            0x03: "x86",
            0x08: "MIPS",
            0x14: "PowerPC",
            0x28: "ARM",
            0x2A: "SuperH",
            0x32: "IA-64",
            0x3E: "x86-64",
            0xB7: "AArch64",
            0xF7: "BPF"
        }[machine]
    except (KeyError, TypeError):
        # Fix: the original bare "except:" also swallowed KeyboardInterrupt
        # and SystemExit. Only an unknown (KeyError) or unhashable
        # (TypeError) machine value should fall through to the generic form.
        return "Unknown (%s)" % repr(machine)
def write_error(type, error, d):
    """Append a QA issue line ("<P>: <error> [<type>]") to QA_LOGFILE, if the
    variable is set; otherwise do nothing."""
    logfile = d.getVar('QA_LOGFILE')
    if not logfile:
        return
    with open(logfile, "a+") as f:
        f.write("%s: %s [%s]\n" % (d.getVar('P'), error, type))
def handle_error(error_class, error_msg, d):
    """Report a QA issue according to its membership in ERROR_QA / WARN_QA.

    ERROR_QA classes are logged as errors, recorded in QA_ERRORS_FOUND and
    make this return False; WARN_QA classes warn, everything else is a note.
    Returns True for non-fatal issues.
    """
    issue = "QA Issue: %s [%s]" % (error_msg, error_class)
    if error_class in (d.getVar("ERROR_QA") or "").split():
        write_error(error_class, error_msg, d)
        bb.error(issue)
        d.setVar("QA_ERRORS_FOUND", "True")
        return False
    if error_class in (d.getVar("WARN_QA") or "").split():
        write_error(error_class, error_msg, d)
        bb.warn(issue)
    else:
        bb.note(issue)
    return True
def add_message(messages, section, new_msg):
    """Append new_msg to messages[section], newline-separating it from any
    message already stored for that section."""
    existing = messages.get(section)
    messages[section] = new_msg if existing is None else existing + "\n" + new_msg
def exit_with_message_if_errors(message, d):
    """Abort the task with *message* if any fatal QA error was recorded in
    QA_ERRORS_FOUND (set by handle_error for ERROR_QA classes)."""
    if bb.utils.to_boolean(d.getVar("QA_ERRORS_FOUND"), False):
        bb.fatal(message)
def exit_if_errors(d):
    # Convenience wrapper: fail the task with a generic message if any fatal
    # QA errors were recorded earlier via handle_error().
    exit_with_message_if_errors("Fatal QA errors were found, failing task.", d)
def check_upstream_status(fullpath):
    """Validate the Upstream-Status tag of the patch file at *fullpath*.

    Returns None when the patch carries a well-formed tag, otherwise a
    human-readable message describing whether the tag is malformed or
    missing entirely.
    """
    import re
    loose_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
    strict_re = re.compile(r"^Upstream-Status: (Pending|Submitted|Denied|Inappropriate|Backport|Inactive-Upstream)( .+)?$", re.MULTILINE)
    guidelines = "https://docs.yoctoproject.org/contributor-guide/recipe-style-guide.html#patch-upstream-status"
    with open(fullpath, encoding='utf-8', errors='ignore') as f:
        content = f.read()
    if strict_re.search(content):
        return None
    loose_match = loose_re.search(content)
    if loose_match:
        return "Malformed Upstream-Status in patch\n%s\nPlease correct according to %s :\n%s" % (fullpath, guidelines, loose_match.group(0))
    return "Missing Upstream-Status in patch\n%s\nPlease add according to %s ." % (fullpath, guidelines)
if __name__ == "__main__":
    # Standalone usage: report whether the ELF file given as the first
    # command-line argument is dynamically linked.
    import sys
    with ELFFile(sys.argv[1]) as elf:
        elf.open()
        print(elf.isDynamic())

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,197 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import subprocess
import bb
# For reproducible builds, this code sets the default SOURCE_DATE_EPOCH in each
# component's build environment. The format is number of seconds since the
# system epoch.
#
# Upstream components (generally) respect this environment variable,
# using it in place of the "current" date and time.
# See https://reproducible-builds.org/specs/source-date-epoch/
#
# The default value of SOURCE_DATE_EPOCH comes from the function
# get_source_date_epoch_value which reads from the SDE_FILE, or if the file
# is not available will use the fallback of SOURCE_DATE_EPOCH_FALLBACK.
#
# The SDE_FILE is normally constructed from the function
# create_source_date_epoch_stamp which is typically added as a postfuncs to
# the do_unpack task. If a recipe does NOT have do_unpack, it should be added
# to a task that runs after the source is available and before the
# do_deploy_source_date_epoch task is executed.
#
# If a recipe wishes to override the default behavior it should set its own
# SOURCE_DATE_EPOCH or override the do_deploy_source_date_epoch_stamp task
# with recipe-specific functionality to write the appropriate
# SOURCE_DATE_EPOCH into the SDE_FILE.
#
# SOURCE_DATE_EPOCH is intended to be a reproducible value. This value should
# be reproducible for anyone who builds the same revision from the same
# sources.
#
# There are 4 ways the create_source_date_epoch_stamp function determines what
# becomes SOURCE_DATE_EPOCH:
#
# 1. Use the value from __source_date_epoch.txt file if this file exists.
# This file was most likely created in the previous build by one of the
# following methods 2,3,4.
# Alternatively, it can be provided by a recipe via SRC_URI.
#
# If the file does not exist:
#
# 2. If there is a git checkout, use the last git commit timestamp.
# Git does not preserve file timestamps on checkout.
#
# 3. Use the mtime of "known" files such as NEWS, CHANGELOG, ...
# This works for well-kept repositories distributed via tarball.
#
# 4. Use the modification time of the youngest file in the source tree, if
# there is one.
# This will be the newest file from the distribution tarball, if any.
#
# 5. Fall back to a fixed timestamp (SOURCE_DATE_EPOCH_FALLBACK).
#
# Once the value is determined, it is stored in the recipe's SDE_FILE.
def get_source_date_epoch_from_known_files(d, sourcedir):
    """Return the newest mtime among well-known changelog-style files in
    sourcedir, or None if none of them exist (d is unused, kept for API
    symmetry with the other strategies)."""
    best_mtime = None
    best_path = None
    for name in ("NEWS", "ChangeLog", "Changelog", "CHANGES"):
        candidate = os.path.join(sourcedir, name)
        if not os.path.isfile(candidate):
            continue
        mtime = int(os.lstat(candidate).st_mtime)
        # There may be more than one "known file" present; keep the youngest.
        if not best_mtime or mtime > best_mtime:
            best_mtime = mtime
            best_path = candidate
    if best_path:
        bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % best_path)
    return best_mtime
def find_git_folder(d, sourcedir):
    """Locate the .git directory for the checked-out sources.

    Tries WORKDIR/git/.git (the default git fetcher unpack path), then
    sourcedir/.git, then walks WORKDIR skipping build artefact directories.
    Returns None (after a warning) when nothing is found.
    """
    workdir = d.getVar('WORKDIR')
    for guess in (os.path.join(workdir, "git/.git"), os.path.join(sourcedir, ".git")):
        if os.path.isdir(guess):
            return guess
    # Perhaps there was a subpath or destsuffix specified: search WORKDIR,
    # ignoring directories that can never hold the fetched sources.
    skip = {"build", "image", "license-destdir", "patches", "pseudo",
            "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"}
    for root, dirs, _ in os.walk(workdir, topdown=True):
        dirs[:] = [entry for entry in dirs if entry not in skip]
        if '.git' in dirs:
            return os.path.join(root, ".git")
    bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
    return None
def get_source_date_epoch_from_git(d, sourcedir):
    """Return the committer timestamp of the last commit, or None when the
    sources are not a usable git checkout (non-git SRC_URI, no .git folder,
    or a repository without a valid HEAD)."""
    src_uri = d.getVar('SRC_URI')
    if "git://" not in src_uri and "gitsm://" not in src_uri:
        return None
    gitpath = find_git_folder(d, sourcedir)
    if not gitpath:
        return None
    # A repository fetched with a subdir= SRC_URI option may lack a valid HEAD.
    head = subprocess.run(['git', '--git-dir', gitpath, 'rev-parse', 'HEAD'],
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    if head.returncode != 0:
        bb.debug(1, "%s does not have a valid HEAD: %s" % (gitpath, head.stdout.decode('utf-8')))
        return None
    bb.debug(1, "git repository: %s" % gitpath)
    log = subprocess.run(['git', '-c', 'log.showSignature=false', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'],
                         check=True, stdout=subprocess.PIPE)
    return int(log.stdout.decode('utf-8'))
def get_source_date_epoch_from_youngest_file(d, sourcedir):
    """Return the mtime of the youngest non-hidden file under sourcedir, or
    None (e.g. when sourcedir is WORKDIR itself or contains no files)."""
    if sourcedir == d.getVar('WORKDIR'):
        # These sources are almost certainly not from a tarball
        return None
    # Do it the hard way: check all files and find the youngest one...
    newest_mtime = None
    newest_path = None
    for root, dirs, files in os.walk(sourcedir, topdown=True):
        for fname in files:
            if fname.startswith('.'):
                continue
            if fname == "singletask.lock":
                # Ignore externalsrc/devtool lockfile [YOCTO #14921]
                continue
            fullpath = os.path.join(root, fname)
            try:
                mtime = int(os.lstat(fullpath).st_mtime)
            except ValueError:
                # Out-of-range timestamps can make the conversion blow up.
                mtime = 0
            if not newest_mtime or mtime > newest_mtime:
                newest_mtime = mtime
                newest_path = fullpath
    if newest_path:
        bb.debug(1, "Newest file found: %s" % newest_path)
    return newest_mtime
def fixed_source_date_epoch(d):
    """Last-resort SOURCE_DATE_EPOCH: the configured fallback value, or 0."""
    bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
    fallback = d.getVar('SOURCE_DATE_EPOCH_FALLBACK')
    if not fallback:
        return 0
    bb.debug(1, "Using SOURCE_DATE_EPOCH_FALLBACK")
    return int(fallback)
def get_source_date_epoch(d, sourcedir):
    """Determine SOURCE_DATE_EPOCH for sourcedir: last git commit time, then
    the youngest file's mtime, then the configured fixed fallback."""
    epoch = get_source_date_epoch_from_git(d, sourcedir)
    if not epoch:
        epoch = get_source_date_epoch_from_youngest_file(d, sourcedir)
    if not epoch:
        epoch = fixed_source_date_epoch(d)  # Last resort
    return epoch
def epochfile_read(epochfile, d):
    """Return SOURCE_DATE_EPOCH (as a string) read from *epochfile*, caching
    the result in the datastore.

    Falls back to SOURCE_DATE_EPOCH_FALLBACK when the file is missing or
    does not contain a valid integer.
    """
    cached, cached_file = d.getVar('__CACHED_SOURCE_DATE_EPOCH') or (None, None)
    if cached:
        if cached_file == epochfile:
            return cached
        bb.debug(1, "Epoch file changed from %s to %s" % (cached_file, epochfile))

    epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
    try:
        with open(epochfile, 'r') as f:
            contents = f.read()
        try:
            epoch = int(contents)
        except ValueError:
            bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % contents)
            epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
        bb.debug(1, "SOURCE_DATE_EPOCH: %d" % epoch)
    except FileNotFoundError:
        bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, epoch))

    d.setVar('__CACHED_SOURCE_DATE_EPOCH', (str(epoch), epochfile))
    return str(epoch)
def epochfile_write(source_date_epoch, epochfile, d):
    """Persist *source_date_epoch* into *epochfile* atomically (write to a
    sibling temp file, then rename) so readers never see a partial file."""
    bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
    bb.utils.mkdirhier(os.path.dirname(epochfile))
    tmp_name = "%s.new" % epochfile
    with open(tmp_name, 'w') as tmp:
        tmp.write(str(source_date_epoch))
    os.rename(tmp_name, epochfile)

View File

@@ -0,0 +1,438 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.package_manager import *
from oe.manifest import *
import oe.path
import shutil
import os
import subprocess
import re
class Rootfs(object, metaclass=ABCMeta):
    """
    This is an abstract class. Do not instantiate this directly.

    Package-manager-specific subclasses implement _create(),
    _get_delayed_postinsts(), _save_postinsts(), _log_check() and _cleanup();
    create() drives the whole rootfs generation sequence.
    """
    def __init__(self, d, progress_reporter=None, logcatcher=None):
        self.d = d
        self.pm = None
        self.image_rootfs = self.d.getVar('IMAGE_ROOTFS')
        self.deploydir = self.d.getVar('IMGDEPLOYDIR')
        self.progress_reporter = progress_reporter
        self.logcatcher = logcatcher
        self.install_order = Manifest.INSTALL_ORDER

    @abstractmethod
    def _create(self):
        pass

    @abstractmethod
    def _get_delayed_postinsts(self):
        pass

    @abstractmethod
    def _save_postinsts(self):
        pass

    @abstractmethod
    def _log_check(self):
        pass

    def _log_check_common(self, type, match):
        """Scan the do_rootfs log for lines matching *match* and report them
        as *type* messages ('error' is fatal, anything else warns)."""
        # Ignore any lines containing log_check to avoid recursion, and ignore
        # lines beginning with a + since sh -x may emit code which isn't
        # actually executed, but may contain error messages
        excludes = [ 'log_check', r'^\+' ]
        if hasattr(self, 'log_check_expected_regexes'):
            excludes.extend(self.log_check_expected_regexes)
        # Insert custom log_check excludes
        excludes += [x for x in (self.d.getVar("IMAGE_LOG_CHECK_EXCLUDES") or "").split(" ") if x]
        excludes = [re.compile(x) for x in excludes]
        r = re.compile(match)
        log_path = self.d.expand("${T}/log.do_rootfs")
        messages = []
        with open(log_path, 'r') as log:
            for line in log:
                if self.logcatcher and self.logcatcher.contains(line.rstrip()):
                    continue
                for ee in excludes:
                    m = ee.search(line)
                    if m:
                        break
                if m:
                    continue
                m = r.search(line)
                if m:
                    messages.append('[log_check] %s' % line)
        if messages:
            if len(messages) == 1:
                msg = '1 %s message' % type
            else:
                msg = '%d %s messages' % (len(messages), type)
            msg = '[log_check] %s: found %s in the logfile:\n%s' % \
                (self.d.getVar('PN'), msg, ''.join(messages))
            if type == 'error':
                bb.fatal(msg)
            else:
                bb.warn(msg)

    def _log_check_warn(self):
        self._log_check_common('warning', '^(warn|Warn|WARNING:)')

    def _log_check_error(self):
        self._log_check_common('error', self.log_check_regex)

    def _insert_feed_uris(self):
        # Package feeds are only meaningful when the image keeps a package
        # manager.
        if bb.utils.contains("IMAGE_FEATURES", "package-management",
                             True, False, self.d):
            self.pm.insert_feeds_uris(self.d.getVar('PACKAGE_FEED_URIS') or "",
                                      self.d.getVar('PACKAGE_FEED_BASE_PATHS') or "",
                                      self.d.getVar('PACKAGE_FEED_ARCHS'))

    """
    The _cleanup() method should be used to clean-up stuff that we don't really
    want to end up on target. For example, in the case of RPM, the DB locks.
    The method is called, once, at the end of create() method.
    """
    @abstractmethod
    def _cleanup(self):
        pass

    def _setup_dbg_rootfs(self, package_paths):
        """When IMAGE_GEN_DEBUGFS is enabled, build a companion '-dbg' rootfs
        holding the debug packages, then restore the original rootfs."""
        gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
        if gen_debugfs != '1':
            return

        bb.note(" Renaming the original rootfs...")
        try:
            shutil.rmtree(self.image_rootfs + '-orig')
        except OSError:
            # Fix: narrowed from a bare except; a leftover '-orig' tree may
            # legitimately not exist.
            pass
        bb.utils.rename(self.image_rootfs, self.image_rootfs + '-orig')

        bb.note(" Creating debug rootfs...")
        bb.utils.mkdirhier(self.image_rootfs)

        bb.note(" Copying back package database...")
        for path in package_paths:
            bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(path))
            if os.path.isdir(self.image_rootfs + '-orig' + path):
                shutil.copytree(self.image_rootfs + '-orig' + path, self.image_rootfs + path, symlinks=True)
            elif os.path.isfile(self.image_rootfs + '-orig' + path):
                shutil.copyfile(self.image_rootfs + '-orig' + path, self.image_rootfs + path)

        # Copy files located in /usr/lib/debug or /usr/src/debug
        for dir in ["/usr/lib/debug", "/usr/src/debug"]:
            src = self.image_rootfs + '-orig' + dir
            if os.path.exists(src):
                dst = self.image_rootfs + dir
                bb.utils.mkdirhier(os.path.dirname(dst))
                shutil.copytree(src, dst)

        # Copy files with suffix '.debug' or located in '.debug' dir.
        for root, dirs, files in os.walk(self.image_rootfs + '-orig'):
            relative_dir = root[len(self.image_rootfs + '-orig'):]
            for f in files:
                if f.endswith('.debug') or '/.debug' in relative_dir:
                    bb.utils.mkdirhier(self.image_rootfs + relative_dir)
                    shutil.copy(os.path.join(root, f),
                                self.image_rootfs + relative_dir)

        bb.note(" Install complementary '*-dbg' packages...")
        self.pm.install_complementary('*-dbg')

        if self.d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
            bb.note(" Install complementary '*-src' packages...")
            self.pm.install_complementary('*-src')

        """
        Install additional debug packages. Possibility to install additional packages,
        which are not automatically installed as complementary package of
        standard one, e.g. debug package of static libraries.
        """
        extra_debug_pkgs = self.d.getVar('IMAGE_INSTALL_DEBUGFS')
        if extra_debug_pkgs:
            bb.note(" Install extra debug packages...")
            self.pm.install(extra_debug_pkgs.split(), True)

        bb.note(" Removing package database...")
        for path in package_paths:
            if os.path.isdir(self.image_rootfs + path):
                shutil.rmtree(self.image_rootfs + path)
            elif os.path.isfile(self.image_rootfs + path):
                os.remove(self.image_rootfs + path)

        bb.note(" Rename debug rootfs...")
        try:
            shutil.rmtree(self.image_rootfs + '-dbg')
        except OSError:
            # Fix: narrowed from a bare except; a leftover '-dbg' tree may
            # legitimately not exist.
            pass
        bb.utils.rename(self.image_rootfs, self.image_rootfs + '-dbg')

        bb.note(" Restoring original rootfs...")
        bb.utils.rename(self.image_rootfs + '-orig', self.image_rootfs)

    def _exec_shell_cmd(self, cmd):
        """Run *cmd*; return None on success or the error text on failure."""
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))
        return None

    def create(self):
        """Drive the full rootfs generation sequence: pre-process, backend
        create, post-install/post-process commands, read-only checks, devfs,
        uninstall of unneeded packages, ldconfig, depmod, cleanup and log
        checking."""
        bb.note("###### Generate rootfs #######")
        pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND")
        post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
        rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')

        def make_last(command, commands):
            # Move *command*, if configured, to the end of the space-separated
            # *commands* string and return the result.
            commands = commands.split()
            if command in commands:
                commands.remove(command)
                commands.append(command)
            return " ".join(commands)

        # We want this to run as late as possible, in particular after
        # systemd_sysusers_create and set_user_group. Using :append is not enough
        # Fix: make_last() returns the reordered string, so the result must be
        # assigned back (the original discarded it, joined with no separator,
        # and appended the command even when it was not configured at all).
        post_process_cmds = make_last("tidy_shadowutils_files", post_process_cmds)
        post_process_cmds = make_last("rootfs_reproducible", post_process_cmds)

        execute_pre_post_process(self.d, pre_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        # call the package manager dependent create method
        self._create()

        sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir')
        bb.utils.mkdirhier(sysconfdir)
        with open(sysconfdir + "/version", "w+") as ver:
            ver.write(self.d.getVar('BUILDNAME') + "\n")

        execute_pre_post_process(self.d, rootfs_post_install_cmds)

        self.pm.run_intercepts()

        execute_pre_post_process(self.d, post_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
                             True, False, self.d) and \
           not bb.utils.contains("IMAGE_FEATURES",
                                 "read-only-rootfs-delayed-postinsts",
                                 True, False, self.d):
            delayed_postinsts = self._get_delayed_postinsts()
            if delayed_postinsts is not None:
                bb.fatal("The following packages could not be configured "
                         "offline and rootfs is read-only: %s" %
                         delayed_postinsts)

        if self.d.getVar('USE_DEVFS') != "1":
            self._create_devfs()

        self._uninstall_unneeded()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self._insert_feed_uris()

        self._run_ldconfig()

        if self.d.getVar('USE_DEPMOD') != "0":
            self._generate_kernel_module_deps()

        self._cleanup()
        self._log_check()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

    def _uninstall_unneeded(self):
        """Drop run-postinsts when unused, remove ROOTFS_RO_UNNEEDED packages
        from read-only images, save delayed postinsts and, when the image has
        no package management, scrub the package manager data files."""
        # Remove the run-postinsts package if no delayed postinsts are found
        delayed_postinsts = self._get_delayed_postinsts()
        if delayed_postinsts is None:
            if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")) or os.path.exists(self.d.expand("${IMAGE_ROOTFS}${systemd_system_unitdir}/run-postinsts.service")):
                self.pm.remove(["run-postinsts"])

        image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
                                        True, False, self.d) and \
                      not bb.utils.contains("IMAGE_FEATURES",
                                            "read-only-rootfs-delayed-postinsts",
                                            True, False, self.d)

        image_rorfs_force = self.d.getVar('FORCE_RO_REMOVE')

        if image_rorfs or image_rorfs_force == "1":
            # Remove components that we don't need if it's a read-only rootfs
            unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED").split()
            pkgs_installed = image_list_installed_packages(self.d)
            # Make sure update-alternatives is removed last. This is
            # because its database has to be available while uninstalling
            # other packages, allowing alternative symlinks of packages
            # to be uninstalled or to be managed correctly otherwise.
            provider = self.d.getVar("VIRTUAL-RUNTIME_update-alternatives")
            pkgs_to_remove = sorted([pkg for pkg in pkgs_installed if pkg in unneeded_pkgs], key=lambda x: x == provider)

            # update-alternatives provider is removed in its own remove()
            # call because all package managers do not guarantee the packages
            # are removed in the order they given in the list (which is
            # passed to the command line). The sorting done earlier is
            # utilized to implement the 2-stage removal.
            if len(pkgs_to_remove) > 1:
                self.pm.remove(pkgs_to_remove[:-1], False)
            if len(pkgs_to_remove) > 0:
                self.pm.remove([pkgs_to_remove[-1]], False)

        if delayed_postinsts:
            self._save_postinsts()
            if image_rorfs:
                bb.warn("There are post install scripts "
                        "in a read-only rootfs")

        post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND")
        execute_pre_post_process(self.d, post_uninstall_cmds)

        runtime_pkgmanage = bb.utils.contains("IMAGE_FEATURES", "package-management",
                                              True, False, self.d)
        if not runtime_pkgmanage:
            # Remove the package manager data files
            self.pm.remove_packaging_data()

    def _run_ldconfig(self):
        """Regenerate the runtime linker cache and drop the auxiliary cache
        when the target cannot make use of it."""
        if self.d.getVar('LDCONFIGDEPEND'):
            bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v -X")
            self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
                                  'new', '-v', '-X'])

        image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
                                        True, False, self.d)
        ldconfig_in_features = bb.utils.contains("DISTRO_FEATURES", "ldconfig",
                                                 True, False, self.d)
        if image_rorfs or not ldconfig_in_features:
            ldconfig_cache_dir = os.path.join(self.image_rootfs, "var/cache/ldconfig")
            if os.path.exists(ldconfig_cache_dir):
                bb.note("Removing ldconfig auxiliary cache...")
                shutil.rmtree(ldconfig_cache_dir)

    def _check_for_kernel_modules(self, modules_dir):
        """Return truthy as soon as any kernel module object (.ko, possibly
        compressed) is found under modules_dir, else False."""
        for root, dirs, files in os.walk(modules_dir, topdown=True):
            for name in files:
                found_ko = name.endswith((".ko", ".ko.gz", ".ko.xz", ".ko.zst"))
                if found_ko:
                    return found_ko
        return False

    def _generate_kernel_module_deps(self):
        """Run depmod (via depmodwrapper) for every kernel recorded in
        PKGDATA_DIR so the image ships module dependency files."""
        modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules')
        # if we don't have any modules don't bother to do the depmod
        if not self._check_for_kernel_modules(modules_dir):
            bb.note("No Kernel Modules found, not running depmod")
            return

        pkgdatadir = self.d.getVar('PKGDATA_DIR')

        # PKGDATA_DIR can include multiple kernels so we run depmod for each
        # one of them.
        for direntry in os.listdir(pkgdatadir):
            match = re.match('(.*)-depmod', direntry)
            if not match:
                continue
            kernel_package_name = match.group(1)

            kernel_abi_ver_file = oe.path.join(pkgdatadir, direntry, kernel_package_name + '-abiversion')
            if not os.path.exists(kernel_abi_ver_file):
                bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)

            with open(kernel_abi_ver_file) as f:
                kernel_ver = f.read().strip(' \n')

            versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)

            bb.utils.mkdirhier(versioned_modules_dir)

            bb.note("Running depmodwrapper for %s ..." % versioned_modules_dir)
            if self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver, kernel_package_name]):
                bb.fatal("Kernel modules dependency generation failed")

    """
    Create devfs:
    * IMAGE_DEVICE_TABLE is the old name to an absolute path to a device table file
    * IMAGE_DEVICE_TABLES is a new name for a file, or list of files, searched
      for in the BBPATH
    If neither are specified then the default name of files/device_table-minimal.txt
    is searched for in the BBPATH (same as the old version.)
    """
    def _create_devfs(self):
        devtable_list = []
        devtable = self.d.getVar('IMAGE_DEVICE_TABLE')
        if devtable is not None:
            devtable_list.append(devtable)
        else:
            devtables = self.d.getVar('IMAGE_DEVICE_TABLES')
            if devtables is None:
                devtables = 'files/device_table-minimal.txt'
            for devtable in devtables.split():
                devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH'), devtable))

        for devtable in devtable_list:
            self._exec_shell_cmd(["makedevs", "-r",
                                  self.image_rootfs, "-D", devtable])
def get_class_for_type(imgtype):
    """Return the PkgRootfs class implementing rootfs creation for the given
    IMAGE_PKGTYPE backend (e.g. 'rpm', 'deb', 'ipk')."""
    import importlib
    module_name = 'oe.package_manager.%s.rootfs' % imgtype
    return importlib.import_module(module_name).PkgRootfs
def variable_depends(d, manifest_dir=None):
    """Return the variable dependency list of the rootfs backend selected by
    IMAGE_PKGTYPE (manifest_dir is unused, kept for API compatibility)."""
    rootfs_cls = get_class_for_type(d.getVar('IMAGE_PKGTYPE'))
    return rootfs_cls._depends_list()
def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None):
    """Create the image rootfs with the IMAGE_PKGTYPE-specific backend,
    restoring the process environment afterwards."""
    saved_env = os.environ.copy()
    rootfs_cls = get_class_for_type(d.getVar('IMAGE_PKGTYPE'))
    rootfs_cls(d, manifest_dir, progress_reporter, logcatcher).create()
    os.environ.clear()
    os.environ.update(saved_env)
def image_list_installed_packages(d, rootfs_dir=None):
    """List the packages installed in rootfs_dir (default IMAGE_ROOTFS) using
    the IMAGE_PKGTYPE package manager backend."""
    import importlib
    # There's no rootfs for baremetal images
    if bb.data.inherits_class('baremetal-image', d):
        return ""

    target_dir = rootfs_dir or d.getVar('IMAGE_ROOTFS')
    backend = importlib.import_module('oe.package_manager.' + d.getVar('IMAGE_PKGTYPE'))
    return backend.PMPkgsList(d, target_dir).list_pkgs()
if __name__ == "__main__":
    # NOTE(review): standalone entry point is not implemented; the bare
    # strings below are placeholders with no runtime effect.
    """
    We should be able to run this as a standalone script, from outside bitbake
    environment.
    """
    """
    TBD
    """

View File

@@ -0,0 +1,13 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# Handle mismatches between `uname -m`-style output and Rust's arch names
def arch_to_rust_arch(arch):
    """Map a `uname -m`-style architecture name to Rust's spelling.

    Unlisted architectures are returned unchanged.
    """
    special_cases = {
        "ppc64le": "powerpc64le",
        "riscv32": "riscv32gc",
        "riscv64": "riscv64gc",
    }
    return special_cases.get(arch, arch)

View File

@@ -0,0 +1,120 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import collections
# Lightweight records pairing a parsed SPDX document (and the sha1 of its
# serialized form) with the recipe / source file it describes.
DepRecipe = collections.namedtuple("DepRecipe", ("doc", "doc_sha1", "recipe"))
DepSource = collections.namedtuple("DepSource", ("doc", "doc_sha1", "recipe", "file"))
def get_recipe_spdxid(d):
    """SPDX ID of the recipe-level element for the current recipe (PN)."""
    return "SPDXRef-Recipe-" + d.getVar("PN")
def get_download_spdxid(d, idx):
    """SPDX ID of the idx'th download location for the current recipe."""
    return "-".join(["SPDXRef-Download", d.getVar("PN"), "%d" % idx])
def get_package_spdxid(pkg):
    """SPDX ID of a built binary package."""
    return f"SPDXRef-Package-{pkg}"
def get_source_file_spdxid(d, idx):
    """SPDX ID of the idx'th source file of the current recipe."""
    return "-".join(["SPDXRef-SourceFile", d.getVar("PN"), "%d" % idx])
def get_packaged_file_spdxid(pkg, idx):
    """SPDX ID of the idx'th file shipped in package pkg."""
    return "-".join(["SPDXRef-PackagedFile", pkg, "%d" % idx])
def get_image_spdxid(img):
    """SPDX ID of an image element."""
    return f"SPDXRef-Image-{img}"
def get_sdk_spdxid(sdk):
    """SPDX ID of an SDK element."""
    return f"SPDXRef-SDK-{sdk}"
def _doc_path_by_namespace(spdx_deploy, arch, doc_namespace):
return spdx_deploy / "by-namespace" / arch / doc_namespace.replace("/", "_")
def doc_find_by_namespace(spdx_deploy, search_arches, doc_namespace):
    """Locate a deployed SPDX document by namespace.

    Each architecture in search_arches is probed in order; the first
    existing by-namespace path is returned, or None if no arch has it.
    """
    # The visible module header only imports `collections`, so import os
    # locally to guarantee os.path is resolvable here.
    import os
    for pkgarch in search_arches:
        p = _doc_path_by_namespace(spdx_deploy, pkgarch, doc_namespace)
        if os.path.exists(p):
            return p
    return None
def _doc_path_by_hashfn(spdx_deploy, arch, doc_name, hashfn):
return (
spdx_deploy / "by-hash" / arch / hashfn.split()[1] / (doc_name + ".spdx.json")
)
def doc_find_by_hashfn(spdx_deploy, search_arches, doc_name, hashfn):
    """Locate a deployed SPDX document by name and task hash.

    Each architecture in search_arches is probed in order; the first
    existing by-hash path is returned, or None if no arch has it.
    """
    # The visible module header only imports `collections`, so import os
    # locally to guarantee os.path is resolvable here.
    import os
    for pkgarch in search_arches:
        p = _doc_path_by_hashfn(spdx_deploy, pkgarch, doc_name, hashfn)
        if os.path.exists(p):
            return p
    return None
def doc_path(spdx_deploy, doc_name, arch, subdir):
    """Primary deploy location of a named SPDX JSON document."""
    return spdx_deploy / arch / subdir / f"{doc_name}.spdx.json"
def write_doc(d, spdx_doc, arch, subdir, spdx_deploy=None, indent=None):
    """Serialize spdx_doc to its deploy location and create lookup symlinks.

    Writes <deploy>/<arch>/<subdir>/<name>.spdx.json and links it from both
    the by-namespace and by-hash trees so it can be found either way.
    Returns the sha1 hex digest of the serialized JSON, as produced by
    SPDXDocument.to_json().
    """
    from pathlib import Path

    if spdx_deploy is None:
        spdx_deploy = Path(d.getVar("SPDXDEPLOY"))

    dest = doc_path(spdx_deploy, spdx_doc.name, arch, subdir)
    dest.parent.mkdir(exist_ok=True, parents=True)
    with dest.open("wb") as f:
        doc_sha1 = spdx_doc.to_json(f, sort_keys=True, indent=indent)

    # Relative symlinks keep the deploy tree relocatable.
    # NOTE(review): `os` is not imported in the visible module header;
    # verify os.path.relpath resolves at runtime.
    l = _doc_path_by_namespace(spdx_deploy, arch, spdx_doc.documentNamespace)
    l.parent.mkdir(exist_ok=True, parents=True)
    l.symlink_to(os.path.relpath(dest, l.parent))

    l = _doc_path_by_hashfn(
        spdx_deploy, arch, spdx_doc.name, d.getVar("BB_HASHFILENAME")
    )
    l.parent.mkdir(exist_ok=True, parents=True)
    l.symlink_to(os.path.relpath(dest, l.parent))

    return doc_sha1
def read_doc(fn):
    """Load an SPDX document from a path or an already-open binary stream.

    Returns a (SPDXDocument, sha1-hex-digest) tuple, where the digest is
    computed over the raw serialized bytes.
    """
    import contextlib
    import hashlib
    import io
    import oe.spdx

    @contextlib.contextmanager
    def _opened():
        # Accept either a file-like object or something with .open() (Path).
        if isinstance(fn, io.IOBase):
            yield fn
        else:
            with fn.open("rb") as stream:
                yield stream

    with _opened() as stream:
        digest = hashlib.sha1()
        while chunk := stream.read(4096):
            digest.update(chunk)

        stream.seek(0)
        doc = oe.spdx.SPDXDocument.from_json(stream)

    return (doc, digest.hexdigest())

View File

@@ -0,0 +1,160 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.manifest import *
from oe.package_manager import *
import os
import traceback
class Sdk(object, metaclass=ABCMeta):
    """Abstract base class for SDK population backends.

    Concrete subclasses implement _populate(); this class provides the
    common output-tree layout, filesystem helpers that abort the build on
    failure, locale installation and post-processing.
    """

    def __init__(self, d, manifest_dir):
        self.d = d
        self.sdk_output = self.d.getVar('SDK_OUTPUT')
        self.sdk_native_path = self.d.getVar('SDKPATHNATIVE').strip('/')
        self.target_path = self.d.getVar('SDKTARGETSYSROOT').strip('/')
        self.sysconfdir = self.d.getVar('sysconfdir').strip('/')

        self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
        self.sdk_host_sysroot = self.sdk_output

        if manifest_dir is None:
            self.manifest_dir = self.d.getVar("SDK_DIR")
        else:
            self.manifest_dir = manifest_dir

        # Always start from a clean output tree.
        self.remove(self.sdk_output, True)

        self.install_order = Manifest.INSTALL_ORDER

    @abstractmethod
    def _populate(self):
        """Backend-specific SDK population (package installation etc.)."""
        pass

    def populate(self):
        """Populate the SDK output tree, then clean up and post-process."""
        self.mkdirhier(self.sdk_output)

        # call backend dependent implementation
        self._populate()

        # Don't ship any libGL in the SDK
        self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
                                 self.d.getVar('libdir_nativesdk').strip('/'),
                                 "libGL*"))

        # Fix or remove broken .la files
        self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
                                 self.d.getVar('libdir_nativesdk').strip('/'),
                                 "*.la"))

        # Link the ld.so.cache file into the hosts filesystem
        link_name = os.path.join(self.sdk_output, self.sdk_native_path,
                                 self.sysconfdir, "ld.so.cache")
        self.mkdirhier(os.path.dirname(link_name))
        os.symlink("/etc/ld.so.cache", link_name)

        execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND'))

    def movefile(self, sourcefile, destdir):
        """Move sourcefile into destdir, aborting the build on failure."""
        try:
            # FIXME: this check of movefile's return code to None should be
            # fixed within the function to use only exceptions to signal when
            # something goes wrong
            if bb.utils.movefile(sourcefile, destdir) is None:
                raise OSError("moving %s to %s failed"
                        %(sourcefile, destdir))
        #FIXME: using umbrella exc catching because bb.utils method raises it
        except Exception:
            bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
            bb.fatal("unable to place %s in final SDK location" % sourcefile)

    def mkdirhier(self, dirpath):
        """Create dirpath (and parents), aborting the build on failure."""
        try:
            bb.utils.mkdirhier(dirpath)
        except OSError:
            bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
            bb.fatal("cannot make dir for SDK: %s" % dirpath)

    def remove(self, path, recurse=False):
        """Remove path (glob allowed), warning instead of failing on error."""
        try:
            bb.utils.remove(path, recurse)
        #FIXME: using umbrella exc catching because bb.utils method raises it
        except Exception:
            bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
            bb.warn("cannot remove SDK dir: %s" % path)

    def install_locales(self, pm):
        """Install SDKIMAGE_LINGUAS locales and fold them into an archive."""
        linguas = self.d.getVar("SDKIMAGE_LINGUAS")
        if not linguas:
            # No linguas so do nothing
            return

        import fnmatch
        # Install the binary locales
        if linguas == "all":
            pm.install_glob("nativesdk-glibc-binary-localedata-*.utf-8", sdk=True)
        else:
            pm.install(["nativesdk-glibc-binary-localedata-%s.utf-8" % \
                        lang for lang in linguas.split()])

        # Generate a locale archive of them
        target_arch = self.d.getVar('SDK_ARCH')
        rootfs = oe.path.join(self.sdk_host_sysroot, self.sdk_native_path)
        localedir = oe.path.join(rootfs, self.d.getVar("libdir_nativesdk"), "locale")
        generate_locale_archive(self.d, rootfs, target_arch, localedir)
        # And now delete the binary locales
        pkgs = fnmatch.filter(pm.list_installed(), "nativesdk-glibc-binary-localedata-*.utf-8")
        pm.remove(pkgs)
def sdk_list_installed_packages(d, target, rootfs_dir=None):
    """List packages installed in an SDK sysroot.

    target selects the target sysroot (True) or the host/native part of
    the SDK (False).  rootfs_dir, when given, overrides the sysroot path.
    The opaque ``[a, b][cond]`` bool-indexing idiom is replaced with an
    explicit conditional.
    """
    import importlib

    if rootfs_dir is None:
        sdk_output = d.getVar('SDK_OUTPUT')
        if target is True:
            target_path = d.getVar('SDKTARGETSYSROOT').strip('/')
            rootfs_dir = os.path.join(sdk_output, target_path)
        else:
            rootfs_dir = sdk_output

    if target is False:
        # Host packages are queried through the SDK ipkg configuration.
        ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK")
        d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target)

    img_type = d.getVar('IMAGE_PKGTYPE')
    cls = importlib.import_module('oe.package_manager.' + img_type)
    return cls.PMPkgsList(d, rootfs_dir).list_pkgs()
def populate_sdk(d, manifest_dir=None):
    """Populate the SDK via the IMAGE_PKGTYPE backend's PkgSdk class.

    The backend may mutate os.environ; the environment is now restored in a
    ``finally`` block so a failing populate() no longer leaks environment
    changes into subsequent tasks.
    """
    env_bkp = os.environ.copy()

    img_type = d.getVar('IMAGE_PKGTYPE')
    import importlib
    cls = importlib.import_module('oe.package_manager.' + img_type + '.sdk')
    try:
        cls.PkgSdk(d, manifest_dir).populate()
    finally:
        os.environ.clear()
        os.environ.update(env_bkp)
def get_extra_sdkinfo(sstate_dir):
    """
    This function is going to be used for generating the target and host manifest files packages of eSDK.
    """
    import math

    extra_info = {'tasksizes': {}, 'filesizes': {}}
    for root, _, files in os.walk(sstate_dir):
        for fn in files:
            if not fn.endswith('.tgz'):
                continue
            # Size in KiB, rounded up.
            fsize = int(math.ceil(float(os.path.getsize(os.path.join(root, fn))) / 1024))
            # Archive names look like "...:<rest>_<task>[,...].tgz"; extract <task>.
            task = fn.rsplit(':', 1)[1].split('_', 1)[1].split(',')[0]
            extra_info['tasksizes'][task] = extra_info['tasksizes'].get(task, 0) + fsize
            extra_info['filesizes'][fn] = fsize
    return extra_info
if __name__ == "__main__":
    # No standalone behaviour is implemented for this module.
    pass

View File

@@ -0,0 +1,399 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
#
# This library is intended to capture the JSON SPDX specification in a type
# safe manner. It is not intended to encode any particular OE specific
# behaviors, see the sbom.py for that.
#
# The documented SPDX spec document doesn't cover the JSON syntax for
# particular configuration, which can make it hard to determine what the JSON
# syntax should be. I've found it is actually much simpler to read the official
# SPDX JSON schema which can be found here: https://github.com/spdx/spdx-spec
# in schemas/spdx-schema.json
#
import hashlib
import itertools
import json

# The SPDX specification version implemented by this module.
SPDX_VERSION = "2.2"
#
# The following are the support classes that are used to implement SPDX object
#
class _Property(object):
"""
A generic SPDX object property. The different types will derive from this
class
"""
def __init__(self, *, default=None):
self.default = default
def setdefault(self, dest, name):
if self.default is not None:
dest.setdefault(name, self.default)
class _String(_Property):
    """
    A scalar string property for an SPDX object
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def set_property(self, attrs, name):
        # Back the attribute with the object's _spdx dict.
        def _get(obj):
            return obj._spdx[name]

        def _set(obj, value):
            obj._spdx[name] = value

        def _del(obj):
            del obj._spdx[name]

        attrs[name] = property(_get, _set, _del)

    def init(self, source):
        return source
class _Object(_Property):
    """
    A scalar SPDX object property of a SPDX object
    """

    def __init__(self, cls, **kwargs):
        super().__init__(**kwargs)
        self.cls = cls

    def set_property(self, attrs, name):
        def get_helper(obj):
            # Lazily create the nested object on first access so callers can
            # write e.g. doc.creationInfo.created without initializing first.
            if not name in obj._spdx:
                obj._spdx[name] = self.cls()
            return obj._spdx[name]

        def set_helper(obj, value):
            obj._spdx[name] = value

        def del_helper(obj):
            del obj._spdx[name]

        # Wire up the deleter too: del_helper was previously defined but
        # never passed to property(), unlike _String and _ListProperty.
        attrs[name] = property(get_helper, set_helper, del_helper)

    def init(self, source):
        return self.cls(**source)
class _ListProperty(_Property):
    """
    A list of SPDX properties
    """

    def __init__(self, prop, **kwargs):
        super().__init__(**kwargs)
        self.prop = prop

    def set_property(self, attrs, name):
        def _get(obj):
            # Materialize an empty list on first access.
            if not name in obj._spdx:
                obj._spdx[name] = []
            return obj._spdx[name]

        def _set(obj, value):
            obj._spdx[name] = list(value)

        def _del(obj):
            del obj._spdx[name]

        attrs[name] = property(_get, _set, _del)

    def init(self, source):
        return [self.prop.init(o) for o in source]
class _StringList(_ListProperty):
    """A list-of-strings property: a _ListProperty whose elements are _String."""

    def __init__(self, **kwargs):
        super().__init__(_String(), **kwargs)
class _ObjectList(_ListProperty):
    """A list-of-objects property: a _ListProperty whose elements wrap cls."""

    def __init__(self, cls, **kwargs):
        super().__init__(_Object(cls), **kwargs)
class MetaSPDXObject(type):
    """
    A metaclass that allows properties (anything derived from a _Property
    class) to be defined for a SPDX object
    """
    def __new__(mcls, name, bases, attrs):
        # Record each declared _Property in _properties and replace the
        # class attribute with a real Python property descriptor.
        attrs["_properties"] = {}

        for key in attrs.keys():
            if isinstance(attrs[key], _Property):
                prop = attrs[key]
                attrs["_properties"][key] = prop
                # set_property replaces existing keys in attrs; the dict
                # size does not change, so mutating during iteration is safe.
                prop.set_property(attrs, key)

        return super().__new__(mcls, name, bases, attrs)
class SPDXObject(metaclass=MetaSPDXObject):
    """
    The base SPDX object; all SPDX spec classes must derive from this class
    """
    def __init__(self, **d):
        # All property values live in this dict; the metaclass-generated
        # properties read and write it.
        self._spdx = {}

        for name, prop in self._properties.items():
            prop.setdefault(self._spdx, name)
            if name in d:
                self._spdx[name] = prop.init(d[name])

    def serializer(self):
        # The raw dict is what the JSON encoder serializes.
        return self._spdx

    def __setattr__(self, name, value):
        # Only declared SPDX properties (and the backing dict itself) may be
        # assigned; anything else is rejected as a spec violation/typo.
        if name in self._properties or name == "_spdx":
            super().__setattr__(name, value)
            return
        raise KeyError("%r is not a valid SPDX property" % name)
#
# These are the SPDX objects implemented from the spec. The *only* properties
# that can be added to these objects are ones directly specified in the SPDX
# spec, however you may add helper functions to make operations easier.
#
# Defaults should *only* be specified if the SPDX spec says there is a certain
# required value for a field (e.g. dataLicense), or if the field is mandatory
# and has some sane "this field is unknown" (e.g. "NOASSERTION")
#
class SPDXAnnotation(SPDXObject):
    """An SPDX annotation: a dated comment from an annotator (tool/person)."""
    annotationDate = _String()
    annotationType = _String()
    annotator = _String()
    comment = _String()
class SPDXChecksum(SPDXObject):
    """A checksum (algorithm name + hex value) of a file or package."""
    algorithm = _String()
    checksumValue = _String()
class SPDXRelationship(SPDXObject):
    """A typed relationship between two SPDX elements (by SPDXID)."""
    spdxElementId = _String()
    relatedSpdxElement = _String()
    relationshipType = _String()
    comment = _String()
    annotations = _ObjectList(SPDXAnnotation)
class SPDXExternalReference(SPDXObject):
    """An external reference attached to a package (e.g. CPE, purl)."""
    referenceCategory = _String()
    referenceType = _String()
    referenceLocator = _String()
class SPDXPackageVerificationCode(SPDXObject):
    """A package verification code plus the files excluded from it."""
    packageVerificationCodeValue = _String()
    packageVerificationCodeExcludedFiles = _StringList()
class SPDXPackage(SPDXObject):
    """An SPDX Package element.

    Mandatory license/copyright fields default to "NOASSERTION" as the spec
    requires a value even when nothing is known.
    """
    # Checksum algorithm names accepted by the SPDX 2.2 schema.
    ALLOWED_CHECKSUMS = [
        "SHA1",
        "SHA224",
        "SHA256",
        "SHA384",
        "SHA512",
        "MD2",
        "MD4",
        "MD5",
        "MD6",
    ]

    name = _String()
    SPDXID = _String()
    versionInfo = _String()
    downloadLocation = _String(default="NOASSERTION")
    supplier = _String(default="NOASSERTION")
    homepage = _String()
    licenseConcluded = _String(default="NOASSERTION")
    licenseDeclared = _String(default="NOASSERTION")
    summary = _String()
    description = _String()
    sourceInfo = _String()
    copyrightText = _String(default="NOASSERTION")
    licenseInfoFromFiles = _StringList(default=["NOASSERTION"])
    externalRefs = _ObjectList(SPDXExternalReference)
    packageVerificationCode = _Object(SPDXPackageVerificationCode)
    hasFiles = _StringList()
    packageFileName = _String()
    annotations = _ObjectList(SPDXAnnotation)
    checksums = _ObjectList(SPDXChecksum)
class SPDXFile(SPDXObject):
    """An SPDX File element; license/copyright default to "NOASSERTION"."""
    SPDXID = _String()
    fileName = _String()
    licenseConcluded = _String(default="NOASSERTION")
    copyrightText = _String(default="NOASSERTION")
    licenseInfoInFiles = _StringList(default=["NOASSERTION"])
    checksums = _ObjectList(SPDXChecksum)
    fileTypes = _StringList()
class SPDXCreationInfo(SPDXObject):
    """Document creation metadata: timestamp, creators, license list version."""
    created = _String()
    licenseListVersion = _String()
    comment = _String()
    creators = _StringList()
class SPDXExternalDocumentRef(SPDXObject):
    """A reference to another SPDX document (by id, namespace and checksum)."""
    externalDocumentId = _String()
    spdxDocument = _String()
    checksum = _Object(SPDXChecksum)
class SPDXExtractedLicensingInfo(SPDXObject):
    """License text extracted from files for non-SPDX-listed licenses."""
    name = _String()
    comment = _String()
    licenseId = _String()
    extractedText = _String()
class SPDXDocument(SPDXObject):
    """The top-level SPDX document.

    dataLicense defaults to CC0-1.0 and SPDXID to SPDXRef-DOCUMENT as the
    spec mandates fixed values for these fields.
    """
    spdxVersion = _String(default="SPDX-" + SPDX_VERSION)
    dataLicense = _String(default="CC0-1.0")
    SPDXID = _String(default="SPDXRef-DOCUMENT")
    name = _String()
    documentNamespace = _String()
    creationInfo = _Object(SPDXCreationInfo)
    packages = _ObjectList(SPDXPackage)
    files = _ObjectList(SPDXFile)
    relationships = _ObjectList(SPDXRelationship)
    externalDocumentRefs = _ObjectList(SPDXExternalDocumentRef)
    hasExtractedLicensingInfos = _ObjectList(SPDXExtractedLicensingInfo)

    def __init__(self, **d):
        # Currently identical to the base constructor; kept as an explicit
        # extension point.
        super().__init__(**d)

    def to_json(self, f, *, sort_keys=False, indent=None, separators=None):
        """Serialize to the binary stream f; returns the sha1 hex digest of
        the bytes written (streamed, so the document is hashed as written)."""
        class Encoder(json.JSONEncoder):
            def default(self, o):
                if isinstance(o, SPDXObject):
                    return o.serializer()

                return super().default(o)

        sha1 = hashlib.sha1()
        for chunk in Encoder(
            sort_keys=sort_keys,
            indent=indent,
            separators=separators,
        ).iterencode(self):
            chunk = chunk.encode("utf-8")
            f.write(chunk)
            sha1.update(chunk)

        return sha1.hexdigest()

    @classmethod
    def from_json(cls, f):
        """Deserialize a document from the JSON stream f."""
        return cls(**json.load(f))

    def add_relationship(self, _from, relationship, _to, *, comment=None, annotation=None):
        """Append a relationship; endpoints may be SPDXObjects or raw IDs."""
        if isinstance(_from, SPDXObject):
            from_spdxid = _from.SPDXID
        else:
            from_spdxid = _from

        if isinstance(_to, SPDXObject):
            to_spdxid = _to.SPDXID
        else:
            to_spdxid = _to

        r = SPDXRelationship(
            spdxElementId=from_spdxid,
            relatedSpdxElement=to_spdxid,
            relationshipType=relationship,
        )

        if comment is not None:
            r.comment = comment

        if annotation is not None:
            r.annotations.append(annotation)

        self.relationships.append(r)

    def find_by_spdxid(self, spdxid):
        """Return the package or file with the given SPDXID, or None."""
        for o in itertools.chain(self.packages, self.files):
            if o.SPDXID == spdxid:
                return o
        return None

    def find_external_document_ref(self, namespace):
        """Return the external document ref for namespace, or None."""
        for r in self.externalDocumentRefs:
            if r.spdxDocument == namespace:
                return r
        return None
def is_compiled_source(filename, compiled_sources, types):
    """
    Check if the file is a compiled file
    """
    import os

    # Without debug-source information, assume every file was compiled.
    if not compiled_sources:
        return True

    # Files whose extension is not a known compiled-source type (Makefiles,
    # .py helpers, ...) are always kept in the SPDX.
    extension = os.path.basename(filename).partition(".")[2]
    if extension not in types:
        return True

    # Otherwise keep it only if the debug info recorded it as compiled.
    return filename in compiled_sources
def get_compiled_sources(d):
    """
    Get list of compiled sources from debug information and normalize the paths

    Returns (sources, types): the set of source paths recorded in
    debugsources.list and the set of their file extensions.  Returns
    ([], []) when no debug-source info is available.
    """
    import itertools
    # The visible module header does not import os; import it locally so
    # os.path.basename below cannot raise NameError.
    import os
    import oe.package

    source_info = oe.package.read_debugsources_info(d)
    if not source_info:
        bb.debug(1, "Do not have debugsources.list. Skipping")
        return [], []

    # Sources are not split now in SPDX, so we aggregate them
    sources = set(itertools.chain.from_iterable(source_info.values()))
    # Check extensions of files
    types = set()
    for src in sources:
        # Extension is everything after the first dot (e.g. "tar.gz").
        ext = os.path.basename(src).partition(".")[2]
        if ext:
            types.add(ext)
    bb.debug(1, f"Num of sources: {len(sources)} and types: {len(types)} {str(types)}")
    return sources, types

View File

@@ -0,0 +1,691 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import bb.siggen
import bb.runqueue
import oe
import netrc
def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
    """Decide whether a task dependency should influence task signatures.

    Returns True to keep the dependency in the signature, False to drop it.
    Used as the rundep_check hook of the OE signature generators.
    """
    # Return True if we should keep the dependency, False to drop it
    def isNative(x):
        return x.endswith("-native")
    def isCross(x):
        return "-cross-" in x
    def isNativeSDK(x):
        return x.startswith("nativesdk-")
    def isKernel(mc, fn):
        inherits = " ".join(dataCaches[mc].inherits[fn])
        return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
    def isPackageGroup(mc, fn):
        inherits = " ".join(dataCaches[mc].inherits[fn])
        return "/packagegroup.bbclass" in inherits
    def isAllArch(mc, fn):
        inherits = " ".join(dataCaches[mc].inherits[fn])
        return "/allarch.bbclass" in inherits
    def isImage(mc, fn):
        return "/image.bbclass" in " ".join(dataCaches[mc].inherits[fn])

    depmc, _, deptaskname, depmcfn = bb.runqueue.split_tid_mcfn(dep)
    mc, _ = bb.runqueue.split_mc(fn)

    # We can skip the rm_work task signature to avoid running the task
    # when we remove some tasks from the dependencie chain
    # i.e INHERIT:remove = "create-spdx" will trigger the do_rm_work
    if task == "do_rm_work":
        return False

    # (Almost) always include our own inter-task dependencies (unless it comes
    # from a mcdepends). The exception is the special
    # do_kernel_configme->do_unpack_and_patch dependency from archiver.bbclass.
    if recipename == depname and depmc == mc:
        if task == "do_kernel_configme" and deptaskname == "do_unpack_and_patch":
            return False
        return True

    # Exclude well defined recipe->dependency
    if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
        return False

    # Check for special wildcard
    if "*->%s" % depname in siggen.saferecipedeps and recipename != depname:
        return False

    # Don't change native/cross/nativesdk recipe dependencies any further
    if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
        return True

    # Only target packages beyond here

    # allarch packagegroups are assumed to have well behaved names which don't change between architecures/tunes
    if isPackageGroup(mc, fn) and isAllArch(mc, fn) and not isNative(depname):
        return False

    # Exclude well defined machine specific configurations which don't change ABI
    if depname in siggen.abisaferecipes and not isImage(mc, fn):
        return False

    # Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
    # if we're just doing an RRECOMMENDS:xxx = "kernel-module-*", not least because the checksum
    # is machine specific.
    # Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
    # and we reccomend a kernel-module, we exclude the dependency.
    if dataCaches and isKernel(depmc, depmcfn) and not isKernel(mc, fn):
        for pkg in dataCaches[mc].runrecs[fn]:
            if " ".join(dataCaches[mc].runrecs[fn][pkg]).find("kernel-module-") != -1:
                return False

    # Default to keep dependencies
    return True
def sstate_lockedsigs(d):
    """Parse SIGGEN_LOCKEDSIGS_<type> variables from the datastore.

    Returns {pn: {task: [hash, variable-name]}} for every
    "pn:task:hash" entry listed under the types in SIGGEN_LOCKEDSIGS_TYPES.
    """
    sigs = {}
    for t in (d.getVar("SIGGEN_LOCKEDSIGS_TYPES") or "").split():
        siggen_lockedsigs_var = "SIGGEN_LOCKEDSIGS_%s" % t
        for entry in (d.getVar(siggen_lockedsigs_var) or "").split():
            pn, task, h = entry.split(":", 2)
            sigs.setdefault(pn, {})[task] = [h, siggen_lockedsigs_var]
    return sigs
class SignatureGeneratorOEBasicHashMixIn(object):
    """Mixin layering OE signature policy on top of bitbake's hash generators:
    dependency filtering (sstate_rundepfilter), locked signatures and the
    associated check/dump tooling."""
    supports_multiconfig_datacaches = True

    def init_rundepcheck(self, data):
        # Cache the OE policy variables from the datastore.
        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
        self.lockedsigs = sstate_lockedsigs(data)
        self.lockedhashes = {}
        self.lockedpnmap = {}
        self.lockedhashfn = {}
        self.machine = data.getVar("MACHINE")
        self.mismatch_msgs = []
        self.mismatch_number = 0
        self.lockedsigs_msgs = ""
        self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
                            "").split()
        self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
        # _internal guards get_unihash recursion from get_taskhash below.
        self._internal = False
        pass

    def tasks_resolved(self, virtmap, virtpnmap, dataCache):
        # Translate virtual/xxx entries to PN values
        newabisafe = []
        for a in self.abisaferecipes:
            if a in virtpnmap:
                newabisafe.append(virtpnmap[a])
            else:
                newabisafe.append(a)
        self.abisaferecipes = newabisafe
        newsafedeps = []
        for a in self.saferecipedeps:
            a1, a2 = a.split("->")
            if a1 in virtpnmap:
                a1 = virtpnmap[a1]
            if a2 in virtpnmap:
                a2 = virtpnmap[a2]
            newsafedeps.append(a1 + "->" + a2)
        self.saferecipedeps = newsafedeps

    def rundep_check(self, fn, recipename, task, dep, depname, dataCaches = None):
        # Delegate the keep/drop decision to the module-level policy filter.
        return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCaches)

    def get_taskdata(self):
        # Extend bitbake's pickled task data with our locked-sig maps.
        return (self.lockedpnmap, self.lockedhashfn, self.lockedhashes) + super().get_taskdata()

    def set_taskdata(self, data):
        self.lockedpnmap, self.lockedhashfn, self.lockedhashes = data[:3]
        super().set_taskdata(data[3:])

    def dump_sigs(self, dataCache, options):
        if 'lockedsigs' in options:
            sigfile = os.getcwd() + "/locked-sigs.inc"
            bb.plain("Writing locked sigs to %s" % sigfile)
            self.dump_lockedsigs(sigfile)
        return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)

    def get_taskhash(self, tid, deps, dataCaches):
        """Compute the task hash, overriding it with the locked signature
        when this recipe/task is locked and not explicitly unlocked."""
        if tid in self.lockedhashes:
            if self.lockedhashes[tid]:
                return self.lockedhashes[tid]
            else:
                return super().get_taskhash(tid, deps, dataCaches)

        h = super().get_taskhash(tid, deps, dataCaches)

        (mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)

        recipename = dataCaches[mc].pkg_fn[fn]
        self.lockedpnmap[fn] = recipename
        self.lockedhashfn[fn] = dataCaches[mc].hashfn[fn]

        unlocked = False
        if recipename in self.unlockedrecipes:
            unlocked = True
        else:
            def recipename_from_dep(dep):
                (depmc, _, _, depfn) = bb.runqueue.split_tid_mcfn(dep)
                return dataCaches[depmc].pkg_fn[depfn]

            # If any unlocked recipe is in the direct dependencies then the
            # current recipe should be unlocked as well.
            depnames = [ recipename_from_dep(x) for x in deps if mc == bb.runqueue.mc_from_tid(x)]
            if any(x in y for y in depnames for x in self.unlockedrecipes):
                self.unlockedrecipes[recipename] = ''
                unlocked = True

        if not unlocked and recipename in self.lockedsigs:
            if task in self.lockedsigs[recipename]:
                h_locked = self.lockedsigs[recipename][task][0]
                var = self.lockedsigs[recipename][task][1]
                self.lockedhashes[tid] = h_locked
                # _internal stops get_cached_unihash from short-circuiting to
                # the locked hash while we query the real unihash here.
                self._internal = True
                unihash = self.get_unihash(tid)
                self._internal = False
                #bb.warn("Using %s %s %s" % (recipename, task, h))

                if h != h_locked and h_locked != unihash:
                    self.mismatch_number += 1
                    self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
                                          % (recipename, task, h, h_locked, var))

                return h_locked

        self.lockedhashes[tid] = False
        #bb.warn("%s %s %s" % (recipename, task, h))
        return h

    def get_stampfile_hash(self, tid):
        if tid in self.lockedhashes and self.lockedhashes[tid]:
            return self.lockedhashes[tid]
        return super().get_stampfile_hash(tid)

    def get_cached_unihash(self, tid):
        if tid in self.lockedhashes and self.lockedhashes[tid] and not self._internal:
            return self.lockedhashes[tid]
        return super().get_cached_unihash(tid)

    def dump_sigtask(self, fn, task, stampbase, runtime):
        # Locked tasks don't get signature dumps; their hash is fixed anyway.
        tid = fn + ":" + task
        if tid in self.lockedhashes and self.lockedhashes[tid]:
            return
        super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)

    def dump_lockedsigs(self, sigfile, taskfilter=None):
        """Write SIGGEN_LOCKEDSIGS_* assignments for the current run's tasks,
        grouped by package-arch type, to sigfile (locked-sigs.inc format)."""
        types = {}
        for tid in self.runtaskdeps:
            # Bitbake changed this to a tuple in newer versions
            if isinstance(tid, tuple):
                tid = tid[1]
            if taskfilter:
                if not tid in taskfilter:
                    continue
            fn = bb.runqueue.fn_from_tid(tid)
            # Field 6 of the colon-separated hashfn suffix is the package arch.
            t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
            t = 't-' + t.replace('_', '-')
            if t not in types:
                types[t] = []
            types[t].append(tid)

        with open(sigfile, "w") as f:
            l = sorted(types)
            for t in l:
                f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
                types[t].sort()
                sortedtid = sorted(types[t], key=lambda tid: self.lockedpnmap[bb.runqueue.fn_from_tid(tid)])
                for tid in sortedtid:
                    (_, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
                    if tid not in self.taskhash:
                        continue
                    f.write("    " + self.lockedpnmap[fn] + ":" + task + ":" + self.get_unihash(tid) + " \\\n")
                f.write('    "\n')
            f.write('SIGGEN_LOCKEDSIGS_TYPES:%s = "%s"' % (self.machine, " ".join(l)))

    def dump_siglist(self, sigfile, path_prefix_strip=None):
        """Write a sorted "pn:task fn taskhash" listing of all task hashes."""
        def strip_fn(fn):
            nonlocal path_prefix_strip
            if not path_prefix_strip:
                return fn

            fn_exp = fn.split(":")
            if fn_exp[-1].startswith(path_prefix_strip):
                fn_exp[-1] = fn_exp[-1][len(path_prefix_strip):]

            return ":".join(fn_exp)

        with open(sigfile, "w") as f:
            tasks = []
            for taskitem in self.taskhash:
                (fn, task) = taskitem.rsplit(":", 1)
                pn = self.lockedpnmap[fn]
                tasks.append((pn, task, strip_fn(fn), self.taskhash[taskitem]))
            for (pn, task, fn, taskhash) in sorted(tasks):
                f.write('%s:%s %s %s\n' % (pn, task, fn, taskhash))

    def checkhashes(self, sq_data, missed, found, d):
        """Report locked-signature mismatches and locked tasks missing from
        sstate, at the severity configured by SIGGEN_LOCKEDSIGS_*_CHECK."""
        warn_msgs = []
        error_msgs = []
        sstate_missing_msgs = []
        info_msgs = None

        if self.lockedsigs:
            if len(self.lockedsigs) > 10:
                self.lockedsigs_msgs = "There are %s recipes with locked tasks (%s task(s) have non matching signature)" % (len(self.lockedsigs), self.mismatch_number)
            else:
                self.lockedsigs_msgs = "The following recipes have locked tasks:"
                for pn in self.lockedsigs:
                    self.lockedsigs_msgs += " %s" % (pn)

        for tid in sq_data['hash']:
            if tid not in found:
                for pn in self.lockedsigs:
                    taskname = bb.runqueue.taskname_from_tid(tid)
                    if sq_data['hash'][tid] in iter(self.lockedsigs[pn].values()):
                        if taskname == 'do_shared_workdir':
                            continue
                        sstate_missing_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
                                               % (pn, taskname, sq_data['hash'][tid]))

        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK")
        if checklevel == 'info':
            info_msgs = self.lockedsigs_msgs
        if checklevel == 'warn' or checklevel == 'info':
            warn_msgs += self.mismatch_msgs
        elif checklevel == 'error':
            error_msgs += self.mismatch_msgs

        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK")
        if checklevel == 'warn':
            warn_msgs += sstate_missing_msgs
        elif checklevel == 'error':
            error_msgs += sstate_missing_msgs

        if info_msgs:
            bb.note(info_msgs)
        if warn_msgs:
            bb.warn("\n".join(warn_msgs))
        if error_msgs:
            bb.fatal("\n".join(error_msgs))
class SignatureGeneratorOEBasicHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
    # OE-policy variant of bitbake's basic hash generator; identified by name.
    name = "OEBasicHash"
class SignatureGeneratorOEEquivHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorUniHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
    """OE-policy generator with hash-equivalence support; requires a hash
    server (BB_HASHSERVE) and an equivalence method to be configured."""
    name = "OEEquivHash"

    def init_rundepcheck(self, data):
        super().init_rundepcheck(data)
        self.server = data.getVar('BB_HASHSERVE')
        if not self.server:
            bb.fatal("OEEquivHash requires BB_HASHSERVE to be set")
        self.method = data.getVar('SSTATE_HASHEQUIV_METHOD')
        if not self.method:
            bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_METHOD to be set")
        self.max_parallel = int(data.getVar('BB_HASHSERVE_MAX_PARALLEL') or 1)
        self.username = data.getVar("BB_HASHSERVE_USERNAME")
        self.password = data.getVar("BB_HASHSERVE_PASSWORD")
        if not self.username or not self.password:
            # Fall back to ~/.netrc credentials for the hash server; a
            # missing or unparsable netrc must not break the build.
            try:
                n = netrc.netrc()
                auth = n.authenticators(self.server)
                if auth is not None:
                    self.username, _, self.password = auth
            except FileNotFoundError:
                pass
            except netrc.NetrcParseError as e:
                bb.warn("Error parsing %s:%s: %s" % (e.filename, str(e.lineno), e.msg))
# Insert these classes into siggen's namespace so it can see and select them
# (presumably chosen via the signature handler name — verify against
# bitbake's siggen selection logic).
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
bb.siggen.SignatureGeneratorOEEquivHash = SignatureGeneratorOEEquivHash
def find_siginfo(pn, taskname, taskhashlist, d):
    """ Find signature data files for comparison purposes

    Searches the stamps directory first, then the sstate cache.  Returns a
    dict mapping hash value -> {'path', 'sstate', 'time'}.  pn may also be a
    combined "pn:task" (or "mc:<mc>:pn:task") key when taskname is falsy.
    """

    import fnmatch
    import glob

    if not taskname:
        # We have to derive pn and taskname
        key = pn
        if key.startswith("mc:"):
           # mc:<mc>:<pn>:<task>
           _, _, pn, taskname = key.split(':', 3)
        else:
           # <pn>:<task>
           pn, taskname = key.split(':', 1)

    hashfiles = {}

    def get_hashval(siginfo):
        # Extract the hash from either a .siginfo sstate name or a stamp name.
        if siginfo.endswith('.siginfo'):
            return siginfo.rpartition(':')[2].partition('_')[0]
        else:
            return siginfo.rpartition('.')[2]

    def get_time(fullpath):
        return os.stat(fullpath).st_mtime

    # First search in stamps dir
    localdata = d.createCopy()
    localdata.setVar('MULTIMACH_TARGET_SYS', '*')
    localdata.setVar('PN', pn)
    localdata.setVar('PV', '*')
    localdata.setVar('PR', '*')
    localdata.setVar('EXTENDPE', '')
    stamp = localdata.getVar('STAMP')
    if pn.startswith("gcc-source"):
        # gcc-source shared workdir is a special case :(
        stamp = localdata.expand("${STAMPS_DIR}/work-shared/gcc-${PV}-${PR}")

    filespec = '%s.%s.sigdata.*' % (stamp, taskname)
    foundall = False
    import glob
    bb.debug(1, "Calling glob.glob on {}".format(filespec))
    for fullpath in glob.glob(filespec):
        match = False
        if taskhashlist:
            for taskhash in taskhashlist:
                if fullpath.endswith('.%s' % taskhash):
                    hashfiles[taskhash] = {'path':fullpath, 'sstate':False, 'time':get_time(fullpath)}
                    if len(hashfiles) == len(taskhashlist):
                        foundall = True
                        break
        else:
            hashval = get_hashval(fullpath)
            hashfiles[hashval] = {'path':fullpath, 'sstate':False, 'time':get_time(fullpath)}

    if not taskhashlist or (len(hashfiles) < 2 and not foundall):
        # That didn't work, look in sstate-cache
        hashes = taskhashlist or ['?' * 64]
        localdata = bb.data.createCopy(d)
        for hashval in hashes:
            localdata.setVar('PACKAGE_ARCH', '*')
            localdata.setVar('TARGET_VENDOR', '*')
            localdata.setVar('TARGET_OS', '*')
            localdata.setVar('PN', pn)
            # gcc-source is a special case, same as with local stamps above
            if pn.startswith("gcc-source"):
                localdata.setVar('PN', "gcc")
            localdata.setVar('PV', '*')
            localdata.setVar('PR', '*')
            localdata.setVar('BB_TASKHASH', hashval)
            localdata.setVar('SSTATE_CURRTASK', taskname[3:])
            swspec = localdata.getVar('SSTATE_SWSPEC')
            if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic', 'do_preconfigure'] and swspec:
                localdata.setVar('SSTATE_PKGSPEC', '${SSTATE_SWSPEC}')
            elif pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
                localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
            filespec = '%s.siginfo' % localdata.getVar('SSTATE_PKG')

            bb.debug(1, "Calling glob.glob on {}".format(filespec))
            matchedfiles = glob.glob(filespec)
            for fullpath in matchedfiles:
                actual_hashval = get_hashval(fullpath)
                if actual_hashval in hashfiles:
                    continue
                hashfiles[actual_hashval] = {'path':fullpath, 'sstate':True, 'time':get_time(fullpath)}

    return hashfiles
# Expose the OE siginfo finder (and its interface version) to bitbake's
# signature comparison machinery.
bb.siggen.find_siginfo = find_siginfo
bb.siggen.find_siginfo_version = 2
def sstate_get_manifest_filename(task, d):
    """
    Return the sstate manifest file path for a particular task.
    Also returns the datastore that can be used to query related variables.
    """
    localdata = d.createCopy()
    stamp_info = d.getVarFlag("do_" + task, 'stamp-extra-info')
    if stamp_info:
        localdata.setVar("SSTATE_MANMACH", stamp_info)
    manifest = localdata.expand("${SSTATE_MANFILEPREFIX}.%s" % task)
    return (manifest, localdata)
def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
    """
    Locate the sstate manifest "manifest-<pkgarch>-<taskdata>.<taskname>".

    taskdata is the recipe name; taskdata2 may carry a
    "virtual:multilib:<variant>:" prefix selecting a multilib datastore.
    multilibcache memoizes per-variant datastores across calls.
    Returns (manifest_path, datastore). Calls bb.fatal() when no candidate
    manifest file exists.
    """
    d2 = d
    variant = ''
    curr_variant = ''
    if d.getVar("BBEXTENDCURR") == "multilib":
        curr_variant = d.getVar("BBEXTENDVARIANT")
        if "virtclass-multilib" not in d.getVar("OVERRIDES"):
            curr_variant = "invalid"
    if taskdata2.startswith("virtual:multilib"):
        variant = taskdata2.split(":")[2]
    if curr_variant != variant:
        # Switch to (and cache) the datastore of the requested multilib variant.
        if variant not in multilibcache:
            multilibcache[variant] = oe.utils.get_multilib_datastore(variant, d)
        d2 = multilibcache[variant]
    # Candidate package architectures, chosen by recipe class; tried in order.
    if taskdata.endswith("-native"):
        pkgarchs = ["${BUILD_ARCH}", "${BUILD_ARCH}_${ORIGNATIVELSBSTRING}"]
    elif taskdata.startswith("nativesdk-"):
        pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
    elif "-cross-canadian" in taskdata:
        pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
    elif "-cross-" in taskdata:
        pkgarchs = ["${BUILD_ARCH}"]
    elif "-crosssdk" in taskdata:
        pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
    else:
        pkgarchs = ['${MACHINE_ARCH}']
    # Fall back through the extra architectures (most specific first), then
    # the generic catch-alls.
    pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
    pkgarchs.append('allarch')
    pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')
    searched_manifests = []
    for pkgarch in pkgarchs:
        manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
        if os.path.exists(manifest):
            return manifest, d2
        searched_manifests.append(manifest)
    bb.fatal("The sstate manifest for task '%s:%s' (multilib variant '%s') could not be found.\nThe pkgarchs considered were: %s.\nBut none of these manifests exists:\n    %s"
            % (taskdata, taskname, variant, d2.expand(", ".join(pkgarchs)),"\n    ".join(searched_manifests)))
    # Unreachable when bb.fatal raises (as it does under BitBake); kept as a
    # defensive fallback.
    return None, d2
def OEOuthashBasic(path, sigfile, task, d):
    """
    Basic output hash function

    Calculates the output hash of a task by hashing all output file metadata,
    and file contents.

    Walks 'path' deterministically (sorted dirs/files), feeding one formatted
    line per entry (type, permissions, optional owner/times, size, content
    hash, name) into a SHA-256. The same stream is optionally mirrored to
    'sigfile' for human inspection. Returns the hex digest.
    NOTE: temporarily chdir()s into 'path'; restored in the finally block.
    """
    import hashlib
    import stat
    import pwd
    import grp
    import re
    import fnmatch
    def update_hash(s):
        # Feed data to the running hash and, if requested, the signature dump.
        s = s.encode('utf-8')
        h.update(s)
        if sigfile:
            sigfile.write(s)
    h = hashlib.sha256()
    prev_dir = os.getcwd()
    corebase = d.getVar("COREBASE")
    tmpdir = d.getVar("TMPDIR")
    # Owner/group only make sense when running under pseudo.
    include_owners = os.environ.get('PSEUDO_DISABLED') == '0'
    if "package_write_" in task or task == "package_qa":
        include_owners = False
    include_timestamps = False
    include_root = True
    if task == "package":
        include_timestamps = True
        include_root = False
    source_date_epoch = float(d.getVar("SOURCE_DATE_EPOCH"))
    hash_version = d.getVar('HASHEQUIV_HASH_VERSION')
    extra_sigdata = d.getVar("HASHEQUIV_EXTRA_SIGDATA")
    # SSTATE_HASHEQUIV_FILEMAP entries are "task:glob:replacement"; collect the
    # replacements per glob for files whose contents need path filtering.
    filemaps = {}
    for m in (d.getVar('SSTATE_HASHEQUIV_FILEMAP') or '').split():
        entry = m.split(":")
        if len(entry) != 3 or entry[0] != task:
            continue
        filemaps.setdefault(entry[1], [])
        filemaps[entry[1]].append(entry[2])
    try:
        os.chdir(path)
        basepath = os.path.normpath(path)
        update_hash("OEOuthashBasic\n")
        if hash_version:
            update_hash(hash_version + "\n")
        if extra_sigdata:
            update_hash(extra_sigdata + "\n")
        # It is only currently useful to get equivalent hashes for things that
        # can be restored from sstate. Since the sstate object is named using
        # SSTATE_PKGSPEC and the task name, those should be included in the
        # output hash calculation.
        update_hash("SSTATE_PKGSPEC=%s\n" % d.getVar('SSTATE_PKGSPEC'))
        update_hash("task=%s\n" % task)
        for root, dirs, files in os.walk('.', topdown=True):
            # Sort directories to ensure consistent ordering when recursing
            dirs.sort()
            files.sort()
            def process(path):
                # Emit one ls(1)-like line for this entry into the hash.
                s = os.lstat(path)
                if stat.S_ISDIR(s.st_mode):
                    update_hash('d')
                elif stat.S_ISCHR(s.st_mode):
                    update_hash('c')
                elif stat.S_ISBLK(s.st_mode):
                    update_hash('b')
                elif stat.S_ISSOCK(s.st_mode):
                    update_hash('s')
                elif stat.S_ISLNK(s.st_mode):
                    update_hash('l')
                elif stat.S_ISFIFO(s.st_mode):
                    update_hash('p')
                else:
                    update_hash('-')
                def add_perm(mask, on, off='-'):
                    if mask & s.st_mode:
                        update_hash(on)
                    else:
                        update_hash(off)
                add_perm(stat.S_IRUSR, 'r')
                add_perm(stat.S_IWUSR, 'w')
                if stat.S_ISUID & s.st_mode:
                    add_perm(stat.S_IXUSR, 's', 'S')
                else:
                    add_perm(stat.S_IXUSR, 'x')
                if include_owners:
                    # Group/other permissions are only relevant in pseudo context
                    add_perm(stat.S_IRGRP, 'r')
                    add_perm(stat.S_IWGRP, 'w')
                    if stat.S_ISGID & s.st_mode:
                        add_perm(stat.S_IXGRP, 's', 'S')
                    else:
                        add_perm(stat.S_IXGRP, 'x')
                    add_perm(stat.S_IROTH, 'r')
                    add_perm(stat.S_IWOTH, 'w')
                    if stat.S_ISVTX & s.st_mode:
                        update_hash('t')
                    else:
                        add_perm(stat.S_IXOTH, 'x')
                    try:
                        update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
                        update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
                    except KeyError as e:
                        msg = ("KeyError: %s\nPath %s is owned by uid %d, gid %d, which doesn't match "
                               "any user/group on target. This may be due to host contamination." %
                               (e, os.path.abspath(path), s.st_uid, s.st_gid))
                        raise Exception(msg).with_traceback(e.__traceback__)
                if include_timestamps:
                    # Need to clamp to SOURCE_DATE_EPOCH
                    if s.st_mtime > source_date_epoch:
                        update_hash(" %10d" % source_date_epoch)
                    else:
                        update_hash(" %10d" % s.st_mtime)
                update_hash(" ")
                if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
                    update_hash("%9s" % ("%d.%d" % (os.major(s.st_rdev), os.minor(s.st_rdev))))
                else:
                    update_hash(" " * 9)
                # Does any filemap glob apply to this path?
                filterfile = False
                for entry in filemaps:
                    if fnmatch.fnmatch(path, entry):
                        filterfile = True
                update_hash(" ")
                if stat.S_ISREG(s.st_mode) and not filterfile:
                    update_hash("%10d" % s.st_size)
                else:
                    update_hash(" " * 10)
                update_hash(" ")
                fh = hashlib.sha256()
                if stat.S_ISREG(s.st_mode):
                    # Hash file contents
                    if filterfile:
                        # Need to ignore paths in crossscripts and postinst-useradd files.
                        # NOTE: 'd' shadows the datastore here; the datastore is
                        # not referenced again inside process().
                        with open(path, 'rb') as d:
                            chunk = d.read()
                            chunk = chunk.replace(bytes(basepath, encoding='utf8'), b'')
                            for entry in filemaps:
                                if not fnmatch.fnmatch(path, entry):
                                    continue
                                for r in filemaps[entry]:
                                    if r.startswith("regex-"):
                                        chunk = re.sub(bytes(r[6:], encoding='utf8'), b'', chunk)
                                    else:
                                        chunk = chunk.replace(bytes(r, encoding='utf8'), b'')
                            fh.update(chunk)
                    else:
                        with open(path, 'rb') as d:
                            for chunk in iter(lambda: d.read(4096), b""):
                                fh.update(chunk)
                    update_hash(fh.hexdigest())
                else:
                    update_hash(" " * len(fh.hexdigest()))
                update_hash(" %s" % path)
                if stat.S_ISLNK(s.st_mode):
                    update_hash(" -> %s" % os.readlink(path))
                update_hash("\n")
            # Process this directory and all its child files
            if include_root or root != ".":
                process(root)
            for f in files:
                if f == 'fixmepath':
                    continue
                process(os.path.join(root, f))
            for dir in dirs:
                # Symlinked directories are hashed as entries but not recursed into.
                if os.path.islink(os.path.join(root, dir)):
                    process(os.path.join(root, dir))
    finally:
        os.chdir(prev_dir)
    return h.hexdigest()

View File

@@ -0,0 +1,332 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import logging
import oe.classutils
import shlex
from bb.process import Popen, ExecutionError
logger = logging.getLogger('BitBake.OE.Terminal')
class UnsupportedTerminal(Exception):
    """Raised when a terminal implementation cannot run in this environment."""
class NoSupportedTerminals(Exception):
    """Raised when no registered terminal could be used.

    The attempted terminal commands are kept in ``terms`` for diagnostics.
    """
    def __init__(self, terms):
        self.terms = terms
class Registry(oe.classutils.ClassRegistry):
    """Class registry of terminal implementations, keyed by lowercased name."""
    command = None

    def __init__(cls, name, bases, attrs):
        super().__init__(name.lower(), bases, attrs)

    @property
    def implemented(cls):
        # A terminal class is usable only if it defines a spawn command.
        return bool(cls.command)
class Terminal(Popen, metaclass=Registry):
    """Base class: spawns the terminal's command template as a subprocess."""
    def __init__(self, sh_cmd, title=None, env=None, d=None):
        from subprocess import STDOUT

        cmd = self.format_command(sh_cmd, title)
        try:
            Popen.__init__(self, cmd, env=env, stderr=STDOUT)
        except OSError as exc:
            import errno
            if exc.errno != errno.ENOENT:
                raise
            # The terminal binary is missing: report it as unusable.
            raise UnsupportedTerminal(self.name)

    def format_command(self, sh_cmd, title):
        # Substitute the {title}/{command}/{cwd} placeholders in the template.
        subst = {'title': title or 'Terminal', 'command': sh_cmd, 'cwd': os.getcwd() }
        if isinstance(self.command, str):
            return shlex.split(self.command.format(**subst))
        return [part.format(**subst) for part in self.command]
class XTerminal(Terminal):
    """Base class for terminals that require an X display."""
    def __init__(self, sh_cmd, title=None, env=None, d=None):
        super().__init__(sh_cmd, title, env, d)
        # Checked after spawning, matching the historical behaviour.
        if not os.environ.get('DISPLAY'):
            raise UnsupportedTerminal(self.name)
class Gnome(XTerminal):
    command = 'gnome-terminal -t "{title}" -- {command}'
    priority = 2

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Recent gnome-terminal releases reject non-UTF8 charsets
        # (https://bugzilla.gnome.org/show_bug.cgi?id=732127); as a workaround
        # clear LC_ALL so the locale settings are used instead. Remove once
        # the gnome-terminal project fixes this.
        if os.getenv('LC_ALL'):
            os.putenv('LC_ALL', '')
        super().__init__(sh_cmd, title, env, d)
class Mate(XTerminal):
    """MATE terminal; --disable-factory keeps it a trackable child process."""
    priority = 2
    command = 'mate-terminal --disable-factory -t "{title}" -x {command}'
class Xfce(XTerminal):
    """Xfce4 terminal."""
    priority = 2
    command = 'xfce4-terminal -T "{title}" -e "{command}"'
class Terminology(XTerminal):
    """Enlightenment's terminology terminal."""
    priority = 2
    command = 'terminology -T="{title}" -e {command}'
class Konsole(XTerminal):
    command = 'konsole --separate --workdir . -p tabtitle="{title}" -e {command}'
    priority = 2

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Older konsole releases need different command lines.
        vernum = check_terminal_version("konsole")
        if vernum and bb.utils.vercmp_string_op(vernum, "2.0.0", "<"):
            # Konsole from KDE 3.x
            self.command = 'konsole -T "{title}" -e {command}'
        elif vernum and bb.utils.vercmp_string_op(vernum, "16.08.1", "<"):
            # Konsole pre 16.08.01 Has nofork
            self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
        super().__init__(sh_cmd, title, env, d)
class XTerm(XTerminal):
    """Plain xterm fallback."""
    priority = 1
    command = 'xterm -T "{title}" -e {command}'
class Rxvt(XTerminal):
    """rxvt fallback."""
    priority = 1
    command = 'rxvt -T "{title}" -e {command}'
class URxvt(XTerminal):
    """urxvt (rxvt-unicode) fallback."""
    priority = 1
    command = 'urxvt -T "{title}" -e {command}'
class Screen(Terminal):
    """Detached GNU screen session the user attaches to manually."""
    command = 'screen -D -m -t "{title}" -S devshell {command}'

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Unique session name so several devshells can coexist.
        session = "devshell_%i" % os.getpid()
        self.command = "screen -D -m -t \"{title}\" -S %s {command}" % session
        super().__init__(sh_cmd, title, env, d)
        msg = 'Screen started. Please connect in another terminal with ' \
            '"screen -r %s"' % session
        if d:
            bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % session,
                                              0.5, 10), d)
        else:
            logger.warning(msg)
class TmuxRunning(Terminal):
    """Open a new pane in the current running tmux window"""
    name = 'tmux-running'
    command = 'tmux split-window -c "{cwd}" "{command}"'
    priority = 2.75

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Usable only inside an existing, sufficiently large tmux pane.
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')
        if not os.getenv('TMUX'):
            raise UnsupportedTerminal('tmux is not running')
        if not check_tmux_pane_size('tmux'):
            raise UnsupportedTerminal('tmux pane too small or tmux < 1.9 version is being used')
        super().__init__(sh_cmd, title, env, d)
class TmuxNewWindow(Terminal):
    """Open a new window in the current running tmux session"""
    name = 'tmux-new-window'
    command = 'tmux new-window -c "{cwd}" -n "{title}" "{command}"'
    priority = 2.70

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Requires tmux to be installed and this process to run inside it.
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')
        if not os.getenv('TMUX'):
            raise UnsupportedTerminal('tmux is not running')
        super().__init__(sh_cmd, title, env, d)
class Tmux(Terminal):
    """Start a new tmux session and window"""
    command = 'tmux new -c "{cwd}" -d -s devshell -n devshell "{command}"'
    priority = 0.75

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        # TODO: consider using a 'devshell' session shared amongst all
        # devshells, if it's already there, add a new window to it.
        window_name = 'devshell-%i' % os.getpid()

        self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'
        if not check_tmux_version('1.9'):
            # `tmux new-session -c` was added in 1.9;
            # older versions fail with that flag
            self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'
        self.command = self.command.format(window_name)
        super().__init__(sh_cmd, title, env, d)

        attach_cmd = 'tmux att -t {0}'.format(window_name)
        msg = 'Tmux started. Please connect in another terminal with `tmux att -t {0}`'.format(window_name)
        if d:
            bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
        else:
            logger.warning(msg)
class Custom(Terminal):
    """User-supplied terminal via OE_TERMINAL_CUSTOMCMD."""
    command = 'false' # This is a placeholder
    priority = 3

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD')
        # Guard clause: without a configured command this terminal is unusable.
        if not self.command:
            logger.debug('No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
            raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
        if not '{command}' in self.command:
            self.command += ' {command}'
        super().__init__(sh_cmd, title, env, d)
        logger.warning('Custom terminal was started.')
def prioritized():
    """Return the registered terminal classes ordered by priority."""
    return Registry.prioritized()
def get_cmd_list():
    """Return the command templates of every terminal that defines one."""
    return [term.command for term in Registry.prioritized() if term.command]
def spawn_preferred(sh_cmd, title=None, env=None, d=None):
    """Spawn the first supported terminal, by priority"""
    for terminal in prioritized():
        try:
            spawn(terminal.name, sh_cmd, title, env, d)
            return
        except UnsupportedTerminal:
            continue
        except:
            # Deliberately broad: a supported terminal that fails to start is
            # logged and the next candidate is tried.
            bb.warn("Terminal %s is supported but did not start" % (terminal.name))
    # when we've run out of options
    raise NoSupportedTerminals(get_cmd_list())
def spawn(name, sh_cmd, title=None, env=None, d=None):
    """Spawn the specified terminal, by name"""
    logger.debug('Attempting to spawn terminal "%s"', name)
    try:
        terminal = Registry.registry[name]
    except KeyError:
        raise UnsupportedTerminal(name)
    # We need to know when the command completes but some terminals (at least
    # gnome and tmux) gives us no way to do this. We therefore write the pid
    # to a file using a "phonehome" wrapper script, then monitor the pid
    # until it exits.
    import tempfile
    import time
    pidfile = tempfile.NamedTemporaryFile(delete = False).name
    try:
        sh_cmd = bb.utils.which(os.getenv('PATH'), "oe-gnome-terminal-phonehome") + " " + pidfile + " " + sh_cmd
        pipe = terminal(sh_cmd, title, env, d)
        output = pipe.communicate()[0]
        if output:
            output = output.decode("utf-8")
        if pipe.returncode != 0:
            raise ExecutionError(sh_cmd, pipe.returncode, output)
        # Busy-wait until the wrapper has written the shell's pid.
        while os.stat(pidfile).st_size <= 0:
            time.sleep(0.01)
            continue
        with open(pidfile, "r") as f:
            pid = int(f.readline())
    finally:
        os.unlink(pidfile)
    # Poll with signal 0 (existence probe) until the shell process is gone.
    while True:
        try:
            os.kill(pid, 0)
            time.sleep(0.1)
        except OSError:
            return
def check_tmux_version(desired):
    """Return tmux's version string unless it is older than 'desired' (False)."""
    vernum = check_terminal_version("tmux")
    too_old = vernum and bb.utils.vercmp_string_op(vernum, desired, "<")
    return False if too_old else vernum
def check_tmux_pane_size(tmux):
    """Return whether the active tmux pane is tall enough to split."""
    import subprocess as sub

    # tmux < 1.9 cannot report the active pane's height without nested
    # formats, so treat it as unusable.
    if not check_tmux_version('1.9'):
        return False
    try:
        proc = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux,
                         shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
        out, _ = proc.communicate()
        size = int(out.strip())
    except OSError as exc:
        import errno
        if exc.errno != errno.ENOENT:
            raise
        return None
    return size / 2 >= 19
def check_terminal_version(terminalName):
    """Query a terminal binary for its version string; None if not runnable."""
    import subprocess as sub
    try:
        cmdversion = '%s --version' % terminalName
        if terminalName.startswith('tmux'):
            cmdversion = '%s -V' % terminalName
        # Force the C locale so the version banner is parseable.
        newenv = os.environ.copy()
        newenv["LANG"] = "C"
        proc = sub.Popen(['sh', '-c', cmdversion], stdout=sub.PIPE, stderr=sub.PIPE, env=newenv)
        out, _ = proc.communicate()
        banner_lines = out.decode().rstrip().split('\n')
    except OSError as exc:
        import errno
        if exc.errno != errno.ENOENT:
            raise
        return None
    vernum = None
    for line in banner_lines:
        if line.startswith('Konsole'):
            vernum = line.split(' ')[-1]
        if line.startswith('GNOME Terminal'):
            vernum = line.split(' ')[-1]
        if line.startswith('MATE Terminal'):
            vernum = line.split(' ')[-1]
        if line.startswith('tmux'):
            vernum = line.split()[-1]
        # Must come after the plain 'tmux' case so it can override it.
        if line.startswith('tmux next-'):
            vernum = line.split()[-1][5:]
    return vernum
def distro_name():
    """Best-effort host distro id from lsb_release, lowercased; "unknown" on any failure."""
    try:
        proc = Popen(['lsb_release', '-i'])
        out, _ = proc.communicate()
        distro = out.split(':')[1].strip().lower()
    except:
        # Deliberately best-effort: any failure maps to "unknown".
        distro = "unknown"
    return distro

View File

@@ -0,0 +1,188 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import errno
import re
import os
class OEList(list):
    """OpenEmbedded 'list' type

    Acts as an ordinary list, but is constructed from a string value and a
    separator (optional), and re-joins itself when converted to a string with
    str(). Set the variable type flag to 'list' to use this type, and the
    'separator' flag may be specified (defaulting to whitespace)."""

    name = "list"

    def __init__(self, value, separator = None):
        # None yields an empty list; otherwise split on the given separator
        # (None means any-whitespace, matching str.split semantics).
        super().__init__(value.split(separator) if value is not None else ())
        self.separator = " " if separator is None else separator

    def __str__(self):
        return self.separator.join(self)
def choice(value, choices):
    """OpenEmbedded 'choice' type

    Acts as a multiple choice for the user. To use this, set the variable
    type flag to 'choice', and set the 'choices' flag to a space separated
    list of valid values."""
    if not isinstance(value, str):
        raise TypeError("choice accepts a string, not '%s'" % type(value))
    # Comparison is case-insensitive on both sides.
    value = value.lower()
    choices = choices.lower()
    if value in choices.split():
        return value
    raise ValueError("Invalid choice '%s'. Valid choices: %s" %
                     (value, choices))
class NoMatch(object):
    """Stub python regex pattern object which never matches anything.

    Mimics the subset of the re.Pattern interface used by callers, always
    returning None. Replaced at the bottom by a singleton instance.
    """
    def findall(self, string, flags=0):
        return None
    def finditer(self, string, flags=0):
        return None
    def match(self, string=None, flags=0):
        # Bug fix: previously lacked the 'string' parameter, so calling
        # match(s, flags) raised TypeError and match(s) silently bound the
        # subject to 'flags'.
        return None
    def search(self, string, flags=0):
        return None
    def split(self, string, maxsplit=0):
        return None
    def sub(self, repl, string, count=0):
        # 'self' was previously (mis)named 'pattern'; positional behaviour of
        # instance calls is unchanged.
        return None
    def subn(self, repl, string, count=0):
        return None
# Shadow the class with a shared singleton instance.
NoMatch = NoMatch()
def regex(value, regexflags=None):
    """OpenEmbedded 'regex' type

    Acts as a regular expression, returning the pre-compiled regular
    expression pattern object. To use this type, set the variable type flag
    to 'regex', and optionally, set the 'regexflags' type to a space separated
    list of the flags to control the regular expression matching (e.g.
    FOO[regexflags] += 'ignorecase'). See the python documentation on the
    're' module for a list of valid flags."""
    flagval = 0
    if regexflags:
        for raw in regexflags.split():
            name = raw.upper()
            try:
                flagval |= getattr(re, name)
            except AttributeError:
                raise ValueError("Invalid regex flag '%s'" % name)
    if not value:
        # Let's ensure that the default behavior for an undefined or empty
        # variable is to match nothing. If the user explicitly wants to match
        # anything, they can match '.*' instead.
        return NoMatch
    try:
        return re.compile(value, flagval)
    except re.error as exc:
        raise ValueError("Invalid regex value '%s': %s" %
                         (value, exc.args[0]))
def boolean(value):
    """OpenEmbedded 'boolean' type

    Valid values for true: 'yes', 'y', 'true', 't', '1'
    Valid values for false: 'no', 'n', 'false', 'f', '0', None
    """
    if value is None:
        return False
    if isinstance(value, bool):
        return value
    if not isinstance(value, str):
        raise TypeError("boolean accepts a string, not '%s'" % type(value))
    value = value.lower()
    if value in ('yes', 'y', 'true', 't', '1'):
        return True
    if value in ('no', 'n', 'false', 'f', '0'):
        return False
    raise ValueError("Invalid boolean value '%s'" % value)
def integer(value, numberbase=10):
    """OpenEmbedded 'integer' type

    Defaults to base 10, but this can be specified using the optional
    'numberbase' flag (which itself may be a string)."""
    return int(value, int(numberbase))
# Preserve the builtin before the definition below shadows it.
_float = float
def float(value, fromhex='false'):
    """OpenEmbedded floating point type

    To use this type, set the type flag to 'float', and optionally set the
    'fromhex' flag to a true value (obeying the same rules as for the
    'boolean' type) if the value is in base 16 rather than base 10."""
    return _float.fromhex(value) if boolean(fromhex) else _float(value)
def path(value, relativeto='', normalize='true', mustexist='false'):
    """OpenEmbedded 'path' type: join onto relativeto, optionally normalize,
    and optionally require the file to exist (raising ValueError if not)."""
    value = os.path.join(relativeto, value)
    if boolean(normalize):
        value = os.path.normpath(value)
    if boolean(mustexist):
        # Existence is probed by opening for read; only ENOENT is translated
        # into a ValueError, other IO errors propagate.
        try:
            with open(value, 'r'):
                pass
        except IOError as exc:
            if exc.errno == errno.ENOENT:
                raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))
    return value
def is_x86(arch):
    """
    Check whether arch is x86 or x86_64
    """
    return bool(arch.startswith('x86_') or re.match('i.*86', arch))
def qemu_use_kvm(kvm, target_arch):
    """
    Enable kvm if target_arch == build_arch or both of them are x86 archs.
    """
    if not (kvm and boolean(kvm)):
        return False
    build_arch = os.uname()[4]
    if is_x86(build_arch) and is_x86(target_arch):
        return True
    return build_arch == target_arch

View File

@@ -0,0 +1,71 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import argparse
import re
class myArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that reports problems through BitBake instead of exiting.

    NOTE(review): relies on 'd' (datastore) and 'pkg' being present in the
    surrounding execution context (injected by useradd.bbclass) when
    formatting messages — confirm against the bbclass caller.
    """
    def _print_message(self, message, file=None):
        bb.warn("%s - %s: %s" % (d.getVar('PN'), pkg, message))

    # This should never be called...
    def exit(self, status=0, message=None):
        message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN'), pkg))
        # Bug fix: this previously called the unqualified name 'error', which
        # does not exist at module scope and raised NameError; route through
        # our error() override (which aborts via bb.fatal) instead.
        self.error(message)

    def error(self, message):
        bb.fatal(message)
def split_commands(params):
    """Split a useradd/groupadd parameter string on ';', honouring quotes.

    The lookahead only allows a split where the remainder of the string has
    balanced single/double quoting, so quoted ';' characters are preserved.
    """
    pieces = re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
    # Remove any empty items
    return [piece for piece in pieces if piece]
def split_args(params):
    """Split a command string into arguments on whitespace, honouring quotes."""
    # Same balanced-quoting lookahead as split_commands, but splitting on
    # runs of spaces/tabs instead of ';'.
    pieces = re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
    # Remove any empty items
    return [piece for piece in pieces if piece]
def build_useradd_parser():
    """Return an ArgumentParser mirroring shadow's useradd(8) option set.

    The flags below must stay identical to shadow's useradd so that
    recipe-supplied parameter strings parse the same way they would on target.
    """
    # The following comes from --help on useradd from shadow
    parser = myArgumentParser(prog='useradd')
    parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
    parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account")
    parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account")
    parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true")
    parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account")
    parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account")
    parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account")
    parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account")
    parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
    parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
    parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
    # -m/-M share dest 'create_home' via store_const so "unspecified" (None),
    # True and False are distinguishable.
    parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_const", const=True)
    parser.add_argument("-M", "--no-create-home", dest="create_home", help="do not create the user's home directory", action="store_const", const=False)
    parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
    parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
    parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
    parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
    parser.add_argument("-r", "--system", help="create a system account", action="store_true")
    parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
    parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
    parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_const", const=True)
    parser.add_argument("LOGIN", help="Login name of the new user")
    return parser
def build_groupadd_parser():
    """Return an ArgumentParser mirroring shadow's groupadd(8) option set.

    Flags must stay identical to shadow's groupadd so recipe-supplied
    GROUPADD_PARAM strings parse consistently.
    """
    # The following comes from --help on groupadd from shadow
    parser = myArgumentParser(prog='groupadd')
    parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
    parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group")
    parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
    parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
    parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
    parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
    parser.add_argument("-r", "--system", help="create a system account", action="store_true")
    parser.add_argument("GROUP", help="Group name of the new group")
    return parser

View File

@@ -0,0 +1,529 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import subprocess
import multiprocessing
import traceback
import errno
def read_file(filename):
    """Return the stripped contents of filename, or "" if it cannot be read.

    Returning "" rather than raising is deliberate: callers in the RDEPENDS
    handling rely on a silent empty result when the file is absent. :M:
    """
    try:
        # Context manager guarantees the handle is closed (the original leaked
        # it on the success path and had an unreachable trailing 'return None').
        with open(filename, "r") as f:
            return f.read().strip()
    except IOError:
        return ""
def ifelse(condition, iftrue = True, iffalse = False):
    """Return iftrue when condition is truthy, otherwise iffalse."""
    return iftrue if condition else iffalse
def conditional(variable, checkvalue, truevalue, falsevalue, d):
    """Return truevalue when d.getVar(variable) equals checkvalue, else falsevalue."""
    matched = d.getVar(variable) == checkvalue
    return truevalue if matched else falsevalue
def vartrue(var, iftrue, iffalse, d):
    """Return iftrue when d.getVar(var) parses as a true boolean, else iffalse."""
    import oe.types
    return iftrue if oe.types.boolean(d.getVar(var)) else iffalse
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Numeric compare: truevalue if float(d.getVar(variable)) <= float(checkvalue)."""
    return truevalue if float(d.getVar(variable)) <= float(checkvalue) else falsevalue
def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Like less_or_equal, but comparing with version-string semantics."""
    if bb.utils.vercmp_string(d.getVar(variable), checkvalue) <= 0:
        return truevalue
    return falsevalue
def both_contain(variable1, variable2, checkvalue, d):
    """If every item of checkvalue appears in both variables' values, return
    the items joined by spaces, else "".

    checkvalue may be a whitespace-separated string or any iterable.
    NOTE(review): the join iterates a set, so result ordering depends on set
    iteration order — confirm callers do not depend on input order.
    """
    first = set(d.getVar(variable1).split())
    second = set(d.getVar(variable2).split())
    if isinstance(checkvalue, str):
        wanted = set(checkvalue.split())
    else:
        wanted = set(checkvalue)
    if wanted.issubset(first) and wanted.issubset(second):
        return " ".join(wanted)
    return ""
def set_intersect(variable1, variable2, d):
    """
    Expand both variables, interpret them as lists of strings, and return the
    intersection as a flattened string.

    For example:
      s1 = "a b c"
      s2 = "b c d"
      s3 = set_intersect(s1, s2)
      => s3 = "b c"
    """
    left = set(d.getVar(variable1).split())
    right = set(d.getVar(variable2).split())
    return " ".join(left.intersection(right))
def prune_suffix(var, suffixes, d):
    """Strip each listed suffix that var ends with, then a leading MLPREFIX."""
    result = var
    for suffix in suffixes:
        if suffix and result.endswith(suffix):
            result = result[:-len(suffix)]
    mlprefix = d.getVar("MLPREFIX")
    if mlprefix and result.startswith(mlprefix):
        result = result[len(mlprefix):]
    return result
def str_filter(f, str, d):
    """Keep only the whitespace-separated words of str whose start matches regex f."""
    from re import match
    return " ".join([word for word in str.split() if match(f, word, 0)])
def str_filter_out(f, str, d):
    """Drop the whitespace-separated words of str whose start matches regex f."""
    from re import match
    return " ".join([word for word in str.split() if not match(f, word, 0)])
def build_depends_string(depends, task):
    """Append a taskname to a string of dependencies as used by the [depends] flag"""
    return " ".join("%s:%s" % (dep, task) for dep in depends.split())
def inherits(d, *classes):
    """Return True if the metadata inherits any of the specified classes"""
    for cls in classes:
        if bb.data.inherits_class(cls, d):
            return True
    return False
def features_backfill(var,d):
    # This construct allows the addition of new features to variable specified
    # as var
    # Example for var = "DISTRO_FEATURES"
    # This construct allows the addition of new features to DISTRO_FEATURES
    # that if not present would disable existing functionality, without
    # disturbing distributions that have already set DISTRO_FEATURES.
    # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should
    # add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED
    current = (d.getVar(var) or "").split()
    backfill = (d.getVar(var+"_BACKFILL") or "").split()
    considered = (d.getVar(var+"_BACKFILL_CONSIDERED") or "").split()
    missing = [feature for feature in backfill
               if feature not in current and feature not in considered]
    if missing:
        d.appendVar(var, " " + " ".join(missing))
def all_distro_features(d, features, truevalue="1", falsevalue=""):
    """
    Returns truevalue if *all* given features are set in DISTRO_FEATURES,
    else falsevalue. The features can be given as single string or anything
    that can be turned into a set.

    Shorthand for
    bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d);
    with the default true/false values it can be used directly as a boolean
    or, with just a truevalue, to conditionally include files:

        require ${@ oe.utils.all_distro_features(d, "foo bar", "foo-and-bar.inc")
    """
    return bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d)
def any_distro_features(d, features, truevalue="1", falsevalue=""):
    """
    Returns truevalue if at least *one* of the given features is set in
    DISTRO_FEATURES, else falsevalue. The features can be given as single
    string or anything that can be turned into a set.

    Shorthand for
    bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d);
    with the default true/false values it can be used directly as a boolean
    or, with just a truevalue, to conditionally include files:

        require ${@ oe.utils.any_distro_features(d, "foo bar", "foo-or-bar.inc")
    """
    return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
def parallel_make(d, makeinst=False):
    """
    Return the integer value for the number of parallel threads to use when
    building, scraped out of PARALLEL_MAKE (or PARALLEL_MAKEINST when
    makeinst is True). If no parallelization option is found, returns the
    empty string '' (note: not None, despite earlier documentation; '' is
    equally falsy for callers).

    e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
    """
    if makeinst:
        pm = (d.getVar('PARALLEL_MAKEINST') or '').split()
    else:
        pm = (d.getVar('PARALLEL_MAKE') or '').split()
    # look for '-j' and throw other options (e.g. '-l') away
    while pm:
        opt = pm.pop(0)
        if opt == '-j':
            # '-j N' form: value is the following token
            v = pm.pop(0)
        elif opt.startswith('-j'):
            # '-jN' form: value is attached to the option
            v = opt[2:].strip()
        else:
            continue
        return int(v)
    return ''
def parallel_make_argument(d, fmt, limit=None, makeinst=False):
    """
    Helper utility to construct a parallel make argument from the number of
    parallel threads specified in PARALLEL_MAKE.

    Returns fmt with its single '%d' expanded to the thread count, capped at
    'limit' when given. Returns '' when PARALLEL_MAKE holds no -j option.

    e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will
    return "-n 10".
    """
    threads = parallel_make(d, makeinst)
    if not threads:
        return ''
    if limit:
        threads = min(limit, threads)
    return fmt % threads
def packages_filter_out_system(d):
    """
    Return a list of packages from PACKAGES with the "system" packages such as
    PN-dbg PN-doc PN-locale-eb-gb removed.
    """
    pn = d.getVar('PN')
    system_pkgs = {pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')}
    locale_prefix = pn + "-locale-"
    return [pkg for pkg in d.getVar('PACKAGES').split()
            if pkg not in system_pkgs and locale_prefix not in pkg]
def getstatusoutput(cmd):
    """Compatibility wrapper around subprocess.getstatusoutput()."""
    return subprocess.getstatusoutput(cmd)
def trim_version(version, num_parts=2):
    """
    Return just the first <num_parts> of <version>, split by periods. For
    example, trim_version("1.2.3", 2) will return "1.2".

    Raises TypeError if version is not a string and ValueError if num_parts
    is less than one.
    """
    # isinstance is the idiomatic type check and also accepts str subclasses
    if not isinstance(version, str):
        raise TypeError("Version should be a string")
    if num_parts < 1:
        raise ValueError("Cannot split to parts < 1")
    return ".".join(version.split(".")[:num_parts])
def cpu_count(at_least=1, at_most=64):
    """Return the number of CPUs this process may run on, clamped to [at_least, at_most]."""
    available = len(os.sched_getaffinity(0))
    if available > at_most:
        available = at_most
    if available < at_least:
        available = at_least
    return available
def execute_pre_post_process(d, cmds):
    """
    Execute each function named in `cmds` (separated by whitespace and/or
    semicolons) against the datastore `d`. A None `cmds` is a no-op.
    """
    if cmds is None:
        return
    for cmd in cmds.replace(";", " ").split():
        bb.note("Executing %s ..." % cmd)
        bb.build.exec_func(cmd, d)
def get_bb_number_threads(d):
    """Return BB_NUMBER_THREADS as an int, falling back to the host CPU count or 1."""
    threads = d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1
    return int(threads)
def multiprocess_launch(target, items, d, extraargs=None):
    """Fan `items` out over worker processes, bounded by BB_NUMBER_THREADS."""
    return multiprocess_launch_mp(target, items, get_bb_number_threads(d), extraargs)
# For each item in items, call the function 'target' with item as the first
# argument, extraargs as the other arguments and handle any exceptions in the
# parent thread
def multiprocess_launch_mp(target, items, max_process, extraargs=None):
    """
    Fan 'items' out over at most 'max_process' worker processes, calling
    target(item, *extraargs) in each.

    Returns the list of truthy results collected from the workers. If any
    worker raises, no further items are launched, the in-flight workers are
    drained, and bb.fatal() aborts with all collected errors and tracebacks.
    """
    # Process subclass that ships the child's return value (or exception plus
    # formatted traceback) back to the parent over a Pipe, since a plain
    # multiprocessing.Process has no result channel.
    class ProcessLaunch(multiprocessing.Process):
        def __init__(self, *args, **kwargs):
            multiprocessing.Process.__init__(self, *args, **kwargs)
            self._pconn, self._cconn = multiprocessing.Pipe()
            self._exception = None
            self._result = None
        def run(self):
            # Executes in the child: send (None, result) on success,
            # (exception, traceback-text) on failure.
            try:
                ret = self._target(*self._args, **self._kwargs)
                self._cconn.send((None, ret))
            except Exception as e:
                tb = traceback.format_exc()
                self._cconn.send((e, tb))
        def update(self):
            # Executes in the parent: drain the pipe once the child has sent
            # its pair. When e is None, the second element is the result
            # value, not a traceback.
            if self._pconn.poll():
                (e, tb) = self._pconn.recv()
                if e is not None:
                    self._exception = (e, tb)
                else:
                    self._result = tb
        @property
        def exception(self):
            self.update()
            return self._exception
        @property
        def result(self):
            self.update()
            return self._result
    launched = []
    errors = []
    results = []
    items = list(items)
    # Keep launching while work remains (and no errors have occurred yet),
    # and keep draining until every launched worker has finished.
    while (items and not errors) or launched:
        if not errors and items and len(launched) < max_process:
            args = (items.pop(),)
            if extraargs is not None:
                args = args + extraargs
            p = ProcessLaunch(target=target, args=args)
            p.start()
            launched.append(p)
        for q in launched:
            # Have to manually call update() to avoid deadlocks. The pipe can be full and
            # transfer stalled until we try and read the results object but the subprocess won't exit
            # as it still has data to write (https://bugs.python.org/issue8426)
            q.update()
            # The finished processes are joined when calling is_alive()
            if not q.is_alive():
                if q.exception:
                    errors.append(q.exception)
                if q.result:
                    results.append(q.result)
                # NOTE(review): removing from 'launched' while iterating it;
                # any skipped sibling is picked up on the next outer-loop pass
                launched.remove(q)
    # Paranoia doesn't hurt
    for p in launched:
        p.join()
    if errors:
        # Aggregate every failure into one fatal message; CalledProcessError
        # gets its captured output appended for easier debugging.
        msg = ""
        for (e, tb) in errors:
            if isinstance(e, subprocess.CalledProcessError) and e.output:
                msg = msg + str(e) + "\n"
                msg = msg + "Subprocess output:"
                msg = msg + e.output.decode("utf-8", errors="ignore")
            else:
                msg = msg + str(e) + ": " + str(tb) + "\n"
        bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg)
    return results
def squashspaces(string):
    """Collapse each run of whitespace in `string` to one space and strip the ends."""
    import re
    collapsed = re.sub(r"\s+", " ", string)
    return collapsed.strip()
def rprovides_map(pkgdata_dir, pkg_dict):
    """
    Build a map of runtime-provided name -> list of providing packages, by
    scanning the RPROVIDES/FILERPROVIDES lines in each package's
    runtime-reverse pkgdata file. Packages without a pkgdata file are skipped.
    """
    rprov_map = {}
    for pkg in pkg_dict:
        pkgfile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
        if not os.path.isfile(pkgfile):
            continue
        with open(pkgfile) as f:
            for line in f:
                if not (line.startswith('RPROVIDES') or line.startswith('FILERPROVIDES')):
                    continue
                # List all components provided by pkg.
                # Exclude version strings, i.e. those starting with (
                for prov in line.split()[1:]:
                    if prov.startswith('('):
                        continue
                    rprov_map.setdefault(prov, []).append(pkg)
    return rprov_map
def format_pkg_list(pkg_dict, ret_format=None, pkgdata_dir=None):
    """
    Render the package dictionary as newline-separated text in one of the
    formats "arch", "file", "ver" or "deps"; any other ret_format lists just
    the package names. A non-empty result always ends with a newline.
    """
    sorted_pkgs = sorted(pkg_dict)
    if ret_format == "arch":
        lines = ["%s %s" % (p, pkg_dict[p]["arch"]) for p in sorted_pkgs]
    elif ret_format == "file":
        lines = ["%s %s %s" % (p, pkg_dict[p]["filename"], pkg_dict[p]["arch"]) for p in sorted_pkgs]
    elif ret_format == "ver":
        lines = ["%s %s %s" % (p, pkg_dict[p]["arch"], pkg_dict[p]["ver"]) for p in sorted_pkgs]
    elif ret_format == "deps":
        lines = []
        rprov_map = rprovides_map(pkgdata_dir, pkg_dict)
        for p in sorted_pkgs:
            for dep in pkg_dict[p]["deps"]:
                if dep in rprov_map:
                    # There could be multiple providers within the image
                    for provider in rprov_map[dep]:
                        lines.append("%s|%s * %s [RPROVIDES]" % (p, provider, dep))
                else:
                    lines.append("%s|%s" % (p, dep))
    else:
        lines = sorted_pkgs
    text = '\n'.join(lines)
    if text:
        # make sure last line is newline terminated
        text += '\n'
    return text
# Helper function to get the host compiler version
# Do not assume the compiler is gcc
def get_host_compiler_version(d, taskcontextonly=False):
    """
    Return (compiler, "major.minor") for BUILD_CC by parsing the output of
    '<compiler> --version'; aborts the build if the version cannot be
    determined. When taskcontextonly is set, returns None unless running in
    a worker task context.
    """
    import re, subprocess

    if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
        return

    compiler = d.getVar("BUILD_CC")
    # Get rid of ccache since it is not present when parsing.
    if compiler.startswith('ccache '):
        compiler = compiler[7:]
    try:
        env = os.environ.copy()
        # datastore PATH does not contain session PATH as set by environment-setup-...
        # this breaks the install-buildtools use-case
        # env["PATH"] = d.getVar("PATH")
        output = subprocess.check_output("%s --version" % compiler,
                shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
    except subprocess.CalledProcessError as e:
        bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))

    first_line = output.split('\n')[0]
    match = re.match(r".* (\d+\.\d+)\.\d+.*", first_line)
    if not match:
        bb.fatal("Can't get compiler version from %s --version output" % compiler)
    return compiler, match.group(1)
def host_gcc_version(d, taskcontextonly=False):
    """
    Return "-<major.minor>" when the host BUILD_CC reports version 4.8 or
    4.9, otherwise an empty string; aborts the build if the version cannot
    be determined. When taskcontextonly is set, returns None unless running
    in a worker task context.
    """
    import re, subprocess

    if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
        return

    compiler = d.getVar("BUILD_CC")
    # Get rid of ccache since it is not present when parsing.
    if compiler.startswith('ccache '):
        compiler = compiler[7:]
    try:
        env = os.environ.copy()
        env["PATH"] = d.getVar("PATH")
        output = subprocess.check_output("%s --version" % compiler,
                shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8")
    except subprocess.CalledProcessError as e:
        bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))

    match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0])
    if not match:
        bb.fatal("Can't get compiler version from %s --version output" % compiler)

    version = match.group(1)
    # only gcc 4.8/4.9 warrant the version-suffixed handling
    if version in ("4.8", "4.9"):
        return "-%s" % version
    return ""
def get_multilib_datastore(variant, d):
    """
    Return a copy of datastore `d` configured for multilib `variant`
    (OVERRIDES and MLPREFIX adjusted). An empty variant yields a copy with
    any multilib configuration stripped back out.
    """
    localdata = bb.data.createCopy(d)
    if not variant:
        # strip multilib setup: restore the original DEFAULTTUNE and drop
        # every virtclass-multilib-* override
        origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
        if origdefault:
            localdata.setVar("DEFAULTTUNE", origdefault)
        kept = [o for o in localdata.getVar("OVERRIDES", False).split(":")
                if not o.startswith("virtclass-multilib-")]
        localdata.setVar("OVERRIDES", ":".join(kept))
        localdata.setVar("MLPREFIX", "")
    else:
        localdata.setVar("OVERRIDES",
                localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant)
        localdata.setVar("MLPREFIX", variant + "-")
    return localdata
def sh_quote(string):
    """Shell-escape `string` for safe interpolation into a /bin/sh command line."""
    import shlex
    quoted = shlex.quote(string)
    return quoted
def directory_size(root, blocksize=4096):
    """
    Calculate the size of the directory, taking into account hard links,
    rounding up every size to multiples of the blocksize.
    """
    import math

    seen_inodes = set()

    def rounded_size(path):
        # lstat so symlinks count as themselves; each inode is counted only
        # once so hard links are not double-counted
        st = os.lstat(path)
        if st.st_ino in seen_inodes:
            return 0
        seen_inodes.add(st.st_ino)
        return math.ceil(st.st_size / blocksize) * blocksize

    total = 0
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            total += rounded_size(os.path.join(dirpath, name))
        # the directory entry itself occupies space too
        total += rounded_size(dirpath)
    return total
# Update the mtime of a file, skip if permission/read-only issues
def touch(filename):
    """
    Update the access/modification times of `filename`. Permission errors
    and read-only filesystems are silently ignored; any other OSError is
    re-raised.
    """
    try:
        os.utime(filename, None)
    except PermissionError:
        # not ours to touch; best-effort only
        pass
    except OSError as e:
        # tolerate read-only filesystems; anything else is a real error
        if e.errno != errno.EROFS:
            raise