Complete Yocto mirror with license table for TQMa6UL (2038-compliance)

- 264 license table entries with exact download URLs (224/264 resolved)
- Complete sources/ directory with all BitBake recipes
- Build configuration: tqma6ul-multi-mba6ulx, spaetzle (musl)
- Full traceability for Softwarefreigabeantrag
- GCC 13.4.0, Linux 6.6.102, U-Boot 2023.04, musl 1.2.4
- License distribution: GPL-2.0 (24), MIT (23), GPL-2.0+ (18), BSD-3 (16)
This commit is contained in:
Siggi (OpenClaw Agent)
2026-03-01 20:58:18 +00:00
commit 16accb6b24
15086 changed files with 1292356 additions and 0 deletions

View File

@@ -0,0 +1,84 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import logging
import os
import sys
from bblayers.common import LayerPlugin
logger = logging.getLogger('bitbake-layers')
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import oe.buildcfg
def plugin_init(plugins):
    """Plugin entry point: return the plugin instance used by bitbake-layers."""
    return BuildConfPlugin()
class BuildConfPlugin(LayerPlugin):
    """Implements the 'save-build-conf' command: saves the currently active
    build configuration as a TEMPLATECONF template inside a layer."""

    # Placeholder texts written verbatim into the template's conf-notes.txt
    # and conf-summary.txt for the user to replace.
    notes_fixme = """FIXME: Please place here the detailed instructions for using this build configuration.
They will be shown to the users when they set up their builds via TEMPLATECONF.
"""
    summary_fixme = """FIXME: Please place here the short summary of what this build configuration is for.
It will be shown to the users when they set up their builds via TEMPLATECONF.
"""

    def _save_conf(self, templatename, templatepath, oecorepath, relpaths_to_oecore):
        """Copy conf/local.conf and conf/bblayers.conf from the current build
        (found via BBPATH) into <templatepath>/conf/templates/<templatename>,
        rewriting absolute layer paths in bblayers.conf to ##OEROOT##-relative
        form and seeding conf-summary.txt/conf-notes.txt with FIXME text."""
        confdir = os.path.join(os.environ["BBPATH"], "conf")
        destdir = os.path.join(templatepath, "conf", "templates", templatename)
        os.makedirs(destdir, exist_ok=True)

        with open(os.path.join(confdir, "local.conf")) as src:
            with open(os.path.join(destdir, "local.conf.sample"), 'w') as dest:
                dest.write(src.read())

        with open(os.path.join(confdir, "bblayers.conf")) as src:
            with open(os.path.join(destdir, "bblayers.conf.sample"), 'w') as dest:
                bblayers_data = src.read()
                for (abspath, relpath) in relpaths_to_oecore:
                    # Make the sample position-independent: absolute layer
                    # paths become paths relative to ##OEROOT##.
                    bblayers_data = bblayers_data.replace(abspath, "##OEROOT##/" + relpath)
                dest.write(bblayers_data)

        with open(os.path.join(destdir, "conf-summary.txt"), 'w') as dest:
            dest.write(self.summary_fixme)
        with open(os.path.join(destdir, "conf-notes.txt"), 'w') as dest:
            dest.write(self.notes_fixme)

        logger.info("""Configuration template placed into {}
Please review the files in there, and particularly provide a configuration summary in {}
and notes in {}
You can try out the configuration with
TEMPLATECONF={} . {}/oe-init-build-env build-try-{}"""
                    .format(destdir, os.path.join(destdir, "conf-summary.txt"), os.path.join(destdir, "conf-notes.txt"), destdir, oecorepath, templatename))

    def do_save_build_conf(self, args):
        """ Save the currently active build configuration (conf/local.conf, conf/bblayers.conf) as a template into a layer.\n This template can later be used for setting up builds via TEMPLATECONF. """
        # NOTE(review): command docstrings appear to feed the CLI help text,
        # so the docstring above is kept verbatim — confirm in bblayers.common.
        layers = oe.buildcfg.get_layer_revisions(self.tinfoil.config_data)
        targetlayer = None
        oecore = None
        for l in layers:
            if os.path.abspath(l[0]) == os.path.abspath(args.layerpath):
                targetlayer = l[0]
            if l[1] == 'meta':
                # The 'meta' layer sits directly under openembedded-core.
                oecore = os.path.dirname(l[0])
        if not targetlayer:
            logger.error("Layer {} not in one of the currently enabled layers:\n{}".format(args.layerpath, "\n".join([l[0] for l in layers])))
        elif not oecore:
            logger.error("Openembedded-core not in one of the currently enabled layers:\n{}".format("\n".join([l[0] for l in layers])))
        else:
            relpaths_to_oecore = [(l[0], os.path.relpath(l[0], start=oecore)) for l in layers]
            self._save_conf(args.templatename, targetlayer, oecore, relpaths_to_oecore)

    def register_commands(self, sp):
        """Register the 'save-build-conf' subcommand and its arguments."""
        parser_build_conf = self.add_command(sp, 'save-build-conf', self.do_save_build_conf, parserecipes=False)
        parser_build_conf.add_argument('layerpath',
            help='The path to the layer where the configuration template should be saved.')
        parser_build_conf.add_argument('templatename',
            help='The name of the configuration template.')

View File

@@ -0,0 +1,90 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import logging
import os
import sys
import shutil
import bb.utils
from bblayers.common import LayerPlugin
from bblayers.action import ActionPlugin
logger = logging.getLogger('bitbake-layers')
def plugin_init(plugins):
    """Plugin entry point: return the plugin instance used by bitbake-layers."""
    return CreatePlugin()
def read_template(template, template_dir='templates'):
    """Return the full contents of *template* as a single string.

    The file is looked up under *template_dir* relative to this module's
    directory; because the components are joined with os.path.join, an
    absolute *template_dir* overrides the module directory.

    Args:
        template: file name of the template to read.
        template_dir: directory holding the templates (default 'templates').

    Returns:
        The template text, unchanged.
    """
    # fd.read() gives the same result as ''.join(fd.readlines()) without
    # building an intermediate list; the dead `lines = str()` initializer
    # from the original is dropped.
    path = os.path.join(os.path.dirname(__file__), template_dir, template)
    with open(path) as fd:
        return fd.read()
class CreatePlugin(LayerPlugin):
    """Implements the 'create-layer' command: scaffolds a new layer with
    conf/layer.conf, README, COPYING.MIT and an example recipe."""

    def do_create_layer(self, args):
        """Create a basic layer"""
        layerdir = os.path.abspath(args.layerdir)
        if os.path.exists(layerdir):
            sys.stderr.write("Specified layer directory exists\n")
            return 1

        # create dirs
        conf = os.path.join(layerdir, 'conf')
        bb.utils.mkdirhier(conf)

        layername = os.path.basename(os.path.normpath(args.layerdir))
        layerid = args.layerid if args.layerid is not None else layername

        # Create the README from templates/README
        readme_template = read_template('README').format(layername=layername)
        readme = os.path.join(layerdir, 'README')
        with open(readme, 'w') as fd:
            fd.write(readme_template)

        # Copy the MIT license from meta
        copying = 'COPYING.MIT'
        dn = os.path.dirname
        # Three levels up from this module (lib/bblayers/create.py) is the
        # layer root holding COPYING.MIT.
        license_src = os.path.join(dn(dn(dn(__file__))), copying)
        license_dst = os.path.join(layerdir, copying)
        shutil.copy(license_src, license_dst)

        # Get the compat value for core layer.
        compat = self.tinfoil.config_data.getVar('LAYERSERIES_CORENAMES') or ""

        # Create the layer.conf from templates/layer.conf
        layerconf_template = read_template('layer.conf').format(
            layerid=layerid, priority=args.priority, compat=compat)
        layerconf = os.path.join(conf, 'layer.conf')
        with open(layerconf, 'w') as fd:
            fd.write(layerconf_template)

        # Create the example from templates/example.bb
        example_template = read_template('example.bb')
        example = os.path.join(layerdir, 'recipes-' + args.examplerecipe, args.examplerecipe)
        bb.utils.mkdirhier(example)
        with open(os.path.join(example, args.examplerecipe + '_%s.bb') % args.version, 'w') as fd:
            fd.write(example_template)

        if args.add_layer:
            # Add the layer to bblayers.conf
            args.layerdir = [layerdir]
            # NOTE(review): calls ActionPlugin's method with this instance as
            # self — presumably do_add_layer only touches attributes both
            # plugins share; confirm in bblayers.action.
            ActionPlugin.do_add_layer(self, args)
            logger.plain('Layer added %s' % args.layerdir)
        else:
            logger.plain('Add your new layer with \'bitbake-layers add-layer %s\'' % args.layerdir)

    def register_commands(self, sp):
        """Register the 'create-layer' subcommand and its arguments."""
        parser_create_layer = self.add_command(sp, 'create-layer', self.do_create_layer, parserecipes=False)
        parser_create_layer.add_argument('layerdir', help='Layer directory to create')
        parser_create_layer.add_argument('--add-layer', '-a', action='store_true', help='Add the layer to bblayers.conf after creation')
        parser_create_layer.add_argument('--layerid', '-i', help='Layer id to use if different from layername')
        parser_create_layer.add_argument('--priority', '-p', default=6, help='Priority of recipes in layer')
        parser_create_layer.add_argument('--example-recipe-name', '-e', dest='examplerecipe', default='example', help='Filename of the example recipe')
        parser_create_layer.add_argument('--example-recipe-version', '-v', dest='version', default='0.1', help='Version number for the example recipe')

View File

@@ -0,0 +1,102 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import logging
import os
import sys
import bb.utils
from bblayers.common import LayerPlugin
logger = logging.getLogger('bitbake-layers')
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import oe.buildcfg
def plugin_init(plugins):
    """Plugin entry point: return the plugin instance used by bitbake-layers."""
    return MakeSetupPlugin()
class MakeSetupPlugin(LayerPlugin):
    """Implements the 'create-layers-setup' command: captures the layout and
    git revisions of the currently configured layers so a writer plugin can
    replicate them elsewhere."""

    def _get_remotes_with_url(self, repo_path):
        """Return {remote_name: {'uri': fetch_url}} for the repo at repo_path."""
        remotes = {}
        for r in oe.buildcfg.get_metadata_git_remotes(repo_path):
            remotes[r] = {'uri': oe.buildcfg.get_metadata_git_remote_url(repo_path, r)}
        return remotes

    def _is_submodule(self, repo_path):
        # This is slightly brittle: git does not offer a way to tell whether
        # a given repo dir is a submodule checkout, so we need to rely on .git
        # being a file (rather than a dir like it is in standalone checkouts).
        # The file typically contains a gitdir pointer to elsewhere.
        return os.path.isfile(os.path.join(repo_path, ".git"))

    def make_repo_config(self, destdir):
        """ This is a helper function for the writer plugins that discovers currently configured layers.
        The writers do not have to use it, but it can save a bit of work and avoid duplicated code, hence it is
        available here. """
        repos = {}
        layers = oe.buildcfg.get_layer_revisions(self.tinfoil.config_data)
        destdir_repo = oe.buildcfg.get_metadata_git_toplevel(destdir)

        for (l_path, l_name, l_branch, l_rev, l_ismodified) in layers:
            if l_name == 'workspace':
                # The devtool workspace layer is transient; never record it.
                continue
            if l_ismodified:
                logger.error("Layer {name} in {path} has uncommitted modifications or is not in a git repository.".format(name=l_name, path=l_path))
                return
            repo_path = oe.buildcfg.get_metadata_git_toplevel(l_path)

            if self._is_submodule(repo_path):
                continue
            if repo_path not in repos.keys():
                repos[repo_path] = {
                    'path': os.path.basename(repo_path),
                    # Fix: default to False so the remotes check below cannot
                    # raise KeyError for repos other than the destination one.
                    'contains_this_file': False,
                    'git-remote': {
                        'rev': l_rev,
                        'branch': l_branch,
                        'remotes': self._get_remotes_with_url(repo_path),
                        'describe': oe.buildcfg.get_metadata_git_describe(repo_path)}}
                if repo_path == destdir_repo:
                    repos[repo_path]['contains_this_file'] = True
            # A repo with no remotes cannot be fetched elsewhere — unless it is
            # the repo that will carry the setup file itself (bootstrap repo).
            if not repos[repo_path]['git-remote']['remotes'] and not repos[repo_path]['contains_this_file']:
                logger.error("Layer repository in {path} does not have any remotes configured. Please add at least one with 'git remote add'.".format(path=repo_path))
                return

        # Strip the common leading path so the config is location-independent.
        top_path = os.path.commonpath([os.path.dirname(r) for r in repos.keys()])

        repos_nopaths = {}
        for r in repos.keys():
            r_nopath = os.path.basename(r)
            repos_nopaths[r_nopath] = repos[r]
            r_relpath = os.path.relpath(r, top_path)
            repos_nopaths[r_nopath]['path'] = r_relpath
        return repos_nopaths

    def do_make_setup(self, args):
        """ Writes out a configuration file and/or a script that replicate the directory structure and revisions of the layers in a current build. """
        for p in self.plugins:
            if str(p) == args.writer:
                p.do_write(self, args)

    def register_commands(self, sp):
        """Register the 'create-layers-setup' subcommand, discover writer
        plugins under lib/bblayers/setupwriters in BBPATH, and let each
        writer register its own extra arguments."""
        parser_setup_layers = self.add_command(sp, 'create-layers-setup', self.do_make_setup, parserecipes=False)
        parser_setup_layers.add_argument('destdir',
            help='Directory where to write the output\n(if it is inside one of the layers, the layer becomes a bootstrap repository and thus will be excluded from fetching).')
        parser_setup_layers.add_argument('--output-prefix', '-o',
            help='File name prefix for the output files, if the default (setup-layers) is undesirable.')
        self.plugins = []
        for path in (self.tinfoil.config_data.getVar('BBPATH').split(':')):
            pluginpath = os.path.join(path, 'lib', 'bblayers', 'setupwriters')
            bb.utils.load_plugins(logger, self.plugins, pluginpath)
        parser_setup_layers.add_argument('--writer', '-w', choices=[str(p) for p in self.plugins], help="Choose the output format (defaults to oe-setup-layers).\n\nCurrently supported options are:\noe-setup-layers - a self-contained python script and a json config for it.\n\n", default="oe-setup-layers")
        for plugin in self.plugins:
            if hasattr(plugin, 'register_arguments'):
                plugin.register_arguments(parser_setup_layers)

View File

@@ -0,0 +1,117 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import logging
import os
import json
import stat
logger = logging.getLogger('bitbake-layers')
def plugin_init(plugins):
    """Plugin entry point: return the writer instance used by create-layers-setup."""
    return OeSetupLayersWriter()
class OeSetupLayersWriter():
    """Writer plugin producing a json layer description plus (optionally) a
    copy of scripts/oe-setup-layers that can replay it."""

    def __str__(self):
        # The writer is selected on the command line by this name.
        return "oe-setup-layers"

    def _write_python(self, src_path, dst_path):
        """Copy the self-contained setup script to dst_path and mark it executable."""
        # Renamed params from input/output: 'input' shadowed the builtin.
        with open(src_path) as f:
            script = f.read()
        with open(dst_path, 'w') as f:
            f.write(script)
        st = os.stat(dst_path)
        os.chmod(dst_path, st.st_mode | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH)

    def _write_json(self, repos, output):
        """Serialize the layer config to *output* as pretty-printed json."""
        with open(output, 'w') as f:
            json.dump(repos, f, sort_keys=True, indent=4)

    def _read_repo_config(self, json_path):
        """Load an existing setup-layers json file, validating its version.

        Raises:
            Exception: if the file's version is not a supported one.
        """
        with open(json_path) as f:
            json_config = json.load(f)

        supported_versions = ["1.0"]
        if json_config["version"] not in supported_versions:
            err = "File {} has version {}, which is not in supported versions: {}".format(json_path, json_config["version"], supported_versions)
            logger.error(err)
            raise Exception(err)

        return json_config

    def _modify_repo_config(self, json_config, args):
        """Apply --use-custom-reference overrides to the loaded config.

        Each entry in args.custom_references is 'REPOSITORY:REFERENCE'; the
        repository's rev is replaced and its describe cleared.

        Raises:
            Exception: on a malformed pair or an unknown repository name.
        """
        sources = json_config['sources']
        for pair in args.custom_references:
            try:
                repo, rev = pair.split(':', maxsplit=1)
            except ValueError:
                err = "Invalid custom reference specified: '{}'. Provide one using 'REPOSITORY:REFERENCE'.".format(pair)
                logger.error(err)
                raise Exception(err)
            if repo not in sources:
                err = "Repository {} does not exist in setup-layers config".format(repo)
                logger.error(err)
                raise Exception(err)

            layer_remote = json_config['sources'][repo]['git-remote']
            layer_remote['rev'] = rev
            # The describe value no longer matches the overridden rev.
            layer_remote['describe'] = ''

    def do_write(self, parent, args):
        """ Writes out a python script and a json config that replicate the directory structure and revisions of the layers in a current build. """
        output = args.output_prefix or "setup-layers"
        output = os.path.join(os.path.abspath(args.destdir), output)
        if args.update:
            # Modify existing layers setup
            if args.custom_references is None:
                err = "No custom reference specified. Please provide one using '--use-custom-reference REPOSITORY:REFERENCE'."
                logger.error(err)
                raise Exception(err)

            # Renamed from 'json' — the original shadowed the json module.
            repo_config = self._read_repo_config(output + ".json")
            if 'sources' not in repo_config.keys():
                err = "File {}.json does not contain valid layer sources.".format(output)
                logger.error(err)
                raise Exception(err)
        else:
            # Create new layers setup
            if not os.path.exists(args.destdir):
                os.makedirs(args.destdir)
            repos = parent.make_repo_config(args.destdir)
            repo_config = {"version": "1.0", "sources": repos}
            if not repos:
                err = "Could not determine layer sources"
                logger.error(err)
                raise Exception(err)

        if args.custom_references is not None:
            self._modify_repo_config(repo_config, args)

        self._write_json(repo_config, output + ".json")
        logger.info('Created {}.json'.format(output))

        if not args.json_only:
            self._write_python(os.path.join(os.path.dirname(__file__), '../../../../scripts/oe-setup-layers'), output)
            logger.info('Created {}'.format(output))

    def register_arguments(self, parser):
        """Register this writer's extra command line options."""
        # Typos fixed in help text: configuruation -> configuration,
        # repositores -> repositories.
        parser.add_argument('--json-only', action='store_true',
            help='When using the oe-setup-layers writer, write only the layer configuration in json format. Otherwise, also a copy of scripts/oe-setup-layers (from oe-core or poky) is provided, which is a self contained python script that fetches all the needed layers and sets them to correct revisions using the data from the json.')
        parser.add_argument('--update', '-u',
            action='store_true',
            help=("Instead of writing a new json file, update an existing layer setup json file with custom references provided via the '--use-custom-reference' option."
                  "\nThis will only update repositories for which a custom reference is specified, all other repositories will be left unchanged."))
        parser.add_argument('--use-custom-reference', '-r',
            action='append',
            dest='custom_references',
            metavar='REPOSITORY:REFERENCE',
            help=("A pair consisting of a repository and a custom reference to use for it (by default the currently checked out commit id would be written out)."
                  "\nThis value can be any reference that 'git checkout' would accept, and is not checked for validity."
                  "\nThis option can be used multiple times."))

View File

@@ -0,0 +1,41 @@
This README file contains information on the contents of the {layername} layer.
Please see the corresponding sections below for details.
Dependencies
============
URI: <first dependency>
branch: <branch name>
URI: <second dependency>
branch: <branch name>
.
.
.
Patches
=======
Please submit any patches against the {layername} layer to the xxxx mailing list (xxxx@zzzz.org)
and cc: the maintainer:
Maintainer: XXX YYYYYY <xxx.yyyyyy@zzzzz.com>
Table of Contents
=================
I. Adding the {layername} layer to your build
II. Misc
I. Adding the {layername} layer to your build
=================================================
Run 'bitbake-layers add-layer {layername}'
II. Misc
========
--- replace with specific information about the {layername} layer ---

View File

@@ -0,0 +1,13 @@
SUMMARY = "bitbake-layers recipe"
DESCRIPTION = "Recipe created by bitbake-layers"
LICENSE = "MIT"

# Example task: prints a banner; hooked before do_build by the addtask below.
python do_display_banner() {
    bb.plain("***********************************************");
    bb.plain("* *");
    bb.plain("* Example recipe created by bitbake-layers *");
    bb.plain("* *");
    bb.plain("***********************************************");
}

addtask display_banner before do_build

View File

@@ -0,0 +1,13 @@
# NOTE: this file is a Python str.format template (see create.py); doubled
# dollar-brace sequences render as single ones in the generated layer.conf,
# while layerid, priority and compat are substituted at creation time.

# We have a conf and classes directory, add to BBPATH
BBPATH .= ":${{LAYERDIR}}"

# We have recipes-* directories, add to BBFILES
BBFILES += "${{LAYERDIR}}/recipes-*/*/*.bb \
            ${{LAYERDIR}}/recipes-*/*/*.bbappend"

BBFILE_COLLECTIONS += "{layerid}"
BBFILE_PATTERN_{layerid} = "^${{LAYERDIR}}/"
BBFILE_PRIORITY_{layerid} = "{priority}"

LAYERDEPENDS_{layerid} = "core"
LAYERSERIES_COMPAT_{layerid} = "{compat}"

View File

@@ -0,0 +1,215 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Implements system state sampling. Called by buildstats.bbclass.
# Because it is a real Python module, it can hold persistent state,
# like open log files and the time of the last sampling.
import os
import re
import time

import bb.event
import bb.utils
class SystemStats:
    """Samples host /proc statistics (and MonitorDiskEvent data) into log
    files under BUILDSTATS_BASE/BUILDNAME. Keeps log handles and the time of
    the last sample as persistent state; close() releases the handles."""

    def __init__(self, d):
        bn = d.getVar('BUILDNAME')
        bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
        bb.utils.mkdirhier(bsdir)
        file_handlers = [('diskstats', self._reduce_diskstats),
                         ('meminfo', self._reduce_meminfo),
                         ('stat', self._reduce_stat)]

        # Some hosts like openSUSE have readable /proc/pressure files
        # but throw errors when these files are opened. Catch these error
        # and ensure that the reduce_proc_pressure directory is not created.
        if os.path.exists("/proc/pressure"):
            try:
                with open('/proc/pressure/cpu', 'rb') as source:
                    source.read()
                pressuredir = os.path.join(bsdir, 'reduced_proc_pressure')
                bb.utils.mkdirhier(pressuredir)
                file_handlers.extend([('pressure/cpu', self._reduce_pressure),
                                      ('pressure/io', self._reduce_pressure),
                                      ('pressure/memory', self._reduce_pressure)])
            except Exception:
                pass

        self.proc_files = []
        for filename, handler in (file_handlers):
            # The corresponding /proc files might not exist on the host.
            # For example, /proc/diskstats is not available in virtualized
            # environments like Linux-VServer. Silently skip collecting
            # the data.
            if os.path.exists(os.path.join('/proc', filename)):
                # In practice, this class gets instantiated only once in
                # the bitbake cooker process. Therefore 'append' mode is
                # not strictly necessary, but using it makes the class
                # more robust should two processes ever write
                # concurrently.
                destfile = os.path.join(bsdir, '%sproc_%s.log' % ('reduced_' if handler else '', filename))
                self.proc_files.append((filename, open(destfile, 'ab'), handler))
        self.monitor_disk = open(os.path.join(bsdir, 'monitor_disk.log'), 'ab')
        # Last time that we sampled /proc data resp. recorded disk monitoring data.
        self.last_proc = 0
        self.last_disk_monitor = 0
        # Minimum number of seconds between recording a sample. This becames relevant when we get
        # called very often while many short tasks get started. Sampling during quiet periods
        # depends on the heartbeat event, which fires less often.
        # By default, the Heartbeat events occur roughly once every second but the actual time
        # between these events deviates by a few milliseconds, in most cases. Hence
        # pick a somewhat arbitary tolerance such that we sample a large majority
        # of the Heartbeat events. This ignores rare events that fall outside the minimum
        # and may lead an extra sample in a given second every so often. However, it allows for fairly
        # consistent intervals between samples without missing many events.
        self.tolerance = 0.01
        self.min_seconds = 1.0 - self.tolerance

        self.meminfo_regex = re.compile(rb'^(MemTotal|MemFree|Buffers|Cached|SwapTotal|SwapFree):\s*(\d+)')
        self.diskstats_regex = re.compile(rb'^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')
        self.diskstats_ltime = None
        self.diskstats_data = None
        self.stat_ltimes = None
        # Last time we sampled /proc/pressure. All resources stored in a single dict with the key as filename
        self.last_pressure = {"pressure/cpu": None, "pressure/io": None, "pressure/memory": None}

    def close(self):
        """Close the disk-monitor log and every per-/proc-file log handle."""
        self.monitor_disk.close()
        for _, output, _ in self.proc_files:
            output.close()

    def _reduce_meminfo(self, time, data, filename):
        """
        Extracts 'MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree'
        and writes their values into a single line, in that order.
        """
        # Returns None implicitly when fewer than all six fields were found.
        values = {}
        for line in data.split(b'\n'):
            m = self.meminfo_regex.match(line)
            if m:
                values[m.group(1)] = m.group(2)
        if len(values) == 6:
            return (time,
                    b' '.join([values[x] for x in
                               (b'MemTotal', b'MemFree', b'Buffers', b'Cached', b'SwapTotal', b'SwapFree')]) + b'\n')

    def _diskstats_is_relevant_line(self, linetokens):
        # NOTE(review): expects the classic 14-field /proc/diskstats layout;
        # newer kernels may append extra fields — confirm on target hosts.
        if len(linetokens) != 14:
            return False
        disk = linetokens[2]
        return self.diskstats_regex.match(disk)

    def _reduce_diskstats(self, time, data, filename):
        """Sum sector/usage counters over relevant disks and emit a rate
        sample for the interval since the previous call (None on the first)."""
        relevant_tokens = filter(self._diskstats_is_relevant_line, map(lambda x: x.split(), data.split(b'\n')))
        diskdata = [0] * 3
        reduced = None
        for tokens in relevant_tokens:
            # rsect
            diskdata[0] += int(tokens[5])
            # wsect
            diskdata[1] += int(tokens[9])
            # use
            diskdata[2] += int(tokens[12])
        if self.diskstats_ltime:
            # We need to compute information about the time interval
            # since the last sampling and record the result as sample
            # for that point in the past.
            interval = time - self.diskstats_ltime
            if interval > 0:
                sums = [ a - b for a, b in zip(diskdata, self.diskstats_data) ]
                readTput = sums[0] / 2.0 * 100.0 / interval
                writeTput = sums[1] / 2.0 * 100.0 / interval
                util = float( sums[2] ) / 10 / interval
                # Clamp utilization into [0, 1].
                util = max(0.0, min(1.0, util))
                reduced = (self.diskstats_ltime, (readTput, writeTput, util))

        self.diskstats_ltime = time
        self.diskstats_data = diskdata
        return reduced

    def _reduce_nop(self, time, data, filename):
        # Pass the raw data through unchanged, stamped with the sample time.
        return (time, data)

    def _reduce_stat(self, time, data, filename):
        """Emit normalized (user, system, iowait) CPU fractions for the
        interval since the previous call (None on the first call or no data)."""
        if not data:
            return None
        # CPU times {user, nice, system, idle, io_wait, irq, softirq} from first line
        tokens = data.split(b'\n', 1)[0].split()
        times = [ int(token) for token in tokens[1:] ]
        reduced = None
        if self.stat_ltimes:
            user = float((times[0] + times[1]) - (self.stat_ltimes[0] + self.stat_ltimes[1]))
            system = float((times[2] + times[5] + times[6]) - (self.stat_ltimes[2] + self.stat_ltimes[5] + self.stat_ltimes[6]))
            idle = float(times[3] - self.stat_ltimes[3])
            iowait = float(times[4] - self.stat_ltimes[4])
            # Guard against a zero denominator on back-to-back samples.
            aSum = max(user + system + idle + iowait, 1)
            reduced = (time, (user/aSum, system/aSum, iowait/aSum))
        self.stat_ltimes = times
        return reduced

    def _reduce_pressure(self, time, data, filename):
        """
        Return reduced pressure: {avg10, avg60, avg300} and delta total compared to the previous sample
        for the cpu, io and memory resources. A common function is used for all 3 resources since the
        format of the /proc/pressure file is the same in each case.
        """
        if not data:
            return None
        tokens = data.split(b'\n', 1)[0].split()
        avg10 = float(tokens[1].split(b'=')[1])
        avg60 = float(tokens[2].split(b'=')[1])
        avg300 = float(tokens[3].split(b'=')[1])
        total = int(tokens[4].split(b'=')[1])

        reduced = None
        if self.last_pressure[filename]:
            delta = total - self.last_pressure[filename]
            reduced = (time, (avg10, avg60, avg300, delta))
        self.last_pressure[filename] = total
        return reduced

    def sample(self, event, force):
        """
        Collect and log proc or disk_monitor stats periodically.
        Return True if a new sample is collected and hence the value last_proc or last_disk_monitor
        is changed.
        """
        retval = False
        now = time.time()
        if (now - self.last_proc > self.min_seconds) or force:
            for filename, output, handler in self.proc_files:
                with open(os.path.join('/proc', filename), 'rb') as input:
                    data = input.read()
                    if handler:
                        reduced = handler(now, data, filename)
                    else:
                        reduced = (now, data)
                    if reduced:
                        if isinstance(reduced[1], bytes):
                            # Use as it is.
                            data = reduced[1]
                        else:
                            # Convert to a single line.
                            data = (' '.join([str(x) for x in reduced[1]]) + '\n').encode('ascii')
                        # Unbuffered raw write, less overhead and useful
                        # in case that we end up with concurrent writes.
                        os.write(output.fileno(),
                                 ('%.0f\n' % reduced[0]).encode('ascii') +
                                 data +
                                 b'\n')
            self.last_proc = now
            retval = True

        if isinstance(event, bb.event.MonitorDiskEvent) and \
           ((now - self.last_disk_monitor > self.min_seconds) or force):
            os.write(self.monitor_disk.fileno(),
                     ('%.0f\n' % now).encode('ascii') +
                     ''.join(['%s: %d\n' % (dev, sample.total_bytes - sample.free_bytes)
                              for dev, sample in event.disk_usage.items()]).encode('ascii') +
                     b'\n')
            self.last_disk_monitor = now
            retval = True
        return retval

View File

@@ -0,0 +1,12 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from pkgutil import extend_path
# Allow the 'oe' package to span multiple layers' lib/ directories.
__path__ = extend_path(__path__, __name__)

# NOTE(review): presumably the list of oe submodules auto-imported into the
# metadata python namespace by bitbake — confirm against bitbake's import logic.
BBIMPORTS = ["data", "path", "utils", "types", "package", "packagedata", \
        "packagegroup", "sstatesig", "lsb", "cachedpath", "license", \
        "qa", "reproducible", "rust", "buildcfg", "go"]

View File

@@ -0,0 +1,79 @@
import os
import subprocess
import bb.process
def detect_revision(d):
    """Return the git revision of the core metadata checkout."""
    return get_metadata_git_revision(get_scmbasepath(d))
def detect_branch(d):
    """Return the git branch of the core metadata checkout."""
    return get_metadata_git_branch(get_scmbasepath(d))
def get_scmbasepath(d):
    """Return the path of the core 'meta' layer under COREBASE."""
    corebase = d.getVar('COREBASE')
    return os.path.join(corebase, 'meta')
def get_metadata_git_branch(path):
    """Return the current git branch name at *path*, or '<unknown>' on failure."""
    try:
        output = bb.process.run('git rev-parse --abbrev-ref HEAD', cwd=path)[0]
    except bb.process.ExecutionError:
        return '<unknown>'
    return output.strip()
def get_metadata_git_revision(path):
    """Return the HEAD commit id at *path*, or '<unknown>' on failure."""
    try:
        output = bb.process.run('git rev-parse HEAD', cwd=path)[0]
    except bb.process.ExecutionError:
        return '<unknown>'
    return output.strip()
def get_metadata_git_toplevel(path):
    """Return the top-level directory of the git checkout containing *path*,
    or "" if it cannot be determined."""
    try:
        output = bb.process.run('git rev-parse --show-toplevel', cwd=path)[0]
    except bb.process.ExecutionError:
        return ""
    return output.strip()
def get_metadata_git_remotes(path):
    """Return the list of git remote names configured at *path* ([] on failure)."""
    try:
        stdout, _ = bb.process.run('git remote', cwd=path)
    except bb.process.ExecutionError:
        return []
    return stdout.split()
def get_metadata_git_remote_url(path, remote):
    """Return the URL of git *remote* at *path*, or "" if it cannot be read."""
    command = 'git remote get-url {remote}'.format(remote=remote)
    try:
        output = bb.process.run(command, cwd=path)[0]
    except bb.process.ExecutionError:
        return ""
    return output.strip()
def get_metadata_git_describe(path):
    """Return 'git describe --tags' output for *path*, or "" on failure."""
    try:
        output = bb.process.run('git describe --tags', cwd=path)[0]
    except bb.process.ExecutionError:
        return ""
    return output.strip()
def is_layer_modified(path):
    """Return " -- modified" if the checkout at *path* has uncommitted
    changes (or is not a usable git repository), otherwise ""."""
    try:
        # NOTE(review): PSEUDO_UNLOAD=1 looks intended to stop the pseudo
        # wrapper from intercepting git's file access — confirm.
        subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
git diff --quiet --no-ext-diff
git diff --quiet --no-ext-diff --cached""" % path,
                                shell=True,
                                stderr=subprocess.STDOUT)
        return ""
    except subprocess.CalledProcessError as ex:
        # Silently treat errors as "modified", without checking for the
        # (expected) return code 1 in a modified git repo. For example, we get
        # output and a 129 return code when a layer isn't a git repo at all.
        return " -- modified"
def get_layer_revisions(d):
    """Return (path, name, branch, revision, modified-marker) for every
    layer listed in BBLAYERS."""
    layer_paths = (d.getVar("BBLAYERS") or "").split()
    return [(p,
             os.path.basename(p),
             get_metadata_git_branch(p).strip(),
             get_metadata_git_revision(p),
             is_layer_modified(p))
            for p in layer_paths]

View File

@@ -0,0 +1,723 @@
# Report significant differences in the buildhistory repository since a specific revision
#
# Copyright (C) 2012-2013, 2016-2017 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Note: requires GitPython 0.3.1+
#
# You can use this from the command line by running scripts/buildhistory-diff
#
import sys
import os.path
import difflib
import git
import re
import shlex
import hashlib
import collections
import bb.utils
import bb.tinfoil
# How to display fields
list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
list_order_fields = ['PACKAGES']
defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
numeric_fields = ['PKGSIZE', 'IMAGESIZE']
# Fields to monitor
monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG']
ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
# Percentage change to alert for numeric fields
monitor_numeric_threshold = 10
# Image files to monitor (note that image-info.txt is handled separately)
img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
colours = {
'colour_default': '',
'colour_add': '',
'colour_remove': '',
}
def init_colours(use_colours):
    """Enable or disable the ANSI colour codes used when formatting changes."""
    global colours
    palette = {
        'colour_default': '\033[0m',
        'colour_add': '\033[1;32m',
        'colour_remove': '\033[1;31m',
    }
    # Same keys either way; empty strings disable colouring entirely.
    colours = palette if use_colours else {key: '' for key in palette}
class ChangeRecord:
    """A single detected change to one field of one buildhistory file;
    str() renders it as a human-readable report line/block."""

    def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
        self.path = path
        self.fieldname = fieldname
        self.oldvalue = oldvalue
        self.newvalue = newvalue
        self.monitored = monitored
        # Populated externally with FileChange objects where applicable.
        self.filechanges = None

    def __str__(self):
        return self._str_internal(True)

    def _str_internal(self, outer):
        """Format this change; *outer* controls whether a path prefix is added."""
        if outer:
            if '/image-files/' in self.path:
                prefix = '%s: ' % self.path.split('/image-files/')[0]
            else:
                prefix = '%s: ' % self.path
        else:
            prefix = ''

        def pkglist_combine(depver):
            # Flatten {pkg: version} into ["pkg (version)" | "pkg"] entries.
            pkglist = []
            for k,v in depver.items():
                if v:
                    pkglist.append("%s (%s)" % (k,v))
                else:
                    pkglist.append(k)
            return pkglist

        def detect_renamed_dirs(aitems, bitems):
            # Pair up directories whose full file set is identical on both
            # sides, reporting them as renames instead of add+remove noise.
            adirs = set(map(os.path.dirname, aitems))
            bdirs = set(map(os.path.dirname, bitems))
            files_ab = [(name, sorted(os.path.basename(item) for item in aitems if os.path.dirname(item) == name)) \
                        for name in adirs - bdirs]
            files_ba = [(name, sorted(os.path.basename(item) for item in bitems if os.path.dirname(item) == name)) \
                        for name in bdirs - adirs]
            renamed_dirs = []
            for dir1, files1 in files_ab:
                rename = False
                for dir2, files2 in files_ba:
                    if files1 == files2 and not rename:
                        renamed_dirs.append((dir1,dir2))
                        # Make sure that we don't use this (dir, files) pair again.
                        files_ba.remove((dir2,files2))
                        # If a dir has already been found to have a rename, stop and go no further.
                        rename = True
            # remove files that belong to renamed dirs from aitems and bitems
            for dir1, dir2 in renamed_dirs:
                aitems = [item for item in aitems if os.path.dirname(item) not in (dir1, dir2)]
                bitems = [item for item in bitems if os.path.dirname(item) not in (dir1, dir2)]
            return renamed_dirs, aitems, bitems

        if self.fieldname in list_fields or self.fieldname in list_order_fields:
            renamed_dirs = []
            changed_order = False
            if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
                aitems = pkglist_combine(depvera)
                bitems = pkglist_combine(depverb)
            else:
                if self.fieldname == 'FILELIST':
                    # shlex handles quoted file names containing spaces.
                    aitems = shlex.split(self.oldvalue)
                    bitems = shlex.split(self.newvalue)
                    renamed_dirs, aitems, bitems = detect_renamed_dirs(aitems, bitems)
                else:
                    aitems = self.oldvalue.split()
                    bitems = self.newvalue.split()

            removed = list(set(aitems) - set(bitems))
            added = list(set(bitems) - set(aitems))

            if not removed and not added and self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                # Same set of entries: check whether only the order changed.
                depvera = bb.utils.explode_dep_versions2(self.oldvalue, sort=False)
                depverb = bb.utils.explode_dep_versions2(self.newvalue, sort=False)
                for i, j in zip(depvera.items(), depverb.items()):
                    if i[0] != j[0]:
                        changed_order = True
                        break

            lines = []
            if renamed_dirs:
                for dfrom, dto in renamed_dirs:
                    lines.append('directory renamed {colour_remove}{}{colour_default} -> {colour_add}{}{colour_default}'.format(dfrom, dto, **colours))
            if removed or added:
                if removed and not bitems:
                    lines.append('removed all items "{colour_remove}{}{colour_default}"'.format(' '.join(removed), **colours))
                else:
                    if removed:
                        lines.append('removed "{colour_remove}{value}{colour_default}"'.format(value=' '.join(removed), **colours))
                    if added:
                        lines.append('added "{colour_add}{value}{colour_default}"'.format(value=' '.join(added), **colours))
            else:
                lines.append('changed order')

            if not (removed or added or changed_order):
                out = ''
            else:
                out = '%s: %s' % (self.fieldname, ', '.join(lines))

        elif self.fieldname in numeric_fields:
            aval = int(self.oldvalue or 0)
            bval = int(self.newvalue or 0)
            if aval != 0:
                percentchg = ((bval - aval) / float(aval)) * 100
            else:
                percentchg = 100
            out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default} ({}{:.0f}%)'.format(self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg, **colours)
        elif self.fieldname in defaultval_map:
            out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default}'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
            if self.fieldname == 'PKG' and '[default]' in self.newvalue:
                out += ' - may indicate debian renaming failure'
        elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
            # Script fields are rendered as a unified diff.
            if self.oldvalue and self.newvalue:
                out = '%s changed:\n ' % self.fieldname
            elif self.newvalue:
                out = '%s added:\n ' % self.fieldname
            elif self.oldvalue:
                out = '%s cleared:\n ' % self.fieldname
            alines = self.oldvalue.splitlines()
            blines = self.newvalue.splitlines()
            diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
            # [2:] drops the ---/+++ header lines of the diff.
            out += '\n '.join(list(diff)[2:])
            out += '\n --'
        elif self.fieldname in img_monitor_files or '/image-files/' in self.path or self.fieldname == "sysroot":
            if self.filechanges or (self.oldvalue and self.newvalue):
                fieldname = self.fieldname
                if '/image-files/' in self.path:
                    fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
                    out = 'Changes to %s:\n ' % fieldname
                else:
                    if outer:
                        prefix = 'Changes to %s ' % self.path
                    out = '(%s):\n ' % self.fieldname
                if self.filechanges:
                    out += '\n '.join(['%s' % i for i in self.filechanges])
                else:
                    alines = self.oldvalue.splitlines()
                    blines = self.newvalue.splitlines()
                    diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
                    out += '\n '.join(list(diff))
                    out += '\n --'
            else:
                out = ''
        else:
            out = '{} changed from "{colour_remove}{}{colour_default}" to "{colour_add}{}{colour_default}"'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)

        return '%s%s' % (prefix, out) if out else ''
class FileChange:
    """A single change to a file in a file listing.

    Instances render to a human-readable one-line description via str().
    For moves, 'oldvalue' holds the destination path.
    """
    changetype_add = 'A'
    changetype_remove = 'R'
    changetype_type = 'T'
    changetype_perms = 'P'
    changetype_ownergroup = 'O'
    changetype_link = 'L'
    changetype_move = 'M'

    # Human-readable names for the one-character type codes of an
    # ls-style mode field.
    _ftype_names = {
        '-': 'file',
        'd': 'directory',
        'l': 'symlink',
        'c': 'char device',
        'b': 'block device',
        'p': 'fifo',
        's': 'socket',
    }

    def __init__(self, path, changetype, oldvalue=None, newvalue=None):
        self.path = path
        self.changetype = changetype
        self.oldvalue = oldvalue
        self.newvalue = newvalue

    def _ftype_str(self, ftype):
        """Return a readable name for a file type code."""
        return self._ftype_names.get(ftype, 'unknown (%s)' % ftype)

    def __str__(self):
        ct = self.changetype
        if ct == self.changetype_add:
            return '%s was added' % self.path
        if ct == self.changetype_remove:
            return '%s was removed' % self.path
        if ct == self.changetype_type:
            return '%s changed type from %s to %s' % (
                self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
        # These three share the same "changed X from A to B" shape.
        attr_templates = {
            self.changetype_perms: '%s changed permissions from %s to %s',
            self.changetype_ownergroup: '%s changed owner/group from %s to %s',
            self.changetype_link: '%s changed symlink target from %s to %s',
        }
        if ct in attr_templates:
            return attr_templates[ct] % (self.path, self.oldvalue, self.newvalue)
        if ct == self.changetype_move:
            # For moves, oldvalue carries the new location.
            return '%s moved to %s' % (self.path, self.oldvalue)
        return '%s changed (unknown)' % self.path
def blob_to_dict(blob):
    """Parse a git blob of 'NAME = value' lines into a dict.

    Lines without an '=' are ignored; whitespace is stripped from both
    the key and the value. Only the first '=' on a line splits it.
    """
    text = blob.data_stream.read().decode('utf-8')
    adict = {}
    for line in text.splitlines():
        key, sep, value = line.partition('=')
        if sep:
            adict[key.strip()] = value.strip()
    return adict
def file_list_to_dict(lines):
    """Convert ls-style listing lines into a path-keyed dict.

    Each line is 'mode owner group size ./path[ -> target]'. The value is
    [mode, owner, group] with the symlink target appended when present.
    """
    adict = {}
    for line in lines:
        # Split on whitespace at most 4 times so the final (path) field
        # stays intact even when the file name contains spaces.
        fields = line.split(None, 4)
        # Drop the leading '.' from the path field.
        path = fields[4][1:].strip()
        if ' -> ' in path:
            # Symlinks are rendered as 'name -> target'.
            parts = path.split(' -> ')
            adict[parts[0]] = fields[0:3] + [parts[1]]
        else:
            adict[path] = fields[0:3]
    return adict
# Translation table mapping every ASCII digit to 'X'; used below to detect
# renames that differ only in embedded version numbers.
numeric_removal = str.maketrans('0123456789', 'XXXXXXXXXX')

def compare_file_lists(alines, blines, compare_ownership=True):
    """Compare two ls-style file listings, returning a list of FileChange.

    For paths present in both listings, detects type, permission,
    owner/group (unless compare_ownership is False) and symlink-target
    changes. Leftover additions/removals whose paths differ only by
    digits are collapsed into 'move' records, including whole renamed
    directory trees (e.g. versioned /lib/modules directories).
    """
    adict = file_list_to_dict(alines)
    bdict = file_list_to_dict(blines)
    filechanges = []
    additions = []
    removals = []
    for path, splitv in adict.items():
        # pop() so that whatever remains in bdict afterwards is an addition
        newsplitv = bdict.pop(path, None)
        if newsplitv:
            # Check type (first character of the mode field)
            oldvalue = splitv[0][0]
            newvalue = newsplitv[0][0]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
            # Check permissions (remainder of the mode field)
            oldvalue = splitv[0][1:]
            newvalue = newsplitv[0][1:]
            if oldvalue != newvalue:
                filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
            if compare_ownership:
                # Check owner/group
                oldvalue = '%s/%s' % (splitv[1], splitv[2])
                newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
                if oldvalue != newvalue:
                    filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
            # Check symlink target
            if newsplitv[0][0] == 'l':
                if len(splitv) > 3:
                    oldvalue = splitv[3]
                else:
                    # Was not a symlink in the old listing
                    oldvalue = None
                newvalue = newsplitv[3]
                if oldvalue != newvalue:
                    filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
        else:
            removals.append(path)
    # Whatever is left over has been added
    for path in bdict:
        additions.append(path)
    # Rather than print additions and removals, it's nicer to print file 'moves'
    # where names or paths are similar.
    # Group removals by their digit-masked form: an addition whose masked
    # form matches exactly one removal is treated as a rename candidate.
    revmap_remove = {}
    for removal in removals:
        translated = removal.translate(numeric_removal)
        if translated not in revmap_remove:
            revmap_remove[translated] = []
        revmap_remove[translated].append(removal)
    #
    # We want to detect renames of large trees of files like
    # /lib/modules/5.4.40-yocto-standard to /lib/modules/5.4.43-yocto-standard
    #
    renames = {}
    for addition in additions.copy():
        if addition not in additions:
            # Already consumed by an earlier directory-rename match below
            continue
        translated = addition.translate(numeric_removal)
        if translated in revmap_remove:
            if len(revmap_remove[translated]) != 1:
                # Ambiguous: multiple removals mask to the same form
                continue
            removal = revmap_remove[translated][0]
            commondir = addition.split("/")
            commondir2 = removal.split("/")
            idx = None
            # Find the first path component where the two paths diverge.
            for i in range(len(commondir)):
                if commondir[i] != commondir2[i]:
                    idx = i
                    break
            # NOTE(review): 'i' is reused after the loop. Because the
            # digit-masked forms are equal the component counts match and
            # some component differs, so the break is always taken here.
            commondir = "/".join(commondir[:i+1])
            commondir2 = "/".join(commondir2[:i+1])
            # If the common parent is in one dict and not the other its likely a rename
            # so iterate through those files and process as such
            if commondir2 not in bdict and commondir not in adict:
                if commondir not in renames:
                    renames[commondir] = commondir2
                # Consume every addition/removal pair under the renamed tree
                for addition2 in additions.copy():
                    if addition2.startswith(commondir):
                        removal2 = addition2.replace(commondir, commondir2)
                        if removal2 in removals:
                            additions.remove(addition2)
                            removals.remove(removal2)
                continue
            # Single-file rename
            filechanges.append(FileChange(removal, FileChange.changetype_move, addition))
            if addition in additions:
                additions.remove(addition)
            if removal in removals:
                removals.remove(removal)
    # Emit one move record per renamed directory tree
    for rename in renames:
        filechanges.append(FileChange(renames[rename], FileChange.changetype_move, rename))
    for addition in additions:
        filechanges.append(FileChange(addition, FileChange.changetype_add))
    for removal in removals:
        filechanges.append(FileChange(removal, FileChange.changetype_remove))
    return filechanges
def compare_lists(alines, blines):
    """Return FileChange records for lines removed from / added to a list.

    Comparison is set-based (order-insensitive, duplicates ignored);
    removals precede additions in the result.
    """
    old = set(alines)
    new = set(blines)
    changes = [FileChange(item, FileChange.changetype_remove) for item in old - new]
    changes.extend(FileChange(item, FileChange.changetype_add) for item in new - old)
    return changes
def compare_pkg_lists(astr, bstr):
    """Compare two versioned dependency strings (RDEPENDS-style).

    Returns (depvera, depverb): the two strings exploded via
    bb.utils.explode_dep_versions2, with entries removed where the only
    difference is a version increase on matching '>=' / '=' constraints
    (i.e. an uninteresting version bump). The caller treats equal dicts
    as "no meaningful change".
    """
    depvera = bb.utils.explode_dep_versions2(astr)
    depverb = bb.utils.explode_dep_versions2(bstr)
    # Strip out changes where the version has increased
    remove = []
    for k in depvera:
        if k in depverb:
            dva = depvera[k]
            dvb = depverb[k]
            if dva and dvb and len(dva) == len(dvb):
                # Since length is the same, sort so that prefixes (e.g. >=) will line up
                dva.sort()
                dvb.sort()
                removeit = True
                for dvai, dvbi in zip(dva, dvb):
                    if dvai != dvbi:
                        aiprefix = dvai.split(' ')[0]
                        biprefix = dvbi.split(' ')[0]
                        if aiprefix == biprefix and aiprefix in ['>=', '=']:
                            # NOTE(review): the full constraint strings
                            # (including the operator) are passed through
                            # split_version/vercmp -- assumed tolerated by
                            # bb.utils; confirm against the bb API.
                            if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
                                # Old version higher than new (a downgrade):
                                # keep the entry so it gets reported.
                                removeit = False
                                break
                        else:
                            removeit = False
                            break
                if removeit:
                    remove.append(k)
    for k in remove:
        depvera.pop(k)
        depverb.pop(k)
    return (depvera, depverb)
def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
    """Compare two 'latest'-style git blobs of variable assignments.

    'path' is the package directory (its basename is the package name).
    Returns a list of ChangeRecord objects for fields whose values
    differ, applying per-field noise filtering (numeric change
    thresholds, list-field normalisation, dependency version bumps,
    PKGR revision increments) unless report_all is set. report_ver
    forces version-monitor fields to be marked as monitored.
    """
    adict = blob_to_dict(ablob)
    bdict = blob_to_dict(bblob)
    pkgname = os.path.basename(path)
    # Fallbacks used when a field in defaultval_map has no mapped value
    defaultvals = {}
    defaultvals['PKG'] = pkgname
    defaultvals['PKGE'] = '0'
    changes = []
    # Consider every key present on either side, plus defaulted keys
    keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
    for key in keys:
        astr = adict.get(key, '')
        bstr = bdict.get(key, '')
        if key in ver_monitor_fields:
            monitored = report_ver or astr or bstr
        else:
            monitored = key in monitor_fields
        mapped_key = defaultval_map.get(key, '')
        if mapped_key:
            # Substitute the mapped/default value, marking it as defaulted
            if not astr:
                astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
            if not bstr:
                bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))
        if astr != bstr:
            if (not report_all) and key in numeric_fields:
                aval = int(astr or 0)
                bval = int(bstr or 0)
                if aval != 0:
                    percentchg = ((bval - aval) / float(aval)) * 100
                else:
                    percentchg = 100
                # Ignore numeric changes below the configured threshold
                if abs(percentchg) < monitor_numeric_threshold:
                    continue
            elif (not report_all) and key in list_fields:
                if key == "FILELIST" and (path.endswith("-dbg") or path.endswith("-src")) and bstr.strip() != '':
                    # -dbg/-src file lists churn constantly; only report
                    # when the new list became empty
                    continue
                if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                    (depvera, depverb) = compare_pkg_lists(astr, bstr)
                    if depvera == depverb:
                        continue
                if key == 'FILELIST':
                    # shlex so quoted file names containing spaces survive
                    alist = shlex.split(astr)
                    blist = shlex.split(bstr)
                else:
                    alist = astr.split()
                    blist = bstr.split()
                alist.sort()
                blist.sort()
                # We don't care about the removal of self-dependencies
                if pkgname in alist and not pkgname in blist:
                    alist.remove(pkgname)
                if ' '.join(alist) == ' '.join(blist):
                    continue
            if key == 'PKGR' and not report_all:
                vers = []
                # strip leading 'r' and dots
                for ver in (astr.split()[0], bstr.split()[0]):
                    if ver.startswith('r'):
                        ver = ver[1:]
                    vers.append(ver.replace('.', ''))
                maxlen = max(len(vers[0]), len(vers[1]))
                try:
                    # pad with '0' and convert to int
                    vers = [int(ver.ljust(maxlen, '0')) for ver in vers]
                except ValueError:
                    pass
                else:
                    # skip decrements and increments
                    if abs(vers[0] - vers[1]) == 1:
                        continue
            chg = ChangeRecord(path, key, astr, bstr, monitored)
            changes.append(chg)
    return changes
def compare_siglists(a_blob, b_blob, taskdiff=False):
    """Compare two siglist.txt blobs of task signatures.

    Each input line is '<fn>.<taskname> <pn> <sighash>'. Returns a
    single report string listing changed/added/removed tasks plus a
    summary line. With taskdiff=True, bitbake's stored signature data is
    consulted (via tinfoil) to explain *why* each changed task's hash
    differs, de-duplicating identical explanations across tasks.
    """
    # FIXME collapse down a recipe's tasks?
    alines = a_blob.data_stream.read().decode('utf-8').splitlines()
    blines = b_blob.data_stream.read().decode('utf-8').splitlines()
    keys = []
    pnmap = {}
    def readsigs(lines):
        # Build {key: sighash}; also records key order in 'keys' and a
        # pn -> key-prefix mapping in 'pnmap' (both closed over).
        sigs = {}
        for line in lines:
            linesplit = line.split()
            if len(linesplit) > 2:
                sigs[linesplit[0]] = linesplit[2]
                if not linesplit[0] in keys:
                    keys.append(linesplit[0])
                pnmap[linesplit[1]] = linesplit[0].rsplit('.', 1)[0]
        return sigs
    adict = readsigs(alines)
    bdict = readsigs(blines)
    out = []
    changecount = 0
    addcount = 0
    removecount = 0
    if taskdiff:
        with bb.tinfoil.Tinfoil() as tinfoil:
            tinfoil.prepare(config_only=True)
            # diff-line sha256 -> (line, [task descriptions it applies to])
            changes = collections.OrderedDict()
            def compare_hashfiles(pn, taskname, hash1, hash2):
                # Locate the stored sigdata files for both hashes, diff
                # them, and record each distinct explanation line.
                hashes = [hash1, hash2]
                hashfiles = bb.siggen.find_siginfo(pn, taskname, hashes, tinfoil.config_data)
                if not taskname:
                    # Called via recursecb with a combined '<key>.<task>'
                    (pn, taskname) = pn.rsplit('.', 1)
                    pn = pnmap.get(pn, pn)
                desc = '%s.%s' % (pn, taskname)
                if len(hashfiles) == 0:
                    out.append("Unable to find matching sigdata for %s with hashes %s or %s" % (desc, hash1, hash2))
                elif not hash1 in hashfiles:
                    out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash1))
                elif not hash2 in hashfiles:
                    out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash2))
                else:
                    out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb, collapsed=True)
                    for line in out2:
                        # Key identical explanation lines by their hash so
                        # they are reported once for all affected tasks
                        m = hashlib.sha256()
                        m.update(line.encode('utf-8'))
                        entry = changes.get(m.hexdigest(), (line, []))
                        if desc not in entry[1]:
                            changes[m.hexdigest()] = (line, entry[1] + [desc])
            # Define recursion callback
            def recursecb(key, hash1, hash2):
                compare_hashfiles(key, None, hash1, hash2)
                return []
            for key in keys:
                siga = adict.get(key, None)
                sigb = bdict.get(key, None)
                if siga is not None and sigb is not None and siga != sigb:
                    changecount += 1
                    (pn, taskname) = key.rsplit('.', 1)
                    compare_hashfiles(pn, taskname, siga, sigb)
                elif siga is None:
                    addcount += 1
                elif sigb is None:
                    removecount += 1
            for key, item in changes.items():
                line, tasks = item
                if len(tasks) == 1:
                    desc = tasks[0]
                elif len(tasks) == 2:
                    desc = '%s and %s' % (tasks[0], tasks[1])
                else:
                    desc = '%s and %d others' % (tasks[-1], len(tasks)-1)
                out.append('%s: %s' % (desc, line))
    else:
        # Plain mode: just report hash changes/additions/removals
        for key in keys:
            siga = adict.get(key, None)
            sigb = bdict.get(key, None)
            if siga is not None and sigb is not None and siga != sigb:
                out.append('%s changed from %s to %s' % (key, siga, sigb))
                changecount += 1
            elif siga is None:
                out.append('%s was added' % key)
                addcount += 1
            elif sigb is None:
                out.append('%s was removed' % key)
                removecount += 1
    # NOTE(review): raises ZeroDivisionError if the new siglist is empty
    # (len(bdict) == 0) -- confirm whether that can occur in practice.
    out.append('Summary: %d tasks added, %d tasks removed, %d tasks modified (%.1f%%)' % (addcount, removecount, changecount, (changecount / float(len(bdict)) * 100)))
    return '\n'.join(out)
def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False,
        sigs=False, sigsdiff=False, exclude_path=None):
    """Compute buildhistory changes between two revisions of a git repo.

    Walks the git diff between revision1 and revision2 and returns a
    list of ChangeRecord objects (or report strings when sigs/sigsdiff
    is set, in which case only siglist.txt is compared). exclude_path is
    an optional list of path prefixes whose file-level changes are
    filtered out of the result. With report_all False, only records
    flagged as monitored are returned.
    """
    repo = git.Repo(repopath)
    assert repo.bare == False
    commit = repo.commit(revision1)
    diff = commit.diff(revision2)
    changes = []
    if sigs or sigsdiff:
        # Signature mode: only the top-level siglist.txt matters
        for d in diff.iter_change_type('M'):
            if d.a_blob.path == 'siglist.txt':
                changes.append(compare_siglists(d.a_blob, d.b_blob, taskdiff=sigsdiff))
        return changes
    # Modified files
    for d in diff.iter_change_type('M'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename == 'latest':
                # Per-package variable dump
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif filename.startswith('latest.'):
                # Maintainer script (latest.pkg_postinst etc.)
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
                changes.append(chg)
            elif filename == 'sysroot':
                alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
                blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
                # Ownership is not meaningful within a sysroot
                filechanges = compare_file_lists(alines,blines, compare_ownership=False)
                if filechanges:
                    chg = ChangeRecord(path, filename, None, None, True)
                    chg.filechanges = filechanges
                    changes.append(chg)
        elif path.startswith('images/'):
            filename = os.path.basename(d.a_blob.path)
            if filename in img_monitor_files:
                if filename == 'files-in-image.txt':
                    alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
                    blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
                    filechanges = compare_file_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                elif filename == 'installed-package-names.txt':
                    alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
                    blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
                    filechanges = compare_lists(alines,blines)
                    if filechanges:
                        chg = ChangeRecord(path, filename, None, None, True)
                        chg.filechanges = filechanges
                        changes.append(chg)
                else:
                    # Other monitored image files: report as a raw diff
                    chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
                    changes.append(chg)
            elif filename == 'image-info.txt':
                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
            elif '/image-files/' in path:
                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
                changes.append(chg)
    # Look for added preinst/postinst/prerm/postrm
    # (without reporting newly added recipes)
    addedpkgs = []
    addedchanges = []
    for d in diff.iter_change_type('A'):
        path = os.path.dirname(d.b_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.b_blob.path)
            if filename == 'latest':
                addedpkgs.append(path)
            elif filename.startswith('latest.'):
                # filename[7:] strips the 'latest.' prefix
                chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read().decode('utf-8'), True)
                addedchanges.append(chg)
    for chg in addedchanges:
        # Only report scripts added to *existing* packages
        found = False
        for pkg in addedpkgs:
            if chg.path.startswith(pkg):
                found = True
                break
        if not found:
            changes.append(chg)
    # Look for cleared preinst/postinst/prerm/postrm
    for d in diff.iter_change_type('D'):
        path = os.path.dirname(d.a_blob.path)
        if path.startswith('packages/'):
            filename = os.path.basename(d.a_blob.path)
            if filename != 'latest' and filename.startswith('latest.'):
                chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read().decode('utf-8'), '', True)
                changes.append(chg)
    # filter out unwanted paths
    if exclude_path:
        for chg in changes:
            if chg.filechanges:
                fchgs = []
                for fchg in chg.filechanges:
                    for epath in exclude_path:
                        if fchg.path.startswith(epath):
                            break
                    else:
                        # No excluded prefix matched: keep the change
                        fchgs.append(fchg)
                chg.filechanges = fchgs
    if report_all:
        return changes
    else:
        return [chg for chg in changes if chg.monitored]

View File

@@ -0,0 +1,237 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Based on standard python library functions but avoid
# repeated stat calls. It's assumed the files will not change from under us
# so we can cache stat calls.
#
import os
import errno
import stat as statmod
class CachedPath(object):
    """os.path-style queries backed by caches of stat()/lstat() results.

    Based on standard python library functions but avoids repeated stat
    calls: it is assumed the files will not change from under us, so
    results (including failures, cached as False) can be reused.
    """
    def __init__(self):
        self.statcache = {}       # normalized path -> os.stat result or False
        self.lstatcache = {}      # normalized path -> os.lstat result or False
        self.normpathcache = {}   # raw path -> normalized path
        return

    def updatecache(self, x):
        """Invalidate any cached stat/lstat results for path x."""
        x = self.normpath(x)
        if x in self.statcache:
            del self.statcache[x]
        if x in self.lstatcache:
            del self.lstatcache[x]

    def normpath(self, path):
        """os.path.normpath with memoization."""
        if path in self.normpathcache:
            return self.normpathcache[path]
        newpath = os.path.normpath(path)
        self.normpathcache[path] = newpath
        return newpath

    def _callstat(self, path):
        # 'path' must already be normalized; failures are cached as False
        if path in self.statcache:
            return self.statcache[path]
        try:
            st = os.stat(path)
            self.statcache[path] = st
            return st
        except os.error:
            self.statcache[path] = False
            return False

    # We might as well call lstat and then only
    # call stat as well in the symbolic link case
    # since this turns out to be much more optimal
    # in real world usage of this cache
    def callstat(self, path):
        """Cached os.stat; returns False if the path does not exist."""
        path = self.normpath(path)
        # calllstat() populates statcache as a side effect
        self.calllstat(path)
        return self.statcache[path]

    def calllstat(self, path):
        """Cached os.lstat; returns False if the path does not exist."""
        path = self.normpath(path)
        if path in self.lstatcache:
            return self.lstatcache[path]
        #bb.error("LStatpath:" + path)
        try:
            lst = os.lstat(path)
            self.lstatcache[path] = lst
            if not statmod.S_ISLNK(lst.st_mode):
                # Not a symlink: stat() would return the same data
                self.statcache[path] = lst
            else:
                # Symlink: populate statcache by following the link
                self._callstat(path)
            return lst
        except (os.error, AttributeError):
            self.lstatcache[path] = False
            self.statcache[path] = False
            return False

    # This follows symbolic links, so both islink() and isdir() can be true
    # for the same path on systems that support symlinks
    def isfile(self, path):
        """Test whether a path is a regular file"""
        st = self.callstat(path)
        if not st:
            return False
        return statmod.S_ISREG(st.st_mode)

    # Is a path a directory?
    # This follows symbolic links, so both islink() and isdir()
    # can be true for the same path on systems that support symlinks
    def isdir(self, s):
        """Return true if the pathname refers to an existing directory."""
        st = self.callstat(s)
        if not st:
            return False
        return statmod.S_ISDIR(st.st_mode)

    def islink(self, path):
        """Test whether a path is a symbolic link"""
        st = self.calllstat(path)
        if not st:
            return False
        return statmod.S_ISLNK(st.st_mode)

    # Does a path exist?
    # This is false for dangling symbolic links on systems that support them.
    def exists(self, path):
        """Test whether a path exists. Returns False for broken symbolic links"""
        if self.callstat(path):
            return True
        return False

    def lexists(self, path):
        """Test whether a path exists. Returns True for broken symbolic links"""
        if self.calllstat(path):
            return True
        return False

    def stat(self, path):
        return self.callstat(path)

    def lstat(self, path):
        return self.calllstat(path)

    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        """Directory tree generator mirroring os.walk, using the caches
        for the isdir()/islink() tests."""
        # Matches os.walk, not os.path.walk()
        # We may not have read permission for top, in which case we can't
        # get a list of the files the directory contains. os.path.walk
        # always suppressed the exception then, rather than blow up for a
        # minor reason when (say) a thousand readable directories are still
        # left to visit. That logic is copied here.
        try:
            names = os.listdir(top)
        except os.error as err:
            if onerror is not None:
                onerror(err)
            return
        dirs, nondirs = [], []
        for name in names:
            if self.isdir(os.path.join(top, name)):
                dirs.append(name)
            else:
                nondirs.append(name)
        if topdown:
            yield top, dirs, nondirs
        for name in dirs:
            new_path = os.path.join(top, name)
            if followlinks or not self.islink(new_path):
                for x in self.walk(new_path, topdown, onerror, followlinks):
                    yield x
        if not topdown:
            yield top, dirs, nondirs

    ## realpath() related functions
    def __is_path_below(self, file, root):
        # 'root' is expected to end with os.path.sep (see realpath())
        return (file + os.path.sep).startswith(root)

    def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
        """Calculates real path of symlink 'start' + 'rel_path' below
        'root'; no part of 'start' below 'root' must contain symlinks. """
        have_dir = True
        for d in rel_path.split(os.path.sep):
            if not have_dir and not assume_dir:
                raise OSError(errno.ENOENT, "no such directory %s" % start)
            if d == os.path.pardir: # '..'
                if len(start) >= len(root):
                    # do not follow '..' before root
                    start = os.path.dirname(start)
                else:
                    # emit warning?
                    pass
            else:
                (start, have_dir) = self.__realpath(os.path.join(start, d),
                                                    root, loop_cnt, assume_dir)
            assert(self.__is_path_below(start, root))
        return start

    def __realpath(self, file, root, loop_cnt, assume_dir):
        # Resolve symlinks in 'file', never escaping above 'root';
        # loop_cnt bounds the number of link hops to catch cycles.
        while self.islink(file) and len(file) >= len(root):
            if loop_cnt == 0:
                raise OSError(errno.ELOOP, file)
            loop_cnt -= 1
            target = os.path.normpath(os.readlink(file))
            if not os.path.isabs(target):
                tdir = os.path.dirname(file)
                assert(self.__is_path_below(tdir, root))
            else:
                # Absolute link targets are interpreted relative to root
                tdir = root
            file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)
        try:
            is_dir = self.isdir(file)
        except:
            # NOTE(review): bare except; any failure is treated as
            # "not a directory"
            is_dir = False
        return (file, is_dir)

    def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
        """ Returns the canonical path of 'file' with assuming a
        toplevel 'root' directory. When 'use_physdir' is set, all
        preceding path components of 'file' will be resolved first;
        this flag should be set unless it is guaranteed that there is
        no symlink in the path. When 'assume_dir' is not set, missing
        path components will raise an ENOENT error"""
        root = os.path.normpath(root)
        file = os.path.normpath(file)
        if not root.endswith(os.path.sep):
            # letting root end with '/' makes some things easier
            root = root + os.path.sep
        if not self.__is_path_below(file, root):
            raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
        try:
            if use_physdir:
                file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
            else:
                file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
        except OSError as e:
            if e.errno == errno.ELOOP:
                # make ELOOP more readable; without catching it, there will
                # be printed a backtrace with 100s of OSError exceptions
                # else
                raise OSError(errno.ELOOP,
                              "too much recursions while resolving '%s'; loop in '%s'" %
                              (file, e.strerror))
            raise
        return file

View File

@@ -0,0 +1,159 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import collections
def get_packages(d):
    """Expand PACKAGES_NONML through the registered ClassExtender.

    Invoked from a ${@...} expansion installed by rename_packages().
    """
    nonml_pkgs = d.getVar("PACKAGES_NONML")
    extender = d.getVar("EXTENDERCLASS")
    return extender.rename_packages_internal(nonml_pkgs)
def get_depends(varprefix, d):
    """Map the saved <varprefix>_NONML dependency variable through the
    registered ClassExtender (see map_depends_variable)."""
    extender = d.getVar("EXTENDERCLASS")
    return extender.map_depends_variable("%s_NONML" % varprefix)
class ClassExtender(object):
    """Rename recipes, packages and dependencies for an extension.

    Applies a prefix (e.g. 'lib32' for multilib) across PACKAGES and the
    various dependency variables in the datastore 'd', leaving names
    that must never be prefixed (kernel, rtld, crosssdk, native/cross
    items, paths, variable references) untouched.
    """
    def __init__(self, extname, d):
        self.extname = extname      # the prefix, e.g. 'lib32'
        self.d = d
        self.pkgs_mapping = []      # [original, extended] name pairs
        # Expose this instance to ${@...} expansions
        # (see oe.classextend.get_packages / get_depends)
        self.d.setVar("EXTENDERCLASS", self)

    def extend_name(self, name):
        """Return 'name' with the extension prefix applied, unless it
        belongs to a category that must not be extended."""
        if name.startswith("kernel-") or name == "virtual/kernel":
            return name
        if name.startswith("rtld"):
            return name
        if name.endswith("-crosssdk"):
            return name
        if name.endswith("-" + self.extname):
            # Already suffixed with the extension: strip it before
            # re-applying as a prefix below
            name = name.replace("-" + self.extname, "")
        if name.startswith("virtual/"):
            # Assume large numbers of dashes means a triplet is present and we don't need to convert
            if name.count("-") >= 3 and name.endswith(("-go", "-binutils", "-gcc", "-g++")):
                return name
            subs = name.split("/", 1)[1]
            if not subs.startswith(self.extname):
                return "virtual/" + self.extname + "-" + subs
            return name
        if name.startswith("/") or (name.startswith("${") and name.endswith("}")):
            # Paths and unexpanded variable references pass through
            return name
        if not name.startswith(self.extname):
            return self.extname + "-" + name
        return name

    def map_variable(self, varname, setvar = True):
        """Apply extend_name() to every word of a space-separated
        variable; optionally write the result back to the datastore."""
        var = self.d.getVar(varname)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_regexp_variable(self, varname, setvar = True):
        """Like map_variable() but for variables holding anchored
        regular expressions ('^...'), prefixing after the anchor."""
        var = self.d.getVar(varname)
        if not var:
            return ""
        var = var.split()
        newvar = []
        for v in var:
            if v.startswith("^" + self.extname):
                newvar.append(v)
            elif v.startswith("^"):
                newvar.append("^" + self.extname + "-" + v[1:])
            else:
                newvar.append(self.extend_name(v))
        newdata = " ".join(newvar)
        if setvar:
            self.d.setVar(varname, newdata)
        return newdata

    def map_depends(self, dep):
        """Extend a single dependency name, passing native/SDK items and
        already-extended variants through unchanged."""
        if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep):
            return dep
        else:
            # Do not extend dependencies that already carry a multilib prefix
            var = self.d.getVar("MULTILIB_VARIANTS")
            if var:
                var = var.split()
                for v in var:
                    if dep.startswith(v):
                        return dep
            return self.extend_name(dep)

    def map_depends_variable(self, varname, suffix = ""):
        """Map all dependencies in 'varname' (optionally ':suffix'ed for
        a package) through map_depends(), renaming the original variable
        to *_NONML and installing a deferred ${@...} expansion in its
        place so it is recomputed on access."""
        # We need to preserve EXTENDPKGV so it can be expanded correctly later
        if suffix:
            varname = varname + ":" + suffix
        orig = self.d.getVar("EXTENDPKGV", False)
        self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
        deps = self.d.getVar(varname)
        if not deps:
            self.d.setVar("EXTENDPKGV", orig)
            return
        deps = bb.utils.explode_dep_versions2(deps)
        newdeps = collections.OrderedDict()
        for dep in deps:
            newdeps[self.map_depends(dep)] = deps[dep]
        if not varname.endswith("_NONML"):
            self.d.renameVar(varname, varname + "_NONML")
            self.d.setVar(varname, "${@oe.classextend.get_depends('%s', d)}" % varname)
            self.d.appendVarFlag(varname, "vardeps", " " + varname + "_NONML")
        ret = bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}")
        self.d.setVar("EXTENDPKGV", orig)
        return ret

    def map_packagevars(self):
        """Map the per-package dependency variables for every package
        (the trailing "" covers the unsuffixed variables)."""
        for pkg in (self.d.getVar("PACKAGES").split() + [""]):
            self.map_depends_variable("RDEPENDS", pkg)
            self.map_depends_variable("RRECOMMENDS", pkg)
            self.map_depends_variable("RSUGGESTS", pkg)
            self.map_depends_variable("RPROVIDES", pkg)
            self.map_depends_variable("RREPLACES", pkg)
            self.map_depends_variable("RCONFLICTS", pkg)
            self.map_depends_variable("PKG", pkg)

    def rename_packages(self):
        """Build pkgs_mapping for PACKAGES, rename PACKAGES to
        PACKAGES_NONML, and install a deferred expansion in its place."""
        for pkg in (self.d.getVar("PACKAGES") or "").split():
            if pkg.startswith(self.extname):
                # Already extended: map back to the unprefixed name
                self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
                continue
            self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
        self.d.renameVar("PACKAGES", "PACKAGES_NONML")
        self.d.setVar("PACKAGES", "${@oe.classextend.get_packages(d)}")

    def rename_packages_internal(self, pkgs):
        """Recompute pkgs_mapping from an expanded package list and
        return the space-joined extended names (deferred-expansion
        callback used by get_packages())."""
        self.pkgs_mapping = []
        for pkg in (self.d.expand(pkgs) or "").split():
            if pkg.startswith(self.extname):
                self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
                continue
            self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
        return " ".join([row[1] for row in self.pkgs_mapping])

    def rename_package_variables(self, variables):
        """Rename each '<var>:<oldpkg>' datastore variable to
        '<var>:<newpkg>' per pkgs_mapping; unexpanded '${...}' package
        names are skipped."""
        for pkg_mapping in self.pkgs_mapping:
            if pkg_mapping[0].startswith("${") and pkg_mapping[0].endswith("}"):
                continue
            for subs in variables:
                self.d.renameVar("%s:%s" % (subs, pkg_mapping[0]), "%s:%s" % (subs, pkg_mapping[1]))
class NativesdkClassExtender(ClassExtender):
    """ClassExtender variant for nativesdk: native, cross, crosssdk and
    already-nativesdk dependency names pass through unchanged."""
    def map_depends(self, dep):
        if dep.startswith(self.extname):
            return dep
        if dep.endswith(("-native", "-native-runtime")):
            return dep
        if any(marker in dep for marker in ('nativesdk-', '-cross-', '-crosssdk-')):
            return dep
        return self.extend_name(dep)

View File

@@ -0,0 +1,49 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
class ClassRegistryMeta(type):
    """Give each ClassRegistry their own registry"""
    def __init__(cls, name, bases, attrs):
        # Each metaclass derived from ClassRegistry gets a fresh,
        # independent registry dict.
        cls.registry = {}
        type.__init__(cls, name, bases, attrs)


class ClassRegistry(type, metaclass=ClassRegistryMeta):
    """Maintain a registry of classes, indexed by name.

    Note that this implementation requires that the names be unique, as it uses
    a dictionary to hold the classes by name.

    The name in the registry can be overridden via the 'name' attribute of the
    class, and the 'priority' attribute controls priority. The prioritized()
    method returns the registered classes in priority order.

    Subclasses of ClassRegistry may define an 'implemented' property to exert
    control over whether the class will be added to the registry (e.g. to keep
    abstract base classes out of the registry)."""
    priority = 0

    def __init__(cls, name, bases, attrs):
        super(ClassRegistry, cls).__init__(name, bases, attrs)
        # A class can opt out of registration by providing a falsy
        # 'implemented' attribute/property.
        try:
            if not cls.implemented:
                return
        except AttributeError:
            pass
        # Default the registry key to the class name unless overridden.
        try:
            cls.name
        except AttributeError:
            cls.name = name
        cls.registry[cls.name] = cls

    @classmethod
    def prioritized(tcls):
        """Return registered classes sorted by (priority, name), highest first."""
        return sorted(list(tcls.registry.values()),
                      key=lambda v: (v.priority, v.name), reverse=True)

    def unregister(cls):
        """Remove this class from the registry.

        Iterates over a snapshot of the items: deleting from a dict while
        iterating its live keys() view raises
        'RuntimeError: dictionary changed size during iteration' in
        Python 3, which the previous implementation triggered whenever it
        actually found the class.
        """
        for key, value in list(cls.registry.items()):
            if value is cls:
                del cls.registry[key]

View File

@@ -0,0 +1,293 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
# This class should provide easy access to the different aspects of the
# buildsystem such as layers, bitbake location, etc.
#
# SDK_LAYERS_EXCLUDE: Layers which will be excluded from SDK layers.
# SDK_LAYERS_EXCLUDE_PATTERN: Similar to SDK_LAYERS_EXCLUDE, but supports
# python regular expression, use space as separator,
# e.g.: ".*-downloads closed-.*"
#
import stat
import shutil
def _smart_copy(src, dest):
    """Copy src to dest, handling both files and directories.

    Directories are streamed through a tar pipe to preserve permissions
    and xattrs while excluding .git and __pycache__; regular files are
    copied with their mode.
    """
    import subprocess
    # smart_copy will choose the correct function depending on whether the
    # source is a file or a directory.
    # NOTE(review): relies on 'os' being imported at module level (not
    # visible in this chunk) -- confirm.
    mode = os.stat(src).st_mode
    if stat.S_ISDIR(mode):
        bb.utils.mkdirhier(dest)
        # NOTE(review): src/dest are interpolated unquoted into a
        # shell=True command; paths containing shell metacharacters would
        # break this. Callers pass internally derived layer paths --
        # confirm inputs can never be user-controlled.
        cmd = "tar --exclude='.git' --exclude='__pycache__' --xattrs --xattrs-include='*' -cf - -C %s -p . \
        | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest)
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    else:
        shutil.copyfile(src, dest)
        shutil.copymode(src, dest)
class BuildSystem(object):
    """Provide access to aspects of the build system (layers, bitbake
    location, etc.) and copy them into an SDK/eSDK output tree."""
    def __init__(self, context, d):
        self.d = d
        self.context = context      # descriptive string used in messages
        self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS').split()]
        self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE') or "").split()
        self.layers_exclude_pattern = d.getVar('SDK_LAYERS_EXCLUDE_PATTERN')

    def copy_bitbake_and_layers(self, destdir, workspace_name=None):
        """Copy all configured layers plus bitbake into destdir.

        Returns (copied_corebase, layers_copied): the destination-relative
        path of the copied COREBASE and of each other copied layer.
        workspace_name, if given, renames a devtool workspace layer.
        """
        import re
        # Copy in all metadata layers + bitbake (as repositories)
        copied_corebase = None
        layers_copied = []
        bb.utils.mkdirhier(destdir)
        layers = list(self.layerdirs)
        corebase = os.path.abspath(self.d.getVar('COREBASE'))
        layers.append(corebase)
        # The bitbake build system uses the meta-skeleton layer as a layout
        # for common recipes, e.g: the recipetool script to create kernel recipes
        # Add the meta-skeleton layer to be included as part of the eSDK installation
        layers.append(os.path.join(corebase, 'meta-skeleton'))
        # Exclude layers
        for layer_exclude in self.layers_exclude:
            if layer_exclude in layers:
                bb.note('Excluded %s from sdk layers since it is in SDK_LAYERS_EXCLUDE' % layer_exclude)
                layers.remove(layer_exclude)
        if self.layers_exclude_pattern:
            # Iterate over a copy since we remove from 'layers'
            layers_cp = layers[:]
            for pattern in self.layers_exclude_pattern.split():
                for layer in layers_cp:
                    if re.match(pattern, layer):
                        bb.note('Excluded %s from sdk layers since matched SDK_LAYERS_EXCLUDE_PATTERN' % layer)
                        layers.remove(layer)
        workspace_newname = workspace_name
        if workspace_newname:
            # Avoid colliding with an existing layer's basename
            layernames = [os.path.basename(layer) for layer in layers]
            extranum = 0
            while workspace_newname in layernames:
                extranum += 1
                workspace_newname = '%s-%d' % (workspace_name, extranum)
        corebase_files = self.d.getVar('COREBASE_FILES').split()
        corebase_files = [corebase + '/' +x for x in corebase_files]
        # Make sure bitbake goes in
        bitbake_dir = bb.__file__.rsplit('/', 3)[0]
        corebase_files.append(bitbake_dir)
        for layer in layers:
            layerconf = os.path.join(layer, 'conf', 'layer.conf')
            layernewname = os.path.basename(layer)
            workspace = False
            if os.path.exists(layerconf):
                with open(layerconf, 'r') as f:
                    # devtool workspace layers are detected by their
                    # auto-generated header line
                    if f.readline().startswith("# ### workspace layer auto-generated by devtool ###"):
                        if workspace_newname:
                            layernewname = workspace_newname
                            workspace = True
                        else:
                            bb.plain("NOTE: Excluding local workspace layer %s from %s" % (layer, self.context))
                            continue
            # If the layer was already under corebase, leave it there
            # since layers such as meta have issues when moved.
            layerdestpath = destdir
            if corebase == os.path.dirname(layer):
                layerdestpath += '/' + os.path.basename(corebase)
            else:
                # If the layer is located somewhere under the same parent directory
                # as corebase we keep the layer structure.
                if os.path.commonpath([layer, corebase]) == os.path.dirname(corebase):
                    layer_relative = os.path.relpath(layer, os.path.dirname(corebase))
                    if os.path.dirname(layer_relative) != layernewname:
                        layerdestpath += '/' + os.path.dirname(layer_relative)
            layerdestpath += '/' + layernewname
            layer_relative = os.path.relpath(layerdestpath,
                                             destdir)
            # Treat corebase as special since it typically will contain
            # build directories or other custom items.
            if corebase == layer:
                copied_corebase = layer_relative
                bb.utils.mkdirhier(layerdestpath)
                # Only copy the whitelisted COREBASE_FILES (plus bitbake)
                for f in corebase_files:
                    f_basename = os.path.basename(f)
                    destname = os.path.join(layerdestpath, f_basename)
                    _smart_copy(f, destname)
            else:
                layers_copied.append(layer_relative)
                if os.path.exists(os.path.join(layerdestpath, 'conf/layer.conf')):
                    bb.note("Skipping layer %s, already handled" % layer)
                else:
                    _smart_copy(layer, layerdestpath)
            if workspace:
                # Make some adjustments original workspace layer
                # Drop sources (recipe tasks will be locked, so we don't need them)
                srcdir = os.path.join(layerdestpath, 'sources')
                if os.path.isdir(srcdir):
                    shutil.rmtree(srcdir)
                # Drop all bbappends except the one for the image the SDK is being built for
                # (because of externalsrc, the workspace bbappends will interfere with the
                # locked signatures if present, and we don't need them anyway)
                image_bbappend = os.path.splitext(os.path.basename(self.d.getVar('FILE')))[0] + '.bbappend'
                appenddir = os.path.join(layerdestpath, 'appends')
                if os.path.isdir(appenddir):
                    for fn in os.listdir(appenddir):
                        if fn == image_bbappend:
                            continue
                        else:
                            os.remove(os.path.join(appenddir, fn))
                # Drop README
                readme = os.path.join(layerdestpath, 'README')
                if os.path.exists(readme):
                    os.remove(readme)
                # Filter out comments in layer.conf and change layer name
                layerconf = os.path.join(layerdestpath, 'conf', 'layer.conf')
                with open(layerconf, 'r') as f:
                    origlines = f.readlines()
                with open(layerconf, 'w') as f:
                    for line in origlines:
                        if line.startswith('#'):
                            continue
                        line = line.replace('workspacelayer', workspace_newname)
                        f.write(line)
        # meta-skeleton layer is added as part of the build system
        # but not as a layer included in the build, therefore it is
        # not reported to the function caller.
        # (break immediately after remove, so mutating while iterating is safe)
        for layer in layers_copied:
            if layer.endswith('/meta-skeleton'):
                layers_copied.remove(layer)
                break
        return copied_corebase, layers_copied
def generate_locked_sigs(sigfile, d):
    """Dump locked signatures for every task in BB_TASKDEPDATA to sigfile."""
    bb.utils.mkdirhier(os.path.dirname(sigfile))
    taskdepdata = d.getVar('BB_TASKDEPDATA', False)
    tasklist = []
    for dep in taskdepdata.values():
        # dep[2] is the recipe identifier, dep[1] the task name
        tasklist.append('%s:%s' % (dep[2], dep[1]))
    bb.parse.siggen.dump_lockedsigs(sigfile, tasklist)
def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, onlynative, pruned_output):
    """Copy a locked-signatures file, dropping unwanted signature entries.

    Keeps only entries whose task is not in excluded_tasks and whose target
    is not in excluded_targets; with onlynative set, additionally keeps only
    targets containing 'nativesdk'.  Variable header/footer lines are copied
    through unchanged.
    """
    with open(lockedsigs, 'r') as infile:
        bb.utils.mkdirhier(os.path.dirname(pruned_output))
        with open(pruned_output, 'w') as f:
            invalue = False
            for line in infile:
                if invalue:
                    # Entry lines look like "target:task:hash \" and end with
                    # a backslash; anything else terminates the value.
                    if line.endswith('\\\n'):
                        splitval = line.strip().split(':')
                        if not splitval[1] in excluded_tasks and not splitval[0] in excluded_targets:
                            if onlynative:
                                if 'nativesdk' in splitval[0]:
                                    f.write(line)
                            else:
                                f.write(line)
                    else:
                        # Closing quote line of the SIGGEN_LOCKEDSIGS value
                        f.write(line)
                        invalue = False
                elif line.startswith('SIGGEN_LOCKEDSIGS'):
                    invalue = True
                    f.write(line)
def merge_lockedsigs(copy_tasks, lockedsigs_main, lockedsigs_extra, merged_output, copy_output=None):
    """Merge two locked-signatures files, grouped per SIGGEN_LOCKEDSIGS_t-* type.

    Entries from lockedsigs_extra not already present in lockedsigs_main are
    merged in.  New entries whose task is in copy_tasks (or all new entries
    when copy_tasks is empty) are also collected separately; when copy_output
    is given they are written to it.  merged_output, when given, receives the
    complete merged file.
    """
    merged = {}
    arch_order = []
    with open(lockedsigs_main, 'r') as f:
        invalue = None
        for line in f:
            if invalue:
                if line.endswith('\\\n'):
                    merged[invalue].append(line)
                else:
                    # Closing quote: end of this variable's value
                    invalue = None
            elif line.startswith('SIGGEN_LOCKEDSIGS_t-'):
                # Key on the "t-<type>" portion of the variable name
                invalue = line[18:].split('=', 1)[0].rstrip()
                merged[invalue] = []
                arch_order.append(invalue)
    with open(lockedsigs_extra, 'r') as f:
        invalue = None
        tocopy = {}
        for line in f:
            if invalue:
                if line.endswith('\\\n'):
                    if not line in merged[invalue]:
                        target, task = line.strip().split(':')[:2]
                        if not copy_tasks or task in copy_tasks:
                            tocopy[invalue].append(line)
                        merged[invalue].append(line)
                else:
                    invalue = None
            elif line.startswith('SIGGEN_LOCKEDSIGS_t-'):
                invalue = line[18:].split('=', 1)[0].rstrip()
                if not invalue in merged:
                    merged[invalue] = []
                    arch_order.append(invalue)
                tocopy[invalue] = []
    def write_sigs_file(fn, types, sigs):
        # Serialize a type -> entry-lines mapping back into locked-sigs format
        fulltypes = []
        bb.utils.mkdirhier(os.path.dirname(fn))
        with open(fn, 'w') as f:
            for typename in types:
                lines = sigs[typename]
                if lines:
                    f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % typename)
                    for line in lines:
                        f.write(line)
                    f.write(' "\n')
                    fulltypes.append(typename)
            f.write('SIGGEN_LOCKEDSIGS_TYPES = "%s"\n' % ' '.join(fulltypes))
    if copy_output:
        write_sigs_file(copy_output, list(tocopy.keys()), tocopy)
    if merged_output:
        write_sigs_file(merged_output, arch_order, merged)
def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cache, d, fixedlsbstring="", filterfile=None):
    """Populate output_sstate_cache with the sstate objects referenced by a
    locked-signatures file, optionally renaming the native LSB directory to
    fixedlsbstring."""
    import shutil
    bb.note('Generating sstate-cache...')
    nativelsbstring = d.getVar('NATIVELSBSTRING')
    bb.process.run("PYTHONDONTWRITEBYTECODE=1 gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
    if not (fixedlsbstring and nativelsbstring != fixedlsbstring):
        return
    nativedir = output_sstate_cache + '/' + nativelsbstring
    if not os.path.isdir(nativedir):
        return
    destdir = os.path.join(output_sstate_cache, fixedlsbstring)
    for root, _, files in os.walk(nativedir):
        for fn in files:
            src = os.path.join(root, fn)
            dest = os.path.join(destdir, os.path.relpath(src, nativedir))
            if os.path.exists(dest):
                # Already exists, and it'll be the same file, so just delete it
                os.unlink(src)
            else:
                bb.utils.mkdirhier(os.path.dirname(dest))
                shutil.move(src, dest)
def check_sstate_task_list(d, targets, filteroutfile, cmdprefix='', cwd=None, logfile=None):
    """Run oe-check-sstate for the given targets, writing the filtered task
    list to filteroutfile.  Runs with a cleaned copy of the original build
    environment so the current build's bitbake is not picked up."""
    import subprocess
    bb.note('Generating sstate task list...')
    if not cwd:
        cwd = os.getcwd()
    logparam = '-l %s' % logfile if logfile else ''
    cmd = "%sPYTHONDONTWRITEBYTECODE=1 BB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
    env = dict(d.getVar('BB_ORIGENV', False))
    env.pop('BUILDDIR', '')
    env.pop('BBPATH', '')
    # Strip any bitbake/bin directories from PATH so the child invocation
    # does not reuse the outer build's bitbake.
    kept_entries = [entry for entry in env['PATH'].split(':') if not entry.endswith('/bitbake/bin')]
    env['PATH'] = ':'.join(kept_entries)
    bb.process.run(cmd, stderr=subprocess.STDOUT, env=env, cwd=cwd, executable='/bin/bash')

View File

@@ -0,0 +1,245 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
import collections
import re
import itertools
import functools
_Version = collections.namedtuple(
"_Version", ["release", "patch_l", "pre_l", "pre_v"]
)
@functools.total_ordering
class Version():
    """A parsed upstream version string, comparable via a normalized key.

    The optional *suffix* argument selects how a trailing patch component is
    parsed: "alphabetical" (single letter, e.g. 1.2a), "patch" (pN/patchN,
    e.g. 1.2p3) or anything else for plain release + pre-release parsing.
    """
    def __init__(self, version, suffix=None):
        patterns = {
            "alphabetical": r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(?P<patch_l>[a-z]))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?""",
            "patch": r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(p|patch)(?P<patch_l>[0-9]+))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?""",
        }
        default_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
        mode = str(suffix)
        version_pattern = patterns.get(mode, default_pattern)
        regex = re.compile(r"^\s*" + version_pattern + r"\s*$", re.VERBOSE | re.IGNORECASE)
        match = regex.search(version)
        if match is None:
            raise Exception("Invalid version: '{0}'".format(version))
        # "-" and "." are interchangeable release separators
        release_parts = match.group("release").replace("-", ".").split(".")
        patch_letter = ""
        if mode in patterns and match.group("patch_l"):
            patch_letter = match.group("patch_l")
        self._version = _Version(
            release=tuple(int(part) for part in release_parts),
            patch_l=patch_letter,
            pre_l=match.group("pre_l"),
            pre_v=match.group("pre_v"),
        )
        self._key = _cmpkey(
            self._version.release,
            self._version.patch_l,
            self._version.pre_l,
            self._version.pre_v,
        )
    def __eq__(self, other):
        """Versions compare equal when their normalized keys match."""
        if not isinstance(other, Version):
            return NotImplemented
        return self._key == other._key
    def __gt__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key > other._key
def _cmpkey(release, patch_l, pre_l, pre_v):
# remove leading 0
_release = tuple(
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
)
_patch = patch_l.upper()
if pre_l is None and pre_v is None:
_pre = float('inf')
else:
_pre = float(pre_v) if pre_v else float('-inf')
return _release, _patch, _pre
def get_patched_cves(d):
    """
    Get patches that solve CVEs using the "CVE: " tag.

    Returns the set of CVE IDs considered patched for this recipe, gathered
    from three sources: the patch file names, "CVE:" tag lines inside the
    patch contents, and CVE_STATUS flags whose mapped status is "Patched".
    """
    import re
    import oe.patch
    cve_match = re.compile(r"CVE:( CVE-\d{4}-\d+)+")
    # Matches the last "CVE-YYYY-ID" in the file name, also if written
    # in lowercase. Possible to have multiple CVE IDs in a single
    # file name, but only the last one will be detected from the file name.
    # However, patch files contents addressing multiple CVE IDs are supported
    # (cve_match regular expression)
    cve_file_name_match = re.compile(r".*(CVE-\d{4}-\d+)", re.IGNORECASE)
    patched_cves = set()
    patches = oe.patch.src_patches(d)
    bb.debug(2, "Scanning %d patches for CVEs" % len(patches))
    for url in patches:
        # Third element of the decoded URL is the local path of the patch
        patch_file = bb.fetch.decodeurl(url)[2]
        # Check patch file name for CVE ID
        fname_match = cve_file_name_match.search(patch_file)
        if fname_match:
            cve = fname_match.group(1).upper()
            patched_cves.add(cve)
            bb.debug(2, "Found %s from patch file name %s" % (cve, patch_file))
        # Remote patches won't be present and compressed patches won't be
        # unpacked, so say we're not scanning them
        if not os.path.isfile(patch_file):
            bb.note("%s is remote or compressed, not scanning content" % patch_file)
            continue
        with open(patch_file, "r", encoding="utf-8") as f:
            try:
                patch_text = f.read()
            except UnicodeDecodeError:
                # Fall back to a legacy 8-bit encoding for non-UTF-8 patches
                bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
                        " trying with iso8859-1" % patch_file)
                f.close()
                with open(patch_file, "r", encoding="iso8859-1") as f:
                    patch_text = f.read()
        # Search for one or more "CVE: " lines
        text_match = False
        for match in cve_match.finditer(patch_text):
            # Get only the CVEs without the "CVE: " tag
            cves = patch_text[match.start()+5:match.end()]
            for cve in cves.split():
                bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
                patched_cves.add(cve)
                text_match = True
        if not fname_match and not text_match:
            bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
    # Search for additional patched CVEs
    for cve in (d.getVarFlags("CVE_STATUS") or {}):
        decoded_status, _, _ = decode_cve_status(d, cve)
        if decoded_status == "Patched":
            bb.debug(2, "CVE %s is additionally patched" % cve)
            patched_cves.add(cve)
    return patched_cves
def get_cpe_ids(cve_product, version):
    """
    Get list of CPE identifiers for the given product and version
    """
    # Strip any "+git<rev>" suffix: NVD versions never carry it.
    version = version.split("+git")[0]
    cpe_ids = []
    for entry in cve_product.split():
        # CVE_PRODUCT entries may carry vendor info as "vendor:product";
        # otherwise the vendor field is a wildcard.
        if ":" in entry:
            vendor, product = entry.split(":", 1)
        else:
            vendor, product = "*", entry
        cpe_ids.append('cpe:2.3:*:{}:{}:{}:*:*:*:*:*:*:*'.format(vendor, product, version))
    return cpe_ids
def cve_check_merge_jsons(output, data):
    """
    Merge the single-package "package" entry of data into the accumulated
    output structure, rejecting version mismatches and duplicate packages.
    """
    if output["version"] != data["version"]:
        bb.error("Version mismatch when merging JSON outputs")
        return
    new_pkg = data["package"][0]
    for existing in output["package"]:
        if existing["name"] == new_pkg["name"]:
            bb.error("Error adding the same package %s twice" % existing["name"])
            return
    output["package"].append(new_pkg)
def update_symlinks(target_path, link_path):
    """
    Update a symbolic link link_path to point to target_path.
    Remove the link and recreate it if it already exists.
    """
    if link_path != target_path and os.path.exists(target_path):
        # Use lexists() so an existing but dangling symlink is removed too.
        # The previous exists(realpath(...)) check returned False for a
        # broken link, and os.symlink() then raised FileExistsError.
        if os.path.lexists(link_path):
            os.remove(link_path)
        # Link to the basename: target and link live in the same directory
        os.symlink(os.path.basename(target_path), link_path)
def convert_cve_version(version):
    """
    Convert a version from CVE (NVD) format to Yocto version format.
    eg 8.3_p1 -> 8.3p1, 6.2_rc1 -> 6.2-rc1

    The NVD database stores updated versions as "version_update"
    (e.g. 8.3_p1 for OpenSSH 8.3p1, 6.2_rc1 for linux 6.2-rc1), while
    recipes use ${PV} (or CVE_VERSION).  Release candidates ("rc") keep a
    "-" separator; plain updates ("p") drop the separator entirely.
    Versions without an update suffix are returned unchanged.
    """
    import re
    m = re.match(r'^([0-9.]+)_((p|rc)[0-9]+)$', version)
    if m is None:
        # No "_update" suffix: nothing to translate.
        return version
    base, update, kind = m.group(1), m.group(2), m.group(3)
    separator = '-' if kind == "rc" else ''
    return base + separator + update
def decode_cve_status(d, cve):
    """
    Convert the CVE_STATUS flag for *cve* into (mapped status, detail,
    description).  Returns empty strings when no status is set; unknown
    details are mapped to "Unpatched" with a warning.
    """
    raw_status = d.getVarFlag("CVE_STATUS", cve)
    if not raw_status:
        return ("", "", "")
    # Format is "detail[: description]"
    detail, _, description = raw_status.partition(':')
    description = description.strip()
    mapped_status = d.getVarFlag("CVE_CHECK_STATUSMAP", detail)
    if mapped_status is None:
        bb.warn('Invalid detail "%s" for CVE_STATUS[%s] = "%s", fallback to Unpatched' % (detail, cve, raw_status))
        mapped_status = "Unpatched"
    return (mapped_status, detail, description)

View File

@@ -0,0 +1,53 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import json
import oe.maketype
def typed_value(key, d):
    """Construct a value for the specified metadata variable, using its flags
    to determine the type and parameters for construction."""
    var_type = d.getVarFlag(key, 'type')
    flags = d.getVarFlags(key)
    if flags is None:
        flags = {}
    else:
        # Expand every flag value before handing it to the type constructor
        flags = {flag: d.expand(value) for flag, value in flags.items()}
    try:
        return oe.maketype.create(d.getVar(key) or '', var_type, **flags)
    except (TypeError, ValueError) as exc:
        bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
def export2json(d, json_file, expand=True, searchString="", replaceString=""):
    """Dump the datastore d to json_file as a sorted JSON object, skipping
    internal, BitBake-infrastructure, task and function keys.  Every value
    has searchString replaced by replaceString."""
    def _wanted(key):
        # Internal (_*), BitBake (BB*, B_pn*), task (do_*) and function
        # variables are not exported.
        if key.startswith(("_", "BB", "B_pn", "do_")):
            return False
        return not d.getVarFlag(key, "func")
    data2export = {}
    for key in filter(_wanted, d.keys()):
        try:
            data2export[key] = d.getVar(key, expand).replace(searchString, replaceString)
        except bb.data_smart.ExpansionError:
            data2export[key] = ''
        except AttributeError:
            # getVar() returned None; leave the key out entirely
            pass
    with open(json_file, "w") as f:
        json.dump(data2export, f, skipkeys=True, indent=4, sort_keys=True)

View File

@@ -0,0 +1,314 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
def create_socket(url, d):
    """Open *url* for reading, honouring the proxy settings from d."""
    # Import the submodule explicitly: "import urllib" alone does not
    # guarantee urllib.request is loaded.
    import urllib.request
    from bb.utils import export_proxies
    export_proxies(d)
    return urllib.request.urlopen(url)
def get_links_from_url(url, d):
    "Return all the href links found on the web location"
    from bs4 import BeautifulSoup, SoupStrainer
    # Only parse anchor tags; the rest of the page is irrelevant
    soup = BeautifulSoup(create_socket(url, d), "html.parser", parse_only=SoupStrainer("a"))
    return [anchor['href'].strip('/') for anchor in soup.find_all('a', href=True)]
def find_latest_numeric_release(url, d):
    "Find the latest listed numeric release on the given url"
    best = 0
    best_link = ""
    for link in get_links_from_url(url, d):
        try:
            # TODO use bb.utils.vercmp_string_op()
            release = float(link)
        except ValueError:
            # Non-numeric links (parent dirs, README, ...) are not releases.
            # The original bare "except:" also swallowed KeyboardInterrupt
            # and SystemExit.
            continue
        if release > best:
            best = release
            best_link = link
    return best_link
def is_src_rpm(name):
    """Return True when the link points at a source RPM file."""
    SRC_RPM_SUFFIX = ".src.rpm"
    return name.endswith(SRC_RPM_SUFFIX)
def package_name_from_srpm(srpm):
    """Strip version and release from a src.rpm filename, returning the name.

    e.g. "ca-certificates-2016.2.7-1.0.fc24.src.rpm" -> "ca-certificates"
    """
    stem = srpm.replace(".src.rpm", "")
    # name-version-release: drop the two trailing dash-separated components
    return stem.rsplit("-", 2)[0]
def get_source_package_list_from_url(url, section, d):
    "Return a sectioned list of package names from a URL list"
    bb.note("Reading %s: %s" % (url, section))
    pkgs = set()
    for link in get_links_from_url(url, d):
        if is_src_rpm(link):
            # Tag every name with its section, e.g. "zlib:main"
            pkgs.add(package_name_from_srpm(link) + ":" + section)
    return pkgs
def get_source_package_list_from_url_by_letter(url, section, d):
    """Aggregate per-letter (a-z, 0-9) package subfolders under *url*,
    silently skipping subfolders that do not exist (HTTP 404)."""
    import string
    from urllib.error import HTTPError
    packages = set()
    for letter in string.ascii_lowercase + string.digits:
        try:
            packages |= get_source_package_list_from_url("%s/%s" % (url, letter), section, d)
        except HTTPError as e:
            # Only a missing subfolder is expected; re-raise anything else
            if e.code != 404:
                raise
    return packages
def get_latest_released_fedora_source_package_list(d):
    """Return the latest Fedora release and the names of its source packages."""
    base = "http://archive.fedoraproject.org/pub/fedora/linux"
    latest = find_latest_numeric_release(base + "/releases/", d)
    package_names = get_source_package_list_from_url_by_letter(
        base + "/releases/%s/Everything/source/tree/Packages/" % latest, "main", d)
    package_names |= get_source_package_list_from_url_by_letter(
        base + "/updates/%s/SRPMS/" % latest, "updates", d)
    return latest, package_names
def get_latest_released_opensuse_source_package_list(d):
    """Return the latest openSUSE Leap release and its source package names."""
    base = "http://download.opensuse.org"
    latest = find_latest_numeric_release(base + "/source/distribution/leap", d)
    package_names = get_source_package_list_from_url(
        base + "/source/distribution/leap/%s/repo/oss/suse/src/" % latest, "main", d)
    package_names |= get_source_package_list_from_url(
        base + "/update/leap/%s/oss/src/" % latest, "updates", d)
    return latest, package_names
def get_latest_released_clear_source_package_list(d):
    """Return the latest Clear Linux release and its source package names."""
    base = "https://download.clearlinux.org/releases/"
    latest = find_latest_numeric_release(base, d)
    package_names = get_source_package_list_from_url(
        base + "%s/clear/source/SRPMS/" % latest, "main", d)
    return latest, package_names
def find_latest_debian_release(url, d):
    "Find the latest listed debian release on the given url"
    # NOTE(review): this sort is lexicographic, so two-digit release numbers
    # would compare incorrectly against one-digit ones — TODO confirm intent.
    releases = sorted(link.replace("Debian", "")
                      for link in get_links_from_url(url, d)
                      if link.startswith("Debian"))
    if not releases:
        # Nothing matched; the original bare "except:" swallowed far more
        # than the IndexError it was guarding against.
        return "_NotFound_"
    return releases[-1]
def get_debian_style_source_package_list(url, section, d):
    "Return the list of package-names stored in the debian style Sources.gz file"
    import gzip
    names = set()
    for line in gzip.open(create_socket(url, d), mode="rt"):
        if line.startswith("Package:"):
            # "Package: name" -> "name:section"
            pkg_name = line.split(":", 1)[1].strip()
            names.add(pkg_name + ":" + section)
    return names
def get_latest_released_debian_source_package_list(d):
    """Return the latest Debian release name and its source package names
    (stable plus stable-proposed-updates)."""
    dists = "http://ftp.debian.org/debian/dists/"
    latest = find_latest_debian_release(dists, d)
    package_names = get_debian_style_source_package_list(
        dists + "stable/main/source/Sources.gz", "main", d)
    package_names |= get_debian_style_source_package_list(
        dists + "stable-proposed-updates/main/source/Sources.gz", "updates", d)
    return latest, package_names
def find_latest_ubuntu_release(url, d):
    """
    Find the latest listed Ubuntu release on the given ubuntu/dists/ URL.
    To avoid matching development releases look for distributions that have
    updates, so the resulting distro could be any supported release.
    """
    url += "?C=M;O=D"  # Descending Sort by Last Modified
    for link in get_links_from_url(url, d):
        if "-updates" in link:
            return link.replace("-updates", "")
    return "_NotFound_"
def get_latest_released_ubuntu_source_package_list(d):
    """Return the latest Ubuntu release name and its source package names
    (release pocket plus -updates)."""
    dists = "http://archive.ubuntu.com/ubuntu/dists/"
    latest = find_latest_ubuntu_release(dists, d)
    package_names = get_debian_style_source_package_list(
        dists + "%s/main/source/Sources.gz" % latest, "main", d)
    package_names |= get_debian_style_source_package_list(
        dists + "%s-updates/main/source/Sources.gz" % latest, "updates", d)
    return latest, package_names
def create_distro_packages_list(distro_check_dir, d):
    """Fetch the latest source package lists for all tracked distributions
    and write one sorted "Name-Release" file per distro under
    distro_check_dir/package_lists/."""
    import shutil
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    bb.utils.remove(pkglst_dir, True)
    bb.utils.mkdirhier(pkglst_dir)
    per_distro_functions = (
        ("Debian", get_latest_released_debian_source_package_list),
        ("Ubuntu", get_latest_released_ubuntu_source_package_list),
        ("Fedora", get_latest_released_fedora_source_package_list),
        ("openSUSE", get_latest_released_opensuse_source_package_list),
        ("Clear", get_latest_released_clear_source_package_list),
    )
    for name, fetcher_func in per_distro_functions:
        try:
            release, package_list = fetcher_func(d)
        except Exception as e:
            bb.warn("Cannot fetch packages for %s: %s" % (name, e))
            # Without this, release/package_list below would be unbound (or
            # stale from the previous distro), raising NameError or writing
            # wrong data.
            continue
        bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
        if len(package_list) == 0:
            bb.error("Didn't fetch any packages for %s %s" % (name, release))
        package_list_file = os.path.join(pkglst_dir, name + "-" + release)
        with open(package_list_file, 'w') as f:
            for pkg in sorted(package_list):
                f.write(pkg + "\n")
def update_distro_data(distro_check_dir, datetime, d):
    """
    If distro packages list data is old then rebuild it.
    The operation has to be protected by a lock so that
    only one thread performs it at a time.
    """
    if not os.path.isdir(distro_check_dir):
        try:
            bb.note("Making new directory: %s" % distro_check_dir)
            os.makedirs(distro_check_dir)
        except OSError:
            raise Exception('Unable to create directory %s' % (distro_check_dir))

    datetime_file = os.path.join(distro_check_dir, "build_datetime")
    saved_datetime = "_invalid_"
    import fcntl
    f = None
    try:
        if not os.path.exists(datetime_file):
            open(datetime_file, 'w+').close()  # touch the file so that the next open won't fail
        f = open(datetime_file, "r+")
        fcntl.lockf(f, fcntl.LOCK_EX)
        saved_datetime = f.read()
        # Compare only the date portion (YYYYMMDD): rebuild at most once a day
        if saved_datetime[0:8] != datetime[0:8]:
            bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
            bb.note("Regenerating distro package lists")
            create_distro_packages_list(distro_check_dir, d)
            f.seek(0)
            f.write(datetime)
    except OSError as e:
        raise Exception('Unable to open timestamp: %s' % e)
    finally:
        # f stays None if the open itself failed; previously the finally
        # block raised NameError on the unbound f, masking the real error.
        if f is not None:
            fcntl.lockf(f, fcntl.LOCK_UN)
            f.close()
def compare_in_distro_packages_list(distro_check_dir, d):
    """Return the list of distros (as "Distro-section" strings) whose
    package lists contain this recipe.

    Uses the per-distro package list files generated by
    create_distro_packages_list() and honours DISTRO_PN_ALIAS for recipes
    whose name differs between distributions.
    """
    if not os.path.isdir(distro_check_dir):
        raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")
    localdata = bb.data.createCopy(d)
    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
    matching_distros = []
    pn = recipe_name = d.getVar('PN')
    bb.note("Checking: %s" % pn)
    # Strip -native/-cross/-initial/nativesdk- decorations so the base
    # recipe name is compared, and add a pn- override so DISTRO_PN_ALIAS
    # for the base recipe applies to the local copy.
    if pn.find("-native") != -1:
        pnstripped = pn.split("-native")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
        recipe_name = pnstripped[0]
    if pn.startswith("nativesdk-"):
        pnstripped = pn.split("nativesdk-")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES'))
        recipe_name = pnstripped[1]
    if pn.find("-cross") != -1:
        pnstripped = pn.split("-cross")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
        recipe_name = pnstripped[0]
    if pn.find("-initial") != -1:
        pnstripped = pn.split("-initial")
        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
        recipe_name = pnstripped[0]
    bb.note("Recipe: %s" % recipe_name)
    # Alias tokens that mark the recipe as approved/provided rather than
    # naming a distro package.
    distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})
    tmp = localdata.getVar('DISTRO_PN_ALIAS') or ""
    # NOTE(review): "str" shadows the builtin here, and a token that is
    # neither "dist=name" nor a distro_exceptions key raises KeyError.
    for str in tmp.split():
        if str and str.find("=") == -1 and distro_exceptions[str]:
            matching_distros.append(str)
    # Tokens of the form "distro=alias" map a distro name to the package
    # name this recipe is known by in that distro.
    distro_pn_aliases = {}
    for str in tmp.split():
        if "=" in str:
            (dist, pn_alias) = str.split('=')
            distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
    # Scan each "Distro-Release" package list file for the (possibly
    # aliased) recipe name; one match per distro is enough.
    for file in os.listdir(pkglst_dir):
        (distro, distro_release) = file.split("-")
        f = open(os.path.join(pkglst_dir, file), "r")
        for line in f:
            (pkg, section) = line.split(":")
            if distro.lower() in distro_pn_aliases:
                pn = distro_pn_aliases[distro.lower()]
            else:
                pn = recipe_name
            if pn == pkg:
                matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
                f.close()
                break
        f.close()
    # Every raw alias token is reported verbatim as well
    for item in tmp.split():
        matching_distros.append(item)
    bb.note("Matching: %s" % matching_distros)
    return matching_distros
def create_log_file(d, logname):
    """Create a DATETIME-stamped log file in LOG_DIR plus a convenience
    symlink named *logname* pointing at it; returns the log file path and
    records it in LOG_FILE."""
    logpath = d.getVar('LOG_DIR')
    bb.utils.mkdirhier(logpath)
    stem, ext = os.path.splitext(logname)
    logfile = os.path.join(logpath, "%s.%s%s" % (stem, d.getVar('DATETIME'), ext))
    if not os.path.exists(logfile):
        symlink_path = os.path.join(logpath, logname)
        if os.path.exists(symlink_path):
            os.remove(symlink_path)
        # Touch the stamped file, then point the stable name at it
        open(logfile, 'w+').close()
        os.symlink(logfile, symlink_path)
    d.setVar('LOG_FILE', logfile)
    return logfile
def save_distro_check_result(result, datetime, result_file, d):
    """Append one comma-separated result row for the current recipe to
    result_file, serialized with an exclusive file lock."""
    import fcntl
    pn = d.getVar('PN')
    logdir = d.getVar('LOG_DIR')
    if not logdir:
        bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
        return
    bb.utils.mkdirhier(logdir)
    line = ",".join([pn] + list(result))
    f = open(result_file, "a")
    fcntl.lockf(f, fcntl.LOCK_EX)
    f.seek(0, os.SEEK_END)  # seek to the end of file
    f.write(line + "\n")
    fcntl.lockf(f, fcntl.LOCK_UN)
    f.close()

View File

@@ -0,0 +1,145 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
def machine_dict(d):
    """Return the table of known TARGET_OS/TARGET_ARCH ELF attributes.

    Keyed first by TARGET_OS, then TARGET_ARCH; each value is a tuple of
    (ELF machine, OSABI, ABI version, little-endian?, bitness).  Extra
    entries can be contributed by functions named in
    PACKAGEQA_EXTRA_MACHDEFFUNCS (when a datastore is supplied).
    """
    # TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
    machdata = {
        "darwin9" : {
            "arm" : (40, 0, 0, True, 32),
        },
        "eabi" : {
            "arm" : (40, 0, 0, True, 32),
        },
        "elf" : {
            "aarch64" : (183, 0, 0, True, 64),
            "aarch64_be" :(183, 0, 0, False, 64),
            "i586" : (3, 0, 0, True, 32),
            "i686" : (3, 0, 0, True, 32),
            "x86_64": (62, 0, 0, True, 64),
            "epiphany": (4643, 0, 0, True, 32),
            "lm32": (138, 0, 0, False, 32),
            "loongarch64":(258, 0, 0, True, 64),
            "mips": ( 8, 0, 0, False, 32),
            "mipsel": ( 8, 0, 0, True, 32),
            "microblaze": (189, 0, 0, False, 32),
            "microblazeel":(189, 0, 0, True, 32),
            "powerpc": (20, 0, 0, False, 32),
            "riscv32": (243, 0, 0, True, 32),
            "riscv64": (243, 0, 0, True, 64),
        },
        "linux" : {
            "aarch64" : (183, 0, 0, True, 64),
            "aarch64_be" :(183, 0, 0, False, 64),
            "arm" : (40, 97, 0, True, 32),
            "armeb": (40, 97, 0, False, 32),
            "powerpc": (20, 0, 0, False, 32),
            "powerpc64": (21, 0, 0, False, 64),
            "powerpc64le": (21, 0, 0, True, 64),
            "i386": ( 3, 0, 0, True, 32),
            "i486": ( 3, 0, 0, True, 32),
            "i586": ( 3, 0, 0, True, 32),
            "i686": ( 3, 0, 0, True, 32),
            "x86_64": (62, 0, 0, True, 64),
            "ia64": (50, 0, 0, True, 64),
            "alpha": (36902, 0, 0, True, 64),
            "hppa": (15, 3, 0, False, 32),
            "loongarch64":(258, 0, 0, True, 64),
            "m68k": ( 4, 0, 0, False, 32),
            "mips": ( 8, 0, 0, False, 32),
            "mipsel": ( 8, 0, 0, True, 32),
            "mips64": ( 8, 0, 0, False, 64),
            "mips64el": ( 8, 0, 0, True, 64),
            "mipsisa32r6": ( 8, 0, 0, False, 32),
            "mipsisa32r6el": ( 8, 0, 0, True, 32),
            "mipsisa64r6": ( 8, 0, 0, False, 64),
            "mipsisa64r6el": ( 8, 0, 0, True, 64),
            "nios2": (113, 0, 0, True, 32),
            "riscv32": (243, 0, 0, True, 32),
            "riscv64": (243, 0, 0, True, 64),
            "s390": (22, 0, 0, False, 32),
            "sh4": (42, 0, 0, True, 32),
            "sparc": ( 2, 0, 0, False, 32),
            "microblaze": (189, 0, 0, False, 32),
            "microblazeel":(189, 0, 0, True, 32),
        },
        "linux-android" : {
            "aarch64" : (183, 0, 0, True, 64),
            "i686": ( 3, 0, 0, True, 32),
            "x86_64": (62, 0, 0, True, 64),
        },
        "linux-androideabi" : {
            "arm" : (40, 97, 0, True, 32),
        },
        "linux-musl" : {
            "aarch64" : (183, 0, 0, True, 64),
            "aarch64_be" :(183, 0, 0, False, 64),
            "arm" : ( 40, 97, 0, True, 32),
            "armeb": ( 40, 97, 0, False, 32),
            "powerpc": ( 20, 0, 0, False, 32),
            "powerpc64": ( 21, 0, 0, False, 64),
            "powerpc64le": (21, 0, 0, True, 64),
            "i386": ( 3, 0, 0, True, 32),
            "i486": ( 3, 0, 0, True, 32),
            "i586": ( 3, 0, 0, True, 32),
            "i686": ( 3, 0, 0, True, 32),
            "x86_64": ( 62, 0, 0, True, 64),
            "mips": ( 8, 0, 0, False, 32),
            "mipsel": ( 8, 0, 0, True, 32),
            "mips64": ( 8, 0, 0, False, 64),
            "mips64el": ( 8, 0, 0, True, 64),
            "microblaze": (189, 0, 0, False, 32),
            "microblazeel":(189, 0, 0, True, 32),
            "riscv32": (243, 0, 0, True, 32),
            "riscv64": (243, 0, 0, True, 64),
            "sh4": ( 42, 0, 0, True, 32),
        },
        "uclinux-uclibc" : {
            "bfin": ( 106, 0, 0, True, 32),
        },
        "linux-gnueabi" : {
            "arm" : (40, 0, 0, True, 32),
            "armeb" : (40, 0, 0, False, 32),
        },
        "linux-musleabi" : {
            "arm" : (40, 0, 0, True, 32),
            "armeb" : (40, 0, 0, False, 32),
        },
        "linux-gnuspe" : {
            "powerpc": (20, 0, 0, False, 32),
        },
        "linux-muslspe" : {
            "powerpc": (20, 0, 0, False, 32),
        },
        "linux-gnu" : {
            "powerpc": (20, 0, 0, False, 32),
            "sh4": (42, 0, 0, True, 32),
        },
        "linux-gnu_ilp32" : {
            "aarch64" : (183, 0, 0, True, 32),
        },
        "linux-gnux32" : {
            "x86_64": (62, 0, 0, True, 32),
        },
        "linux-muslx32" : {
            "x86_64": (62, 0, 0, True, 32),
        },
        "linux-gnun32" : {
            "mips64": ( 8, 0, 0, False, 32),
            "mips64el": ( 8, 0, 0, True, 32),
            "mipsisa64r6": ( 8, 0, 0, False, 32),
            "mipsisa64r6el":( 8, 0, 0, True, 32),
        },
    }

    # Add in any extra user supplied data which may come from a BSP layer, removing the
    # need to always change this class directly.
    # (Previously this read d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS" or None):
    # the "or None" was inside the argument and therefore a no-op.)
    extra_machdata = (d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS") or "").split() if d else []
    for m in extra_machdata:
        call = m + "(machdata, d)"
        locs = { "machdata" : machdata, "d" : d}
        machdata = bb.utils.better_eval(call, locs)

    return machdata

View File

@@ -0,0 +1,34 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
import re
def map_arch(a):
    """Map an OE target architecture name to the Go GOARCH equivalent.

    Returns '' for architectures with no Go mapping.
    """
    # Exact names first; none of them is shadowed by an earlier pattern.
    exact = {
        'x86_64': 'amd64',
        'mips': 'mips',
        'mipsel': 'mipsle',
        'riscv64': 'riscv64',
        'loongarch64': 'loong64',
    }
    if a in exact:
        return exact[a]
    # Prefix patterns in priority order: the more specific mips64el /
    # ppc64le patterns must be tried before their shorter prefixes.
    patterns = (
        (r'i.86', '386'),
        (r'arm.*', 'arm'),
        (r'aarch64.*', 'arm64'),
        (r'mips64el.*', 'mips64le'),
        (r'mips64.*', 'mips64'),
        (r'p(pc|owerpc)(64le)', 'ppc64le'),
        (r'p(pc|owerpc)(64)', 'ppc64'),
    )
    for pattern, goarch in patterns:
        if re.match(pattern, a):
            return goarch
    return ''

View File

@@ -0,0 +1,160 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Helper module for GPG signing"""
import bb
import os
import shlex
import subprocess
import tempfile
class LocalSigner(object):
"""Class for handling local (on the build host) signing"""
def __init__(self, d):
self.gpg_bin = d.getVar('GPG_BIN') or \
bb.utils.which(os.getenv('PATH'), 'gpg')
self.gpg_cmd = [self.gpg_bin]
self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent")
# Without this we see "Cannot allocate memory" errors when running processes in parallel
# It needs to be set for any gpg command since any agent launched can stick around in memory
# and this parameter must be set.
if self.gpg_agent_bin:
self.gpg_cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)]
self.gpg_path = d.getVar('GPG_PATH')
self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
self.gpg_version = self.get_gpg_version()
def export_pubkey(self, output_file, keyid, armor=True):
"""Export GPG public key to a file"""
cmd = self.gpg_cmd + ["--no-permission-warning", "--batch", "--yes", "--export", "-o", output_file]
if self.gpg_path:
cmd += ["--homedir", self.gpg_path]
if armor:
cmd += ["--armor"]
cmd += [keyid]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def sign_rpms(self, files, keyid, passphrase, digest, sign_chunk, fsk=None, fsk_password=None):
"""Sign RPM files"""
cmd = self.rpm_bin + " --addsign --define '_gpg_name %s' " % keyid
gpg_args = '--no-permission-warning --batch --passphrase=%s --agent-program=%s|--auto-expand-secmem' % (passphrase, self.gpg_agent_bin)
if self.gpg_version > (2,1,):
gpg_args += ' --pinentry-mode=loopback'
cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
cmd += "--define '_binary_filedigest_algorithm %s' " % digest
if self.gpg_bin:
cmd += "--define '__gpg %s' " % self.gpg_bin
if self.gpg_path:
cmd += "--define '_gpg_path %s' " % self.gpg_path
if fsk:
cmd += "--signfiles --fskpath %s " % fsk
if fsk_password:
cmd += "--define '_file_signing_key_password %s' " % fsk_password
# Sign in chunks
for i in range(0, len(files), sign_chunk):
subprocess.check_output(shlex.split(cmd + ' '.join(files[i:i+sign_chunk])), stderr=subprocess.STDOUT)
def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True, output_suffix=None, use_sha256=False):
"""Create a detached signature of a file"""
if passphrase_file and passphrase:
raise Exception("You should use either passphrase_file of passphrase, not both")
cmd = self.gpg_cmd + ['--detach-sign', '--no-permission-warning', '--batch',
'--no-tty', '--yes', '--passphrase-fd', '0', '-u', keyid]
if self.gpg_path:
cmd += ['--homedir', self.gpg_path]
if armor:
cmd += ['--armor']
if use_sha256:
cmd += ['--digest-algo', "SHA256"]
#gpg > 2.1 supports password pipes only through the loopback interface
#gpg < 2.1 errors out if given unknown parameters
if self.gpg_version > (2,1,):
cmd += ['--pinentry-mode', 'loopback']
try:
if passphrase_file:
with open(passphrase_file) as fobj:
passphrase = fobj.readline();
if not output_suffix:
output_suffix = 'asc' if armor else 'sig'
output_file = input_file + "." + output_suffix
with tempfile.TemporaryDirectory(dir=os.path.dirname(output_file)) as tmp_dir:
tmp_file = os.path.join(tmp_dir, os.path.basename(output_file))
cmd += ['-o', tmp_file]
cmd += [input_file]
job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
(_, stderr) = job.communicate(passphrase.encode("utf-8"))
if job.returncode:
bb.fatal("GPG exited with code %d: %s" % (job.returncode, stderr.decode("utf-8")))
os.rename(tmp_file, output_file)
except IOError as e:
bb.error("IO error (%s): %s" % (e.errno, e.strerror))
raise Exception("Failed to sign '%s'" % input_file)
except OSError as e:
bb.error("OS error (%s): %s" % (e.errno, e.strerror))
raise Exception("Failed to sign '%s" % input_file)
def get_gpg_version(self):
"""Return the gpg version as a tuple of ints"""
try:
cmd = self.gpg_cmd + ["--version", "--no-permission-warning"]
ver_str = subprocess.check_output(cmd).split()[2].decode("utf-8")
return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
except subprocess.CalledProcessError as e:
bb.fatal("Could not get gpg version: %s" % e)
def verify(self, sig_file, valid_sigs = ''):
    """Verify signature"""
    cmd = self.gpg_cmd + ["--verify", "--no-permission-warning", "--status-fd", "1"]
    if self.gpg_path:
        cmd += ["--homedir", self.gpg_path]

    cmd += [sig_file]
    status = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    # Valid if any key matches if unspecified
    if not valid_sigs:
        return status.returncode == 0

    import re
    # --status-fd lines look like: "[GNUPG:] GOODSIG <keyid> <user id>"
    sigre = re.compile(r'^\[GNUPG:\] GOODSIG (\S+)\s(.*)$')
    goodsigs = [m.group(1)
                for m in map(sigre.match, status.stdout.decode("utf-8").splitlines())
                if m]

    if any(sig in goodsigs for sig in valid_sigs.split()):
        return True
    if len(goodsigs):
        bb.warn('No accepted signatures found. Good signatures found: %s.' % ' '.join(goodsigs))
    return False
def get_signer(d, backend):
    """Get signer object for the specified backend"""
    # Only local signing is implemented; any other backend is fatal.
    if backend != 'local':
        bb.fatal("Unsupported signing backend '%s'" % backend)
    return LocalSigner(d)

View File

@@ -0,0 +1,261 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Code for parsing OpenEmbedded license strings"""
import ast
import re
from fnmatch import fnmatchcase as fnmatch
def license_ok(license, dont_want_licenses):
    """ Return False if License exist in dont_want_licenses else True """
    # Any fnmatch-style pattern hit in the deny list rejects the license.
    return not any(fnmatch(license, dwl) for dwl in dont_want_licenses)
def obsolete_license_list():
    """Return the list of deprecated (pre-SPDX) license names recognized
    by OE; kept as a single split string for easier maintenance."""
    return (
        "AGPL-3 AGPL-3+ AGPLv3 AGPLv3+ AGPLv3.0 AGPLv3.0+ AGPL-3.0 AGPL-3.0+ BSD-0-Clause "
        "GPL-1 GPL-1+ GPLv1 GPLv1+ GPLv1.0 GPLv1.0+ GPL-1.0 GPL-1.0+ GPL-2 GPL-2+ GPLv2 "
        "GPLv2+ GPLv2.0 GPLv2.0+ GPL-2.0 GPL-2.0+ GPL-3 GPL-3+ GPLv3 GPLv3+ GPLv3.0 GPLv3.0+ "
        "GPL-3.0 GPL-3.0+ LGPLv2 LGPLv2+ LGPLv2.0 LGPLv2.0+ LGPL-2.0 LGPL-2.0+ LGPL2.1 LGPL2.1+ "
        "LGPLv2.1 LGPLv2.1+ LGPL-2.1 LGPL-2.1+ LGPLv3 LGPLv3+ LGPL-3.0 LGPL-3.0+ MPL-1 MPLv1 "
        "MPLv1.1 MPLv2 MIT-X MIT-style openssl PSF PSFv2 Python-2 Apachev2 Apache-2 Artisticv1 "
        "Artistic-1 AFL-2 AFL-1 AFLv2 AFLv1 CDDLv1 CDDL-1 EPLv1.0 FreeType Nauman "
        "tcl vim SGIv1"
    ).split()
class LicenseError(Exception):
    """Base class for all license-parsing errors raised by this module."""
    pass

class LicenseSyntaxError(LicenseError):
    """Raised when a license expression cannot be parsed."""
    def __init__(self, licensestr, exc):
        super().__init__()
        self.licensestr = licensestr
        self.exc = exc

    def __str__(self):
        return "error in '%s': %s" % (self.licensestr, self.exc)

class InvalidLicense(LicenseError):
    """Raised when a license name contains disallowed characters."""
    def __init__(self, license):
        super().__init__()
        self.license = license

    def __str__(self):
        return "invalid characters in license '%s'" % self.license
# Characters that act as operators/separators in a license expression.
license_operator_chars = '&|() '
license_operator = re.compile(r'([' + license_operator_chars + '])')
# A bare license name: alphanumerics plus '.', '+', '_' and '-'.
license_pattern = re.compile(r'[a-zA-Z0-9.+_\-]+$')

class LicenseVisitor(ast.NodeVisitor):
    """Get elements based on OpenEmbedded license strings.

    Note: the original code carried the method docstrings as stray
    class-level string statements *between* the methods; they have been
    moved into the methods they describe.
    """
    def get_elements(self, licensestr):
        """Tokenize an OE license string into a list of quoted license
        names and operators, inserting an implicit '&' between adjacent
        names so the result parses as a Python expression."""
        new_elements = []
        elements = [x for x in license_operator.split(licensestr) if x.strip()]
        for pos, element in enumerate(elements):
            if license_pattern.match(element):
                # Two adjacent license names mean an implicit AND.
                if pos > 0 and license_pattern.match(elements[pos - 1]):
                    new_elements.append('&')
                element = '"' + element + '"'
            elif not license_operator.match(element):
                raise InvalidLicense(element)
            new_elements.append(element)
        return new_elements

    def visit_elements(self, elements):
        """Visit a syntax tree built from previously generated elements."""
        self.visit(ast.parse(' '.join(elements)))

    def visit_string(self, licensestr):
        """Visit a syntax tree built directly from an OE license string."""
        self.visit_elements(self.get_elements(licensestr))
class FlattenVisitor(LicenseVisitor):
    """Flatten a license tree (parsed from a string) by selecting one of each
    set of OR options, in the way the user specifies"""
    def __init__(self, choose_licenses):
        # Callback deciding which side of an '|' to keep.
        self.choose_licenses = choose_licenses
        self.licenses = []
        LicenseVisitor.__init__(self)

    def visit_Str(self, node):
        # Python < 3.8 AST string node.
        self.licenses.append(node.s)

    def visit_Constant(self, node):
        self.licenses.append(node.value)

    def visit_BinOp(self, node):
        if not isinstance(node.op, ast.BitOr):
            # AND: both sides are mandatory, just recurse.
            self.generic_visit(node)
            return
        # OR: flatten each side separately, then keep the preferred one.
        branches = []
        for side in (node.left, node.right):
            sub = FlattenVisitor(self.choose_licenses)
            sub.visit(side)
            branches.append(sub.licenses)
        self.licenses.extend(self.choose_licenses(branches[0], branches[1]))
def flattened_licenses(licensestr, choose_licenses):
    """Given a license string and choose_licenses function, return a flat list of licenses"""
    visitor = FlattenVisitor(choose_licenses)
    try:
        visitor.visit_string(licensestr)
    except SyntaxError as exc:
        # Re-raise with the offending string attached for diagnostics.
        raise LicenseSyntaxError(licensestr, exc)
    return visitor.licenses
def is_included(licensestr, include_licenses=None, exclude_licenses=None):
    """Given a license string, a list of licenses to include and a list of
    licenses to exclude, determine if the license string matches the include
    list and does not match the exclude list.

    Returns a tuple holding the boolean state and a list of the applicable
    licenses that were excluded if state is False, or the licenses that were
    included if the state is True."""
    include_licenses = include_licenses or ['*']
    exclude_licenses = exclude_licenses or []

    def include_license(license):
        return any(fnmatch(license, pattern) for pattern in include_licenses)

    def exclude_license(license):
        return any(fnmatch(license, pattern) for pattern in exclude_licenses)

    def weight(lics):
        # The factor 1000 is arbitrary, just expected to be much larger
        # than the number of licenses actually specified, so any excluded
        # license makes the weight negative while the include count still
        # ranks otherwise-acceptable alternatives.
        penalty = 1000 if any(exclude_license(lic) for lic in lics) else 0
        return sum(1 for lic in lics if include_license(lic)) - penalty

    def choose_licenses(alpha, beta):
        """Select the option in an OR which is the 'best' (has the most
        included licenses and no excluded licenses)."""
        return alpha if weight(alpha) >= weight(beta) else beta

    licenses = flattened_licenses(licensestr, choose_licenses)
    excluded = [lic for lic in licenses if exclude_license(lic)]
    if excluded:
        return False, excluded
    return True, [lic for lic in licenses if include_license(lic)]
class ManifestVisitor(LicenseVisitor):
    """Walk license tree (parsed from a string) removing the incompatible
    licenses specified"""
    def __init__(self, dont_want_licenses, canonical_license, d):
        # fnmatch patterns for licenses that must be dropped.
        self._dont_want_licenses = dont_want_licenses
        # Callable mapping (d, license) -> canonical license name.
        self._canonical_license = canonical_license
        self._d = d
        # Operators ('&', '|', '[', ']') buffered since the last kept
        # license; flushed when the next license is accepted.
        self._operators = []
        # Output accumulators: the kept licenses and the rebuilt string.
        self.licenses = []
        self.licensestr = ''
        LicenseVisitor.__init__(self)
    def visit(self, node):
        # NOTE(review): ast.Str is deprecated since Python 3.8 and removed
        # in 3.12; confirm string constants still hit this branch on the
        # interpreters this runs under.
        if isinstance(node, ast.Str):
            lic = node.s

            if license_ok(self._canonical_license(self._d, lic),
                    self._dont_want_licenses) == True:
                # Accepted license: first flush the buffered operators,
                # collapsing consecutive '&'/'|' (left behind by removed
                # licenses) into the most recent one, while brackets are
                # always preserved.
                if self._operators:
                    ops = []
                    for op in self._operators:
                        if op == '[':
                            ops.append(op)
                        elif op == ']':
                            ops.append(op)
                        else:
                            if not ops:
                                ops.append(op)
                            elif ops[-1] in ['[', ']']:
                                ops.append(op)
                            else:
                                # Overwrite the previous '&'/'|' rather
                                # than emitting two operators in a row.
                                ops[-1] = op

                    for op in ops:
                        if op == '[' or op == ']':
                            self.licensestr += op
                        elif self.licenses:
                            self.licensestr += ' ' + op + ' '
                    self._operators = []

                self.licensestr += lic
                self.licenses.append(lic)
        elif isinstance(node, ast.BitAnd):
            self._operators.append("&")
        elif isinstance(node, ast.BitOr):
            self._operators.append("|")
        elif isinstance(node, ast.List):
            # '(' was rewritten to '[' by manifest_licenses().
            self._operators.append("[")
        elif isinstance(node, ast.Load):
            # The Load context node marks the end of a List group, i.e.
            # the position of the original ')'.
            self.licensestr += "]"

        self.generic_visit(node)
def manifest_licenses(licensestr, dont_want_licenses, canonical_license, d):
    """Given a license string and dont_want_licenses list,
    return license string filtered and a list of licenses"""
    visitor = ManifestVisitor(dont_want_licenses, canonical_license, d)
    try:
        # Parentheses are grouping syntax to ast.parse and leave no node
        # behind, so smuggle them through as list brackets instead.
        elements = ['[' if e == '(' else ']' if e == ')' else e
                    for e in visitor.get_elements(licensestr)]
        visitor.visit_elements(elements)
    except SyntaxError as exc:
        raise LicenseSyntaxError(licensestr, exc)

    # Translate the brackets back to parentheses for the caller.
    visitor.licensestr = visitor.licensestr.replace('[', '(').replace(']', ')')
    return (visitor.licensestr, visitor.licenses)
class ListVisitor(LicenseVisitor):
    """Record all different licenses found in the license string"""
    def __init__(self):
        # A set, so repeated mentions of a license collapse to one entry.
        self.licenses = set()
    def visit_Str(self, node):
        # Python < 3.8 AST string node.
        self.licenses.add(node.s)
    def visit_Constant(self, node):
        self.licenses.add(node.value)
def list_licenses(licensestr):
    """Simply get a list of all licenses mentioned in a license string.
    Binary operators are not applied or taken into account in any way"""
    collector = ListVisitor()
    try:
        collector.visit_string(licensestr)
    except SyntaxError as exc:
        raise LicenseSyntaxError(licensestr, exc)
    # Note: despite the name, this returns a set of license names.
    return collector.licenses
def apply_pkg_license_exception(pkg, bad_licenses, exceptions):
    """Return remaining bad licenses after removing any package exceptions"""
    remaining = []
    for lic in bad_licenses:
        # Exceptions are recorded as "<pkg>:<license>" entries.
        if pkg + ':' + lic not in exceptions:
            remaining.append(lic)
    return remaining

View File

@@ -0,0 +1,123 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
def get_os_release():
    """Get all key-value pairs from /etc/os-release as a dict"""
    from collections import OrderedDict
    # NOTE(review): 'os' is not imported anywhere in this file view, which
    # would make this a NameError at runtime; imported locally as a fix.
    import os
    data = OrderedDict()
    if os.path.exists('/etc/os-release'):
        with open('/etc/os-release') as f:
            for line in f:
                try:
                    key, val = line.rstrip().split('=', 1)
                except ValueError:
                    # Skip blank/malformed lines with no '=' separator.
                    continue
                data[key.strip()] = val.strip('"')
    return data
def release_dict_osr():
    """ Populate a dict with pertinent values from /etc/os-release """
    os_release = get_os_release()
    data = {}
    # Map the os-release keys onto the legacy DISTRIB_* names.
    for src, dst in (('ID', 'DISTRIB_ID'), ('VERSION_ID', 'DISTRIB_RELEASE')):
        if src in os_release:
            data[dst] = os_release[src]
    return data
def release_dict_lsb():
    """ Return the output of lsb_release -ir as a dictionary """
    from subprocess import PIPE

    try:
        output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
    except bb.process.CmdError as exc:
        return {}

    lsb_map = { 'Distributor ID': 'DISTRIB_ID',
                'Release': 'DISTRIB_RELEASE'}

    data = {}
    for line in output.splitlines():
        # Some lsb_release versions prefix each line with "-e "; drop it.
        if line.startswith("-e"):
            line = line[3:]
        try:
            key, value = line.split(":\t", 1)
        except ValueError:
            continue
        if key in lsb_map:
            data[lsb_map[key]] = value

    # Both the distributor ID and the release must be present to be usable.
    if len(data.keys()) != 2:
        return None

    return data
def release_dict_file():
    """ Try to gather release information manually when other methods fail """
    # NOTE(review): 'os' is not imported anywhere in this file view, which
    # would make this a NameError at runtime; imported locally as a fix.
    import os
    import re
    data = {}
    try:
        if os.path.exists('/etc/lsb-release'):
            data = {}
            with open('/etc/lsb-release') as f:
                for line in f:
                    key, value = line.split("=", 1)
                    data[key] = value.strip()
        elif os.path.exists('/etc/redhat-release'):
            data = {}
            with open('/etc/redhat-release') as f:
                distro = f.readline().strip()
            # e.g. "CentOS Linux release 7.9 (Core)"
            match = re.match(r'(.*) release (.*) \((.*)\)', distro)
            if match:
                data['DISTRIB_ID'] = match.group(1)
                data['DISTRIB_RELEASE'] = match.group(2)
        elif os.path.exists('/etc/SuSE-release'):
            data = {}
            data['DISTRIB_ID'] = 'SUSE LINUX'
            with open('/etc/SuSE-release') as f:
                for line in f:
                    if line.startswith('VERSION = '):
                        data['DISTRIB_RELEASE'] = line[10:].rstrip()
                        break
    except IOError:
        return {}
    return data
def distro_identifier(adjust_hook=None):
    """Return a distro identifier string based upon lsb_release -ri,
    with optional adjustment via a hook"""
    import re

    # Try /etc/os-release first, then the output of `lsb_release -ir` and
    # finally fall back on parsing various release files in order to
    # determine host distro name and version.
    distro_data = (release_dict_osr()
                   or release_dict_lsb()
                   or release_dict_file())

    distro_id = distro_data.get('DISTRIB_ID', '')
    release = distro_data.get('DISTRIB_RELEASE', '')

    if adjust_hook:
        distro_id, release = adjust_hook(distro_id, release)
    if not distro_id:
        return "unknown"
    # Filter out any non-alphanumerics and convert to lowercase
    distro_id = re.sub(r'\W', '', distro_id).lower()

    id_str = '{0}-{1}'.format(distro_id, release) if release else distro_id
    return id_str.replace(' ', '-').replace('/', '-')

View File

@@ -0,0 +1,107 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""OpenEmbedded variable typing support
Types are defined in the metadata by name, using the 'type' flag on a
variable. Other flags may be utilized in the construction of the types. See
the arguments of the type's factory for details.
"""
import inspect
import oe.types as types
from collections.abc import Callable
# Registry of type factories, keyed by type name; populated by register().
available_types = {}
class MissingFlag(TypeError):
    """A particular flag is required to construct the type, but has not been
    provided."""
    def __init__(self, flag, type):
        self.flag = flag
        self.type = type
        super().__init__()

    def __str__(self):
        return "Type '%s' requires flag '%s'" % (self.type, self.flag)
def factory(var_type):
    """Return the factory for a specified type.

    Raises TypeError when var_type is None or not a registered type name.
    """
    if var_type is None:
        raise TypeError("No type specified. Valid types: %s" %
                        ', '.join(available_types))
    try:
        return available_types[var_type]
    except KeyError:
        raise TypeError("Invalid type '%s':\n Valid types: %s" %
                        (var_type, ', '.join(available_types)))
def create(value, var_type, **flags):
    """Create an object of the specified type, given the specified flags and
    string value."""
    obj = factory(var_type)
    objflags = {}
    # Forward only the flags the factory understands; a required flag
    # (i.e. one not in optflags) must have been supplied.
    for flag in obj.flags:
        if flag in flags:
            objflags[flag] = flags[flag]
        elif flag not in obj.optflags:
            raise MissingFlag(flag, var_type)

    return obj(value, **objflags)
def get_callable_args(obj):
    """Grab all but the first argument of the specified callable, returning
    the list, as well as a list of which of the arguments have default
    values."""
    if type(obj) is type:
        # For a class, the interesting signature is the constructor's.
        obj = obj.__init__

    sig = inspect.signature(obj)
    args = list(sig.parameters)
    defaults = [name for name, param in sig.parameters.items()
                if param.default is not inspect.Parameter.empty]

    flaglist = []
    if args:
        if len(args) > 1 and args[0] == 'self':
            args = args[1:]
        flaglist.extend(args)

    optional = set()
    if defaults:
        # Defaulted parameters are always the trailing ones.
        optional |= set(flaglist[-len(defaults):])
    return flaglist, optional
def factory_setup(name, obj):
    """Prepare a factory for use."""
    args, optional = get_callable_args(obj)
    # The first argument receives the value itself; any further
    # arguments become the factory's flags.
    extra_args = args[1:]
    if extra_args:
        obj.flags = extra_args
        obj.optflags = set(optional)
    else:
        obj.flags = obj.optflags = ()

    if not hasattr(obj, 'name'):
        obj.name = name
def register(name, factory):
    """Register a type, given its name and a factory callable.

    Determines the required and optional flags from the factory's
    arguments."""
    factory_setup(name, factory)
    # Keyed on factory.name (which factory_setup may have just set to
    # `name`), so a factory can override the name it registers under.
    available_types[factory.name] = factory
# Register all our included types: every public callable exported by
# oe.types becomes a usable variable type.
for name in dir(types):
    if name.startswith('_'):
        continue
    obj = getattr(types, name)
    if not isinstance(obj, Callable):
        continue
    register(name, obj)

View File

@@ -0,0 +1,206 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from abc import ABCMeta, abstractmethod
import os
import re
import bb
class Manifest(object, metaclass=ABCMeta):
    """
    This is an abstract class. Do not instantiate this directly.
    """

    # Package-type tags used in the manifest file format (see the file
    # header template below for their meaning).
    PKG_TYPE_MUST_INSTALL = "mip"
    PKG_TYPE_MULTILIB = "mlp"
    PKG_TYPE_LANGUAGE = "lgp"
    PKG_TYPE_ATTEMPT_ONLY = "aop"

    # The three kinds of manifest this class can manage.
    MANIFEST_TYPE_IMAGE = "image"
    MANIFEST_TYPE_SDK_HOST = "sdk_host"
    MANIFEST_TYPE_SDK_TARGET = "sdk_target"

    # Per manifest type: which BitBake variable's package list is recorded
    # under which package-type tag.
    var_maps = {
        MANIFEST_TYPE_IMAGE: {
            "PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL,
            "PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY,
            "LINGUAS_INSTALL": PKG_TYPE_LANGUAGE
        },
        MANIFEST_TYPE_SDK_HOST: {
            "TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL,
            "TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
        },
        MANIFEST_TYPE_SDK_TARGET: {
            "TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL,
            "TOOLCHAIN_TARGET_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
        }
    }

    # Order in which the package groups are handed to the package manager.
    INSTALL_ORDER = [
        PKG_TYPE_LANGUAGE,
        PKG_TYPE_MUST_INSTALL,
        PKG_TYPE_ATTEMPT_ONLY,
        PKG_TYPE_MULTILIB
    ]

    initial_manifest_file_header = \
        "# This file was generated automatically and contains the packages\n" \
        "# passed on to the package manager in order to create the rootfs.\n\n" \
        "# Format:\n" \
        "# <package_type>,<package_name>\n" \
        "# where:\n" \
        "# <package_type> can be:\n" \
        "# 'mip' = must install package\n" \
        "# 'aop' = attempt only package\n" \
        "# 'mlp' = multilib package\n" \
        "# 'lgp' = language package\n\n"

    def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE):
        """
        :param d: the BitBake datastore
        :param manifest_dir: where manifest files are written; defaults to
            SDK_DIR for SDK manifests and WORKDIR for image manifests
        :param manifest_type: one of the MANIFEST_TYPE_* constants
        """
        self.d = d
        self.manifest_type = manifest_type

        if manifest_dir is None:
            if manifest_type != self.MANIFEST_TYPE_IMAGE:
                self.manifest_dir = self.d.getVar('SDK_DIR')
            else:
                self.manifest_dir = self.d.getVar('WORKDIR')
        else:
            self.manifest_dir = manifest_dir

        bb.utils.mkdirhier(self.manifest_dir)

        # The three manifest flavors this instance can produce.
        self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type)
        self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type)
        self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type)

        # packages in the following vars will be split in 'must install' and
        # 'multilib'
        self.vars_to_split = ["PACKAGE_INSTALL",
                              "TOOLCHAIN_HOST_TASK",
                              "TOOLCHAIN_TARGET_TASK"]

    """
    This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk).
    This will be used for testing until the class is implemented properly!
    """
    def _create_dummy_initial(self):
        image_rootfs = self.d.getVar('IMAGE_ROOTFS')
        pkg_list = dict()
        # NOTE(review): find(...) > 0 misses a match at position 0; in
        # practice the image name never starts the rootfs path, but
        # find(...) >= 0 would be the robust test.
        if image_rootfs.find("core-image-sato-sdk") > 0:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
                "packagegroup-core-x11-sato-games packagegroup-base-extended " \
                "packagegroup-core-x11-sato packagegroup-core-x11-base " \
                "packagegroup-core-sdk packagegroup-core-tools-debug " \
                "packagegroup-core-boot packagegroup-core-tools-testapps " \
                "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \
                "apt packagegroup-core-tools-profile psplash " \
                "packagegroup-core-standalone-sdk-target " \
                "packagegroup-core-ssh-openssh dpkg kernel-dev"
            pkg_list[self.PKG_TYPE_LANGUAGE] = \
                "locale-base-en-us locale-base-en-gb"
        elif image_rootfs.find("core-image-sato") > 0:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
                "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \
                "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \
                "packagegroup-core-x11-sato packagegroup-core-boot"
            pkg_list['lgp'] = \
                "locale-base-en-us locale-base-en-gb"
        elif image_rootfs.find("core-image-minimal") > 0:
            pkg_list[self.PKG_TYPE_MUST_INSTALL] = "packagegroup-core-boot"

        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for pkg_type in pkg_list:
                for pkg in pkg_list[pkg_type].split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    """
    This will create the initial manifest which will be used by Rootfs class to
    generate the rootfs
    """
    @abstractmethod
    def create_initial(self):
        pass

    """
    This creates the manifest after everything has been installed.
    """
    @abstractmethod
    def create_final(self):
        pass

    """
    This creates the manifest after the package in initial manifest has been
    dummy installed. It lists all *to be installed* packages. There is no real
    installation, just a test.
    """
    @abstractmethod
    def create_full(self, pm):
        pass

    """
    The following function parses an initial manifest and returns a dictionary
    object with the must install, attempt only, multilib and language packages.
    """
    def parse_initial_manifest(self):
        pkgs = dict()

        with open(self.initial_manifest) as manifest:
            for line in manifest.read().split('\n'):
                comment = re.match("^#.*", line)
                pattern = "^(%s|%s|%s|%s),(.*)$" % \
                    (self.PKG_TYPE_MUST_INSTALL,
                     self.PKG_TYPE_ATTEMPT_ONLY,
                     self.PKG_TYPE_MULTILIB,
                     self.PKG_TYPE_LANGUAGE)
                pkg = re.match(pattern, line)

                if comment is not None:
                    continue

                if pkg is not None:
                    pkg_type = pkg.group(1)
                    pkg_name = pkg.group(2)

                    if not pkg_type in pkgs:
                        pkgs[pkg_type] = [pkg_name]
                    else:
                        pkgs[pkg_type].append(pkg_name)

        return pkgs

    '''
    This following function parses a full manifest and return a list
    object with packages.
    '''
    def parse_full_manifest(self):
        installed_pkgs = list()
        if not os.path.exists(self.full_manifest):
            bb.note('full manifest not exist')
            return installed_pkgs

        with open(self.full_manifest, 'r') as manifest:
            # NOTE(review): blank lines (including the trailing one) are
            # appended as empty strings; callers apparently tolerate this.
            for pkg in manifest.read().split('\n'):
                installed_pkgs.append(pkg.strip())

        return installed_pkgs
def create_manifest(d, final_manifest=False, manifest_dir=None,
                    manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
    """Instantiate the IMAGE_PKGTYPE-specific PkgManifest and write either
    the final or the initial manifest."""
    import importlib
    backend = d.getVar('IMAGE_PKGTYPE')
    module = importlib.import_module('oe.package_manager.' + backend + '.manifest')
    manifest = module.PkgManifest(d, manifest_dir, manifest_type)
    if final_manifest:
        manifest.create_final()
    else:
        manifest.create_initial()

if __name__ == "__main__":
    pass

View File

@@ -0,0 +1,175 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
import bb
import json
import subprocess
# Characters never percent-encoded by uri_quote() (the npm-safe set).
_ALWAYS_SAFE = frozenset('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                         'abcdefghijklmnopqrstuvwxyz'
                         '0123456789'
                         '_.-~()')

# Sentinel marking manifest keys that may simply be absent.
MISSING_OK = object()

REGISTRY = "https://registry.npmjs.org"

# we can not use urllib.parse here because npm expects lowercase
# hex-chars but urllib generates uppercase ones
def uri_quote(s, safe = '/'):
    """Percent-encode s, leaving _ALWAYS_SAFE and `safe` characters as-is."""
    safe_set = set(safe)
    return "".join(c if c in _ALWAYS_SAFE or c in safe_set
                   else '%%%02x' % ord(c)
                   for c in s)
class PackageJson:
    """Read-only accessor around a parsed package.json dict."""

    def __init__(self, spec):
        self.__spec = spec

    @property
    def name(self):
        return self.__spec['name']

    @property
    def version(self):
        return self.__spec['version']

    @property
    def empty_manifest(self):
        """Skeleton registry manifest for this package with no versions."""
        return {
            'name': self.name,
            'description': self.__spec.get('description', ''),
            'versions': {},
        }

    def base_filename(self):
        return uri_quote(self.name, safe = '@')

    def as_manifest_entry(self, tarball_uri):
        """Build the per-version manifest entry that npm reads at install
        time, raising when a mandatory key is absent.

        NOTE: 'npm install' requires more than basic meta information;
        e.g. it takes 'bin' from this manifest entry but not the actual
        'package.json'.
        """
        res = {}

        for (idx, dflt) in [('name', None),
                            ('description', ""),
                            ('version', None),
                            ('bin', MISSING_OK),
                            ('man', MISSING_OK),
                            ('scripts', MISSING_OK),
                            ('directories', MISSING_OK),
                            ('dependencies', MISSING_OK),
                            ('devDependencies', MISSING_OK),
                            ('optionalDependencies', MISSING_OK),
                            ('license', "unknown")]:
            if idx in self.__spec:
                res[idx] = self.__spec[idx]
            # Fix: MISSING_OK is an identity sentinel; compare with
            # 'is' / 'is not' rather than '=='/'!='.
            elif dflt is MISSING_OK:
                # Optional key: omit entirely when absent.
                pass
            elif dflt is not None:
                res[idx] = dflt
            else:
                # Mandatory key with no usable default.
                raise Exception("%s-%s: missing key %s" % (self.name,
                                                           self.version,
                                                           idx))

        res['dist'] = {
            'tarball': tarball_uri,
        }

        return res
class ManifestImpl:
    """Load/save the JSON registry metadata file backing one package."""

    def __init__(self, base_fname, spec):
        self.__base = base_fname
        self.__spec = spec

    @property
    def filename(self):
        return self.__base + ".meta"

    def load(self):
        """Return the stored manifest, or an empty one when none exists."""
        try:
            with open(self.filename, "r") as f:
                return json.load(f)
        except IOError:
            return self.__spec.empty_manifest

    def save(self, meta):
        with open(self.filename, "w") as f:
            json.dump(meta, f, indent = 2)
class Manifest:
    """Context manager serializing access to one package's manifest
    through a bb.utils lock file."""
    def __init__(self, base_fname, spec):
        self.__base = base_fname
        self.__spec = spec
        self.__lockf = None
        self.__impl = None

    def __enter__(self):
        # The lock is held for the whole with-block, so concurrent tasks
        # cannot interleave read-modify-write cycles on the manifest.
        self.__lockf = bb.utils.lockfile(self.__base + ".lock")
        self.__impl = ManifestImpl(self.__base, self.__spec)
        return self.__impl

    def __exit__(self, exc_type, exc_val, exc_tb):
        bb.utils.unlockfile(self.__lockf)
class NpmCache:
    """Thin wrapper over the oe-npm-cache helper for a cacache directory."""
    def __init__(self, cache):
        self.__cache = cache

    @property
    def path(self):
        return self.__cache

    def run(self, type, key, fname):
        # Delegates to the external oe-npm-cache script; 'type' is the
        # cache entry kind (e.g. 'meta' or 'tgz') and a failure aborts
        # via check=True.
        subprocess.run(['oe-npm-cache', self.__cache, type, key, fname],
                       check = True)
class NpmRegistry:
    """Maintain a local npm registry layout: per-package manifests plus
    a nodejs cacache populated via NpmCache."""

    def __init__(self, path, cache):
        self.__path = path
        self.__cache = NpmCache(cache + '/_cacache')
        bb.utils.mkdirhier(self.__path)
        bb.utils.mkdirhier(self.__cache.path)

    @staticmethod
    ## This function is critical and must match nodejs expectations
    def _meta_uri(spec):
        return REGISTRY + '/' + uri_quote(spec.name, safe = '@')

    @staticmethod
    ## Exact return value does not matter; just make it look like a
    ## usual registry url
    def _tarball_uri(spec):
        return '%s/%s/-/%s-%s.tgz' % (REGISTRY,
                                      uri_quote(spec.name, safe = '@'),
                                      uri_quote(spec.name, safe = '@/'),
                                      spec.version)

    def add_pkg(self, tarball, pkg_json):
        """Register one tarball + its package.json under this registry
        and seed the matching cache entries."""
        # NOTE(review): 'os' is not imported anywhere in this file view,
        # which would make os.path.join a NameError; imported locally
        # as a fix.
        import os
        pkg_json = PackageJson(pkg_json)
        base = os.path.join(self.__path, pkg_json.base_filename())

        with Manifest(base, pkg_json) as manifest:
            meta = manifest.load()
            tarball_uri = self._tarball_uri(pkg_json)

            meta['versions'][pkg_json.version] = pkg_json.as_manifest_entry(tarball_uri)

            manifest.save(meta)

            ## Cache entries are a little bit dependent on the nodejs
            ## version; version specific cache implementation must
            ## mitigate differences
            self.__cache.run('meta', self._meta_uri(pkg_json), manifest.filename)
            self.__cache.run('tgz', tarball_uri, tarball)

View File

@@ -0,0 +1,54 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
# This file contains common functions for overlayfs and its QA check
# this function is based on https://github.com/systemd/systemd/blob/main/src/basic/unit-name.c
def escapeSystemdUnitName(path):
    """Escape a filesystem path into a systemd unit name.

    Mirrors systemd's unit_name_from_path(): leading/trailing '/' are
    stripped, remaining '/' become '-', and special characters are
    \\xNN-escaped (based on
    https://github.com/systemd/systemd/blob/main/src/basic/unit-name.c).
    """
    escapeMap = {
        '/': '-',
        '-': "\\x2d",
        # Fix: backslash is 0x5c; the previous "\\x5d" (']') escape both
        # mislabels the character and cannot round-trip.
        '\\': "\\x5c"
    }
    return "".join([escapeMap.get(c, c) for c in path.strip('/')])
def strForBash(s):
    """Double every backslash so the string survives bash quoting."""
    return ('\\' * 2).join(s.split('\\'))
def allOverlaysUnitName(d):
    """Name of the umbrella service grouping all of this recipe's overlays."""
    return "%s-overlays.service" % d.getVar('PN')
def mountUnitName(unit):
    """systemd .mount unit filename for the given mount point path."""
    return "%s.mount" % escapeSystemdUnitName(unit)
def helperUnitName(unit):
    """Name of the helper service that creates the overlay's upper dir."""
    return "%s-create-upper-dir.service" % escapeSystemdUnitName(unit)
def unitFileList(d):
    """Return every systemd unit file the overlayfs class will generate,
    validating the machine configuration along the way."""
    overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")
    if not overlayMountPoints:
        bb.fatal("A recipe uses overlayfs class but there is no OVERLAYFS_MOUNT_POINT set in your MACHINE configuration")

    # check that we have required mount points set first
    for mountPoint in d.getVarFlags('OVERLAYFS_WRITABLE_PATHS'):
        if mountPoint not in overlayMountPoints:
            bb.fatal("Missing required mount point for OVERLAYFS_MOUNT_POINT[%s] in your MACHINE configuration" % mountPoint)

    fileList = []
    for mountPoint in overlayMountPoints:
        mountPointList = d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint)
        if not mountPointList:
            bb.debug(1, "No mount points defined for %s flag, don't add to file list", mountPoint)
            continue
        # Each writable path gets a .mount unit plus its helper service.
        for path in mountPointList.split():
            fileList.append(mountUnitName(path))
            fileList.append(helperUnitName(path))

    fileList.append(allOverlaysUnitName(d))
    return fileList

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,563 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from abc import ABCMeta, abstractmethod
import os
import glob
import subprocess
import shutil
import re
import collections
import bb
import tempfile
import oe.utils
import oe.path
import string
from oe.gpg_sign import get_signer
import hashlib
import fnmatch
# this can be used by all PM backends to create the index files in parallel
def create_index(arg):
index_cmd = arg
bb.note("Executing '%s' ..." % index_cmd)
result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
if result:
bb.note(result)
def opkg_query(cmd_output):
    """
    Parse opkg/dpkg-style stanza output into a dict keyed by package name.
    Each value carries arch, version, filename, dependency list (with
    recommends tagged '[REC]'), provides and package arch. This is used
    when the packages are in deb or ipk format.
    """
    # Strips version constraints like " (>= 1.2)" from relation fields.
    verregex = re.compile(r' \([=<>]* [^ )]*\)')
    output = dict()
    pkg = arch = ver = filename = pkgarch = ""
    dep = []
    prov = []
    # The appended '' guarantees the final stanza gets flushed.
    for line in cmd_output.splitlines() + ['']:
        line = line.rstrip()
        if ':' in line:
            if line.startswith("Package: "):
                pkg = line.split(": ")[1]
            elif line.startswith("Architecture: "):
                arch = line.split(": ")[1]
            elif line.startswith("Version: "):
                ver = line.split(": ")[1]
            elif line.startswith("File: ") or line.startswith("Filename:"):
                filename = line.split(": ")[1]
                if "/" in filename:
                    filename = os.path.basename(filename)
            elif line.startswith("Depends: "):
                for depend in verregex.sub('', line.split(": ")[1]).split(", "):
                    dep.append(depend)
            elif line.startswith("Recommends: "):
                for recommend in verregex.sub('', line.split(": ")[1]).split(", "):
                    dep.append("%s [REC]" % recommend)
            elif line.startswith("PackageArch: "):
                pkgarch = line.split(": ")[1]
            elif line.startswith("Provides: "):
                for provide in verregex.sub('', line.split(": ")[1]).split(", "):
                    prov.append(provide)
        # When there is a blank line save the package information
        elif not line:
            # IPK doesn't include the filename
            if not filename:
                filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
            if pkg:
                output[pkg] = {"arch":arch, "ver":ver,
                               "filename":filename, "deps": dep,
                               "pkgarch":pkgarch, "provs": prov}
            pkg = arch = ver = filename = pkgarch = ""
            dep = []
            prov = []

    return output
def failed_postinsts_abort(pkgs, log_path):
    """Abort the build because host-side postinstall scriptlets failed.

    pkgs names the failing package(s); log_path points at the log with
    the failure details.
    """
    bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
then please place them into pkg_postinst_ontarget:${PN} ().
Deferring to first boot via 'exit 1' is no longer supported.
Details of the failure are in %s.""" %(pkgs, log_path))
def generate_locale_archive(d, rootfs, target_arch, localedir):
    """Build a glibc locale-archive in localedir by feeding every locale
    subdirectory to cross-localedef with the arch-appropriate options."""
    # Pretty sure we don't need this for locale archive generation but
    # keeping it to be safe...
    # Every supported arch uses --uint32-align=4; only endianness varies.
    little_endian_arches = ["arc", "arm", "aarch64", "powerpc64le",
                            "mipsel", "mipsisa32r6el", "mips64el",
                            "mipsisa64r6el", "riscv64", "riscv32",
                            "i586", "i686", "x86_64", "loongarch64"]
    big_endian_arches = ["arceb", "armeb", "aarch64_be", "sh4",
                         "powerpc", "powerpc64", "mips", "mipsisa32r6",
                         "mips64", "mipsisa64r6"]
    locale_arch_options = {}
    for a in little_endian_arches:
        locale_arch_options[a] = ["--uint32-align=4", "--little-endian"]
    for a in big_endian_arches:
        locale_arch_options[a] = ["--uint32-align=4", "--big-endian"]

    if target_arch not in locale_arch_options:
        bb.error("locale_arch_options not found for target_arch=" + target_arch)
        bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
    arch_options = locale_arch_options[target_arch]

    # Need to set this so cross-localedef knows where the archive is
    env = dict(os.environ)
    env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")

    for name in sorted(os.listdir(localedir)):
        path = os.path.join(localedir, name)
        if os.path.isdir(path):
            cmd = ["cross-localedef", "--verbose"]
            cmd += arch_options
            cmd += ["--add-to-archive", path]
            subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
class Indexer(object, metaclass=ABCMeta):
    """Abstract base for backend-specific package-index writers."""
    def __init__(self, d, deploy_dir):
        self.d = d
        # Directory containing the deployed packages to be indexed.
        self.deploy_dir = deploy_dir

    @abstractmethod
    def write_index(self):
        """Generate the package index files in deploy_dir."""
        pass
class PkgsList(object, metaclass=ABCMeta):
    """Abstract base for listing the packages installed in a rootfs."""
    def __init__(self, d, rootfs_dir):
        self.d = d
        self.rootfs_dir = rootfs_dir

    @abstractmethod
    def list_pkgs(self):
        """Return the installed packages of rootfs_dir."""
        pass
class PackageManager(object, metaclass=ABCMeta):
"""
This is an abstract class. Do not instantiate this directly.
"""
def __init__(self, d, target_rootfs):
    """
    :param d: the BitBake datastore
    :param target_rootfs: path of the rootfs this manager operates on
    """
    self.d = d
    self.target_rootfs = target_rootfs
    # Backend-specific deploy dir/lock; set up by the subclasses.
    self.deploy_dir = None
    self.deploy_lock = None
    self._initialize_intercepts()
def _initialize_intercepts(self):
bb.note("Initializing intercept dir for %s" % self.target_rootfs)
# As there might be more than one instance of PackageManager operating at the same time
# we need to isolate the intercept_scripts directories from each other,
# hence the ugly hash digest in dir name.
self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" %
(hashlib.sha256(self.target_rootfs.encode()).hexdigest()))
postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split()
if not postinst_intercepts:
postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH")
if not postinst_intercepts_path:
postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts")
postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path)
bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts))
bb.utils.remove(self.intercepts_dir, True)
bb.utils.mkdirhier(self.intercepts_dir)
for intercept in postinst_intercepts:
shutil.copy(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
@abstractmethod
def _handle_intercept_failure(self, failed_script):
pass
def _postpone_to_first_boot(self, postinst_intercept_hook):
with open(postinst_intercept_hook) as intercept:
registered_pkgs = None
for line in intercept.read().split("\n"):
m = re.match(r"^##PKGS:(.*)", line)
if m is not None:
registered_pkgs = m.group(1).strip()
break
if registered_pkgs is not None:
bb.note("If an image is being built, the postinstalls for the following packages "
"will be postponed for first boot: %s" %
registered_pkgs)
# call the backend dependent handler
self._handle_intercept_failure(registered_pkgs)
def run_intercepts(self, populate_sdk=None):
intercepts_dir = self.intercepts_dir
bb.note("Running intercept scripts:")
os.environ['D'] = self.target_rootfs
os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
for script in os.listdir(intercepts_dir):
script_full = os.path.join(intercepts_dir, script)
if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
continue
# we do not want to run any multilib variant of this
if script.startswith("delay_to_first_boot"):
self._postpone_to_first_boot(script_full)
continue
if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
% (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
continue
bb.note("> Executing %s intercept ..." % script)
try:
output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
if output: bb.note(output.decode("utf-8"))
except subprocess.CalledProcessError as e:
bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
if populate_sdk == 'host':
bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
elif populate_sdk == 'target':
if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
% (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
else:
bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
else:
if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
% (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
self._postpone_to_first_boot(script_full)
else:
bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
@abstractmethod
def update(self):
"""
Update the package manager package database.
"""
pass
@abstractmethod
def install(self, pkgs, attempt_only=False, hard_depends_only=False):
"""
Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
True, installation failures are ignored.
"""
pass
@abstractmethod
def remove(self, pkgs, with_dependencies=True):
"""
Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
is False, then any dependencies are left in place.
"""
pass
@abstractmethod
def write_index(self):
"""
This function creates the index files
"""
pass
@abstractmethod
def remove_packaging_data(self):
pass
@abstractmethod
def list_installed(self):
pass
@abstractmethod
def extract(self, pkg):
"""
Returns the path to a tmpdir where resides the contents of a package.
Deleting the tmpdir is responsability of the caller.
"""
pass
@abstractmethod
def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
"""
Add remote package feeds into repository manager configuration. The parameters
for the feeds are set by feed_uris, feed_base_paths and feed_archs.
See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
for their description.
"""
pass
def install_glob(self, globs, sdk=False):
"""
Install all packages that match a glob.
"""
# TODO don't have sdk here but have a property on the superclass
# (and respect in install_complementary)
if sdk:
pkgdatadir = self.d.getVar("PKGDATA_DIR_SDK")
else:
pkgdatadir = self.d.getVar("PKGDATA_DIR")
try:
bb.note("Installing globbed packages...")
cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
bb.note('Running %s' % cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if stderr: bb.note(stderr.decode("utf-8"))
pkgs = stdout.decode("utf-8")
self.install(pkgs.split(), attempt_only=True)
except subprocess.CalledProcessError as e:
# Return code 1 means no packages matched
if e.returncode != 1:
bb.fatal("Could not compute globbed packages list. Command "
"'%s' returned %d:\n%s" %
(' '.join(cmd), e.returncode, e.output.decode("utf-8")))
def install_complementary(self, globs=None):
"""
Install complementary packages based upon the list of currently installed
packages e.g. locales, *-dev, *-dbg, etc. Note: every backend needs to
call this function explicitly after the normal package installation.
"""
if globs is None:
globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
split_linguas = set()
for translation in self.d.getVar('IMAGE_LINGUAS').split():
split_linguas.add(translation)
split_linguas.add(translation.split('-')[0])
split_linguas = sorted(split_linguas)
for lang in split_linguas:
globs += " *-locale-%s" % lang
for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split():
globs += (" " + complementary_linguas) % lang
if globs:
# we need to write the list of installed packages to a file because the
# oe-pkgdata-util reads it from a file
with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
pkgs = self.list_installed()
provided_pkgs = set()
for pkg in pkgs.values():
provided_pkgs |= set(pkg.get('provs', []))
output = oe.utils.format_pkg_list(pkgs, "arch")
installed_pkgs.write(output)
installed_pkgs.flush()
cmd = ["oe-pkgdata-util",
"-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
globs]
exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
if exclude:
cmd.extend(['--exclude=' + '|'.join(exclude.split())])
try:
bb.note('Running %s' % cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if stderr: bb.note(stderr.decode("utf-8"))
complementary_pkgs = stdout.decode("utf-8")
complementary_pkgs = set(complementary_pkgs.split())
skip_pkgs = sorted(complementary_pkgs & provided_pkgs)
install_pkgs = sorted(complementary_pkgs - provided_pkgs)
bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % (
' '.join(install_pkgs),
' '.join(skip_pkgs)))
self.install(install_pkgs, hard_depends_only=True)
except subprocess.CalledProcessError as e:
bb.fatal("Could not compute complementary packages list. Command "
"'%s' returned %d:\n%s" %
(' '.join(cmd), e.returncode, e.output.decode("utf-8")))
if self.d.getVar('IMAGE_LOCALES_ARCHIVE') == '1':
target_arch = self.d.getVar('TARGET_ARCH')
localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
if os.path.exists(localedir) and os.listdir(localedir):
generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
# And now delete the binary locales
self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
def deploy_dir_lock(self):
if self.deploy_dir is None:
raise RuntimeError("deploy_dir is not set!")
lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
self.deploy_lock = bb.utils.lockfile(lock_file_name)
def deploy_dir_unlock(self):
if self.deploy_lock is None:
return
bb.utils.unlockfile(self.deploy_lock)
self.deploy_lock = None
def construct_uris(self, uris, base_paths):
"""
Construct URIs based on the following pattern: uri/base_path where 'uri'
and 'base_path' correspond to each element of the corresponding array
argument leading to len(uris) x len(base_paths) elements on the returned
array
"""
def _append(arr1, arr2, sep='/'):
res = []
narr1 = [a.rstrip(sep) for a in arr1]
narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
for a1 in narr1:
if arr2:
for a2 in narr2:
res.append("%s%s%s" % (a1, sep, a2))
else:
res.append(a1)
return res
return _append(uris, base_paths)
def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies, include_self=False):
    """
    Go through our do_package_write_X dependencies and hardlink the packages we depend
    upon into the repo directory. This prevents us seeing other packages that may
    have been built that we don't depend upon and also packages for architectures we don't
    support.

    d: BitBake datastore; subrepo_dir: destination repo (recreated from scratch);
    deploydir: DEPLOY_DIR_<pkgtype> source; taskname: e.g. "package_write_deb";
    filterbydependencies: when False, just symlink the whole per-arch deploy dirs;
    include_self: also collect this PN's own do_<taskname> output.
    """
    import errno

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    mytaskname = d.getVar("BB_RUNTASK")
    pn = d.getVar("PN")
    seendirs = set()
    multilibs = {}

    bb.utils.remove(subrepo_dir, recurse=True)
    bb.utils.mkdirhier(subrepo_dir)

    # Detect bitbake -b usage
    nodeps = d.getVar("BB_LIMITEDDEPS") or False
    if nodeps or not filterbydependencies:
        # No dependency filtering possible/wanted: expose every per-arch
        # deploy directory (both "-" and "_" arch spellings) via symlinks.
        for arch in d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").split() + d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").replace("-", "_").split():
            target = os.path.join(deploydir + "/" + arch)
            if os.path.exists(target):
                oe.path.symlink(target, subrepo_dir + "/" + arch, True)
        return

    # Locate our own task in BB_TASKDEPDATA; entries look like
    # dep -> (pn, taskname, fn, deps, ...).
    start = None
    for dep in taskdepdata:
        data = taskdepdata[dep]
        if data[1] == mytaskname and data[0] == pn:
            start = dep
            break
    if start is None:
        bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
    pkgdeps = set()
    start = [start]
    if include_self:
        seen = set()
    else:
        seen = set(start)
    # Support direct dependencies (do_rootfs -> do_package_write_X)
    # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X)
    while start:
        next = []
        for dep2 in start:
            for dep in taskdepdata[dep2][3]:
                if include_self or taskdepdata[dep][0] != pn:
                    # Dependency outside our PN: collect its package-write task.
                    if "do_" + taskname in dep:
                        pkgdeps.add(dep)
                elif dep not in seen:
                    # Same PN: keep walking to find indirect package-write tasks.
                    next.append(dep)
                    seen.add(dep)
        start = next

    for dep in pkgdeps:
        c = taskdepdata[dep][0]
        manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
        if not manifest:
            bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
        if not os.path.exists(manifest):
            continue
        # Each manifest line is a path deployed by the dependency; mirror it
        # into subrepo_dir, preserving the layout below deploydir.
        with open(manifest, "r") as f:
            for l in f:
                l = l.strip()
                deploydir = os.path.normpath(deploydir)
                if bb.data.inherits_class('packagefeed-stability', d):
                    dest = l.replace(deploydir + "-prediff", "")
                else:
                    dest = l.replace(deploydir, "")
                dest = subrepo_dir + dest
                if l.endswith("/"):
                    if dest not in seendirs:
                        bb.utils.mkdirhier(dest)
                        seendirs.add(dest)
                    continue
                # Try to hardlink the file, copy if that fails
                destdir = os.path.dirname(dest)
                if destdir not in seendirs:
                    bb.utils.mkdirhier(destdir)
                    seendirs.add(destdir)
                try:
                    os.link(l, dest)
                except OSError as err:
                    if err.errno == errno.EXDEV:
                        # Cross-device link: fall back to copying.
                        bb.utils.copyfile(l, dest)
                    else:
                        raise
def generate_index_files(d):
    """Write package-feed indexes for every enabled PACKAGE_CLASSES backend.

    For each backend (rpm/ipk/deb) whose deploy directory exists, run the
    matching Indexer; abort the build if the indexer reports an error.
    """
    from oe.package_manager.rpm import RpmSubdirIndexer
    from oe.package_manager.ipk import OpkgIndexer
    from oe.package_manager.deb import DpkgIndexer

    # PACKAGE_CLASSES entries look like "package_rpm"; strip the prefix.
    classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()

    indexer_map = {
        "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
        "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
        "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
    }

    result = None

    for pkg_class in classes:
        if pkg_class not in indexer_map:
            continue

        indexer_cls, deploy_dir = indexer_map[pkg_class]
        if os.path.exists(deploy_dir):
            result = indexer_cls(d, deploy_dir).write_index()

            if result is not None:
                bb.fatal(result)

View File

@@ -0,0 +1,522 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import re
import subprocess
from oe.package_manager import *
class DpkgIndexer(Indexer):
    """Generates apt package indexes (Packages, Packages.gz, Release)
    for each architecture subdirectory of deploy_dir using apt-ftparchive."""

    def _create_configs(self):
        """Create a minimal apt configuration tree for apt-ftparchive:
        empty preferences/sources.list plus an apt.conf derived from the
        staged apt.conf.sample template."""
        bb.utils.mkdirhier(self.apt_conf_dir)
        bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
        bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
        bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))

        # apt expects these files to exist even if empty.
        with open(os.path.join(self.apt_conf_dir, "preferences"),
                "w") as prefs_file:
            pass
        with open(os.path.join(self.apt_conf_dir, "sources.list"),
                "w+") as sources_file:
            pass

        with open(self.apt_conf_file, "w") as apt_conf:
            with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
                "apt", "apt.conf.sample")) as apt_conf_sample:
                for line in apt_conf_sample.read().split("\n"):
                    # Indexing works against the deploy dir, not a rootfs.
                    line = re.sub(r"#ROOTFS#", "/dev/null", line)
                    line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
                    apt_conf.write(line + "\n")

    def write_index(self):
        """Run apt-ftparchive per architecture directory (in parallel) and
        optionally detach-sign the generated Release files."""
        self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
                "apt-ftparchive")
        self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")

        self._create_configs()

        os.environ['APT_CONFIG'] = self.apt_conf_file

        # NOTE(review): if PACKAGE_ARCHS is unset, arch_list is never bound
        # and the .append/.extend below raise NameError — confirm PACKAGE_ARCHS
        # is always defined in practice.
        pkg_archs = self.d.getVar('PACKAGE_ARCHS')
        if pkg_archs is not None:
            arch_list = pkg_archs.split()
        sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
        if sdk_pkg_archs is not None:
            for a in sdk_pkg_archs.split():
                if a not in pkg_archs:
                    arch_list.append(a)

        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
        arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)

        apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
        gzip = bb.utils.which(os.getenv('PATH'), "gzip")

        index_cmds = []
        deb_dirs_found = False
        index_sign_files = set()
        for arch in arch_list:
            arch_dir = os.path.join(self.deploy_dir, arch)
            if not os.path.isdir(arch_dir):
                continue

            # Build one shell pipeline per arch dir:
            # Packages -> Packages.gz -> Release (appended).
            cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)

            cmd += "%s -fcn Packages > Packages.gz;" % gzip

            release_file = os.path.join(arch_dir, "Release")
            index_sign_files.add(release_file)

            # Seed the Release file with a Label; apt-ftparchive appends to it.
            with open(release_file, "w+") as release:
                release.write("Label: %s\n" % arch)

            cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive

            index_cmds.append(cmd)

            deb_dirs_found = True

        if not deb_dirs_found:
            bb.note("There are no packages in %s" % self.deploy_dir)
            return

        # Index all arch directories in parallel.
        oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
        if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
            signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
        else:
            signer = None
        if signer:
            for f in index_sign_files:
                signer.detach_sign(f,
                                   self.d.getVar('PACKAGE_FEED_GPG_NAME'),
                                   self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
                                   output_suffix="gpg",
                                   use_sha256=True)
class PMPkgsList(PkgsList):
    """Lists the packages installed in a rootfs by querying dpkg's database."""

    def list_pkgs(self):
        """Run dpkg-query against the rootfs's dpkg admindir and parse the output."""
        dpkg_query = bb.utils.which(os.getenv('PATH'), "dpkg-query")
        query_format = "-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\nProvides: ${Provides}\n\n"
        cmd = [
            dpkg_query,
            "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
            "-W",
            query_format,
        ]

        try:
            raw_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            cmd_output = raw_output.strip().decode("utf-8")
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))

        return opkg_query(cmd_output)
class OpkgDpkgPM(PackageManager):
    """Shared functionality for the opkg and dpkg backends (both use
    ar-container packages with an embedded data tarball)."""

    def __init__(self, d, target_rootfs):
        """
        This is an abstract class. Do not instantiate this directly.
        """
        super(OpkgDpkgPM, self).__init__(d, target_rootfs)

    def package_info(self, pkg, cmd):
        """
        Returns a dictionary with the package info.

        This method extracts the common parts for Opkg and Dpkg
        """

        try:
            # cmd is a full shell command string assembled by the subclass,
            # hence shell=True.
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to list available packages. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
        return opkg_query(output)

    def extract(self, pkg, pkg_info):
        """
        Returns the path to a tmpdir where resides the contents of a package.

        Deleting the tmpdir is responsability of the caller.

        This method extracts the common parts for Opkg and Dpkg
        """

        ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
        tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
        pkg_path = pkg_info[pkg]["filepath"]

        if not os.path.isfile(pkg_path):
            bb.fatal("Unable to extract package for '%s'."
                     "File %s doesn't exists" % (pkg, pkg_path))

        tmp_dir = tempfile.mkdtemp()
        current_dir = os.getcwd()
        os.chdir(tmp_dir)
        # NOTE(review): assumes the package payload is data.tar.xz; packages
        # with a different compression (e.g. .zst) would fail here — confirm.
        data_tar = 'data.tar.xz'

        try:
            # "ar x" unpacks the package container, then the payload tarball.
            cmd = [ar_cmd, 'x', pkg_path]
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            cmd = [tar_cmd, 'xf', data_tar]
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.utils.remove(tmp_dir, recurse=True)
            bb.fatal("Unable to extract %s package. Command '%s' "
                     "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
        except OSError as e:
            bb.utils.remove(tmp_dir, recurse=True)
            bb.fatal("Unable to extract %s package. Command '%s' "
                     "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))

        bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
        # Drop the container metadata; only the payload is of interest.
        bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
        bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
        os.chdir(current_dir)

        return tmp_dir

    def _handle_intercept_failure(self, registered_pkgs):
        # Mark the affected packages "unpacked" so their scriptlets re-run
        # on first boot.
        self.mark_packages("unpacked", registered_pkgs.split())
class DpkgPM(OpkgDpkgPM):
    """apt/dpkg-based package manager backend operating on an offline
    target rootfs, fed from a work-dir deb repository."""

    def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True):
        """
        d: BitBake datastore; target_rootfs: rootfs to install into;
        archs: space-separated package architectures; base_archs: dpkg base
        architecture(s); apt_conf_dir: override for the apt config location;
        deb_repo_workdir: WORKDIR-relative repo dir; filterbydependencies:
        restrict the repo to our task dependencies (see create_packages_dir).
        """
        super(DpkgPM, self).__init__(d, target_rootfs)
        self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir)

        create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies)

        if apt_conf_dir is None:
            self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
        else:
            self.apt_conf_dir = apt_conf_dir
        self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
        self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
        self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")

        self.apt_args = d.getVar("APT_ARGS")

        self.all_arch_list = archs.split()
        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
        self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)

        self._create_configs(archs, base_archs)

        self.indexer = DpkgIndexer(self.d, self.deploy_dir)

    def mark_packages(self, status_tag, packages=None):
        """
        This function will change a package's status in /var/lib/dpkg/status file.
        If 'packages' is None then the new_status will be applied to all
        packages
        """
        status_file = self.target_rootfs + "/var/lib/dpkg/status"

        with open(status_file, "r") as sf:
            with open(status_file + ".tmp", "w+") as tmp_sf:
                if packages is None:
                    # Rewrite the trailing "unpacked"/"installed" word of every
                    # Status: line to status_tag.
                    tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                                        r"Package: \1\n\2Status: \3%s" % status_tag,
                                        sf.read()))
                else:
                    if type(packages).__name__ != "list":
                        raise TypeError("'packages' should be a list object")

                    status = sf.read()
                    for pkg in packages:
                        status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
                                        r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
                                        status)

                    tmp_sf.write(status)

        bb.utils.rename(status_file + ".tmp", status_file)

    def run_pre_post_installs(self, package_name=None):
        """
        Run the pre/post installs for package "package_name". If package_name is
        None, then run all pre/post install scriptlets.
        """
        info_dir = self.target_rootfs + "/var/lib/dpkg/info"
        ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
        control_scripts = [
                ControlScript(".preinst", "Preinstall", "install"),
                ControlScript(".postinst", "Postinstall", "configure")]
        status_file = self.target_rootfs + "/var/lib/dpkg/status"
        installed_pkgs = []

        with open(status_file, "r") as status:
            for line in status.read().split('\n'):
                m = re.match(r"^Package: (.*)", line)
                if m is not None:
                    installed_pkgs.append(m.group(1))

        if package_name is not None and not package_name in installed_pkgs:
            return

        # Scriptlets run against the offline rootfs via these env vars.
        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['INTERCEPT_DIR'] = self.intercepts_dir
        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')

        # NOTE(review): even when package_name is given, scriptlets for ALL
        # installed packages are executed below — confirm that is intended.
        for pkg_name in installed_pkgs:
            for control_script in control_scripts:
                p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
                if os.path.exists(p_full):
                    try:
                        bb.note("Executing %s for package: %s ..." %
                                 (control_script.name.lower(), pkg_name))
                        output = subprocess.check_output([p_full, control_script.argument],
                                stderr=subprocess.STDOUT).decode("utf-8")
                        bb.note(output)
                    except subprocess.CalledProcessError as e:
                        bb.warn("%s for package %s failed with %d:\n%s" %
                                (control_script.name, pkg_name, e.returncode,
                                 e.output.decode("utf-8")))
                        failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))

    def update(self):
        """Refresh apt's package index from the local repo (under the deploy lock)."""
        os.environ['APT_CONFIG'] = self.apt_conf_file

        self.deploy_dir_lock()

        cmd = "%s update" % self.apt_get_cmd

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to update the package index files. Command '%s' "
                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))

        self.deploy_dir_unlock()

    def install(self, pkgs, attempt_only=False, hard_depends_only=False):
        """Install pkgs via apt-get; failures are warnings when attempt_only,
        fatal otherwise. hard_depends_only skips Recommends."""
        if attempt_only and len(pkgs) == 0:
            return

        os.environ['APT_CONFIG'] = self.apt_conf_file

        extra_args = ""
        if hard_depends_only:
            extra_args = "--no-install-recommends"

        cmd = "%s %s install --allow-downgrades --allow-remove-essential --allow-change-held-packages --allow-unauthenticated --no-remove %s %s" % \
              (self.apt_get_cmd, self.apt_args, extra_args, ' '.join(pkgs))

        try:
            bb.note("Installing the following packages: %s" % ' '.join(pkgs))
            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
            bb.note(output.decode("utf-8"))
        except subprocess.CalledProcessError as e:
            # Index-by-bool trick: fatal when attempt_only is False, warn when True.
            (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
                                              "Command '%s' returned %d:\n%s" %
                                              (cmd, e.returncode, e.output.decode("utf-8")))

        # rename *.dpkg-new files/dirs
        for root, dirs, files in os.walk(self.target_rootfs):
            for dir in dirs:
                new_dir = re.sub(r"\.dpkg-new", "", dir)
                if dir != new_dir:
                    bb.utils.rename(os.path.join(root, dir),
                              os.path.join(root, new_dir))

            for file in files:
                new_file = re.sub(r"\.dpkg-new", "", file)
                if file != new_file:
                    bb.utils.rename(os.path.join(root, file),
                              os.path.join(root, new_file))

    def remove(self, pkgs, with_dependencies=True):
        """Remove pkgs: apt-get purge when following dependencies, otherwise
        a forced dpkg -P against the offline rootfs."""
        if not pkgs:
            return

        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['INTERCEPT_DIR'] = self.intercepts_dir

        if with_dependencies:
            os.environ['APT_CONFIG'] = self.apt_conf_file
            cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
        else:
            cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
                  " -P --force-depends %s" % \
                  (bb.utils.which(os.getenv('PATH'), "dpkg"),
                   self.target_rootfs, self.target_rootfs, ' '.join(pkgs))

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to remove packages. Command '%s' "
                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))

    def write_index(self):
        """Regenerate the local repo's apt indexes under the deploy lock."""
        self.deploy_dir_lock()

        result = self.indexer.write_index()

        self.deploy_dir_unlock()

        if result is not None:
            bb.fatal(result)

    def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
        """Write PACKAGE_FEED_URIS-derived apt sources into the target's
        /etc/apt/sources.list (overwriting it)."""
        if feed_uris == "":
            return

        sources_conf = os.path.join("%s/etc/apt/sources.list"
                                    % self.target_rootfs)
        if not os.path.exists(os.path.dirname(sources_conf)):
            return

        arch_list = []

        if feed_archs is None:
            # No explicit archs: advertise every arch we actually deployed.
            for arch in self.all_arch_list:
                if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                    continue
                arch_list.append(arch)
        else:
            arch_list = feed_archs.split()

        feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())

        with open(sources_conf, "w+") as sources_file:
            for uri in feed_uris:
                if arch_list:
                    for arch in arch_list:
                        bb.note('Adding dpkg channel at (%s)' % uri)
                        sources_file.write("deb [trusted=yes] %s/%s ./\n" %
                                           (uri, arch))
                else:
                    bb.note('Adding dpkg channel at (%s)' % uri)
                    sources_file.write("deb [trusted=yes] %s ./\n" % uri)

    def _create_configs(self, archs, base_archs):
        """Build the apt configuration (preferences, sources.list, apt.conf)
        and seed an empty dpkg database in the target rootfs."""
        base_archs = re.sub(r"_", r"-", base_archs)

        if os.path.exists(self.apt_conf_dir):
            bb.utils.remove(self.apt_conf_dir, True)

        bb.utils.mkdirhier(self.apt_conf_dir)
        bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
        bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
        bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")

        arch_list = []
        for arch in self.all_arch_list:
            if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                continue
            arch_list.append(arch)

        with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
            # Later (more specific) archs get a higher pin priority.
            priority = 801
            for arch in arch_list:
                prefs_file.write(
                    "Package: *\n"
                    "Pin: release l=%s\n"
                    "Pin-Priority: %d\n\n" % (arch, priority))

                priority += 5

            # PACKAGE_EXCLUDE entries are pinned out entirely.
            pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
            for pkg in pkg_exclude.split():
                prefs_file.write(
                    "Package: %s\n"
                    "Pin: release *\n"
                    "Pin-Priority: -1\n\n" % pkg)

        arch_list.reverse()

        with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
            for arch in arch_list:
                sources_file.write("deb [trusted=yes] file:%s/ ./\n" %
                                   os.path.join(self.deploy_dir, arch))

        base_arch_list = base_archs.split()
        multilib_variants = self.d.getVar("MULTILIB_VARIANTS");
        for variant in multilib_variants.split():
            # Re-evaluate DPKG_ARCH under the variant's DEFAULTTUNE to pick
            # up each multilib's architecture.
            localdata = bb.data.createCopy(self.d)
            variant_tune = localdata.getVar("DEFAULTTUNE:virtclass-multilib-" + variant, False)
            # NOTE(review): orig_arch is never used below — confirm it can go.
            orig_arch = localdata.getVar("DPKG_ARCH")
            localdata.setVar("DEFAULTTUNE", variant_tune)
            variant_arch = localdata.getVar("DPKG_ARCH")
            if variant_arch not in base_arch_list:
                base_arch_list.append(variant_arch)

        with open(self.apt_conf_file, "w+") as apt_conf:
            with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
                for line in apt_conf_sample.read().split("\n"):
                    match_arch = re.match(r"  Architecture \".*\";$", line)
                    architectures = ""
                    if match_arch:
                        # Replace the sample's single-arch line with the full
                        # Architectures list plus the base Architecture.
                        for base_arch in base_arch_list:
                            architectures += "\"%s\";" % base_arch
                        apt_conf.write("  Architectures {%s};\n" % architectures);
                        apt_conf.write("  Architecture \"%s\";\n" % base_archs)
                    else:
                        line = re.sub(r"#ROOTFS#", self.target_rootfs, line)
                        line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
                        apt_conf.write(line + "\n")

        target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
        bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
        bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))

        # dpkg refuses to operate without these files existing.
        if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
            open(os.path.join(target_dpkg_dir, "status"), "w+").close()
        if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
            open(os.path.join(target_dpkg_dir, "available"), "w+").close()

    def remove_packaging_data(self):
        """Strip opkg and dpkg metadata from the finished rootfs."""
        bb.utils.remove(self.target_rootfs + self.d.getVar('opkglibdir'), True)
        bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)

    def fix_broken_dependencies(self):
        """Run 'apt-get -f install' to repair a broken dependency state."""
        os.environ['APT_CONFIG'] = self.apt_conf_file

        cmd = "%s %s --allow-unauthenticated -f install" % (self.apt_get_cmd, self.apt_args)

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.fatal("Cannot fix broken dependencies. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))

    def list_installed(self):
        """Return the installed packages as reported by dpkg-query."""
        return PMPkgsList(self.d, self.target_rootfs).list_pkgs()

    def package_info(self, pkg):
        """
        Returns a dictionary with the package info.
        """
        cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
        pkg_info = super(DpkgPM, self).package_info(pkg, cmd)

        # Resolve the on-disk location of the .deb inside our local repo.
        pkg_arch = pkg_info[pkg]["pkgarch"]
        pkg_filename = pkg_info[pkg]["filename"]
        pkg_info[pkg]["filepath"] = \
                os.path.join(self.deploy_dir, pkg_arch, pkg_filename)

        return pkg_info

    def extract(self, pkg):
        """
        Returns the path to a tmpdir where resides the contents of a package.

        Deleting the tmpdir is responsability of the caller.
        """
        pkg_info = self.package_info(pkg)
        if not pkg_info:
            bb.fatal("Unable to get information for package '%s' while "
                     "trying to extract the package." % pkg)

        tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info)
        bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))

        return tmp_dir

View File

@@ -0,0 +1,28 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from oe.manifest import Manifest
class PkgManifest(Manifest):
def create_initial(self):
with open(self.initial_manifest, "w+") as manifest:
manifest.write(self.initial_manifest_file_header)
for var in self.var_maps[self.manifest_type]:
pkg_list = self.d.getVar(var)
if pkg_list is None:
continue
for pkg in pkg_list.split():
manifest.write("%s,%s\n" %
(self.var_maps[self.manifest_type][var], pkg))
def create_final(self):
pass
def create_full(self, pm):
pass

View File

@@ -0,0 +1,212 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import re
import shutil
from oe.rootfs import Rootfs
from oe.manifest import Manifest
from oe.utils import execute_pre_post_process
from oe.package_manager.deb.manifest import PkgManifest
from oe.package_manager.deb import DpkgPM
class DpkgOpkgRootfs(Rootfs):
    """Shared rootfs logic for the dpkg and opkg backends: finding packages
    whose postinsts must be delayed and saving them for first boot."""

    def __init__(self, d, progress_reporter=None, logcatcher=None):
        super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)

    def _get_pkgs_postinsts(self, status_file):
        """Parse the dpkg/opkg status file and return a dict mapping each
        package still in 'unpacked' state (postinst not yet run) to the list
        of its dependencies that are also in that state."""
        def _get_pkg_depends_list(pkg_depends):
            pkg_depends_list = []
            # filter version requirements like libc (>= 1.1)
            for dep in pkg_depends.split(', '):
                m_dep = re.match(r"^(.*) \(.*\)$", dep)
                if m_dep:
                    dep = m_dep.group(1)
                pkg_depends_list.append(dep)

            return pkg_depends_list

        pkgs = {}
        pkg_name = ""
        pkg_status_match = False
        pkg_depends = ""

        with open(status_file) as status:
            data = status.read()
            status.close()
            for line in data.split('\n'):
                m_pkg = re.match(r"^Package: (.*)", line)
                m_status = re.match(r"^Status:.*unpacked", line)
                m_depends = re.match(r"^Depends: (.*)", line)

                #Only one of m_pkg, m_status or m_depends is not None at time
                #If m_pkg is not None, we started a new package
                if m_pkg is not None:
                    #Get Package name
                    pkg_name = m_pkg.group(1)
                    #Make sure we reset other variables
                    pkg_status_match = False
                    pkg_depends = ""
                elif m_status is not None:
                    #New status matched
                    pkg_status_match = True
                elif m_depends is not None:
                    #New depends matched
                    pkg_depends = m_depends.group(1)
                else:
                    pass

                #Now check if we can process package depends and postinst
                if "" != pkg_name and pkg_status_match:
                    pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
                else:
                    #Not enough information
                    pass

        # remove package dependencies not in postinsts
        pkg_names = list(pkgs.keys())
        for pkg_name in pkg_names:
            deps = pkgs[pkg_name][:]

            for d in deps:
                if d not in pkg_names:
                    pkgs[pkg_name].remove(d)

        return pkgs

    def _get_delayed_postinsts_common(self, status_file):
        """Return the delayed-postinst packages in dependency order
        (dependencies first), or None when there are none."""
        def _dep_resolve(graph, node, resolved, seen):
            # Depth-first post-order walk; 'seen' detects cycles.
            seen.append(node)

            for edge in graph[node]:
                if edge not in resolved:
                    if edge in seen:
                        raise RuntimeError("Packages %s and %s have " \
                                "a circular dependency in postinsts scripts." \
                                % (node, edge))
                    _dep_resolve(graph, edge, resolved, seen)

            resolved.append(node)

        pkg_list = []

        pkgs = None
        if not self.d.getVar('PACKAGE_INSTALL').strip():
            bb.note("Building empty image")
        else:
            pkgs = self._get_pkgs_postinsts(status_file)
        if pkgs:
            # Synthetic root depending on everything gives one ordered walk.
            root = "__packagegroup_postinst__"
            pkgs[root] = list(pkgs.keys())
            _dep_resolve(pkgs, root, pkg_list, [])
            pkg_list.remove(root)

        if len(pkg_list) == 0:
            return None

        return pkg_list

    def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
        """Copy delayed postinst scripts into dst_postinst_dir, numbered so
        they run in dependency order on first boot. No-op when the image
        keeps its package manager (it will run them itself)."""
        if bb.utils.contains("IMAGE_FEATURES", "package-management",
                         True, False, self.d):
            return

        num = 0
        # _get_delayed_postinsts() is provided by the backend subclass.
        for p in self._get_delayed_postinsts():
            bb.utils.mkdirhier(dst_postinst_dir)

            if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
                shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
                            os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))

            num += 1
class PkgRootfs(DpkgOpkgRootfs):
    """dpkg/apt-based rootfs construction for deb-packaged images."""

    def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
        super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
        # apt/dpkg errors are prefixed 'E:'; unmet-dependency reports are
        # expected (fixed up later) and must not fail the log check.
        self.log_check_regex = '^E:'
        self.log_check_expected_regexes = \
        [
            "^E: Unmet dependencies."
        ]

        # Always start from a clean rootfs — no incremental support here.
        bb.utils.remove(self.image_rootfs, True)
        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
        self.manifest = PkgManifest(d, manifest_dir)
        self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
                         d.getVar('PACKAGE_ARCHS'),
                         d.getVar('DPKG_ARCH'))

    def _create(self):
        """Build the rootfs: index, preprocess, install, fix up, postprocess."""
        pkgs_to_install = self.manifest.parse_initial_manifest()
        deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
        deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')

        # dpkg refuses to run without its alternatives directory.
        alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
        bb.utils.mkdirhier(alt_dir)

        # update PM index files
        self.pm.write_index()

        execute_pre_post_process(self.d, deb_pre_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()
            # Don't support incremental, so skip that
            self.progress_reporter.next_stage()

        self.pm.update()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        # Install each manifest group in order; ATTEMPT_ONLY groups are
        # installed best-effort (second install() argument True).
        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                self.pm.install(pkgs_to_install[pkg_type],
                                [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
                self.pm.fix_broken_dependencies()

        if self.progress_reporter:
            # Don't support attemptonly, so skip that
            self.progress_reporter.next_stage()
            self.progress_reporter.next_stage()

        self.pm.install_complementary()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self._setup_dbg_rootfs(['/var/lib/dpkg'])

        self.pm.fix_broken_dependencies()

        self.pm.mark_packages("installed")

        self.pm.run_pre_post_installs()

        execute_pre_post_process(self.d, deb_post_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

    @staticmethod
    def _depends_list():
        # Variables whose values affect rootfs creation (sstate dependency).
        return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']

    def _get_delayed_postinsts(self):
        # Delegate to the shared dpkg/opkg status-file parser.
        status_file = self.image_rootfs + "/var/lib/dpkg/status"
        return self._get_delayed_postinsts_common(status_file)

    def _save_postinsts(self):
        # Scripts land in /etc/deb-postinsts, numbered in dependency order.
        dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
        src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
        return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)

    def _log_check(self):
        self._log_check_warn()
        self._log_check_error()

    def _cleanup(self):
        # Nothing to clean up for the deb backend.
        pass

View File

@@ -0,0 +1,107 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import glob
import shutil
from oe.utils import execute_pre_post_process
from oe.sdk import Sdk
from oe.manifest import Manifest
from oe.package_manager.deb import DpkgPM
from oe.package_manager.deb.manifest import PkgManifest
class PkgSdk(Sdk):
    """SDK assembly using dpkg/apt: populates target and host sysroots."""

    def __init__(self, d, manifest_dir=None):
        super(PkgSdk, self).__init__(d, manifest_dir)

        # Separate apt configurations for the target and the (native) host.
        self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
        self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")

        self.target_manifest = PkgManifest(d, self.manifest_dir,
                                            Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = PkgManifest(d, self.manifest_dir,
                                          Manifest.MANIFEST_TYPE_SDK_HOST)

        # Extensible SDK builds use a separate repo workdir.
        deb_repo_workdir = "oe-sdk-repo"
        if "sdk_ext" in d.getVar("BB_RUNTASK"):
            deb_repo_workdir = "oe-sdk-ext-repo"

        self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
                                self.d.getVar("PACKAGE_ARCHS"),
                                self.d.getVar("DPKG_ARCH"),
                                self.target_conf_dir,
                                deb_repo_workdir=deb_repo_workdir)

        self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
                              self.d.getVar("SDK_PACKAGE_ARCHS"),
                              self.d.getVar("DEB_SDK_ARCH"),
                              self.host_conf_dir,
                              deb_repo_workdir=deb_repo_workdir)

    def _copy_apt_dir_to(self, dst_dir):
        """Replace *dst_dir* with the native staging apt configuration."""
        staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")

        self.remove(dst_dir, True)

        shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)

    def _populate_sysroot(self, pm, manifest):
        """Install the manifest's package groups into *pm*'s rootfs."""
        pkgs_to_install = manifest.parse_initial_manifest()

        pm.write_index()
        pm.update()

        # ATTEMPT_ONLY groups are installed best-effort (second arg True).
        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                pm.install(pkgs_to_install[pkg_type],
                           [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

    def _populate(self):
        """Populate target and host sysroots, run hooks, finalize dpkg state."""
        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))

        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))

        self.target_pm.run_pre_post_installs()

        # Intercepts must see the nativesdk wrapper scripts first on PATH;
        # restore the environment afterwards.
        env_bkp = os.environ.copy()
        os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
                             os.pathsep + os.environ["PATH"]

        self.target_pm.run_intercepts(populate_sdk='target')
        os.environ.update(env_bkp)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))

        self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))

        if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
            self.target_pm.remove_packaging_data()

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)
        self.install_locales(self.host_pm)

        self.host_pm.run_pre_post_installs()

        self.host_pm.run_intercepts(populate_sdk='host')

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))

        self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
                                           "etc", "apt"))

        if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
            self.host_pm.remove_packaging_data()

        # Relocate the host dpkg database under the native SDK path and
        # drop the now-empty top-level /var tree.
        native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
                                             "var", "lib", "dpkg")
        self.mkdirhier(native_dpkg_state_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
            self.movefile(f, native_dpkg_state_dir)
        self.remove(os.path.join(self.sdk_output, "var"), True)

View File

@@ -0,0 +1,515 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import glob
import re
import shutil
import subprocess
from oe.package_manager import *
class OpkgIndexer(Indexer):
    """Generate (and optionally sign) opkg Packages indexes in deploy_dir."""

    def write_index(self):
        """Run opkg-make-index for every configured arch, then sign the feeds."""
        arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
                     "SDK_PACKAGE_ARCHS",
                     ]

        index_tool = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
        extra_params = self.d.getVar('OPKG_MAKE_INDEX_EXTRA_PARAMS') or ""

        # Feed signing is optional and controlled by PACKAGE_FEED_SIGN.
        signer = None
        if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
            signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))

        # opkg-make-index needs the top-level Packages file to exist.
        top_index = os.path.join(self.deploy_dir, "Packages")
        if not os.path.exists(top_index):
            open(top_index, "w").close()

        commands = set()
        sign_targets = set()
        for var_name in arch_vars:
            arch_list = self.d.getVar(var_name)
            if arch_list is None:
                continue

            for arch in arch_list.split():
                arch_dir = os.path.join(self.deploy_dir, arch)
                if not os.path.isdir(arch_dir):
                    continue

                arch_index = os.path.join(arch_dir, "Packages")
                if not os.path.exists(arch_index):
                    open(arch_index, "w").close()

                commands.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s %s' %
                             (index_tool, arch_index, arch_index, arch_dir, extra_params))
                sign_targets.add(arch_index)

        if not commands:
            bb.note("There are no packages in %s!" % self.deploy_dir)
            return

        # Index generation is independent per feed; run in parallel.
        oe.utils.multiprocess_launch(create_index, commands, self.d)

        if signer:
            sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
            ascii_armor = (sig_type.upper() != "BIN")
            for target in sign_targets:
                signer.detach_sign(target,
                                   self.d.getVar('PACKAGE_FEED_GPG_NAME'),
                                   self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
                                   armor=ascii_armor)
class PMPkgsList(PkgsList):
    """List the packages installed in a rootfs by querying ``opkg status``."""

    def __init__(self, d, rootfs_dir):
        super(PMPkgsList, self).__init__(d, rootfs_dir)
        config_file = d.getVar("IPKGCONF_TARGET")

        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
        self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
        self.opkg_args += self.d.getVar("OPKG_ARGS")

    def list_pkgs(self, format=None):
        """Return the installed-package dictionary parsed from opkg output."""
        cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)

        # opkg can exit 0 while still printing a "Collected errors:" report
        # to stderr; merging the streams would corrupt parsing later on, so
        # capture them separately and treat any stderr output as fatal.
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, shell=True)
        raw_out, raw_err = proc.communicate()
        out_text = raw_out.decode("utf-8")
        err_text = raw_err.decode("utf-8")

        if proc.returncode or err_text:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d and stderr:\n%s" % (cmd, proc.returncode, err_text))

        return opkg_query(out_text)
class OpkgDpkgPM(PackageManager):
    def __init__(self, d, target_rootfs):
        """
        This is an abstract class. Do not instantiate this directly.
        """
        super(OpkgDpkgPM, self).__init__(d, target_rootfs)

    def package_info(self, pkg, cmd):
        """
        Returns a dictionary with the package info.

        This method extracts the common parts for Opkg and Dpkg.
        'cmd' is the backend-specific info command to run.
        """
        proc = subprocess.run(cmd, capture_output=True, encoding="utf-8", shell=True)
        if proc.returncode:
            bb.fatal("Unable to list available packages. Command '%s' "
                     "returned %d:\n%s" % (cmd, proc.returncode, proc.stderr))
        elif proc.stderr:
            bb.note("Command '%s' returned stderr: %s" % (cmd, proc.stderr))

        return opkg_query(proc.stdout)

    def extract(self, pkg, pkg_info):
        """
        Returns the path to a tmpdir where resides the contents of a package.
        Deleting the tmpdir is responsibility of the caller.

        This method extracts the common parts for Opkg and Dpkg.
        """
        ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
        tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
        pkg_path = pkg_info[pkg]["filepath"]

        if not os.path.isfile(pkg_path):
            bb.fatal("Unable to extract package for '%s'."
                     "File %s doesn't exist" % (pkg, pkg_path))

        tmp_dir = tempfile.mkdtemp()
        current_dir = os.getcwd()
        os.chdir(tmp_dir)
        try:
            # ipk/deb packages are 'ar' archives containing a data tarball.
            cmd = [ar_cmd, 'x', pkg_path]
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            data_tar = glob.glob("data.tar.*")
            if len(data_tar) != 1:
                bb.fatal("Unable to extract %s package. Failed to identify "
                         "data tarball (found tarballs '%s').",
                         pkg_path, data_tar)
            data_tar = data_tar[0]
            cmd = [tar_cmd, 'xf', data_tar]
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            bb.utils.remove(tmp_dir, recurse=True)
            bb.fatal("Unable to extract %s package. Command '%s' "
                     "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
        except OSError as e:
            bb.utils.remove(tmp_dir, recurse=True)
            bb.fatal("Unable to extract %s package. Command '%s' "
                     "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
        finally:
            # Always restore the original working directory — the original
            # code left the process chdir'd into tmp_dir when bb.fatal()
            # raised from one of the handlers above.
            os.chdir(current_dir)

        bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
        # Strip the package metadata; the removes use absolute paths, so
        # running them after the chdir back is equivalent.
        bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
        bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
        bb.utils.remove(os.path.join(tmp_dir, data_tar))

        return tmp_dir

    def _handle_intercept_failure(self, registered_pkgs):
        # A failed intercept means the affected packages must be re-configured
        # on the target; flag them as merely unpacked.
        self.mark_packages("unpacked", registered_pkgs.split())
class OpkgPM(OpkgDpkgPM):
    """opkg-based package manager operating on *target_rootfs*."""

    def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True):
        super(OpkgPM, self).__init__(d, target_rootfs)

        self.config_file = config_file
        self.pkg_archs = archs
        self.task_name = task_name

        self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir)
        self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
        self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
        self.opkg_args += self.d.getVar("OPKG_ARGS")

        if prepare_index:
            # Build a task-local package repo from DEPLOY_DIR_IPK.
            create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"), "package_write_ipk", filterbydependencies)

        self.opkg_dir = oe.path.join(target_rootfs, self.d.getVar('OPKGLIBDIR'), "opkg")
        bb.utils.mkdirhier(self.opkg_dir)

        # Location used by backup/recover_packaging_data().
        self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
        if not os.path.exists(self.d.expand('${T}/saved')):
            bb.utils.mkdirhier(self.d.expand('${T}/saved'))

        self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
        if self.from_feeds:
            self._create_custom_config()
        else:
            self._create_config()

        self.indexer = OpkgIndexer(self.d, self.deploy_dir)

    def mark_packages(self, status_tag, packages=None):
        """
        This function will change a package's status in /var/lib/opkg/status file.
        If 'packages' is None then the new_status will be applied to all
        packages
        """
        status_file = os.path.join(self.opkg_dir, "status")

        # Rewrite to a temp file, then atomically rename over the original.
        with open(status_file, "r") as sf:
            with open(status_file + ".tmp", "w+") as tmp_sf:
                if packages is None:
                    tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                                        r"Package: \1\n\2Status: \3%s" % status_tag,
                                        sf.read()))
                else:
                    if type(packages).__name__ != "list":
                        raise TypeError("'packages' should be a list object")

                    status = sf.read()
                    for pkg in packages:
                        status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
                                        r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
                                        status)

                    tmp_sf.write(status)

        bb.utils.rename(status_file + ".tmp", status_file)

    def _create_custom_config(self):
        """Write an opkg config pointing at remote feeds (IPK_FEED_URIS)."""
        bb.note("Building from feeds activated!")

        with open(self.config_file, "w+") as config_file:
            # Lower 'arch' priority number wins; first arch is most generic.
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            # IPK_FEED_URIS entries have the form "name##uri".
            for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
                feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)

                if feed_match is not None:
                    feed_name = feed_match.group(1)
                    feed_uri = feed_match.group(2)

                    bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))

                    config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))

        """
        Allow to use package deploy directory contents as quick devel-testing
        feed. This creates individual feed configs for each arch subdir of those
        specified as compatible for the current machine.
        NOTE: Development-helper feature, NOT a full-fledged feed.
        """
        if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
            for arch in self.pkg_archs.split():
                cfg_file_name = oe.path.join(self.target_rootfs,
                                             self.d.getVar("sysconfdir"),
                                             "opkg",
                                             "local-%s-feed.conf" % arch)

                with open(cfg_file_name, "w+") as cfg_file:
                    cfg_file.write("src/gz local-%s %s/%s" %
                                   (arch,
                                    self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
                                    arch))

                    if self.d.getVar('OPKGLIBDIR') != '/var/lib':
                        # There is no command line option for this anymore, we need to add
                        # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
                        # the default value of "/var/lib" as defined in opkg:
                        # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
                        # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
                        # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
                        cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
                        cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
                        cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))

    def _create_config(self):
        """Write an opkg config pointing at the local deploy-dir feeds."""
        with open(self.config_file, "w+") as config_file:
            priority = 1
            for arch in self.pkg_archs.split():
                config_file.write("arch %s %d\n" % (arch, priority))
                priority += 5

            config_file.write("src oe file:%s\n" % self.deploy_dir)

            for arch in self.pkg_archs.split():
                pkgs_dir = os.path.join(self.deploy_dir, arch)
                if os.path.isdir(pkgs_dir):
                    config_file.write("src oe-%s file:%s\n" %
                                      (arch, pkgs_dir))

            if self.d.getVar('OPKGLIBDIR') != '/var/lib':
                # There is no command line option for this anymore, we need to add
                # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
                # the default value of "/var/lib" as defined in opkg:
                # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
                # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
                # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
                config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
                config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
                config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))

    def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
        """Write runtime feed entries into the image's base-feeds.conf."""
        if feed_uris == "":
            return

        rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
                                  % self.target_rootfs)

        os.makedirs('%s/etc/opkg' % self.target_rootfs, exist_ok=True)

        feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
        # When no explicit feed archs were given, fall back to this PM's archs.
        archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()

        with open(rootfs_config, "w+") as config_file:
            uri_iterator = 0
            for uri in feed_uris:
                if archs:
                    for arch in archs:
                        # Skip archs with no deployed packages unless the
                        # caller explicitly requested them.
                        if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
                            continue
                        bb.note('Adding opkg feed url-%s-%d (%s)' %
                            (arch, uri_iterator, uri))
                        config_file.write("src/gz uri-%s-%d %s/%s\n" %
                                          (arch, uri_iterator, uri, arch))
                else:
                    bb.note('Adding opkg feed url-%d (%s)' %
                        (uri_iterator, uri))
                    config_file.write("src/gz uri-%d %s\n" %
                                      (uri_iterator, uri))

                uri_iterator += 1

    def update(self):
        """Run 'opkg update' under the deploy-dir lock."""
        self.deploy_dir_lock()

        cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)

        try:
            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            self.deploy_dir_unlock()
            bb.fatal("Unable to update the package index files. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))

        self.deploy_dir_unlock()

    def install(self, pkgs, attempt_only=False, hard_depends_only=False):
        """Install *pkgs* into the rootfs.

        With attempt_only, failures are only warnings; with
        hard_depends_only, recommends are not installed.
        """
        if not pkgs:
            return

        cmd = "%s %s" % (self.opkg_cmd, self.opkg_args)
        for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split():
            cmd += " --add-exclude %s" % exclude
        for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split():
            cmd += " --add-ignore-recommends %s" % bad_recommendation
        if hard_depends_only:
            cmd += " --no-install-recommends"
        cmd += " install "
        cmd += " ".join(pkgs)

        # Environment consumed by postinst scripts and intercepts running
        # against the offline rootfs.
        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['INTERCEPT_DIR'] = self.intercepts_dir
        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')

        try:
            bb.note("Installing the following packages: %s" % ' '.join(pkgs))
            bb.note(cmd)
            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
            bb.note(output)
            failed_pkgs = []
            for line in output.split('\n'):
                if line.endswith("configuration required on target."):
                    bb.warn(line)
                    failed_pkgs.append(line.split(".")[0])
            if failed_pkgs:
                failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
        except subprocess.CalledProcessError as e:
            # attempt_only selects bb.warn instead of bb.fatal.
            (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
                                              "Command '%s' returned %d:\n%s" %
                                              (cmd, e.returncode, e.output.decode("utf-8")))

    def remove(self, pkgs, with_dependencies=True):
        """Remove *pkgs*; optionally also remove dependent packages."""
        if not pkgs:
            return

        if with_dependencies:
            cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
        else:
            cmd = "%s %s --force-depends remove %s" % \
                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))

        try:
            bb.note(cmd)
            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
            bb.note(output)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to remove packages. Command '%s' "
                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))

    def write_index(self):
        """Regenerate the feed index under the deploy-dir lock."""
        self.deploy_dir_lock()

        result = self.indexer.write_index()

        self.deploy_dir_unlock()

        if result is not None:
            bb.fatal(result)

    def remove_packaging_data(self):
        # Drop the opkg state and cache from the image.
        cachedir = oe.path.join(self.target_rootfs, self.d.getVar("localstatedir"), "cache", "opkg")
        bb.utils.remove(self.opkg_dir, True)
        bb.utils.remove(cachedir, True)

    def remove_lists(self):
        # Keep the lists when building from feeds — they came from outside.
        if not self.from_feeds:
            bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True)

    def list_installed(self):
        return PMPkgsList(self.d, self.target_rootfs).list_pkgs()

    def dummy_install(self, pkgs):
        """
        The following function dummy installs pkgs and returns the log of output.
        """
        if len(pkgs) == 0:
            return

        # Create an temp dir as opkg root for dummy installation
        temp_rootfs = self.d.expand('${T}/opkg')
        opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
        if opkg_lib_dir[0] == "/":
            opkg_lib_dir = opkg_lib_dir[1:]
        temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg')
        bb.utils.mkdirhier(temp_opkg_dir)

        opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
        opkg_args += self.d.getVar("OPKG_ARGS")

        cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            bb.fatal("Unable to update. Command '%s' "
                     "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))

        # Dummy installation
        cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
                                                opkg_args,
                                                ' '.join(pkgs))
        proc = subprocess.run(cmd, capture_output=True, encoding="utf-8", shell=True)
        if proc.returncode:
            bb.fatal("Unable to dummy install packages. Command '%s' "
                     "returned %d:\n%s" % (cmd, proc.returncode, proc.stderr))
        elif proc.stderr:
            bb.note("Command '%s' returned stderr: %s" % (cmd, proc.stderr))

        bb.utils.remove(temp_rootfs, True)

        return proc.stdout

    def backup_packaging_data(self):
        # Save the opkglib for increment ipk image generation
        if os.path.exists(self.saved_opkg_dir):
            bb.utils.remove(self.saved_opkg_dir, True)
        shutil.copytree(self.opkg_dir,
                        self.saved_opkg_dir,
                        symlinks=True)

    def recover_packaging_data(self):
        # Move the opkglib back
        if os.path.exists(self.saved_opkg_dir):
            if os.path.exists(self.opkg_dir):
                bb.utils.remove(self.opkg_dir, True)

            bb.note('Recover packaging data')
            shutil.copytree(self.saved_opkg_dir,
                            self.opkg_dir,
                            symlinks=True)

    def package_info(self, pkg):
        """
        Returns a dictionary with the package info.
        """
        cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
        pkg_info = super(OpkgPM, self).package_info(pkg, cmd)

        # Resolve the on-disk path of the .ipk inside the deploy dir.
        pkg_arch = pkg_info[pkg]["arch"]
        pkg_filename = pkg_info[pkg]["filename"]
        pkg_info[pkg]["filepath"] = \
            os.path.join(self.deploy_dir, pkg_arch, pkg_filename)

        return pkg_info

    def extract(self, pkg):
        """
        Returns the path to a tmpdir where resides the contents of a package.

        Deleting the tmpdir is responsability of the caller.
        """
        pkg_info = self.package_info(pkg)
        if not pkg_info:
            bb.fatal("Unable to get information for package '%s' while "
                     "trying to extract the package." % pkg)

        return super(OpkgPM, self).extract(pkg, pkg_info)

View File

@@ -0,0 +1,76 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import re

from oe.manifest import Manifest
class PkgManifest(Manifest):
    """
    Returns a dictionary object with mip and mlp packages.
    """
    def _split_multilib(self, pkg_list):
        """Split *pkg_list* into MUST_INSTALL and MULTILIB groups.

        A package belongs to the multilib group when its name starts with
        one of the configured MULTILIB_VARIANTS prefixes.
        """
        pkgs = dict()

        for pkg in pkg_list.split():
            pkg_type = self.PKG_TYPE_MUST_INSTALL

            ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()

            for ml_variant in ml_variants:
                if pkg.startswith(ml_variant + '-'):
                    pkg_type = self.PKG_TYPE_MULTILIB

            # Group values are space-separated package name strings.
            if pkg_type not in pkgs:
                pkgs[pkg_type] = pkg
            else:
                pkgs[pkg_type] += " " + pkg

        return pkgs

    def create_initial(self):
        """Write the initial manifest as 'type,package' lines."""
        pkgs = dict()

        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for var in self.var_maps[self.manifest_type]:
                if var in self.vars_to_split:
                    split_pkgs = self._split_multilib(self.d.getVar(var))
                    if split_pkgs is not None:
                        pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
                else:
                    pkg_list = self.d.getVar(var)
                    if pkg_list is not None:
                        pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)

            for pkg_type in sorted(pkgs):
                for pkg in sorted(pkgs[pkg_type].split()):
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    def create_final(self):
        """No final manifest for this backend; intentionally a no-op."""
        pass

    def create_full(self, pm):
        """Write the full manifest by dummy-installing the initial packages.

        :param pm: an OpkgPM instance providing dummy_install().
        """
        # NOTE: requires 'os' at module scope (missing from the original
        # import block of this file).
        if not os.path.exists(self.initial_manifest):
            self.create_initial()

        initial_manifest = self.parse_initial_manifest()
        pkgs_to_install = list()
        for pkg_type in initial_manifest:
            pkgs_to_install += initial_manifest[pkg_type]
        if len(pkgs_to_install) == 0:
            return

        output = pm.dummy_install(pkgs_to_install)

        with open(self.full_manifest, 'w+') as manifest:
            pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
            for line in set(output.split('\n')):
                m = pkg_re.match(line)
                if m:
                    manifest.write(m.group(1) + '\n')

View File

@@ -0,0 +1,352 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import re
import filecmp
import shutil
from oe.rootfs import Rootfs
from oe.manifest import Manifest
from oe.utils import execute_pre_post_process
from oe.package_manager.ipk.manifest import PkgManifest
from oe.package_manager.ipk import OpkgPM
class DpkgOpkgRootfs(Rootfs):
    """Rootfs logic shared by the dpkg and opkg backends.

    Parses a dpkg/opkg ``status`` file to find packages whose postinstall
    scripts were delayed to first boot, orders them by their dependencies,
    and saves the scripts into the image.
    """

    def __init__(self, d, progress_reporter=None, logcatcher=None):
        super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)

    def _get_pkgs_postinsts(self, status_file):
        """Return {package: [depends]} for packages still in 'unpacked' state.

        Only dependencies that are themselves in the returned mapping are
        kept, so the result forms a self-contained dependency graph.
        """
        def _get_pkg_depends_list(pkg_depends):
            pkg_depends_list = []
            # filter version requirements like libc (>= 1.1)
            for dep in pkg_depends.split(', '):
                m_dep = re.match(r"^(.*) \(.*\)$", dep)
                if m_dep:
                    dep = m_dep.group(1)
                pkg_depends_list.append(dep)
            return pkg_depends_list

        pkgs = {}
        pkg_name = ""
        pkg_status_match = False
        pkg_depends = ""

        # NOTE: the original called status.close() inside the 'with' block,
        # which is redundant — the context manager closes the file.
        with open(status_file) as status:
            data = status.read()
        for line in data.split('\n'):
            m_pkg = re.match(r"^Package: (.*)", line)
            m_status = re.match(r"^Status:.*unpacked", line)
            m_depends = re.match(r"^Depends: (.*)", line)

            # Only one of m_pkg, m_status or m_depends is not None at a time.
            # If m_pkg is not None, we started a new package stanza.
            if m_pkg is not None:
                # Get package name and reset the per-stanza state
                pkg_name = m_pkg.group(1)
                pkg_status_match = False
                pkg_depends = ""
            elif m_status is not None:
                # New status matched
                pkg_status_match = True
            elif m_depends is not None:
                # New depends matched
                pkg_depends = m_depends.group(1)

            # Record the package once its 'unpacked' status has been seen;
            # a later Depends line in the same stanza refreshes the entry.
            if "" != pkg_name and pkg_status_match:
                pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)

        # remove package dependencies not in postinsts
        pkg_names = list(pkgs.keys())
        for pkg_name in pkg_names:
            deps = pkgs[pkg_name][:]

            for d in deps:
                if d not in pkg_names:
                    pkgs[pkg_name].remove(d)

        return pkgs

    def _get_delayed_postinsts_common(self, status_file):
        """Return delayed-postinst packages in dependency order, or None.

        Raises RuntimeError when the postinst scripts have a circular
        dependency.
        """
        def _dep_resolve(graph, node, resolved, seen):
            # Depth-first resolution; 'seen' detects cycles on the current path.
            seen.append(node)
            for edge in graph[node]:
                if edge not in resolved:
                    if edge in seen:
                        raise RuntimeError("Packages %s and %s have " \
                                "a circular dependency in postinsts scripts." \
                                % (node, edge))
                    _dep_resolve(graph, edge, resolved, seen)
            resolved.append(node)

        pkg_list = []

        pkgs = None
        if not self.d.getVar('PACKAGE_INSTALL').strip():
            bb.note("Building empty image")
        else:
            pkgs = self._get_pkgs_postinsts(status_file)
        if pkgs:
            # Synthetic root node depending on every package, so one
            # resolution pass orders the whole graph.
            root = "__packagegroup_postinst__"
            pkgs[root] = list(pkgs.keys())
            _dep_resolve(pkgs, root, pkg_list, [])
            pkg_list.remove(root)

        if len(pkg_list) == 0:
            return None

        return pkg_list

    def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
        """Copy delayed postinst scripts into the image, numbered in run order.

        Skipped entirely when package management stays in the image (the
        package manager will run the scripts itself on first boot).
        """
        if bb.utils.contains("IMAGE_FEATURES", "package-management",
                         True, False, self.d):
            return

        num = 0
        # _get_delayed_postinsts() may return None when nothing is delayed;
        # guard with 'or []' instead of crashing on iteration.
        for p in self._get_delayed_postinsts() or []:
            bb.utils.mkdirhier(dst_postinst_dir)

            if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
                shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
                            os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))

            num += 1
class PkgRootfs(DpkgOpkgRootfs):
    def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
        super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
        # opkg reports failures as 'exit 1' lines or 'Collected errors' blocks.
        self.log_check_regex = '(exit 1|Collected errors)'

        self.manifest = PkgManifest(d, manifest_dir)
        self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
        self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")

        # Incremental image generation: reuse the previous rootfs when the
        # relevant configuration has not changed (see _remove_old_rootfs).
        self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
        if self._remove_old_rootfs():
            bb.utils.remove(self.image_rootfs, True)
            self.pm = OpkgPM(d,
                             self.image_rootfs,
                             self.opkg_conf,
                             self.pkg_archs)
        else:
            self.pm = OpkgPM(d,
                             self.image_rootfs,
                             self.opkg_conf,
                             self.pkg_archs)
            # Restore the saved opkg state for the incremental build.
            self.pm.recover_packaging_data()

        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
'''
Compare two files with the same key twice to see if they are equal.
If they are not equal, it means they are duplicated and come from
different packages.
'''
def _file_equal(self, key, f1, f2):
if filecmp.cmp(f1, f2):
return True
# Not equal
return False
"""
This function was reused from the old implementation.
See commit: "image.bbclass: Added variables for multilib support." by
Lianhao Lu.
"""
def _multilib_sanity_test(self, dirs):
allow_replace = "|".join((self.d.getVar("MULTILIBRE_ALLOW_REP") or "").split())
if allow_replace is None:
allow_replace = ""
allow_rep = re.compile(re.sub(r"\|$", r"", allow_replace))
error_prompt = "Multilib check error:"
files = {}
for dir in dirs:
for root, subfolders, subfiles in os.walk(dir):
for file in subfiles:
item = os.path.join(root, file)
key = str(os.path.join("/", os.path.relpath(item, dir)))
valid = True
if key in files:
#check whether the file is allow to replace
if allow_rep.match(key):
valid = True
else:
if os.path.exists(files[key]) and \
os.path.exists(item) and \
not self._file_equal(key, files[key], item):
valid = False
bb.fatal("%s duplicate files %s %s is not the same\n" %
(error_prompt, item, files[key]))
#pass the check, add to list
if valid:
files[key] = item
def _multilib_test_install(self, pkgs):
ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
bb.utils.mkdirhier(ml_temp)
dirs = [self.image_rootfs]
for variant in self.d.getVar("MULTILIB_VARIANTS").split():
ml_target_rootfs = os.path.join(ml_temp, variant)
bb.utils.remove(ml_target_rootfs, True)
ml_opkg_conf = os.path.join(ml_temp,
variant + "-" + os.path.basename(self.opkg_conf))
ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False)
ml_pm.update()
ml_pm.install(pkgs)
dirs.append(ml_target_rootfs)
self._multilib_sanity_test(dirs)
'''
While ipk incremental image generation is enabled, it will remove the
unneeded pkgs by comparing the old full manifest in previous existing
image and the new full manifest in the current image.
'''
def _remove_extra_packages(self, pkgs_initial_install):
    """For incremental ipk image generation, remove packages that were in
    the previous image's full manifest but are absent from the current one.

    No-op unless INC_IPK_IMAGE_GEN == "1".
    """
    if self.inc_opkg_image_gen == "1":
        # Parse full manifest in previous existing image creation session
        old_full_manifest = self.manifest.parse_full_manifest()

        # Create full manifest for the current image session, the old one
        # will be replaced by the new one.
        self.manifest.create_full(self.pm)

        # Parse full manifest in current image creation session
        new_full_manifest = self.manifest.parse_full_manifest()

        pkg_to_remove = list()
        for pkg in old_full_manifest:
            if pkg not in new_full_manifest:
                pkg_to_remove.append(pkg)

        if pkg_to_remove != []:
            bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
            self.pm.remove(pkg_to_remove)
'''
Compare with previous existing image creation, if some conditions
triggered, the previous old image should be removed.
The conditions include any of 'PACKAGE_EXCLUDE, NO_RECOMMENDATIONS
and BAD_RECOMMENDATIONS' has been changed.
'''
def _remove_old_rootfs(self):
    """Decide whether the previously generated rootfs must be discarded.

    Returns True when the old rootfs should be removed: either incremental
    ipk image generation is disabled, or any of BAD_RECOMMENDATIONS,
    NO_RECOMMENDATIONS or PACKAGE_EXCLUDE changed since the last run.
    The current variable snapshot is persisted to ${T}/vars_list for the
    next comparison.

    Fix vs. original: files were opened with 'r+'/'w+' and never closed;
    use context managers and plain 'r'/'w' modes instead.
    """
    if self.inc_opkg_image_gen != "1":
        return True

    vars_list_file = self.d.expand('${T}/vars_list')

    old_vars_list = ""
    if os.path.exists(vars_list_file):
        with open(vars_list_file, 'r') as f:
            old_vars_list = f.read()

    new_vars_list = '%s:%s:%s\n' % \
            ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
             (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
             (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
    with open(vars_list_file, 'w') as f:
        f.write(new_vars_list)

    # Any change in the recommendation/exclusion variables invalidates
    # the old rootfs.
    return old_vars_list != new_vars_list
def _create(self):
    """Populate the ipk rootfs: refresh feed indexes, run pre-process
    commands, install the manifest packages in install_order (with a
    multilib sanity test first), install complementary packages, then run
    post-processing and optionally back up packaging data for incremental
    image generation.
    """
    pkgs_to_install = self.manifest.parse_initial_manifest()
    opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
    opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')

    # update PM index files
    self.pm.write_index()

    execute_pre_post_process(self.d, opkg_pre_process_cmds)

    if self.progress_reporter:
        self.progress_reporter.next_stage()
        # Steps are a bit different in order, skip next
        self.progress_reporter.next_stage()

    self.pm.update()

    if self.progress_reporter:
        self.progress_reporter.next_stage()

    # Incremental mode: drop packages that were in the previous image
    # but are no longer wanted.
    if self.inc_opkg_image_gen == "1":
        self._remove_extra_packages(pkgs_to_install)

    if self.progress_reporter:
        self.progress_reporter.next_stage()

    for pkg_type in self.install_order:
        if pkg_type in pkgs_to_install:
            # For multilib, we perform a sanity test before final install
            # If sanity test fails, it will automatically do a bb.fatal()
            # and the installation will stop
            if pkg_type == Manifest.PKG_TYPE_MULTILIB:
                self._multilib_test_install(pkgs_to_install[pkg_type])

            # Second argument is attempt_only: True only for the
            # ATTEMPT_ONLY package group.
            self.pm.install(pkgs_to_install[pkg_type],
                            [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

    if self.progress_reporter:
        self.progress_reporter.next_stage()

    self.pm.install_complementary()

    if self.progress_reporter:
        self.progress_reporter.next_stage()

    # Preserve opkg's state directory when setting up the debug rootfs.
    opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
    opkg_dir = os.path.join(opkg_lib_dir, 'opkg')
    self._setup_dbg_rootfs([opkg_dir])

    execute_pre_post_process(self.d, opkg_post_process_cmds)

    if self.inc_opkg_image_gen == "1":
        self.pm.backup_packaging_data()

    if self.progress_reporter:
        self.progress_reporter.next_stage()
@staticmethod
def _depends_list():
return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR']
def _get_delayed_postinsts(self):
    """Collect postinsts deferred to first boot from opkg's status file."""
    # opkg keeps its package database under ${OPKGLIBDIR}/opkg inside
    # the image rootfs; the status file records pending scriptlets.
    opkg_libdir = self.d.getVar('OPKGLIBDIR').strip('/')
    status_file = os.path.join(self.image_rootfs, opkg_libdir, "opkg", "status")
    return self._get_delayed_postinsts_common(status_file)
def _save_postinsts(self):
    """Copy deferred postinst scripts into /etc/ipk-postinsts for first boot."""
    src_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
    dst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
    return self._save_postinsts_common(dst_dir, src_dir)
def _log_check(self):
    """Run both log scans: warnings first, then errors."""
    for scan in (self._log_check_warn, self._log_check_error):
        scan()
def _cleanup(self):
    """Drop the downloaded opkg package list caches from the rootfs."""
    self.pm.remove_lists()

View File

@@ -0,0 +1,113 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import glob
import shutil
from oe.utils import execute_pre_post_process
from oe.sdk import Sdk
from oe.package_manager.ipk.manifest import PkgManifest
from oe.manifest import Manifest
from oe.package_manager.ipk import OpkgPM
class PkgSdk(Sdk):
    """SDK populator backed by opkg/ipk packages.

    Builds the SDK's target and host sysroots from ipk feeds, then copies
    the opkg configuration and state into the SDK output tree.
    """

    def __init__(self, d, manifest_dir=None):
        super(PkgSdk, self).__init__(d, manifest_dir)

        # In sdk_list_installed_packages the call to opkg is hardcoded to
        # always use IPKGCONF_TARGET and there's no exposed API to change this
        # so simply override IPKGCONF_TARGET to use this separated config file.
        ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK_TARGET")
        d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target)

        self.target_conf = self.d.getVar("IPKGCONF_TARGET")
        self.host_conf = self.d.getVar("IPKGCONF_SDK")

        self.target_manifest = PkgManifest(d, self.manifest_dir,
                                           Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = PkgManifest(d, self.manifest_dir,
                                         Manifest.MANIFEST_TYPE_SDK_HOST)

        # The extensible SDK uses a separate repo workdir.
        ipk_repo_workdir = "oe-sdk-repo"
        if "sdk_ext" in d.getVar("BB_RUNTASK"):
            ipk_repo_workdir = "oe-sdk-ext-repo"

        self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
                                self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"),
                                ipk_repo_workdir=ipk_repo_workdir)

        self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
                              self.d.getVar("SDK_PACKAGE_ARCHS"),
                              ipk_repo_workdir=ipk_repo_workdir)

    def _populate_sysroot(self, pm, manifest):
        """Install the manifest's packages into the sysroot managed by *pm*."""
        pkgs_to_install = manifest.parse_initial_manifest()

        # When building from feeds the index already exists remotely.
        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
            pm.write_index()

        pm.update()

        for pkg_type in self.install_order:
            if pkg_type in pkgs_to_install:
                # Second argument is attempt_only for the ATTEMPT_ONLY group.
                pm.install(pkgs_to_install[pkg_type],
                           [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])

    def _populate(self):
        """Populate target and host sysroots and finalize opkg config/state."""
        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))

        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))

        # Run intercepts with the nativesdk wrapper scripts first in PATH,
        # then restore the original environment.
        env_bkp = os.environ.copy()
        os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
                             os.pathsep + os.environ["PATH"]

        self.target_pm.run_intercepts(populate_sdk='target')
        os.environ.update(env_bkp)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))

        # Keep packaging data only if the SDK ships package management.
        if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
            self.target_pm.remove_packaging_data()
        else:
            self.target_pm.remove_lists()

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)
        self.install_locales(self.host_pm)

        self.host_pm.run_intercepts(populate_sdk='host')

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))

        if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
            self.host_pm.remove_packaging_data()
        else:
            self.host_pm.remove_lists()

        # Ship the opkg configs inside both sysroots' sysconfdir.
        target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
        host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)

        self.mkdirhier(target_sysconfdir)
        shutil.copy(self.target_conf, target_sysconfdir)
        os.chmod(os.path.join(target_sysconfdir,
                              os.path.basename(self.target_conf)), 0o644)

        self.mkdirhier(host_sysconfdir)
        shutil.copy(self.host_conf, host_sysconfdir)
        os.chmod(os.path.join(host_sysconfdir,
                              os.path.basename(self.host_conf)), 0o644)

        # Relocate opkg state from /var into the nativesdk path layout.
        native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
                                             self.d.getVar('localstatedir_nativesdk').strip('/'),
                                             "lib", "opkg")
        self.mkdirhier(native_opkg_state_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
            self.movefile(f, native_opkg_state_dir)

        self.remove(os.path.join(self.sdk_output, "var"), True)

View File

@@ -0,0 +1,422 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import shutil
import subprocess
from oe.package_manager import *
class RpmIndexer(Indexer):
    """Generates repodata for an RPM feed with createrepo_c, optionally
    signing the resulting repomd.xml."""

    def write_index(self):
        """Index the top-level deploy dir."""
        self.do_write_index(self.deploy_dir)

    def do_write_index(self, deploy_dir):
        """Run createrepo_c over *deploy_dir*; sign repomd.xml when
        PACKAGE_FEED_SIGN is enabled. bb.fatal()s on indexing failure."""
        if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
            signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
        else:
            signer = None

        createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
        result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir))
        if result:
            bb.fatal(result)

        # Sign repomd
        if signer:
            # Anything other than "BIN" produces an ASCII-armored signature.
            sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
            is_ascii_sig = (sig_type.upper() != "BIN")
            signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'),
                               self.d.getVar('PACKAGE_FEED_GPG_NAME'),
                               self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
                               armor=is_ascii_sig)
class RpmSubdirIndexer(RpmIndexer):
    """Indexer that also regenerates repodata for every top-level
    subdirectory of the deploy dir (excluding repodata itself)."""

    def write_index(self):
        bb.note("Generating package index for %s" %(self.deploy_dir))
        # Remove the existing repodata to ensure that we re-generate it no matter what
        bb.utils.remove(os.path.join(self.deploy_dir, "repodata"), recurse=True)
        self.do_write_index(self.deploy_dir)
        for walk_root, walk_dirs, _ in os.walk(self.deploy_dir):
            # Only the walk entry for the deploy dir itself is of interest:
            # its immediate subdirectories each get their own index.
            if not os.path.samefile(self.deploy_dir, walk_root):
                continue
            for subdir in walk_dirs:
                if subdir == 'repodata':
                    continue
                dir_path = oe.path.join(self.deploy_dir, subdir)
                bb.note("Generating package index for %s" %(dir_path))
                self.do_write_index(dir_path)
class PMPkgsList(PkgsList):
    """Lists packages installed in a rootfs via a throwaway RpmPM instance."""

    def list_pkgs(self):
        """Return the installed-package dict for self.rootfs_dir.

        needfeed=False: no package feed is required just to query the
        installed database.
        """
        return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False).list_installed()
class RpmPM(PackageManager):
    """PackageManager implementation driving rpm/dnf against a target
    rootfs, including feed setup, signing, incremental-image state
    backup/restore and postinstall-scriptlet handling."""

    def __init__(self,
                 d,
                 target_rootfs,
                 target_vendor,
                 task_name='target',
                 arch_var=None,
                 os_var=None,
                 rpm_repo_workdir="oe-rootfs-repo",
                 filterbydependencies=True,
                 needfeed=True):
        """Set up arch lists, the local rpm feed (unless needfeed is False)
        and the ${T} directories used to persist packaging state."""
        super(RpmPM, self).__init__(d, target_rootfs)
        self.target_vendor = target_vendor
        self.task_name = task_name
        # dnf/libsolv use '_' in arch names where OE uses '-'.
        if arch_var == None:
            self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_")
        else:
            self.archs = self.d.getVar(arch_var).replace("-","_")
        if task_name == "host":
            self.primary_arch = self.d.getVar('SDK_ARCH')
        else:
            self.primary_arch = self.d.getVar('MACHINE_ARCH')

        if needfeed:
            # Local repo populated from DEPLOY_DIR_RPM for this task.
            self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
            create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)

        # Directory used by backup/recovery_packaging_data for
        # incremental image generation.
        self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
        if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
            bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
        self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
        # File recording the last computed install solution.
        self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
                                               self.task_name)
        if not os.path.exists(self.d.expand('${T}/saved')):
            bb.utils.mkdirhier(self.d.expand('${T}/saved'))

    def _configure_dnf(self):
        """Write dnf's arch/releasever vars and an empty dnf.conf into the
        target rootfs."""
        # libsolv handles 'noarch' internally, we don't need to specify it explicitly
        archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]]
        # This prevents accidental matching against libsolv's built-in policies
        if len(archs) <= 1:
            archs = archs + ["bogusarch"]
        # This architecture needs to be upfront so that packages using it are properly prioritized
        archs = ["sdk_provides_dummy_target"] + archs
        confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
        bb.utils.mkdirhier(confdir)
        with open(confdir + "arch", 'w') as f:
            f.write(":".join(archs))

        distro_codename = self.d.getVar('DISTRO_CODENAME')
        with open(confdir + "releasever", 'w') as f:
            f.write(distro_codename if distro_codename is not None else '')

        # Empty dnf.conf: defaults plus command-line options are used.
        with open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w') as f:
            f.write("")

    def _configure_rpm(self):
        # We need to configure rpm to use our primary package architecture as the installation architecture,
        # and to make it compatible with other package architectures that we use.
        # Otherwise it will refuse to proceed with packages installation.
        platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
        rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
        bb.utils.mkdirhier(platformconfdir)
        with open(platformconfdir + "platform", 'w') as f:
            f.write("%s-pc-linux" % self.primary_arch)
        with open(rpmrcconfdir + "rpmrc", 'w') as f:
            f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
            f.write("buildarch_compat: %s: noarch\n" % self.primary_arch)

        # Install all ELF colors (32/64-bit) in multilib images.
        with open(platformconfdir + "macros", 'w') as f:
            f.write("%_transaction_color 7\n")
        if self.d.getVar('RPM_PREFER_ELF_ARCH'):
            with open(platformconfdir + "macros", 'a') as f:
                f.write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))

        # Import the signing public key so rpm can verify packages.
        if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
            signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
            pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
            signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
            rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
            cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
            try:
                subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError as e:
                bb.fatal("Importing GPG key failed. Command '%s' "
                         "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))

    def create_configs(self):
        """Write both dnf and rpm configuration into the target rootfs."""
        self._configure_dnf()
        self._configure_rpm()

    def write_index(self):
        """Regenerate the local feed index, serialized by a lock file."""
        lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
        lf = bb.utils.lockfile(lockfilename, False)
        RpmIndexer(self.d, self.rpm_repo_dir).write_index()
        bb.utils.unlockfile(lf)

    def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
        """Write .repo files for remote package feeds into
        etc/yum.repos.d, one repo per arch when feed_archs is given."""
        from urllib.parse import urlparse

        if feed_uris == "":
            return

        gpg_opts = ''
        if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
            gpg_opts += 'repo_gpgcheck=1\n'
            gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))

        if self.d.getVar('RPM_SIGN_PACKAGES') != '1':
            gpg_opts += 'gpgcheck=0\n'

        bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
        remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
        for uri in remote_uris:
            # Repo id is derived from the URI path components.
            repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
            if feed_archs is not None:
                for arch in feed_archs.split():
                    repo_uri = uri + "/" + arch
                    repo_id   = "oe-remote-repo"  + "-".join(urlparse(repo_uri).path.split("/"))
                    repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
                    # Append: several arches share one .repo file.
                    with open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a') as f:
                        f.write("[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
            else:
                repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
                repo_uri = uri
                with open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w') as f:
                    f.write("[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))

    def _prepare_pkg_transaction(self):
        """Export the environment variables scriptlets and intercepts
        expect during a package transaction."""
        os.environ['D'] = self.target_rootfs
        os.environ['OFFLINE_ROOT'] = self.target_rootfs
        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
        os.environ['INTERCEPT_DIR'] = self.intercepts_dir
        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')

    def install(self, pkgs, attempt_only=False, hard_depends_only=False):
        """Install *pkgs* via dnf; aborts the build when postinstall
        scriptlets fail. attempt_only maps to --skip-broken."""
        if len(pkgs) == 0:
            return
        self._prepare_pkg_transaction()

        bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
        package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
        exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])

        output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
                         (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
                         (["--setopt=install_weak_deps=False"] if (hard_depends_only or self.d.getVar('NO_RECOMMENDATIONS') == "1") else []) +
                         (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
                         ["install"] +
                         pkgs)

        # Scan dnf output for failed POSTIN scriptlets; ordered dict keeps
        # failure order while de-duplicating package names.
        failed_scriptlets_pkgnames = collections.OrderedDict()
        for line in output.splitlines():
            if line.startswith("Error: Systemctl"):
                bb.error(line)

            if line.startswith("Error in POSTIN scriptlet in rpm package"):
                failed_scriptlets_pkgnames[line.split()[-1]] = True

        if len(failed_scriptlets_pkgnames) > 0:
            failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))

    def remove(self, pkgs, with_dependencies = True):
        """Remove *pkgs*: via dnf when dependencies should be followed,
        otherwise via 'rpm -e --nodeps'."""
        if not pkgs:
            return

        self._prepare_pkg_transaction()

        if with_dependencies:
            self._invoke_dnf(["remove"] + pkgs)
        else:
            cmd = bb.utils.which(os.getenv('PATH'), "rpm")
            args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs]

            try:
                bb.note("Running %s" % ' '.join([cmd] + args + pkgs))
                output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
                bb.note(output)
            except subprocess.CalledProcessError as e:
                bb.fatal("Could not invoke rpm. Command "
                         "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))

    def upgrade(self):
        """Upgrade all installed packages."""
        self._prepare_pkg_transaction()
        self._invoke_dnf(["upgrade"])

    def autoremove(self):
        """Remove packages that are no longer required."""
        self._prepare_pkg_transaction()
        self._invoke_dnf(["autoremove"])

    def remove_packaging_data(self):
        """Drop dnf caches and all rpm/dnf state dirs from the rootfs."""
        self._invoke_dnf(["clean", "all"])
        for dir in self.packaging_data_dirs:
            bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)

    def backup_packaging_data(self):
        # Save the packaging dirs for increment rpm image generation
        if os.path.exists(self.saved_packaging_data):
            bb.utils.remove(self.saved_packaging_data, True)
        for i in self.packaging_data_dirs:
            source_dir = oe.path.join(self.target_rootfs, i)
            target_dir = oe.path.join(self.saved_packaging_data, i)
            if os.path.isdir(source_dir):
                shutil.copytree(source_dir, target_dir, symlinks=True)
            elif os.path.isfile(source_dir):
                shutil.copy2(source_dir, target_dir)

    def recovery_packaging_data(self):
        # Move the rpmlib back
        if os.path.exists(self.saved_packaging_data):
            for i in self.packaging_data_dirs:
                target_dir = oe.path.join(self.target_rootfs, i)
                if os.path.exists(target_dir):
                    bb.utils.remove(target_dir, True)
                source_dir = oe.path.join(self.saved_packaging_data, i)
                if os.path.isdir(source_dir):
                    shutil.copytree(source_dir, target_dir, symlinks=True)
                elif os.path.isfile(source_dir):
                    shutil.copy2(source_dir, target_dir)

    def list_installed(self):
        """Parse 'dnf repoquery --installed' output into a dict mapping
        package name to arch/version/filename/deps info."""
        output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
                                  print_output = False)
        packages = {}
        current_package = None
        current_deps = None
        # Simple line-oriented state machine over the custom queryformat.
        current_state = "initial"
        for line in output.splitlines():
            if line.startswith("Package:"):
                package_info = line.split(" ")[1:]
                current_package = package_info[0]
                package_arch = package_info[1]
                package_version = package_info[2]
                package_rpm = package_info[3]
                packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm}
                current_deps = []
            elif line.startswith("Dependencies:"):
                current_state = "dependencies"
            elif line.startswith("Recommendations"):
                current_state = "recommendations"
            elif line.startswith("DependenciesEndHere:"):
                current_state = "initial"
                packages[current_package]["deps"] = current_deps
            elif len(line) > 0:
                if current_state == "dependencies":
                    current_deps.append(line)
                elif current_state == "recommendations":
                    # Recommendations are tagged so callers can tell them
                    # apart from hard dependencies.
                    current_deps.append("%s [REC]" % line)

        return packages

    def update(self):
        """Refresh dnf's metadata cache."""
        self._invoke_dnf(["makecache", "--refresh"])

    def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
        """Run dnf with the standard rootfs arguments plus *dnf_args*.

        Returns dnf's combined stdout/stderr. On failure, either
        bb.fatal()s (fatal=True) or bb.note()s and returns the output.
        """
        os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs

        dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
        standard_dnf_args = ["-v", "--rpmverbosity=info", "-y",
                             "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
                             "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
                             "--installroot=%s" % (self.target_rootfs),
                             "--setopt=logdir=%s" % (self.d.getVar('T'))
                            ]
        # rpm_repo_dir only exists when the PM was built with needfeed=True.
        if hasattr(self, "rpm_repo_dir"):
            standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir))
        cmd = [dnf_cmd] + standard_dnf_args + dnf_args
        bb.note('Running %s' % ' '.join(cmd))
        try:
            output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
            if print_output:
                bb.debug(1, output)
            return output
        except subprocess.CalledProcessError as e:
            # Index by the fatal flag: bb.note when non-fatal, bb.fatal
            # (which raises) when fatal.
            if print_output:
                (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
                     "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
            else:
                (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
                     "'%s' returned %d:" % (' '.join(cmd), e.returncode))
            return e.output.decode("utf-8")

    def dump_install_solution(self, pkgs):
        """Persist the computed install package list for the next
        incremental build and return it unchanged."""
        with open(self.solution_manifest, 'w') as f:
            f.write(" ".join(pkgs))
        return pkgs

    def load_old_install_solution(self):
        """Return the previously dumped install solution, or [] if none."""
        if not os.path.exists(self.solution_manifest):
            return []
        with open(self.solution_manifest, 'r') as fd:
            return fd.read().split()

    def _script_num_prefix(self, path):
        """Return the next numeric prefix for a saved postinst script in
        *path* (always at least 100, preserving execution order)."""
        files = os.listdir(path)
        numbers = set()
        numbers.add(99)
        for f in files:
            numbers.add(int(f.split("-")[0]))
        return max(numbers) + 1

    def save_rpmpostinst(self, pkg):
        """Query *pkg*'s %post scriptlet via rpm and save it under
        ${sysconfdir}/rpm-postinsts for execution at first boot."""
        bb.note("Saving postinstall script of %s" % (pkg))
        cmd = bb.utils.which(os.getenv('PATH'), "rpm")
        args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]

        try:
            output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8")
        except subprocess.CalledProcessError as e:
            bb.fatal("Could not invoke rpm. Command "
                     "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))

        # may need to prepend #!/bin/sh to output

        target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
        bb.utils.mkdirhier(target_path)
        num = self._script_num_prefix(target_path)
        saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
        with open(saved_script_name, 'w') as f:
            f.write(output)
        os.chmod(saved_script_name, 0o755)

    def _handle_intercept_failure(self, registered_pkgs):
        """Fallback when a postinstall intercept fails: defer the affected
        packages' scriptlets to first boot."""
        rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
        bb.utils.mkdirhier(rpm_postinsts_dir)

        # Save the package postinstalls in /etc/rpm-postinsts
        for pkg in registered_pkgs.split():
            self.save_rpmpostinst(pkg)

    def extract(self, pkg):
        """Unpack *pkg*'s payload into a temp dir via rpm2cpio|cpio and
        return the directory path. Caller owns the directory."""
        output = self._invoke_dnf(["repoquery", "--location", pkg])
        pkg_name = output.splitlines()[-1]
        if not pkg_name.endswith(".rpm"):
            bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
        # Strip file: prefix
        pkg_path = pkg_name[5:]

        cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
        rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")

        if not os.path.isfile(pkg_path):
            bb.fatal("Unable to extract package for '%s'."
                     "File %s doesn't exists" % (pkg, pkg_path))

        tmp_dir = tempfile.mkdtemp()
        current_dir = os.getcwd()
        os.chdir(tmp_dir)

        try:
            cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd)
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        except subprocess.CalledProcessError as e:
            bb.utils.remove(tmp_dir, recurse=True)
            bb.fatal("Unable to extract %s package. Command '%s' "
                     "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
        except OSError as e:
            bb.utils.remove(tmp_dir, recurse=True)
            bb.fatal("Unable to extract %s package. Command '%s' "
                     "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))

        bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
        os.chdir(current_dir)

        return tmp_dir

View File

@@ -0,0 +1,56 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from oe.manifest import Manifest
class PkgManifest(Manifest):
    """
    RPM-flavoured manifest writer; groups packages into must-install
    and multilib sets and writes the initial manifest file.
    """

    def _split_multilib(self, pkg_list):
        """Group the whitespace-separated *pkg_list* by package type.

        A package whose name starts with any MULTILIB_VARIANTS prefix is
        classified as multilib; everything else is must-install. Returns
        a dict mapping type -> space-joined package names.
        """
        grouped = dict()
        for name in pkg_list.split():
            variants = self.d.getVar('MULTILIB_VARIANTS').split()
            if any(name.startswith(v + '-') for v in variants):
                kind = self.PKG_TYPE_MULTILIB
            else:
                kind = self.PKG_TYPE_MUST_INSTALL
            if kind in grouped:
                grouped[kind] = grouped[kind] + " " + name
            else:
                grouped[kind] = name
        return grouped

    def create_initial(self):
        """Write the initial manifest: one "type,package" line per package
        collected from this manifest type's variables."""
        pkgs = dict()

        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            for var in self.var_maps[self.manifest_type]:
                if var in self.vars_to_split:
                    split_pkgs = self._split_multilib(self.d.getVar(var))
                    if split_pkgs is not None:
                        pkgs.update(split_pkgs)
                else:
                    pkg_list = self.d.getVar(var)
                    if pkg_list is not None:
                        pkgs[self.var_maps[self.manifest_type][var]] = pkg_list

            for pkg_type in pkgs:
                for pkg in pkgs[pkg_type].split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    def create_final(self):
        # Nothing to do for RPM.
        pass

    def create_full(self, pm):
        # Nothing to do for RPM.
        pass

View File

@@ -0,0 +1,150 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from oe.rootfs import Rootfs
from oe.manifest import Manifest
from oe.utils import execute_pre_post_process
from oe.package_manager.rpm.manifest import PkgManifest
from oe.package_manager.rpm import RpmPM
class PkgRootfs(Rootfs):
    """Rootfs implementation backed by rpm/dnf, with optional incremental
    image generation (INC_RPM_IMAGE_GEN)."""

    def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
        super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
        # Patterns in build logs that indicate a failed rootfs step.
        self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\
                               r'|exit 1|ERROR: |Error: |Error |ERROR '\
                               r'|Failed |Failed: |Failed$|Failed\(\d+\):)'
        self.manifest = PkgManifest(d, manifest_dir)

        self.pm = RpmPM(d,
                        d.getVar('IMAGE_ROOTFS'),
                        self.d.getVar('TARGET_VENDOR')
                        )

        self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
        if self.inc_rpm_image_gen != "1":
            # Fresh build: start from an empty rootfs.
            bb.utils.remove(self.image_rootfs, True)
        else:
            # Incremental build: restore the saved rpm/dnf state instead.
            self.pm.recovery_packaging_data()
        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)

        self.pm.create_configs()

    '''
    While rpm incremental image generation is enabled, it will remove the
    unneeded pkgs by comparing the new install solution manifest and the
    old installed manifest.
    '''
    def _create_incremental(self, pkgs_initial_install):
        if self.inc_rpm_image_gen == "1":

            pkgs_to_install = list()
            for pkg_type in pkgs_initial_install:
                pkgs_to_install += pkgs_initial_install[pkg_type]

            installed_manifest = self.pm.load_old_install_solution()
            solution_manifest = self.pm.dump_install_solution(pkgs_to_install)

            pkg_to_remove = list()
            for pkg in installed_manifest:
                if pkg not in solution_manifest:
                    pkg_to_remove.append(pkg)

            self.pm.update()

            bb.note('incremental update -- upgrade packages in place ')
            self.pm.upgrade()
            if pkg_to_remove != []:
                bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
                self.pm.remove(pkg_to_remove)

            self.pm.autoremove()

    def _create(self):
        """Populate the rpm rootfs: index, pre-process, (incremental
        cleanup), install manifest and complementary packages, then run
        post-processing."""
        pkgs_to_install = self.manifest.parse_initial_manifest()
        rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
        rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')

        # update PM index files
        self.pm.write_index()

        execute_pre_post_process(self.d, rpm_pre_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        if self.inc_rpm_image_gen == "1":
            self._create_incremental(pkgs_to_install)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self.pm.update()

        # Split the manifest into regular and attempt-only installs.
        pkgs = []
        pkgs_attempt = []
        for pkg_type in pkgs_to_install:
            if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
                pkgs_attempt += pkgs_to_install[pkg_type]
            else:
                pkgs += pkgs_to_install[pkg_type]

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self.pm.install(pkgs)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self.pm.install(pkgs_attempt, True)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self.pm.install_complementary()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        # Keep rpm/dnf state dirs intact in the debug rootfs.
        self._setup_dbg_rootfs(['/etc/rpm', '/etc/rpmrc', '/etc/dnf', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])

        execute_pre_post_process(self.d, rpm_post_process_cmds)

        if self.inc_rpm_image_gen == "1":
            self.pm.backup_packaging_data()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

    @staticmethod
    def _depends_list():
        """Variables this rootfs implementation depends on."""
        return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
                'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']

    def _get_delayed_postinsts(self):
        """Return the list of postinst scripts deferred to first boot, or
        None when there are none."""
        postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
        if os.path.isdir(postinst_dir):
            files = os.listdir(postinst_dir)
            for f in files:
                bb.note('Delayed package scriptlet: %s' % f)
            return files

        return None

    def _save_postinsts(self):
        # this is just a stub. For RPM, the failed postinstalls are
        # already saved in /etc/rpm-postinsts
        pass

    def _log_check(self):
        """Scan logs for both warnings and errors."""
        self._log_check_warn()
        self._log_check_error()

    def _cleanup(self):
        """Drop dnf caches unless the image keeps package management."""
        if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d):
            self.pm._invoke_dnf(["clean", "all"])

View File

@@ -0,0 +1,122 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import glob
from oe.utils import execute_pre_post_process
from oe.sdk import Sdk
from oe.manifest import Manifest
from oe.package_manager.rpm.manifest import PkgManifest
from oe.package_manager.rpm import RpmPM
class PkgSdk(Sdk):
    """SDK populator backed by rpm/dnf packages.

    Builds the SDK's target and host sysroots from rpm feeds, then
    relocates rpm/dnf state and configuration into the SDK output tree.
    """

    def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"):
        super(PkgSdk, self).__init__(d, manifest_dir)
        self.target_manifest = PkgManifest(d, self.manifest_dir,
                                           Manifest.MANIFEST_TYPE_SDK_TARGET)
        self.host_manifest = PkgManifest(d, self.manifest_dir,
                                         Manifest.MANIFEST_TYPE_SDK_HOST)

        # The extensible SDK uses a separate repo workdir.
        # NOTE(review): the rpm_workdir parameter is shadowed by this
        # local default and is effectively unused -- confirm intended.
        rpm_repo_workdir = "oe-sdk-repo"
        if "sdk_ext" in d.getVar("BB_RUNTASK"):
            rpm_repo_workdir = "oe-sdk-ext-repo"

        self.target_pm = RpmPM(d,
                               self.sdk_target_sysroot,
                               self.d.getVar('TARGET_VENDOR'),
                               'target',
                               rpm_repo_workdir=rpm_repo_workdir
                               )

        self.host_pm = RpmPM(d,
                             self.sdk_host_sysroot,
                             self.d.getVar('SDK_VENDOR'),
                             'host',
                             "SDK_PACKAGE_ARCHS",
                             "SDK_OS",
                             rpm_repo_workdir=rpm_repo_workdir
                             )

    def _populate_sysroot(self, pm, manifest):
        """Configure, index and install the manifest's packages into the
        sysroot managed by *pm* (attempt-only packages installed last)."""
        pkgs_to_install = manifest.parse_initial_manifest()

        pm.create_configs()
        pm.write_index()
        pm.update()

        pkgs = []
        pkgs_attempt = []
        for pkg_type in pkgs_to_install:
            if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
                pkgs_attempt += pkgs_to_install[pkg_type]
            else:
                pkgs += pkgs_to_install[pkg_type]

        pm.install(pkgs)

        pm.install(pkgs_attempt, True)

    def _populate(self):
        """Populate target and host sysroots, then relocate rpm state and
        sysconfig data into the nativesdk layout."""
        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))

        bb.note("Installing TARGET packages")
        self._populate_sysroot(self.target_pm, self.target_manifest)

        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))

        # Run intercepts with the nativesdk wrapper scripts first in PATH,
        # then restore the original environment.
        env_bkp = os.environ.copy()
        os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
                             os.pathsep + os.environ["PATH"]

        self.target_pm.run_intercepts(populate_sdk='target')
        os.environ.update(env_bkp)

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))

        # Keep packaging data only if the SDK ships package management.
        if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
            self.target_pm.remove_packaging_data()

        bb.note("Installing NATIVESDK packages")
        self._populate_sysroot(self.host_pm, self.host_manifest)
        self.install_locales(self.host_pm)

        self.host_pm.run_intercepts(populate_sdk='host')

        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))

        if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
            self.host_pm.remove_packaging_data()

        # Move host RPM library data
        native_rpm_state_dir = os.path.join(self.sdk_output,
                                            self.sdk_native_path,
                                            self.d.getVar('localstatedir_nativesdk').strip('/'),
                                            "lib",
                                            "rpm"
                                            )
        self.mkdirhier(native_rpm_state_dir)
        for f in glob.glob(os.path.join(self.sdk_output,
                                        "var",
                                        "lib",
                                        "rpm",
                                        "*")):
            self.movefile(f, native_rpm_state_dir)

        self.remove(os.path.join(self.sdk_output, "var"), True)

        # Move host sysconfig data
        native_sysconf_dir = os.path.join(self.sdk_output,
                                          self.sdk_native_path,
                                          self.d.getVar('sysconfdir',
                                                        True).strip('/'),
                                          )
        self.mkdirhier(native_sysconf_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
            self.movefile(f, native_sysconf_dir)
        for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
            self.mkdirhier(native_sysconf_dir + "/dnf")
            self.movefile(f, native_sysconf_dir + "/dnf")
        self.remove(os.path.join(self.sdk_output, "etc"), True)

View File

@@ -0,0 +1,366 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import codecs
import os
import json
import bb.compress.zstd
import oe.path
from glob import glob
def packaged(pkg, d):
    """Return True if a '.packaged' marker exists for pkg's runtime pkgdata."""
    marker = get_subpkgedata_fn(pkg, d) + '.packaged'
    return os.access(marker, os.R_OK)
def read_pkgdatafile(fn):
    """Parse a pkgdata file of "KEY: value" lines into a dict.

    Values are unicode-escape decoded.  An unreadable/missing file
    yields an empty dict."""
    pkgdata = {}

    def _unescape(value):
        return codecs.getdecoder("unicode_escape")(value)[0]

    if os.access(fn, os.R_OK):
        import re
        field_re = re.compile(r"(^.+?):\s+(.*)")
        with open(fn, 'r') as f:
            for line in f:
                m = field_re.match(line)
                if m:
                    pkgdata[m.group(1)] = _unescape(m.group(2))
    return pkgdata
def get_subpkgedata_fn(pkg, d):
    """Return the path of the runtime pkgdata file for pkg."""
    return d.expand("${PKGDATA_DIR}/runtime/" + pkg)
def has_subpkgdata(pkg, d):
    """Return True if a readable runtime pkgdata file exists for pkg."""
    fn = get_subpkgedata_fn(pkg, d)
    return os.access(fn, os.R_OK)
def read_subpkgdata(pkg, d):
    """Parse the runtime pkgdata file for pkg into a dict."""
    return read_pkgdatafile(get_subpkgedata_fn(pkg, d))
def has_pkgdata(pn, d):
    """Return True if a readable recipe-level pkgdata file exists for pn."""
    return os.access(d.expand('${PKGDATA_DIR}/%s' % pn), os.R_OK)
def read_pkgdata(pn, d):
    """Parse the recipe-level pkgdata file for pn into a dict."""
    return read_pkgdatafile(d.expand('${PKGDATA_DIR}/%s' % pn))
#
# Collapse FOO:pkg variables into FOO
#
def read_subpkgdata_dict(pkg, d):
    """Read pkg's runtime pkgdata, collapsing FOO:pkg keys into FOO."""
    flattened = {}
    subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
    for key in subd:
        base = key.replace(":" + pkg, "")
        # When both FOO and FOO:pkg exist, the package-specific one wins.
        if base == key and key + ":" + pkg in subd:
            continue
        flattened[base] = subd[key]
    return flattened
def read_subpkgdata_extended(pkg, d):
    """Load the extended (zstd-compressed JSON) pkgdata for pkg.

    Returns the decoded object, or None when no extended data exists."""
    import json
    import bb.compress.zstd

    fn = d.expand("${PKGDATA_DIR}/extended/%s.json.zstd" % pkg)
    try:
        threads = int(d.getVar("BB_NUMBER_THREADS"))
        with bb.compress.zstd.open(fn, "rt", encoding="utf-8", num_threads=threads) as f:
            return json.load(f)
    except FileNotFoundError:
        return None
def _pkgmap(d):
    """Return a dictionary mapping package to recipe name."""
    pkgdatadir = d.getVar("PKGDATA_DIR")

    pkgmap = {}
    try:
        files = os.listdir(pkgdatadir)
    except OSError:
        bb.warn("No files in %s?" % pkgdatadir)
        files = []

    # Top-level non-directory entries are per-recipe pkgdata files; their
    # PACKAGES field lists every package the recipe produced.
    for pn in [f for f in files if not os.path.isdir(os.path.join(pkgdatadir, f))]:
        try:
            pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
        except OSError:
            # Unreadable entry: skip rather than fail the whole map.
            continue

        packages = pkgdata.get("PACKAGES") or ""
        for pkg in packages.split():
            pkgmap[pkg] = pn

    return pkgmap
def pkgmap(d):
    """Return a dictionary mapping package to recipe name.
    Cache the mapping in the metadata"""
    cached = d.getVar("__pkgmap_data", False)
    if cached is None:
        cached = _pkgmap(d)
        d.setVar("__pkgmap_data", cached)
    return cached
def recipename(pkg, d):
    """Return the recipe name for the given binary package name."""
    mapping = pkgmap(d)
    return mapping.get(pkg)
def foreach_runtime_provider_pkgdata(d, rdep, include_rdep=False):
    """Yield (provider, pkgdata) for every package rproviding rdep.

    With include_rdep, rdep itself is also considered a provider."""
    pkgdata_dir = d.getVar("PKGDATA_DIR")
    providers = set()
    try:
        providers.update(os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdep)))
    except OSError:
        pass

    if include_rdep:
        providers.add(rdep)

    for provider in sorted(providers):
        yield provider, read_subpkgdata(provider, d)
def get_package_mapping(pkg, basepkg, d, depversions=None):
    """Map a package name to its renamed (PKG:) form, if any.

    Returns the rewritten name from PKG:pkg, or pkg itself when no
    rename applies or when the rename must be suppressed (allarch
    multilib stubs, or a rename that rprovides its original name when
    the dependency carries no version constraint)."""
    import oe.packagedata

    data = oe.packagedata.read_subpkgdata(pkg, d)
    key = "PKG:%s" % pkg

    if key in data:
        if bb.data.inherits_class('allarch', d) and bb.data.inherits_class('packagegroup', d) and pkg != data[key]:
            bb.error("An allarch packagegroup shouldn't depend on packages which are dynamically renamed (%s to %s)" % (pkg, data[key]))
        # Have to avoid undoing the write_extra_pkgs(global_variants...)
        if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
            and data[key] == basepkg:
            return pkg

        # depversions == [] means an unversioned dependency was found.
        if depversions == []:
            # Avoid returning a mapping if the renamed package rprovides its original name
            rprovkey = "RPROVIDES:%s" % pkg
            if rprovkey in data:
                if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
                    bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
                    return pkg

        # Do map to rewritten package name
        return data[key]

    return pkg
def get_package_additional_metadata(pkg_type, d):
    """Return extra package metadata for the given backend (rpm/deb/ipk).

    Checks PACKAGE_ADD_METADATA_<TYPE> first, then the generic
    PACKAGE_ADD_METADATA; the first set variable wins.  Entries are
    split per the variable's "separator" varflag (default "\\n"),
    stripped, and joined with newlines.  Returns None if neither
    variable is set."""
    base_key = "PACKAGE_ADD_METADATA"
    for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key):
        if d.getVar(key, False) is None:
            continue
        # Mark the variable as list-typed so oe.data.typed_value splits it.
        d.setVarFlag(key, "type", "list")
        if d.getVarFlag(key, "separator") is None:
            d.setVarFlag(key, "separator", "\\n")
        metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
        return "\n".join(metadata_fields).strip()
def runtime_mapping_rename(varname, pkg, d):
    """Rewrite each dependency in d.getVar(varname) through
    get_package_mapping() and store the result back, preserving
    version constraints."""
    renamed = {}
    deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
    for dep, versions in deps.items():
        mapped = get_package_mapping(dep, pkg, d, versions)
        if dep != mapped:
            bb.note("package name mapping done: %s -> %s" % (dep, mapped))
        renamed[mapped] = versions
    d.setVar(varname, bb.utils.join_deps(renamed, commasep=False))
def emit_pkgdata(pkgfiles, d):
    """Write the pkgdata tree for every package this recipe produced.

    Emits: the recipe-level PACKAGES file, a runtime/<pkg> file with the
    PKGDATA_VARS fields plus file lists and PKGSIZE, a compressed
    extended/<pkg>.json.zstd with per-file info, rprovides/reverse
    lookup symlinks, '.packaged' markers, and multilib variant stubs.
    Also folds pkg_postinst_ontarget into pkg_postinst and hardens the
    scriptlets with 'set -e'."""
    def process_postinst_on_target(pkg, mlprefix):
        # Merge pkg_postinst_ontarget into pkg_postinst, prefixed by a
        # fragment that defers execution to first boot when run under $D.
        pkgval = d.getVar('PKG:%s' % pkg)
        if pkgval is None:
            pkgval = pkg

        defer_fragment = """
if [ -n "$D" ]; then
    $INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
    exit 0
fi
""" % (pkgval, mlprefix)

        postinst = d.getVar('pkg_postinst:%s' % pkg)
        postinst_ontarget = d.getVar('pkg_postinst_ontarget:%s' % pkg)

        if postinst_ontarget:
            bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
            if not postinst:
                postinst = '#!/bin/sh\n'
            postinst += defer_fragment
            postinst += postinst_ontarget
            d.setVar('pkg_postinst:%s' % pkg, postinst)

    def add_set_e_to_scriptlets(pkg):
        # Prepend "set -e" so scriptlet failures are not silently ignored,
        # inserting it after any shebang line.
        for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
            scriptlet = d.getVar('%s:%s' % (scriptlet_name, pkg))
            if scriptlet:
                scriptlet_split = scriptlet.split('\n')
                if scriptlet_split[0].startswith("#!"):
                    scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
                else:
                    scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
            d.setVar('%s:%s' % (scriptlet_name, pkg), scriptlet)

    def write_if_exists(f, pkg, var):
        # Write "VAR:pkg: value" (unicode-escaped) when the per-package
        # variable is set, else fall back to the un-suffixed VAR.
        # Returns the value written (or None).
        def encode(str):
            import codecs
            c = codecs.getencoder("unicode_escape")
            return c(str)[0].decode("latin1")

        val = d.getVar('%s:%s' % (var, pkg))
        if val:
            f.write('%s:%s: %s\n' % (var, pkg, encode(val)))
            return val
        val = d.getVar('%s' % (var))
        if val:
            f.write('%s: %s\n' % (var, encode(val)))
        return val

    def write_extra_pkgs(variants, pn, packages, pkgdatadir):
        # Per-multilib-variant recipe pkgdata stubs (<variant>-<pn>).
        for variant in variants:
            with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
                fd.write("PACKAGES: %s\n" % ' '.join(
                    map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))

    def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
        # Per-variant runtime stubs that point back at the base package.
        for variant in variants:
            for pkg in packages.split():
                ml_pkg = "%s-%s" % (variant, pkg)
                subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
                with open(subdata_file, 'w') as fd:
                    fd.write("PKG:%s: %s" % (ml_pkg, pkg))

    packages = d.getVar('PACKAGES')
    pkgdest = d.getVar('PKGDEST')
    pkgdatadir = d.getVar('PKGDESTWORK')

    data_file = pkgdatadir + d.expand("/${PN}")
    with open(data_file, 'w') as fd:
        fd.write("PACKAGES: %s\n" % packages)

    pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []

    pn = d.getVar('PN')
    global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
    variants = (d.getVar('MULTILIB_VARIANTS') or "").split()

    if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
        write_extra_pkgs(variants, pn, packages, pkgdatadir)

    if bb.data.inherits_class('allarch', d) and not variants \
        and not bb.data.inherits_class('packagegroup', d):
        write_extra_pkgs(global_variants, pn, packages, pkgdatadir)

    workdir = d.getVar('WORKDIR')

    for pkg in packages.split():
        pkgval = d.getVar('PKG:%s' % pkg)
        if pkgval is None:
            pkgval = pkg
            d.setVar('PKG:%s' % pkg, pkg)

        extended_data = {
            "files_info": {}
        }

        pkgdestpkg = os.path.join(pkgdest, pkg)
        files = {}
        files_extra = {}
        total_size = 0
        seen = set()
        for f in pkgfiles[pkg]:
            fpath = os.sep + os.path.relpath(f, pkgdestpkg)

            fstat = os.lstat(f)
            files[fpath] = fstat.st_size

            extended_data["files_info"].setdefault(fpath, {})
            extended_data["files_info"][fpath]['size'] = fstat.st_size
            # Track inodes so hardlinked files are counted only once.
            if fstat.st_ino not in seen:
                seen.add(fstat.st_ino)
                total_size += fstat.st_size
            if fpath in pkgdebugsource:
                extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
                del pkgdebugsource[fpath]

        d.setVar('FILES_INFO:' + pkg , json.dumps(files, sort_keys=True))

        process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
        add_set_e_to_scriptlets(pkg)

        subdata_file = pkgdatadir + "/runtime/%s" % pkg
        with open(subdata_file, 'w') as sf:
            for var in (d.getVar('PKGDATA_VARS') or "").split():
                val = write_if_exists(sf, pkg, var)

            write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
            for dfile in sorted((d.getVar('FILERPROVIDESFLIST:' + pkg) or "").split()):
                write_if_exists(sf, pkg, 'FILERPROVIDES:' + dfile)

            write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
            for dfile in sorted((d.getVar('FILERDEPENDSFLIST:' + pkg) or "").split()):
                write_if_exists(sf, pkg, 'FILERDEPENDS:' + dfile)

            sf.write('%s:%s: %d\n' % ('PKGSIZE', pkg, total_size))

        subdata_extended_file = pkgdatadir + "/extended/%s.json.zstd" % pkg
        num_threads = int(d.getVar("BB_NUMBER_THREADS"))
        with bb.compress.zstd.open(subdata_extended_file, "wt", encoding="utf-8", num_threads=num_threads) as f:
            json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))

        # Symlinks needed for rprovides lookup
        rprov = d.getVar('RPROVIDES:%s' % pkg) or d.getVar('RPROVIDES')
        if rprov:
            for p in bb.utils.explode_deps(rprov):
                subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
                bb.utils.mkdirhier(os.path.dirname(subdata_sym))
                oe.path.relsymlink(subdata_file, subdata_sym, True)

        allow_empty = d.getVar('ALLOW_EMPTY:%s' % pkg)
        if not allow_empty:
            allow_empty = d.getVar('ALLOW_EMPTY')
        root = "%s/%s" % (pkgdest, pkg)
        os.chdir(root)
        g = glob('*')
        if g or allow_empty == "1":
            # Symlinks needed for reverse lookups (from the final package name)
            subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
            oe.path.relsymlink(subdata_file, subdata_sym, True)

            # Marker consumed by packaged() above.
            packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
            open(packagedfile, 'w').close()

    if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
        write_extra_runtime_pkgs(variants, packages, pkgdatadir)

    if bb.data.inherits_class('allarch', d) and not variants \
        and not bb.data.inherits_class('packagegroup', d):
        write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
def mapping_rename_hook(d):
    """
    Rewrite variables to account for package renaming in things
    like debian.bbclass or manual PKG variable name changes
    """
    pkg = d.getVar("PKG")
    for varname in ("RDEPENDS", "RRECOMMENDS", "RSUGGESTS"):
        oe.packagedata.runtime_mapping_rename(varname, pkg, d)

View File

@@ -0,0 +1,36 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import itertools
def is_optional(feature, d):
    """Return True if FEATURE_PACKAGES_<feature> carries the 'optional' varflag."""
    flag = d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional")
    return bool(flag)
def packages(features, d):
    """Yield every package listed in FEATURE_PACKAGES_<feature> for each feature."""
    for feature in features:
        value = d.getVar("FEATURE_PACKAGES_%s" % feature) or ""
        yield from value.split()
def required_packages(features, d):
    """Packages for the features that are not flagged optional."""
    mandatory = [feature for feature in features if not is_optional(feature, d)]
    return packages(mandatory, d)
def optional_packages(features, d):
    """Packages for the features flagged optional."""
    chosen = [feature for feature in features if is_optional(feature, d)]
    return packages(chosen, d)
def active_packages(features, d):
    """All feature packages: required ones first, then optional ones."""
    return itertools.chain(required_packages(features, d),
                           optional_packages(features, d))
def active_recipes(features, d):
    """Yield the recipe name of each active feature package that has one."""
    import oe.packagedata

    for pkg in active_packages(features, d):
        pn = oe.packagedata.recipename(pkg, d)
        if pn:
            yield pn

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,349 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import errno
import glob
import shutil
import subprocess
import os.path
def join(*paths):
    """Like os.path.join but doesn't treat absolute RHS specially"""
    joined = "/".join(paths)
    return os.path.normpath(joined)
def relative(src, dest):
    """Return the relative path from src to dest.

    >>> relative("/usr/bin", "/tmp/foo/bar")
    '../../tmp/foo/bar'
    >>> relative("/usr/bin", "/usr/lib")
    '../lib'
    >>> relative("/tmp", "/tmp/foo/bar")
    'foo/bar'
    """
    return os.path.relpath(dest, src)
def make_relative_symlink(path):
    """ Convert an absolute symlink at 'path' into an equivalent relative one.

    Non-symlinks and already-relative links are left untouched."""
    if not os.path.islink(path):
        return
    link = os.readlink(path)
    if not os.path.isabs(link):
        return

    # find the common ancestor directory
    ancestor = path
    depth = 0
    while ancestor and not link.startswith(ancestor):
        # Strip one trailing component per iteration; depth counts how
        # many levels 'path' sits below the common ancestor.
        ancestor = ancestor.rpartition('/')[0]
        depth += 1

    if not ancestor:
        print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
        return

    base = link.partition(ancestor)[2].strip('/')
    # depth includes the link's own basename, hence "> 1" not "> 0".
    while depth > 1:
        base = "../" + base
        depth -= 1

    os.remove(path)
    os.symlink(base, path)
def replace_absolute_symlinks(basedir, d):
    """
    Walk basedir looking for absolute symlinks and replacing them with relative ones.
    The absolute links are assumed to be relative to basedir
    (compared to make_relative_symlink above which tries to compute common ancestors
    using pattern matching instead)
    """
    for walkroot, dirs, files in os.walk(basedir):
        for file in files + dirs:
            path = os.path.join(walkroot, file)
            if not os.path.islink(path):
                continue
            link = os.readlink(path)
            if not os.path.isabs(link):
                continue
            # Directory of the link, expressed relative to basedir, so the
            # link target can be rewritten relative to that location.
            walkdir = os.path.dirname(path.rpartition(basedir)[2])
            base = os.path.relpath(link, walkdir)
            bb.debug(2, "Replacing absolute path %s with relative path %s" % (link, base))
            os.remove(path)
            os.symlink(base, path)
def format_display(path, metadata):
""" Prepare a path for display to the user. """
rel = relative(metadata.getVar("TOPDIR"), path)
if len(rel) > len(path):
return path
else:
return rel
def copytree(src, dst):
    """Copy the tree at src into dst via a tar pipe, preserving xattrs,
    sparse files (-S) and permissions (-p)."""
    # We could use something like shutil.copytree here but it turns out
    # to be slow. It takes twice as long copying to an empty directory.
    # If dst already has contents performance can be 15 times slower.
    # This way we also preserve hardlinks between files in the tree.

    bb.utils.mkdirhier(dst)
    cmd = "tar --xattrs --xattrs-include='*' -cf - -S -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
    subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
def copyhardlinktree(src, dst):
    """Make a tree of hard links when possible, otherwise copy."""
    bb.utils.mkdirhier(dst)
    if os.path.isdir(src) and not len(os.listdir(src)):
        # Nothing to copy.
        return

    # Probe whether hardlinking across src -> dst actually works (it
    # fails across filesystems) by linking one sample file.
    canhard = False
    testfile = None
    for root, dirs, files in os.walk(src):
        if len(files):
            testfile = os.path.join(root, files[0])
            break

    if testfile is not None:
        try:
            os.link(testfile, os.path.join(dst, 'testfile'))
            os.unlink(os.path.join(dst, 'testfile'))
            canhard = True
        except Exception as e:
            bb.debug(2, "Hardlink test failed with " + str(e))

    if (canhard):
        # Need to copy directories only with tar first since cp will error if two
        # writers try and create a directory at the same time
        cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -S -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst)
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        source = ''
        if os.path.isdir(src):
            # Include hidden entries (./.??*) and regular ones separately so
            # cp's glob never expands to nothing.
            if len(glob.glob('%s/.??*' % src)) > 0:
                source = './.??* '
            if len(glob.glob('%s/**' % src)) > 0:
                source += './*'
            s_dir = src
        else:
            source = src
            s_dir = os.getcwd()
        # -l: hardlink instead of copying file contents.
        cmd = 'cp -afl --preserve=xattr %s %s' % (source, os.path.realpath(dst))
        subprocess.check_output(cmd, shell=True, cwd=s_dir, stderr=subprocess.STDOUT)
    else:
        # Fall back to a full copy (different filesystem).
        copytree(src, dst)
def copyhardlink(src, dst):
    """Hard-link src to dst; fall back to a plain copy (e.g. across filesystems)."""
    try:
        os.link(src, dst)
    except OSError:
        # Cross-device link or similar: copy the contents instead.
        shutil.copy(src, dst)
def remove(path, recurse=True):
    """
    Equivalent to rm -f or rm -rf
    NOTE: be careful about passing paths that may contain filenames with
    wildcards in them (as opposed to passing an actual wildcarded path) -
    since we use glob.glob() to expand the path. Filenames containing
    square brackets are particularly problematic since the they may not
    actually expand to match the original filename.
    """
    for entry in glob.glob(path):
        try:
            os.unlink(entry)
        except OSError as exc:
            if exc.errno == errno.EISDIR and recurse:
                shutil.rmtree(entry)
            elif exc.errno != errno.ENOENT:
                raise
def symlink(source, destination, force=False):
    """Create a symbolic link; with force, replace any existing destination.

    An existing link that already points at source is tolerated."""
    try:
        if force:
            remove(destination)
        os.symlink(source, destination)
    except OSError as e:
        # Swallow only "already exists and already correct".
        if e.errno == errno.EEXIST and os.readlink(destination) == source:
            return
        raise
def relsymlink(target, name, force=False):
    """Create symlink 'name' pointing at 'target' via a relative path."""
    rel = os.path.relpath(target, os.path.dirname(name))
    symlink(rel, name, force=force)
def find(dir, **walkoptions):
    """Recursively yield the path of every file below dir.

    Extra keyword arguments are forwarded to os.walk()."""
    for root, _dirs, filenames in os.walk(dir, **walkoptions):
        for filename in filenames:
            yield os.path.join(root, filename)
## realpath() related functions
def __is_path_below(file, root):
    # 'root' is expected to end with os.sep (realpath() guarantees this);
    # appending a separator to 'file' lets file == root-minus-slash match too.
    return (file + os.path.sep).startswith(root)
def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
    """Calculates real path of symlink 'start' + 'rel_path' below
    'root'; no part of 'start' below 'root' must contain symlinks. """
    have_dir = True
    for d in rel_path.split(os.path.sep):
        if not have_dir and not assume_dir:
            # The previous component resolved to a non-directory, so this
            # extra component cannot exist.
            raise OSError(errno.ENOENT, "no such directory %s" % start)

        if d == os.path.pardir: # '..'
            if len(start) >= len(root):
                # do not follow '..' before root
                start = os.path.dirname(start)
            else:
                # emit warning?
                pass
        else:
            # Resolve one more component (may itself be a symlink chain).
            (start, have_dir) = __realpath(os.path.join(start, d),
                                           root, loop_cnt, assume_dir)

        assert(__is_path_below(start, root))

    return start
def __realpath(file, root, loop_cnt, assume_dir):
    """Resolve symlink chains in 'file' (which must lie below 'root'),
    treating 'root' as the filesystem root for absolute link targets.

    Returns (resolved_path, is_dir).  Raises OSError(ELOOP) when more
    than loop_cnt links are chained."""
    while os.path.islink(file) and len(file) >= len(root):
        if loop_cnt == 0:
            raise OSError(errno.ELOOP, file)

        loop_cnt -= 1
        target = os.path.normpath(os.readlink(file))

        if not os.path.isabs(target):
            tdir = os.path.dirname(file)
            assert(__is_path_below(tdir, root))
        else:
            # Absolute targets are interpreted relative to 'root'.
            tdir = root

        file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)

    try:
        is_dir = os.path.isdir(file)
    except (OSError, ValueError):
        # Fix: was `is_dir = false` — a NameError if the (previously bare)
        # except ever fired.  Treat stat failures as "not a directory".
        is_dir = False

    return (file, is_dir)
def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
    """ Returns the canonical path of 'file' with assuming a
    toplevel 'root' directory. When 'use_physdir' is set, all
    preceding path components of 'file' will be resolved first;
    this flag should be set unless it is guaranteed that there is
    no symlink in the path. When 'assume_dir' is not set, missing
    path components will raise an ENOENT error"""
    root = os.path.normpath(root)
    file = os.path.normpath(file)

    if not root.endswith(os.path.sep):
        # letting root end with '/' makes some things easier
        root = root + os.path.sep

    if not __is_path_below(file, root):
        raise OSError(errno.EINVAL, "file '%s' is not below root" % file)

    try:
        if use_physdir:
            # Resolve every component of the path below root.
            file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
        else:
            # Resolve only the final component's symlink chain.
            file = __realpath(file, root, loop_cnt, assume_dir)[0]
    except OSError as e:
        if e.errno == errno.ELOOP:
            # make ELOOP more readable; without catching it, there will
            # be printed a backtrace with 100s of OSError exceptions
            # else
            raise OSError(errno.ELOOP,
                          "too much recursions while resolving '%s'; loop in '%s'" %
                          (file, e.strerror))

        raise

    return file
def is_path_parent(possible_parent, *paths):
    """
    Return True if a path is the parent of another, False otherwise.
    Multiple paths to test can be specified in which case all
    specified test paths must be under the parent in order to
    return True.
    """
    def _abs_with_sep(pth):
        # Trailing separator so "/a" doesn't match "/ab".
        absolute = os.path.abspath(pth)
        if not absolute.endswith(os.sep):
            absolute += os.sep
        return absolute

    parent = _abs_with_sep(possible_parent)
    if not paths:
        return False
    return all(_abs_with_sep(p).startswith(parent) for p in paths)
def which_wild(pathname, path=None, mode=os.F_OK, *, reverse=False, candidates=False):
    """Search a search path for pathname, supporting wildcards.

    Return all paths in the specific search path matching the wildcard
    pattern in pathname, returning only the first encountered for each
    file. If candidates is True, information on all potential candidate
    paths are included.
    """
    search_dirs = (path or os.environ.get('PATH', os.defpath)).split(':')
    if reverse:
        search_dirs.reverse()

    seen_rels = set()
    results = []
    for idx, dirname in enumerate(search_dirs):
        if not os.path.isabs(dirname):
            dirname = os.path.abspath(dirname)
        for match in sorted(glob.glob(os.path.join(dirname, pathname))):
            if not os.access(match, mode):
                continue
            rel = os.path.relpath(match, dirname)
            if rel in seen_rels:
                # Already found earlier in the search path.
                continue
            seen_rels.add(rel)
            if candidates:
                results.append((match, [os.path.join(p, rel) for p in search_dirs[:idx + 1]]))
            else:
                results.append(match)

    return results
def canonicalize(paths, sep=','):
    """Expand each sep-separated path with os.path.realpath() and rejoin.

    Paths containing '$' are assumed to be unexpanded BitBake variables
    and are dropped: passed through the shell they would expand to empty
    strings, but passed through realpath() they would instead get the
    current directory prepended.  A trailing slash on an input path is
    kept, since results may be used as prefixes in string comparisons."""
    resolved = []
    for path in (paths or '').split(sep):
        if '$' in path:
            continue
        suffix = '/' if path.endswith('/') else ''
        resolved.append(os.path.realpath(path) + suffix)
    return sep.join(resolved)

View File

@@ -0,0 +1,127 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
def prserv_make_conn(d, check = False):
    """Connect to the PR service named by PRSERV_HOST ("host:port").

    With check=True the connection is additionally ping-verified.
    Calls bb.fatal() (aborting the task) on any connection failure."""
    import prserv.serv
    host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
    try:
        conn = None
        conn = prserv.serv.connect(host_params[0], int(host_params[1]))
        if check:
            if not conn.ping():
                raise Exception('service not available')
    except Exception as exc:
        bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))

    return conn
def prserv_dump_db(d):
    """Export the PR service database contents.

    Honours the PRSERV_DUMPOPT_* filter variables.  Returns the export
    data from the service, or None when no PR service is configured or
    reachable."""
    if not d.getVar('PRSERV_HOST'):
        bb.error("Not using network based PR service")
        return None

    conn = prserv_make_conn(d)
    if conn is None:
        bb.error("Making connection failed to remote PR service")
        return None

    #dump db
    opt_version = d.getVar('PRSERV_DUMPOPT_VERSION')
    opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH')
    opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM')
    opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL'))
    # NOTE: rebinds 'd' to the export result; the datastore is not used
    # after this point.
    d = conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
    conn.close()
    return d
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
    """Import PRAUTO$version$pkgarch$checksum datastore entries into the
    PR service, optionally filtered by version/pkgarch/checksum.

    Returns the list of imported (version, pkgarch, checksum, value)
    tuples, or None when no PR service is configured or reachable."""
    if not d.getVar('PRSERV_HOST'):
        bb.error("Not using network based PR service")
        return None

    conn = prserv_make_conn(d)
    if conn is None:
        bb.error("Making connection failed to remote PR service")
        return None
    #get the entry values
    imported = []
    prefix = "PRAUTO$"
    for v in d.keys():
        if v.startswith(prefix):
            # Split from the right: checksums/versions may not contain '$'.
            (remain, sep, checksum) = v.rpartition('$')
            (remain, sep, pkgarch) = remain.rpartition('$')
            (remain, sep, version) = remain.rpartition('$')
            if (remain + '$' != prefix) or \
               (filter_version and filter_version != version) or \
               (filter_pkgarch and filter_pkgarch != pkgarch) or \
               (filter_checksum and filter_checksum != checksum):
                continue
            try:
                value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum))
            except BaseException as exc:
                # Fix: bb.debug() requires a numeric log level as its first
                # argument; it was previously called with the message only.
                bb.debug(1, "Not valid value of %s:%s" % (v, str(exc)))
                continue
            ret = conn.importone(version, pkgarch, checksum, value)
            if ret != value:
                bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
            else:
                imported.append((version, pkgarch, checksum, value))
    conn.close()
    return imported
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
    """Append a PR database export to PRSERV_DUMPFILE as BitBake-parsable
    variable assignments.

    metainfo: optional table/column description, written as comments.
    datainfo: list of row dicts (version/pkgarch/checksum/value), each
    written as a PRAUTO$...$...$... assignment.
    lockdown: when true, also emits PRSERV_LOCKDOWN = "1".
    nomax: when false, additionally writes PRAUTO_<ver>_<arch> entries
    holding the maximum value seen per pkgarch."""
    import bb.utils
    # initialize the output file
    bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR'))
    df = d.getVar('PRSERV_DUMPFILE')
    #write data
    with open(df, "a") as f, bb.utils.fileslocked(["%s.lock" % df]) as locks:
        if metainfo:
            #dump column info
            f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']);
            f.write("#Table: %s\n" % metainfo['tbl_name'])
            f.write("#Columns:\n")
            f.write("#name \t type \t notn \t dflt \t pk\n")
            f.write("#----------\t --------\t --------\t --------\t ----\n")
            for i in range(len(metainfo['col_info'])):
                f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
                        (metainfo['col_info'][i]['name'],
                         metainfo['col_info'][i]['type'],
                         metainfo['col_info'][i]['notnull'],
                         metainfo['col_info'][i]['dflt_value'],
                         metainfo['col_info'][i]['pk']))
            f.write("\n")

        if lockdown:
            f.write("PRSERV_LOCKDOWN = \"1\"\n\n")

        if datainfo:
            # idx tracks, per pkgarch, the row index holding the largest value.
            idx = {}
            for i in range(len(datainfo)):
                pkgarch = datainfo[i]['pkgarch']
                value = datainfo[i]['value']
                if pkgarch not in idx:
                    idx[pkgarch] = i
                elif value > datainfo[idx[pkgarch]]['value']:
                    idx[pkgarch] = i
                f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
                        (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
            if not nomax:
                for i in idx:
                    f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
def prserv_check_avail(d):
    """Validate PRSERV_HOST ("host:port") and verify the PR service responds.

    Calls bb.fatal() on a malformed setting; otherwise opens (with ping
    check) and closes a connection."""
    host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
    try:
        if len(host_params) != 2:
            raise TypeError
        else:
            int(host_params[1])
    except (TypeError, ValueError):
        # Fix: int() raises ValueError for a non-numeric port; previously
        # only TypeError was caught, letting the ValueError escape instead
        # of producing the configuration error below.
        bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
    else:
        conn = prserv_make_conn(d, True)
        conn.close()

View File

@@ -0,0 +1,238 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os, struct, mmap
class NotELFFileError(Exception):
    """Raised by ELFFile when a file is not a valid ELF object."""
    pass
class ELFFile:
    """Minimal read-only ELF inspector backed by an mmap of the file.

    Construct with a path, call open() (ideally inside a `with` block so
    the mmap is released), then query class/endianness/machine/dynamic
    properties.  Raises NotELFFileError for non-ELF input."""
    # e_ident field offsets
    EI_NIDENT = 16
    EI_CLASS = 4
    EI_DATA = 5
    EI_VERSION = 6
    EI_OSABI = 7
    EI_ABIVERSION = 8
    # offset of e_machine in the ELF header
    E_MACHINE = 0x12

    # possible values for EI_CLASS
    ELFCLASSNONE = 0
    ELFCLASS32 = 1
    ELFCLASS64 = 2

    # possible value for EI_VERSION
    EV_CURRENT = 1

    # possible values for EI_DATA
    EI_DATA_NONE = 0
    EI_DATA_LSB = 1
    EI_DATA_MSB = 2

    PT_INTERP = 3

    def my_assert(self, expectation, result):
        # Raise NotELFFileError rather than AssertionError so callers can
        # cheaply skip non-ELF files.
        if not expectation == result:
            #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
            raise NotELFFileError("%s is not an ELF" % self.name)

    def __init__(self, name):
        self.name = name
        self.objdump_output = {}  # cache: objdump flags -> captured stdout
        self.data = None          # mmap of the file, set by open()

    # Context Manager functions to close the mmap explicitly
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """Release the mmap (safe to call even if open() never succeeded)."""
        if self.data:
            self.data.close()

    def open(self):
        """mmap the file and validate the ELF identification header."""
        with open(self.name, "rb") as f:
            try:
                self.data = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            except ValueError:
                # This means the file is empty
                raise NotELFFileError("%s is empty" % self.name)

        # Check the file has the minimum number of ELF table entries
        if len(self.data) < ELFFile.EI_NIDENT + 4:
            raise NotELFFileError("%s is not an ELF" % self.name)

        # ELF magic: 0x7f 'E' 'L' 'F'
        self.my_assert(self.data[0], 0x7f)
        self.my_assert(self.data[1], ord('E'))
        self.my_assert(self.data[2], ord('L'))
        self.my_assert(self.data[3], ord('F'))
        if self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS32:
            self.bits = 32
        elif self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS64:
            self.bits = 64
        else:
            # Not 32-bit or 64.. lets assert
            raise NotELFFileError("ELF but not 32 or 64 bit.")
        self.my_assert(self.data[ELFFile.EI_VERSION], ELFFile.EV_CURRENT)

        self.endian = self.data[ELFFile.EI_DATA]
        if self.endian not in (ELFFile.EI_DATA_LSB, ELFFile.EI_DATA_MSB):
            raise NotELFFileError("Unexpected EI_DATA %x" % self.endian)

    def osAbi(self):
        """Return the EI_OSABI byte."""
        return self.data[ELFFile.EI_OSABI]

    def abiVersion(self):
        """Return the EI_ABIVERSION byte."""
        return self.data[ELFFile.EI_ABIVERSION]

    def abiSize(self):
        """Return 32 or 64."""
        return self.bits

    def isLittleEndian(self):
        return self.endian == ELFFile.EI_DATA_LSB

    def isBigEndian(self):
        return self.endian == ELFFile.EI_DATA_MSB

    def getStructEndian(self):
        # struct byte-order prefix matching the file's endianness
        return {ELFFile.EI_DATA_LSB: "<",
                ELFFile.EI_DATA_MSB: ">"}[self.endian]

    def getShort(self, offset):
        """Read an unsigned 16-bit value at offset, in file byte order."""
        return struct.unpack_from(self.getStructEndian() + "H", self.data, offset)[0]

    def getWord(self, offset):
        # NOTE(review): reads a *signed* 32-bit value; for ELF64 the fields
        # used below are 8 bytes wide, so only 4 bytes are consumed here —
        # confirm this is acceptable for the files being inspected.
        return struct.unpack_from(self.getStructEndian() + "i", self.data, offset)[0]

    def isDynamic(self):
        """
        Return True if there is a .interp segment (therefore dynamically
        linked), otherwise False (statically linked).
        """
        # e_phoff / e_phentsize / e_phnum at their ELF32 vs ELF64 offsets.
        offset = self.getWord(self.bits == 32 and 0x1C or 0x20)
        size = self.getShort(self.bits == 32 and 0x2A or 0x36)
        count = self.getShort(self.bits == 32 and 0x2C or 0x38)

        # Scan the program headers for a PT_INTERP entry.
        for i in range(0, count):
            p_type = self.getWord(offset + i * size)
            if p_type == ELFFile.PT_INTERP:
                return True
        return False

    def machine(self):
        """
        We know the endian stored in self.endian and we
        know the position
        """
        return self.getShort(ELFFile.E_MACHINE)

    def set_objdump(self, cmd, output):
        """Pre-seed the cached objdump output for cmd."""
        self.objdump_output[cmd] = output

    def run_objdump(self, cmd, d):
        """Run `$OBJDUMP <cmd> <file>` (OBJDUMP taken from d), caching
        stdout per cmd; returns "" on failure."""
        import bb.process
        import sys

        if cmd in self.objdump_output:
            return self.objdump_output[cmd]

        objdump = d.getVar('OBJDUMP')

        env = os.environ.copy()
        env["LC_ALL"] = "C"
        env["PATH"] = d.getVar('PATH')

        try:
            bb.note("%s %s %s" % (objdump, cmd, self.name))
            self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0]
            return self.objdump_output[cmd]
        except Exception as e:
            bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
            return ""
def elf_machine_to_string(machine):
    """
    Return the name of a given ELF e_machine field or the hex value as a string
    if it isn't recognised.
    """
    machines = {
        0x00: "Unset",
        0x02: "SPARC",
        0x03: "x86",
        0x08: "MIPS",
        0x14: "PowerPC",
        0x28: "ARM",
        0x2A: "SuperH",
        0x32: "IA-64",
        0x3E: "x86-64",
        0xB7: "AArch64",
        0xF7: "BPF"
    }
    try:
        return machines[machine]
    except (KeyError, TypeError):
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit): only unknown or unhashable
        # values should map to "Unknown".
        return "Unknown (%s)" % repr(machine)
def write_error(type, error, d):
    """Append "P: error [type]" to QA_LOGFILE, if one is configured."""
    logfile = d.getVar('QA_LOGFILE')
    if not logfile:
        return
    with open(logfile, "a+") as f:
        f.write("%s: %s [%s]\n" % (d.getVar('P'), error, type))
def handle_error(error_class, error_msg, d):
    """Report a QA issue at the severity configured for its class.

    ERROR_QA classes log an error, record QA_ERRORS_FOUND and return
    False; WARN_QA classes warn; everything else is a note.  Returns
    True unless the issue was fatal."""
    if error_class in (d.getVar("ERROR_QA") or "").split():
        write_error(error_class, error_msg, d)
        bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
        d.setVar("QA_ERRORS_FOUND", "True")
        return False

    if error_class in (d.getVar("WARN_QA") or "").split():
        write_error(error_class, error_msg, d)
        bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
    else:
        bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
    return True
def add_message(messages, section, new_msg):
    """Append new_msg to messages[section], newline-separating entries."""
    if section in messages:
        messages[section] = messages[section] + "\n" + new_msg
    else:
        messages[section] = new_msg
def exit_with_message_if_errors(message, d):
    """bb.fatal(message) if a previous handle_error() recorded a fatal QA issue."""
    if bb.utils.to_boolean(d.getVar("QA_ERRORS_FOUND"), False):
        bb.fatal(message)
def exit_if_errors(d):
    """Fail the task when fatal QA errors were recorded."""
    exit_with_message_if_errors("Fatal QA errors were found, failing task.", d)
def check_upstream_status(fullpath):
    """Validate the Upstream-Status tag of a patch file.

    Returns None when a well-formed tag is present, otherwise an error
    message describing whether the tag is malformed or missing."""
    import re
    loose_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
    strict_re = re.compile(r"^Upstream-Status: (Pending|Submitted|Denied|Inappropriate|Backport|Inactive-Upstream)( .+)?$", re.MULTILINE)
    guidelines = "https://docs.yoctoproject.org/contributor-guide/recipe-style-guide.html#patch-upstream-status"

    with open(fullpath, encoding='utf-8', errors='ignore') as f:
        content = f.read()

    if strict_re.search(content):
        return None

    malformed = loose_re.search(content)
    if malformed:
        return "Malformed Upstream-Status in patch\n%s\nPlease correct according to %s :\n%s" % (fullpath, guidelines, malformed.group(0))
    return "Missing Upstream-Status in patch\n%s\nPlease add according to %s ." % (fullpath, guidelines)
# CLI self-test: print whether the ELF binary given as argv[1] is
# dynamically linked.
if __name__ == "__main__":
    import sys

    with ELFFile(sys.argv[1]) as elf:
        elf.open()
        print(elf.isDynamic())

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,197 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import subprocess
import bb
# For reproducible builds, this code sets the default SOURCE_DATE_EPOCH in each
# component's build environment. The format is number of seconds since the
# system epoch.
#
# Upstream components (generally) respect this environment variable,
# using it in place of the "current" date and time.
# See https://reproducible-builds.org/specs/source-date-epoch/
#
# The default value of SOURCE_DATE_EPOCH comes from the function
# get_source_date_epoch_value which reads from the SDE_FILE, or if the file
# is not available will use the fallback of SOURCE_DATE_EPOCH_FALLBACK.
#
# The SDE_FILE is normally constructed from the function
# create_source_date_epoch_stamp which is typically added as a postfuncs to
# the do_unpack task. If a recipe does NOT have do_unpack, it should be added
# to a task that runs after the source is available and before the
# do_deploy_source_date_epoch task is executed.
#
# If a recipe wishes to override the default behavior it should set it's own
# SOURCE_DATE_EPOCH or override the do_deploy_source_date_epoch_stamp task
# with recipe-specific functionality to write the appropriate
# SOURCE_DATE_EPOCH into the SDE_FILE.
#
# SOURCE_DATE_EPOCH is intended to be a reproducible value. This value should
# be reproducible for anyone who builds the same revision from the same
# sources.
#
# There are 4 ways the create_source_date_epoch_stamp function determines what
# becomes SOURCE_DATE_EPOCH:
#
# 1. Use the value from __source_date_epoch.txt file if this file exists.
# This file was most likely created in the previous build by one of the
# following methods 2,3,4.
# Alternatively, it can be provided by a recipe via SRC_URI.
#
# If the file does not exist:
#
# 2. If there is a git checkout, use the last git commit timestamp.
# Git does not preserve file timestamps on checkout.
#
# 3. Use the mtime of "known" files such as NEWS, CHANGLELOG, ...
# This works for well-kept repositories distributed via tarball.
#
# 4. Use the modification time of the youngest file in the source tree, if
# there is one.
# This will be the newest file from the distribution tarball, if any.
#
# 5. Fall back to a fixed timestamp (SOURCE_DATE_EPOCH_FALLBACK).
#
# Once the value is determined, it is stored in the recipe's SDE_FILE.
def get_source_date_epoch_from_known_files(d, sourcedir):
    """Return the mtime of the youngest well-known metadata file (NEWS,
    ChangeLog, ...) in sourcedir, or None if none of them exist."""
    newest_mtime = None
    newest_path = None
    for candidate in ("NEWS", "ChangeLog", "Changelog", "CHANGES"):
        path = os.path.join(sourcedir, candidate)
        if not os.path.isfile(path):
            continue
        mtime = int(os.lstat(path).st_mtime)
        # More than one of these files may be present; keep the youngest.
        if not newest_mtime or mtime > newest_mtime:
            newest_mtime = mtime
            newest_path = path
    if newest_path:
        bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % newest_path)
    return newest_mtime
def find_git_folder(d, sourcedir):
    """Locate the .git directory of the recipe's checkout.

    Tries WORKDIR/git (the default git fetcher unpack path), then ${S},
    then walks WORKDIR. Returns None when no repository is found.
    """
    workdir = d.getVar('WORKDIR')
    for candidate in (os.path.join(workdir, "git/.git"),
                      os.path.join(sourcedir, ".git")):
        if os.path.isdir(candidate):
            return candidate
    # Perhaps a subpath or destsuffix was specified; go looking in WORKDIR,
    # skipping directories that can never contain the source checkout.
    skip = set(["build", "image", "license-destdir", "patches", "pseudo",
                "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"])
    for root, dirs, files in os.walk(workdir, topdown=True):
        dirs[:] = [entry for entry in dirs if entry not in skip]
        if '.git' in dirs:
            return os.path.join(root, ".git")
    bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
    return None
def get_source_date_epoch_from_git(d, sourcedir):
    """Return the committer timestamp of the last git commit, or None when
    the recipe does not fetch from git or no usable repository exists."""
    src_uri = d.getVar('SRC_URI')
    if "git://" not in src_uri and "gitsm://" not in src_uri:
        return None
    gitpath = find_git_folder(d, sourcedir)
    if not gitpath:
        return None
    # The repository may lack a valid HEAD, e.g. when subdir= is used in
    # SRC_URI.
    head = subprocess.run(['git', '--git-dir', gitpath, 'rev-parse', 'HEAD'],
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    if head.returncode != 0:
        bb.debug(1, "%s does not have a valid HEAD: %s" % (gitpath, head.stdout.decode('utf-8')))
        return None
    bb.debug(1, "git repository: %s" % gitpath)
    log = subprocess.run(['git', '-c', 'log.showSignature=false', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'],
                         check=True, stdout=subprocess.PIPE)
    return int(log.stdout.decode('utf-8'))
def get_source_date_epoch_from_youngest_file(d, sourcedir):
    """Return the mtime of the youngest file under sourcedir (typically the
    newest file of a distribution tarball), or None when sourcedir is
    WORKDIR itself and therefore almost certainly not an unpacked tarball."""
    if sourcedir == d.getVar('WORKDIR'):
        return None
    source_date_epoch = None
    newest = None
    # The hard way: stat every file and remember the youngest.
    for root, dirs, files in os.walk(sourcedir, topdown=True):
        for fname in files:
            if fname.startswith('.'):
                continue
            if fname == "singletask.lock":
                # Ignore externalsrc/devtool lockfile [YOCTO #14921]
                continue
            path = os.path.join(root, fname)
            try:
                mtime = int(os.lstat(path).st_mtime)
            except ValueError:
                mtime = 0
            if not source_date_epoch or mtime > source_date_epoch:
                source_date_epoch = mtime
                newest = path
    if newest:
        bb.debug(1, "Newest file found: %s" % newest)
    return source_date_epoch
def fixed_source_date_epoch(d):
    """Last-resort SOURCE_DATE_EPOCH: the configured fallback value, or 0."""
    bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
    fallback = d.getVar('SOURCE_DATE_EPOCH_FALLBACK')
    if not fallback:
        return 0
    bb.debug(1, "Using SOURCE_DATE_EPOCH_FALLBACK")
    return int(fallback)
def get_source_date_epoch(d, sourcedir):
    """Determine SOURCE_DATE_EPOCH for a recipe: git commit time first,
    then the youngest source file, then the configured fallback."""
    epoch = get_source_date_epoch_from_git(d, sourcedir)
    if not epoch:
        epoch = get_source_date_epoch_from_youngest_file(d, sourcedir)
    if not epoch:
        epoch = fixed_source_date_epoch(d)  # last resort
    return epoch
def epochfile_read(epochfile, d):
    """Read SOURCE_DATE_EPOCH from epochfile, caching the result in the
    datastore. Falls back to SOURCE_DATE_EPOCH_FALLBACK when the file is
    missing or does not contain an integer. Returns the epoch as a string."""
    cached, cached_file = d.getVar('__CACHED_SOURCE_DATE_EPOCH') or (None, None)
    if cached:
        if cached_file == epochfile:
            return cached
        bb.debug(1, "Epoch file changed from %s to %s" % (cached_file, epochfile))
    source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
    try:
        with open(epochfile, 'r') as f:
            contents = f.read()
            try:
                source_date_epoch = int(contents)
            except ValueError:
                bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % contents)
                source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
            bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
    except FileNotFoundError:
        bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
    d.setVar('__CACHED_SOURCE_DATE_EPOCH', (str(source_date_epoch), epochfile))
    return str(source_date_epoch)
def epochfile_write(source_date_epoch, epochfile, d):
    """Persist source_date_epoch to epochfile atomically (write a temporary
    file, then rename it into place)."""
    bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
    bb.utils.mkdirhier(os.path.dirname(epochfile))
    tmp_file = epochfile + ".new"
    with open(tmp_file, 'w') as f:
        f.write(str(source_date_epoch))
    os.rename(tmp_file, epochfile)

View File

@@ -0,0 +1,438 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.package_manager import *
from oe.manifest import *
import oe.path
import shutil
import os
import subprocess
import re
class Rootfs(object, metaclass=ABCMeta):
    """
    Abstract base class driving image rootfs generation; package-backend
    specific subclasses (rpm/ipk/deb) implement the abstract hooks below.
    Do not instantiate this directly.
    """
    def __init__(self, d, progress_reporter=None, logcatcher=None):
        self.d = d
        self.pm = None  # set by the backend subclass
        self.image_rootfs = self.d.getVar('IMAGE_ROOTFS')
        self.deploydir = self.d.getVar('IMGDEPLOYDIR')
        self.progress_reporter = progress_reporter
        self.logcatcher = logcatcher

        self.install_order = Manifest.INSTALL_ORDER

    @abstractmethod
    def _create(self):
        # Backend-specific rootfs construction.
        pass

    @abstractmethod
    def _get_delayed_postinsts(self):
        # Return the postinstall scripts deferred to first boot, or None.
        pass

    @abstractmethod
    def _save_postinsts(self):
        pass

    @abstractmethod
    def _log_check(self):
        pass

    def _log_check_common(self, type, match):
        """Scan ${T}/log.do_rootfs for lines matching `match` and report the
        hits as `type` ('error' is fatal, anything else warns)."""
        # Ignore any lines containing log_check to avoid recursion, and ignore
        # lines beginning with a + since sh -x may emit code which isn't
        # actually executed, but may contain error messages
        excludes = [ 'log_check', r'^\+' ]
        if hasattr(self, 'log_check_expected_regexes'):
            excludes.extend(self.log_check_expected_regexes)

        # Insert custom log_check excludes
        excludes += [x for x in (self.d.getVar("IMAGE_LOG_CHECK_EXCLUDES") or "").split(" ") if x]

        excludes = [re.compile(x) for x in excludes]
        r = re.compile(match)
        log_path = self.d.expand("${T}/log.do_rootfs")
        messages = []
        with open(log_path, 'r') as log:
            for line in log:
                if self.logcatcher and self.logcatcher.contains(line.rstrip()):
                    continue

                for ee in excludes:
                    m = ee.search(line)
                    if m:
                        break
                if m:
                    continue

                m = r.search(line)
                if m:
                    messages.append('[log_check] %s' % line)
        if messages:
            if len(messages) == 1:
                msg = '1 %s message' % type
            else:
                msg = '%d %s messages' % (len(messages), type)
            msg = '[log_check] %s: found %s in the logfile:\n%s' % \
                (self.d.getVar('PN'), msg, ''.join(messages))
            if type == 'error':
                bb.fatal(msg)
            else:
                bb.warn(msg)

    def _log_check_warn(self):
        self._log_check_common('warning', '^(warn|Warn|WARNING:)')

    def _log_check_error(self):
        self._log_check_common('error', self.log_check_regex)

    def _insert_feed_uris(self):
        """Write the package feed URIs into the image when package
        management is part of IMAGE_FEATURES."""
        if bb.utils.contains("IMAGE_FEATURES", "package-management",
                         True, False, self.d):
            self.pm.insert_feeds_uris(self.d.getVar('PACKAGE_FEED_URIS') or "",
                self.d.getVar('PACKAGE_FEED_BASE_PATHS') or "",
                self.d.getVar('PACKAGE_FEED_ARCHS'))

    """
    The _cleanup() method should be used to clean-up stuff that we don't really
    want to end up on target. For example, in the case of RPM, the DB locks.
    The method is called, once, at the end of create() method.
    """
    @abstractmethod
    def _cleanup(self):
        pass

    def _setup_dbg_rootfs(self, package_paths):
        """When IMAGE_GEN_DEBUGFS is enabled, build a parallel '-dbg' rootfs
        containing debug data and '*-dbg' packages, then restore the
        original rootfs."""
        gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
        if gen_debugfs != '1':
            return

        bb.note(" Renaming the original rootfs...")
        try:
            shutil.rmtree(self.image_rootfs + '-orig')
        except:
            pass
        bb.utils.rename(self.image_rootfs, self.image_rootfs + '-orig')

        bb.note(" Creating debug rootfs...")
        bb.utils.mkdirhier(self.image_rootfs)

        bb.note(" Copying back package database...")
        for path in package_paths:
            bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(path))
            if os.path.isdir(self.image_rootfs + '-orig' + path):
                shutil.copytree(self.image_rootfs + '-orig' + path, self.image_rootfs + path, symlinks=True)
            elif os.path.isfile(self.image_rootfs + '-orig' + path):
                shutil.copyfile(self.image_rootfs + '-orig' + path, self.image_rootfs + path)

        # Copy files located in /usr/lib/debug or /usr/src/debug
        for dir in ["/usr/lib/debug", "/usr/src/debug"]:
            src = self.image_rootfs + '-orig' + dir
            if os.path.exists(src):
                dst = self.image_rootfs + dir
                bb.utils.mkdirhier(os.path.dirname(dst))
                shutil.copytree(src, dst)

        # Copy files with suffix '.debug' or located in '.debug' dir.
        for root, dirs, files in os.walk(self.image_rootfs + '-orig'):
            relative_dir = root[len(self.image_rootfs + '-orig'):]
            for f in files:
                if f.endswith('.debug') or '/.debug' in relative_dir:
                    bb.utils.mkdirhier(self.image_rootfs + relative_dir)
                    shutil.copy(os.path.join(root, f),
                                self.image_rootfs + relative_dir)

        bb.note(" Install complementary '*-dbg' packages...")
        self.pm.install_complementary('*-dbg')

        if self.d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
            bb.note(" Install complementary '*-src' packages...")
            self.pm.install_complementary('*-src')

        """
        Install additional debug packages. Possibility to install additional packages,
        which are not automatically installed as complementary package of
        standard one, e.g. debug package of static libraries.
        """
        extra_debug_pkgs = self.d.getVar('IMAGE_INSTALL_DEBUGFS')
        if extra_debug_pkgs:
            bb.note(" Install extra debug packages...")
            self.pm.install(extra_debug_pkgs.split(), True)

        bb.note(" Removing package database...")
        for path in package_paths:
            if os.path.isdir(self.image_rootfs + path):
                shutil.rmtree(self.image_rootfs + path)
            elif os.path.isfile(self.image_rootfs + path):
                os.remove(self.image_rootfs + path)

        bb.note(" Rename debug rootfs...")
        try:
            shutil.rmtree(self.image_rootfs + '-dbg')
        except:
            pass
        bb.utils.rename(self.image_rootfs, self.image_rootfs + '-dbg')

        bb.note(" Restoring original rootfs...")
        bb.utils.rename(self.image_rootfs + '-orig', self.image_rootfs)

    def _exec_shell_cmd(self, cmd):
        """Run cmd; return a descriptive error string on failure or None on
        success."""
        try:
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))

        return None

    def create(self):
        """Top-level rootfs generation sequence: pre-process hooks, backend
        _create(), post-install/post-process hooks, read-only-rootfs
        validation, unneeded-package removal, ldconfig/depmod and final log
        checks."""
        bb.note("###### Generate rootfs #######")
        pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND")
        post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
        rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')

        def make_last(command, commands):
            # Move `command` (when present) to the end of the space-separated
            # command list and return the updated list.
            commands = commands.split()
            if command in commands:
                commands.remove(command)
                commands.append(command)
            return " ".join(commands)

        # We want this to run as late as possible, in particular after
        # systemd_sysusers_create and set_user_group. Using :append is not enough
        # Fix: the reordered list was previously discarded (and joined without
        # separators); assign it back so the reordering actually takes effect.
        post_process_cmds = make_last("tidy_shadowutils_files", post_process_cmds)
        post_process_cmds = make_last("rootfs_reproducible", post_process_cmds)

        execute_pre_post_process(self.d, pre_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        # call the package manager dependent create method
        self._create()

        sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir')
        bb.utils.mkdirhier(sysconfdir)
        with open(sysconfdir + "/version", "w+") as ver:
            ver.write(self.d.getVar('BUILDNAME') + "\n")

        execute_pre_post_process(self.d, rootfs_post_install_cmds)

        self.pm.run_intercepts()

        execute_pre_post_process(self.d, post_process_cmds)

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
                             True, False, self.d) and \
           not bb.utils.contains("IMAGE_FEATURES",
                             "read-only-rootfs-delayed-postinsts",
                             True, False, self.d):
            delayed_postinsts = self._get_delayed_postinsts()
            if delayed_postinsts is not None:
                bb.fatal("The following packages could not be configured "
                         "offline and rootfs is read-only: %s" %
                         delayed_postinsts)

        if self.d.getVar('USE_DEVFS') != "1":
            self._create_devfs()

        self._uninstall_unneeded()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

        self._insert_feed_uris()

        self._run_ldconfig()

        if self.d.getVar('USE_DEPMOD') != "0":
            self._generate_kernel_module_deps()

        self._cleanup()
        self._log_check()

        if self.progress_reporter:
            self.progress_reporter.next_stage()

    def _uninstall_unneeded(self):
        """Drop run-postinsts when unused, strip ROOTFS_RO_UNNEEDED packages
        from read-only images, save delayed postinsts and remove the package
        manager data when runtime package management is disabled."""
        # Remove the run-postinsts package if no delayed postinsts are found
        delayed_postinsts = self._get_delayed_postinsts()
        if delayed_postinsts is None:
            if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")) or os.path.exists(self.d.expand("${IMAGE_ROOTFS}${systemd_system_unitdir}/run-postinsts.service")):
                self.pm.remove(["run-postinsts"])

        image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
                                        True, False, self.d) and \
                      not bb.utils.contains("IMAGE_FEATURES",
                                        "read-only-rootfs-delayed-postinsts",
                                        True, False, self.d)

        image_rorfs_force = self.d.getVar('FORCE_RO_REMOVE')

        if image_rorfs or image_rorfs_force == "1":
            # Remove components that we don't need if it's a read-only rootfs
            unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED").split()
            pkgs_installed = image_list_installed_packages(self.d)
            # Make sure update-alternatives is removed last. This is
            # because its database has to available while uninstalling
            # other packages, allowing alternative symlinks of packages
            # to be uninstalled or to be managed correctly otherwise.
            provider = self.d.getVar("VIRTUAL-RUNTIME_update-alternatives")
            pkgs_to_remove = sorted([pkg for pkg in pkgs_installed if pkg in unneeded_pkgs], key=lambda x: x == provider)

            # update-alternatives provider is removed in its own remove()
            # call because all package managers do not guarantee the packages
            # are removed in the order they given in the list (which is
            # passed to the command line). The sorting done earlier is
            # utilized to implement the 2-stage removal.
            if len(pkgs_to_remove) > 1:
                self.pm.remove(pkgs_to_remove[:-1], False)
            if len(pkgs_to_remove) > 0:
                self.pm.remove([pkgs_to_remove[-1]], False)

        if delayed_postinsts:
            self._save_postinsts()
            if image_rorfs:
                bb.warn("There are post install scripts "
                        "in a read-only rootfs")

        post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND")
        execute_pre_post_process(self.d, post_uninstall_cmds)

        runtime_pkgmanage = bb.utils.contains("IMAGE_FEATURES", "package-management",
                                              True, False, self.d)
        if not runtime_pkgmanage:
            # Remove the package manager data files
            self.pm.remove_packaging_data()

    def _run_ldconfig(self):
        """Run ldconfig on the new rootfs and drop the auxiliary cache for
        read-only images or when ldconfig is not a distro feature."""
        if self.d.getVar('LDCONFIGDEPEND'):
            bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v -X")
            self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
                                  'new', '-v', '-X'])

        image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
                                        True, False, self.d)
        ldconfig_in_features = bb.utils.contains("DISTRO_FEATURES", "ldconfig",
                                                 True, False, self.d)
        if image_rorfs or not ldconfig_in_features:
            ldconfig_cache_dir = os.path.join(self.image_rootfs, "var/cache/ldconfig")
            if os.path.exists(ldconfig_cache_dir):
                bb.note("Removing ldconfig auxiliary cache...")
                shutil.rmtree(ldconfig_cache_dir)

    def _check_for_kernel_modules(self, modules_dir):
        """Return True when at least one kernel module file (.ko, possibly
        compressed) exists below modules_dir."""
        for root, dirs, files in os.walk(modules_dir, topdown=True):
            for name in files:
                found_ko = name.endswith((".ko", ".ko.gz", ".ko.xz", ".ko.zst"))
                if found_ko:
                    return found_ko
        return False

    def _generate_kernel_module_deps(self):
        """Run depmodwrapper for every kernel found in PKGDATA_DIR so the
        image ships up-to-date module dependency data."""
        modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules')
        # if we don't have any modules don't bother to do the depmod
        if not self._check_for_kernel_modules(modules_dir):
            bb.note("No Kernel Modules found, not running depmod")
            return

        pkgdatadir = self.d.getVar('PKGDATA_DIR')

        # PKGDATA_DIR can include multiple kernels so we run depmod for each
        # one of them.
        for direntry in os.listdir(pkgdatadir):
            match = re.match('(.*)-depmod', direntry)
            if not match:
                continue
            kernel_package_name = match.group(1)

            kernel_abi_ver_file = oe.path.join(pkgdatadir, direntry, kernel_package_name + '-abiversion')
            if not os.path.exists(kernel_abi_ver_file):
                bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)

            with open(kernel_abi_ver_file) as f:
                kernel_ver = f.read().strip(' \n')

            versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)

            bb.utils.mkdirhier(versioned_modules_dir)

            bb.note("Running depmodwrapper for %s ..." % versioned_modules_dir)
            if self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver, kernel_package_name]):
                bb.fatal("Kernel modules dependency generation failed")

    """
    Create devfs:
       * IMAGE_DEVICE_TABLE is the old name to an absolute path to a device table file
       * IMAGE_DEVICE_TABLES is a new name for a file, or list of files, seached
         for in the BBPATH
    If neither are specified then the default name of files/device_table-minimal.txt
    is searched for in the BBPATH (same as the old version.)
    """
    def _create_devfs(self):
        devtable_list = []
        devtable = self.d.getVar('IMAGE_DEVICE_TABLE')
        if devtable is not None:
            devtable_list.append(devtable)
        else:
            devtables = self.d.getVar('IMAGE_DEVICE_TABLES')
            if devtables is None:
                devtables = 'files/device_table-minimal.txt'
            for devtable in devtables.split():
                devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH'), devtable))

        for devtable in devtable_list:
            self._exec_shell_cmd(["makedevs", "-r",
                                  self.image_rootfs, "-D", devtable])
def get_class_for_type(imgtype):
    """Return the PkgRootfs class implementing rootfs creation for the given
    package backend (e.g. 'rpm', 'deb', 'ipk')."""
    import importlib
    return importlib.import_module('oe.package_manager.%s.rootfs' % imgtype).PkgRootfs
def variable_depends(d, manifest_dir=None):
    """Return the variable dependency list of the rootfs backend selected by
    IMAGE_PKGTYPE."""
    rootfs_cls = get_class_for_type(d.getVar('IMAGE_PKGTYPE'))
    return rootfs_cls._depends_list()
def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None):
    """Instantiate the backend-specific rootfs class and build the image
    rootfs, restoring the process environment afterwards."""
    saved_env = os.environ.copy()
    rootfs_cls = get_class_for_type(d.getVar('IMAGE_PKGTYPE'))
    rootfs_cls(d, manifest_dir, progress_reporter, logcatcher).create()
    os.environ.clear()
    os.environ.update(saved_env)
def image_list_installed_packages(d, rootfs_dir=None):
    """List packages installed in the image rootfs. Baremetal images have no
    rootfs, so an empty string is returned for them."""
    if bb.data.inherits_class('baremetal-image', d):
        return ""

    import importlib
    target_dir = rootfs_dir or d.getVar('IMAGE_ROOTFS')
    backend = importlib.import_module('oe.package_manager.' + d.getVar('IMAGE_PKGTYPE'))
    return backend.PMPkgsList(d, target_dir).list_pkgs()
if __name__ == "__main__":
    """
    We should be able to run this as a standalone script, from outside bitbake
    environment.
    """
    # NOTE(review): standalone mode is not implemented; the bare string
    # literals here are no-op placeholders.
    """
    TBD
    """

View File

@@ -0,0 +1,13 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# Handle mismatches between `uname -m`-style output and Rust's arch names
def arch_to_rust_arch(arch):
    """Map a `uname -m`-style architecture name to Rust's name for it.

    ppc64le becomes powerpc64le and the RISC-V names gain the 'gc'
    extension suffix; everything else passes through unchanged.
    """
    translated = {
        "ppc64le": "powerpc64le",
        "riscv32": "riscv32gc",
        "riscv64": "riscv64gc",
    }
    return translated.get(arch, arch)

View File

@@ -0,0 +1,120 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import collections
import os
# Dependency on another recipe's SPDX document (the document, its SHA1 and
# the recipe element inside it).
DepRecipe = collections.namedtuple("DepRecipe", ("doc", "doc_sha1", "recipe"))
# Like DepRecipe, but additionally naming a source file within the document.
DepSource = collections.namedtuple("DepSource", ("doc", "doc_sha1", "recipe", "file"))
def get_recipe_spdxid(d):
    """SPDX ID of the recipe-level element for the current recipe (PN)."""
    return "SPDXRef-Recipe-%s" % d.getVar("PN")
def get_download_spdxid(d, idx):
    """SPDX ID of the idx'th download (SRC_URI entry) of the recipe."""
    return f"SPDXRef-Download-{d.getVar('PN')}-{idx}"
def get_package_spdxid(pkg):
    """SPDX ID of a built package element."""
    return f"SPDXRef-Package-{pkg}"
def get_source_file_spdxid(d, idx):
    """SPDX ID of the idx'th source file of the recipe."""
    return f"SPDXRef-SourceFile-{d.getVar('PN')}-{idx}"
def get_packaged_file_spdxid(pkg, idx):
    """SPDX ID of the idx'th file shipped in package pkg."""
    return f"SPDXRef-PackagedFile-{pkg}-{idx}"
def get_image_spdxid(img):
    """SPDX ID of an image element."""
    return f"SPDXRef-Image-{img}"
def get_sdk_spdxid(sdk):
    """SPDX ID of an SDK element."""
    return f"SPDXRef-SDK-{sdk}"
def _doc_path_by_namespace(spdx_deploy, arch, doc_namespace):
return spdx_deploy / "by-namespace" / arch / doc_namespace.replace("/", "_")
def doc_find_by_namespace(spdx_deploy, search_arches, doc_namespace):
    """Find a deployed SPDX document by its namespace, searching each
    package architecture in turn. Returns the path or None."""
    candidates = (_doc_path_by_namespace(spdx_deploy, arch, doc_namespace)
                  for arch in search_arches)
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return None
def _doc_path_by_hashfn(spdx_deploy, arch, doc_name, hashfn):
return (
spdx_deploy / "by-hash" / arch / hashfn.split()[1] / (doc_name + ".spdx.json")
)
def doc_find_by_hashfn(spdx_deploy, search_arches, doc_name, hashfn):
    """Find a deployed SPDX document by name and task hash across the given
    architectures. Returns the path or None."""
    matches = (p for p in (_doc_path_by_hashfn(spdx_deploy, arch, doc_name, hashfn)
                           for arch in search_arches)
               if os.path.exists(p))
    return next(matches, None)
def doc_path(spdx_deploy, doc_name, arch, subdir):
    """Canonical on-disk path of a deployed SPDX JSON document."""
    filename = "%s.spdx.json" % doc_name
    return spdx_deploy / arch / subdir / filename
def write_doc(d, spdx_doc, arch, subdir, spdx_deploy=None, indent=None):
    """Serialize an SPDX document under the deploy directory and create its
    by-namespace and by-hash lookup symlinks. Returns the SHA1 of the
    serialized JSON."""
    from pathlib import Path

    if spdx_deploy is None:
        spdx_deploy = Path(d.getVar("SPDXDEPLOY"))

    dest = doc_path(spdx_deploy, spdx_doc.name, arch, subdir)
    dest.parent.mkdir(exist_ok=True, parents=True)
    with dest.open("wb") as f:
        doc_sha1 = spdx_doc.to_json(f, sort_keys=True, indent=indent)

    def _link(link_path):
        # Relative symlinks keep the deploy tree relocatable.
        link_path.parent.mkdir(exist_ok=True, parents=True)
        link_path.symlink_to(os.path.relpath(dest, link_path.parent))

    _link(_doc_path_by_namespace(spdx_deploy, arch, spdx_doc.documentNamespace))
    _link(_doc_path_by_hashfn(spdx_deploy, arch, spdx_doc.name,
                              d.getVar("BB_HASHFILENAME")))

    return doc_sha1
def read_doc(fn):
    """Load an SPDX document from a path-like object or an already-open
    binary stream.

    Returns (document, sha1-hex-digest of the raw bytes).
    """
    import hashlib
    import oe.spdx
    import io
    import contextlib

    @contextlib.contextmanager
    def get_file():
        # Accept either an open binary file object or a path-like object.
        if isinstance(fn, io.IOBase):
            yield fn
        else:
            with fn.open("rb") as f:
                yield f

    with get_file() as f:
        sha1 = hashlib.sha1()
        for chunk in iter(lambda: f.read(4096), b''):
            sha1.update(chunk)
        f.seek(0)
        doc = oe.spdx.SPDXDocument.from_json(f)

    return (doc, sha1.hexdigest())

View File

@@ -0,0 +1,160 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
from abc import ABCMeta, abstractmethod
from oe.utils import execute_pre_post_process
from oe.manifest import *
from oe.package_manager import *
import os
import traceback
class Sdk(object, metaclass=ABCMeta):
    """
    Abstract base class for SDK population; package-backend specific
    subclasses implement _populate(). Note that constructing an instance
    wipes SDK_OUTPUT via self.remove().
    """
    def __init__(self, d, manifest_dir):
        self.d = d
        # Staging locations, all derived from the datastore.
        self.sdk_output = self.d.getVar('SDK_OUTPUT')
        self.sdk_native_path = self.d.getVar('SDKPATHNATIVE').strip('/')
        self.target_path = self.d.getVar('SDKTARGETSYSROOT').strip('/')
        self.sysconfdir = self.d.getVar('sysconfdir').strip('/')
        self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
        self.sdk_host_sysroot = self.sdk_output
        if manifest_dir is None:
            self.manifest_dir = self.d.getVar("SDK_DIR")
        else:
            self.manifest_dir = manifest_dir
        # Start from a clean output tree.
        self.remove(self.sdk_output, True)
        self.install_order = Manifest.INSTALL_ORDER
    @abstractmethod
    def _populate(self):
        # Backend-specific SDK population.
        pass
    def populate(self):
        """Create the SDK tree: backend populate, removal of unwanted files,
        ld.so.cache symlink and SDK_POSTPROCESS_COMMAND hooks."""
        self.mkdirhier(self.sdk_output)
        # call backend dependent implementation
        self._populate()
        # Don't ship any libGL in the SDK
        self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
                             self.d.getVar('libdir_nativesdk').strip('/'),
                             "libGL*"))
        # Fix or remove broken .la files
        self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
                             self.d.getVar('libdir_nativesdk').strip('/'),
                             "*.la"))
        # Link the ld.so.cache file into the hosts filesystem
        link_name = os.path.join(self.sdk_output, self.sdk_native_path,
                             self.sysconfdir, "ld.so.cache")
        self.mkdirhier(os.path.dirname(link_name))
        os.symlink("/etc/ld.so.cache", link_name)
        execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND'))
    def movefile(self, sourcefile, destdir):
        """Move sourcefile into destdir; a failed move is a fatal error."""
        try:
            # FIXME: this check of movefile's return code to None should be
            # fixed within the function to use only exceptions to signal when
            # something goes wrong
            if (bb.utils.movefile(sourcefile, destdir) == None):
                raise OSError("moving %s to %s failed"
                        %(sourcefile, destdir))
        #FIXME: using umbrella exc catching because bb.utils method raises it
        except Exception as e:
            bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
            bb.fatal("unable to place %s in final SDK location" % sourcefile)
    def mkdirhier(self, dirpath):
        """Create dirpath (and parents); failure is fatal."""
        try:
            bb.utils.mkdirhier(dirpath)
        except OSError as e:
            bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
            bb.fatal("cannot make dir for SDK: %s" % dirpath)
    def remove(self, path, recurse=False):
        """Remove path (glob patterns allowed); failures only warn."""
        try:
            bb.utils.remove(path, recurse)
        #FIXME: using umbrella exc catching because bb.utils method raises it
        except Exception as e:
            bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
            bb.warn("cannot remove SDK dir: %s" % path)
    def install_locales(self, pm):
        """Install the SDKIMAGE_LINGUAS binary locale packages, generate a
        locale archive from them, then remove the packages again."""
        linguas = self.d.getVar("SDKIMAGE_LINGUAS")
        if linguas:
            import fnmatch
            # Install the binary locales
            if linguas == "all":
                pm.install_glob("nativesdk-glibc-binary-localedata-*.utf-8", sdk=True)
            else:
                pm.install(["nativesdk-glibc-binary-localedata-%s.utf-8" % \
                           lang for lang in linguas.split()])
            # Generate a locale archive of them
            target_arch = self.d.getVar('SDK_ARCH')
            rootfs = oe.path.join(self.sdk_host_sysroot, self.sdk_native_path)
            localedir = oe.path.join(rootfs, self.d.getVar("libdir_nativesdk"), "locale")
            generate_locale_archive(self.d, rootfs, target_arch, localedir)
            # And now delete the binary locales
            pkgs = fnmatch.filter(pm.list_installed(), "nativesdk-glibc-binary-localedata-*.utf-8")
            pm.remove(pkgs)
        else:
            # No linguas so do nothing
            pass
def sdk_list_installed_packages(d, target, rootfs_dir=None):
    """List packages installed in the SDK, in either the target sysroot
    (target=True) or the host sysroot (target=False)."""
    if rootfs_dir is None:
        sdk_output = d.getVar('SDK_OUTPUT')
        if target is True:
            rootfs_dir = os.path.join(sdk_output, d.getVar('SDKTARGETSYSROOT').strip('/'))
        else:
            rootfs_dir = sdk_output
    if target is False:
        # Host packages are queried through the SDK ipk configuration.
        d.setVar("IPKGCONF_TARGET", d.getVar("IPKGCONF_SDK"))
    import importlib
    backend = importlib.import_module('oe.package_manager.' + d.getVar('IMAGE_PKGTYPE'))
    return backend.PMPkgsList(d, rootfs_dir).list_pkgs()
def populate_sdk(d, manifest_dir=None):
    """Build the SDK via the package-backend specific PkgSdk class,
    restoring the process environment afterwards."""
    saved_env = os.environ.copy()
    import importlib
    sdk_module = importlib.import_module('oe.package_manager.%s.sdk' % d.getVar('IMAGE_PKGTYPE'))
    sdk_module.PkgSdk(d, manifest_dir).populate()
    os.environ.clear()
    os.environ.update(saved_env)
def get_extra_sdkinfo(sstate_dir):
    """Scan an sstate cache directory and accumulate per-task and per-file
    sizes (KiB, rounded up) of the .tgz archives; used when generating the
    eSDK target/host manifest files."""
    import math
    extra_info = {'tasksizes': {}, 'filesizes': {}}
    for root, _, files in os.walk(sstate_dir):
        for fn in files:
            if not fn.endswith('.tgz'):
                continue
            size_kib = int(math.ceil(float(os.path.getsize(os.path.join(root, fn))) / 1024))
            # The task name sits after the last ':' of the sstate file name,
            # minus the leading hash field and any ',' suffix.
            task = fn.rsplit(':', 1)[1].split('_', 1)[1].split(',')[0]
            extra_info['tasksizes'][task] = extra_info['tasksizes'].get(task, 0) + size_kib
            extra_info['filesizes'][fn] = size_kib
    return extra_info
if __name__ == "__main__":
    # Standalone execution is not implemented for this module.
    pass

View File

@@ -0,0 +1,399 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
#
# This library is intended to capture the JSON SPDX specification in a type
# safe manner. It is not intended to encode any particular OE specific
# behaviors, see the sbom.py for that.
#
# The documented SPDX spec document doesn't cover the JSON syntax for
# particular configuration, which can make it hard to determine what the JSON
# syntax should be. I've found it is actually much simpler to read the official
# SPDX JSON schema which can be found here: https://github.com/spdx/spdx-spec
# in schemas/spdx-schema.json
#
import hashlib
import itertools
import json
# Version of the SPDX specification these classes implement.
SPDX_VERSION = "2.2"
#
# The following are the support classes that are used to implement SPDX object
#
class _Property(object):
"""
A generic SPDX object property. The different types will derive from this
class
"""
def __init__(self, *, default=None):
self.default = default
def setdefault(self, dest, name):
if self.default is not None:
dest.setdefault(name, self.default)
class _String(_Property):
    """
    A scalar string property of an SPDX object.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def set_property(self, attrs, name):
        # Install a Python property that proxies the value stored under
        # `name` in the owning instance's _spdx dict.
        def read_value(obj):
            return obj._spdx[name]

        def write_value(obj, value):
            obj._spdx[name] = value

        def delete_value(obj):
            del obj._spdx[name]

        attrs[name] = property(read_value, write_value, delete_value)

    def init(self, source):
        # Strings deserialize as themselves.
        return source
class _Object(_Property):
    """
    A scalar SPDX object property of a SPDX object
    """
    def __init__(self, cls, **kwargs):
        super().__init__(**kwargs)
        # Class instantiated on first read and used to deserialize sources.
        self.cls = cls

    def set_property(self, attrs, name):
        def get_helper(obj):
            # Create the nested object on demand so chained attribute
            # access works without explicit initialization.
            if not name in obj._spdx:
                obj._spdx[name] = self.cls()
            return obj._spdx[name]

        def set_helper(obj, value):
            obj._spdx[name] = value

        def del_helper(obj):
            del obj._spdx[name]

        # Fix: del_helper was defined but never wired up, so `del obj.prop`
        # raised AttributeError; the other property kinds (_String,
        # _ListProperty) all pass their deleter to property().
        attrs[name] = property(get_helper, set_helper, del_helper)

    def init(self, source):
        # Deserialize a mapping into an instance of the wrapped class.
        return self.cls(**source)
class _ListProperty(_Property):
    """
    A property holding a list of SPDX properties.
    """
    def __init__(self, prop, **kwargs):
        super().__init__(**kwargs)
        # Property type describing each list element.
        self.prop = prop

    def set_property(self, attrs, name):
        def read_list(obj):
            # Materialize an empty list on first access.
            return obj._spdx.setdefault(name, [])

        def write_list(obj, value):
            obj._spdx[name] = list(value)

        def delete_list(obj):
            del obj._spdx[name]

        attrs[name] = property(read_list, write_list, delete_list)

    def init(self, source):
        return [self.prop.init(item) for item in source]
class _StringList(_ListProperty):
    """
    A list of strings as a property for an SPDX object
    """
    def __init__(self, **kwargs):
        # Element type is the scalar string property.
        super().__init__(_String(), **kwargs)
class _ObjectList(_ListProperty):
    """
    A list of SPDX objects as a property for an SPDX object
    """
    def __init__(self, cls, **kwargs):
        # Element type is a scalar object property of the given class.
        super().__init__(_Object(cls), **kwargs)
class MetaSPDXObject(type):
    """
    Metaclass that collects every _Property attribute of an SPDX class into
    a `_properties` mapping and replaces each one with a real Python
    property via set_property().
    """
    def __new__(mcls, name, bases, attrs):
        props = {}
        for key in attrs.keys():
            if isinstance(attrs[key], _Property):
                prop = attrs[key]
                props[key] = prop
                # Replace the _Property placeholder with a live property.
                prop.set_property(attrs, key)
        attrs["_properties"] = props

        return super().__new__(mcls, name, bases, attrs)
class SPDXObject(metaclass=MetaSPDXObject):
    """
    Base class for all SPDX spec classes; stores property values in the
    `_spdx` dict and deserializes them from keyword arguments.
    """
    def __init__(self, **d):
        self._spdx = {}
        for name, prop in self._properties.items():
            prop.setdefault(self._spdx, name)
            if name in d:
                self._spdx[name] = prop.init(d[name])

    def serializer(self):
        # The backing dict is already JSON-shaped.
        return self._spdx

    def __setattr__(self, name, value):
        # Reject anything that is not a declared SPDX property so typos do
        # not silently create non-spec fields.
        if name not in self._properties and name != "_spdx":
            raise KeyError("%r is not a valid SPDX property" % name)
        super().__setattr__(name, value)
#
# These are the SPDX objects implemented from the spec. The *only* properties
# that can be added to these objects are ones directly specified in the SPDX
# spec, however you may add helper functions to make operations easier.
#
# Defaults should *only* be specified if the SPDX spec says there is a certain
# required value for a field (e.g. dataLicense), or if the field is mandatory
# and has some sane "this field is unknown" (e.g. "NOASSERTION")
#
class SPDXAnnotation(SPDXObject):
    """A reviewer/tool annotation attached to an SPDX element."""
    annotationDate = _String()
    annotationType = _String()
    annotator = _String()
    comment = _String()
class SPDXChecksum(SPDXObject):
    """A checksum (algorithm name plus hex digest) per the SPDX spec."""
    algorithm = _String()
    checksumValue = _String()
class SPDXRelationship(SPDXObject):
    """A typed relationship between two SPDX elements (by SPDXID)."""
    spdxElementId = _String()
    relatedSpdxElement = _String()
    relationshipType = _String()
    comment = _String()
    annotations = _ObjectList(SPDXAnnotation)
class SPDXExternalReference(SPDXObject):
    """An external reference (e.g. CPE, purl) attached to a package."""
    referenceCategory = _String()
    referenceType = _String()
    referenceLocator = _String()
class SPDXPackageVerificationCode(SPDXObject):
    """The package verification code and the files excluded from it."""
    packageVerificationCodeValue = _String()
    packageVerificationCodeExcludedFiles = _StringList()
class SPDXPackage(SPDXObject):
    """An SPDX package. Mandatory string fields default to "NOASSERTION"
    as the spec's "unknown" value (see module comment above)."""

    # Checksum algorithm names accepted by the SPDX spec for packages
    ALLOWED_CHECKSUMS = [
        "SHA1",
        "SHA224",
        "SHA256",
        "SHA384",
        "SHA512",
        "MD2",
        "MD4",
        "MD5",
        "MD6",
    ]

    name = _String()
    SPDXID = _String()
    versionInfo = _String()
    downloadLocation = _String(default="NOASSERTION")
    supplier = _String(default="NOASSERTION")
    homepage = _String()
    licenseConcluded = _String(default="NOASSERTION")
    licenseDeclared = _String(default="NOASSERTION")
    summary = _String()
    description = _String()
    sourceInfo = _String()
    copyrightText = _String(default="NOASSERTION")
    licenseInfoFromFiles = _StringList(default=["NOASSERTION"])
    externalRefs = _ObjectList(SPDXExternalReference)
    packageVerificationCode = _Object(SPDXPackageVerificationCode)
    hasFiles = _StringList()
    packageFileName = _String()
    annotations = _ObjectList(SPDXAnnotation)
    checksums = _ObjectList(SPDXChecksum)
class SPDXFile(SPDXObject):
    """An SPDX file entry; license/copyright default to "NOASSERTION"."""
    SPDXID = _String()
    fileName = _String()
    licenseConcluded = _String(default="NOASSERTION")
    copyrightText = _String(default="NOASSERTION")
    licenseInfoInFiles = _StringList(default=["NOASSERTION"])
    checksums = _ObjectList(SPDXChecksum)
    fileTypes = _StringList()
class SPDXCreationInfo(SPDXObject):
    """Document creation metadata (when, by whom, license list version)."""
    created = _String()
    licenseListVersion = _String()
    comment = _String()
    creators = _StringList()
class SPDXExternalDocumentRef(SPDXObject):
    """A reference from this document to another SPDX document."""
    externalDocumentId = _String()
    spdxDocument = _String()
    checksum = _Object(SPDXChecksum)
class SPDXExtractedLicensingInfo(SPDXObject):
    """A non-SPDX (LicenseRef-*) license with its extracted text."""
    name = _String()
    comment = _String()
    licenseId = _String()
    extractedText = _String()
class SPDXDocument(SPDXObject):
    """Top-level SPDX document; serializes to/from one JSON document."""

    # SPDX_VERSION is a module-level constant defined earlier in this file
    spdxVersion = _String(default="SPDX-" + SPDX_VERSION)
    dataLicense = _String(default="CC0-1.0")
    SPDXID = _String(default="SPDXRef-DOCUMENT")
    name = _String()
    documentNamespace = _String()
    creationInfo = _Object(SPDXCreationInfo)
    packages = _ObjectList(SPDXPackage)
    files = _ObjectList(SPDXFile)
    relationships = _ObjectList(SPDXRelationship)
    externalDocumentRefs = _ObjectList(SPDXExternalDocumentRef)
    hasExtractedLicensingInfos = _ObjectList(SPDXExtractedLicensingInfo)

    def __init__(self, **d):
        super().__init__(**d)

    def to_json(self, f, *, sort_keys=False, indent=None, separators=None):
        """Write this document as JSON to binary stream `f`.

        Returns the SHA-1 hex digest of the exact bytes written, so the
        caller can record the document checksum without re-reading it.
        """
        class Encoder(json.JSONEncoder):
            def default(self, o):
                # Any nested SPDXObject serializes via its backing dict
                if isinstance(o, SPDXObject):
                    return o.serializer()

                return super().default(o)

        sha1 = hashlib.sha1()
        for chunk in Encoder(
            sort_keys=sort_keys,
            indent=indent,
            separators=separators,
        ).iterencode(self):
            chunk = chunk.encode("utf-8")
            f.write(chunk)
            sha1.update(chunk)

        return sha1.hexdigest()

    @classmethod
    def from_json(cls, f):
        """Deserialize a document from the JSON stream `f`."""
        return cls(**json.load(f))

    def add_relationship(self, _from, relationship, _to, *, comment=None, annotation=None):
        """Append a relationship; endpoints may be SPDXObjects or raw IDs."""
        if isinstance(_from, SPDXObject):
            from_spdxid = _from.SPDXID
        else:
            from_spdxid = _from

        if isinstance(_to, SPDXObject):
            to_spdxid = _to.SPDXID
        else:
            to_spdxid = _to

        r = SPDXRelationship(
            spdxElementId=from_spdxid,
            relatedSpdxElement=to_spdxid,
            relationshipType=relationship,
        )

        if comment is not None:
            r.comment = comment

        if annotation is not None:
            r.annotations.append(annotation)

        self.relationships.append(r)

    def find_by_spdxid(self, spdxid):
        """Return the package or file with the given SPDXID, or None."""
        for o in itertools.chain(self.packages, self.files):
            if o.SPDXID == spdxid:
                return o
        return None

    def find_external_document_ref(self, namespace):
        """Return the external document ref for `namespace`, or None."""
        for r in self.externalDocumentRefs:
            if r.spdxDocument == namespace:
                return r
        return None
def is_compiled_source(filename, compiled_sources, types):
    """
    Check if the file is a compiled file
    """
    import os

    # Without a list of compiled sources, assume everything is compiled.
    if not compiled_sources:
        return True

    # Files whose extension is not a known compiled type (Makefiles,
    # python .py files, ...) must always be included in the SPDX.
    # Note: "a.tar.gz".partition(".") yields extension "tar.gz".
    suffix = os.path.basename(filename).partition(".")[2]
    if suffix not in types:
        return True

    # Otherwise keep the file only if it was actually compiled.
    return filename in compiled_sources
def get_compiled_sources(d):
    """
    Get list of compiled sources from debug information and normalize the paths

    Returns a (sources, types) pair: the set of source paths referenced by
    debug info, and the set of file-name extensions seen among them.
    Returns ([], []) when no debugsources.list is available.
    """
    import itertools
    import oe.package

    source_info = oe.package.read_debugsources_info(d)
    if not source_info:
        bb.debug(1, "Do not have debugsources.list. Skipping")
        return [], []

    # Sources are not split now in SPDX, so we aggregate them
    sources = set(itertools.chain.from_iterable(source_info.values()))
    # Check extensions of files
    types = set()
    for src in sources:
        basename = os.path.basename(src)
        # Everything after the first dot, e.g. "tar.gz" for "a.tar.gz";
        # empty string for extensionless files (filtered by the `and ext`)
        ext = basename.partition(".")[2]
        if ext not in types and ext:
            types.add(ext)
    bb.debug(1, f"Num of sources: {len(sources)} and types: {len(types)} {str(types)}")
    return sources, types

View File

@@ -0,0 +1,691 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import bb.siggen
import bb.runqueue
import oe
import netrc
def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
    # Return True if we should keep the dependency, False to drop it
    #
    # Decides whether a task dependency should influence the signature of
    # `task` in `recipename`. Dropping safe dependencies keeps sstate
    # signatures stable across unrelated changes.
    def isNative(x):
        return x.endswith("-native")
    def isCross(x):
        return "-cross-" in x
    def isNativeSDK(x):
        return x.startswith("nativesdk-")
    def isKernel(mc, fn):
        inherits = " ".join(dataCaches[mc].inherits[fn])
        return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
    def isPackageGroup(mc, fn):
        inherits = " ".join(dataCaches[mc].inherits[fn])
        return "/packagegroup.bbclass" in inherits
    def isAllArch(mc, fn):
        inherits = " ".join(dataCaches[mc].inherits[fn])
        return "/allarch.bbclass" in inherits
    def isImage(mc, fn):
        return "/image.bbclass" in " ".join(dataCaches[mc].inherits[fn])

    depmc, _, deptaskname, depmcfn = bb.runqueue.split_tid_mcfn(dep)
    mc, _ = bb.runqueue.split_mc(fn)

    # We can skip the rm_work task signature to avoid running the task
    # when we remove some tasks from the dependencie chain
    # i.e INHERIT:remove = "create-spdx" will trigger the do_rm_work
    if task == "do_rm_work":
        return False

    # (Almost) always include our own inter-task dependencies (unless it comes
    # from a mcdepends). The exception is the special
    # do_kernel_configme->do_unpack_and_patch dependency from archiver.bbclass.
    if recipename == depname and depmc == mc:
        if task == "do_kernel_configme" and deptaskname == "do_unpack_and_patch":
            return False
        return True

    # Exclude well defined recipe->dependency
    if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
        return False

    # Check for special wildcard
    if "*->%s" % depname in siggen.saferecipedeps and recipename != depname:
        return False

    # Don't change native/cross/nativesdk recipe dependencies any further
    if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
        return True

    # Only target packages beyond here

    # allarch packagegroups are assumed to have well behaved names which don't change between architecures/tunes
    if isPackageGroup(mc, fn) and isAllArch(mc, fn) and not isNative(depname):
        return False

    # Exclude well defined machine specific configurations which don't change ABI
    if depname in siggen.abisaferecipes and not isImage(mc, fn):
        return False

    # Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
    # if we're just doing an RRECOMMENDS:xxx = "kernel-module-*", not least because the checksum
    # is machine specific.
    # Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
    # and we reccomend a kernel-module, we exclude the dependency.
    if dataCaches and isKernel(depmc, depmcfn) and not isKernel(mc, fn):
        for pkg in dataCaches[mc].runrecs[fn]:
            if " ".join(dataCaches[mc].runrecs[fn][pkg]).find("kernel-module-") != -1:
                return False

    # Default to keep dependencies
    return True
def sstate_lockedsigs(d):
    """
    Parse the SIGGEN_LOCKEDSIGS_* variables from the datastore.

    Each entry has the form "<pn>:<task>:<hash>"; the result maps
    pn -> task -> [hash, name-of-variable-it-came-from].
    """
    sigs = {}
    for sigtype in (d.getVar("SIGGEN_LOCKEDSIGS_TYPES") or "").split():
        varname = "SIGGEN_LOCKEDSIGS_%s" % sigtype
        for entry in (d.getVar(varname) or "").split():
            # Split on the first two colons only; the hash may not
            # contain colons but this keeps parsing explicit.
            pn, task, sighash = entry.split(":", 2)
            sigs.setdefault(pn, {})[task] = [sighash, varname]
    return sigs
class SignatureGeneratorOEBasicHashMixIn(object):
    """Mixin adding OE policy (safe-dep filtering, locked signatures) on
    top of bitbake's basic-hash signature generator."""

    supports_multiconfig_datacaches = True

    def init_rundepcheck(self, data):
        # Configuration for sstate_rundepfilter and the locked-sig checks
        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
        self.lockedsigs = sstate_lockedsigs(data)
        self.lockedhashes = {}
        self.lockedpnmap = {}
        self.lockedhashfn = {}
        self.machine = data.getVar("MACHINE")
        self.mismatch_msgs = []
        self.mismatch_number = 0
        self.lockedsigs_msgs = ""
        self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
                                "").split()
        self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
        # Guards get_cached_unihash re-entrancy from get_taskhash
        self._internal = False
        pass

    def tasks_resolved(self, virtmap, virtpnmap, dataCache):
        # Translate virtual/xxx entries to PN values
        newabisafe = []
        for a in self.abisaferecipes:
            if a in virtpnmap:
                newabisafe.append(virtpnmap[a])
            else:
                newabisafe.append(a)
        self.abisaferecipes = newabisafe
        newsafedeps = []
        for a in self.saferecipedeps:
            a1, a2 = a.split("->")
            if a1 in virtpnmap:
                a1 = virtpnmap[a1]
            if a2 in virtpnmap:
                a2 = virtpnmap[a2]
            newsafedeps.append(a1 + "->" + a2)
        self.saferecipedeps = newsafedeps

    def rundep_check(self, fn, recipename, task, dep, depname, dataCaches = None):
        return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCaches)

    def get_taskdata(self):
        # Extend the pickled state passed to worker processes
        return (self.lockedpnmap, self.lockedhashfn, self.lockedhashes) + super().get_taskdata()

    def set_taskdata(self, data):
        self.lockedpnmap, self.lockedhashfn, self.lockedhashes = data[:3]
        super().set_taskdata(data[3:])

    def dump_sigs(self, dataCache, options):
        if 'lockedsigs' in options:
            sigfile = os.getcwd() + "/locked-sigs.inc"
            bb.plain("Writing locked sigs to %s" % sigfile)
            self.dump_lockedsigs(sigfile)
        return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)

    def get_taskhash(self, tid, deps, dataCaches):
        # Serve a previously-resolved locked hash, or fall through to the
        # basic hash when we already know this tid is not locked.
        if tid in self.lockedhashes:
            if self.lockedhashes[tid]:
                return self.lockedhashes[tid]
            else:
                return super().get_taskhash(tid, deps, dataCaches)

        h = super().get_taskhash(tid, deps, dataCaches)

        (mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)

        recipename = dataCaches[mc].pkg_fn[fn]
        self.lockedpnmap[fn] = recipename
        self.lockedhashfn[fn] = dataCaches[mc].hashfn[fn]

        unlocked = False
        if recipename in self.unlockedrecipes:
            unlocked = True
        else:
            def recipename_from_dep(dep):
                (depmc, _, _, depfn) = bb.runqueue.split_tid_mcfn(dep)
                return dataCaches[depmc].pkg_fn[depfn]

            # If any unlocked recipe is in the direct dependencies then the
            # current recipe should be unlocked as well.
            depnames = [ recipename_from_dep(x) for x in deps if mc == bb.runqueue.mc_from_tid(x)]
            if any(x in y for y in depnames for x in self.unlockedrecipes):
                self.unlockedrecipes[recipename] = ''
                unlocked = True

        if not unlocked and recipename in self.lockedsigs:
            if task in self.lockedsigs[recipename]:
                h_locked = self.lockedsigs[recipename][task][0]
                var = self.lockedsigs[recipename][task][1]
                self.lockedhashes[tid] = h_locked
                # _internal stops get_cached_unihash from returning the
                # locked hash while we query the real unihash here
                self._internal = True
                unihash = self.get_unihash(tid)
                self._internal = False
                #bb.warn("Using %s %s %s" % (recipename, task, h))

                if h != h_locked and h_locked != unihash:
                    self.mismatch_number += 1
                    self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
                                          % (recipename, task, h, h_locked, var))

                return h_locked

        self.lockedhashes[tid] = False
        #bb.warn("%s %s %s" % (recipename, task, h))
        return h

    def get_stampfile_hash(self, tid):
        if tid in self.lockedhashes and self.lockedhashes[tid]:
            return self.lockedhashes[tid]
        return super().get_stampfile_hash(tid)

    def get_cached_unihash(self, tid):
        if tid in self.lockedhashes and self.lockedhashes[tid] and not self._internal:
            return self.lockedhashes[tid]
        return super().get_cached_unihash(tid)

    def dump_sigtask(self, fn, task, stampbase, runtime):
        # Locked tasks keep their existing siginfo; don't rewrite it
        tid = fn + ":" + task
        if tid in self.lockedhashes and self.lockedhashes[tid]:
            return
        super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)

    def dump_lockedsigs(self, sigfile, taskfilter=None):
        # Group tids by the "t-<arch>" type derived from their hashfn
        types = {}
        for tid in self.runtaskdeps:
            # Bitbake changed this to a tuple in newer versions
            if isinstance(tid, tuple):
                tid = tid[1]
            if taskfilter:
                if not tid in taskfilter:
                    continue
            fn = bb.runqueue.fn_from_tid(tid)
            t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
            t = 't-' + t.replace('_', '-')
            if t not in types:
                types[t] = []
            types[t].append(tid)

        with open(sigfile, "w") as f:
            l = sorted(types)
            for t in l:
                f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
                types[t].sort()
                sortedtid = sorted(types[t], key=lambda tid: self.lockedpnmap[bb.runqueue.fn_from_tid(tid)])
                for tid in sortedtid:
                    (_, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
                    if tid not in self.taskhash:
                        continue
                    f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.get_unihash(tid) + " \\\n")
                f.write(' "\n')
            f.write('SIGGEN_LOCKEDSIGS_TYPES:%s = "%s"' % (self.machine, " ".join(l)))

    def dump_siglist(self, sigfile, path_prefix_strip=None):
        def strip_fn(fn):
            nonlocal path_prefix_strip
            if not path_prefix_strip:
                return fn

            fn_exp = fn.split(":")
            if fn_exp[-1].startswith(path_prefix_strip):
                fn_exp[-1] = fn_exp[-1][len(path_prefix_strip):]

            return ":".join(fn_exp)

        with open(sigfile, "w") as f:
            tasks = []
            for taskitem in self.taskhash:
                (fn, task) = taskitem.rsplit(":", 1)
                pn = self.lockedpnmap[fn]
                tasks.append((pn, task, strip_fn(fn), self.taskhash[taskitem]))
            for (pn, task, fn, taskhash) in sorted(tasks):
                f.write('%s:%s %s %s\n' % (pn, task, fn, taskhash))

    def checkhashes(self, sq_data, missed, found, d):
        # Report locked-signature mismatches and locked tasks missing from
        # sstate, at the severity configured by the two check variables.
        warn_msgs = []
        error_msgs = []
        sstate_missing_msgs = []
        info_msgs = None

        if self.lockedsigs:
            if len(self.lockedsigs) > 10:
                self.lockedsigs_msgs = "There are %s recipes with locked tasks (%s task(s) have non matching signature)" % (len(self.lockedsigs), self.mismatch_number)
            else:
                self.lockedsigs_msgs = "The following recipes have locked tasks:"
                for pn in self.lockedsigs:
                    self.lockedsigs_msgs += " %s" % (pn)

        for tid in sq_data['hash']:
            if tid not in found:
                for pn in self.lockedsigs:
                    taskname = bb.runqueue.taskname_from_tid(tid)
                    if sq_data['hash'][tid] in iter(self.lockedsigs[pn].values()):
                        if taskname == 'do_shared_workdir':
                            continue
                        sstate_missing_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
                                               % (pn, taskname, sq_data['hash'][tid]))

        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK")
        if checklevel == 'info':
            info_msgs = self.lockedsigs_msgs
        if checklevel == 'warn' or checklevel == 'info':
            warn_msgs += self.mismatch_msgs
        elif checklevel == 'error':
            error_msgs += self.mismatch_msgs

        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK")
        if checklevel == 'warn':
            warn_msgs += sstate_missing_msgs
        elif checklevel == 'error':
            error_msgs += sstate_missing_msgs

        if info_msgs:
            bb.note(info_msgs)
        if warn_msgs:
            bb.warn("\n".join(warn_msgs))
        if error_msgs:
            bb.fatal("\n".join(error_msgs))
class SignatureGeneratorOEBasicHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
    """OE basic-hash signature generator, selected via BB_SIGNATURE_HANDLER."""
    name = "OEBasicHash"
class SignatureGeneratorOEEquivHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorUniHashMixIn, bb.siggen.SignatureGeneratorBasicHash):
    """OE signature generator backed by a hash-equivalence server."""
    name = "OEEquivHash"

    def init_rundepcheck(self, data):
        super().init_rundepcheck(data)
        # Hash-equivalence server location and method are mandatory
        self.server = data.getVar('BB_HASHSERVE')
        if not self.server:
            bb.fatal("OEEquivHash requires BB_HASHSERVE to be set")
        self.method = data.getVar('SSTATE_HASHEQUIV_METHOD')
        if not self.method:
            bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_METHOD to be set")
        self.max_parallel = int(data.getVar('BB_HASHSERVE_MAX_PARALLEL') or 1)
        self.username = data.getVar("BB_HASHSERVE_USERNAME")
        self.password = data.getVar("BB_HASHSERVE_PASSWORD")
        if not self.username or not self.password:
            # Fall back to ~/.netrc credentials; missing file is fine,
            # a malformed one only warns.
            try:
                n = netrc.netrc()
                auth = n.authenticators(self.server)
                if auth is not None:
                    self.username, _, self.password = auth
            except FileNotFoundError:
                pass
            except netrc.NetrcParseError as e:
                bb.warn("Error parsing %s:%s: %s" % (e.filename, str(e.lineno), e.msg))
# Insert these classes into siggen's namespace so it can see and select them
# (bitbake picks the generator by matching BB_SIGNATURE_HANDLER to `name`)
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
bb.siggen.SignatureGeneratorOEEquivHash = SignatureGeneratorOEEquivHash
def find_siginfo(pn, taskname, taskhashlist, d):
    """ Find signature data files for comparison purposes

    Looks first in the stamps directory, then (if nothing conclusive is
    found) in the sstate cache. Returns a dict mapping hash value ->
    {'path': file, 'sstate': bool, 'time': mtime}.
    """
    # Cleanup: `glob` was imported twice (here and again before use) and
    # `fnmatch` was imported but never used in this function.
    import glob

    if not taskname:
        # We have to derive pn and taskname
        key = pn
        if key.startswith("mc:"):
            # mc:<mc>:<pn>:<task>
            _, _, pn, taskname = key.split(':', 3)
        else:
            # <pn>:<task>
            pn, taskname = key.split(':', 1)

    hashfiles = {}

    def get_hashval(siginfo):
        # sstate siginfo names embed the hash after the last ':',
        # stamp sigdata names end in ".<hash>"
        if siginfo.endswith('.siginfo'):
            return siginfo.rpartition(':')[2].partition('_')[0]
        else:
            return siginfo.rpartition('.')[2]

    def get_time(fullpath):
        return os.stat(fullpath).st_mtime

    # First search in stamps dir
    localdata = d.createCopy()
    localdata.setVar('MULTIMACH_TARGET_SYS', '*')
    localdata.setVar('PN', pn)
    localdata.setVar('PV', '*')
    localdata.setVar('PR', '*')
    localdata.setVar('EXTENDPE', '')
    stamp = localdata.getVar('STAMP')
    if pn.startswith("gcc-source"):
        # gcc-source shared workdir is a special case :(
        stamp = localdata.expand("${STAMPS_DIR}/work-shared/gcc-${PV}-${PR}")

    filespec = '%s.%s.sigdata.*' % (stamp, taskname)
    foundall = False

    bb.debug(1, "Calling glob.glob on {}".format(filespec))
    for fullpath in glob.glob(filespec):
        match = False
        if taskhashlist:
            for taskhash in taskhashlist:
                if fullpath.endswith('.%s' % taskhash):
                    hashfiles[taskhash] = {'path':fullpath, 'sstate':False, 'time':get_time(fullpath)}
                    if len(hashfiles) == len(taskhashlist):
                        foundall = True
                        break
        else:
            hashval = get_hashval(fullpath)
            hashfiles[hashval] = {'path':fullpath, 'sstate':False, 'time':get_time(fullpath)}

    if not taskhashlist or (len(hashfiles) < 2 and not foundall):
        # That didn't work, look in sstate-cache
        hashes = taskhashlist or ['?' * 64]
        localdata = bb.data.createCopy(d)
        for hashval in hashes:
            localdata.setVar('PACKAGE_ARCH', '*')
            localdata.setVar('TARGET_VENDOR', '*')
            localdata.setVar('TARGET_OS', '*')
            localdata.setVar('PN', pn)
            # gcc-source is a special case, same as with local stamps above
            if pn.startswith("gcc-source"):
                localdata.setVar('PN', "gcc")
            localdata.setVar('PV', '*')
            localdata.setVar('PR', '*')
            localdata.setVar('BB_TASKHASH', hashval)
            localdata.setVar('SSTATE_CURRTASK', taskname[3:])
            swspec = localdata.getVar('SSTATE_SWSPEC')
            if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic', 'do_preconfigure'] and swspec:
                localdata.setVar('SSTATE_PKGSPEC', '${SSTATE_SWSPEC}')
            elif pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
                localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
            filespec = '%s.siginfo' % localdata.getVar('SSTATE_PKG')

            bb.debug(1, "Calling glob.glob on {}".format(filespec))
            matchedfiles = glob.glob(filespec)
            for fullpath in matchedfiles:
                actual_hashval = get_hashval(fullpath)
                if actual_hashval in hashfiles:
                    continue
                hashfiles[actual_hashval] = {'path':fullpath, 'sstate':True, 'time':get_time(fullpath)}

    return hashfiles
# Make this OE-specific implementation the one bitbake's signature tooling
# (e.g. bitbake-diffsigs) picks up, and declare its interface version.
bb.siggen.find_siginfo = find_siginfo
bb.siggen.find_siginfo_version = 2
def sstate_get_manifest_filename(task, d):
    """
    Return the sstate manifest file path for a particular task.
    Also returns the datastore that can be used to query related variables.
    """
    localdata = d.createCopy()
    machinf = d.getVarFlag("do_" + task, 'stamp-extra-info')
    if machinf:
        localdata.setVar("SSTATE_MANMACH", machinf)
    manifest = localdata.expand("${SSTATE_MANFILEPREFIX}.%s" % task)
    return (manifest, localdata)
def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
    """Locate the sstate manifest for a task, trying every package
    architecture the recipe could have been built for.

    taskdata is the recipe name, taskdata2 the (possibly virtual:multilib:)
    qualified name. Returns (manifest_path, datastore); calls bb.fatal if
    no candidate manifest exists.
    """
    d2 = d
    variant = ''
    curr_variant = ''
    if d.getVar("BBEXTENDCURR") == "multilib":
        curr_variant = d.getVar("BBEXTENDVARIANT")
        if "virtclass-multilib" not in d.getVar("OVERRIDES"):
            curr_variant = "invalid"
    if taskdata2.startswith("virtual:multilib"):
        variant = taskdata2.split(":")[2]
    if curr_variant != variant:
        # Switch to (cached) datastore of the requested multilib variant
        if variant not in multilibcache:
            multilibcache[variant] = oe.utils.get_multilib_datastore(variant, d)
        d2 = multilibcache[variant]

    # Candidate PACKAGE_ARCH values by recipe class
    if taskdata.endswith("-native"):
        pkgarchs = ["${BUILD_ARCH}", "${BUILD_ARCH}_${ORIGNATIVELSBSTRING}"]
    elif taskdata.startswith("nativesdk-"):
        pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
    elif "-cross-canadian" in taskdata:
        pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
    elif "-cross-" in taskdata:
        pkgarchs = ["${BUILD_ARCH}"]
    elif "-crosssdk" in taskdata:
        pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
    else:
        pkgarchs = ['${MACHINE_ARCH}']
        # Most-specific arch first, then the extra archs in reverse order
        pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
        pkgarchs.append('allarch')
        pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')

    searched_manifests = []

    for pkgarch in pkgarchs:
        manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
        if os.path.exists(manifest):
            return manifest, d2
        searched_manifests.append(manifest)
    bb.fatal("The sstate manifest for task '%s:%s' (multilib variant '%s') could not be found.\nThe pkgarchs considered were: %s.\nBut none of these manifests exists:\n    %s"
            % (taskdata, taskname, variant, d2.expand(", ".join(pkgarchs)),"\n    ".join(searched_manifests)))
    # Unreachable: bb.fatal raises; kept for defensive clarity
    return None, d2
def OEOuthashBasic(path, sigfile, task, d):
    """
    Basic output hash function

    Calculates the output hash of a task by hashing all output file metadata,
    and file contents.

    NOTE: the exact byte layout written through update_hash() IS the hash
    input; any formatting change here changes every equivalence hash.
    """
    import hashlib
    import stat
    import pwd
    import grp
    import re
    import fnmatch

    def update_hash(s):
        # Feed the running hash and, if requested, a human-readable sigfile
        s = s.encode('utf-8')
        h.update(s)
        if sigfile:
            sigfile.write(s)

    h = hashlib.sha256()
    prev_dir = os.getcwd()
    corebase = d.getVar("COREBASE")
    tmpdir = d.getVar("TMPDIR")
    # Owners only matter when running under pseudo (fakeroot)
    include_owners = os.environ.get('PSEUDO_DISABLED') == '0'
    if "package_write_" in task or task == "package_qa":
        include_owners = False
    include_timestamps = False
    include_root = True
    if task == "package":
        include_timestamps = True
        include_root = False
    source_date_epoch = float(d.getVar("SOURCE_DATE_EPOCH"))
    hash_version = d.getVar('HASHEQUIV_HASH_VERSION')
    extra_sigdata = d.getVar("HASHEQUIV_EXTRA_SIGDATA")

    # SSTATE_HASHEQUIV_FILEMAP entries: "<task>:<glob>:<replacement>";
    # matched files get path strings filtered out of their content hash.
    filemaps = {}
    for m in (d.getVar('SSTATE_HASHEQUIV_FILEMAP') or '').split():
        entry = m.split(":")
        if len(entry) != 3 or entry[0] != task:
            continue
        filemaps.setdefault(entry[1], [])
        filemaps[entry[1]].append(entry[2])

    try:
        os.chdir(path)
        basepath = os.path.normpath(path)

        update_hash("OEOuthashBasic\n")
        if hash_version:
            update_hash(hash_version + "\n")
        if extra_sigdata:
            update_hash(extra_sigdata + "\n")

        # It is only currently useful to get equivalent hashes for things that
        # can be restored from sstate. Since the sstate object is named using
        # SSTATE_PKGSPEC and the task name, those should be included in the
        # output hash calculation.
        update_hash("SSTATE_PKGSPEC=%s\n" % d.getVar('SSTATE_PKGSPEC'))
        update_hash("task=%s\n" % task)

        for root, dirs, files in os.walk('.', topdown=True):
            # Sort directories to ensure consistent ordering when recursing
            dirs.sort()
            files.sort()

            def process(path):
                # Emit one ls-style line per filesystem entry
                s = os.lstat(path)

                if stat.S_ISDIR(s.st_mode):
                    update_hash('d')
                elif stat.S_ISCHR(s.st_mode):
                    update_hash('c')
                elif stat.S_ISBLK(s.st_mode):
                    update_hash('b')
                elif stat.S_ISSOCK(s.st_mode):
                    update_hash('s')
                elif stat.S_ISLNK(s.st_mode):
                    update_hash('l')
                elif stat.S_ISFIFO(s.st_mode):
                    update_hash('p')
                else:
                    update_hash('-')

                def add_perm(mask, on, off='-'):
                    if mask & s.st_mode:
                        update_hash(on)
                    else:
                        update_hash(off)

                add_perm(stat.S_IRUSR, 'r')
                add_perm(stat.S_IWUSR, 'w')
                if stat.S_ISUID & s.st_mode:
                    add_perm(stat.S_IXUSR, 's', 'S')
                else:
                    add_perm(stat.S_IXUSR, 'x')

                if include_owners:
                    # Group/other permissions are only relevant in pseudo context
                    add_perm(stat.S_IRGRP, 'r')
                    add_perm(stat.S_IWGRP, 'w')
                    if stat.S_ISGID & s.st_mode:
                        add_perm(stat.S_IXGRP, 's', 'S')
                    else:
                        add_perm(stat.S_IXGRP, 'x')
                    add_perm(stat.S_IROTH, 'r')
                    add_perm(stat.S_IWOTH, 'w')
                    if stat.S_ISVTX & s.st_mode:
                        update_hash('t')
                    else:
                        add_perm(stat.S_IXOTH, 'x')
                    try:
                        update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
                        update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
                    except KeyError as e:
                        msg = ("KeyError: %s\nPath %s is owned by uid %d, gid %d, which doesn't match "
                               "any user/group on target. This may be due to host contamination." %
                               (e, os.path.abspath(path), s.st_uid, s.st_gid))
                        raise Exception(msg).with_traceback(e.__traceback__)

                if include_timestamps:
                    # Need to clamp to SOURCE_DATE_EPOCH
                    if s.st_mtime > source_date_epoch:
                        update_hash(" %10d" % source_date_epoch)
                    else:
                        update_hash(" %10d" % s.st_mtime)

                update_hash(" ")
                if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
                    update_hash("%9s" % ("%d.%d" % (os.major(s.st_rdev), os.minor(s.st_rdev))))
                else:
                    update_hash(" " * 9)

                filterfile = False
                for entry in filemaps:
                    if fnmatch.fnmatch(path, entry):
                        filterfile = True

                update_hash(" ")
                if stat.S_ISREG(s.st_mode) and not filterfile:
                    update_hash("%10d" % s.st_size)
                else:
                    update_hash(" " * 10)

                update_hash(" ")
                fh = hashlib.sha256()
                if stat.S_ISREG(s.st_mode):
                    # Hash file contents
                    if filterfile:
                        # Need to ignore paths in crossscripts and postinst-useradd files.
                        # NOTE(review): `d` here shadows the datastore argument;
                        # harmless since the datastore is not used below this point.
                        with open(path, 'rb') as d:
                            chunk = d.read()
                            chunk = chunk.replace(bytes(basepath, encoding='utf8'), b'')
                            for entry in filemaps:
                                if not fnmatch.fnmatch(path, entry):
                                    continue
                                for r in filemaps[entry]:
                                    if r.startswith("regex-"):
                                        chunk = re.sub(bytes(r[6:], encoding='utf8'), b'', chunk)
                                    else:
                                        chunk = chunk.replace(bytes(r, encoding='utf8'), b'')
                            fh.update(chunk)
                    else:
                        with open(path, 'rb') as d:
                            for chunk in iter(lambda: d.read(4096), b""):
                                fh.update(chunk)
                    update_hash(fh.hexdigest())
                else:
                    update_hash(" " * len(fh.hexdigest()))

                update_hash(" %s" % path)

                if stat.S_ISLNK(s.st_mode):
                    update_hash(" -> %s" % os.readlink(path))

                update_hash("\n")

            # Process this directory and all its child files
            if include_root or root != ".":
                process(root)
            for f in files:
                if f == 'fixmepath':
                    continue
                process(os.path.join(root, f))
            for dir in dirs:
                # Symlinked dirs are hashed as entries but not descended into
                if os.path.islink(os.path.join(root, dir)):
                    process(os.path.join(root, dir))
    finally:
        os.chdir(prev_dir)

    return h.hexdigest()

View File

@@ -0,0 +1,332 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import logging
import oe.classutils
import shlex
from bb.process import Popen, ExecutionError
logger = logging.getLogger('BitBake.OE.Terminal')
class UnsupportedTerminal(Exception):
    """Raised when a particular terminal cannot be used in this environment."""
    pass
class NoSupportedTerminals(Exception):
    """Raised when no registered terminal could be started; carries the
    list of terminal commands that were tried."""
    def __init__(self, terms):
        self.terms = terms
class Registry(oe.classutils.ClassRegistry):
    """Metaclass registry of Terminal subclasses, keyed by lowercased
    class name. A terminal counts as implemented once it sets `command`."""
    command = None

    def __init__(cls, name, bases, attrs):
        super(Registry, cls).__init__(name.lower(), bases, attrs)

    @property
    def implemented(cls):
        return bool(cls.command)
class Terminal(Popen, metaclass=Registry):
    """Base terminal launcher: instantiating spawns `command` (a format
    string or argv list defined by the subclass) running sh_cmd."""
    def __init__(self, sh_cmd, title=None, env=None, d=None):
        from subprocess import STDOUT
        fmt_sh_cmd = self.format_command(sh_cmd, title)
        try:
            Popen.__init__(self, fmt_sh_cmd, env=env, stderr=STDOUT)
        except OSError as exc:
            import errno
            if exc.errno == errno.ENOENT:
                # The terminal binary is not installed
                raise UnsupportedTerminal(self.name)
            else:
                raise

    def format_command(self, sh_cmd, title):
        # NOTE(review): relies on `os` being available without an explicit
        # import in this module (bitbake injects it into builtins) — confirm.
        fmt = {'title': title or 'Terminal', 'command': sh_cmd, 'cwd': os.getcwd() }
        if isinstance(self.command, str):
            return shlex.split(self.command.format(**fmt))
        else:
            return [element.format(**fmt) for element in self.command]
class XTerminal(Terminal):
    """Base class for X11 terminals; requires a DISPLAY to be usable."""
    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Fix: check DISPLAY *before* spawning. Previously Terminal.__init__
        # launched the terminal process first and only then raised
        # UnsupportedTerminal, leaking a child process (or relying on the
        # spawn itself to fail) when no X display is available.
        if not os.environ.get('DISPLAY'):
            raise UnsupportedTerminal(self.name)
        Terminal.__init__(self, sh_cmd, title, env, d)
class Gnome(XTerminal):
    """GNOME Terminal launcher."""
    command = 'gnome-terminal -t "{title}" -- {command}'
    priority = 2

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Recent versions of gnome-terminal does not support non-UTF8 charset:
        # https://bugzilla.gnome.org/show_bug.cgi?id=732127; as a workaround,
        # clearing the LC_ALL environment variable so it uses the locale.
        # Once fixed on the gnome-terminal project, this should be removed.
        if os.getenv('LC_ALL'): os.putenv('LC_ALL','')

        XTerminal.__init__(self, sh_cmd, title, env, d)
class Mate(XTerminal):
    """MATE Terminal launcher."""
    command = 'mate-terminal --disable-factory -t "{title}" -x {command}'
    priority = 2
class Xfce(XTerminal):
    """Xfce Terminal launcher."""
    command = 'xfce4-terminal -T "{title}" -e "{command}"'
    priority = 2
class Terminology(XTerminal):
    """Enlightenment Terminology launcher."""
    command = 'terminology -T="{title}" -e {command}'
    priority = 2
class Konsole(XTerminal):
    """KDE Konsole launcher; adapts its command to the installed version."""
    command = 'konsole --separate --workdir . -p tabtitle="{title}" -e {command}'
    priority = 2

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Check version (check_terminal_version is defined later in this
        # module); assigning self.command only affects this instance.
        vernum = check_terminal_version("konsole")
        if vernum and bb.utils.vercmp_string_op(vernum, "2.0.0", "<"):
            # Konsole from KDE 3.x
            self.command = 'konsole -T "{title}" -e {command}'
        elif vernum and bb.utils.vercmp_string_op(vernum, "16.08.1", "<"):
            # Konsole pre 16.08.01 Has nofork
            self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
        XTerminal.__init__(self, sh_cmd, title, env, d)
class XTerm(XTerminal):
    """xterm launcher (lower priority fallback)."""
    command = 'xterm -T "{title}" -e {command}'
    priority = 1
class Rxvt(XTerminal):
    """rxvt launcher (lower priority fallback)."""
    command = 'rxvt -T "{title}" -e {command}'
    priority = 1
class URxvt(XTerminal):
    """urxvt launcher (lower priority fallback)."""
    command = 'urxvt -T "{title}" -e {command}'
    priority = 1
class Screen(Terminal):
    """Detached GNU screen session; the user attaches manually."""
    command = 'screen -D -m -t "{title}" -S devshell {command}'

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # Unique session name so multiple devshells can coexist
        s_id = "devshell_%i" % os.getpid()
        self.command = "screen -D -m -t \"{title}\" -S %s {command}" % s_id
        Terminal.__init__(self, sh_cmd, title, env, d)
        msg = 'Screen started. Please connect in another terminal with ' \
            '"screen -r %s"' % s_id
        if (d):
            # With a datastore, announce via the event system (retries for
            # 10s at 0.5s intervals per LogExecTTY arguments)
            bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
                                              0.5, 10), d)
        else:
            logger.warning(msg)
class TmuxRunning(Terminal):
    """Open a new pane in the current running tmux window"""
    name = 'tmux-running'
    command = 'tmux split-window -c "{cwd}" "{command}"'
    priority = 2.75

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        if not os.getenv('TMUX'):
            raise UnsupportedTerminal('tmux is not running')

        # check_tmux_pane_size is defined later in this module
        if not check_tmux_pane_size('tmux'):
            raise UnsupportedTerminal('tmux pane too small or tmux < 1.9 version is being used')

        Terminal.__init__(self, sh_cmd, title, env, d)
class TmuxNewWindow(Terminal):
    """Open a new window in the current running tmux session"""
    name = 'tmux-new-window'
    command = 'tmux new-window -c "{cwd}" -n "{title}" "{command}"'
    priority = 2.70

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        if not os.getenv('TMUX'):
            raise UnsupportedTerminal('tmux is not running')

        Terminal.__init__(self, sh_cmd, title, env, d)
class Tmux(Terminal):
    """Start a new tmux session and window"""
    command = 'tmux new -c "{cwd}" -d -s devshell -n devshell "{command}"'
    priority = 0.75

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
            raise UnsupportedTerminal('tmux is not installed')

        # TODO: consider using a 'devshell' session shared amongst all
        # devshells, if it's already there, add a new window to it.
        window_name = 'devshell-%i' % os.getpid()

        # Double braces survive the .format(window_name) below so that
        # Terminal.format_command can later substitute {cwd}/{command}
        self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'
        if not check_tmux_version('1.9'):
            # `tmux new-session -c` was added in 1.9;
            # older versions fail with that flag
            self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'
        self.command = self.command.format(window_name)
        Terminal.__init__(self, sh_cmd, title, env, d)

        attach_cmd = 'tmux att -t {0}'.format(window_name)
        msg = 'Tmux started. Please connect in another terminal with `tmux att -t {0}`'.format(window_name)
        if d:
            bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
        else:
            logger.warning(msg)
class Custom(Terminal):
    """User-supplied terminal taken from the OE_TERMINAL_CUSTOMCMD variable."""
    command = 'false' # This is a placeholder
    priority = 3

    def __init__(self, sh_cmd, title=None, env=None, d=None):
        # No datastore means no way to read the custom command ("d and ...")
        self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD')
        if self.command:
            if not '{command}' in self.command:
                # Make sure the template actually executes the requested command
                self.command += ' {command}'
            Terminal.__init__(self, sh_cmd, title, env, d)
            logger.warning('Custom terminal was started.')
        else:
            logger.debug('No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
            raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
def prioritized():
    """Return the registered terminal classes in Registry priority order."""
    return Registry.prioritized()
def get_cmd_list():
    """Return the command templates of all registered terminals that have one."""
    return [term.command for term in Registry.prioritized() if term.command]
def spawn_preferred(sh_cmd, title=None, env=None, d=None):
    """Spawn the first supported terminal, by priority.

    Raises NoSupportedTerminals if every registered terminal is unsupported.
    """
    for terminal in prioritized():
        try:
            spawn(terminal.name, sh_cmd, title, env, d)
            break
        except UnsupportedTerminal:
            pass
        except Exception:
            # was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt; only real start failures belong here
            bb.warn("Terminal %s is supported but did not start" % (terminal.name))
    # when we've run out of options
    else:
        raise NoSupportedTerminals(get_cmd_list())
def spawn(name, sh_cmd, title=None, env=None, d=None):
    """Spawn the specified terminal, by name, and block until sh_cmd exits.

    Raises UnsupportedTerminal if name is not registered and ExecutionError
    if the terminal process itself exits non-zero.
    """
    logger.debug('Attempting to spawn terminal "%s"', name)
    try:
        terminal = Registry.registry[name]
    except KeyError:
        raise UnsupportedTerminal(name)

    # We need to know when the command completes but some terminals (at least
    # gnome and tmux) gives us no way to do this. We therefore write the pid
    # to a file using a "phonehome" wrapper script, then monitor the pid
    # until it exits.
    import tempfile
    import time
    pidfile = tempfile.NamedTemporaryFile(delete = False).name
    try:
        sh_cmd = bb.utils.which(os.getenv('PATH'), "oe-gnome-terminal-phonehome") + " " + pidfile + " " + sh_cmd
        pipe = terminal(sh_cmd, title, env, d)
        output = pipe.communicate()[0]
        if output:
            output = output.decode("utf-8")
        if pipe.returncode != 0:
            raise ExecutionError(sh_cmd, pipe.returncode, output)

        # Busy-wait until the phonehome wrapper has written its pid
        while os.stat(pidfile).st_size <= 0:
            time.sleep(0.01)
            continue
        with open(pidfile, "r") as f:
            pid = int(f.readline())
    finally:
        os.unlink(pidfile)

    # Poll the shell's pid; signal 0 only tests for process existence,
    # an OSError means it has exited
    while True:
        try:
            os.kill(pid, 0)
            time.sleep(0.1)
        except OSError:
            return
def check_tmux_version(desired):
    """Return the tmux version string when it is at least `desired`.

    Returns None when tmux is not available and False when it is older
    than the desired version.
    """
    vernum = check_terminal_version("tmux")
    if not vernum:
        return vernum
    if bb.utils.vercmp_string_op(vernum, desired, "<"):
        return False
    return vernum
def check_tmux_pane_size(tmux):
    """Return True when the active tmux pane is tall enough to split.

    Returns False for tmux < 1.9 or when the pane height cannot be
    determined, and None when the tmux binary is missing.
    """
    import subprocess as sub
    # On older tmux versions (<1.9), return false. The reason
    # is that there is no easy way to get the height of the active panel
    # on current window without nested formats (available from version 1.9)
    if not check_tmux_version('1.9'):
        return False
    try:
        p = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux,
                      shell=True,stdout=sub.PIPE,stderr=sub.PIPE)
        out, err = p.communicate()
        size = int(out.strip())
    except ValueError:
        # tmux produced no (or non-numeric) output - e.g. not inside a
        # session; int('') used to raise here uncaught
        return False
    except OSError as exc:
        import errno
        if exc.errno == errno.ENOENT:
            return None
        else:
            raise
    # a split pane gets half the height; require at least 19 lines each
    return size/2 >= 19
def check_terminal_version(terminalName):
    """Return the version string reported by terminalName, or None if absent."""
    import subprocess as sub
    try:
        cmdversion = '%s --version' % terminalName
        if terminalName.startswith('tmux'):
            cmdversion = '%s -V' % terminalName
        newenv = os.environ.copy()
        # Force a parseable, untranslated version banner
        newenv["LANG"] = "C"
        proc = sub.Popen(['sh', '-c', cmdversion], stdout=sub.PIPE, stderr=sub.PIPE, env=newenv)
        out, err = proc.communicate()
        ver_info = out.decode().rstrip().split('\n')
    except OSError as exc:
        import errno
        if exc.errno == errno.ENOENT:
            return None
        else:
            raise
    vernum = None
    for line in ver_info:
        if line.startswith(('Konsole', 'GNOME Terminal', 'MATE Terminal')):
            vernum = line.split(' ')[-1]
        if line.startswith('tmux'):
            vernum = line.split()[-1]
        if line.startswith('tmux next-'):
            # development snapshots report "next-X.Y"; strip the prefix
            vernum = line.split()[-1][5:]
    return vernum
def distro_name():
    """Return the lowercased distributor ID from lsb_release, or "unknown".

    The previous implementation ran Popen without stdout=PIPE, so
    communicate() always returned (None, None), the parsing raised, and the
    function unconditionally returned "unknown".
    """
    import subprocess
    try:
        out = subprocess.check_output(['lsb_release', '-i'])
        distro = out.decode().split(':')[1].strip().lower()
    except Exception:
        # best-effort: any failure (missing lsb_release, odd output) maps
        # to the generic fallback, as before
        distro = "unknown"
    return distro

View File

@@ -0,0 +1,188 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import errno
import re
import os
class OEList(list):
    """OpenEmbedded 'list' type

    Acts as an ordinary list, but is constructed from a string value and a
    separator (optional), and re-joins itself when converted to a string with
    str(). Set the variable type flag to 'list' to use this type, and the
    'separator' flag may be specified (defaulting to whitespace)."""

    name = "list"

    def __init__(self, value, separator = None):
        items = value.split(separator) if value is not None else []
        list.__init__(self, items)
        self.separator = " " if separator is None else separator

    def __str__(self):
        return self.separator.join(self)
def choice(value, choices):
    """OpenEmbedded 'choice' type

    Acts as a multiple choice for the user. To use this, set the variable
    type flag to 'choice', and set the 'choices' flag to a space separated
    list of valid values."""
    if not isinstance(value, str):
        raise TypeError("choice accepts a string, not '%s'" % type(value))
    normalized = value.lower()
    allowed = choices.lower()
    if normalized not in allowed.split():
        raise ValueError("Invalid choice '%s'. Valid choices: %s" %
                         (normalized, allowed))
    return normalized
class NoMatch(object):
    """Stub python regex pattern object which never matches anything.

    Mirrors the re pattern-object API but every method returns None.
    """
    def findall(self, string, flags=0):
        return None
    def finditer(self, string, flags=0):
        return None
    def match(self, string=None, flags=0):
        # the original signature dropped the string argument, so
        # pattern.match(s, pos) raised TypeError instead of returning None
        return None
    def search(self, string, flags=0):
        return None
    def split(self, string, maxsplit=0):
        return None
    def sub(self, repl, string, count=0):
        return None
    def subn(self, repl, string, count=0):
        return None

# Replace the class with its singleton; callers use it like a compiled pattern
NoMatch = NoMatch()
def regex(value, regexflags=None):
    """OpenEmbedded 'regex' type

    Acts as a regular expression, returning the pre-compiled regular
    expression pattern object. To use this type, set the variable type flag
    to 'regex', and optionally, set the 'regexflags' type to a space separated
    list of the flags to control the regular expression matching (e.g.
    FOO[regexflags] += 'ignorecase'). See the python documentation on the
    're' module for a list of valid flags."""
    flagval = 0
    for flag in (regexflags or "").split():
        flag = flag.upper()
        try:
            flagval |= getattr(re, flag)
        except AttributeError:
            raise ValueError("Invalid regex flag '%s'" % flag)

    if not value:
        # Let's ensure that the default behavior for an undefined or empty
        # variable is to match nothing. If the user explicitly wants to match
        # anything, they can match '.*' instead.
        return NoMatch

    try:
        return re.compile(value, flagval)
    except re.error as exc:
        raise ValueError("Invalid regex value '%s': %s" %
                         (value, exc.args[0]))
def boolean(value):
    """OpenEmbedded 'boolean' type

    Valid values for true: 'yes', 'y', 'true', 't', '1'
    Valid values for false: 'no', 'n', 'false', 'f', '0', None
    """
    if value is None:
        return False
    if isinstance(value, bool):
        return value
    if not isinstance(value, str):
        raise TypeError("boolean accepts a string, not '%s'" % type(value))

    normalized = value.lower()
    if normalized in ('yes', 'y', 'true', 't', '1'):
        return True
    if normalized in ('no', 'n', 'false', 'f', '0'):
        return False
    raise ValueError("Invalid boolean value '%s'" % normalized)
def integer(value, numberbase=10):
    """OpenEmbedded 'integer' type

    Defaults to base 10, but this can be specified using the optional
    'numberbase' flag."""
    base = int(numberbase)
    return int(value, base)
# Keep a handle on the builtin before shadowing it below
_float = float

def float(value, fromhex='false'):
    """OpenEmbedded floating point type

    To use this type, set the type flag to 'float', and optionally set the
    'fromhex' flag to a true value (obeying the same rules as for the
    'boolean' type) if the value is in base 16 rather than base 10."""
    return _float.fromhex(value) if boolean(fromhex) else _float(value)
def path(value, relativeto='', normalize='true', mustexist='false'):
    """OpenEmbedded 'path' type: join, optionally normalize, optionally verify."""
    joined = os.path.join(relativeto, value)
    if boolean(normalize):
        joined = os.path.normpath(joined)
    if boolean(mustexist):
        # Probe readability; only a missing file is a type error
        try:
            with open(joined, 'r'):
                pass
        except IOError as exc:
            if exc.errno == errno.ENOENT:
                raise ValueError("{0}: {1}".format(joined, os.strerror(errno.ENOENT)))
    return joined
def is_x86(arch):
    """
    Check whether arch is x86 or x86_64
    """
    return bool(arch.startswith('x86_') or re.match('i.*86', arch))
def qemu_use_kvm(kvm, target_arch):
    """
    Enable kvm if target_arch == build_arch or both of them are x86 archs.
    """
    if not (kvm and boolean(kvm)):
        return False
    build_arch = os.uname()[4]
    if is_x86(build_arch) and is_x86(target_arch):
        return True
    return build_arch == target_arch

View File

@@ -0,0 +1,71 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import argparse
import re
class myArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that reports problems through BitBake instead of exiting.

    NOTE(review): 'bb', 'd' (the datastore) and 'pkg' are not defined in this
    module; this code is executed by useradd.bbclass with those names present
    in the surrounding context.
    """
    def _print_message(self, message, file=None):
        bb.warn("%s - %s: %s" % (d.getVar('PN'), pkg, message))

    # This should never be called...
    def exit(self, status=0, message=None):
        message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN'), pkg))
        # was: error(message) - an unqualified name that would raise
        # NameError; route through our own error() so bb.fatal() is reached
        self.error(message)

    def error(self, message):
        bb.fatal(message)
def split_commands(params):
    """Split a ';'-separated command string, leaving quoted ';' intact."""
    pieces = re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
    # Drop empty fragments caused by stray separators
    return [piece for piece in pieces if piece]
def split_args(params):
    """Split a command line on whitespace, leaving quoted spans intact."""
    tokens = re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
    # Drop empty fragments caused by repeated separators
    return [token for token in tokens if token]
def build_useradd_parser():
    """Build an argparse parser that mirrors shadow's useradd option set.

    The options must stay in sync with the useradd binary so that
    USERADD_PARAM strings validate exactly as the real tool would parse them.
    """
    # The following comes from --help on useradd from shadow
    parser = myArgumentParser(prog='useradd')
    parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
    parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account")
    parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account")
    parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true")
    parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account")
    parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account")
    parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account")
    parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account")
    parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
    parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
    parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
    # -m/-M share dest "create_home" via store_const so "unset" is a third state
    parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_const", const=True)
    parser.add_argument("-M", "--no-create-home", dest="create_home", help="do not create the user's home directory", action="store_const", const=False)
    parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
    parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
    parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
    parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
    parser.add_argument("-r", "--system", help="create a system account", action="store_true")
    parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
    parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
    parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_const", const=True)
    parser.add_argument("LOGIN", help="Login name of the new user")

    return parser
def build_groupadd_parser():
    """Build an argparse parser that mirrors shadow's groupadd option set.

    Kept in sync with the groupadd binary so GROUPADD_PARAM strings validate
    exactly as the real tool would parse them.
    """
    # The following comes from --help on groupadd from shadow
    parser = myArgumentParser(prog='groupadd')
    parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
    parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group")
    parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
    parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
    parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
    parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
    parser.add_argument("-r", "--system", help="create a system account", action="store_true")
    parser.add_argument("GROUP", help="Group name of the new group")

    return parser

View File

@@ -0,0 +1,529 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import subprocess
import multiprocessing
import traceback
import errno
def read_file(filename):
    """Return the stripped contents of filename, or "" if it cannot be read.

    WARNING: can't raise an error on failure because of the new RDEPENDS
    handling. This is a bit ugly. :M:
    """
    try:
        # context manager guarantees the handle is closed even if read fails
        # (the old code leaked the handle on a read error and ended with an
        # unreachable "return None")
        with open(filename, "r") as f:
            return f.read().strip()
    except IOError:
        return ""
def ifelse(condition, iftrue = True, iffalse = False):
    """Return iftrue when condition is truthy, otherwise iffalse."""
    return iftrue if condition else iffalse
def conditional(variable, checkvalue, truevalue, falsevalue, d):
    """Return truevalue when d.getVar(variable) equals checkvalue, else falsevalue."""
    matched = d.getVar(variable) == checkvalue
    return truevalue if matched else falsevalue
def vartrue(var, iftrue, iffalse, d):
    """Return iftrue if datastore variable var parses as a true boolean, else iffalse."""
    import oe.types
    return iftrue if oe.types.boolean(d.getVar(var)) else iffalse
def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Numeric compare: truevalue when float(d.getVar(variable)) <= float(checkvalue)."""
    if float(d.getVar(variable)) <= float(checkvalue):
        return truevalue
    return falsevalue
def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
    """Like less_or_equal, but compares version strings via bb.utils.vercmp_string."""
    cmp_result = bb.utils.vercmp_string(d.getVar(variable), checkvalue)
    return truevalue if cmp_result <= 0 else falsevalue
def both_contain(variable1, variable2, checkvalue, d):
    """Return checkvalue (joined) if every item appears in both variables, else "".

    NOTE(review): the joined result follows set iteration order, so for a
    multi-item checkvalue the word order is unspecified - confirm callers
    only rely on the truthiness / item set.
    """
    words1 = set(d.getVar(variable1).split())
    words2 = set(d.getVar(variable2).split())
    if isinstance(checkvalue, str):
        wanted = set(checkvalue.split())
    else:
        wanted = set(checkvalue)
    if wanted.issubset(words1) and wanted.issubset(words2):
        return " ".join(wanted)
    return ""
def set_intersect(variable1, variable2, d):
    """
    Expand both variables, interpret them as lists of strings, and return the
    intersection as a flattened string.

    For example:
    s1 = "a b c"
    s2 = "b c d"
    s3 = set_intersect(s1, s2)
    => s3 = "b c"
    """
    words1 = set(d.getVar(variable1).split())
    words2 = set(d.getVar(variable2).split())
    return " ".join(words1.intersection(words2))
def prune_suffix(var, suffixes, d):
    """Strip any listed suffix from var, then a leading MLPREFIX if present."""
    result = var
    for suffix in suffixes:
        if suffix and result.endswith(suffix):
            result = result[:-len(suffix)]

    mlprefix = d.getVar("MLPREFIX")
    if mlprefix and result.startswith(mlprefix):
        result = result[len(mlprefix):]
    return result
def str_filter(f, str, d):
    """Return the whitespace-separated words of str matching regex f (anchored)."""
    from re import match
    kept = (word for word in str.split() if match(f, word, 0))
    return " ".join(kept)
def str_filter_out(f, str, d):
    """Return the whitespace-separated words of str NOT matching regex f (anchored)."""
    from re import match
    kept = (word for word in str.split() if not match(f, word, 0))
    return " ".join(kept)
def build_depends_string(depends, task):
    """Append a taskname to a string of dependencies as used by the [depends] flag"""
    tagged = (dep + ":" + task for dep in depends.split())
    return " ".join(tagged)
def inherits(d, *classes):
    """Return True if the metadata inherits any of the specified classes"""
    for cls in classes:
        if bb.data.inherits_class(cls, d):
            return True
    return False
def features_backfill(var, d):
    # This construct allows the addition of new features to variable specified
    # as var
    # Example for var = "DISTRO_FEATURES"
    # This construct allows the addition of new features to DISTRO_FEATURES
    # that if not present would disable existing functionality, without
    # disturbing distributions that have already set DISTRO_FEATURES.
    # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should
    # add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED
    current = (d.getVar(var) or "").split()
    backfill = (d.getVar(var + "_BACKFILL") or "").split()
    considered = (d.getVar(var + "_BACKFILL_CONSIDERED") or "").split()

    missing = [feature for feature in backfill
               if feature not in current and feature not in considered]
    if missing:
        d.appendVar(var, " " + " ".join(missing))
def all_distro_features(d, features, truevalue="1", falsevalue=""):
    """
    Returns truevalue if *all* given features are set in DISTRO_FEATURES,
    else falsevalue. The features can be given as single string or anything
    that can be turned into a set.

    This is a shorter, more flexible version of
    bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d).

    Without explicit true/false values it can be used directly where
    Python expects a boolean:
       if oe.utils.all_distro_features(d, "foo bar"):
           bb.fatal("foo and bar are mutually exclusive DISTRO_FEATURES")

    With just a truevalue, it can be used to include files that are meant to be
    used only when requested via DISTRO_FEATURES:
       require ${@ oe.utils.all_distro_features(d, "foo bar", "foo-and-bar.inc") }
    """
    return bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d)
def any_distro_features(d, features, truevalue="1", falsevalue=""):
    """
    Returns truevalue if at least *one* of the given features is set in DISTRO_FEATURES,
    else falsevalue. The features can be given as single string or anything
    that can be turned into a set.

    This is a shorter, more flexible version of
    bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d).

    Without explicit true/false values it can be used directly where
    Python expects a boolean:
       if not oe.utils.any_distro_features(d, "foo bar"):
           bb.fatal("foo, bar or both must be set in DISTRO_FEATURES")

    With just a truevalue, it can be used to include files that are meant to be
    used only when requested via DISTRO_FEATURES:
       require ${@ oe.utils.any_distro_features(d, "foo bar", "foo-or-bar.inc") }
    """
    return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
def parallel_make(d, makeinst=False):
    """
    Return the integer value for the number of parallel threads to use when
    building, scraped out of PARALLEL_MAKE (or PARALLEL_MAKEINST when
    makeinst is True). If no parallelization option is found, returns an
    empty string (the docstring previously claimed None, but '' has always
    been the actual sentinel and callers test it with truthiness).

    e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
    """
    var = 'PARALLEL_MAKEINST' if makeinst else 'PARALLEL_MAKE'
    opts = (d.getVar(var) or '').split()
    # look for '-j' and throw other options (e.g. '-l') away
    while opts:
        opt = opts.pop(0)
        if opt == '-j':
            if not opts:
                # malformed trailing '-j' with no argument; previously this
                # raised IndexError
                break
            v = opts.pop(0)
        elif opt.startswith('-j'):
            v = opt[2:].strip()
        else:
            continue

        return int(v)

    return ''
def parallel_make_argument(d, fmt, limit=None, makeinst=False):
    """
    Helper utility to construct a parallel make argument from the number of
    parallel threads specified in PARALLEL_MAKE.

    Returns the input format string `fmt` where a single '%d' will be expanded
    with the number of parallel threads to use. If `limit` is specified, the
    number of parallel threads will be no larger than it. If no parallelization
    option is found in PARALLEL_MAKE, returns an empty string
    e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return
    "-n 10"
    """
    threads = parallel_make(d, makeinst)
    if not threads:
        return ''
    if limit:
        threads = min(limit, threads)
    return fmt % threads
def packages_filter_out_system(d):
    """
    Return a list of packages from PACKAGES with the "system" packages such as
    PN-dbg PN-doc PN-locale-eb-gb removed.
    """
    pn = d.getVar('PN')
    system_pkgs = {pn + suffix for suffix in
                   ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')}
    locale_prefix = pn + "-locale-"
    return [pkg for pkg in d.getVar('PACKAGES').split()
            if pkg not in system_pkgs and locale_prefix not in pkg]
def getstatusoutput(cmd):
    # Compatibility wrapper kept for API stability; returns (exitcode, output)
    # exactly like subprocess.getstatusoutput.
    return subprocess.getstatusoutput(cmd)
def trim_version(version, num_parts=2):
    """
    Return just the first <num_parts> of <version>, split by periods. For
    example, trim_version("1.2.3", 2) will return "1.2".
    """
    # exact type check (not isinstance) preserved deliberately
    if type(version) is not str:
        raise TypeError("Version should be a string")
    if num_parts < 1:
        raise ValueError("Cannot split to parts < 1")
    return ".".join(version.split(".")[:num_parts])
def cpu_count(at_least=1, at_most=64):
    """Number of CPUs usable by this process, clamped to [at_least, at_most]."""
    usable = len(os.sched_getaffinity(0))
    return max(min(usable, at_most), at_least)
def execute_pre_post_process(d, cmds):
    """Execute each named function from a ';' or whitespace separated list."""
    if cmds is None:
        return
    # ';' and whitespace are both accepted as separators
    for cmd in cmds.replace(";", " ").split():
        bb.note("Executing %s ..." % cmd)
        bb.build.exec_func(cmd, d)
def get_bb_number_threads(d):
    """BB_NUMBER_THREADS as an int, falling back to host CPU count, then 1."""
    configured = d.getVar("BB_NUMBER_THREADS")
    return int(configured or os.cpu_count() or 1)
def multiprocess_launch(target, items, d, extraargs=None):
    """Fan target out over items using up to BB_NUMBER_THREADS processes."""
    return multiprocess_launch_mp(target, items, get_bb_number_threads(d), extraargs)
# For each item in items, call the function 'target' with item as the first
# argument, extraargs as the other arguments and handle any exceptions in the
# parent thread
def multiprocess_launch_mp(target, items, max_process, extraargs=None):
    """Run target(item, *extraargs) for every item in up to max_process
    subprocesses, collecting results and exceptions in the parent.

    Raises bb.fatal with the aggregated tracebacks if any child failed;
    otherwise returns the list of child return values (truthy ones only).
    """
    class ProcessLaunch(multiprocessing.Process):
        """Process that ships its result or exception back over a pipe."""
        def __init__(self, *args, **kwargs):
            multiprocessing.Process.__init__(self, *args, **kwargs)
            self._pconn, self._cconn = multiprocessing.Pipe()
            self._exception = None
            self._result = None

        def run(self):
            try:
                ret = self._target(*self._args, **self._kwargs)
                self._cconn.send((None, ret))
            except Exception as e:
                # send the formatted traceback; the object itself may not pickle
                tb = traceback.format_exc()
                self._cconn.send((e, tb))

        def update(self):
            # Drain the pipe into _exception/_result; second tuple slot is the
            # traceback on failure, the return value on success
            if self._pconn.poll():
                (e, tb) = self._pconn.recv()
                if e is not None:
                    self._exception = (e, tb)
                else:
                    self._result = tb

        @property
        def exception(self):
            self.update()
            return self._exception

        @property
        def result(self):
            self.update()
            return self._result

    launched = []
    errors = []
    results = []
    items = list(items)
    # Stop launching new work once an error is seen, but drain what's running
    while (items and not errors) or launched:
        if not errors and items and len(launched) < max_process:
            args = (items.pop(),)
            if extraargs is not None:
                args = args + extraargs
            p = ProcessLaunch(target=target, args=args)
            p.start()
            launched.append(p)
        for q in launched:
            # Have to manually call update() to avoid deadlocks. The pipe can be full and
            # transfer stalled until we try and read the results object but the subprocess won't exit
            # as it still has data to write (https://bugs.python.org/issue8426)
            q.update()
            # The finished processes are joined when calling is_alive()
            if not q.is_alive():
                if q.exception:
                    errors.append(q.exception)
                if q.result:
                    results.append(q.result)
                launched.remove(q)
    # Paranoia doesn't hurt
    for p in launched:
        p.join()
    if errors:
        msg = ""
        for (e, tb) in errors:
            if isinstance(e, subprocess.CalledProcessError) and e.output:
                msg = msg + str(e) + "\n"
                msg = msg + "Subprocess output:"
                msg = msg + e.output.decode("utf-8", errors="ignore")
            else:
                msg = msg + str(e) + ": " + str(tb) + "\n"
        bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg)
    return results
def squashspaces(string):
    """Collapse each whitespace run in string to a single space and trim ends."""
    import re
    collapsed = re.sub(r"\s+", " ", string)
    return collapsed.strip()
def rprovides_map(pkgdata_dir, pkg_dict):
    """Map each RPROVIDES/FILERPROVIDES entry to the list of packages providing it."""
    rprov_map = {}
    for pkg in pkg_dict:
        pkgfile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
        if not os.path.isfile(pkgfile):
            continue
        with open(pkgfile) as f:
            for line in f:
                if not (line.startswith('RPROVIDES') or line.startswith('FILERPROVIDES')):
                    continue
                # List all components provided by pkg; version constraints
                # (tokens starting with '(') are skipped
                for prov in line.split()[1:]:
                    if not prov.startswith('('):
                        rprov_map.setdefault(prov, []).append(pkg)
    return rprov_map
def format_pkg_list(pkg_dict, ret_format=None, pkgdata_dir=None):
    """Render pkg_dict in the requested format ("arch", "file", "ver",
    "deps" or plain names), newline-terminated when non-empty."""
    output = []
    if ret_format == "deps":
        rprov_map = rprovides_map(pkgdata_dir, pkg_dict)
        for pkg in sorted(pkg_dict):
            for dep in pkg_dict[pkg]["deps"]:
                providers = rprov_map.get(dep)
                if providers:
                    # There could be multiple providers within the image
                    for provider in providers:
                        output.append("%s|%s * %s [RPROVIDES]" % (pkg, provider, dep))
                else:
                    output.append("%s|%s" % (pkg, dep))
    else:
        renderers = {
            "arch": lambda p, i: "%s %s" % (p, i["arch"]),
            "file": lambda p, i: "%s %s %s" % (p, i["filename"], i["arch"]),
            "ver": lambda p, i: "%s %s %s" % (p, i["arch"], i["ver"]),
        }
        render = renderers.get(ret_format, lambda p, i: p)
        for pkg in sorted(pkg_dict):
            output.append(render(pkg, pkg_dict[pkg]))

    output_str = '\n'.join(output)
    if output_str:
        # make sure last line is newline terminated
        output_str += '\n'
    return output_str
# Helper function to get the host compiler version
# Do not assume the compiler is gcc
def get_host_compiler_version(d, taskcontextonly=False):
    """Return (compiler, "major.minor") for the host BUILD_CC compiler.

    Returns None when taskcontextonly is set and we are not in a worker
    task context.
    """
    import re, subprocess

    if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
        return

    cc = d.getVar("BUILD_CC")
    # Get rid of ccache since it is not present when parsing.
    if cc.startswith('ccache '):
        cc = cc[7:]
    try:
        env = os.environ.copy()
        # datastore PATH does not contain session PATH as set by environment-setup-...
        # this breaks the install-buildtools use-case
        # env["PATH"] = d.getVar("PATH")
        vers_out = subprocess.check_output("%s --version" % cc,
                                           shell=True, env=env,
                                           stderr=subprocess.STDOUT).decode("utf-8")
    except subprocess.CalledProcessError as e:
        bb.fatal("Error running %s --version: %s" % (cc, e.output.decode("utf-8")))

    first_line = vers_out.split('\n')[0]
    m = re.match(r".* (\d+\.\d+)\.\d+.*", first_line)
    if not m:
        bb.fatal("Can't get compiler version from %s --version output" % cc)
    return cc, m.group(1)
def host_gcc_version(d, taskcontextonly=False):
    """Return "-<ver>" when the host gcc is 4.8 or 4.9, otherwise "".

    Returns None when taskcontextonly is set and we are not in a worker
    task context.
    """
    import re, subprocess

    if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
        return

    cc = d.getVar("BUILD_CC")
    # Get rid of ccache since it is not present when parsing.
    if cc.startswith('ccache '):
        cc = cc[7:]
    try:
        env = os.environ.copy()
        env["PATH"] = d.getVar("PATH")
        vers_out = subprocess.check_output("%s --version" % cc,
                                           shell=True, env=env,
                                           stderr=subprocess.STDOUT).decode("utf-8")
    except subprocess.CalledProcessError as e:
        bb.fatal("Error running %s --version: %s" % (cc, e.output.decode("utf-8")))

    m = re.match(r".* (\d+\.\d+)\.\d+.*", vers_out.split('\n')[0])
    if not m:
        bb.fatal("Can't get compiler version from %s --version output" % cc)

    version = m.group(1)
    return "-%s" % version if version in ("4.8", "4.9") else ""
def get_multilib_datastore(variant, d):
    """Return a copy of datastore d configured for the given multilib variant.

    With a variant, adds the virtclass-multilib-<variant> override and sets
    MLPREFIX; with a falsy variant, strips any multilib overrides and restores
    the original DEFAULTTUNE (if recorded) so the copy describes the base arch.
    """
    localdata = bb.data.createCopy(d)
    if variant:
        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
        localdata.setVar("OVERRIDES", overrides)
        localdata.setVar("MLPREFIX", variant + "-")
    else:
        origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
        if origdefault:
            localdata.setVar("DEFAULTTUNE", origdefault)
        overrides = localdata.getVar("OVERRIDES", False).split(":")
        # drop every multilib override, keep the rest intact
        overrides = ":".join([x for x in overrides if not x.startswith("virtclass-multilib-")])
        localdata.setVar("OVERRIDES", overrides)
        localdata.setVar("MLPREFIX", "")
    return localdata
def sh_quote(string):
    """Return string quoted for safe use in a shell command line."""
    import shlex
    return shlex.quote(string)
def directory_size(root, blocksize=4096):
    """
    Calculate the size of the directory, taking into account hard links,
    rounding up every size to multiples of the blocksize.
    """
    import math

    seen_inodes = set()

    def rounded_size(path):
        # lstat so symlinks count as themselves; each inode (hard link
        # group) is charged only once
        st = os.lstat(path)
        if st.st_ino in seen_inodes:
            return 0
        seen_inodes.add(st.st_ino)
        # round up to the nearest multiple of the block size
        return math.ceil(st.st_size / blocksize) * blocksize

    total = 0
    for dirpath, dirnames, filenames in os.walk(root):
        total += sum(rounded_size(os.path.join(dirpath, name)) for name in filenames)
        total += rounded_size(dirpath)
    return total
def touch(filename):
    """Update the mtime of filename; permission and read-only-fs errors are ignored."""
    try:
        os.utime(filename, None)
    except OSError as e:
        if isinstance(e, PermissionError):
            return
        # Handle read-only file systems gracefully; re-raise anything else
        if e.errno != errno.EROFS:
            raise e

View File

@@ -0,0 +1,13 @@
# Copyright (c) 2016, Intel Corporation.
#
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Build performance tests"""
from .base import (BuildPerfTestCase,
BuildPerfTestLoader,
BuildPerfTestResult,
BuildPerfTestRunner,
KernelDropCaches,
runCmd2)
from .test_basic import *

View File

@@ -0,0 +1,504 @@
# Copyright (c) 2016, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Build performance test base classes and functionality"""
import json
import logging
import os
import re
import resource
import socket
import shutil
import time
import unittest
import xml.etree.ElementTree as ET
from collections import OrderedDict
from datetime import datetime, timedelta
from functools import partial
from multiprocessing import Process
from multiprocessing import SimpleQueue
from xml.dom import minidom
import oe.path
from oeqa.utils.commands import CommandError, runCmd, get_bb_vars
from oeqa.utils.git import GitError, GitRepo
# Get logger for this module
log = logging.getLogger('build-perf')

# Our own version of runCmd which does not raise AssertionErrors, which would
# cause errors to be interpreted as test failures
runCmd2 = partial(runCmd, assert_error=False, limit_exc_output=40)
class KernelDropCaches(object):
    """Container of the functions for dropping kernel caches"""
    # Cached sudo password (bytes); populated by check() only when
    # passwordless sudo is unavailable
    sudo_passwd = None

    @classmethod
    def check(cls):
        """Check permissions for dropping kernel caches"""
        from getpass import getpass
        from locale import getdefaultlocale
        # -n: probe non-interactively first
        cmd = ['sudo', '-k', '-n', 'tee', '/proc/sys/vm/drop_caches']
        ret = runCmd2(cmd, ignore_status=True, data=b'0')
        # output starting with 'sudo:' indicates a password is required,
        # so prompt once and cache it for drop()
        if ret.output.startswith('sudo:'):
            pass_str = getpass(
                "\nThe script requires sudo access to drop caches between "
                "builds (echo 3 > /proc/sys/vm/drop_caches).\n"
                "Please enter your sudo password: ")
            cls.sudo_passwd = bytes(pass_str, getdefaultlocale()[1])

    @classmethod
    def drop(cls):
        """Drop kernel caches"""
        cmd = ['sudo', '-k']
        if cls.sudo_passwd:
            # feed the cached password on stdin
            cmd.append('-S')
            input_data = cls.sudo_passwd + b'\n'
        else:
            # rely on passwordless sudo
            cmd.append('-n')
            input_data = b''
        cmd += ['tee', '/proc/sys/vm/drop_caches']
        # '3' drops page cache plus dentries and inodes
        input_data += b'3'
        runCmd2(cmd, data=input_data)
def str_to_fn(string):
    """Convert string to a sanitized filename"""
    # re.LOCALE is only valid for bytes patterns in Python 3 and raises
    # "ValueError: cannot use LOCALE flag with a str pattern" here; the
    # default unicode word-character semantics are what we want anyway
    return re.sub(r'(\W+)', '-', string)
class ResultsJsonEncoder(json.JSONEncoder):
    """Extended encoder for build perf test results"""
    unix_epoch = datetime.utcfromtimestamp(0)

    def default(self, obj):
        """Serialize datetimes (assumed UTC) and timedeltas as float seconds."""
        if isinstance(obj, datetime):
            # NOTE: we assume that all timestamps are in UTC time
            return (obj - self.unix_epoch).total_seconds()
        if isinstance(obj, timedelta):
            return obj.total_seconds()
        return super().default(obj)
class BuildPerfTestResult(unittest.TextTestResult):
    """Runner class for executing the individual tests"""
    # List of test cases to run
    test_run_queue = []

    def __init__(self, out_dir, *args, **kwargs):
        """Initialize the result object; *out_dir* receives the result files"""
        super(BuildPerfTestResult, self).__init__(*args, **kwargs)

        self.out_dir = out_dir
        self.hostname = socket.gethostname()
        self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
        self.start_time = self.elapsed_time = None
        # unittest.TestResult does not track successful tests; keep our own list
        self.successes = []

    def addSuccess(self, test):
        """Record results from successful tests"""
        super(BuildPerfTestResult, self).addSuccess(test)
        self.successes.append(test)

    def addError(self, test, err):
        """Record results from crashed test"""
        test.err = err
        super(BuildPerfTestResult, self).addError(test, err)

    def addFailure(self, test, err):
        """Record results from failed test"""
        test.err = err
        super(BuildPerfTestResult, self).addFailure(test, err)

    def addExpectedFailure(self, test, err):
        """Record results from expectedly failed test"""
        test.err = err
        super(BuildPerfTestResult, self).addExpectedFailure(test, err)

    def startTest(self, test):
        """Pre-test hook"""
        # Give the test case its output directory and log a timestamped header
        test.base_dir = self.out_dir
        log.info("Executing test %s: %s", test.name, test.shortDescription())
        self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
        super(BuildPerfTestResult, self).startTest(test)

    def startTestRun(self):
        """Pre-run hook"""
        self.start_time = datetime.utcnow()

    def stopTestRun(self):
        """Post-run hook"""
        self.elapsed_time = datetime.utcnow() - self.start_time

    def all_results(self):
        """Return all recorded results as (status, test, message) tuples,
        sorted by test start time"""
        compound = [('SUCCESS', t, None) for t in self.successes] + \
                   [('FAILURE', t, m) for t, m in self.failures] + \
                   [('ERROR', t, m) for t, m in self.errors] + \
                   [('EXPECTED_FAILURE', t, m) for t, m in self.expectedFailures] + \
                   [('UNEXPECTED_SUCCESS', t, None) for t in self.unexpectedSuccesses] + \
                   [('SKIPPED', t, m) for t, m in self.skipped]
        return sorted(compound, key=lambda info: info[1].start_time)

    def write_buildstats_json(self):
        """Write buildstats file"""
        buildstats = OrderedDict()
        for _, test, _ in self.all_results():
            for key, val in test.buildstats.items():
                buildstats[test.name + '.' + key] = val
        with open(os.path.join(self.out_dir, 'buildstats.json'), 'w') as fobj:
            json.dump(buildstats, fobj, cls=ResultsJsonEncoder)

    def write_results_json(self):
        """Write test results into a json-formatted file"""
        results = OrderedDict([('tester_host', self.hostname),
                               ('start_time', self.start_time),
                               ('elapsed_time', self.elapsed_time),
                               ('tests', OrderedDict())])
        for status, test, reason in self.all_results():
            test_result = OrderedDict([('name', test.name),
                                       ('description', test.shortDescription()),
                                       ('status', status),
                                       ('start_time', test.start_time),
                                       ('elapsed_time', test.elapsed_time),
                                       ('measurements', test.measurements)])
            if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
                test_result['message'] = str(test.err[1])
                test_result['err_type'] = test.err[0].__name__
                test_result['err_output'] = reason
            elif reason:
                test_result['message'] = reason
            results['tests'][test.name] = test_result

        with open(os.path.join(self.out_dir, 'results.json'), 'w') as fobj:
            json.dump(results, fobj, indent=4,
                      cls=ResultsJsonEncoder)

    def write_results_xml(self):
        """Write test results into a JUnit XML file"""
        top = ET.Element('testsuites')
        suite = ET.SubElement(top, 'testsuite')
        suite.set('name', 'oeqa.buildperf')
        suite.set('timestamp', self.start_time.isoformat())
        suite.set('time', str(self.elapsed_time.total_seconds()))
        suite.set('hostname', self.hostname)
        # JUnit has no "expected failure" concept; count those as failures
        suite.set('failures', str(len(self.failures) + len(self.expectedFailures)))
        suite.set('errors', str(len(self.errors)))
        suite.set('skipped', str(len(self.skipped)))

        test_cnt = 0
        for status, test, reason in self.all_results():
            test_cnt += 1
            testcase = ET.SubElement(suite, 'testcase')
            testcase.set('classname', test.__module__ + '.' + test.__class__.__name__)
            testcase.set('name', test.name)
            testcase.set('description', test.shortDescription())
            testcase.set('timestamp', test.start_time.isoformat())
            testcase.set('time', str(test.elapsed_time.total_seconds()))
            # BUGFIX: all_results() reports 'EXPECTED_FAILURE', not
            # 'EXP_FAILURE'; with the old value expected failures fell through
            # to the TypeError below instead of being recorded
            if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
                if status in ('FAILURE', 'EXPECTED_FAILURE'):
                    result = ET.SubElement(testcase, 'failure')
                else:
                    result = ET.SubElement(testcase, 'error')
                result.set('message', str(test.err[1]))
                result.set('type', test.err[0].__name__)
                result.text = reason
            elif status == 'SKIPPED':
                result = ET.SubElement(testcase, 'skipped')
                result.text = reason
            elif status not in ('SUCCESS', 'UNEXPECTED_SUCCESS'):
                raise TypeError("BUG: invalid test status '%s'" % status)

            # Attach measurement data (system resources / disk usage)
            for data in test.measurements.values():
                measurement = ET.SubElement(testcase, data['type'])
                measurement.set('name', data['name'])
                measurement.set('legend', data['legend'])
                vals = data['values']
                if data['type'] == BuildPerfTestCase.SYSRES:
                    ET.SubElement(measurement, 'time',
                                  timestamp=vals['start_time'].isoformat()).text = \
                        str(vals['elapsed_time'].total_seconds())
                    attrib = dict((k, str(v)) for k, v in vals['iostat'].items())
                    ET.SubElement(measurement, 'iostat', attrib=attrib)
                    attrib = dict((k, str(v)) for k, v in vals['rusage'].items())
                    ET.SubElement(measurement, 'rusage', attrib=attrib)
                elif data['type'] == BuildPerfTestCase.DISKUSAGE:
                    ET.SubElement(measurement, 'size').text = str(vals['size'])
                else:
                    raise TypeError('BUG: unsupported measurement type')

        suite.set('tests', str(test_cnt))

        # Use minidom for pretty-printing
        dom_doc = minidom.parseString(ET.tostring(top, 'utf-8'))
        with open(os.path.join(self.out_dir, 'results.xml'), 'w') as fobj:
            dom_doc.writexml(fobj, addindent='  ', newl='\n', encoding='utf-8')
class BuildPerfTestCase(unittest.TestCase):
    """Base class for build performance tests"""
    # Measurement type identifiers used in results/XML output
    SYSRES = 'sysres'
    DISKUSAGE = 'diskusage'
    # Bitbake target whose sources are pre-fetched in setUp(); subclasses
    # override this
    build_target = None

    def __init__(self, *args, **kwargs):
        super(BuildPerfTestCase, self).__init__(*args, **kwargs)
        self.name = self._testMethodName
        # Output directory; assigned by BuildPerfTestResult.startTest()
        self.base_dir = None
        self.start_time = None
        self.elapsed_time = None
        # Measurement dicts keyed by measurement name, see _append_measurement()
        self.measurements = OrderedDict()
        self.buildstats = OrderedDict()
        # self.err is supposed to be a tuple from sys.exc_info()
        self.err = None
        self.bb_vars = get_bb_vars()
        # TODO: remove 'times' and 'sizes' arrays when globalres support is
        # removed
        self.times = []
        self.sizes = []

    @property
    def tmp_dir(self):
        # Per-test scratch directory under the output directory
        return os.path.join(self.base_dir, self.name + '.tmp')

    def shortDescription(self):
        # Unlike unittest, return "" instead of None when no docstring exists
        return super(BuildPerfTestCase, self).shortDescription() or ""

    def setUp(self):
        """Set-up fixture for each test"""
        if not os.path.isdir(self.tmp_dir):
            os.mkdir(self.tmp_dir)
        if self.build_target:
            # Pre-fetch sources so download time does not skew the measurement
            self.run_cmd(['bitbake', self.build_target, '--runall=fetch'])

    def tearDown(self):
        """Tear-down fixture for each test"""
        if os.path.isdir(self.tmp_dir):
            shutil.rmtree(self.tmp_dir)

    def run(self, *args, **kwargs):
        """Run test and record its wall-clock start time and duration"""
        self.start_time = datetime.now()
        super(BuildPerfTestCase, self).run(*args, **kwargs)
        self.elapsed_time = datetime.now() - self.start_time

    def run_cmd(self, cmd):
        """Convenience method for running a command"""
        cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Logging command: %s", cmd_str)
        try:
            runCmd2(cmd)
        except CommandError as err:
            log.error("Command failed: %s", err.retcode)
            raise

    def _append_measurement(self, measurement):
        """Simple helper for adding measurements results"""
        if measurement['name'] in self.measurements:
            raise ValueError('BUG: two measurements with the same name in {}'.format(
                self.__class__.__name__))
        self.measurements[measurement['name']] = measurement

    def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
        """Measure system resource usage of a command"""
        def _worker(data_q, cmd, **kwargs):
            """Worker process for measuring resources"""
            # Runs in a separate process so RUSAGE_CHILDREN and /proc io
            # accounting only cover the measured command, not this script
            try:
                start_time = datetime.now()
                ret = runCmd2(cmd, **kwargs)
                etime = datetime.now() - start_time
                rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
                iostat = OrderedDict()
                with open('/proc/{}/io'.format(os.getpid())) as fobj:
                    for line in fobj.readlines():
                        key, val = line.split(':')
                        iostat[key] = int(val)
                rusage = OrderedDict()
                # Skip unused fields, (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
                # 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
                for key in ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
                            'ru_majflt', 'ru_inblock', 'ru_oublock',
                            'ru_nvcsw', 'ru_nivcsw']:
                    rusage[key] = getattr(rusage_struct, key)
                data_q.put({'ret': ret,
                            'start_time': start_time,
                            'elapsed_time': etime,
                            'rusage': rusage,
                            'iostat': iostat})
            except Exception as err:
                # Ship the exception back to the parent for re-raising
                data_q.put(err)

        cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Timing command: %s", cmd_str)
        data_q = SimpleQueue()
        try:
            proc = Process(target=_worker, args=(data_q, cmd,))
            proc.start()
            data = data_q.get()
            proc.join()
            if isinstance(data, Exception):
                raise data
        except CommandError:
            log.error("Command '%s' failed", cmd_str)
            raise
        etime = data['elapsed_time']
        measurement = OrderedDict([('type', self.SYSRES),
                                   ('name', name),
                                   ('legend', legend)])
        measurement['values'] = OrderedDict([('start_time', data['start_time']),
                                             ('elapsed_time', etime),
                                             ('rusage', data['rusage']),
                                             ('iostat', data['iostat'])])
        if save_bs:
            self.save_buildstats(name)
        self._append_measurement(measurement)
        # Append to 'times' array for globalres log (H:MM:SS.ss format)
        e_sec = etime.total_seconds()
        self.times.append('{:d}:{:02d}:{:05.2f}'.format(int(e_sec / 3600),
                                                        int((e_sec % 3600) / 60),
                                                        e_sec % 60))

    def measure_disk_usage(self, path, name, legend, apparent_size=False):
        """Estimate disk usage of a file or directory"""
        # 1 KiB blocks so the recorded size is comparable across systems
        cmd = ['du', '-s', '--block-size', '1024']
        if apparent_size:
            cmd.append('--apparent-size')
        cmd.append(path)

        ret = runCmd2(cmd)
        size = int(ret.output.split()[0])
        log.debug("Size of %s path is %s", path, size)
        measurement = OrderedDict([('type', self.DISKUSAGE),
                                   ('name', name),
                                   ('legend', legend)])
        measurement['values'] = OrderedDict([('size', size)])
        self._append_measurement(measurement)
        # Append to 'sizes' array for globalres log
        self.sizes.append(str(size))

    def save_buildstats(self, measurement_name):
        """Save buildstats"""
        def split_nevr(nevr):
            """Split name and version information from recipe "nevr" string"""
            n_e_v, revision = nevr.rsplit('-', 1)
            match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
                             n_e_v)
            if not match:
                # If we're not able to parse a version starting with a number, just
                # take the part after last dash
                match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
                                 n_e_v)
            name = match.group('name')
            version = match.group('version')
            epoch = match.group('epoch')
            return name, epoch, version, revision

        def bs_to_json(filename):
            """Convert (task) buildstats file into json format"""
            bs_json = OrderedDict()
            iostat = OrderedDict()
            rusage = OrderedDict()
            with open(filename) as fobj:
                for line in fobj.readlines():
                    key, val = line.split(':', 1)
                    val = val.strip()
                    if key == 'Started':
                        start_time = datetime.utcfromtimestamp(float(val))
                        bs_json['start_time'] = start_time
                    elif key == 'Ended':
                        end_time = datetime.utcfromtimestamp(float(val))
                    elif key.startswith('IO '):
                        split = key.split()
                        iostat[split[1]] = int(val)
                    elif key.find('rusage') >= 0:
                        split = key.split()
                        ru_key = split[-1]
                        if ru_key in ('ru_stime', 'ru_utime'):
                            val = float(val)
                        else:
                            val = int(val)
                        # Sum the rusage values over all rusage lines
                        rusage[ru_key] = rusage.get(ru_key, 0) + val
                    elif key == 'Status':
                        bs_json['status'] = val
            bs_json['elapsed_time'] = end_time - start_time
            bs_json['rusage'] = rusage
            bs_json['iostat'] = iostat
            return bs_json

        log.info('Saving buildstats in JSON format')
        bs_dirs = sorted(os.listdir(self.bb_vars['BUILDSTATS_BASE']))
        if len(bs_dirs) > 1:
            log.warning("Multiple buildstats found for test %s, only "
                        "archiving the last one", self.name)
        bs_dir = os.path.join(self.bb_vars['BUILDSTATS_BASE'], bs_dirs[-1])

        buildstats = []
        for fname in os.listdir(bs_dir):
            recipe_dir = os.path.join(bs_dir, fname)
            # Skip non-recipe entries in the buildstats directory
            if not os.path.isdir(recipe_dir) or fname == "reduced_proc_pressure":
                continue
            name, epoch, version, revision = split_nevr(fname)
            recipe_bs = OrderedDict((('name', name),
                                     ('epoch', epoch),
                                     ('version', version),
                                     ('revision', revision),
                                     ('tasks', OrderedDict())))
            for task in os.listdir(recipe_dir):
                recipe_bs['tasks'][task] = bs_to_json(os.path.join(recipe_dir,
                                                                   task))
            buildstats.append(recipe_bs)

        self.buildstats[measurement_name] = buildstats

    def rm_tmp(self):
        """Cleanup temporary/intermediate files and directories"""
        log.debug("Removing temporary and cache files")
        for name in ['bitbake.lock', 'cache/sanity_info',
                     self.bb_vars['TMPDIR']]:
            oe.path.remove(name, recurse=True)

    def rm_sstate(self):
        """Remove sstate directory"""
        log.debug("Removing sstate-cache")
        oe.path.remove(self.bb_vars['SSTATE_DIR'], recurse=True)

    def rm_cache(self):
        """Drop bitbake caches"""
        oe.path.remove(self.bb_vars['PERSISTENT_DIR'], recurse=True)

    @staticmethod
    def sync():
        """Sync and drop kernel caches"""
        # Stop any resident bitbake server before dropping caches
        runCmd2('bitbake -m', ignore_status=True)
        log.debug("Syncing and dropping kernel caches""")
        KernelDropCaches.drop()
        os.sync()
        # Wait a bit for all the dirty blocks to be written onto disk
        time.sleep(3)
class BuildPerfTestLoader(unittest.TestLoader):
    """Test loader for build performance tests.

    Setting sortTestMethodsUsing to None disables the loader's explicit
    sorting of test method names.
    """
    sortTestMethodsUsing = None
class BuildPerfTestRunner(unittest.TextTestRunner):
    """Test runner for build performance tests."""
    # NOTE(review): looks copy-pasted from BuildPerfTestLoader;
    # TextTestRunner does not use this attribute. Kept for compatibility.
    sortTestMethodsUsing = None

    def __init__(self, out_dir, *args, **kwargs):
        """Initialize the runner; *out_dir* is handed to the result object"""
        super(BuildPerfTestRunner, self).__init__(*args, **kwargs)
        self.out_dir = out_dir

    def _makeResult(self):
        # Use our own result class so result files land in out_dir
        return BuildPerfTestResult(self.out_dir, self.stream, self.descriptions,
                                   self.verbosity)

View File

@@ -0,0 +1,120 @@
# Copyright (c) 2016, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Basic set of build performance tests"""
import os
import shutil
import oe.path
from oeqa.buildperf import BuildPerfTestCase
from oeqa.utils.commands import get_bb_var, get_bb_vars
class Test1P1(BuildPerfTestCase):
    build_target = 'core-image-sato'

    def test1(self):
        """Build core-image-sato"""
        # Start from a pristine state: no tmpdir, no sstate, no bitbake caches
        self.rm_tmp()
        self.rm_sstate()
        self.rm_cache()
        self.sync()
        self.measure_cmd_resources(['bitbake', self.build_target], 'build',
                                   'bitbake ' + self.build_target, save_bs=True)
        self.measure_disk_usage(self.bb_vars['TMPDIR'], 'tmpdir', 'tmpdir')
        # NOTE(review): apparent size (True) is used for the rootfs --
        # presumably to ignore filesystem block overhead; confirm
        self.measure_disk_usage(get_bb_var("IMAGE_ROOTFS", self.build_target), 'rootfs', 'rootfs', True)
class Test1P2(BuildPerfTestCase):
    build_target = 'virtual/kernel'

    def test12(self):
        """Build virtual/kernel"""
        # Build and cleans state in order to get all dependencies pre-built
        self.run_cmd(['bitbake', self.build_target])
        self.run_cmd(['bitbake', self.build_target, '-c', 'cleansstate'])

        # Drop kernel caches so warm caches do not skew the timing
        self.sync()
        self.measure_cmd_resources(['bitbake', self.build_target], 'build',
                                   'bitbake ' + self.build_target)
class Test1P3(BuildPerfTestCase):
    build_target = 'core-image-sato'

    def test13(self):
        """Build core-image-sato with rm_work enabled"""
        # Inject rm_work via a post-read config file passed with bitbake -R
        postfile = os.path.join(self.tmp_dir, 'postfile.conf')
        with open(postfile, 'w') as fobj:
            fobj.write('INHERIT += "rm_work"\n')

        # Start from a pristine state: no tmpdir, no sstate, no bitbake caches
        self.rm_tmp()
        self.rm_sstate()
        self.rm_cache()
        self.sync()
        cmd = ['bitbake', '-R', postfile, self.build_target]
        # BUGFIX: add the missing space in the legend ('bitbakecore-image-sato'
        # -> 'bitbake core-image-sato'), matching the sibling test cases
        self.measure_cmd_resources(cmd, 'build',
                                   'bitbake ' + self.build_target,
                                   save_bs=True)
        self.measure_disk_usage(self.bb_vars['TMPDIR'], 'tmpdir', 'tmpdir')
class Test2(BuildPerfTestCase):
    build_target = 'core-image-sato'

    def test2(self):
        """Run core-image-sato do_rootfs with sstate"""
        # Build once in order to populate sstate cache
        self.run_cmd(['bitbake', self.build_target])
        # Remove tmpdir and caches but keep sstate, then time do_rootfs alone
        self.rm_tmp()
        self.rm_cache()
        self.sync()
        cmd = ['bitbake', self.build_target, '-c', 'rootfs']
        self.measure_cmd_resources(cmd, 'do_rootfs', 'bitbake do_rootfs')
class Test3(BuildPerfTestCase):

    def test3(self):
        """Bitbake parsing (bitbake -p)"""
        # Three parse measurements: cold (no caches), warm persistent cache
        # (no tmp/cache), and fully cached
        # Drop all caches and parse
        self.rm_cache()
        oe.path.remove(os.path.join(self.bb_vars['TMPDIR'], 'cache'), True)
        self.measure_cmd_resources(['bitbake', '-p'], 'parse_1',
                                   'bitbake -p (no caches)')
        # Drop tmp/cache
        oe.path.remove(os.path.join(self.bb_vars['TMPDIR'], 'cache'), True)
        self.measure_cmd_resources(['bitbake', '-p'], 'parse_2',
                                   'bitbake -p (no tmp/cache)')
        # Parse with fully cached data
        self.measure_cmd_resources(['bitbake', '-p'], 'parse_3',
                                   'bitbake -p (cached)')
class Test4(BuildPerfTestCase):
    build_target = 'core-image-sato'

    def test4(self):
        """eSDK metrics"""
        # Build the extensible SDK installer for the image
        self.run_cmd(['bitbake', '-c', 'do_populate_sdk_ext',
                     self.build_target])
        self.bb_vars = get_bb_vars(None, self.build_target)
        tmp_dir = self.bb_vars['TMPDIR']
        installer = os.path.join(
            self.bb_vars['SDK_DEPLOY'],
            self.bb_vars['TOOLCHAINEXT_OUTPUTNAME'] + '.sh')
        # Measure installer size
        self.measure_disk_usage(installer, 'installer_bin', 'eSDK installer',
                                apparent_size=True)
        # Measure deployment time and deployed size
        deploy_dir = os.path.join(tmp_dir, 'esdk-deploy')
        if os.path.exists(deploy_dir):
            shutil.rmtree(deploy_dir)
        self.sync()
        self.measure_cmd_resources([installer, '-y', '-d', deploy_dir],
                                   'deploy', 'eSDK deploy')
        # make sure bitbake is unloaded
        self.sync()
        self.measure_disk_usage(deploy_dir, 'deploy_dir', 'deploy dir',
                                apparent_size=True)

View File

@@ -0,0 +1,8 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Enable other layers to have modules in the same named directory
# (extend_path makes this a namespace-style package whose contents can be
# spread across every layer's lib/oeqa/controllers directory)
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)

View File

@@ -0,0 +1,199 @@
# Copyright (C) 2014 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
# This module adds support to testimage.bbclass to deploy images and run
# tests using a "controller image" - this is a "known good" image that is
# installed onto the device as part of initial setup and will be booted into
# with no interaction; we can then use it to deploy the image to be tested
# to a second partition before running the tests.
#
# For an example controller image, see core-image-testcontroller
# (meta/recipes-extended/images/core-image-testcontroller.bb)
import os
import bb
import traceback
import time
import subprocess
import oeqa.targetcontrol
import oeqa.utils.sshcontrol as sshcontrol
import oeqa.utils.commands as commands
from oeqa.utils import CommandError
from abc import ABCMeta, abstractmethod
class ControllerImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta):
    """Hardware target driven through a known-good 'controller image'.

    The device is first booted into the controller image, which is then used
    to deploy the image under test to a second partition. Subclasses provide
    the board-specific _deploy() and _start() steps.
    """

    supported_image_fstypes = ['tar.gz', 'tar.bz2']

    def __init__(self, d):
        super(ControllerImageHardwareTarget, self).__init__(d)

        # target ip
        addr = d.getVar("TEST_TARGET_IP") or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
        self.ip = addr.split(":")[0]
        try:
            self.port = addr.split(":")[1]
        except IndexError:
            self.port = None
        bb.note("Target IP: %s" % self.ip)
        self.server_ip = d.getVar("TEST_SERVER_IP")
        if not self.server_ip:
            try:
                # BUGFIX: subprocess.check_output() returns bytes on Python 3;
                # decode before splitting with str separators (the old code
                # raised TypeError here)
                self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).decode('utf-8').split("\n")[0].split()[-1]
            except Exception as e:
                bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
        bb.note("Server IP: %s" % self.server_ip)

        # test rootfs + kernel
        self.image_fstype = self.get_image_fstype(d)
        self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("IMAGE_LINK_NAME") + '.' + self.image_fstype)
        self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("KERNEL_IMAGETYPE", False) + '-' + d.getVar('MACHINE', False) + '.bin')
        if not os.path.isfile(self.rootfs):
            # we could've checked that IMAGE_FSTYPES contains tar.gz but the config for running testimage might not be
            # the same as the config with which the image was build, ie
            # you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz"
            # and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage
            bb.fatal("No rootfs found. Did you build the image ?\nIf yes, did you build it with IMAGE_FSTYPES += \"tar.gz\" ? \
\nExpected path: %s" % self.rootfs)
        if not os.path.isfile(self.kernel):
            bb.fatal("No kernel found. Expected path: %s" % self.kernel)

        # controller ssh connection
        self.controller = None
        # if the user knows what they are doing, then by all means...
        self.user_cmds = d.getVar("TEST_DEPLOY_CMDS")
        self.deploy_cmds = None

        # this is the name of the command that controls the power for a board
        # e.g: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants"
        # the command should take as the last argument "off" and "on" and "cycle" (off, on)
        self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD") or None
        self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS", False) or ""

        self.serialcontrol_cmd = d.getVar("TEST_SERIALCONTROL_CMD") or None
        self.serialcontrol_args = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS", False) or ""

        self.origenv = os.environ
        if self.powercontrol_cmd or self.serialcontrol_cmd:
            # the external script for controlling power might use ssh
            # ssh + keys means we need the original user env
            # NOTE(review): this mutates os.environ in place rather than a
            # copy -- presumably intentional so helpers inherit it; confirm
            # before changing
            bborigenv = d.getVar("BB_ORIGENV", False) or {}
            for key in bborigenv:
                val = bborigenv.getVar(key)
                if val is not None:
                    self.origenv[key] = str(val)

        if self.powercontrol_cmd:
            if self.powercontrol_args:
                self.powercontrol_cmd = "%s %s" % (self.powercontrol_cmd, self.powercontrol_args)
        if self.serialcontrol_cmd:
            if self.serialcontrol_args:
                self.serialcontrol_cmd = "%s %s" % (self.serialcontrol_cmd, self.serialcontrol_args)

    def power_ctl(self, msg):
        """Run the external power-control command with *msg* (on/off/cycle)"""
        if self.powercontrol_cmd:
            cmd = "%s %s" % (self.powercontrol_cmd, msg)
            try:
                commands.runCmd(cmd, assert_error=False, start_new_session=True, env=self.origenv)
            except CommandError as e:
                bb.fatal(str(e))

    def power_cycle(self, conn):
        """Reboot the board, preferring the power-control command if set"""
        if self.powercontrol_cmd:
            # be nice, don't just cut power
            conn.run("shutdown -h now")
            time.sleep(10)
            self.power_ctl("cycle")
        else:
            status, output = conn.run("sync; { sleep 1; reboot; } > /dev/null &")
            if status != 0:
                bb.error("Failed rebooting target and no power control command defined. You need to manually reset the device.\n%s" % output)

    def _wait_until_booted(self):
        ''' Waits until the target device has booted (if we have just power cycled it) '''
        # Subclasses with better methods of determining boot can override this
        time.sleep(120)

    def deploy(self):
        """Boot the controller image (if needed) and deploy the test image"""
        # base class just sets the ssh log file for us
        super(ControllerImageHardwareTarget, self).deploy()
        self.controller = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port)
        status, output = self.controller.run("cat /etc/controllerimage")
        if status != 0:
            # We're not booted into the controller image, so try rebooting
            bb.plain("%s - booting into the controller image" % self.pn)
            self.power_ctl("cycle")
            self._wait_until_booted()

        bb.plain("%s - deploying image on target" % self.pn)
        status, output = self.controller.run("cat /etc/controllerimage")
        if status != 0:
            bb.fatal("No ssh connectivity or target isn't running a controller image.\n%s" % output)
        if self.user_cmds:
            self.deploy_cmds = self.user_cmds.split("\n")
        try:
            self._deploy()
        except Exception as e:
            bb.fatal("Failed deploying test image: %s" % e)

    @abstractmethod
    def _deploy(self):
        pass

    def start(self, extra_bootparams=None):
        """Boot the deployed test image and open the test ssh connection"""
        bb.plain("%s - boot test image on target" % self.pn)
        self._start()
        # set the ssh object for the target/test image
        self.connection = sshcontrol.SSHControl(self.ip, logfile=self.sshlog, port=self.port)
        bb.plain("%s - start running tests" % self.pn)

    @abstractmethod
    def _start(self):
        pass

    def stop(self):
        """Reboot/powercycle the target back out of the test image"""
        bb.plain("%s - reboot/powercycle target" % self.pn)
        self.power_cycle(self.controller)
class SystemdbootTarget(ControllerImageHardwareTarget):
    """Controller-image target for systemd-boot (EFI) based boards."""

    def __init__(self, d):
        super(SystemdbootTarget, self).__init__(d)
        # this the value we need to set in the LoaderEntryOneShot EFI variable
        # so the system boots the 'test' bootloader label and not the default
        # The first four bytes are EFI bits, and the rest is an utf-16le string
        # (EFI vars values need to be utf-16)
        # $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C
        # 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...|
        self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00'
        # Commands run inside the controller image: unpack the image under
        # test onto the 'testrootfs' partition and arm the one-shot EFI boot
        # entry so the next boot lands in the test image
        self.deploy_cmds = [
                'mount -L boot /boot',
                'mkdir -p /mnt/testrootfs',
                'mount -L testrootfs /mnt/testrootfs',
                'modprobe efivarfs',
                'mount -t efivarfs efivarfs /sys/firmware/efi/efivars',
                'cp ~/test-kernel /boot',
                'rm -rf /mnt/testrootfs/*',
                'tar xvf ~/test-rootfs.%s -C /mnt/testrootfs' % self.image_fstype,
                'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue
                ]

    def _deploy(self):
        """Copy rootfs tarball and kernel over ssh, then run deploy_cmds."""
        # make sure these aren't mounted
        self.controller.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")
        # from now on, every deploy cmd should return 0
        # else an exception will be thrown by sshcontrol
        self.controller.ignore_status = False
        self.controller.copy_to(self.rootfs, "~/test-rootfs." + self.image_fstype)
        self.controller.copy_to(self.kernel, "~/test-kernel")
        for cmd in self.deploy_cmds:
            self.controller.run(cmd)

    def _start(self, params=None):
        """Power-cycle into the freshly deployed test image."""
        self.power_cycle(self.controller)
        # there are better ways than a timeout but this should work for now
        time.sleep(120)

View File

@@ -0,0 +1,74 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import types
import bb
import os
# This class is responsible for loading a test target controller
# This class is responsible for loading a test target controller
class TestTargetLoader:
    """Locate and load a target controller class from the oeqa.controllers
    modules provided by the layers in BBPATH."""

    # Search oeqa.controllers module directory for and return a controller
    # corresponding to the given target name.
    # AttributeError raised if not found.
    # ImportError raised if a provided module can not be imported.
    def get_controller_module(self, target, bbpath):
        controllerslist = self.get_controller_modulenames(bbpath)
        bb.note("Available controller modules: %s" % str(controllerslist))
        controller = self.load_controller_from_name(target, controllerslist)
        return controller

    # Return a list of all python modules in lib/oeqa/controllers for each
    # layer in bbpath
    def get_controller_modulenames(self, bbpath):
        controllerslist = []

        def add_controller_list(path):
            # Every controllers directory must be a proper package
            if not os.path.exists(os.path.join(path, '__init__.py')):
                bb.fatal('Controllers directory %s exists but is missing __init__.py' % path)
            files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
            for f in files:
                module = 'oeqa.controllers.' + f[:-3]
                if module not in controllerslist:
                    controllerslist.append(module)
                else:
                    bb.warn("Duplicate controller module found for %s, only one added. Layers should create unique controller module names" % module)

        for p in bbpath:
            controllerpath = os.path.join(p, 'lib', 'oeqa', 'controllers')
            bb.debug(2, 'Searching for target controllers in %s' % controllerpath)
            if os.path.exists(controllerpath):
                add_controller_list(controllerpath)
        return controllerslist

    # Search for and return a controller from given target name and
    # set of module names.
    # Raise AttributeError if not found.
    # Raise ImportError if a provided module can not be imported
    def load_controller_from_name(self, target, modulenames):
        for name in modulenames:
            obj = self.load_controller_from_module(target, name)
            if obj:
                return obj
        raise AttributeError("Unable to load {0} from available modules: {1}".format(target, str(modulenames)))

    # Search for and return a controller or None from given module name
    def load_controller_from_module(self, target, modulename):
        obj = None
        # import module, allowing it to raise import exception
        module = __import__(modulename, globals(), locals(), [target])
        # look for target class in the module, catching any exceptions as it
        # is valid that a module may not have the target class.
        try:
            obj = getattr(module, target)
            if obj:
                from oeqa.targetcontrol import BaseTarget
                if( not issubclass(obj, BaseTarget)):
                    bb.warn("Target {0} found, but subclass is not BaseTarget".format(target))
        # BUGFIX: the bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception while keeping the lenient
        # "module may not have this class" lookup behaviour
        except Exception:
            obj = None
        return obj

View File

@@ -0,0 +1,76 @@
= OEQA (v2) Framework =
== Introduction ==
This is version 2 of the OEQA framework. Base classes are located in the
'oeqa/core' directory and subsequent components must extend from these.
The main design consideration was to implement the needed functionality on
top of the Python unittest framework. To achieve this goal, the following
modules are used:
* oeqa/core/runner.py: Provides OETestResult and OETestRunner base
classes extending the unittest class. These classes support exporting
results to different formats; currently RAW and XML support exist.
* oeqa/core/loader.py: Provides OETestLoader extending the unittest class.
It also features a unified implementation of decorator support and
filtering test cases.
* oeqa/core/case.py: Provides OETestCase base class extending
unittest.TestCase and provides access to the Test data (td), Test context
and Logger functionality.
* oeqa/core/decorator: Provides OETestDecorator, a new class to implement
decorators for Test cases.
* oeqa/core/context: Provides OETestContext, a high-level API for
loadTests and runTests of certain Test component and
OETestContextExecutor a base class to enable oe-test to discover/use
the Test component.
Also, a new 'oe-test' runner is located under 'scripts', allowing scans for components
that support OETestContextExecutor (see below).
== Terminology ==
* Test component: The area of testing in the Project, for example: runtime, SDK, eSDK, selftest.
* Test data: Data associated with the Test component. Currently we use bitbake datastore as
a Test data input.
* Test context: A context of what tests need to be run and how to do it; this additionally
provides access to the Test data and may have custom methods and/or attributes.
== oe-test ==
The new tool, oe-test, has the ability to scan the code base for test components and provide
a unified way to run test cases. Internally it scans folders inside oeqa module in order to find
specific classes that implement a test component.
== Usage ==
Executing the example test component
$ source oe-init-build-env
$ oe-test core
Getting help
$ oe-test -h
== Creating new Test Component ==
To add a new test component, the developer needs to extend OETestContext/OETestContextExecutor
(from context.py) and OETestCase (from case.py).
== Selftesting the framework ==
Run all tests:
$ PATH=$PATH:../../ python3 -m unittest discover -s tests
Run some test:
$ cd tests/
$ ./test_data.py

View File

@@ -0,0 +1,105 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import base64
import zlib
import unittest
from oeqa.core.exception import OEQAMissingVariable
def _validate_td_vars(td, td_vars, type_msg):
if td_vars:
for v in td_vars:
if not v in td:
raise OEQAMissingVariable("Test %s need %s variable but"\
" isn't into td" % (type_msg, v))
class OETestCase(unittest.TestCase):
    """Base class for OEQA test cases, wiring unittest.TestCase into the
    OEQA loader and decorator machinery."""

    # TestContext and Logger instance set by OETestLoader.
    tc = None
    logger = None

    # td has all the variables needed by the test cases
    # is the same across all the test cases.
    td = None

    # td_vars has the variables needed by a test class
    # or test case instance, if some var isn't into td a
    # OEQAMissingVariable exception is raised
    td_vars = None

    @classmethod
    def _oeSetUpClass(clss):
        """Class-level set-up: validate td_vars, run the optional
        setUpHooker, then the class set-up method."""
        _validate_td_vars(clss.td, clss.td_vars, "class")
        if hasattr(clss, 'setUpHooker') and callable(getattr(clss, 'setUpHooker')):
            clss.setUpHooker()
        # NOTE(review): setUpClassMethod is not defined on this class;
        # presumably installed by OETestLoader -- confirm before relying on it
        clss.setUpClassMethod()

    @classmethod
    def _oeTearDownClass(clss):
        """Class-level tear-down (method installed externally, as above)."""
        clss.tearDownClassMethod()

    def _oeSetUp(self):
        """Per-test set-up: run decorator set-ups, then the test set-up.

        If any decorator set-up fails, tear all decorators down before
        re-raising so earlier set-ups are not leaked.
        """
        try:
            for d in self.decorators:
                d.setUpDecorator()
        except:
            for d in self.decorators:
                d.tearDownDecorator()
            raise
        self.setUpMethod()

    def _oeTearDown(self):
        """Per-test tear-down: decorator tear-downs, then the test tear-down."""
        for d in self.decorators:
            d.tearDownDecorator()
        self.tearDownMethod()
class OEPTestResultTestCase:
    """
    Mix-in class to provide functions to make interacting with extraresults for
    the purposes of storing ptestresult data.
    """
    @staticmethod
    def _compress_log(log):
        """zlib-compress *log* (str or bytes) and return it base64-encoded
        in the {"compressed": ...} wrapper used by the result format."""
        logdata = log.encode("utf-8") if isinstance(log, str) else log
        logdata = zlib.compress(logdata)
        logdata = base64.b64encode(logdata).decode("utf-8")
        return {"compressed" : logdata}

    def _sections(self):
        """Lazily initialize self.extraresults and return its
        'ptestresult.sections' dict (shared by all helpers below)."""
        if not hasattr(self, "extraresults"):
            self.extraresults = {"ptestresult.sections" : {}}
        return self.extraresults.setdefault("ptestresult.sections", {})

    def ptest_rawlog(self, log):
        """Store the complete raw log, compressed."""
        self._sections()
        self.extraresults["ptestresult.rawlogs"] = {"log" : self._compress_log(log)}

    def ptest_section(self, section, duration = None, log = None, logfile = None, exitcode = None):
        """Record per-section metadata: a compressed log (from *log* or
        *logfile*), and optionally duration and exit code."""
        entry = self._sections().setdefault(section, {})
        if log is not None:
            entry["log"] = self._compress_log(log)
        elif logfile is not None:
            with open(logfile, "rb") as f:
                entry["log"] = self._compress_log(f.read())
        if duration is not None:
            entry["duration"] = duration
        if exitcode is not None:
            entry["exitcode"] = exitcode

    def ptest_result(self, section, test, result):
        """Record the status of a single test within *section*."""
        self._sections().setdefault(section, {})
        resultname = "ptestresult.{}.{}".format(section, test)
        self.extraresults[resultname] = {"status" : result}

View File

@@ -0,0 +1 @@
{"ARCH": "x86", "IMAGE": "core-image-minimal"}

View File

@@ -0,0 +1,22 @@
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
from oeqa.core.case import OETestCase
from oeqa.core.decorator.depends import OETestDepends
class OETestExample(OETestCase):
    """Minimal example case checking values supplied through td."""

    def test_example(self):
        image = self.td.get('IMAGE')
        self.logger.info('IMAGE: %s' % image)
        self.assertEqual('core-image-minimal', image)
        arch = self.td.get('ARCH')
        self.logger.info('ARCH: %s' % arch)
        self.assertEqual('x86', arch)
class OETestExampleDepend(OETestCase):
    """Example showing OETestDepends: a dependent test is skipped unless
    the test it depends on passed in the same run."""

    @OETestDepends(['OETestExample.test_example'])
    def test_example_depends(self):
        """Runs only after OETestExample.test_example passed."""
        pass

    def test_example_no_depends(self):
        """Plain case with no declared dependency."""
        pass

View File

@@ -0,0 +1,246 @@
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import os
import sys
import json
import time
import logging
import collections
import unittest
from oeqa.core.loader import OETestLoader
from oeqa.core.runner import OETestRunner
from oeqa.core.exception import OEQAMissingManifest, OEQATestNotFound
class OETestContext(object):
    """Holds everything needed for one test run: the test data dict (td),
    the case registry, loader/runner classes and the aggregated results.

    Fixes over the previous revision: the manifest file is read inside a
    context manager (it used to leak the handle), and the td type check
    uses isinstance so dict subclasses are accepted too.
    """
    loaderClass = OETestLoader
    runnerClass = OETestRunner

    # Auxiliary data files shipped next to the test cases.
    files_dir = os.path.abspath(os.path.join(os.path.dirname(
        os.path.abspath(__file__)), "../files"))

    def __init__(self, td=None, logger=None):
        """td: dict of test variables; logger: logging.Logger for the run."""
        if not isinstance(td, dict):
            raise TypeError("td isn't dictionary type")

        self.td = td
        self.logger = logger
        self._registry = {}
        self._registry['cases'] = collections.OrderedDict()

        self.results = unittest.TestResult()
        unittest.registerResult(self.results)

    def _read_modules_from_manifest(self, manifest):
        """Return module names listed in *manifest*, one per line; blank
        lines and '#' comments are ignored.  Raises OEQAMissingManifest
        when the file does not exist."""
        if not os.path.exists(manifest):
            raise OEQAMissingManifest("Manifest does not exist on %s" % manifest)

        modules = []
        # Context manager closes the manifest deterministically (the
        # previous open().readlines() leaked the handle until GC).
        with open(manifest) as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith("#"):
                    modules.append(line)

        return modules

    def skipTests(self, skips):
        """Force-skip every loaded test whose id starts with an entry of
        *skips* (command-line driven)."""
        if not skips:
            return
        def skipfuncgen(skipmsg):
            def func():
                raise unittest.SkipTest(skipmsg)
            return func
        class_ids = {}
        for test in self.suites:
            if test.__class__ not in class_ids:
                class_ids[test.__class__] = '.'.join(test.id().split('.')[:-1])
            for skip in skips:
                if (test.id() + '.').startswith(skip + '.'):
                    setattr(test, 'setUp', skipfuncgen('Skip by the command line argument "%s"' % skip))
        # Skipping a whole class installs setUpHooker, which
        # OETestCase._oeSetUpClass runs before setUpClass.
        for tclass in class_ids:
            cid = class_ids[tclass]
            for skip in skips:
                if (cid + '.').startswith(skip + '.'):
                    setattr(tclass, 'setUpHooker', skipfuncgen('Skip by the command line argument "%s"' % skip))

    def loadTests(self, module_paths, modules=[], tests=[],
            modules_manifest="", modules_required=[], **kwargs):
        """Discover test suites from *module_paths*, honouring the module
        manifest and selection filters."""
        if modules_manifest:
            modules = self._read_modules_from_manifest(modules_manifest)

        self.loader = self.loaderClass(self, module_paths, modules, tests,
                modules_required, **kwargs)
        self.suites = self.loader.discover()

    def prepareSuite(self, suites, processes):
        """Hook for subclasses (e.g. parallel runs); identity by default."""
        return suites

    def runTests(self, processes=None, skips=[]):
        """Run the loaded suites and return the unittest result object."""
        self.runner = self.runnerClass(self, descriptions=False, verbosity=2)

        # Dynamically skip those tests specified though arguments
        self.skipTests(skips)

        self._run_start_time = time.time()
        self._run_end_time = self._run_start_time
        if not processes:
            # Capture stdout/stderr per test when running serially.
            self.runner.buffer = True
        result = self.runner.run(self.prepareSuite(self.suites, processes))
        self._run_end_time = time.time()

        return result

    def listTests(self, display_type):
        """List (without running) the discovered tests by module/class/name."""
        self.runner = self.runnerClass(self, verbosity=2)
        return self.runner.list_tests(self.suites, display_type)
class OETestContextExecutor(object):
    """Command-line front end for a test component: registers argparse
    options and drives an OETestContext run (load, run/list, report).

    Fix over the previous revision: the json test-data file is opened with
    a context manager instead of leaking the handle.
    """
    _context_class = OETestContext
    _script_executor = 'oe-test'

    name = 'core'
    help = 'core test component example'
    description = 'executes core test suite example'

    datetime = time.strftime("%Y%m%d%H%M%S")
    default_cases = [os.path.join(os.path.abspath(os.path.dirname(__file__)),
            'cases/example')]
    default_test_data = os.path.join(default_cases[0], 'data.json')
    default_tests = None
    default_json_result_dir = None

    def register_commands(self, logger, subparsers):
        """Register this component's sub-command and its arguments."""
        self.parser = subparsers.add_parser(self.name, help=self.help,
                description=self.description, group='components')

        self.default_output_log = '%s-results-%s.log' % (self.name, self.datetime)
        self.parser.add_argument('--output-log', action='store',
                default=self.default_output_log,
                help="results output log, default: %s" % self.default_output_log)

        self.parser.add_argument('--json-result-dir', action='store',
                default=self.default_json_result_dir,
                help="json result output dir, default: %s" % self.default_json_result_dir)

        # Running and listing are mutually exclusive modes.
        group = self.parser.add_mutually_exclusive_group()
        group.add_argument('--run-tests', action='store', nargs='+',
                default=self.default_tests,
                help="tests to run in <module>[.<class>[.<name>]]")
        group.add_argument('--list-tests', action='store',
                choices=('module', 'class', 'name'),
                help="lists available tests")

        if self.default_test_data:
            self.parser.add_argument('--test-data-file', action='store',
                    default=self.default_test_data,
                    help="data file to load, default: %s" % self.default_test_data)
        else:
            self.parser.add_argument('--test-data-file', action='store',
                    help="data file to load")

        if self.default_cases:
            self.parser.add_argument('CASES_PATHS', action='store',
                    default=self.default_cases, nargs='*',
                    help="paths to directories with test cases, default: %s"\
                            % self.default_cases)
        else:
            self.parser.add_argument('CASES_PATHS', action='store',
                    nargs='+', help="paths to directories with test cases")

        self.parser.set_defaults(func=self.run)

    def _setup_logger(self, logger, args):
        """Attach a per-component formatter and a file handler to *logger*."""
        formatter = logging.Formatter('%(asctime)s - ' + self.name + \
                ' - %(levelname)s - %(message)s')
        # NOTE(review): assumes the caller already installed at least one
        # (stream) handler on the logger -- confirm before reusing.
        sh = logger.handlers[0]
        sh.setFormatter(formatter)
        fh = logging.FileHandler(args.output_log)
        fh.setFormatter(formatter)
        logger.addHandler(fh)

        if getattr(args, 'verbose', False):
            logger.setLevel('DEBUG')

        return logger

    def _process_args(self, logger, args):
        """Translate parsed arguments into kwargs for the context lifecycle
        phases (init/load/list/run)."""
        self.tc_kwargs = {}
        self.tc_kwargs['init'] = {}
        self.tc_kwargs['load'] = {}
        self.tc_kwargs['list'] = {}
        self.tc_kwargs['run'] = {}

        self.tc_kwargs['init']['logger'] = self._setup_logger(logger, args)
        if args.test_data_file:
            # Close the data file deterministically (previously leaked).
            with open(args.test_data_file, "r") as f:
                self.tc_kwargs['init']['td'] = json.load(f)
        else:
            self.tc_kwargs['init']['td'] = {}

        if args.run_tests:
            self.tc_kwargs['load']['modules'] = args.run_tests
            self.tc_kwargs['load']['modules_required'] = args.run_tests
        else:
            self.tc_kwargs['load']['modules'] = []

        self.tc_kwargs['run']['skips'] = []

        self.module_paths = args.CASES_PATHS

    def _get_json_result_dir(self, args):
        """Where to write json results; subclasses may override."""
        return args.json_result_dir

    def _get_configuration(self):
        """Assemble the result-store configuration from td."""
        td = self.tc_kwargs['init']['td']
        configuration = {'TEST_TYPE': self.name,
                        'MACHINE': td.get("MACHINE"),
                        'DISTRO': td.get("DISTRO"),
                        'IMAGE_BASENAME': td.get("IMAGE_BASENAME"),
                        'DATETIME': td.get("DATETIME")}
        return configuration

    def _get_result_id(self, configuration):
        """Unique id for this run's result set."""
        return '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'],
                configuration['MACHINE'], self.datetime)

    def _pre_run(self):
        """Hook executed just before running tests; no-op by default."""
        pass

    def run(self, logger, args):
        """Entry point wired via argparse set_defaults(func=...)."""
        self._process_args(logger, args)

        self.tc = self._context_class(**self.tc_kwargs['init'])
        try:
            self.tc.loadTests(self.module_paths, **self.tc_kwargs['load'])
        except OEQATestNotFound as ex:
            logger.error(ex)
            sys.exit(1)

        if args.list_tests:
            rc = self.tc.listTests(args.list_tests, **self.tc_kwargs['list'])
        else:
            self._pre_run()
            rc = self.tc.runTests(**self.tc_kwargs['run'])

            json_result_dir = self._get_json_result_dir(args)
            if json_result_dir:
                configuration = self._get_configuration()
                rc.logDetails(json_result_dir,
                              configuration,
                              self._get_result_id(configuration))
            else:
                rc.logDetails()
            rc.logSummary(self.name)

        # Maintain a stable "<name>-results.log" symlink to the latest log.
        output_link = os.path.join(os.path.dirname(args.output_log),
                "%s-results.log" % self.name)
        if os.path.exists(output_link):
            os.remove(output_link)
        os.symlink(args.output_log, output_link)

        return rc
_executor_class = OETestContextExecutor

View File

@@ -0,0 +1,74 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
from functools import wraps
from abc import ABCMeta
# Registry of every decorator class declared with @registerDecorator;
# OETestLoader consults it when binding decorators to test cases.
decoratorClasses = set()

def registerDecorator(cls):
    """Class decorator: record *cls* in decoratorClasses and return it."""
    decoratorClasses.add(cls)
    return cls
class OETestDecorator(object, metaclass=ABCMeta):
    """Base class for oeqa test decorators.

    Subclasses declare the argument names they accept in ``attrs``;
    __init__ maps positional/keyword constructor arguments onto those
    names as instance attributes.
    """
    case = None # Reference of OETestCase decorated
    attrs = None # Attributes to be loaded by decorator implementation

    def __init__(self, *args, **kwargs):
        # Map constructor arguments onto the names declared in attrs,
        # preferring keyword arguments over positional ones.
        if not self.attrs:
            return
        for idx, attr in enumerate(self.attrs):
            if attr in kwargs:
                value = kwargs[attr]
            else:
                value = args[idx]
            setattr(self, attr, value)

    def __call__(self, func):
        @wraps(func)
        def wrapped_f(*args, **kwargs):
            # This self-assignment keeps a closure cell referencing the
            # decorator instance so OETestLoader can discover it by
            # walking the wrapped function's __closure__.
            self.attrs = self.attrs # XXX: Enables OETestLoader discover
            return func(*args, **kwargs)
        return wrapped_f

    # OETestLoader calls this when loading test cases.
    # XXX: Most methods change the registry for later processing; be
    # aware that filtering needs to run later than bind, so the registry
    # may contain data for cases that were filtered out.
    def bind(self, registry, case):
        self.case = case
        self.logger = case.tc.logger
        self.case.decorators.append(self)

    # OETestRunner calls this method when it is about to run the test
    # case.
    def setUpDecorator(self):
        pass

    # OETestRunner calls this after the test method has run, even if the
    # method raised an exception.
    def tearDownDecorator(self):
        pass
class OETestDiscover(OETestDecorator):
    """Decorator base whose discover() may reorder/filter the cases that
    will actually run (e.g. OETestDepends)."""

    # OETestLoader calls this after discovering test cases; it must
    # return the cases to be run.
    @staticmethod
    def discover(registry):
        return registry['cases']
def OETestTag(*tags):
    """Decorator attaching oeqa test tags to a case class or test method.

    Repeated application extends the tag collection; a fresh list is
    created instead of appending in place so subclasses don't mutate a
    tag list shared with their parent class.
    """
    _missing = object()

    def decorator(item):
        existing = getattr(item, "__oeqa_testtags", _missing)
        if existing is not _missing:
            # do not append, create a new list (to handle classes with inheritance)
            item.__oeqa_testtags = list(existing) + list(tags)
        else:
            item.__oeqa_testtags = tags
        return item

    return decorator

View File

@@ -0,0 +1,230 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
from oeqa.core.exception import OEQAMissingVariable
from . import OETestDecorator, registerDecorator
def has_feature(td, feature):
    """
    Checks for feature in DISTRO_FEATURES or IMAGE_FEATURES.
    """
    distro_features = td.get('DISTRO_FEATURES', '').split()
    image_features = td.get('IMAGE_FEATURES', '').split()
    return feature in distro_features or feature in image_features
def has_machine(td, machine):
    """
    Checks for MACHINE.
    """
    return td.get('MACHINE', '') == machine
@registerDecorator
class skipIfDataVar(OETestDecorator):
    """Skip the decorated test with *msg* when td[var] equals *value*."""
    attrs = ('var', 'value', 'msg')

    def setUpDecorator(self):
        self.logger.debug('Checking if %r value is %r to skip test'
                          % (self.var, self.value))
        if self.case.td.get(self.var) == self.value:
            self.case.skipTest(self.msg)
@registerDecorator
class skipIfNotDataVar(OETestDecorator):
    """Skip the decorated test with *msg* unless td[var] equals *value*."""
    attrs = ('var', 'value', 'msg')

    def setUpDecorator(self):
        self.logger.debug('Checking if %r value is not %r to skip test'
                          % (self.var, self.value))
        if self.case.td.get(self.var) != self.value:
            self.case.skipTest(self.msg)
@registerDecorator
class skipIfInDataVar(OETestDecorator):
    """
    Skip test if value is in data store's variable.
    """
    attrs = ('var', 'value', 'msg')
    def setUpDecorator(self):
        msg = ('Checking if %r value contains %r to skip '
               'the test' % (self.var, self.value))
        self.logger.debug(msg)
        # Guard against an unset variable (td.get() -> None), mirroring
        # skipIfNotInDataVar below; previously this raised TypeError.
        if self.value in (self.case.td.get(self.var) or ""):
            self.case.skipTest(self.msg)
@registerDecorator
class skipIfNotInDataVar(OETestDecorator):
    """
    Skip test if value is not in data store's variable.
    """
    attrs = ('var', 'value', 'msg')
    def setUpDecorator(self):
        msg = ('Checking if %r value contains %r to run '
               'the test' % (self.var, self.value))
        self.logger.debug(msg)
        # "or ''" guards against td.get() returning None for an unset
        # variable, which would make the membership test raise TypeError.
        if not self.value in (self.case.td.get(self.var) or ""):
            self.case.skipTest(self.msg)
@registerDecorator
class OETestDataDepends(OETestDecorator):
    """Raise OEQAMissingVariable when any variable named in td_depends is
    absent from the test data dictionary."""
    attrs = ('td_depends',)

    def setUpDecorator(self):
        for v in self.td_depends:
            # Membership test instead of the previous try/except KeyError
            # that bound an unused local variable.
            if v not in self.case.td:
                raise OEQAMissingVariable("Test case need %s variable but"\
                        " isn't into td" % v)
@registerDecorator
class skipIfNotFeature(OETestDecorator):
    """Skip with *msg* unless value appears in DISTRO_FEATURES or
    IMAGE_FEATURES."""
    attrs = ('value', 'msg')

    def setUpDecorator(self):
        self.logger.debug('Checking if %s is in DISTRO_FEATURES '
                          'or IMAGE_FEATURES' % (self.value))
        if not has_feature(self.case.td, self.value):
            self.case.skipTest(self.msg)
@registerDecorator
class skipIfFeature(OETestDecorator):
    """Skip with *msg* when value appears in DISTRO_FEATURES or
    IMAGE_FEATURES."""
    attrs = ('value', 'msg')

    def setUpDecorator(self):
        self.logger.debug('Checking if %s is not in DISTRO_FEATURES '
                          'or IMAGE_FEATURES' % (self.value))
        if has_feature(self.case.td, self.value):
            self.case.skipTest(self.msg)
@registerDecorator
class skipIfNotMachine(OETestDecorator):
    """
    Skip test based on MACHINE.

    value must match MACHINE or the test is skipped with msg as the
    reason.
    """
    attrs = ('value', 'msg')
    def setUpDecorator(self):
        msg = ('Checking if %s is not this MACHINE' % self.value)
        self.logger.debug(msg)
        if not has_machine(self.case.td, self.value):
            self.case.skipTest(self.msg)
@registerDecorator
class skipIfMachine(OETestDecorator):
    """Skip with *msg* when MACHINE equals *value*."""
    attrs = ('value', 'msg')

    def setUpDecorator(self):
        self.logger.debug('Checking if %s is this MACHINE' % self.value)
        if has_machine(self.case.td, self.value):
            self.case.skipTest(self.msg)
@registerDecorator
class skipIfNotQemu(OETestDecorator):
    """Skip unless MACHINE starts with 'qemu'."""

    def setUpDecorator(self):
        self.logger.debug("Checking if not qemu MACHINE")
        machine = self.case.td.get('MACHINE', '')
        if not machine.startswith('qemu'):
            self.case.skipTest('Test only runs on qemu machines')
@registerDecorator
class skipIfNotQemuUsermode(OETestDecorator):
    """Skip unless MACHINE_FEATURES contains 'qemu-usermode'."""

    def setUpDecorator(self):
        self.logger.debug("Checking if MACHINE_FEATURES does not contain qemu-usermode")
        features = self.case.td.get('MACHINE_FEATURES', '').split()
        if 'qemu-usermode' not in features:
            self.case.skipTest('Test requires qemu-usermode in MACHINE_FEATURES')
@registerDecorator
class skipIfQemu(OETestDecorator):
    """Skip when MACHINE starts with 'qemu'."""

    def setUpDecorator(self):
        self.logger.debug("Checking if qemu MACHINE")
        machine = self.case.td.get('MACHINE', '')
        if machine.startswith('qemu'):
            self.case.skipTest('Test only runs on real hardware')
@registerDecorator
class skipIfArch(OETestDecorator):
    """
    Skip test if HOST_ARCH is present in the tuple specified.
    """
    attrs = ('archs',)
    def setUpDecorator(self):
        # NOTE(review): direct indexing raises KeyError if HOST_ARCH is
        # missing from td -- presumably always set by the callers; confirm.
        arch = self.case.td['HOST_ARCH']
        if arch in self.archs:
            self.case.skipTest('Test skipped on %s' % arch)
@registerDecorator
class skipIfNotArch(OETestDecorator):
    """
    Skip test if HOST_ARCH is not present in the tuple specified.
    """
    attrs = ('archs',)
    def setUpDecorator(self):
        # NOTE(review): direct indexing raises KeyError if HOST_ARCH is
        # missing from td -- presumably always set by the callers; confirm.
        arch = self.case.td['HOST_ARCH']
        if arch not in self.archs:
            self.case.skipTest('Test skipped on %s' % arch)

View File

@@ -0,0 +1,98 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
from unittest import SkipTest
from oeqa.core.exception import OEQADependency
from . import OETestDiscover, registerDecorator
def _add_depends(registry, case, depends):
module_name = case.__module__
class_name = case.__class__.__name__
case_id = case.id()
for depend in depends:
dparts = depend.split('.')
if len(dparts) == 1:
depend_id = ".".join((module_name, class_name, dparts[0]))
elif len(dparts) == 2:
depend_id = ".".join((module_name, dparts[0], dparts[1]))
else:
depend_id = depend
if not case_id in registry:
registry[case_id] = []
if not depend_id in registry[case_id]:
registry[case_id].append(depend_id)
def _validate_test_case_depends(cases, depends):
for case in depends:
if not case in cases:
continue
for dep in depends[case]:
if not dep in cases:
raise OEQADependency("TestCase %s depends on %s and isn't available"\
", cases available %s." % (case, dep, str(cases.keys())))
def _order_test_case_by_depends(cases, depends):
def _dep_resolve(graph, node, resolved, seen):
seen.append(node)
for edge in graph[node]:
if edge not in resolved:
if edge in seen:
raise OEQADependency("Test cases %s and %s have a circular" \
" dependency." % (node, edge))
_dep_resolve(graph, edge, resolved, seen)
resolved.append(node)
dep_graph = {}
dep_graph['__root__'] = cases.keys()
for case in cases:
if case in depends:
dep_graph[case] = depends[case]
else:
dep_graph[case] = []
cases_ordered = []
_dep_resolve(dep_graph, '__root__', cases_ordered, [])
cases_ordered.remove('__root__')
return [cases[case_id] for case_id in cases_ordered]
def _skipTestDependency(case, depends):
for dep in depends:
found = False
for test, _ in case.tc.results.successes:
if test.id() == dep:
found = True
break
if not found:
raise SkipTest("Test case %s depends on %s but it didn't pass/run." \
% (case.id(), dep))
@registerDecorator
class OETestDepends(OETestDiscover):
    """Decorator declaring that a test depends on other tests.

    bind() accumulates the dependency graph in the shared registry;
    discover() validates it and returns the cases in dependency order;
    setUpDecorator() skips the case if a dependency did not pass.
    """
    attrs = ('depends',)

    def bind(self, registry, case):
        super(OETestDepends, self).bind(registry, case)
        if not registry.get('depends'):
            registry['depends'] = {}
        _add_depends(registry['depends'], case, self.depends)

    @staticmethod
    def discover(registry):
        if registry.get('depends'):
            _validate_test_case_depends(registry['cases'], registry['depends'])
            return _order_test_case_by_depends(registry['cases'], registry['depends'])
        else:
            return [registry['cases'][case_id] for case_id in registry['cases']]

    def setUpDecorator(self):
        _skipTestDependency(self.case, self.depends)

View File

@@ -0,0 +1,29 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import signal
from . import OETestDecorator, registerDecorator
from oeqa.core.exception import OEQATimeoutError
@registerDecorator
class OETimeout(OETestDecorator):
    """Abort the decorated test with OEQATimeoutError after ``oetimeout``
    seconds, implemented with SIGALRM (so POSIX main-thread only)."""
    attrs = ('oetimeout',)

    def setUpDecorator(self):
        timeout = self.oetimeout
        def _timeoutHandler(signum, frame):
            raise OEQATimeoutError("Timed out after %s "
                    "seconds of execution" % timeout)

        self.logger.debug("Setting up a %d second(s) timeout" % self.oetimeout)
        # Install the handler first, keeping the previous handler so it
        # can be restored in tearDownDecorator, then arm the alarm.
        self.alarmSignal = signal.signal(signal.SIGALRM, _timeoutHandler)
        signal.alarm(self.oetimeout)

    def tearDownDecorator(self):
        # Cancel any pending alarm before restoring the saved handler.
        signal.alarm(0)
        if hasattr(self, 'alarmSignal'):
            signal.signal(signal.SIGALRM, self.alarmSignal)
            self.logger.debug("Removed SIGALRM handler")

View File

@@ -0,0 +1,26 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
class OEQAException(Exception):
    """Base class for all oeqa framework errors."""
    pass
class OEQATimeoutError(OEQAException):
    """Raised by the OETimeout decorator when a test exceeds its limit."""
    pass
class OEQAMissingVariable(OEQAException):
    """Raised when a required test-data (td) variable is missing."""
    pass
class OEQADependency(OEQAException):
    """Raised for unresolvable or circular test-case dependencies."""
    pass
class OEQAMissingManifest(OEQAException):
    """Raised when the module manifest file does not exist."""
    pass
class OEQAPreRun(OEQAException):
    """Raised when a pre-run step of a test component fails."""
    pass
class OEQATestNotFound(OEQAException):
    """Raised when a requested/required test case cannot be found."""
    pass

View File

@@ -0,0 +1,350 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import os
import re
import sys
import unittest
import inspect
from oeqa.core.utils.path import findFile
from oeqa.core.utils.test import getSuiteModules, getCaseID
from oeqa.core.exception import OEQATestNotFound
from oeqa.core.case import OETestCase
from oeqa.core.decorator import decoratorClasses, OETestDecorator, \
OETestDiscover
# When loading tests, the unittest framework stores any exceptions and
# displays them only when the run method is called.
#
# For our purposes, it is better to raise the exceptions in the loading
# step rather than waiting to run the test suite.
#
# Generate the function definition because this differs across python versions:
# Python >= 3.4.4 uses three parameters instead of four, but for example
# Python 3.5.3 uses four parameters, so the change isn't incremental.
# Replace unittest's deferred-failure factory so that loading errors are
# raised immediately instead of at run time; the replacement's signature
# is generated via exec to match the running Python's implementation.
_failed_test_args = inspect.getfullargspec(unittest.loader._make_failed_test).args
exec("""def _make_failed_test(%s): raise exception""" % ', '.join(_failed_test_args))
unittest.loader._make_failed_test = _make_failed_test
def _find_duplicated_modules(suite, directory):
    """Raise ImportError when any module already in *suite* also exists as
    a .py file under *directory* (would shadow/duplicate the module)."""
    for module in getSuiteModules(suite):
        path = findFile('%s.py' % module, directory)
        if path:
            raise ImportError("Duplicated %s module found in %s" % (module, path))
def _built_modules_dict(modules, logger):
modules_dict = {}
if modules == None:
return modules_dict
for module in modules:
# Assumption: package and module names do not contain upper case
# characters, whereas class names do
m = re.match(r'^([0-9a-z_.]+)(?:\.(\w[^.]*)(?:\.([^.]+))?)?$', module, flags=re.ASCII)
if not m:
logger.warn("module '%s' was skipped from selected modules, "\
"because it doesn't match with module name assumptions: "\
"package and module names do not contain upper case characters, whereas class names do" % module)
continue
module_name, class_name, test_name = m.groups()
if module_name and module_name not in modules_dict:
modules_dict[module_name] = {}
if class_name and class_name not in modules_dict[module_name]:
modules_dict[module_name][class_name] = []
if test_name and test_name not in modules_dict[module_name][class_name]:
modules_dict[module_name][class_name].append(test_name)
if modules and not modules_dict:
raise OEQATestNotFound("All selected modules were skipped, this would trigger selftest with all tests and -r ignored.")
return modules_dict
class OETestLoader(unittest.TestLoader):
    """unittest.TestLoader specialised for oeqa.

    Responsibilities: filter discovered cases by module/class/test
    selectors and tag filters, register every kept case in the test
    context registry, bind decorators found in test-method closures, and
    monkey-patch OETestCase subclasses so td-var validation and decorator
    setup/teardown wrap the normal setUp/tearDown lifecycle.
    """
    caseClass = OETestCase

    # TestLoader configuration keys that may be overridden via kwargs.
    kwargs_names = ['testMethodPrefix', 'sortTestMethodUsing', 'suiteClass',
            '_top_level_dir']

    def __init__(self, tc, module_paths, modules, tests, modules_required,
            *args, **kwargs):
        """tc: OETestContext; module_paths: dir(s) to scan; modules/tests:
        selection filters; modules_required: selectors that must match."""
        self.tc = tc

        self.modules = _built_modules_dict(modules, tc.logger)

        self.tests = tests
        self.modules_required = modules_required

        # Optional callable receiving a case's tag set; truthy -> filter out.
        self.tags_filter = kwargs.get("tags_filter", None)

        if isinstance(module_paths, str):
            module_paths = [module_paths]
        elif not isinstance(module_paths, list):
            raise TypeError('module_paths must be a str or a list of str')
        self.module_paths = module_paths

        for kwname in self.kwargs_names:
            if kwname in kwargs:
                setattr(self, kwname, kwargs[kwname])

        self._patchCaseClass(self.caseClass)

        super(OETestLoader, self).__init__()

    def _patchCaseClass(self, testCaseClass):
        # Adds custom attributes to the OETestCase class
        setattr(testCaseClass, 'tc', self.tc)
        setattr(testCaseClass, 'td', self.tc.td)
        setattr(testCaseClass, 'logger', self.tc.logger)

    def _registerTestCase(self, case):
        # Record the case in the shared context registry, keyed by id.
        case_id = case.id()
        self.tc._registry['cases'][case_id] = case

    def _handleTestCaseDecorators(self, case):
        """Find OETestDecorator instances hidden in the test method's
        closure cells (see OETestDecorator.__call__) and bind them."""
        def _handle(obj):
            if isinstance(obj, OETestDecorator):
                if not obj.__class__ in decoratorClasses:
                    raise Exception("Decorator %s isn't registered" \
                            " in decoratorClasses." % obj.__name__)
                obj.bind(self.tc._registry, case)

        def _walk_closure(obj):
            # Recursively unwrap nested decorator closures.
            if hasattr(obj, '__closure__') and obj.__closure__:
                for f in obj.__closure__:
                    obj = f.cell_contents
                    _handle(obj)
                    _walk_closure(obj)
        method = getattr(case, case._testMethodName, None)
        _walk_closure(method)

    def _filterTest(self, case):
        """
        Returns True if test case must be filtered, False otherwise.
        """
        # XXX; If the module has more than one namespace only use
        # the first to support run the whole module specifying the
        # <module_name>.[test_class].[test_name]
        module_name_small = case.__module__.split('.')[0]
        module_name = case.__module__

        class_name = case.__class__.__name__
        test_name = case._testMethodName

        # 'auto' is a reserved key word to run test cases automatically
        # warn users if their test case belong to a module named 'auto'
        if module_name_small == "auto":
            # NOTE(review): 'bb' is not imported in this module; outside a
            # BitBake environment this path would raise NameError -- confirm.
            bb.warn("'auto' is a reserved key word for TEST_SUITES. "
                    "But test case '%s' is detected to belong to auto module. "
                    "Please condier using a new name for your module." % str(case))

        # check if case belongs to any specified module
        # if 'auto' is specified, such check is skipped
        if self.modules and not 'auto' in self.modules:
            module = None
            try:
                module = self.modules[module_name_small]
            except KeyError:
                try:
                    module = self.modules[module_name]
                except KeyError:
                    return True

            if module:
                if not class_name in module:
                    return True

                if module[class_name]:
                    if test_name not in module[class_name]:
                        return True

        # Decorator filters
        if self.tags_filter is not None and callable(self.tags_filter):
            alltags = set()
            # pull tags from the case class
            if hasattr(case, "__oeqa_testtags"):
                for t in getattr(case, "__oeqa_testtags"):
                    alltags.add(t)
            # pull tags from the method itself
            if hasattr(case, test_name):
                method = getattr(case, test_name)
                if hasattr(method, "__oeqa_testtags"):
                    for t in getattr(method, "__oeqa_testtags"):
                        alltags.add(t)

            if self.tags_filter(alltags):
                return True

        return False

    def _getTestCase(self, testCaseClass, tcName):
        """Instantiate *tcName* of *testCaseClass*, monkey-patching the
        class once so OETestCase's wrapped lifecycle methods are used."""
        if not hasattr(testCaseClass, '__oeqa_loader') and \
                issubclass(testCaseClass, OETestCase):
            # In order to support data_vars validation
            # monkey patch the default setUp/tearDown{Class} to use
            # the ones provided by OETestCase
            setattr(testCaseClass, 'setUpClassMethod',
                    getattr(testCaseClass, 'setUpClass'))
            setattr(testCaseClass, 'tearDownClassMethod',
                    getattr(testCaseClass, 'tearDownClass'))
            setattr(testCaseClass, 'setUpClass',
                    testCaseClass._oeSetUpClass)
            setattr(testCaseClass, 'tearDownClass',
                    testCaseClass._oeTearDownClass)

            # In order to support decorators initialization
            # monkey patch the default setUp/tearDown to use
            # a setUpDecorators/tearDownDecorators that methods
            # will call setUp/tearDown original methods.
            setattr(testCaseClass, 'setUpMethod',
                    getattr(testCaseClass, 'setUp'))
            setattr(testCaseClass, 'tearDownMethod',
                    getattr(testCaseClass, 'tearDown'))
            setattr(testCaseClass, 'setUp', testCaseClass._oeSetUp)
            setattr(testCaseClass, 'tearDown', testCaseClass._oeTearDown)

            setattr(testCaseClass, '__oeqa_loader', True)

        case = testCaseClass(tcName)
        if isinstance(case, OETestCase):
            setattr(case, 'decorators', [])

        return case

    def loadTestsFromTestCase(self, testCaseClass):
        """
        Returns a suite of all tests cases contained in testCaseClass.
        """
        if issubclass(testCaseClass, unittest.suite.TestSuite):
            raise TypeError("Test cases should not be derived from TestSuite." \
                                " Maybe you meant to derive %s from TestCase?" \
                                % testCaseClass.__name__)
        if not issubclass(testCaseClass, unittest.case.TestCase):
            raise TypeError("Test %s is not derived from %s" % \
                    (testCaseClass.__name__, unittest.case.TestCase.__name__))

        testCaseNames = self.getTestCaseNames(testCaseClass)
        if not testCaseNames and hasattr(testCaseClass, 'runTest'):
            testCaseNames = ['runTest']

        suite = []
        for tcName in testCaseNames:
            case = self._getTestCase(testCaseClass, tcName)
            # Filter by case id
            if not (self.tests and not 'auto' in self.tests
                    and not getCaseID(case) in self.tests):
                self._handleTestCaseDecorators(case)

                # Filter by decorators
                if not self._filterTest(case):
                    self._registerTestCase(case)
                    suite.append(case)

        return self.suiteClass(suite)

    def _required_modules_validation(self):
        """
        Search in Test context registry if a required
        test is found, raise an exception when not found.
        """
        for module in self.modules_required:
            found = False

            # The module name is splitted to only compare the
            # first part of a test case id.
            comp_len = len(module.split('.'))
            for case in self.tc._registry['cases']:
                case_comp = '.'.join(case.split('.')[0:comp_len])
                if module == case_comp:
                    found = True
                    break

            if not found:
                raise OEQATestNotFound("Not found %s in loaded test cases" % \
                        module)

    def discover(self):
        """Discover cases from every module path, let OETestDiscover
        decorators (e.g. OETestDepends) reorder them, and validate that
        all required modules matched something."""
        big_suite = self.suiteClass()
        for path in self.module_paths:
            _find_duplicated_modules(big_suite, path)
            suite = super(OETestLoader, self).discover(path,
                    pattern='*.py', top_level_dir=path)
            big_suite.addTests(suite)

        cases = None
        discover_classes = [clss for clss in decoratorClasses
                            if issubclass(clss, OETestDiscover)]
        for clss in discover_classes:
            cases = clss.discover(self.tc._registry)

        if self.modules_required:
            self._required_modules_validation()

        return self.suiteClass(cases) if cases else big_suite

    def _filterModule(self, module):
        """Decide whether *module* should be loaded; returns the pair
        (load_module, load_underscore)."""
        if module.__name__ in sys.builtin_module_names:
            msg = 'Tried to import %s test module but is a built-in'
            raise ImportError(msg % module.__name__)

        # XXX; If the module has more than one namespace only use
        # the first to support run the whole module specifying the
        # <module_name>.[test_class].[test_name]
        module_name_small = module.__name__.split('.')[0]
        module_name = module.__name__

        # Normal test modules are loaded if no modules were specified,
        # if module is in the specified module list or if 'auto' is in
        # module list.
        # Underscore modules are loaded only if specified in module list.
        load_module = True if not module_name.startswith('_') \
                              and (not self.modules \
                                   or module_name in self.modules \
                                   or module_name_small in self.modules \
                                   or 'auto' in self.modules) \
                           else False

        load_underscore = True if module_name.startswith('_') \
                                  and (module_name in self.modules or \
                                  module_name_small in self.modules) \
                               else False

        if any(c.isupper() for c in module.__name__):
            raise SystemExit("Module '%s' contains uppercase characters and this isn't supported. Please fix the module name." % module.__name__)

        return (load_module, load_underscore)

    # XXX After Python 3.5, remove backward compatibility hacks for
    # use_load_tests deprecation via *args and **kws. See issue 16662.
    if sys.version_info >= (3,5):
        def loadTestsFromModule(self, module, *args, pattern=None, **kws):
            """
            Returns a suite of all tests cases contained in module.
            """
            load_module, load_underscore = self._filterModule(module)

            if load_module or load_underscore:
                return super(OETestLoader, self).loadTestsFromModule(
                        module, *args, pattern=pattern, **kws)
            else:
                return self.suiteClass()
    else:
        def loadTestsFromModule(self, module, use_load_tests=True):
            """
            Returns a suite of all tests cases contained in module.
            """
            load_module, load_underscore = self._filterModule(module)

            if load_module or load_underscore:
                return super(OETestLoader, self).loadTestsFromModule(
                        module, use_load_tests)
            else:
                return self.suiteClass()

View File

@@ -0,0 +1,363 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import os
import time
import unittest
import logging
import re
import json
import sys
from unittest import TextTestResult as _TestResult
from unittest import TextTestRunner as _TestRunner
class OEStreamLogger(object):
    """File-like adapter that routes unittest's stream output into a
    logger; writes containing '...' (test progress lines) are buffered
    until a later write completes them."""

    def __init__(self, logger):
        self.logger = logger
        self.buffer = ""

    def write(self, msg):
        # Trivial writes (single char or leading newline) go straight out.
        if len(msg) <= 1 or msg[0] == '\n':
            self.logger.log(logging.INFO, msg)
            return
        if '...' in msg:
            self.buffer += msg
        elif self.buffer:
            self.buffer += msg
            self.logger.log(logging.INFO, self.buffer)
            self.buffer = ""

    def flush(self):
        for handler in self.logger.handlers:
            handler.flush()
class OETestResult(_TestResult):
def __init__(self, tc, *args, **kwargs):
    """Extend TextTestResult with success tracking, per-test timing,
    extra results and captured output; *args/**kwargs go to the base."""
    super(OETestResult, self).__init__(*args, **kwargs)

    self.successes = []
    self.starttime = {}
    self.endtime = {}
    self.progressinfo = {}
    self.extraresults = {}
    self.shownmsg = []

    # Inject into tc so that TestDepends decorator can see results
    tc.results = self

    self.tc = tc

    # stdout and stderr for each test case
    self.logged_output = {}
def startTest(self, test):
    """Record the test's start time (unless a concurrency layer already
    set one) and delegate to the base implementation."""
    # May have been set by concurrencytest
    if test.id() not in self.starttime:
        self.starttime[test.id()] = time.time()
    super(OETestResult, self).startTest(test)
def stopTest(self, test):
    """Record end time, grab buffered stdout/stderr, and immediately log
    any failure/error/skip message for this test."""
    self.endtime[test.id()] = time.time()
    if self.buffer:
        # With buffering on, unittest redirected sys.stdout/stderr to
        # StringIO objects; capture their contents before the base class
        # restores the real streams.
        self.logged_output[test.id()] = (
                sys.stdout.getvalue(), sys.stderr.getvalue())
    super(OETestResult, self).stopTest(test)
    if test.id() in self.progressinfo:
        self.tc.logger.info(self.progressinfo[test.id()])

    # Print the errors/failures early to aid/speed debugging, its a pain
    # to wait until selftest finishes to see them.
    for t in ['failures', 'errors', 'skipped', 'expectedFailures']:
        for (scase, msg) in getattr(self, t):
            if test.id() == scase.id():
                self.tc.logger.info(str(msg))
                self.shownmsg.append(test.id())
                break
def logSummary(self, component, context_msg=''):
elapsed_time = self.tc._run_end_time - self.tc._run_start_time
self.tc.logger.info("SUMMARY:")
self.tc.logger.info("%s (%s) - Ran %d test%s in %.3fs" % (component,
context_msg, self.testsRun, self.testsRun != 1 and "s" or "",
elapsed_time))
if self.wasSuccessful():
msg = "%s - OK - All required tests passed" % component
else:
msg = "%s - FAIL - Required tests failed" % component
msg += " (successes=%d, skipped=%d, failures=%d, errors=%d)" % (len(self.successes), len(self.skipped), len(self.failures), len(self.errors))
self.tc.logger.info(msg)
def _getTestResultDetails(self, case):
result_types = {'failures': 'FAILED', 'errors': 'ERROR', 'skipped': 'SKIPPED',
'expectedFailures': 'EXPECTEDFAIL', 'successes': 'PASSED',
'unexpectedSuccesses' : 'PASSED'}
for rtype in result_types:
found = False
for resultclass in getattr(self, rtype):
# unexpectedSuccesses are just lists, not lists of tuples
if isinstance(resultclass, tuple):
scase, msg = resultclass
else:
scase, msg = resultclass, None
if case.id() == scase.id():
found = True
break
scase_str = str(scase.id())
# When fails at module or class level the class name is passed as string
# so figure out to see if match
m = re.search(r"^setUpModule \((?P<module_name>.*)\).*$", scase_str)
if m:
if case.__class__.__module__ == m.group('module_name'):
found = True
break
m = re.search(r"^setUpClass \((?P<class_name>.*)\).*$", scase_str)
if m:
class_name = "%s.%s" % (case.__class__.__module__,
case.__class__.__name__)
if class_name == m.group('class_name'):
found = True
break
if found:
return result_types[rtype], msg
return 'UNKNOWN', None
def extractExtraResults(self, test, details = None):
extraresults = None
if details is not None and "extraresults" in details:
extraresults = details.get("extraresults", {})
elif hasattr(test, "extraresults"):
extraresults = test.extraresults
if extraresults is not None:
for k, v in extraresults.items():
# handle updating already existing entries (e.g. ptestresults.sections)
if k in self.extraresults:
self.extraresults[k].update(v)
else:
self.extraresults[k] = v
def addError(self, test, *args, details = None):
self.extractExtraResults(test, details = details)
return super(OETestResult, self).addError(test, *args)
def addFailure(self, test, *args, details = None):
self.extractExtraResults(test, details = details)
return super(OETestResult, self).addFailure(test, *args)
def addSuccess(self, test, details = None):
#Added so we can keep track of successes too
self.successes.append((test, None))
self.extractExtraResults(test, details = details)
return super(OETestResult, self).addSuccess(test)
def addExpectedFailure(self, test, *args, details = None):
self.extractExtraResults(test, details = details)
return super(OETestResult, self).addExpectedFailure(test, *args)
def addUnexpectedSuccess(self, test, details = None):
self.extractExtraResults(test, details = details)
return super(OETestResult, self).addUnexpectedSuccess(test)
def logDetails(self, json_file_dir=None, configuration=None, result_id=None,
dump_streams=False):
result = self.extraresults
logs = {}
if hasattr(self.tc, "extraresults"):
result.update(self.tc.extraresults)
for case_name in self.tc._registry['cases']:
case = self.tc._registry['cases'][case_name]
(status, log) = self._getTestResultDetails(case)
t = ""
duration = 0
if case.id() in self.starttime and case.id() in self.endtime:
duration = self.endtime[case.id()] - self.starttime[case.id()]
t = " (" + "{0:.2f}".format(duration) + "s)"
if status not in logs:
logs[status] = []
logs[status].append("RESULTS - %s: %s%s" % (case.id(), status, t))
report = {'status': status}
if log:
report['log'] = log
# Class setup failures wouldn't enter stopTest so would never display
if case.id() not in self.shownmsg:
self.tc.logger.info("Failure (%s) for %s:\n" % (status, case.id()) + log)
if duration:
report['duration'] = duration
alltags = []
# pull tags from the case class
if hasattr(case, "__oeqa_testtags"):
alltags.extend(getattr(case, "__oeqa_testtags"))
# pull tags from the method itself
test_name = case._testMethodName
if hasattr(case, test_name):
method = getattr(case, test_name)
if hasattr(method, "__oeqa_testtags"):
alltags.extend(getattr(method, "__oeqa_testtags"))
if alltags:
report['oetags'] = alltags
if dump_streams and case.id() in self.logged_output:
(stdout, stderr) = self.logged_output[case.id()]
report['stdout'] = stdout
report['stderr'] = stderr
result[case.id()] = report
self.tc.logger.info("RESULTS:")
for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
if i not in logs:
continue
for l in logs[i]:
self.tc.logger.info(l)
if json_file_dir:
tresultjsonhelper = OETestResultJSONHelper()
tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)
def wasSuccessful(self):
# Override as we unexpected successes aren't failures for us
return (len(self.failures) == len(self.errors) == 0)
def hasAnyFailingTest(self):
# Account for expected failures
return not self.wasSuccessful() or len(self.expectedFailures)
class OEListTestsResult(object):
    """Stand-in result returned when tests are only listed, never executed."""

    def wasSuccessful(self):
        # Listing the available tests cannot fail.
        return True
class OETestRunner(_TestRunner):
    """TextTestRunner that routes its output through an OEStreamLogger and
    can alternatively just list the loaded tests."""

    streamLoggerClass = OEStreamLogger

    def __init__(self, tc, *args, **kwargs):
        # Replace the default stream so unittest output goes to tc.logger.
        kwargs['stream'] = self.streamLoggerClass(tc.logger)
        super(OETestRunner, self).__init__(*args, **kwargs)
        self.tc = tc
        self.resultclass = OETestResult

    def _makeResult(self):
        # Hand the context to OETestResult so it can reach registry/logger.
        return self.resultclass(self.tc, self.stream, self.descriptions,
                self.verbosity)

    def _walk_suite(self, suite, func):
        """Depth-first walk over suite, calling func(logger, case) for each
        TestCase and counting visited cases in self._walked_cases."""
        for obj in suite:
            if isinstance(obj, unittest.suite.TestSuite):
                if len(obj._tests):
                    self._walk_suite(obj, func)
            elif isinstance(obj, unittest.case.TestCase):
                func(self.tc.logger, obj)
                self._walked_cases = self._walked_cases + 1

    def _list_tests_name(self, suite):
        """Log every test id, with its tags (if any) in parentheses."""
        self._walked_cases = 0

        def _list_cases(logger, case):
            oetags = []
            if hasattr(case, '__oeqa_testtags'):
                oetags = getattr(case, '__oeqa_testtags')
            if oetags:
                logger.info("%s (%s)" % (case.id(), ",".join(oetags)))
            else:
                logger.info("%s" % (case.id()))

        self.tc.logger.info("Listing all available tests:")
        self._walked_cases = 0
        self.tc.logger.info("test (tags)")
        self.tc.logger.info("-" * 80)
        self._walk_suite(suite, _list_cases)
        self.tc.logger.info("-" * 80)
        self.tc.logger.info("Total found:\t%s" % self._walked_cases)

    def _list_tests_class(self, suite):
        """Log tests grouped as module -> class -> method."""
        self._walked_cases = 0

        # current module/class names are shared with the closure via curr
        curr = {}
        def _list_classes(logger, case):
            if not 'module' in curr or curr['module'] != case.__module__:
                curr['module'] = case.__module__
                logger.info(curr['module'])

            if not 'class' in curr or curr['class'] != \
                    case.__class__.__name__:
                curr['class'] = case.__class__.__name__
                logger.info(" -- %s" % curr['class'])

            logger.info(" -- -- %s" % case._testMethodName)

        self.tc.logger.info("Listing all available test classes:")
        self._walk_suite(suite, _list_classes)

    def _list_tests_module(self, suite):
        """Log each distinct test module once; underscore-prefixed modules
        are marked as hidden."""
        self._walked_cases = 0

        listed = []
        def _list_modules(logger, case):
            if not case.__module__ in listed:
                if case.__module__.startswith('_'):
                    logger.info("%s (hidden)" % case.__module__)
                else:
                    logger.info(case.__module__)
                listed.append(case.__module__)

        self.tc.logger.info("Listing all available test modules:")
        self._walk_suite(suite, _list_modules)

    def list_tests(self, suite, display_type):
        """List the suite by 'name', 'class' or 'module' and return a dummy
        always-successful result object."""
        if display_type == 'name':
            self._list_tests_name(suite)
        elif display_type == 'class':
            self._list_tests_class(suite)
        elif display_type == 'module':
            self._list_tests_module(suite)

        return OEListTestsResult()
class OETestResultJSONHelper(object):
    """Persists test results as JSON, merging with any existing result file."""

    testresult_filename = 'testresults.json'

    def _get_existing_testresults_if_available(self, write_dir):
        """Return previously dumped results from write_dir, or {} if none."""
        results_path = os.path.join(write_dir, self.testresult_filename)
        if not os.path.exists(results_path):
            return {}
        with open(results_path, "r") as fh:
            return json.load(fh)

    def _write_file(self, write_dir, file_name, file_content):
        """Write file_content to write_dir/file_name, replacing any old file."""
        with open(os.path.join(write_dir, file_name), 'w') as fh:
            fh.write(file_content)

    def dump_testresult_file(self, write_dir, configuration, result_id, test_result):
        """Merge one result set (keyed by result_id) into the JSON file.

        Under bitbake the file is guarded by a lock file so concurrent
        writers don't corrupt it; outside bitbake we just create the
        directory and write.
        """
        try:
            import bb
            has_bb = True
            bb.utils.mkdirhier(write_dir)
            lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
        except ImportError:
            has_bb = False
            os.makedirs(write_dir, exist_ok=True)
        merged = self._get_existing_testresults_if_available(write_dir)
        merged[result_id] = {'configuration': configuration, 'result': test_result}
        self._write_file(write_dir, self.testresult_filename,
                         json.dumps(merged, sort_keys=True, indent=1))
        if has_bb:
            bb.utils.unlockfile(lf)

View File

@@ -0,0 +1,36 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
from abc import abstractmethod
class OETarget(object):
    """Abstract interface every OEQA target (QEMU, SSH, ...) implements.

    NOTE(review): the class does not use ABCMeta, so the @abstractmethod
    markers are documentation only — instantiation is not actually blocked.
    """

    def __init__(self, logger, *args, **kwargs):
        self.logger = logger

    @abstractmethod
    def start(self):
        """Bring the target up (boot/connect)."""
        pass

    @abstractmethod
    def stop(self):
        """Shut the target down."""
        pass

    @abstractmethod
    def run(self, cmd, timeout=None):
        """Run cmd on the target, optionally bounded by timeout seconds."""
        pass

    @abstractmethod
    def copyTo(self, localSrc, remoteDst):
        """Copy a local file to the target."""
        pass

    @abstractmethod
    def copyFrom(self, remoteSrc, localDst):
        """Copy a file from the target to the host."""
        pass

    @abstractmethod
    def copyDirTo(self, localSrc, remoteDst):
        """Recursively copy a local directory tree to the target."""
        pass

View File

@@ -0,0 +1,94 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import os
import sys
import signal
import time
import glob
import subprocess
from collections import defaultdict
from .ssh import OESSHTarget
from oeqa.utils.qemurunner import QemuRunner
# Image types this target can boot directly.
supported_fstypes = ['ext3', 'ext4', 'cpio.gz', 'wic']

class OEQemuTarget(OESSHTarget):
    """SSH-driven test target backed by a QemuRunner-managed QEMU instance.

    NOTE(review): boot_patterns defaults to a shared mutable defaultdict —
    fine as long as no instance mutates it; confirm before relying on it.
    """

    def __init__(self, logger, server_ip, timeout=300, user='root',
            port=None, machine='', rootfs='', kernel='', kvm=False, slirp=False,
            dump_dir='', display='', bootlog='',
            tmpdir='', dir_image='', boottime=60, serial_ports=2,
            boot_patterns = defaultdict(str), ovmf=False, tmpfsdir=None, **kwargs):

        # The target ip is unknown until the runner boots, hence None here.
        super(OEQemuTarget, self).__init__(logger, None, server_ip, timeout,
                user, port)

        self.server_ip = server_ip
        self.server_port = 0
        self.machine = machine
        self.rootfs = rootfs
        self.kernel = kernel
        self.kvm = kvm
        self.ovmf = ovmf
        self.use_slirp = slirp
        self.boot_patterns = boot_patterns
        self.dump_dir = dump_dir
        self.bootlog = bootlog

        self.runner = QemuRunner(machine=machine, rootfs=rootfs, tmpdir=tmpdir,
                deploy_dir_image=dir_image, display=display,
                logfile=bootlog, boottime=boottime,
                use_kvm=kvm, use_slirp=slirp, dump_dir=dump_dir, logger=logger,
                serial_ports=serial_ports, boot_patterns = boot_patterns,
                use_ovmf=ovmf, tmpfsdir=tmpfsdir)

    def start(self, params=None, extra_bootparams=None, runqemuparams=''):
        """Boot QEMU; on success fill in self.ip/server_ip (and the ssh/scp
        port when slirp networking is used), on failure dump diagnostics
        from the dump dir and boot log and raise RuntimeError."""
        if self.use_slirp and not self.server_ip:
            self.logger.error("Could not start qemu with slirp without server ip - provide 'TEST_SERVER_IP'")
            raise RuntimeError("FAILED to start qemu - check the task log and the boot log")
        if self.runner.start(params, extra_bootparams=extra_bootparams, runqemuparams=runqemuparams):
            self.ip = self.runner.ip
            if self.use_slirp:
                # With slirp the runner reports "ip:host_port".
                target_ip_port = self.runner.ip.split(':')
                if len(target_ip_port) == 2:
                    target_ip = target_ip_port[0]
                    port = target_ip_port[1]
                    self.ip = target_ip
                    self.ssh = self.ssh + ['-p', port]
                    self.scp = self.scp + ['-P', port]
                else:
                    self.logger.error("Could not get host machine port to connect qemu with slirp, ssh will not be "
                            "able to connect to qemu with slirp")
            if self.runner.server_ip:
                self.server_ip = self.runner.server_ip
        else:
            self.stop()
            # Display the first 20 lines of top and
            # last 20 lines of the bootlog when the
            # target is not being booted up.
            topfile = glob.glob(self.dump_dir + "/*_qemu/host_*_top")
            msg = "\n\n===== start: snippet =====\n\n"
            for f in topfile:
                msg += "file: %s\n\n" % f
                with open(f) as tf:
                    for x in range(20):
                        msg += next(tf)
            msg += "\n\n===== end: snippet =====\n\n"
            blcmd = ["tail", "-20", self.bootlog]
            msg += "===== start: snippet =====\n\n"
            try:
                out = subprocess.check_output(blcmd, stderr=subprocess.STDOUT, timeout=1).decode('utf-8')
                msg += "file: %s\n\n" % self.bootlog
                msg += out
            except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as err:
                msg += "Error running command: %s\n%s\n" % (blcmd, err)
            msg += "\n\n===== end: snippet =====\n"

            raise RuntimeError("FAILED to start qemu - check the task log and the boot log %s" % (msg))

    def stop(self):
        """Tear down the QEMU instance."""
        self.runner.stop()

View File

@@ -0,0 +1,323 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import os
import time
import select
import logging
import subprocess
import codecs
from . import OETarget
class OESSHTarget(OETarget):
    """Target reachable over SSH: commands run via ssh, files move via scp."""

    def __init__(self, logger, ip, server_ip, timeout=300, user='root',
                 port=None, server_port=0, **kwargs):
        if not logger:
            # Fall back to a file logger writing remoteTarget.log in cwd.
            logger = logging.getLogger('target')
            logger.setLevel(logging.INFO)
            filePath = os.path.join(os.getcwd(), 'remoteTarget.log')
            fileHandler = logging.FileHandler(filePath, 'w', 'utf-8')
            formatter = logging.Formatter(
                        '%(asctime)s.%(msecs)03d %(levelname)s: %(message)s',
                        '%H:%M:%S')
            fileHandler.setFormatter(formatter)
            logger.addHandler(fileHandler)

        super(OESSHTarget, self).__init__(logger)
        self.ip = ip
        self.server_ip = server_ip
        self.server_port = server_port
        self.timeout = timeout
        self.user = user
        # Host-key checking is disabled on purpose: test images regenerate
        # their keys on every boot.
        ssh_options = [
                '-o', 'ServerAliveCountMax=2',
                '-o', 'ServerAliveInterval=30',
                '-o', 'UserKnownHostsFile=/dev/null',
                '-o', 'StrictHostKeyChecking=no',
                '-o', 'LogLevel=ERROR'
                ]
        scp_options = [
                '-r'
        ]
        self.ssh = ['ssh', '-l', self.user ] + ssh_options
        self.scp = ['scp'] + ssh_options + scp_options
        if port:
            self.ssh = self.ssh + [ '-p', port ]
            self.scp = self.scp + [ '-P', port ]

    def start(self, **kwargs):
        pass

    def stop(self, **kwargs):
        pass

    def _run(self, command, timeout=None, ignore_status=True, raw=False):
        """
        Runs command in target using SSHProcess.

        Returns (status, output); raises AssertionError on a non-zero exit
        status unless ignore_status is set.
        """
        self.logger.debug("[Running]$ %s" % " ".join(command))

        starttime = time.time()
        status, output = SSHCall(command, self.logger, timeout, raw)
        self.logger.debug("[Command returned '%d' after %.2f seconds]"
                 "" % (status, time.time() - starttime))

        if status and not ignore_status:
            raise AssertionError("Command '%s' returned non-zero exit "
                                 "status %d:\n%s" % (command, status, output))

        return (status, output)

    def run(self, command, timeout=None, ignore_status=True, raw=False):
        """
        Runs command in target.

        command:    Command to run on target.
        timeout:    <value>: Kill command after <val> seconds.
                    None: Kill command default value seconds.
                    0: No timeout, runs until return.
        """
        # Guarantee a sane PATH on minimal images before running the command.
        targetCmd = 'export PATH=/usr/sbin:/sbin:/usr/bin:/bin; %s' % command
        sshCmd = self.ssh + [self.ip, targetCmd]

        if timeout:
            processTimeout = timeout
        elif timeout==0:
            processTimeout = None
        else:
            processTimeout = self.timeout

        status, output = self._run(sshCmd, processTimeout, ignore_status, raw)
        self.logger.debug('Command: %s\nStatus: %d Output: %s\n' % (command, status, output))

        return (status, output)

    def copyTo(self, localSrc, remoteDst):
        """
        Copy file to target.

        If local file is symlink, recreate symlink in target.
        """
        if os.path.islink(localSrc):
            link = os.readlink(localSrc)
            dstDir, dstBase = os.path.split(remoteDst)
            sshCmd = 'cd %s; ln -s %s %s' % (dstDir, link, dstBase)
            return self.run(sshCmd)

        else:
            remotePath = '%s@%s:%s' % (self.user, self.ip, remoteDst)
            scpCmd = self.scp + [localSrc, remotePath]
            return self._run(scpCmd, ignore_status=False)

    def copyFrom(self, remoteSrc, localDst, warn_on_failure=False):
        """
        Copy file from target.
        """
        remotePath = '%s@%s:%s' % (self.user, self.ip, remoteSrc)
        scpCmd = self.scp + [remotePath, localDst]
        (status, output) = self._run(scpCmd, ignore_status=warn_on_failure)
        if warn_on_failure and status:
            self.logger.warning("Copy returned non-zero exit status %d:\n%s" % (status, output))
        return (status, output)

    def copyDirTo(self, localSrc, remoteDst):
        """
        Copy recursively localSrc directory to remoteDst in target.
        """
        for root, dirs, files in os.walk(localSrc):
            # Create directories in the target as needed
            for d in dirs:
                tmpDir = os.path.join(root, d).replace(localSrc, "")
                newDir = os.path.join(remoteDst, tmpDir.lstrip("/"))
                cmd = "mkdir -p %s" % newDir
                self.run(cmd)

            # Copy files into the target
            for f in files:
                tmpFile = os.path.join(root, f).replace(localSrc, "")
                dstFile = os.path.join(remoteDst, tmpFile.lstrip("/"))
                srcFile = os.path.join(root, f)
                self.copyTo(srcFile, dstFile)

    def deleteFiles(self, remotePath, files):
        """
        Deletes files in target's remotePath.
        """
        cmd = "rm"
        if not isinstance(files, list):
            files = [files]

        for f in files:
            cmd = "%s %s" % (cmd, os.path.join(remotePath, f))

        self.run(cmd)

    def deleteDir(self, remotePath):
        """
        Deletes target's remotePath directory.
        """
        cmd = "rmdir %s" % remotePath
        self.run(cmd)

    def deleteDirStructure(self, localPath, remotePath):
        """
        Delete recursively localPath structure directory in target's remotePath.

        This function is very useful to delete a package that is installed in
        the DUT and the host running the test has such package extracted in tmp
        directory.

        Example:
            pwd: /home/user/tmp
            tree:   .
                    └── work
                        ├── dir1
                        │   └── file1
                        └── dir2

            localpath = "/home/user/tmp" and remotepath = "/home/user"

            With the above variables this function will try to delete the
            directory in the DUT in this order:
                /home/user/work/dir1/file1
                /home/user/work/dir1        (if dir is empty)
                /home/user/work/dir2        (if dir is empty)
                /home/user/work             (if dir is empty)
        """
        for root, dirs, files in os.walk(localPath, topdown=False):
            # Delete files first
            tmpDir = os.path.join(root).replace(localPath, "")
            remoteDir = os.path.join(remotePath, tmpDir.lstrip("/"))
            self.deleteFiles(remoteDir, files)

            # Remove dirs if empty
            for d in dirs:
                tmpDir = os.path.join(root, d).replace(localPath, "")
                remoteDir = os.path.join(remotePath, tmpDir.lstrip("/"))
                self.deleteDir(remoteDir)
def SSHCall(command, logger, timeout=None, raw=False, **opts):
    """Run *command* in a subprocess, capturing its combined output.

    command: argv list handed to subprocess.Popen (no shell).
    logger:  logger used for debug tracing of the call.
    timeout: None  -> block until the command finishes (communicate()),
             N > 0 -> kill the command after N seconds without output.
    raw:     return the output as bytes instead of a decoded, rstripped str.
    opts:    extra keyword arguments merged into the Popen options.

    Returns a (returncode, output) tuple.
    """

    def run():
        nonlocal output
        nonlocal process
        output_raw = b''
        starttime = time.time()
        process = subprocess.Popen(command, **options)
        has_timeout = False
        if timeout:
            endtime = starttime + timeout
            eof = False
            os.set_blocking(process.stdout.fileno(), False)
            while not has_timeout and not eof:
                try:
                    logger.debug('Waiting for process output: time: %s, endtime: %s' % (time.time(), endtime))
                    if select.select([process.stdout], [], [], 5)[0] != []:
                        # wait a bit for more data, tries to avoid reading single characters
                        time.sleep(0.2)
                        data = process.stdout.read()
                        if not data:
                            eof = True
                        else:
                            output_raw += data
                            # ignore errors to capture as much as possible
                            logger.debug('Partial data from SSH call:\n%s' % data.decode('utf-8', errors='ignore'))
                            # fresh output resets the inactivity deadline
                            endtime = time.time() + timeout
                except InterruptedError:
                    logger.debug('InterruptedError')
                    continue
                except BlockingIOError:
                    logger.debug('BlockingIOError')
                    continue

                if time.time() >= endtime:
                    logger.debug('SSHCall has timeout! Time: %s, endtime: %s' % (time.time(), endtime))
                    has_timeout = True

            process.stdout.close()

            # process hasn't returned yet
            if not eof:
                process.terminate()
                time.sleep(5)
                try:
                    process.kill()
                except OSError:
                    logger.debug('OSError when killing process')
                    pass
                endtime = time.time() - starttime
                lastline = ("\nProcess killed - no output for %d seconds. Total"
                            " running time: %d seconds." % (timeout, endtime))
                logger.debug('Received data from SSH call:\n%s ' % lastline)
                # NOTE(review): lastline is overwritten by the decode below
                # and never reaches the caller - looks unintended, confirm
                # against upstream before changing.
                output += lastline
            process.wait()

        else:
            output_raw = process.communicate()[0]

        output = output_raw if raw else output_raw.decode('utf-8', errors='ignore')
        logger.debug('Data from SSH call:\n%s' % output.rstrip())

        # timout or not, make sure process exits and is not hanging
        if process.returncode is None:
            try:
                process.wait(timeout=5)
            # Fix: TimeoutExpired lives in the subprocess module; the bare
            # name raised NameError here and masked the real timeout.
            except subprocess.TimeoutExpired:
                try:
                    process.kill()
                except OSError:
                    logger.debug('OSError')
                    pass
                process.wait()

        if has_timeout:
            # Version of openssh before 8.6_p1 returns error code 0 when killed
            # by a signal, when the timeout occurs we will receive a 0 error
            # code because the process is been terminated and it's wrong because
            # that value means success, but the process timed out.
            # Afterwards, from version 8.6_p1 onwards, the returned code is 255.
            # Fix this behaviour by checking the return code
            if process.returncode == 0:
                process.returncode = 255

    options = {
        "stdout": subprocess.PIPE,
        "stderr": subprocess.STDOUT if not raw else None,
        "stdin": None,
        "shell": False,
        "bufsize": -1,
        "start_new_session": True,
    }
    options.update(opts)
    output = ''
    process = None

    # Unset DISPLAY which means we won't trigger SSH_ASKPASS
    env = os.environ.copy()
    if "DISPLAY" in env:
        del env['DISPLAY']
    options['env'] = env

    try:
        run()
    except:
        # Need to guard against a SystemExit or other exception ocurring
        # whilst running and ensure we don't leave a process behind.
        if process.poll() is None:
            process.kill()
        if process.returncode is None:
            process.wait()
        logger.debug('Something went wrong, killing SSH process')
        raise
    return (process.returncode, output if raw else output.rstrip())

View File

@@ -0,0 +1,23 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
from oeqa.core.case import OETestCase
from oeqa.core.decorator import OETestTag
from oeqa.core.decorator.data import OETestDataDepends
class DataTest(OETestCase):
    """Fixture for the test-data (td) meta-tests: IMAGE and ARCH are always
    required; testDataOk additionally requires MACHINE."""

    data_vars = ['IMAGE', 'ARCH']

    @OETestDataDepends(['MACHINE',])
    @OETestTag('dataTestOk')
    def testDataOk(self):
        # Passes only for the exact td values the meta-tests supply.
        self.assertEqual(self.td.get('IMAGE'), 'core-image-minimal')
        self.assertEqual(self.td.get('ARCH'), 'x86')
        self.assertEqual(self.td.get('MACHINE'), 'qemuarm')

    @OETestTag('dataTestFail')
    def testDataFail(self):
        pass

View File

@@ -0,0 +1,41 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
from oeqa.core.case import OETestCase
from oeqa.core.decorator.depends import OETestDepends
class DependsTest(OETestCase):
    """Fixture exercising OETestDepends: a linear chain (First -> Second ->
    Third/Fourth -> Fifth) plus a deliberate three-way dependency cycle."""

    def testDependsFirst(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsFirst'])
    def testDependsSecond(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsSecond'])
    def testDependsThird(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsSecond'])
    def testDependsFourth(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsThird', 'testDependsFourth'])
    def testDependsFifth(self):
        self.assertTrue(True, msg='How is this possible?')

    # The next three methods form a cycle on purpose; the loader is
    # expected to reject them with an OEQADependency error.
    @OETestDepends(['testDependsCircular3'])
    def testDependsCircular1(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsCircular1'])
    def testDependsCircular2(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETestDepends(['testDependsCircular2'])
    def testDependsCircular3(self):
        self.assertTrue(True, msg='How is this possible?')

View File

@@ -0,0 +1,12 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
from oeqa.core.case import OETestCase
class AnotherTest(OETestCase):
    """Trivially passing case used by the loader multi-path meta-test."""

    def testAnother(self):
        self.assertTrue(True, msg='How is this possible?')

View File

@@ -0,0 +1,38 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
from oeqa.core.case import OETestCase
from oeqa.core.decorator import OETestTag
class TagTest(OETestCase):
    """Fixture with per-method tags (and one untagged method) for the
    OETestTag filtering meta-tests."""

    @OETestTag('goodTag')
    def testTagGood(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETestTag('otherTag')
    def testTagOther(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETestTag('otherTag', 'multiTag')
    def testTagOtherMulti(self):
        self.assertTrue(True, msg='How is this possible?')

    def testTagNone(self):
        self.assertTrue(True, msg='How is this possible?')
# Class-level tag: the filtering meta-tests expect every method of this
# class to carry 'classTag' in addition to its own tags.
@OETestTag('classTag')
class TagClassTest(OETestCase):
    @OETestTag('otherTag')
    def testTagOther(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETestTag('otherTag', 'multiTag')
    def testTagOtherMulti(self):
        self.assertTrue(True, msg='How is this possible?')

    def testTagNone(self):
        self.assertTrue(True, msg='How is this possible?')

View File

@@ -0,0 +1,34 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
from time import sleep
from oeqa.core.case import OETestCase
from oeqa.core.decorator.oetimeout import OETimeout
from oeqa.core.decorator.depends import OETestDepends
class TimeoutTest(OETestCase):
    """Fixture for the OETimeout meta-tests: a passing case, a case that
    sleeps past its budget, a skipped dependency, and a slow untimed case."""

    @OETimeout(1)
    def testTimeoutPass(self):
        self.assertTrue(True, msg='How is this possible?')

    @OETimeout(1)
    def testTimeoutFail(self):
        # Sleeps longer than the 1s budget, so the timeout must fire.
        sleep(2)
        self.assertTrue(True, msg='How is this possible?')

    def testTimeoutSkip(self):
        self.skipTest("This test needs to be skipped, so that testTimeoutDepends()'s OETestDepends kicks in")

    @OETestDepends(["timeout.TimeoutTest.testTimeoutSkip"])
    @OETimeout(3)
    def testTimeoutDepends(self):
        self.assertTrue(False, msg='How is this possible?')

    def testTimeoutUnrelated(self):
        # Slow but untimed; must still complete once earlier timeouts have
        # been cancelled (SIGALRM restored).
        sleep(6)

View File

@@ -0,0 +1,38 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
# Standard library imports, sorted; 'os' was previously imported twice.
import logging
import os
import sys
import unittest

# Shared module-level logger used by all oeqa self-tests: INFO level,
# console output prefixed with 'OEQATest:'.
logger = logging.getLogger("oeqa")
logger.setLevel(logging.INFO)
consoleHandler = logging.StreamHandler()
formatter = logging.Formatter('OEQATest: %(message)s')
consoleHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
def setup_sys_path():
    """Prepend the oeqa package root (three levels up) to sys.path once."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    oeqa_root = os.path.realpath(os.path.join(this_dir, '../../../'))
    if oeqa_root not in sys.path:
        sys.path.insert(0, oeqa_root)
class TestBase(unittest.TestCase):
    """Common base for the oeqa framework self-tests."""

    def setUp(self):
        # Share the module logger and locate the sample 'cases' directory
        # that ships next to this file.
        self.logger = logger
        base_dir = os.path.dirname(os.path.abspath(__file__))
        self.cases_path = os.path.join(base_dir, 'cases')

    def _testLoader(self, d={}, modules=[], tests=[], **kwargs):
        """Build an OETestContext and load the requested cases into it."""
        from oeqa.core.context import OETestContext
        context = OETestContext(d, self.logger)
        context.loadTests(self.cases_path, modules=modules, tests=tests,
                          **kwargs)
        return context

View File

@@ -0,0 +1,55 @@
#!/usr/bin/env python3
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import unittest
import logging
import os
from common import setup_sys_path, TestBase
setup_sys_path()
from oeqa.core.exception import OEQAMissingVariable
from oeqa.core.utils.test import getCaseMethod, getSuiteCasesNames
class TestData(TestBase):
    """Meta-tests for OETestContext test-data (td) handling."""

    modules = ['data']

    def test_data_fail_missing_variable(self):
        """Loading cases without the required td variables must error out."""
        expectedException = "oeqa.core.exception.OEQAMissingVariable"

        tc = self._testLoader(modules=self.modules)
        results = tc.runTests()
        self.assertFalse(results.wasSuccessful())
        # Every collected error must stem from the missing-variable exception.
        for _test, data in results.errors:
            self.assertTrue(expectedException in data)

    def test_data_fail_wrong_variable(self):
        """Wrong td values must surface as assertion failures."""
        expectedError = 'AssertionError'
        d = {'IMAGE' : 'core-image-weston', 'ARCH' : 'arm'}

        tc = self._testLoader(d=d, modules=self.modules)
        results = tc.runTests()
        self.assertFalse(results.wasSuccessful())
        for _test, data in results.failures:
            self.assertTrue(expectedError in data)

    def test_data_ok(self):
        """Correct td values make the data cases pass."""
        d = {'IMAGE' : 'core-image-minimal', 'ARCH' : 'x86', 'MACHINE' : 'qemuarm'}
        tc = self._testLoader(d=d, modules=self.modules)
        self.assertEqual(True, tc.runTests().wasSuccessful())

if __name__ == '__main__':
    unittest.main()

View File

@@ -0,0 +1,143 @@
#!/usr/bin/env python3
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import signal
import unittest
from common import setup_sys_path, TestBase
setup_sys_path()
from oeqa.core.exception import OEQADependency
from oeqa.core.utils.test import getCaseMethod, getSuiteCasesNames, getSuiteCasesIDs
class TestTagDecorator(TestBase):
    """Meta-tests for OETestTag filtering via the loader's tags_filter."""

    def _runTest(self, modules, filterfn, expect):
        """Load modules with filterfn as tags_filter and compare the set of
        loaded test ids against the expected set."""
        tc = self._testLoader(modules = modules, tags_filter = filterfn)
        test_loaded = set(getSuiteCasesIDs(tc.suites))
        self.assertEqual(expect, test_loaded)

    def test_oetag(self):
        # get all cases without any filtering
        self._runTest(['oetag'], None, {
            'oetag.TagTest.testTagGood',
            'oetag.TagTest.testTagOther',
            'oetag.TagTest.testTagOtherMulti',
            'oetag.TagTest.testTagNone',
            'oetag.TagClassTest.testTagOther',
            'oetag.TagClassTest.testTagOtherMulti',
            'oetag.TagClassTest.testTagNone',
            })

        # exclude any case with tags
        self._runTest(['oetag'], lambda tags: tags, {
            'oetag.TagTest.testTagNone',
            })

        # exclude any case with otherTag
        self._runTest(['oetag'], lambda tags: "otherTag" in tags, {
            'oetag.TagTest.testTagGood',
            'oetag.TagTest.testTagNone',
            'oetag.TagClassTest.testTagNone',
            })

        # exclude any case with classTag
        self._runTest(['oetag'], lambda tags: "classTag" in tags, {
            'oetag.TagTest.testTagGood',
            'oetag.TagTest.testTagOther',
            'oetag.TagTest.testTagOtherMulti',
            'oetag.TagTest.testTagNone',
            })

        # include any case with classTag
        self._runTest(['oetag'], lambda tags: "classTag" not in tags, {
            'oetag.TagClassTest.testTagOther',
            'oetag.TagClassTest.testTagOtherMulti',
            'oetag.TagClassTest.testTagNone',
            })

        # include any case with classTag or no tags
        self._runTest(['oetag'], lambda tags: tags and "classTag" not in tags, {
            'oetag.TagTest.testTagNone',
            'oetag.TagClassTest.testTagOther',
            'oetag.TagClassTest.testTagOtherMulti',
            'oetag.TagClassTest.testTagNone',
            })
class TestDependsDecorator(TestBase):
    """Meta-tests for OETestDepends ordering and error reporting."""

    modules = ['depends']

    def test_depends_order(self):
        """Dependencies must order the cases; Third/Fourth may swap since
        they are independent of each other."""
        tests = ['depends.DependsTest.testDependsFirst',
                 'depends.DependsTest.testDependsSecond',
                 'depends.DependsTest.testDependsThird',
                 'depends.DependsTest.testDependsFourth',
                 'depends.DependsTest.testDependsFifth']
        tests2 = list(tests)
        tests2[2], tests2[3] = tests[3], tests[2]
        tc = self._testLoader(modules=self.modules, tests=tests)
        test_loaded = getSuiteCasesIDs(tc.suites)
        result = True if test_loaded == tests or test_loaded == tests2 else False
        msg = 'Failed to order tests using OETestDepends decorator.\nTest order:'\
              ' %s.\nExpected: %s\nOr: %s' % (test_loaded, tests, tests2)
        self.assertTrue(result, msg=msg)

    def test_depends_fail_missing_dependency(self):
        """Requesting a case without its dependency must raise OEQADependency."""
        expect = "TestCase depends.DependsTest.testDependsSecond depends on "\
                 "depends.DependsTest.testDependsFirst and isn't available"
        tests = ['depends.DependsTest.testDependsSecond']
        try:
            # Must throw OEQADependency because missing 'testDependsFirst'
            tc = self._testLoader(modules=self.modules, tests=tests)
            self.fail('Expected OEQADependency exception')
        except OEQADependency as e:
            result = True if expect in str(e) else False
            msg = 'Expected OEQADependency exception missing testDependsFirst test'
            self.assertTrue(result, msg=msg)

    def test_depends_fail_circular_dependency(self):
        """A dependency cycle must be detected and raised as OEQADependency."""
        expect = 'have a circular dependency'
        tests = ['depends.DependsTest.testDependsCircular1',
                 'depends.DependsTest.testDependsCircular2',
                 'depends.DependsTest.testDependsCircular3']
        try:
            # Must throw OEQADependency because circular dependency
            tc = self._testLoader(modules=self.modules, tests=tests)
            self.fail('Expected OEQADependency exception')
        except OEQADependency as e:
            result = True if expect in str(e) else False
            msg = 'Expected OEQADependency exception having a circular dependency'
            self.assertTrue(result, msg=msg)
class TestTimeoutDecorator(TestBase):
    """Meta-tests for OETimeout: firing, not firing, and SIGALRM restore."""

    modules = ['timeout']

    def test_timeout(self):
        """A fast test under OETimeout passes and SIGALRM is restored."""
        tests = ['timeout.TimeoutTest.testTimeoutPass']
        msg = 'Failed to run test using OETestTimeout'
        alarm_signal = signal.getsignal(signal.SIGALRM)
        tc = self._testLoader(modules=self.modules, tests=tests)
        self.assertTrue(tc.runTests().wasSuccessful(), msg=msg)
        msg = "OETestTimeout didn't restore SIGALRM"
        self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)

    def test_timeout_fail(self):
        """A test sleeping past its budget must fail, with SIGALRM restored."""
        tests = ['timeout.TimeoutTest.testTimeoutFail']
        msg = "OETestTimeout test didn't timeout as expected"
        alarm_signal = signal.getsignal(signal.SIGALRM)
        tc = self._testLoader(modules=self.modules, tests=tests)
        self.assertFalse(tc.runTests().wasSuccessful(), msg=msg)
        msg = "OETestTimeout didn't restore SIGALRM"
        self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)

    def test_timeout_cancel(self):
        """A skipped dependency cancels the timeout; the slow unrelated test
        must still complete successfully."""
        tests = ['timeout.TimeoutTest.testTimeoutSkip', 'timeout.TimeoutTest.testTimeoutDepends', 'timeout.TimeoutTest.testTimeoutUnrelated']
        msg = 'Unrelated test failed to complete'
        tc = self._testLoader(modules=self.modules, tests=tests)
        self.assertTrue(tc.runTests().wasSuccessful(), msg=msg)

if __name__ == '__main__':
    unittest.main()

View File

@@ -0,0 +1,63 @@
#!/usr/bin/env python3
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import os
import unittest
from common import setup_sys_path, TestBase
setup_sys_path()
from oeqa.core.exception import OEQADependency
from oeqa.core.utils.test import getSuiteModules, getSuiteCasesIDs
class TestLoader(TestBase):
    """Tests for the OEQA test loader: duplicate module detection,
    module/case filtering, and importing cases from multiple paths."""

    @unittest.skip("invalid directory is missing oetag.py")
    def test_fail_duplicated_module(self):
        saved_paths = self.cases_path
        invalid_path = os.path.join(saved_paths, 'loader', 'invalid')
        self.cases_path = [saved_paths, invalid_path]
        expect = 'Duplicated oetag module found in'
        msg = 'Expected ImportError exception for having duplicated module'
        try:
            # Must raise ImportError because of the duplicated module
            self._testLoader()
            self.fail(msg)
        except ImportError as e:
            self.assertTrue(expect in str(e), msg=msg)
        finally:
            # Always restore the original search path for later tests.
            self.cases_path = saved_paths

    def test_filter_modules(self):
        expected_modules = {'oetag'}
        loader_tc = self._testLoader(modules=expected_modules)
        found = getSuiteModules(loader_tc.suites)
        self.assertEqual(found, expected_modules,
                msg='Expected just %s modules' % ', '.join(expected_modules))

    def test_filter_cases(self):
        expected_cases = {'data.DataTest.testDataOk',
                          'oetag.TagTest.testTagGood'}
        loader_tc = self._testLoader(modules=['oetag', 'data'],
                                     tests=expected_cases)
        found = set(getSuiteCasesIDs(loader_tc.suites))
        self.assertEqual(found, expected_cases,
                msg='Expected just %s cases' % ', '.join(expected_cases))

    def test_import_from_paths(self):
        saved_paths = self.cases_path
        extra_path = os.path.join(saved_paths, 'loader', 'valid')
        expected_modules = {'another'}
        self.cases_path = [saved_paths, extra_path]
        loader_tc = self._testLoader(modules=expected_modules)
        found = getSuiteModules(loader_tc.suites)
        self.cases_path = saved_paths
        self.assertEqual(found, expected_modules,
                msg='Expected modules from two different paths')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env python3
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import unittest
import logging
import tempfile
from common import setup_sys_path, TestBase
setup_sys_path()
from oeqa.core.runner import OEStreamLogger
class TestRunner(TestBase):
    """Tests for oeqa.core.runner helpers."""

    def test_stream_logger(self):
        """OEStreamLogger must forward every written line, including very
        long ones, to the underlying logging stream unmangled.

        The temporary file is managed with a context manager so it is
        closed even if an assertion fails (the original leaked the file
        descriptor on failure because fp.close() was only reached on
        success).
        """
        with tempfile.TemporaryFile(mode='w+') as fp:
            logging.basicConfig(format='%(message)s', stream=fp)
            logger = logging.getLogger()
            logger.setLevel(logging.INFO)
            oeSL = OEStreamLogger(logger)
            # Mix short lines with multi-hundred-KB lines to exercise the
            # logger's internal buffering.
            lines = ['init', 'bigline_' * 65535, 'morebigline_' * 65535 * 4, 'end']
            for line in lines:
                oeSL.write(line)
            fp.seek(0)
            fp_lines = fp.readlines()
            for i, fp_line in enumerate(fp_lines):
                fp_line = fp_line.strip()
                self.assertEqual(lines[i], fp_line)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()

View File

@@ -0,0 +1,336 @@
#!/usr/bin/env python3
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Modified for use in OE by Richard Purdie, 2018
#
# Modified by: Corey Goldberg, 2013
# License: GPLv2+
#
# Original code from:
# Bazaar (bzrlib.tests.__init__.py, v2.6, copied Jun 01 2013)
# Copyright (C) 2005-2011 Canonical Ltd
# License: GPLv2+
import os
import sys
import traceback
import unittest
import subprocess
import testtools
import threading
import time
import io
import json
import subunit
from queue import Queue
from itertools import cycle
from subunit import ProtocolTestCase, TestProtocolClient
from subunit.test_results import AutoTimingTestResultDecorator
from testtools import ThreadsafeForwardingResult, iterate_tests
from testtools.content import Content
from testtools.content_type import ContentType
from oeqa.utils.commands import get_test_layer
import bb.utils
import oe.path
# Public API of this module. The original spelled this "_all__" (single
# leading underscore), which defines a private module attribute and has
# no effect on star-imports; "__all__" is the intended dunder.
__all__ = [
    'ConcurrentTestSuite',
    'fork_for_tests',
    'partition_tests',
]
#
# Patch the version from testtools to allow access to _test_start and allow
# computation of timing information and threading progress
#
class BBThreadsafeForwardingResult(ThreadsafeForwardingResult):
    """Patched version of testtools' ThreadsafeForwardingResult that also
    records per-test start times and per-thread progress information on
    the final (shared) result object.

    NOTE(review): relies on testtools internals (_test_start,
    _add_result_with_semaphore, _stderr_buffer/_stdout_buffer) — keep in
    sync with the testtools version in use.
    """
    def __init__(self, target, semaphore, threadnum, totalinprocess, totaltests, output, finalresult):
        super(BBThreadsafeForwardingResult, self).__init__(target, semaphore)
        # Index of the worker thread/process this result forwards for.
        self.threadnum = threadnum
        # Number of tests assigned to this worker.
        self.totalinprocess = totalinprocess
        # Number of tests across all workers.
        self.totaltests = totaltests
        self.buffer = True
        # BytesIO capturing the child's raw stdout/stderr.
        self.outputbuf = output
        # The shared result all workers merge into.
        self.finalresult = finalresult
        self.finalresult.buffer = True
        self.target = target

    def _add_result_with_semaphore(self, method, test, *args, **kwargs):
        # Record timing/progress bookkeeping under the semaphore so the
        # shared dictionaries on self.result are not mutated concurrently.
        self.semaphore.acquire()
        try:
            if self._test_start:
                self.result.starttime[test.id()] = self._test_start.timestamp()
                self.result.threadprogress[self.threadnum].append(test.id())
                totalprogress = sum(len(x) for x in self.result.threadprogress.values())
                # Human-readable progress line:
                # "<thread>: <done-in-thread>/<thread-total> <done-overall>/<overall-total> (<secs>s) (<fails> failed) (<test id>)"
                self.result.progressinfo[test.id()] = "%s: %s/%s %s/%s (%ss) (%s failed) (%s)" % (
                        self.threadnum,
                        len(self.result.threadprogress[self.threadnum]),
                        self.totalinprocess,
                        totalprogress,
                        self.totaltests,
                        "{0:.2f}".format(time.time()-self._test_start.timestamp()),
                        self.target.failed_tests,
                        test.id())
        finally:
            self.semaphore.release()
        # Expose the child's captured output as the final result's buffered
        # stderr so failure reports include it; stdout is left empty.
        self.finalresult._stderr_buffer = io.StringIO(initial_value=self.outputbuf.getvalue().decode("utf-8"))
        self.finalresult._stdout_buffer = io.StringIO()
        super(BBThreadsafeForwardingResult, self)._add_result_with_semaphore(method, test, *args, **kwargs)
class ProxyTestResult:
    """A very basic TestResult proxy.

    Forwards the add* calls to a wrapped result via the _addResult hook
    (which subclasses override to rewrite arguments) while keeping its
    own count of failed tests.
    """

    def __init__(self, target):
        self.result = target
        self.failed_tests = 0

    def _addResult(self, method, test, *args, exception=False, **kwargs):
        """Hook point for subclasses; by default forward unchanged."""
        return method(test, *args, **kwargs)

    def _record_failure(self, method, test, err, **kwargs):
        """Count a failing outcome, then forward it with exception=True."""
        self.failed_tests += 1
        self._addResult(method, test, err, exception=True, **kwargs)

    def addError(self, test, err=None, **kwargs):
        self._record_failure(self.result.addError, test, err, **kwargs)

    def addFailure(self, test, err=None, **kwargs):
        self._record_failure(self.result.addFailure, test, err, **kwargs)

    def addSuccess(self, test, **kwargs):
        self._addResult(self.result.addSuccess, test, **kwargs)

    def addExpectedFailure(self, test, err=None, **kwargs):
        self._addResult(self.result.addExpectedFailure, test, err, exception=True, **kwargs)

    def addUnexpectedSuccess(self, test, **kwargs):
        self._addResult(self.result.addUnexpectedSuccess, test, **kwargs)

    def wasSuccessful(self):
        # Success is judged purely by what passed through this proxy.
        return self.failed_tests == 0

    def __getattr__(self, attr):
        # Anything we don't define is delegated to the wrapped result.
        return getattr(self.result, attr)
class ExtraResultsDecoderTestResult(ProxyTestResult):
    """Decodes a JSON 'extraresults' Content object (produced by
    ExtraResultsEncoderTestResult on the child side) back into a plain
    Python object before forwarding the result."""
    def _addResult(self, method, test, *args, exception=False, **kwargs):
        if "details" in kwargs and "extraresults" in kwargs["details"]:
            payload = kwargs["details"]["extraresults"]
            if isinstance(payload, Content):
                # Shallow-copy so we don't mutate the caller's dicts.
                kwargs = kwargs.copy()
                kwargs["details"] = kwargs["details"].copy()
                raw = b"".join(payload.iter_bytes())
                kwargs["details"]["extraresults"] = json.loads(raw.decode())
        return method(test, *args, **kwargs)
class ExtraResultsEncoderTestResult(ProxyTestResult):
    """Encodes a test's ``extraresults`` attribute as JSON Content in the
    result's ``details`` dict so it survives the subunit stream to the
    parent process (where ExtraResultsDecoderTestResult reverses this)."""
    def _addResult(self, method, test, *args, exception = False, **kwargs):
        if hasattr(test, "extraresults"):
            # Lazy callable: testtools Content pulls the bytes on demand.
            extras = lambda : [json.dumps(test.extraresults).encode()]
            # Shallow-copy so we don't mutate the caller's dicts.
            kwargs = kwargs.copy()
            if "details" not in kwargs:
                kwargs["details"] = {}
            else:
                kwargs["details"] = kwargs["details"].copy()
            kwargs["details"]["extraresults"] = Content(ContentType("application", "json", {'charset': 'utf8'}), extras)
        # if using details, need to encode any exceptions into the details obj,
        # testtools does not handle "err" and "details" together.
        if "details" in kwargs and exception and (len(args) >= 1 and args[0] is not None):
            kwargs["details"]["traceback"] = testtools.content.TracebackContent(args[0], test)
            args = []
        return method(test, *args, **kwargs)
#
# We have to patch subunit since it doesn't understand how to handle addError
# outside of a running test case. This can happen if classSetUp() fails
# for a class of tests. This unfortunately has horrible internal knowledge.
#
def outSideTestaddError(self, offset, line):
    """An 'error:' directive has been read.

    Replacement for subunit._OutSideTest.addError: when an 'error:' line
    arrives outside a running test (e.g. a setUpClass() failure in a
    child process), synthesize a RemotedTestCase for it instead of
    dropping the result.

    NOTE(review): depends on subunit parser internals (_current_test,
    _state, _reading_error_details) — keep in sync with the subunit
    version in use.
    """
    test_name = line[offset:-1].decode('utf8')
    self.parser._current_test = subunit.RemotedTestCase(test_name)
    self.parser.current_test_description = test_name
    self.parser._state = self.parser._reading_error_details
    self.parser._reading_error_details.set_simple()
    self.parser.subunitLineReceived(line)

# Monkey-patch subunit so out-of-test errors are reported, not lost.
subunit._OutSideTest.addError = outSideTestaddError
# Like outSideTestaddError above, we need an equivalent for skips
# happening at the setUpClass() level, otherwise we will see "UNKNOWN"
# as a result for concurrent tests
#
def outSideTestaddSkip(self, offset, line):
    """A 'skip:' directive has been read.

    Same idea as outSideTestaddError but for skips raised at the
    setUpClass() level; without this, such tests show up as "UNKNOWN"
    when run concurrently.

    NOTE(review): depends on subunit parser internals (_current_test,
    _state, _reading_skip_details) — keep in sync with the subunit
    version in use.
    """
    test_name = line[offset:-1].decode('utf8')
    self.parser._current_test = subunit.RemotedTestCase(test_name)
    self.parser.current_test_description = test_name
    self.parser._state = self.parser._reading_skip_details
    self.parser._reading_skip_details.set_simple()
    self.parser.subunitLineReceived(line)

# Monkey-patch subunit so out-of-test skips are reported, not "UNKNOWN".
subunit._OutSideTest.addSkip = outSideTestaddSkip
#
# A dummy structure to add to io.StringIO so that the .buffer object
# is available and accepts writes. This allows unittest with buffer=True
# to interact ok with subunit which wants to access sys.stdout.buffer.
#
class dummybuf(object):
    """Minimal stand-in for a stream's .buffer attribute.

    Accepts bytes and forwards them, decoded as UTF-8, to the wrapped
    text stream. This lets unittest with buffer=True coexist with
    subunit, which wants to write to sys.stdout.buffer.
    """
    def __init__(self, parent):
        self._parent = parent

    def write(self, data):
        self._parent.write(data.decode("utf-8"))
#
# Taken from testtools.ConncurrencyTestSuite but modified for OE use
#
class ConcurrentTestSuite(unittest.TestSuite):
    """A TestSuite that runs its tests in multiple forked child processes.

    Adapted from testtools' ConcurrentTestSuite for OE: children are set
    up and torn down via setupfunc/removefunc, and their subunit streams
    are replayed into the shared result by one reader thread per child.
    """
    def __init__(self, suite, processes, setupfunc, removefunc, bb_vars):
        super(ConcurrentTestSuite, self).__init__([suite])
        # Number of child processes to fork.
        self.processes = processes
        # Callable setting up a per-child build dir; returns (builddir, newbuilddir).
        self.setupfunc = setupfunc
        # Callable removing a child's build dir after a successful run.
        self.removefunc = removefunc
        self.bb_vars = bb_vars

    def run(self, result):
        """Fork the children, then read each child's subunit stream in its
        own thread, forwarding outcomes into *result*."""
        testservers, totaltests = fork_for_tests(self.processes, self)
        try:
            threads = {}
            queue = Queue()
            # Serializes forwarding into the shared result object.
            semaphore = threading.Semaphore(1)
            result.threadprogress = {}
            for i, (testserver, testnum, output) in enumerate(testservers):
                result.threadprogress[i] = []
                process_result = BBThreadsafeForwardingResult(
                        ExtraResultsDecoderTestResult(result),
                        semaphore, i, testnum, totaltests, output, result)
                reader_thread = threading.Thread(
                        target=self._run_test, args=(testserver, process_result, queue))
                threads[testserver] = reader_thread, process_result
                reader_thread.start()
            # Join reader threads as their children finish.
            while threads:
                finished_test = queue.get()
                threads[finished_test][0].join()
                del threads[finished_test]
        except:
            # Ask any still-running forwarders to stop, then re-raise.
            for thread, process_result in threads.values():
                process_result.stop()
            raise
        finally:
            # Close the pipe streams from all children.
            # NOTE(review): reaches into ProtocolTestCase._stream (private).
            for testserver in testservers:
                testserver[0]._stream.close()

    def _run_test(self, testserver, process_result, queue):
        """Thread body: replay one child's subunit stream into
        *process_result*, always signalling completion via *queue*."""
        try:
            try:
                testserver.run(process_result)
            except Exception:
                # The run logic itself failed
                case = testtools.ErrorHolder(
                    "broken-runner",
                    error=sys.exc_info())
                case.run(process_result)
        finally:
            queue.put(testserver)
def fork_for_tests(concurrency_num, suite):
    """Fork up to *concurrency_num* child processes, each running a share
    of the tests in *suite*.

    Returns (testservers, totaltests) where each testservers entry is a
    tuple (ProtocolTestCase reading that child's subunit stream, number
    of tests given to the child, BytesIO capturing its stdout/stderr).

    Children stream results back over a pipe using the subunit protocol
    and always leave via os._exit() so they never return into the
    parent's stack.
    """
    testservers = []
    # Default selftestdir so setupfunc always receives a defined value;
    # previously this was only assigned when BUILDDIR was set, leaving a
    # NameError if run outside a build environment.
    selftestdir = None
    if 'BUILDDIR' in os.environ:
        selftestdir = get_test_layer(suite.bb_vars['BBLAYERS'])
    test_blocks = partition_tests(suite, concurrency_num)
    # Clear the tests from the original suite so it doesn't keep them alive
    suite._tests[:] = []
    totaltests = sum(len(x) for x in test_blocks)
    for process_tests in test_blocks:
        numtests = len(process_tests)
        process_suite = unittest.TestSuite(process_tests)
        # Also clear each split list so new suite has only reference
        process_tests[:] = []
        c2pread, c2pwrite = os.pipe()
        # Clear buffers before fork to avoid duplicate output
        sys.stdout.flush()
        sys.stderr.flush()
        pid = os.fork()
        if pid == 0:
            # Child process: run our share and report over the pipe.
            ourpid = os.getpid()
            try:
                newbuilddir = None
                stream = os.fdopen(c2pwrite, 'wb')
                os.close(c2pread)
                (builddir, newbuilddir) = suite.setupfunc("-st-" + str(ourpid), selftestdir, process_suite)
                # Leave stderr and stdout open so we can see test noise
                # Close stdin so that the child goes away if it decides to
                # read from stdin (otherwise its a roulette to see what
                # child actually gets keystrokes for pdb etc).
                newsi = os.open(os.devnull, os.O_RDWR)
                os.dup2(newsi, sys.stdin.fileno())
                # Send stdout/stderr over the stream
                os.dup2(c2pwrite, sys.stdout.fileno())
                os.dup2(c2pwrite, sys.stderr.fileno())
                subunit_client = TestProtocolClient(stream)
                subunit_result = AutoTimingTestResultDecorator(subunit_client)
                unittest_result = process_suite.run(ExtraResultsEncoderTestResult(subunit_result))
                # A test may itself have forked; only the original child
                # may continue past this point.
                if ourpid != os.getpid():
                    os._exit(0)
                if newbuilddir and unittest_result.wasSuccessful():
                    suite.removefunc(newbuilddir)
            except:
                # Don't do anything with process children
                if ourpid != os.getpid():
                    os._exit(1)
                # Try and report traceback on stream, but exit with error
                # even if stream couldn't be created or something else
                # goes wrong. The traceback is formatted to a string and
                # written in one go to avoid interleaving lines from
                # multiple failing children.
                try:
                    stream.write(traceback.format_exc().encode('utf-8'))
                except:
                    sys.stderr.write(traceback.format_exc())
                finally:
                    if newbuilddir:
                        suite.removefunc(newbuilddir)
                    stream.flush()
                    os._exit(1)
            stream.flush()
            os._exit(0)
        else:
            # Parent: keep the read end and wrap it in a subunit reader.
            os.close(c2pwrite)
            stream = os.fdopen(c2pread, 'rb')
            # Collect stdout/stderr into an io buffer
            output = io.BytesIO()
            testserver = ProtocolTestCase(stream, passthrough=output)
            testservers.append((testserver, numtests, output))
    return testservers, totaltests
def partition_tests(suite, count):
    """Split the tests in *suite* into at most *count* lists.

    Tests from the same class stay together, while classes from the same
    module may land in different partitions to aid parallelisation.
    Empty partitions are dropped.
    """
    # Group the tests by "module.ClassName".
    groups = {}
    for test in iterate_tests(suite):
        key = test.__module__ + "." + test.__class__.__name__
        groups.setdefault(key, []).append(test)
    # Deal the class groups round-robin onto the requested partitions.
    partitions = [[] for _ in range(count)]
    for partition, key in zip(cycle(partitions), groups):
        partition.extend(groups[key])
    # No point in empty threads so drop them
    return [p for p in partitions if p]

View File

@@ -0,0 +1,22 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import os
import sys
def findFile(file_name, directory):
    """
    Walk *directory* recursively and return the full path of the first
    entry named *file_name*, or None when no such file exists.
    """
    for root, _, files in os.walk(directory):
        if file_name in files:
            return os.path.join(root, file_name)
    return None
def remove_safe(path):
    """Remove *path*, silently ignoring the case where it does not exist.

    Uses EAFP (try/except) instead of an exists() pre-check so there is
    no window between check and removal in which another process could
    delete the file (TOCTOU race); this also removes dangling symlinks,
    for which os.path.exists() returns False. Other errors (path is a
    directory, permission denied, ...) still propagate.
    """
    try:
        os.remove(path)
    except FileNotFoundError:
        pass

View File

@@ -0,0 +1,89 @@
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import os
import inspect
import unittest
def getSuiteCases(suite):
    """
    Returns the individual test cases of a suite, flattening any nested
    suites; a bare TestCase yields a one-element list, anything else an
    empty list.
    """
    if isinstance(suite, unittest.TestCase):
        return [suite]
    cases = []
    if isinstance(suite, unittest.suite.TestSuite):
        for entry in suite:
            cases.extend(getSuiteCases(entry))
    return cases
def getSuiteModules(suite):
    """
    Returns the set of module names of the test cases in a suite.
    """
    return {getCaseModule(case) for case in getSuiteCases(suite)}
def getSuiteCasesInfo(suite, func):
    """
    Returns per-test-case info from suite; each entry is produced by
    applying func to a case.
    """
    return [func(case) for case in getSuiteCases(suite)]
def getSuiteCasesNames(suite):
    """
    Returns the test method names of the cases in a suite.
    """
    return getSuiteCasesInfo(suite, getCaseMethod)
def getSuiteCasesIDs(suite):
    """
    Returns the complete dotted ids of the cases in a suite.
    """
    return getSuiteCasesInfo(suite, getCaseID)
def getSuiteCasesFiles(suite):
    """
    Returns the source file paths of the cases in a suite.
    """
    return getSuiteCasesInfo(suite, getCaseFile)
def getCaseModule(test_case):
    """
    Returns the name of the module the test case's class was defined in.
    """
    return test_case.__module__
def getCaseClass(test_case):
    """
    Returns the test case's class name.
    """
    return test_case.__class__.__name__
def getCaseID(test_case):
    """
    Returns the test case's complete id (typically
    "module.Class.method" for unittest cases).
    """
    return test_case.id()
def getCaseFile(test_case):
    """
    Returns the source file path of the test case's class, as reported
    by inspect.getsourcefile().
    """
    return inspect.getsourcefile(test_case.__class__)
def getCaseMethod(test_case):
    """
    Returns the bare test method name, i.e. the last dot-separated
    component of the case's id.
    """
    return getCaseID(test_case).rsplit('.', 1)[-1]

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,20 @@
[package]
name = "guessing-game"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
name = "guessing_game"
# "cdylib" is necessary to produce a shared library for Python to import from.
crate-type = ["cdylib"]
[dependencies]
rand = "0.8.4"
[dependencies.pyo3]
version = "0.19.0"
# "abi3-py38" tells pyo3 (and maturin) to build using the stable ABI with minimum Python version 3.8
features = ["abi3-py38"]

Some files were not shown because too many files have changed in this diff Show More