Complete Yocto mirror with license table for TQMa6UL (2038-compliance)

- 264 license table entries with exact download URLs (224/264 resolved)
- Complete sources/ directory with all BitBake recipes
- Build configuration: tqma6ul-multi-mba6ulx, spaetzle (musl)
- Full traceability for Softwarefreigabeantrag
- GCC 13.4.0, Linux 6.6.102, U-Boot 2023.04, musl 1.2.4
- License distribution: GPL-2.0 (24), MIT (23), GPL-2.0+ (18), BSD-3 (16)
This commit is contained in:
Siggi (OpenClaw Agent)
2026-03-01 20:58:18 +00:00
commit 16accb6b24
15086 changed files with 1292356 additions and 0 deletions

View File

@@ -0,0 +1,182 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import sys
import argparse
from collections import defaultdict, OrderedDict
class ArgumentUsageError(Exception):
    """Exception class you can raise (and catch) in order to show the help"""

    def __init__(self, message, subcommand=None):
        # Pass the message to Exception so that str(exc) and tracebacks
        # show it instead of an empty string
        super().__init__(message)
        self.message = message
        # Name of the subcommand the error relates to, if any
        self.subcommand = subcommand
class ArgumentParser(argparse.ArgumentParser):
    """Our own version of argparse's ArgumentParser"""

    def __init__(self, *args, **kwargs):
        # Use our custom help formatter unless the caller overrides it
        kwargs.setdefault('formatter_class', OeHelpFormatter)
        # Maps group name -> (description, order); filled via
        # add_subparser_group() and consumed by OeHelpFormatter
        self._subparser_groups = OrderedDict()
        super(ArgumentParser, self).__init__(*args, **kwargs)
        # Rename the default section headers shown in --help output
        self._positionals.title = 'arguments'
        self._optionals.title = 'options'

    def error(self, message):
        """error(message: string)

        Prints a help message incorporating the message to stderr and
        exits.
        """
        self._print_message('%s: error: %s\n' % (self.prog, message), sys.stderr)
        self.print_help(sys.stderr)
        sys.exit(2)

    def error_subcommand(self, message, subcommand):
        # Show the error in the context of the given subcommand's parser,
        # falling back to the top-level parser's error handling
        if subcommand:
            action = self._get_subparser_action()
            try:
                subparser = action._name_parser_map[subcommand]
            except KeyError:
                self.error('no subparser for name "%s"' % subcommand)
            else:
                # error() exits, so the call below is not reached here
                subparser.error(message)
        self.error(message)

    def add_subparsers(self, *args, **kwargs):
        # Force a known dest so _get_subparser() can find the chosen name
        if 'dest' not in kwargs:
            kwargs['dest'] = '_subparser_name'
        ret = super(ArgumentParser, self).add_subparsers(*args, **kwargs)
        # Need a way of accessing the parent parser
        ret._parent_parser = self
        # Ensure our class gets instantiated
        ret._parser_class = ArgumentSubParser
        # Hacky way of adding a method to the subparsers object
        ret.add_subparser_group = self.add_subparser_group
        return ret

    def add_subparser_group(self, groupname, groupdesc, order=0):
        # Register a named group used to organize subcommands in --help
        self._subparser_groups[groupname] = (groupdesc, order)

    def parse_args(self, args=None, namespace=None):
        """Parse arguments, using the correct subparser to show the error."""
        args, argv = self.parse_known_args(args, namespace)
        if argv:
            message = 'unrecognized arguments: %s' % ' '.join(argv)
            if self._subparsers:
                # Report the error against the subcommand that was invoked
                subparser = self._get_subparser(args)
                subparser.error(message)
            else:
                self.error(message)
            # Defensive: error() exits with status 2 already
            sys.exit(2)
        return args

    def _get_subparser(self, args):
        # Look up the subparser instance that handled the parsed args
        action = self._get_subparser_action()
        if action.dest == argparse.SUPPRESS:
            self.error('cannot get subparser, the subparser action dest is suppressed')
        name = getattr(args, action.dest)
        try:
            return action._name_parser_map[name]
        except KeyError:
            self.error('no subparser for name "%s"' % name)

    def _get_subparser_action(self):
        # Find the argparse-internal action object holding the subparsers
        if not self._subparsers:
            self.error('cannot return the subparser action, no subparsers added')
        for action in self._subparsers._group_actions:
            if isinstance(action, argparse._SubParsersAction):
                return action
class ArgumentSubParser(ArgumentParser):
    """Parser for an individual subcommand.

    Supports optional 'group' and 'order' keyword arguments which control
    how the subcommand is listed in the top-level --help output.
    """

    def __init__(self, *args, **kwargs):
        # 'group' and 'order' are our own extensions; strip them before
        # delegating to argparse
        if 'group' in kwargs:
            self._group = kwargs.pop('group')
        if 'order' in kwargs:
            self._order = kwargs.pop('order')
        super(ArgumentSubParser, self).__init__(*args, **kwargs)

    def parse_known_args(self, args=None, namespace=None):
        # This works around argparse not handling optional positional arguments being
        # intermixed with other options. A pretty horrible hack, but we're not left
        # with much choice given that the bug in argparse exists and it's difficult
        # to subclass.
        # Borrowed from http://stackoverflow.com/questions/20165843/argparse-how-to-handle-variable-number-of-arguments-nargs
        # with an extra workaround (in format_help() below) for the positional
        # arguments disappearing from the --help output, as well as structural tweaks.
        # Originally simplified from http://bugs.python.org/file30204/test_intermixed.py
        positionals = self._get_positional_actions()
        for action in positionals:
            # deactivate positionals (first pass handles options only)
            action.save_nargs = action.nargs
            action.nargs = 0
        namespace, remaining_args = super(ArgumentSubParser, self).parse_known_args(args, namespace)
        for action in positionals:
            # remove the empty positional values from namespace
            if hasattr(namespace, action.dest):
                delattr(namespace, action.dest)
        for action in positionals:
            # reactivate the positionals for the second parsing pass
            action.nargs = action.save_nargs
        # parse positionals from whatever the first pass did not consume
        namespace, extras = super(ArgumentSubParser, self).parse_known_args(remaining_args, namespace)
        return namespace, extras

    def format_help(self):
        # Quick, restore the positionals! They may still be deactivated if
        # help is requested while parse_known_args() above is in progress.
        positionals = self._get_positional_actions()
        for action in positionals:
            if hasattr(action, 'save_nargs'):
                action.nargs = action.save_nargs
        # NOTE: deliberately skips ArgumentParser in the MRO and calls
        # argparse.ArgumentParser.format_help() directly
        return super(ArgumentParser, self).format_help()
class OeHelpFormatter(argparse.HelpFormatter):
    """Help formatter that lists subcommands in named groups.

    Groups and their ordering come from ArgumentParser._subparser_groups
    (registered via add_subparser_group()); within a group, subcommands are
    sorted by their 'order' value, highest first.
    """

    def _format_action(self, action):
        if hasattr(action, '_get_subactions'):
            # subcommands list
            groupmap = defaultdict(list)  # group name -> list of subactions
            ordermap = {}                 # subcommand name -> sort order
            subparser_groups = action._parent_parser._subparser_groups
            # Highest registered 'order' first
            groups = sorted(subparser_groups.keys(), key=lambda item: subparser_groups[item][1], reverse=True)
            for subaction in self._iter_indented_subactions(action):
                parser = action._name_parser_map[subaction.dest]
                group = getattr(parser, '_group', None)
                groupmap[group].append(subaction)
                if group not in groups:
                    groups.append(group)
                order = getattr(parser, '_order', 0)
                ordermap[subaction.dest] = order

            lines = []
            if len(groupmap) > 1:
                # Indent subcommands under group headings only when there
                # is more than one group
                groupindent = ' '
            else:
                groupindent = ''
            for group in groups:
                subactions = groupmap[group]
                if not subactions:
                    continue
                if groupindent:
                    if not group:
                        group = 'other'
                    groupdesc = subparser_groups.get(group, (group, 0))[0]
                    lines.append(' %s:' % groupdesc)
                for subaction in sorted(subactions, key=lambda item: ordermap[item.dest], reverse=True):
                    lines.append('%s%s' % (groupindent, self._format_action(subaction).rstrip()))
            return '\n'.join(lines)
        else:
            return super(OeHelpFormatter, self)._format_action(action)
def int_positive(value):
    """argparse type function accepting only strictly positive integers.

    Raises argparse.ArgumentTypeError for zero or negative values.
    """
    parsed = int(value)
    if parsed > 0:
        return parsed
    raise argparse.ArgumentTypeError(
        "%s is not a positive int value" % value)

View File

@@ -0,0 +1,24 @@
#
# Copyright (c) 2017, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Build performance test library functions"""
def print_table(rows, row_fmt=None):
    """Print data table

    Each column is padded to the width of its widest cell; 'row_fmt' may
    supply per-column format strings (receiving the cell and 'wid').
    """
    if not rows:
        return
    if not row_fmt:
        row_fmt = ['{:{wid}} '] * len(rows[0])

    # Widest cell (as a string) per column determines the column width
    widths = [0] * len(row_fmt)
    for row in rows:
        for col, cell in enumerate(row):
            cell_len = len(str(cell))
            if cell_len > widths[col]:
                widths[col] = cell_len

    for row in rows:
        cells = [row_fmt[col].format(cell, wid=widths[col])
                 for col, cell in enumerate(row)]
        print(*cells)

View File

@@ -0,0 +1,12 @@
#
# Copyright (c) 2017, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Helper module for HTML reporting"""
# Jinja2 renders the HTML reports; it is a third-party dependency of the
# build-perf reporting tools
from jinja2 import Environment, PackageLoader

# Template environment loading the templates shipped in build_perf/html/
env = Environment(loader=PackageLoader('build_perf', 'html'))
# Top-level report template used by the report generator
template = env.get_template('report.html')

View File

@@ -0,0 +1,50 @@
<script type="text/javascript">
chartsDrawing += 1;
google.charts.setOnLoadCallback(drawChart_{{ chart_elem_id }});
function drawChart_{{ chart_elem_id }}() {
var data = new google.visualization.DataTable();
// Chart options
var options = {
theme : 'material',
legend: 'none',
hAxis: { format: '', title: 'Commit number',
minValue: {{ chart_opts.haxis.min }},
maxValue: {{ chart_opts.haxis.max }} },
{% if measurement.type == 'time' %}
vAxis: { format: 'h:mm:ss' },
{% else %}
vAxis: { format: '' },
{% endif %}
pointSize: 5,
chartArea: { left: 80, right: 15 },
};
// Define data columns
data.addColumn('number', 'Commit');
data.addColumn('{{ measurement.value_type.gv_data_type }}',
'{{ measurement.value_type.quantity }}');
// Add data rows
data.addRows([
{% for sample in measurement.samples %}
[{{ sample.commit_num }}, {{ sample.mean.gv_value() }}],
{% endfor %}
]);
// Finally, draw the chart
chart_div = document.getElementById('{{ chart_elem_id }}');
var chart = new google.visualization.LineChart(chart_div);
google.visualization.events.addListener(chart, 'ready', function () {
//chart_div = document.getElementById('{{ chart_elem_id }}');
//chart_div.innerHTML = '<img src="' + chart.getImageURI() + '">';
png_div = document.getElementById('{{ chart_elem_id }}_png');
png_div.outerHTML = '<a id="{{ chart_elem_id }}_png" href="' + chart.getImageURI() + '">PNG</a>';
console.log("CHART READY: {{ chart_elem_id }}");
chartsDrawing -= 1;
if (chartsDrawing == 0)
console.log("ALL CHARTS READY");
});
chart.draw(data, options);
}
</script>

View File

@@ -0,0 +1,289 @@
<!DOCTYPE html>
<html lang="en">
<head>
{# Scripts, for visualization#}
<!--START-OF-SCRIPTS-->
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['corechart']});
var chartsDrawing = 0;
</script>
{# Render measurement result charts #}
{% for test in test_data %}
{% if test.status == 'SUCCESS' %}
{% for measurement in test.measurements %}
{% set chart_elem_id = test.name + '_' + measurement.name + '_chart' %}
{% include 'measurement_chart.html' %}
{% endfor %}
{% endif %}
{% endfor %}
<!--END-OF-SCRIPTS-->
{# Styles #}
<style>
.meta-table {
font-size: 14px;
text-align: left;
border-collapse: collapse;
}
.meta-table tr:nth-child(even){background-color: #f2f2f2}
.meta-table th, .meta-table td {
padding: 4px;
}
.summary {
margin: 0;
font-size: 14px;
text-align: left;
border-collapse: collapse;
}
.summary th, .meta-table td {
padding: 4px;
}
.measurement {
padding: 8px 0px 8px 8px;
border: 2px solid #f0f0f0;
margin-bottom: 10px;
}
.details {
margin: 0;
font-size: 12px;
text-align: left;
border-collapse: collapse;
}
.details th {
padding-right: 8px;
}
.details.plain th {
font-weight: normal;
}
.preformatted {
font-family: monospace;
white-space: pre-wrap;
background-color: #f0f0f0;
margin-left: 10px;
}
hr {
color: #f0f0f0;
}
h2 {
font-size: 20px;
margin-bottom: 0px;
color: #707070;
}
h3 {
font-size: 16px;
margin: 0px;
color: #707070;
}
</style>
<title>{{ title }}</title>
</head>
{% macro poky_link(commit) -%}
<a href="http://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?id={{ commit }}">{{ commit[0:11] }}</a>
{%- endmacro %}
<body><div style="width: 700px">
{# Test metadata #}
<h2>General</h2>
<hr>
<table class="meta-table" style="width: 100%">
<tr>
<th></th>
<th>Current commit</th>
<th>Comparing with</th>
</tr>
{% for key, item in metadata.items() %}
<tr>
<th>{{ item.title }}</th>
{%if key == 'commit' %}
<td>{{ poky_link(item.value) }}</td>
<td>{{ poky_link(item.value_old) }}</td>
{% else %}
<td>{{ item.value }}</td>
<td>{{ item.value_old }}</td>
{% endif %}
</tr>
{% endfor %}
</table>
{# Test result summary #}
<h2>Test result summary</h2>
<hr>
<table class="summary" style="width: 100%">
{% for test in test_data %}
{% if loop.index is even %}
{% set row_style = 'style="background-color: #f2f2f2"' %}
{% else %}
{% set row_style = 'style="background-color: #ffffff"' %}
{% endif %}
{% if test.status == 'SUCCESS' %}
{% for measurement in test.measurements %}
<tr {{ row_style }}>
{% if loop.index == 1 %}
<td>{{ test.name }}: {{ test.description }}</td>
{% else %}
{# add empty cell in place of the test name#}
<td></td>
{% endif %}
{% if measurement.absdiff > 0 %}
{% set result_style = "color: red" %}
{% elif measurement.absdiff == measurement.absdiff %}
{% set result_style = "color: green" %}
{% else %}
{% set result_style = "color: orange" %}
{%endif %}
{% if measurement.reldiff|abs > 2 %}
{% set result_style = result_style + "; font-weight: bold" %}
{% endif %}
<td>{{ measurement.description }}</td>
<td style="font-weight: bold">{{ measurement.value.mean }}</td>
<td style="{{ result_style }}">{{ measurement.absdiff_str }}</td>
<td style="{{ result_style }}">{{ measurement.reldiff_str }}</td>
</tr>
{% endfor %}
{% else %}
<td style="font-weight: bold; color: red;">{{test.status }}</td>
<td></td> <td></td> <td></td> <td></td>
{% endif %}
{% endfor %}
</table>
{# Detailed test results #}
{% for test in test_data %}
<h2>{{ test.name }}: {{ test.description }}</h2>
<hr>
{% if test.status == 'SUCCESS' %}
{% for measurement in test.measurements %}
<div class="measurement">
<h3>{{ measurement.description }}</h3>
<div style="font-weight:bold;">
<span style="font-size: 23px;">{{ measurement.value.mean }}</span>
<span style="font-size: 20px; margin-left: 12px">
{% if measurement.absdiff > 0 %}
<span style="color: red">
{% elif measurement.absdiff == measurement.absdiff %}
<span style="color: green">
{% else %}
<span style="color: orange">
{% endif %}
{{ measurement.absdiff_str }} ({{measurement.reldiff_str}})
</span></span>
</div>
{# Table for trendchart and the statistics #}
<table style="width: 100%">
<tr>
<td style="width: 75%">
{# Linechart #}
<div id="{{ test.name }}_{{ measurement.name }}_chart"></div>
</td>
<td>
{# Measurement statistics #}
<table class="details plain">
<tr>
<th>Test runs</th><td>{{ measurement.value.sample_cnt }}</td>
</tr><tr>
<th>-/+</th><td>-{{ measurement.value.minus }} / +{{ measurement.value.plus }}</td>
</tr><tr>
<th>Min</th><td>{{ measurement.value.min }}</td>
</tr><tr>
<th>Max</th><td>{{ measurement.value.max }}</td>
</tr><tr>
<th>Stdev</th><td>{{ measurement.value.stdev }}</td>
</tr><tr>
<th><div id="{{ test.name }}_{{ measurement.name }}_chart_png"></div></th>
<td></td>
</tr>
</table>
</td>
</tr>
</table>
{# Task and recipe summary from buildstats #}
{% if 'buildstats' in measurement %}
Task resource usage
<table class="details" style="width:100%">
<tr>
<th>Number of tasks</th>
<th>Top consumers of cputime</th>
</tr>
<tr>
<td style="vertical-align: top">{{ measurement.buildstats.tasks.count }} ({{ measurement.buildstats.tasks.change }})</td>
{# Table of most resource-hungry tasks #}
<td>
<table class="details plain">
{% for diff in measurement.buildstats.top_consumer|reverse %}
<tr>
<th>{{ diff.pkg }}.{{ diff.task }}</th>
<td>{{ '%0.0f' % diff.value2 }} s</td>
</tr>
{% endfor %}
</table>
</td>
</tr>
<tr>
<th>Biggest increase in cputime</th>
<th>Biggest decrease in cputime</th>
</tr>
<tr>
{# Table biggest increase in resource usage #}
<td>
<table class="details plain">
{% for diff in measurement.buildstats.top_increase|reverse %}
<tr>
<th>{{ diff.pkg }}.{{ diff.task }}</th>
<td>{{ '%+0.0f' % diff.absdiff }} s</td>
</tr>
{% endfor %}
</table>
</td>
{# Table biggest decrease in resource usage #}
<td>
<table class="details plain">
{% for diff in measurement.buildstats.top_decrease %}
<tr>
<th>{{ diff.pkg }}.{{ diff.task }}</th>
<td>{{ '%+0.0f' % diff.absdiff }} s</td>
</tr>
{% endfor %}
</table>
</td>
</tr>
</table>
{# Recipe version differences #}
{% if measurement.buildstats.ver_diff %}
<div style="margin-top: 16px">Recipe version changes</div>
<table class="details">
{% for head, recipes in measurement.buildstats.ver_diff.items() %}
<tr>
<th colspan="2">{{ head }}</th>
</tr>
{% for name, info in recipes|sort %}
<tr>
<td>{{ name }}</td>
<td>{{ info }}</td>
</tr>
{% endfor %}
{% endfor %}
</table>
{% else %}
<div style="margin-top: 16px">No recipe version changes detected</div>
{% endif %}
{% endif %}
</div>
{% endfor %}
{# Unsuccessful test #}
{% else %}
<span style="font-size: 150%; font-weight: bold; color: red;">{{ test.status }}
{% if test.err_type %}<span style="font-size: 75%; font-weight: normal">({{ test.err_type }})</span>{% endif %}
</span>
<div class="preformatted">{{ test.message }}</div>
{% endif %}
{% endfor %}
</div></body>
</html>

View File

@@ -0,0 +1,339 @@
#
# Copyright (c) 2017, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Handling of build perf test reports"""
from collections import OrderedDict, namedtuple
from collections.abc import Mapping
from datetime import datetime, timezone
from numbers import Number
from statistics import mean, stdev, variance
AggregateTestData = namedtuple('AggregateTestData', ['metadata', 'results'])
def isofmt_to_timestamp(string):
    """Convert timestamp string in ISO 8601 format into unix timestamp

    The input is interpreted as UTC; fractional seconds are optional.
    """
    fmt = '%Y-%m-%dT%H:%M:%S.%f' if '.' in string else '%Y-%m-%dT%H:%M:%S'
    parsed = datetime.strptime(string, fmt)
    return parsed.replace(tzinfo=timezone.utc).timestamp()
def metadata_xml_to_json(elem):
    """Convert metadata xml into JSON format

    Child elements become keys (taking the 'name' attribute when present,
    the tag otherwise); leaf element text becomes the value.
    """
    assert elem.tag == 'metadata', "Invalid metadata file format"

    def _xml_to_json(elem):
        """Convert xml element to JSON object"""
        out = OrderedDict()
        # FIX: iterate the element directly instead of the long-deprecated
        # Element.getchildren(), which was removed in Python 3.9
        for child in elem:
            key = child.attrib.get('name', child.tag)
            if len(child):
                out[key] = _xml_to_json(child)
            else:
                out[key] = child.text
        return out

    return _xml_to_json(elem)
def results_xml_to_json(elem):
    """Convert results xml into JSON format

    Parses a JUnit-style 'testsuites' XML tree produced by the build perf
    tests into a JSON-compatible OrderedDict structure.

    FIX: Element.getchildren() was removed in Python 3.9; child elements
    are now iterated/indexed directly.
    """
    rusage_fields = ('ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
                     'ru_majflt', 'ru_inblock', 'ru_oublock', 'ru_nvcsw',
                     'ru_nivcsw')
    iostat_fields = ('rchar', 'wchar', 'syscr', 'syscw', 'read_bytes',
                     'write_bytes', 'cancelled_write_bytes')

    def _read_measurement(elem):
        """Convert measurement to JSON"""
        data = OrderedDict()
        data['type'] = elem.tag
        data['name'] = elem.attrib['name']
        data['legend'] = elem.attrib['legend']
        values = OrderedDict()
        # SYSRES measurement
        if elem.tag == 'sysres':
            for subel in elem:
                if subel.tag == 'time':
                    values['start_time'] = isofmt_to_timestamp(subel.attrib['timestamp'])
                    values['elapsed_time'] = float(subel.text)
                elif subel.tag == 'rusage':
                    rusage = OrderedDict()
                    for field in rusage_fields:
                        # CPU time fields are floats, the rest are counters
                        if 'time' in field:
                            rusage[field] = float(subel.attrib[field])
                        else:
                            rusage[field] = int(subel.attrib[field])
                    values['rusage'] = rusage
                elif subel.tag == 'iostat':
                    values['iostat'] = OrderedDict([(f, int(subel.attrib[f]))
                                                    for f in iostat_fields])
                elif subel.tag == 'buildstats_file':
                    values['buildstats_file'] = subel.text
                else:
                    raise TypeError("Unknown sysres value element '{}'".format(subel.tag))
        # DISKUSAGE measurement
        elif elem.tag == 'diskusage':
            values['size'] = int(elem.find('size').text)
        else:
            raise Exception("Unknown measurement tag '{}'".format(elem.tag))
        data['values'] = values
        return data

    def _read_testcase(elem):
        """Convert testcase into JSON"""
        assert elem.tag == 'testcase', "Expecting 'testcase' element instead of {}".format(elem.tag)
        data = OrderedDict()
        data['name'] = elem.attrib['name']
        data['description'] = elem.attrib['description']
        data['status'] = 'SUCCESS'
        data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
        data['elapsed_time'] = float(elem.attrib['time'])
        measurements = OrderedDict()
        # FIX: iterate directly; getchildren() removed in Python 3.9
        for subel in elem:
            if subel.tag == 'error' or subel.tag == 'failure':
                data['status'] = subel.tag.upper()
                data['message'] = subel.attrib['message']
                data['err_type'] = subel.attrib['type']
                data['err_output'] = subel.text
            elif subel.tag == 'skipped':
                data['status'] = 'SKIPPED'
                data['message'] = subel.text
            else:
                measurements[subel.attrib['name']] = _read_measurement(subel)
        data['measurements'] = measurements
        return data

    def _read_testsuite(elem):
        """Convert suite to JSON"""
        assert elem.tag == 'testsuite', \
            "Expecting 'testsuite' element instead of {}".format(elem.tag)
        data = OrderedDict()
        if 'hostname' in elem.attrib:
            data['tester_host'] = elem.attrib['hostname']
        data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
        data['elapsed_time'] = float(elem.attrib['time'])
        tests = OrderedDict()
        # FIX: iterate directly; getchildren() removed in Python 3.9
        for case in elem:
            tests[case.attrib['name']] = _read_testcase(case)
        data['tests'] = tests
        return data

    # Main function
    assert elem.tag == 'testsuites', "Invalid test report format"
    assert len(elem) == 1, "Too many testsuites"
    # FIX: index the element directly instead of getchildren()[0]
    return _read_testsuite(elem[0])
def aggregate_metadata(metadata):
    """Aggregate metadata into one, basically a sanity check"""
    mutable_keys = ('pretty_name', 'version_id')

    def merge_check(ref, other, check_strings=True):
        """Recursively verify that 'other' structurally matches 'ref'"""
        assert type(ref) is type(other), \
            "Type mismatch: {} != {}".format(type(ref), type(other))
        if isinstance(other, Mapping):
            assert set(ref.keys()) == set(other.keys())
            for key, val in other.items():
                # Values of mutable keys may legitimately differ
                merge_check(ref[key], val, key not in mutable_keys)
        elif isinstance(other, list):
            assert len(ref) == len(other)
            for idx, val in enumerate(other):
                merge_check(ref[idx], val)
        elif not isinstance(other, str) or (isinstance(other, str) and check_strings):
            assert ref == other, "Data mismatch {} != {}".format(ref, other)

    if not metadata:
        return {}
    # All runs must agree (apart from mutable keys); the first run's data
    # is kept as the aggregate
    merged = metadata[0].copy()
    for testrun in metadata[1:]:
        merge_check(merged, testrun)
    merged['testrun_count'] = len(metadata)
    return merged
def aggregate_data(data):
    """Aggregate multiple test results JSON structures into one

    Numerical leaf values from all runs are collected into SampleList
    containers. String values are sanity-checked to be identical across
    runs, except for the per-run status/message fields which may differ.
    """
    mutable_keys = ('status', 'message', 'err_type', 'err_output')

    class SampleList(list):
        """Container for numerical samples"""
        pass

    def new_aggregate_obj(obj):
        """Create new object for aggregate"""
        if isinstance(obj, Number):
            new_obj = SampleList()
            new_obj.append(obj)
        elif isinstance(obj, str):
            new_obj = obj
        else:
            # Lists and dicts are kept as is
            new_obj = obj.__class__()
            aggregate_obj(new_obj, obj)
        return new_obj

    def aggregate_obj(aggregate, obj, assert_str=True):
        """Recursive "aggregation" of JSON objects"""
        if isinstance(obj, Number):
            assert isinstance(aggregate, SampleList)
            aggregate.append(obj)
            return

        assert type(aggregate) == type(obj), \
            "Type mismatch: {} != {}".format(type(aggregate), type(obj))
        if isinstance(obj, Mapping):
            for key, val in obj.items():
                if key not in aggregate:
                    aggregate[key] = new_aggregate_obj(val)
                else:
                    aggregate_obj(aggregate[key], val, key not in mutable_keys)
        elif isinstance(obj, list):
            for i, val in enumerate(obj):
                if i >= len(aggregate):
                    # BUGFIX: previously assigned to aggregate[key], where
                    # 'key' is undefined in this branch (NameError); new
                    # list elements must be appended
                    aggregate.append(new_aggregate_obj(val))
                else:
                    aggregate_obj(aggregate[i], val)
        elif isinstance(obj, str):
            # Sanity check for data
            if assert_str:
                assert aggregate == obj, "Data mismatch {} != {}".format(aggregate, obj)
        else:
            raise Exception("BUG: unable to aggregate '{}' ({})".format(type(obj), str(obj)))

    if not data:
        return {}
    # Do the aggregation
    aggregate = data[0].__class__()
    for testrun in data:
        aggregate_obj(aggregate, testrun)
    return aggregate
class MeasurementVal(float):
    """Base class representing measurement values"""
    gv_data_type = 'number'

    def gv_value(self):
        """Value formatting for visualization"""
        # NaN is the only float not equal to itself; render it as "null"
        return "null" if self != self else self
class TimeVal(MeasurementVal):
    """Class representing time values"""
    quantity = 'time'
    gv_title = 'elapsed time'
    gv_data_type = 'timeofday'

    def hms(self):
        """Split time into hours, minutes and seconds"""
        magnitude = abs(self)
        hours = int(magnitude / 3600)
        minutes = int((magnitude % 3600) / 60)
        seconds = magnitude % 60
        return hours, minutes, seconds

    def __str__(self):
        if self != self:
            return "nan"
        hh, mm, ss = self.hms()
        sign = '-' if self < 0 else ''
        # Pick the most compact representation for the magnitude
        if hh > 0:
            return '{}{:d}:{:02d}:{:02.0f}'.format(sign, hh, mm, ss)
        if mm > 0:
            return '{}{:d}:{:04.1f}'.format(sign, mm, ss)
        if ss > 1:
            return '{}{:.1f} s'.format(sign, ss)
        return '{}{:.2f} s'.format(sign, ss)

    def gv_value(self):
        """Value formatting for visualization"""
        if self != self:
            return "null"
        hh, mm, ss = self.hms()
        # [hours, minutes, seconds, milliseconds] for Google Charts
        return [hh, mm, int(ss), int(ss * 1000) % 1000]
class SizeVal(MeasurementVal):
    """Class representing size values (raw value in kiB)"""
    quantity = 'size'
    gv_title = 'size in MiB'
    gv_data_type = 'number'

    def __str__(self):
        if self != self:
            return "nan"
        magnitude = abs(self)
        if magnitude < 1024:
            return '{:.1f} kiB'.format(self)
        if magnitude < 1048576:
            return '{:.2f} MiB'.format(self / 1024)
        return '{:.2f} GiB'.format(self / 1048576)

    def gv_value(self):
        """Value formatting for visualization"""
        # Raw value is in kiB; charts are rendered in MiB
        return "null" if self != self else self / 1024
def measurement_stats(meas, prefix=''):
    """Get statistics of a measurement

    Returns a dict with sample count, mean, stdev, variance, min, max and
    the spread around the mean (minus/plus), all wrapped in the value class
    matching the measurement type (TimeVal or SizeVal). Keys are prefixed
    with 'prefix'. For an empty/missing measurement everything is NaN.

    Raises an Exception on an unknown measurement type.
    """
    if not meas:
        # Empty measurement: no samples, all statistics are NaN
        return {prefix + 'sample_cnt': 0,
                prefix + 'mean': MeasurementVal('nan'),
                prefix + 'stdev': MeasurementVal('nan'),
                prefix + 'variance': MeasurementVal('nan'),
                prefix + 'min': MeasurementVal('nan'),
                prefix + 'max': MeasurementVal('nan'),
                prefix + 'minus': MeasurementVal('nan'),
                prefix + 'plus': MeasurementVal('nan')}

    stats = {'name': meas['name']}
    if meas['type'] == 'sysres':
        val_cls = TimeVal
        values = meas['values']['elapsed_time']
    elif meas['type'] == 'diskusage':
        val_cls = SizeVal
        values = meas['values']['size']
    else:
        raise Exception("Unknown measurement type '{}'".format(meas['type']))
    stats['val_cls'] = val_cls
    stats['quantity'] = val_cls.quantity
    stats[prefix + 'sample_cnt'] = len(values)

    mean_val = val_cls(mean(values))
    min_val = val_cls(min(values))
    max_val = val_cls(max(values))

    stats[prefix + 'mean'] = mean_val
    if len(values) > 1:
        stats[prefix + 'stdev'] = val_cls(stdev(values))
        stats[prefix + 'variance'] = val_cls(variance(values))
    else:
        # CONSISTENCY FIX: wrap NaN in the measurement value class (as the
        # empty-measurement branch above does) instead of a bare float, so
        # report formatting is uniform
        stats[prefix + 'stdev'] = val_cls('nan')
        stats[prefix + 'variance'] = val_cls('nan')
    stats[prefix + 'min'] = min_val
    stats[prefix + 'max'] = max_val
    stats[prefix + 'minus'] = val_cls(mean_val - min_val)
    stats[prefix + 'plus'] = val_cls(max_val - mean_val)
    return stats

View File

@@ -0,0 +1,56 @@
/* PhantomJS helper: opens a rendered build-perf report HTML, waits for all
 * Google Charts to finish drawing (signalled via console messages emitted
 * by the chart template), then saves the page and the per-chart PNG links.
 * Usage: phantomjs <script> REPORT_HTML OUT_DIR */
var fs = require('fs');
var system = require('system');
var page = require('webpage').create();

// Examine console log for message from chart drawing
page.onConsoleMessage = function(msg) {
    console.log(msg);
    if (msg === "ALL CHARTS READY") {
        window.charts_ready = true;
    }
    else if (msg.slice(0, 11) === "CHART READY") {
        // A single chart finished: extract the PNG link element created by
        // the chart's 'ready' callback and write it to the output dir
        var chart_id = msg.split(" ")[2];
        console.log('grabbing ' + chart_id);
        var png_data = page.evaluate(function (chart_id) {
            var chart_div = document.getElementById(chart_id + '_png');
            return chart_div.outerHTML;
        }, chart_id);
        fs.write(args[2] + '/' + chart_id + '.png', png_data, 'w');
    }
};

// Check command line arguments
var args = system.args;
if (args.length != 3) {
    console.log("USAGE: " + args[0] + " REPORT_HTML OUT_DIR\n");
    phantom.exit(1);
}

// Open the web page
page.open(args[1], function(status) {
    if (status == 'fail') {
        console.log("Failed to open file '" + args[1] + "'");
        phantom.exit(1);
    }
});

// Check status every 100 ms
interval = window.setInterval(function () {
    //console.log('waiting');
    if (window.charts_ready) {
        clearTimeout(timer);
        clearInterval(interval);
        // Save the fully rendered page under its original file name
        var fname = args[1].replace(/\/+$/, "").split("/").pop()
        console.log("saving " + fname);
        fs.write(args[2] + '/' + fname, page.content, 'w');
        phantom.exit(0);
    }
}, 100);

// Time-out after 10 seconds
timer = window.setTimeout(function () {
    clearInterval(interval);
    console.log("ERROR: timeout");
    phantom.exit(1);
}, 10000);

View File

@@ -0,0 +1,368 @@
#
# Copyright (c) 2017, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Functionality for analyzing buildstats"""
import json
import logging
import os
import re
from collections import namedtuple
from statistics import mean
# Module-wide logger (root logger; configured by the calling script)
log = logging.getLogger()

# Field names of a task-to-task comparison record
taskdiff_fields = ('pkg', 'pkg_op', 'task', 'task_op', 'value1', 'value2',
                   'absdiff', 'reldiff')
TaskDiff = namedtuple('TaskDiff', ' '.join(taskdiff_fields))
class BSError(Exception):
    """Error handling of buildstats"""
    # Raised for malformed or inconsistent buildstats input
    pass
class BSTask(dict):
    """Buildstats of a single task.

    A dict of the raw buildstats data with convenience properties for
    commonly used derived values (cputime, I/O counters etc.).
    """

    def __init__(self, *args, **kwargs):
        # Pre-populate the expected keys so properties work on empty tasks
        self['start_time'] = None
        self['elapsed_time'] = None
        self['status'] = None
        self['iostat'] = {}
        self['rusage'] = {}
        self['child_rusage'] = {}
        super(BSTask, self).__init__(*args, **kwargs)

    @property
    def cputime(self):
        """Sum of user and system time taken by the task"""
        rusage = self['rusage']['ru_stime'] + self['rusage']['ru_utime']
        if self['child_rusage']:
            # Child rusage may have been optimized out
            return rusage + self['child_rusage']['ru_stime'] + self['child_rusage']['ru_utime']
        else:
            return rusage

    @property
    def walltime(self):
        """Elapsed wall clock time"""
        return self['elapsed_time']

    @property
    def read_bytes(self):
        """Bytes read from the block layer"""
        return self['iostat']['read_bytes']

    @property
    def write_bytes(self):
        """Bytes written to the block layer"""
        return self['iostat']['write_bytes']

    @property
    def read_ops(self):
        """Number of read operations on the block layer"""
        if self['child_rusage']:
            # Child rusage may have been optimized out
            return self['rusage']['ru_inblock'] + self['child_rusage']['ru_inblock']
        else:
            return self['rusage']['ru_inblock']

    @property
    def write_ops(self):
        """Number of write operations on the block layer"""
        if self['child_rusage']:
            # Child rusage may have been optimized out
            return self['rusage']['ru_oublock'] + self['child_rusage']['ru_oublock']
        else:
            return self['rusage']['ru_oublock']

    @classmethod
    def from_file(cls, buildstat_file, fallback_end=0):
        """Read buildstat text file. fallback_end is an optional end time for tasks that are not recorded as finishing."""
        bs_task = cls()
        log.debug("Reading task buildstats from %s", buildstat_file)
        # BUGFIX: initialize start_time so a file without a 'Started' line
        # raises BSError below instead of an unexpected NameError
        start_time = None
        end_time = None
        with open(buildstat_file) as fobj:
            for line in fobj.readlines():
                key, val = line.split(':', 1)
                val = val.strip()
                if key == 'Started':
                    start_time = float(val)
                    bs_task['start_time'] = start_time
                elif key == 'Ended':
                    end_time = float(val)
                elif key.startswith('IO '):
                    split = key.split()
                    bs_task['iostat'][split[1]] = int(val)
                elif key.find('rusage') >= 0:
                    split = key.split()
                    ru_key = split[-1]
                    # CPU times are floats, the other rusage fields ints
                    if ru_key in ('ru_stime', 'ru_utime'):
                        val = float(val)
                    else:
                        val = int(val)
                    ru_type = 'rusage' if split[0] == 'rusage' else \
                        'child_rusage'
                    bs_task[ru_type][ru_key] = val
                elif key == 'Status':
                    bs_task['status'] = val
        # If the task didn't finish, fill in the fallback end time if specified
        if start_time and not end_time and fallback_end:
            end_time = fallback_end
        if start_time and end_time:
            bs_task['elapsed_time'] = end_time - start_time
        else:
            raise BSError("{} looks like a invalid buildstats file".format(buildstat_file))
        return bs_task
class BSTaskAggregate(object):
    """Class representing multiple runs of the same task"""
    properties = ('cputime', 'walltime', 'read_bytes', 'write_bytes',
                  'read_ops', 'write_ops')

    def __init__(self, tasks=None):
        self._tasks = tasks or []
        self._properties = {}

    def __getattr__(self, name):
        if name not in self.properties:
            raise AttributeError("'BSTaskAggregate' has no attribute '{}'".format(name))
        # Calculate properties on demand only. We only provide mean
        # value, so far
        if name not in self._properties:
            samples = [getattr(task, name) for task in self._tasks]
            self._properties[name] = mean(samples)
        return self._properties[name]

    def append(self, task):
        """Append new task"""
        assert isinstance(task, BSTask), "Type is '{}' instead of 'BSTask'".format(type(task))
        # Invalidate cached pre-calculated properties
        self._properties = {}
        self._tasks.append(task)
class BSRecipe(object):
    """Class representing buildstats of one recipe"""

    def __init__(self, name, epoch, version, revision):
        self.name = name
        self.epoch = epoch
        self.version = version
        self.revision = revision
        # "[epoch_]version-revision"; epoch omitted when unset
        prefix = version if epoch is None else "{}_{}".format(epoch, version)
        self.evr = "{}-{}".format(prefix, revision)
        self.tasks = {}

    def aggregate(self, bsrecipe):
        """Aggregate data of another recipe buildstats"""
        if self.nevr != bsrecipe.nevr:
            raise ValueError("Refusing to aggregate buildstats, recipe version "
                             "differs: {} vs. {}".format(self.nevr, bsrecipe.nevr))
        if set(self.tasks.keys()) != set(bsrecipe.tasks.keys()):
            raise ValueError("Refusing to aggregate buildstats, set of tasks "
                             "in {} differ".format(self.name))
        for taskname, taskdata in bsrecipe.tasks.items():
            # Promote single-run data to an aggregate before appending
            if not isinstance(self.tasks[taskname], BSTaskAggregate):
                self.tasks[taskname] = BSTaskAggregate([self.tasks[taskname]])
            self.tasks[taskname].append(taskdata)

    @property
    def nevr(self):
        return self.name + '-' + self.evr
class BuildStats(dict):
    """Class representing buildstats of one build

    Behaves as a dict mapping recipe name -> BSRecipe.
    """

    @property
    def num_tasks(self):
        """Get number of tasks"""
        num = 0
        for recipe in self.values():
            num += len(recipe.tasks)
        return num

    @classmethod
    def from_json(cls, bs_json):
        """Create new BuildStats object from JSON object"""
        buildstats = cls()
        for recipe in bs_json:
            # Each recipe name may appear only once per build
            if recipe['name'] in buildstats:
                raise BSError("Cannot handle multiple versions of the same "
                              "package ({})".format(recipe['name']))
            bsrecipe = BSRecipe(recipe['name'], recipe['epoch'],
                                recipe['version'], recipe['revision'])
            for task, data in recipe['tasks'].items():
                bsrecipe.tasks[task] = BSTask(data)
            buildstats[recipe['name']] = bsrecipe
        return buildstats

    @staticmethod
    def from_file_json(path):
        """Load buildstats from a JSON file"""
        with open(path) as fobj:
            bs_json = json.load(fobj)
        return BuildStats.from_json(bs_json)

    @staticmethod
    def split_nevr(nevr):
        """Split name and version information from recipe "nevr" string"""
        # Everything after the last dash is the revision
        n_e_v, revision = nevr.rsplit('-', 1)
        # Preferred form: version starts with a digit, optionally preceded by
        # a numeric epoch separated with '_'
        match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
                         n_e_v)
        if not match:
            # If we're not able to parse a version starting with a number, just
            # take the part after last dash
            match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
                             n_e_v)
        # NOTE(review): if the fallback regex also fails (e.g. no dash in
        # n_e_v), match is None and the calls below raise AttributeError —
        # presumably such directory names never occur; confirm.
        name = match.group('name')
        version = match.group('version')
        epoch = match.group('epoch')
        return name, epoch, version, revision

    @staticmethod
    def parse_top_build_stats(path):
        """
        Parse the top-level build_stats file for build-wide start and duration.

        Returns (start, elapsed) as floats (seconds); both default to 0 if the
        corresponding keys are missing from the file.
        """
        start = elapsed = 0
        with open(path) as fobj:
            for line in fobj.readlines():
                # NOTE(review): assumes every line contains a ':'; a line
                # without one raises ValueError here — verify file format.
                key, val = line.split(':', 1)
                val = val.strip()
                if key == 'Build Started':
                    start = float(val)
                elif key == "Elapsed time":
                    # Value looks like "<seconds> ..."; keep the number only
                    elapsed = float(val.split()[0])
        return start, elapsed

    @classmethod
    def from_dir(cls, path):
        """Load buildstats from a buildstats directory"""
        top_stats = os.path.join(path, 'build_stats')
        if not os.path.isfile(top_stats):
            raise BSError("{} does not look like a buildstats directory".format(path))
        log.debug("Reading buildstats directory %s", path)
        buildstats = cls()
        build_started, build_elapsed = buildstats.parse_top_build_stats(top_stats)
        build_end = build_started + build_elapsed
        # Each subdirectory (except bookkeeping ones) is one recipe's stats
        subdirs = os.listdir(path)
        for dirname in subdirs:
            recipe_dir = os.path.join(path, dirname)
            if dirname == "reduced_proc_pressure" or not os.path.isdir(recipe_dir):
                continue
            name, epoch, version, revision = cls.split_nevr(dirname)
            bsrecipe = BSRecipe(name, epoch, version, revision)
            # Every file in the recipe directory is one task's stats
            for task in os.listdir(recipe_dir):
                bsrecipe.tasks[task] = BSTask.from_file(
                    os.path.join(recipe_dir, task), build_end)
            if name in buildstats:
                raise BSError("Cannot handle multiple versions of the same "
                              "package ({})".format(name))
            buildstats[name] = bsrecipe
        return buildstats

    def aggregate(self, buildstats):
        """Aggregate other buildstats into this"""
        # Both builds must cover exactly the same set of recipes
        if set(self.keys()) != set(buildstats.keys()):
            raise ValueError("Refusing to aggregate buildstats, set of "
                             "recipes is different: %s" % (set(self.keys()) ^ set(buildstats.keys())))
        for pkg, data in buildstats.items():
            self[pkg].aggregate(data)
def diff_buildstats(bs1, bs2, stat_attr, min_val=None, min_absdiff=None, only_tasks=None):
    """Compare the tasks of two buildstats

    Args:
        bs1, bs2: BuildStats mappings (recipe name -> BSRecipe)
        stat_attr: name of the BSTask attribute to compare
        min_val: if set (and truthy), skip tasks whose larger value is below it
        min_absdiff: if set (and truthy), skip tasks whose absolute difference
            is below it
        only_tasks: optional list of task names to restrict the comparison to

    Returns a list of TaskDiff tuples, one per task present in either build.
    """
    # only_tasks previously defaulted to a shared mutable list ([]), the
    # classic mutable-default pitfall; use None as the default instead.
    if only_tasks is None:
        only_tasks = []
    tasks_diff = []
    pkgs = set(bs1.keys()).union(set(bs2.keys()))
    for pkg in pkgs:
        tasks1 = bs1[pkg].tasks if pkg in bs1 else {}
        tasks2 = bs2[pkg].tasks if pkg in bs2 else {}
        if only_tasks:
            tasks1 = {k: v for k, v in tasks1.items() if k in only_tasks}
            tasks2 = {k: v for k, v in tasks2.items() if k in only_tasks}

        # '+' = recipe only present in bs2, '-' = only in bs1, ' ' = in both
        if not tasks1:
            pkg_op = '+'
        elif not tasks2:
            pkg_op = '-'
        else:
            pkg_op = ' '

        for task in set(tasks1.keys()).union(set(tasks2.keys())):
            task_op = ' '
            if task in tasks1:
                val1 = getattr(bs1[pkg].tasks[task], stat_attr)
            else:
                task_op = '+'
                val1 = 0
            if task in tasks2:
                val2 = getattr(bs2[pkg].tasks[task], stat_attr)
            else:
                val2 = 0
                task_op = '-'

            # Relative difference is undefined for a zero baseline
            if val1 == 0:
                reldiff = float('inf')
            else:
                reldiff = 100 * (val2 - val1) / val1

            if min_val and max(val1, val2) < min_val:
                log.debug("Filtering out %s:%s (%s)", pkg, task,
                          max(val1, val2))
                continue
            if min_absdiff and abs(val2 - val1) < min_absdiff:
                log.debug("Filtering out %s:%s (difference of %s)", pkg, task,
                          val2-val1)
                continue
            tasks_diff.append(TaskDiff(pkg, pkg_op, task, task_op, val1, val2,
                                       val2-val1, reldiff))
    return tasks_diff
class BSVerDiff(object):
    """Class representing recipe version differences between two buildstats"""

    def __init__(self, bs1, bs2):
        RecipeVerDiff = namedtuple('RecipeVerDiff', 'left right')

        recipes1 = set(bs1.keys())
        recipes2 = set(bs2.keys())

        # Recipes present in only one of the two builds
        self.new = {r: bs2[r] for r in sorted(recipes2 - recipes1)}
        self.dropped = {r: bs1[r] for r in sorted(recipes1 - recipes2)}

        # Recipes present in both builds, bucketed by what changed
        self.echanged = {}
        self.vchanged = {}
        self.rchanged = {}
        self.unchanged = {}
        self.empty_diff = False

        common = recipes2.intersection(recipes1)
        if common:
            for recipe in common:
                left, right = bs1[recipe], bs2[recipe]
                rdiff = RecipeVerDiff(left, right)
                if left.epoch != right.epoch:
                    self.echanged[recipe] = rdiff
                elif left.version != right.version:
                    self.vchanged[recipe] = rdiff
                elif left.revision != right.revision:
                    self.rchanged[recipe] = rdiff
                else:
                    self.unchanged[recipe] = rdiff

        # Same recipe sets and everything unchanged means there is no diff
        if len(recipes1) == len(recipes2) == len(self.unchanged):
            self.empty_diff = True

    def __bool__(self):
        return not self.empty_diff

View File

@@ -0,0 +1,454 @@
# Yocto Project layer check tool
#
# Copyright (C) 2017 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import os
import re
import subprocess
from enum import Enum
import bb.tinfoil
class LayerType(Enum):
    """Classification of a layer, as computed by _detect_layer()."""
    BSP = 0                     # provides machine configurations (conf/machine/*.conf)
    DISTRO = 1                  # provides distro configurations (conf/distro/*.conf)
    SOFTWARE = 2                # neither machines nor distros, just recipes
    CORE = 3                    # the "meta" layer carrying the "core" collection
    ERROR_NO_LAYER_CONF = 98    # conf/layer.conf is missing
    ERROR_BSP_DISTRO = 99       # layer defines both machines and distros
def _get_configurations(path):
    """Return the names (without the .conf suffix) of all configuration
    files found directly inside *path*."""
    return [entry[:-5] for entry in os.listdir(path)
            if entry.endswith('.conf')
            and os.path.isfile(os.path.join(path, entry))]
def _get_layer_collections(layer_path, lconf=None, data=None):
    """Extract the collections a layer defines from its conf/layer.conf.

    Parses the layer configuration with bitbake's parser and returns a dict
    mapping collection name -> {'priority', 'pattern', 'depends', 'compat'}.
    A layer that declares no BBFILE_COLLECTIONS gets a single collection
    named after its directory.
    """
    import bb.parse
    import bb.data

    if lconf is None:
        lconf = os.path.join(layer_path, 'conf', 'layer.conf')

    if data is None:
        # Start from a fresh datastore when the caller did not provide one
        ldata = bb.data.init()
        bb.parse.init_parser(ldata)
    else:
        ldata = data.createCopy()
    # layer.conf files reference ${LAYERDIR}; point it at this layer
    ldata.setVar('LAYERDIR', layer_path)
    try:
        ldata = bb.parse.handle(lconf, ldata, include=True, baseconfig=True)
    except:
        raise RuntimeError("Parsing of layer.conf from layer: %s failed" % layer_path)
    ldata.expandVarref('LAYERDIR')

    collections = (ldata.getVar('BBFILE_COLLECTIONS') or '').split()
    if not collections:
        # Fall back to the directory name when the layer declares no collection
        name = os.path.basename(layer_path)
        collections = [name]

    collections = {c: {} for c in collections}
    for name in collections:
        priority = ldata.getVar('BBFILE_PRIORITY_%s' % name)
        pattern = ldata.getVar('BBFILE_PATTERN_%s' % name)
        depends = ldata.getVar('LAYERDEPENDS_%s' % name)
        compat = ldata.getVar('LAYERSERIES_COMPAT_%s' % name)
        try:
            depDict = bb.utils.explode_dep_versions2(depends or "")
        except bb.utils.VersionStringException as vse:
            bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (name, str(vse)))

        collections[name]['priority'] = priority
        collections[name]['pattern'] = pattern
        # Version constraints are discarded; only dependency names are kept
        collections[name]['depends'] = ' '.join(depDict.keys())
        collections[name]['compat'] = compat

    return collections
def _detect_layer(layer_path):
    """
    Scans layer directory to detect what type of layer
    is BSP, Distro or Software.

    Returns a dictionary with layer name, type and path.
    """
    layer = {}
    layer_name = os.path.basename(layer_path)

    layer['name'] = layer_name
    layer['path'] = layer_path
    layer['conf'] = {}

    if not os.path.isfile(os.path.join(layer_path, 'conf', 'layer.conf')):
        layer['type'] = LayerType.ERROR_NO_LAYER_CONF
        return layer

    machine_conf = os.path.join(layer_path, 'conf', 'machine')
    distro_conf = os.path.join(layer_path, 'conf', 'distro')

    is_bsp = False
    is_distro = False
    # Initialize both lists up front: the CORE branch below reads them even
    # when the corresponding conf directory does not exist (previously this
    # raised UnboundLocalError for a core layer without conf/machine or
    # conf/distro).
    machines = []
    distros = []

    if os.path.isdir(machine_conf):
        machines = _get_configurations(machine_conf)
        if machines:
            is_bsp = True

    if os.path.isdir(distro_conf):
        distros = _get_configurations(distro_conf)
        if distros:
            is_distro = True

    layer['collections'] = _get_layer_collections(layer['path'])

    if layer_name == "meta" and "core" in layer['collections']:
        layer['type'] = LayerType.CORE
        layer['conf']['machines'] = machines
        layer['conf']['distros'] = distros
    elif is_bsp and is_distro:
        # A layer must not be BSP and distro at the same time
        layer['type'] = LayerType.ERROR_BSP_DISTRO
    elif is_bsp:
        layer['type'] = LayerType.BSP
        layer['conf']['machines'] = machines
    elif is_distro:
        layer['type'] = LayerType.DISTRO
        layer['conf']['distros'] = distros
    else:
        layer['type'] = LayerType.SOFTWARE

    return layer
def detect_layers(layer_directories, no_auto):
    """Find layers in the given directories.

    With no_auto True only the given directories themselves are probed (they
    must contain a conf/ subdirectory); otherwise every directory tree is
    walked and each subdirectory containing conf/ is probed.

    Returns a list of layer dictionaries as produced by _detect_layer().
    """
    layers = []

    for directory in layer_directories:
        directory = os.path.realpath(directory)
        # realpath() normally strips this already; keep the defensive check
        # so path comparisons stay canonical
        if directory[-1] == '/':
            directory = directory[0:-1]

        if no_auto:
            conf_dir = os.path.join(directory, 'conf')
            if os.path.isdir(conf_dir):
                layer = _detect_layer(directory)
                if layer:
                    layers.append(layer)
        else:
            for root, dirs, files in os.walk(directory):
                # (removed unused dir_name local that shadowed nothing useful)
                conf_dir = os.path.join(root, 'conf')
                if os.path.isdir(conf_dir):
                    layer = _detect_layer(root)
                    if layer:
                        layers.append(layer)

    return layers
def _find_layer(depend, layers):
    """Return the layer dict whose collections include *depend*, or None."""
    for candidate in layers:
        if depend in candidate.get('collections', {}):
            return candidate
    return None
def sanity_check_layers(layers, logger):
    """
    Check that we didn't find duplicate collection names, as the layer that will
    be used is non-deterministic. The precise check is duplicate collections
    with different patterns, as the same pattern being repeated won't cause
    problems.
    """
    import collections

    # Collect every pattern seen per collection name across all layers
    patterns_by_name = collections.defaultdict(set)
    for layer in layers:
        for name, data in layer.get("collections", {}).items():
            patterns_by_name[name].add(data["pattern"])

    passed = True
    for name, patterns in patterns_by_name.items():
        # Only differing patterns under the same name are a real problem
        if len(patterns) > 1:
            passed = False
            logger.error("Collection %s found multiple times: %s" % (name, ", ".join(patterns)))
    return passed
def get_layer_dependencies(layer, layers, logger):
    """Return the list of layers that *layer* (transitively) depends on.

    Returns [] when there are no dependencies and None when at least one
    dependency could not be resolved among *layers*.
    """
    def recurse_dependencies(depends, layer, layers, logger, ret=None):
        # ret previously defaulted to a shared mutable list ([]); use None so
        # separate calls can never accumulate into the same list object.
        if ret is None:
            ret = []
        logger.debug('Processing dependencies %s for layer %s.' % \
                    (depends, layer['name']))

        for depend in depends.split():
            # core (oe-core) is suppose to be provided
            if depend == 'core':
                continue

            layer_depend = _find_layer(depend, layers)
            if not layer_depend:
                logger.error('Layer %s depends on %s and isn\'t found.' % \
                        (layer['name'], depend))
                ret = None
                continue

            # We keep processing, even if ret is None, this allows us to report
            # multiple errors at once
            if ret is not None and layer_depend not in ret:
                ret.append(layer_depend)
            else:
                # we might have processed this dependency already, in which case
                # we should not do it again (avoid recursive loop)
                continue

            # Recursively process...
            if 'collections' not in layer_depend:
                continue

            for collection in layer_depend['collections']:
                collect_deps = layer_depend['collections'][collection]['depends']
                if not collect_deps:
                    continue
                ret = recurse_dependencies(collect_deps, layer_depend, layers, logger, ret)

        return ret

    layer_depends = []
    for collection in layer['collections']:
        depends = layer['collections'][collection]['depends']
        if not depends:
            continue

        layer_depends = recurse_dependencies(depends, layer, layers, logger, layer_depends)

    # Note: [] (empty) is allowed, None is not!
    return layer_depends
def add_layer_dependencies(bblayersconf, layer, layers, logger):
    """Resolve *layer*'s dependencies and append them to *bblayersconf*.

    Returns False when a dependency could not be resolved, True otherwise.
    """
    layer_depends = get_layer_dependencies(layer, layers, logger)
    # None signals an unresolved dependency; [] simply means "no deps"
    if layer_depends is None:
        return False
    add_layers(bblayersconf, layer_depends, logger)
    return True
def add_layers(bblayersconf, layers, logger):
    """Append the given layers to *bblayersconf*, skipping layers whose
    path bitbake-layers already reports as present."""
    # Ask bitbake-layers which layer paths are already configured
    output = check_command('Getting existing layers failed.', 'bitbake-layers show-layers').decode('utf-8')
    added = set()
    for _name, path, _pri in re.findall(r'^(\S+) +([^\n]*?) +(\d+)$', output, re.MULTILINE):
        added.add(path)

    with open(bblayersconf, 'a+') as f:
        for layer in layers:
            logger.info('Adding layer %s' % layer['name'])
            name = layer['name']
            path = layer['path']
            if path in added:
                logger.info('%s is already in %s' % (name, bblayersconf))
                continue
            added.add(path)
            f.write("\nBBLAYERS += \"%s\"\n" % path)
    return True
def check_bblayers(bblayersconf, layer_path, logger):
    '''
    If layer_path found in BBLAYERS return True
    '''
    import bb.parse
    import bb.data

    ldata = bb.parse.handle(bblayersconf, bb.data.init(), include=True)
    target = os.path.normpath(layer_path)
    # Compare normalized paths so trailing slashes etc. do not matter
    entries = (ldata.getVar('BBLAYERS') or '').split()
    return any(os.path.normpath(entry) == target for entry in entries)
def check_command(error_msg, cmd, cwd=None):
    '''
    Run a command under a shell, capture stdout and stderr in a single stream,
    throw an error when command returns non-zero exit code. Returns the output.

    Args:
        error_msg: text prepended to the RuntimeError message on failure
        cmd: shell command line to execute
        cwd: optional working directory for the command

    Returns the combined stdout+stderr output as bytes.
    '''
    # subprocess.run replaces the older Popen + communicate() pair
    result = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, cwd=cwd)
    if result.returncode:
        msg = "%s\nCommand: %s\nOutput:\n%s" % (error_msg, cmd, result.stdout.decode('utf-8'))
        raise RuntimeError(msg)
    return result.stdout
def get_signatures(builddir, failsafe=False, machine=None, extravars=None):
    """Collect task signatures via 'bitbake -S lockedsigs world'.

    Runs bitbake with the OEBasicHash signature handler, then parses the
    resulting locked-sigs.inc. With failsafe=True, bitbake is run with -k
    and errors are tolerated as long as a signature file was produced.

    Returns (sigs, tune2tasks) where sigs maps '<recipe>:<task>' -> hash
    and tune2tasks maps tune name -> list of tasks using that tune.
    Raises RuntimeError when no signatures could be loaded.
    """
    import re

    # some recipes needs to be excluded like meta-world-pkgdata
    # because a layer can add recipes to a world build so signature
    # will be change
    exclude_recipes = ('meta-world-pkgdata',)

    sigs = {}
    tune2tasks = {}

    # Build the bitbake invocation; BB_SIGNATURE_HANDLER must be passed
    # through the environment whitelist to take effect
    cmd = 'BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS BB_SIGNATURE_HANDLER" BB_SIGNATURE_HANDLER="OEBasicHash" '
    if extravars:
        cmd += extravars
        cmd += ' '
    if machine:
        cmd += 'MACHINE=%s ' % machine
    cmd += 'bitbake '
    if failsafe:
        cmd += '-k '
    cmd += '-S lockedsigs world'
    sigs_file = os.path.join(builddir, 'locked-sigs.inc')
    # Remove any stale file so a failed run cannot be mistaken for success
    if os.path.exists(sigs_file):
        os.unlink(sigs_file)
    try:
        check_command('Generating signatures failed. This might be due to some parse error and/or general layer incompatibilities.',
                      cmd, builddir)
    except RuntimeError as ex:
        if failsafe and os.path.exists(sigs_file):
            # Ignore the error here. Most likely some recipes active
            # in a world build lack some dependencies. There is a
            # separate test_machine_world_build which exposes the
            # failure.
            pass
        else:
            raise
    sig_regex = re.compile(r"^(?P<task>.*:.*):(?P<hash>.*) .$")
    tune_regex = re.compile(r"(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
    current_tune = None
    with open(sigs_file, 'r') as f:
        for line in f.readlines():
            line = line.strip()
            # A SIGGEN_LOCKEDSIGS_t-<tune> assignment starts a new tune section
            t = tune_regex.search(line)
            if t:
                current_tune = t.group('tune')
            s = sig_regex.match(line)
            if s:
                exclude = False
                for er in exclude_recipes:
                    (recipe, task) = s.group('task').split(':')
                    if er == recipe:
                        exclude = True
                        break
                if exclude:
                    continue
                sigs[s.group('task')] = s.group('hash')
                tune2tasks.setdefault(current_tune, []).append(s.group('task'))
    if not sigs:
        raise RuntimeError('Can\'t load signatures from %s' % sigs_file)
    return (sigs, tune2tasks)
def get_depgraph(targets=['world'], failsafe=False):
    '''
    Returns the dependency graph for the given target(s).
    The dependency graph is taken directly from DepTreeEvent.

    With failsafe=True, NoProvider events are ignored (the remaining
    dependency information still arrives). Raises RuntimeError when the
    command fails or no depgraph was produced.
    '''
    # NOTE(review): targets defaults to a mutable list; it is only read
    # (passed to run_command) so this is harmless, but a tuple would be safer.
    depgraph = None
    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare(config_only=False)
        # Only the events needed to drive the loop below are unmasked
        tinfoil.set_event_mask(['bb.event.NoProvider', 'bb.event.DepTreeGenerated', 'bb.command.CommandCompleted'])
        if not tinfoil.run_command('generateDepTreeEvent', targets, 'do_build'):
            raise RuntimeError('starting generateDepTreeEvent failed')
        while True:
            event = tinfoil.wait_event(timeout=1000)
            if event:
                if isinstance(event, bb.command.CommandFailed):
                    raise RuntimeError('Generating dependency information failed: %s' % event.error)
                elif isinstance(event, bb.command.CommandCompleted):
                    break
                elif isinstance(event, bb.event.NoProvider):
                    if failsafe:
                        # The event is informational, we will get information about the
                        # remaining dependencies eventually and thus can ignore this
                        # here like we do in get_signatures(), if desired.
                        continue
                    if event._reasons:
                        raise RuntimeError('Nothing provides %s: %s' % (event._item, event._reasons))
                    else:
                        raise RuntimeError('Nothing provides %s.' % (event._item))
                elif isinstance(event, bb.event.DepTreeGenerated):
                    depgraph = event._depgraph

    if depgraph is None:
        raise RuntimeError('Could not retrieve the depgraph.')
    return depgraph
def compare_signatures(old_sigs, curr_sigs):
    '''
    Compares the result of two get_signatures() calls. Returns None if no
    problems found, otherwise a string that can be used as additional
    explanation in self.fail().
    '''
    # task -> (old signature, new signature)
    sig_diff = {}
    for task in old_sigs:
        if task in curr_sigs and \
           old_sigs[task] != curr_sigs[task]:
            sig_diff[task] = (old_sigs[task], curr_sigs[task])
    if not sig_diff:
        return None

    # Beware, depgraph uses task=<pn>.<taskname> whereas get_signatures()
    # uses <pn>:<taskname>. Need to convert sometimes. The output follows
    # the convention from get_signatures() because that seems closer to
    # normal bitbake output.
    def sig2graph(task):
        pn, taskname = task.rsplit(':', 1)
        return pn + '.' + taskname

    def graph2sig(task):
        pn, taskname = task.rsplit('.', 1)
        return pn + ':' + taskname

    depgraph = get_depgraph(failsafe=True)
    depends = depgraph['tdepends']

    # If a task A has a changed signature, but none of its
    # dependencies, then we need to report it because it is
    # the one which introduces a change. Any task depending on
    # A (directly or indirectly) will also have a changed
    # signature, but we don't need to report it. It might have
    # its own changes, which will become apparent once the
    # issues that we do report are fixed and the test gets run
    # again.
    sig_diff_filtered = []
    for task, (old_sig, new_sig) in sig_diff.items():
        deps_tainted = False
        for dep in depends.get(sig2graph(task), ()):
            if graph2sig(dep) in sig_diff:
                deps_tainted = True
                break
        if not deps_tainted:
            sig_diff_filtered.append((task, old_sig, new_sig))

    msg = []
    msg.append('%d signatures changed, initial differences (first hash before, second after):' %
               len(sig_diff))
    for diff in sorted(sig_diff_filtered):
        recipe, taskname = diff[0].rsplit(':', 1)
        cmd = 'bitbake-diffsigs --task %s %s --signature %s %s' % \
              (recipe, taskname, diff[1], diff[2])
        msg.append(' %s: %s -> %s' % diff)
        msg.append(' %s' % cmd)
        # Run the diff command right away so the failure message is
        # self-contained; fall back to the error text when it fails.
        try:
            output = check_command('Determining signature difference failed.',
                                   cmd).decode('utf-8')
        except RuntimeError as error:
            output = str(error)
        if output:
            msg.extend(['    ' + line for line in output.splitlines()])
            msg.append('')
    return '\n'.join(msg)

View File

@@ -0,0 +1,9 @@
# Copyright (C) 2017 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
from oeqa.core.case import OETestCase
class OECheckLayerTestCase(OETestCase):
    """Common base class for all yocto-check-layer test cases.

    Exists so layer checks share an ancestor distinct from other OEQA
    test cases; it currently adds no behaviour of its own.
    """
    pass

View File

@@ -0,0 +1,206 @@
# Copyright (C) 2017 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import unittest
from checklayer import LayerType, get_signatures, check_command, get_depgraph
from checklayer.case import OECheckLayerTestCase
class BSPCheckLayer(OECheckLayerTestCase):
    """Checks specific to BSP layers (also applied to the core layer)."""

    @classmethod
    def setUpClass(cls):
        # PEP 8: the implicit first argument of a classmethod is 'cls'
        if cls.tc.layer['type'] not in (LayerType.BSP, LayerType.CORE):
            raise unittest.SkipTest("BSPCheckLayer: Layer %s isn't BSP one." %\
                cls.tc.layer['name'])

    def test_bsp_defines_machines(self):
        """A BSP layer must provide at least one conf/machine/*.conf."""
        self.assertTrue(self.tc.layer['conf']['machines'],
                "Layer is BSP but doesn't define machines.")

    def test_bsp_no_set_machine(self):
        """Merely adding the layer must not change the current MACHINE."""
        from oeqa.utils.commands import get_bb_var
        machine = get_bb_var('MACHINE')
        self.assertEqual(self.td['bbvars']['MACHINE'], machine,
                msg="Layer %s modified machine %s -> %s" % \
                (self.tc.layer['name'], self.td['bbvars']['MACHINE'], machine))

    def test_machine_world(self):
        '''
        "bitbake world" is expected to work regardless which machine is selected.
        BSP layers sometimes break that by enabling a recipe for a certain machine
        without checking whether that recipe actually can be built in the current
        distro configuration (for example, OpenGL might not enabled).

        This test iterates over all machines. It would be nicer to instantiate
        it once per machine. It merely checks for errors during parse
        time. It does not actually attempt to build anything.
        '''
        if not self.td['machines']:
            self.skipTest('No machines set with --machines.')
        msg = []
        for machine in self.td['machines']:
            # In contrast to test_machine_signatures() below, errors are fatal here.
            try:
                get_signatures(self.td['builddir'], failsafe=False, machine=machine)
            except RuntimeError as ex:
                msg.append(str(ex))
        if msg:
            msg.insert(0, 'The following machines broke a world build:')
            self.fail('\n'.join(msg))

    def test_machine_signatures(self):
        '''
        Selecting a machine may only affect the signature of tasks that are specific
        to that machine. In other words, when MACHINE=A and MACHINE=B share a recipe
        foo and the output of foo, then both machine configurations must build foo
        in exactly the same way. Otherwise it is not possible to use both machines
        in the same distribution.

        This criteria can only be tested by testing different machines in combination,
        i.e. one main layer, potentially several additional BSP layers and an explicit
        choice of machines:
        yocto-check-layer --additional-layers .../meta-intel --machines intel-corei7-64 imx6slevk -- .../meta-freescale
        '''
        if not self.td['machines']:
            self.skipTest('No machines set with --machines.')

        # Collect signatures for all machines that we are testing
        # and merge that into a hash:
        # tune -> task -> signature -> list of machines with that combination
        #
        # It is an error if any tune/task pair has more than one signature,
        # because that implies that the machines that caused those different
        # signatures do not agree on how to execute the task.
        tunes = {}
        # Preserve ordering of machines as chosen by the user.
        for machine in self.td['machines']:
            curr_sigs, tune2tasks = get_signatures(self.td['builddir'], failsafe=True, machine=machine)
            # Invert the tune -> [tasks] mapping.
            tasks2tune = {}
            for tune, tasks in tune2tasks.items():
                for task in tasks:
                    tasks2tune[task] = tune
            for task, sighash in curr_sigs.items():
                tunes.setdefault(tasks2tune[task], {}).setdefault(task, {}).setdefault(sighash, []).append(machine)

        msg = []
        pruned = 0
        last_line_key = None
        # do_fetch, do_unpack, ..., do_build
        taskname_list = []
        if tunes:
            # The output below is most useful when we start with tasks that are at
            # the bottom of the dependency chain, i.e. those that run first. If
            # those tasks differ, the rest also does.
            #
            # To get an ordering of tasks, we do a topological sort of the entire
            # depgraph for the base configuration, then on-the-fly flatten that list by stripping
            # out the recipe names and removing duplicates. The base configuration
            # is not necessarily representative, but should be close enough. Tasks
            # that were not encountered get a default priority.
            depgraph = get_depgraph()
            depends = depgraph['tdepends']
            WHITE = 1
            GRAY = 2
            BLACK = 3
            color = {}
            found = set()
            def visit(task):
                # Depth-first post-order traversal; color marks visit state
                color[task] = GRAY
                for dep in depends.get(task, ()):
                    if color.setdefault(dep, WHITE) == WHITE:
                        visit(dep)
                color[task] = BLACK
                pn, taskname = task.rsplit('.', 1)
                if taskname not in found:
                    taskname_list.append(taskname)
                    found.add(taskname)
            for task in depends.keys():
                if color.setdefault(task, WHITE) == WHITE:
                    visit(task)

        taskname_order = dict([(task, index) for index, task in enumerate(taskname_list) ])
        def task_key(task):
            pn, taskname = task.rsplit(':', 1)
            return (pn, taskname_order.get(taskname, len(taskname_list)), taskname)

        for tune in sorted(tunes.keys()):
            tasks = tunes[tune]
            # As for test_signatures it would be nicer to sort tasks
            # by dependencies here, but that is harder because we have
            # to report on tasks from different machines, which might
            # have different dependencies. We resort to pruning the
            # output by reporting only one task per recipe if the set
            # of machines matches.
            #
            # "bitbake-diffsigs -t -s" is intelligent enough to print
            # diffs recursively, so often it does not matter that much
            # if we don't pick the underlying difference
            # here. However, sometimes recursion fails
            # (https://bugzilla.yoctoproject.org/show_bug.cgi?id=6428).
            #
            # To mitigate that a bit, we use a hard-coded ordering of
            # tasks that represents how they normally run and prefer
            # to print the ones that run first.
            for task in sorted(tasks.keys(), key=task_key):
                signatures = tasks[task]
                # do_build can be ignored: it is known to have
                # different signatures in some cases, for example in
                # the allarch ca-certificates due to RDEPENDS=openssl.
                # That particular dependency is marked via
                # SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS, but still shows up
                # in the sstate signature hash because filtering it
                # out would be hard and running do_build multiple
                # times doesn't really matter.
                if len(signatures.keys()) > 1 and \
                   not task.endswith(':do_build'):
                    # Error!
                    #
                    # Sort signatures by machines, because the hex values don't mean anything.
                    # => all-arch adwaita-icon-theme:do_build: 1234... (beaglebone, qemux86) != abcdf... (qemux86-64)
                    #
                    # Skip the line if it is covered already by the predecessor (same pn, same sets of machines).
                    pn, taskname = task.rsplit(':', 1)
                    next_line_key = (pn, sorted(signatures.values()))
                    if next_line_key != last_line_key:
                        line = ' %s %s: ' % (tune, task)
                        line += ' != '.join(['%s (%s)' % (signature, ', '.join([m for m in signatures[signature]])) for
                                             signature in sorted(signatures.keys(), key=lambda s: signatures[s])])
                        last_line_key = next_line_key
                        msg.append(line)
                        # Randomly pick two mismatched signatures and remember how to invoke
                        # bitbake-diffsigs for them.
                        iterator = iter(signatures.items())
                        a = next(iterator)
                        b = next(iterator)
                        diffsig_machines = '(%s) != (%s)' % (', '.join(a[1]), ', '.join(b[1]))
                        diffsig_params = '-t %s %s -s %s %s' % (pn, taskname, a[0], b[0])
                else:
                    pruned += 1

        if msg:
            msg.insert(0, 'The machines have conflicting signatures for some shared tasks:')
            if pruned > 0:
                msg.append('')
                # grammar fix: "where not listed" -> "were not listed"
                msg.append('%d tasks were not listed because some other task of the recipe already differed.' % pruned)
                msg.append('It is likely that differences from different recipes also have the same root cause.')
            msg.append('')
            # Explain how to investigate...
            msg.append('To investigate, run bitbake-diffsigs -t recipename taskname -s fromsig tosig.')
            cmd = 'bitbake-diffsigs %s' % diffsig_params
            msg.append('Example: %s in the last line' % diffsig_machines)
            msg.append('Command: %s' % cmd)
            # ... and actually do it automatically for that example, but without aborting
            # when that fails.
            try:
                output = check_command('Comparing signatures failed.', cmd).decode('utf-8')
            except RuntimeError as ex:
                output = str(ex)
            msg.extend([' ' + line for line in output.splitlines()])
            self.fail('\n'.join(msg))

View File

@@ -0,0 +1,104 @@
# Copyright (C) 2017 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import glob
import os
import unittest
import re
from checklayer import get_signatures, LayerType, check_command, get_depgraph, compare_signatures
from checklayer.case import OECheckLayerTestCase
class CommonCheckLayer(OECheckLayerTestCase):
    """Checks that apply to every layer, regardless of its type."""

    def test_readme(self):
        """The layer must ship a non-empty top-level README that names a
        maintainer, explains how to submit patches and contains an email
        address."""
        if self.tc.layer['type'] == LayerType.CORE:
            raise unittest.SkipTest("Core layer's README is top level")

        # The top-level README file may have a suffix (like README.rst or README.txt).
        readme_files = glob.glob(os.path.join(self.tc.layer['path'], '[Rr][Ee][Aa][Dd][Mm][Ee]*'))
        self.assertTrue(len(readme_files) > 0,
                        msg="Layer doesn't contain a README file.")

        # There might be more than one file matching the file pattern above
        # (for example, README.rst and README-COPYING.rst). The one with the shortest
        # name is considered the "main" one.
        readme_file = sorted(readme_files)[0]
        data = ''
        with open(readme_file, 'r') as f:
            data = f.read()
        self.assertTrue(data,
                        msg="Layer contains a README file but it is empty.")

        # If a layer's README references another README, then the checks below are not valid
        # NOTE(review): this matches the word "README" anywhere in the file,
        # so any self-reference also skips the checks below — confirm intent.
        if re.search('README', data, re.IGNORECASE):
            return

        self.assertIn('maintainer', data.lower())
        self.assertIn('patch', data.lower())
        # Check that there is an email address in the README
        email_regex = re.compile(r"[^@]+@[^@]+")
        self.assertTrue(email_regex.match(data))

    def test_parse(self):
        """'bitbake -p' (parse only) must succeed with the layer added."""
        check_command('Layer %s failed to parse.' % self.tc.layer['name'],
                      'bitbake -p')

    def test_show_environment(self):
        """'bitbake -e' must succeed with the layer added."""
        check_command('Layer %s failed to show environment.' % self.tc.layer['name'],
                      'bitbake -e')

    def test_world(self):
        '''
        "bitbake world" is expected to work. test_signatures does not cover that
        because it is more lenient and ignores recipes in a world build that
        are not actually buildable, so here we fail when "bitbake -S none world"
        fails.
        '''
        get_signatures(self.td['builddir'], failsafe=False)

    def test_world_inherit_class(self):
        '''
        This also does "bitbake -S none world" along with inheriting "yocto-check-layer"
        class, which can do additional per-recipe test cases.
        '''
        msg = []
        try:
            get_signatures(self.td['builddir'], failsafe=False, machine=None, extravars='BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS INHERIT" INHERIT="yocto-check-layer"')
        except RuntimeError as ex:
            msg.append(str(ex))
        if msg:
            msg.insert(0, 'Layer %s failed additional checks from yocto-check-layer.bbclass\nSee below log for specific recipe parsing errors:\n' % \
                self.tc.layer['name'])
            self.fail('\n'.join(msg))

    @unittest.expectedFailure
    def test_patches_upstream_status(self):
        """Every .patch file in the layer must carry a well-formed
        Upstream-Status tag (currently expected to fail)."""
        import sys
        # oe.qa lives in meta/lib relative to this script's location
        sys.path.append(os.path.join(sys.path[0], '../../../../meta/lib/'))
        import oe.qa
        patches = []
        for dirpath, dirs, files in os.walk(self.tc.layer['path']):
            for filename in files:
                if filename.endswith(".patch"):
                    ppath = os.path.join(dirpath, filename)
                    if oe.qa.check_upstream_status(ppath):
                        patches.append(ppath)
        self.assertEqual(len(patches), 0 , \
            msg="Found following patches with malformed or missing upstream status:\n%s" % '\n'.join([str(patch) for patch in patches]))

    def test_signatures(self):
        """Adding the layer must not change the signatures of existing
        tasks (skipped for software layers when disabled via context)."""
        if self.tc.layer['type'] == LayerType.SOFTWARE and \
           not self.tc.test_software_layer_signatures:
            raise unittest.SkipTest("Not testing for signature changes in a software layer %s." \
                     % self.tc.layer['name'])

        curr_sigs, _ = get_signatures(self.td['builddir'], failsafe=True)
        msg = compare_signatures(self.td['sigs'], curr_sigs)
        if msg is not None:
            self.fail('Adding layer %s changed signatures.\n%s' % (self.tc.layer['name'], msg))

    def test_layerseries_compat(self):
        """Every collection must declare LAYERSERIES_COMPAT_<collection>."""
        for collection_name, collection_data in self.tc.layer['collections'].items():
            self.assertTrue(collection_data['compat'], "Collection %s from layer %s does not set compatible oe-core versions via LAYERSERIES_COMPAT_collection." \
                 % (collection_name, self.tc.layer['name']))

View File

@@ -0,0 +1,28 @@
# Copyright (C) 2017 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import unittest
from checklayer import LayerType
from checklayer.case import OECheckLayerTestCase
class DistroCheckLayer(OECheckLayerTestCase):
    """Checks specific to distro layers (also applied to the core layer)."""

    @classmethod
    def setUpClass(cls):
        # PEP 8: the implicit first argument of a classmethod is 'cls'
        if cls.tc.layer['type'] not in (LayerType.DISTRO, LayerType.CORE):
            raise unittest.SkipTest("DistroCheckLayer: Layer %s isn't Distro one." %\
                cls.tc.layer['name'])

    def test_distro_defines_distros(self):
        """A distro layer must provide at least one conf/distro/*.conf."""
        # Message fixed: it was copy-pasted from the BSP variant and wrongly
        # talked about machines.
        self.assertTrue(self.tc.layer['conf']['distros'],
                "Layer is Distro but doesn't define distros.")

    def test_distro_no_set_distros(self):
        """Merely adding the layer must not change the current DISTRO."""
        from oeqa.utils.commands import get_bb_var
        distro = get_bb_var('DISTRO')
        self.assertEqual(self.td['bbvars']['DISTRO'], distro,
                msg="Layer %s modified distro %s -> %s" % \
                (self.tc.layer['name'], self.td['bbvars']['DISTRO'], distro))

View File

@@ -0,0 +1,17 @@
# Copyright (C) 2017 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import os
import sys
import glob
import re
from oeqa.core.context import OETestContext
class CheckLayerTestContext(OETestContext):
    """Test context for yocto-check-layer runs.

    Carries the layer under test and whether signature checks should also
    be applied to software layers, on top of the base OEQA test data.
    """
    def __init__(self, td=None, logger=None, layer=None, test_software_layer_signatures=True):
        super(CheckLayerTestContext, self).__init__(td, logger)
        # Layer dictionary as produced by checklayer._detect_layer()
        self.layer = layer
        # When False, signature checks are skipped for SOFTWARE layers
        self.test_software_layer_signatures = test_software_layer_signatures

View File

@@ -0,0 +1,404 @@
#!/usr/bin/env python3
# Development tool - utility functions for plugins
#
# Copyright (C) 2014 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool plugins module"""
import os
import sys
import subprocess
import logging
import re
import codecs
logger = logging.getLogger('devtool')
class DevtoolError(Exception):
    """Exception for handling devtool errors"""

    def __init__(self, message, exitcode=1):
        # The message is carried by the Exception base class; the exit code
        # tells the top-level handler what status to terminate with.
        super().__init__(message)
        self.exitcode = exitcode
def exec_build_env_command(init_path, builddir, cmd, watch=False, **options):
    """Run a program in bitbake build context"""
    import bb

    options.setdefault('cwd', builddir)
    if init_path:
        # As the OE init script makes use of BASH_SOURCE to determine OEROOT,
        # and can't determine it when running under dash, we need to set
        # the executable to bash to correctly set things up
        options.setdefault('executable', 'bash')
        logger.debug('Executing command: "%s" using init path %s' % (cmd, init_path))
        init_prefix = '. %s %s > /dev/null && ' % (init_path, builddir)
    else:
        logger.debug('Executing command "%s"' % cmd)
        init_prefix = ''

    if not watch:
        return bb.process.run('%s%s' % (init_prefix, cmd), **options)

    if sys.stdout.isatty():
        # Fool bitbake into thinking it's outputting to a terminal (because it is, indirectly)
        cmd = 'script -e -q -c "%s" /dev/null' % cmd
    return exec_watch('%s%s' % (init_prefix, cmd), **options)
def exec_watch(cmd, **options):
    """Run a program, echoing its stdout to sys.stdout while capturing it.

    Returns an (output, None) tuple on success; raises
    bb.process.ExecutionError if the command exits non-zero.
    """
    import bb
    if isinstance(cmd, str):
        options.setdefault('shell', True)
    proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **options
    )
    decoded = codecs.getreader('utf-8')(proc.stdout)
    captured = []
    while True:
        chunk = decoded.read(1, 1)
        if chunk:
            # Echo as we go so the user sees live progress
            sys.stdout.write(chunk)
            sys.stdout.flush()
            captured.append(chunk)
        elif chunk == '' and proc.poll() is not None:
            break
    output = ''.join(captured)
    if proc.returncode != 0:
        raise bb.process.ExecutionError(cmd, proc.returncode, output, None)
    return output, None
def exec_fakeroot(d, cmd, **kwargs):
    """Run a command under fakeroot (pseudo, in fact) so that it picks up the appropriate file permissions

    Convenience wrapper around exec_fakeroot_no_d() that pulls the pseudo
    command and environment from the datastore d. Returns the command's
    exit status.
    """
    # Grab the command and check it actually exists
    fakerootcmd = d.getVar('FAKEROOTCMD')
    fakerootenv = d.getVar('FAKEROOTENV')
    # Fix: kwargs was previously passed as a positional argument (raising
    # TypeError in exec_fakeroot_no_d) and the return code was discarded
    return exec_fakeroot_no_d(fakerootcmd, fakerootenv, cmd, **kwargs)
def exec_fakeroot_no_d(fakerootcmd, fakerootenv, cmd, **kwargs):
    """Run cmd under the pseudo fakeroot wrapper.

    Parameters:
        fakerootcmd: path to the pseudo executable (FAKEROOTCMD)
        fakerootenv: space-separated VAR=value settings pseudo needs (FAKEROOTENV)
        cmd: command line to append after the pseudo executable
        kwargs: passed through to subprocess.call()
    Returns the command's exit status, or 2 if pseudo could not be found.
    """
    if not os.path.exists(fakerootcmd):
        # Fix: the original message contained a '%s' placeholder with no
        # argument, so the missing path was printed literally as '%s'
        logger.error('pseudo executable %s could not be found - have you run a build yet? pseudo-native should install this and if you have run any build then that should have been built', fakerootcmd)
        return 2
    # Set up the appropriate environment
    newenv = dict(os.environ)
    for varvalue in fakerootenv.split():
        if '=' in varvalue:
            splitval = varvalue.split('=', 1)
            newenv[splitval[0]] = splitval[1]
    return subprocess.call("%s %s" % (fakerootcmd, cmd), env=newenv, **kwargs)
def setup_tinfoil(config_only=False, basepath=None, tracking=False):
    """Initialize tinfoil api from bitbake

    Parameters:
        config_only: only parse configuration, not all recipes
        basepath: build directory to chdir into while preparing (the
            original cwd is always restored before returning)
        tracking: enable variable history tracking in the datastore
    Returns a prepared bb.tinfoil.Tinfoil instance; the caller must call
    shutdown() on it when done. Exits the process if bitbake cannot be
    located; raises DevtoolError if the bitbake UI fails to start.
    """
    import scriptpath
    # prepare() must run with the build directory as cwd; remember where we
    # were so we can restore it in the finally below
    orig_cwd = os.path.abspath(os.curdir)
    try:
        if basepath:
            os.chdir(basepath)
        # Locate bitbake's python library (parent dirs of this script, or PATH)
        bitbakepath = scriptpath.add_bitbake_lib_path()
        if not bitbakepath:
            logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
            sys.exit(1)
        import bb.tinfoil
        tinfoil = bb.tinfoil.Tinfoil(tracking=tracking)
        try:
            # Match tinfoil's logging verbosity to devtool's own
            tinfoil.logger.setLevel(logger.getEffectiveLevel())
            tinfoil.prepare(config_only)
        except bb.tinfoil.TinfoilUIException:
            tinfoil.shutdown()
            raise DevtoolError('Failed to start bitbake environment')
        except:
            # Shut the server down on any other failure too, then re-raise
            tinfoil.shutdown()
            raise
    finally:
        os.chdir(orig_cwd)
    return tinfoil
def parse_recipe(config, tinfoil, pn, appends, filter_workspace=True):
    """Parse the specified recipe and return its datastore.

    Parameters:
        config: devtool configuration object (provides workspace_path)
        tinfoil: initialized tinfoil instance
        pn: name of the recipe to parse
        appends: whether to apply bbappend files
        filter_workspace: if True, ignore appends under the workspace
    Returns the parsed recipe datastore, or None on error (already logged).
    """
    # Fix: 'bb' is not bound at module scope in this file (bitbake's lib dir
    # is only added to sys.path at runtime), so import what we need locally
    # like the other helpers in this module do
    import bb.providers
    try:
        recipefile = tinfoil.get_recipe_file(pn)
    except bb.providers.NoProvider as e:
        logger.error(str(e))
        return None
    if appends:
        append_files = tinfoil.get_file_appends(recipefile)
        if filter_workspace:
            # Filter out appends from the workspace
            append_files = [path for path in append_files if
                            not path.startswith(config.workspace_path)]
    else:
        append_files = None
    try:
        rd = tinfoil.parse_recipe_file(recipefile, appends, append_files)
    except Exception as e:
        logger.error(str(e))
        return None
    return rd
def check_workspace_recipe(workspace, pn, checksrc=True, bbclassextend=False):
    """
    Check that a recipe is in the workspace and (optionally) that source
    is present.
    """
    def find_workspace_pn():
        # Match on the exact recipe name first; with bbclassextend, also
        # accept a BBCLASSEXTEND variant of a workspace recipe (e.g. a
        # "<recipe>-native" target)
        for recipe, details in workspace.items():
            if recipe == pn:
                return pn
            if bbclassextend:
                recipefile = details['recipefile']
                if recipefile and pn in get_bbclassextend_targets(recipefile, recipe):
                    return recipe
        raise DevtoolError("No recipe named '%s' in your workspace" % pn)

    workspacepn = find_workspace_pn()
    if checksrc:
        srctree = workspace[workspacepn]['srctree']
        if not os.path.exists(srctree):
            raise DevtoolError("Source tree %s for recipe %s does not exist" % (srctree, workspacepn))
        if not os.listdir(srctree):
            raise DevtoolError("Source tree %s for recipe %s is empty" % (srctree, workspacepn))
    return workspacepn
def use_external_build(same_dir, no_same_dir, d):
    """
    Determine if we should use B!=S (separate build and source directories) or not

    Parameters:
        same_dir: True if the user requested building in the source tree
        no_same_dir: True if the user requested a separate build dir
            (takes precedence over same_dir)
        d: recipe datastore
    Returns True if the source tree should be used as the build directory.
    """
    # Fix: 'bb' is not bound at module scope in this file, so import the
    # piece we use locally, as the other helpers in this module do
    import bb.data
    b_is_s = True
    if no_same_dir:
        logger.info('Using separate build directory since --no-same-dir specified')
        b_is_s = False
    elif same_dir:
        logger.info('Using source tree as build directory since --same-dir specified')
    elif bb.data.inherits_class('autotools-brokensep', d):
        logger.info('Using source tree as build directory since recipe inherits autotools-brokensep')
    elif os.path.abspath(d.getVar('B')) == os.path.abspath(d.getVar('S')):
        logger.info('Using source tree as build directory since that would be the default for this recipe')
    else:
        b_is_s = False
    return b_is_s
def setup_git_repo(repodir, version, devbranch, basetag='devtool-base', d=None):
    """
    Set up the git repository for the source tree

    Parameters:
        repodir: source tree to initialise
        version: upstream version (used only in the initial commit message)
        devbranch: name of the development branch to create and check out
        basetag: tag to (re)apply marking the pristine upstream state
        d: optional datastore, used for git user/committer options
    """
    import bb.process
    import oe.patch
    if not os.path.exists(os.path.join(repodir, '.git')):
        # Fresh tree: create the repo and make an initial commit of everything
        bb.process.run('git init', cwd=repodir)
        bb.process.run('git config --local gc.autodetach 0', cwd=repodir)
        bb.process.run('git add -f -A .', cwd=repodir)
        commit_cmd = ['git']
        oe.patch.GitApplyTree.gitCommandUserOptions(commit_cmd, d=d)
        commit_cmd += ['commit', '-q']
        stdout, _ = bb.process.run('git status --porcelain', cwd=repodir)
        if not stdout:
            # Nothing staged (no sources) - commit anyway so the branch exists
            commit_cmd.append('--allow-empty')
            commitmsg = "Initial empty commit with no upstream sources"
        elif version:
            commitmsg = "Initial commit from upstream at version %s" % version
        else:
            commitmsg = "Initial commit from upstream"
        commit_cmd += ['-m', commitmsg]
        bb.process.run(commit_cmd, cwd=repodir)
    # Ensure singletask.lock (as used by externalsrc.bbclass) is ignored by git
    gitinfodir = os.path.join(repodir, '.git', 'info')
    try:
        os.mkdir(gitinfodir)
    except FileExistsError:
        pass
    excludes = []
    excludefile = os.path.join(gitinfodir, 'exclude')
    try:
        with open(excludefile, 'r') as f:
            excludes = f.readlines()
    except FileNotFoundError:
        pass
    if 'singletask.lock\n' not in excludes:
        excludes.append('singletask.lock\n')
    with open(excludefile, 'w') as f:
        for line in excludes:
            f.write(line)
    bb.process.run('git checkout -b %s' % devbranch, cwd=repodir)
    bb.process.run('git tag -f %s' % basetag, cwd=repodir)
    # if recipe unpacks another git repo inside S, we need to declare it as a regular git submodule now,
    # so we will be able to tag branches on it and extract patches when doing finish/update on the recipe
    stdout, _ = bb.process.run("git status --porcelain", cwd=repodir)
    found = False
    for line in stdout.splitlines():
        if line.endswith("/"):
            # Untracked directory - walk it looking for nested git repos
            new_dir = line.split()[1]
            for root, dirs, files in os.walk(os.path.join(repodir, new_dir)):
                if ".git" in dirs + files:
                    (stdout, _) = bb.process.run('git remote', cwd=root)
                    remote = stdout.splitlines()[0]
                    (stdout, _) = bb.process.run('git remote get-url %s' % remote, cwd=root)
                    remote_url = stdout.splitlines()[0]
                    # NOTE(review): this logger.error() looks like leftover
                    # debug output rather than a real error - confirm
                    logger.error(os.path.relpath(os.path.join(root, ".."), root))
                    bb.process.run('git submodule add %s %s' % (remote_url, os.path.relpath(root, os.path.join(root, ".."))), cwd=os.path.join(root, ".."))
                    found = True
                if found:
                    # Record the newly-added submodule metadata as an ignored commit
                    oe.patch.GitApplyTree.commitIgnored("Add additional submodule from SRC_URI", dir=os.path.join(root, ".."), d=d)
                    found = False
    if os.path.exists(os.path.join(repodir, '.gitmodules')):
        bb.process.run('git submodule foreach --recursive "git tag -f %s"' % basetag, cwd=repodir)
def recipe_to_append(recipefile, config, wildcard=False):
    """
    Convert a recipe file to a bbappend file path within the workspace.
    NOTE: if the bbappend already exists, you should be using
    workspace[args.recipename]['bbappend'] instead of calling this
    function.
    """
    basename = os.path.splitext(os.path.basename(recipefile))[0]
    if wildcard:
        # Replace the version part (everything after the first '_') with
        # a '%' wildcard so the append matches any version
        basename = re.sub(r'_.*', '_%', basename)
    return os.path.join(config.workspace_path, 'appends', basename + '.bbappend')
def get_bbclassextend_targets(recipefile, pn):
    """
    Cheap function to get BBCLASSEXTEND and then convert that to the
    list of targets that would result.
    """
    import bb.utils

    captured = {}
    def capture_varfunc(varname, origvalue, op, newlines):
        captured[varname] = origvalue
        return origvalue, None, 0, True

    with open(recipefile, 'r') as f:
        bb.utils.edit_metadata(f, ['BBCLASSEXTEND'], capture_varfunc)

    targets = []
    for variant in captured.get('BBCLASSEXTEND', '').split():
        # nativesdk targets are prefixed; native/cross/crosssdk are suffixed
        if variant == 'nativesdk':
            targets.append('%s-%s' % (variant, pn))
        elif variant in ('native', 'cross', 'crosssdk'):
            targets.append('%s-%s' % (pn, variant))
    return targets
def replace_from_file(path, old, new):
    """Replace all occurrences of old with new, line by line, in the file at path.

    Does nothing if old is None or if the file does not exist; any other
    I/O error is propagated. The rewritten file is right-stripped and
    terminated with a single newline.
    """
    # In case old is None, return immediately
    if old is None:
        return
    try:
        with open(path) as f:
            rdata = f.read()
    except FileNotFoundError:
        # Fix: this previously compared e.errno against errno.ENOENT, but
        # the errno module was never imported, raising NameError instead
        # of quietly returning for a missing file
        return
    # str.replace() cannot raise ValueError, so no per-line guard is needed
    new_contents = [line.replace(old, new) for line in rdata.splitlines()]
    wdata = "\n".join(new_contents).rstrip() + "\n"
    with open(path, "w") as f:
        f.write(wdata)
def update_unlockedsigs(basepath, workspace, fixed_setup, extra=None):
    """ This function will make unlocked-sigs.inc match the recipes in the
        workspace plus any extras we want unlocked. """
    if not fixed_setup:
        # Only need to write this out within the eSDK
        return
    if not extra:
        extra = []
    # Fix: 'bb' is not bound at module scope in this file; import locally
    # (after the early return, so callers outside the eSDK never need it)
    import bb.utils
    confdir = os.path.join(basepath, 'conf')
    unlockedsigs = os.path.join(confdir, 'unlocked-sigs.inc')
    # Get current unlocked list if any
    values = {}
    def get_unlockedsigs_varfunc(varname, origvalue, op, newlines):
        values[varname] = origvalue
        return origvalue, None, 0, True
    if os.path.exists(unlockedsigs):
        with open(unlockedsigs, 'r') as f:
            bb.utils.edit_metadata(f, ['SIGGEN_UNLOCKED_RECIPES'], get_unlockedsigs_varfunc)
    unlocked = sorted(values.get('SIGGEN_UNLOCKED_RECIPES', []))
    # If the new list is different to the current list, write it out
    newunlocked = sorted(list(workspace.keys()) + extra)
    if unlocked != newunlocked:
        bb.utils.mkdirhier(confdir)
        with open(unlockedsigs, 'w') as f:
            f.write("# DO NOT MODIFY! YOUR CHANGES WILL BE LOST.\n" +
                    "# This layer was created by the OpenEmbedded devtool" +
                    " utility in order to\n" +
                    "# contain recipes that are unlocked.\n")
            f.write('SIGGEN_UNLOCKED_RECIPES += "\\\n')
            for pn in newunlocked:
                f.write(' ' + pn)
            f.write('"')
def check_prerelease_version(ver, operation):
    """Warn if ver looks like a pre-release version (contains 'pre' or 'rc').

    Upgrading straight to a pre-release version can make the version appear
    to go backwards when the final release is later taken, so the warning
    suggests the <current>+<pre-release> form instead. operation is the name
    of the devtool operation, used only in the message.
    """
    if 'pre' in ver or 'rc' in ver:
        # Fix: corrected the "recommmended" typo in the user-facing message
        logger.warning('Version "%s" looks like a pre-release version. '
                       'If that is the case, in order to ensure that the '
                       'version doesn\'t appear to go backwards when you '
                       'later upgrade to the final release version, it is '
                       'recommended that instead you use '
                       '<current version>+<pre-release version> e.g. if '
                       'upgrading from 1.9 to 2.0-rc2 use "1.9+2.0-rc2". '
                       'If you prefer not to reset and re-try, you can change '
                       'the version after %s succeeds using "devtool rename" '
                       'with -V/--version.' % (ver, operation))
def check_git_repo_dirty(repodir):
    """Check if a git repository is clean or not

    Returns the output of 'git status --porcelain': an empty string (falsy)
    means the working tree is clean.
    """
    # Fix: 'bb' is not bound at module scope in this file, so import
    # locally like the other helpers in this module do
    import bb.process
    stdout, _ = bb.process.run('git status --porcelain', cwd=repodir)
    return stdout
def check_git_repo_op(srctree, ignoredirs=None):
    """Check if a git repository is in the middle of a rebase

    Also detects an in-progress 'git am'/'git apply'. Raises DevtoolError
    if an operation is in progress; repositories whose toplevel directory
    is listed in ignoredirs are skipped.
    """
    # Fix: 'bb' is not bound at module scope in this file, so import
    # locally like the other helpers in this module do
    import bb.process
    stdout, _ = bb.process.run('git rev-parse --show-toplevel', cwd=srctree)
    topleveldir = stdout.strip()
    if ignoredirs and topleveldir in ignoredirs:
        return
    gitdir = os.path.join(topleveldir, '.git')
    if os.path.exists(os.path.join(gitdir, 'rebase-merge')):
        raise DevtoolError("Source tree %s appears to be in the middle of a rebase - please resolve this first" % srctree)
    if os.path.exists(os.path.join(gitdir, 'rebase-apply')):
        raise DevtoolError("Source tree %s appears to be in the middle of 'git am' or 'git apply' - please resolve this first" % srctree)

View File

@@ -0,0 +1,92 @@
# Development tool - build command plugin
#
# Copyright (C) 2014-2015 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool build plugin"""
import os
import bb
import logging
import argparse
import tempfile
from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError
from devtool import parse_recipe
logger = logging.getLogger('devtool')
def _set_file_values(fn, values):
    """Set each variable in the values dict to its given value in file fn.

    Existing assignments are rewritten in place; variables not already
    present are appended. Returns True if the file was modified.
    """
    remaining = list(values.keys())

    def varfunc(varname, origvalue, op, newlines):
        newvalue = values.get(varname, origvalue)
        # Guard: the same variable may legitimately appear more than once
        # in the file; a second unguarded remove() would raise ValueError
        if varname in remaining:
            remaining.remove(varname)
        return (newvalue, '=', 0, True)

    with open(fn, 'r') as f:
        (updated, newlines) = bb.utils.edit_metadata(f, values, varfunc)

    # Append any variables that weren't found in the file at all
    for item in remaining:
        updated = True
        newlines.append('%s = "%s"' % (item, values[item]))

    if updated:
        with open(fn, 'w') as f:
            f.writelines(newlines)

    return updated
def _get_build_tasks(config):
    """Return the configured build tasks as a list of do_<task> names."""
    task_names = config.get('Build', 'build_task', 'populate_sysroot,packagedata')
    return ['do_' + name.strip() for name in task_names.split(',')]
def build(args, config, basepath, workspace):
    """Entry point for the devtool 'build' subcommand

    Builds the named workspace recipe with bitbake, up to the configured
    build tasks (plus do_deploy if the recipe has one). With --clean, runs
    do_clean instead. Returns 0 on success or bitbake's exit code.
    """
    workspacepn = check_workspace_recipe(workspace, args.recipename, bbclassextend=True)
    tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
    try:
        rd = parse_recipe(config, tinfoil, args.recipename, appends=True, filter_workspace=False)
        if not rd:
            return 1
        # Whether the recipe defines a do_deploy task we should run too
        deploytask = 'do_deploy' in rd.getVar('__BBTASKS')
    finally:
        tinfoil.shutdown()
    if args.clean:
        # use clean instead of cleansstate to avoid messing things up in eSDK
        build_tasks = ['do_clean']
    else:
        build_tasks = _get_build_tasks(config)
        if deploytask:
            build_tasks.append('do_deploy')
    bbappend = workspace[workspacepn]['bbappend']
    if args.disable_parallel_make:
        # Temporarily force PARALLEL_MAKE off via the workspace bbappend;
        # restored in the finally below
        logger.info("Disabling 'make' parallelism")
        _set_file_values(bbappend, {'PARALLEL_MAKE': ''})
    try:
        bbargs = []
        for task in build_tasks:
            # Packaging tasks don't apply to -native recipes
            if args.recipename.endswith('-native') and 'package' in task:
                continue
            bbargs.append('%s:%s' % (args.recipename, task))
        exec_build_env_command(config.init_path, basepath, 'bitbake %s' % ' '.join(bbargs), watch=True)
    except bb.process.ExecutionError as e:
        # We've already seen the output since watch=True, so just ensure we return something to the user
        return e.exitcode
    finally:
        if args.disable_parallel_make:
            _set_file_values(bbappend, {'PARALLEL_MAKE': None})
    return 0
def register_commands(subparsers, context):
    """Register devtool subcommands from this plugin"""
    task_list = ', '.join(_get_build_tasks(context.config))
    parser = subparsers.add_parser('build', help='Build a recipe',
                                   description='Builds the specified recipe using bitbake (up to and including %s)' % task_list,
                                   group='working', order=50)
    parser.add_argument('recipename', help='Recipe to build')
    parser.add_argument('-s', '--disable-parallel-make', action="store_true", help='Disable make parallelism')
    parser.add_argument('-c', '--clean', action='store_true', help='clean up recipe building results')
    parser.set_defaults(func=build)

View File

@@ -0,0 +1,164 @@
# Development tool - build-image plugin
#
# Copyright (C) 2015 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool plugin containing the build-image subcommand."""
import os
import errno
import logging

import bb.data
import bb.utils
from bb.process import ExecutionError

from devtool import exec_build_env_command, setup_tinfoil, parse_recipe, DevtoolError
logger = logging.getLogger('devtool')
class TargetNotImageError(Exception):
    """Raised when the recipe given to build-image is not an image recipe."""
def _get_packages(tinfoil, workspace, config):
    """Get list of packages from recipes in the workspace.

    Only target (non-native/cross) recipes that produce a package with the
    same name as the recipe are included.
    """
    result = []
    for recipe in workspace:
        data = parse_recipe(config, tinfoil, recipe, True)
        if not data:
            # Fix: parse_recipe() returns None on failure (already logged);
            # skip the recipe instead of crashing on data.getVar() below
            continue
        if 'class-target' in data.getVar('OVERRIDES').split(':'):
            if recipe in data.getVar('PACKAGES').split():
                result.append(recipe)
            else:
                logger.warning("Skipping recipe %s as it doesn't produce a "
                               "package with the same name", recipe)
    return result
def build_image(args, config, basepath, workspace):
    """Entry point for the devtool 'build-image' subcommand."""
    # Fall back to the first configured SDK target when no image was given
    image = args.imagename
    auto_image = False
    if not image:
        sdk_targets = config.get('SDK', 'sdk_targets', '').split()
        if sdk_targets:
            image = sdk_targets[0]
            auto_image = True
    if not image:
        raise DevtoolError('Unable to determine image to build, please specify one')

    add_packages = args.add_packages.split(',') if args.add_packages else None
    try:
        result, outputdir = build_image_task(config, basepath, workspace, image, add_packages)
    except TargetNotImageError:
        if auto_image:
            raise DevtoolError('Unable to determine image to build, please specify one')
        raise DevtoolError('Specified recipe %s is not an image recipe' % image)

    if result == 0:
        logger.info('Successfully built %s. You can find output files in %s'
                    % (image, outputdir))
    return result
def build_image_task(config, basepath, workspace, image, add_packages=None, task=None, extra_append=None):
    """Build an image (or run a specific image task) via bitbake.

    Parameters:
        config: devtool configuration object
        basepath: build directory
        workspace: workspace recipe dict; their packages get added to the image
        image: name of the image recipe to build
        add_packages: explicit package list to add instead of the workspace's
        task: optional single task to run (e.g. populate_sdk_ext)
        extra_append: optional extra lines for the temporary bbappend
    Returns a (result, outputdir) tuple; raises TargetNotImageError if the
    recipe does not inherit the image class.
    """
    # remove <image>.bbappend to make sure setup_tinfoil doesn't
    # break because of it
    target_basename = config.get('SDK', 'target_basename', '')
    if target_basename:
        appendfile = os.path.join(config.workspace_path, 'appends',
                                  '%s.bbappend' % target_basename)
        try:
            os.unlink(appendfile)
        except OSError as exc:
            # A missing append is fine; anything else is a real error
            if exc.errno != errno.ENOENT:
                raise
    tinfoil = setup_tinfoil(basepath=basepath)
    try:
        rd = parse_recipe(config, tinfoil, image, True)
        if not rd:
            # Error already shown
            return (1, None)
        if not bb.data.inherits_class('image', rd):
            raise TargetNotImageError()
        # Get the actual filename used and strip the .bb and full path
        target_basename = rd.getVar('FILE')
        target_basename = os.path.splitext(os.path.basename(target_basename))[0]
        config.set('SDK', 'target_basename', target_basename)
        config.write()
        appendfile = os.path.join(config.workspace_path, 'appends',
                                  '%s.bbappend' % target_basename)
        outputdir = None
        try:
            # Decide which packages to inject into the image
            if workspace or add_packages:
                if add_packages:
                    packages = add_packages
                else:
                    packages = _get_packages(tinfoil, workspace, config)
            else:
                packages = None
            if not task:
                if not packages and not add_packages and workspace:
                    logger.warning('No recipes in workspace, building image %s unmodified', image)
                elif not packages:
                    logger.warning('No packages to add, building image %s unmodified', image)
            if packages or extra_append:
                # Write a temporary bbappend to pull in the extra packages;
                # it is removed again in the finally below
                bb.utils.mkdirhier(os.path.dirname(appendfile))
                with open(appendfile, 'w') as afile:
                    if packages:
                        # include packages from workspace recipes into the image
                        afile.write('IMAGE_INSTALL:append = " %s"\n' % ' '.join(packages))
                        if not task:
                            logger.info('Building image %s with the following '
                                        'additional packages: %s', image, ' '.join(packages))
                    if extra_append:
                        for line in extra_append:
                            afile.write('%s\n' % line)
            if task in ['populate_sdk', 'populate_sdk_ext']:
                outputdir = rd.getVar('SDK_DEPLOY')
            else:
                outputdir = rd.getVar('DEPLOY_DIR_IMAGE')
            # Shut tinfoil down before invoking bitbake (it holds the lock);
            # clear the variable first so the outer finally doesn't do it twice
            tmp_tinfoil = tinfoil
            tinfoil = None
            tmp_tinfoil.shutdown()
            options = ''
            if task:
                options += '-c %s' % task
            # run bitbake to build image (or specified task)
            try:
                exec_build_env_command(config.init_path, basepath,
                                       'bitbake %s %s' % (options, image), watch=True)
            except ExecutionError as err:
                return (err.exitcode, None)
        finally:
            if os.path.isfile(appendfile):
                os.unlink(appendfile)
    finally:
        if tinfoil:
            tinfoil.shutdown()
    return (0, outputdir)
def register_commands(subparsers, context):
    """Register devtool subcommands from the build-image plugin"""
    desc = ('Builds an image, extending it to include '
            'packages from recipes in the workspace')
    parser = subparsers.add_parser('build-image',
                                   help='Build image including workspace recipe packages',
                                   description=desc,
                                   group='testbuild', order=-10)
    parser.add_argument('imagename', help='Image recipe to build', nargs='?')
    pkg_help = ('Instead of adding packages for the '
                'entire workspace, specify packages to be added to the image '
                '(separate multiple packages by commas)')
    parser.add_argument('-p', '--add-packages', help=pkg_help, metavar='PACKAGES')
    parser.set_defaults(func=build_image)

View File

@@ -0,0 +1,55 @@
# Development tool - build-sdk command plugin
#
# Copyright (C) 2015-2016 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import subprocess
import logging
import glob
import shutil
import errno
import sys
import tempfile
from devtool import DevtoolError
from devtool import build_image
logger = logging.getLogger('devtool')
def build_sdk(args, config, basepath, workspace):
    """Entry point for the devtool build-sdk command"""
    # The derivative SDK is built from the first configured SDK target image
    sdk_targets = config.get('SDK', 'sdk_targets', '').split()
    if not sdk_targets:
        raise DevtoolError('Unable to determine image to build SDK for')
    image = sdk_targets[0]

    try:
        result, outputdir = build_image.build_image_task(
            config,
            basepath,
            workspace,
            image,
            task='populate_sdk_ext',
            extra_append=['SDK_DERIVATIVE = "1"'])
    except build_image.TargetNotImageError:
        raise DevtoolError('Unable to determine image to build SDK for')

    if result == 0:
        logger.info('Successfully built SDK. You can find output files in %s'
                    % outputdir)
    return result
def register_commands(subparsers, context):
    """Register devtool subcommands"""
    # build-sdk only makes sense within an extensible SDK (fixed setup)
    if not context.fixed_setup:
        return
    parser = subparsers.add_parser('build-sdk',
                                   help='Build a derivative SDK of this one',
                                   description='Builds an extensible SDK based upon this one and the items in your workspace',
                                   group='advanced')
    parser.set_defaults(func=build_sdk)

View File

@@ -0,0 +1,378 @@
# Development tool - deploy/undeploy command plugin
#
# Copyright (C) 2014-2016 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool plugin containing the deploy subcommands"""
import logging
import os
import shutil
import subprocess
import tempfile
import bb.utils
import argparse_oe
import oe.types
from devtool import exec_fakeroot_no_d, setup_tinfoil, check_workspace_recipe, DevtoolError
logger = logging.getLogger('devtool')
deploylist_path = '/.devtool'
def _prepare_remote_script(deploy, verbose=False, dryrun=False, undeployall=False, nopreserve=False, nocheckspace=False):
    """
    Prepare a shell script for running on the target to
    deploy/undeploy files. We have to be careful what we put in this
    script - only commands that are likely to be available on the
    target are suitable (the target might be constrained, e.g. using
    busybox rather than bash with coreutils).

    Script arguments on the target: $1 = recipe name, $2 = destination
    directory (deploy only), $3 = file list with sizes (deploy only).
    Parameters:
        deploy: True to generate the deploy script, False for undeploy
        verbose: echo each file as it is unpacked
        dryrun: only list what would happen, change nothing
        undeployall: loop over every deployed recipe's manifest
        nopreserve: do not save pre-existing files before overwriting
        nocheckspace: skip the free-space check before deploying
    Returns the script as a single string.
    """
    lines = []
    lines.append('#!/bin/sh')
    lines.append('set -e')
    if undeployall:
        # Yes, I know this is crude - but it does work
        lines.append('for entry in %s/*.list; do' % deploylist_path)
        lines.append('[ ! -f $entry ] && exit')
        lines.append('set `basename $entry | sed "s/.list//"`')
    if dryrun:
        if not deploy:
            lines.append('echo "Previously deployed files for $1:"')
    # Manifest of previously deployed files, and directory holding any
    # files that were preserved when they were overwritten
    lines.append('manifest="%s/$1.list"' % deploylist_path)
    lines.append('preservedir="%s/$1.preserve"' % deploylist_path)
    lines.append('if [ -f $manifest ] ; then')
    # Read manifest in reverse and delete files / remove empty dirs
    lines.append(' sed \'1!G;h;$!d\' $manifest | while read file')
    lines.append(' do')
    if dryrun:
        lines.append(' if [ ! -d $file ] ; then')
        lines.append(' echo $file')
        lines.append(' fi')
    else:
        lines.append(' if [ -d $file ] ; then')
        # Avoid deleting a preserved directory in case it has special perms
        lines.append(' if [ ! -d $preservedir/$file ] ; then')
        lines.append(' rmdir $file > /dev/null 2>&1 || true')
        lines.append(' fi')
        lines.append(' else')
        lines.append(' rm -f $file')
        lines.append(' fi')
    lines.append(' done')
    if not dryrun:
        lines.append(' rm $manifest')
    if not deploy and not dryrun:
        # May as well remove all traces
        lines.append(' rmdir `dirname $manifest` > /dev/null 2>&1 || true')
    lines.append('fi')

    if deploy:
        if not nocheckspace:
            # Check for available space
            # FIXME This doesn't take into account files spread across multiple
            # partitions, but doing that is non-trivial
            # Find the part of the destination path that exists
            lines.append('checkpath="$2"')
            lines.append('while [ "$checkpath" != "/" ] && [ ! -e $checkpath ]')
            lines.append('do')
            lines.append(' checkpath=`dirname "$checkpath"`')
            lines.append('done')
            lines.append(r'freespace=$(df -P $checkpath | sed -nre "s/^(\S+\s+){3}([0-9]+).*/\2/p")')
            # First line of the file is the total space
            lines.append('total=`head -n1 $3`')
            lines.append('if [ $total -gt $freespace ] ; then')
            lines.append(' echo "ERROR: insufficient space on target (available ${freespace}, needed ${total})"')
            lines.append(' exit 1')
            lines.append('fi')
        if not nopreserve:
            # Preserve any files that exist. Note that this will add to the
            # preserved list with successive deployments if the list of files
            # deployed changes, but because we've deleted any previously
            # deployed files at this point it will never preserve anything
            # that was deployed, only files that existed prior to any deploying
            # (which makes the most sense)
            lines.append('cat $3 | sed "1d" | while read file fsize')
            lines.append('do')
            lines.append(' if [ -e $file ] ; then')
            lines.append(' dest="$preservedir/$file"')
            lines.append(' mkdir -p `dirname $dest`')
            lines.append(' mv $file $dest')
            lines.append(' fi')
            lines.append('done')
            lines.append('rm $3')
        lines.append('mkdir -p `dirname $manifest`')
        lines.append('mkdir -p $2')
        # Unpack the tar stream piped in over ssh, recording what we extract
        if verbose:
            lines.append(' tar xv -C $2 -f - | tee $manifest')
        else:
            lines.append(' tar xv -C $2 -f - > $manifest')
        lines.append('sed -i "s!^./!$2!" $manifest')
    elif not dryrun:
        # Put any preserved files back
        lines.append('if [ -d $preservedir ] ; then')
        lines.append(' cd $preservedir')
        # find from busybox might not have -exec, so we don't use that
        lines.append(' find . -type f | while read file')
        lines.append(' do')
        lines.append(' mv $file /$file')
        lines.append(' done')
        lines.append(' cd /')
        lines.append(' rm -rf $preservedir')
        lines.append('fi')

    if undeployall:
        if not dryrun:
            lines.append('echo "NOTE: Successfully undeployed $1"')
        lines.append('done')

    # Delete the script itself
    lines.append('rm $0')
    lines.append('')

    return '\n'.join(lines)
def deploy(args, config, basepath, workspace):
    """Entry point for the devtool 'deploy' subcommand"""
    import oe.utils

    check_workspace_recipe(workspace, args.recipename, checksrc=False)

    tinfoil = setup_tinfoil(basepath=basepath)
    try:
        try:
            rd = tinfoil.parse_recipe(args.recipename)
        except Exception as e:
            raise DevtoolError('Exception parsing recipe %s: %s' %
                               (args.recipename, e))
        # Capture everything we need from the datastore before shutting
        # tinfoil down so the actual deployment can run without it
        getvar = rd.getVar
        srcdir = getvar('D')
        workdir = getvar('WORKDIR')
        path = getvar('PATH')
        strip_cmd = getvar('STRIP')
        libdir = getvar('libdir')
        base_libdir = getvar('base_libdir')
        max_process = oe.utils.get_bb_number_threads(rd)
        fakerootcmd = getvar('FAKEROOTCMD')
        fakerootenv = getvar('FAKEROOTENV')
    finally:
        tinfoil.shutdown()
    return deploy_no_d(srcdir, workdir, path, strip_cmd, libdir, base_libdir,
                       max_process, fakerootcmd, fakerootenv, args)
def deploy_no_d(srcdir, workdir, path, strip_cmd, libdir, base_libdir, max_process, fakerootcmd, fakerootenv, args):
    """Deploy the recipe's installed files to the target over ssh.

    All recipe variables are passed in explicitly (srcdir is ${D}, etc.) so
    this can run without a datastore. args carries the command-line options
    from the deploy-target subcommand. Returns 0 on success; raises
    DevtoolError on failure.
    """
    import math
    import oe.package
    # Split an optional ':destdir' suffix off the target spec
    try:
        host, destdir = args.target.split(':')
    except ValueError:
        destdir = '/'
    else:
        args.target = host
    if not destdir.endswith('/'):
        destdir += '/'
    recipe_outdir = srcdir
    if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
        raise DevtoolError('No files to deploy - have you built the %s '
                           'recipe? If so, the install step has not installed '
                           'any files.' % args.recipename)
    if args.strip and not args.dry_run:
        # Fakeroot copy to new destination
        srcdir = recipe_outdir
        recipe_outdir = os.path.join(workdir, 'devtool-deploy-target-stripped')
        if os.path.isdir(recipe_outdir):
            exec_fakeroot_no_d(fakerootcmd, fakerootenv, "rm -rf %s" % recipe_outdir, shell=True)
        exec_fakeroot_no_d(fakerootcmd, fakerootenv, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True)
        os.environ['PATH'] = ':'.join([os.environ['PATH'], path or ''])
        oe.package.strip_execs(args.recipename, recipe_outdir, strip_cmd, libdir, base_libdir, max_process)
    # Build the list of (target path, size in kiB) pairs plus the total,
    # counting hardlinked inodes only once
    filelist = []
    inodes = set({})
    ftotalsize = 0
    for root, _, files in os.walk(recipe_outdir):
        for fn in files:
            fstat = os.lstat(os.path.join(root, fn))
            # Get the size in kiB (since we'll be comparing it to the output of du -k)
            # MUST use lstat() here not stat() or getfilesize() since we don't want to
            # dereference symlinks
            if fstat.st_ino in inodes:
                fsize = 0
            else:
                fsize = int(math.ceil(float(fstat.st_size)/1024))
            inodes.add(fstat.st_ino)
            ftotalsize += fsize
            # The path as it would appear on the target
            fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn)
            filelist.append((fpath, fsize))
    if args.dry_run:
        print('Files to be deployed for %s on target %s:' % (args.recipename, args.target))
        for item, _ in filelist:
            print(' %s' % item)
        return 0
    # Assemble the ssh/scp option strings from the command-line flags
    extraoptions = ''
    if args.no_host_check:
        extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
    if not args.show_status:
        extraoptions += ' -q'
    scp_sshexec = ''
    ssh_sshexec = 'ssh'
    if args.ssh_exec:
        scp_sshexec = "-S %s" % args.ssh_exec
        ssh_sshexec = args.ssh_exec
    scp_port = ''
    ssh_port = ''
    if args.port:
        scp_port = "-P %s" % args.port
        ssh_port = "-p %s" % args.port
    if args.key:
        extraoptions += ' -i %s' % args.key
    # In order to delete previously deployed files and have the manifest file on
    # the target, we write out a shell script and then copy it to the target
    # so we can then run it (piping tar output to it).
    # (We cannot use scp here, because it doesn't preserve symlinks.)
    tmpdir = tempfile.mkdtemp(prefix='devtool')
    try:
        tmpscript = '/tmp/devtool_deploy.sh'
        tmpfilelist = os.path.join(os.path.dirname(tmpscript), 'devtool_deploy.list')
        shellscript = _prepare_remote_script(deploy=True,
                                             verbose=args.show_status,
                                             nopreserve=args.no_preserve,
                                             nocheckspace=args.no_check_space)
        # Write out the script to a file
        with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
            f.write(shellscript)
        # Write out the file list
        with open(os.path.join(tmpdir, os.path.basename(tmpfilelist)), 'w') as f:
            f.write('%d\n' % ftotalsize)
            for fpath, fsize in filelist:
                f.write('%s %d\n' % (fpath, fsize))
        # Copy them to the target
        ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
        if ret != 0:
            raise DevtoolError('Failed to copy script to %s - rerun with -s to '
                               'get a complete error message' % args.target)
    finally:
        shutil.rmtree(tmpdir)
    # Now run the script, piping a tar stream of the files into it over ssh
    ret = exec_fakeroot_no_d(fakerootcmd, fakerootenv, 'tar cf - . | %s %s %s %s \'sh %s %s %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
    if ret != 0:
        raise DevtoolError('Deploy failed - rerun with -s to get a complete '
                           'error message')
    logger.info('Successfully deployed %s' % recipe_outdir)
    # NOTE(review): files_list is built here but never used afterwards -
    # looks like dead code left over from an earlier manifest-writing step;
    # confirm before removing
    files_list = []
    for root, _, files in os.walk(recipe_outdir):
        for filename in files:
            filename = os.path.relpath(os.path.join(root, filename), recipe_outdir)
            files_list.append(os.path.join(destdir, filename))
    return 0
def undeploy(args, config, basepath, workspace):
    """Entry point for the devtool 'undeploy' subcommand

    Removes previously-deployed files for one recipe (or all recipes with
    -a/--all) from the target machine by uploading and running a small
    shell script. Returns 0 on success; raises on usage or ssh errors.
    """
    # -a/--all and an explicit recipe name are mutually exclusive, but one
    # of the two must be given
    if args.all and args.recipename:
        raise argparse_oe.ArgumentUsageError('Cannot specify -a/--all with a recipe name', 'undeploy-target')
    elif not args.recipename and not args.all:
        raise argparse_oe.ArgumentUsageError('If you don\'t specify a recipe, you must specify -a/--all', 'undeploy-target')
    # Assemble the ssh/scp option strings from the command-line flags
    extraoptions = ''
    if args.no_host_check:
        extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
    if not args.show_status:
        extraoptions += ' -q'
    scp_sshexec = ''
    ssh_sshexec = 'ssh'
    if args.ssh_exec:
        scp_sshexec = "-S %s" % args.ssh_exec
        ssh_sshexec = args.ssh_exec
    scp_port = ''
    ssh_port = ''
    if args.port:
        scp_port = "-P %s" % args.port
        ssh_port = "-p %s" % args.port
    # Drop any ':destdir' suffix; only the host part matters here
    args.target = args.target.split(':')[0]
    tmpdir = tempfile.mkdtemp(prefix='devtool')
    try:
        tmpscript = '/tmp/devtool_undeploy.sh'
        shellscript = _prepare_remote_script(deploy=False, dryrun=args.dry_run, undeployall=args.all)
        # Write out the script to a file
        with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
            f.write(shellscript)
        # Copy it to the target
        ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
        if ret != 0:
            raise DevtoolError('Failed to copy script to %s - rerun with -s to '
                               'get a complete error message' % args.target)
    finally:
        shutil.rmtree(tmpdir)
    # Now run the script
    ret = subprocess.call('%s %s %s %s \'sh %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename), shell=True)
    if ret != 0:
        raise DevtoolError('Undeploy failed - rerun with -s to get a complete '
                           'error message')
    if not args.all and not args.dry_run:
        logger.info('Successfully undeployed %s' % args.recipename)
    return 0
def register_commands(subparsers, context):
    """Register devtool subcommands from the deploy plugin"""
    # deploy-target
    deploy_parser = subparsers.add_parser('deploy-target',
                                          help='Deploy recipe output files to live target machine',
                                          description='Deploys a recipe\'s build output (i.e. the output of the do_install task) to a live target machine over ssh. By default, any existing files will be preserved instead of being overwritten and will be restored if you run devtool undeploy-target. Note: this only deploys the recipe itself and not any runtime dependencies, so it is assumed that those have been installed on the target beforehand.',
                                          group='testbuild')
    deploy_parser.add_argument('recipename', help='Recipe to deploy')
    deploy_parser.add_argument('target', help='Live target machine running an ssh server: user@hostname[:destdir]')
    deploy_parser.add_argument('-c', '--no-host-check', help='Disable ssh host key checking', action='store_true')
    deploy_parser.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true')
    deploy_parser.add_argument('-n', '--dry-run', help='List files to be deployed only', action='store_true')
    deploy_parser.add_argument('-p', '--no-preserve', help='Do not preserve existing files', action='store_true')
    deploy_parser.add_argument('--no-check-space', help='Do not check for available space before deploying', action='store_true')
    deploy_parser.add_argument('-e', '--ssh-exec', help='Executable to use in place of ssh')
    deploy_parser.add_argument('-P', '--port', help='Specify port to use for connection to the target')
    deploy_parser.add_argument('-I', '--key',
                               help='Specify ssh private key for connection to the target')
    # --strip/--no-strip are mutually exclusive; the default for --strip is
    # read from the [Deploy] section of the devtool configuration
    strip_group = deploy_parser.add_mutually_exclusive_group(required=False)
    strip_group.add_argument('-S', '--strip',
                             help='Strip executables prior to deploying (default: %(default)s). '
                                  'The default value of this option can be controlled by setting the strip option in the [Deploy] section to True or False.',
                             default=oe.types.boolean(context.config.get('Deploy', 'strip', default='0')),
                             action='store_true')
    strip_group.add_argument('--no-strip', help='Do not strip executables prior to deploy', dest='strip', action='store_false')
    deploy_parser.set_defaults(func=deploy)

    # undeploy-target
    undeploy_parser = subparsers.add_parser('undeploy-target',
                                            help='Undeploy recipe output files in live target machine',
                                            description='Un-deploys recipe output files previously deployed to a live target machine by devtool deploy-target.',
                                            group='testbuild')
    undeploy_parser.add_argument('recipename', help='Recipe to undeploy (if not using -a/--all)', nargs='?')
    undeploy_parser.add_argument('target', help='Live target machine running an ssh server: user@hostname')
    undeploy_parser.add_argument('-c', '--no-host-check', help='Disable ssh host key checking', action='store_true')
    undeploy_parser.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true')
    undeploy_parser.add_argument('-a', '--all', help='Undeploy all recipes deployed on the target', action='store_true')
    undeploy_parser.add_argument('-n', '--dry-run', help='List files to be undeployed only', action='store_true')
    undeploy_parser.add_argument('-e', '--ssh-exec', help='Executable to use in place of ssh')
    undeploy_parser.add_argument('-P', '--port', help='Specify port to use for connection to the target')
    undeploy_parser.add_argument('-I', '--key',
                                 help='Specify ssh private key for connection to the target')
    undeploy_parser.set_defaults(func=undeploy)

View File

@@ -0,0 +1,109 @@
# Development tool - export command plugin
#
# Copyright (C) 2014-2017 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool export plugin"""
import os
import argparse
import tarfile
import logging
import datetime
import json
logger = logging.getLogger('devtool')
# output files
default_arcname_prefix = "workspace-export"
metadata = '.export_metadata'
def export(args, config, basepath, workspace):
    """Entry point for the devtool 'export' subcommand

    Archives the recipes in the current workspace (optionally filtered by
    --include/--exclude) together with a metadata file into a gzipped tar
    archive. Returns 0 on success or nothing-to-do, 1 on a user error.
    """
    def add_metadata(tar):
        """Archive the workspace object"""
        # finally store the workspace metadata
        with open(metadata, 'w') as fd:
            fd.write(json.dumps((config.workspace_path, workspace)))
        tar.add(metadata)
        os.unlink(metadata)

    def add_recipe(tar, recipe, data):
        """Archive recipe with proper arcname"""
        # Create a map of name/arcnames
        arcnames = []
        for key, name in data.items():
            if name:
                if key == 'srctree':
                    # all sources, no matter where they are located, go into the sources directory
                    arcname = 'sources/%s' % recipe
                else:
                    arcname = name.replace(config.workspace_path, '')
                arcnames.append((name, arcname))
        for name, arcname in arcnames:
            tar.add(name, arcname=arcname)

    # Make sure workspace is non-empty and any listed include/exclude recipes are in the workspace
    if not workspace:
        logger.info('Workspace contains no recipes, nothing to export')
        return 0
    for param, recipes in {'include': args.include, 'exclude': args.exclude}.items():
        for recipe in recipes:
            if recipe not in workspace:
                logger.error('Recipe (%s) on %s argument not in the current workspace' % (recipe, param))
                return 1

    name = args.file
    default_name = "%s-%s.tar.gz" % (default_arcname_prefix, datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
    if not name:
        name = default_name
    elif os.path.isdir(name):
        # if name is a directory, append the default name
        name = os.path.join(name, default_name)

    if os.path.exists(name) and not args.overwrite:
        # Fix: the original format string had a %s placeholder but no argument,
        # so the literal '%s' was printed instead of the archive name
        logger.error('Tar archive %s exists. Use --overwrite/-o to overwrite it' % name)
        return 1

    # if the whole workspace is excluded, quit
    if not set(workspace.keys()).difference(set(args.exclude)):
        logger.warning('All recipes in workspace excluded, nothing to export')
        return 0

    exported = []
    with tarfile.open(name, 'w:gz') as tar:
        if args.include:
            for recipe in args.include:
                add_recipe(tar, recipe, workspace[recipe])
                exported.append(recipe)
        else:
            for recipe, data in workspace.items():
                if recipe not in args.exclude:
                    add_recipe(tar, recipe, data)
                    exported.append(recipe)
        add_metadata(tar)

    logger.info('Tar archive created at %s with the following recipes: %s' % (name, ', '.join(exported)))
    return 0
def register_commands(subparsers, context):
    """Register devtool export subcommands"""
    export_parser = subparsers.add_parser('export',
                                          help='Export workspace into a tar archive',
                                          description='Export one or more recipes from current workspace into a tar archive',
                                          group='advanced')
    export_parser.add_argument('--file', '-f', help='Output archive file name')
    export_parser.add_argument('--overwrite', '-o', action="store_true", help='Overwrite previous export tar archive')
    # Recipes may be selected by an include list or an exclude list, not both
    filter_group = export_parser.add_mutually_exclusive_group()
    filter_group.add_argument('--include', '-i', nargs='+', default=[], help='Include recipes into the tar archive')
    filter_group.add_argument('--exclude', '-e', nargs='+', default=[], help='Exclude recipes into the tar archive')
    export_parser.set_defaults(func=export)

View File

@@ -0,0 +1,282 @@
#
# Copyright (C) 2023-2024 Siemens AG
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool ide-sdk IDE plugin interface definition and helper functions"""
import errno
import json
import logging
import os
import stat
from enum import Enum, auto
from devtool import DevtoolError
from bb.utils import mkdirhier
logger = logging.getLogger('devtool')
class BuildTool(Enum):
    """Build systems recognised by devtool ide-sdk."""
    UNDEFINED = auto()
    CMAKE = auto()
    MESON = auto()

    @property
    def is_c_ccp(self):
        """True for build tools driving a C/C++ tool-chain (CMake, Meson)."""
        return self in (BuildTool.CMAKE, BuildTool.MESON)
class GdbCrossConfig:
    """Base class defining the GDB configuration generator interface

    Generate a GDB configuration for a binary on the target device.
    Only one instance per binary is allowed. This allows to assign unique port
    numbers for all gdbserver instances.
    """
    # Next TCP port handed out to a gdbserver instance (class-wide counter)
    _gdbserver_port_next = 1234
    # Binaries for which a configuration was already generated (class-wide)
    _binaries = []

    def __init__(self, image_recipe, modified_recipe, binary, gdbserver_multi=True):
        """Collect paths and allocate a unique gdbserver port for one binary.

        :param image_recipe: image recipe object (provides rootfs_dbg)
        :param modified_recipe: workspace recipe whose binary is debugged
        :param binary: path of the executable (as installed on the target)
        :param gdbserver_multi: True for persistent '--multi' gdbserver mode,
                                False for one-shot '--once' mode
        """
        self.image_recipe = image_recipe
        self.modified_recipe = modified_recipe
        self.gdb_cross = modified_recipe.gdb_cross
        self.binary = binary
        # One config per binary: each consumes a unique port number
        if binary in GdbCrossConfig._binaries:
            raise DevtoolError(
                "gdbserver config for binary %s is already generated" % binary)
        GdbCrossConfig._binaries.append(binary)
        self.script_dir = modified_recipe.ide_sdk_scripts_dir
        self.gdbinit_dir = os.path.join(self.script_dir, 'gdbinit')
        self.gdbserver_multi = gdbserver_multi
        # Filesystem-safe identifier derived from the binary path
        self.binary_pretty = self.binary.replace(os.sep, '-').lstrip('-')
        self.gdbserver_port = GdbCrossConfig._gdbserver_port_next
        GdbCrossConfig._gdbserver_port_next += 1
        self.id_pretty = "%d_%s" % (self.gdbserver_port, self.binary_pretty)
        # gdbserver start script
        gdbserver_script_file = 'gdbserver_' + self.id_pretty
        if self.gdbserver_multi:
            gdbserver_script_file += "_m"
        self.gdbserver_script = os.path.join(
            self.script_dir, gdbserver_script_file)
        # gdbinit file
        self.gdbinit = os.path.join(
            self.gdbinit_dir, 'gdbinit_' + self.id_pretty)
        # gdb start script
        self.gdb_script = os.path.join(
            self.script_dir, 'gdb_' + self.id_pretty)

    def _gen_gdbserver_start_script(self):
        """Generate a shell command starting the gdbserver on the remote device via ssh

        GDB supports two modes:
        multi: gdbserver remains running over several debug sessions
        once: gdbserver terminates after the debugged process terminates
        """
        cmd_lines = ['#!/bin/sh']
        if self.gdbserver_multi:
            # The generated script accepts an optional 'stop' argument: without
            # it, gdbserver is started if not already running (tracked via a
            # pid file on the target); with it, the recorded gdbserver is
            # killed and its state directory removed.
            # NOTE: '\\$' keeps '$' literal through the local shell so it is
            # expanded on the remote side only.
            temp_dir = "TEMP_DIR=/tmp/gdbserver_%s; " % self.id_pretty
            gdbserver_cmd_start = temp_dir
            gdbserver_cmd_start += "test -f \\$TEMP_DIR/pid && exit 0; "
            gdbserver_cmd_start += "mkdir -p \\$TEMP_DIR; "
            gdbserver_cmd_start += "%s --multi :%s > \\$TEMP_DIR/log 2>&1 & " % (
                self.gdb_cross.gdbserver_path, self.gdbserver_port)
            gdbserver_cmd_start += "echo \\$! > \\$TEMP_DIR/pid;"
            gdbserver_cmd_stop = temp_dir
            gdbserver_cmd_stop += "test -f \\$TEMP_DIR/pid && kill \\$(cat \\$TEMP_DIR/pid); "
            gdbserver_cmd_stop += "rm -rf \\$TEMP_DIR; "
            gdbserver_cmd_l = []
            gdbserver_cmd_l.append('if [ "$1" = "stop" ]; then')
            gdbserver_cmd_l.append(' shift')
            gdbserver_cmd_l.append(" %s %s %s %s 'sh -c \"%s\"'" % (
                self.gdb_cross.target_device.ssh_sshexec, self.gdb_cross.target_device.ssh_port, self.gdb_cross.target_device.extraoptions, self.gdb_cross.target_device.target, gdbserver_cmd_stop))
            gdbserver_cmd_l.append('else')
            gdbserver_cmd_l.append(" %s %s %s %s 'sh -c \"%s\"'" % (
                self.gdb_cross.target_device.ssh_sshexec, self.gdb_cross.target_device.ssh_port, self.gdb_cross.target_device.extraoptions, self.gdb_cross.target_device.target, gdbserver_cmd_start))
            gdbserver_cmd_l.append('fi')
            gdbserver_cmd = os.linesep.join(gdbserver_cmd_l)
        else:
            # One-shot mode: gdbserver exits together with the debugged binary
            gdbserver_cmd_start = "%s --once :%s %s" % (
                self.gdb_cross.gdbserver_path, self.gdbserver_port, self.binary)
            gdbserver_cmd = "%s %s %s %s 'sh -c \"%s\"'" % (
                self.gdb_cross.target_device.ssh_sshexec, self.gdb_cross.target_device.ssh_port, self.gdb_cross.target_device.extraoptions, self.gdb_cross.target_device.target, gdbserver_cmd_start)
        cmd_lines.append(gdbserver_cmd)
        GdbCrossConfig.write_file(self.gdbserver_script, cmd_lines, True)

    def _gen_gdbinit_config(self):
        """Generate a gdbinit file for this binary and the corresponding gdbserver configuration"""
        gdbinit_lines = ['# This file is generated by devtool ide-sdk']
        if self.gdbserver_multi:
            target_help = '# gdbserver --multi :%d' % self.gdbserver_port
            remote_cmd = 'target extended-remote'
        else:
            target_help = '# gdbserver :%d %s' % (
                self.gdbserver_port, self.binary)
            remote_cmd = 'target remote'
        # Usage instructions written as comments into the generated gdbinit
        gdbinit_lines.append('# On the remote target:')
        gdbinit_lines.append(target_help)
        gdbinit_lines.append('# On the build machine:')
        gdbinit_lines.append('# cd ' + self.modified_recipe.real_srctree)
        gdbinit_lines.append(
            '# ' + self.gdb_cross.gdb + ' -ix ' + self.gdbinit)
        gdbinit_lines.append('set sysroot ' + self.modified_recipe.d)
        gdbinit_lines.append('set substitute-path "/usr/include" "' +
                             os.path.join(self.modified_recipe.recipe_sysroot, 'usr', 'include') + '"')
        # Disable debuginfod for now, the IDE configuration uses rootfs-dbg from the image workdir.
        gdbinit_lines.append('set debuginfod enabled off')
        if self.image_recipe.rootfs_dbg:
            gdbinit_lines.append(
                'set solib-search-path "' + self.modified_recipe.solib_search_path_str(self.image_recipe) + '"')
            # First: Search for sources of this recipe in the workspace folder
            if self.modified_recipe.pn in self.modified_recipe.target_dbgsrc_dir:
                gdbinit_lines.append('set substitute-path "%s" "%s"' %
                                     (self.modified_recipe.target_dbgsrc_dir, self.modified_recipe.real_srctree))
            else:
                logger.error(
                    "TARGET_DBGSRC_DIR must contain the recipe name PN.")
            # Second: Search for sources of other recipes in the rootfs-dbg
            if self.modified_recipe.target_dbgsrc_dir.startswith("/usr/src/debug"):
                gdbinit_lines.append('set substitute-path "/usr/src/debug" "%s"' % os.path.join(
                    self.image_recipe.rootfs_dbg, "usr", "src", "debug"))
            else:
                logger.error(
                    "TARGET_DBGSRC_DIR must start with /usr/src/debug.")
        else:
            logger.warning(
                "Cannot setup debug symbols configuration for GDB. IMAGE_GEN_DEBUGFS is not enabled.")
        gdbinit_lines.append(
            '%s %s:%d' % (remote_cmd, self.gdb_cross.host, self.gdbserver_port))
        gdbinit_lines.append('set remote exec-file ' + self.binary)
        gdbinit_lines.append(
            'run ' + os.path.join(self.modified_recipe.d, self.binary))
        GdbCrossConfig.write_file(self.gdbinit, gdbinit_lines)

    def _gen_gdb_start_script(self):
        """Generate a script starting GDB with the corresponding gdbinit configuration."""
        cmd_lines = ['#!/bin/sh']
        cmd_lines.append('cd ' + self.modified_recipe.real_srctree)
        cmd_lines.append(self.gdb_cross.gdb + ' -ix ' +
                         self.gdbinit + ' "$@"')
        GdbCrossConfig.write_file(self.gdb_script, cmd_lines, True)

    def initialize(self):
        """Generate all three artifacts: gdbserver script, gdbinit and gdb script."""
        self._gen_gdbserver_start_script()
        self._gen_gdbinit_config()
        self._gen_gdb_start_script()

    @staticmethod
    def write_file(script_file, cmd_lines, executable=False):
        """Write cmd_lines to script_file, optionally marking it executable for the owner."""
        script_dir = os.path.dirname(script_file)
        mkdirhier(script_dir)
        with open(script_file, 'w') as script_f:
            script_f.write(os.linesep.join(cmd_lines))
            script_f.write(os.linesep)
        if executable:
            st = os.stat(script_file)
            os.chmod(script_file, st.st_mode | stat.S_IEXEC)
        logger.info("Created: %s" % script_file)
class IdeBase:
    """Base class defining the interface for IDE plugins

    Concrete IDE plugins derive from this class and override
    setup_shared_sysroots and/or setup_modified_recipe. The static helpers
    for JSON config files and symlinks are shared by all plugins.
    """

    def __init__(self):
        self.ide_name = 'undefined'
        self.gdb_cross_configs = []

    @classmethod
    def ide_plugin_priority(cls):
        """Used to find the default ide handler if --ide is not passed"""
        return 10

    def setup_shared_sysroots(self, shared_env):
        """Configure the IDE for the shared-sysroot SDK (override in subclass)."""
        # Fix: logging's warn() is a deprecated alias of warning()
        logger.warning("Shared sysroot mode is not supported for IDE %s" %
                       self.ide_name)

    def setup_modified_recipe(self, args, image_recipe, modified_recipe):
        """Configure the IDE for a workspace recipe (override in subclass)."""
        logger.warning("Modified recipe mode is not supported for IDE %s" %
                       self.ide_name)

    def initialize_gdb_cross_configs(self, image_recipe, modified_recipe, gdb_cross_config_class=GdbCrossConfig):
        """Create and initialize one GDB configuration per installed binary."""
        binaries = modified_recipe.find_installed_binaries()
        for binary in binaries:
            gdb_cross_config = gdb_cross_config_class(
                image_recipe, modified_recipe, binary)
            gdb_cross_config.initialize()
            self.gdb_cross_configs.append(gdb_cross_config)

    @staticmethod
    def gen_oe_scrtips_sym_link(modified_recipe):
        """Create an 'oe-scripts' sym-link from the sources to the scripts directory.

        NOTE: the misspelled method name is kept for compatibility with
        existing callers.
        """
        if os.path.isdir(modified_recipe.ide_sdk_scripts_dir):
            IdeBase.symlink_force(modified_recipe.ide_sdk_scripts_dir,
                                  os.path.join(modified_recipe.real_srctree, 'oe-scripts'))

    @staticmethod
    def update_json_file(json_dir, json_file, update_dict):
        """Update a json file

        By default it uses the dict.update function. If this is not suitable
        the update function might be passed via update_func parameter.
        """
        json_path = os.path.join(json_dir, json_file)
        logger.info("Updating IDE config file: %s (%s)" %
                    (json_file, json_path))
        if not os.path.exists(json_dir):
            os.makedirs(json_dir)
        try:
            with open(json_path) as f:
                orig_dict = json.load(f)
        except json.decoder.JSONDecodeError:
            # Typically caused by JSONC-style comments added by hand; start
            # from an empty dict rather than failing
            logger.info(
                "Decoding %s failed. Probably because of comments in the json file" % json_path)
            orig_dict = {}
        except FileNotFoundError:
            orig_dict = {}
        orig_dict.update(update_dict)
        with open(json_path, 'w') as f:
            json.dump(orig_dict, f, indent=4)

    @staticmethod
    def symlink_force(tgt, dst):
        """Create (or replace) symlink dst -> tgt, tolerating an identical existing link."""
        try:
            os.symlink(tgt, dst)
        except OSError as err:
            if err.errno == errno.EEXIST:
                # Replace only when the existing link points elsewhere
                if os.readlink(dst) != tgt:
                    os.remove(dst)
                    os.symlink(tgt, dst)
            else:
                raise err
def get_devtool_deploy_opts(args):
    """Filter args for devtool deploy-target args

    Translates the parsed ide-sdk command-line options into the equivalent
    option list for 'devtool deploy-target'. Returns None when no target
    device was specified.
    """
    if not args.target:
        return None
    devtool_deploy_opts = [args.target]
    if args.no_host_check:
        devtool_deploy_opts += ["-c"]
    if args.show_status:
        devtool_deploy_opts += ["-s"]
    if args.no_preserve:
        devtool_deploy_opts += ["-p"]
    if args.no_check_space:
        devtool_deploy_opts += ["--no-check-space"]
    if args.ssh_exec:
        # Fix: was 'args.ssh.exec', which raised AttributeError — the parsed
        # option for -e/--ssh-exec is stored as 'ssh_exec'
        devtool_deploy_opts += ["-e", args.ssh_exec]
    if args.port:
        devtool_deploy_opts += ["-P", args.port]
    if args.key:
        devtool_deploy_opts += ["-I", args.key]
    if args.strip is False:
        devtool_deploy_opts += ["--no-strip"]
    return devtool_deploy_opts

View File

@@ -0,0 +1,463 @@
#
# Copyright (C) 2023-2024 Siemens AG
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool ide-sdk IDE plugin for VSCode and VSCodium"""
import json
import logging
import os
import shutil
from devtool.ide_plugins import BuildTool, IdeBase, GdbCrossConfig, get_devtool_deploy_opts
logger = logging.getLogger('devtool')
class GdbCrossConfigVSCode(GdbCrossConfig):
    """GDB configuration generator used by the VSCode plugin.

    VSCode's cppdbg launch configuration starts GDB itself, so only the
    gdbserver start script needs to be generated; gdbserver runs in
    one-shot ('--once') mode.
    """

    def __init__(self, image_recipe, modified_recipe, binary):
        # gdbserver_multi=False: a fresh gdbserver per debug session
        super().__init__(image_recipe, modified_recipe, binary, gdbserver_multi=False)

    def initialize(self):
        # Only the gdbserver start script is needed; launch.json covers the rest
        self._gen_gdbserver_start_script()
class IdeVSCode(IdeBase):
    """Manage IDE configurations for VSCode

    Modified recipe mode:
    - cmake: use the cmake-preset generated by devtool ide-sdk
    - meson: meson is called via a wrapper script generated by devtool ide-sdk

    Shared sysroot mode:
    In shared sysroot mode, the cross tool-chain is exported to the user's global configuration.
    A workspace cannot be created because there is no recipe that defines how a workspace could
    be set up.
    - cmake: adds a cmake-kit to .local/share/CMakeTools/cmake-tools-kits.json
      The cmake-kit uses the environment script and the tool-chain file
      generated by meta-ide-support.
    - meson: Meson needs manual workspace configuration.
    """

    @classmethod
    def ide_plugin_priority(cls):
        """If --ide is not passed this is the default plugin"""
        # Only claim default status when the 'code' executable is on PATH
        if shutil.which('code'):
            return 100
        return 0

    def setup_shared_sysroots(self, shared_env):
        """Expose the toolchain of the shared sysroots SDK"""
        datadir = shared_env.ide_support.datadir
        deploy_dir_image = shared_env.ide_support.deploy_dir_image
        real_multimach_target_sys = shared_env.ide_support.real_multimach_target_sys
        standalone_sysroot_native = shared_env.build_sysroots.standalone_sysroot_native
        # VSCode's user-global cmake-tools kit registry
        vscode_ws_path = os.path.join(
            os.environ['HOME'], '.local', 'share', 'CMakeTools')
        cmake_kits_path = os.path.join(vscode_ws_path, 'cmake-tools-kits.json')
        oecmake_generator = "Ninja"
        env_script = os.path.join(
            deploy_dir_image, 'environment-setup-' + real_multimach_target_sys)
        if not os.path.isdir(vscode_ws_path):
            os.makedirs(vscode_ws_path)
        cmake_kits_old = []
        if os.path.exists(cmake_kits_path):
            with open(cmake_kits_path, 'r', encoding='utf-8') as cmake_kits_file:
                cmake_kits_old = json.load(cmake_kits_file)
        cmake_kits = cmake_kits_old.copy()
        cmake_kit_new = {
            "name": "OE " + real_multimach_target_sys,
            "environmentSetupScript": env_script,
            "toolchainFile": standalone_sysroot_native + datadir + "/cmake/OEToolchainConfig.cmake",
            "preferredGenerator": {
                "name": oecmake_generator
            }
        }

        def merge_kit(cmake_kits, cmake_kit_new):
            # Replace an existing kit with the same environment script in
            # place, otherwise append the new kit
            i = 0
            while i < len(cmake_kits):
                if 'environmentSetupScript' in cmake_kits[i] and \
                        cmake_kits[i]['environmentSetupScript'] == cmake_kit_new['environmentSetupScript']:
                    cmake_kits[i] = cmake_kit_new
                    return
                i += 1
            cmake_kits.append(cmake_kit_new)
        merge_kit(cmake_kits, cmake_kit_new)
        # Only rewrite the registry when something actually changed
        if cmake_kits != cmake_kits_old:
            logger.info("Updating: %s" % cmake_kits_path)
            with open(cmake_kits_path, 'w', encoding='utf-8') as cmake_kits_file:
                json.dump(cmake_kits, cmake_kits_file, indent=4)
        else:
            logger.info("Already up to date: %s" % cmake_kits_path)
        cmake_native = os.path.join(
            shared_env.build_sysroots.standalone_sysroot_native, 'usr', 'bin', 'cmake')
        if os.path.isfile(cmake_native):
            logger.info('cmake-kits call cmake by default. If the cmake provided by this SDK should be used, please add the following line to ".vscode/settings.json" file: "cmake.cmakePath": "%s"' % cmake_native)
        else:
            logger.error("Cannot find cmake native at: %s" % cmake_native)

    def dot_code_dir(self, modified_recipe):
        """Return the path of the .vscode folder inside the source tree."""
        return os.path.join(modified_recipe.srctree, '.vscode')

    def __vscode_settings_meson(self, settings_dict, modified_recipe):
        """Add meson specific settings to settings.json (no-op for other build tools)."""
        if modified_recipe.build_tool is not BuildTool.MESON:
            return
        settings_dict["mesonbuild.mesonPath"] = modified_recipe.meson_wrapper
        confopts = modified_recipe.mesonopts.split()
        confopts += modified_recipe.meson_cross_file.split()
        confopts += modified_recipe.extra_oemeson.split()
        settings_dict["mesonbuild.configureOptions"] = confopts
        settings_dict["mesonbuild.buildFolder"] = modified_recipe.b

    def __vscode_settings_cmake(self, settings_dict, modified_recipe):
        """Add cmake specific settings to settings.json.

        Note: most settings are passed to the cmake preset.
        """
        if modified_recipe.build_tool is not BuildTool.CMAKE:
            return
        settings_dict["cmake.configureOnOpen"] = True
        settings_dict["cmake.sourceDirectory"] = modified_recipe.real_srctree

    def vscode_settings(self, modified_recipe, image_recipe):
        """Generate or update .vscode/settings.json for the workspace recipe."""
        files_excludes = {
            "**/.git/**": True,
            "**/oe-logs/**": True,
            "**/oe-workdir/**": True,
            "**/source-date-epoch/**": True
        }
        python_exclude = [
            "**/.git/**",
            "**/oe-logs/**",
            "**/oe-workdir/**",
            "**/source-date-epoch/**"
        ]
        # Mark the sysroots (and rootfs-dbg, if present) read-only in the editor
        files_readonly = {
            modified_recipe.recipe_sysroot + '/**': True,
            modified_recipe.recipe_sysroot_native + '/**': True,
        }
        if image_recipe.rootfs_dbg is not None:
            files_readonly[image_recipe.rootfs_dbg + '/**'] = True
        settings_dict = {
            "files.watcherExclude": files_excludes,
            "files.exclude": files_excludes,
            "files.readonlyInclude": files_readonly,
            "python.analysis.exclude": python_exclude
        }
        self.__vscode_settings_cmake(settings_dict, modified_recipe)
        self.__vscode_settings_meson(settings_dict, modified_recipe)
        settings_file = 'settings.json'
        IdeBase.update_json_file(
            self.dot_code_dir(modified_recipe), settings_file, settings_dict)

    def __vscode_extensions_cmake(self, modified_recipe, recommendations):
        """Append the cmake-related extension recommendations (in place)."""
        if modified_recipe.build_tool is not BuildTool.CMAKE:
            return
        recommendations += [
            "twxs.cmake",
            "ms-vscode.cmake-tools",
            "ms-vscode.cpptools",
            "ms-vscode.cpptools-extension-pack",
            "ms-vscode.cpptools-themes"
        ]

    def __vscode_extensions_meson(self, modified_recipe, recommendations):
        """Append the meson-related extension recommendations (in place)."""
        if modified_recipe.build_tool is not BuildTool.MESON:
            return
        recommendations += [
            'mesonbuild.mesonbuild',
            "ms-vscode.cpptools",
            "ms-vscode.cpptools-extension-pack",
            "ms-vscode.cpptools-themes"
        ]

    def vscode_extensions(self, modified_recipe):
        """Generate or update .vscode/extensions.json with recommended extensions."""
        recommendations = []
        self.__vscode_extensions_cmake(modified_recipe, recommendations)
        self.__vscode_extensions_meson(modified_recipe, recommendations)
        extensions_file = 'extensions.json'
        IdeBase.update_json_file(
            self.dot_code_dir(modified_recipe), extensions_file, {"recommendations": recommendations})

    def vscode_c_cpp_properties(self, modified_recipe):
        """Generate or update .vscode/c_cpp_properties.json for C/C++ IntelliSense."""
        properties_dict = {
            "name": modified_recipe.recipe_id_pretty,
        }
        if modified_recipe.build_tool is BuildTool.CMAKE:
            properties_dict["configurationProvider"] = "ms-vscode.cmake-tools"
        elif modified_recipe.build_tool is BuildTool.MESON:
            properties_dict["configurationProvider"] = "mesonbuild.mesonbuild"
            properties_dict["compilerPath"] = os.path.join(modified_recipe.staging_bindir_toolchain, modified_recipe.cxx.split()[0])
        else:  # no C/C++ build
            return
        properties_dicts = {
            "configurations": [
                properties_dict
            ],
            "version": 4
        }
        prop_file = 'c_cpp_properties.json'
        IdeBase.update_json_file(
            self.dot_code_dir(modified_recipe), prop_file, properties_dicts)

    def vscode_launch_bin_dbg(self, gdb_cross_config):
        """Return one cppdbg launch configuration dict for a single binary."""
        modified_recipe = gdb_cross_config.modified_recipe
        launch_config = {
            "name": gdb_cross_config.id_pretty,
            "type": "cppdbg",
            "request": "launch",
            "program": os.path.join(modified_recipe.d, gdb_cross_config.binary.lstrip('/')),
            "stopAtEntry": True,
            "cwd": "${workspaceFolder}",
            "environment": [],
            "externalConsole": False,
            "MIMode": "gdb",
            "preLaunchTask": gdb_cross_config.id_pretty,
            "miDebuggerPath": modified_recipe.gdb_cross.gdb,
            "miDebuggerServerAddress": "%s:%d" % (modified_recipe.gdb_cross.host, gdb_cross_config.gdbserver_port)
        }
        # Search for header files in recipe-sysroot.
        src_file_map = {
            "/usr/include": os.path.join(modified_recipe.recipe_sysroot, "usr", "include")
        }
        # First of all search for not stripped binaries in the image folder.
        # These binaries are copied (and optionally stripped) by deploy-target
        setup_commands = [
            {
                "description": "sysroot",
                "text": "set sysroot " + modified_recipe.d
            }
        ]
        if gdb_cross_config.image_recipe.rootfs_dbg:
            launch_config['additionalSOLibSearchPath'] = modified_recipe.solib_search_path_str(
                gdb_cross_config.image_recipe)
            # First: Search for sources of this recipe in the workspace folder
            if modified_recipe.pn in modified_recipe.target_dbgsrc_dir:
                src_file_map[modified_recipe.target_dbgsrc_dir] = "${workspaceFolder}"
            else:
                logger.error(
                    "TARGET_DBGSRC_DIR must contain the recipe name PN.")
            # Second: Search for sources of other recipes in the rootfs-dbg
            if modified_recipe.target_dbgsrc_dir.startswith("/usr/src/debug"):
                src_file_map["/usr/src/debug"] = os.path.join(
                    gdb_cross_config.image_recipe.rootfs_dbg, "usr", "src", "debug")
            else:
                logger.error(
                    "TARGET_DBGSRC_DIR must start with /usr/src/debug.")
        else:
            logger.warning(
                "Cannot setup debug symbols configuration for GDB. IMAGE_GEN_DEBUGFS is not enabled.")
        launch_config['sourceFileMap'] = src_file_map
        launch_config['setupCommands'] = setup_commands
        return launch_config

    def vscode_launch(self, modified_recipe):
        """GDB Launch configuration for binaries (elf files)"""
        configurations = []
        for gdb_cross_config in self.gdb_cross_configs:
            if gdb_cross_config.modified_recipe is modified_recipe:
                configurations.append(self.vscode_launch_bin_dbg(gdb_cross_config))
        launch_dict = {
            "version": "0.2.0",
            "configurations": configurations
        }
        launch_file = 'launch.json'
        IdeBase.update_json_file(
            self.dot_code_dir(modified_recipe), launch_file, launch_dict)

    def vscode_tasks_cpp(self, args, modified_recipe):
        """Generate tasks.json for recipes with a C/C++ build tool (cmake/meson)."""
        run_install_deploy = modified_recipe.gen_install_deploy_script(args)
        install_task_name = "install && deploy-target %s" % modified_recipe.recipe_id_pretty
        tasks_dict = {
            "version": "2.0.0",
            "tasks": [
                {
                    "label": install_task_name,
                    "type": "shell",
                    "command": run_install_deploy,
                    "problemMatcher": []
                }
            ]
        }
        # One background gdbserver task per generated GDB configuration
        for gdb_cross_config in self.gdb_cross_configs:
            if gdb_cross_config.modified_recipe is not modified_recipe:
                continue
            tasks_dict['tasks'].append(
                {
                    "label": gdb_cross_config.id_pretty,
                    "type": "shell",
                    "isBackground": True,
                    "dependsOn": [
                        install_task_name
                    ],
                    "command": gdb_cross_config.gdbserver_script,
                    "problemMatcher": [
                        {
                            "pattern": [
                                {
                                    "regexp": ".",
                                    "file": 1,
                                    "location": 2,
                                    "message": 3
                                }
                            ],
                            "background": {
                                "activeOnStart": True,
                                "beginsPattern": ".",
                                "endsPattern": ".",
                            }
                        }
                    ]
                })
        tasks_file = 'tasks.json'
        IdeBase.update_json_file(
            self.dot_code_dir(modified_recipe), tasks_file, tasks_dict)

    def vscode_tasks_fallback(self, args, modified_recipe):
        """Generate tasks.json driving plain 'devtool build'/'deploy-target' for other build tools."""
        oe_init_dir = modified_recipe.oe_init_dir
        oe_init = ". %s %s > /dev/null && " % (modified_recipe.oe_init_build_env, modified_recipe.topdir)
        dt_build = "devtool build "
        dt_build_label = dt_build + modified_recipe.recipe_id_pretty
        dt_build_cmd = dt_build + modified_recipe.bpn
        clean_opt = " --clean"
        dt_build_clean_label = dt_build + modified_recipe.recipe_id_pretty + clean_opt
        dt_build_clean_cmd = dt_build + modified_recipe.bpn + clean_opt
        dt_deploy = "devtool deploy-target "
        dt_deploy_label = dt_deploy + modified_recipe.recipe_id_pretty
        dt_deploy_cmd = dt_deploy + modified_recipe.bpn
        dt_build_deploy_label = "devtool build & deploy-target %s" % modified_recipe.recipe_id_pretty
        # NOTE(review): get_devtool_deploy_opts returns None when args.target
        # is unset, which would make this join raise — presumably callers
        # guarantee a target here; confirm.
        deploy_opts = ' '.join(get_devtool_deploy_opts(args))
        tasks_dict = {
            "version": "2.0.0",
            "tasks": [
                {
                    "label": dt_build_label,
                    "type": "shell",
                    "command": "bash",
                    "linux": {
                        "options": {
                            "cwd": oe_init_dir
                        }
                    },
                    "args": [
                        "--login",
                        "-c",
                        "%s%s" % (oe_init, dt_build_cmd)
                    ],
                    "problemMatcher": []
                },
                {
                    "label": dt_deploy_label,
                    "type": "shell",
                    "command": "bash",
                    "linux": {
                        "options": {
                            "cwd": oe_init_dir
                        }
                    },
                    "args": [
                        "--login",
                        "-c",
                        "%s%s %s" % (
                            oe_init, dt_deploy_cmd, deploy_opts)
                    ],
                    "problemMatcher": []
                },
                {
                    "label": dt_build_deploy_label,
                    "dependsOrder": "sequence",
                    "dependsOn": [
                        dt_build_label,
                        dt_deploy_label
                    ],
                    "problemMatcher": [],
                    "group": {
                        "kind": "build",
                        "isDefault": True
                    }
                },
                {
                    "label": dt_build_clean_label,
                    "type": "shell",
                    "command": "bash",
                    "linux": {
                        "options": {
                            "cwd": oe_init_dir
                        }
                    },
                    "args": [
                        "--login",
                        "-c",
                        "%s%s" % (oe_init, dt_build_clean_cmd)
                    ],
                    "problemMatcher": []
                }
            ]
        }
        if modified_recipe.gdb_cross:
            # Background gdbserver tasks, chained after build & deploy
            for gdb_cross_config in self.gdb_cross_configs:
                if gdb_cross_config.modified_recipe is not modified_recipe:
                    continue
                tasks_dict['tasks'].append(
                    {
                        "label": gdb_cross_config.id_pretty,
                        "type": "shell",
                        "isBackground": True,
                        "dependsOn": [
                            dt_build_deploy_label
                        ],
                        "command": gdb_cross_config.gdbserver_script,
                        "problemMatcher": [
                            {
                                "pattern": [
                                    {
                                        "regexp": ".",
                                        "file": 1,
                                        "location": 2,
                                        "message": 3
                                    }
                                ],
                                "background": {
                                    "activeOnStart": True,
                                    "beginsPattern": ".",
                                    "endsPattern": ".",
                                }
                            }
                        ]
                    })
        tasks_file = 'tasks.json'
        IdeBase.update_json_file(
            self.dot_code_dir(modified_recipe), tasks_file, tasks_dict)

    def vscode_tasks(self, args, modified_recipe):
        """Dispatch tasks.json generation depending on the build tool."""
        if modified_recipe.build_tool.is_c_ccp:
            self.vscode_tasks_cpp(args, modified_recipe)
        else:
            self.vscode_tasks_fallback(args, modified_recipe)

    def setup_modified_recipe(self, args, image_recipe, modified_recipe):
        """Generate the full set of VSCode configuration files for a workspace recipe."""
        self.vscode_settings(modified_recipe, image_recipe)
        self.vscode_extensions(modified_recipe)
        self.vscode_c_cpp_properties(modified_recipe)
        if args.target:
            self.initialize_gdb_cross_configs(
                image_recipe, modified_recipe, gdb_cross_config_class=GdbCrossConfigVSCode)
        self.vscode_launch(modified_recipe)
        self.vscode_tasks(args, modified_recipe)
def register_ide_plugin(ide_plugins):
    """Make the VSCode IDE handler available under the 'code' key."""
    ide_plugins.update({'code': IdeVSCode})

View File

@@ -0,0 +1,53 @@
#
# Copyright (C) 2023-2024 Siemens AG
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool ide-sdk generic IDE plugin"""
import os
import logging
from devtool.ide_plugins import IdeBase, GdbCrossConfig
logger = logging.getLogger('devtool')
class IdeNone(IdeBase):
    """Generate some generic helpers for other IDEs

    Modified recipe mode:
    Generate some helper scripts for remote debugging with GDB

    Shared sysroot mode:
    A wrapper for bitbake meta-ide-support and bitbake build-sysroots
    """

    def __init__(self):
        super().__init__()

    def setup_shared_sysroots(self, shared_env):
        """Point the user at the generated environment setup script."""
        support = shared_env.ide_support
        env_script = os.path.join(
            support.deploy_dir_image,
            'environment-setup-' + support.real_multimach_target_sys)
        logger.info("To use this SDK please source this: %s" % env_script)

    def setup_modified_recipe(self, args, image_recipe, modified_recipe):
        """generate some helper scripts and config files

        - Execute the do_install task
        - Execute devtool deploy-target
        - Generate a gdbinit file per executable
        - Generate the oe-scripts sym-link
        """
        script_path = modified_recipe.gen_install_deploy_script(args)
        logger.info("Created: %s" % script_path)
        self.initialize_gdb_cross_configs(image_recipe, modified_recipe)
        IdeBase.gen_oe_scrtips_sym_link(modified_recipe)
def register_ide_plugin(ide_plugins):
    """Make the generic ('none') IDE handler available to devtool ide-sdk."""
    ide_plugins.update({'none': IdeNone})

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,134 @@
# Development tool - import command plugin
#
# Copyright (C) 2014-2017 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool import plugin"""
import os
import tarfile
import logging
import collections
import json
import fnmatch
from devtool import standard, setup_tinfoil, replace_from_file, DevtoolError
from devtool import export
logger = logging.getLogger('devtool')
def devimport(args, config, basepath, workspace):
    """Entry point for the devtool 'import' subcommand.

    Extracts a tar archive previously created by "devtool export" into the
    workspace, skipping appends whose base recipe is not present in the
    current metadata, and re-points EXTERNALSRC paths at this workspace.
    Returns 0 on completion; raises DevtoolError on a bad/missing archive.
    """
    def get_pn(name):
        """ Returns the filename of a workspace recipe/append"""
        metadata = name.split('/')[-1]
        fn, _ = os.path.splitext(metadata)
        return fn

    if not os.path.exists(args.file):
        raise DevtoolError('Tar archive %s does not exist. Export your workspace using "devtool export"' % args.file)

    with tarfile.open(args.file) as tar:
        # Get exported metadata
        export_workspace_path = export_workspace = None
        try:
            metadata = tar.getmember(export.metadata)
        except KeyError as ke:
            # ke intentionally unused; absence of the member is the whole story
            raise DevtoolError('The export metadata file created by "devtool export" was not found. "devtool import" can only be used to import tar archives created by "devtool export".')

        # Metadata is extracted to CWD, read, then deleted again
        tar.extract(metadata)
        with open(metadata.name) as fdm:
            export_workspace_path, export_workspace = json.load(fdm)
        os.unlink(metadata.name)

        members = tar.getmembers()

        # Get appends and recipes from the exported archive, these
        # will be needed to find out those appends without corresponding
        # recipe pair
        append_fns, recipe_fns = set(), set()
        for member in members:
            if member.name.startswith('appends'):
                append_fns.add(get_pn(member.name))
            elif member.name.startswith('recipes'):
                recipe_fns.add(get_pn(member.name))

        # Setup tinfoil, get required data and shutdown
        tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
        try:
            # recipe here is a (filename, data) pair from pkg_fn.items();
            # recipe[0] is the recipe filename
            current_fns = [os.path.basename(recipe[0]) for recipe in tinfoil.cooker.recipecaches[''].pkg_fn.items()]
        finally:
            tinfoil.shutdown()

        # Find those appends that do not have recipes in current metadata
        non_importables = []
        for fn in append_fns - recipe_fns:
            # Check on current metadata (covering those layers indicated in bblayers.conf)
            for current_fn in current_fns:
                if fnmatch.fnmatch(current_fn, '*' + fn.replace('%', '') + '*'):
                    break
            else:
                non_importables.append(fn)
                # NOTE(review): "bbapppend" typo in this user-facing message
                logger.warning('No recipe to append %s.bbapppend, skipping' % fn)

        # Extract
        imported = []
        for member in members:
            if member.name == export.metadata:
                continue

            for nonimp in non_importables:
                pn = nonimp.split('_')[0]
                # do not extract data from non-importable recipes or metadata
                if member.name.startswith('appends/%s' % nonimp) or \
                   member.name.startswith('recipes/%s' % nonimp) or \
                   member.name.startswith('sources/%s' % pn):
                    break
            else:
                path = os.path.join(config.workspace_path, member.name)
                if os.path.exists(path):
                    # by default, no file overwrite is done unless -o is given by the user
                    if args.overwrite:
                        try:
                            tar.extract(member, path=config.workspace_path)
                        except PermissionError as pe:
                            logger.warning(pe)
                    else:
                        logger.warning('File already present. Use --overwrite/-o to overwrite it: %s' % member.name)
                        continue
                else:
                    tar.extract(member, path=config.workspace_path)

                # Update EXTERNALSRC and the devtool md5 file
                if member.name.startswith('appends'):
                    if export_workspace_path:
                        # appends created by 'devtool modify' just need to update the workspace
                        replace_from_file(path, export_workspace_path, config.workspace_path)

                        # appends created by 'devtool add' need replacement of exported source tree
                        pn = get_pn(member.name).split('_')[0]
                        exported_srctree = export_workspace[pn]['srctree']
                        if exported_srctree:
                            replace_from_file(path, exported_srctree, os.path.join(config.workspace_path, 'sources', pn))

                    standard._add_md5(config, pn, path)
                    imported.append(pn)

    if imported:
        logger.info('Imported recipes into workspace %s: %s' % (config.workspace_path, ', '.join(imported)))
    else:
        logger.warning('No recipes imported into the workspace')

    return 0
def register_commands(subparsers, context):
    """Register the devtool 'import' subcommand."""
    parser = subparsers.add_parser(
        'import',
        help='Import exported tar archive into workspace',
        description='Import tar archive previously created by "devtool export" into workspace',
        group='advanced')
    parser.add_argument('file', metavar='FILE', help='Name of the tar archive to import')
    parser.add_argument('--overwrite', '-o', action="store_true", help='Overwrite files when extracting')
    parser.set_defaults(func=devimport)

View File

@@ -0,0 +1,81 @@
# OpenEmbedded Development tool - menuconfig command plugin
#
# Copyright (C) 2018 Xilinx
# Written by: Chandana Kalluri <ckalluri@xilinx.com>
#
# SPDX-License-Identifier: MIT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool menuconfig plugin"""
import os
import bb
import logging
import argparse
import re
import glob
from devtool import setup_tinfoil, parse_recipe, DevtoolError, standard, exec_build_env_command
from devtool import check_workspace_recipe
logger = logging.getLogger('devtool')
def menuconfig(args, config, basepath, workspace):
    """Entry point for the devtool 'menuconfig' subcommand.

    Runs 'bitbake -c menuconfig' for a workspace recipe that defines a
    do_menuconfig task, preparing an oe-local-files directory (with a
    .gitignore) to hold the resulting devtool-fragment.cfg.
    Returns 0 on success, 1 if the recipe cannot be parsed; raises
    DevtoolError when the recipe has no do_menuconfig task.
    """
    rd = ""
    kconfigpath = ""  # NOTE(review): assigned below but never used
    pn_src = ""
    localfilesdir = ""
    workspace_dir = ""
    tinfoil = setup_tinfoil(basepath=basepath)
    try:
        rd = parse_recipe(config, tinfoil, args.component, appends=True, filter_workspace=False)
        if not rd:
            return 1
        check_workspace_recipe(workspace, args.component)
        pn = rd.getVar('PN')
        if not rd.getVarFlag('do_menuconfig','task'):
            raise DevtoolError("This recipe does not support menuconfig option")
        workspace_dir = os.path.join(config.workspace_path,'sources')
        kconfigpath = rd.getVar('B')
        pn_src = os.path.join(workspace_dir,pn)
        # add check to see if oe_local_files exists or not
        localfilesdir = os.path.join(pn_src,'oe-local-files')
        if not os.path.exists(localfilesdir):
            bb.utils.mkdirhier(localfilesdir)
            # Add gitignore to ensure source tree is clean
            gitignorefile = os.path.join(localfilesdir,'.gitignore')
            with open(gitignorefile, 'w') as f:
                f.write('# Ignore local files, by default. Remove this file if you want to commit the directory to Git\n')
                f.write('*\n')
    finally:
        tinfoil.shutdown()
    logger.info('Launching menuconfig')
    exec_build_env_command(config.init_path, basepath, 'bitbake -c menuconfig %s' % pn, watch=True)
    fragment = os.path.join(localfilesdir, 'devtool-fragment.cfg')
    # NOTE(review): return value of _create_kconfig_diff is unused here;
    # the call is made for its side effect of writing the fragment file
    res = standard._create_kconfig_diff(pn_src,rd,fragment)
    return 0
def register_commands(subparsers, context):
    """Register the devtool 'menuconfig' subcommand."""
    parser_menuconfig = subparsers.add_parser('menuconfig', help='Alter build-time configuration for a recipe', description='Launches the make menuconfig command (for recipes where do_menuconfig is available), allowing users to make changes to the build-time configuration. Creates a config fragment corresponding to changes made.', group='advanced')
    # Fix: help text previously read "compenent to alter config"
    parser_menuconfig.add_argument('component', help='component to alter config')
    parser_menuconfig.set_defaults(func=menuconfig, fixed_setup=context.fixed_setup)

View File

@@ -0,0 +1,50 @@
# Development tool - package command plugin
#
# Copyright (C) 2014-2015 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool plugin containing the package subcommands"""
import os
import subprocess
import logging
from bb.process import ExecutionError
from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError
logger = logging.getLogger('devtool')
def package(args, config, basepath, workspace):
    """Entry point for the devtool 'package' subcommand.

    Runs the configured package_write task for a workspace recipe and
    reports where the resulting packages were deployed. Returns 0 on
    success or bitbake's exit code on failure.
    """
    check_workspace_recipe(workspace, args.recipename)

    tinfoil = setup_tinfoil(basepath=basepath, config_only=True)
    try:
        # Allow the devtool config to override the package type detection
        image_pkgtype = config.get('Package', 'image_pkgtype', '')
        if not image_pkgtype:
            image_pkgtype = tinfoil.config_data.getVar('IMAGE_PKGTYPE')
        deploy_dir_pkg = tinfoil.config_data.getVar('DEPLOY_DIR_%s' % image_pkgtype.upper())
    finally:
        tinfoil.shutdown()

    package_task = config.get('Package', 'package_task', 'package_write_%s' % image_pkgtype)
    try:
        exec_build_env_command(config.init_path, basepath, 'bitbake -c %s %s' % (package_task, args.recipename), watch=True)
    # Fix: this module imports ExecutionError directly ('from bb.process
    # import ExecutionError'); the name 'bb' is not bound here, so the
    # previous 'except bb.process.ExecutionError' raised NameError instead
    # of catching the build failure.
    except ExecutionError as e:
        # We've already seen the output since watch=True, so just ensure we return something to the user
        return e.exitcode

    logger.info('Your packages are in %s' % deploy_dir_pkg)
    return 0
def register_commands(subparsers, context):
    """Register devtool subcommands from the package plugin."""
    if not context.fixed_setup:
        # Packaging is only offered inside the extensible SDK
        return
    parser_package = subparsers.add_parser(
        'package',
        help='Build packages for a recipe',
        description='Builds packages for a recipe\'s output files',
        group='testbuild', order=-5)
    parser_package.add_argument('recipename', help='Recipe to package')
    parser_package.set_defaults(func=package)

View File

@@ -0,0 +1,64 @@
# Development tool - runqemu command plugin
#
# Copyright (C) 2015 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool runqemu plugin"""
import os
import bb
import logging
import argparse
import glob
from devtool import exec_build_env_command, setup_tinfoil, DevtoolError
logger = logging.getLogger('devtool')
def runqemu(args, config, basepath, workspace):
    """Entry point for the devtool 'runqemu' subcommand."""
    tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
    try:
        config_data = tinfoil.config_data
        machine = config_data.getVar('MACHINE')
        bindir_native = os.path.join(
            config_data.getVar('STAGING_DIR'),
            config_data.getVar('BUILD_ARCH'),
            config_data.getVar('bindir_native').lstrip(os.path.sep))
    finally:
        tinfoil.shutdown()

    # QEMU must have been shipped with this SDK for runqemu to work
    if not glob.glob(os.path.join(bindir_native, 'qemu-system-*')):
        raise DevtoolError('QEMU is not available within this SDK')

    # Fall back to the first configured SDK target when no image was given
    imagename = args.imagename
    if not imagename:
        sdk_targets = config.get('SDK', 'sdk_targets', '').split()
        if sdk_targets:
            imagename = sdk_targets[0]
    if not imagename:
        raise DevtoolError('Unable to determine image name to run, please specify one')

    # FIXME runqemu assumes that if OECORE_NATIVE_SYSROOT is set then it shouldn't
    # run bitbake to find out the values of various environment variables, which
    # isn't the case for the extensible SDK. Work around it for now.
    newenv = dict(os.environ)
    newenv.pop('OECORE_NATIVE_SYSROOT', '')
    try:
        exec_build_env_command(config.init_path, basepath,
                               'runqemu %s %s %s' % (machine, imagename, " ".join(args.args)),
                               watch=True, env=newenv)
    except bb.process.ExecutionError as e:
        # We've already seen the output since watch=True, so just ensure we return something to the user
        return e.exitcode

    return 0
def register_commands(subparsers, context):
    """Register devtool subcommands from this plugin."""
    if not context.fixed_setup:
        # runqemu is only meaningful inside the extensible SDK
        return
    parser_runqemu = subparsers.add_parser(
        'runqemu', help='Run QEMU on the specified image',
        description='Runs QEMU to boot the specified image',
        group='testbuild', order=-20)
    parser_runqemu.add_argument('imagename', help='Name of built image to boot within QEMU', nargs='?')
    parser_runqemu.add_argument('args', help='Any remaining arguments are passed to the runqemu script (pass --help after imagename to see what these are)',
                                nargs=argparse.REMAINDER)
    parser_runqemu.set_defaults(func=runqemu)

View File

@@ -0,0 +1,330 @@
# Development tool - sdk-update command plugin
#
# Copyright (C) 2015-2016 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import subprocess
import logging
import glob
import shutil
import errno
import sys
import tempfile
import re
from devtool import exec_build_env_command, setup_tinfoil, parse_recipe, DevtoolError
logger = logging.getLogger('devtool')
def parse_locked_sigs(sigfile_path):
    """Return <pn:task>:<hash> dictionary parsed from a locked-sigs file.

    Lines without a colon are ignored; only the first whitespace-separated
    token after the last colon is kept as the hash.
    """
    sig_dict = {}
    with open(sigfile_path) as sigfile:
        for line in sigfile:
            if ':' not in line:
                continue
            taskkey, _, hashval = line.rpartition(':')
            sig_dict[taskkey.strip()] = hashval.split()[0]
    return sig_dict
def generate_update_dict(sigfile_new, sigfile_old):
    """Return a dict containing <pn:task>:<hash> which indicates what need to be updated"""
    sigdict_new = parse_locked_sigs(sigfile_new)
    sigdict_old = parse_locked_sigs(sigfile_old)
    # A task needs updating when it is new, or its locked hash has changed
    return {taskkey: sig for taskkey, sig in sigdict_new.items()
            if sigdict_old.get(taskkey) != sig}
def get_sstate_objects(update_dict, sstate_dir):
    """Return a list containing sstate objects which are to be installed"""
    sstate_objects = []
    for hashval in update_dict.values():
        prefix = hashval[:2]
        # Look in both the flat and the arch-subdirectory sstate layouts
        matches = set(glob.glob(sstate_dir + '/' + prefix + '/*' + hashval + '*.tgz'))
        matches |= set(glob.glob(sstate_dir + '/*/' + prefix + '/*' + hashval + '*.tgz'))
        matches = list(matches)
        if len(matches) == 1:
            sstate_objects.extend(matches)
        elif len(matches) > 1:
            logger.error("More than one matching sstate object found for %s" % hashval)
    return sstate_objects
def mkdir(d):
    """Create directory d (and any parents), tolerating it already existing."""
    try:
        os.makedirs(d)
    except OSError as e:
        # Only swallow 'already exists'; re-raise everything else with a
        # bare 'raise' (preserves the original traceback, unlike 'raise e')
        if e.errno != errno.EEXIST:
            raise
def install_sstate_objects(sstate_objects, src_sdk, dest_sdk):
    """Install sstate objects into destination SDK.

    Copies each object to the corresponding path under dest_sdk, creating
    directories as needed. Raises DevtoolError if dest_sdk has no
    sstate-cache directory (i.e. is not an extensible SDK).
    """
    sstate_dir = os.path.join(dest_sdk, 'sstate-cache')
    if not os.path.exists(sstate_dir):
        logger.error("Missing sstate-cache directory in %s, it might not be an extensible SDK." % dest_sdk)
        # Fix: this was a bare 'raise' with no active exception, which
        # itself crashes with "RuntimeError: No active exception to re-raise"
        raise DevtoolError("Missing sstate-cache directory in %s, it might not be an extensible SDK." % dest_sdk)
    for sb in sstate_objects:
        dst = sb.replace(src_sdk, dest_sdk)
        destdir = os.path.dirname(dst)
        mkdir(destdir)
        logger.debug("Copying %s to %s" % (sb, dst))
        shutil.copy(sb, dst)
def check_manifest(fn, basepath):
    """Return the relative paths of files whose checksum differs from manifest fn."""
    import bb.utils
    changedfiles = []
    with open(fn, 'r') as manifest:
        for line in manifest:
            fields = line.split()
            if len(fields) <= 1:
                continue
            chksum, fpath = fields[0], fields[1]
            curr_chksum = bb.utils.sha256_file(os.path.join(basepath, fpath))
            if chksum != curr_chksum:
                logger.debug('File %s changed: old csum = %s, new = %s' % (os.path.join(basepath, fpath), curr_chksum, chksum))
                changedfiles.append(fpath)
    return changedfiles
def sdk_update(args, config, basepath, workspace):
    """Entry point for devtool sdk-update command.

    Fetches an updated SDK manifest from the update server, syncs metadata
    (layers) via git, downloads changed conf files and uninative tarballs,
    then optionally re-prepares the build system. Returns 0 on success /
    up-to-date, a non-zero wget/git exit code or -1 on failure.
    """
    updateserver = args.updateserver
    if not updateserver:
        updateserver = config.get('SDK', 'updateserver', '')
    logger.debug("updateserver: %s" % updateserver)

    # Make sure we are using sdk-update from within SDK
    logger.debug("basepath = %s" % basepath)
    old_locked_sig_file_path = os.path.join(basepath, 'conf/locked-sigs.inc')
    if not os.path.exists(old_locked_sig_file_path):
        logger.error("Not using devtool's sdk-update command from within an extensible SDK. Please specify correct basepath via --basepath option")
        return -1
    else:
        logger.debug("Found conf/locked-sigs.inc in %s" % basepath)

    if not '://' in updateserver:
        logger.error("Update server must be a URL")
        return -1

    layers_dir = os.path.join(basepath, 'layers')
    conf_dir = os.path.join(basepath, 'conf')

    # Grab variable values
    tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
    try:
        stamps_dir = tinfoil.config_data.getVar('STAMPS_DIR')
        sstate_mirrors = tinfoil.config_data.getVar('SSTATE_MIRRORS')
        site_conf_version = tinfoil.config_data.getVar('SITE_CONF_VERSION')
    finally:
        tinfoil.shutdown()

    # All downloads are staged in a temp dir and only moved over once complete
    tmpsdk_dir = tempfile.mkdtemp()
    try:
        os.makedirs(os.path.join(tmpsdk_dir, 'conf'))
        # NOTE(review): new_locked_sig_file_path is assigned but never used here
        new_locked_sig_file_path = os.path.join(tmpsdk_dir, 'conf', 'locked-sigs.inc')
        # Fetch manifest from server
        tmpmanifest = os.path.join(tmpsdk_dir, 'conf', 'sdk-conf-manifest')
        ret = subprocess.call("wget -q -O %s %s/conf/sdk-conf-manifest" % (tmpmanifest, updateserver), shell=True)
        if ret != 0:
            # NOTE(review): "dowload" typo in this user-facing message
            logger.error("Cannot dowload files from %s" % updateserver)
            return ret
        changedfiles = check_manifest(tmpmanifest, basepath)
        if not changedfiles:
            logger.info("Already up-to-date")
            return 0
        # Update metadata
        logger.debug("Updating metadata via git ...")
        #Check for the status before doing a fetch and reset
        if os.path.exists(os.path.join(basepath, 'layers/.git')):
            out = subprocess.check_output("git status --porcelain", shell=True, cwd=layers_dir)
            if not out:
                # Clean tree: fast-forward/reset to the upstream branch
                ret = subprocess.call("git fetch --all; git reset --hard @{u}", shell=True, cwd=layers_dir)
            else:
                logger.error("Failed to update metadata as there have been changes made to it. Aborting.");
                logger.error("Changed files:\n%s" % out);
                return -1
        else:
            ret = -1
        if ret != 0:
            # No usable git checkout: clone fresh into the staging dir instead
            ret = subprocess.call("git clone %s/layers/.git" % updateserver, shell=True, cwd=tmpsdk_dir)
            if ret != 0:
                logger.error("Updating metadata via git failed")
                return ret
        logger.debug("Updating conf files ...")
        for changedfile in changedfiles:
            ret = subprocess.call("wget -q -O %s %s/%s" % (changedfile, updateserver, changedfile), shell=True, cwd=tmpsdk_dir)
            if ret != 0:
                logger.error("Updating %s failed" % changedfile)
                return ret

        # Check if UNINATIVE_CHECKSUM changed
        uninative = False
        if 'conf/local.conf' in changedfiles:
            def read_uninative_checksums(fn):
                # Collect (buildarch, checksum) pairs from UNINATIVE_CHECKSUM lines
                chksumitems = []
                with open(fn, 'r') as f:
                    for line in f:
                        if line.startswith('UNINATIVE_CHECKSUM'):
                            splitline = re.split(r'[\[\]"\']', line)
                            if len(splitline) > 3:
                                chksumitems.append((splitline[1], splitline[3]))
                return chksumitems

            oldsums = read_uninative_checksums(os.path.join(basepath, 'conf/local.conf'))
            newsums = read_uninative_checksums(os.path.join(tmpsdk_dir, 'conf/local.conf'))
            if oldsums != newsums:
                uninative = True
                for buildarch, chksum in newsums:
                    uninative_file = os.path.join('downloads', 'uninative', chksum, '%s-nativesdk-libc.tar.bz2' % buildarch)
                    mkdir(os.path.join(tmpsdk_dir, os.path.dirname(uninative_file)))
                    # NOTE(review): wget return code is not checked here
                    ret = subprocess.call("wget -q -O %s %s/%s" % (uninative_file, updateserver, uninative_file), shell=True, cwd=tmpsdk_dir)

        # Ok, all is well at this point - move everything over
        tmplayers_dir = os.path.join(tmpsdk_dir, 'layers')
        if os.path.exists(tmplayers_dir):
            shutil.rmtree(layers_dir)
            shutil.move(tmplayers_dir, layers_dir)
        for changedfile in changedfiles:
            destfile = os.path.join(basepath, changedfile)
            os.remove(destfile)
            shutil.move(os.path.join(tmpsdk_dir, changedfile), destfile)
        os.remove(os.path.join(conf_dir, 'sdk-conf-manifest'))
        shutil.move(tmpmanifest, conf_dir)
        if uninative:
            shutil.rmtree(os.path.join(basepath, 'downloads', 'uninative'))
            shutil.move(os.path.join(tmpsdk_dir, 'downloads', 'uninative'), os.path.join(basepath, 'downloads'))

        if not sstate_mirrors:
            # First update on this SDK: point sstate at the update server
            with open(os.path.join(conf_dir, 'site.conf'), 'a') as f:
                f.write('SCONF_VERSION = "%s"\n' % site_conf_version)
                f.write('SSTATE_MIRRORS:append = " file://.* %s/sstate-cache/PATH"\n' % updateserver)
    finally:
        shutil.rmtree(tmpsdk_dir)

    if not args.skip_prepare:
        # Find all potentially updateable tasks
        sdk_update_targets = []
        tasks = ['do_populate_sysroot', 'do_packagedata']
        for root, _, files in os.walk(stamps_dir):
            for fn in files:
                if not '.sigdata.' in fn:
                    for task in tasks:
                        if '.%s.' % task in fn or '.%s_setscene.' % task in fn:
                            sdk_update_targets.append('%s:%s' % (os.path.basename(root), task))
        # Run bitbake command for the whole SDK
        logger.info("Preparing build system... (This may take some time.)")
        try:
            exec_build_env_command(config.init_path, basepath, 'bitbake --setscene-only %s' % ' '.join(sdk_update_targets), stderr=subprocess.STDOUT)
            # A dry run afterwards must show nothing left to execute
            output, _ = exec_build_env_command(config.init_path, basepath, 'bitbake -n %s' % ' '.join(sdk_update_targets), stderr=subprocess.STDOUT)
            runlines = []
            for line in output.splitlines():
                if 'Running task ' in line:
                    runlines.append(line)
            if runlines:
                logger.error('Unexecuted tasks found in preparation log:\n %s' % '\n '.join(runlines))
                return -1
        except bb.process.ExecutionError as e:
            logger.error('Preparation failed:\n%s' % e.stdout)
            return -1
    return 0
def sdk_install(args, config, basepath, workspace):
    """Entry point for the devtool sdk-install command.

    Installs development artifacts (sysroot + package data) for the given
    recipes into the extensible SDK, preferring sstate unless --allow-build
    is given, then rebuilds the shared sysroots. Returns 0/1 or raises
    DevtoolError on failure.
    """
    import oe.recipeutils
    import bb.process

    for recipe in args.recipename:
        if recipe in workspace:
            raise DevtoolError('recipe %s is a recipe in your workspace' % recipe)

    tasks = ['do_populate_sysroot', 'do_packagedata']
    stampprefixes = {}
    def checkstamp(recipe):
        # Installed == a non-sigdata stamp exists for the recipe's first task
        stampprefix = stampprefixes[recipe]
        stamps = glob.glob(stampprefix + '*')
        for stamp in stamps:
            if '.sigdata.' not in stamp and stamp.startswith((stampprefix + '.', stampprefix + '_setscene.')):
                return True
        return False

    install_recipes = []
    tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
    try:
        for recipe in args.recipename:
            rd = parse_recipe(config, tinfoil, recipe, True)
            if not rd:
                return 1
            stampprefixes[recipe] = '%s.%s' % (rd.getVar('STAMP'), tasks[0])
            if checkstamp(recipe):
                logger.info('%s is already installed' % recipe)
            else:
                install_recipes.append(recipe)
    finally:
        tinfoil.shutdown()

    if install_recipes:
        logger.info('Installing %s...' % ', '.join(install_recipes))
        install_tasks = []
        for recipe in install_recipes:
            for task in tasks:
                # Native recipes have no packaging tasks
                if recipe.endswith('-native') and 'package' in task:
                    continue
                install_tasks.append('%s:%s' % (recipe, task))
        options = ''
        if not args.allow_build:
            options += ' --setscene-only'
        try:
            exec_build_env_command(config.init_path, basepath, 'bitbake %s %s' % (options, ' '.join(install_tasks)), watch=True)
        except bb.process.ExecutionError as e:
            raise DevtoolError('Failed to install %s:\n%s' % (recipe, str(e)))
        # Fix: removed unreachable "failed = True" after this raise and the
        # dead "if failed: return 2" that followed the loop - the raise is
        # the failure path.
        for recipe in install_recipes:
            if checkstamp(recipe):
                logger.info('Successfully installed %s' % recipe)
            else:
                raise DevtoolError('Failed to install %s - unavailable' % recipe)

        try:
            exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots -c build_native_sysroot', watch=True)
            exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots -c build_target_sysroot', watch=True)
        except bb.process.ExecutionError as e:
            raise DevtoolError('Failed to bitbake build-sysroots:\n%s' % (str(e)))
def register_commands(subparsers, context):
    """Register devtool subcommands from the sdk plugin."""
    if not context.fixed_setup:
        # These commands only make sense inside the extensible SDK
        return

    parser_sdk = subparsers.add_parser(
        'sdk-update',
        help='Update SDK components',
        description='Updates installed SDK components from a remote server',
        group='sdk')
    updateserver = context.config.get('SDK', 'updateserver', '')
    if updateserver:
        parser_sdk.add_argument('updateserver', help='The update server to fetch latest SDK components from (default %s)' % updateserver, nargs='?')
    else:
        parser_sdk.add_argument('updateserver', help='The update server to fetch latest SDK components from')
    parser_sdk.add_argument('--skip-prepare', action="store_true", help='Skip re-preparing the build system after updating (for debugging only)')
    parser_sdk.set_defaults(func=sdk_update)

    parser_sdk_install = subparsers.add_parser(
        'sdk-install',
        help='Install additional SDK components',
        description='Installs additional recipe development files into the SDK. (You can use "devtool search" to find available recipes.)',
        group='sdk')
    parser_sdk_install.add_argument('recipename', help='Name of the recipe to install the development artifacts for', nargs='+')
    parser_sdk_install.add_argument('-s', '--allow-build', help='Allow building requested item(s) from source', action='store_true')
    parser_sdk_install.set_defaults(func=sdk_install)

View File

@@ -0,0 +1,109 @@
# Development tool - search command plugin
#
# Copyright (C) 2015 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool search plugin"""
import os
import bb
import logging
import argparse
import re
from devtool import setup_tinfoil, parse_recipe, DevtoolError
logger = logging.getLogger('devtool')
def search(args, config, basepath, workspace):
    """Entry point for the devtool 'search' subcommand.

    Matches args.keyword (a regular expression) against pkgdata filenames,
    package names and selected pkgdata fields, then against recipe names
    and their provides/rprovides, printing "name  summary" per match.
    Returns 0.
    """
    tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
    try:
        pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
        defsummary = tinfoil.config_data.getVar('SUMMARY', False) or ''

        keyword_rc = re.compile(args.keyword)

        def print_match(pn):
            # Print the recipe name and its summary, suppressing the
            # default (unset) SUMMARY value
            rd = parse_recipe(config, tinfoil, pn, True)
            if not rd:
                return
            summary = rd.getVar('SUMMARY')
            if summary == rd.expand(defsummary):
                summary = ''
            print("%s %s" % (pn.ljust(20), summary))

        matches = []
        if os.path.exists(pkgdata_dir):
            # First pass: search built package data (most detailed results)
            for fn in os.listdir(pkgdata_dir):
                pfn = os.path.join(pkgdata_dir, fn)
                if not os.path.isfile(pfn):
                    continue
                packages = []
                match = False
                if keyword_rc.search(fn):
                    match = True
                if not match:
                    with open(pfn, 'r') as f:
                        for line in f:
                            if line.startswith('PACKAGES:'):
                                packages = line.split(':', 1)[1].strip().split()
                    for pkg in packages:
                        if keyword_rc.search(pkg):
                            match = True
                            break
                        # Only consult runtime pkgdata for packages actually packaged
                        if os.path.exists(os.path.join(pkgdata_dir, 'runtime', pkg + '.packaged')):
                            with open(os.path.join(pkgdata_dir, 'runtime', pkg), 'r') as f:
                                for line in f:
                                    if ': ' in line:
                                        splitline = line.split(': ', 1)
                                        key = splitline[0]
                                        value = splitline[1].strip()
                                        key = key.replace(":" + pkg, "")
                                        if key in ['PKG', 'DESCRIPTION', 'FILES_INFO', 'FILERPROVIDES']:
                                            if keyword_rc.search(value):
                                                match = True
                                                break
                if match:
                    print_match(fn)
                    matches.append(fn)
        else:
            logger.warning('Package data is not available, results may be limited')

        # Second pass: search all recipes known to tinfoil, skipping ones
        # already reported from pkgdata
        for recipe in tinfoil.all_recipes():
            if args.fixed_setup and 'nativesdk' in recipe.inherits():
                continue

            match = False
            if keyword_rc.search(recipe.pn):
                match = True
            else:
                for prov in recipe.provides:
                    if keyword_rc.search(prov):
                        match = True
                        break
                if not match:
                    for rprov in recipe.rprovides:
                        if keyword_rc.search(rprov):
                            match = True
                            break
            if match and not recipe.pn in matches:
                print_match(recipe.pn)
    finally:
        tinfoil.shutdown()

    return 0
def register_commands(subparsers, context):
    """Register devtool subcommands from this plugin."""
    parser_search = subparsers.add_parser(
        'search',
        help='Search available recipes',
        description='Searches for available recipes. Matches on recipe name, package name, description and installed files, and prints the recipe name and summary on match.',
        group='info')
    parser_search.add_argument('keyword', help='Keyword to search for (regular expression syntax allowed, use quotes to avoid shell expansion)')
    parser_search.set_defaults(func=search, no_workspace=True, fixed_setup=context.fixed_setup)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,693 @@
# Development tool - upgrade command plugin
#
# Copyright (C) 2014-2017 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool upgrade plugin"""
import os
import sys
import re
import shutil
import tempfile
import logging
import argparse
import scriptutils
import errno
import bb
devtool_path = os.path.dirname(os.path.realpath(__file__)) + '/../../../meta/lib'
sys.path = sys.path + [devtool_path]
import oe.recipeutils
from devtool import standard
from devtool import exec_build_env_command, setup_tinfoil, DevtoolError, parse_recipe, use_external_build, update_unlockedsigs, check_prerelease_version
logger = logging.getLogger('devtool')
def _run(cmd, cwd=''):
    """Run a shell command via bb.process.run in cwd.

    Returns the (stdout, stderr) tuple from bb.process.run.
    """
    # Lazy %-style logging args: formatting is skipped unless DEBUG is enabled
    logger.debug("Running command %s> %s", cwd, cmd)
    return bb.process.run('%s' % cmd, cwd=cwd)
def _get_srctree(tmpdir):
    """Return the single unpacked source subdirectory beneath tmpdir."""
    dirs = scriptutils.filter_src_subdirs(tmpdir)
    if len(dirs) != 1:
        raise DevtoolError("Cannot determine where the source tree is after unpacking in {}: {}".format(tmpdir,dirs))
    return os.path.join(tmpdir, dirs[0])
def _copy_source_code(orig, dest):
    """Move the tracked file tree from orig into dest, creating directories."""
    for relpath in standard._ls_tree(orig):
        target = os.path.join(dest, relpath)
        bb.utils.mkdirhier(os.path.dirname(target))
        shutil.move(os.path.join(orig, relpath), target)
def _remove_patch_dirs(recipefolder):
for root, dirs, files in os.walk(recipefolder):
for d in dirs:
shutil.rmtree(os.path.join(root,d))
def _recipe_contains(rd, var):
    """Return True if var is set within the recipe's own directory tree."""
    rf = rd.getVar('FILE')
    recipedir_prefix = os.path.dirname(rf) + os.sep
    varfiles = oe.recipeutils.get_var_files(rf, [var], rd)
    for fn in varfiles.values():
        if fn and fn.startswith(recipedir_prefix):
            return True
    return False
def _rename_recipe_dirs(oldpv, newpv, path):
for root, dirs, files in os.walk(path):
# Rename directories with the version in their name
for olddir in dirs:
if olddir.find(oldpv) != -1:
newdir = olddir.replace(oldpv, newpv)
if olddir != newdir:
shutil.move(os.path.join(path, olddir), os.path.join(path, newdir))
# Rename any inc files with the version in their name (unusual, but possible)
for oldfile in files:
if oldfile.endswith('.inc'):
if oldfile.find(oldpv) != -1:
newfile = oldfile.replace(oldpv, newpv)
if oldfile != newfile:
bb.utils.rename(os.path.join(path, oldfile),
os.path.join(path, newfile))
def _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path):
oldrecipe = os.path.basename(oldrecipe)
if oldrecipe.endswith('_%s.bb' % oldpv):
newrecipe = '%s_%s.bb' % (bpn, newpv)
if oldrecipe != newrecipe:
shutil.move(os.path.join(path, oldrecipe), os.path.join(path, newrecipe))
else:
newrecipe = oldrecipe
return os.path.join(path, newrecipe)
def _rename_recipe_files(oldrecipe, bpn, oldpv, newpv, path):
    """Rename versioned dirs/.inc files under path, then the recipe file itself.

    Returns the full path of the (possibly renamed) recipe file.
    """
    _rename_recipe_dirs(oldpv, newpv, path)
    return _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path)
def _write_append(rc, srctreebase, srctree, same_dir, no_same_dir, revs, copied, workspace, d):
    """Writes an append file.

    Creates workspace/appends/<recipe>.bbappend pointing EXTERNALSRC at
    srctree and recording initial revisions plus any copied original files.
    Returns the path of the bbappend written; raises DevtoolError if the
    recipe file rc does not exist.
    """
    if not os.path.exists(rc):
        raise DevtoolError("bbappend not created because %s does not exist" % rc)

    appendpath = os.path.join(workspace, 'appends')
    if not os.path.exists(appendpath):
        bb.utils.mkdirhier(appendpath)

    brf = os.path.basename(os.path.splitext(rc)[0]) # rc basename

    srctree = os.path.abspath(srctree)
    pn = d.getVar('PN')
    af = os.path.join(appendpath, '%s.bbappend' % brf)
    with open(af, 'w') as f:
        f.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n\n')
        # Local files can be modified/tracked in separate subdir under srctree
        # Mostly useful for packages with S != WORKDIR
        f.write('FILESPATH:prepend := "%s:"\n' %
                os.path.join(srctreebase, 'oe-local-files'))
        f.write('# srctreebase: %s\n' % srctreebase)
        f.write('inherit externalsrc\n')
        # Fix: the two adjacent string literals previously ran together as
        # "affectingmultiple" in the generated bbappend comment
        f.write(('# NOTE: We use pn- overrides here to avoid affecting '
                 'multiple variants in the case where the recipe uses BBCLASSEXTEND\n'))
        f.write('EXTERNALSRC:pn-%s = "%s"\n' % (pn, srctree))
        b_is_s = use_external_build(same_dir, no_same_dir, d)
        if b_is_s:
            f.write('EXTERNALSRC_BUILD:pn-%s = "%s"\n' % (pn, srctree))
        f.write('\n')
        if revs:
            for name, rev in revs.items():
                f.write('# initial_rev %s: %s\n' % (name, rev))
        if copied:
            f.write('# original_path: %s\n' % os.path.dirname(d.getVar('FILE')))
            f.write('# original_files: %s\n' % ' '.join(copied))
    return af
def _cleanup_on_error(rd, srctree):
if os.path.exists(rd):
shutil.rmtree(rd)
srctree = os.path.abspath(srctree)
if os.path.exists(srctree):
shutil.rmtree(srctree)
def _upgrade_error(e, rd, srctree, keep_failure=False, extramsg=None):
    """Report an upgrade failure, optionally clean up, and exit with status 1."""
    if not keep_failure:
        # Remove partially-created recipe dir and source tree
        _cleanup_on_error(rd, srctree)
    logger.error(e)
    if extramsg:
        logger.error(extramsg)
    if keep_failure:
        logger.info('Preserving failed upgrade files (--keep-failure)')
    sys.exit(1)
def _get_uri(rd):
srcuris = rd.getVar('SRC_URI').split()
if not len(srcuris):
raise DevtoolError('SRC_URI not found on recipe')
# Get first non-local entry in SRC_URI - usually by convention it's
# the first entry, but not always!
srcuri = None
for entry in srcuris:
if not entry.startswith('file://'):
srcuri = entry
break
if not srcuri:
raise DevtoolError('Unable to find non-local entry in SRC_URI')
srcrev = '${AUTOREV}'
if '://' in srcuri:
# Fetch a URL
rev_re = re.compile(';rev=([^;]+)')
res = rev_re.search(srcuri)
if res:
srcrev = res.group(1)
srcuri = rev_re.sub('', srcuri)
return srcuri, srcrev
def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, keep_temp, tinfoil, rd):
    """Extract sources of a recipe with a new version

    Checks out (git) or fetches (tarball) the new version into srctree and
    rebases the recipe's patch branches onto it. Returns a tuple
    (revs, checksums, srcbranch, srcsubdir_rel) where revs maps each source
    path (relative to srctree) to its new HEAD revision.
    """
    def __run(cmd):
        """Simple wrapper which calls _run with srctree as cwd"""
        return _run(cmd, srctree)

    crd = rd.createCopy()

    pv = crd.getVar('PV')
    crd.setVar('PV', newpv)

    tmpsrctree = None
    uri, rev = _get_uri(crd)
    if srcrev:
        rev = srcrev
    paths = [srctree]
    if uri.startswith('git://') or uri.startswith('gitsm://'):
        # Git-fetched source: the new version is just a new revision in the
        # existing clone, so fetch/checkout and tag it (and its submodules)
        __run('git fetch')
        __run('git checkout %s' % rev)
        __run('git tag -f devtool-base-new')
        __run('git submodule update --recursive')
        __run('git submodule foreach \'git tag -f devtool-base-new\'')
        (stdout, _) = __run('git submodule --quiet foreach \'echo $sm_path\'')
        paths += [os.path.join(srctree, p) for p in stdout.splitlines()]
        checksums = {}
        _, _, _, _, _, params = bb.fetch2.decodeurl(uri)
        srcsubdir_rel = params.get('destsuffix', 'git')
        if not srcbranch:
            # No branch given on the command line - deduce it from which
            # remote branches contain the requested revision
            check_branch, check_branch_err = __run('git branch -r --contains %s' % srcrev)
            get_branch = [x.strip() for x in check_branch.splitlines()]
            # Remove HEAD reference point and drop remote prefix
            get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
            if len(get_branch) == 1:
                # If srcrev is on only ONE branch, then use that branch
                srcbranch = get_branch[0]
            elif 'main' in get_branch:
                # If srcrev is on multiple branches, then choose 'main' if it is one of them
                srcbranch = 'main'
            elif 'master' in get_branch:
                # Otherwise choose 'master' if it is one of the branches
                srcbranch = 'master'
            else:
                # If get_branch contains more than one objects, then display error and exit.
                mbrch = '\n ' + '\n '.join(get_branch)
                raise DevtoolError('Revision %s was found on multiple branches: %s\nPlease provide the correct branch in the devtool command with "--srcbranch" or "-B" option.' % (srcrev, mbrch))
    else:
        # Non-git (e.g. tarball) source: fetch the new version into a
        # temporary directory and copy it over the old tree on a new branch
        __run('git checkout devtool-base -b devtool-%s' % newpv)

        tmpdir = tempfile.mkdtemp(prefix='devtool')
        try:
            checksums, ftmpdir = scriptutils.fetch_url(tinfoil, uri, rev, tmpdir, logger, preserve_tmp=keep_temp)
        except scriptutils.FetchUrlFailure as e:
            raise DevtoolError(e)

        if ftmpdir and keep_temp:
            logger.info('Fetch temp directory is %s' % ftmpdir)

        tmpsrctree = _get_srctree(tmpdir)
        srctree = os.path.abspath(srctree)
        srcsubdir_rel = os.path.relpath(tmpsrctree, tmpdir)

        # Delete all sources so we ensure no stray files are left over
        for item in os.listdir(srctree):
            if item in ['.git', 'oe-local-files']:
                continue
            itempath = os.path.join(srctree, item)
            if os.path.isdir(itempath):
                shutil.rmtree(itempath)
            else:
                os.remove(itempath)

        # Copy in new ones
        _copy_source_code(tmpsrctree, srctree)

        (stdout,_) = __run('git ls-files --modified --others')
        filelist = stdout.splitlines()
        pbar = bb.ui.knotty.BBProgress('Adding changed files', len(filelist))
        pbar.start()
        # Batch the 'git add' invocations to keep command lines short
        batchsize = 100
        for i in range(0, len(filelist), batchsize):
            batch = filelist[i:i+batchsize]
            __run('git add -f -A %s' % ' '.join(['"%s"' % item for item in batch]))
            pbar.update(i)
        pbar.finish()

        useroptions = []
        oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=rd)
        __run('git %s commit -q -m "Commit of upstream changes at version %s" --allow-empty' % (' '.join(useroptions), newpv))
        __run('git tag -f devtool-base-%s' % newpv)

    # Record the new HEAD of the main tree and every submodule path
    revs = {}
    for path in paths:
        (stdout, _) = _run('git rev-parse HEAD', cwd=path)
        revs[os.path.relpath(path, srctree)] = stdout.rstrip()

    if no_patch:
        patches = oe.recipeutils.get_recipe_patches(crd)
        if patches:
            logger.warning('By user choice, the following patches will NOT be applied to the new source tree:\n %s' % '\n '.join([os.path.basename(patch) for patch in patches]))
    else:
        for path in paths:
            _run('git checkout devtool-patched -b %s' % branch, cwd=path)
            (stdout, _) = _run('git branch --list devtool-override-*', cwd=path)
            branches_to_rebase = [branch] + stdout.split()
            target_branch = revs[os.path.relpath(path, srctree)]

            # There is a bug (or feature?) in git rebase where if a commit with
            # a note is fully rebased away by being part of an old commit, the
            # note is still attached to the old commit. Avoid this by making
            # sure all old devtool related commits have a note attached to them
            # (this assumes git config notes.rewriteMode is set to ignore).
            (stdout, _) = __run('git rev-list devtool-base..%s' % target_branch)
            for rev in stdout.splitlines():
                if not oe.patch.GitApplyTree.getNotes(path, rev):
                    oe.patch.GitApplyTree.addNote(path, rev, "dummy")

            for b in branches_to_rebase:
                logger.info("Rebasing {} onto {}".format(b, target_branch))
                _run('git checkout %s' % b, cwd=path)
                try:
                    _run('git rebase %s' % target_branch, cwd=path)
                except bb.process.ExecutionError as e:
                    if 'conflict' in e.stdout:
                        logger.warning('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
                        _run('git rebase --abort', cwd=path)
                    else:
                        logger.warning('Command \'%s\' failed:\n%s' % (e.command, e.stdout))

            # Remove any dummy notes added above.
            (stdout, _) = __run('git rev-list devtool-base..%s' % target_branch)
            for rev in stdout.splitlines():
                oe.patch.GitApplyTree.removeNote(path, rev, "dummy")

            _run('git checkout %s' % branch, cwd=path)

    if tmpsrctree:
        if keep_temp:
            logger.info('Preserving temporary directory %s' % tmpsrctree)
        else:
            shutil.rmtree(tmpsrctree)
            if tmpdir != tmpsrctree:
                shutil.rmtree(tmpdir)

    return (revs, checksums, srcbranch, srcsubdir_rel)
def _add_license_diff_to_recipe(path, diff):
    """Prepend a commented-out license diff, plus an explanatory notice, to the recipe at path."""
    notice_text = """# FIXME: the LIC_FILES_CHKSUM values have been updated by 'devtool upgrade'.
# The following is the difference between the old and the new license text.
# Please update the LICENSE value if needed, and summarize the changes in
# the commit message via 'License-Update:' tag.
# (example: 'License-Update: copyright years updated.')
#
# The changes:
#
"""
    commented_diff = "\n".join("# {}".format(line) for line in diff.split('\n'))
    # Read the current recipe content, then rewrite the file with the
    # notice + diff in front of it
    with open(path, 'rb') as f:
        original = f.read()
    with open(path, 'wb') as f:
        for chunk in (notice_text.encode(), commented_diff.encode(), "\n#\n\n".encode()):
            f.write(chunk)
        f.write(original)
def _create_new_recipe(newpv, checksums, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses, srctree, keep_failure):
    """Creates the new recipe under workspace

    Copies the recipe files into <workspace>/recipes/<bpn>, renames them for
    the new version and patches in updated values (PV, SRCREV, SRC_URI
    branch/checksums, S, LIC_FILES_CHKSUM as applicable).
    Returns (fullpath, copied): the new recipe path and the copied files.
    """
    bpn = rd.getVar('BPN')
    path = os.path.join(workspace, 'recipes', bpn)
    bb.utils.mkdirhier(path)
    copied, _ = oe.recipeutils.copy_recipe_files(rd, path, all_variants=True)
    if not copied:
        raise DevtoolError('Internal error - no files were copied for recipe %s' % bpn)
    logger.debug('Copied %s to %s' % (copied, path))

    oldpv = rd.getVar('PV')
    if not newpv:
        newpv = oldpv
    origpath = rd.getVar('FILE')
    fullpath = _rename_recipe_files(origpath, bpn, oldpv, newpv, path)
    logger.debug('Upgraded %s => %s' % (origpath, fullpath))

    # newvalues collects variable updates to apply; a value of None means
    # "drop this variable" (see the SRC_URI checksum handling below)
    newvalues = {}
    if _recipe_contains(rd, 'PV') and newpv != oldpv:
        newvalues['PV'] = newpv

    if srcrev:
        newvalues['SRCREV'] = srcrev

    if srcbranch:
        # Point the first git/gitsm entry in SRC_URI at the new branch
        src_uri = oe.recipeutils.split_var_value(rd.getVar('SRC_URI', False) or '')
        changed = False
        replacing = True
        new_src_uri = []
        for entry in src_uri:
            try:
                scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(entry)
            except bb.fetch2.MalformedUrl as e:
                raise DevtoolError("Could not decode SRC_URI: {}".format(e))
            if replacing and scheme in ['git', 'gitsm']:
                branch = params.get('branch', 'master')
                if rd.expand(branch) != srcbranch:
                    # Handle case where branch is set through a variable
                    res = re.match(r'\$\{([^}@]+)\}', branch)
                    if res:
                        newvalues[res.group(1)] = srcbranch
                        # We know we won't change SRC_URI now, so break out
                        break
                    else:
                        params['branch'] = srcbranch
                        entry = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
                        changed = True
                replacing = False
            new_src_uri.append(entry)
        if changed:
            newvalues['SRC_URI'] = ' '.join(new_src_uri)

    # Drop any PR value on a version change
    newvalues['PR'] = None

    # Work out which SRC_URI entries have changed in case the entry uses a name
    crd = rd.createCopy()
    crd.setVar('PV', newpv)
    for var, value in newvalues.items():
        crd.setVar(var, value)
    old_src_uri = (rd.getVar('SRC_URI') or '').split()
    new_src_uri = (crd.getVar('SRC_URI') or '').split()
    newnames = []
    addnames = []
    for newentry in new_src_uri:
        _, _, _, _, _, params = bb.fetch2.decodeurl(newentry)
        if 'name' in params:
            newnames.append(params['name'])
            if newentry not in old_src_uri:
                addnames.append(params['name'])

    # Find what's been set in the original recipe
    oldnames = []
    oldsums = []
    noname = False
    for varflag in rd.getVarFlags('SRC_URI'):
        for checksum in checksums:
            if varflag.endswith('.' + checksum):
                # Named checksum flag, e.g. SRC_URI[myname.sha256sum]
                name = varflag.rsplit('.', 1)[0]
                if name not in oldnames:
                    oldnames.append(name)
                oldsums.append(checksum)
            elif varflag == checksum:
                # Unnamed checksum flag, e.g. SRC_URI[sha256sum]
                noname = True
                oldsums.append(checksum)
    # Even if SRC_URI has named entries it doesn't have to actually use the name
    if noname and addnames and addnames[0] not in oldnames:
        addnames = []
    # Drop any old names (the name actually might include ${PV})
    for name in oldnames:
        if name not in newnames:
            for checksum in oldsums:
                newvalues['SRC_URI[%s.%s]' % (name, checksum)] = None

    nameprefix = '%s.' % addnames[0] if addnames else ''

    # md5sum is deprecated, remove any traces of it. If it was the only old
    # checksum, then replace it with the default checksums.
    if 'md5sum' in oldsums:
        newvalues['SRC_URI[%smd5sum]' % nameprefix] = None
        oldsums.remove('md5sum')
        if not oldsums:
            oldsums = ["%ssum" % s for s in bb.fetch2.SHOWN_CHECKSUM_LIST]

    for checksum in oldsums:
        newvalues['SRC_URI[%s%s]' % (nameprefix, checksum)] = checksums[checksum]

    if srcsubdir_new != srcsubdir_old:
        s_subdir_old = os.path.relpath(os.path.abspath(rd.getVar('S')), rd.getVar('WORKDIR'))
        s_subdir_new = os.path.relpath(os.path.abspath(crd.getVar('S')), crd.getVar('WORKDIR'))
        if srcsubdir_old == s_subdir_old and srcsubdir_new != s_subdir_new:
            # Subdir for old extracted source matches what S points to (it should!)
            # but subdir for new extracted source doesn't match what S will be
            newvalues['S'] = '${WORKDIR}/%s' % srcsubdir_new.replace(newpv, '${PV}')
            if crd.expand(newvalues['S']) == crd.expand('${WORKDIR}/${BP}'):
                # It's the default, drop it
                # FIXME what if S is being set in a .inc?
                newvalues['S'] = None
                logger.info('Source subdirectory has changed, dropping S value since it now matches the default ("${WORKDIR}/${BP}")')
            else:
                logger.info('Source subdirectory has changed, updating S value')

    if license_diff:
        # Rewrite LIC_FILES_CHKSUM using the md5s actually computed from the
        # new source (actual_md5), and embed the diff as a comment for review
        newlicchksum = " ".join(["file://{}".format(l['path']) +
                                 (";beginline={}".format(l['beginline']) if l['beginline'] else "") +
                                 (";endline={}".format(l['endline']) if l['endline'] else "") +
                                 (";md5={}".format(l['actual_md5'])) for l in new_licenses])
        newvalues["LIC_FILES_CHKSUM"] = newlicchksum
        _add_license_diff_to_recipe(fullpath, license_diff)

    tinfoil.modified_files()
    try:
        rd = tinfoil.parse_recipe_file(fullpath, False)
    except bb.tinfoil.TinfoilCommandFailed as e:
        _upgrade_error(e, os.path.dirname(fullpath), srctree, keep_failure, 'Parsing of upgraded recipe failed')
    oe.recipeutils.patch_recipe(rd, fullpath, newvalues)
    return fullpath, copied
def _check_git_config():
    """Verify that git user.name and user.email are set; rebases fail without them."""
    def _git_option(name):
        # 'git config' exits with 1 when the option is simply unset
        try:
            return bb.process.run('git config %s' % name)[0].strip()
        except bb.process.ExecutionError as e:
            if e.exitcode != 1:
                raise
            return None

    problems = []
    if not _git_option('user.name'):
        problems.append('Please set your name using:\n git config --global user.name')
    if not _git_option('user.email'):
        problems.append('Please set your email using:\n git config --global user.email')
    if problems:
        raise DevtoolError('Your git configuration is incomplete which will prevent rebases from working:\n' + '\n'.join(problems))
def _extract_licenses(srcpath, recipe_licenses):
    """Parse LIC_FILES_CHKSUM entries and read each referenced license file.

    Returns a list of dicts with keys: path, md5 (recorded in the recipe),
    beginline/endline (0 when unset), text (selected lines, decoded) and
    actual_md5 (computed over the selected lines of the file on disk).
    """
    import hashlib
    results = []
    for url in recipe_licenses.split():
        # Avoid shadowing builtins: use lic_path/parm instead of type/license
        (_, _, lic_path, _, _, parm) = bb.fetch.decodeurl(url)
        entry = {
            'path': lic_path,
            'md5': parm.get('md5', ''),
            'beginline': int(parm['beginline']) if 'beginline' in parm else 0,
            'endline': int(parm['endline']) if 'endline' in parm else 0,
            'text': [],
        }
        md5obj = hashlib.md5()
        with open(os.path.join(srcpath, lic_path), 'rb') as f:
            for lineno, line in enumerate(f, start=1):
                # endline of 0 means "to end of file"
                if lineno >= entry['beginline'] and (not entry['endline'] or lineno <= entry['endline']):
                    entry['text'].append(line.decode(errors='ignore'))
                    md5obj.update(line)
        entry['actual_md5'] = md5obj.hexdigest()
        results.append(entry)
    return results
def _generate_license_diff(old_licenses, new_licenses):
    """Return a unified diff between old and new license texts.

    old_licenses/new_licenses are lists of dicts as produced by
    _extract_licenses(). Returns None when every new license file still
    matches the md5 recorded in the recipe (i.e. nothing to report);
    otherwise returns the concatenated unified diff (possibly '').
    """
    # any() instead of a manual flag loop with '== False' comparison
    if not any(l['md5'] != l['actual_md5'] for l in new_licenses):
        return None
    import difflib
    # ''.join avoids quadratic string concatenation; unified_diff lines
    # already carry their trailing newlines
    return ''.join(line
                   for old, new in zip(old_licenses, new_licenses)
                   for line in difflib.unified_diff(old['text'], new['text'], old['path'], new['path']))
def upgrade(args, config, basepath, workspace):
    """Entry point for the devtool 'upgrade' subcommand

    Extracts the current and the new upstream source for args.recipename,
    rebases the recipe's patches onto the new version and writes the
    upgraded recipe (plus a bbappend) into the workspace.
    Returns 0 on success; raises DevtoolError on user/recipe errors.
    """
    if args.recipename in workspace:
        raise DevtoolError("recipe %s is already in your workspace" % args.recipename)
    if args.srcbranch and not args.srcrev:
        # Bugfix: the message has no '%s' placeholder, so the previous
        # '% args.recipename' raised a TypeError instead of this error
        raise DevtoolError("If you specify --srcbranch/-B then you must use --srcrev/-S to specify the revision")

    _check_git_config()

    tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
    try:
        rd = parse_recipe(config, tinfoil, args.recipename, True)
        if not rd:
            return 1

        pn = rd.getVar('PN')
        if pn != args.recipename:
            logger.info('Mapping %s to %s' % (args.recipename, pn))
        if pn in workspace:
            raise DevtoolError("recipe %s is already in your workspace" % pn)

        if args.srctree:
            srctree = os.path.abspath(args.srctree)
        else:
            srctree = standard.get_default_srctree(config, pn)
        srctree_s = standard.get_real_srctree(srctree, rd.getVar('S'), rd.getVar('WORKDIR'))

        # try to automatically discover latest version and revision if not provided on command line
        if not args.version and not args.srcrev:
            version_info = oe.recipeutils.get_recipe_upstream_version(rd)
            if version_info['version'] and not version_info['version'].endswith("new-commits-available"):
                args.version = version_info['version']
            if version_info['revision']:
                args.srcrev = version_info['revision']
        if not args.version and not args.srcrev:
            raise DevtoolError("Automatic discovery of latest version/revision failed - you must provide a version using the --version/-V option, or for recipes that fetch from an SCM such as git, the --srcrev/-S option.")

        standard._check_compatible_recipe(pn, rd)
        old_srcrev = rd.getVar('SRCREV')
        if old_srcrev == 'INVALID':
            old_srcrev = None
        if old_srcrev and not args.srcrev:
            raise DevtoolError("Recipe specifies a SRCREV value; you must specify a new one when upgrading")
        old_ver = rd.getVar('PV')
        if old_ver == args.version and old_srcrev == args.srcrev:
            raise DevtoolError("Current and upgrade versions are the same version")
        if args.version:
            if bb.utils.vercmp_string(args.version, old_ver) < 0:
                logger.warning('Upgrade version %s compares as less than the current version %s. If you are using a package feed for on-target upgrades or providing this recipe for general consumption, then you should increment PE in the recipe (or if there is no current PE value set, set it to "1")' % (args.version, old_ver))
            check_prerelease_version(args.version, 'devtool upgrade')

        rf = None
        license_diff = None
        try:
            logger.info('Extracting current version source...')
            rev1, srcsubdir1 = standard._extract_source(srctree, False, 'devtool-orig', False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
            old_licenses = _extract_licenses(srctree_s, (rd.getVar('LIC_FILES_CHKSUM') or ""))
            logger.info('Extracting upgraded version source...')
            rev2, checksums, srcbranch, srcsubdir2 = _extract_new_source(args.version, srctree, args.no_patch,
                                                    args.srcrev, args.srcbranch, args.branch, args.keep_temp,
                                                    tinfoil, rd)
            # Compare license text before/after so the user is alerted to changes
            new_licenses = _extract_licenses(srctree_s, (rd.getVar('LIC_FILES_CHKSUM') or ""))
            license_diff = _generate_license_diff(old_licenses, new_licenses)
            rf, copied = _create_new_recipe(args.version, checksums, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses, srctree, args.keep_failure)
        except (bb.process.CmdError, DevtoolError) as e:
            # _upgrade_error() cleans up (unless --keep-failure) and exits
            recipedir = os.path.join(config.workspace_path, 'recipes', rd.getVar('BPN'))
            _upgrade_error(e, recipedir, srctree, args.keep_failure)
        standard._add_md5(config, pn, os.path.dirname(rf))

        af = _write_append(rf, srctree, srctree_s, args.same_dir, args.no_same_dir, rev2,
                           copied, config.workspace_path, rd)
        standard._add_md5(config, pn, af)

        update_unlockedsigs(basepath, workspace, args.fixed_setup, [pn])

        logger.info('Upgraded source extracted to %s' % srctree)
        logger.info('New recipe is %s' % rf)
        if license_diff:
            logger.info('License checksums have been updated in the new recipe; please refer to it for the difference between the old and the new license texts.')
        preferred_version = rd.getVar('PREFERRED_VERSION_%s' % rd.getVar('PN'))
        if preferred_version:
            logger.warning('Version is pinned to %s via PREFERRED_VERSION; it may need adjustment to match the new version before any further steps are taken' % preferred_version)
    finally:
        tinfoil.shutdown()
    return 0
def latest_version(args, config, basepath, workspace):
    """Entry point for the devtool 'latest_version' subcommand"""
    tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
    try:
        rd = parse_recipe(config, tinfoil, args.recipename, True)
        if not rd:
            return 1
        version_info = oe.recipeutils.get_recipe_upstream_version(rd)
        # "new-commits-available" is an indication that upstream never issues version tags
        if version_info['version'].endswith("new-commits-available"):
            logger.info("Latest commit: {}".format(version_info['revision']))
        else:
            logger.info("Current version: {}".format(version_info['current_version']))
            logger.info("Latest version: {}".format(version_info['version']))
            if version_info['revision']:
                logger.info("Latest version's commit: {}".format(version_info['revision']))
    finally:
        tinfoil.shutdown()
    return 0
def check_upgrade_status(args, config, basepath, workspace):
    """Print a table of recipes whose upstream version differs from the current one."""
    if not args.recipe:
        logger.info("Checking the upstream status for all recipes may take a few minutes")
    for pn, status, current, latest, maintainer, latest_commit, no_update_reason in oe.recipeutils.get_recipe_upgrade_status(args.recipe):
        # Skip up-to-date recipes unless --all was given
        if not args.all and status == 'MATCH':
            continue
        if status == 'UPDATE':
            shown = "new commits" if latest.endswith("new-commits-available") else latest
        else:
            shown = status
        print("{:25} {:15} {:15} {} {} {}".format(pn,
                                                  current,
                                                  shown,
                                                  maintainer,
                                                  latest_commit if latest_commit != 'N/A' else "",
                                                  "cannot be updated due to: %s" % no_update_reason if no_update_reason else ""))
def register_commands(subparsers, context):
    """Register devtool subcommands from this plugin"""
    defsrctree = standard.get_default_srctree(context.config)

    # devtool upgrade
    upgrade_p = subparsers.add_parser('upgrade', help='Upgrade an existing recipe',
                                      description='Upgrades an existing recipe to a new upstream version. Puts the upgraded recipe file into the workspace along with any associated files, and extracts the source tree to a specified location (in case patches need rebasing or adding to as a result of the upgrade).',
                                      group='starting')
    upgrade_p.add_argument('recipename', help='Name of recipe to upgrade (just name - no version, path or extension)')
    upgrade_p.add_argument('srctree', nargs='?', help='Path to where to extract the source tree. If not specified, a subdirectory of %s will be used.' % defsrctree)
    upgrade_p.add_argument('--version', '-V', help='Version to upgrade to (PV). If omitted, latest upstream version will be determined and used, if possible.')
    upgrade_p.add_argument('--srcrev', '-S', help='Source revision to upgrade to (useful when fetching from an SCM such as git)')
    upgrade_p.add_argument('--srcbranch', '-B', help='Branch in source repository containing the revision to use (if fetching from an SCM such as git)')
    upgrade_p.add_argument('--branch', '-b', default="devtool", help='Name for new development branch to checkout (default "%(default)s")')
    upgrade_p.add_argument('--no-patch', action="store_true", help='Do not apply patches from the recipe to the new source code')
    upgrade_p.add_argument('--no-overrides', '-O', action="store_true", help='Do not create branches for other override configurations')
    builddir_group = upgrade_p.add_mutually_exclusive_group()
    builddir_group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
    builddir_group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
    upgrade_p.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
    upgrade_p.add_argument('--keep-failure', action="store_true", help='Keep failed upgrade recipe and associated files (for debugging)')
    upgrade_p.set_defaults(func=upgrade, fixed_setup=context.fixed_setup)

    # devtool latest-version
    latest_p = subparsers.add_parser('latest-version', help='Report the latest version of an existing recipe',
                                     description='Queries the upstream server for what the latest upstream release is (for git, tags are checked, for tarballs, a list of them is obtained, and one with the highest version number is reported)',
                                     group='info')
    latest_p.add_argument('recipename', help='Name of recipe to query (just name - no version, path or extension)')
    latest_p.set_defaults(func=latest_version)

    # devtool check-upgrade-status
    status_p = subparsers.add_parser('check-upgrade-status', help="Report upgradability for multiple (or all) recipes",
                                     description="Prints a table of recipes together with versions currently provided by recipes, and latest upstream versions, when there is a later version available",
                                     group='info')
    status_p.add_argument('recipe', help='Name of the recipe to report (omit to report upgrade info for all recipes)', nargs='*')
    status_p.add_argument('--all', '-a', help='Show all recipes, not just recipes needing upgrade', action="store_true")
    status_p.set_defaults(func=check_upgrade_status)

View File

@@ -0,0 +1,242 @@
# Development tool - utility commands plugin
#
# Copyright (C) 2015-2016 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool utility plugins"""
import os
import sys
import shutil
import tempfile
import logging
import argparse
import subprocess
import scriptutils
from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError
from devtool import parse_recipe
logger = logging.getLogger('devtool')
def _find_recipe_path(args, config, basepath, workspace):
    """Return the path to the recipe file for args.recipename.

    A workspace copy takes priority; otherwise the recipe is located via a
    (slower) tinfoil lookup. Raises DevtoolError if it cannot be found.
    """
    if args.any_recipe:
        logger.warning('-a/--any-recipe option is now always active, and thus the option will be removed in a future release')
    recipefile = workspace[args.recipename]['recipefile'] if args.recipename in workspace else None
    if recipefile:
        return recipefile
    tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
    try:
        rd = parse_recipe(config, tinfoil, args.recipename, True)
        if not rd:
            raise DevtoolError("Failed to find specified recipe")
        return rd.getVar('FILE')
    finally:
        tinfoil.shutdown()
def find_recipe(args, config, basepath, workspace):
    """Entry point for the devtool 'find-recipe' subcommand"""
    print(_find_recipe_path(args, config, basepath, workspace))
    return 0
def edit_recipe(args, config, basepath, workspace):
    """Entry point for the devtool 'edit-recipe' subcommand"""
    recipefile = _find_recipe_path(args, config, basepath, workspace)
    return scriptutils.run_editor(recipefile, logger)
def configure_help(args, config, basepath, workspace):
    """Entry point for the devtool 'configure-help' subcommand

    Builds a header describing the current configure arguments for the
    recipe, then runs the configure script (or cmake) in help mode and
    shows the combined output, through a pager when on a terminal.
    """
    import oe.utils
    check_workspace_recipe(workspace, args.recipename)
    tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
    try:
        rd = parse_recipe(config, tinfoil, args.recipename, appends=True, filter_workspace=False)
        if not rd:
            return 1
        b = rd.getVar('B')
        s = rd.getVar('S')
        configurescript = os.path.join(s, 'configure')
        # do_configure counts as disabled when flagged noexec or not registered as a task
        confdisabled = 'noexec' in rd.getVarFlags('do_configure') or 'do_configure' not in (rd.getVar('__BBTASKS', False) or [])
        configureopts = oe.utils.squashspaces(rd.getVar('CONFIGUREOPTS') or '')
        extra_oeconf = oe.utils.squashspaces(rd.getVar('EXTRA_OECONF') or '')
        extra_oecmake = oe.utils.squashspaces(rd.getVar('EXTRA_OECMAKE') or '')
        do_configure = rd.getVar('do_configure') or ''
        do_configure_noexpand = rd.getVar('do_configure', False) or ''
        packageconfig = rd.getVarFlags('PACKAGECONFIG') or []
        autotools = bb.data.inherits_class('autotools', rd) and ('oe_runconf' in do_configure or 'autotools_do_configure' in do_configure)
        cmake = bb.data.inherits_class('cmake', rd) and ('cmake_do_configure' in do_configure)
        cmake_do_configure = rd.getVar('cmake_do_configure')
        pn = rd.getVar('PN')
    finally:
        tinfoil.shutdown()

    # 'doc' is a documentation varflag, not an actual PACKAGECONFIG option
    if 'doc' in packageconfig:
        del packageconfig['doc']

    if autotools and not os.path.exists(configurescript):
        logger.info('Running do_configure to generate configure script')
        try:
            stdout, _ = exec_build_env_command(config.init_path, basepath,
                                               'bitbake -c configure %s' % args.recipename,
                                               stderr=subprocess.STDOUT)
        except bb.process.ExecutionError:
            # Best-effort: fall through and report whatever we have
            pass

    if confdisabled or do_configure.strip() in ('', ':'):
        raise DevtoolError("do_configure task has been disabled for this recipe")
    elif args.no_pager and not os.path.exists(configurescript):
        raise DevtoolError("No configure script found and no other information to display")
    else:
        configopttext = ''
        if autotools and configureopts:
            configopttext = '''
Arguments currently passed to the configure script:
%s
Some of those are fixed.''' % (configureopts + ' ' + extra_oeconf)
            if extra_oeconf:
                configopttext += ''' The ones that are specified through EXTRA_OECONF (which you can change or add to easily):
%s''' % extra_oeconf
        elif cmake:
            # Pull the actual 'cmake ...' command line (possibly continued
            # across lines with backslashes) out of cmake_do_configure
            in_cmake = False
            cmake_cmd = ''
            for line in cmake_do_configure.splitlines():
                if in_cmake:
                    cmake_cmd = cmake_cmd + ' ' + line.strip().rstrip('\\')
                    if not line.endswith('\\'):
                        break
                if line.lstrip().startswith('cmake '):
                    cmake_cmd = line.strip().rstrip('\\')
                    if line.endswith('\\'):
                        in_cmake = True
                    else:
                        break
            if cmake_cmd:
                configopttext = '''
The current cmake command line:
%s
Arguments specified through EXTRA_OECMAKE (which you can change or add to easily)
%s''' % (oe.utils.squashspaces(cmake_cmd), extra_oecmake)
            else:
                configopttext = '''
The current implementation of cmake_do_configure:
cmake_do_configure() {
%s
}
Arguments specified through EXTRA_OECMAKE (which you can change or add to easily)
%s''' % (cmake_do_configure.rstrip(), extra_oecmake)
        elif do_configure:
            configopttext = '''
The current implementation of do_configure:
do_configure() {
%s
}''' % do_configure.rstrip()
            if '${EXTRA_OECONF}' in do_configure_noexpand:
                configopttext += '''
Arguments specified through EXTRA_OECONF (which you can change or add to easily):
%s''' % extra_oeconf
        if packageconfig:
            configopttext += '''
Some of these options may be controlled through PACKAGECONFIG; for more details please see the recipe.'''

        if args.arg:
            helpargs = ' '.join(args.arg)
        elif cmake:
            helpargs = '-LH'
        else:
            helpargs = '--help'
        msg = '''configure information for %s
------------------------------------------
%s''' % (pn, configopttext)
        if cmake:
            msg += '''
The cmake %s output for %s follows. After "-- Cache values" you should see a list of variables you can add to EXTRA_OECMAKE (prefixed with -D and suffixed with = followed by the desired value, without any spaces).
------------------------------------------''' % (helpargs, pn)
        elif os.path.exists(configurescript):
            msg += '''
The ./configure %s output for %s follows.
------------------------------------------''' % (helpargs, pn)

        olddir = os.getcwd()
        tmppath = tempfile.mkdtemp()
        with tempfile.NamedTemporaryFile('w', delete=False) as tf:
            if not args.no_header:
                tf.write(msg + '\n')
            # Close explicitly so the shell 'cat' below can read the file
            tf.close()
            try:
                try:
                    cmd = 'cat %s' % tf.name
                    if cmake:
                        cmd += '; cmake %s %s 2>&1' % (helpargs, s)
                        os.chdir(b)
                    elif os.path.exists(configurescript):
                        cmd += '; %s %s' % (configurescript, helpargs)
                    if sys.stdout.isatty() and not args.no_pager:
                        pager = os.environ.get('PAGER', 'less')
                        cmd = '(%s) | %s' % (cmd, pager)
                    subprocess.check_call(cmd, shell=True)
                except subprocess.CalledProcessError as e:
                    return e.returncode
                finally:
                    # Always restore cwd and remove the temp header file/dir
                    os.chdir(olddir)
                    shutil.rmtree(tmppath)
                    os.remove(tf.name)
def register_commands(subparsers, context):
    """Register devtool subcommands from this plugin"""
    # devtool edit-recipe
    edit_p = subparsers.add_parser('edit-recipe', help='Edit a recipe file',
                                   description='Runs the default editor (as specified by the EDITOR variable) on the specified recipe. Note that this will be quicker for recipes in the workspace as the cache does not need to be loaded in that case.',
                                   group='working')
    edit_p.add_argument('recipename', help='Recipe to edit')
    # FIXME drop -a at some point in future
    edit_p.add_argument('--any-recipe', '-a', action="store_true", help='Does nothing (exists for backwards-compatibility)')
    edit_p.set_defaults(func=edit_recipe)

    # devtool find-recipe
    find_p = subparsers.add_parser('find-recipe', help='Find a recipe file',
                                   description='Finds a recipe file. Note that this will be quicker for recipes in the workspace as the cache does not need to be loaded in that case.',
                                   group='working')
    find_p.add_argument('recipename', help='Recipe to find')
    # FIXME drop -a at some point in future
    find_p.add_argument('--any-recipe', '-a', action="store_true", help='Does nothing (exists for backwards-compatibility)')
    find_p.set_defaults(func=find_recipe)

    # devtool configure-help
    # NOTE: Needed to override the usage string here since the default
    # gets the order wrong - recipename must come before --arg
    confhelp_p = subparsers.add_parser('configure-help', help='Get help on configure script options',
                                       usage='devtool configure-help [options] recipename [--arg ...]',
                                       description='Displays the help for the configure script for the specified recipe (i.e. runs ./configure --help) prefaced by a header describing the current options being specified. Output is piped through less (or whatever PAGER is set to, if set) for easy browsing.',
                                       group='working')
    confhelp_p.add_argument('recipename', help='Recipe to show configure help for')
    confhelp_p.add_argument('-p', '--no-pager', help='Disable paged output', action="store_true")
    confhelp_p.add_argument('-n', '--no-header', help='Disable explanatory header text', action="store_true")
    confhelp_p.add_argument('--arg', help='Pass remaining arguments to the configure script instead of --help (useful if the script has additional help options)', nargs=argparse.REMAINDER)
    confhelp_p.set_defaults(func=configure_help)

View File

@@ -0,0 +1,477 @@
# Recipe creation tool - append plugin
#
# Copyright (C) 2015 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import sys
import os
import argparse
import glob
import fnmatch
import re
import subprocess
import logging
import stat
import shutil
import scriptutils
import errno
from collections import defaultdict
import difflib
logger = logging.getLogger('recipetool')  # shared recipetool logger for this plugin
tinfoil = None  # tinfoil instance; populated via tinfoil_init() before commands run
def tinfoil_init(instance):
    """Store the provided tinfoil instance in the module-level 'tinfoil' variable."""
    global tinfoil
    tinfoil = instance
# FIXME guessing when we don't have pkgdata?
# FIXME mode to create patch rather than directly substitute
class InvalidTargetFileError(Exception):
    """Raised by find_target_file() for target paths that may not be modified
    this way (see its table of invalid targets for the reasons)."""
    pass
def find_target_file(targetpath, d, pkglist=None):
    """Find the recipe installing the specified target path, optionally limited to a select list of packages.

    Walks the pkgdata runtime directory and returns a dict mapping targetpath
    to a list of recipe names (PN). Entries may be prefixed with '?' (file is
    an update-alternatives alternative) or '!' (file is touched by a
    pre/postinstall script) - callers are expected to interpret these markers.
    Raises InvalidTargetFileError for paths this tool cannot handle.
    """
    import json

    pkgdata_dir = d.getVar('PKGDATA_DIR')

    # The mix between /etc and ${sysconfdir} here may look odd, but it is just
    # being consistent with usage elsewhere
    invalidtargets = {'${sysconfdir}/version': '${sysconfdir}/version is written out at image creation time',
                      '/etc/timestamp': '/etc/timestamp is written out at image creation time',
                      '/dev/*': '/dev is handled by udev (or equivalent) and the kernel (devtmpfs)',
                      '/etc/passwd': '/etc/passwd should be managed through the useradd and extrausers classes',
                      '/etc/group': '/etc/group should be managed through the useradd and extrausers classes',
                      '/etc/shadow': '/etc/shadow should be managed through the useradd and extrausers classes',
                      '/etc/gshadow': '/etc/gshadow should be managed through the useradd and extrausers classes',
                      '${sysconfdir}/hostname': '${sysconfdir}/hostname contents should be set by setting hostname:pn-base-files = "value" in configuration',}

    for pthspec, message in invalidtargets.items():
        if fnmatch.fnmatchcase(targetpath, d.expand(pthspec)):
            raise InvalidTargetFileError(d.expand(message))

    # Matches the target path (optionally prefixed by $D) within a shell script line
    targetpath_re = re.compile(r'\s+(\$D)?%s(\s|$)' % targetpath)

    recipes = defaultdict(list)
    for root, dirs, files in os.walk(os.path.join(pkgdata_dir, 'runtime')):
        if pkglist:
            filelist = pkglist
        else:
            filelist = files
        for fn in filelist:
            pkgdatafile = os.path.join(root, fn)
            if pkglist and not os.path.exists(pkgdatafile):
                continue
            with open(pkgdatafile, 'r') as f:
                pn = ''
                # This does assume that PN comes before other values, but that's a fairly safe assumption
                for line in f:
                    if line.startswith('PN:'):
                        pn = line.split(': ', 1)[1].strip()
                    elif line.startswith('FILES_INFO'):
                        val = line.split(': ', 1)[1].strip()
                        dictval = json.loads(val)
                        for fullpth in dictval.keys():
                            if fnmatch.fnmatchcase(fullpth, targetpath):
                                recipes[targetpath].append(pn)
                    elif line.startswith('pkg_preinst:') or line.startswith('pkg_postinst:'):
                        # Scripts are stored escaped; decode before searching them
                        scriptval = line.split(': ', 1)[1].strip().encode('utf-8').decode('unicode_escape')
                        if 'update-alternatives --install %s ' % targetpath in scriptval:
                            recipes[targetpath].append('?%s' % pn)
                        elif targetpath_re.search(scriptval):
                            recipes[targetpath].append('!%s' % pn)
    return recipes
def _parse_recipe(pn, tinfoil):
    """Parse the recipe *pn* via tinfoil, returning its datastore or None on failure."""
    try:
        return tinfoil.parse_recipe(pn)
    except bb.providers.NoProvider as e:
        logger.error(str(e))
        return None
def determine_file_source(targetpath, rd):
    """Assuming we know a file came from a specific recipe, figure out exactly where it came from.

    Returns a tuple (srcfile, elements, modpatches) where srcfile is either
    'file://<path>' or 'patch://<patch>' (or '' if undetermined), elements is
    the do_install command line that installs the file (or None), and
    modpatches is a list of patches that modify the file.
    """
    import oe.recipeutils

    # See if it's in do_install for the recipe
    workdir = rd.getVar('WORKDIR')
    src_uri = rd.getVar('SRC_URI')
    srcfile = ''
    modpatches = []
    elements = check_do_install(rd, targetpath)
    if elements:
        logger.debug('do_install line:\n%s' % ' '.join(elements))
        srcpath = get_source_path(elements)
        logger.debug('source path: %s' % srcpath)
        if not srcpath.startswith('/'):
            # Handle non-absolute path: resolve relative to do_install's last cwd
            srcpath = os.path.abspath(os.path.join(rd.getVarFlag('do_install', 'dirs').split()[-1], srcpath))
        if srcpath.startswith(workdir):
            # OK, now we have the source file name, look for it in SRC_URI
            workdirfile = os.path.relpath(srcpath, workdir)
            # FIXME this is where we ought to have some code in the fetcher, because this is naive
            for item in src_uri.split():
                localpath = bb.fetch2.localpath(item, rd)
                # Source path specified in do_install might be a glob
                if fnmatch.fnmatch(os.path.basename(localpath), workdirfile):
                    srcfile = 'file://%s' % localpath
                elif '/' in workdirfile:
                    if item == 'file://%s' % workdirfile:
                        srcfile = 'file://%s' % localpath

        # Check patches: a patch that adds the file ('A') wins over a plain
        # file:// source; patches that modify it are reported separately
        srcpatches = []
        patchedfiles = oe.recipeutils.get_recipe_patched_files(rd)
        for patch, filelist in patchedfiles.items():
            for fileitem in filelist:
                if fileitem[0] == srcpath:
                    srcpatches.append((patch, fileitem[1]))
        if srcpatches:
            addpatch = None
            for patch in srcpatches:
                if patch[1] == 'A':
                    addpatch = patch[0]
                else:
                    modpatches.append(patch[0])
            if addpatch:
                srcfile = 'patch://%s' % addpatch

    return (srcfile, elements, modpatches)
def get_source_path(cmdelements):
    """Find the source path specified within a command.

    cmdelements is an 'install' or 'cp' command line split into elements;
    the command's own --help output is scanned to learn which single-letter
    options consume a following argument, so that option arguments are not
    mistaken for the source path. Returns the first non-option argument
    (i.e. the source path), or None if none is found. Raises Exception for
    any other command.
    """
    command = cmdelements[0]
    if command in ['install', 'cp']:
        helptext = subprocess.check_output('LC_ALL=C %s --help' % command, shell=True).decode('utf-8')
        # Collect short options that take a value (lines like "-m, --mode=MODE")
        argopts = ''
        argopt_line_re = re.compile('^-([a-zA-Z0-9]), --[a-z-]+=')
        for line in helptext.splitlines():
            line = line.lstrip()
            res = argopt_line_re.search(line)
            if res:
                argopts += res.group(1)
        if not argopts:
            # Fallback in case --help output could not be parsed
            if command == 'install':
                argopts = 'gmoSt'
            elif command == 'cp':
                argopts = 't'
            else:
                raise Exception('No fallback arguments for command %s' % command)

        skipnext = False
        for elem in cmdelements[1:-1]:
            if elem.startswith('-'):
                # Option; if it takes a value, skip the value element too
                if len(elem) > 1 and elem[1] in argopts:
                    skipnext = True
                continue
            if skipnext:
                skipnext = False
                continue
            return elem
    else:
        # FIX: the original omitted the % substitution, so the message always
        # showed a literal "%s" instead of the offending command name
        raise Exception('get_source_path: no handling for command "%s"' % command)
def get_func_deps(func, d):
    """Find the function dependencies of a shell function.

    Combines the dependencies bitbake's shell parser finds in the function
    body with any explicitly declared 'vardeps', and returns only those
    dependencies that are themselves functions.
    """
    deps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func))
    deps |= set((d.getVarFlag(func, "vardeps") or "").split())
    return [dep for dep in deps if d.getVarFlag(dep, 'func')]
def check_do_install(rd, targetpath):
    """Look at do_install for a command that installs/copies the specified target path.

    Returns the matching command line split into elements, or None if no
    install/cp line producing targetpath was found.
    """
    instpath = os.path.abspath(os.path.join(rd.getVar('D'), targetpath.lstrip('/')))
    do_install = rd.getVar('do_install')
    # Handle where do_install calls other functions (somewhat crudely, but good enough for this purpose)
    deps = get_func_deps('do_install', rd)
    for dep in deps:
        do_install = do_install.replace(dep, rd.getVar(dep))

    # Look backwards through do_install as we want to catch where a later line (perhaps
    # from a bbappend) is writing over the top
    for line in reversed(do_install.splitlines()):
        line = line.strip()
        if (line.startswith('install ') and ' -m' in line) or line.startswith('cp '):
            elements = line.split()
            destpath = os.path.abspath(elements[-1])
            if destpath == instpath:
                # Destination is the exact target file
                return elements
            elif destpath.rstrip('/') == os.path.dirname(instpath):
                # Destination is the target's directory; match on the source file name
                # FIXME this doesn't take recursive copy into account; unsure if it's practical to do so
                srcpath = get_source_path(elements)
                if fnmatch.fnmatchcase(os.path.basename(instpath), os.path.basename(srcpath)):
                    return elements
    return None
def appendfile(args):
    """Entry point for the 'appendfile' subcommand.

    Determines which recipe packages args.targetpath, then writes a bbappend
    in args.destlayer that replaces the file with args.newfile. Returns 0 on
    success, 1 on error, 3 when the user must disambiguate with -r/--recipe.
    """
    import oe.recipeutils

    stdout = ''
    try:
        (stdout, _) = bb.process.run('LANG=C file -b %s' % args.newfile, shell=True)
        if 'cannot open' in stdout:
            raise bb.process.ExecutionError(stdout)
    except bb.process.ExecutionError as err:
        logger.debug('file command returned error: %s' % err)
        stdout = ''
    if stdout:
        logger.debug('file command output: %s' % stdout.rstrip())
        # Warn if the replacement looks like a compiled artifact rather than source
        if ('executable' in stdout and not 'shell script' in stdout) or 'shared object' in stdout:
            logger.warning('This file looks like it is a binary or otherwise the output of compilation. If it is, you should consider building it properly instead of substituting a binary file directly.')

    if args.recipe:
        recipes = {args.targetpath: [args.recipe],}
    else:
        try:
            recipes = find_target_file(args.targetpath, tinfoil.config_data)
        except InvalidTargetFileError as e:
            logger.error('%s cannot be handled by this tool: %s' % (args.targetpath, e))
            return 1
        if not recipes:
            logger.error('Unable to find any package producing path %s - this may be because the recipe packaging it has not been built yet' % args.targetpath)
            return 1

    # Deal with storing the '?' (alternative) and '!' (postinst) markers
    # produced by find_target_file()
    alternative_pns = []
    postinst_pns = []

    selectpn = None
    for targetpath, pnlist in recipes.items():
        for pn in pnlist:
            if pn.startswith('?'):
                alternative_pns.append(pn[1:])
            elif pn.startswith('!'):
                postinst_pns.append(pn[1:])
            elif selectpn:
                # hit here with multilibs
                continue
            else:
                selectpn = pn

    if not selectpn and len(alternative_pns) == 1:
        selectpn = alternative_pns[0]
        logger.error('File %s is an alternative possibly provided by recipe %s but seemingly no other, selecting it by default - you should double check other recipes' % (args.targetpath, selectpn))

    if selectpn:
        logger.debug('Selecting recipe %s for file %s' % (selectpn, args.targetpath))
        if postinst_pns:
            logger.warning('%s be modified by postinstall scripts for the following recipes:\n %s\nThis may or may not be an issue depending on what modifications these postinstall scripts make.' % (args.targetpath, '\n '.join(postinst_pns)))
        rd = _parse_recipe(selectpn, tinfoil)
        if not rd:
            # Error message already shown
            return 1
        sourcefile, instelements, modpatches = determine_file_source(args.targetpath, rd)
        sourcepath = None
        if sourcefile:
            sourcetype, sourcepath = sourcefile.split('://', 1)
            logger.debug('Original source file is %s (%s)' % (sourcepath, sourcetype))
            if sourcetype == 'patch':
                logger.warning('File %s is added by the patch %s - you may need to remove or replace this patch in order to replace the file.' % (args.targetpath, sourcepath))
                sourcepath = None
        else:
            logger.debug('Unable to determine source file, proceeding anyway')
        if modpatches:
            logger.warning('File %s is modified by the following patches:\n %s' % (args.targetpath, '\n '.join(modpatches)))

        if instelements and sourcepath:
            install = None
        else:
            # Auto-determine permissions
            # Check destination
            binpaths = '${bindir}:${sbindir}:${base_bindir}:${base_sbindir}:${libexecdir}:${sysconfdir}/init.d'
            perms = '0644'
            if os.path.abspath(os.path.dirname(args.targetpath)) in rd.expand(binpaths).split(':'):
                # File is going into a directory normally reserved for executables, so it should be executable
                perms = '0755'
            else:
                # Check source
                st = os.stat(args.newfile)
                if st.st_mode & stat.S_IXUSR:
                    perms = '0755'
            install = {args.newfile: (args.targetpath, perms)}
        if sourcepath:
            sourcepath = os.path.basename(sourcepath)
        oe.recipeutils.bbappend_recipe(rd, args.destlayer, {args.newfile: {'newname' : sourcepath}}, install, wildcardver=args.wildcard_version, machine=args.machine)
        tinfoil.modified_files()
        return 0
    else:
        if alternative_pns:
            logger.error('File %s is an alternative possibly provided by the following recipes:\n %s\nPlease select recipe with -r/--recipe' % (targetpath, '\n '.join(alternative_pns)))
        elif postinst_pns:
            logger.error('File %s may be written out in a pre/postinstall script of the following recipes:\n %s\nPlease select recipe with -r/--recipe' % (targetpath, '\n '.join(postinst_pns)))
        return 3
def appendsrc(args, files, rd, extralines=None):
    """Create or update a bbappend adding the given source files to a recipe.

    files maps local file paths to their destination paths (relative to S or
    WORKDIR, depending on args.use_workdir). Supports --dry-run (diff output
    only) and --update-recipe.
    """
    import oe.recipeutils

    srcdir = rd.getVar('S')
    workdir = rd.getVar('WORKDIR')

    import bb.fetch
    # Build a map of SRC_URI entries with their parameters stripped, so that
    # entries can be matched regardless of parameters
    simplified = {}
    src_uri = rd.getVar('SRC_URI').split()
    for uri in src_uri:
        if uri.endswith(';'):
            uri = uri[:-1]
        simple_uri = bb.fetch.URI(uri)
        simple_uri.params = {}
        simplified[str(simple_uri)] = uri

    copyfiles = {}
    extralines = extralines or []
    # params is kept parallel to copyfiles (one dict of SRC_URI params per file)
    params = []
    for newfile, srcfile in files.items():
        src_destdir = os.path.dirname(srcfile)
        if not args.use_workdir:
            if rd.getVar('S') == rd.getVar('STAGING_KERNEL_DIR'):
                srcdir = os.path.join(workdir, 'git')
                if not bb.data.inherits_class('kernel-yocto', rd):
                    logger.warning('S == STAGING_KERNEL_DIR and non-kernel-yocto, unable to determine path to srcdir, defaulting to ${WORKDIR}/git')
            src_destdir = os.path.join(os.path.relpath(srcdir, workdir), src_destdir)
        src_destdir = os.path.normpath(src_destdir)

        if src_destdir and src_destdir != '.':
            params.append({'subdir': src_destdir})
        else:
            params.append({})

        copyfiles[newfile] = {'newname' : os.path.basename(srcfile)}

    dry_run_output = None
    dry_run_outdir = None
    if args.dry_run:
        import tempfile
        dry_run_output = tempfile.TemporaryDirectory(prefix='devtool')
        dry_run_outdir = dry_run_output.name

    appendfile, _ = oe.recipeutils.bbappend_recipe(rd, args.destlayer, copyfiles, None, wildcardver=args.wildcard_version, machine=args.machine, extralines=extralines, params=params,
                                                   redirect_output=dry_run_outdir, update_original_recipe=args.update_recipe)
    if not appendfile:
        return
    if args.dry_run:
        # Show a unified diff of what would change instead of writing anything
        output = ''
        appendfilename = os.path.basename(appendfile)
        newappendfile = appendfile
        if appendfile and os.path.exists(appendfile):
            with open(appendfile, 'r') as f:
                oldlines = f.readlines()
        else:
            appendfile = '/dev/null'
            oldlines = []

        with open(os.path.join(dry_run_outdir, appendfilename), 'r') as f:
            newlines = f.readlines()
        diff = difflib.unified_diff(oldlines, newlines, appendfile, newappendfile)
        difflines = list(diff)
        if difflines:
            output += ''.join(difflines)
        if output:
            logger.info('Diff of changed files:\n%s' % output)
        else:
            logger.info('No changed files')
    tinfoil.modified_files()
def appendsrcfiles(parser, args):
    """Entry point for the 'appendsrcfiles' subcommand (multiple files, shared destdir)."""
    recipedata = _parse_recipe(args.recipe, tinfoil)
    if not recipedata:
        parser.error('RECIPE must be a valid recipe name')
    files = {f: os.path.join(args.destdir, os.path.basename(f)) for f in args.files}
    return appendsrc(args, files, recipedata)
def appendsrcfile(parser, args):
    """Entry point for the 'appendsrcfile' subcommand (single file, explicit destination)."""
    recipedata = _parse_recipe(args.recipe, tinfoil)
    if not recipedata:
        parser.error('RECIPE must be a valid recipe name')
    destfile = args.destfile
    if not destfile:
        destfile = os.path.basename(args.file)
    elif destfile.endswith('/'):
        destfile = os.path.join(destfile, os.path.basename(args.file))
    args.destfile = destfile
    return appendsrc(args, {args.file: destfile}, recipedata)
def layer(layerpath):
    """Argparse type checker: the argument must be the base directory of a valid layer."""
    conffile = os.path.join(layerpath, 'conf', 'layer.conf')
    if os.path.exists(conffile):
        return layerpath
    raise argparse.ArgumentTypeError('{0!r} must be a path to a valid layer'.format(layerpath))
def existing_path(filepath):
    """Argparse type checker: the argument must name an existing path."""
    if os.path.exists(filepath):
        return filepath
    raise argparse.ArgumentTypeError('{0!r} must be an existing path'.format(filepath))
def existing_file(filepath):
    """Argparse type checker: the argument must be an existing file (not a directory)."""
    filepath = existing_path(filepath)
    if not os.path.isdir(filepath):
        return filepath
    raise argparse.ArgumentTypeError('{0!r} must be a file, not a directory'.format(filepath))
def destination_path(destpath):
    """Argparse type checker: the argument must be a relative path."""
    if not os.path.isabs(destpath):
        return destpath
    raise argparse.ArgumentTypeError('{0!r} must be a relative path, not absolute'.format(destpath))
def target_path(targetpath):
    """Argparse type checker: the argument must be an absolute path."""
    if os.path.isabs(targetpath):
        return targetpath
    raise argparse.ArgumentTypeError('{0!r} must be an absolute path, not relative'.format(targetpath))
def register_commands(subparsers):
    """Register this plugin's subcommands (appendfile, appendsrcfiles, appendsrcfile)
    with the given argparse subparsers object."""
    # Options shared by all three subcommands
    common = argparse.ArgumentParser(add_help=False)
    common.add_argument('-m', '--machine', help='Make bbappend changes specific to a machine only', metavar='MACHINE')
    common.add_argument('-w', '--wildcard-version', help='Use wildcard to make the bbappend apply to any recipe version', action='store_true')
    common.add_argument('destlayer', metavar='DESTLAYER', help='Base directory of the destination layer to write the bbappend to', type=layer)

    parser_appendfile = subparsers.add_parser('appendfile',
                                              parents=[common],
                                              help='Create/update a bbappend to replace a target file',
                                              description='Creates a bbappend (or updates an existing one) to replace the specified file that appears in the target system, determining the recipe that packages the file and the required path and name for the bbappend automatically. Note that the ability to determine the recipe packaging a particular file depends upon the recipe\'s do_packagedata task having already run prior to running this command (which it will have when the recipe has been built successfully, which in turn will have happened if one or more of the recipe\'s packages is included in an image that has been built successfully).')
    parser_appendfile.add_argument('targetpath', help='Path to the file to be replaced (as it would appear within the target image, e.g. /etc/motd)', type=target_path)
    parser_appendfile.add_argument('newfile', help='Custom file to replace the target file with', type=existing_file)
    parser_appendfile.add_argument('-r', '--recipe', help='Override recipe to apply to (default is to find which recipe already packages the file)')
    parser_appendfile.set_defaults(func=appendfile, parserecipes=True)

    # Additional options shared by the two source-file subcommands
    common_src = argparse.ArgumentParser(add_help=False, parents=[common])
    common_src.add_argument('-W', '--workdir', help='Unpack file into WORKDIR rather than S', dest='use_workdir', action='store_true')
    common_src.add_argument('recipe', metavar='RECIPE', help='Override recipe to apply to')

    parser = subparsers.add_parser('appendsrcfiles',
                                   parents=[common_src],
                                   help='Create/update a bbappend to add or replace source files',
                                   description='Creates a bbappend (or updates an existing one) to add or replace the specified file in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify multiple files with a destination directory, so cannot specify the destination filename. See the `appendsrcfile` command for the other behavior.')
    parser.add_argument('-D', '--destdir', help='Destination directory (relative to S or WORKDIR, defaults to ".")', default='', type=destination_path)
    parser.add_argument('-u', '--update-recipe', help='Update recipe instead of creating (or updating) a bbapend file. DESTLAYER must contains the recipe to update', action='store_true')
    parser.add_argument('-n', '--dry-run', help='Dry run mode', action='store_true')
    parser.add_argument('files', nargs='+', metavar='FILE', help='File(s) to be added to the recipe sources (WORKDIR or S)', type=existing_path)
    parser.set_defaults(func=lambda a: appendsrcfiles(parser, a), parserecipes=True)

    parser = subparsers.add_parser('appendsrcfile',
                                   parents=[common_src],
                                   help='Create/update a bbappend to add or replace a source file',
                                   description='Creates a bbappend (or updates an existing one) to add or replace the specified files in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify the destination filename, not just destination directory, but only works for one file. See the `appendsrcfiles` command for the other behavior.')
    parser.add_argument('-u', '--update-recipe', help='Update recipe instead of creating (or updating) a bbapend file. DESTLAYER must contains the recipe to update', action='store_true')
    parser.add_argument('-n', '--dry-run', help='Dry run mode', action='store_true')
    parser.add_argument('file', metavar='FILE', help='File to be added to the recipe sources (WORKDIR or S)', type=existing_path)
    parser.add_argument('destfile', metavar='DESTFILE', nargs='?', help='Destination path (relative to S or WORKDIR, optional)', type=destination_path)
    parser.set_defaults(func=lambda a: appendsrcfile(parser, a), parserecipes=True)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,875 @@
# Recipe creation tool - create command build system handlers
#
# Copyright (C) 2014-2016 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import re
import logging
from recipetool.create import RecipeHandler, validate_pv
logger = logging.getLogger('recipetool')
tinfoil = None
plugins = None
def plugin_init(pluginlist):
    """Called by the plugin framework with the list of loaded plugins."""
    # Take a reference to the list so we can use it later
    global plugins
    plugins = pluginlist
def tinfoil_init(instance):
    """Called by the plugin framework to hand this plugin the shared tinfoil instance."""
    global tinfoil
    tinfoil = instance
class CmakeRecipeHandler(RecipeHandler):
    """Detects CMake-based source trees (CMakeLists.txt) and extracts
    dependency/inherit information from the CMake files."""

    def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
        """Handle a source tree: if it is CMake-based, add the 'cmake' class,
        extracted variables and EXTRA_OECMAKE boilerplate. Returns True if handled."""
        if 'buildsystem' in handled:
            return False

        if RecipeHandler.checkfiles(srctree, ['CMakeLists.txt']):
            classes.append('cmake')
            values = CmakeRecipeHandler.extract_cmake_deps(lines_before, srctree, extravalues)
            classes.extend(values.pop('inherit', '').split())
            for var, value in values.items():
                lines_before.append('%s = "%s"' % (var, value))
            lines_after.append('# Specify any options you want to pass to cmake using EXTRA_OECMAKE:')
            lines_after.append('EXTRA_OECMAKE = ""')
            lines_after.append('')
            handled.append('buildsystem')
            return True
        return False

    @staticmethod
    def extract_cmake_deps(outlines, srctree, extravalues, cmakelistsfile=None):
        """Parse CMakeLists.txt (recursing into includes and subdirectories) to
        determine package/library dependencies and classes to inherit.
        Returns a dict of variable values (with an optional 'inherit' key)."""
        # Find all plugins that want to register handlers
        logger.debug('Loading cmake handlers')
        handlers = []
        for plugin in plugins:
            if hasattr(plugin, 'register_cmake_handlers'):
                plugin.register_cmake_handlers(handlers)

        values = {}
        inherits = []

        if cmakelistsfile:
            srcfiles = [cmakelistsfile]
        else:
            srcfiles = RecipeHandler.checkfiles(srctree, ['CMakeLists.txt'])

        # Map of CMake package names (lowercased) to recipe names; '' means
        # "known but no dependency needed", absence means unmapped.
        # Note that some of these are non-standard, but probably better to
        # be able to map them anyway if we see them
        cmake_pkgmap = {'alsa': 'alsa-lib',
                        'aspell': 'aspell',
                        'atk': 'atk',
                        'bison': 'bison-native',
                        'boost': 'boost',
                        'bzip2': 'bzip2',
                        'cairo': 'cairo',
                        'cups': 'cups',
                        'curl': 'curl',
                        'curses': 'ncurses',
                        'cvs': 'cvs',
                        'drm': 'libdrm',
                        'dbus': 'dbus',
                        'dbusglib': 'dbus-glib',
                        'egl': 'virtual/egl',
                        'expat': 'expat',
                        'flex': 'flex-native',
                        'fontconfig': 'fontconfig',
                        'freetype': 'freetype',
                        'gettext': '',
                        'git': '',
                        'gio': 'glib-2.0',
                        'giounix': 'glib-2.0',
                        'glew': 'glew',
                        'glib': 'glib-2.0',
                        'glib2': 'glib-2.0',
                        'glu': 'libglu',
                        'glut': 'freeglut',
                        'gobject': 'glib-2.0',
                        'gperf': 'gperf-native',
                        'gnutls': 'gnutls',
                        'gtk2': 'gtk+',
                        'gtk3': 'gtk+3',
                        'gtk': 'gtk+3',
                        'harfbuzz': 'harfbuzz',
                        'icu': 'icu',
                        'intl': 'virtual/libintl',
                        'jpeg': 'jpeg',
                        'libarchive': 'libarchive',
                        'libiconv': 'virtual/libiconv',
                        'liblzma': 'xz',
                        'libxml2': 'libxml2',
                        'libxslt': 'libxslt',
                        'opengl': 'virtual/libgl',
                        'openmp': '',
                        'openssl': 'openssl',
                        'pango': 'pango',
                        'perl': '',
                        'perllibs': '',
                        'pkgconfig': '',
                        'png': 'libpng',
                        'pthread': '',
                        'pythoninterp': '',
                        'pythonlibs': '',
                        'ruby': 'ruby-native',
                        'sdl': 'libsdl',
                        'sdl2': 'libsdl2',
                        'subversion': 'subversion-native',
                        'swig': 'swig-native',
                        'tcl': 'tcl-native',
                        'threads': '',
                        'tiff': 'tiff',
                        'wget': 'wget',
                        'x11': 'libx11',
                        'xcb': 'libxcb',
                        'xext': 'libxext',
                        'xfixes': 'libxfixes',
                        'zlib': 'zlib',
                        }

        pcdeps = []
        libdeps = []
        deps = []
        unmappedpkgs = []

        proj_re = re.compile(r'project\s*\(([^)]*)\)', re.IGNORECASE)
        pkgcm_re = re.compile(r'pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE)
        pkgsm_re = re.compile(r'pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE)
        findpackage_re = re.compile(r'find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE)
        findlibrary_re = re.compile(r'find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*')
        checklib_re = re.compile(r'check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE)
        include_re = re.compile(r'include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE)
        subdir_re = re.compile(r'add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE)
        dep_re = re.compile(r'([^ ><=]+)( *[<>=]+ *[^ ><=]+)?')

        def find_cmake_package(pkg):
            # Try to map a CMake package to a recipe via installed -config.cmake/Find*.cmake files
            RecipeHandler.load_devel_filemap(tinfoil.config_data)
            for fn, pn in RecipeHandler.recipecmakefilemap.items():
                splitname = fn.split('/')
                if len(splitname) > 1:
                    if splitname[0].lower().startswith(pkg.lower()):
                        if splitname[1] == '%s-config.cmake' % pkg.lower() or splitname[1] == '%sConfig.cmake' % pkg or splitname[1] == 'Find%s.cmake' % pkg:
                            return pn
            return None

        def interpret_value(value):
            return value.strip('"')

        def parse_cmake_file(fn, paths=None):
            searchpaths = (paths or []) + [os.path.dirname(fn)]
            logger.debug('Parsing file %s' % fn)
            with open(fn, 'r', errors='surrogateescape') as f:
                for line in f:
                    line = line.strip()
                    for handler in handlers:
                        # NOTE(review): this 'continue' only continues the inner
                        # handler loop - a line a handler reports as handled still
                        # falls through to the regex checks below; confirm intent
                        if handler.process_line(srctree, fn, line, libdeps, pcdeps, deps, outlines, inherits, values):
                            continue
                    res = include_re.match(line)
                    if res:
                        includefn = bb.utils.which(':'.join(searchpaths), res.group(1))
                        if includefn:
                            parse_cmake_file(includefn, searchpaths)
                        else:
                            logger.debug('Unable to recurse into include file %s' % res.group(1))
                        continue
                    res = subdir_re.match(line)
                    if res:
                        subdirfn = os.path.join(os.path.dirname(fn), res.group(1), 'CMakeLists.txt')
                        if os.path.exists(subdirfn):
                            parse_cmake_file(subdirfn, searchpaths)
                        else:
                            logger.debug('Unable to recurse into subdirectory file %s' % subdirfn)
                        continue
                    res = proj_re.match(line)
                    if res:
                        extravalues['PN'] = interpret_value(res.group(1).split()[0])
                        continue
                    res = pkgcm_re.match(line)
                    if res:
                        res = dep_re.findall(res.group(2))
                        if res:
                            pcdeps.extend([interpret_value(x[0]) for x in res])
                        inherits.append('pkgconfig')
                        continue
                    res = pkgsm_re.match(line)
                    if res:
                        res = dep_re.findall(res.group(2))
                        if res:
                            # Note: appending a tuple here!
                            item = tuple((interpret_value(x[0]) for x in res))
                            if len(item) == 1:
                                item = item[0]
                            pcdeps.append(item)
                        inherits.append('pkgconfig')
                        continue
                    res = findpackage_re.match(line)
                    if res:
                        origpkg = res.group(1)
                        pkg = interpret_value(origpkg)
                        found = False
                        for handler in handlers:
                            if handler.process_findpackage(srctree, fn, pkg, deps, outlines, inherits, values):
                                logger.debug('Mapped CMake package %s via handler %s' % (pkg, handler.__class__.__name__))
                                found = True
                                break
                        if found:
                            continue
                        elif pkg == 'Gettext':
                            inherits.append('gettext')
                        elif pkg == 'Perl':
                            inherits.append('perlnative')
                        elif pkg == 'PkgConfig':
                            inherits.append('pkgconfig')
                        elif pkg == 'PythonInterp':
                            inherits.append('python3native')
                        elif pkg == 'PythonLibs':
                            inherits.append('python3-dir')
                        else:
                            # Try to map via looking at installed CMake packages in pkgdata
                            dep = find_cmake_package(pkg)
                            if dep:
                                logger.debug('Mapped CMake package %s to recipe %s via pkgdata' % (pkg, dep))
                                deps.append(dep)
                            else:
                                dep = cmake_pkgmap.get(pkg.lower(), None)
                                if dep:
                                    logger.debug('Mapped CMake package %s to recipe %s via internal list' % (pkg, dep))
                                    deps.append(dep)
                                elif dep is None:
                                    unmappedpkgs.append(origpkg)
                        continue
                    res = checklib_re.match(line)
                    if res:
                        lib = interpret_value(res.group(1))
                        if not lib.startswith('$'):
                            libdeps.append(lib)
                    res = findlibrary_re.match(line)
                    if res:
                        libs = res.group(2).split()
                        for lib in libs:
                            # Stop at CMake keywords that terminate the NAMES list
                            if lib in ['HINTS', 'PATHS', 'PATH_SUFFIXES', 'DOC', 'NAMES_PER_DIR'] or lib.startswith(('NO_', 'CMAKE_', 'ONLY_CMAKE_')):
                                break
                            lib = interpret_value(lib)
                            if not lib.startswith('$'):
                                libdeps.append(lib)
                    if line.lower().startswith('useswig'):
                        deps.append('swig-native')
                        continue

        parse_cmake_file(srcfiles[0])

        if unmappedpkgs:
            outlines.append('# NOTE: unable to map the following CMake package dependencies: %s' % ' '.join(list(set(unmappedpkgs))))

        RecipeHandler.handle_depends(libdeps, pcdeps, deps, outlines, values, tinfoil.config_data)

        for handler in handlers:
            handler.post_process(srctree, libdeps, pcdeps, deps, outlines, inherits, values)

        if inherits:
            values['inherit'] = ' '.join(list(set(inherits)))

        return values
class CmakeExtensionHandler(object):
    '''Base class for CMake extension handlers'''
    def process_line(self, srctree, fn, line, libdeps, pcdeps, deps, outlines, inherits, values):
        '''
        Handle a line parsed out of an CMake file.
        Return True if you've completely handled the passed in line, otherwise return False.
        '''
        return False

    def process_findpackage(self, srctree, fn, pkg, deps, outlines, inherits, values):
        '''
        Handle a find_package package parsed out of a CMake file.
        Return True if you've completely handled the passed in package, otherwise return False.
        '''
        return False

    def post_process(self, srctree, fn, pkg, deps, outlines, inherits, values):
        '''
        Apply any desired post-processing on the output
        '''
        return
class SconsRecipeHandler(RecipeHandler):
    """Detects SCons-based source trees and sets up the 'scons' class."""

    def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
        """Handle a source tree: if it contains an SConstruct file, add the
        'scons' class and EXTRA_OESCONS boilerplate. Returns True if handled."""
        if 'buildsystem' in handled:
            return False
        if not RecipeHandler.checkfiles(srctree, ['SConstruct', 'Sconstruct', 'sconstruct']):
            return False
        classes.append('scons')
        lines_after.extend([
            '# Specify any options you want to pass to scons using EXTRA_OESCONS:',
            'EXTRA_OESCONS = ""',
            '',
        ])
        handled.append('buildsystem')
        return True
class QmakeRecipeHandler(RecipeHandler):
    """Detects qmake-based source trees (*.pro files) and sets up the 'qmake2' class."""

    def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
        """Handle a source tree: if it contains a .pro file, add the 'qmake2'
        class. Returns True if handled."""
        if 'buildsystem' in handled:
            return False
        if not RecipeHandler.checkfiles(srctree, ['*.pro']):
            return False
        classes.append('qmake2')
        handled.append('buildsystem')
        return True
class AutotoolsRecipeHandler(RecipeHandler):
    def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
        """Handle a source tree: detect autotools (configure.ac/configure.in or a
        pre-generated GNU Autoconf configure script), extract dependencies and
        PN/PV, and add the 'autotools' class boilerplate. Returns True if handled."""
        if 'buildsystem' in handled:
            return False

        autoconf = False
        if RecipeHandler.checkfiles(srctree, ['configure.ac', 'configure.in']):
            autoconf = True
            values = AutotoolsRecipeHandler.extract_autotools_deps(lines_before, srctree, extravalues)
            classes.extend(values.pop('inherit', '').split())
            for var, value in values.items():
                lines_before.append('%s = "%s"' % (var, value))
        else:
            conffile = RecipeHandler.checkfiles(srctree, ['configure'])
            if conffile:
                # Check if this is just a pre-generated autoconf configure script
                with open(conffile[0], 'r', errors='surrogateescape') as f:
                    for i in range(1, 10):
                        if 'Generated by GNU Autoconf' in f.readline():
                            autoconf = True
                            break

        if autoconf and not ('PV' in extravalues and 'PN' in extravalues):
            # Last resort: scrape PN/PV out of the generated configure script
            conffile = RecipeHandler.checkfiles(srctree, ['configure'])
            if conffile:
                with open(conffile[0], 'r', errors='surrogateescape') as f:
                    for line in f:
                        line = line.strip()
                        if line.startswith('VERSION=') or line.startswith('PACKAGE_VERSION='):
                            pv = line.split('=')[1].strip('"\'')
                            if pv and not 'PV' in extravalues and validate_pv(pv):
                                extravalues['PV'] = pv
                        elif line.startswith('PACKAGE_NAME=') or line.startswith('PACKAGE='):
                            pn = line.split('=')[1].strip('"\'')
                            if pn and not 'PN' in extravalues:
                                extravalues['PN'] = pn

        if autoconf:
            lines_before.append('')
            lines_before.append('# NOTE: if this software is not capable of being built in a separate build directory')
            lines_before.append('# from the source, you should replace autotools with autotools-brokensep in the')
            lines_before.append('# inherit line')
            classes.append('autotools')
            lines_after.append('# Specify any options you want to pass to the configure script using EXTRA_OECONF:')
            lines_after.append('EXTRA_OECONF = ""')
            lines_after.append('')
            handled.append('buildsystem')
            return True

        return False
@staticmethod
def extract_autotools_deps(outlines, srctree, extravalues=None, acfile=None):
import shlex
# Find all plugins that want to register handlers
logger.debug('Loading autotools handlers')
handlers = []
for plugin in plugins:
if hasattr(plugin, 'register_autotools_handlers'):
plugin.register_autotools_handlers(handlers)
values = {}
inherits = []
# Hardcoded map, we also use a dynamic one based on what's in the sysroot
progmap = {'flex': 'flex-native',
'bison': 'bison-native',
'm4': 'm4-native',
'tar': 'tar-native',
'ar': 'binutils-native',
'ranlib': 'binutils-native',
'ld': 'binutils-native',
'strip': 'binutils-native',
'libtool': '',
'autoconf': '',
'autoheader': '',
'automake': '',
'uname': '',
'rm': '',
'cp': '',
'mv': '',
'find': '',
'awk': '',
'sed': '',
}
progclassmap = {'gconftool-2': 'gconf',
'pkg-config': 'pkgconfig',
'python': 'python3native',
'python3': 'python3native',
'perl': 'perlnative',
'makeinfo': 'texinfo',
}
pkg_re = re.compile(r'PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
pkgce_re = re.compile(r'PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*')
lib_re = re.compile(r'AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*')
libx_re = re.compile(r'AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*')
progs_re = re.compile(r'_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
dep_re = re.compile(r'([^ ><=]+)( [<>=]+ [^ ><=]+)?')
ac_init_re = re.compile(r'AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*')
am_init_re = re.compile(r'AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*')
define_re = re.compile(r'\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)')
version_re = re.compile(r'([0-9.]+)')
defines = {}
def subst_defines(value):
newvalue = value
for define, defval in defines.items():
newvalue = newvalue.replace(define, defval)
if newvalue != value:
return subst_defines(newvalue)
return value
def process_value(value):
value = value.replace('[', '').replace(']', '')
if value.startswith('m4_esyscmd(') or value.startswith('m4_esyscmd_s('):
cmd = subst_defines(value[value.index('(')+1:-1])
try:
if '|' in cmd:
cmd = 'set -o pipefail; ' + cmd
stdout, _ = bb.process.run(cmd, cwd=srctree, shell=True)
ret = stdout.rstrip()
except bb.process.ExecutionError as e:
ret = ''
elif value.startswith('m4_'):
return None
ret = subst_defines(value)
if ret:
ret = ret.strip('"\'')
return ret
# Since a configure.ac file is essentially a program, this is only ever going to be
# a hack unfortunately; but it ought to be enough of an approximation
if acfile:
srcfiles = [acfile]
else:
srcfiles = RecipeHandler.checkfiles(srctree, ['acinclude.m4', 'configure.ac', 'configure.in'])
pcdeps = []
libdeps = []
deps = []
unmapped = []
RecipeHandler.load_binmap(tinfoil.config_data)
def process_macro(keyword, value):
    """Act on a single macro invocation parsed out of the configure script.

    Updates the surrounding scope's dependency lists (pcdeps, libdeps,
    deps), the inherits and unmapped lists, and - for AC_INIT /
    AM_INIT_AUTOMAKE / define() - extravalues and defines, based on the
    macro keyword and its raw argument string. Extension handlers are
    given the first chance to consume each macro.
    """
    for handler in handlers:
        if handler.process_macro(srctree, keyword, value, process_value, libdeps, pcdeps, deps, outlines, inherits, values):
            return
    logger.debug('Found keyword %s with value "%s"' % (keyword, value))
    if keyword == 'PKG_CHECK_MODULES':
        res = pkg_re.search(value)
        if res:
            res = dep_re.findall(res.group(1))
            if res:
                # Keep only the module names, dropping version comparisons
                pcdeps.extend([x[0] for x in res])
        inherits.append('pkgconfig')
    elif keyword == 'PKG_CHECK_EXISTS':
        res = pkgce_re.search(value)
        if res:
            res = dep_re.findall(res.group(1))
            if res:
                pcdeps.extend([x[0] for x in res])
        inherits.append('pkgconfig')
    elif keyword in ('AM_GNU_GETTEXT', 'AM_GLIB_GNU_GETTEXT', 'GETTEXT_PACKAGE'):
        inherits.append('gettext')
    elif keyword in ('AC_PROG_INTLTOOL', 'IT_PROG_INTLTOOL'):
        deps.append('intltool-native')
    elif keyword == 'AM_PATH_GLIB_2_0':
        deps.append('glib-2.0')
    elif keyword in ('AC_CHECK_PROG', 'AC_PATH_PROG', 'AX_WITH_PROG'):
        res = progs_re.search(value)
        if res:
            for prog in shlex.split(res.group(1)):
                prog = prog.split()[0]
                # Extension handlers get first crack at each program
                for handler in handlers:
                    if handler.process_prog(srctree, keyword, value, prog, deps, outlines, inherits, values):
                        return
                progclass = progclassmap.get(prog, None)
                if progclass:
                    inherits.append(progclass)
                else:
                    # Try the binary->recipe map first, then the static progmap
                    # (an empty progmap entry means "known, deliberately ignored")
                    progdep = RecipeHandler.recipebinmap.get(prog, None)
                    if not progdep:
                        progdep = progmap.get(prog, None)
                    if progdep:
                        deps.append(progdep)
                    elif progdep is None:
                        if not prog.startswith('$'):
                            unmapped.append(prog)
    elif keyword == 'AC_CHECK_LIB':
        res = lib_re.search(value)
        if res:
            lib = res.group(1)
            if not lib.startswith('$'):
                libdeps.append(lib)
    elif keyword == 'AX_CHECK_LIBRARY':
        res = libx_re.search(value)
        if res:
            lib = res.group(2)
            if not lib.startswith('$'):
                header = res.group(1)
                # Record as (library, header) so the header can later be
                # mapped to a providing recipe
                libdeps.append((lib, header))
    elif keyword == 'AC_PATH_X':
        deps.append('libx11')
    elif keyword in ('AX_BOOST', 'BOOST_REQUIRE'):
        deps.append('boost')
    elif keyword in ('AC_PROG_LEX', 'AM_PROG_LEX', 'AX_PROG_FLEX'):
        deps.append('flex-native')
    elif keyword in ('AC_PROG_YACC', 'AX_PROG_BISON'):
        deps.append('bison-native')
    elif keyword == 'AX_CHECK_ZLIB':
        deps.append('zlib')
    elif keyword in ('AX_CHECK_OPENSSL', 'AX_LIB_CRYPTO'):
        deps.append('openssl')
    elif keyword in ('AX_LIB_CURL', 'LIBCURL_CHECK_CONFIG'):
        deps.append('curl')
    elif keyword == 'AX_LIB_BEECRYPT':
        deps.append('beecrypt')
    elif keyword == 'AX_LIB_EXPAT':
        deps.append('expat')
    elif keyword == 'AX_LIB_GCRYPT':
        deps.append('libgcrypt')
    elif keyword == 'AX_LIB_NETTLE':
        deps.append('nettle')
    elif keyword == 'AX_LIB_READLINE':
        deps.append('readline')
    elif keyword == 'AX_LIB_SQLITE3':
        deps.append('sqlite3')
    elif keyword == 'AX_LIB_TAGLIB':
        deps.append('taglib')
    elif keyword in ['AX_PKG_SWIG', 'AC_PROG_SWIG']:
        deps.append('swig-native')
    elif keyword == 'AX_PROG_XSLTPROC':
        deps.append('libxslt-native')
    elif keyword in ['AC_PYTHON_DEVEL', 'AX_PYTHON_DEVEL', 'AM_PATH_PYTHON']:
        # NOTE(review): this local assignment appears unused as written -
        # presumably a leftover from python2/python3 class selection;
        # confirm before relying on python macros adding an inherit
        pythonclass = 'python3native'
    elif keyword == 'AX_WITH_CURSES':
        deps.append('ncurses')
    elif keyword == 'AX_PATH_BDB':
        deps.append('db')
    elif keyword == 'AX_PATH_LIB_PCRE':
        deps.append('libpcre')
    elif keyword == 'AC_INIT':
        if extravalues is not None:
            res = ac_init_re.match(value)
            if res:
                extravalues['PN'] = process_value(res.group(1))
                pv = process_value(res.group(2))
                if validate_pv(pv):
                    extravalues['PV'] = pv
    elif keyword == 'AM_INIT_AUTOMAKE':
        if extravalues is not None:
            # AC_INIT (if present) takes precedence for PN
            if 'PN' not in extravalues:
                res = am_init_re.match(value)
                if res:
                    if res.group(1) != 'AC_PACKAGE_NAME':
                        extravalues['PN'] = process_value(res.group(1))
                    pv = process_value(res.group(2))
                    if validate_pv(pv):
                        extravalues['PV'] = pv
    elif keyword == 'define(':
        res = define_re.match(value)
        if res:
            key = res.group(2).strip('[]')
            value = process_value(res.group(3))
            if value is not None:
                defines[key] = value
# Autoconf/automake macro keywords that process_macro() knows how to
# handle; extension handlers may add to this via extend_keywords().
keywords = ['PKG_CHECK_MODULES',
            'PKG_CHECK_EXISTS',
            'AM_GNU_GETTEXT',
            'AM_GLIB_GNU_GETTEXT',
            'GETTEXT_PACKAGE',
            'AC_PROG_INTLTOOL',
            'IT_PROG_INTLTOOL',
            'AM_PATH_GLIB_2_0',
            'AC_CHECK_PROG',
            'AC_PATH_PROG',
            'AX_WITH_PROG',
            'AC_CHECK_LIB',
            'AX_CHECK_LIBRARY',
            'AC_PATH_X',
            'AX_BOOST',
            'BOOST_REQUIRE',
            'AC_PROG_LEX',
            'AM_PROG_LEX',
            'AX_PROG_FLEX',
            'AC_PROG_YACC',
            'AX_PROG_BISON',
            'AX_CHECK_ZLIB',
            'AX_CHECK_OPENSSL',
            'AX_LIB_CRYPTO',
            'AX_LIB_CURL',
            'LIBCURL_CHECK_CONFIG',
            'AX_LIB_BEECRYPT',
            'AX_LIB_EXPAT',
            'AX_LIB_GCRYPT',
            'AX_LIB_NETTLE',
            'AX_LIB_READLINE',  # FIX: comma was missing here, silently fusing this entry with AX_LIB_SQLITE3 so neither macro was ever matched
            'AX_LIB_SQLITE3',
            'AX_LIB_TAGLIB',
            'AX_PKG_SWIG',
            'AC_PROG_SWIG',
            'AX_PROG_XSLTPROC',
            'AC_PYTHON_DEVEL',
            'AX_PYTHON_DEVEL',
            'AM_PATH_PYTHON',
            'AX_WITH_CURSES',
            'AX_PATH_BDB',
            'AX_PATH_LIB_PCRE',
            'AC_INIT',
            'AM_INIT_AUTOMAKE',
            'define(',
            ]
# Give extension handlers a chance to register extra macro keywords
for handler in handlers:
    handler.extend_keywords(keywords)

for srcfile in srcfiles:
    nesting = 0      # unbalanced '(' count for the macro being accumulated
    in_keyword = ''  # keyword of a macro call spanning multiple lines
    partial = ''     # accumulated argument text for that macro call
    with open(srcfile, 'r', errors='surrogateescape') as f:
        for line in f:
            if in_keyword:
                # Continuation of a multi-line macro call: accumulate
                # until the parentheses balance out
                partial += ' ' + line.strip()
                if partial.endswith('\\'):
                    partial = partial[:-1]
                nesting = nesting + line.count('(') - line.count(')')
                if nesting == 0:
                    process_macro(in_keyword, partial)
                    partial = ''
                    in_keyword = ''
            else:
                for keyword in keywords:
                    if keyword in line:
                        nesting = line.count('(') - line.count(')')
                        if nesting > 0:
                            # Macro call continues on the following line(s)
                            partial = line.strip()
                            if partial.endswith('\\'):
                                partial = partial[:-1]
                            in_keyword = keyword
                        else:
                            process_macro(keyword, line.strip())
                        break

    if in_keyword:
        # Macro still open at EOF - process what we have accumulated
        process_macro(in_keyword, partial)
if extravalues:
    # Drop values that still contain unexpanded autoconf/automake/RPM-style
    # references (we cannot resolve those); tidy up quoting on the rest
    for k,v in list(extravalues.items()):
        if v:
            if v.startswith('$') or v.startswith('@') or v.startswith('%'):
                del extravalues[k]
            else:
                extravalues[k] = v.strip('"\'').rstrip('()')

if unmapped:
    outlines.append('# NOTE: the following prog dependencies are unknown, ignoring: %s' % ' '.join(list(set(unmapped))))

# Translate the collected library/pkg-config/program dependencies into
# DEPENDS entries in the output
RecipeHandler.handle_depends(libdeps, pcdeps, deps, outlines, values, tinfoil.config_data)

# Allow extension handlers a final pass over the collected results
for handler in handlers:
    handler.post_process(srctree, libdeps, pcdeps, deps, outlines, inherits, values)

if inherits:
    values['inherit'] = ' '.join(list(set(inherits)))

return values
class AutotoolsExtensionHandler(object):
    """Base class for Autotools extension handlers.

    Subclasses hook into the configure.ac parsing performed by
    AutotoolsRecipeHandler by overriding any of the methods below; the
    default implementations do nothing.
    """

    def process_macro(self, srctree, keyword, value, process_value, libdeps, pcdeps, deps, outlines, inherits, values):
        """Handle a macro parsed out of an autotools file.

        Only invoked for macros in the keyword list - use extend_keywords()
        to be called for additional macros. Return True when the macro has
        been completely handled here, False to let default handling run.
        """
        return False

    def extend_keywords(self, keywords):
        """Add extra macro names for the parser to recognise (each one
        results in a process_macro() call when encountered)."""
        return

    def process_prog(self, srctree, keyword, value, prog, deps, outlines, inherits, values):
        """Handle an AC_PATH_PROG, AC_CHECK_PROG etc. line.

        Return True when the program check has been completely handled,
        otherwise return False.
        """
        return False

    def post_process(self, srctree, fn, pkg, deps, outlines, inherits, values):
        """Apply any desired post-processing on the output."""
        return
class MakefileRecipeHandler(RecipeHandler):
    """Recipe creation handler for plain Makefile-based projects."""

    def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
        """Generate skeleton do_configure/do_compile/do_install tasks for a
        source tree that is driven by a bare Makefile.

        Uses autoscan (if available) to extract dependency hints, and probes
        'make -n install' to decide whether an install target exists.
        """
        if 'buildsystem' in handled:
            return False

        makefile = RecipeHandler.checkfiles(srctree, ['Makefile', 'makefile', 'GNUmakefile'])
        if makefile:
            lines_after.append('# NOTE: this is a Makefile-only piece of software, so we cannot generate much of the')
            lines_after.append('# recipe automatically - you will need to examine the Makefile yourself and ensure')
            lines_after.append('# that the appropriate arguments are passed in.')
            lines_after.append('')

            scanfile = os.path.join(srctree, 'configure.scan')
            try:
                # autoscan writes configure.scan into srctree on success;
                # failure is non-fatal, we simply skip the dependency hints.
                # FIX: dropped the dead 'skipscan' flag - it was set on
                # failure but never read
                stdout, stderr = bb.process.run('autoscan', cwd=srctree, shell=True)
            except bb.process.ExecutionError:
                pass
            if os.path.exists(scanfile):
                values = AutotoolsRecipeHandler.extract_autotools_deps(lines_before, srctree, acfile=scanfile)
                classes.extend(values.pop('inherit', '').split())
                for var, value in values.items():
                    if var == 'DEPENDS':
                        lines_before.append('# NOTE: some of these dependencies may be optional, check the Makefile and/or upstream documentation')
                    lines_before.append('%s = "%s"' % (var, value))
                lines_before.append('')
                # Clean up the files autoscan left behind
                for f in ['configure.scan', 'autoscan.log']:
                    fp = os.path.join(srctree, f)
                    if os.path.exists(fp):
                        os.remove(fp)

            self.genfunction(lines_after, 'do_configure', ['# Specify any needed configure commands here'])

            func = []
            func.append('# You will almost certainly need to add additional arguments here')
            func.append('oe_runmake')
            self.genfunction(lines_after, 'do_compile', func)

            # Probe for an install target; exit code 1 is what make returns
            # when the target does not exist, so only other failures mean
            # "no usable install target"
            installtarget = True
            try:
                stdout, stderr = bb.process.run('make -n install', cwd=srctree, shell=True)
            except bb.process.ExecutionError as e:
                if e.exitcode != 1:
                    installtarget = False
            func = []
            if installtarget:
                func.append('# This is a guess; additional arguments may be required')
                makeargs = ''
                with open(makefile[0], 'r', errors='surrogateescape') as f:
                    # Only scan the first ~100 lines for a DESTDIR reference
                    for i in range(1, 100):
                        if 'DESTDIR' in f.readline():
                            makeargs += " 'DESTDIR=${D}'"
                            break
                func.append('oe_runmake install%s' % makeargs)
            else:
                func.append('# NOTE: unable to determine what to put here - there is a Makefile but no')
                func.append('# target named "install", so you will need to define this yourself')
            self.genfunction(lines_after, 'do_install', func)

            handled.append('buildsystem')
        else:
            lines_after.append('# NOTE: no Makefile found, unable to determine what needs to be done')
            lines_after.append('')
            self.genfunction(lines_after, 'do_configure', ['# Specify any needed configure commands here'])
            self.genfunction(lines_after, 'do_compile', ['# Specify compilation commands here'])
            self.genfunction(lines_after, 'do_install', ['# Specify install commands here'])
class VersionFileRecipeHandler(RecipeHandler):
    """Extracts PV from a VERSION/version file in the source tree."""

    def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
        """Set extravalues['PV'] if a VERSION or version file contains a
        single non-blank line holding a valid version number."""
        if 'PV' in extravalues:
            return
        version = None
        for candidate in RecipeHandler.checkfiles(srctree, ['VERSION', 'version']):
            lineno = 0
            with open(candidate, 'r', errors='surrogateescape') as f:
                for rawline in f:
                    lineno += 1
                    content = rawline.rstrip().strip('"\'')
                    if not content:
                        continue
                    if lineno > 1:
                        # Content beyond the first line - this is not a
                        # simple version file after all
                        version = None
                        break
                    if validate_pv(content):
                        version = content
            if version:
                extravalues['PV'] = version
                break
class SpecFileRecipeHandler(RecipeHandler):
    """Extracts recipe metadata (PN/PV/SUMMARY/HOMEPAGE/LICENSE) from any
    RPM .spec files found in the source tree."""

    def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
        if 'PV' in extravalues and 'PN' in extravalues:
            return
        filelist = RecipeHandler.checkfiles(srctree, ['*.spec'], recursive=True)
        # spec file tag -> recipe variable
        valuemap = {'Name': 'PN',
                    'Version': 'PV',
                    'Summary': 'SUMMARY',
                    'Url': 'HOMEPAGE',
                    'License': 'LICENSE'}
        foundvalues = {}
        for specfile in filelist:
            with open(specfile, 'r', errors='surrogateescape') as f:
                for line in f:
                    for tag, varname in valuemap.items():
                        if line.startswith(tag + ':') and not varname in foundvalues:
                            foundvalues[varname] = line.split(':', 1)[1].strip()
                            break
            if len(foundvalues) == len(valuemap):
                # Everything found - no point reading further spec files
                break
        # Drop values containing unexpanded RPM macros
        for key in list(foundvalues.keys()):
            if '%' in foundvalues[key]:
                del foundvalues[key]
        if 'PV' in foundvalues and not validate_pv(foundvalues['PV']):
            del foundvalues['PV']
        license = foundvalues.pop('LICENSE', None)
        if license:
            # The license cannot be trusted outright, so surface it as a
            # comment next to any existing LICENSE assignment instead
            liccomment = '# NOTE: spec file indicates the license may be "%s"' % license
            inserted = False
            for i, line in enumerate(lines_before):
                if line.startswith('LICENSE ='):
                    lines_before.insert(i, liccomment)
                    inserted = True
                    break
            if not inserted:
                lines_before.append(liccomment)
        extravalues.update(foundvalues)
def register_recipe_handlers(handlers):
    """Register this module's recipe handlers with the plugin framework.

    Priorities are deliberately spaced out so that other plugins can
    insert their own handlers in between (so avoid changing these numbers).
    """
    handlers.extend([
        (CmakeRecipeHandler(), 50),
        (AutotoolsRecipeHandler(), 40),
        (SconsRecipeHandler(), 30),
        (QmakeRecipeHandler(), 20),
        (MakefileRecipeHandler(), 10),
        (VersionFileRecipeHandler(), -1),
        (SpecFileRecipeHandler(), -1),
    ])

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,777 @@
# Recipe creation tool - go support plugin
#
# The code is based on golang internals. See the affected
# methods for further reference and information.
#
# Copyright (C) 2023 Weidmueller GmbH & Co KG
# Author: Lukas Funke <lukas.funke@weidmueller.com>
#
# SPDX-License-Identifier: GPL-2.0-only
#
from collections import namedtuple
from enum import Enum
from html.parser import HTMLParser
from recipetool.create import RecipeHandler, handle_license_vars
from recipetool.create import guess_license, tidy_licenses, fixup_license
from recipetool.create import determine_from_url
from urllib.error import URLError, HTTPError
import bb.utils
import json
import logging
import os
import re
import subprocess
import sys
import shutil
import tempfile
import urllib.parse
import urllib.request
# Result of resolving a module path to its hosting repository:
# repository root path, VCS type, clone URL and remaining path suffix
GoImport = namedtuple('GoImport', 'root vcs url suffix')
logger = logging.getLogger('recipetool')
# Decomposition of a module path into repository/code-directory parts
CodeRepo = namedtuple(
    'CodeRepo', 'path codeRoot codeDir pathMajor pathPrefix pseudoMajor')
# Shared tinfoil instance, set via tinfoil_init()
tinfoil = None

# Regular expression to parse pseudo semantic version
# see https://go.dev/ref/mod#pseudo-versions
re_pseudo_semver = re.compile(
    r"^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)(?P<utc>\d{14})-(?P<commithash>[A-Za-z0-9]+)(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$")
# Regular expression to parse semantic version
re_semver = re.compile(
    r"^v(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$")
def tinfoil_init(instance):
    """Store the shared tinfoil instance used throughout this plugin."""
    global tinfoil
    tinfoil = instance
class GoRecipeHandler(RecipeHandler):
"""Class to handle the go recipe creation"""
@staticmethod
def __ensure_go():
    """Check if the 'go' command is available in the recipes.

    Builds go-native's recipe sysroot if needed. Returns the native
    staging bindir containing 'go', or None if it cannot be provided.
    """
    recipe = "go-native"
    if not tinfoil.recipes_parsed:
        tinfoil.parse_recipes()
    try:
        rd = tinfoil.parse_recipe(recipe)
    except bb.providers.NoProvider:
        bb.error(
            "Nothing provides '%s' which is required for the build" % (recipe))
        bb.note(
            "You will likely need to add a layer that provides '%s'" % (recipe))
        return None

    bindir = rd.getVar('STAGING_BINDIR_NATIVE')
    gopath = os.path.join(bindir, 'go')

    if not os.path.exists(gopath):
        tinfoil.build_targets(recipe, 'addto_recipe_sysroot')

        if not os.path.exists(gopath):
            # FIX: the format arguments were not parenthesized, so '%' bound
            # only to 'go' (TypeError: not enough arguments) and 'recipe'
            # became a stray positional argument to logger.error()
            logger.error(
                '%s required to process specified source, but %s did not seem to populate it' % ('go', recipe))
            return None

    return bindir
def __resolve_repository_static(self, modulepath):
    """Resolve the repository in a static manner

    The method is based on the go implementation of
    `repoRootFromVCSPaths` in
    https://github.com/golang/go/blob/master/src/cmd/go/internal/vcs/vcs.go

    Returns a GoImport namedtuple; all of its fields are None when no
    static rule matched, and the method returns None outright for
    suspicious module paths.
    """
    url = urllib.parse.urlparse("https://" + modulepath)
    req = urllib.request.Request(url.geturl())

    try:
        resp = urllib.request.urlopen(req)
        # Some modulepath are just redirects to github (or some other vcs
        # hoster). Therefore, we check if this modulepath redirects to
        # somewhere else
        if resp.geturl() != url.geturl():
            bb.debug(1, "%s is redirected to %s" %
                     (url.geturl(), resp.geturl()))
            url = urllib.parse.urlparse(resp.geturl())
            modulepath = url.netloc + url.path
    except URLError as url_err:
        # This is probably because the module path
        # contains the subdir and major path. Thus,
        # we ignore this error for now
        # FIX: logging.Logger.debug() takes the message first (unlike
        # bb.debug); passing the level as the first argument made the
        # record formatting raise TypeError
        logger.debug(
            "Failed to fetch page from [%s]: %s", url, str(url_err))

    host, _, _ = modulepath.partition('/')

    class vcs(Enum):
        pathprefix = "pathprefix"
        regexp = "regexp"
        type = "type"
        repo = "repo"
        check = "check"
        schemelessRepo = "schemelessRepo"

    # GitHub
    vcsGitHub = {}
    vcsGitHub[vcs.pathprefix] = "github.com"
    vcsGitHub[vcs.regexp] = re.compile(
        r'^(?P<root>github\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
    vcsGitHub[vcs.type] = "git"
    vcsGitHub[vcs.repo] = "https://\\g<root>"

    # Bitbucket
    vcsBitbucket = {}
    vcsBitbucket[vcs.pathprefix] = "bitbucket.org"
    vcsBitbucket[vcs.regexp] = re.compile(
        r'^(?P<root>bitbucket\.org/(?P<bitname>[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
    vcsBitbucket[vcs.type] = "git"
    vcsBitbucket[vcs.repo] = "https://\\g<root>"

    # IBM DevOps Services (JazzHub)
    vcsIBMDevOps = {}
    vcsIBMDevOps[vcs.pathprefix] = "hub.jazz.net/git"
    vcsIBMDevOps[vcs.regexp] = re.compile(
        r'^(?P<root>hub\.jazz\.net/git/[a-z0-9]+/[A-Za-z0-9_.\-]+)(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
    vcsIBMDevOps[vcs.type] = "git"
    vcsIBMDevOps[vcs.repo] = "https://\\g<root>"

    # Git at Apache
    vcsApacheGit = {}
    vcsApacheGit[vcs.pathprefix] = "git.apache.org"
    vcsApacheGit[vcs.regexp] = re.compile(
        r'^(?P<root>git\.apache\.org/[a-z0-9_.\-]+\.git)(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
    vcsApacheGit[vcs.type] = "git"
    vcsApacheGit[vcs.repo] = "https://\\g<root>"

    # Git at OpenStack
    vcsOpenStackGit = {}
    vcsOpenStackGit[vcs.pathprefix] = "git.openstack.org"
    vcsOpenStackGit[vcs.regexp] = re.compile(
        r'^(?P<root>git\.openstack\.org/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(\.git)?(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
    vcsOpenStackGit[vcs.type] = "git"
    vcsOpenStackGit[vcs.repo] = "https://\\g<root>"

    # chiselapp.com for fossil
    vcsChiselapp = {}
    vcsChiselapp[vcs.pathprefix] = "chiselapp.com"
    vcsChiselapp[vcs.regexp] = re.compile(
        r'^(?P<root>chiselapp\.com/user/[A-Za-z0-9]+/repository/[A-Za-z0-9_.\-]+)$')
    vcsChiselapp[vcs.type] = "fossil"
    vcsChiselapp[vcs.repo] = "https://\\g<root>"

    # General syntax for any server.
    # Must be last.
    vcsGeneralServer = {}
    vcsGeneralServer[vcs.regexp] = re.compile(
        "(?P<root>(?P<repo>([a-z0-9.\\-]+\\.)+[a-z0-9.\\-]+(:[0-9]+)?(/~?[A-Za-z0-9_.\\-]+)+?)\\.(?P<vcs>bzr|fossil|git|hg|svn))(/~?(?P<suffix>[A-Za-z0-9_.\\-]+))*$")
    vcsGeneralServer[vcs.schemelessRepo] = True

    vcsPaths = [vcsGitHub, vcsBitbucket, vcsIBMDevOps,
                vcsApacheGit, vcsOpenStackGit, vcsChiselapp,
                vcsGeneralServer]

    if modulepath.startswith("example.net") or modulepath == "rsc.io":
        logger.warning("Suspicious module path %s" % modulepath)
        return None
    if modulepath.startswith("http:") or modulepath.startswith("https:"):
        logger.warning("Import path should not start with %s %s" %
                       ("http", "https"))
        return None

    rootpath = None
    vcstype = None
    repourl = None
    suffix = None

    for srv in vcsPaths:
        m = srv[vcs.regexp].match(modulepath)
        if vcs.pathprefix in srv:
            # FIX: also require a regexp match here - the host prefix can
            # match while the path regexp does not, in which case calling
            # m.group() on None raised AttributeError
            if m and host == srv[vcs.pathprefix]:
                rootpath = m.group('root')
                vcstype = srv[vcs.type]
                repourl = m.expand(srv[vcs.repo])
                suffix = m.group('suffix')
                break
        elif m and srv[vcs.schemelessRepo]:
            rootpath = m.group('root')
            # FIX: the vcs type and repo URL come from the regexp's named
            # groups; indexing the match with the enum members raised
            # IndexError ('no such group')
            vcstype = m.group('vcs')
            repourl = m.group('repo')
            suffix = m.group('suffix')
            break

    return GoImport(rootpath, vcstype, repourl, suffix)
def __resolve_repository_dynamic(self, modulepath):
    """Resolve the repository root in a dynamic manner.

    The method is based on the go implementation of
    `repoRootForImportDynamic` in
    https://github.com/golang/go/blob/master/src/cmd/go/internal/vcs/vcs.go

    Fetches "https://<modulepath>?go-get=1" and extracts any
    <meta name="go-import"> declarations from the returned page.
    """

    class GoImportHTMLParser(HTMLParser):
        """Collects <meta name="go-import" content="..."> declarations."""

        def __init__(self):
            super().__init__()
            self.__srv = {}

        def handle_starttag(self, tag, attrs):
            if tag != 'meta':
                return
            if not any(name == 'name' and val == 'go-import' for name, val in attrs):
                return
            for name, val in attrs:
                if name == 'content':
                    fields = val.split()
                    self.__srv[fields[0]] = fields
                    break

        def go_import(self, modulepath):
            fields = self.__srv.get(modulepath)
            if fields:
                return GoImport(fields[0], fields[1], fields[2], None)
            return None

    pageurl = urllib.parse.urlparse("https://" + modulepath).geturl() + "?go-get=1"
    req = urllib.request.Request(pageurl)

    try:
        body = urllib.request.urlopen(req).read()
    except HTTPError as http_err:
        # A non-2xx status may still carry the go-import meta tags
        logger.warning(
            "Unclean status when fetching page from [%s]: %s", pageurl, str(http_err))
        body = http_err.fp.read()
    except URLError as url_err:
        logger.warning(
            "Failed to fetch page from [%s]: %s", pageurl, str(url_err))
        return None

    parser = GoImportHTMLParser()
    parser.feed(body.decode('utf-8'))
    parser.close()

    return parser.go_import(modulepath)
def __resolve_from_golang_proxy(self, modulepath, version):
    """
    Resolves repository data from golang proxy

    Returns a (GoImport, commit-hash) tuple when the proxy knows the
    module's origin, otherwise None.
    """
    url = urllib.parse.urlparse("https://proxy.golang.org/"
                                + modulepath
                                + "/@v/"
                                + version
                                + ".info")

    # Transform url to lower case, golang proxy doesn't like mixed case
    req = urllib.request.Request(url.geturl().lower())

    try:
        resp = urllib.request.urlopen(req)
    except URLError as url_err:
        logger.warning(
            "Failed to fetch page from [%s]: %s", url, str(url_err))
        return None

    modinfo = json.loads(resp.read().decode('utf-8'))
    if not modinfo or 'Origin' not in modinfo:
        return None

    origin = modinfo['Origin']
    # We normalize the repo URL since we don't want the scheme in it
    root_url = urllib.parse.urlparse(origin['URL'])
    subdir = origin['Subdir'] if 'Subdir' in origin else None
    root, _, _ = self.__split_path_version(modulepath)
    if subdir:
        root = root[:-len(subdir)].strip('/')

    return (GoImport(root, origin['VCS'], root_url.geturl(), None), origin['Hash'])
def __resolve_repository(self, modulepath):
    """
    Resolves src uri from go module-path

    Tries the static vcs table first, then dynamic go-import meta-tag
    discovery. Exits with code 14 when the repository cannot be
    resolved at all, as there is no way to recover from that.
    """
    repodata = self.__resolve_repository_static(modulepath)
    if not repodata or not repodata.url:
        repodata = self.__resolve_repository_dynamic(modulepath)
    if not repodata or not repodata.url:
        logger.error(
            "Could not resolve repository for module path '%s'" % modulepath)
        # There is no way to recover from this
        sys.exit(14)
    if repodata:
        # FIX: logging.Logger.debug() has no level parameter (that's
        # bb.debug); passing 1 as the message made record formatting
        # raise TypeError
        logger.debug("Resolved download path for import '%s' => %s",
                     modulepath, repodata.url)
    return repodata
def __split_path_version(self, path):
    """Split a module path into (prefix, pathMajor, ok).

    Port of golang's internal splitPathVersion: separates a trailing
    major-version element ("/vN") from the rest of the module path.
    Returns the unchanged path with an empty pathMajor when there is no
    major-version suffix; ok is False when a "/v0", "/v1" or
    leading-zero suffix was found (those are not valid major versions).
    """
    # Walk backwards over any trailing digits
    i = len(path)
    dot = False
    for j in range(i, 0, -1):
        if path[j - 1] < '0' or path[j - 1] > '9':
            break
        if path[j - 1] == '.':
            # NOTE(review): unreachable as written - '.' sorts below '0'
            # so the break above always fires first and 'dot' can never
            # become True; confirm against golang's splitPathVersion
            dot = True
            break
        i = j - 1
    # Require the digits to be immediately preceded by "/v"
    if i <= 1 or i == len(
            path) or path[i - 1] != 'v' or path[i - 2] != '/':
        return path, "", True
    prefix, pathMajor = path[:i - 2], path[i - 2:]
    # "/v0" and "/v1" (and zero-prefixed numbers) are not valid major versions
    if dot or len(
            pathMajor) <= 2 or pathMajor[2] == '0' or pathMajor == "/v1":
        return path, "", False
    return prefix, pathMajor, True
def __get_path_major(self, pathMajor):
    """Return the major-version component of a pathMajor suffix.

    Strips the leading separator ('/' or '.') and, for gopkg.in-style
    ".vN-unstable" suffixes, the trailing "-unstable" marker, yielding
    e.g. "v2" (mirrors golang's pathMajorPrefix handling).
    """
    if not pathMajor:
        return ""
    if pathMajor[0] != '/' and pathMajor[0] != '.':
        logger.error(
            "pathMajor suffix %s passed to PathMajorPrefix lacks separator", pathMajor)

    if pathMajor.startswith(".v") and pathMajor.endswith("-unstable"):
        # FIX: strip the "-unstable" suffix; the previous slice
        # pathMajor[:len("-unstable") - 2] cut at a fixed column and
        # turned e.g. ".v1-unstable" into ".v1-uns" instead of ".v1"
        pathMajor = pathMajor[:-len("-unstable")]

    return pathMajor[1:]
def __build_coderepo(self, repo, path):
    """Construct a CodeRepo describing how *path* maps onto *repo*
    (code subdirectory, major-version suffix, pseudo-version prefix)."""
    pathprefix, pathMajor, _ = self.__split_path_version(path)
    if repo.root == path:
        pathprefix = path
        codedir = ""
    elif path.startswith(repo.root):
        # Module lives in a subdirectory of the repository
        codedir = pathprefix[len(repo.root):].strip('/')
    else:
        codedir = ""
    pseudoMajor = self.__get_path_major(pathMajor)

    logger.debug("root='%s', codedir='%s', prefix='%s', pathMajor='%s', pseudoMajor='%s'",
                 repo.root, codedir, pathprefix, pathMajor, pseudoMajor)

    return CodeRepo(path, repo.root, codedir,
                    pathMajor, pathprefix, pseudoMajor)
def __resolve_version(self, repo, path, version):
    """Resolve a go module *version* string to a git commit hash.

    Pseudo-versions are resolved by matching the embedded abbreviated
    commit hash against the full history; semantic versions are resolved
    via the corresponding tag - probed remotely first (fast), with a
    full history fetch as fallback. Returns the hash or None.
    """
    hash = None
    coderoot = self.__build_coderepo(repo, path)

    def vcs_fetch_all():
        """Bare-clone the repo and return 'git log --all' hash/ref lines."""
        tmpdir = tempfile.mkdtemp()
        clone_cmd = "%s clone --bare %s %s" % ('git', repo.url, tmpdir)
        bb.process.run(clone_cmd)
        log_cmd = "git log --all --pretty='%H %d' --decorate=short"
        output, _ = bb.process.run(
            log_cmd, shell=True, stderr=subprocess.PIPE, cwd=tmpdir)
        bb.utils.prunedir(tmpdir)
        return output.strip().split('\n')

    def vcs_fetch_remote(tag):
        """Probe a single tag via 'git ls-remote'; return its hash or None."""
        # add * to grab ^{}
        refs = {}
        ls_remote_cmd = "git ls-remote -q --tags {} {}*".format(
            repo.url, tag)
        output, _ = bb.process.run(ls_remote_cmd)
        output = output.strip().split('\n')
        for line in output:
            f = line.split(maxsplit=1)
            if len(f) != 2:
                continue
            for prefix in ["HEAD", "refs/heads/", "refs/tags/"]:
                if f[1].startswith(prefix):
                    refs[f[1][len(prefix):]] = f[0]
        # Dereference annotated tags ("tag^{}" entries point at the commit)
        # FIX: iterate over a copy - adding keys while iterating the dict
        # itself raised RuntimeError
        for key, hash in list(refs.items()):
            if key.endswith(r"^{}"):
                refs[key.strip(r"^{}")] = hash
        # FIX: use .get() so a missing tag yields None (enabling the
        # full-history fallback below) instead of raising KeyError
        return refs.get(tag)

    m_pseudo_semver = re_pseudo_semver.match(version)

    if m_pseudo_semver:
        remote_refs = vcs_fetch_all()
        short_commit = m_pseudo_semver.group('commithash')
        for l in remote_refs:
            r = l.split(maxsplit=1)
            sha1 = r[0] if len(r) else None
            if not sha1:
                logger.error(
                    "Ups: could not resolve abbref commit for %s" % short_commit)
            elif sha1.startswith(short_commit):
                hash = sha1
                break
    else:
        m_semver = re_semver.match(version)
        if m_semver:
            def get_sha1_remote(pattern):
                """Return the hash whose ref decoration matches *pattern*.

                FIX: parameter renamed from 're', which shadowed the re
                module within this scope.
                """
                rsha1 = None
                for line in remote_refs:
                    # Split lines of the following format:
                    # 22e90d9b964610628c10f673ca5f85b8c2a2ca9a (tag: sometag)
                    lineparts = line.split(maxsplit=1)
                    sha1 = lineparts[0] if len(lineparts) else None
                    refstring = lineparts[1] if len(
                        lineparts) == 2 else None
                    if refstring:
                        # Normalize tag string and split in case of multiple
                        # regs e.g. (tag: speech/v1.10.0, tag: orchestration/v1.5.0 ...)
                        refs = refstring.strip('(), ').split(',')
                        for ref in refs:
                            if pattern.match(ref.strip()):
                                rsha1 = sha1
                return rsha1

            semver = "v" + m_semver.group('major') + "."\
                + m_semver.group('minor') + "."\
                + m_semver.group('patch') \
                + (("-" + m_semver.group('prerelease'))
                   if m_semver.group('prerelease') else "")

            tag = os.path.join(
                coderoot.codeDir, semver) if coderoot.codeDir else semver

            # probe tag using 'ls-remote', which is faster than fetching
            # complete history
            hash = vcs_fetch_remote(tag)
            if not hash:
                # backup: fetch complete history
                remote_refs = vcs_fetch_all()
                hash = get_sha1_remote(
                    re.compile(fr"(tag:|HEAD ->) ({tag})"))

            logger.debug(
                "Resolving commit for tag '%s' -> '%s'", tag, hash)
    return hash
def __generate_srcuri_inline_fcn(self, path, version, replaces=None):
    """Generate SRC_URI functions for go imports

    Returns an inline-python ${@go_src_uri(...)} expression plus the
    resolved commit hash for the given module path/version.
    """
    logger.info("Resolving repository for module %s", path)
    # First try to resolve repo and commit from golang proxy
    # Most info is already there and we don't have to go through the
    # repository or even perform the version resolve magic
    golang_proxy_info = self.__resolve_from_golang_proxy(path, version)
    if golang_proxy_info:
        repo, commit = golang_proxy_info
    else:
        # Fallback
        # Resolve repository by 'hand'
        repo = self.__resolve_repository(path)
        commit = self.__resolve_version(repo, path, version)

    parsed = urllib.parse.urlparse(repo.url)
    repo_url = parsed.netloc + parsed.path

    coderoot = self.__build_coderepo(repo, path)

    # Assemble the argument list; optional keywords only when they deviate
    # from the defaults
    args = ["'%s'" % repo_url, "'%s'" % version]
    if repo_url != path:
        args.append("path='%s'" % path)
    if coderoot.codeDir:
        args.append("subdir='%s'" % coderoot.codeDir)
    if repo.vcs != 'git':
        args.append("vcs='%s'" % repo.vcs)
    if replaces:
        args.append("replaces='%s'" % replaces)
    if coderoot.pathMajor:
        args.append("pathmajor='%s'" % coderoot.pathMajor)

    inline_fcn = "${@go_src_uri(" + ",".join(args) + ")}"
    return inline_fcn, commit
def __go_handle_dependencies(self, go_mod, srctree, localfilesdir, extravalues, d):
    """Write the $PN-modules.inc file with SRC_URI entries and SRCREVs
    for every dependency listed in the parsed 'go mod edit -json' output,
    honouring any replace directives first.
    """

    import re
    src_uris = []
    src_revs = []

    def generate_src_rev(path, version, commithash):
        # One SRCREV assignment per module, preceded by a traceability
        # comment mapping module@version to the resolved commit
        src_rev = f"# {path}@{version} => {commithash}\n"
        # Ups...maybe someone manipulated the source repository and the
        # version or commit could not be resolved. This is a sign of
        # a) the supply chain was manipulated (bad)
        # b) the implementation for the version resolving didn't work
        # anymore (less bad)
        if not commithash:
            src_rev += f"#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
            src_rev += f"#!!!   Could not resolve version  !!!\n"
            src_rev += f"#!!! Possible supply chain attack !!!\n"
            src_rev += f"#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
        src_rev += f"SRCREV_{path.replace('/', '.')} = \"{commithash}\""

        return src_rev

    # we first go over replacement list, because we are essentially
    # interested only in the replaced path
    if go_mod['Replace']:
        for replacement in go_mod['Replace']:
            oldpath = replacement['Old']['Path']
            path = replacement['New']['Path']
            version = ''
            if 'Version' in replacement['New']:
                version = replacement['New']['Version']

            if os.path.exists(os.path.join(srctree, path)):
                # the module refers to the local path, remove it from requirement list
                # because it's a local module
                go_mod['Require'][:] = [v for v in go_mod['Require'] if v.get('Path') != oldpath]
            else:
                # Replace the path and the version, so we don't iterate replacement list anymore
                for require in go_mod['Require']:
                    if require['Path'] == oldpath:
                        require.update({'Path': path, 'Version': version})
                        break

    for require in go_mod['Require']:
        path = require['Path']
        version = require['Version']

        inline_fcn, commithash = self.__generate_srcuri_inline_fcn(
            path, version)
        src_uris.append(inline_fcn)
        src_revs.append(generate_src_rev(path, version, commithash))

    # strip version part from module URL /vXX
    baseurl = re.sub(r'/v(\d+)$', '', go_mod['Module']['Path'])
    pn, _ = determine_from_url(baseurl)
    go_mods_basename = "%s-modules.inc" % pn

    go_mods_filename = os.path.join(localfilesdir, go_mods_basename)
    with open(go_mods_filename, "w") as f:
        # We introduce this indirection to make the tests a little easier
        f.write("SRC_URI += \"${GO_DEPENDENCIES_SRC_URI}\"\n")
        f.write("GO_DEPENDENCIES_SRC_URI = \"\\\n")
        for uri in src_uris:
            f.write(" " + uri + " \\\n")
        f.write("\"\n\n")
        for rev in src_revs:
            f.write(rev + "\n")

    extravalues['extrafiles'][go_mods_basename] = go_mods_filename
def __go_run_cmd(self, cmd, cwd, d):
    """Run *cmd* in *cwd* with the datastore's PATH prepended to the
    environment; returns the (stdout, stderr) pair from bb.process.run."""
    env = dict(os.environ, PATH=d.getVar('PATH'))
    return bb.process.run(cmd, env=env, shell=True, cwd=cwd)
def __go_native_version(self, d):
    """Return the (major, minor, patch) version of the native go binary."""
    stdout, _ = self.__go_run_cmd("go version", None, d)
    match = re.match(r".*\sgo((\d+).(\d+).(\d+))\s([\w\/]*)", stdout)
    return int(match.group(2)), int(match.group(3)), int(match.group(4))
def __go_mod_patch(self, srctree, localfilesdir, extravalues, d):
    """Upgrade go.mod to the native go version and capture the change.

    Runs 'go mod tidy' against the native go's version, captures the
    resulting go.mod delta as go.mod.patch in localfilesdir (registering
    it as an extra file), restores the work tree, and returns the parsed
    module data together with the patch file name.
    """
    patchfilename = "go.mod.patch"
    major, minor, _ = self.__go_native_version(d)
    self.__go_run_cmd("go mod tidy -go=%d.%d" % (major, minor), srctree, d)
    stdout, _ = self.__go_run_cmd("go mod edit -json", srctree, d)

    # Create patch in order to upgrade go version
    self.__go_run_cmd("git diff go.mod > %s" % (patchfilename), srctree, d)
    # Restore original state
    self.__go_run_cmd("git checkout HEAD go.mod go.sum", srctree, d)

    go_mod = json.loads(stdout)
    tmpfile = os.path.join(localfilesdir, patchfilename)
    shutil.move(os.path.join(srctree, patchfilename), tmpfile)
    extravalues['extrafiles'][patchfilename] = tmpfile

    return go_mod, patchfilename
def __go_mod_vendor(self, go_mod, srctree, localfilesdir, extravalues, d):
    """Vendor the module's dependencies to collect modules.txt plus the
    vendored license texts, writing modules.txt and $PN-licenses.inc into
    localfilesdir and registering both as extra files."""
    # Perform vendoring to retrieve the correct modules.txt
    vendordir = tempfile.mkdtemp()

    # -v causes to go to print modules.txt to stderr
    _, stderr = self.__go_run_cmd(
        "go mod vendor -v -o %s" % (vendordir), srctree, d)

    modules_txt_basename = "modules.txt"
    modules_txt_filename = os.path.join(localfilesdir, modules_txt_basename)
    with open(modules_txt_filename, "w") as f:
        f.write(stderr)
    extravalues['extrafiles'][modules_txt_basename] = modules_txt_filename

    licenses = []
    lic_files_chksum = []
    licvalues = guess_license(vendordir, d)
    shutil.rmtree(vendordir)

    for licvalue in (licvalues or []):
        new_lics = [lic for lic in tidy_licenses(fixup_license(licvalue[0]))
                    if lic not in licenses]
        if new_lics:
            licenses.extend(new_lics)
        lic_files_chksum.append(
            'file://src/${GO_IMPORT}/vendor/%s;md5=%s' % (licvalue[1], licvalue[2]))

    # strip version part from module URL /vXX
    baseurl = re.sub(r'/v(\d+)$', '', go_mod['Module']['Path'])
    pn, _ = determine_from_url(baseurl)
    licenses_basename = "%s-licenses.inc" % pn

    licenses_filename = os.path.join(localfilesdir, licenses_basename)
    with open(licenses_filename, "w") as f:
        f.write("GO_MOD_LICENSES = \"%s\"\n\n" %
                ' & '.join(sorted(licenses, key=str.casefold)))
        # We introduce this indirection to make the tests a little easier
        f.write("LIC_FILES_CHKSUM += \"${VENDORED_LIC_FILES_CHKSUM}\"\n")
        f.write("VENDORED_LIC_FILES_CHKSUM = \"\\\n")
        for lic in lic_files_chksum:
            f.write(" " + lic + " \\\n")
        f.write("\"\n")
    extravalues['extrafiles'][licenses_basename] = licenses_filename
    def process(self, srctree, classes, lines_before,
                lines_after, handled, extravalues):
        """Create a Go recipe skeleton for a source tree containing go.mod.

        Returns False when another handler already claimed the build system
        or no go.mod is present; otherwise marks the build system handled,
        performs vendoring/license handling and appends the generated
        variable assignments to lines_before.
        """
        if 'buildsystem' in handled:
            return False
        files = RecipeHandler.checkfiles(srctree, ['go.mod'])
        if not files:
            return False
        d = bb.data.createCopy(tinfoil.config_data)
        go_bindir = self.__ensure_go()
        if not go_bindir:
            # a usable native 'go' is mandatory for everything below
            sys.exit(14)
        d.prependVar('PATH', '%s:' % go_bindir)
        handled.append('buildsystem')
        classes.append("go-vendor")
        stdout, _ = self.__go_run_cmd("go mod edit -json", srctree, d)
        go_mod = json.loads(stdout)
        go_import = go_mod['Module']['Path']  # NOTE(review): assigned but unused below; baseurl is used instead
        # extract major.minor from the go.mod "go" directive
        go_version_match = re.match("([0-9]+).([0-9]+)", go_mod['Go'])
        go_version_major = int(go_version_match.group(1))
        go_version_minor = int(go_version_match.group(2))
        src_uris = []
        localfilesdir = tempfile.mkdtemp(prefix='recipetool-go-')
        extravalues.setdefault('extrafiles', {})
        # Use an explicit name determined from the module name because it
        # might differ from the actual URL for replaced modules
        # strip version part from module URL /vXX
        baseurl = re.sub(r'/v(\d+)$', '', go_mod['Module']['Path'])
        pn, _ = determine_from_url(baseurl)
        # go.mod files with version < 1.17 may not include all indirect
        # dependencies. Thus, we have to upgrade the go version.
        if go_version_major == 1 and go_version_minor < 17:
            logger.warning(
                "go.mod files generated by Go < 1.17 might have incomplete indirect dependencies.")
            go_mod, patchfilename = self.__go_mod_patch(srctree, localfilesdir,
                                                       extravalues, d)
            # NOTE(review): src_uris is filled here but never consumed in this
            # method — verify the go.mod upgrade patch actually reaches SRC_URI
            src_uris.append(
                "file://%s;patchdir=src/${GO_IMPORT}" % (patchfilename))
        # Check whether the module is vendored. If so, we have nothing to do.
        # Otherwise we gather all dependencies and add them to the recipe
        if not os.path.exists(os.path.join(srctree, "vendor")):
            # Write additional $BPN-modules.inc file
            self.__go_mod_vendor(go_mod, srctree, localfilesdir, extravalues, d)
            lines_before.append("LICENSE += \" & ${GO_MOD_LICENSES}\"")
            lines_before.append("require %s-licenses.inc" % (pn))
            self.__rewrite_src_uri(lines_before, ["file://modules.txt"])
            self.__go_handle_dependencies(go_mod, srctree, localfilesdir, extravalues, d)
            lines_before.append("require %s-modules.inc" % (pn))
        # Do generic license handling
        handle_license_vars(srctree, lines_before, handled, extravalues, d)
        self.__rewrite_lic_uri(lines_before)
        lines_before.append("GO_IMPORT = \"{}\"".format(baseurl))
        lines_before.append("SRCREV_FORMAT = \"${BPN}\"")
def __update_lines_before(self, updated, newlines, lines_before):
if updated:
del lines_before[:]
for line in newlines:
# Hack to avoid newlines that edit_metadata inserts
if line.endswith('\n'):
line = line[:-1]
lines_before.append(line)
return updated
    def __rewrite_lic_uri(self, lines_before):
        """Rewrite LIC_FILES_CHKSUM entries in the generated recipe so the
        license file paths point below src/${GO_IMPORT}/ in the unpacked
        source tree. Returns whether anything was updated."""
        def varfunc(varname, origvalue, op, newlines):
            if varname == 'LIC_FILES_CHKSUM':
                new_licenses = []
                licenses = origvalue.split('\\')
                for license in licenses:
                    if not license:
                        logger.warning("No license file was detected for the main module!")
                        # the license list of the main recipe must be empty
                        # this can happen for example in case of CLOSED license
                        # Fall through to complete recipe generation
                        continue
                    license = license.strip()
                    uri, chksum = license.split(';', 1)
                    url = urllib.parse.urlparse(uri)
                    # keep the scheme and the ;md5=... suffix, insert the
                    # src/${GO_IMPORT} prefix before the original path
                    new_uri = os.path.join(
                        url.scheme + "://", "src", "${GO_IMPORT}", url.netloc + url.path) + ";" + chksum
                    new_licenses.append(new_uri)
                return new_licenses, None, -1, True
            return origvalue, None, 0, True
        updated, newlines = bb.utils.edit_metadata(
            lines_before, ['LIC_FILES_CHKSUM'], varfunc)
        return self.__update_lines_before(updated, newlines, lines_before)
def __rewrite_src_uri(self, lines_before, additional_uris = []):
def varfunc(varname, origvalue, op, newlines):
if varname == 'SRC_URI':
src_uri = ["git://${GO_IMPORT};destsuffix=git/src/${GO_IMPORT};nobranch=1;name=${BPN};protocol=https"]
src_uri.extend(additional_uris)
return src_uri, None, -1, True
return origvalue, None, 0, True
updated, newlines = bb.utils.edit_metadata(lines_before, ['SRC_URI'], varfunc)
return self.__update_lines_before(updated, newlines, lines_before)
def register_recipe_handlers(handlers):
    """Register the Go recipe handler (priority 60, before generic handlers)."""
    go_handler = GoRecipeHandler()
    handlers.append((go_handler, 60))

View File

@@ -0,0 +1,89 @@
# Recipe creation tool - kernel support plugin
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import re
import logging
from recipetool.create import RecipeHandler, read_pkgconfig_provides, validate_pv
logger = logging.getLogger('recipetool')
tinfoil = None
def tinfoil_init(instance):
    """Receive the shared tinfoil instance from the recipetool frontend."""
    global tinfoil
    tinfoil = instance
class KernelRecipeHandler(RecipeHandler):
    """Create recipes for kernel source trees based on the
    meta-skeleton linux-yocto-custom.bb template."""
    def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
        """Detect a kernel source tree and generate recipe lines from the
        linux-yocto-custom template, filling in SRCREV, LINUX_VERSION and
        COMPATIBLE_MACHINE. Returns True when handled, False otherwise."""
        import bb.process
        if 'buildsystem' in handled:
            return False
        # All of these paths must exist for the tree to be treated as a kernel
        for tell in ['arch', 'firmware', 'Kbuild', 'Kconfig']:
            if not os.path.exists(os.path.join(srctree, tell)):
                return False
        handled.append('buildsystem')
        # The template supplies the whole recipe, so drop anything other
        # handlers might have produced
        del lines_after[:]
        del classes[:]
        template = os.path.join(tinfoil.config_data.getVar('COREBASE'), 'meta-skeleton', 'recipes-kernel', 'linux', 'linux-yocto-custom.bb')
        def handle_var(varname, origvalue, op, newlines):
            # Callback for bb.utils.edit_metadata: substitutes concrete
            # values for the template's placeholder variables
            if varname in ['SRCREV', 'SRCREV_machine']:
                # drop the template's explanatory comment lines above the var
                # NOTE(review): assumes newlines is non-empty and the comment
                # run terminates — IndexError if the template changes shape
                while newlines[-1].startswith('#'):
                    del newlines[-1]
                try:
                    stdout, _ = bb.process.run('git rev-parse HEAD', cwd=srctree, shell=True)
                except bb.process.ExecutionError as e:
                    # not a git tree (or no commits); leave the template value
                    stdout = None
                if stdout:
                    return stdout.strip(), op, 0, True
            elif varname == 'LINUX_VERSION':
                # Parse VERSION/PATCHLEVEL/SUBLEVEL/EXTRAVERSION from the top
                # of the kernel Makefile
                makefile = os.path.join(srctree, 'Makefile')
                if os.path.exists(makefile):
                    kversion = -1
                    kpatchlevel = -1
                    ksublevel = -1
                    kextraversion = ''
                    with open(makefile, 'r', errors='surrogateescape') as f:
                        for i, line in enumerate(f):
                            if i > 10:
                                # the version fields appear in the first lines
                                break
                            if line.startswith('VERSION ='):
                                kversion = int(line.split('=')[1].strip())
                            elif line.startswith('PATCHLEVEL ='):
                                kpatchlevel = int(line.split('=')[1].strip())
                            elif line.startswith('SUBLEVEL ='):
                                ksublevel = int(line.split('=')[1].strip())
                            elif line.startswith('EXTRAVERSION ='):
                                kextraversion = line.split('=')[1].strip()
                    version = ''
                    if kversion > -1 and kpatchlevel > -1:
                        version = '%d.%d' % (kversion, kpatchlevel)
                        if ksublevel > -1:
                            version += '.%d' % ksublevel
                        version += kextraversion
                    if version:
                        return version, op, 0, True
            elif varname == 'SRC_URI':
                # strip the template comments; keep the template value itself
                while newlines[-1].startswith('#'):
                    del newlines[-1]
            elif varname == 'COMPATIBLE_MACHINE':
                while newlines[-1].startswith('#'):
                    del newlines[-1]
                machine = tinfoil.config_data.getVar('MACHINE')
                return machine, op, 0, True
            return origvalue, op, 0, True
        with open(template, 'r') as f:
            varlist = ['SRCREV', 'SRCREV_machine', 'SRC_URI', 'LINUX_VERSION', 'COMPATIBLE_MACHINE']
            (_, newlines) = bb.utils.edit_metadata(f, varlist, handle_var)
        lines_before[:] = [line.rstrip('\n') for line in newlines]
        return True
def register_recipe_handlers(handlers):
    """Register the kernel handler with high priority so it wins over generic handlers."""
    kernel_handler = KernelRecipeHandler()
    handlers.append((kernel_handler, 100))

View File

@@ -0,0 +1,142 @@
# Recipe creation tool - kernel module support plugin
#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import re
import logging
from recipetool.create import RecipeHandler, read_pkgconfig_provides, validate_pv
logger = logging.getLogger('recipetool')
tinfoil = None
def tinfoil_init(instance):
    """Receive the shared tinfoil instance from the recipetool frontend."""
    global tinfoil
    tinfoil = instance
class KernelModuleRecipeHandler(RecipeHandler):
    """Create recipes for out-of-tree kernel modules (module.bbclass based)."""
    def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
        """Detect an out-of-tree kernel module source tree and emit the
        tweaks module.bbclass needs on top of its defaults.

        Returns True when handled, False otherwise.
        """
        import bb.process
        if 'buildsystem' in handled:
            return False

        module_inc_re = re.compile(r'^#include\s+<linux/module.h>$')
        makefiles = []
        is_module = False

        files = RecipeHandler.checkfiles(srctree, ['*.c', '*.h'], recursive=True, excludedirs=['contrib', 'test', 'examples'])
        if files:
            for cfile in files:
                # Look in same dir or parent for Makefile
                for makefile in [os.path.join(os.path.dirname(cfile), 'Makefile'), os.path.join(os.path.dirname(os.path.dirname(cfile)), 'Makefile')]:
                    if makefile in makefiles:
                        break
                    else:
                        if os.path.exists(makefile):
                            makefiles.append(makefile)
                            break
                else:
                    # no Makefile near this source file; try the next one
                    continue
                # a source file including <linux/module.h> marks the tree as
                # a kernel module
                with open(cfile, 'r', errors='surrogateescape') as f:
                    for line in f:
                        if module_inc_re.match(line.strip()):
                            is_module = True
                            break
                if is_module:
                    break
        if is_module:
            classes.append('module')
            handled.append('buildsystem')
            # module.bbclass and the classes it inherits do most of the hard
            # work, but we need to tweak it slightly depending on what the
            # Makefile does (and there is a range of those)
            # Check the makefile for the appropriate install target
            install_lines = []
            compile_lines = []
            in_install = False
            in_compile = False
            install_target = None
            with open(makefile, 'r', errors='surrogateescape') as f:
                for line in f:
                    if line.startswith('install:'):
                        if not install_lines:
                            in_install = True
                            install_target = 'install'
                    elif line.startswith('modules_install:'):
                        # modules_install is preferred over a plain install
                        install_lines = []
                        in_install = True
                        install_target = 'modules_install'
                    elif line.startswith('modules:'):
                        compile_lines = []
                        in_compile = True
                    elif line.startswith(('all:', 'default:')):
                        if not compile_lines:
                            in_compile = True
                    elif line:
                        if line[0] == '\t':
                            # recipe line belonging to the current target
                            if in_install:
                                install_lines.append(line)
                            elif in_compile:
                                compile_lines.append(line)
                        elif ':' in line:
                            # a new target starts; stop collecting
                            in_install = False
                            in_compile = False

            def check_target(lines, install):
                """Scan target recipe lines for a make -C kernel dir and for
                manual install of .ko files."""
                kdirpath = ''
                manual_install = False
                for line in lines:
                    splitline = line.split()
                    if not splitline:
                        # whitespace-only recipe line; nothing to inspect
                        continue
                    if splitline[0] in ['make', 'gmake', '$(MAKE)']:
                        if '-C' in splitline:
                            idx = splitline.index('-C') + 1
                            if idx < len(splitline):
                                kdirpath = splitline[idx]
                                break
                    elif install and splitline[0] == 'install':
                        if '.ko' in line:
                            manual_install = True
                return kdirpath, manual_install

            kdirpath = None
            manual_install = False
            if install_lines:
                kdirpath, manual_install = check_target(install_lines, install=True)
            if compile_lines and not kdirpath:
                kdirpath, _ = check_target(compile_lines, install=False)

            if manual_install or not install_lines:
                lines_after.append('EXTRA_OEMAKE:append:task-install = " -C ${STAGING_KERNEL_DIR} M=${S}"')
            elif install_target and install_target != 'modules_install':
                lines_after.append('MODULES_INSTALL_TARGET = "install"')

            warnmsg = None
            kdirvar = None
            if kdirpath:
                res = re.match(r'\$\(([^$)]+)\)', kdirpath)
                if res:
                    kdirvar = res.group(1)
                    if kdirvar != 'KERNEL_SRC':
                        lines_after.append('EXTRA_OEMAKE += "%s=${STAGING_KERNEL_DIR}"' % kdirvar)
                elif kdirpath.startswith('/lib/'):
                    warnmsg = 'Kernel path in install makefile is hardcoded - you will need to patch the makefile'
            if not kdirvar and not warnmsg:
                warnmsg = 'Unable to find means of passing kernel path into install makefile - if kernel path is hardcoded you will need to patch the makefile'
            if warnmsg:
                warnmsg += '. Note that the variable KERNEL_SRC will be passed in as the kernel source path.'
                logger.warning(warnmsg)
                lines_after.append('# %s' % warnmsg)
            return True
        return False
def register_recipe_handlers(handlers):
    """Register the kernel module handler with a low priority."""
    kmod_handler = KernelModuleRecipeHandler()
    handlers.append((kmod_handler, 15))

View File

@@ -0,0 +1,310 @@
# Copyright (C) 2016 Intel Corporation
# Copyright (C) 2020 Savoir-Faire Linux
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Recipe creation tool - npm module support plugin"""
import json
import logging
import os
import re
import sys
import tempfile
import bb
from bb.fetch2.npm import NpmEnvironment
from bb.fetch2.npm import npm_package
from bb.fetch2.npmsw import foreach_dependencies
from recipetool.create import RecipeHandler
from recipetool.create import get_license_md5sums
from recipetool.create import guess_license
from recipetool.create import split_pkg_licenses
logger = logging.getLogger('recipetool')
TINFOIL = None
def tinfoil_init(instance):
    """Initialize tinfoil"""
    # Called by the recipetool frontend to hand over its tinfoil instance
    global TINFOIL
    TINFOIL = instance
class NpmRecipeHandler(RecipeHandler):
    """Class to handle the npm recipe creation"""

    @staticmethod
    def _get_registry(lines):
        """Get the registry value from the 'npm://registry' url"""
        registry = None

        def _handle_registry(varname, origvalue, op, newlines):
            nonlocal registry
            if origvalue.startswith("npm://"):
                registry = re.sub(r"^npm://", "http://", origvalue.split(";")[0])
            return origvalue, None, 0, True

        bb.utils.edit_metadata(lines, ["SRC_URI"], _handle_registry)

        return registry

    @staticmethod
    def _ensure_npm():
        """Check if the 'npm' command is available in the recipes"""
        if not TINFOIL.recipes_parsed:
            TINFOIL.parse_recipes()

        try:
            d = TINFOIL.parse_recipe("nodejs-native")
        except bb.providers.NoProvider:
            bb.error("Nothing provides 'nodejs-native' which is required for the build")
            bb.note("You will likely need to add a layer that provides nodejs")
            sys.exit(14)

        bindir = d.getVar("STAGING_BINDIR_NATIVE")
        npmpath = os.path.join(bindir, "npm")

        if not os.path.exists(npmpath):
            # build it into the recipe sysroot on demand
            TINFOIL.build_targets("nodejs-native", "addto_recipe_sysroot")

            if not os.path.exists(npmpath):
                bb.error("Failed to add 'npm' to sysroot")
                sys.exit(14)

        return bindir

    @staticmethod
    def _npm_global_configs(dev):
        """Get the npm global configuration"""
        configs = []

        if dev:
            configs.append(("also", "development"))
        else:
            configs.append(("only", "production"))

        configs.append(("save", "false"))
        configs.append(("package-lock", "false"))
        configs.append(("shrinkwrap", "false"))

        return configs

    def _run_npm_install(self, d, srctree, registry, dev):
        """Run the 'npm install' command without building the addons"""
        configs = self._npm_global_configs(dev)
        configs.append(("ignore-scripts", "true"))

        if registry:
            configs.append(("registry", registry))

        bb.utils.remove(os.path.join(srctree, "node_modules"), recurse=True)

        env = NpmEnvironment(d, configs=configs)
        env.run("npm install", workdir=srctree)

    def _generate_shrinkwrap(self, d, srctree, dev):
        """Check and generate the 'npm-shrinkwrap.json' file if needed"""
        configs = self._npm_global_configs(dev)
        env = NpmEnvironment(d, configs=configs)
        env.run("npm shrinkwrap", workdir=srctree)

        return os.path.join(srctree, "npm-shrinkwrap.json")

    def _handle_licenses(self, srctree, shrinkwrap_file, dev):
        """Return the extra license files and the list of packages"""
        licfiles = []
        packages = {}

        # Handle the parent package
        packages["${PN}"] = ""

        def _licfiles_append_fallback_readme_files(destdir):
            """Append README files as fallback to license files if a license files is missing"""
            fallback = True
            readmes = []
            basedir = os.path.join(srctree, destdir)
            for fn in os.listdir(basedir):
                upper = fn.upper()
                if upper.startswith("README"):
                    fullpath = os.path.join(basedir, fn)
                    readmes.append(fullpath)
                if upper.startswith("COPYING") or "LICENCE" in upper or "LICENSE" in upper:
                    fallback = False
            if fallback:
                for readme in readmes:
                    licfiles.append(os.path.relpath(readme, srctree))

        # Handle the dependencies
        def _handle_dependency(name, params, destdir):
            deptree = destdir.split('node_modules/')
            suffix = "-".join([npm_package(dep) for dep in deptree])
            packages["${PN}" + suffix] = destdir
            _licfiles_append_fallback_readme_files(destdir)

        with open(shrinkwrap_file, "r") as f:
            shrinkwrap = json.load(f)

        foreach_dependencies(shrinkwrap, _handle_dependency, dev)

        return licfiles, packages

    # Handle the peer dependencies
    def _handle_peer_dependency(self, shrinkwrap_file):
        """Check if package has peer dependencies and show warning if it is the case"""
        with open(shrinkwrap_file, "r") as f:
            shrinkwrap = json.load(f)

        packages = shrinkwrap.get("packages", {})
        peer_deps = packages.get("", {}).get("peerDependencies", {})

        for peer_dep in peer_deps:
            peer_dep_yocto_name = npm_package(peer_dep)
            bb.warn(peer_dep + " is a peer dependency of the actual package. " +
                "Please add this peer dependency to the RDEPENDS variable as %s and generate its recipe with devtool"
                % peer_dep_yocto_name)

    def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
        """Handle the npm recipe creation"""
        if "buildsystem" in handled:
            return False

        files = RecipeHandler.checkfiles(srctree, ["package.json"])
        if not files:
            return False

        with open(files[0], "r") as f:
            data = json.load(f)

        if "name" not in data or "version" not in data:
            return False

        extravalues["PN"] = npm_package(data["name"])
        extravalues["PV"] = data["version"]

        if "description" in data:
            extravalues["SUMMARY"] = data["description"]

        if "homepage" in data:
            extravalues["HOMEPAGE"] = data["homepage"]

        dev = bb.utils.to_boolean(str(extravalues.get("NPM_INSTALL_DEV", "0")), False)
        registry = self._get_registry(lines_before)

        bb.note("Checking if npm is available ...")
        # The native npm is used here (and not the host one) to ensure that the
        # npm version is high enough to ensure an efficient dependency tree
        # resolution and avoid issue with the shrinkwrap file format.
        # Moreover the native npm is mandatory for the build.
        bindir = self._ensure_npm()

        d = bb.data.createCopy(TINFOIL.config_data)
        d.prependVar("PATH", bindir + ":")
        d.setVar("S", srctree)

        bb.note("Generating shrinkwrap file ...")
        # To generate the shrinkwrap file the dependencies have to be installed
        # first. During the generation process some files may be updated /
        # deleted. By default devtool tracks the diffs in the srctree and raises
        # errors when finishing the recipe if some diffs are found.
        git_exclude_file = os.path.join(srctree, ".git", "info", "exclude")
        if os.path.exists(git_exclude_file):
            with open(git_exclude_file, "r+") as f:
                # strip trailing newlines so the membership test actually
                # matches existing entries (readlines() keeps the '\n',
                # which previously caused duplicate entries on every run)
                lines = [line.rstrip("\n") for line in f.readlines()]
                for line in ["/node_modules/", "/npm-shrinkwrap.json"]:
                    if line not in lines:
                        f.write(line + "\n")

        lock_file = os.path.join(srctree, "package-lock.json")
        lock_copy = lock_file + ".copy"
        if os.path.exists(lock_file):
            bb.utils.copyfile(lock_file, lock_copy)

        self._run_npm_install(d, srctree, registry, dev)
        shrinkwrap_file = self._generate_shrinkwrap(d, srctree, dev)

        with open(shrinkwrap_file, "r") as f:
            shrinkwrap = json.load(f)

        if os.path.exists(lock_copy):
            bb.utils.movefile(lock_copy, lock_file)

        # Add the shrinkwrap file as 'extrafiles'
        shrinkwrap_copy = shrinkwrap_file + ".copy"
        bb.utils.copyfile(shrinkwrap_file, shrinkwrap_copy)
        extravalues.setdefault("extrafiles", {})
        extravalues["extrafiles"]["npm-shrinkwrap.json"] = shrinkwrap_copy

        url_local = "npmsw://%s" % shrinkwrap_file
        url_recipe = "npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json"

        if dev:
            url_local += ";dev=1"
            url_recipe += ";dev=1"

        # Add the npmsw url in the SRC_URI of the generated recipe
        def _handle_srcuri(varname, origvalue, op, newlines):
            """Update the version value and add the 'npmsw://' url"""
            value = origvalue.replace("version=" + data["version"], "version=${PV}")
            value = value.replace("version=latest", "version=${PV}")
            values = [line.strip() for line in value.strip('\n').splitlines()]
            if "dependencies" in shrinkwrap.get("packages", {}).get("", {}):
                values.append(url_recipe)
            return values, None, 4, False

        (_, newlines) = bb.utils.edit_metadata(lines_before, ["SRC_URI"], _handle_srcuri)
        lines_before[:] = [line.rstrip('\n') for line in newlines]

        # In order to generate correct licence checksums in the recipe the
        # dependencies have to be fetched again using the npmsw url
        bb.note("Fetching npm dependencies ...")
        bb.utils.remove(os.path.join(srctree, "node_modules"), recurse=True)
        fetcher = bb.fetch2.Fetch([url_local], d)
        fetcher.download()
        fetcher.unpack(srctree)

        bb.note("Handling licences ...")
        (licfiles, packages) = self._handle_licenses(srctree, shrinkwrap_file, dev)

        def _guess_odd_license(licfiles):
            import bb

            md5sums = get_license_md5sums(d, linenumbers=True)

            chksums = []
            licenses = []
            for licfile in licfiles:
                f = os.path.join(srctree, licfile)
                md5value = bb.utils.md5_file(f)
                (license, beginline, endline, md5) = md5sums.get(md5value,
                    (None, "", "", ""))
                if not license:
                    license = "Unknown"
                    logger.info("Please add the following line for '%s' to a "
                        "'lib/recipetool/licenses.csv' and replace `Unknown`, "
                        "`X`, `Y` and `MD5` with the license, begin line, "
                        "end line and partial MD5 checksum:\n" \
                        "%s,Unknown,X,Y,MD5" % (licfile, md5value))
                chksums.append("file://%s%s%s;md5=%s" % (licfile,
                    ";beginline=%s" % (beginline) if beginline else "",
                    ";endline=%s" % (endline) if endline else "",
                    md5 if md5 else md5value))
                licenses.append((license, licfile, md5value))
            return (licenses, chksums)

        (licenses, extravalues["LIC_FILES_CHKSUM"]) = _guess_odd_license(licfiles)
        split_pkg_licenses([*licenses, *guess_license(srctree, d)], packages, lines_after)

        classes.append("npm")
        handled.append("buildsystem")

        # Check if package has peer dependencies and inform the user
        self._handle_peer_dependency(shrinkwrap_file)

        return True
def register_recipe_handlers(handlers):
    """Register the npm handler"""
    npm_handler = NpmRecipeHandler()
    handlers.append((npm_handler, 60))

View File

@@ -0,0 +1,44 @@
# Recipe creation tool - edit plugin
#
# This sub-command edits the recipe and appends for the specified target
#
# Example: recipetool edit busybox
#
# Copyright (C) 2018 Mentor Graphics Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import argparse
import errno
import logging
import os
import re
import subprocess
import sys
import scriptutils
logger = logging.getLogger('recipetool')
tinfoil = None
def tinfoil_init(instance):
    """Receive the shared tinfoil instance from the recipetool frontend."""
    global tinfoil
    tinfoil = instance
def edit(args):
    """Open the target's recipe and all of its bbappends in the user's editor.

    Returns the editor's exit status (via scriptutils.run_editor).
    """
    import oe.recipeutils
    recipe_path = tinfoil.get_recipe_file(args.target)
    files = [recipe_path]
    files.extend(tinfoil.get_file_appends(recipe_path))
    return scriptutils.run_editor(files, logger)
def register_commands(subparsers):
    """Register the 'edit' subcommand."""
    parser = subparsers.add_parser(
        'edit',
        help='Edit the recipe and appends for the specified target. This obeys $VISUAL if set, otherwise $EDITOR, otherwise vi.')
    parser.add_argument('target', help='Target recipe/provide to edit')
    parser.set_defaults(func=edit, parserecipes=True)

View File

@@ -0,0 +1,37 @@
0636e73ff0215e8d672dc4c32c317bb3,GPL-2.0-only
12f884d2ae1ff87c09e5b7ccc2c4ca7e,GPL-2.0-only
18810669f13b87348459e611d31ab760,GPL-2.0-only
252890d9eee26aab7b432e8b8a616475,LGPL-2.0-only
2d5025d4aa3495befef8f17206a5b0a1,LGPL-2.1-only
3214f080875748938ba060314b4f727d,LGPL-2.0-only
385c55653886acac3821999a3ccd17b3,Artistic-1.0 | GPL-2.0-only
393a5ca445f6965873eca0259a17f833,GPL-2.0-only
3b83ef96387f14655fc854ddc3c6bd57,Apache-2.0
3bf50002aefd002f49e7bb854063f7e7,LGPL-2.0-only
4325afd396febcb659c36b49533135d4,GPL-2.0-only
4fbd65380cdd255951079008b364516c,LGPL-2.1-only
54c7042be62e169199200bc6477f04d1,BSD-3-Clause
55ca817ccb7d5b5b66355690e9abc605,LGPL-2.0-only
59530bdf33659b29e73d4adb9f9f6552,GPL-2.0-only
5f30f0716dfdd0d91eb439ebec522ec2,LGPL-2.0-only
6a6a8e020838b23406c81b19c1d46df6,LGPL-3.0-only
751419260aa954499f7abaabaa882bbe,GPL-2.0-only
7fbc338309ac38fefcd64b04bb903e34,LGPL-2.1-only
8ca43cbc842c2336e835926c2166c28b,GPL-2.0-only
94d55d512a9ba36caa9b7df079bae19f,GPL-2.0-only
9ac2e7cff1ddaf48b6eab6028f23ef88,GPL-2.0-only
9f604d8a4f8e74f4f5140845a21b6674,LGPL-2.0-only
a6f89e2100d9b6cdffcea4f398e37343,LGPL-2.1-only
b234ee4d69f5fce4486a80fdaf4a4263,GPL-2.0-only
bbb461211a33b134d42ed5ee802b37ff,LGPL-2.1-only
bfe1f75d606912a4111c90743d6c7325,MPL-1.1-only
c93c0550bd3173f4504b2cbd8991e50b,GPL-2.0-only
d32239bcb673463ab874e80d47fae504,GPL-3.0-only
d7810fab7487fb0aad327b76f1be7cd7,GPL-2.0-only
d8045f3b8f929c1cb29a1e3fd737b499,LGPL-2.1-only
db979804f025cf55aabec7129cb671ed,LGPL-2.0-only
eb723b61539feef013de476e68b5c50a,GPL-2.0-only
ebb5c50ab7cab4baeffba14977030c07,GPL-2.0-only
f27defe1e96c2e1ecd4e0c9be8967949,GPL-3.0-only
fad9b3332be894bab9bc501572864b29,LGPL-2.1-only
fbc093901857fcd118f065f900982c24,LGPL-2.1-only
1 0636e73ff0215e8d672dc4c32c317bb3 GPL-2.0-only
2 12f884d2ae1ff87c09e5b7ccc2c4ca7e GPL-2.0-only
3 18810669f13b87348459e611d31ab760 GPL-2.0-only
4 252890d9eee26aab7b432e8b8a616475 LGPL-2.0-only
5 2d5025d4aa3495befef8f17206a5b0a1 LGPL-2.1-only
6 3214f080875748938ba060314b4f727d LGPL-2.0-only
7 385c55653886acac3821999a3ccd17b3 Artistic-1.0 | GPL-2.0-only
8 393a5ca445f6965873eca0259a17f833 GPL-2.0-only
9 3b83ef96387f14655fc854ddc3c6bd57 Apache-2.0
10 3bf50002aefd002f49e7bb854063f7e7 LGPL-2.0-only
11 4325afd396febcb659c36b49533135d4 GPL-2.0-only
12 4fbd65380cdd255951079008b364516c LGPL-2.1-only
13 54c7042be62e169199200bc6477f04d1 BSD-3-Clause
14 55ca817ccb7d5b5b66355690e9abc605 LGPL-2.0-only
15 59530bdf33659b29e73d4adb9f9f6552 GPL-2.0-only
16 5f30f0716dfdd0d91eb439ebec522ec2 LGPL-2.0-only
17 6a6a8e020838b23406c81b19c1d46df6 LGPL-3.0-only
18 751419260aa954499f7abaabaa882bbe GPL-2.0-only
19 7fbc338309ac38fefcd64b04bb903e34 LGPL-2.1-only
20 8ca43cbc842c2336e835926c2166c28b GPL-2.0-only
21 94d55d512a9ba36caa9b7df079bae19f GPL-2.0-only
22 9ac2e7cff1ddaf48b6eab6028f23ef88 GPL-2.0-only
23 9f604d8a4f8e74f4f5140845a21b6674 LGPL-2.0-only
24 a6f89e2100d9b6cdffcea4f398e37343 LGPL-2.1-only
25 b234ee4d69f5fce4486a80fdaf4a4263 GPL-2.0-only
26 bbb461211a33b134d42ed5ee802b37ff LGPL-2.1-only
27 bfe1f75d606912a4111c90743d6c7325 MPL-1.1-only
28 c93c0550bd3173f4504b2cbd8991e50b GPL-2.0-only
29 d32239bcb673463ab874e80d47fae504 GPL-3.0-only
30 d7810fab7487fb0aad327b76f1be7cd7 GPL-2.0-only
31 d8045f3b8f929c1cb29a1e3fd737b499 LGPL-2.1-only
32 db979804f025cf55aabec7129cb671ed LGPL-2.0-only
33 eb723b61539feef013de476e68b5c50a GPL-2.0-only
34 ebb5c50ab7cab4baeffba14977030c07 GPL-2.0-only
35 f27defe1e96c2e1ecd4e0c9be8967949 GPL-3.0-only
36 fad9b3332be894bab9bc501572864b29 LGPL-2.1-only
37 fbc093901857fcd118f065f900982c24 LGPL-2.1-only

View File

@@ -0,0 +1,79 @@
# Recipe creation tool - newappend plugin
#
# This sub-command creates a bbappend for the specified target and prints the
# path to the bbappend.
#
# Example: recipetool newappend meta-mylayer busybox
#
# Copyright (C) 2015 Christopher Larson <kergoth@gmail.com>
#
# SPDX-License-Identifier: GPL-2.0-only
#
import argparse
import errno
import logging
import os
import re
import subprocess
import sys
import scriptutils
logger = logging.getLogger('recipetool')
tinfoil = None
def tinfoil_init(instance):
    """Receive the shared tinfoil instance from the recipetool frontend."""
    global tinfoil
    tinfoil = instance
def layer(layerpath):
    """argparse type checker: accept *layerpath* only if it holds conf/layer.conf.

    Returns the path unchanged on success; raises ArgumentTypeError otherwise.
    """
    conf_file = os.path.join(layerpath, 'conf', 'layer.conf')
    if os.path.exists(conf_file):
        return layerpath
    raise argparse.ArgumentTypeError('{0!r} must be a path to a valid layer'.format(layerpath))
def newappend(args):
    """Create (if necessary) a bbappend for args.target in args.destlayer and
    print its path (or open it in an editor with -e).

    Returns 1 on error; otherwise the editor status or None after printing.
    """
    # 'bb' was referenced below (bb.utils.mkdirhier) but never imported at
    # file level; import it locally, matching the existing function-scope
    # import style used for oe.recipeutils
    import bb
    import oe.recipeutils

    recipe_path = tinfoil.get_recipe_file(args.target)

    rd = tinfoil.config_data.createCopy()
    rd.setVar('FILE', recipe_path)
    append_path, path_ok = oe.recipeutils.get_bbappend_path(rd, args.destlayer, args.wildcard_version)
    if not append_path:
        logger.error('Unable to determine layer directory containing %s', recipe_path)
        return 1

    if not path_ok:
        logger.warning('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.', os.path.join(args.destlayer, 'conf', 'layer.conf'), os.path.dirname(append_path))

    layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
    if not os.path.abspath(args.destlayer) in layerdirs:
        logger.warning('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')

    if not os.path.exists(append_path):
        bb.utils.mkdirhier(os.path.dirname(append_path))
        try:
            # touch the file so the editor/print below has something to show
            open(append_path, 'a').close()
        except (OSError, IOError) as exc:
            logger.critical(str(exc))
            return 1

    if args.edit:
        return scriptutils.run_editor([append_path, recipe_path], logger)
    else:
        print(append_path)
def register_commands(subparsers):
    """Register the 'newappend' subcommand."""
    parser = subparsers.add_parser(
        'newappend',
        help='Create a bbappend for the specified target in the specified layer')
    parser.add_argument('-e', '--edit', help='Edit the new append. This obeys $VISUAL if set, otherwise $EDITOR, otherwise vi.', action='store_true')
    parser.add_argument('-w', '--wildcard-version', help='Use wildcard to make the bbappend apply to any recipe version', action='store_true')
    parser.add_argument('destlayer', help='Base directory of the destination layer to write the bbappend to', type=layer)
    parser.add_argument('target', help='Target recipe/provide to append')
    parser.set_defaults(func=newappend, parserecipes=True)

View File

@@ -0,0 +1,66 @@
# Recipe creation tool - set variable plugin
#
# Copyright (C) 2015 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import sys
import os
import argparse
import glob
import fnmatch
import re
import logging
import scriptutils
logger = logging.getLogger('recipetool')
tinfoil = None
plugins = None
def tinfoil_init(instance):
    """Receive the shared tinfoil instance from the recipetool frontend."""
    global tinfoil
    tinfoil = instance
def setvar(args):
    """Implement 'setvar': set (or delete, with -D) a variable in a recipe.

    Returns 0 on success, 1 on argument or parse errors.
    """
    import oe.recipeutils

    # Determine the value to apply; delete mode uses None as the marker.
    if args.delete:
        if args.value:
            logger.error('-D/--delete and specifying a value are mutually exclusive')
            return 1
        value = None
    else:
        if args.value is None:
            logger.error('You must specify a value if not using -D/--delete')
            return 1
        value = args.value

    varvalues = {args.varname: value}

    if args.recipe_only:
        # Only touch the named recipe file itself
        single_patch = oe.recipeutils.patch_recipe_file(args.recipefile, varvalues, patch=args.patch)
        patches = [single_patch]
    else:
        rd = tinfoil.parse_recipe_file(args.recipefile, False)
        if not rd:
            return 1
        patches = oe.recipeutils.patch_recipe(rd, args.recipefile, varvalues, patch=args.patch)

    if args.patch:
        # Patch mode: print the diff instead of modifying files in place
        for patch in patches:
            sys.stdout.write(''.join(patch))

    tinfoil.modified_files()
    return 0
def register_commands(subparsers):
    """Register the 'setvar' subcommand."""
    parser = subparsers.add_parser(
        'setvar',
        help='Set a variable within a recipe',
        description='Adds/updates the value a variable is set to in a recipe')
    parser.add_argument('recipefile', help='Recipe file to update')
    parser.add_argument('varname', help='Variable name to set')
    parser.add_argument('value', nargs='?', help='New value to set the variable to')
    parser.add_argument('--recipe-only', '-r', help='Do not set variable in any include file if present', action='store_true')
    parser.add_argument('--patch', '-p', help='Create a patch to make the change instead of modifying the recipe', action='store_true')
    parser.add_argument('--delete', '-D', help='Delete the specified value instead of setting it', action='store_true')
    parser.set_defaults(func=setvar)

View File

@@ -0,0 +1,107 @@
# resulttool - Show logs
#
# Copyright (c) 2019 Garmin International
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import resulttool.resultutils as resultutils
def show_ptest(result, ptest, logger):
    """Print the log for one ptest; return 0 when found, 1 otherwise."""
    logdata = resultutils.ptestresult_get_log(result, ptest)
    if logdata is None:
        print("ptest '%s' log not found" % ptest)
        return 1
    print(logdata)
    return 0
def show_reproducible(result, reproducible, logger):
    """Print diffoscope output for one reproducible test; 0 if found, 1 if not."""
    try:
        text = result['reproducible'][reproducible]['diffoscope.text']
    except KeyError:
        print("reproducible '%s' not found" % reproducible)
        return 1
    print(text)
    return 0
def log(args, logger):
    """Implement 'resulttool log': list/dump/show ptest and reproducible logs.

    Returns 1 on the first missing log; otherwise falls off the end
    (returns None, which the caller treats as success).
    """
    results = resultutils.load_resultsdata(args.source)
    for _, run_name, _, r in resultutils.test_run_results(results):
        if args.list_ptest:
            # NOTE(review): raises KeyError when 'ptestresult.sections' is
            # absent from a run — confirm all inputs carry that section
            print('\n'.join(sorted(r['ptestresult.sections'].keys())))
        if args.dump_ptest:
            for sectname in ['ptestresult.sections', 'ltpposixresult.sections', 'ltpresult.sections']:
                if sectname in r:
                    for name, ptest in r[sectname].items():
                        logdata = resultutils.generic_get_log(sectname, r, name)
                        if logdata is not None:
                            # Build the destination directory, optionally
                            # namespaced by run and by non-ptest section
                            dest_dir = args.dump_ptest
                            if args.prepend_run:
                                dest_dir = os.path.join(dest_dir, run_name)
                            if not sectname.startswith("ptest"):
                                dest_dir = os.path.join(dest_dir, sectname.split(".")[0])
                            os.makedirs(dest_dir, exist_ok=True)
                            dest = os.path.join(dest_dir, '%s.log' % name)
                            if os.path.exists(dest):
                                print("Overlapping ptest logs found, skipping %s. The '--prepend-run' option would avoid this" % name)
                                continue
                            print(dest)
                            with open(dest, 'w') as f:
                                f.write(logdata)
        if args.raw_ptest:
            found = False
            for sectname in ['ptestresult.rawlogs', 'ltpposixresult.rawlogs', 'ltpresult.rawlogs']:
                rawlog = resultutils.generic_get_rawlogs(sectname, r)
                if rawlog is not None:
                    print(rawlog)
                    found = True
            if not found:
                print('Raw ptest logs not found')
                return 1
        if args.raw_reproducible:
            if 'reproducible.rawlogs' in r:
                print(r['reproducible.rawlogs']['log'])
            else:
                print('Raw reproducible logs not found')
                return 1
        for ptest in args.ptest:
            # NOTE(review): show_ptest returns 0 on success, so this returns 1
            # (an error status) after successfully printing the first log —
            # verify whether the condition is inverted upstream
            if not show_ptest(r, ptest, logger):
                return 1
        for reproducible in args.reproducible:
            # NOTE(review): same inverted-looking condition as above
            if not show_reproducible(r, reproducible, logger):
                return 1
def register_commands(subparsers):
    """Register the 'log' subcommand and its options with resulttool."""
    log_parser = subparsers.add_parser('log', help='show logs',
                                       description='show the logs from test results',
                                       group='analysis')
    log_parser.set_defaults(func=log)
    log_parser.add_argument('source',
                            help='the results file/directory/URL to import')
    log_parser.add_argument('--list-ptest', action='store_true',
                            help='list the ptest test names')
    log_parser.add_argument('--ptest', action='append', default=[],
                            help='show logs for a ptest')
    log_parser.add_argument('--dump-ptest', metavar='DIR',
                            help='Dump all ptest log files to the specified directory.')
    log_parser.add_argument('--reproducible', action='append', default=[],
                            help='show logs for a reproducible test')
    log_parser.add_argument('--prepend-run', action='store_true',
                            help='''Dump ptest results to a subdirectory named after the test run when using --dump-ptest.
                                    Required if more than one test run is present in the result file''')
    log_parser.add_argument('--raw', action='store_true',
                            help='show raw (ptest) logs. Deprecated. Alias for "--raw-ptest"', dest='raw_ptest')
    log_parser.add_argument('--raw-ptest', action='store_true',
                            help='show raw ptest log')
    log_parser.add_argument('--raw-reproducible', action='store_true',
                            help='show raw reproducible build logs')

View File

@@ -0,0 +1,235 @@
# test case management tool - manual execution from testopia test cases
#
# Copyright (c) 2018, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
import argparse
import json
import os
import sys
import datetime
import re
import copy
from oeqa.core.runner import OETestResultJSONHelper
def load_json_file(f):
    """Return the object decoded from the JSON file at path *f*."""
    with open(f, "r") as handle:
        return json.load(handle)
def write_json_file(f, json_data):
    """Write *json_data* as sorted, indented JSON to path *f*.

    Parent directories are created as needed; an existing file is overwritten.
    """
    os.makedirs(os.path.dirname(f), exist_ok=True)
    serialised = json.dumps(json_data, sort_keys=True, indent=1)
    with open(f, 'w') as handle:
        handle.write(serialised)
class ManualTestRunner(object):
    """Interactively run manual test cases and collect their results.

    Test cases come from a JSON definition file; the tester is prompted on
    stdin for configuration values and for each test step's outcome.  The
    collected data is shaped for OETestResultJSONHelper.dump_testresult_file().
    """
    def _get_test_module(self, case_file):
        # The test module name is the case file's basename up to the first dot.
        return os.path.basename(case_file).split('.')[0]
    def _get_input(self, config):
        """Prompt for *config* until a lowercase alphanumeric/hyphen/dot value is entered."""
        while True:
            output = input('{} = '.format(config))
            if re.match('^[a-z0-9-.]+$', output):
                break
            print('Only lowercase alphanumeric, hyphen and dot are allowed. Please try again')
        return output
    def _get_available_config_options(self, config_options, test_module, target_config):
        """Return the predefined options for *target_config* in *test_module*, or None."""
        avail_config_options = None
        if test_module in config_options:
            avail_config_options = config_options[test_module].get(target_config)
        return avail_config_options
    def _choose_config_option(self, options):
        """Prompt until the tester enters a key of *options*; return the chosen value."""
        while True:
            output = input('{} = '.format('Option index number'))
            if output in options:
                break
            print('Only integer index inputs from above available configuration options are allowed. Please try again.')
        return options[output]
    def _get_config(self, config_options, test_module):
        """Collect the full configuration dictionary for this manual test run.

        Pre-fills LAYERS/STARTTIME/TEST_TYPE/TEST_MODULE, then prompts the
        tester for every remaining key expected by resulttool's 'manual'
        store map, offering predefined options from *config_options* when
        available.
        """
        # Deferred imports: these pull in bitbake/oeqa machinery only needed here.
        from oeqa.utils.metadata import get_layers
        from oeqa.utils.commands import get_bb_var
        from resulttool.resultutils import store_map
        layers = get_layers(get_bb_var('BBLAYERS'))
        configurations = {}
        configurations['LAYERS'] = layers
        configurations['STARTTIME'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        configurations['TEST_TYPE'] = 'manual'
        configurations['TEST_MODULE'] = test_module
        # Any store-map key not filled in above has to be asked from the tester.
        extra_config = set(store_map['manual']) - set(configurations)
        for config in sorted(extra_config):
            avail_config_options = self._get_available_config_options(config_options, test_module, config)
            if avail_config_options:
                print('---------------------------------------------')
                print('These are available configuration #%s options:' % config)
                print('---------------------------------------------')
                for option, _ in sorted(avail_config_options.items(), key=lambda x: int(x[0])):
                    print('%s: %s' % (option, avail_config_options[option]))
                print('Please select configuration option, enter the integer index number.')
                value_conf = self._choose_config_option(avail_config_options)
                print('---------------------------------------------\n')
            else:
                print('---------------------------------------------')
                print('This is configuration #%s. Please provide configuration value(use "None" if not applicable).' % config)
                print('---------------------------------------------')
                value_conf = self._get_input('Configuration Value')
                print('---------------------------------------------\n')
            configurations[config] = value_conf
        return configurations
    def _execute_test_steps(self, case):
        """Walk the tester through every step of *case* and record the outcome.

        NOTE(review): each step's status is stored under the same alias key,
        so only the last recorded step's result survives — confirm intended.
        """
        test_result = {}
        print('------------------------------------------------------------------------')
        print('Executing test case: %s' % case['test']['@alias'])
        print('------------------------------------------------------------------------')
        print('You have total %s test steps to be executed.' % len(case['test']['execution']))
        print('------------------------------------------------------------------------\n')
        # Steps are keyed by stringified integers; execute in numeric order.
        for step, _ in sorted(case['test']['execution'].items(), key=lambda x: int(x[0])):
            print('Step %s: %s' % (step, case['test']['execution'][step]['action']))
            expected_output = case['test']['execution'][step]['expected_results']
            if expected_output:
                print('Expected output: %s' % expected_output)
            while True:
                done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
                result_types = {'p':'PASSED',
                                'f':'FAILED',
                                'b':'BLOCKED',
                                's':'SKIPPED'}
                if done in result_types:
                    for r in result_types:
                        if done == r:
                            res = result_types[r]
                            if res == 'FAILED':
                                # Failures additionally record a free-form log note.
                                log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
                                test_result.update({case['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
                            else:
                                test_result.update({case['test']['@alias']: {'status': '%s' % res}})
                    break
                print('Invalid input!')
        return test_result
    def _get_write_dir(self):
        # Assumes BUILDDIR is set in the environment (bitbake build directory);
        # raises KeyError otherwise.
        return os.environ['BUILDDIR'] + '/tmp/log/manual/'
    def run_test(self, case_file, config_options_file, testcase_config_file):
        """Run the (optionally filtered) test cases in *case_file* interactively.

        Returns a (configurations, result_id, write_dir, test_results) tuple
        ready to be passed to OETestResultJSONHelper.dump_testresult_file().
        """
        test_module = self._get_test_module(case_file)
        cases = load_json_file(case_file)
        config_options = {}
        if config_options_file:
            config_options = load_json_file(config_options_file)
        configurations = self._get_config(config_options, test_module)
        result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
        test_results = {}
        if testcase_config_file:
            # Restrict execution to the aliases listed in the testcase config;
            # iterate over a deep copy so removal from 'cases' is safe.
            test_case_config = load_json_file(testcase_config_file)
            test_case_to_execute = test_case_config['testcases']
            for case in copy.deepcopy(cases) :
                if case['test']['@alias'] not in test_case_to_execute:
                    cases.remove(case)
        print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
        for c in cases:
            test_result = self._execute_test_steps(c)
            test_results.update(test_result)
        return configurations, result_id, self._get_write_dir(), test_results
    def _get_true_false_input(self, input_message):
        """Prompt with *input_message* until Y/Yes/N/No is entered; return True for yes."""
        yes_list = ['Y', 'YES']
        no_list = ['N', 'NO']
        while True:
            more_config_option = input(input_message).upper()
            if more_config_option in yes_list or more_config_option in no_list:
                break
            print('Invalid input!')
        if more_config_option in no_list:
            return False
        return True
    def make_config_option_file(self, logger, case_file, config_options_file):
        """Interactively build (or extend) a config-options JSON file for *case_file*."""
        config_options = {}
        if config_options_file:
            config_options = load_json_file(config_options_file)
        new_test_module = self._get_test_module(case_file)
        print('Creating configuration options file for test module: %s' % new_test_module)
        new_config_options = {}
        while True:
            config_name = input('\nPlease provide test configuration to create:\n').upper()
            new_config_options[config_name] = {}
            while True:
                config_value = self._get_input('Configuration possible option value')
                # Option indices are 1-based and assigned in entry order.
                config_option_index = len(new_config_options[config_name]) + 1
                new_config_options[config_name][config_option_index] = config_value
                more_config_option = self._get_true_false_input('\nIs there more configuration option input: (Y)es/(N)o\n')
                if not more_config_option:
                    break
            more_config = self._get_true_false_input('\nIs there more configuration to create: (Y)es/(N)o\n')
            if not more_config:
                break
        if new_config_options:
            config_options[new_test_module] = new_config_options
        if not config_options_file:
            config_options_file = os.path.join(self._get_write_dir(), 'manual_config_options.json')
        write_json_file(config_options_file, config_options)
        logger.info('Configuration option file created at %s' % config_options_file)
    def make_testcase_config_file(self, logger, case_file, testcase_config_file):
        """Interactively select test case aliases and write them to a testcase config file."""
        if testcase_config_file:
            if os.path.exists(testcase_config_file):
                print('\nTest configuration file with name %s already exists. Please provide a unique file name' % (testcase_config_file))
                return 0
        if not testcase_config_file:
            testcase_config_file = os.path.join(self._get_write_dir(), "testconfig_new.json")
        testcase_config = {}  # NOTE(review): unused — candidate for removal
        cases = load_json_file(case_file)
        new_test_module = self._get_test_module(case_file)
        new_testcase_config = {}
        new_testcase_config['testcases'] = []
        print('\nAdd testcases for this configuration file:')
        for case in cases:
            print('\n' + case['test']['@alias'])
            add_tc_config = self._get_true_false_input('\nDo you want to add this test case to test configuration : (Y)es/(N)o\n')
            if add_tc_config:
                new_testcase_config['testcases'].append(case['test']['@alias'])
        write_json_file(testcase_config_file, new_testcase_config)
        logger.info('Testcase Configuration file created at %s' % testcase_config_file)
def manualexecution(args, logger):
    """Entry point for the 'manualexecution' subcommand.

    Dispatches to the requested helper-file generator, or runs the manual
    test cases and stores the collected results.  Always returns 0.
    """
    runner = ManualTestRunner()
    if args.make_config_options_file:
        runner.make_config_option_file(logger, args.file, args.config_options_file)
        return 0
    if args.make_testcase_config_file:
        runner.make_testcase_config_file(logger, args.file, args.testcase_config_file)
        return 0
    configurations, result_id, write_dir, test_results = runner.run_test(
        args.file, args.config_options_file, args.testcase_config_file)
    OETestResultJSONHelper().dump_testresult_file(write_dir, configurations, result_id, test_results)
    return 0
def register_commands(subparsers):
    """Register the 'manualexecution' subcommand and its options with resulttool."""
    parser = subparsers.add_parser('manualexecution', help='helper script for results populating during manual test execution.',
                                   description='helper script for results populating during manual test execution. You can find manual test case JSON file in meta/lib/oeqa/manual/',
                                   group='manualexecution')
    parser.set_defaults(func=manualexecution)
    parser.add_argument('file', help='specify path to manual test case JSON file.Note: Please use \"\" to encapsulate the file path.')
    parser.add_argument('-c', '--config-options-file', default='',
                        help='the config options file to import and used as available configuration option selection or make config option file')
    parser.add_argument('-m', '--make-config-options-file', action='store_true',
                        help='make the configuration options file based on provided inputs')
    parser.add_argument('-t', '--testcase-config-file', default='',
                        help='the testcase configuration file to enable user to run a selected set of test case or make a testcase configuration file')
    parser.add_argument('-d', '--make-testcase-config-file', action='store_true',
                        help='make the testcase configuration file to run a set of test cases based on user selection')

View File

@@ -0,0 +1,46 @@
# resulttool - merge multiple testresults.json files into a file or directory
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import json
import resulttool.resultutils as resultutils
def merge(args, logger):
    """Implement the 'merge' subcommand.

    Merges args.base_results into args.target_results.  When the target is a
    URL or an existing directory, the store map is used and results are saved
    back into the target; otherwise both sources are flattened and written to
    the target file.  Returns 0.
    """
    configvars = {}
    if not args.not_add_testseries:
        configvars = resultutils.extra_configvars.copy()
    if args.executed_by:
        configvars['EXECUTED_BY'] = args.executed_by
    target_is_collection = resultutils.is_url(args.target_results) or os.path.isdir(args.target_results)
    if target_is_collection:
        merged = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, configvars=configvars)
        resultutils.append_resultsdata(merged, args.base_results, configmap=resultutils.store_map, configvars=configvars)
        resultutils.save_resultsdata(merged, args.target_results)
    else:
        merged = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, configvars=configvars)
        if os.path.exists(args.target_results):
            resultutils.append_resultsdata(merged, args.target_results, configmap=resultutils.flatten_map, configvars=configvars)
        resultutils.save_resultsdata(merged, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
    logger.info('Merged results to %s' % os.path.dirname(args.target_results))
    return 0
def register_commands(subparsers):
    """Register the 'merge' subcommand and its options with resulttool."""
    merge_parser = subparsers.add_parser('merge', help='merge test result files/directories/URLs',
                                         description='merge the results from multiple files/directories/URLs into the target file or directory',
                                         group='setup')
    merge_parser.set_defaults(func=merge)
    merge_parser.add_argument('base_results',
                             help='the results file/directory/URL to import')
    merge_parser.add_argument('target_results',
                             help='the target file or directory to merge the base_results with')
    merge_parser.add_argument('-t', '--not-add-testseries', action='store_true',
                             help='do not add testseries configuration to results')
    merge_parser.add_argument('-x', '--executed-by', default='',
                             help='add executed-by configuration to each result file')

View File

@@ -0,0 +1,447 @@
# resulttool - regression analysis
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import resulttool.resultutils as resultutils
from oeqa.utils.git import GitRepo
import oeqa.utils.gitarchive as gitarchive
# Maps a result's TEST_TYPE to the configuration key whose value must match
# between two runs before they are considered comparable (see metadata_matches()).
METADATA_MATCH_TABLE = {
    "oeselftest": "OESELFTEST_METADATA"
}
# Known oe-selftest autobuilder configurations, keyed by autobuilder target
# name.  Used by guess_oeselftest_metadata() to reconstruct OESELFTEST_METADATA
# for older results that predate its introduction.
# NOTE(review): tightly coupled to config.json in the autobuilder — keep in sync.
OESELFTEST_METADATA_GUESS_TABLE={
    "trigger-build-posttrigger": {
        "run_all_tests": False,
        "run_tests":["buildoptions.SourceMirroring.test_yocto_source_mirror"],
        "skips": None,
        "machine": None,
        "select_tags":None,
        "exclude_tags": None
    },
    "reproducible": {
        "run_all_tests": False,
        "run_tests":["reproducible"],
        "skips": None,
        "machine": None,
        "select_tags":None,
        "exclude_tags": None
    },
    "arch-qemu-quick": {
        "run_all_tests": True,
        "run_tests":None,
        "skips": None,
        "machine": None,
        "select_tags":["machine"],
        "exclude_tags": None
    },
    "arch-qemu-full-x86-or-x86_64": {
        "run_all_tests": True,
        "run_tests":None,
        "skips": None,
        "machine": None,
        "select_tags":["machine", "toolchain-system"],
        "exclude_tags": None
    },
    "arch-qemu-full-others": {
        "run_all_tests": True,
        "run_tests":None,
        "skips": None,
        "machine": None,
        "select_tags":["machine", "toolchain-user"],
        "exclude_tags": None
    },
    "selftest": {
        "run_all_tests": True,
        "run_tests":None,
        "skips": ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror", "reproducible"],
        "machine": None,
        "select_tags":None,
        "exclude_tags": ["machine", "toolchain-system", "toolchain-user"]
    },
    "bringup": {
        "run_all_tests": True,
        "run_tests":None,
        "skips": ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror"],
        "machine": None,
        "select_tags":None,
        "exclude_tags": ["machine", "toolchain-system", "toolchain-user"]
    }
}
# Human-readable replacements for raw status values; the "None" key stands for
# a test that has no matching result at all (see get_status_str()).
STATUS_STRINGS = {
    "None": "No matching test result"
}
# Default cap on how many individual regressions are printed per test family
# (overridable via the -l/--limit command line option).
REGRESSIONS_DISPLAY_LIMIT=50
# Section banners used in the textual regression report.
MISSING_TESTS_BANNER = "-------------------------- Missing tests --------------------------"
ADDITIONAL_DATA_BANNER = "--------------------- Matches and improvements --------------------"
def test_has_at_least_one_matching_tag(test, tag_list):
return "oetags" in test and any(oetag in tag_list for oetag in test["oetags"])
def all_tests_have_at_least_one_matching_tag(results, tag_list):
    """Return True when every non-ptest result matches at least one tag in *tag_list*."""
    for test_name, test_result in results.items():
        if test_name.startswith("ptestresult"):
            # ptest results carry no oetags and are exempt from the check.
            continue
        if not test_has_at_least_one_matching_tag(test_result, tag_list):
            return False
    return True
def any_test_have_any_matching_tag(results, tag_list):
    """Return True when at least one result in *results* matches a tag in *tag_list*."""
    for test_result in results.values():
        if test_has_at_least_one_matching_tag(test_result, tag_list):
            return True
    return False
def have_skipped_test(result, test_prefix):
    """Return True when every test whose name starts with *test_prefix* is SKIPPED.

    Vacuously True when no test matches the prefix.
    """
    for name in result:
        if name.startswith(test_prefix) and result[name]['status'] != "SKIPPED":
            return False
    return True
def have_all_tests_skipped(result, test_prefixes_list):
    """Return True when have_skipped_test() holds for every prefix in *test_prefixes_list*."""
    for prefix in test_prefixes_list:
        if not have_skipped_test(result, prefix):
            return False
    return True
def guess_oeselftest_metadata(results):
    """
    Infer OESELFTEST_METADATA for an oe-selftest result set that lacks it.

    Matches the shape of *results* (test count, oetags, skipped tests)
    against the known autobuilder target configurations and returns the
    corresponding OESELFTEST_METADATA_GUESS_TABLE entry, or None when no
    configuration matches.  This guessing is tightly coupled to the
    autobuilder's config.json and should trigger less and less as newer
    results carry the metadata from test reporting time.
    """
    guesses = OESELFTEST_METADATA_GUESS_TABLE
    if len(results) == 1 and "buildoptions.SourceMirroring.test_yocto_source_mirror" in results:
        return guesses['trigger-build-posttrigger']
    if all(name.startswith("reproducible") for name in results):
        return guesses['reproducible']
    if all_tests_have_at_least_one_matching_tag(results, ["machine"]):
        return guesses['arch-qemu-quick']
    if all_tests_have_at_least_one_matching_tag(results, ["machine", "toolchain-system"]):
        return guesses['arch-qemu-full-x86-or-x86_64']
    if all_tests_have_at_least_one_matching_tag(results, ["machine", "toolchain-user"]):
        return guesses['arch-qemu-full-others']
    if not any_test_have_any_matching_tag(results, ["machine", "toolchain-user", "toolchain-system"]):
        common_skips = ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror"]
        if have_all_tests_skipped(results, common_skips + ["reproducible"]):
            return guesses['selftest']
        if have_all_tests_skipped(results, common_skips):
            return guesses['bringup']
    return None
def metadata_matches(base_configuration, target_configuration):
    """
    For the passed base and target configurations, check the test type.  If
    the type appears in METADATA_MATCH_TABLE, the corresponding metadata key
    must hold equal values in both configurations.  Returns True when the
    metadata matches, or when the type is not subject to metadata matching.
    """
    test_type = base_configuration.get('TEST_TYPE')
    if test_type not in METADATA_MATCH_TABLE:
        return True
    metadata_key = METADATA_MATCH_TABLE.get(test_type)
    return base_configuration.get(metadata_key) == target_configuration.get(metadata_key)
def machine_matches(base_configuration, target_configuration):
    """Return True when both configurations report the same MACHINE value."""
    base_machine = base_configuration.get('MACHINE')
    target_machine = target_configuration.get('MACHINE')
    return base_machine == target_machine
def can_be_compared(logger, base, target):
    """
    Decide whether two test runs are meaningful to compare.

    Some runs are not relevant to compare (e.g. oe-selftest runs with
    different test sets or parameters).  Older oe-selftest results lacking
    OESELFTEST_METADATA are enriched in place with a guessed value first.
    Runtime runs containing LTP results only compare against other LTP runs.
    """
    def ensure_oeselftest_metadata(configuration, result):
        # Enrich old oeselftest results whose metadata predates its introduction.
        if configuration.get('TEST_TYPE') != 'oeselftest' or 'OESELFTEST_METADATA' in configuration:
            return
        guess = guess_oeselftest_metadata(result)
        if guess is None:
            logger.error(f"ERROR: did not manage to guess oeselftest metadata for {configuration['STARTTIME']}")
        else:
            logger.debug(f"Enriching {configuration['STARTTIME']} with {guess}")
            configuration['OESELFTEST_METADATA'] = guess

    base_configuration = base['configuration']
    target_configuration = target['configuration']
    ensure_oeselftest_metadata(base_configuration, base['result'])
    ensure_oeselftest_metadata(target_configuration, target['result'])
    comparable = True
    # Test runs with LTP results in should only be compared with other runs with LTP tests in them
    if base_configuration.get('TEST_TYPE') == 'runtime' and any(name.startswith("ltpresult") for name in base['result']):
        comparable = target_configuration.get('TEST_TYPE') == 'runtime' and any(name.startswith("ltpresult") for name in target['result'])
    return comparable and metadata_matches(base_configuration, target_configuration) \
            and machine_matches(base_configuration, target_configuration)
def get_status_str(raw_status):
    """Map a raw status to its display string; a falsy status means 'no result'."""
    if raw_status:
        lookup_key = raw_status.lower()
    else:
        lookup_key = "None"
    return STATUS_STRINGS.get(lookup_key, raw_status)
def get_additional_info_line(new_pass_count, new_tests):
    """Format a one-line summary of new/now-passing tests, or "" when empty."""
    fragments = []
    if new_tests:
        fragments.append(f'+{new_tests} test(s) present')
    if new_pass_count:
        fragments.append(f'+{new_pass_count} test(s) now passing')
    return ' -> ' + ', '.join(fragments) + '\n' if fragments else ""
def compare_result(logger, base_name, target_name, base_result, target_result, display_limit=None):
    """Compare two individual test runs and describe the differences.

    Returns a (result, resultstring) pair: *result* maps each test whose
    status changed to {'base': ..., 'target': ...} (None when the runs match
    or cannot be compared), and *resultstring* is a human-readable summary.
    A run counts as a regression when at least one changed test did not move
    to a PASS status.
    """
    base_result = base_result.get('result')
    target_result = target_result.get('result')
    result = {}
    regressions = {}
    resultstring = ""
    new_tests = 0
    new_pass_count = 0
    # Fall back to the default display cap when no limit was supplied.
    # (Removed a duplicated 'new_tests = 0' initialization here.)
    display_limit = int(display_limit) if display_limit else REGRESSIONS_DISPLAY_LIMIT
    if base_result and target_result:
        for k in base_result:
            base_testcase = base_result[k]
            base_status = base_testcase.get('status')
            if base_status:
                target_testcase = target_result.get(k, {})
                target_status = target_testcase.get('status')
                if base_status != target_status:
                    result[k] = {'base': base_status, 'target': target_status}
            else:
                # Message grammar fixed ("retrieved" -> "retrieve").
                logger.error('Failed to retrieve base test case status: %s' % k)
        # Also count new tests that were not present in base results: it
        # could be newly added tests, but it could also highlights some tests
        # renames or fixed faulty ptests
        for k in target_result:
            if k not in base_result:
                new_tests += 1
        if result:
            new_pass_count = sum(test['target'] is not None and test['target'].startswith("PASS") for test in result.values())
            # Print a regression report only if at least one test has a regression status (FAIL, SKIPPED, absent...)
            if new_pass_count < len(result):
                resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
                for k in sorted(result):
                    if not result[k]['target'] or not result[k]['target'].startswith("PASS"):
                        # Differentiate each ptest kind when listing regressions
                        key_parts = k.split('.')
                        key = '.'.join(key_parts[:2]) if k.startswith('ptest') else key_parts[0]
                        # Append new regression to corresponding test family
                        regressions[key] = regressions.setdefault(key, []) + [' %s: %s -> %s\n' % (k, get_status_str(result[k]['base']), get_status_str(result[k]['target']))]
                resultstring += f" Total: {sum([len(regressions[r]) for r in regressions])} new regression(s):\n"
                for k in regressions:
                    resultstring += f" {len(regressions[k])} regression(s) for {k}\n"
                    count_to_print = min([display_limit, len(regressions[k])]) if display_limit > 0 else len(regressions[k])
                    resultstring += ''.join(regressions[k][:count_to_print])
                    if count_to_print < len(regressions[k]):
                        resultstring += ' [...]\n'
            if new_pass_count > 0:
                resultstring += f' Additionally, {new_pass_count} previously failing test(s) is/are now passing\n'
            if new_tests > 0:
                resultstring += f' Additionally, {new_tests} new test(s) is/are present\n'
        else:
            # No status changes at all: report a plain match.
            resultstring = "%s\n%s\n" % (base_name, target_name)
            result = None
    else:
        # At least one run carries no 'result' payload: nothing to compare.
        resultstring = "%s\n%s\n" % (base_name, target_name)
    if not result:
        additional_info = get_additional_info_line(new_pass_count, new_tests)
        if additional_info:
            resultstring += additional_info
    return result, resultstring
def get_results(logger, source):
    """Load results from *source* using the regression configuration map."""
    regression_map = resultutils.regression_map
    return resultutils.load_resultsdata(source, configmap=regression_map)
def regression(args, logger):
    """Implement the 'regression' subcommand: compare two result sources.

    Returns the exit status of the comparison (0 on success).
    """
    base_results = get_results(logger, args.base_result)
    target_results = get_results(logger, args.target_result)
    # Propagate regression_common()'s exit status; previously it was dropped
    # and this function implicitly returned None.
    return regression_common(args, logger, base_results, target_results)
# Some test case naming is poor and contains random strings, particularly lttng/babeltrace.
# Truncating the test names works since they contain file and line number identifiers
# which allows us to match them without the random components.
def fixup_ptest_names(results, logger):
    """Normalise known-noisy ptest case names in *results*, in place."""
    for run in results:
        for config in results[run]:
            result_map = results[run][config]['result']
            for name in list(result_map.keys()):
                renamed = None
                if name.startswith(("ptestresult.lttng-tools.", "ptestresult.babeltrace.", "ptestresult.babeltrace2")) and "_-_" in name:
                    renamed = name.split("_-_")[0]
                elif name.startswith(("ptestresult.curl.", "ptestresult.dbus.")) and "__" in name:
                    renamed = name.split("__")[0]
                elif name.startswith("ptestresult.binutils") and "build-st-" in name:
                    renamed = name.split(" ")[0]
                elif name.startswith("ptestresult.gcc") and "/tmp/runtest." in name:
                    renamed = ".".join(name.split(".")[:2])
                if renamed:
                    result_map[renamed] = result_map[name]
                    del result_map[name]
def regression_common(args, logger, base_results, target_results):
    """Pair up comparable base/target runs and print a regression report.

    Runs are grouped by result id; within a group, matching pairs are
    eliminated first so that only the remaining combinations are reported
    as regressions.  Prints regressions, missing tests and
    matches/improvements to stdout and returns 0.
    """
    if args.base_result_id:
        base_results = resultutils.filter_resultsdata(base_results, args.base_result_id)
    if args.target_result_id:
        target_results = resultutils.filter_resultsdata(target_results, args.target_result_id)
    # Strip random per-run components from ptest names so they can be matched.
    fixup_ptest_names(base_results, logger)
    fixup_ptest_names(target_results, logger)
    matches = []
    regressions = []
    notfound = []
    for a in base_results:
        if a in target_results:
            base = list(base_results[a].keys())
            target = list(target_results[a].keys())
            # We may have multiple base/targets which are for different configurations. Start by
            # removing any pairs which match
            for c in base.copy():
                for b in target.copy():
                    if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
                        continue
                    res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit)
                    if not res:
                        # Identical pair: record the match and take both out
                        # of further consideration.
                        matches.append(resstr)
                        base.remove(c)
                        target.remove(b)
                        break
            # Should only now see regressions, we may not be able to match multiple pairs directly
            for c in base:
                for b in target:
                    if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
                        continue
                    res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit)
                    if res:
                        regressions.append(resstr)
        else:
            notfound.append("%s not found in target" % a)
    print("\n".join(sorted(regressions)))
    print("\n" + MISSING_TESTS_BANNER + "\n")
    print("\n".join(sorted(notfound)))
    print("\n" + ADDITIONAL_DATA_BANNER + "\n")
    print("\n".join(sorted(matches)))
    return 0
def regression_git(args, logger):
    """Implement the 'regression-git' subcommand.

    Selects two tagged result revisions from a git results repository
    (explicitly via --commit/--commit-number options, or automatically as
    the two most recent revisions) and runs the regression comparison on
    them.  Returns 0 on success, 1 when suitable revisions cannot be found.
    """
    base_results = {}
    target_results = {}
    tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
    repo = GitRepo(args.repo)
    revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch)
    if args.branch2:
        revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2)
        if not len(revs2):
            logger.error("No revisions found to compare against")
            return 1
        if not len(revs):
            logger.error("No revision to report on found")
            return 1
    else:
        if len(revs) < 2:
            logger.error("Only %d tester revisions found, unable to generate report" % len(revs))
            return 1
    # Pick revisions
    if args.commit:
        if args.commit_number:
            logger.warning("Ignoring --commit-number as --commit was specified")
        index1 = gitarchive.rev_find(revs, 'commit', args.commit)
    elif args.commit_number:
        index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
    else:
        index1 = len(revs) - 1
    if args.branch2:
        # Cross-branch comparison: append the chosen base revision onto the
        # second branch's list and continue selection within that list.
        revs2.append(revs[index1])
        index1 = len(revs2) - 1
        revs = revs2
    if args.commit2:
        if args.commit_number2:
            logger.warning("Ignoring --commit-number2 as --commit2 was specified")
        index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
    elif args.commit_number2:
        index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
    else:
        if index1 > 0:
            index2 = index1 - 1
            # Find the closest matching commit number for comparision
            # In future we could check the commit is a common ancestor and
            # continue back if not but this good enough for now
            while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
                index2 = index2 - 1
        else:
            logger.error("Unable to determine the other commit, use "
                    "--commit2 or --commit-number2 to specify it")
            return 1
    logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2]))
    # revs[...][2] is the release/tag component used to look up stored results.
    base_results = resultutils.git_get_result(repo, revs[index1][2])
    target_results = resultutils.git_get_result(repo, revs[index2][2])
    regression_common(args, logger, base_results, target_results)
    return 0
def register_commands(subparsers):
    """Register the 'regression' and 'regression-git' subcommands with resulttool.

    Fix: the plain 'regression' subparser previously never defined
    -l/--limit, but regression() -> regression_common() -> compare_result()
    reads args.limit, so 'resulttool regression' crashed with an
    AttributeError.  The option is now registered on both subparsers.
    """
    reg_parser = subparsers.add_parser('regression', help='regression file/directory analysis',
                                       description='regression analysis comparing the base set of results to the target results',
                                       group='analysis')
    reg_parser.set_defaults(func=regression)
    reg_parser.add_argument('base_result',
                            help='base result file/directory/URL for the comparison')
    reg_parser.add_argument('target_result',
                            help='target result file/directory/URL to compare with')
    reg_parser.add_argument('-b', '--base-result-id', default='',
                            help='(optional) filter the base results to this result ID')
    reg_parser.add_argument('-t', '--target-result-id', default='',
                            help='(optional) filter the target results to this result ID')
    reg_parser.add_argument('-l', '--limit', default=REGRESSIONS_DISPLAY_LIMIT, help="Maximum number of changes to display per test. Can be set to 0 to print all changes")
    git_parser = subparsers.add_parser('regression-git', help='regression git analysis',
                                       description='regression analysis comparing base result set to target '
                                                   'result set',
                                       group='analysis')
    git_parser.set_defaults(func=regression_git)
    git_parser.add_argument('repo',
                            help='the git repository containing the data')
    git_parser.add_argument('-b', '--base-result-id', default='',
                            help='(optional) default select regression based on configurations unless base result '
                                 'id was provided')
    git_parser.add_argument('-t', '--target-result-id', default='',
                            help='(optional) default select regression based on configurations unless target result '
                                 'id was provided')
    git_parser.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
    git_parser.add_argument('--branch2', help="Branch to find comparision revisions in")
    git_parser.add_argument('--commit', help="Revision to search for")
    git_parser.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
    git_parser.add_argument('--commit2', help="Revision to compare with")
    git_parser.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
    git_parser.add_argument('-l', '--limit', default=REGRESSIONS_DISPLAY_LIMIT, help="Maximum number of changes to display per test. Can be set to 0 to print all changes")

View File

@@ -0,0 +1,315 @@
# test result tool - report text based test results
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import glob
import json
import resulttool.resultutils as resultutils
from oeqa.utils.git import GitRepo
import oeqa.utils.gitarchive as gitarchive
class ResultsTextReport(object):
    """Aggregate testresults.json data and print a plain-text summary report.

    Per-machine statistics for ptest, LTP and LTP-POSIX suites are
    accumulated while test runs are processed, then everything is rendered
    through the Jinja2 template 'test_report_full_text.txt'.
    """
    def __init__(self):
        # Each map is machine -> suite -> counters/duration bookkeeping.
        self.ptests = {}
        self.ltptests = {}
        self.ltpposixtests = {}
        # Buckets mapping the many harness-specific status strings onto the
        # three categories used by the report.
        self.result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
                             'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
                             'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}
    def handle_ptest_result(self, k, status, result, machine):
        """Record one 'ptestresult.*' entry in self.ptests.

        Returns False for a duplicate test result (caller should not count
        it again in the global totals), True otherwise.
        """
        if machine not in self.ptests:
            self.ptests[machine] = {}
        if k == 'ptestresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ptestresult.sections']:
                if suite not in self.ptests[machine]:
                    self.ptests[machine][suite] = {
                        'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
                        'failed_testcases': [], "testcases": set(),
                    }
                if 'duration' in result['ptestresult.sections'][suite]:
                    self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
                if 'timeout' in result['ptestresult.sections'][suite]:
                    # Trailing " T" marks a suite that hit its timeout
                    self.ptests[machine][suite]['duration'] += " T"
            return True
        # process test result
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return True
        # Handle 'glib-2.0' (suite names which themselves contain a dot)
        if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ptestresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ptests[machine]:
            self.ptests[machine][suite] = {
                'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
                'failed_testcases': [], "testcases": set(),
            }
        # do not process duplicate results
        if test in self.ptests[machine][suite]["testcases"]:
            print("Warning duplicate ptest result '{}.{}' for {}".format(suite, test, machine))
            return False
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ptests[machine][suite][tk] += 1
        self.ptests[machine][suite]["testcases"].add(test)
        return True
    def handle_ltptest_result(self, k, status, result, machine):
        """Record one 'ltpresult.*' entry in self.ltptests."""
        if machine not in self.ltptests:
            self.ltptests[machine] = {}
        if k == 'ltpresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ltpresult.sections']:
                if suite not in self.ltptests[machine]:
                    self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
                if 'duration' in result['ltpresult.sections'][suite]:
                    self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
                if 'timeout' in result['ltpresult.sections'][suite]:
                    # Trailing " T" marks a suite that hit its timeout
                    self.ltptests[machine][suite]['duration'] += " T"
            return
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return
        # Handle 'glib-2.0' (suite names which themselves contain a dot)
        if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ltpresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ltptests[machine]:
            self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ltptests[machine][suite][tk] += 1
    def handle_ltpposixtest_result(self, k, status, result, machine):
        """Record one 'ltpposixresult.*' entry in self.ltpposixtests."""
        if machine not in self.ltpposixtests:
            self.ltpposixtests[machine] = {}
        if k == 'ltpposixresult.sections':
            # Ensure tests without any test results still show up on the report
            for suite in result['ltpposixresult.sections']:
                if suite not in self.ltpposixtests[machine]:
                    self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
                if 'duration' in result['ltpposixresult.sections'][suite]:
                    self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
            return
        try:
            _, suite, test = k.split(".", 2)
        except ValueError:
            return
        # Handle 'glib-2.0' (suite names which themselves contain a dot)
        if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
            try:
                _, suite, suite1, test = k.split(".", 3)
                if suite + "." + suite1 in result['ltpposixresult.sections']:
                    suite = suite + "." + suite1
            except ValueError:
                pass
        if suite not in self.ltpposixtests[machine]:
            self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
        for tk in self.result_types:
            if status in self.result_types[tk]:
                self.ltpposixtests[machine][suite][tk] += 1
    def get_aggregated_test_result(self, logger, testresult, machine):
        """Count passed/failed/skipped for one test run and collect failures.

        ptest/LTP entries are also folded into the per-suite summaries via
        the handle_* methods above; duplicate ptest entries are excluded
        from the global totals.
        """
        test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
        result = testresult.get('result', [])
        for k in result:
            test_status = result[k].get('status', [])
            if k.startswith("ptestresult."):
                if not self.handle_ptest_result(k, test_status, result, machine):
                    continue
            elif k.startswith("ltpresult."):
                self.handle_ltptest_result(k, test_status, result, machine)
            elif k.startswith("ltpposixresult."):
                self.handle_ltpposixtest_result(k, test_status, result, machine)
            # process result if it was not skipped by a handler
            for tk in self.result_types:
                if test_status in self.result_types[tk]:
                    test_count_report[tk] += 1
            if test_status in self.result_types['failed']:
                test_count_report['failed_testcases'].append(k)
        return test_count_report
    def print_test_report(self, template_file_name, test_count_reports):
        """Render the aggregated counts through the given Jinja2 template."""
        from jinja2 import Environment, FileSystemLoader
        script_path = os.path.dirname(os.path.realpath(__file__))
        file_loader = FileSystemLoader(script_path + '/template')
        env = Environment(loader=file_loader, trim_blocks=True)
        template = env.get_template(template_file_name)
        havefailed = False
        reportvalues = []
        machines = []
        cols = ['passed', 'failed', 'skipped']
        # maxlen tracks the widest value per column so the template can pad
        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 ,'ltptest': 0, 'ltpposixtest': 0}
        for line in test_count_reports:
            total_tested = line['passed'] + line['failed'] + line['skipped']
            vals = {}
            vals['result_id'] = line['result_id']
            vals['testseries'] = line['testseries']
            vals['sort'] = line['testseries'] + "_" + line['result_id']
            vals['failed_testcases'] = line['failed_testcases']
            for k in cols:
                if total_tested:
                    vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
                else:
                    vals[k] = "0 (0%)"
            for k in maxlen:
                if k in vals and len(vals[k]) > maxlen[k]:
                    maxlen[k] = len(vals[k])
            reportvalues.append(vals)
            if line['failed_testcases']:
                havefailed = True
            if line['machine'] not in machines:
                machines.append(line['machine'])
        reporttotalvalues = {}
        for k in cols:
            reporttotalvalues[k] = '%s' % sum([line[k] for line in test_count_reports])
        reporttotalvalues['count'] = '%s' % len(test_count_reports)
        for (machine, report) in self.ptests.items():
            for ptest in self.ptests[machine]:
                if len(ptest) > maxlen['ptest']:
                    maxlen['ptest'] = len(ptest)
        for (machine, report) in self.ltptests.items():
            for ltptest in self.ltptests[machine]:
                if len(ltptest) > maxlen['ltptest']:
                    maxlen['ltptest'] = len(ltptest)
        for (machine, report) in self.ltpposixtests.items():
            for ltpposixtest in self.ltpposixtests[machine]:
                if len(ltpposixtest) > maxlen['ltpposixtest']:
                    maxlen['ltpposixtest'] = len(ltpposixtest)
        output = template.render(reportvalues=reportvalues,
                                 reporttotalvalues=reporttotalvalues,
                                 havefailed=havefailed,
                                 machines=machines,
                                 ptests=self.ptests,
                                 ltptests=self.ltptests,
                                 ltpposixtests=self.ltpposixtests,
                                 maxlen=maxlen)
        print(output)
    def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test, selected_test_case_only):
        """Load results (from a directory/URL or git repo) and print the report.

        raw_test / selected_test_case_only switch to raw-dump modes instead
        of the aggregated summary.
        """
        def print_selected_testcase_result(testresults, selected_test_case_only):
            """Print the status (and log, if any) of one named test case."""
            for testsuite in testresults:
                for resultid in testresults[testsuite]:
                    result = testresults[testsuite][resultid]['result']
                    test_case_result = result.get(selected_test_case_only, {})
                    if test_case_result.get('status'):
                        print('Found selected test case result for %s from %s' % (selected_test_case_only,
                                                                                      resultid))
                        print(test_case_result['status'])
                    else:
                        print('Could not find selected test case result for %s from %s' % (selected_test_case_only,
                                                                                      resultid))
                    if test_case_result.get('log'):
                        print(test_case_result['log'])
        test_count_reports = []
        configmap = resultutils.store_map
        if use_regression_map:
            configmap = resultutils.regression_map
        if commit:
            if tag:
                logger.warning("Ignoring --tag as --commit was specified")
            tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
            repo = GitRepo(source_dir)
            revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
            rev_index = gitarchive.rev_find(revs, 'commit', commit)
            testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
        elif tag:
            repo = GitRepo(source_dir)
            testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
        else:
            testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
        if raw_test:
            raw_results = {}
            for testsuite in testresults:
                result = testresults[testsuite].get(raw_test, {})
                if result:
                    raw_results[testsuite] = {raw_test: result}
            if raw_results:
                if selected_test_case_only:
                    print_selected_testcase_result(raw_results, selected_test_case_only)
                else:
                    print(json.dumps(raw_results, sort_keys=True, indent=1))
            else:
                print('Could not find raw test result for %s' % raw_test)
            return 0
        if selected_test_case_only:
            print_selected_testcase_result(testresults, selected_test_case_only)
            return 0
        for testsuite in testresults:
            for resultid in testresults[testsuite]:
                skip = False
                result = testresults[testsuite][resultid]
                machine = result['configuration']['MACHINE']
                # Check to see if there is already results for these kinds of tests for the machine
                # NOTE(review): "ltpiresult" looks like a typo for "ltpresult";
                # as written the LTP half of this dedup check can never match.
                # Confirm against upstream intent before relying on it.
                for key in result['result'].keys():
                    testtype = str(key).split('.')[0]
                    if ((machine in self.ltptests and testtype == "ltpiresult" and self.ltptests[machine]) or
                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
                        print("Already have test results for %s on %s, skipping %s" %(str(key).split('.')[0], machine, resultid))
                        skip = True
                        break
                if skip:
                    # Skips the remaining result ids of this testsuite as well
                    break
                test_count_report = self.get_aggregated_test_result(logger, result, machine)
                test_count_report['machine'] = machine
                test_count_report['testseries'] = result['configuration']['TESTSERIES']
                test_count_report['result_id'] = resultid
                test_count_reports.append(test_count_report)
        self.print_test_report('test_report_full_text.txt', test_count_reports)
def report(args, logger):
    """Entry point for the 'report' subcommand: print the text summary."""
    ResultsTextReport().view_test_report(logger, args.source_dir, args.branch,
                                         args.commit, args.tag,
                                         args.use_regression_map,
                                         args.raw_test_only,
                                         args.selected_test_case_only)
    return 0
def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser = subparsers.add_parser('report', help='summarise test results',
                                   description='print a text-based summary of the test results',
                                   group='analysis')
    parser.set_defaults(func=report)
    # Positional argument first, then the option flags.
    parser.add_argument('source_dir',
                        help='source file/directory/URL that contain the test result files to summarise')
    parser.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
    parser.add_argument('--commit', help="Revision to report")
    parser.add_argument('-t', '--tag', default='',
                        help='source_dir is a git repository, report on the tag specified from that repository')
    parser.add_argument('-m', '--use_regression_map', action='store_true',
                        help='instead of the default "store_map", use the "regression_map" for report')
    parser.add_argument('-r', '--raw_test_only', default='',
                        help='output raw test result only for the user provided test result id')
    parser.add_argument('-s', '--selected_test_case_only', default='',
                        help='output selected test case result for the user provided test case id, if both test '
                             'result id and test case id are provided then output the selected test case result '
                             'from the provided test result id')

View File

@@ -0,0 +1,274 @@
# resulttool - common library/utility functions
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import base64
import zlib
import json
import scriptpath
import copy
import urllib.request
import posixpath
import logging
# Make the OE metadata libraries (meta/lib) importable before anything else.
scriptpath.add_oe_lib_path()
logger = logging.getLogger('resulttool')
# The *_map dicts map a TEST_TYPE onto the list of configuration variables
# whose values are joined to build the storage/grouping path for a result
# set.  flatten_map collapses every result of a type into one bucket.
flatten_map = {
    "oeselftest": [],
    "runtime": [],
    "sdk": [],
    "sdkext": [],
    "manual": []
}
# Coarser grouping used when comparing runs against each other.
regression_map = {
    "oeselftest": ['TEST_TYPE', 'MACHINE'],
    "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
    "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
    "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
    "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
}
# Default directory layout used when storing results into git.
store_map = {
    "oeselftest": ['TEST_TYPE', 'TESTSERIES', 'MACHINE'],
    "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
    "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
    "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
    "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
}
# Result sections holding bulky raw logs, mapped to the filename prefix
# used when extracting them to disk (see save_resultsdata/strip_logs).
rawlog_sections = {
    "ptestresult.rawlogs": "ptest",
    "ltpresult.rawlogs": "ltp",
    "ltpposixresult.rawlogs": "ltpposix"
}
def is_url(p):
    """
    Helper for determining if the given path is a URL
    """
    return p.startswith(('http://', 'https://'))
extra_configvars = {'TESTSERIES': ''}
#
# Load the json file and append the results data into the provided results dict
#
def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
    """Load results data from *f* and merge it into the *results* dict.

    *f* may be an http(s) URL, a path to a testresults.json file, or an
    already-loaded results dictionary.  Each entry is grouped under a key
    built by joining the configuration variables listed in *configmap*
    for the entry's TEST_TYPE.

    Raises ValueError for malformed entries or an unknown TEST_TYPE.
    """
    if type(f) is str:
        if is_url(f):
            with urllib.request.urlopen(f) as response:
                data = json.loads(response.read().decode('utf-8'))
            url = urllib.parse.urlparse(f)
            # Series name is derived from the parent directory of the URL path
            testseries = posixpath.basename(posixpath.dirname(url.path))
        else:
            with open(f, "r") as filedata:
                try:
                    data = json.load(filedata)
                except json.decoder.JSONDecodeError:
                    print("Cannot decode {}. Possible corruption. Skipping.".format(f))
                    data = ""
            testseries = os.path.basename(os.path.dirname(f))
    else:
        data = f
        # Pre-loaded dicts carry no filesystem context to derive a series
        # name from; default it so the TESTSERIES fallback below cannot
        # raise a NameError (previously 'testseries' was left unbound here).
        testseries = ""
    for res in data:
        if "configuration" not in data[res] or "result" not in data[res]:
            raise ValueError("Test results data without configuration or result section?")
        for config in configvars:
            if config == "TESTSERIES" and "TESTSERIES" not in data[res]["configuration"]:
                data[res]["configuration"]["TESTSERIES"] = testseries
                continue
            if config not in data[res]["configuration"]:
                data[res]["configuration"][config] = configvars[config]
        testtype = data[res]["configuration"].get("TEST_TYPE")
        if testtype not in configmap:
            raise ValueError("Unknown test type %s" % testtype)
        testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
        if testpath not in results:
            results[testpath] = {}
        results[testpath][res] = data[res]
#
# Walk a directory and find/load results data
# or load directly from a file
#
def load_resultsdata(source, configmap=store_map, configvars=extra_configvars):
    """Load results from a URL, a single file, or a directory tree.

    Directories are walked recursively and every testresults.json found
    is merged into the returned dict.
    """
    results = {}
    if is_url(source) or os.path.isfile(source):
        append_resultsdata(results, source, configmap, configvars)
    else:
        for root, _, filenames in os.walk(source):
            for filename in filenames:
                if filename == "testresults.json":
                    append_resultsdata(results, os.path.join(root, filename),
                                       configmap, configvars)
    return results
def filter_resultsdata(results, resultid):
    """Return a copy of *results* reduced to entries whose id is *resultid*.

    Fixes a NameError in the original comparison, which referenced the
    undefined name 'resultsid' and made any call to this function crash.
    """
    newresults = {}
    for r in results:
        for i in results[r]:
            if i == resultid:
                newresults[r] = {}
                newresults[r][i] = results[r][i]
    return newresults
def strip_logs(results):
    """Return a deep copy of *results* with bulky raw log data removed."""
    stripped = copy.deepcopy(results)
    for res in stripped:
        if 'result' not in stripped[res]:
            continue
        resultdata = stripped[res]['result']
        # Drop the whole-run raw log sections
        for logtype in rawlog_sections:
            resultdata.pop(logtype, None)
        # Drop per-ptest-suite logs
        if 'ptestresult.sections' in resultdata:
            for suite in resultdata['ptestresult.sections']:
                resultdata['ptestresult.sections'][suite].pop('log', None)
    return stripped
# For timing numbers, crazy amounts of precision don't make sense and just confuse
# the logs. For numbers over 1, trim to 3 decimal places, for numbers less than 1,
# trim to 4 significant digits
def trim_durations(results):
    """Round 'duration' fields in place; returns *results* for convenience."""
    for res in results:
        if 'result' not in results[res]:
            continue
        for entry in results[res]['result']:
            data = results[res]['result'][entry]
            if 'duration' not in data:
                continue
            duration = data['duration']
            # Durations of exactly 1 are deliberately left untouched
            if duration > 1:
                data['duration'] = float("%.3f" % duration)
            elif duration < 1:
                data['duration'] = float("%.4g" % duration)
    return results
def handle_cleanups(results):
    """Normalise legacy data in *results* in place.

    Old-format reproducibility entries stored a dict per file with a
    "…/./relative/path" reference; reduce each to just the relative path.
    Also drops the redundant 'reproducible.rawlogs' section.
    """
    for key in results:
        run = results[key]
        try:
            files = run['result']['reproducible']['files']
            for pkgtype in files:
                for name in list(files[pkgtype]):
                    entries = files[pkgtype][name]
                    if entries and type(entries[0]) == dict:
                        files[pkgtype][name] = [e["reference"].split("/./")[1]
                                                for e in entries]
        except KeyError:
            pass
        # Remove pointless duplicate rawlogs data
        try:
            del run['result']['reproducible.rawlogs']
        except KeyError:
            pass
def decode_log(logdata):
    """Return log text from plain-string or compressed-dict form.

    A dict is expected to carry base64-encoded zlib data under the
    "compressed" key.  Anything unrecognised yields None.
    """
    if isinstance(logdata, str):
        return logdata
    if isinstance(logdata, dict) and "compressed" in logdata:
        raw = base64.b64decode(logdata["compressed"].encode("utf-8"))
        return zlib.decompress(raw).decode("utf-8", errors='ignore')
    return None
def generic_get_log(sectionname, results, section):
    """Return the decoded log for *section* under *sectionname*, or None."""
    if sectionname not in results:
        return None
    if section not in results[sectionname]:
        return None
    entry = results[sectionname][section]
    if 'log' not in entry:
        return None
    return decode_log(entry['log'])
def ptestresult_get_log(results, section):
    """Return the decoded log of one ptest suite *section*, or None."""
    return generic_get_log('ptestresult.sections', results, section)
def generic_get_rawlogs(sectname, results):
    """Return the decoded raw log stored under *sectname*, or None."""
    if sectname not in results or 'log' not in results[sectname]:
        return None
    return decode_log(results[sectname]['log'])
def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
    """Write *results* out as JSON files under *destdir*.

    Each top-level key of *results* becomes a subdirectory containing
    *fn*.  Unless *ptestjson* is set, bulky logs are stripped from the
    stored JSON (and durations/legacy data normalised); with *ptestlogs*
    set, the raw and per-suite ptest logs are additionally extracted to
    separate .log files alongside the JSON.
    """
    for res in results:
        if res:
            dst = destdir + "/" + res + "/" + fn
        else:
            # Empty group key: write directly into destdir
            dst = destdir + "/" + fn
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        resultsout = results[res]
        if not ptestjson:
            # Keep the stored JSON compact; note trim_durations/handle_cleanups
            # mutate the stripped copy in place.
            resultsout = strip_logs(results[res])
            trim_durations(resultsout)
            handle_cleanups(resultsout)
        with open(dst, 'w') as f:
            f.write(json.dumps(resultsout, sort_keys=True, indent=1))
        for res2 in results[res]:
            if ptestlogs and 'result' in results[res][res2]:
                seriesresults = results[res][res2]['result']
                # Whole-run raw logs (ptest/ltp/ltpposix)
                for logtype in rawlog_sections:
                    logdata = generic_get_rawlogs(logtype, seriesresults)
                    if logdata is not None:
                        logger.info("Extracting " + rawlog_sections[logtype] + "-raw.log")
                        with open(dst.replace(fn, rawlog_sections[logtype] + "-raw.log"), "w+") as f:
                            f.write(logdata)
                # Per-suite ptest logs
                if 'ptestresult.sections' in seriesresults:
                    for i in seriesresults['ptestresult.sections']:
                        sectionlog = ptestresult_get_log(seriesresults, i)
                        if sectionlog is not None:
                            with open(dst.replace(fn, "ptest-%s.log" % i), "w+") as f:
                                f.write(sectionlog)
def git_get_result(repo, tags, configmap=store_map):
    """Load all testresults.json data for the given git *tags* from *repo*.

    Every matching blob is read with a single 'git show' invocation and
    merged into one results dict via append_resultsdata.
    """
    git_objs = []
    for tag in tags:
        files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
        git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])
    def parse_json_stream(data):
        """Parse multiple concatenated JSON objects"""
        # The stored JSON is written with indent=1, so a line that is
        # exactly '}{' only occurs at the boundary between two files.
        # NOTE(review): this assumes the stored formatting; confirm if the
        # storage format ever changes.
        objs = []
        json_d = ""
        for line in data.splitlines():
            if line == '}{':
                json_d += '}'
                objs.append(json.loads(json_d))
                json_d = '{'
            else:
                json_d += line
        objs.append(json.loads(json_d))
        return objs
    # Optimize by reading all data with one git command
    results = {}
    for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
        append_resultsdata(results, obj, configmap=configmap)
    return results
def test_run_results(results):
    """
    Convenient generator function that iterates over all test runs that have a
    result section.
    Generates a tuple of:
        (result json file path, test run name, test run (dict), test run "results" (dict))
    for each test run that has a "result" section
    """
    for path, runs in results.items():
        for run_name, test_run in runs.items():
            if 'result' in test_run:
                yield path, run_name, test_run, test_run['result']

View File

@@ -0,0 +1,124 @@
# resulttool - store test results
#
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import tempfile
import os
import subprocess
import json
import shutil
import scriptpath
scriptpath.add_bitbake_lib_path()
scriptpath.add_oe_lib_path()
import resulttool.resultutils as resultutils
import oeqa.utils.gitarchive as gitarchive
def store(args, logger):
    """Entry point for the 'store' subcommand.

    Collects testresults.json files from args.source, groups them by the
    (commit, branch, commit_count) of the 'meta' layer they were produced
    from, and archives each group into the args.git_dir repository via
    oeqa.utils.gitarchive.  Returns 0 on success, 1 if nothing to store.
    """
    tempdir = tempfile.mkdtemp(prefix='testresults.')
    try:
        configvars = resultutils.extra_configvars.copy()
        if args.executed_by:
            configvars['EXECUTED_BY'] = args.executed_by
        if args.extra_test_env:
            configvars['EXTRA_TEST_ENV'] = args.extra_test_env
        results = {}
        logger.info('Reading files from %s' % args.source)
        if resultutils.is_url(args.source) or os.path.isfile(args.source):
            resultutils.append_resultsdata(results, args.source, configvars=configvars)
        else:
            for root, dirs, files in os.walk(args.source):
                for name in files:
                    f = os.path.join(root, name)
                    if name == "testresults.json":
                        resultutils.append_resultsdata(results, f, configvars=configvars)
                    elif args.all:
                        # --all: carry non-result files into the archive too
                        dst = f.replace(args.source, tempdir + "/")
                        os.makedirs(os.path.dirname(dst), exist_ok=True)
                        shutil.copyfile(f, dst)
        revisions = {}
        if not results and not args.all:
            if args.allow_empty:
                logger.info("No results found to store")
                return 0
            logger.error("No results found to store")
            return 1
        # Find the branch/commit/commit_count and ensure they all match
        for suite in results:
            for result in results[suite]:
                config = results[suite][result]['configuration']['LAYERS']['meta']
                revision = (config['commit'], config['branch'], str(config['commit_count']))
                if revision not in revisions:
                    revisions[revision] = {}
                if suite not in revisions[revision]:
                    revisions[revision][suite] = {}
                revisions[revision][suite][result] = results[suite][result]
        logger.info("Found %d revisions to store" % len(revisions))
        for r in revisions:
            results = revisions[r]
            if args.revision and r[0] != args.revision:
                logger.info('skipping %s as non-matching' % r[0])
                continue
            keywords = {'commit': r[0], 'branch': r[1], "commit_count": r[2]}
            # Clear out JSON written for the previous revision's iteration.
            # NOTE(review): the '! -path "./.git/*"' filter is a relative
            # pattern matched against find's absolute paths, so it never
            # matches; harmless here since tempdir has no .git -- confirm.
            subprocess.check_call(["find", tempdir, "-name", "testresults.json", "!", "-path", "./.git/*", "-delete"])
            resultutils.save_resultsdata(results, tempdir, ptestlogs=True)
            logger.info('Storing test result into git repository %s' % args.git_dir)
            excludes = []
            if args.logfile_archive:
                # Logs go to the side archive instead of the git repository
                excludes = ['*.log', "*.log.zst"]
            tagname = gitarchive.gitarchive(tempdir, args.git_dir, False, False,
                                            "Results of {branch}:{commit}", "branch: {branch}\ncommit: {commit}", "{branch}",
                                            False, "{branch}/{commit_count}-g{commit}/{tag_number}",
                                            'Test run #{tag_number} of {branch}:{commit}', '',
                                            excludes, [], False, keywords, logger)
            if args.logfile_archive:
                logdir = args.logfile_archive + "/" + tagname
                shutil.copytree(tempdir, logdir)
                # Compress the archived logs to save space
                for root, dirs, files in os.walk(logdir):
                    for name in files:
                        if not name.endswith(".log"):
                            continue
                        f = os.path.join(root, name)
                        subprocess.run(["zstd", f, "--rm"], check=True, capture_output=True)
    finally:
        subprocess.check_call(["rm", "-rf", tempdir])
    return 0
def register_commands(subparsers):
    """Register subcommands from this plugin"""
    parser = subparsers.add_parser('store', help='store test results into a git repository',
                                   description='takes a results file or directory of results files and stores '
                                               'them into the destination git repository, splitting out the results '
                                               'files as configured',
                                   group='setup')
    parser.set_defaults(func=store)
    # Positional arguments first, then the option flags.
    parser.add_argument('source',
                        help='source file/directory/URL that contain the test result files to be stored')
    parser.add_argument('git_dir',
                        help='the location of the git repository to store the results in')
    parser.add_argument('-a', '--all', action='store_true',
                        help='include all files, not just testresults.json files')
    parser.add_argument('-e', '--allow-empty', action='store_true',
                        help='don\'t error if no results to store are found')
    parser.add_argument('-x', '--executed-by', default='',
                        help='add executed-by configuration to each result file')
    parser.add_argument('-t', '--extra-test-env', default='',
                        help='add extra test environment data to each result file configuration')
    parser.add_argument('-r', '--revision', default='',
                        help='only store data for the specified revision')
    parser.add_argument('-l', '--logfile-archive', default='',
                        help='directory to separately archive log files along with a copy of the results')

View File

@@ -0,0 +1,79 @@
{# Rendered by resulttool report.py with trim_blocks=True, so this comment (and its newline) is removed from the output. maxlen holds per-column padding widths computed by print_test_report. #}
==============================================================================================================
Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
==============================================================================================================
--------------------------------------------------------------------------------------------------------------
{{ 'Test Series'.ljust(maxlen['testseries']) }} | {{ 'ID'.ljust(maxlen['result_id']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }}
--------------------------------------------------------------------------------------------------------------
{% for report in reportvalues |sort(attribute='sort') %}
{{ report.testseries.ljust(maxlen['testseries']) }} | {{ report.result_id.ljust(maxlen['result_id']) }} | {{ (report.passed|string).ljust(maxlen['passed']) }} | {{ (report.failed|string).ljust(maxlen['failed']) }} | {{ (report.skipped|string).ljust(maxlen['skipped']) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
{{ 'Total'.ljust(maxlen['testseries']) }} | {{ reporttotalvalues['count'].ljust(maxlen['result_id']) }} | {{ reporttotalvalues['passed'].ljust(maxlen['passed']) }} | {{ reporttotalvalues['failed'].ljust(maxlen['failed']) }} | {{ reporttotalvalues['skipped'].ljust(maxlen['skipped']) }}
--------------------------------------------------------------------------------------------------------------
{% for machine in machines %}
{% if ptests[machine] %}
==============================================================================================================
{{ machine }} PTest Result Summary
==============================================================================================================
--------------------------------------------------------------------------------------------------------------
{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
--------------------------------------------------------------------------------------------------------------
{% for ptest in ptests[machine] |sort %}
{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[machine][ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[machine][ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[machine][ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[machine][ptest]['duration']|string) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
{% endif %}
{% endfor %}
{% for machine in machines %}
{% if ltptests[machine] %}
==============================================================================================================
{{ machine }} Ltp Test Result Summary
==============================================================================================================
--------------------------------------------------------------------------------------------------------------
{{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
--------------------------------------------------------------------------------------------------------------
{% for ltptest in ltptests[machine] |sort %}
{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[machine][ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[machine][ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[machine][ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[machine][ltptest]['duration']|string) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
{% endif %}
{% endfor %}
{% for machine in machines %}
{% if ltpposixtests[machine] %}
==============================================================================================================
{{ machine }} Ltp Posix Result Summary
==============================================================================================================
--------------------------------------------------------------------------------------------------------------
{{ 'Recipe'.ljust(maxlen['ltpposixtest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
--------------------------------------------------------------------------------------------------------------
{% for ltpposixtest in ltpposixtests[machine] |sort %}
{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[machine][ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[machine][ltpposixtest]['duration']|string) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
{% endif %}
{% endfor %}
==============================================================================================================
Failed test cases (sorted by testseries, ID)
==============================================================================================================
{% if havefailed %}
--------------------------------------------------------------------------------------------------------------
{% for report in reportvalues |sort(attribute='sort') %}
{% if report.failed_testcases %}
testseries | result_id : {{ report.testseries }} | {{ report.result_id }}
{% for testcase in report.failed_testcases %}
    {{ testcase }}
{% endfor %}
{% endif %}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
{% else %}
There were no test failures
{% endif %}

View File

@@ -0,0 +1,32 @@
# Path utility functions for OE python scripts
#
# Copyright (C) 2012-2014 Intel Corporation
# Copyright (C) 2011 Mentor Graphics Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import sys
import os
import os.path
def add_oe_lib_path():
    """Prepend the meta/lib directory (relative to this script) to sys.path."""
    scripts_base = os.path.abspath(os.path.dirname(__file__) + '/../..')
    sys.path.insert(0, scripts_base + '/meta/lib')
def add_bitbake_lib_path():
basepath = os.path.abspath(os.path.dirname(__file__) + '/../..')
bitbakepath = None
if os.path.exists(basepath + '/bitbake/lib/bb'):
bitbakepath = basepath + '/bitbake'
else:
# look for bitbake/bin dir in PATH
for pth in os.environ['PATH'].split(':'):
if os.path.exists(os.path.join(pth, '../lib/bb')):
bitbakepath = os.path.abspath(os.path.join(pth, '..'))
break
if bitbakepath:
sys.path.insert(0, bitbakepath + '/lib')
return bitbakepath

View File

@@ -0,0 +1,282 @@
# Script utility functions
#
# Copyright (C) 2014 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import glob
import logging
import os
import random
import shlex
import shutil
import string
import subprocess
import sys
import tempfile
import threading
import importlib
import importlib.machinery
import importlib.util
class KeepAliveStreamHandler(logging.StreamHandler):
    """StreamHandler that emits a periodic keep-alive log message.

    A daemon thread prints "Keepalive message" whenever *keepalive*
    seconds pass without any record being emitted through this handler
    (useful on CI systems that kill silent jobs).
    """
    def __init__(self, keepalive=True, **kwargs):
        super().__init__(**kwargs)
        if keepalive is True:
            keepalive = 5000 # default timeout
        self._timeout = threading.Condition()
        self._stop = False
        # background thread waits on condition, if the condition does not
        # happen emit a keep alive message
        def thread():
            while not self._stop:
                with self._timeout:
                    # wait() returns False on timeout, i.e. no record was
                    # emitted within the keepalive interval
                    if not self._timeout.wait(keepalive):
                        self.emit(logging.LogRecord("keepalive", logging.INFO,
                                                    None, None, "Keepalive message", None, None))
        self._thread = threading.Thread(target=thread, daemon=True)
        self._thread.start()
    def close(self):
        # mark the thread to stop and notify it
        self._stop = True
        with self._timeout:
            self._timeout.notify()
        # wait for it to join
        self._thread.join()
        super().close()
    def emit(self, record):
        super().emit(record)
        # trigger timer reset
        with self._timeout:
            self._timeout.notify()
def logger_create(name, stream=None, keepalive=None):
    """Create and return an INFO-level logger with one stream handler.

    When *keepalive* is given, a KeepAliveStreamHandler is used so that a
    periodic message is emitted during long silences.
    """
    log = logging.getLogger(name)
    if keepalive is None:
        handler = logging.StreamHandler(stream=stream)
    else:
        handler = KeepAliveStreamHandler(stream=stream, keepalive=keepalive)
    handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    return log
def logger_setup_color(logger, color='auto'):
    """Enable colour on the logger's BBLogFormatter handlers.

    'always' forces colour on; 'auto' enables it only when the handler's
    stream is a tty.
    """
    from bb.msg import BBLogFormatter
    for handler in logger.handlers:
        if not isinstance(handler, logging.StreamHandler):
            continue
        if not isinstance(handler.formatter, BBLogFormatter):
            continue
        if color == 'always' or (color == 'auto' and handler.stream.isatty()):
            handler.formatter.enable_color()
def load_plugins(logger, plugins, pluginpath):
    """Import every not-yet-loaded *.py module in *pluginpath*.

    Each imported module has its plugin_init(plugins) hook called (when
    present) and is appended to *plugins*.
    """
    def module_name(path):
        return os.path.splitext(os.path.basename(path))[0]
    def import_plugin(name):
        logger.debug('Loading plugin %s' % name)
        spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath])
        if spec:
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return module
    already_loaded = [module_name(p.__name__) for p in plugins]
    logger.debug('Loading plugins from %s...' % pluginpath)
    for path in glob.glob(os.path.join(pluginpath, '*.py')):
        name = module_name(path)
        if name == '__init__' or name in already_loaded:
            continue
        plugin = import_plugin(name)
        if hasattr(plugin, 'plugin_init'):
            plugin.plugin_init(plugins)
        plugins.append(plugin)
def git_convert_standalone_clone(repodir):
    """If specified directory is a git repository, ensure it's a standalone clone"""
    import bb.process

    gitdir = os.path.join(repodir, '.git')
    if not os.path.exists(gitdir):
        return
    alternatesfile = os.path.join(gitdir, 'objects', 'info', 'alternates')
    if os.path.exists(alternatesfile):
        # A shared clone (git clone -s) borrows objects from another
        # repository; repacking pulls every object in locally so the
        # alternates file can safely be dropped.
        bb.process.run('git repack -a', cwd=repodir)
        os.remove(alternatesfile)
def _get_temp_recipe_dir(d):
# This is a little bit hacky but we need to find a place where we can put
# the recipe so that bitbake can find it. We're going to delete it at the
# end so it doesn't really matter where we put it.
bbfiles = d.getVar('BBFILES').split()
fetchrecipedir = None
for pth in bbfiles:
if pth.endswith('.bb'):
pthdir = os.path.dirname(pth)
if os.access(os.path.dirname(os.path.dirname(pthdir)), os.W_OK):
fetchrecipedir = pthdir.replace('*', 'recipetool')
if pthdir.endswith('workspace/recipes/*'):
# Prefer the workspace
break
return fetchrecipedir
class FetchUrlFailure(Exception):
    """Raised when fetching a source URL fails."""

    def __init__(self, url):
        # keep the offending URL for the error message
        self.url = url

    def __str__(self):
        return "Failed to fetch URL %s" % self.url
def fetch_url(tinfoil, srcuri, srcrev, destdir, logger, preserve_tmp=False, mirrors=False):
    """
    Fetch the specified URL using normal do_fetch and do_unpack tasks, i.e.
    any dependencies that need to be satisfied in order to support the fetch
    operation will be taken care of

    Arguments:
    tinfoil      -- connected Tinfoil instance used to parse and build the
                    temporary fetch recipe
    srcuri       -- SRC_URI value to fetch
    srcrev       -- SRCREV value to use for the fetch
    destdir      -- directory the unpacked sources are moved into
    logger       -- logger used for progress/error reporting
    preserve_tmp -- if True the temporary work directory is kept and its
                    path is returned (otherwise it is deleted)
    mirrors      -- if True leave PREMIRRORS/MIRRORS enabled for the fetch

    Returns a (checksums, tmpdir) tuple: checksums maps checksum names to
    values reported by the fetcher; tmpdir is the preserved temporary
    directory, or None unless preserve_tmp was set.

    Raises FetchUrlFailure if the fetch/unpack tasks fail.
    """
    import bb

    checksums = {}
    fetchrecipepn = None

    # We need to put our temp directory under ${BASE_WORKDIR} otherwise
    # we may have problems with the recipe-specific sysroot population
    tmpparent = tinfoil.config_data.getVar('BASE_WORKDIR')
    bb.utils.mkdirhier(tmpparent)
    tmpdir = tempfile.mkdtemp(prefix='recipetool-', dir=tmpparent)
    try:
        tmpworkdir = os.path.join(tmpdir, 'work')
        logger.debug('fetch_url: temp dir is %s' % tmpdir)

        # The temporary recipe itself must live somewhere covered by BBFILES
        fetchrecipedir = _get_temp_recipe_dir(tinfoil.config_data)
        if not fetchrecipedir:
            logger.error('Searched BBFILES but unable to find a writeable place to put temporary recipe')
            sys.exit(1)
        fetchrecipe = None
        bb.utils.mkdirhier(fetchrecipedir)
        try:
            # Generate a dummy recipe so we can follow more or less normal paths
            # for do_fetch and do_unpack
            # I'd use tempfile functions here but underscores can be produced by that and those
            # aren't allowed in recipe file names except to separate the version
            rndstring = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
            fetchrecipe = os.path.join(fetchrecipedir, 'tmp-recipetool-%s.bb' % rndstring)
            fetchrecipepn = os.path.splitext(os.path.basename(fetchrecipe))[0]
            logger.debug('Generating initial recipe %s for fetching' % fetchrecipe)
            with open(fetchrecipe, 'w') as f:
                # We don't want to have to specify LIC_FILES_CHKSUM
                f.write('LICENSE = "CLOSED"\n')
                # We don't need the cross-compiler
                f.write('INHIBIT_DEFAULT_DEPS = "1"\n')
                # We don't have the checksums yet so we can't require them
                f.write('BB_STRICT_CHECKSUM = "ignore"\n')
                f.write('SRC_URI = "%s"\n' % srcuri)
                f.write('SRCREV = "%s"\n' % srcrev)
                f.write('PV = "0.0+"\n')
                f.write('WORKDIR = "%s"\n' % tmpworkdir)
                # Set S out of the way so it doesn't get created under the workdir
                f.write('S = "%s"\n' % os.path.join(tmpdir, 'emptysrc'))
                if not mirrors:
                    # We do not need PREMIRRORS since we are almost certainly
                    # fetching new source rather than something that has already
                    # been fetched. Hence, we disable them by default.
                    # However, we provide an option for users to enable it.
                    f.write('PREMIRRORS = ""\n')
                    f.write('MIRRORS = ""\n')

            logger.info('Fetching %s...' % srcuri)

            # FIXME this is too noisy at the moment

            # Parse recipes so our new recipe gets picked up
            tinfoil.parse_recipes()

            def eventhandler(event):
                # Collect checksums reported by the fetcher so the caller
                # can embed them in a real recipe later
                if isinstance(event, bb.fetch2.MissingChecksumEvent):
                    checksums.update(event.checksums)
                    return True
                return False

            # Run the fetch + unpack tasks
            res = tinfoil.build_targets(fetchrecipepn,
                                        'do_unpack',
                                        handle_events=True,
                                        extra_events=['bb.fetch2.MissingChecksumEvent'],
                                        event_callback=eventhandler)
            if not res:
                raise FetchUrlFailure(srcuri)

            # Remove unneeded directories
            rd = tinfoil.parse_recipe(fetchrecipepn)
            if rd:
                pathvars = ['T', 'RECIPE_SYSROOT', 'RECIPE_SYSROOT_NATIVE']
                for pathvar in pathvars:
                    path = rd.getVar(pathvar)
                    if os.path.exists(path):
                        shutil.rmtree(path)
        finally:
            if fetchrecipe:
                try:
                    os.remove(fetchrecipe)
                except FileNotFoundError:
                    pass
            try:
                os.rmdir(fetchrecipedir)
            except OSError as e:
                import errno
                # The directory may legitimately still contain other recipes
                if e.errno != errno.ENOTEMPTY:
                    raise

        bb.utils.mkdirhier(destdir)
        for fn in os.listdir(tmpworkdir):
            shutil.move(os.path.join(tmpworkdir, fn), destdir)

    finally:
        if not preserve_tmp:
            shutil.rmtree(tmpdir)
            tmpdir = None

    return checksums, tmpdir
def run_editor(fn, logger=None):
    """Open the user's preferred editor ($VISUAL, then $EDITOR, falling back
    to vi) on one or more files.

    fn     -- a single filename or a list of filenames
    logger -- optional logger used to report editor failures

    Returns the editor's exit status (0 on success), or 1 if the editor
    exited with a non-zero status.
    """
    if isinstance(fn, str):
        files = [fn]
    else:
        files = fn

    editor = os.getenv('VISUAL', os.getenv('EDITOR', 'vi'))
    try:
        return subprocess.check_call(shlex.split(editor) + files)
    except subprocess.CalledProcessError as exc:
        # Previously this crashed with AttributeError when logger was None
        # (the default), masking the actual editor failure
        if logger:
            logger.error("Execution of '%s' failed: %s" % (editor, exc))
        return 1
def is_src_url(param):
    """
    Check if a parameter is a URL and return True if so
    NOTE: be careful about changing this as it will influence how devtool/recipetool command line handling works
    """
    if not param:
        return False
    # scheme://..., git@host:..., or user@...something.git all count as URLs
    return ('://' in param
            or param.startswith('git@')
            or ('@' in param and param.endswith('.git')))
def filter_src_subdirs(pth):
    """
    Filter out subdirectories of initial unpacked source trees that we do not care about.
    Used by devtool and recipetool.
    """
    # build-system artefacts that are not part of the actual source
    unwanted = ('git.indirectionsymlink', 'source-date-epoch', 'sstate-install-recipe_qa')
    return [entry for entry in os.listdir(pth) if entry not in unwanted]

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env python3
#
# Copyright (c) 2007 Red Hat, Inc.
# Copyright (c) 2011 Intel, Inc.
#
# SPDX-License-Identifier: GPL-2.0-only
#
class WicError(Exception):
    """Base exception for errors raised by the wic image creation tool."""
    pass

View File

@@ -0,0 +1,3 @@
# This file is included into 3 canned wks files from this directory
part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
part / --source rootfs --use-uuid --fstype=ext4 --label platform --align 1024

View File

@@ -0,0 +1,27 @@
# This is an example configuration file for syslinux.
TIMEOUT 50
ALLOWOPTIONS 1
SERIAL 0 115200
PROMPT 0
UI vesamenu.c32
menu title Select boot options
menu tabmsg Press [Tab] to edit, [Return] to select
DEFAULT Graphics console boot
LABEL Graphics console boot
KERNEL /vmlinuz
APPEND label=boot rootwait
LABEL Serial console boot
KERNEL /vmlinuz
APPEND label=boot rootwait console=ttyS0,115200
LABEL Graphics console install
KERNEL /vmlinuz
APPEND label=install rootwait
LABEL Serial console install
KERNEL /vmlinuz
APPEND label=install rootwait console=ttyS0,115200

View File

@@ -0,0 +1,8 @@
# short-description: Create a 'pcbios' direct disk image with custom bootloader config
# long-description: Creates a partitioned legacy BIOS disk image that the user
# can directly dd to boot media. The bootloader configuration source is a user file.
include common.wks.inc
bootloader --configfile="directdisk-bootloader-config.cfg"

View File

@@ -0,0 +1,10 @@
# short-description: Create a 'pcbios' direct disk image
# long-description: Creates a partitioned legacy BIOS disk image that the user
# can directly dd to boot media.
part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
bootloader --ptable gpt --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0 console=ttyS0,115200n8"

View File

@@ -0,0 +1,23 @@
# short-description: Create multi rootfs image using rootfs plugin
# long-description: Creates a partitioned disk image with two rootfs partitions
# using rootfs plugin.
#
# Partitions can use either
# - indirect rootfs references to image recipe(s):
# wic create directdisk-multi-indirect-recipes -e core-image-minimal \
# --rootfs-dir rootfs1=core-image-minimal
# --rootfs-dir rootfs2=core-image-minimal-dev
#
# - or paths to rootfs directories:
# wic create directdisk-multi-rootfs \
# --rootfs-dir rootfs1=tmp/work/qemux86_64-poky-linux/core-image-minimal/1.0-r0/rootfs/
# --rootfs-dir rootfs2=tmp/work/qemux86_64-poky-linux/core-image-minimal-dev/1.0-r0/rootfs/
#
# - or any combinations of -r and --rootfs command line options
part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
part / --source rootfs --rootfs-dir=rootfs1 --ondisk sda --fstype=ext4 --label platform --align 1024
part /rescue --source rootfs --rootfs-dir=rootfs2 --ondisk sda --fstype=ext4 --label secondary --align 1024
bootloader --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0 console=ttyS0,115200n8"

View File

@@ -0,0 +1,8 @@
# short-description: Create a 'pcbios' direct disk image
# long-description: Creates a partitioned legacy BIOS disk image that the user
# can directly dd to boot media.
include common.wks.inc
bootloader --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0 console=ttyS0,115200n8"

View File

@@ -0,0 +1,3 @@
bootloader --ptable gpt
part /boot --source rootfs --rootfs-dir=${IMAGE_ROOTFS}/boot --fstype=vfat --label boot --active --align 1024 --use-uuid --overhead-factor 1.1
part / --source rootfs --fstype=ext4 --label root --align 1024 --exclude-path boot/

View File

@@ -0,0 +1,11 @@
# short-description: Create an EFI disk image
# long-description: Creates a partitioned EFI disk image that the user
# can directly dd to boot media.
part /boot --source bootimg-efi --sourceparams="loader=grub-efi" --ondisk sda --label msdos --active --align 1024
part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
part swap --ondisk sda --size 44 --label swap1 --fstype=swap
bootloader --ptable gpt --timeout=5 --append="rootfstype=ext4 console=ttyS0,115200 console=tty0"

View File

@@ -0,0 +1,7 @@
# short-description: Create a hybrid ISO image
# long-description: Creates an EFI and legacy bootable hybrid ISO image
# which can be used on optical media as well as USB media.
part /boot --source isoimage-isohybrid --sourceparams="loader=grub-efi,image_name=HYBRID_ISO_IMG" --ondisk cd --label HYBRIDISO
bootloader --timeout=15 --append=""

View File

@@ -0,0 +1,3 @@
# short-description: Create qcow2 image for LoongArch QEMU machines
part / --source rootfs --fstype=ext4 --label root --align 4096 --size 5G

View File

@@ -0,0 +1,3 @@
# short-description: Create qcow2 image for RISC-V QEMU machines
part / --source rootfs --fstype=ext4 --label root --align 4096 --size 5G

View File

@@ -0,0 +1,8 @@
# short-description: Create a qemu machine 'pcbios' direct disk image
# long-description: Creates a partitioned legacy BIOS disk image that the user
# can directly use to boot a qemu machine.
include common.wks.inc
bootloader --timeout=0 --append="rw oprofile.timer=1 rootfstype=ext4 console=tty console=ttyS0 "

View File

@@ -0,0 +1,6 @@
# short-description: Create SD card image with a boot partition
# long-description: Creates a partitioned SD card image. Boot files
# are located in the first vfat partition.
part /boot --source bootimg-partition --ondisk mmcblk0 --fstype=vfat --label boot --active --align 4 --size 16
part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --label root --align 4

View File

@@ -0,0 +1,11 @@
# short-description: Create an EFI disk image with systemd-boot
# long-description: Creates a partitioned EFI disk image that the user
# can directly dd to boot media. The selected bootloader is systemd-boot.
part /boot --source bootimg-efi --sourceparams="loader=systemd-boot" --ondisk sda --label msdos --active --align 1024 --use-uuid
part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
part swap --ondisk sda --size 44 --label swap1 --fstype=swap --use-uuid
bootloader --ptable gpt --timeout=5 --append="rootwait rootfstype=ext4 console=ttyS0,115200 console=tty0"

View File

@@ -0,0 +1,628 @@
#
# Copyright (c) 2013, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module implements the image creation engine used by 'wic' to
# create images. The engine parses through the OpenEmbedded kickstart
# (wks) file specified and generates images that can then be directly
# written onto media.
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import logging
import os
import tempfile
import json
import subprocess
import shutil
import re
from collections import namedtuple, OrderedDict
from wic import WicError
from wic.filemap import sparse_copy
from wic.pluginbase import PluginMgr
from wic.misc import get_bitbake_var, exec_cmd
logger = logging.getLogger('wic')
def verify_build_env():
    """
    Check that the build environment is usable.

    Returns True on success; raises WicError if BUILDDIR is not set
    (i.e. oe-init-build-env has not been sourced).
    """
    builddir = os.environ.get("BUILDDIR")
    if not builddir:
        raise WicError("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
    return True
# Locations searched for canned .wks files: relative to the scripts dir
# and (for SCRIPTS_CANNED_IMAGE_DIR / WIC_DIR) relative to each layer.
CANNED_IMAGE_DIR = "lib/wic/canned-wks" # relative to scripts
SCRIPTS_CANNED_IMAGE_DIR = "scripts/" + CANNED_IMAGE_DIR
WIC_DIR = "wic"
def build_canned_image_list(path):
    """Collect every directory that may hold canned .wks files.

    Layer-provided locations (from BBLAYERS) come first, followed by the
    canned-wks directory under the given scripts path.
    """
    search_dirs = []
    layers = get_bitbake_var("BBLAYERS")
    if layers is not None:
        search_dirs = [os.path.join(layer, sub)
                       for layer in layers.split()
                       for sub in (WIC_DIR, SCRIPTS_CANNED_IMAGE_DIR)
                       if os.path.isdir(os.path.join(layer, sub))]
    search_dirs.append(os.path.join(path, CANNED_IMAGE_DIR))
    return search_dirs
def find_canned_image(scripts_path, wks_file):
    """
    Find a .wks file with the given name in the canned files dir.

    Return None if not found
    """
    # editor backup files (~, #) are skipped implicitly since an exact
    # name match against the two valid extensions is required
    candidates = (wks_file + ".wks", wks_file + ".wks.in")
    for canned_wks_dir in build_canned_image_list(scripts_path):
        for root, dirs, files in os.walk(canned_wks_dir):
            for fname in files:
                if fname in candidates:
                    return os.path.join(canned_wks_dir, fname)
    return None
def list_canned_images(scripts_path):
    """
    List the .wks files in the canned image dir, minus the extension.

    Prints one line per image: the base name plus its short-description
    (if the file contains a "short-description:" marker).
    """
    layers_canned_wks_dir = build_canned_image_list(scripts_path)
    for canned_wks_dir in layers_canned_wks_dir:
        for root, dirs, files in os.walk(canned_wks_dir):
            for fname in files:
                if fname.endswith("~") or fname.endswith("#"):
                    continue
                if fname.endswith(".wks") or fname.endswith(".wks.in"):
                    fullpath = os.path.join(canned_wks_dir, fname)
                    # Initialize before the loop: previously desc was only
                    # assigned inside the per-line loop, so an empty .wks
                    # file raised UnboundLocalError
                    desc = ""
                    with open(fullpath) as wks:
                        for line in wks:
                            idx = line.find("short-description:")
                            if idx != -1:
                                desc = line[idx + len("short-description:"):].strip()
                                break
                    basename = fname.split('.')[0]
                    print(" %s\t\t%s" % (basename.ljust(30), desc))
def list_canned_image_help(scripts_path, fullpath):
    """
    Print the help and params in the specified canned image.
    """
    in_help = False
    with open(fullpath) as wks:
        for line in wks:
            if not in_help:
                # skip ahead until the long-description marker is seen
                marker = line.find("long-description:")
                if marker != -1:
                    print()
                    print(line[marker + len("long-description:"):].strip())
                    in_help = True
                continue
            # help text ends at the first blank or non-comment line
            if not line.strip():
                break
            hash_pos = line.find("#")
            if hash_pos == -1:
                break
            print(line[hash_pos + len("#:"):].rstrip())
def list_source_plugins():
    """
    List the available source plugins i.e. plugins available for --source.
    """
    for plugin in PluginMgr.get_plugins('source'):
        print(" %s" % plugin)
def wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
               native_sysroot, options):
    """
    Create an image from the given OE kickstart (.wks) file.

    wks_file       -- user-defined OE kickstart file
    rootfs_dir     -- absolute path to the build's /rootfs dir
    bootimg_dir    -- absolute path to the build's boot artifacts directory
    kernel_dir     -- absolute path to the build's kernel directory
    native_sysroot -- absolute path to the build's native sysroots dir
    options        -- wic command line options (debug, bmap, outdir, imager, etc)

    Normally the artifact paths are determined by 'wic -e' from the output
    of 'bitbake -e' for a given image/machine (rootfs_dir from IMAGE_ROOTFS,
    kernel_dir from DEPLOY_DIR_IMAGE, native_sysroot from
    STAGING_DIR_NATIVE); in that case bootimg_dir remains unset and the
    plugin-specific creation code locates the boot artifacts itself.
    Alternatively all of them may be passed in explicitly via the
    individual 'wic' options.

    Raises WicError when BUILDDIR is unset or the imager plugin is unknown.
    """
    try:
        oe_builddir = os.environ["BUILDDIR"]
    except KeyError:
        raise WicError("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")

    if not os.path.exists(options.outdir):
        os.makedirs(options.outdir)

    pname = options.imager
    plugin_class = PluginMgr.get_plugins('imager').get(pname)
    if not plugin_class:
        raise WicError('Unknown plugin: %s' % pname)

    imager = plugin_class(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
                          native_sysroot, oe_builddir, options)
    imager.do_create()

    logger.info("The image(s) were created using OE kickstart file:\n %s", wks_file)
def wic_list(args, scripts_path):
    """
    Print the list of images or source plugins.

    Returns True when a listing/help was produced, False otherwise.
    """
    if args.list_type is None:
        return False

    if args.list_type == "images":
        list_canned_images(scripts_path)
        return True

    if args.list_type == "source-plugins":
        list_source_plugins()
        return True

    if len(args.help_for) == 1 and args.help_for[0] == 'help':
        # 'wic list <image> help': list_type actually holds the image name
        wks_file = args.list_type
        fullpath = find_canned_image(scripts_path, wks_file)
        if not fullpath:
            raise WicError("No image named %s found, exiting. "
                           "(Use 'wic list images' to list available images, "
                           "or specify a fully-qualified OE kickstart (.wks) "
                           "filename)" % wks_file)
        list_canned_image_help(scripts_path, fullpath)
        return True

    return False
class Disk:
    """Read/modify access to a partitioned disk image.

    Partition layout is discovered with parted; individual partitions are
    extracted into temporary per-partition image files on demand and
    manipulated with mtools (fat) or debugfs (ext). Temporary partition
    images are removed when the object is destroyed.
    """

    def __init__(self, imagepath, native_sysroot, fstypes=('fat', 'ext')):
        # imagepath      -- path to the partitioned disk image
        # native_sysroot -- optional sysroot whose tool paths are preferred
        # fstypes        -- filesystem type prefixes this object may operate on
        self.imagepath = imagepath
        self.native_sysroot = native_sysroot
        self.fstypes = fstypes
        self._partitions = None
        self._partimages = {}          # pnum -> temp file holding that partition
        self._lsector_size = None      # logical sector size reported by parted
        self._psector_size = None      # physical sector size reported by parted
        self._ptable_format = None     # partition table type (msdos/gpt/...)

        # find parted
        # read paths from $PATH environment variable
        # if it fails, use hardcoded paths
        pathlist = "/bin:/usr/bin:/usr/sbin:/sbin/"
        try:
            self.paths = os.environ['PATH'] + ":" + pathlist
        except KeyError:
            self.paths = pathlist

        if native_sysroot:
            # sysroot-provided tools take precedence over host tools
            for path in pathlist.split(':'):
                self.paths = "%s%s:%s" % (native_sysroot, path, self.paths)

        self.parted = shutil.which("parted", path=self.paths)
        if not self.parted:
            raise WicError("Can't find executable parted")

        self.partitions = self.get_partitions()

    def __del__(self):
        # clean up the temporary per-partition image files
        for path in self._partimages.values():
            os.unlink(path)

    def get_partitions(self):
        """Parse the image's partition table (via parted) into an
        OrderedDict of pnum(str) -> Part namedtuple, caching the result."""
        if self._partitions is None:
            self._partitions = OrderedDict()
            out = exec_cmd("%s -sm %s unit B print" % (self.parted, self.imagepath))
            parttype = namedtuple("Part", "pnum start end size fstype")
            splitted = out.splitlines()
            # skip over possible errors in exec_cmd output
            try:
                idx = splitted.index("BYT;")
            except ValueError:
                raise WicError("Error getting partition information from %s" % (self.parted))
            # machine-readable header: .../lsize:psize:table:...
            lsector_size, psector_size, self._ptable_format = splitted[idx + 1].split(":")[3:6]
            self._lsector_size = int(lsector_size)
            self._psector_size = int(psector_size)
            for line in splitted[idx + 2:]:
                pnum, start, end, size, fstype = line.split(':')[:5]
                # strip the trailing 'B' unit suffix from byte values
                partition = parttype(int(pnum), int(start[:-1]), int(end[:-1]),
                                     int(size[:-1]), fstype)
                self._partitions[pnum] = partition

        return self._partitions

    def __getattr__(self, name):
        """Get path to the executable in a lazy way."""
        if name in ("mdir", "mcopy", "mdel", "mdeltree", "sfdisk", "e2fsck",
                    "resize2fs", "mkswap", "mkdosfs", "debugfs", "blkid"):
            aname = "_%s" % name
            if aname not in self.__dict__:
                setattr(self, aname, shutil.which(name, path=self.paths))
                if aname not in self.__dict__ or self.__dict__[aname] is None:
                    raise WicError("Can't find executable '{}'".format(name))
            return self.__dict__[aname]
        return self.__dict__[name]

    def _get_part_image(self, pnum):
        """Extract partition pnum into a temporary file (cached) and return
        its path. Raises WicError for unknown or unsupported partitions."""
        if pnum not in self.partitions:
            raise WicError("Partition %s is not in the image" % pnum)
        part = self.partitions[pnum]
        # check if fstype is supported
        for fstype in self.fstypes:
            if part.fstype.startswith(fstype):
                break
        else:
            raise WicError("Not supported fstype: {}".format(part.fstype))
        if pnum not in self._partimages:
            # only the name of the temp file is needed; sparse_copy recreates it
            tmpf = tempfile.NamedTemporaryFile(prefix="wic-part")
            dst_fname = tmpf.name
            tmpf.close()
            sparse_copy(self.imagepath, dst_fname, skip=part.start, length=part.size)
            self._partimages[pnum] = dst_fname

        return self._partimages[pnum]

    def _put_part_image(self, pnum):
        """Put partition image into partitioned image."""
        sparse_copy(self._partimages[pnum], self.imagepath,
                    seek=self.partitions[pnum].start)

    def dir(self, pnum, path):
        """Return a directory listing of 'path' inside partition pnum
        (debugfs for ext*, mdir for fat)."""
        if pnum not in self.partitions:
            raise WicError("Partition %s is not in the image" % pnum)

        if self.partitions[pnum].fstype.startswith('ext'):
            return exec_cmd("{} {} -R 'ls -l {}'".format(self.debugfs,
                                                         self._get_part_image(pnum),
                                                         path), as_shell=True)
        else: # fat
            return exec_cmd("{} -i {} ::{}".format(self.mdir,
                                                   self._get_part_image(pnum),
                                                   path))

    def copy(self, src, dest):
        """Copy partition image into wic image."""
        # one of src/dest is a host path (str), the other addresses a
        # partition inside the image
        pnum = dest.part if isinstance(src, str) else src.part

        if self.partitions[pnum].fstype.startswith('ext'):
            if isinstance(src, str):
                cmd = "printf 'cd {}\nwrite {} {}\n' | {} -w {}".\
                      format(os.path.dirname(dest.path), src, os.path.basename(src),
                             self.debugfs, self._get_part_image(pnum))
            else: # copy from wic
                # run both dump and rdump to support both files and directory
                cmd = "printf 'cd {}\ndump /{} {}\nrdump /{} {}\n' | {} {}".\
                      format(os.path.dirname(src.path), src.path,
                             dest, src.path, dest, self.debugfs,
                             self._get_part_image(pnum))
        else: # fat
            if isinstance(src, str):
                cmd = "{} -i {} -snop {} ::{}".format(self.mcopy,
                                                      self._get_part_image(pnum),
                                                      src, dest.path)
            else:
                cmd = "{} -i {} -snop ::{} {}".format(self.mcopy,
                                                      self._get_part_image(pnum),
                                                      src.path, dest)

        exec_cmd(cmd, as_shell=True)
        self._put_part_image(pnum)

    def remove_ext(self, pnum, path, recursive):
        """
        Remove files/dirs and their contents from the partition.
        This only applies to ext* partition.
        """
        # collapse duplicate slashes so debugfs gets a clean path
        abs_path = re.sub(r'\/\/+', '/', path)
        cmd = "{} {} -wR 'rm \"{}\"'".format(self.debugfs,
                                             self._get_part_image(pnum),
                                             abs_path)
        out = exec_cmd(cmd , as_shell=True)
        for line in out.splitlines():
            if line.startswith("rm:"):
                if "file is a directory" in line:
                    if recursive:
                        # loop through content and delete them one by one if
                        # flaged with -r
                        subdirs = iter(self.dir(pnum, abs_path).splitlines())
                        next(subdirs)
                        for subdir in subdirs:
                            dir = subdir.split(':')[1].split(" ", 1)[1]
                            if not dir == "." and not dir == "..":
                                self.remove_ext(pnum, "%s/%s" % (abs_path, dir), recursive)

                    rmdir_out = exec_cmd("{} {} -wR 'rmdir \"{}\"'".format(self.debugfs,
                                                    self._get_part_image(pnum),
                                                    abs_path.rstrip('/'))
                                                    , as_shell=True)

                    for rmdir_line in rmdir_out.splitlines():
                        if "directory not empty" in rmdir_line:
                            raise WicError("Could not complete operation: \n%s \n"
                                           "use -r to remove non-empty directory" % rmdir_line)
                        if rmdir_line.startswith("rmdir:"):
                            raise WicError("Could not complete operation: \n%s "
                                           "\n%s" % (str(line), rmdir_line))

                else:
                    raise WicError("Could not complete operation: \n%s "
                                   "\nUnable to remove %s" % (str(line), abs_path))

    def remove(self, pnum, path, recursive):
        """Remove files/dirs from the partition."""
        partimg = self._get_part_image(pnum)
        if self.partitions[pnum].fstype.startswith('ext'):
            self.remove_ext(pnum, path, recursive)
        else: # fat
            cmd = "{} -i {} ::{}".format(self.mdel, partimg, path)
            try:
                exec_cmd(cmd)
            except WicError as err:
                if "not found" in str(err) or "non empty" in str(err):
                    # mdel outputs 'File ... not found' or 'directory .. non empty"
                    # try to use mdeltree as path could be a directory
                    cmd = "{} -i {} ::{}".format(self.mdeltree,
                                                 partimg, path)
                    exec_cmd(cmd)
                else:
                    raise err
        self._put_part_image(pnum)

    def write(self, target, expand):
        """Write disk image to the media or file."""
        def write_sfdisk_script(outf, parts):
            # serialize an sfdisk(8) input script from the parsed JSON table
            for key, val in parts['partitiontable'].items():
                if key in ("partitions", "device", "firstlba", "lastlba"):
                    continue
                if key == "id":
                    key = "label-id"
                outf.write("{}: {}\n".format(key, val))
            outf.write("\n")
            for part in parts['partitiontable']['partitions']:
                line = ''
                for name in ('attrs', 'name', 'size', 'type', 'uuid'):
                    if name == 'size' and part['type'] == 'f':
                        # don't write size for extended partition
                        continue
                    val = part.get(name)
                    if val:
                        line += '{}={}, '.format(name, val)
                if line:
                    line = line[:-2] # strip ', '
                if part.get('bootable'):
                    line += ' ,bootable'
                outf.write("{}\n".format(line))
            outf.flush()

        def read_ptable(path):
            # sfdisk -J prints the partition table as JSON
            out = exec_cmd("{} -J {}".format(self.sfdisk, path))
            return json.loads(out)

        def write_ptable(parts, target):
            with tempfile.NamedTemporaryFile(prefix="wic-sfdisk-", mode='w') as outf:
                write_sfdisk_script(outf, parts)
                cmd = "{} --no-reread {} < {} ".format(self.sfdisk, target, outf.name)
                exec_cmd(cmd, as_shell=True)

        if expand is None:
            sparse_copy(self.imagepath, target)
        else:
            # copy first sectors that may contain bootloader
            sparse_copy(self.imagepath, target, length=2048 * self._lsector_size)

            # copy source partition table to the target
            parts = read_ptable(self.imagepath)
            write_ptable(parts, target)

            # get size of unpartitioned space
            free = None
            for line in exec_cmd("{} -F {}".format(self.sfdisk, target)).splitlines():
                if line.startswith("Unpartitioned space ") and line.endswith("sectors"):
                    free = int(line.split()[-2])
                    # Align free space to a 2048 sector boundary. YOCTO #12840.
                    free = free - (free % 2048)
            if free is None:
                raise WicError("Can't get size of unpartitioned space")

            # calculate expanded partitions sizes
            sizes = {}
            num_auto_resize = 0
            for num, part in enumerate(parts['partitiontable']['partitions'], 1):
                if num in expand:
                    if expand[num] != 0: # don't resize partition if size is set to 0
                        sectors = expand[num] // self._lsector_size
                        free -= sectors - part['size']
                        part['size'] = sectors
                        sizes[num] = sectors
                elif part['type'] != 'f':
                    # partitions without an explicit size share the free space
                    sizes[num] = -1
                    num_auto_resize += 1

            for num, part in enumerate(parts['partitiontable']['partitions'], 1):
                if sizes.get(num) == -1:
                    part['size'] += free // num_auto_resize

            # write resized partition table to the target
            write_ptable(parts, target)

            # read resized partition table
            parts = read_ptable(target)

            # copy partitions content
            for num, part in enumerate(parts['partitiontable']['partitions'], 1):
                pnum = str(num)
                fstype = self.partitions[pnum].fstype

                # copy unchanged partition
                if part['size'] == self.partitions[pnum].size // self._lsector_size:
                    logger.info("copying unchanged partition {}".format(pnum))
                    sparse_copy(self._get_part_image(pnum), target, seek=part['start'] * self._lsector_size)
                    continue

                # resize or re-create partitions
                if fstype.startswith('ext') or fstype.startswith('fat') or \
                   fstype.startswith('linux-swap'):
                    # reserve a unique temp file name for the new partition image
                    partfname = None
                    with tempfile.NamedTemporaryFile(prefix="wic-part{}-".format(pnum)) as partf:
                        partfname = partf.name

                    if fstype.startswith('ext'):
                        logger.info("resizing ext partition {}".format(pnum))
                        partimg = self._get_part_image(pnum)
                        sparse_copy(partimg, partfname)
                        exec_cmd("{} -pf {}".format(self.e2fsck, partfname))
                        exec_cmd("{} {} {}s".format(\
                                 self.resize2fs, partfname, part['size']))
                    elif fstype.startswith('fat'):
                        logger.info("copying content of the fat partition {}".format(pnum))
                        with tempfile.TemporaryDirectory(prefix='wic-fatdir-') as tmpdir:
                            # copy content to the temporary directory
                            cmd = "{} -snompi {} :: {}".format(self.mcopy,
                                                               self._get_part_image(pnum),
                                                               tmpdir)
                            exec_cmd(cmd)
                            # create new msdos partition
                            label = part.get("name")
                            label_str = "-n {}".format(label) if label else ''

                            cmd = "{} {} -C {} {}".format(self.mkdosfs, label_str, partfname,
                                                          part['size'])
                            exec_cmd(cmd)
                            # copy content from the temporary directory to the new partition
                            cmd = "{} -snompi {} {}/* ::".format(self.mcopy, partfname, tmpdir)
                            exec_cmd(cmd, as_shell=True)
                    elif fstype.startswith('linux-swap'):
                        logger.info("creating swap partition {}".format(pnum))
                        label = part.get("name")
                        label_str = "-L {}".format(label) if label else ''
                        # keep the original swap UUID (36 chars after UUID=")
                        out = exec_cmd("{} --probe {}".format(self.blkid, self._get_part_image(pnum)))
                        uuid = out[out.index("UUID=\"")+6:out.index("UUID=\"")+42]
                        uuid_str = "-U {}".format(uuid) if uuid else ''
                        with open(partfname, 'w') as sparse:
                            os.ftruncate(sparse.fileno(), part['size'] * self._lsector_size)
                        exec_cmd("{} {} {} {}".format(self.mkswap, label_str, uuid_str, partfname))
                    sparse_copy(partfname, target, seek=part['start'] * self._lsector_size)
                    os.unlink(partfname)
                elif part['type'] != 'f':
                    logger.warning("skipping partition {}: unsupported fstype {}".format(pnum, fstype))
def wic_ls(args, native_sysroot):
    """List contents of partitioned image or vfat partition."""
    disk = Disk(args.path.image, native_sysroot)
    if args.path.part:
        # listing inside one partition
        print(disk.dir(args.path.part, args.path.path or '/'))
        return
    # no partition given: print the partition table instead
    if not disk.partitions:
        return
    print('Num Start End Size Fstype')
    for entry in disk.partitions.values():
        print("{:2d} {:12d} {:12d} {:12d} {}".format(
            entry.pnum, entry.start, entry.end, entry.size, entry.fstype))
def wic_cp(args, native_sysroot):
    """
    Copy file or directory to/from the vfat/ext partition of
    partitioned image.
    """
    # whichever side is not a plain host path identifies the image
    imagepath = args.src.image if isinstance(args.dest, str) else args.dest.image
    disk = Disk(imagepath, native_sysroot)
    disk.copy(args.src, args.dest)
def wic_rm(args, native_sysroot):
    """
    Remove files or directories from the vfat partition of
    partitioned image.
    """
    target_disk = Disk(args.path.image, native_sysroot)
    target_disk.remove(args.path.part, args.path.path,
                       args.recursive_delete)
def wic_write(args, native_sysroot):
    """
    Write image to a target device.
    """
    # swap partitions are writable targets here, unlike the other commands
    target_disk = Disk(args.image, native_sysroot, ('fat', 'ext', 'linux-swap'))
    target_disk.write(args.target, args.expand)
def find_canned(scripts_path, file_name):
    """
    Find a file either by its path or by name in the canned files dir.

    Return None if not found
    """
    # a direct (absolute or relative) path wins
    if os.path.exists(file_name):
        return file_name

    for canned_wks_dir in build_canned_image_list(scripts_path):
        for root, dirs, files in os.walk(canned_wks_dir):
            if file_name in files:
                return os.path.join(canned_wks_dir, file_name)
    return None
def get_custom_config(boot_file):
    """
    Get the custom configuration to be used for the bootloader.

    Return None if the file can't be found.
    """
    # this module lives two levels below the scripts directory
    scripts_path = os.path.abspath("%s/../.." % os.path.dirname(__file__))

    cfg_file = find_canned(scripts_path, boot_file)
    if not cfg_file:
        return None
    with open(cfg_file, "r") as f:
        return f.read()

View File

@@ -0,0 +1,583 @@
#
# Copyright (c) 2012 Intel, Inc.
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""
This module implements a way to get a file's block map. Two methods
are supported - the FIEMAP ioctl and the 'SEEK_HOLE / SEEK_DATA' features of
the file seek syscall. The former is implemented by the 'FilemapFiemap' class,
the latter is implemented by the 'FilemapSeek' class. Both classes provide the
same API. The 'filemap' function automatically selects which class can be used
and returns an instance of the class.
"""
# Disable the following pylint recommendations:
# * Too many instance attributes (R0902)
# pylint: disable=R0902
import errno
import os
import struct
import array
import fcntl
import tempfile
import logging
def get_block_size(file_obj):
    """
    Return the block size (in bytes) of the file system hosting the open
    file object 'file_obj', capped at 4 KiB.  Errors are indicated by the
    'IOError' exception.
    """
    # Get the block size of the host file-system for the image file by calling
    # the FIGETBSZ ioctl (number 2).
    try:
        binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
        bsize = struct.unpack('I', binary_data)[0]
    except OSError:
        bsize = None
    # If the ioctl raised OSError or reported a zero block size, fall back
    # to the stat-reported preferred I/O block size.  (The redundant
    # function-local 'import os' was removed; 'os' is a module-level import.)
    if not bsize:
        stat = os.fstat(file_obj.fileno())
        if hasattr(stat, 'st_blksize'):
            bsize = stat.st_blksize
        else:
            raise IOError("Unable to determine block size")
    # The mapping logic in this module only supports a maximum of a 4KB
    # block size.
    max_block_size = 4 * 1024
    if bsize > max_block_size:
        bsize = max_block_size
    return bsize
class ErrorNotSupp(Exception):
    """
    An exception of this type is raised when the 'FIEMAP' or 'SEEK_HOLE' feature
    is not supported either by the kernel or the file-system.  The 'filemap()'
    factory below catches it to fall back to a less capable implementation.
    """
    pass
class Error(Exception):
    """A class for all the other exceptions raised by this module."""
    pass
class _FilemapBase(object):
    """
    This is a base class for a couple of other classes in this module. This
    class simply performs the common parts of the initialization process: opens
    the image file, gets its size, etc. The 'log' parameter is the logger object
    to use for printing messages.
    """
    def __init__(self, image, log=None):
        """
        Initialize a class instance. The 'image' argument is full path to the
        file or file object to operate on.
        """
        self._log = log
        if self._log is None:
            self._log = logging.getLogger(__name__)
        self._f_image_needs_close = False
        if hasattr(image, "fileno"):
            # An already-open file object was passed in; do not close it.
            self._f_image = image
            self._image_path = image.name
        else:
            self._image_path = image
            self._open_image_file()
        try:
            self.image_size = os.fstat(self._f_image.fileno()).st_size
        except IOError as err:
            raise Error("cannot get information about file '%s': %s"
                        % (self._f_image.name, err))
        try:
            self.block_size = get_block_size(self._f_image)
        except IOError as err:
            raise Error("cannot get block size for '%s': %s"
                        % (self._image_path, err))
        # Blocks count, rounded up so a partial trailing block is counted.
        self.blocks_cnt = self.image_size + self.block_size - 1
        self.blocks_cnt //= self.block_size
        try:
            self._f_image.flush()
        except IOError as err:
            raise Error("cannot flush image file '%s': %s"
                        % (self._image_path, err))
        try:
            # NOTE: the original code had a stray trailing comma here which
            # turned this statement into a useless one-element tuple.
            os.fsync(self._f_image.fileno())
        except OSError as err:
            raise Error("cannot synchronize image file '%s': %s "
                        % (self._image_path, err.strerror))
        self._log.debug("opened image \"%s\"" % self._image_path)
        self._log.debug("block size %d, blocks count %d, image size %d"
                        % (self.block_size, self.blocks_cnt, self.image_size))
    def __del__(self):
        """The class destructor which just closes the image file."""
        if self._f_image_needs_close:
            self._f_image.close()
    def _open_image_file(self):
        """Open the image file."""
        try:
            self._f_image = open(self._image_path, 'rb')
        except IOError as err:
            raise Error("cannot open image file '%s': %s"
                        % (self._image_path, err))
        self._f_image_needs_close = True
    def block_is_mapped(self, block): # pylint: disable=W0613,R0201
        """
        This method has to be implemented by child classes. It returns
        'True' if block number 'block' of the image file is mapped and 'False'
        otherwise.
        """
        raise Error("the method is not implemented")
    def get_mapped_ranges(self, start, count): # pylint: disable=W0613,R0201
        """
        This method has to be implemented by child classes. This is a
        generator which yields ranges of mapped blocks in the file. The ranges
        are tuples of 2 elements: [first, last], where 'first' is the first
        mapped block and 'last' is the last mapped block.
        The ranges are yielded for the area of the file of size 'count' blocks,
        starting from block 'start'.
        """
        raise Error("the method is not implemented")
# The 'SEEK_HOLE' and 'SEEK_DATA' options of the file seek system call
# (Linux-specific 'whence' values, see lseek(2)).
_SEEK_DATA = 3
_SEEK_HOLE = 4
def _lseek(file_obj, offset, whence):
"""This is a helper function which invokes 'os.lseek' for file object
'file_obj' and with specified 'offset' and 'whence'. The 'whence'
argument is supposed to be either '_SEEK_DATA' or '_SEEK_HOLE'. When
there is no more data or hole starting from 'offset', this function
returns '-1'. Otherwise the data or hole position is returned."""
try:
return os.lseek(file_obj.fileno(), offset, whence)
except OSError as err:
# The 'lseek' system call returns the ENXIO if there is no data or
# hole starting from the specified offset.
if err.errno == errno.ENXIO:
return -1
elif err.errno == errno.EINVAL:
raise ErrorNotSupp("the kernel or file-system does not support "
"\"SEEK_HOLE\" and \"SEEK_DATA\"")
else:
raise
class FilemapSeek(_FilemapBase):
    """
    This class uses the 'SEEK_HOLE' and 'SEEK_DATA' to find file block mapping.
    Unfortunately, the current implementation requires the caller to have write
    access to the image file.
    """
    def __init__(self, image, log=None):
        """Refer the '_FilemapBase' class for the documentation."""
        # Call the base class constructor first
        _FilemapBase.__init__(self, image, log)
        self._log.debug("FilemapSeek: initializing")
        # Raises ErrorNotSupp if the file-system only has the stub
        # SEEK_HOLE implementation.
        self._probe_seek_hole()
    def _probe_seek_hole(self):
        """
        Check whether the system implements 'SEEK_HOLE' and 'SEEK_DATA'.
        Unfortunately, there seems to be no clean way for detecting this,
        because often the system just fakes them by just assuming that all
        files are fully mapped, so 'SEEK_HOLE' always returns EOF and
        'SEEK_DATA' always returns the requested offset.
        I could not invent a better way of detecting the fake 'SEEK_HOLE'
        implementation than just to create a temporary file in the same
        directory where the image file resides. It would be nice to change this
        to something better.
        """
        directory = os.path.dirname(self._image_path)
        try:
            tmp_obj = tempfile.TemporaryFile("w+", dir=directory)
        except IOError as err:
            raise ErrorNotSupp("cannot create a temporary in \"%s\": %s" \
                              % (directory, err))
        try:
            os.ftruncate(tmp_obj.fileno(), self.block_size)
        except OSError as err:
            raise ErrorNotSupp("cannot truncate temporary file in \"%s\": %s"
                               % (directory, err))
        # A freshly-truncated (fully sparse) file must report a hole at
        # offset 0; the stub implementation would report EOF instead.
        offs = _lseek(tmp_obj, 0, _SEEK_HOLE)
        if offs != 0:
            # We are dealing with the stub 'SEEK_HOLE' implementation which
            # always returns EOF.
            self._log.debug("lseek(0, SEEK_HOLE) returned %d" % offs)
            raise ErrorNotSupp("the file-system does not support "
                               "\"SEEK_HOLE\" and \"SEEK_DATA\" but only "
                               "provides a stub implementation")
        tmp_obj.close()
    def block_is_mapped(self, block):
        """Refer the '_FilemapBase' class for the documentation."""
        # The block is mapped iff SEEK_DATA from its start lands inside it.
        offs = _lseek(self._f_image, block * self.block_size, _SEEK_DATA)
        if offs == -1:
            result = False
        else:
            result = (offs // self.block_size == block)
        self._log.debug("FilemapSeek: block_is_mapped(%d) returns %s"
                        % (block, result))
        return result
    def _get_ranges(self, start, count, whence1, whence2):
        """
        This function implements 'get_mapped_ranges()' depending
        on what is passed in the 'whence1' and 'whence2' arguments.
        """
        assert whence1 != whence2
        # 'end'/'limit'/'start' below are byte offsets; the yielded values
        # are block numbers.
        end = start * self.block_size
        limit = end + count * self.block_size
        while True:
            # Find the next region start (data for whence1=_SEEK_DATA) ...
            start = _lseek(self._f_image, end, whence1)
            if start == -1 or start >= limit or start == self.image_size:
                break
            # ... and its end (the following hole for whence2=_SEEK_HOLE).
            end = _lseek(self._f_image, start, whence2)
            if end == -1 or end == self.image_size:
                end = self.blocks_cnt * self.block_size
            if end > limit:
                end = limit
            start_blk = start // self.block_size
            end_blk = end // self.block_size - 1
            self._log.debug("FilemapSeek: yielding range (%d, %d)"
                            % (start_blk, end_blk))
            yield (start_blk, end_blk)
    def get_mapped_ranges(self, start, count):
        """Refer the '_FilemapBase' class for the documentation."""
        self._log.debug("FilemapSeek: get_mapped_ranges(%d, %d(%d))"
                        % (start, count, start + count - 1))
        return self._get_ranges(start, count, _SEEK_DATA, _SEEK_HOLE)
# Below goes the FIEMAP ioctl implementation, which is not very readable
# because it deals with the rather complex FIEMAP ioctl. To understand the
# code, you need to know the FIEMAP interface, which is documented in the
# "Documentation/filesystems/fiemap.txt" file in the Linux kernel sources.
# Format string for 'struct fiemap'
# (fm_start, fm_length, fm_flags, fm_mapped_extents, fm_extent_count,
#  fm_reserved — native byte order, no padding).
_FIEMAP_FORMAT = "=QQLLLL"
# sizeof(struct fiemap)
_FIEMAP_SIZE = struct.calcsize(_FIEMAP_FORMAT)
# Format string for 'struct fiemap_extent'
_FIEMAP_EXTENT_FORMAT = "=QQQQQLLLL"
# sizeof(struct fiemap_extent)
_FIEMAP_EXTENT_SIZE = struct.calcsize(_FIEMAP_EXTENT_FORMAT)
# The FIEMAP ioctl number (presumably _IOWR('f', 11, struct fiemap) from
# linux/fs.h — confirm against the kernel headers before changing).
_FIEMAP_IOCTL = 0xC020660B
# This FIEMAP ioctl flag which instructs the kernel to sync the file before
# reading the block map
_FIEMAP_FLAG_SYNC = 0x00000001
# Size of the buffer for 'struct fiemap_extent' elements which will be used
# when invoking the FIEMAP ioctl. The larger is the buffer, the less times the
# FIEMAP ioctl will be invoked.
_FIEMAP_BUFFER_SIZE = 256 * 1024
class FilemapFiemap(_FilemapBase):
    """
    This class provides API to the FIEMAP ioctl. Namely, it allows to iterate
    over all mapped blocks and over all holes.
    This class synchronizes the image file every time it invokes the FIEMAP
    ioctl in order to work-around early FIEMAP implementation kernel bugs.
    """
    def __init__(self, image, log=None):
        """
        Initialize a class instance. The 'image' argument is the full path
        or the file object to operate on.
        """
        # Call the base class constructor first
        _FilemapBase.__init__(self, image, log)
        self._log.debug("FilemapFiemap: initializing")
        self._buf_size = _FIEMAP_BUFFER_SIZE
        # Calculate how many 'struct fiemap_extent' elements fit the buffer
        self._buf_size -= _FIEMAP_SIZE
        self._fiemap_extent_cnt = self._buf_size // _FIEMAP_EXTENT_SIZE
        assert self._fiemap_extent_cnt > 0
        self._buf_size = self._fiemap_extent_cnt * _FIEMAP_EXTENT_SIZE
        self._buf_size += _FIEMAP_SIZE
        # Allocate a mutable buffer for the FIEMAP ioctl
        self._buf = array.array('B', [0] * self._buf_size)
        # Check if the FIEMAP ioctl is supported
        self.block_is_mapped(0)
    def _invoke_fiemap(self, block, count):
        """
        Invoke the FIEMAP ioctl for 'count' blocks of the file starting from
        block number 'block'.
        The full result of the operation is stored in 'self._buf' on exit.
        Returns the unpacked 'struct fiemap' data structure in form of a python
        list (just like 'struct.upack()').
        """
        if self.blocks_cnt != 0 and (block < 0 or block >= self.blocks_cnt):
            raise Error("bad block number %d, should be within [0, %d]"
                        % (block, self.blocks_cnt))
        # Initialize the 'struct fiemap' part of the buffer. We use the
        # '_FIEMAP_FLAG_SYNC' flag in order to make sure the file is
        # synchronized. The reason for this is that early FIEMAP
        # implementations had many bugs related to cached dirty data, and
        # synchronizing the file is a necessary work-around.
        struct.pack_into(_FIEMAP_FORMAT, self._buf, 0, block * self.block_size,
                         count * self.block_size, _FIEMAP_FLAG_SYNC, 0,
                         self._fiemap_extent_cnt, 0)
        try:
            # The final '1' makes the buffer mutable so the kernel can fill
            # in the extent array in place.
            fcntl.ioctl(self._f_image, _FIEMAP_IOCTL, self._buf, 1)
        except IOError as err:
            # Note, the FIEMAP ioctl is supported by the Linux kernel starting
            # from version 2.6.28 (year 2008).
            if err.errno == errno.EOPNOTSUPP:
                errstr = "FilemapFiemap: the FIEMAP ioctl is not supported " \
                         "by the file-system"
                self._log.debug(errstr)
                raise ErrorNotSupp(errstr)
            if err.errno == errno.ENOTTY:
                errstr = "FilemapFiemap: the FIEMAP ioctl is not supported " \
                         "by the kernel"
                self._log.debug(errstr)
                raise ErrorNotSupp(errstr)
            raise Error("the FIEMAP ioctl failed for '%s': %s"
                        % (self._image_path, err))
        return struct.unpack(_FIEMAP_FORMAT, self._buf[:_FIEMAP_SIZE])
    def block_is_mapped(self, block):
        """Refer the '_FilemapBase' class for the documentation."""
        struct_fiemap = self._invoke_fiemap(block, 1)
        # 'struct_fiemap[3]' is the 'fm_mapped_extents' field.
        # If it contains zero, the block is not mapped, otherwise it is
        # mapped.
        result = bool(struct_fiemap[3])
        self._log.debug("FilemapFiemap: block_is_mapped(%d) returns %s"
                        % (block, result))
        return result
    def _unpack_fiemap_extent(self, index):
        """
        Unpack a 'struct fiemap_extent' structure object number 'index' from
        the internal 'self._buf' buffer.
        """
        offset = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE * index
        return struct.unpack(_FIEMAP_EXTENT_FORMAT,
                             self._buf[offset : offset + _FIEMAP_EXTENT_SIZE])
    def _do_get_mapped_ranges(self, start, count):
        """
        Implements most the functionality for the 'get_mapped_ranges()'
        generator: invokes the FIEMAP ioctl, walks through the mapped extents
        and yields mapped block ranges. However, the ranges may be consecutive
        (e.g., (1, 100), (100, 200)) and 'get_mapped_ranges()' simply merges
        them.
        """
        block = start
        while block < start + count:
            struct_fiemap = self._invoke_fiemap(block, count)
            mapped_extents = struct_fiemap[3]
            if mapped_extents == 0:
                # No more mapped blocks
                return
            extent = 0
            while extent < mapped_extents:
                fiemap_extent = self._unpack_fiemap_extent(extent)
                # Start of the extent
                extent_start = fiemap_extent[0]
                # Starting block number of the extent
                extent_block = extent_start // self.block_size
                # Length of the extent
                extent_len = fiemap_extent[2]
                # Count of blocks in the extent
                extent_count = extent_len // self.block_size
                # Extent length and offset have to be block-aligned
                assert extent_start % self.block_size == 0
                assert extent_len % self.block_size == 0
                if extent_block > start + count - 1:
                    return
                # Clamp the extent to the requested [start, start+count) area.
                first = max(extent_block, block)
                last = min(extent_block + extent_count, start + count) - 1
                yield (first, last)
                extent += 1
            # Resume the next FIEMAP call after the last extent seen.
            block = extent_block + extent_count
    def get_mapped_ranges(self, start, count):
        """Refer the '_FilemapBase' class for the documentation."""
        self._log.debug("FilemapFiemap: get_mapped_ranges(%d, %d(%d))"
                        % (start, count, start + count - 1))
        iterator = self._do_get_mapped_ranges(start, count)
        # NOTE(review): if the requested area contains no mapped blocks,
        # this next() raises StopIteration, which PEP 479 turns into a
        # RuntimeError inside a generator — confirm callers never request
        # a fully-hole range.
        first_prev, last_prev = next(iterator)
        # Merge back-to-back ranges before yielding them.
        for first, last in iterator:
            if last_prev == first - 1:
                last_prev = last
            else:
                self._log.debug("FilemapFiemap: yielding range (%d, %d)"
                                % (first_prev, last_prev))
                yield (first_prev, last_prev)
                first_prev, last_prev = first, last
        self._log.debug("FilemapFiemap: yielding range (%d, %d)"
                        % (first_prev, last_prev))
        yield (first_prev, last_prev)
class FilemapNobmap(_FilemapBase):
    """
    Fallback implementation used when neither 'SEEK_DATA/HOLE' nor FIEMAP
    is supported by the filesystem or kernel: every block of the image is
    reported as mapped.
    """
    def __init__(self, image, log=None):
        """Refer the '_FilemapBase' class for the documentation."""
        _FilemapBase.__init__(self, image, log)
        self._log.debug("FilemapNobmap: initializing")
    def block_is_mapped(self, block):
        """Refer the '_FilemapBase' class for the documentation."""
        # Without block-map support we must assume every block is mapped.
        return True
    def get_mapped_ranges(self, start, count):
        """Refer the '_FilemapBase' class for the documentation."""
        self._log.debug("FilemapNobmap: get_mapped_ranges(%d, %d(%d))"
                        % (start, count, start + count - 1))
        # The whole requested area is one contiguous "mapped" range.
        last = start + count - 1
        yield (start, last)
def filemap(image, log=None):
    """
    Create and return the most capable Filemap instance available for
    'image': 'FilemapFiemap' when the FIEMAP ioctl is supported, otherwise
    'FilemapSeek' when 'SEEK_HOLE'/'SEEK_DATA' work, and finally
    'FilemapNobmap' when neither mechanism is available.
    """
    for implementation in (FilemapFiemap, FilemapSeek):
        try:
            return implementation(image, log)
        except ErrorNotSupp:
            continue
    return FilemapNobmap(image, log)
def sparse_copy(src_fname, dst_fname, skip=0, seek=0,
                length=0, api=None):
    """
    Efficiently copy sparse file to or into another file.

    src_fname: path to source file
    dst_fname: path to destination file
    skip: skip N bytes at the start of src
    seek: seek N bytes from the start of dst
    length: read N bytes from src and write them to dst
    api: FilemapFiemap or FilemapSeek class (defaults to the filemap()
         factory); called with src_fname to obtain the block map

    The destination file is always closed, even if the copy fails
    part-way through (the original leaked it on any exception).
    """
    if not api:
        api = filemap
    fmap = api(src_fname)
    try:
        dst_file = open(dst_fname, 'r+b')
    except IOError:
        dst_file = open(dst_fname, 'wb')
    try:
        if length:
            dst_size = length + seek
        else:
            dst_size = os.path.getsize(src_fname) + seek - skip
        dst_file.truncate(dst_size)
        written = 0
        for first, last in fmap.get_mapped_ranges(0, fmap.blocks_cnt):
            start = first * fmap.block_size
            end = (last + 1) * fmap.block_size
            # Whole range lies inside the skipped prefix.
            if skip >= end:
                continue
            if start < skip < end:
                start = skip
            fmap._f_image.seek(start, os.SEEK_SET)
            # Bytes of the source consumed so far, relative to 'skip'
            # (the original wrote this as 'written += start - skip - written').
            written = start - skip
            if length and written >= length:
                dst_file.seek(seek + length, os.SEEK_SET)
                return
            dst_file.seek(seek + start - skip, os.SEEK_SET)
            chunk_size = 1024 * 1024
            to_read = end - start
            read = 0
            while read < to_read:
                if read + chunk_size > to_read:
                    chunk_size = to_read - read
                size = chunk_size
                # Do not copy past the requested 'length'.
                if length and written + size > length:
                    size = length - written
                chunk = fmap._f_image.read(size)
                dst_file.write(chunk)
                read += size
                written += size
                if written == length:
                    return
    finally:
        dst_file.close()

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,298 @@
#!/usr/bin/env python3
#
# Copyright (c) 2016 Intel, Inc.
#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides parser for kickstart format
#
# AUTHORS
# Ed Bartosh <ed.bartosh (at] linux.intel.com>
"""Kickstart parser module."""
import os
import shlex
import logging
import re
from argparse import ArgumentParser, ArgumentError, ArgumentTypeError
from wic.engine import find_canned
from wic.partition import Partition
from wic.misc import get_bitbake_var
logger = logging.getLogger('wic')
# Matches ${VAR}-style references that expand_line() substitutes with
# bitbake variable values; braces, '@', whitespace and ':' are excluded
# from variable names.
__expand_var_regexp__ = re.compile(r"\${[^{}@\n\t :]+}")
def expand_line(line):
    """Expand every ${VAR} reference in 'line' with its bitbake value.

    If a variable cannot be resolved, a warning is logged and the line is
    returned with only the substitutions made so far.
    """
    pattern = re.compile(r"\${[^{}@\n\t :]+}")
    match = pattern.search(line)
    while match:
        key = match.group()[2:-1]
        val = get_bitbake_var(key)
        if val is None:
            logger.warning("cannot expand variable %s" % key)
            return line
        line = line[:match.start()] + val + line[match.end():]
        match = pattern.search(line)
    return line
class KickStartError(Exception):
    """Custom exception raised for errors while parsing a kickstart (.wks)
    file; messages are prefixed with the file name and line number."""
    pass
class KickStartParser(ArgumentParser):
    """
    This class overwrites error method to throw exception
    instead of producing usage message(default argparse behavior).
    """
    def error(self, message):
        # Raise instead of printing usage and exiting, so the caller can
        # wrap the failure with the offending .wks file and line number.
        raise ArgumentError(None, message)
def sizetype(default, size_in_bytes=False):
    """Return an ArgumentParser type callable for size strings.

    default: suffix assumed when the argument is a bare integer
    size_in_bytes: when True, results are in bytes (and the 's'/'S'
                   512-byte sector suffix is accepted); otherwise in kB
    """
    def converter(arg):
        """
        Custom type for ArgumentParser
        Converts size string in <num>[S|s|K|k|M|G] format into the integer value
        """
        try:
            value = int(arg)
            suffix = default
        except ValueError:
            try:
                value = int(arg[:-1])
            except ValueError:
                raise ArgumentTypeError("Invalid size: %r" % arg)
            suffix = arg[-1:]
        if size_in_bytes:
            if suffix in ('s', 'S'):
                return value * 512
            unit = 1024
        else:
            unit = 1
        if suffix in ('k', 'K'):
            return value * unit
        if suffix == "M":
            return value * unit * 1024
        if suffix == "G":
            return value * unit * 1024 * 1024
        raise ArgumentTypeError("Invalid size: %r" % arg)
    return converter
def overheadtype(arg):
    """
    Custom type for ArgumentParser
    Converts overhead string to float and rejects values below 1.0
    """
    try:
        result = float(arg)
    except ValueError:
        raise ArgumentTypeError("Invalid value: %r" % arg)
    if result < 1.0:
        # BUGFIX: the original did '"..." % arg' on a string with no
        # conversion specifier, which raised TypeError instead of the
        # intended ArgumentTypeError.
        raise ArgumentTypeError("Overhead factor should be > 1.0")
    return result
def cannedpathtype(arg):
    """
    Custom type for ArgumentParser
    Tries to find file in the list of canned wks paths
    """
    # NOTE(review): the '+' concatenation without a path separator looks
    # like a bug, but os.path.abspath appears to normalize the resulting
    # "<dir>/wic../../.." down to the scripts directory, matching the
    # "%s/../.." form used in engine.py — confirm before "fixing".
    scripts_path = os.path.abspath(os.path.dirname(__file__) + '../../..')
    result = find_canned(scripts_path, arg)
    if not result:
        raise ArgumentTypeError("file not found: %s" % arg)
    return result
def systemidtype(arg):
    """
    Custom type for ArgumentParser
    Checks that the argument satisfies the system id requirements,
    i.e. that it is a one-byte hex integer > 0.  The original string is
    returned unchanged when valid.
    """
    message = "Invalid system type: %s. must be hex "\
              "between 0x1 and 0xFF" % arg
    try:
        value = int(arg, 16)
    except ValueError:
        raise ArgumentTypeError(message)
    if not 0 < value <= 0xff:
        raise ArgumentTypeError(message)
    return arg
class KickStart():
    """Kickstart (.wks) parser implementation.

    Parsing collects one Partition object per 'part' command into
    self.partitions and the parsed 'bootloader' command (or defaults)
    into self.bootloader.
    """
    # Defaults applied to 'part' commands that use --size without
    # --extra-space / --overhead-factor (values match sizetype("M") units).
    DEFAULT_EXTRA_SPACE = 10*1024
    DEFAULT_OVERHEAD_FACTOR = 1.3
    def __init__(self, confpath):
        """Build the kickstart argument parsers and parse 'confpath'.

        Raises KickStartError on any syntax or semantic error.
        """
        self.partitions = []
        self.bootloader = None
        self.lineno = 0
        self.partnum = 0
        parser = KickStartParser()
        subparsers = parser.add_subparsers()
        # --- 'part' command and its options ---
        part = subparsers.add_parser('part')
        part.add_argument('mountpoint', nargs='?')
        part.add_argument('--active', action='store_true')
        part.add_argument('--align', type=int)
        part.add_argument('--offset', type=sizetype("K", True))
        part.add_argument('--exclude-path', nargs='+')
        part.add_argument('--include-path', nargs='+', action='append')
        part.add_argument('--change-directory')
        part.add_argument("--extra-space", type=sizetype("M"))
        part.add_argument('--fsoptions', dest='fsopts')
        part.add_argument('--fspassno', dest='fspassno')
        part.add_argument('--fstype', default='vfat',
                          choices=('ext2', 'ext3', 'ext4', 'btrfs',
                                   'squashfs', 'vfat', 'msdos', 'erofs',
                                   'swap', 'none'))
        part.add_argument('--mkfs-extraopts', default='')
        part.add_argument('--label')
        part.add_argument('--use-label', action='store_true')
        part.add_argument('--no-table', action='store_true')
        part.add_argument('--ondisk', '--ondrive', dest='disk', default='sda')
        part.add_argument("--overhead-factor", type=overheadtype)
        part.add_argument('--part-name')
        part.add_argument('--part-type')
        part.add_argument('--rootfs-dir')
        part.add_argument('--type', default='primary',
                choices = ('primary', 'logical'))
        part.add_argument('--hidden', action='store_true')
        # --size and --fixed-size cannot be specified together; options
        # --extra-space and --overhead-factor should also raise a parser
        # error, but since nesting mutually exclusive groups does not work,
        # --extra-space/--overhead-factor are handled later
        sizeexcl = part.add_mutually_exclusive_group()
        sizeexcl.add_argument('--size', type=sizetype("M"), default=0)
        sizeexcl.add_argument('--fixed-size', type=sizetype("M"), default=0)
        part.add_argument('--source')
        part.add_argument('--sourceparams')
        part.add_argument('--system-id', type=systemidtype)
        part.add_argument('--use-uuid', action='store_true')
        part.add_argument('--uuid')
        part.add_argument('--fsuuid')
        part.add_argument('--no-fstab-update', action='store_true')
        part.add_argument('--mbr', action='store_true')
        # --- 'bootloader' command and its options ---
        bootloader = subparsers.add_parser('bootloader')
        bootloader.add_argument('--append')
        bootloader.add_argument('--configfile')
        bootloader.add_argument('--ptable', choices=('msdos', 'gpt', 'gpt-hybrid'),
                                default='msdos')
        bootloader.add_argument('--timeout', type=int)
        bootloader.add_argument('--source')
        # --- 'include' command: pulls in another (canned) .wks file ---
        include = subparsers.add_parser('include')
        include.add_argument('path', type=cannedpathtype)
        self._parse(parser, confpath)
        if not self.bootloader:
            logger.warning('bootloader config not specified, using defaults\n')
            # Parse an empty argument list to pick up the declared defaults.
            self.bootloader = bootloader.parse_args([])
    def _parse(self, parser, confpath):
        """
        Parse file in .wks format using provided parser.
        """
        with open(confpath) as conf:
            lineno = 0
            for line in conf:
                line = line.strip()
                lineno += 1
                # Skip blank lines and comments.
                if line and line[0] != '#':
                    # Substitute ${VAR} references before tokenizing.
                    line = expand_line(line)
                    try:
                        line_args = shlex.split(line)
                        parsed = parser.parse_args(line_args)
                    except ArgumentError as err:
                        raise KickStartError('%s:%d: %s' % \
                                             (confpath, lineno, err))
                    if line.startswith('part'):
                        # SquashFS does not support filesystem UUID
                        if parsed.fstype == 'squashfs':
                            if parsed.fsuuid:
                                err = "%s:%d: SquashFS does not support UUID" \
                                       % (confpath, lineno)
                                raise KickStartError(err)
                            if parsed.label:
                                err = "%s:%d: SquashFS does not support LABEL" \
                                       % (confpath, lineno)
                                raise KickStartError(err)
                        # erofs does not support filesystem labels
                        if parsed.fstype == 'erofs' and parsed.label:
                            err = "%s:%d: erofs does not support LABEL" % (confpath, lineno)
                            raise KickStartError(err)
                        if parsed.fstype == 'msdos' or parsed.fstype == 'vfat':
                            if parsed.fsuuid:
                                # FAT volume ids are 32-bit: at most 8 hex
                                # digits (10 chars with a '0x' prefix).
                                if parsed.fsuuid.upper().startswith('0X'):
                                    if len(parsed.fsuuid) > 10:
                                        err = "%s:%d: fsuuid %s given in wks kickstart file " \
                                              "exceeds the length limit for %s filesystem. " \
                                              "It should be in the form of a 32 bit hexadecimal" \
                                              "number (for example, 0xABCD1234)." \
                                              % (confpath, lineno, parsed.fsuuid, parsed.fstype)
                                        raise KickStartError(err)
                                elif len(parsed.fsuuid) > 8:
                                    err = "%s:%d: fsuuid %s given in wks kickstart file " \
                                          "exceeds the length limit for %s filesystem. " \
                                          "It should be in the form of a 32 bit hexadecimal" \
                                          "number (for example, 0xABCD1234)." \
                                          % (confpath, lineno, parsed.fsuuid, parsed.fstype)
                                    raise KickStartError(err)
                        if parsed.use_label and not parsed.label:
                            err = "%s:%d: Must set the label with --label" \
                                  % (confpath, lineno)
                            raise KickStartError(err)
                        # using ArgumentParser one cannot easily tell if option
                        # was passed as argument, if said option has a default
                        # value; --overhead-factor/--extra-space cannot be used
                        # with --fixed-size, so at least detect when these were
                        # passed with non-0 values ...
                        if parsed.fixed_size:
                            if parsed.overhead_factor or parsed.extra_space:
                                err = "%s:%d: arguments --overhead-factor and --extra-space not "\
                                      "allowed with argument --fixed-size" \
                                      % (confpath, lineno)
                                raise KickStartError(err)
                        else:
                            # ... and provide defaults if not using
                            # --fixed-size iff given option was not used
                            # (again, one cannot tell if option was passed but
                            # with value equal to 0)
                            if '--overhead-factor' not in line_args:
                                parsed.overhead_factor = self.DEFAULT_OVERHEAD_FACTOR
                            if '--extra-space' not in line_args:
                                parsed.extra_space = self.DEFAULT_EXTRA_SPACE
                        self.partnum += 1
                        # NOTE(review): Partition's second parameter is named
                        # 'lineno' but receives the partition number here.
                        self.partitions.append(Partition(parsed, self.partnum))
                    elif line.startswith('include'):
                        # Recurse into the included canned .wks file.
                        self._parse(parser, parsed.path)
                    elif line.startswith('bootloader'):
                        if not self.bootloader:
                            self.bootloader = parsed
                            # Concatenate the strings set in APPEND
                            append_var = get_bitbake_var("APPEND")
                            if append_var:
                                self.bootloader.append = ' '.join(filter(None, \
                                    (self.bootloader.append, append_var)))
                        else:
                            err = "%s:%d: more than one bootloader specified" \
                                      % (confpath, lineno)
                            raise KickStartError(err)

View File

@@ -0,0 +1,266 @@
#
# Copyright (c) 2013, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides a place to collect various wic-related utils
# for the OpenEmbedded Image Tools.
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
"""Miscellaneous functions."""
import logging
import os
import re
import subprocess
import shutil
from collections import defaultdict
from wic import WicError
logger = logging.getLogger('wic')
# executable -> recipe pairs for exec_native_cmd; used to suggest the
# missing -native recipe when a tool cannot be found.
NATIVE_RECIPES = {"bmaptool": "bmaptool",
                  "dumpe2fs": "e2fsprogs",
                  "grub-mkimage": "grub-efi",
                  "isohybrid": "syslinux",
                  "mcopy": "mtools",
                  "mdel" : "mtools",
                  "mdeltree" : "mtools",
                  "mdir" : "mtools",
                  "mkdosfs": "dosfstools",
                  "mkisofs": "cdrtools",
                  "mkfs.btrfs": "btrfs-tools",
                  "mkfs.erofs": "erofs-utils",
                  "mkfs.ext2": "e2fsprogs",
                  "mkfs.ext3": "e2fsprogs",
                  "mkfs.ext4": "e2fsprogs",
                  "mkfs.vfat": "dosfstools",
                  "mksquashfs": "squashfs-tools",
                  "mkswap": "util-linux",
                  "mmd": "mtools",
                  "parted": "parted",
                  "sfdisk": "util-linux",
                  "sgdisk": "gptfdisk",
                  "syslinux": "syslinux",
                  "tar": "tar"
                 }
def runtool(cmdln_or_args):
    """ wrapper for most of the subprocess calls
    input:
        cmdln_or_args: can be both args and cmdln str (shell=True)
    return:
        rc, output (stdout and stderr combined, decoded as UTF-8)
    """
    if isinstance(cmdln_or_args, list):
        cmd = cmdln_or_args[0]
        shell = False
    else:
        import shlex
        cmd = shlex.split(cmdln_or_args)[0]
        shell = True
    try:
        process = subprocess.Popen(cmdln_or_args, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT, shell=shell)
        stdout, stderr = process.communicate()
        # stderr is merged into stdout above; filter None out and decode.
        # (The original reused the name 'sout' for both subprocess.PIPE and
        # the output bytes, and shadowed 'out' in the comprehension.)
        out = ''.join(stream.decode('utf-8')
                      for stream in (stdout, stderr) if stream)
    except OSError as err:
        import errno
        if err.errno == errno.ENOENT:
            # The executable itself is missing - most likely a lost
            # native dependency.
            raise WicError('Cannot run command: %s, lost dependency?' % cmd)
        else:
            raise # relay
    return process.returncode, out
def _exec_cmd(cmd_and_args, as_shell=False):
    """
    Execute a command, capturing stderr and stdout.  Returns
    (returncode, stripped output); raises WicError on a non-zero
    return code.  Use as_shell=True when the command uses wildcards.
    """
    logger.debug("_exec_cmd: %s", cmd_and_args)
    args = cmd_and_args.split()
    logger.debug(args)
    ret, out = runtool(cmd_and_args if as_shell else args)
    out = out.strip()
    if ret != 0:
        raise WicError("_exec_cmd: %s returned '%s' instead of 0\noutput: %s" % \
                       (cmd_and_args, ret, out))
    logger.debug("_exec_cmd: output for %s (rc = %d): %s",
                 cmd_and_args, ret, out)
    return ret, out
def exec_cmd(cmd_and_args, as_shell=False):
    """
    Execute a command and return its stripped output; non-zero exit
    raises WicError (via _exec_cmd).
    """
    _, output = _exec_cmd(cmd_and_args, as_shell)
    return output
def find_executable(cmd, paths):
    """Locate 'cmd' on the colon-separated 'paths'.

    Returns True when the corresponding -native recipe is listed in
    ASSUME_PROVIDED, otherwise the path found by shutil.which (or None).
    """
    recipe = NATIVE_RECIPES.get(cmd, cmd)
    provided = get_bitbake_var("ASSUME_PROVIDED")
    if provided and "%s-native" % recipe in provided:
        return True
    return shutil.which(cmd, path=paths)
def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
    """
    Execute native command, catching stderr, stdout
    Need to execute as_shell if the command uses wildcards
    Always need to execute native commands as_shell

    pseudo: optional pseudo(1) command prefix for fakeroot-style execution
    Returns (returncode, output); raises WicError when the native tool
    cannot be found.
    """
    # The reason -1 is used is because there may be "export" commands.
    args = cmd_and_args.split(';')[-1].split()
    logger.debug(args)
    if pseudo:
        cmd_and_args = pseudo + cmd_and_args
    hosttools_dir = get_bitbake_var("HOSTTOOLS_DIR")
    target_sys = get_bitbake_var("TARGET_SYS")
    # Search the native sysroot's bin dirs first, then the host tools.
    native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/usr/bin/%s:%s/bin:%s" % \
                   (native_sysroot, native_sysroot,
                    native_sysroot, native_sysroot, target_sys,
                    native_sysroot, hosttools_dir)
    native_cmd_and_args = "export PATH=%s:$PATH;%s" % \
                           (native_paths, cmd_and_args)
    logger.debug("exec_native_cmd: %s", native_cmd_and_args)
    # If the command isn't in the native sysroot say we failed.
    if find_executable(args[0], native_paths):
        ret, out = _exec_cmd(native_cmd_and_args, True)
    else:
        ret = 127
        out = "can't find native executable %s in %s" % (args[0], native_paths)
    prog = args[0]
    # shell command-not-found (127), or pseudo's own lookup failure
    if ret == 127 \
       or (pseudo and ret == 1 and out == "Can't find '%s' in $PATH." % prog):
        msg = "A native program %s required to build the image "\
              "was not found (see details above).\n\n" % prog
        recipe = NATIVE_RECIPES.get(prog)
        if recipe:
            msg += "Please make sure wic-tools have %s-native in its DEPENDS, "\
                   "build it with 'bitbake wic-tools' and try again.\n" % recipe
        else:
            msg += "Wic failed to find a recipe to build native %s. Please "\
                   "file a bug against wic.\n" % prog
        raise WicError(msg)
    return ret, out
# NOTE(review): consumed by image builders elsewhere; presumably extra
# space (in kB) reserved for boot partitions — confirm at the call sites.
BOOTDD_EXTRA_SPACE = 16384
class BitbakeVars(defaultdict):
    """
    Container for Bitbake variables.

    Maps image name -> {variable: value}; the None key holds the default
    image's variables.
    """
    def __init__(self):
        defaultdict.__init__(self, dict)
        # default_image and vars_dir attributes should be set from outside
        self.default_image = None
        self.vars_dir = None
    def _parse_line(self, line, image, matcher=re.compile(r"^([a-zA-Z0-9\-_+./~]+)=(.*)")):
        """
        Parse one line from bitbake -e output or from .env file.
        Put result key-value pair into the storage.

        The 'matcher' default argument compiles the pattern once at
        definition time (deliberate caching).
        """
        if "=" not in line:
            return
        match = matcher.match(line)
        if not match:
            return
        key, val = match.groups()
        # Values from 'bitbake -e' are double-quoted; strip the quotes.
        self[image][key] = val.strip('"')
    def get_var(self, var, image=None, cache=True):
        """
        Get bitbake variable from 'bitbake -e' output or from .env file.
        This is a lazy method, i.e. it runs bitbake or parses file only
        when a variable is requested. It also caches results.
        """
        if not image:
            image = self.default_image
        if image not in self:
            if image and self.vars_dir:
                fname = os.path.join(self.vars_dir, image + '.env')
                if os.path.isfile(fname):
                    # parse .env file
                    with open(fname) as varsfile:
                        for line in varsfile:
                            self._parse_line(line, image)
                else:
                    print("Couldn't get bitbake variable from %s." % fname)
                    print("File %s doesn't exist." % fname)
                    return
            else:
                # Get bitbake -e output
                cmd = "bitbake -e"
                if image:
                    cmd += " %s" % image
                # Temporarily raise the log level to hide bitbake's noise.
                log_level = logger.getEffectiveLevel()
                logger.setLevel(logging.INFO)
                ret, lines = _exec_cmd(cmd)
                logger.setLevel(log_level)
                if ret:
                    logger.error("Couldn't get '%s' output.", cmd)
                    logger.error("Bitbake failed with error:\n%s\n", lines)
                    return
                # Parse bitbake -e output
                for line in lines.split('\n'):
                    self._parse_line(line, image)
            # Make first image a default set of variables
            if cache:
                images = [key for key in self if key]
                if len(images) == 1:
                    self[None] = self[image]
        result = self[image].get(var)
        if not cache:
            # Drop the parsed variables again when caching is disabled.
            self.pop(image, None)
        return result
# Create BB_VARS singleton
# Module-level cache shared by all get_bitbake_var() callers.
BB_VARS = BitbakeVars()
def get_bitbake_var(var, image=None, cache=True):
    """
    Provide old get_bitbake_var API by wrapping
    get_var method of BB_VARS singleton.

    var: variable name to look up
    image: image whose 'bitbake -e'/.env scope to use (defaults to
           BB_VARS.default_image)
    cache: keep the parsed variables for subsequent look-ups
    """
    return BB_VARS.get_var(var, image, cache)

View File

@@ -0,0 +1,551 @@
#
# Copyright (c) 2013-2016 Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides the OpenEmbedded partition object definitions.
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
# Ed Bartosh <ed.bartosh> (at] linux.intel.com>
import logging
import os
import uuid
from wic import WicError
from wic.misc import exec_cmd, exec_native_cmd, get_bitbake_var
from wic.pluginbase import PluginMgr
logger = logging.getLogger('wic')
class Partition():
def __init__(self, args, lineno):
self.args = args
self.active = args.active
self.align = args.align
self.disk = args.disk
self.device = None
self.extra_space = args.extra_space
self.exclude_path = args.exclude_path
self.include_path = args.include_path
self.change_directory = args.change_directory
self.fsopts = args.fsopts
self.fspassno = args.fspassno
self.fstype = args.fstype
self.label = args.label
self.use_label = args.use_label
self.mkfs_extraopts = args.mkfs_extraopts
self.mountpoint = args.mountpoint
self.no_table = args.no_table
self.num = None
self.offset = args.offset
self.overhead_factor = args.overhead_factor
self.part_name = args.part_name
self.part_type = args.part_type
self.rootfs_dir = args.rootfs_dir
self.size = args.size
self.fixed_size = args.fixed_size
self.source = args.source
self.sourceparams = args.sourceparams
self.system_id = args.system_id
self.use_uuid = args.use_uuid
self.uuid = args.uuid
self.fsuuid = args.fsuuid
self.type = args.type
self.no_fstab_update = args.no_fstab_update
self.updated_fstab_path = None
self.has_fstab = False
self.update_fstab_in_rootfs = False
self.hidden = args.hidden
self.mbr = args.mbr
self.lineno = lineno
self.source_file = ""
def get_extra_block_count(self, current_blocks):
"""
The --size param is reflected in self.size (in kB), and we already
have current_blocks (1k) blocks, calculate and return the
number of (1k) blocks we need to add to get to --size, 0 if
we're already there or beyond.
"""
logger.debug("Requested partition size for %s: %d",
self.mountpoint, self.size)
if not self.size:
return 0
requested_blocks = self.size
logger.debug("Requested blocks %d, current_blocks %d",
requested_blocks, current_blocks)
if requested_blocks > current_blocks:
return requested_blocks - current_blocks
else:
return 0
def get_rootfs_size(self, actual_rootfs_size=0):
"""
Calculate the required size of rootfs taking into consideration
--size/--fixed-size flags as well as overhead and extra space, as
specified in kickstart file. Raises an error if the
`actual_rootfs_size` is larger than fixed-size rootfs.
"""
if self.fixed_size:
rootfs_size = self.fixed_size
if actual_rootfs_size > rootfs_size:
raise WicError("Actual rootfs size (%d kB) is larger than "
"allowed size %d kB" %
(actual_rootfs_size, rootfs_size))
else:
extra_blocks = self.get_extra_block_count(actual_rootfs_size)
if extra_blocks < self.extra_space:
extra_blocks = self.extra_space
rootfs_size = actual_rootfs_size + extra_blocks
rootfs_size = int(rootfs_size * self.overhead_factor)
logger.debug("Added %d extra blocks to %s to get to %d total blocks",
extra_blocks, self.mountpoint, rootfs_size)
return rootfs_size
@property
def disk_size(self):
"""
Obtain on-disk size of partition taking into consideration
--size/--fixed-size options.
"""
return self.fixed_size if self.fixed_size else self.size
def prepare(self, creator, cr_workdir, oe_builddir, rootfs_dir,
bootimg_dir, kernel_dir, native_sysroot, updated_fstab_path):
"""
Prepare content for individual partitions, depending on
partition command parameters.
"""
self.updated_fstab_path = updated_fstab_path
if self.updated_fstab_path and not (self.fstype.startswith("ext") or self.fstype == "msdos"):
self.update_fstab_in_rootfs = True
if not self.source:
if self.fstype == "none" or self.no_table:
return
if not self.size and not self.fixed_size:
raise WicError("The %s partition has a size of zero. Please "
"specify a non-zero --size/--fixed-size for that "
"partition." % self.mountpoint)
if self.fstype == "swap":
self.prepare_swap_partition(cr_workdir, oe_builddir,
native_sysroot)
self.source_file = "%s/fs.%s" % (cr_workdir, self.fstype)
else:
if self.fstype in ('squashfs', 'erofs'):
raise WicError("It's not possible to create empty %s "
"partition '%s'" % (self.fstype, self.mountpoint))
rootfs = "%s/fs_%s.%s.%s" % (cr_workdir, self.label,
self.lineno, self.fstype)
if os.path.isfile(rootfs):
os.remove(rootfs)
prefix = "ext" if self.fstype.startswith("ext") else self.fstype
method = getattr(self, "prepare_empty_partition_" + prefix)
method(rootfs, oe_builddir, native_sysroot)
self.source_file = rootfs
return
plugins = PluginMgr.get_plugins('source')
if self.source not in plugins:
raise WicError("The '%s' --source specified for %s doesn't exist.\n\t"
"See 'wic list source-plugins' for a list of available"
" --sources.\n\tSee 'wic help source-plugins' for "
"details on adding a new source plugin." %
(self.source, self.mountpoint))
srcparams_dict = {}
if self.sourceparams:
# Split sourceparams string of the form key1=val1[,key2=val2,...]
# into a dict. Also accepts valueless keys i.e. without =
splitted = self.sourceparams.split(',')
srcparams_dict = dict((par.split('=', 1) + [None])[:2] for par in splitted if par)
plugin = PluginMgr.get_plugins('source')[self.source]
plugin.do_configure_partition(self, srcparams_dict, creator,
cr_workdir, oe_builddir, bootimg_dir,
kernel_dir, native_sysroot)
plugin.do_stage_partition(self, srcparams_dict, creator,
cr_workdir, oe_builddir, bootimg_dir,
kernel_dir, native_sysroot)
plugin.do_prepare_partition(self, srcparams_dict, creator,
cr_workdir, oe_builddir, bootimg_dir,
kernel_dir, rootfs_dir, native_sysroot)
plugin.do_post_partition(self, srcparams_dict, creator,
cr_workdir, oe_builddir, bootimg_dir,
kernel_dir, rootfs_dir, native_sysroot)
# further processing required Partition.size to be an integer, make
# sure that it is one
if not isinstance(self.size, int):
raise WicError("Partition %s internal size is not an integer. "
"This a bug in source plugin %s and needs to be fixed." %
(self.mountpoint, self.source))
if self.fixed_size and self.size > self.fixed_size:
raise WicError("File system image of partition %s is "
"larger (%d kB) than its allowed size %d kB" %
(self.mountpoint, self.size, self.fixed_size))
def prepare_rootfs(self, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, real_rootfs = True, pseudo_dir = None):
"""
Prepare content for a rootfs partition i.e. create a partition
and fill it from a /rootfs dir.
Currently handles ext2/3/4, btrfs, vfat and squashfs.
"""
rootfs = "%s/rootfs_%s.%s.%s" % (cr_workdir, self.label,
self.lineno, self.fstype)
if os.path.isfile(rootfs):
os.remove(rootfs)
p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot)
if (pseudo_dir):
# Canonicalize the ignore paths. This corresponds to
# calling oe.path.canonicalize(), which is used in bitbake.conf.
ignore_paths = [rootfs] + (get_bitbake_var("PSEUDO_IGNORE_PATHS") or "").split(",")
canonical_paths = []
for path in ignore_paths:
if "$" not in path:
trailing_slash = path.endswith("/") and "/" or ""
canonical_paths.append(os.path.realpath(path) + trailing_slash)
ignore_paths = ",".join(canonical_paths)
pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix
pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % pseudo_dir
pseudo += "export PSEUDO_PASSWD=%s;" % rootfs_dir
pseudo += "export PSEUDO_NOSYMLINKEXP=1;"
pseudo += "export PSEUDO_IGNORE_PATHS=%s;" % ignore_paths
pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
else:
pseudo = None
if not self.size and real_rootfs:
# The rootfs size is not set in .ks file so try to get it
# from bitbake variable
rsize_bb = get_bitbake_var('ROOTFS_SIZE')
rdir = get_bitbake_var('IMAGE_ROOTFS')
if rsize_bb and rdir == rootfs_dir:
# Bitbake variable ROOTFS_SIZE is calculated in
# Image._get_rootfs_size method from meta/lib/oe/image.py
# using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
# IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
self.size = int(round(float(rsize_bb)))
else:
# Bitbake variable ROOTFS_SIZE is not defined so compute it
# from the rootfs_dir size using the same logic found in
# get_rootfs_size() from meta/classes/image.bbclass
du_cmd = "du -ks %s" % rootfs_dir
out = exec_cmd(du_cmd)
self.size = int(out.split()[0])
prefix = "ext" if self.fstype.startswith("ext") else self.fstype
method = getattr(self, "prepare_rootfs_" + prefix)
method(rootfs, cr_workdir, oe_builddir, rootfs_dir, native_sysroot, pseudo)
self.source_file = rootfs
# get the rootfs size in the right units for kickstart (kB)
du_cmd = "du -Lbks %s" % rootfs
out = exec_cmd(du_cmd)
self.size = int(out.split()[0])
def prepare_rootfs_ext(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for an ext2/3/4 rootfs partition.
"""
du_cmd = "du -ks %s" % rootfs_dir
out = exec_cmd(du_cmd)
actual_rootfs_size = int(out.split()[0])
rootfs_size = self.get_rootfs_size(actual_rootfs_size)
with open(rootfs, 'w') as sparse:
os.ftruncate(sparse.fileno(), rootfs_size * 1024)
extraopts = self.mkfs_extraopts or "-F -i 8192"
# use hash_seed to generate reproducible ext4 images
(extraopts, pseudo) = self.get_hash_seed_ext4(extraopts, pseudo)
label_str = ""
if self.label:
label_str = "-L %s" % self.label
mkfs_cmd = "mkfs.%s %s %s %s -U %s -d %s" % \
(self.fstype, extraopts, rootfs, label_str, self.fsuuid, rootfs_dir)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
debugfs_script_path = os.path.join(cr_workdir, "debugfs_script")
with open(debugfs_script_path, "w") as f:
f.write("cd etc\n")
f.write("rm fstab\n")
f.write("write %s fstab\n" % (self.updated_fstab_path))
debugfs_cmd = "debugfs -w -f %s %s" % (debugfs_script_path, rootfs)
exec_native_cmd(debugfs_cmd, native_sysroot)
mkfs_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
if os.getenv('SOURCE_DATE_EPOCH'):
sde_time = hex(int(os.getenv('SOURCE_DATE_EPOCH')))
debugfs_script_path = os.path.join(cr_workdir, "debugfs_script")
files = []
for root, dirs, others in os.walk(rootfs_dir):
base = root.replace(rootfs_dir, "").rstrip(os.sep)
files += [ "/" if base == "" else base ]
files += [ base + "/" + n for n in dirs + others ]
with open(debugfs_script_path, "w") as f:
f.write("set_current_time %s\n" % (sde_time))
if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
f.write("set_inode_field /etc/fstab mtime %s\n" % (sde_time))
f.write("set_inode_field /etc/fstab mtime_extra 0\n")
for file in set(files):
for time in ["atime", "ctime", "crtime"]:
f.write("set_inode_field \"%s\" %s %s\n" % (file, time, sde_time))
f.write("set_inode_field \"%s\" %s_extra 0\n" % (file, time))
for time in ["wtime", "mkfs_time", "lastcheck"]:
f.write("set_super_value %s %s\n" % (time, sde_time))
for time in ["mtime", "first_error_time", "last_error_time"]:
f.write("set_super_value %s 0\n" % (time))
debugfs_cmd = "debugfs -w -f %s %s" % (debugfs_script_path, rootfs)
exec_native_cmd(debugfs_cmd, native_sysroot)
self.check_for_Y2038_problem(rootfs, native_sysroot)
def get_hash_seed_ext4(self, extraopts, pseudo):
if os.getenv('SOURCE_DATE_EPOCH'):
sde_time = int(os.getenv('SOURCE_DATE_EPOCH'))
if pseudo:
pseudo = "export E2FSPROGS_FAKE_TIME=%s;%s " % (sde_time, pseudo)
else:
pseudo = "export E2FSPROGS_FAKE_TIME=%s; " % sde_time
# Set hash_seed to generate deterministic directory indexes
namespace = uuid.UUID("e7429877-e7b3-4a68-a5c9-2f2fdf33d460")
if self.fsuuid:
namespace = uuid.UUID(self.fsuuid)
hash_seed = str(uuid.uuid5(namespace, str(sde_time)))
extraopts += " -E hash_seed=%s" % hash_seed
return (extraopts, pseudo)
def prepare_rootfs_btrfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a btrfs rootfs partition.
"""
du_cmd = "du -ks %s" % rootfs_dir
out = exec_cmd(du_cmd)
actual_rootfs_size = int(out.split()[0])
rootfs_size = self.get_rootfs_size(actual_rootfs_size)
with open(rootfs, 'w') as sparse:
os.ftruncate(sparse.fileno(), rootfs_size * 1024)
label_str = ""
if self.label:
label_str = "-L %s" % self.label
mkfs_cmd = "mkfs.%s -b %d -r %s %s %s -U %s %s" % \
(self.fstype, rootfs_size * 1024, rootfs_dir, label_str,
self.mkfs_extraopts, self.fsuuid, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
def prepare_rootfs_msdos(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a msdos/vfat rootfs partition.
"""
du_cmd = "du -bks %s" % rootfs_dir
out = exec_cmd(du_cmd)
blocks = int(out.split()[0])
rootfs_size = self.get_rootfs_size(blocks)
label_str = "-n boot"
if self.label:
label_str = "-n %s" % self.label
size_str = ""
extraopts = self.mkfs_extraopts or '-S 512'
dosfs_cmd = "mkdosfs %s -i %s %s %s -C %s %d" % \
(label_str, self.fsuuid, size_str, extraopts, rootfs,
rootfs_size)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (rootfs, rootfs_dir)
exec_native_cmd(mcopy_cmd, native_sysroot)
if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
mcopy_cmd = "mcopy -m -i %s %s ::/etc/fstab" % (rootfs, self.updated_fstab_path)
exec_native_cmd(mcopy_cmd, native_sysroot)
chmod_cmd = "chmod 644 %s" % rootfs
exec_cmd(chmod_cmd)
prepare_rootfs_vfat = prepare_rootfs_msdos
def prepare_rootfs_squashfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a squashfs rootfs partition.
"""
extraopts = self.mkfs_extraopts or '-noappend'
squashfs_cmd = "mksquashfs %s %s %s" % \
(rootfs_dir, rootfs, extraopts)
exec_native_cmd(squashfs_cmd, native_sysroot, pseudo=pseudo)
def prepare_rootfs_erofs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a erofs rootfs partition.
"""
extraopts = self.mkfs_extraopts or ''
erofs_cmd = "mkfs.erofs %s -U %s %s %s" % \
(extraopts, self.fsuuid, rootfs, rootfs_dir)
exec_native_cmd(erofs_cmd, native_sysroot, pseudo=pseudo)
def prepare_empty_partition_none(self, rootfs, oe_builddir, native_sysroot):
pass
def prepare_empty_partition_ext(self, rootfs, oe_builddir,
native_sysroot):
"""
Prepare an empty ext2/3/4 partition.
"""
size = self.disk_size
with open(rootfs, 'w') as sparse:
os.ftruncate(sparse.fileno(), size * 1024)
extraopts = self.mkfs_extraopts or "-i 8192"
# use hash_seed to generate reproducible ext4 images
(extraopts, pseudo) = self.get_hash_seed_ext4(extraopts, None)
label_str = ""
if self.label:
label_str = "-L %s" % self.label
mkfs_cmd = "mkfs.%s -F %s %s -U %s %s" % \
(self.fstype, extraopts, label_str, self.fsuuid, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
self.check_for_Y2038_problem(rootfs, native_sysroot)
def prepare_empty_partition_btrfs(self, rootfs, oe_builddir,
native_sysroot):
"""
Prepare an empty btrfs partition.
"""
size = self.disk_size
with open(rootfs, 'w') as sparse:
os.ftruncate(sparse.fileno(), size * 1024)
label_str = ""
if self.label:
label_str = "-L %s" % self.label
mkfs_cmd = "mkfs.%s -b %d %s -U %s %s %s" % \
(self.fstype, self.size * 1024, label_str, self.fsuuid,
self.mkfs_extraopts, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot)
def prepare_empty_partition_msdos(self, rootfs, oe_builddir,
native_sysroot):
"""
Prepare an empty vfat partition.
"""
blocks = self.disk_size
label_str = "-n boot"
if self.label:
label_str = "-n %s" % self.label
size_str = ""
extraopts = self.mkfs_extraopts or '-S 512'
dosfs_cmd = "mkdosfs %s -i %s %s %s -C %s %d" % \
(label_str, self.fsuuid, extraopts, size_str, rootfs,
blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
chmod_cmd = "chmod 644 %s" % rootfs
exec_cmd(chmod_cmd)
prepare_empty_partition_vfat = prepare_empty_partition_msdos
def prepare_swap_partition(self, cr_workdir, oe_builddir, native_sysroot):
"""
Prepare a swap partition.
"""
path = "%s/fs.%s" % (cr_workdir, self.fstype)
with open(path, 'w') as sparse:
os.ftruncate(sparse.fileno(), self.size * 1024)
label_str = ""
if self.label:
label_str = "-L %s" % self.label
mkswap_cmd = "mkswap %s -U %s %s" % (label_str, self.fsuuid, path)
exec_native_cmd(mkswap_cmd, native_sysroot)
def check_for_Y2038_problem(self, rootfs, native_sysroot):
"""
Check if the filesystem is affected by the Y2038 problem
(Y2038 problem = 32 bit time_t overflow in January 2038)
"""
def get_err_str(part):
err = "The {} filesystem {} has no Y2038 support."
if part.mountpoint:
args = [part.fstype, "mounted at %s" % part.mountpoint]
elif part.label:
args = [part.fstype, "labeled '%s'" % part.label]
elif part.part_name:
args = [part.fstype, "in partition '%s'" % part.part_name]
else:
args = [part.fstype, "in partition %s" % part.num]
return err.format(*args)
# ext2 and ext3 are always affected by the Y2038 problem
if self.fstype in ["ext2", "ext3"]:
logger.warn(get_err_str(self))
return
ret, out = exec_native_cmd("dumpe2fs %s" % rootfs, native_sysroot)
# if ext4 is affected by the Y2038 problem depends on the inode size
for line in out.splitlines():
if line.startswith("Inode size:"):
size = int(line.split(":")[1].strip())
if size < 256:
logger.warn("%s Inodes (of size %d) are too small." %
(get_err_str(self), size))
break

View File

@@ -0,0 +1,144 @@
#!/usr/bin/env python3
#
# Copyright (c) 2011 Intel, Inc.
#
# SPDX-License-Identifier: GPL-2.0-only
#
__all__ = ['ImagerPlugin', 'SourcePlugin']
import os
import logging
import types
from collections import defaultdict
import importlib
import importlib.util
from wic import WicError
from wic.misc import get_bitbake_var
PLUGIN_TYPES = ["imager", "source"]
SCRIPTS_PLUGIN_DIR = ["scripts/lib/wic/plugins", "lib/wic/plugins"]
logger = logging.getLogger('wic')
PLUGINS = defaultdict(dict)
class PluginMgr:
_plugin_dirs = []
@classmethod
def get_plugins(cls, ptype):
"""Get dictionary of <plugin_name>:<class> pairs."""
if ptype not in PLUGIN_TYPES:
raise WicError('%s is not valid plugin type' % ptype)
# collect plugin directories
if not cls._plugin_dirs:
cls._plugin_dirs = [os.path.join(os.path.dirname(__file__), 'plugins')]
layers = get_bitbake_var("BBLAYERS") or ''
for layer_path in layers.split():
for script_plugin_dir in SCRIPTS_PLUGIN_DIR:
path = os.path.join(layer_path, script_plugin_dir)
path = os.path.abspath(os.path.expanduser(path))
if path not in cls._plugin_dirs and os.path.isdir(path):
cls._plugin_dirs.insert(0, path)
if ptype not in PLUGINS:
# load all ptype plugins
for pdir in cls._plugin_dirs:
ppath = os.path.join(pdir, ptype)
if os.path.isdir(ppath):
for fname in os.listdir(ppath):
if fname.endswith('.py'):
mname = fname[:-3]
mpath = os.path.join(ppath, fname)
logger.debug("loading plugin module %s", mpath)
spec = importlib.util.spec_from_file_location(mname, mpath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return PLUGINS.get(ptype)
class PluginMeta(type):
def __new__(cls, name, bases, attrs):
class_type = type.__new__(cls, name, bases, attrs)
if 'name' in attrs:
PLUGINS[class_type.wic_plugin_type][attrs['name']] = class_type
return class_type
class ImagerPlugin(metaclass=PluginMeta):
wic_plugin_type = "imager"
def do_create(self):
raise WicError("Method %s.do_create is not implemented" %
self.__class__.__name__)
class SourcePlugin(metaclass=PluginMeta):
wic_plugin_type = "source"
"""
The methods that can be implemented by --source plugins.
Any methods not implemented in a subclass inherit these.
"""
@classmethod
def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
bootimg_dir, kernel_dir, native_sysroot):
"""
Called after all partitions have been prepared and assembled into a
disk image. This provides a hook to allow finalization of a
disk image e.g. to write an MBR to it.
"""
logger.debug("SourcePlugin: do_install_disk: disk: %s", disk_name)
@classmethod
def do_stage_partition(cls, part, source_params, creator, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
native_sysroot):
"""
Special content staging hook called before do_prepare_partition(),
normally empty.
Typically, a partition will just use the passed-in parame e.g
straight bootimg_dir, etc, but in some cases, things need to
be more tailored e.g. to use a deploy dir + /boot, etc. This
hook allows those files to be staged in a customized fashion.
Not that get_bitbake_var() allows you to acces non-standard
variables that you might want to use for this.
"""
logger.debug("SourcePlugin: do_stage_partition: part: %s", part)
@classmethod
def do_configure_partition(cls, part, source_params, creator, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
native_sysroot):
"""
Called before do_prepare_partition(), typically used to create
custom configuration files for a partition, for example
syslinux or grub config files.
"""
logger.debug("SourcePlugin: do_configure_partition: part: %s", part)
@classmethod
def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir, rootfs_dir,
native_sysroot):
"""
Called to do the actual content population for a partition i.e. it
'prepares' the partition to be incorporated into the image.
"""
logger.debug("SourcePlugin: do_prepare_partition: part: %s", part)
@classmethod
def do_post_partition(cls, part, source_params, creator, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir, rootfs_dir,
native_sysroot):
"""
Called after the partition is created. It is useful to add post
operations e.g. security signing the partition.
"""
logger.debug("SourcePlugin: do_post_partition: part: %s", part)

View File

@@ -0,0 +1,694 @@
#
# Copyright (c) 2013, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'direct' imager plugin class for 'wic'
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import logging
import os
import random
import shutil
import tempfile
import uuid
from time import strftime
from oe.path import copyhardlinktree
from wic import WicError
from wic.filemap import sparse_copy
from wic.ksparser import KickStart, KickStartError
from wic.pluginbase import PluginMgr, ImagerPlugin
from wic.misc import get_bitbake_var, exec_cmd, exec_native_cmd
logger = logging.getLogger('wic')
class DirectPlugin(ImagerPlugin):
"""
Install a system into a file containing a partitioned disk image.
An image file is formatted with a partition table, each partition
created from a rootfs or other OpenEmbedded build artifact and dd'ed
into the virtual disk. The disk image can subsequently be dd'ed onto
media and used on actual hardware.
"""
name = 'direct'
def __init__(self, wks_file, rootfs_dir, bootimg_dir, kernel_dir,
native_sysroot, oe_builddir, options):
try:
self.ks = KickStart(wks_file)
except KickStartError as err:
raise WicError(str(err))
# parse possible 'rootfs=name' items
self.rootfs_dir = dict(rdir.split('=') for rdir in rootfs_dir.split(' '))
self.bootimg_dir = bootimg_dir
self.kernel_dir = kernel_dir
self.native_sysroot = native_sysroot
self.oe_builddir = oe_builddir
self.debug = options.debug
self.outdir = options.outdir
self.compressor = options.compressor
self.bmap = options.bmap
self.no_fstab_update = options.no_fstab_update
self.updated_fstab_path = None
self.name = "%s-%s" % (os.path.splitext(os.path.basename(wks_file))[0],
strftime("%Y%m%d%H%M"))
self.workdir = self.setup_workdir(options.workdir)
self._image = None
self.ptable_format = self.ks.bootloader.ptable
self.parts = self.ks.partitions
# as a convenience, set source to the boot partition source
# instead of forcing it to be set via bootloader --source
for part in self.parts:
if not self.ks.bootloader.source and part.mountpoint == "/boot":
self.ks.bootloader.source = part.source
break
image_path = self._full_path(self.workdir, self.parts[0].disk, "direct")
self._image = PartitionedImage(image_path, self.ptable_format,
self.parts, self.native_sysroot,
options.extra_space)
def setup_workdir(self, workdir):
if workdir:
if os.path.exists(workdir):
raise WicError("Internal workdir '%s' specified in wic arguments already exists!" % (workdir))
os.makedirs(workdir)
return workdir
else:
return tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.')
def do_create(self):
"""
Plugin entry point.
"""
try:
self.create()
self.assemble()
self.finalize()
self.print_info()
finally:
self.cleanup()
def update_fstab(self, image_rootfs):
"""Assume partition order same as in wks"""
if not image_rootfs:
return
fstab_path = image_rootfs + "/etc/fstab"
if not os.path.isfile(fstab_path):
return
with open(fstab_path) as fstab:
fstab_lines = fstab.readlines()
updated = False
for part in self.parts:
if not part.realnum or not part.mountpoint \
or part.mountpoint == "/" or not (part.mountpoint.startswith('/') or part.mountpoint == "swap"):
continue
if part.use_uuid:
if part.fsuuid:
# FAT UUID is different from others
if len(part.fsuuid) == 10:
device_name = "UUID=%s-%s" % \
(part.fsuuid[2:6], part.fsuuid[6:])
else:
device_name = "UUID=%s" % part.fsuuid
else:
device_name = "PARTUUID=%s" % part.uuid
elif part.use_label:
device_name = "LABEL=%s" % part.label
else:
# mmc device partitions are named mmcblk0p1, mmcblk0p2..
prefix = 'p' if part.disk.startswith('mmcblk') else ''
device_name = "/dev/%s%s%d" % (part.disk, prefix, part.realnum)
opts = part.fsopts if part.fsopts else "defaults"
passno = part.fspassno if part.fspassno else "0"
line = "\t".join([device_name, part.mountpoint, part.fstype,
opts, "0", passno]) + "\n"
fstab_lines.append(line)
updated = True
if updated:
self.updated_fstab_path = os.path.join(self.workdir, "fstab")
with open(self.updated_fstab_path, "w") as f:
f.writelines(fstab_lines)
if os.getenv('SOURCE_DATE_EPOCH'):
fstab_time = int(os.getenv('SOURCE_DATE_EPOCH'))
os.utime(self.updated_fstab_path, (fstab_time, fstab_time))
def _full_path(self, path, name, extention):
""" Construct full file path to a file we generate. """
return os.path.join(path, "%s-%s.%s" % (self.name, name, extention))
    #
    # Actual implementation
    #
def create(self):
"""
For 'wic', we already have our build artifacts - we just create
filesystems from the artifacts directly and combine them into
a partitioned image.
"""
if not self.no_fstab_update:
self.update_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
for part in self.parts:
# get rootfs size from bitbake variable if it's not set in .ks file
if not part.size:
# and if rootfs name is specified for the partition
image_name = self.rootfs_dir.get(part.rootfs_dir)
if image_name and os.path.sep not in image_name:
# Bitbake variable ROOTFS_SIZE is calculated in
# Image._get_rootfs_size method from meta/lib/oe/image.py
# using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
# IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
rsize_bb = get_bitbake_var('ROOTFS_SIZE', image_name)
if rsize_bb:
part.size = int(round(float(rsize_bb)))
self._image.prepare(self)
self._image.layout_partitions()
self._image.create()
def assemble(self):
"""
Assemble partitions into disk image
"""
self._image.assemble()
def finalize(self):
"""
Finalize the disk image.
For example, prepare the image to be bootable by e.g.
creating and installing a bootloader configuration.
"""
source_plugin = self.ks.bootloader.source
disk_name = self.parts[0].disk
if source_plugin:
plugin = PluginMgr.get_plugins('source')[source_plugin]
plugin.do_install_disk(self._image, disk_name, self, self.workdir,
self.oe_builddir, self.bootimg_dir,
self.kernel_dir, self.native_sysroot)
full_path = self._image.path
# Generate .bmap
if self.bmap:
logger.debug("Generating bmap file for %s", disk_name)
python = os.path.join(self.native_sysroot, 'usr/bin/python3-native/python3')
bmaptool = os.path.join(self.native_sysroot, 'usr/bin/bmaptool')
exec_native_cmd("%s %s create %s -o %s.bmap" % \
(python, bmaptool, full_path, full_path), self.native_sysroot)
# Compress the image
if self.compressor:
logger.debug("Compressing disk %s with %s", disk_name, self.compressor)
exec_cmd("%s %s" % (self.compressor, full_path))
def print_info(self):
"""
Print the image(s) and artifacts used, for the user.
"""
msg = "The new image(s) can be found here:\n"
extension = "direct" + {"gzip": ".gz",
"bzip2": ".bz2",
"xz": ".xz",
None: ""}.get(self.compressor)
full_path = self._full_path(self.outdir, self.parts[0].disk, extension)
msg += ' %s\n\n' % full_path
msg += 'The following build artifacts were used to create the image(s):\n'
for part in self.parts:
if part.rootfs_dir is None:
continue
if part.mountpoint == '/':
suffix = ':'
else:
suffix = '["%s"]:' % (part.mountpoint or part.label)
rootdir = part.rootfs_dir
msg += ' ROOTFS_DIR%s%s\n' % (suffix.ljust(20), rootdir)
msg += ' BOOTIMG_DIR: %s\n' % self.bootimg_dir
msg += ' KERNEL_DIR: %s\n' % self.kernel_dir
msg += ' NATIVE_SYSROOT: %s\n' % self.native_sysroot
logger.info(msg)
@property
def rootdev(self):
"""
Get root device name to use as a 'root' parameter
in kernel command line.
Assume partition order same as in wks
"""
for part in self.parts:
if part.mountpoint == "/":
if part.uuid:
return "PARTUUID=%s" % part.uuid
elif part.label and self.ptable_format != 'msdos':
return "PARTLABEL=%s" % part.label
else:
suffix = 'p' if part.disk.startswith('mmcblk') else ''
return "/dev/%s%s%-d" % (part.disk, suffix, part.realnum)
def cleanup(self):
if self._image:
self._image.cleanup()
# Move results to the output dir
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
for fname in os.listdir(self.workdir):
path = os.path.join(self.workdir, fname)
if os.path.isfile(path):
shutil.move(path, os.path.join(self.outdir, fname))
# remove work directory when it is not in debugging mode
if not self.debug:
shutil.rmtree(self.workdir, ignore_errors=True)
# Overhead of the MBR partitioning scheme (just one sector)
MBR_OVERHEAD = 1
# Overhead of the GPT partitioning scheme
GPT_OVERHEAD = 34
# Size of a sector in bytes
SECTOR_SIZE = 512
class PartitionedImage():
"""
Partitioned image in a file.
"""
    def __init__(self, path, ptable_format, partitions, native_sysroot=None, extra_space=0):
        """Model a partitioned disk image.

        path           -- path of the image file to be created
        ptable_format  -- partition table type: 'msdos', 'gpt' or 'gpt-hybrid'
        partitions     -- list of partition objects parsed from the .ks file
        native_sysroot -- sysroot used to find native tools (parted, sfdisk, ...)
        extra_space    -- extra bytes added on top of the computed minimum size
        """
        self.path = path # Path to the image file
        self.numpart = 0 # Number of allocated partitions
        self.realpart = 0 # Number of partitions in the partition table
        self.primary_part_num = 0 # Number of primary partitions (msdos)
        self.extendedpart = 0 # Create extended partition before this logical partition (msdos)
        self.extended_size_sec = 0 # Size of extended partition (msdos)
        self.logical_part_cnt = 0 # Number of total logical partitions (msdos)
        self.offset = 0 # Offset of next partition (in sectors)
        self.min_size = 0 # Minimum required disk size to fit
                          # all partitions (in bytes)
        self.ptable_format = ptable_format # Partition table format
        # Disk system identifier: derived deterministically from
        # SOURCE_DATE_EPOCH when set, so builds are reproducible;
        # otherwise a cryptographically random value is used.
        if os.getenv('SOURCE_DATE_EPOCH'):
            self.identifier = random.Random(int(os.getenv('SOURCE_DATE_EPOCH'))).randint(1, 0xffffffff)
        else:
            self.identifier = random.SystemRandom().randint(1, 0xffffffff)
        self.partitions = partitions
        self.partimages = []
        # Size of a sector used in calculations
        self.sector_size = SECTOR_SIZE
        self.native_sysroot = native_sysroot
        num_real_partitions = len([p for p in self.partitions if not p.no_table])
        self.extra_space = extra_space

        # calculate the real partition number, accounting for partitions not
        # in the partition table and logical partitions
        realnum = 0
        for part in self.partitions:
            if part.no_table:
                part.realnum = 0
            else:
                realnum += 1
                # On msdos with more than 4 table entries, entry 4 becomes the
                # extended partition, so real partitions past the third are
                # shifted up by one.
                if self.ptable_format == 'msdos' and realnum > 3 and num_real_partitions > 4:
                    part.realnum = realnum + 1
                    continue
                part.realnum = realnum

        # generate partition and filesystem UUIDs
        for part in self.partitions:
            if not part.uuid and part.use_uuid:
                if self.ptable_format in ('gpt', 'gpt-hybrid'):
                    part.uuid = str(uuid.uuid4())
                else: # msdos partition table: "<disk-id>-<partition-number>"
                    part.uuid = '%08x-%02d' % (self.identifier, part.realnum)
            if not part.fsuuid:
                # vfat/msdos volume IDs are 32-bit; other filesystems get a
                # full random UUID.
                if part.fstype == 'vfat' or part.fstype == 'msdos':
                    part.fsuuid = '0x' + str(uuid.uuid4())[:8].upper()
                else:
                    part.fsuuid = str(uuid.uuid4())
            else:
                # make sure the fsuuid for vfat/msdos aligns with format 0xYYYYYYYY
                if part.fstype == 'vfat' or part.fstype == 'msdos':
                    if part.fsuuid.upper().startswith("0X"):
                        part.fsuuid = '0x' + part.fsuuid.upper()[2:].rjust(8,"0")
                    else:
                        part.fsuuid = '0x' + part.fsuuid.upper().rjust(8,"0")
def prepare(self, imager):
"""Prepare an image. Call prepare method of all image partitions."""
for part in self.partitions:
# need to create the filesystems in order to get their
# sizes before we can add them and do the layout.
part.prepare(imager, imager.workdir, imager.oe_builddir,
imager.rootfs_dir, imager.bootimg_dir,
imager.kernel_dir, imager.native_sysroot,
imager.updated_fstab_path)
# Converting kB to sectors for parted
part.size_sec = part.disk_size * 1024 // self.sector_size
    def layout_partitions(self):
        """ Layout the partitions, meaning calculate the position of every
        partition on the disk. The 'ptable_format' parameter defines the
        partition table format and may be "msdos". """

        logger.debug("Assigning %s partitions to disks", self.ptable_format)

        # The number of primary and logical partitions. Extended partition and
        # partitions not listed in the table are not included.
        num_real_partitions = len([p for p in self.partitions if not p.no_table])

        # Go through partitions in the order they are added in .ks file
        for num in range(len(self.partitions)):
            part = self.partitions[num]

            # msdos tables have no storage for names or GPT type GUIDs
            if self.ptable_format == 'msdos' and part.part_name:
                raise WicError("setting custom partition name is not " \
                               "implemented for msdos partitions")

            if self.ptable_format == 'msdos' and part.part_type:
                # The --part-type can also be implemented for MBR partitions,
                # in which case it would map to the 1-byte "partition type"
                # field at offset 3 of the partition entry.
                raise WicError("setting custom partition type is not " \
                               "implemented for msdos partitions")

            if part.mbr and self.ptable_format != 'gpt-hybrid':
                raise WicError("Partition may only be included in MBR with " \
                               "a gpt-hybrid partition table")

            # Get the disk where the partition is located
            self.numpart += 1
            if not part.no_table:
                self.realpart += 1

            # Before the first partition, reserve room for the partition
            # table itself (MBR or GPT header + entries).
            if self.numpart == 1:
                if self.ptable_format == "msdos":
                    overhead = MBR_OVERHEAD
                elif self.ptable_format in ("gpt", "gpt-hybrid"):
                    overhead = GPT_OVERHEAD

                # Skip one sector required for the partitioning scheme overhead
                self.offset += overhead

            if self.ptable_format == "msdos":
                # Past three primaries (or when more than 4 table entries are
                # needed), remaining partitions must become logical.
                if self.primary_part_num > 3 or \
                   (self.extendedpart == 0 and self.primary_part_num >= 3 and num_real_partitions > 4):
                    part.type = 'logical'
                # Reserve a sector for EBR for every logical partition
                # before alignment is performed.
                # NOTE(review): this adds 2 sectors although the comment above
                # mentions a single EBR sector -- kept as-is; confirm intent.
                if part.type == 'logical':
                    self.offset += 2

            align_sectors = 0
            if part.align:
                # If not first partition and we do have alignment set we need
                # to align the partition.
                # FIXME: This leaves a empty spaces to the disk. To fill the
                # gaps we could enlarge the previous partition?

                # Calc how much the alignment is off.
                align_sectors = self.offset % (part.align * 1024 // self.sector_size)

                if align_sectors:
                    # If partition is not aligned as required, we need
                    # to move forward to the next alignment point
                    align_sectors = (part.align * 1024 // self.sector_size) - align_sectors

                    logger.debug("Realignment for %s%s with %s sectors, original"
                                 " offset %s, target alignment is %sK.",
                                 part.disk, self.numpart, align_sectors,
                                 self.offset, part.align)

                    # increase the offset so we actually start the partition on right alignment
                    self.offset += align_sectors

            # A fixed on-disk offset was requested for this partition in the
            # .ks file; it must be sector-aligned and not before the current
            # free position.
            if part.offset is not None:
                offset = part.offset // self.sector_size

                if offset * self.sector_size != part.offset:
                    raise WicError("Could not place %s%s at offset %d with sector size %d" % (part.disk, self.numpart, part.offset, self.sector_size))

                delta = offset - self.offset
                if delta < 0:
                    raise WicError("Could not place %s%s at offset %d: next free sector is %d (delta: %d)" % (part.disk, self.numpart, part.offset, self.offset, delta))

                logger.debug("Skipping %d sectors to place %s%s at offset %dK",
                             delta, part.disk, self.numpart, part.offset)

                self.offset = offset

            part.start = self.offset
            self.offset += part.size_sec

            if not part.no_table:
                part.num = self.realpart
            else:
                part.num = 0

            if self.ptable_format == "msdos" and not part.no_table:
                if part.type == 'logical':
                    # Logical partitions are numbered from 5 upwards
                    self.logical_part_cnt += 1
                    part.num = self.logical_part_cnt + 4
                    if self.extendedpart == 0:
                        # Create extended partition as a primary partition
                        self.primary_part_num += 1
                        self.extendedpart = part.num
                    else:
                        self.extended_size_sec += align_sectors
                    # Account for the logical partition plus its EBR gap
                    self.extended_size_sec += part.size_sec + 2
                else:
                    self.primary_part_num += 1
                    part.num = self.primary_part_num

            logger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
                         "sectors (%d bytes).", part.mountpoint, part.disk,
                         part.num, part.start, self.offset - 1, part.size_sec,
                         part.size_sec * self.sector_size)

        # Once all the partitions have been laid out, we can calculate the
        # minimum disk size
        self.min_size = self.offset
        if self.ptable_format in ("gpt", "gpt-hybrid"):
            # GPT keeps a backup header + entries at the end of the disk
            self.min_size += GPT_OVERHEAD

        self.min_size *= self.sector_size
        self.min_size += self.extra_space
def _create_partition(self, device, parttype, fstype, start, size):
""" Create a partition on an image described by the 'device' object. """
# Start is included to the size so we need to substract one from the end.
end = start + size - 1
logger.debug("Added '%s' partition, sectors %d-%d, size %d sectors",
parttype, start, end, size)
cmd = "parted -s %s unit s mkpart %s" % (device, parttype)
if fstype:
cmd += " %s" % fstype
cmd += " %d %d" % (start, end)
return exec_native_cmd(cmd, self.native_sysroot)
def _write_identifier(self, device, identifier):
logger.debug("Set disk identifier %x", identifier)
with open(device, 'r+b') as img:
img.seek(0x1B8)
img.write(identifier.to_bytes(4, 'little'))
def _make_disk(self, device, ptable_format, min_size):
logger.debug("Creating sparse file %s", device)
with open(device, 'w') as sparse:
os.ftruncate(sparse.fileno(), min_size)
logger.debug("Initializing partition table for %s", device)
exec_native_cmd("parted -s %s mklabel %s" % (device, ptable_format),
self.native_sysroot)
def _write_disk_guid(self):
if self.ptable_format in ('gpt', 'gpt-hybrid'):
if os.getenv('SOURCE_DATE_EPOCH'):
self.disk_guid = uuid.UUID(int=int(os.getenv('SOURCE_DATE_EPOCH')))
else:
self.disk_guid = uuid.uuid4()
logger.debug("Set disk guid %s", self.disk_guid)
sfdisk_cmd = "sfdisk --disk-id %s %s" % (self.path, self.disk_guid)
exec_native_cmd(sfdisk_cmd, self.native_sysroot)
    def create(self):
        """Create the partition table(s) and all partitions on the image.

        For 'gpt-hybrid' an additional msdos-labelled file <path>.mbr is
        created and its MBR is copied over the GPT image's protective MBR
        at the end.
        """
        self._make_disk(self.path,
                        "gpt" if self.ptable_format == "gpt-hybrid" else self.ptable_format,
                        self.min_size)

        self._write_identifier(self.path, self.identifier)
        self._write_disk_guid()

        if self.ptable_format == "gpt-hybrid":
            mbr_path = self.path + ".mbr"
            self._make_disk(mbr_path, "msdos", self.min_size)
            self._write_identifier(mbr_path, self.identifier)

        logger.debug("Creating partitions")

        hybrid_mbr_part_num = 0

        for part in self.partitions:
            # part.num == 0 means "not in the partition table" (--no-table)
            if part.num == 0:
                continue

            if self.ptable_format == "msdos" and part.num == self.extendedpart:
                # Create an extended partition (note: extended
                # partition is described in MBR and contains all
                # logical partitions). The logical partitions save a
                # sector for an EBR just before the start of a
                # partition. The extended partition must start one
                # sector before the start of the first logical
                # partition. This way the first EBR is inside of the
                # extended partition. Since the extended partitions
                # starts a sector before the first logical partition,
                # add a sector at the back, so that there is enough
                # room for all logical partitions.
                self._create_partition(self.path, "extended",
                                       None, part.start - 2,
                                       self.extended_size_sec)

            # Map wic fstypes to the filesystem names parted understands
            if part.fstype == "swap":
                parted_fs_type = "linux-swap"
            elif part.fstype == "vfat":
                parted_fs_type = "fat32"
            elif part.fstype == "msdos":
                parted_fs_type = "fat16"
                if not part.system_id:
                    part.system_id = '0x6' # FAT16
            else:
                # Type for ext2/ext3/ext4/btrfs
                parted_fs_type = "ext2"

            # Boot ROM of OMAP boards require vfat boot partition to have an
            # even number of sectors.
            if part.mountpoint == "/boot" and part.fstype in ["vfat", "msdos"] \
               and part.size_sec % 2:
                logger.debug("Subtracting one sector from '%s' partition to "
                             "get even number of sectors for the partition",
                             part.mountpoint)
                part.size_sec -= 1

            self._create_partition(self.path, part.type,
                                   parted_fs_type, part.start, part.size_sec)

            # Mirror explicitly requested partitions into the hybrid MBR
            if self.ptable_format == "gpt-hybrid" and part.mbr:
                hybrid_mbr_part_num += 1
                if hybrid_mbr_part_num > 4:
                    raise WicError("Extended MBR partitions are not supported in hybrid MBR")
                self._create_partition(mbr_path, "primary",
                                       parted_fs_type, part.start, part.size_sec)

            if self.ptable_format in ("gpt", "gpt-hybrid") and (part.part_name or part.label):
                partition_label = part.part_name if part.part_name else part.label
                logger.debug("partition %d: set name to %s",
                             part.num, partition_label)
                exec_native_cmd("sgdisk --change-name=%d:%s %s" % \
                                (part.num, partition_label,
                                 self.path), self.native_sysroot)

            if part.part_type:
                logger.debug("partition %d: set type UID to %s",
                             part.num, part.part_type)
                exec_native_cmd("sgdisk --typecode=%d:%s %s" % \
                                (part.num, part.part_type,
                                 self.path), self.native_sysroot)

            if part.uuid and self.ptable_format in ("gpt", "gpt-hybrid"):
                logger.debug("partition %d: set UUID to %s",
                             part.num, part.uuid)
                exec_native_cmd("sgdisk --partition-guid=%d:%s %s" % \
                                (part.num, part.uuid, self.path),
                                self.native_sysroot)

            if part.active:
                # On GPT the BIOS-bootable marker is the legacy_boot flag
                flag_name = "legacy_boot" if self.ptable_format in ('gpt', 'gpt-hybrid') else "boot"
                logger.debug("Set '%s' flag for partition '%s' on disk '%s'",
                             flag_name, part.num, self.path)
                exec_native_cmd("parted -s %s set %d %s on" % \
                                (self.path, part.num, flag_name),
                                self.native_sysroot)
                if self.ptable_format == 'gpt-hybrid' and part.mbr:
                    exec_native_cmd("parted -s %s set %d %s on" % \
                                    (mbr_path, hybrid_mbr_part_num, "boot"),
                                    self.native_sysroot)
            if part.system_id:
                exec_native_cmd("sfdisk --part-type %s %s %s" % \
                                (self.path, part.num, part.system_id),
                                self.native_sysroot)

            if part.hidden and self.ptable_format == "gpt":
                logger.debug("Set hidden attribute for partition '%s' on disk '%s'",
                             part.num, self.path)
                exec_native_cmd("sfdisk --part-attrs %s %s RequiredPartition" % \
                                (self.path, part.num),
                                self.native_sysroot)

        if self.ptable_format == "gpt-hybrid":
            # Write a protective GPT partition
            hybrid_mbr_part_num += 1
            if hybrid_mbr_part_num > 4:
                raise WicError("Extended MBR partitions are not supported in hybrid MBR")

            # parted cannot directly create a protective GPT partition, so
            # create with an arbitrary type, then change it to the correct type
            # with sfdisk
            self._create_partition(mbr_path, "primary", "fat32", 1, GPT_OVERHEAD)
            exec_native_cmd("sfdisk --part-type %s %d 0xee" % (mbr_path, hybrid_mbr_part_num),
                            self.native_sysroot)

            # Copy hybrid MBR (first 512 bytes) over the GPT image's
            # protective MBR
            with open(mbr_path, "rb") as mbr_file:
                with open(self.path, "r+b") as image_file:
                    mbr = mbr_file.read(512)
                    image_file.write(mbr)
def cleanup(self):
pass
def assemble(self):
logger.debug("Installing partitions")
for part in self.partitions:
source = part.source_file
if source:
# install source_file contents into a partition
sparse_copy(source, self.path, seek=part.start * self.sector_size)
logger.debug("Installed %s in partition %d, sectors %d-%d, "
"size %d sectors", source, part.num, part.start,
part.start + part.size_sec - 1, part.size_sec)
partimage = self.path + '.p%d' % part.num
os.rename(source, partimage)
self.partimages.append(partimage)

View File

@@ -0,0 +1,213 @@
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This implements the 'bootimg-biosplusefi' source plugin class for 'wic'
#
# AUTHORS
# William Bourque <wbourque [at) gmail.com>
import types
from wic.pluginbase import SourcePlugin
from importlib.machinery import SourceFileLoader
class BootimgBiosPlusEFIPlugin(SourcePlugin):
    """
    Create MBR + EFI boot partition

    This plugin creates a boot partition that contains both
    legacy BIOS and EFI content. It will be able to boot from both.
    This is useful when managing PC fleet with some older machines
    without EFI support.

    Note it is possible to create an image that can boot from both
    legacy BIOS and EFI by defining two partitions : one with arg
    --source bootimg-efi and another one with --source bootimg-pcbios.
    However, this method has the obvious downside that it requires TWO
    partitions to be created on the storage device.
    Both partitions will also be marked as "bootable" which does not work on
    most BIOS, as BIOS often uses the "bootable" flag to determine
    what to boot. If you have such a BIOS, you need to manually remove the
    "bootable" flag from the EFI partition for the drive to be bootable.
    Having two partitions also seems to confuse wic : the content of
    the first partition will be duplicated into the second, even though it
    will not be used at all.

    Also, unlike "isoimage-isohybrid" that also does BIOS and EFI, this plugin
    allows you to have more than only a single rootfs partition and does
    not turn the rootfs into an initramfs RAM image.

    This plugin is made to put everything into a single /boot partition so it
    does not have the limitations listed above.

    The plugin tries not to reimplement what's already
    been done in other plugins; as such it imports "bootimg-pcbios"
    and "bootimg-efi".
    Plugin "bootimg-pcbios" is used to generate legacy BIOS boot.
    Plugin "bootimg-efi" is used to generate the UEFI boot. Note that it
    requires a --sourceparams argument to know which loader to use; refer
    to "bootimg-efi" code/documentation for the list of loaders.

    Imports are handled with "SourceFileLoader" from importlib as it is
    otherwise very difficult to import a module that has a hyphen "-" in its
    filename.
    The SourcePlugin() methods used in the plugins (do_install_disk,
    do_configure_partition, do_prepare_partition) are then called on both,
    beginning with "bootimg-efi".

    Plugin options, such as "--sourceparams" can still be passed to a
    plugin, as long as they do not cause issues in the other plugin.

    Example wic configuration:
    part /boot --source bootimg-biosplusefi --sourceparams="loader=grub-efi"\\
               --ondisk sda --label os_boot --active --align 1024 --use-uuid
    """

    name = 'bootimg-biosplusefi'

    # Module (file) names of the two wrapped plugins
    __PCBIOS_MODULE_NAME = "bootimg-pcbios"
    __EFI_MODULE_NAME = "bootimg-efi"

    # Lazily-created instances of the wrapped plugin classes
    __imgEFIObj = None
    __imgBiosObj = None

    @classmethod
    def __init__(cls):
        """
        Constructor (init)
        """

        # XXX
        # For some reasons, __init__ constructor is never called.
        # Something to do with how pluginbase works?
        cls.__instanciateSubClasses()

    @classmethod
    def __instanciateSubClasses(cls):
        """
        Load the bootimg-pcbios and bootimg-efi plugin modules from this
        plugin's directory and instantiate their plugin classes.
        """

        # NOTE(review): uses os.path below; relies on a module-level
        # `import os` that is not visible in this chunk's header -- confirm.

        # Import bootimg-pcbios (class name "BootimgPcbiosPlugin")
        modulePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                  cls.__PCBIOS_MODULE_NAME + ".py")
        loader = SourceFileLoader(cls.__PCBIOS_MODULE_NAME, modulePath)
        mod = types.ModuleType(loader.name)
        loader.exec_module(mod)
        cls.__imgBiosObj = mod.BootimgPcbiosPlugin()

        # Import bootimg-efi (class name "BootimgEFIPlugin")
        modulePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                  cls.__EFI_MODULE_NAME + ".py")
        loader = SourceFileLoader(cls.__EFI_MODULE_NAME, modulePath)
        mod = types.ModuleType(loader.name)
        loader.exec_module(mod)
        cls.__imgEFIObj = mod.BootimgEFIPlugin()

    @classmethod
    def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
                        bootimg_dir, kernel_dir, native_sysroot):
        """
        Called after all partitions have been prepared and assembled into a
        disk image.  Delegates to bootimg-efi first, then bootimg-pcbios.
        """

        # The wrapped plugin objects may not exist yet (see __init__ XXX)
        if ( (not cls.__imgEFIObj) or (not cls.__imgBiosObj) ):
            cls.__instanciateSubClasses()

        cls.__imgEFIObj.do_install_disk(
            disk,
            disk_name,
            creator,
            workdir,
            oe_builddir,
            bootimg_dir,
            kernel_dir,
            native_sysroot)

        cls.__imgBiosObj.do_install_disk(
            disk,
            disk_name,
            creator,
            workdir,
            oe_builddir,
            bootimg_dir,
            kernel_dir,
            native_sysroot)

    @classmethod
    def do_configure_partition(cls, part, source_params, creator, cr_workdir,
                               oe_builddir, bootimg_dir, kernel_dir,
                               native_sysroot):
        """
        Called before do_prepare_partition().
        Delegates to bootimg-efi first, then bootimg-pcbios.
        """

        if ( (not cls.__imgEFIObj) or (not cls.__imgBiosObj) ):
            cls.__instanciateSubClasses()

        cls.__imgEFIObj.do_configure_partition(
            part,
            source_params,
            creator,
            cr_workdir,
            oe_builddir,
            bootimg_dir,
            kernel_dir,
            native_sysroot)

        cls.__imgBiosObj.do_configure_partition(
            part,
            source_params,
            creator,
            cr_workdir,
            oe_builddir,
            bootimg_dir,
            kernel_dir,
            native_sysroot)

    @classmethod
    def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
                             oe_builddir, bootimg_dir, kernel_dir,
                             rootfs_dir, native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.
        Delegates to bootimg-efi first, then bootimg-pcbios.
        """

        if ( (not cls.__imgEFIObj) or (not cls.__imgBiosObj) ):
            cls.__instanciateSubClasses()

        cls.__imgEFIObj.do_prepare_partition(
            part,
            source_params,
            creator,
            cr_workdir,
            oe_builddir,
            bootimg_dir,
            kernel_dir,
            rootfs_dir,
            native_sysroot)

        cls.__imgBiosObj.do_prepare_partition(
            part,
            source_params,
            creator,
            cr_workdir,
            oe_builddir,
            bootimg_dir,
            kernel_dir,
            rootfs_dir,
            native_sysroot)

View File

@@ -0,0 +1,507 @@
#
# Copyright (c) 2014, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-efi' source plugin class for 'wic'
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import logging
import os
import tempfile
import shutil
import re
from glob import glob
from wic import WicError
from wic.engine import get_custom_config
from wic.pluginbase import SourcePlugin
from wic.misc import (exec_cmd, exec_native_cmd,
get_bitbake_var, BOOTDD_EXTRA_SPACE)
logger = logging.getLogger('wic')
class BootimgEFIPlugin(SourcePlugin):
    """
    Create EFI boot partition.
    This plugin supports GRUB 2 and systemd-boot bootloaders.
    """

    name = 'bootimg-efi'

    @classmethod
    def _copy_additional_files(cls, hdddir, initrd, dtb):
        """Copy initrd image(s) and the (single) DTB from the deploy
        directory into the boot staging directory hdddir."""
        bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
        if not bootimg_dir:
            raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")

        if initrd:
            # Multiple initrds may be given, separated by ';'
            initrds = initrd.split(';')
            for rd in initrds:
                cp_cmd = "cp %s/%s %s" % (bootimg_dir, rd, hdddir)
                exec_cmd(cp_cmd, True)
        else:
            logger.debug("Ignoring missing initrd")

        if dtb:
            if ';' in dtb:
                raise WicError("Only one DTB supported, exiting")
            cp_cmd = "cp %s/%s %s" % (bootimg_dir, dtb, hdddir)
            exec_cmd(cp_cmd, True)

    @classmethod
    def do_configure_grubefi(cls, hdddir, creator, cr_workdir, source_params):
        """
        Create loader-specific (grub-efi) config
        """
        configfile = creator.ks.bootloader.configfile
        custom_cfg = None
        if configfile:
            custom_cfg = get_custom_config(configfile)
            if custom_cfg:
                # Use a custom configuration for grub
                grubefi_conf = custom_cfg
                logger.debug("Using custom configuration file "
                             "%s for grub.cfg", configfile)
            else:
                raise WicError("configfile is specified but failed to "
                               "get it from %s." % configfile)

        initrd = source_params.get('initrd')
        dtb = source_params.get('dtb')

        cls._copy_additional_files(hdddir, initrd, dtb)

        if not custom_cfg:
            # Create grub configuration using parameters from wks file
            bootloader = creator.ks.bootloader
            title = source_params.get('title')

            grubefi_conf = ""
            grubefi_conf += "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1\n"
            grubefi_conf += "default=boot\n"
            grubefi_conf += "timeout=%s\n" % bootloader.timeout
            grubefi_conf += "menuentry '%s'{\n" % (title if title else "boot")

            kernel = get_bitbake_var("KERNEL_IMAGETYPE")
            # Bundled initramfs images use a "<type>-<link-name>.bin" name
            if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
                if get_bitbake_var("INITRAMFS_IMAGE"):
                    kernel = "%s-%s.bin" % \
                        (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))

            label = source_params.get('label')
            label_conf = "root=%s" % creator.rootdev
            if label:
                label_conf = "LABEL=%s" % label

            grubefi_conf += "linux /%s %s rootwait %s\n" \
                % (kernel, label_conf, bootloader.append)

            if initrd:
                initrds = initrd.split(';')
                grubefi_conf += "initrd"
                for rd in initrds:
                    grubefi_conf += " /%s" % rd
                grubefi_conf += "\n"

            if dtb:
                grubefi_conf += "devicetree /%s\n" % dtb

            grubefi_conf += "}\n"

        logger.debug("Writing grubefi config %s/hdd/boot/EFI/BOOT/grub.cfg",
                     cr_workdir)
        cfg = open("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir, "w")
        cfg.write(grubefi_conf)
        cfg.close()

    @classmethod
    def do_configure_systemdboot(cls, hdddir, creator, cr_workdir, source_params):
        """
        Create loader-specific systemd-boot/gummiboot config
        """
        install_cmd = "install -d %s/loader" % hdddir
        exec_cmd(install_cmd)

        install_cmd = "install -d %s/loader/entries" % hdddir
        exec_cmd(install_cmd)

        bootloader = creator.ks.bootloader
        unified_image = source_params.get('create-unified-kernel-image') == "true"

        loader_conf = ""
        # A unified kernel image is auto-discovered by systemd-boot, so no
        # default entry is written for it
        if not unified_image:
            loader_conf += "default boot\n"
        loader_conf += "timeout %d\n" % bootloader.timeout

        initrd = source_params.get('initrd')
        dtb = source_params.get('dtb')

        if not unified_image:
            cls._copy_additional_files(hdddir, initrd, dtb)

        logger.debug("Writing systemd-boot config "
                     "%s/hdd/boot/loader/loader.conf", cr_workdir)
        cfg = open("%s/hdd/boot/loader/loader.conf" % cr_workdir, "w")
        cfg.write(loader_conf)
        cfg.close()

        configfile = creator.ks.bootloader.configfile
        custom_cfg = None
        if configfile:
            custom_cfg = get_custom_config(configfile)
            if custom_cfg:
                # Use a custom configuration for systemd-boot
                boot_conf = custom_cfg
                logger.debug("Using custom configuration file "
                             "%s for systemd-boots's boot.conf", configfile)
            else:
                # NOTE(review): the second argument is passed to WicError
                # instead of being %-formatted into the message -- looks
                # like a latent bug; kept as-is in this documentation pass.
                raise WicError("configfile is specified but failed to "
                               "get it from %s.", configfile)

        if not custom_cfg:
            # Create systemd-boot configuration using parameters from wks file
            kernel = get_bitbake_var("KERNEL_IMAGETYPE")
            if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
                if get_bitbake_var("INITRAMFS_IMAGE"):
                    kernel = "%s-%s.bin" % \
                        (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))

            title = source_params.get('title')

            boot_conf = ""
            boot_conf += "title %s\n" % (title if title else "boot")
            boot_conf += "linux /%s\n" % kernel

            label = source_params.get('label')
            label_conf = "LABEL=Boot root=%s" % creator.rootdev
            if label:
                label_conf = "LABEL=%s" % label

            boot_conf += "options %s %s\n" % \
                (label_conf, bootloader.append)

            if initrd:
                initrds = initrd.split(';')
                for rd in initrds:
                    boot_conf += "initrd /%s\n" % rd

            if dtb:
                boot_conf += "devicetree /%s\n" % dtb

        if not unified_image:
            logger.debug("Writing systemd-boot config "
                         "%s/hdd/boot/loader/entries/boot.conf", cr_workdir)
            cfg = open("%s/hdd/boot/loader/entries/boot.conf" % cr_workdir, "w")
            cfg.write(boot_conf)
            cfg.close()

    @classmethod
    def do_configure_partition(cls, part, source_params, creator, cr_workdir,
                               oe_builddir, bootimg_dir, kernel_dir,
                               native_sysroot):
        """
        Called before do_prepare_partition(), creates loader-specific config
        """
        hdddir = "%s/hdd/boot" % cr_workdir

        install_cmd = "install -d %s/EFI/BOOT" % hdddir
        exec_cmd(install_cmd)

        try:
            if source_params['loader'] == 'grub-efi':
                cls.do_configure_grubefi(hdddir, creator, cr_workdir, source_params)
            elif source_params['loader'] == 'systemd-boot':
                cls.do_configure_systemdboot(hdddir, creator, cr_workdir, source_params)
            elif source_params['loader'] == 'uefi-kernel':
                # Kernel acts as its own EFI loader; nothing to configure here
                pass
            else:
                raise WicError("unrecognized bootimg-efi loader: %s" % source_params['loader'])
        except KeyError:
            raise WicError("bootimg-efi requires a loader, none specified")

        if get_bitbake_var("IMAGE_EFI_BOOT_FILES") is None:
            logger.debug('No boot files defined in IMAGE_EFI_BOOT_FILES')
        else:
            boot_files = None
            # Most specific variable wins: per-uuid, then per-label, then plain
            for (fmt, id) in (("_uuid-%s", part.uuid), ("_label-%s", part.label), (None, None)):
                if fmt:
                    var = fmt % id
                else:
                    var = ""

                boot_files = get_bitbake_var("IMAGE_EFI_BOOT_FILES" + var)
                if boot_files:
                    break

            logger.debug('Boot files: %s', boot_files)

            # list of tuples (src_name, dst_name)
            deploy_files = []
            for src_entry in re.findall(r'[\w;\-\.\+/\*]+', boot_files):
                if ';' in src_entry:
                    dst_entry = tuple(src_entry.split(';'))
                    if not dst_entry[0] or not dst_entry[1]:
                        raise WicError('Malformed boot file entry: %s' % src_entry)
                else:
                    dst_entry = (src_entry, src_entry)

                logger.debug('Destination entry: %r', dst_entry)
                deploy_files.append(dst_entry)

            cls.install_task = [];
            for deploy_entry in deploy_files:
                src, dst = deploy_entry
                if '*' in src:
                    # by default install files under their basename
                    entry_name_fn = os.path.basename
                    if dst != src:
                        # unless a target name was given, then treat name
                        # as a directory and append a basename
                        entry_name_fn = lambda name: \
                                        os.path.join(dst,
                                                     os.path.basename(name))

                    srcs = glob(os.path.join(kernel_dir, src))

                    logger.debug('Globbed sources: %s', ', '.join(srcs))
                    for entry in srcs:
                        src = os.path.relpath(entry, kernel_dir)
                        entry_dst_name = entry_name_fn(entry)
                        cls.install_task.append((src, entry_dst_name))
                else:
                    cls.install_task.append((src, dst))

    @classmethod
    def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
                             oe_builddir, bootimg_dir, kernel_dir,
                             rootfs_dir, native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.
        In this case, prepare content for an EFI (grub) boot partition.
        """
        if not kernel_dir:
            kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
            if not kernel_dir:
                raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")

        staging_kernel_dir = kernel_dir

        hdddir = "%s/hdd/boot" % cr_workdir

        kernel = get_bitbake_var("KERNEL_IMAGETYPE")
        if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
            if get_bitbake_var("INITRAMFS_IMAGE"):
                kernel = "%s-%s.bin" % \
                    (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))

        if source_params.get('create-unified-kernel-image') == "true":
            initrd = source_params.get('initrd')
            if not initrd:
                raise WicError("initrd= must be specified when create-unified-kernel-image=true, exiting")

            deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
            efi_stub = glob("%s/%s" % (deploy_dir, "linux*.efi.stub"))
            if len(efi_stub) == 0:
                raise WicError("Unified Kernel Image EFI stub not found, exiting")
            efi_stub = efi_stub[0]

            with tempfile.TemporaryDirectory() as tmp_dir:
                label = source_params.get('label')
                label_conf = "root=%s" % creator.rootdev
                if label:
                    label_conf = "LABEL=%s" % label

                bootloader = creator.ks.bootloader
                cmdline = open("%s/cmdline" % tmp_dir, "w")
                cmdline.write("%s %s" % (label_conf, bootloader.append))
                cmdline.close()

                # Concatenate all initrds into a single blob for the .initrd
                # PE section
                initrds = initrd.split(';')
                initrd = open("%s/initrd" % tmp_dir, "wb")
                for f in initrds:
                    with open("%s/%s" % (deploy_dir, f), 'rb') as in_file:
                        shutil.copyfileobj(in_file, initrd)
                initrd.close()

                # Searched by systemd-boot:
                # https://systemd.io/BOOT_LOADER_SPECIFICATION/#type-2-efi-unified-kernel-images
                install_cmd = "install -d %s/EFI/Linux" % hdddir
                exec_cmd(install_cmd)

                staging_dir_host = get_bitbake_var("STAGING_DIR_HOST")
                target_sys = get_bitbake_var("TARGET_SYS")

                # Query the stub's PE SectionAlignment to lay out the added
                # sections on properly aligned virtual addresses
                objdump_cmd = "%s-objdump" % target_sys
                objdump_cmd += " -p %s" % efi_stub
                objdump_cmd += " | awk '{ if ($1 == \"SectionAlignment\"){print $2} }'"

                ret, align_str = exec_native_cmd(objdump_cmd, native_sysroot)
                align = int(align_str, 16)

                # End of the stub's last section = VMA + size of the last
                # section header line
                objdump_cmd = "%s-objdump" % target_sys
                objdump_cmd += " -h %s | tail -2" % efi_stub
                ret, output = exec_native_cmd(objdump_cmd, native_sysroot)

                offset = int(output.split()[2], 16) + int(output.split()[3], 16)

                osrel_off = offset + align - offset % align
                osrel_path = "%s/usr/lib/os-release" % staging_dir_host
                osrel_sz = os.stat(osrel_path).st_size

                cmdline_off = osrel_off + osrel_sz
                cmdline_off = cmdline_off + align - cmdline_off % align
                cmdline_sz = os.stat(cmdline.name).st_size

                dtb_off = cmdline_off + cmdline_sz
                dtb_off = dtb_off + align - dtb_off % align

                dtb = source_params.get('dtb')
                if dtb:
                    if ';' in dtb:
                        raise WicError("Only one DTB supported, exiting")
                    dtb_path = "%s/%s" % (deploy_dir, dtb)
                    dtb_params = '--add-section .dtb=%s --change-section-vma .dtb=0x%x' % \
                        (dtb_path, dtb_off)
                    linux_off = dtb_off + os.stat(dtb_path).st_size
                    linux_off = linux_off + align - linux_off % align
                else:
                    dtb_params = ''
                    linux_off = dtb_off

                linux_path = "%s/%s" % (staging_kernel_dir, kernel)
                linux_sz = os.stat(linux_path).st_size

                initrd_off = linux_off + linux_sz
                initrd_off = initrd_off + align - initrd_off % align

                # https://www.freedesktop.org/software/systemd/man/systemd-stub.html
                objcopy_cmd = "%s-objcopy" % target_sys
                objcopy_cmd += " --enable-deterministic-archives"
                objcopy_cmd += " --preserve-dates"
                objcopy_cmd += " --add-section .osrel=%s" % osrel_path
                objcopy_cmd += " --change-section-vma .osrel=0x%x" % osrel_off
                objcopy_cmd += " --add-section .cmdline=%s" % cmdline.name
                objcopy_cmd += " --change-section-vma .cmdline=0x%x" % cmdline_off
                objcopy_cmd += dtb_params
                objcopy_cmd += " --add-section .linux=%s" % linux_path
                objcopy_cmd += " --change-section-vma .linux=0x%x" % linux_off
                objcopy_cmd += " --add-section .initrd=%s" % initrd.name
                objcopy_cmd += " --change-section-vma .initrd=0x%x" % initrd_off
                objcopy_cmd += " %s %s/EFI/Linux/linux.efi" % (efi_stub, hdddir)
                exec_native_cmd(objcopy_cmd, native_sysroot)
        else:
            if source_params.get('install-kernel-into-boot-dir') != 'false':
                install_cmd = "install -m 0644 %s/%s %s/%s" % \
                    (staging_kernel_dir, kernel, hdddir, kernel)
                exec_cmd(install_cmd)

        # Install extra boot files collected by do_configure_partition()
        if get_bitbake_var("IMAGE_EFI_BOOT_FILES"):
            for src_path, dst_path in cls.install_task:
                install_cmd = "install -m 0644 -D %s %s" \
                              % (os.path.join(kernel_dir, src_path),
                                 os.path.join(hdddir, dst_path))
                exec_cmd(install_cmd)

        try:
            if source_params['loader'] == 'grub-efi':
                shutil.copyfile("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir,
                                "%s/grub.cfg" % cr_workdir)
                # Strip the "grub-efi-" prefix from deployed loader binaries
                for mod in [x for x in os.listdir(kernel_dir) if x.startswith("grub-efi-")]:
                    cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, mod[9:])
                    exec_cmd(cp_cmd, True)
                shutil.move("%s/grub.cfg" % cr_workdir,
                            "%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir)
            elif source_params['loader'] == 'systemd-boot':
                # Strip the "systemd-" prefix from deployed loader binaries
                for mod in [x for x in os.listdir(kernel_dir) if x.startswith("systemd-")]:
                    cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, mod[8:])
                    exec_cmd(cp_cmd, True)
            elif source_params['loader'] == 'uefi-kernel':
                kernel = get_bitbake_var("KERNEL_IMAGETYPE")
                if not kernel:
                    raise WicError("Empty KERNEL_IMAGETYPE")
                target = get_bitbake_var("TARGET_SYS")
                if not target:
                    raise WicError("Empty TARGET_SYS")

                # Pick the removable-media default loader name for the target
                if re.match("x86_64", target):
                    kernel_efi_image = "bootx64.efi"
                elif re.match('i.86', target):
                    kernel_efi_image = "bootia32.efi"
                elif re.match('aarch64', target):
                    kernel_efi_image = "bootaa64.efi"
                elif re.match('arm', target):
                    kernel_efi_image = "bootarm.efi"
                else:
                    raise WicError("UEFI stub kernel is incompatible with target %s" % target)

                for mod in [x for x in os.listdir(kernel_dir) if x.startswith(kernel)]:
                    cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, kernel_efi_image)
                    exec_cmd(cp_cmd, True)
            else:
                raise WicError("unrecognized bootimg-efi loader: %s" %
                               source_params['loader'])
        except KeyError:
            raise WicError("bootimg-efi requires a loader, none specified")

        startup = os.path.join(kernel_dir, "startup.nsh")
        if os.path.exists(startup):
            cp_cmd = "cp %s %s/" % (startup, hdddir)
            exec_cmd(cp_cmd, True)

        for paths in part.include_path or []:
            for path in paths:
                cp_cmd = "cp -r %s %s/" % (path, hdddir)
                exec_cmd(cp_cmd, True)

        du_cmd = "du -bks %s" % hdddir
        out = exec_cmd(du_cmd)
        blocks = int(out.split()[0])

        extra_blocks = part.get_extra_block_count(blocks)
        if extra_blocks < BOOTDD_EXTRA_SPACE:
            extra_blocks = BOOTDD_EXTRA_SPACE

        blocks += extra_blocks

        logger.debug("Added %d extra blocks to %s to get to %d total blocks",
                     extra_blocks, part.mountpoint, blocks)

        # required for compatibility with certain devices expecting file system
        # block count to be equal to partition block count
        if blocks < part.fixed_size:
            blocks = part.fixed_size
            logger.debug("Overriding %s to %d total blocks for compatibility",
                         part.mountpoint, blocks)

        # dosfs image, created by mkdosfs
        bootimg = "%s/boot.img" % cr_workdir

        label = part.label if part.label else "ESP"

        dosfs_cmd = "mkdosfs -n %s -i %s -C %s %d" % \
                    (label, part.fsuuid, bootimg, blocks)
        exec_native_cmd(dosfs_cmd, native_sysroot)

        mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
        exec_native_cmd(mcopy_cmd, native_sysroot)

        chmod_cmd = "chmod 644 %s" % bootimg
        exec_cmd(chmod_cmd)

        du_cmd = "du -Lbks %s" % bootimg
        out = exec_cmd(du_cmd)
        bootimg_size = out.split()[0]

        part.size = int(bootimg_size)
        part.source_file = bootimg

View File

@@ -0,0 +1,197 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-partition' source plugin class for
# 'wic'. The plugin creates an image of boot partition, copying over
# files listed in IMAGE_BOOT_FILES bitbake variable.
#
# AUTHORS
# Maciej Borzecki <maciej.borzecki (at] open-rnd.pl>
#
import logging
import os
import re
from glob import glob
from wic import WicError
from wic.engine import get_custom_config
from wic.pluginbase import SourcePlugin
from wic.misc import exec_cmd, get_bitbake_var
logger = logging.getLogger('wic')
class BootimgPartitionPlugin(SourcePlugin):
    """
    Create an image of boot partition, copying over files
    listed in IMAGE_BOOT_FILES bitbake variable.

    The variable may be overridden per partition through the suffixed
    variants IMAGE_BOOT_FILES_uuid-<uuid> / IMAGE_BOOT_FILES_label-<label>
    (see the lookup loop in do_configure_partition()).
    """
    name = 'bootimg-partition'
    # Name of the bitbake variable holding the boot file list; kept as a
    # class attribute so subclasses can point at a different variable.
    image_boot_files_var_name = 'IMAGE_BOOT_FILES'
    @classmethod
    def do_configure_partition(cls, part, source_params, cr, cr_workdir,
                               oe_builddir, bootimg_dir, kernel_dir,
                               native_sysroot):
        """
        Called before do_prepare_partition(), create u-boot specific boot config

        Parses the boot file list into (src, dst) install tasks and, when
        sourceparam loader=u-boot is given, writes an extlinux.conf.

        NOTE: the computed task list is stored on the *class*
        (cls.install_task) and consumed later by do_prepare_partition().
        """
        hdddir = "%s/boot.%d" % (cr_workdir, part.lineno)
        install_cmd = "install -d %s" % hdddir
        exec_cmd(install_cmd)
        if not kernel_dir:
            kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
            if not kernel_dir:
                raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
        boot_files = None
        # Look up the boot file list: partition-specific variants (by uuid,
        # then by label) take precedence over the plain variable.
        for (fmt, id) in (("_uuid-%s", part.uuid), ("_label-%s", part.label), (None, None)):
            if fmt:
                var = fmt % id
            else:
                var = ""
            boot_files = get_bitbake_var(cls.image_boot_files_var_name + var)
            if boot_files is not None:
                break
        if boot_files is None:
            raise WicError('No boot files defined, %s unset for entry #%d' % (cls.image_boot_files_var_name, part.lineno))
        logger.debug('Boot files: %s', boot_files)
        # list of tuples (src_name, dst_name)
        deploy_files = []
        # Each whitespace-separated entry is either "src" or "src;dst".
        for src_entry in re.findall(r'[\w;\-\./\*]+', boot_files):
            if ';' in src_entry:
                dst_entry = tuple(src_entry.split(';'))
                if not dst_entry[0] or not dst_entry[1]:
                    raise WicError('Malformed boot file entry: %s' % src_entry)
            else:
                dst_entry = (src_entry, src_entry)
            logger.debug('Destination entry: %r', dst_entry)
            deploy_files.append(dst_entry)
        cls.install_task = [];
        for deploy_entry in deploy_files:
            src, dst = deploy_entry
            if '*' in src:
                # by default install files under their basename
                entry_name_fn = os.path.basename
                if dst != src:
                    # unless a target name was given, then treat name
                    # as a directory and append a basename
                    entry_name_fn = lambda name: \
                                    os.path.join(dst,
                                                 os.path.basename(name))
                srcs = glob(os.path.join(kernel_dir, src))
                logger.debug('Globbed sources: %s', ', '.join(srcs))
                for entry in srcs:
                    # src is rebound per glob hit: install path relative
                    # to the deploy directory.
                    src = os.path.relpath(entry, kernel_dir)
                    entry_dst_name = entry_name_fn(entry)
                    cls.install_task.append((src, entry_dst_name))
            else:
                cls.install_task.append((src, dst))
        # Everything below only applies to the u-boot extlinux boot flow.
        if source_params.get('loader') != "u-boot":
            return
        configfile = cr.ks.bootloader.configfile
        custom_cfg = None
        if configfile:
            custom_cfg = get_custom_config(configfile)
            if custom_cfg:
                # Use a custom configuration for extlinux.conf
                extlinux_conf = custom_cfg
                logger.debug("Using custom configuration file "
                             "%s for extlinux.conf", configfile)
            else:
                raise WicError("configfile is specified but failed to "
                               "get it from %s." % configfile)
        if not custom_cfg:
            # The kernel types supported by the sysboot of u-boot
            kernel_types = ["zImage", "Image", "fitImage", "uImage", "vmlinux"]
            has_dtb = False
            fdt_dir = '/'
            kernel_name = None
            # Find the kernel image name, from the highest precedence to lowest
            # (re.match is a prefix match against the install destination's
            # source name).
            for image in kernel_types:
                for task in cls.install_task:
                    src, dst = task
                    if re.match(image, src):
                        kernel_name = os.path.join('/', dst)
                        break
                if kernel_name:
                    break
            # Only the first .dtb entry is considered (note the break).
            for task in cls.install_task:
                src, dst = task
                # We suppose that all the dtb are in the same directory
                if re.search(r'\.dtb', src) and fdt_dir == '/':
                    has_dtb = True
                    fdt_dir = os.path.join(fdt_dir, os.path.dirname(dst))
                    break
            if not kernel_name:
                raise WicError('No kernel file found')
            # Compose the extlinux.conf
            extlinux_conf = "default Yocto\n"
            extlinux_conf += "label Yocto\n"
            extlinux_conf += "   kernel %s\n" % kernel_name
            if has_dtb:
                extlinux_conf += "   fdtdir %s\n" % fdt_dir
            bootloader = cr.ks.bootloader
            extlinux_conf += "append root=%s rootwait %s\n" \
                             % (cr.rootdev, bootloader.append if bootloader.append else '')
            install_cmd = "install -d %s/extlinux/" % hdddir
            exec_cmd(install_cmd)
            cfg = open("%s/extlinux/extlinux.conf" % hdddir, "w")
            cfg.write(extlinux_conf)
            cfg.close()
    @classmethod
    def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
                             oe_builddir, bootimg_dir, kernel_dir,
                             rootfs_dir, native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.
        In this case, does the following:
        - sets up a vfat partition
        - copies all files listed in IMAGE_BOOT_FILES variable

        Consumes cls.install_task built by do_configure_partition().
        """
        hdddir = "%s/boot.%d" % (cr_workdir, part.lineno)
        if not kernel_dir:
            kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
            if not kernel_dir:
                raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
        logger.debug('Kernel dir: %s', bootimg_dir)
        for task in cls.install_task:
            src_path, dst_path = task
            logger.debug('Install %s as %s', src_path, dst_path)
            # install -D creates intermediate destination directories.
            install_cmd = "install -m 0644 -D %s %s" \
                          % (os.path.join(kernel_dir, src_path),
                             os.path.join(hdddir, dst_path))
            exec_cmd(install_cmd)
        logger.debug('Prepare boot partition using rootfs in %s', hdddir)
        part.prepare_rootfs(cr_workdir, oe_builddir, hdddir,
                            native_sysroot, False)

View File

@@ -0,0 +1,209 @@
#
# Copyright (c) 2014, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-pcbios' source plugin class for 'wic'
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import logging
import os
import re
from wic import WicError
from wic.engine import get_custom_config
from wic.pluginbase import SourcePlugin
from wic.misc import (exec_cmd, exec_native_cmd,
get_bitbake_var, BOOTDD_EXTRA_SPACE)
logger = logging.getLogger('wic')
class BootimgPcbiosPlugin(SourcePlugin):
    """
    Create MBR boot partition and install syslinux on it.
    """
    name = 'bootimg-pcbios'
    @classmethod
    def _get_bootimg_dir(cls, bootimg_dir, dirname):
        """
        Check if dirname exists in default bootimg_dir or in STAGING_DIR.

        Returns the first directory that contains *dirname*; falls back to
        the non-multilib staging path, and raises WicError if none match.
        """
        staging_datadir = get_bitbake_var("STAGING_DATADIR")
        for result in (bootimg_dir, staging_datadir):
            if os.path.exists("%s/%s" % (result, dirname)):
                return result
        # STAGING_DATADIR is expanded with MLPREFIX if multilib is enabled
        # but dependency syslinux is still populated to original STAGING_DATADIR
        nonarch_datadir = re.sub('/[^/]*recipe-sysroot', '/recipe-sysroot', staging_datadir)
        if os.path.exists(os.path.join(nonarch_datadir, dirname)):
            return nonarch_datadir
        raise WicError("Couldn't find correct bootimg_dir, exiting")
    @classmethod
    def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
                        bootimg_dir, kernel_dir, native_sysroot):
        """
        Called after all partitions have been prepared and assembled into a
        disk image. In this case, we install the MBR.

        Picks mbr.bin or gptmbr.bin depending on the partition table format
        and dd's it over the start of the assembled image (conv=notrunc
        keeps the rest of the image intact).
        """
        bootimg_dir = cls._get_bootimg_dir(bootimg_dir, 'syslinux')
        mbrfile = "%s/syslinux/" % bootimg_dir
        if creator.ptable_format == 'msdos':
            mbrfile += "mbr.bin"
        elif creator.ptable_format == 'gpt':
            mbrfile += "gptmbr.bin"
        else:
            raise WicError("Unsupported partition table: %s" %
                           creator.ptable_format)
        if not os.path.exists(mbrfile):
            raise WicError("Couldn't find %s. If using the -e option, do you "
                           "have the right MACHINE set in local.conf? If not, "
                           "is the bootimg_dir path correct?" % mbrfile)
        full_path = creator._full_path(workdir, disk_name, "direct")
        logger.debug("Installing MBR on disk %s as %s with size %s bytes",
                     disk_name, full_path, disk.min_size)
        dd_cmd = "dd if=%s of=%s conv=notrunc" % (mbrfile, full_path)
        # NOTE(review): exec_cmd's second positional parameter is a shell
        # flag elsewhere in this file (as_shell), not a sysroot path;
        # passing native_sysroot here is truthy and runs dd via a shell —
        # confirm this is intended.
        exec_cmd(dd_cmd, native_sysroot)
    @classmethod
    def do_configure_partition(cls, part, source_params, creator, cr_workdir,
                               oe_builddir, bootimg_dir, kernel_dir,
                               native_sysroot):
        """
        Called before do_prepare_partition(), creates syslinux config

        Uses a custom bootloader config file when given in the kickstart,
        otherwise generates syslinux.cfg from the wks bootloader settings.
        """
        hdddir = "%s/hdd/boot" % cr_workdir
        install_cmd = "install -d %s" % hdddir
        exec_cmd(install_cmd)
        bootloader = creator.ks.bootloader
        custom_cfg = None
        if bootloader.configfile:
            custom_cfg = get_custom_config(bootloader.configfile)
            if custom_cfg:
                # Use a custom configuration for grub
                syslinux_conf = custom_cfg
                logger.debug("Using custom configuration file %s "
                             "for syslinux.cfg", bootloader.configfile)
            else:
                raise WicError("configfile is specified but failed to "
                               "get it from %s." % bootloader.configfile)
        if not custom_cfg:
            # Create syslinux configuration using parameters from wks file
            # NOTE(review): os.path.join discards cr_workdir because the
            # second component is absolute — this effectively tests
            # "/hdd/boot/splash.jpg" on the build host; confirm intended.
            splash = os.path.join(cr_workdir, "/hdd/boot/splash.jpg")
            if os.path.exists(splash):
                splashline = "menu background splash.jpg"
            else:
                splashline = ""
            syslinux_conf = ""
            syslinux_conf += "PROMPT 0\n"
            syslinux_conf += "TIMEOUT " + str(bootloader.timeout) + "\n"
            syslinux_conf += "\n"
            syslinux_conf += "ALLOWOPTIONS 1\n"
            syslinux_conf += "SERIAL 0 115200\n"
            syslinux_conf += "\n"
            if splashline:
                syslinux_conf += "%s\n" % splashline
            syslinux_conf += "DEFAULT boot\n"
            syslinux_conf += "LABEL boot\n"
            kernel = "/" + get_bitbake_var("KERNEL_IMAGETYPE")
            syslinux_conf += "KERNEL " + kernel + "\n"
            syslinux_conf += "APPEND label=boot root=%s %s\n" % \
                             (creator.rootdev, bootloader.append)
        logger.debug("Writing syslinux config %s/hdd/boot/syslinux.cfg",
                     cr_workdir)
        cfg = open("%s/hdd/boot/syslinux.cfg" % cr_workdir, "w")
        cfg.write(syslinux_conf)
        cfg.close()
    @classmethod
    def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
                             oe_builddir, bootimg_dir, kernel_dir,
                             rootfs_dir, native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.
        In this case, prepare content for legacy bios boot partition.

        Installs the kernel and syslinux support files into a staging
        directory, sizes it, builds a FAT image with mkdosfs/mcopy and
        installs syslinux into it.
        """
        bootimg_dir = cls._get_bootimg_dir(bootimg_dir, 'syslinux')
        staging_kernel_dir = kernel_dir
        hdddir = "%s/hdd/boot" % cr_workdir
        kernel = get_bitbake_var("KERNEL_IMAGETYPE")
        # Bundled-initramfs kernels are deployed under a different file name.
        if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
            if get_bitbake_var("INITRAMFS_IMAGE"):
                kernel = "%s-%s.bin" % \
                    (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
        cmds = ("install -m 0644 %s/%s %s/%s" %
                (staging_kernel_dir, kernel, hdddir, get_bitbake_var("KERNEL_IMAGETYPE")),
                "install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" %
                (bootimg_dir, hdddir),
                "install -m 0644 %s/syslinux/vesamenu.c32 %s/vesamenu.c32" %
                (bootimg_dir, hdddir),
                "install -m 444 %s/syslinux/libcom32.c32 %s/libcom32.c32" %
                (bootimg_dir, hdddir),
                "install -m 444 %s/syslinux/libutil.c32 %s/libutil.c32" %
                (bootimg_dir, hdddir))
        for install_cmd in cmds:
            exec_cmd(install_cmd)
        # du -bks: size in 1K blocks, dereferencing nothing, summarized.
        du_cmd = "du -bks %s" % hdddir
        out = exec_cmd(du_cmd)
        blocks = int(out.split()[0])
        extra_blocks = part.get_extra_block_count(blocks)
        if extra_blocks < BOOTDD_EXTRA_SPACE:
            extra_blocks = BOOTDD_EXTRA_SPACE
        blocks += extra_blocks
        logger.debug("Added %d extra blocks to %s to get to %d total blocks",
                     extra_blocks, part.mountpoint, blocks)
        # dosfs image, created by mkdosfs
        bootimg = "%s/boot%s.img" % (cr_workdir, part.lineno)
        label = part.label if part.label else "boot"
        dosfs_cmd = "mkdosfs -n %s -i %s -S 512 -C %s %d" % \
                    (label, part.fsuuid, bootimg, blocks)
        exec_native_cmd(dosfs_cmd, native_sysroot)
        mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
        exec_native_cmd(mcopy_cmd, native_sysroot)
        syslinux_cmd = "syslinux %s" % bootimg
        exec_native_cmd(syslinux_cmd, native_sysroot)
        chmod_cmd = "chmod 644 %s" % bootimg
        exec_cmd(chmod_cmd)
        du_cmd = "du -Lbks %s" % bootimg
        out = exec_cmd(du_cmd)
        bootimg_size = out.split()[0]
        part.size = int(bootimg_size)
        part.source_file = bootimg

View File

@@ -0,0 +1,89 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# The empty wic plugin is used to create unformatted empty partitions for wic
# images.
# To use it you must pass "empty" as argument for the "--source" parameter in
# the wks file. For example:
# part foo --source empty --ondisk sda --size="1024" --align 1024
#
# The plugin supports writing zeros to the start of the
# partition. This is useful to overwrite old content like
# filesystem signatures which may be re-recognized otherwise.
# This feature can be enabled with
# '--sourceparams="[fill|size=<N>[S|s|K|k|M|G]][,][bs=<N>[S|s|K|k|M|G]]"'
# Conflicting or missing options throw errors.
import logging
import os
from wic import WicError
from wic.ksparser import sizetype
from wic.pluginbase import SourcePlugin
logger = logging.getLogger('wic')
class EmptyPartitionPlugin(SourcePlugin):
    """
    Handle unformatted empty partitions for wic images.

    Supported source parameters:
      fill            -- zero the whole partition; requires '--fixed-size'
      size=<N>[unit]  -- zero only the first N bytes (default unit: K)
      bs=<N>[unit]    -- write chunk size while creating the source file,
                         defaults to 1M (default unit: K)

    'fill' and 'size' are mutually exclusive.
    """
    name = 'empty'
    @classmethod
    def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
                             oe_builddir, bootimg_dir, kernel_dir,
                             rootfs_dir, native_sysroot):
        """
        Produce the partition content: either nothing at all (a truly
        empty partition) or a zero-filled source file of the requested
        size, written in bounded chunks to keep memory use flat.
        """
        to_bytes = sizetype('K', True)
        has_fill = 'fill' in source_params
        has_size = 'size' in source_params
        if has_fill and has_size:
            raise WicError("Conflicting source parameters 'fill' and 'size' specified, exiting.")
        # Determine how many zero bytes must be written, if any.
        zero_bytes = 0
        if has_fill:
            if part.fixed_size == 0:
                raise WicError("Source parameter 'fill' only works with the '--fixed-size' option, exiting.")
            zero_bytes = to_bytes(part.fixed_size)
        elif has_size:
            zero_bytes = to_bytes(source_params['size'])
        if not zero_bytes:
            # No zeroing requested: leave the partition without content.
            return
        chunk = to_bytes(source_params.get('bs', '1M'))
        # Lay down the zero-filled source file chunk by chunk.
        zeros_path = os.path.join(cr_workdir,
                                  'empty-plugin-zeros%s.bin' % part.lineno)
        os.makedirs(os.path.dirname(zeros_path), exist_ok=True)
        full_chunks, tail = divmod(zero_bytes, chunk)
        with open(zeros_path, 'wb') as out:
            for _ in range(full_chunks):
                out.write(bytes(chunk))
            out.write(bytes(tail))
        # kickstart sizes are tracked in KiB, rounded up.
        part.size = -(-zero_bytes // 1024)
        part.source_file = zeros_path

View File

@@ -0,0 +1,463 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'isoimage-isohybrid' source plugin class for 'wic'
#
# AUTHORS
# Mihaly Varga <mihaly.varga (at] ni.com>
import glob
import logging
import os
import re
import shutil
from wic import WicError
from wic.engine import get_custom_config
from wic.pluginbase import SourcePlugin
from wic.misc import exec_cmd, exec_native_cmd, get_bitbake_var
logger = logging.getLogger('wic')
class IsoImagePlugin(SourcePlugin):
    """
    Create a bootable ISO image

    This plugin creates a hybrid, legacy and EFI bootable ISO image. The
    generated image can be used on optical media as well as USB media.
    Legacy boot uses syslinux and EFI boot uses grub or gummiboot (not
    implemented yet) as bootloader. The plugin creates the directories required
    by bootloaders and populates them by creating and configuring the
    bootloader files.

    Example kickstart file:
    part /boot --source isoimage-isohybrid --sourceparams="loader=grub-efi, \\
    image_name= IsoImage" --ondisk cd --label LIVECD
    bootloader --timeout=10 --append=" "

    In --sourceparams "loader" specifies the bootloader used for booting in EFI
    mode, while "image_name" specifies the name of the generated image. In the
    example above, wic creates an ISO image named IsoImage-cd.direct (default
    extension added by direct imager plugin) and a file named IsoImage-cd.iso
    """
    name = 'isoimage-isohybrid'
    @classmethod
    def do_configure_syslinux(cls, creator, cr_workdir):
        """
        Create loader-specific (syslinux) config for the legacy BIOS path.

        Writes <cr_workdir>/ISO/isolinux/isolinux.cfg built from the wks
        bootloader settings.
        """
        splash = os.path.join(cr_workdir, "ISO/boot/splash.jpg")
        if os.path.exists(splash):
            splashline = "menu background splash.jpg"
        else:
            splashline = ""
        bootloader = creator.ks.bootloader
        syslinux_conf = ""
        syslinux_conf += "PROMPT 0\n"
        syslinux_conf += "TIMEOUT %s \n" % (bootloader.timeout or 10)
        syslinux_conf += "\n"
        syslinux_conf += "ALLOWOPTIONS 1\n"
        syslinux_conf += "SERIAL 0 115200\n"
        syslinux_conf += "\n"
        if splashline:
            syslinux_conf += "%s\n" % splashline
        syslinux_conf += "DEFAULT boot\n"
        syslinux_conf += "LABEL boot\n"
        kernel = get_bitbake_var("KERNEL_IMAGETYPE")
        # Bundled-initramfs kernels are deployed under a different file name.
        if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
            if get_bitbake_var("INITRAMFS_IMAGE"):
                kernel = "%s-%s.bin" % \
                    (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
        syslinux_conf += "KERNEL /" + kernel + "\n"
        syslinux_conf += "APPEND initrd=/initrd LABEL=boot %s\n" \
                         % bootloader.append
        logger.debug("Writing syslinux config %s/ISO/isolinux/isolinux.cfg",
                     cr_workdir)
        with open("%s/ISO/isolinux/isolinux.cfg" % cr_workdir, "w") as cfg:
            cfg.write(syslinux_conf)
    @classmethod
    def do_configure_grubefi(cls, part, creator, target_dir):
        """
        Create loader-specific (grub-efi) config

        Uses a custom bootloader config file when given in the kickstart,
        otherwise generates grub.cfg from the wks bootloader settings.
        """
        configfile = creator.ks.bootloader.configfile
        if configfile:
            grubefi_conf = get_custom_config(configfile)
            if grubefi_conf:
                logger.debug("Using custom configuration file %s for grub.cfg",
                             configfile)
            else:
                # BUGFIX: the message must be %-formatted here; previously
                # 'configfile' was passed as a second constructor argument,
                # leaving the %s placeholder unexpanded in the raised error.
                raise WicError("configfile is specified "
                               "but failed to get it from %s" % configfile)
        else:
            splash = os.path.join(target_dir, "splash.jpg")
            if os.path.exists(splash):
                splashline = "menu background splash.jpg"
            else:
                splashline = ""
            bootloader = creator.ks.bootloader
            grubefi_conf = ""
            grubefi_conf += "serial --unit=0 --speed=115200 --word=8 "
            grubefi_conf += "--parity=no --stop=1\n"
            grubefi_conf += "default=boot\n"
            grubefi_conf += "timeout=%s\n" % (bootloader.timeout or 10)
            grubefi_conf += "\n"
            grubefi_conf += "search --set=root --label %s " % part.label
            grubefi_conf += "\n"
            grubefi_conf += "menuentry 'boot'{\n"
            kernel = get_bitbake_var("KERNEL_IMAGETYPE")
            # Bundled-initramfs kernels are deployed under a different name.
            if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
                if get_bitbake_var("INITRAMFS_IMAGE"):
                    kernel = "%s-%s.bin" % \
                        (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
            grubefi_conf += "linux /%s rootwait %s\n" \
                            % (kernel, bootloader.append)
            grubefi_conf += "initrd /initrd \n"
            grubefi_conf += "}\n"
            if splashline:
                grubefi_conf += "%s\n" % splashline
        cfg_path = os.path.join(target_dir, "grub.cfg")
        logger.debug("Writing grubefi config %s", cfg_path)
        with open(cfg_path, "w") as cfg:
            cfg.write(grubefi_conf)
    @staticmethod
    def _build_initramfs_path(rootfs_dir, cr_workdir):
        """
        Create path for initramfs image

        Resolution order: INITRD_LIVE / INITRD variables, a deployed
        initramfs artifact matching IMAGE_BASENAME in DEPLOY_DIR_IMAGE,
        and finally a cpio.gz built on the fly from rootfs_dir.
        """
        initrd = get_bitbake_var("INITRD_LIVE") or get_bitbake_var("INITRD")
        if not initrd:
            initrd_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
            if not initrd_dir:
                raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting.")
            image_name = get_bitbake_var("IMAGE_BASENAME")
            if not image_name:
                raise WicError("Couldn't find IMAGE_BASENAME, exiting.")
            image_type = get_bitbake_var("INITRAMFS_FSTYPES")
            if not image_type:
                raise WicError("Couldn't find INITRAMFS_FSTYPES, exiting.")
            machine = os.path.basename(initrd_dir)
            pattern = '%s/%s*%s.%s' % (initrd_dir, image_name, machine, image_type)
            files = glob.glob(pattern)
            if files:
                initrd = files[0]
        if not initrd or not os.path.exists(initrd):
            # Create initrd from rootfs directory
            initrd = "%s/initrd.cpio.gz" % cr_workdir
            initrd_dir = "%s/INITRD" % cr_workdir
            shutil.copytree(rootfs_dir, initrd_dir, symlinks=True)
            # Ensure /init exists in the initramfs: copy or re-link it from
            # the rootfs (/init has priority over /sbin/init).
            if os.path.isfile("%s/init" % rootfs_dir):
                shutil.copy2("%s/init" % rootfs_dir, "%s/init" % initrd_dir)
            elif os.path.lexists("%s/init" % rootfs_dir):
                os.symlink(os.readlink("%s/init" % rootfs_dir), \
                           "%s/init" % initrd_dir)
            elif os.path.isfile("%s/sbin/init" % rootfs_dir):
                shutil.copy2("%s/sbin/init" % rootfs_dir, \
                             "%s" % initrd_dir)
            elif os.path.lexists("%s/sbin/init" % rootfs_dir):
                os.symlink(os.readlink("%s/sbin/init" % rootfs_dir), \
                           "%s/init" % initrd_dir)
            else:
                raise WicError("Couldn't find or build initrd, exiting.")
            exec_cmd("cd %s && find . | cpio -o -H newc -R root:root >%s/initrd.cpio " \
                     % (initrd_dir, cr_workdir), as_shell=True)
            exec_cmd("gzip -f -9 %s/initrd.cpio" % cr_workdir, as_shell=True)
            shutil.rmtree(initrd_dir)
        return initrd
    @classmethod
    def do_configure_partition(cls, part, source_params, creator, cr_workdir,
                               oe_builddir, bootimg_dir, kernel_dir,
                               native_sysroot):
        """
        Called before do_prepare_partition(), creates loader-specific config

        Resets the ISO staging directory and applies the optional
        'image_name' source parameter.
        """
        isodir = "%s/ISO/" % cr_workdir
        if os.path.exists(isodir):
            shutil.rmtree(isodir)
        install_cmd = "install -d %s " % isodir
        exec_cmd(install_cmd)
        # Overwrite the name of the created image
        logger.debug(source_params)
        if 'image_name' in source_params and \
                   source_params['image_name'].strip():
            creator.name = source_params['image_name'].strip()
            logger.debug("The name of the image is: %s", creator.name)
    @staticmethod
    def _install_payload(source_params, iso_dir):
        """
        Copies contents of payload directory (as specified in 'payload_dir' param) into iso_dir
        """
        if source_params.get('payload_dir'):
            payload_dir = source_params['payload_dir']
            logger.debug("Payload directory: %s", payload_dir)
            shutil.copytree(payload_dir, iso_dir, symlinks=True, dirs_exist_ok=True)
    @classmethod
    def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
                             oe_builddir, bootimg_dir, kernel_dir,
                             rootfs_dir, native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.
        In this case, prepare content for a bootable ISO image.
        """
        isodir = "%s/ISO" % cr_workdir
        cls._install_payload(source_params, isodir)
        # Resolve the rootfs directory: by connection name from the wks
        # file, as a literal path, or from the IMAGE_ROOTFS variable.
        if part.rootfs_dir is None:
            if 'ROOTFS_DIR' not in rootfs_dir:
                raise WicError("Couldn't find --rootfs-dir, exiting.")
            rootfs_dir = rootfs_dir['ROOTFS_DIR']
        else:
            if part.rootfs_dir in rootfs_dir:
                rootfs_dir = rootfs_dir[part.rootfs_dir]
            elif part.rootfs_dir:
                rootfs_dir = part.rootfs_dir
            else:
                raise WicError("Couldn't find --rootfs-dir=%s connection "
                               "or it is not a valid path, exiting." %
                               part.rootfs_dir)
        if not os.path.isdir(rootfs_dir):
            rootfs_dir = get_bitbake_var("IMAGE_ROOTFS")
        if not os.path.isdir(rootfs_dir):
            raise WicError("Couldn't find IMAGE_ROOTFS, exiting.")
        part.rootfs_dir = rootfs_dir
        deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
        img_iso_dir = get_bitbake_var("ISODIR")
        # Remove the temporary file created by part.prepare_rootfs()
        if os.path.isfile(part.source_file):
            os.remove(part.source_file)
        # Support using a different initrd other than default
        if source_params.get('initrd'):
            initrd = source_params['initrd']
            if not deploy_dir:
                raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
            cp_cmd = "cp %s/%s %s" % (deploy_dir, initrd, cr_workdir)
            exec_cmd(cp_cmd)
        else:
            # Prepare initial ramdisk
            initrd = "%s/initrd" % deploy_dir
            if not os.path.isfile(initrd):
                initrd = "%s/initrd" % img_iso_dir
            if not os.path.isfile(initrd):
                initrd = cls._build_initramfs_path(rootfs_dir, cr_workdir)
        install_cmd = "install -m 0644 %s %s/initrd" % (initrd, isodir)
        exec_cmd(install_cmd)
        # Remove the temporary file created by _build_initramfs_path function
        if os.path.isfile("%s/initrd.cpio.gz" % cr_workdir):
            os.remove("%s/initrd.cpio.gz" % cr_workdir)
        kernel = get_bitbake_var("KERNEL_IMAGETYPE")
        # Bundled-initramfs kernels are deployed under a different file name.
        if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
            if get_bitbake_var("INITRAMFS_IMAGE"):
                kernel = "%s-%s.bin" % \
                    (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
        install_cmd = "install -m 0644 %s/%s %s/%s" % \
                      (kernel_dir, kernel, isodir, kernel)
        exec_cmd(install_cmd)
        #Create bootloader for efi boot
        try:
            target_dir = "%s/EFI/BOOT" % isodir
            if os.path.exists(target_dir):
                shutil.rmtree(target_dir)
            os.makedirs(target_dir)
            if source_params['loader'] == 'grub-efi':
                # Builds bootx64.efi/bootia32.efi if ISODIR didn't exist or
                # didn't contains it
                target_arch = get_bitbake_var("TARGET_SYS")
                if not target_arch:
                    # BUGFIX: corrected "Coludn't" typo in the error message.
                    raise WicError("Couldn't find target architecture")
                if re.match("x86_64", target_arch):
                    grub_src_image = "grub-efi-bootx64.efi"
                    grub_dest_image = "bootx64.efi"
                elif re.match('i.86', target_arch):
                    grub_src_image = "grub-efi-bootia32.efi"
                    grub_dest_image = "bootia32.efi"
                else:
                    raise WicError("grub-efi is incompatible with target %s" %
                                   target_arch)
                grub_target = os.path.join(target_dir, grub_dest_image)
                if not os.path.isfile(grub_target):
                    grub_src = os.path.join(deploy_dir, grub_src_image)
                    if not os.path.exists(grub_src):
                        raise WicError("Grub loader %s is not found in %s. "
                                       "Please build grub-efi first" % (grub_src_image, deploy_dir))
                    shutil.copy(grub_src, grub_target)
                if not os.path.isfile(os.path.join(target_dir, "boot.cfg")):
                    cls.do_configure_grubefi(part, creator, target_dir)
            else:
                raise WicError("unrecognized bootimg-efi loader: %s" %
                               source_params['loader'])
        except KeyError:
            raise WicError("bootimg-efi requires a loader, none specified")
        # Create efi.img that contains bootloader files for EFI booting
        # if ISODIR didn't exist or didn't contains it
        if os.path.isfile("%s/efi.img" % img_iso_dir):
            install_cmd = "install -m 0644 %s/efi.img %s/efi.img" % \
                          (img_iso_dir, isodir)
            exec_cmd(install_cmd)
        else:
            # Default to 100 blocks of extra space for file system overhead
            esp_extra_blocks = int(source_params.get('esp_extra_blocks', '100'))
            du_cmd = "du -bks %s/EFI" % isodir
            out = exec_cmd(du_cmd)
            blocks = int(out.split()[0])
            blocks += esp_extra_blocks
            logger.debug("Added 100 extra blocks to %s to get to %d "
                         "total blocks", part.mountpoint, blocks)
            # dosfs image for EFI boot
            bootimg = "%s/efi.img" % isodir
            esp_label = source_params.get('esp_label', 'EFIimg')
            dosfs_cmd = 'mkfs.vfat -n \'%s\' -S 512 -C %s %d' \
                        % (esp_label, bootimg, blocks)
            exec_native_cmd(dosfs_cmd, native_sysroot)
            mmd_cmd = "mmd -i %s ::/EFI" % bootimg
            exec_native_cmd(mmd_cmd, native_sysroot)
            mcopy_cmd = "mcopy -i %s -s %s/EFI/* ::/EFI/" \
                        % (bootimg, isodir)
            exec_native_cmd(mcopy_cmd, native_sysroot)
            chmod_cmd = "chmod 644 %s" % bootimg
            exec_cmd(chmod_cmd)
        # Prepare files for legacy boot
        syslinux_dir = get_bitbake_var("STAGING_DATADIR")
        if not syslinux_dir:
            raise WicError("Couldn't find STAGING_DATADIR, exiting.")
        if os.path.exists("%s/isolinux" % isodir):
            shutil.rmtree("%s/isolinux" % isodir)
        install_cmd = "install -d %s/isolinux" % isodir
        exec_cmd(install_cmd)
        cls.do_configure_syslinux(creator, cr_workdir)
        install_cmd = "install -m 444 %s/syslinux/ldlinux.sys " % syslinux_dir
        install_cmd += "%s/isolinux/ldlinux.sys" % isodir
        exec_cmd(install_cmd)
        install_cmd = "install -m 444 %s/syslinux/isohdpfx.bin " % syslinux_dir
        install_cmd += "%s/isolinux/isohdpfx.bin" % isodir
        exec_cmd(install_cmd)
        install_cmd = "install -m 644 %s/syslinux/isolinux.bin " % syslinux_dir
        install_cmd += "%s/isolinux/isolinux.bin" % isodir
        exec_cmd(install_cmd)
        install_cmd = "install -m 644 %s/syslinux/ldlinux.c32 " % syslinux_dir
        install_cmd += "%s/isolinux/ldlinux.c32" % isodir
        exec_cmd(install_cmd)
        #create ISO image
        iso_img = "%s/tempiso_img.iso" % cr_workdir
        iso_bootimg = "isolinux/isolinux.bin"
        iso_bootcat = "isolinux/boot.cat"
        efi_img = "efi.img"
        mkisofs_cmd = "mkisofs -V %s " % part.label
        mkisofs_cmd += "-o %s -U " % iso_img
        mkisofs_cmd += "-J -joliet-long -r -iso-level 2 -b %s " % iso_bootimg
        mkisofs_cmd += "-c %s -no-emul-boot -boot-load-size 4 " % iso_bootcat
        mkisofs_cmd += "-boot-info-table -eltorito-alt-boot "
        mkisofs_cmd += "-eltorito-platform 0xEF -eltorito-boot %s " % efi_img
        mkisofs_cmd += "-no-emul-boot %s " % isodir
        logger.debug("running command: %s", mkisofs_cmd)
        exec_native_cmd(mkisofs_cmd, native_sysroot)
        shutil.rmtree(isodir)
        du_cmd = "du -Lbks %s" % iso_img
        out = exec_cmd(du_cmd)
        isoimg_size = int(out.split()[0])
        part.size = isoimg_size
        part.source_file = iso_img
    @classmethod
    def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
                        bootimg_dir, kernel_dir, native_sysroot):
        """
        Called after all partitions have been prepared and assembled into a
        disk image. In this case, we insert/modify the MBR using isohybrid
        utility for booting via BIOS from disk storage devices.
        """
        iso_img = "%s.p1" % disk.path
        full_path = creator._full_path(workdir, disk_name, "direct")
        full_path_iso = creator._full_path(workdir, disk_name, "iso")
        isohybrid_cmd = "isohybrid -u %s" % iso_img
        logger.debug("running command: %s", isohybrid_cmd)
        exec_native_cmd(isohybrid_cmd, native_sysroot)
        # Replace the image created by direct plugin with the one created by
        # mkisofs command. This is necessary because the iso image created by
        # mkisofs has a very specific MBR is system area of the ISO image, and
        # direct plugin adds and configures an another MBR.
        logger.debug("Replaceing the image created by direct plugin\n")
        os.remove(disk.path)
        shutil.copy2(iso_img, full_path_iso)
        shutil.copy2(full_path_iso, full_path)

View File

@@ -0,0 +1,115 @@
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
import logging
import os
import signal
import subprocess
from wic import WicError
from wic.pluginbase import SourcePlugin
from wic.misc import exec_cmd, get_bitbake_var
from wic.filemap import sparse_copy
logger = logging.getLogger('wic')
class RawCopyPlugin(SourcePlugin):
    """
    Populate partition content from raw image file.
    """
    name = 'rawcopy'
    @staticmethod
    def do_image_label(fstype, dst, label):
        """
        Apply *label* to the filesystem image at *dst* with the labelling
        tool matching *fstype*. Raises WicError for filesystems whose label
        cannot be changed in-place (squashfs, erofs) or unknown fstypes.
        """
        # don't create label when fstype is none
        if fstype == 'none':
            return
        if fstype.startswith('ext'):
            cmd = 'tune2fs -L %s %s' % (label, dst)
        elif fstype in ('msdos', 'vfat'):
            cmd = 'dosfslabel %s %s' % (dst, label)
        elif fstype == 'btrfs':
            cmd = 'btrfs filesystem label %s %s' % (dst, label)
        elif fstype == 'swap':
            cmd = 'mkswap -L %s %s' % (label, dst)
        elif fstype in ('squashfs', 'erofs'):
            raise WicError("It's not possible to update a %s "
                           "filesystem label '%s'" % (fstype, label))
        else:
            raise WicError("Cannot update filesystem label: "
                           "Unknown fstype: '%s'" % (fstype))
        exec_cmd(cmd)
    @staticmethod
    def do_image_uncompression(src, dst, workdir):
        """
        Decompress *src* into *dst* using a tool chosen by file extension
        (.bz2/.gz/.xz/.zst). Raises WicError for unknown extensions or when
        the decompressor exits with a non-zero status.
        """
        def subprocess_setup():
            # Python installs a SIGPIPE handler by default. This is usually not what
            # non-Python subprocesses expect.
            # SIGPIPE errors are known issues with gzip/bash
            signal.signal(signal.SIGPIPE, signal.SIG_DFL)
        extension = os.path.splitext(src)[1]
        decompressor = {
            ".bz2": "bzip2",
            ".gz": "gzip",
            ".xz": "xz",
            ".zst": "zstd -f",
        }.get(extension)
        if not decompressor:
            raise WicError("Not supported compressor filename extension: %s" % extension)
        cmd = "%s -dc %s > %s" % (decompressor, src, dst)
        # BUGFIX: previously the exit status was ignored, so a failed
        # decompression (missing source, corrupt archive, tool not
        # installed) silently produced a truncated or empty image.
        ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=workdir)
        if ret != 0:
            raise WicError("Failed to uncompress %s: command '%s' "
                           "returned %d" % (src, cmd, ret))
    @classmethod
    def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
                             oe_builddir, bootimg_dir, kernel_dir,
                             rootfs_dir, native_sysroot):
        """
        Called to do the actual content population for a partition i.e. it
        'prepares' the partition to be incorporated into the image.

        Source parameters:
          file   -- image file (relative to DEPLOY_DIR_IMAGE), required
          unpack -- decompress the file first (by extension)
          skip   -- number of bytes to skip at the start of the source
        """
        if not kernel_dir:
            kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
            if not kernel_dir:
                raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
        logger.debug('Kernel dir: %s', kernel_dir)
        if 'file' not in source_params:
            raise WicError("No file specified")
        if 'unpack' in source_params:
            img = os.path.join(kernel_dir, source_params['file'])
            src = os.path.join(cr_workdir, os.path.splitext(source_params['file'])[0])
            RawCopyPlugin.do_image_uncompression(img, src, cr_workdir)
        else:
            src = os.path.join(kernel_dir, source_params['file'])
        dst = os.path.join(cr_workdir, "%s.%s" % (os.path.basename(source_params['file']), part.lineno))
        if not os.path.exists(os.path.dirname(dst)):
            os.makedirs(os.path.dirname(dst))
        # sparse_copy keeps holes sparse; 'skip' drops a leading header.
        if 'skip' in source_params:
            sparse_copy(src, dst, skip=int(source_params['skip']))
        else:
            sparse_copy(src, dst)
        # get the size in the right units for kickstart (kB)
        du_cmd = "du -Lbks %s" % dst
        out = exec_cmd(du_cmd)
        filesize = int(out.split()[0])
        # Never shrink the partition below the size requested in the wks.
        if filesize > part.size:
            part.size = filesize
        if part.label:
            RawCopyPlugin.do_image_label(part.fstype, dst, part.label)
        part.source_file = dst

View File

@@ -0,0 +1,236 @@
#
# Copyright (c) 2014, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'rootfs' source plugin class for 'wic'
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
# Joao Henrique Ferreira de Freitas <joaohf (at] gmail.com>
#
import logging
import os
import shutil
import sys
from oe.path import copyhardlinktree
from pathlib import Path
from wic import WicError
from wic.pluginbase import SourcePlugin
from wic.misc import get_bitbake_var, exec_native_cmd
logger = logging.getLogger('wic')
class RootfsPlugin(SourcePlugin):
"""
Populate partition content from a rootfs directory.
"""
name = 'rootfs'
@staticmethod
def __validate_path(cmd, rootfs_dir, path):
    """
    Resolve *path* relative to *rootfs_dir* and return the canonical
    absolute result.

    Exits the process (via sys.exit(1), matching wic's CLI error
    convention) when *path* is absolute or escapes the rootfs.

    :param cmd: option name (e.g. "--include-path") used in error messages
    :param rootfs_dir: directory the path must stay inside
    :param path: user-supplied relative path
    :returns: realpath of the joined location
    """
    if os.path.isabs(path):
        logger.error("%s: Must be relative: %s" % (cmd, path))
        sys.exit(1)

    # Disallow climbing outside of parent directory using '..',
    # because doing so could be quite disastrous (we will delete the
    # directory, or modify a directory outside OpenEmbedded).
    rootfs = os.path.realpath(rootfs_dir)
    full_path = os.path.realpath(os.path.join(rootfs, path))
    # Compare against the prefix plus a separator so a sibling such as
    # '<rootfs>-other' cannot slip past a plain startswith() test.
    if full_path != rootfs and not full_path.startswith(rootfs + os.sep):
        # Bug fix: the original format string had one %s but two
        # arguments, raising TypeError instead of reporting the error.
        logger.error("%s: Must point inside the rootfs: %s" % (cmd, path))
        sys.exit(1)

    return full_path
@staticmethod
def __get_rootfs_dir(rootfs_dir):
    """
    Return the canonical rootfs directory for *rootfs_dir*.

    If *rootfs_dir* is an existing directory it is used directly;
    otherwise it is treated as an image name and IMAGE_ROOTFS is
    looked up through bitbake.

    :raises WicError: when no usable rootfs directory can be found
    """
    if rootfs_dir and os.path.isdir(rootfs_dir):
        return os.path.realpath(rootfs_dir)

    image_rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", rootfs_dir)
    # get_bitbake_var() may return None when the variable is unset;
    # treat that like a missing directory instead of letting
    # os.path.isdir(None) raise TypeError.
    if not image_rootfs_dir or not os.path.isdir(image_rootfs_dir):
        raise WicError("No valid artifact IMAGE_ROOTFS from image "
                       "named %s has been found at %s, exiting." %
                       (rootfs_dir, image_rootfs_dir))

    return os.path.realpath(image_rootfs_dir)
@staticmethod
def __get_pseudo(native_sysroot, rootfs, pseudo_dir):
    """
    Build the shell prefix that runs a command under pseudo.

    The prefix exports pseudo's environment (prefix under the native
    sysroot, state database in *pseudo_dir*, passwd data from
    *rootfs*) and ends with the FAKEROOTCMD executable followed by a
    space, ready to be concatenated with the command to wrap.
    """
    parts = [
        "export PSEUDO_PREFIX=%s/usr;" % native_sysroot,
        "export PSEUDO_LOCALSTATEDIR=%s;" % pseudo_dir,
        "export PSEUDO_PASSWD=%s;" % rootfs,
        "export PSEUDO_NOSYMLINKEXP=1;",
        "%s " % get_bitbake_var("FAKEROOTCMD"),
    ]
    return "".join(parts)
@classmethod
def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
                         oe_builddir, bootimg_dir, kernel_dir,
                         krootfs_dir, native_sysroot):
    """
    Called to do the actual content population for a partition i.e. it
    'prepares' the partition to be incorporated into the image.

    Resolves the rootfs to use from part.rootfs_dir (or krootfs_dir's
    default 'ROOTFS_DIR' entry).  When the partition requires
    modifications (--include-path, --exclude-path, --change-directory
    or an fstab update) the rootfs is first copied into the work
    directory together with its pseudo database, so ownership and
    permission data survive the copy; all modifications are applied to
    that private copy under pseudo.
    """
    if part.rootfs_dir is None:
        if 'ROOTFS_DIR' not in krootfs_dir:
            raise WicError("Couldn't find --rootfs-dir, exiting")
        rootfs_dir = krootfs_dir['ROOTFS_DIR']
    else:
        if part.rootfs_dir in krootfs_dir:
            rootfs_dir = krootfs_dir[part.rootfs_dir]
        elif part.rootfs_dir:
            rootfs_dir = part.rootfs_dir
        else:
            raise WicError("Couldn't find --rootfs-dir=%s connection or "
                           "it is not a valid path, exiting" % part.rootfs_dir)

    part.rootfs_dir = cls.__get_rootfs_dir(rootfs_dir)
    part.has_fstab = os.path.exists(os.path.join(part.rootfs_dir, "etc/fstab"))

    # The pseudo database normally lives next to the rootfs; fall back
    # to the one beside IMAGE_ROOTFS.  Without any database we carry
    # on, but ownership/permission information will be wrong.
    pseudo_dir = os.path.join(part.rootfs_dir, "../pseudo")
    if not os.path.lexists(pseudo_dir):
        pseudo_dir = os.path.join(cls.__get_rootfs_dir(None), '../pseudo')
    if not os.path.lexists(pseudo_dir):
        # logger.warn() is a deprecated alias; use warning().
        logger.warning("%s folder does not exist. "
                       "Usernames and permissions will be invalid " % pseudo_dir)
        pseudo_dir = None

    new_rootfs = None
    new_pseudo = None
    # Handle excluded paths.
    if part.exclude_path or part.include_path or part.change_directory or part.update_fstab_in_rootfs:
        # We need a new rootfs directory we can safely modify without
        # interfering with other tasks. Copy to workdir.
        new_rootfs = os.path.realpath(os.path.join(cr_workdir, "rootfs%d" % part.lineno))

        if os.path.lexists(new_rootfs):
            shutil.rmtree(new_rootfs)

        if part.change_directory:
            cd = part.change_directory
            if cd[-1] == '/':
                cd = cd[:-1]
            orig_dir = cls.__validate_path("--change-directory", part.rootfs_dir, cd)
        else:
            orig_dir = part.rootfs_dir
        copyhardlinktree(orig_dir, new_rootfs)

        # Convert the pseudo directory to its new location
        if pseudo_dir:
            new_pseudo = os.path.realpath(
                os.path.join(cr_workdir, "pseudo%d" % part.lineno))
            if os.path.lexists(new_pseudo):
                shutil.rmtree(new_pseudo)
            os.mkdir(new_pseudo)
            shutil.copy(os.path.join(pseudo_dir, "files.db"),
                        os.path.join(new_pseudo, "files.db"))

            # NOTE(review): pseudo's -B -m/-M appears to rebase the
            # database entries from orig_dir to new_rootfs — confirm
            # against the pseudo documentation.
            pseudo_cmd = "%s -B -m %s -M %s" % (cls.__get_pseudo(native_sysroot,
                                                                 new_rootfs,
                                                                 new_pseudo),
                                                orig_dir, new_rootfs)
            exec_native_cmd(pseudo_cmd, native_sysroot)

        for in_path in part.include_path or []:
            # parse arguments: [source] or [source, destination]
            include_path = in_path[0]
            if len(in_path) > 2:
                # Bug fix: removed stray leading apostrophe from the message.
                logger.error("Invalid number of arguments for include-path")
                sys.exit(1)
            if len(in_path) == 2:
                path = in_path[1]
            else:
                path = None

            # Pack files to be included into a tar file.
            # We need to create a tar file, because that way we can keep the
            # permissions from the files even when they belong to different
            # pseudo enviroments.
            # If we simply copy files using copyhardlinktree/copytree... the
            # copied files will belong to the user running wic.
            tar_file = os.path.realpath(
                os.path.join(cr_workdir, "include-path%d.tar" % part.lineno))
            if os.path.isfile(include_path):
                parent = os.path.dirname(os.path.realpath(include_path))
                tar_cmd = "tar c --owner=root --group=root -f %s -C %s %s" % (
                    tar_file, parent, os.path.relpath(include_path, parent))
                exec_native_cmd(tar_cmd, native_sysroot)
            else:
                # A non-file source may be another rootfs connection name;
                # resolve it and reuse its pseudo database when present.
                if include_path in krootfs_dir:
                    include_path = krootfs_dir[include_path]
                include_path = cls.__get_rootfs_dir(include_path)
                include_pseudo = os.path.join(include_path, "../pseudo")
                if os.path.lexists(include_pseudo):
                    pseudo = cls.__get_pseudo(native_sysroot, include_path,
                                              include_pseudo)
                    tar_cmd = "tar cf %s -C %s ." % (tar_file, include_path)
                else:
                    pseudo = None
                    tar_cmd = "tar c --owner=root --group=root -f %s -C %s ." % (
                        tar_file, include_path)
                exec_native_cmd(tar_cmd, native_sysroot, pseudo)

            # create destination
            if path:
                destination = cls.__validate_path("--include-path", new_rootfs, path)
                Path(destination).mkdir(parents=True, exist_ok=True)
            else:
                destination = new_rootfs

            # extract destination
            untar_cmd = "tar xf %s -C %s" % (tar_file, destination)
            if new_pseudo:
                pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
            else:
                pseudo = None
            exec_native_cmd(untar_cmd, native_sysroot, pseudo)
            os.remove(tar_file)

        for orig_path in part.exclude_path or []:
            path = orig_path

            full_path = cls.__validate_path("--exclude-path", new_rootfs, path)

            if not os.path.lexists(full_path):
                continue

            if new_pseudo:
                pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
            else:
                pseudo = None
            # A trailing separator means "keep the directory, empty it";
            # otherwise the path itself is removed.
            if path.endswith(os.sep):
                # Delete content only.
                for entry in os.listdir(full_path):
                    full_entry = os.path.join(full_path, entry)
                    rm_cmd = "rm -rf %s" % (full_entry)
                    exec_native_cmd(rm_cmd, native_sysroot, pseudo)
            else:
                # Delete whole directory.
                rm_cmd = "rm -rf %s" % (full_path)
                exec_native_cmd(rm_cmd, native_sysroot, pseudo)

        # Update part.has_fstab here as fstab may have been added or
        # removed by the above modifications.
        part.has_fstab = os.path.exists(os.path.join(new_rootfs, "etc/fstab"))
        if part.update_fstab_in_rootfs and part.has_fstab and not part.no_fstab_update:
            fstab_path = os.path.join(new_rootfs, "etc/fstab")
            # Assume that fstab should always be owned by root with fixed permissions
            install_cmd = "install -m 0644 -p %s %s" % (part.updated_fstab_path, fstab_path)
            if new_pseudo:
                pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
            else:
                pseudo = None
            exec_native_cmd(install_cmd, native_sysroot, pseudo)

    part.prepare_rootfs(cr_workdir, oe_builddir,
                        new_rootfs or part.rootfs_dir, native_sysroot,
                        pseudo_dir = new_pseudo or pseudo_dir)