mirror of https://github.com/u-boot/u-boot.git
binman: Refactor fit to generate output at the end
At present the fit implementation creates the output tree while scanning the
FIT description, then updates the tree later when the data is known. This
works, but is a bit confusing, since it requires mixing the scanning code with
the generation code, with a fix-up step at the end.

It is actually possible to do this in two phases, one to scan everything and
the other to generate the FIT. Thus the FIT is generated in one pass, when
everything is known. Update the code accordingly.

The only functional change is that the 'data' property for each node is now
last instead of first, which is really a more natural position. Update the
affected test to deal with this.

One wrinkle is that the calculated properties (image-pos, size and offset) are
now added before the FIT is generated, so we must filter these out when
copying properties from the binman description to the FIT.

Most of the change here is splitting out some of the code from the
ReadEntries() implementation into _BuildInput(). So despite the large diff,
most of the code is the same. It is not feasible to split this patch up, so
far as I can tell.

Signed-off-by: Simon Glass <sjg@chromium.org>
Reviewed-by: Alper Nebi Yasak <alpernebiyasak@gmail.com>
This commit is contained in:
parent c9ee33ac97
commit 38397d0833
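Not part of the commit itself, but as background for the diff below: a minimal
sketch of the single-pass approach the commit message describes, using
pylibfdt's sequential-write API (libfdt.FdtSw), which the new _BuildInput()
relies on. The 'description' dict and the get_image_data() helper are
hypothetical stand-ins for binman's entry handling, not binman code.

import libfdt

def build_fit(description, get_image_data):
    # Walk the description once and write the FIT in a single pass,
    # adding each image's 'data' property as soon as it is known,
    # instead of fixing the tree up afterwards
    fsw = libfdt.FdtSw()
    fsw.finish_reservemap()
    with fsw.add_node(''):
        fsw.property_string('description', description['description'])
        with fsw.add_node('images'):
            for name, image in description['images'].items():
                with fsw.add_node(name):
                    fsw.property_string('type', image['type'])
                    fsw.property('data', bytes(get_image_data(name)))
    fdt = fsw.as_fdt()
    fdt.pack()
    return fdt.as_bytearray()

A blob built this way is what binman writes out as the .itb input file before
running mkimage on it, as BuildSectionData() in the diff below does.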
@@ -209,6 +209,81 @@ class Entry_fit(Entry_section):
            return oper

    def ReadEntries(self):
        def _add_entries(base_node, depth, node):
            """Add entries for any nodes that need them

            Args:
                base_node: Base Node of the FIT (with 'description' property)
                depth: Current node depth (0 is the base 'fit' node)
                node: Current node to process

            Here we only need to provide binman entries which are used to define
            the 'data' for each image. We create an entry_Section for each.
            """
            rel_path = node.path[len(base_node.path):]
            in_images = rel_path.startswith('/images')
            has_images = depth == 2 and in_images
            if has_images:
                # This node is a FIT subimage node (e.g. "/images/kernel")
                # containing content nodes. We collect the subimage nodes and
                # section entries for them here to merge the content subnodes
                # together and put the merged contents in the subimage node's
                # 'data' property later.
                entry = Entry.Create(self.section, node, etype='section')
                entry.ReadNode()
                # The hash subnodes here are for mkimage, not binman.
                entry.SetUpdateHash(False)
                self._entries[rel_path] = entry

            for subnode in node.subnodes:
                _add_entries(base_node, depth + 1, subnode)

        _add_entries(self._node, 0, self._node)

    def BuildSectionData(self, required):
        """Build FIT entry contents

        This adds the 'data' properties to the input ITB (Image-tree Binary)
        then runs mkimage to process it.

        Args:
            required: True if the data must be present, False if it is OK to
                return None

        Returns:
            Contents of the section (bytes)
        """
        data = self._BuildInput()
        uniq = self.GetUniqueName()
        input_fname = tools.get_output_filename('%s.itb' % uniq)
        output_fname = tools.get_output_filename('%s.fit' % uniq)
        tools.write_file(input_fname, data)
        tools.write_file(output_fname, data)

        args = {}
        ext_offset = self._fit_props.get('fit,external-offset')
        if ext_offset is not None:
            args = {
                'external': True,
                'pad': fdt_util.fdt32_to_cpu(ext_offset.value)
            }
        if self.mkimage.run(reset_timestamp=True, output_fname=output_fname,
                            **args) is None:
            # Bintool is missing; just use empty data as the output
            self.record_missing_bintool(self.mkimage)
            return tools.get_bytes(0, 1024)

        return tools.read_file(output_fname)

    def _BuildInput(self):
        """Finish the FIT by adding the 'data' properties to it

        Arguments:
            fdt: FIT to update

        Returns:
            New fdt contents (bytes)
        """
        def _process_prop(pname, prop):
            """Process special properties
@@ -236,9 +311,15 @@ class Entry_fit(Entry_section):
                    val = val[1:].replace('DEFAULT-SEQ', str(seq + 1))
                    fsw.property_string(pname, val)
                    return
            elif pname.startswith('fit,'):
                # Ignore these, which are commands for binman to process
                return
            elif pname in ['offset', 'size', 'image-pos']:
                # Don't add binman's calculated properties
                return
            fsw.property(pname, prop.bytes)

        def _scan_gen_fdt_nodes(subnode, depth, in_images):
        def _gen_fdt_nodes(subnode, depth, in_images):
            """Generate FDT nodes

            This creates one node for each member of self._fdts using the
@@ -281,7 +362,7 @@ class Entry_fit(Entry_section):
            else:
                self.Raise("Generator node requires 'fit,fdt-list' property")

        def _scan_node(subnode, depth, in_images):
        def _gen_node(subnode, depth, in_images):
            """Generate nodes from a template

            This creates one node for each member of self._fdts using the
@@ -298,10 +379,10 @@ class Entry_fit(Entry_section):
            """
            oper = self._get_operation(subnode)
            if oper == OP_GEN_FDT_NODES:
                _scan_gen_fdt_nodes(subnode, depth, in_images)
                _gen_fdt_nodes(subnode, depth, in_images)

        def _AddNode(base_node, depth, node):
            """Add a node to the FIT
        def _add_node(base_node, depth, node):
            """Add nodes to the output FIT

            Args:
                base_node: Base Node of the FIT (with 'description' property)
@@ -311,104 +392,49 @@ class Entry_fit(Entry_section):
            There are two cases to deal with:
                - hash and signature nodes which become part of the FIT
                - binman entries which are used to define the 'data' for each
                    image
                    image, so don't appear in the FIT
            """
            # Copy over all the relevant properties
            for pname, prop in node.props.items():
                if not pname.startswith('fit,'):
                    _process_prop(pname, prop)
                _process_prop(pname, prop)

            rel_path = node.path[len(base_node.path):]
            in_images = rel_path.startswith('/images')

            has_images = depth == 2 and in_images
            if has_images:
                # This node is a FIT subimage node (e.g. "/images/kernel")
                # containing content nodes. We collect the subimage nodes and
                # section entries for them here to merge the content subnodes
                # together and put the merged contents in the subimage node's
                # 'data' property later.
                entry = Entry.Create(self.section, node, etype='section')
                entry.ReadNode()
                # The hash subnodes here are for mkimage, not binman.
                entry.SetUpdateHash(False)
                self._entries[rel_path] = entry
                entry = self._entries[rel_path]
                data = entry.GetData()
                fsw.property('data', bytes(data))

            for subnode in node.subnodes:
                if has_images and not (subnode.name.startswith('hash') or
                                       subnode.name.startswith('signature')):
                    # This subnode is a content node not meant to appear in
                    # the FIT (e.g. "/images/kernel/u-boot"), so don't call
                    # fsw.add_node() or _AddNode() for it.
                    # fsw.add_node() or _add_node() for it.
                    pass
                elif self.GetImage().generate and subnode.name.startswith('@'):
                    _scan_node(subnode, depth, in_images)
                    subnode_path = f'{rel_path}/{subnode.name}'
                    entry = self._entries.get(subnode_path)
                    _gen_node(subnode, depth, in_images)
                    if entry:
                        del self._entries[subnode_path]
                else:
                    with fsw.add_node(subnode.name):
                        _AddNode(base_node, depth + 1, subnode)
                        _add_node(base_node, depth + 1, subnode)

        # Build a new tree with all nodes and properties starting from the
        # entry node
        fsw = libfdt.FdtSw()
        fsw.finish_reservemap()
        with fsw.add_node(''):
            _AddNode(self._node, 0, self._node)
            _add_node(self._node, 0, self._node)
        fdt = fsw.as_fdt()

        # Pack this new FDT and scan it so we can add the data later
        fdt.pack()
        self._fdt = Fdt.FromData(fdt.as_bytearray())
        self._fdt.Scan()

    def BuildSectionData(self, required):
        """Build FIT entry contents

        This adds the 'data' properties to the input ITB (Image-tree Binary)
        then runs mkimage to process it.

        Args:
            required: True if the data must be present, False if it is OK to
                return None

        Returns:
            Contents of the section (bytes)
        """
        data = self._BuildInput(self._fdt)
        uniq = self.GetUniqueName()
        input_fname = tools.get_output_filename('%s.itb' % uniq)
        output_fname = tools.get_output_filename('%s.fit' % uniq)
        tools.write_file(input_fname, data)
        tools.write_file(output_fname, data)

        args = {}
        ext_offset = self._fit_props.get('fit,external-offset')
        if ext_offset is not None:
            args = {
                'external': True,
                'pad': fdt_util.fdt32_to_cpu(ext_offset.value)
            }
        if self.mkimage.run(reset_timestamp=True, output_fname=output_fname,
                            **args) is None:
            # Bintool is missing; just use empty data as the output
            self.record_missing_bintool(self.mkimage)
            return tools.get_bytes(0, 1024)

        return tools.read_file(output_fname)

    def _BuildInput(self, fdt):
        """Finish the FIT by adding the 'data' properties to it

        Arguments:
            fdt: FIT to update

        Returns:
            New fdt contents (bytes)
        """
        for path, section in self._entries.items():
            node = fdt.GetNode(path)
            data = section.GetData()
            node.AddData('data', data)

        fdt.Sync(auto_resize=True)
        data = fdt.GetContents()
        data = fdt.as_bytearray()
        return data

    def SetImagePos(self, image_pos):
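Not part of the commit: since the 'data' properties are now emitted last, the
expected offsets in the functional test below shift (e.g. the kernel image-pos
moves from 160 to 304). As a small illustration, a pylibfdt check, under the
assumption that itb_data holds the bytes produced by _BuildInput(), that an
image node carries its 'data' property; the node name matches the test's
/images/kernel.

import libfdt

def check_itb(itb_data):
    # itb_data: contents of the .itb written by binman before mkimage runs
    fdt = libfdt.Fdt(itb_data)
    node = fdt.path_offset('/images/kernel')
    data = fdt.getprop(node, 'data')
    print('kernel data is', len(data), 'bytes')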
@@ -3780,6 +3780,7 @@ class TestFunctional(unittest.TestCase):
        dtb.Scan()
        props = self._GetPropTree(dtb, BASE_DTB_PROPS + REPACK_DTB_PROPS)

        self.maxDiff = None
        self.assertEqual({
            'image-pos': 0,
            'offset': 0,
@@ -3793,19 +3794,19 @@ class TestFunctional(unittest.TestCase):
            'fit:offset': 4,
            'fit:size': 1840,

            'fit/images/kernel:image-pos': 160,
            'fit/images/kernel:offset': 156,
            'fit/images/kernel:image-pos': 304,
            'fit/images/kernel:offset': 300,
            'fit/images/kernel:size': 4,

            'fit/images/kernel/u-boot:image-pos': 160,
            'fit/images/kernel/u-boot:image-pos': 304,
            'fit/images/kernel/u-boot:offset': 0,
            'fit/images/kernel/u-boot:size': 4,

            'fit/images/fdt-1:image-pos': 456,
            'fit/images/fdt-1:offset': 452,
            'fit/images/fdt-1:image-pos': 552,
            'fit/images/fdt-1:offset': 548,
            'fit/images/fdt-1:size': 6,

            'fit/images/fdt-1/u-boot-spl-dtb:image-pos': 456,
            'fit/images/fdt-1/u-boot-spl-dtb:image-pos': 552,
            'fit/images/fdt-1/u-boot-spl-dtb:offset': 0,
            'fit/images/fdt-1/u-boot-spl-dtb:size': 6,
@@ -21,7 +21,5 @@
};
};
};
fdtmap {
};
};
};