Diffstat (limited to 'tools/binman/etype')
-rw-r--r--  tools/binman/etype/__init__.py                 0
-rw-r--r--  tools/binman/etype/_testing.py                14
-rw-r--r--  tools/binman/etype/blob.py                    59
-rw-r--r--  tools/binman/etype/blob_dtb.py                10
-rw-r--r--  tools/binman/etype/cbfs.py                   263
-rw-r--r--  tools/binman/etype/fdtmap.py                 130
-rw-r--r--  tools/binman/etype/files.py                    3
-rw-r--r--  tools/binman/etype/fmap.py                     4
-rw-r--r--  tools/binman/etype/image_header.py            99
-rw-r--r--  tools/binman/etype/intel_descriptor.py        16
-rw-r--r--  tools/binman/etype/intel_ifwi.py             100
-rw-r--r--  tools/binman/etype/intel_me.py                 2
-rw-r--r--  tools/binman/etype/section.py                439
-rw-r--r--  tools/binman/etype/text.py                    23
-rw-r--r--  tools/binman/etype/u_boot_spl_elf.py           2
-rw-r--r--  tools/binman/etype/u_boot_tpl_elf.py          24
-rw-r--r--  tools/binman/etype/u_boot_with_ucode_ptr.py    8
17 files changed, 1101 insertions, 95 deletions
diff --git a/tools/binman/etype/__init__.py b/tools/binman/etype/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tools/binman/etype/__init__.py
diff --git a/tools/binman/etype/_testing.py b/tools/binman/etype/_testing.py
index ac62d2e200..ae24fe8105 100644
--- a/tools/binman/etype/_testing.py
+++ b/tools/binman/etype/_testing.py
@@ -50,6 +50,8 @@ class Entry__testing(Entry):
'bad-update-contents')
self.return_contents_once = fdt_util.GetBool(self._node,
'return-contents-once')
+ self.bad_update_contents_twice = fdt_util.GetBool(self._node,
+ 'bad-update-contents-twice')
# Set to True when the entry is ready to process the FDT.
self.process_fdt_ready = False
@@ -71,11 +73,12 @@ class Entry__testing(Entry):
if self.force_bad_datatype:
self.GetEntryArgsOrProps([EntryArg('test-bad-datatype-arg', bool)])
self.return_contents = True
+ self.contents = b'a'
def ObtainContents(self):
if self.return_unknown_contents or not self.return_contents:
return False
- self.data = b'a'
+ self.data = self.contents
self.contents_size = len(self.data)
if self.return_contents_once:
self.return_contents = False
@@ -88,9 +91,14 @@ class Entry__testing(Entry):
def ProcessContents(self):
if self.bad_update_contents:
- # Request to update the conents with something larger, to cause a
+ # Request to update the contents with something larger, to cause a
# failure.
- self.ProcessContentsUpdate('aa')
+ if self.bad_update_contents_twice:
+ self.contents += b'a'
+ else:
+ self.contents = b'aa'
+ return self.ProcessContentsUpdate(self.contents)
+ return True
def ProcessFdt(self, fdt):
"""Force reprocessing the first time"""
diff --git a/tools/binman/etype/blob.py b/tools/binman/etype/blob.py
index f56a1f8768..00cad33718 100644
--- a/tools/binman/etype/blob.py
+++ b/tools/binman/etype/blob.py
@@ -9,6 +9,7 @@ from entry import Entry
import fdt_util
import state
import tools
+import tout
class Entry_blob(Entry):
"""Entry containing an arbitrary binary blob
@@ -33,8 +34,7 @@ class Entry_blob(Entry):
def __init__(self, section, etype, node):
Entry.__init__(self, section, etype, node)
self._filename = fdt_util.GetString(self._node, 'filename', self.etype)
- self._compress = fdt_util.GetString(self._node, 'compress', 'none')
- self._uncompressed_size = None
+ self.compress = fdt_util.GetString(self._node, 'compress', 'none')
def ObtainContents(self):
self._filename = self.GetDefaultFilename()
@@ -42,37 +42,40 @@ class Entry_blob(Entry):
self.ReadBlobContents()
return True
+ def CompressData(self, indata):
+ if self.compress != 'none':
+ self.uncomp_size = len(indata)
+ data = tools.Compress(indata, self.compress)
+ return data
+
def ReadBlobContents(self):
- # We assume the data is small enough to fit into memory. If this
- # is used for large filesystem image that might not be true.
- # In that case, Image.BuildImage() could be adjusted to use a
- # new Entry method which can read in chunks. Then we could copy
- # the data in chunks and avoid reading it all at once. For now
- # this seems like an unnecessary complication.
- data = tools.ReadFile(self._pathname)
- if self._compress == 'lz4':
- self._uncompressed_size = len(data)
- '''
- import lz4 # Import this only if needed (python-lz4 dependency)
+ """Read blob contents into memory
+
+ This function compresses the data before storing if needed.
- try:
- data = lz4.frame.compress(data)
- except AttributeError:
- data = lz4.compress(data)
- '''
- data = tools.Run('lz4', '-c', self._pathname, binary=True)
+ We assume the data is small enough to fit into memory. If this
+ is used for a large filesystem image that might not be true.
+ In that case, Image.BuildImage() could be adjusted to use a
+ new Entry method which can read in chunks. Then we could copy
+ the data in chunks and avoid reading it all at once. For now
+ this seems like an unnecessary complication.
+ """
+ indata = tools.ReadFile(self._pathname)
+ data = self.CompressData(indata)
self.SetContents(data)
return True
def GetDefaultFilename(self):
return self._filename
- def AddMissingProperties(self):
- Entry.AddMissingProperties(self)
- if self._compress != 'none':
- state.AddZeroProp(self._node, 'uncomp-size')
-
- def SetCalculatedProperties(self):
- Entry.SetCalculatedProperties(self)
- if self._uncompressed_size is not None:
- state.SetInt(self._node, 'uncomp-size', self._uncompressed_size)
+ def ReadData(self, decomp=True):
+ indata = Entry.ReadData(self, decomp)
+ if decomp:
+ data = tools.Decompress(indata, self.compress)
+ if self.uncomp_size:
+ tout.Info("%s: Decompressing data size %#x with algo '%s' to data size %#x" %
+ (self.GetPath(), len(indata), self.compress,
+ len(data)))
+ else:
+ data = indata
+ return data
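To show the compress-on-store / decompress-on-read pattern that blob.py now follows, here is a minimal stand-alone sketch. It uses zlib purely as a stand-in for binman's tools.Compress()/Decompress() (which are lz4-based and not part of this diff), so the round trip can be tried outside binman:

    import zlib

    class BlobModel:
        """Toy model of Entry_blob's compress / uncomp-size handling."""
        def __init__(self, compress='zlib'):
            self.compress = compress
            self.uncomp_size = None
            self.data = None

        def set_contents(self, indata):
            if self.compress != 'none':
                self.uncomp_size = len(indata)   # recorded as 'uncomp-size'
                indata = zlib.compress(indata)
            self.data = indata

        def read_data(self, decomp=True):
            if decomp and self.compress != 'none':
                return zlib.decompress(self.data)
            return self.data

    blob = BlobModel()
    blob.set_contents(b'hello' * 100)
    assert blob.read_data() == b'hello' * 100
    assert blob.uncomp_size == 500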
diff --git a/tools/binman/etype/blob_dtb.py b/tools/binman/etype/blob_dtb.py
index cc5b4a3f76..88ed55d886 100644
--- a/tools/binman/etype/blob_dtb.py
+++ b/tools/binman/etype/blob_dtb.py
@@ -23,11 +23,11 @@ class Entry_blob_dtb(Entry_blob):
def ObtainContents(self):
"""Get the device-tree from the list held by the 'state' module"""
self._filename = self.GetDefaultFilename()
- self._pathname, data = state.GetFdtContents(self._filename)
- self.SetContents(data)
- return True
+ self._pathname, _ = state.GetFdtContents(self._filename)
+ return Entry_blob.ReadBlobContents(self)
def ProcessContents(self):
"""Re-read the DTB contents so that we get any calculated properties"""
- _, data = state.GetFdtContents(self._filename)
- self.SetContents(data)
+ _, indata = state.GetFdtContents(self._filename)
+ data = self.CompressData(indata)
+ return self.ProcessContentsUpdate(data)
diff --git a/tools/binman/etype/cbfs.py b/tools/binman/etype/cbfs.py
new file mode 100644
index 0000000000..edf2189fd2
--- /dev/null
+++ b/tools/binman/etype/cbfs.py
@@ -0,0 +1,263 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2019 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a Coreboot Filesystem (CBFS)
+#
+
+from collections import OrderedDict
+
+import cbfs_util
+from cbfs_util import CbfsWriter
+from entry import Entry
+import fdt_util
+import state
+
+class Entry_cbfs(Entry):
+ """Entry containing a Coreboot Filesystem (CBFS)
+
+ A CBFS provides a way to group files into a single archive. It has a simple directory
+ structure and allows the position of individual files to be set, since it is
+ designed to support execute-in-place in an x86 SPI-flash device. Where XIP
+ is not used, it supports compression and storing ELF files.
+
+ CBFS is used by coreboot as its way of organising SPI-flash contents.
+
+ The contents of the CBFS are defined by subnodes of the cbfs entry, e.g.:
+
+ cbfs {
+ size = <0x100000>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ };
+ };
+
+ This creates a CBFS 1MB in size with two files in it: u-boot.bin and u-boot.dtb.
+ Note that the size is required since binman does not support calculating it.
+ The contents of each entry are just what binman would normally provide if it
+ were not a CBFS node. A blob type can be used to import arbitrary files as
+ with the second subnode below:
+
+ cbfs {
+ size = <0x100000>;
+ u-boot {
+ cbfs-name = "BOOT";
+ cbfs-type = "raw";
+ };
+
+ dtb {
+ type = "blob";
+ filename = "u-boot.dtb";
+ cbfs-type = "raw";
+ cbfs-compress = "lz4";
+ cbfs-offset = <0x100000>;
+ };
+ };
+
+ This creates a CBFS 1MB in size with u-boot.bin (named "BOOT") and
+ u-boot.dtb (named "dtb") and compressed with the lz4 algorithm.
+
+
+ Properties supported in the top-level CBFS node:
+
+ cbfs-arch:
+ Defaults to "x86", but you can specify the architecture if needed.
+
+
+ Properties supported in the CBFS entry subnodes:
+
+ cbfs-name:
+ This is the name of the file created in CBFS. It defaults to the entry
+ name (which is the node name), but you can override it with this
+ property.
+
+ cbfs-type:
+ This is the CBFS file type. The following are supported:
+
+ raw:
+ This is a 'raw' file, although compression is supported. It can be
+ used to store any file in CBFS.
+
+ stage:
+ This is an ELF file that has been loaded (i.e. mapped to memory), so
+ appears in the CBFS as a flat binary. The input file must be an ELF
+ image, for example this puts "u-boot" (the ELF image) into a 'stage'
+ entry:
+
+ cbfs {
+ size = <0x100000>;
+ u-boot-elf {
+ cbfs-name = "BOOT";
+ cbfs-type = "stage";
+ };
+ };
+
+ You can use your own ELF file with something like:
+
+ cbfs {
+ size = <0x100000>;
+ something {
+ type = "blob";
+ filename = "cbfs-stage.elf";
+ cbfs-type = "stage";
+ };
+ };
+
+ As mentioned, the file is converted to a flat binary, so it is
+ equivalent to adding "u-boot.bin", for example, but with the load and
+ start addresses specified by the ELF. At present there is no option
+ to add a flat binary with a load/start address, similar to the
+ 'add-flat-binary' option in cbfstool.
+
+ cbfs-offset:
+ This is the offset of the file's data within the CBFS. It is used to
+ specify where the file should be placed in cases where a fixed position
+ is needed. Typical uses are for code which is not relocatable and must
+ execute in-place from a particular address. This works because SPI flash
+ is generally mapped into memory on x86 devices. The file header is
+ placed before this offset so that the start of the data lines up exactly
+ with the chosen offset. If this property is not provided, then the file is
+ placed in the next available spot.
+
+ The current implementation supports only a subset of CBFS features. It does
+ not support other file types (e.g. payload), adding multiple files (like the
+ 'files' entry with a pattern supported by binman) and a few other things.
+
+ Of course binman can create images containing multiple CBFSs, simply by
+ defining these in the binman config:
+
+
+ binman {
+ size = <0x800000>;
+ cbfs {
+ offset = <0x100000>;
+ size = <0x100000>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ };
+ };
+
+ cbfs2 {
+ offset = <0x700000>;
+ size = <0x100000>;
+ u-boot {
+ cbfs-type = "raw";
+ };
+ u-boot-dtb {
+ cbfs-type = "raw";
+ };
+ image {
+ type = "blob";
+ filename = "image.jpg";
+ };
+ };
+ };
+
+ This creates an 8MB image with two CBFSs, one at offset 1MB, one at 7MB,
+ both of size 1MB.
+ """
+ def __init__(self, section, etype, node):
+ Entry.__init__(self, section, etype, node)
+ self._cbfs_arg = fdt_util.GetString(node, 'cbfs-arch', 'x86')
+ self._cbfs_entries = OrderedDict()
+ self._ReadSubnodes()
+
+ def ObtainContents(self):
+ arch = cbfs_util.find_arch(self._cbfs_arg)
+ if arch is None:
+ self.Raise("Invalid architecture '%s'" % self._cbfs_arg)
+ if self.size is None:
+ self.Raise("'cbfs' entry must have a size property")
+ cbfs = CbfsWriter(self.size, arch)
+ for entry in self._cbfs_entries.values():
+ # First get the input data and put it in a file. If not available,
+ # try later.
+ if not entry.ObtainContents():
+ return False
+ data = entry.GetData()
+ cfile = None
+ if entry._type == 'raw':
+ cfile = cbfs.add_file_raw(entry._cbfs_name, data,
+ entry._cbfs_offset,
+ entry._cbfs_compress)
+ elif entry._type == 'stage':
+ cfile = cbfs.add_file_stage(entry._cbfs_name, data,
+ entry._cbfs_offset)
+ else:
+ entry.Raise("Unknown cbfs-type '%s' (use 'raw', 'stage')" %
+ entry._type)
+ if cfile:
+ entry._cbfs_file = cfile
+ data = cbfs.get_data()
+ self.SetContents(data)
+ return True
+
+ def _ReadSubnodes(self):
+ """Read the subnodes to find out what should go in this IFWI"""
+ for node in self._node.subnodes:
+ entry = Entry.Create(self.section, node)
+ entry._cbfs_name = fdt_util.GetString(node, 'cbfs-name', entry.name)
+ entry._type = fdt_util.GetString(node, 'cbfs-type')
+ compress = fdt_util.GetString(node, 'cbfs-compress', 'none')
+ entry._cbfs_offset = fdt_util.GetInt(node, 'cbfs-offset')
+ entry._cbfs_compress = cbfs_util.find_compress(compress)
+ if entry._cbfs_compress is None:
+ self.Raise("Invalid compression in '%s': '%s'" %
+ (node.name, compress))
+ self._cbfs_entries[entry._cbfs_name] = entry
+
+ def SetImagePos(self, image_pos):
+ """Override this function to set all the entry properties from CBFS
+
+ We can only do this once image_pos is known
+
+ Args:
+ image_pos: Position of this entry in the image
+ """
+ Entry.SetImagePos(self, image_pos)
+
+ # Now update the entries with info from the CBFS entries
+ for entry in self._cbfs_entries.values():
+ cfile = entry._cbfs_file
+ entry.size = cfile.data_len
+ entry.offset = cfile.calced_cbfs_offset
+ entry.image_pos = self.image_pos + entry.offset
+ if entry._cbfs_compress:
+ entry.uncomp_size = cfile.memlen
+
+ def AddMissingProperties(self):
+ Entry.AddMissingProperties(self)
+ for entry in self._cbfs_entries.values():
+ entry.AddMissingProperties()
+ if entry._cbfs_compress:
+ state.AddZeroProp(entry._node, 'uncomp-size')
+ # Store the 'compress' property, since we don't look at
+ # 'cbfs-compress' in Entry.ReadData()
+ state.AddString(entry._node, 'compress',
+ cbfs_util.compress_name(entry._cbfs_compress))
+
+ def SetCalculatedProperties(self):
+ """Set the value of device-tree properties calculated by binman"""
+ Entry.SetCalculatedProperties(self)
+ for entry in self._cbfs_entries.values():
+ state.SetInt(entry._node, 'offset', entry.offset)
+ state.SetInt(entry._node, 'size', entry.size)
+ state.SetInt(entry._node, 'image-pos', entry.image_pos)
+ if entry.uncomp_size is not None:
+ state.SetInt(entry._node, 'uncomp-size', entry.uncomp_size)
+
+ def ListEntries(self, entries, indent):
+ """Override this method to list all files in the section"""
+ Entry.ListEntries(self, entries, indent)
+ for entry in self._cbfs_entries.values():
+ entry.ListEntries(entries, indent + 1)
+
+ def GetEntries(self):
+ return self._cbfs_entries
diff --git a/tools/binman/etype/fdtmap.py b/tools/binman/etype/fdtmap.py
new file mode 100644
index 0000000000..ddb9738e5c
--- /dev/null
+++ b/tools/binman/etype/fdtmap.py
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+
+"""# Entry-type module for a full map of the firmware image
+
+This handles putting an FDT into the image with just the information about the
+image.
+"""
+
+import libfdt
+
+from entry import Entry
+from fdt import Fdt
+import state
+import tools
+
+FDTMAP_MAGIC = b'_FDTMAP_'
+FDTMAP_HDR_LEN = 16
+
+def LocateFdtmap(data):
+ """Search an image for an fdt map
+
+ Args:
+ data: Data to search
+
+ Returns:
+ Position of fdt map in data, or None if not found. Note that the
+ position returned is of the FDT header, i.e. before the FDT data
+ """
+ hdr_pos = data.find(FDTMAP_MAGIC)
+ size = len(data)
+ if hdr_pos != -1:
+ hdr = data[hdr_pos:hdr_pos + FDTMAP_HDR_LEN]
+ if len(hdr) == FDTMAP_HDR_LEN:
+ return hdr_pos
+ return None
+
+class Entry_fdtmap(Entry):
+ """An entry which contains an FDT map
+
+ Properties / Entry arguments:
+ None
+
+ An FDT map is just a header followed by an FDT containing a list of all the
+ entries in the image. The root node corresponds to the image node in the
+ original FDT, and an image-name property indicates the image name in that
+ original tree.
+
+ The header is the string _FDTMAP_ followed by 8 unused bytes.
+
+ When used, this entry will be populated with an FDT map which reflects the
+ entries in the current image. Hierarchy is preserved, and all offsets and
+ sizes are included.
+
+ Note that the -u option must be provided to ensure that binman updates the
+ FDT with the position of each entry.
+
+ Example output for a simple image with U-Boot and an FDT map:
+
+ / {
+ size = <0x00000112>;
+ image-pos = <0x00000000>;
+ offset = <0x00000000>;
+ u-boot {
+ size = <0x00000004>;
+ image-pos = <0x00000000>;
+ offset = <0x00000000>;
+ };
+ fdtmap {
+ size = <0x0000010e>;
+ image-pos = <0x00000004>;
+ offset = <0x00000004>;
+ };
+ };
+ """
+ def __init__(self, section, etype, node):
+ Entry.__init__(self, section, etype, node)
+
+ def _GetFdtmap(self):
+ """Build an FDT map from the entries in the current image
+
+ Returns:
+ FDT map binary data
+ """
+ def _AddNode(node):
+ """Add a node to the FDT map"""
+ for pname, prop in node.props.items():
+ fsw.property(pname, prop.bytes)
+ for subnode in node.subnodes:
+ with fsw.add_node(subnode.name):
+ _AddNode(subnode)
+
+ # Get the FDT data into an Fdt object
+ data = state.GetFdtContents()[1]
+ infdt = Fdt.FromData(data)
+ infdt.Scan()
+
+ # Find the node for the image containing the Fdt-map entry
+ path = self.section.GetPath()
+ node = infdt.GetNode(path)
+ if not node:
+ self.Raise("Internal error: Cannot locate node for path '%s'" %
+ path)
+
+ # Build a new tree with all nodes and properties starting from that node
+ fsw = libfdt.FdtSw()
+ fsw.finish_reservemap()
+ with fsw.add_node(''):
+ _AddNode(node)
+ fdt = fsw.as_fdt()
+
+ # Pack this new FDT and return its contents
+ fdt.pack()
+ outfdt = Fdt.FromData(fdt.as_bytearray())
+ data = FDTMAP_MAGIC + tools.GetBytes(0, 8) + outfdt.GetContents()
+ return data
+
+ def ObtainContents(self):
+ """Obtain a placeholder for the fdt-map contents"""
+ self.SetContents(self._GetFdtmap())
+ return True
+
+ def ProcessContents(self):
+ """Write an updated version of the FDT map to this entry
+
+ This is necessary since new data may have been written back to it during
+ processing, e.g. the image-pos properties.
+ """
+ return self.ProcessContentsUpdate(self._GetFdtmap())
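A reading tool can recover the map using nothing beyond the header layout described above (the _FDTMAP_ magic, 8 unused bytes, then a packed FDT). A minimal sketch, assuming only the standard device-tree blob header (total size is the big-endian word at offset 4):

    import struct

    FDTMAP_MAGIC = b'_FDTMAP_'
    FDTMAP_HDR_LEN = 16

    def extract_fdtmap(image_data):
        """Return the DTB bytes of the fdtmap, or None if no map is present."""
        pos = image_data.find(FDTMAP_MAGIC)
        if pos == -1:
            return None
        fdt_start = pos + FDTMAP_HDR_LEN
        total_size = struct.unpack('>I',
                                   image_data[fdt_start + 4:fdt_start + 8])[0]
        return image_data[fdt_start:fdt_start + total_size]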
diff --git a/tools/binman/etype/files.py b/tools/binman/etype/files.py
index 99f2f2f67f..0068b305f7 100644
--- a/tools/binman/etype/files.py
+++ b/tools/binman/etype/files.py
@@ -14,7 +14,6 @@ import fdt_util
import state
import tools
-import bsection
class Entry_files(Entry_section):
"""Entry containing a set of files
@@ -54,4 +53,4 @@ class Entry_files(Entry_section):
state.AddString(subnode, 'compress', self._compress)
# Read entries again, now that we have some
- self._section._ReadEntries()
+ self._ReadEntries()
diff --git a/tools/binman/etype/fmap.py b/tools/binman/etype/fmap.py
index e6b5c5c74c..f8d8d866f1 100644
--- a/tools/binman/etype/fmap.py
+++ b/tools/binman/etype/fmap.py
@@ -49,7 +49,7 @@ class Entry_fmap(Entry):
areas.append(fmap_util.FmapArea(pos or 0, entry.size or 0,
tools.FromUnicode(entry.name), 0))
- entries = self.section._image.GetEntries()
+ entries = self.section.image.GetEntries()
areas = []
for entry in entries.values():
_AddEntries(areas, entry)
@@ -62,4 +62,4 @@ class Entry_fmap(Entry):
return True
def ProcessContents(self):
- self.SetContents(self._GetFmap())
+ return self.ProcessContentsUpdate(self._GetFmap())
diff --git a/tools/binman/etype/image_header.py b/tools/binman/etype/image_header.py
new file mode 100644
index 0000000000..8f9c5aa5d9
--- /dev/null
+++ b/tools/binman/etype/image_header.py
@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+
+"""Entry-type module for an image header which points to the FDT map
+
+This creates an 8-byte entry with a magic number and the offset of the FDT map
+(which is another entry in the image), relative to the start or end of the
+image.
+"""
+
+import struct
+
+from entry import Entry
+import fdt_util
+
+IMAGE_HEADER_MAGIC = b'BinM'
+IMAGE_HEADER_LEN = 8
+
+def LocateHeaderOffset(data):
+ """Search an image for an image header
+
+ Args:
+ data: Data to search
+
+ Returns:
+ Offset of image header in the image, or None if not found
+ """
+ hdr_pos = data.find(IMAGE_HEADER_MAGIC)
+ if hdr_pos != -1:
+ size = len(data)
+ hdr = data[hdr_pos:hdr_pos + IMAGE_HEADER_LEN]
+ if len(hdr) == IMAGE_HEADER_LEN:
+ offset = struct.unpack('<I', hdr[4:])[0]
+ if hdr_pos == len(data) - IMAGE_HEADER_LEN:
+ pos = size + offset - (1 << 32)
+ else:
+ pos = offset
+ return pos
+ return None
+
+class Entry_image_header(Entry):
+ """An entry which contains a pointer to the FDT map
+
+ Properties / Entry arguments:
+ location: Location of header ("start" or "end" of image). This is
+ optional. If omitted then the entry must have an offset property.
+
+ This adds an 8-byte entry to the start or end of the image, pointing to the
+ location of the FDT map. The format is a magic number followed by an offset
+ from the start or end of the image, in two's complement format.
+
+ This entry must be in the top-level part of the image.
+
+ NOTE: If the location is at the start/end, you will probably need to specify
+ sort-by-offset for the image, unless you actually put the image header
+ first/last in the entry list.
+ """
+ def __init__(self, section, etype, node):
+ Entry.__init__(self, section, etype, node)
+ self.location = fdt_util.GetString(self._node, 'location')
+
+ def _GetHeader(self):
+ image_pos = self.GetSiblingImagePos('fdtmap')
+ if image_pos == False:
+ self.Raise("'image_header' section must have an 'fdtmap' sibling")
+ elif image_pos is None:
+ # This will be available when called from ProcessContents(), but not
+ # when called from ObtainContents()
+ offset = 0xffffffff
+ else:
+ image_size = self.section.GetImageSize() or 0
+ base = (0 if self.location != 'end' else image_size)
+ offset = (image_pos - base) & 0xffffffff
+ data = IMAGE_HEADER_MAGIC + struct.pack('<I', offset)
+ return data
+
+ def ObtainContents(self):
+ """Obtain a placeholder for the header contents"""
+ self.SetContents(self._GetHeader())
+ return True
+
+ def Pack(self, offset):
+ """Special pack method to set the offset to start/end of image"""
+ if not self.offset:
+ if self.location not in ['start', 'end']:
+ self.Raise("Invalid location '%s', expected 'start' or 'end'" %
+ self.location)
+ image_size = self.section.GetImageSize() or 0
+ self.offset = (0 if self.location != 'end' else image_size - 8)
+ return Entry.Pack(self, offset)
+
+ def ProcessContents(self):
+ """Write an updated version of the FDT map to this entry
+
+ This is necessary since image_pos is not available when ObtainContents()
+ is called, because at that point the entries have not yet been packed into the image.
+ """
+ return self.ProcessContentsUpdate(self._GetHeader())
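The offset stored after the 'BinM' magic is worth a worked example: it is relative to the start of the image when the header is at the start, and a 32-bit two's complement value relative to the end when the header is at the end. The encode/decode below mirrors _GetHeader() and LocateHeaderOffset() above:

    import struct

    IMAGE_HEADER_MAGIC = b'BinM'

    def encode_header(fdtmap_pos, image_size, location):
        base = image_size if location == 'end' else 0
        offset = (fdtmap_pos - base) & 0xffffffff
        return IMAGE_HEADER_MAGIC + struct.pack('<I', offset)

    def decode_offset(offset, image_size, at_end):
        return image_size + offset - (1 << 32) if at_end else offset

    # fdtmap at 0x300 in a 0x1000-byte image, header placed at the end
    hdr = encode_header(0x300, 0x1000, 'end')          # stores 0xfffff300
    offset = struct.unpack('<I', hdr[4:])[0]
    assert decode_offset(offset, 0x1000, at_end=True) == 0x300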
diff --git a/tools/binman/etype/intel_descriptor.py b/tools/binman/etype/intel_descriptor.py
index 6acbbd8b7a..adea578080 100644
--- a/tools/binman/etype/intel_descriptor.py
+++ b/tools/binman/etype/intel_descriptor.py
@@ -47,17 +47,25 @@ class Entry_intel_descriptor(Entry_blob):
def __init__(self, section, etype, node):
Entry_blob.__init__(self, section, etype, node)
self._regions = []
+ if self.offset is None:
+ self.offset = self.section.GetStartOffset()
def GetOffsets(self):
offset = self.data.find(FD_SIGNATURE)
if offset == -1:
- self.Raise('Cannot find FD signature')
+ self.Raise('Cannot find Intel Flash Descriptor (FD) signature')
flvalsig, flmap0, flmap1, flmap2 = struct.unpack('<LLLL',
self.data[offset:offset + 16])
frba = ((flmap0 >> 16) & 0xff) << 4
for i in range(MAX_REGIONS):
self._regions.append(Region(self.data, frba, i))
- # Set the offset for ME only, for now, since the others are not used
- return {'intel-me': [self._regions[REGION_ME].base,
- self._regions[REGION_ME].size]}
+ # Set the offset for ME (Management Engine) and IFWI (Integrated
+ # Firmware Image), for now, since the others are not used.
+ info = {}
+ if self.HasSibling('intel-me'):
+ info['intel-me'] = [self._regions[REGION_ME].base,
+ self._regions[REGION_ME].size]
+ if self.HasSibling('intel-ifwi'):
+ info['intel-ifwi'] = [self._regions[REGION_BIOS].base, None]
+ return info
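For readers unfamiliar with the descriptor, a sketch of the parsing that GetOffsets() performs: find the signature, unpack FLMAP0 to get the Flash Region Base Address (FRBA), then read one 32-bit FLREG word per region. The FLREG bit layout shown is the commonly documented 13-bit one and is an assumption here (newer parts use wider fields); binman's Region helper, which is not part of this diff, is authoritative:

    import struct

    FD_SIGNATURE = struct.pack('<L', 0x0ff0a55a)   # assumed signature value

    def decode_regions(data, max_regions=5):
        offset = data.find(FD_SIGNATURE)
        if offset == -1:
            raise ValueError('Cannot find Intel Flash Descriptor (FD) signature')
        flvalsig, flmap0, flmap1, flmap2 = struct.unpack(
            '<LLLL', data[offset:offset + 16])
        frba = ((flmap0 >> 16) & 0xff) << 4        # Flash Region Base Address
        regions = []
        for i in range(max_regions):
            flreg = struct.unpack('<L', data[frba + i * 4:frba + i * 4 + 4])[0]
            base = (flreg & 0x1fff) << 12          # assumed FLREG bit layout
            limit = ((flreg >> 16) & 0x1fff) << 12 | 0xfff
            regions.append((base, limit - base + 1))
        return regions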
diff --git a/tools/binman/etype/intel_ifwi.py b/tools/binman/etype/intel_ifwi.py
new file mode 100644
index 0000000000..8c79b2dd29
--- /dev/null
+++ b/tools/binman/etype/intel_ifwi.py
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Integrated Firmware Image (IFWI) binary blob
+#
+
+from collections import OrderedDict
+
+from entry import Entry
+from blob import Entry_blob
+import fdt_util
+import tools
+
+class Entry_intel_ifwi(Entry_blob):
+ """Entry containing an Intel Integrated Firmware Image (IFWI) file
+
+ Properties / Entry arguments:
+ - filename: Filename of file to read into entry. This is either the
+ IFWI file itself, or a file that can be converted into one using a
+ tool
+ - convert-fit: If present this indicates that the ifwitool should be
+ used to convert the provided file into a IFWI.
+
+ This file contains code and data used by the SoC that is required to make
+ it work. It includes U-Boot TPL, microcode, things related to the CSE
+ (Converged Security Engine, the microcontroller that loads all the firmware)
+ and other items beyond the wit of man.
+
+ A typical filename is 'ifwi.bin' for an IFWI file, or 'fitimage.bin' for a
+ file that will be converted to an IFWI.
+
+ The position of this entry is generally set by the intel-descriptor entry.
+
+ The contents of the IFWI are specified by the subnodes of the IFWI node.
+ Each subnode describes an entry which is placed into the IFWI with a given
+ sub-partition (and optional entry name).
+
+ See README.x86 for information about x86 binary blobs.
+ """
+ def __init__(self, section, etype, node):
+ Entry_blob.__init__(self, section, etype, node)
+ self._convert_fit = fdt_util.GetBool(self._node, 'convert-fit')
+ self._ifwi_entries = OrderedDict()
+ self._ReadSubnodes()
+
+ def ObtainContents(self):
+ """Get the contects for the IFWI
+
+ Unfortunately we cannot create anything from scratch here, as Intel has
+ tools which create precursor binaries with lots of data and settings,
+ and these are not incorporated into binman.
+
+ The first step is to get a file in the IFWI format. This is either
+ supplied directly or is extracted from a fitimage using the 'create'
+ subcommand.
+
+ After that we delete the OBBP sub-partition and add each of the files
+ that we want in the IFWI file, one for each sub-entry of the IFWI node.
+ """
+ self._pathname = tools.GetInputFilename(self._filename)
+
+ # Create the IFWI file if needed
+ if self._convert_fit:
+ inname = self._pathname
+ outname = tools.GetOutputFilename('ifwi.bin')
+ tools.RunIfwiTool(inname, tools.CMD_CREATE, outname)
+ self._filename = 'ifwi.bin'
+ self._pathname = outname
+ else:
+ # Provide a different code path here to ensure we have test coverage
+ inname = self._pathname
+
+ # Delete OBBP if it is there, then add the required new items.
+ tools.RunIfwiTool(inname, tools.CMD_DELETE, subpart='OBBP')
+
+ for entry in self._ifwi_entries.values():
+ # First get the input data and put it in a file
+ if not entry.ObtainContents():
+ return False
+ data = entry.GetData()
+ uniq = self.GetUniqueName()
+ input_fname = tools.GetOutputFilename('input.%s' % uniq)
+ tools.WriteFile(input_fname, data)
+
+ tools.RunIfwiTool(inname,
+ tools.CMD_REPLACE if entry._ifwi_replace else tools.CMD_ADD,
+ input_fname, entry._ifwi_subpart, entry._ifwi_entry_name)
+
+ self.ReadBlobContents()
+ return True
+
+ def _ReadSubnodes(self):
+ """Read the subnodes to find out what should go in this IFWI"""
+ for node in self._node.subnodes:
+ entry = Entry.Create(self.section, node)
+ entry._ifwi_replace = fdt_util.GetBool(node, 'replace')
+ entry._ifwi_subpart = fdt_util.GetString(node, 'ifwi-subpart')
+ entry._ifwi_entry_name = fdt_util.GetString(node, 'ifwi-entry')
+ self._ifwi_entries[entry._ifwi_subpart] = entry
diff --git a/tools/binman/etype/intel_me.py b/tools/binman/etype/intel_me.py
index 247c5b3386..c932ec5222 100644
--- a/tools/binman/etype/intel_me.py
+++ b/tools/binman/etype/intel_me.py
@@ -22,6 +22,8 @@ class Entry_intel_me(Entry_blob):
A typical filename is 'me.bin'.
+ The position of this entry is generally set by the intel-descriptor entry.
+
See README.x86 for information about x86 binary blobs.
"""
def __init__(self, section, etype, node):
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py
index 3681a48468..6db3c7a6f0 100644
--- a/tools/binman/etype/section.py
+++ b/tools/binman/etype/section.py
@@ -1,59 +1,155 @@
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2018 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
-#
-# Entry-type module for sections, which are entries which can contain other
-# entries.
-#
+
+"""Entry-type module for sections (groups of entries)
+
+Sections are entries which can contain other entries. This allows hierarchical
+images to be created.
+"""
+
+from __future__ import print_function
+
+from collections import OrderedDict
+import re
+import sys
from entry import Entry
import fdt_util
import tools
-import bsection
class Entry_section(Entry):
"""Entry that contains other entries
Properties / Entry arguments: (see binman README for more information)
- - size: Size of section in bytes
- - align-size: Align size to a particular power of two
- - pad-before: Add padding before the entry
- - pad-after: Add padding after the entry
- - pad-byte: Pad byte to use when padding
- - sort-by-offset: Reorder the entries by offset
- - end-at-4gb: Used to build an x86 ROM which ends at 4GB (2^32)
- - name-prefix: Adds a prefix to the name of every entry in the section
+ pad-byte: Pad byte to use when padding
+ sort-by-offset: True if entries should be sorted by offset, False if
+ they must be in-order in the device tree description
+ end-at-4gb: Used to build an x86 ROM which ends at 4GB (2^32)
+ skip-at-start: Number of bytes before the first entry starts. These
+ effectively adjust the starting offset of entries. For example,
+ if this is 16, then the first entry would start at 16. An entry
+ with offset = 20 would in fact be written at offset 4 in the image
+ file, since the first 16 bytes are skipped when writing.
+ name-prefix: Adds a prefix to the name of every entry in the section
when writing out the map
+ Since a section is also an entry, it inherits all the properties of entries
+ too.
+
A section is an entry which can contain other entries, thus allowing
hierarchical images to be created. See 'Sections and hierarchical images'
in the binman README for more information.
"""
- def __init__(self, section, etype, node):
- Entry.__init__(self, section, etype, node)
- self._section = bsection.Section(node.name, section, node,
- section._image)
+ def __init__(self, section, etype, node, test=False):
+ if not test:
+ Entry.__init__(self, section, etype, node)
+ if section:
+ self.image = section.image
+ self._entries = OrderedDict()
+ self._pad_byte = 0
+ self._sort = False
+ self._skip_at_start = None
+ self._end_4gb = False
+ if not test:
+ self._ReadNode()
+ self._ReadEntries()
+
+ def _Raise(self, msg):
+ """Raises an error for this section
+
+ Args:
+ msg: Error message to use in the raise string
+ Raises:
+ ValueError()
+ """
+ raise ValueError("Section '%s': %s" % (self._node.path, msg))
+
+ def _ReadNode(self):
+ """Read properties from the image node"""
+ self._pad_byte = fdt_util.GetInt(self._node, 'pad-byte', 0)
+ self._sort = fdt_util.GetBool(self._node, 'sort-by-offset')
+ self._end_4gb = fdt_util.GetBool(self._node, 'end-at-4gb')
+ self._skip_at_start = fdt_util.GetInt(self._node, 'skip-at-start')
+ if self._end_4gb:
+ if not self.size:
+ self.Raise("Section size must be provided when using end-at-4gb")
+ if self._skip_at_start is not None:
+ self.Raise("Provide either 'end-at-4gb' or 'skip-at-start'")
+ else:
+ self._skip_at_start = 0x100000000 - self.size
+ else:
+ if self._skip_at_start is None:
+ self._skip_at_start = 0
+ self._name_prefix = fdt_util.GetString(self._node, 'name-prefix')
+ filename = fdt_util.GetString(self._node, 'filename')
+ if filename:
+ self._filename = filename
+
+ def _ReadEntries(self):
+ for node in self._node.subnodes:
+ if node.name == 'hash':
+ continue
+ entry = Entry.Create(self, node)
+ entry.SetPrefix(self._name_prefix)
+ self._entries[node.name] = entry
def GetFdtSet(self):
- return self._section.GetFdtSet()
+ fdt_set = set()
+ for entry in self._entries.values():
+ fdt_set.update(entry.GetFdtSet())
+ return fdt_set
def ProcessFdt(self, fdt):
- return self._section.ProcessFdt(fdt)
+ """Allow entries to adjust the device tree
+
+ Some entries need to adjust the device tree for their purposes. This
+ may involve adding or deleting properties.
+ """
+ todo = self._entries.values()
+ for passnum in range(3):
+ next_todo = []
+ for entry in todo:
+ if not entry.ProcessFdt(fdt):
+ next_todo.append(entry)
+ todo = next_todo
+ if not todo:
+ break
+ if todo:
+ self.Raise('Internal error: Could not complete processing of Fdt: remaining %s' %
+ todo)
+ return True
def ExpandEntries(self):
+ """Expand out any entries which have calculated sub-entries
+
+ Some entries are expanded out at runtime, e.g. 'files', which produces
+ a section containing a list of files. Process these entries so that
+ this information is added to the device tree.
+ """
Entry.ExpandEntries(self)
- self._section.ExpandEntries()
+ for entry in self._entries.values():
+ entry.ExpandEntries()
def AddMissingProperties(self):
+ """Add new properties to the device tree as needed for this entry"""
Entry.AddMissingProperties(self)
- self._section.AddMissingProperties()
+ for entry in self._entries.values():
+ entry.AddMissingProperties()
def ObtainContents(self):
- return self._section.GetEntryContents()
+ return self.GetEntryContents()
def GetData(self):
- return self._section.GetData()
+ section_data = tools.GetBytes(self._pad_byte, self.size)
+
+ for entry in self._entries.values():
+ data = entry.GetData()
+ base = self.pad_before + entry.offset - self._skip_at_start
+ section_data = (section_data[:base] + data +
+ section_data[base + len(data):])
+ return section_data
def GetOffsets(self):
"""Handle entries that want to set the offset/size of other entries
@@ -61,35 +157,94 @@ class Entry_section(Entry):
This calls each entry's GetOffsets() method. If it returns a list
of entries to update, it updates them.
"""
- self._section.GetEntryOffsets()
+ self.GetEntryOffsets()
return {}
+ def ResetForPack(self):
+ """Reset offset/size fields so that packing can be done again"""
+ Entry.ResetForPack(self)
+ for entry in self._entries.values():
+ entry.ResetForPack()
+
def Pack(self, offset):
"""Pack all entries into the section"""
- self._section.PackEntries()
- if self._section._offset is None:
- self._section.SetOffset(offset)
- self.size = self._section.GetSize()
- return super(Entry_section, self).Pack(offset)
+ self._PackEntries()
+ return Entry.Pack(self, offset)
- def SetImagePos(self, image_pos):
- Entry.SetImagePos(self, image_pos)
- self._section.SetImagePos(image_pos + self.offset)
+ def _PackEntries(self):
+ """Pack all entries into the image"""
+ offset = self._skip_at_start
+ for entry in self._entries.values():
+ offset = entry.Pack(offset)
+ self.size = self.CheckSize()
+
+ def _ExpandEntries(self):
+ """Expand any entries that are permitted to"""
+ exp_entry = None
+ for entry in self._entries.values():
+ if exp_entry:
+ exp_entry.ExpandToLimit(entry.offset)
+ exp_entry = None
+ if entry.expand_size:
+ exp_entry = entry
+ if exp_entry:
+ exp_entry.ExpandToLimit(self.size)
+
+ def _SortEntries(self):
+ """Sort entries by offset"""
+ entries = sorted(self._entries.values(), key=lambda entry: entry.offset)
+ self._entries.clear()
+ for entry in entries:
+ self._entries[entry._node.name] = entry
+
+ def CheckEntries(self):
+ """Check that entries do not overlap or extend outside the image"""
+ if self._sort:
+ self._SortEntries()
+ self._ExpandEntries()
+ offset = 0
+ prev_name = 'None'
+ for entry in self._entries.values():
+ entry.CheckOffset()
+ if (entry.offset < self._skip_at_start or
+ entry.offset + entry.size > self._skip_at_start +
+ self.size):
+ entry.Raise("Offset %#x (%d) is outside the section starting "
+ "at %#x (%d)" %
+ (entry.offset, entry.offset, self._skip_at_start,
+ self._skip_at_start))
+ if entry.offset < offset:
+ entry.Raise("Offset %#x (%d) overlaps with previous entry '%s' "
+ "ending at %#x (%d)" %
+ (entry.offset, entry.offset, prev_name, offset, offset))
+ offset = entry.offset + entry.size
+ prev_name = entry.GetPath()
def WriteSymbols(self, section):
"""Write symbol values into binary files for access at run time"""
- self._section.WriteSymbols()
+ for entry in self._entries.values():
+ entry.WriteSymbols(self)
def SetCalculatedProperties(self):
Entry.SetCalculatedProperties(self)
- self._section.SetCalculatedProperties()
+ for entry in self._entries.values():
+ entry.SetCalculatedProperties()
+
+ def SetImagePos(self, image_pos):
+ Entry.SetImagePos(self, image_pos)
+ for entry in self._entries.values():
+ entry.SetImagePos(image_pos + self.offset)
def ProcessContents(self):
- self._section.ProcessEntryContents()
- super(Entry_section, self).ProcessContents()
+ sizes_ok_base = super(Entry_section, self).ProcessContents()
+ sizes_ok = True
+ for entry in self._entries.values():
+ if not entry.ProcessContents():
+ sizes_ok = False
+ return sizes_ok and sizes_ok_base
def CheckOffset(self):
- self._section.CheckEntries()
+ self.CheckEntries()
def WriteMap(self, fd, indent):
"""Write a map of the section to a .map file
@@ -97,11 +252,211 @@ class Entry_section(Entry):
Args:
fd: File to write the map to
"""
- self._section.WriteMap(fd, indent)
+ Entry.WriteMapLine(fd, indent, self.name, self.offset or 0,
+ self.size, self.image_pos)
+ for entry in self._entries.values():
+ entry.WriteMap(fd, indent + 1)
def GetEntries(self):
- return self._section.GetEntries()
+ return self._entries
+
+ def GetContentsByPhandle(self, phandle, source_entry):
+ """Get the data contents of an entry specified by a phandle
+
+ This uses a phandle to look up a node and find the entry
+ associated with it. Then it returns the contents of that entry.
+
+ Args:
+ phandle: Phandle to look up (integer)
+ source_entry: Entry containing that phandle (used for error
+ reporting)
+
+ Returns:
+ data from associated entry (as a string), or None if not found
+ """
+ node = self._node.GetFdt().LookupPhandle(phandle)
+ if not node:
+ source_entry.Raise("Cannot find node for phandle %d" % phandle)
+ for entry in self._entries.values():
+ if entry._node == node:
+ return entry.GetData()
+ source_entry.Raise("Cannot find entry for node '%s'" % node.name)
+
+ def LookupSymbol(self, sym_name, optional, msg):
+ """Look up a symbol in an ELF file
+
+ Looks up a symbol in an ELF file. Only entry types which come from an
+ ELF image can be used by this function.
+
+ At present the only entry property supported is offset.
+
+ Args:
+ sym_name: Symbol name in the ELF file to look up in the format
+ _binman_<entry>_prop_<property> where <entry> is the name of
+ the entry and <property> is the property to find (e.g.
+ _binman_u_boot_prop_offset). As a special case, you can append
+ _any to <entry> to have it search for any matching entry. E.g.
+ _binman_u_boot_any_prop_offset will match entries called u-boot,
+ u-boot-img and u-boot-nodtb.
+ optional: True if the symbol is optional. If False this function
+ will raise if the symbol is not found
+ msg: Message to display if an error occurs
+
+ Returns:
+ Value that should be assigned to that symbol, or None if it was
+ optional and not found
+
+ Raises:
+ ValueError if the symbol is invalid or not found, or references a
+ property which is not supported
+ """
+ m = re.match(r'^_binman_(\w+)_prop_(\w+)$', sym_name)
+ if not m:
+ raise ValueError("%s: Symbol '%s' has invalid format" %
+ (msg, sym_name))
+ entry_name, prop_name = m.groups()
+ entry_name = entry_name.replace('_', '-')
+ entry = self._entries.get(entry_name)
+ if not entry:
+ if entry_name.endswith('-any'):
+ root = entry_name[:-4]
+ for name in self._entries:
+ if name.startswith(root):
+ rest = name[len(root):]
+ if rest in ['', '-img', '-nodtb']:
+ entry = self._entries[name]
+ if not entry:
+ err = ("%s: Entry '%s' not found in list (%s)" %
+ (msg, entry_name, ','.join(self._entries.keys())))
+ if optional:
+ print('Warning: %s' % err, file=sys.stderr)
+ return None
+ raise ValueError(err)
+ if prop_name == 'offset':
+ return entry.offset
+ elif prop_name == 'image_pos':
+ return entry.image_pos
+ else:
+ raise ValueError("%s: No such property '%s'" % (msg, prop_name))
+
+ def GetRootSkipAtStart(self):
+ """Get the skip-at-start value for the top-level section
+
+ This is used to find out the starting offset for the root section that
+ contains this section. If this is a top-level section then it returns
+ the skip-at-start offset for this section.
+
+ This is used to get the absolute position of a section within the image.
+
+ Returns:
+ Integer skip-at-start value for the root section containing this
+ section
+ """
+ if self.section:
+ return self.section.GetRootSkipAtStart()
+ return self._skip_at_start
+
+ def GetStartOffset(self):
+ """Get the start offset for this section
+
+ Returns:
+ The first available offset in this section (typically 0)
+ """
+ return self._skip_at_start
+
+ def GetImageSize(self):
+ """Get the size of the image containing this section
+
+ Returns:
+ Image size as an integer number of bytes, which may be None if the
+ image size is dynamic and its sections have not yet been packed
+ """
+ return self.image.size
+
+ def FindEntryType(self, etype):
+ """Find an entry type in the section
+
+ Args:
+ etype: Entry type to find
+ Returns:
+ entry matching that type, or None if not found
+ """
+ for entry in self._entries.values():
+ if entry.etype == etype:
+ return entry
+ return None
+
+ def GetEntryContents(self):
+ """Call ObtainContents() for the section
+ """
+ todo = self._entries.values()
+ for passnum in range(3):
+ next_todo = []
+ for entry in todo:
+ if not entry.ObtainContents():
+ next_todo.append(entry)
+ todo = next_todo
+ if not todo:
+ break
+ if todo:
+ self.Raise('Internal error: Could not complete processing of contents: remaining %s' %
+ todo)
+ return True
+
+ def _SetEntryOffsetSize(self, name, offset, size):
+ """Set the offset and size of an entry
+
+ Args:
+ name: Entry name to update
+ offset: New offset, or None to leave alone
+ size: New size, or None to leave alone
+ """
+ entry = self._entries.get(name)
+ if not entry:
+ self._Raise("Unable to set offset/size for unknown entry '%s'" %
+ name)
+ entry.SetOffsetSize(self._skip_at_start + offset if offset else None,
+ size)
+
+ def GetEntryOffsets(self):
+ """Handle entries that want to set the offset/size of other entries
+
+ This calls each entry's GetOffsets() method. If it returns a list
+ of entries to update, it updates them.
+ """
+ for entry in self._entries.values():
+ offset_dict = entry.GetOffsets()
+ for name, info in offset_dict.items():
+ self._SetEntryOffsetSize(name, *info)
+
+
+ def CheckSize(self):
+ """Check that the image contents does not exceed its size, etc."""
+ contents_size = 0
+ for entry in self._entries.values():
+ contents_size = max(contents_size, entry.offset + entry.size)
+
+ contents_size -= self._skip_at_start
+
+ size = self.size
+ if not size:
+ size = self.pad_before + contents_size + self.pad_after
+ size = tools.Align(size, self.align_size)
+
+ if self.size and contents_size > self.size:
+ self._Raise("contents size %#x (%d) exceeds section size %#x (%d)" %
+ (contents_size, contents_size, self.size, self.size))
+ if not self.size:
+ self.size = size
+ if self.size != tools.Align(self.size, self.align_size):
+ self._Raise("Size %#x (%d) does not match align-size %#x (%d)" %
+ (self.size, self.size, self.align_size,
+ self.align_size))
+ return size
- def ExpandToLimit(self, limit):
- super(Entry_section, self).ExpandToLimit(limit)
- self._section.ExpandSize(self.size)
+ def ListEntries(self, entries, indent):
+ """List the files in the section"""
+ Entry.AddEntryInfo(entries, indent, self.name, 'section', self.size,
+ self.image_pos, None, self.offset, self)
+ for entry in self._entries.values():
+ entry.ListEntries(entries, indent + 1)
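The end-at-4gb / skip-at-start arithmetic above deserves a tiny worked example, since it is what lets x86 entries be described by their memory-mapped addresses while being written near the start of the output file (GetData() subtracts _skip_at_start). A sketch, assuming a 16MB section:

    size = 0x1000000                      # 16MB section with end-at-4gb
    skip_at_start = 0x100000000 - size    # 0xff000000
    entry_offset = 0xffff0000             # e.g. a 64KB blob containing the x86 reset vector
    file_pos = entry_offset - skip_at_start
    assert file_pos == 0xff0000           # written 64KB before the end of the file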
diff --git a/tools/binman/etype/text.py b/tools/binman/etype/text.py
index 9ee04d7c9d..da1813a638 100644
--- a/tools/binman/etype/text.py
+++ b/tools/binman/etype/text.py
@@ -22,6 +22,8 @@ class Entry_text(Entry):
that contains the string to place in the entry
<xxx> (actual name is the value of text-label): contains the string to
place in the entry.
+ <text>: The text to place in the entry (overrides the above mechanism).
+ This is useful when the text is constant.
Example node:
@@ -44,15 +46,28 @@ class Entry_text(Entry):
message = "a message directly in the node"
};
+ or just:
+
+ text {
+ size = <8>;
+ text = "some text directly in the node"
+ };
+
The text is not itself nul-terminated. This can be achieved, if required,
by setting the size of the entry to something larger than the text.
"""
def __init__(self, section, etype, node):
Entry.__init__(self, section, etype, node)
- label, = self.GetEntryArgsOrProps([EntryArg('text-label', str)])
- self.text_label = tools.ToStr(label) if type(label) != str else label
- value, = self.GetEntryArgsOrProps([EntryArg(self.text_label, str)])
- value = tools.ToBytes(value) if value is not None else value
+ value = fdt_util.GetString(self._node, 'text')
+ if value:
+ value = tools.ToBytes(value)
+ else:
+ label, = self.GetEntryArgsOrProps([EntryArg('text-label', str)])
+ self.text_label = label
+ if self.text_label:
+ value, = self.GetEntryArgsOrProps([EntryArg(self.text_label,
+ str)])
+ value = tools.ToBytes(value) if value is not None else value
self.value = value
def ObtainContents(self):
diff --git a/tools/binman/etype/u_boot_spl_elf.py b/tools/binman/etype/u_boot_spl_elf.py
index da328ae15e..24ee77237e 100644
--- a/tools/binman/etype/u_boot_spl_elf.py
+++ b/tools/binman/etype/u_boot_spl_elf.py
@@ -12,7 +12,7 @@ class Entry_u_boot_spl_elf(Entry_blob):
"""U-Boot SPL ELF image
Properties / Entry arguments:
- - filename: Filename of SPL u-boot (default 'spl/u-boot')
+ - filename: Filename of SPL u-boot (default 'spl/u-boot-spl')
This is the U-Boot SPL ELF image. It does not include a device tree but can
be relocated to any address for execution.
diff --git a/tools/binman/etype/u_boot_tpl_elf.py b/tools/binman/etype/u_boot_tpl_elf.py
new file mode 100644
index 0000000000..9cc1cc2c45
--- /dev/null
+++ b/tools/binman/etype/u_boot_tpl_elf.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot TPL ELF image
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_u_boot_tpl_elf(Entry_blob):
+ """U-Boot TPL ELF image
+
+ Properties / Entry arguments:
+ - filename: Filename of TPL u-boot (default 'tpl/u-boot-tpl')
+
+ This is the U-Boot TPL ELF image. It does not include a device tree but can
+ be relocated to any address for execution.
+ """
+ def __init__(self, section, etype, node):
+ Entry_blob.__init__(self, section, etype, node)
+
+ def GetDefaultFilename(self):
+ return 'tpl/u-boot-tpl'
diff --git a/tools/binman/etype/u_boot_with_ucode_ptr.py b/tools/binman/etype/u_boot_with_ucode_ptr.py
index da0e12417b..cb7dbc68db 100644
--- a/tools/binman/etype/u_boot_with_ucode_ptr.py
+++ b/tools/binman/etype/u_boot_with_ucode_ptr.py
@@ -49,7 +49,7 @@ class Entry_u_boot_with_ucode_ptr(Entry_blob):
def ProcessContents(self):
# If the image does not need microcode, there is nothing to do
if not self.target_offset:
- return
+ return True
# Get the offset of the microcode
ucode_entry = self.section.FindEntryType('u-boot-ucode')
@@ -91,6 +91,6 @@ class Entry_u_boot_with_ucode_ptr(Entry_blob):
# Write the microcode offset and size into the entry
offset_and_size = struct.pack('<2L', offset, size)
self.target_offset -= self.image_pos
- self.ProcessContentsUpdate(self.data[:self.target_offset] +
- offset_and_size +
- self.data[self.target_offset + 8:])
+ return self.ProcessContentsUpdate(self.data[:self.target_offset] +
+ offset_and_size +
+ self.data[self.target_offset + 8:])
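Finally, the splice performed above is simple enough to show in isolation: the 8 bytes at target_offset inside the U-Boot binary are replaced with the microcode offset and size, packed as two little-endian 32-bit words. A minimal sketch (the example numbers are arbitrary):

    import struct

    def patch_ucode_ptr(data, target_offset, ucode_offset, ucode_size):
        offset_and_size = struct.pack('<2L', ucode_offset, ucode_size)
        return data[:target_offset] + offset_and_size + data[target_offset + 8:]

    blob = bytes(32)
    patched = patch_ucode_ptr(blob, 16, 0xfff0e000, 0x1680)
    assert len(patched) == len(blob)
    assert struct.unpack_from('<2L', patched, 16) == (0xfff0e000, 0x1680)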