path: root/tools/binman/etype/blob.py
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2016 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# Entry-type module for blobs, which are binary objects read from files
#

from entry import Entry
import fdt_util
import state
import tools

class Entry_blob(Entry):
    """Entry containing an arbitrary binary blob

    Note: This should not be used by itself. It is normally used as a parent
    class by other entry types.

    Properties / Entry arguments:
        - filename: Filename of file to read into entry
        - compress: Compression algorithm to use:
            none: No compression
            lz4: Use lz4 compression (via 'lz4' command-line utility)

    This entry reads data from a file and places it in the entry. The
    default filename is often specified by the subclass. See, for example,
    the 'u_boot' entry, which provides the filename 'u-boot.bin'.

    If compression is enabled, an extra 'uncomp-size' property is written to
    the node (if enabled with -u) which provides the uncompressed size of the
    data.
    """
    def __init__(self, section, etype, node):
        Entry.__init__(self, section, etype, node)
        self._filename = fdt_util.GetString(self._node, 'filename', self.etype)
        self.compress = fdt_util.GetString(self._node, 'compress', 'none')

    def ObtainContents(self):
        self._filename = self.GetDefaultFilename()
        self._pathname = tools.GetInputFilename(self._filename)
        self.ReadBlobContents()
        return True

    def ReadBlobContents(self):
        # We assume the data is small enough to fit into memory. If this
        # is used for a large filesystem image, that might not be true.
        # In that case, Image.BuildImage() could be adjusted to use a
        # new Entry method which can read in chunks. Then we could copy
        # the data in chunks and avoid reading it all at once. For now
        # this seems like an unnecessary complication.
        indata = tools.ReadFile(self._pathname)
        if self.compress != 'none':
            self.uncomp_size = len(indata)
        data = tools.Compress(indata, self.compress)
        self.SetContents(data)
        return True

    def GetDefaultFilename(self):
        return self._filename
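

# A minimal sketch (not part of the original file) of how an entry type might
# subclass Entry_blob, modelled loosely on the 'u_boot' entry mentioned in the
# docstring above. The class name and filename here are illustrative
# assumptions, not the actual u_boot implementation.
class Entry_example_blob(Entry_blob):
    """Example entry that reads a fixed binary file into the image"""
    def __init__(self, section, etype, node):
        Entry_blob.__init__(self, section, etype, node)

    def GetDefaultFilename(self):
        # Supply the filename to read when the node does not specify one
        return 'example.bin'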