Diffstat (limited to 'tools/binman/etype/blob.py')
-rw-r--r-- | tools/binman/etype/blob.py | 37
1 files changed, 37 insertions, 0 deletions
diff --git a/tools/binman/etype/blob.py b/tools/binman/etype/blob.py
new file mode 100644
index 0000000000..def21640b5
--- /dev/null
+++ b/tools/binman/etype/blob.py
@@ -0,0 +1,37 @@
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Entry-type module for blobs, which are binary objects read from files
+#
+
+from entry import Entry
+import fdt_util
+import tools
+
+class Entry_blob(Entry):
+    def __init__(self, image, etype, node):
+        Entry.__init__(self, image, etype, node)
+        self._filename = fdt_util.GetString(self._node, "filename", self.etype)
+
+    def ObtainContents(self):
+        self._filename = self.GetDefaultFilename()
+        self._pathname = tools.GetInputFilename(self._filename)
+        self.ReadContents()
+        return True
+
+    def ReadContents(self):
+        with open(self._pathname) as fd:
+            # We assume the data is small enough to fit into memory. If this
+            # is used for large filesystem image that might not be true.
+            # In that case, Image.BuildImage() could be adjusted to use a
+            # new Entry method which can read in chunks. Then we could copy
+            # the data in chunks and avoid reading it all at once. For now
+            # this seems like an unnecessary complication.
+            self.data = fd.read()
+            self.contents_size = len(self.data)
+        return True
+
+    def GetDefaultFilename(self):
+        return self._filename
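
The comment in ReadContents() notes that very large blobs would not fit this read-everything-at-once approach and that a chunked read could be added later. The following is a minimal sketch of that idea only; copy_in_chunks() and CHUNK_SIZE are hypothetical names for illustration and are not part of this commit or of binman's API.

# Hypothetical sketch, not part of this commit: stream a blob into an
# already-open output file in fixed-size chunks instead of reading the
# whole file into memory, as the ReadContents() comment suggests.

CHUNK_SIZE = 1024 * 1024  # 1 MiB per read; an arbitrary illustrative value

def copy_in_chunks(src_pathname, dst_fd):
    """Copy a blob into a writable binary file object chunk by chunk.

    Args:
        src_pathname: Path of the input blob file.
        dst_fd: A writable, binary-mode file object for the output image.

    Returns:
        Number of bytes copied (a caller could use this as contents_size).
    """
    total = 0
    with open(src_pathname, 'rb') as fd:
        while True:
            chunk = fd.read(CHUNK_SIZE)
            if not chunk:
                break
            dst_fd.write(chunk)
            total += len(chunk)
    return total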