Diffstat (limited to 'tools/binman/etype/blob.py')
-rw-r--r--  tools/binman/etype/blob.py  59
1 file changed, 31 insertions(+), 28 deletions(-)
diff --git a/tools/binman/etype/blob.py b/tools/binman/etype/blob.py
index f56a1f8768..00cad33718 100644
--- a/tools/binman/etype/blob.py
+++ b/tools/binman/etype/blob.py
@@ -9,6 +9,7 @@ from entry import Entry
 import fdt_util
 import state
 import tools
+import tout

 class Entry_blob(Entry):
     """Entry containing an arbitrary binary blob
@@ -33,8 +34,7 @@ class Entry_blob(Entry):
     def __init__(self, section, etype, node):
         Entry.__init__(self, section, etype, node)
         self._filename = fdt_util.GetString(self._node, 'filename', self.etype)
-        self._compress = fdt_util.GetString(self._node, 'compress', 'none')
-        self._uncompressed_size = None
+        self.compress = fdt_util.GetString(self._node, 'compress', 'none')

     def ObtainContents(self):
         self._filename = self.GetDefaultFilename()
@@ -42,37 +42,40 @@ class Entry_blob(Entry):
         self.ReadBlobContents()
         return True

+    def CompressData(self, indata):
+        if self.compress != 'none':
+            self.uncomp_size = len(indata)
+        data = tools.Compress(indata, self.compress)
+        return data
+
     def ReadBlobContents(self):
-        # We assume the data is small enough to fit into memory. If this
-        # is used for a large filesystem image, that might not be true.
-        # In that case, Image.BuildImage() could be adjusted to use a
-        # new Entry method which can read in chunks. Then we could copy
-        # the data in chunks and avoid reading it all at once. For now
-        # this seems like an unnecessary complication.
-        data = tools.ReadFile(self._pathname)
-        if self._compress == 'lz4':
-            self._uncompressed_size = len(data)
-            '''
-            import lz4  # Import this only if needed (python-lz4 dependency)
+        """Read blob contents into memory
+
+        This function compresses the data before storing it, if needed.
-            try:
-                data = lz4.frame.compress(data)
-            except AttributeError:
-                data = lz4.compress(data)
-            '''
-            data = tools.Run('lz4', '-c', self._pathname, binary=True)
+
+        We assume the data is small enough to fit into memory. If this
+        is used for a large filesystem image, that might not be true.
+        In that case, Image.BuildImage() could be adjusted to use a
+        new Entry method which can read in chunks. Then we could copy
+        the data in chunks and avoid reading it all at once. For now
+        this seems like an unnecessary complication.
+        """
+        indata = tools.ReadFile(self._pathname)
+        data = self.CompressData(indata)
         self.SetContents(data)
         return True

     def GetDefaultFilename(self):
         return self._filename

-    def AddMissingProperties(self):
-        Entry.AddMissingProperties(self)
-        if self._compress != 'none':
-            state.AddZeroProp(self._node, 'uncomp-size')
-
-    def SetCalculatedProperties(self):
-        Entry.SetCalculatedProperties(self)
-        if self._uncompressed_size is not None:
-            state.SetInt(self._node, 'uncomp-size', self._uncompressed_size)
+    def ReadData(self, decomp=True):
+        indata = Entry.ReadData(self, decomp)
+        if decomp:
+            data = tools.Decompress(indata, self.compress)
+            if self.uncomp_size:
+                tout.Info("%s: Decompressing data size %#x with algo '%s' to data size %#x" %
+                          (self.GetPath(), len(indata), self.compress,
+                           len(data)))
+        else:
+            data = indata
+        return data
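
Note: the change introduces a compress/decompress round trip. CompressData() shrinks the blob on the way in and records uncomp_size (used elsewhere to fill the 'uncomp-size' device-tree property), while ReadData() reverses it when entry data is read back, logging the size change via tout. The following is a minimal standalone sketch of that round trip, assuming the host 'lz4' binary is installed. The compress_lz4()/decompress_lz4() helper names are hypothetical stand-ins for binman's tools.Compress()/tools.Decompress(), which invoke a host tool in much the same way as the removed tools.Run('lz4', '-c', ...) call did.

    # Hypothetical sketch of the compress/decompress round trip; binman
    # itself goes through tools.Compress()/tools.Decompress().
    import subprocess

    def compress_lz4(indata):
        # 'lz4 -c' reads stdin and writes the compressed stream to stdout
        return subprocess.run(['lz4', '-c'], input=indata,
                              stdout=subprocess.PIPE, check=True).stdout

    def decompress_lz4(indata):
        # '-d' selects decompression; '-c' again writes to stdout
        return subprocess.run(['lz4', '-dc'], input=indata,
                              stdout=subprocess.PIPE, check=True).stdout

    original = b'binman blob contents ' * 100
    packed = compress_lz4(original)
    assert decompress_lz4(packed) == original
    # len(original) corresponds to uncomp_size, i.e. the 'uncomp-size' property
    print("uncomp-size %#x, compressed size %#x" % (len(original), len(packed)))

Recording uncomp_size only when compression is actually enabled matches the removed AddMissingProperties() logic, which only created the 'uncomp-size' property for compressed entries.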