about summary refs log tree commit diff
diff options
context:
space:
mode:
author    Matthew Schauer <matthew.schauer@e10x.net>  2020-09-04 15:33:11 -0700
committer Matthew Schauer <matthew.schauer@e10x.net>  2020-09-04 15:33:11 -0700
commit    d6b836488f723d3c9d27316d5d8e328ef1cc09ab (patch)
tree      3bf5d88ba66f1d4d1bc83ec113cc84b80b7131a2
parent    713039e79821259fd7087a9d960675b5186d77de (diff)
Fix export table reader
-rw-r--r--  src/read.rs   22
-rw-r--r--  src/write.rs  52
2 files changed, 3 insertions(+), 71 deletions(-)
diff --git a/src/read.rs b/src/read.rs
index 1108f66..7352b36 100644
--- a/src/read.rs
+++ b/src/read.rs
@@ -546,15 +546,15 @@ impl Archive {
enoent_ok(self.get_exists(path))
}
- pub fn get_id(&self, id: u64) -> Result<Node> { // TODO Return Result<Option<Node>> here as well
+ pub fn get_id(&self, id: u64) -> Result<Node> {
if self.superblock.flags & SQFS_SUPER_FLAGS_SQFS_FLAG_EXPORTABLE as u16 == 0 { Err(SquashfsError::Unsupported("inode indexing".to_string()))?; }
if id <= 0 || id > self.superblock.inode_count as u64 { Err(SquashfsError::Range(id, self.superblock.inode_count as u64))? }
let compressor = self.compressor()?;
let export_reader = self.meta_reader(&compressor, None)?; // TODO Would be nice if we could set bounds for this
- let (block, offset) = ((id - 1) * 8 / self.superblock.block_size as u64, (id - 1) * 8 % self.superblock.block_size as u64);
+ let (block, offset) = ((id - 1) / 1024, (id - 1) % 1024 * 8);
let block_start: u64 = sfs_init(&|x| unsafe {
let read_at = (**self.file).read_at.expect("File object does not implement read_at");
- read_at(*self.file, self.superblock.export_table_start + block, x as *mut libc::c_void, 8)
+ read_at(*self.file, self.superblock.export_table_start + block * 8, x as *mut libc::c_void, 8)
}, "Couldn't read inode table")?;
let mut noderef: u64 = 0;
@@ -573,22 +573,6 @@ impl Archive {
let map = &self.mmap.1;
unsafe { std::slice::from_raw_parts(map.data().offset(start as isize), len) }
}
-
- /*pub fn names_from_dirent_refs(&mut self, dirent_refs: &[u64]) -> Result<Vec<String>> {
- let compressor = self.compressor()?;
- let meta_reader = self.meta_reader(&compressor, None)?; // TODO Set bounds
- let mut ret = Vec::with_capacity(dirent_refs.len());
- for dirent_ref in dirent_refs {
- let (block, offset) = unpack_meta_ref(*dirent_ref);
- unsafe { sfs_check(sqfs_meta_reader_seek(*meta_reader, block, offset), "Couldn't seek to directory entry")?; }
- let entry = sfs_init_ptr(&|x| unsafe {
- sqfs_meta_reader_read_dir_ent(*meta_reader, x)
- }, "Couldn't read directory entry by reference", libc_free)?;
- let name_bytes = unsafe { (**entry).name.as_slice((**entry).size as usize + 1) };
- ret.push(String::from_utf8(name_bytes.to_vec())?);
- }
- Ok(ret)
- }*/
}
unsafe impl Send for Archive { }
diff --git a/src/write.rs b/src/write.rs
index 096bc5e..2eb7984 100644
--- a/src/write.rs
+++ b/src/write.rs
@@ -350,58 +350,6 @@ impl Writer {
}
Ok(())
}
-
- /*pub fn add_tree<P: AsRef<Path>>(&mut self, root: P, callback: &mut FnMut(SourceFile) -> std::result::Result<Vec<SourceFile>, BoxedError>) -> Result<()> {
- let mut childmap: HashMap<PathBuf, BTreeMap<OsString, u32>> = HashMap::new();
- for step in WalkDir::new(root.as_ref()).follow_links(false).contents_first(true) {
- match step {
- Ok(entry) => {
- // TODO Consider adding Unix-specific functionality with graceful degradation
- // TODO Catch all errors except from add() and continue
- let metadata = entry.metadata().unwrap();
- let mtime = metadata.modified()?.duration_since(SystemTime::UNIX_EPOCH)?.as_secs() as u32;
- let data = if metadata.file_type().is_dir() {
- SourceData::Dir(Box::new(childmap.remove(&entry.path().to_path_buf()).unwrap_or(BTreeMap::new()).into_iter()))
- }
- else if metadata.file_type().is_file() {
- SourceData::File(Box::new(std::fs::File::open(entry.path())?))
- }
- else if metadata.file_type().is_symlink() {
- SourceData::Symlink(std::fs::read_link(entry.path())?.into_os_string())
- }
- else {
- Err(SquashfsError::WriteType(metadata.file_type()))?;
- unreachable!();
- };
- let candidate = if cfg!(linux) {
- use std::os::linux::fs::MetadataExt;
- Source { data: data, xattrs: file_xattrs(entry.path())?, uid: metadata.st_uid(), gid: metadata.st_gid(), mode: (metadata.st_mode() & !S_IFMT) as u16, modified: mtime, flags: 0 }
- }
- else {
- Source { data: data, xattrs: HashMap::new(), uid: 0, gid: 0, mode: 0x1ff, modified: mtime, flags: 0 }
- };
- let candidate_file = SourceFile { path: entry.path().to_path_buf(), content: candidate };
- for mut result in callback(candidate_file).map_err(|e| SquashfsError::WrappedError(e))? {
- if let SourceData::Dir(children) = &mut result.content.data {
- let mut new_children = childmap.remove(&result.path).unwrap_or(BTreeMap::new());
- new_children.extend(children);
- result.content.data = SourceData::Dir(Box::new(new_children.into_iter()));
- }
- let id = self.add(result.content)?;
- if let Some(parent) = result.path.parent() {
- childmap.entry(parent.to_path_buf()).or_insert(BTreeMap::new()).insert(result.path.file_name().unwrap().to_os_string(), id);
- }
- println!("{}: {}", id, result.path.display());
- }
- },
- Err(e) => {
- let path = e.path().map(|x| x.to_string_lossy().into_owned()).unwrap_or("(unknown)".to_string());
- eprintln!("Not processing {}: {}", path, e.to_string());
- }
- }
- }
- Ok(())
- }*/
}
pub struct TreeProcessor {