aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatthew Schauer <matthew.schauer@e10x.net>2020-08-22 09:28:03 -0700
committerMatthew Schauer <matthew.schauer@e10x.net>2020-08-22 09:28:03 -0700
commit993aecbec301646dd0c7e22a98dfc808957d23c2 (patch)
tree7d585e9c751032a4e71f010e1057cb893910bc38
parent04bb366c625eaaa2415413a8aad5aada07e008a2 (diff)
Clean up and add directory tree processor
-rw-r--r--Cargo.toml2
-rw-r--r--src/lib.rs47
-rw-r--r--src/read.rs144
-rw-r--r--src/write.rs269
4 files changed, 301 insertions, 161 deletions
diff --git a/Cargo.toml b/Cargo.toml
index 0ccd44a..d01eb57 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -8,6 +8,7 @@ edition = "2018"
bindgen = "0.53.1"
[dependencies]
+lazy_static = "1.4"
libc = "0.2"
mmap = "0.1"
num-traits = "0.2"
@@ -15,3 +16,4 @@ num-derive = "0.3"
owning_ref = "0.4"
thiserror = "1.0"
walkdir = "2.3"
+xattr = "0.2"
diff --git a/src/lib.rs b/src/lib.rs
index bd975e0..5769b54 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,11 +1,12 @@
+#[macro_use] extern crate lazy_static;
extern crate libc;
extern crate mmap;
extern crate num_derive;
extern crate num_traits;
extern crate owning_ref;
extern crate walkdir;
+extern crate xattr;
-// TODO Use AsRef<Path> rather than Path for public interfaces
use std::mem::MaybeUninit;
use std::ffi::{OsStr, OsString};
use std::path::PathBuf;
@@ -26,6 +27,8 @@ mod bindings {
pub mod read;
pub mod write;
+type BoxedError = Box<dyn std::error::Error + std::marker::Send + std::marker::Sync>;
+
#[derive(Error, Debug, FromPrimitive)]
#[repr(i32)]
pub enum LibError {
@@ -68,6 +71,9 @@ pub enum SquashfsError {
#[error("Memory mapping failed: {0}")] Mmap(#[from] mmap::MapError),
#[error("Couldn't get the current system time: {0}")] Time(#[from] std::time::SystemTimeError),
#[error("Refusing to create empty archive")] Empty,
+ #[error("Tried to write directory {0} before child {1}")] WriteOrder(u32, u32),
+ #[error("Tried to write unknown or unsupported file type")] WriteType(std::fs::FileType),
+ #[error("Callback returned an error")] WrappedError(BoxedError),
}
type Result<T> = std::result::Result<T, SquashfsError>;
@@ -82,26 +88,6 @@ fn sfs_check(code: i32, desc: &str) -> Result<i32> {
}
}
-// TODO Make these three funtions return ManagedPointer rather than requiring the caller to do the wrappning
-fn sfs_init<T>(init: &dyn Fn(*mut T) -> i32, err: &str) -> Result<T> {
- let mut ret: MaybeUninit<T> = MaybeUninit::uninit();
- sfs_check(init(ret.as_mut_ptr()), err)?;
- Ok(unsafe { ret.assume_init() })
-}
-
-fn sfs_init_ptr<T>(init: &dyn Fn(*mut *mut T) -> i32, err: &str) -> Result<*mut T> {
- let mut ret: *mut T = ptr::null_mut();
- sfs_check(init(&mut ret), err)?;
- if ret.is_null() { Err(SquashfsError::LibraryReturnError(err.to_string())) }
- else { Ok(ret) }
-}
-
-fn sfs_init_check_null<T>(init: &dyn Fn() -> *mut T, err: &str) -> Result<*mut T> {
- let ret = init();
- if ret.is_null() { Err(SquashfsError::LibraryNullError(err.to_string())) }
- else { Ok(ret) }
-}
-
fn sfs_destroy<T>(x: *mut T) {
unsafe {
let obj = x as *mut sqfs_object_t;
@@ -175,3 +161,22 @@ impl<T> std::fmt::Debug for ManagedPointer<T> {
write!(f, "ManagedPointer({:?})", self.ptr)
}
}
+
+fn sfs_init<T>(init: &dyn Fn(*mut T) -> i32, err: &str) -> Result<T> {
+ let mut ret: MaybeUninit<T> = MaybeUninit::uninit();
+ sfs_check(init(ret.as_mut_ptr()), err)?;
+ Ok(unsafe { ret.assume_init() })
+}
+
+fn sfs_init_ptr<T>(init: &dyn Fn(*mut *mut T) -> i32, err: &str, destroy: fn(*mut T)) -> Result<ManagedPointer<T>> {
+ let mut ret: *mut T = ptr::null_mut();
+ sfs_check(init(&mut ret), err)?;
+ if ret.is_null() { Err(SquashfsError::LibraryReturnError(err.to_string())) }
+ else { Ok(ManagedPointer::new(ret, destroy)) }
+}
+
+fn sfs_init_check_null<T>(init: &dyn Fn() -> *mut T, err: &str, destroy: fn(*mut T)) -> Result<ManagedPointer<T>> {
+ let ret = init();
+ if ret.is_null() { Err(SquashfsError::LibraryNullError(err.to_string())) }
+ else { Ok(ManagedPointer::new(ret, destroy)) }
+}
diff --git a/src/read.rs b/src/read.rs
index 6ba57f4..1108f66 100644
--- a/src/read.rs
+++ b/src/read.rs
@@ -16,7 +16,7 @@ fn dumb_canonicalize(path: &Path) -> PathBuf {
let mut ret = PathBuf::new();
for part in path.components() {
match part {
- Component::Prefix(_) => panic!("What is this, Windows?"),
+ Component::Prefix(_) => panic!("What is this, Windows?"), // TODO
Component::CurDir => (),
Component::RootDir => ret.clear(),
Component::ParentDir => { ret.pop(); },
@@ -26,6 +26,15 @@ fn dumb_canonicalize(path: &Path) -> PathBuf {
ret
}
+// Pass errors through, but convert missing file errors to None
+fn enoent_ok<T>(t: Result<T>) -> Result<Option<T>> {
+ match t {
+ Ok(ret) => Ok(Some(ret)),
+ Err(SquashfsError::LibraryError(_, LibError::NoEntry)) => Ok(None),
+ Err(e) => Err(e),
+ }
+}
+
#[derive(Debug)]
pub struct Dir<'a> {
node: &'a Node<'a>,
@@ -36,9 +45,9 @@ pub struct Dir<'a> {
impl<'a> Dir<'a> {
fn new(node: &'a Node) -> Result<Self> {
let compressor = node.container.compressor()?;
- let reader = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ let reader = sfs_init_check_null(&|| unsafe {
sqfs_dir_reader_create(&node.container.superblock, *compressor, *node.container.file, 0)
- }, "Couldn't create directory reader")?, sfs_destroy);
+ }, "Couldn't create directory reader", sfs_destroy)?;
unsafe { sfs_check(sqfs_dir_reader_open_dir(*reader, node.inode.as_const(), 0), "Couldn't open directory")?; }
Ok(Self { node: node, compressor: compressor, reader: Mutex::new(reader) })
}
@@ -49,20 +58,22 @@ impl<'a> Dir<'a> {
fn read<'b>(&'b self) -> Result<Node<'a>> {
let locked_reader = self.reader.lock().expect(LOCK_ERR);
- let entry = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+ let entry = sfs_init_ptr(&|x| unsafe {
sqfs_dir_reader_read(**locked_reader, x)
- }, "Couldn't read directory entries")?, libc_free);
+ }, "Couldn't read directory entries", libc_free)?;
let name_bytes = unsafe { (**entry).name.as_slice((**entry).size as usize + 1) };
let name = String::from_utf8(name_bytes.to_vec())?;
- let node = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+ let node = sfs_init_ptr(&|x| unsafe {
sqfs_dir_reader_get_inode(**locked_reader, x)
- }, "Couldn't read directory entry inode")?, libc_free);
+ }, "Couldn't read directory entry inode", libc_free)?;
Node::new(self.node.container, node, self.node.path.as_ref().map(|path| path.join(name)))
}
- pub fn child(&self, name: &str) -> Result<Node> {
- unsafe { sfs_check(sqfs_dir_reader_find(**self.reader.lock().expect(LOCK_ERR), CString::new(name)?.as_ptr()), &format!("Couldn't find child \"{}\"", name))? };
- self.read()
+ pub fn child(&self, name: &str) -> Result<Option<Node>> {
+ match unsafe { enoent_ok(sfs_check(sqfs_dir_reader_find(**self.reader.lock().expect(LOCK_ERR), CString::new(name)?.as_ptr()), &format!("Couldn't find child \"{}\"", name)))? } {
+ None => Ok(None),
+ Some(_) => Ok(Some(self.read()?)),
+ }
}
}
@@ -79,17 +90,16 @@ pub struct File<'a> {
compressor: ManagedPointer<sqfs_compressor_t>,
reader: Mutex<ManagedPointer<sqfs_data_reader_t>>,
offset: Mutex<u64>,
- mmap: Option<(std::fs::File, MemoryMap)>, // TODO Probably not thread-safe
}
impl<'a> File<'a> {
fn new(node: &'a Node) -> Result<Self> {
let compressor = node.container.compressor()?;
- let reader = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ let reader = sfs_init_check_null(&|| unsafe {
sqfs_data_reader_create(*node.container.file, node.container.superblock.block_size as u64, *compressor, 0)
- }, "Couldn't create data reader")?, sfs_destroy);
+ }, "Couldn't create data reader", sfs_destroy)?;
unsafe { sfs_check(sqfs_data_reader_load_fragment_table(*reader, &node.container.superblock), "Couldn't load fragment table")? };
- Ok(Self { node: node, compressor: compressor, reader: Mutex::new(reader), offset: Mutex::new(0), mmap: None })
+ Ok(Self { node: node, compressor: compressor, reader: Mutex::new(reader), offset: Mutex::new(0) })
}
pub fn size(&self) -> u64 {
@@ -110,7 +120,7 @@ impl<'a> File<'a> {
Ok(ret)
}
- pub fn mmap<'b>(&'b mut self) -> Result<Option<&'b [u8]>> {
+ pub fn mmap<'b>(&'b mut self) -> Option<&'b [u8]> {
let inode = unsafe { &***self.node.inode };
let (start, frag_idx) = unsafe {
match inode.base.type_ as u32 {
@@ -120,18 +130,10 @@ impl<'a> File<'a> {
}
};
let block_count = unsafe { inode.payload_bytes_used / std::mem::size_of::<sqfs_u32>() as u32 };
- println!("File starts at byte {} ({})", start, MemoryMap::granularity());
- if block_count == 0 || start == 0 || frag_idx != 0xffffffff { return Ok(None); }
+ if block_count == 0 || frag_idx != 0xffffffff { return None; }
let block_sizes = unsafe { inode.extra.as_slice(block_count as usize) };
- if block_sizes.iter().any(|x| x & 0x00800000 == 0) { return Ok(None); }
- if self.mmap.is_none() {
- let file = std::fs::File::open(&self.node.container.path)?;
- let map = MemoryMap::new(self.size() as usize, &vec![MapOption::MapReadable, MapOption::MapFd(file.as_raw_fd()), MapOption::MapOffset(start as usize)])?;
- self.mmap = Some((file, map));
- }
- let map = &self.mmap.as_ref().expect("Just-filled mmap is empty").1;
- println!("{:?} bytes at {:?}", map.len(), map.data());
- unsafe { Ok(Some(std::slice::from_raw_parts(map.data(), map.len()))) }
+ if block_sizes.iter().any(|x| x & 0x01000000 == 0) { return None; }
+ Some(self.node.container.map_range(start as usize, self.size() as usize))
}
}
@@ -315,13 +317,12 @@ impl<'a> Node<'a> {
if self.container.superblock.flags & SQFS_SUPER_FLAGS_SQFS_FLAG_NO_XATTRS as u16 != 0 { Ok(HashMap::new()) }
else {
let compressor = self.container.compressor()?;
- let xattr_reader = unsafe {
- let ret = ManagedPointer::new(sqfs_xattr_reader_create(0), sfs_destroy);
- sfs_check(sqfs_xattr_reader_load(*ret, &self.container.superblock, *self.container.file, *compressor), "Couldn't create xattr reader")?;
- ret
- };
+ let xattr_reader = sfs_init_check_null(&|| unsafe {
+ sqfs_xattr_reader_create(0)
+ }, "Couldn't create xattr reader", sfs_destroy)?;
+ unsafe { sfs_check(sqfs_xattr_reader_load(*xattr_reader, &self.container.superblock, *self.container.file, *compressor), "Couldn't load xattr reader")?; }
let mut xattr_idx: u32 = NO_XATTRS;
- unsafe { sfs_check(sqfs_inode_get_xattr_index(self.inode.as_const(), &mut xattr_idx), "Couldn't get xattr index")? };
+ unsafe { sfs_check(sqfs_inode_get_xattr_index(self.inode.as_const(), &mut xattr_idx), "Couldn't get xattr index")?; }
let desc = sfs_init(&|x| unsafe {
sqfs_xattr_reader_get_desc(*xattr_reader, xattr_idx, x)
}, "Couldn't get xattr descriptor")?;
@@ -329,15 +330,12 @@ impl<'a> Node<'a> {
unsafe { sfs_check(sqfs_xattr_reader_seek_kv(*xattr_reader, &desc), "Couldn't seek to xattr location")? };
for _ in 0..desc.count {
let prefixlen = unsafe { CStr::from_ptr(sqfs_get_xattr_prefix(category as u32)).to_bytes().len() };
- let key = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+ let key = sfs_init_ptr(&|x| unsafe {
sqfs_xattr_reader_read_key(*xattr_reader, x)
- }, "Couldn't read xattr key")?, libc_free);
- if unsafe { (**key).type_ } as u32 & SQFS_XATTR_TYPE_SQFS_XATTR_FLAG_OOL != 0 {
- unimplemented!()
- }
- let val = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+ }, "Couldn't read xattr key", libc_free)?;
+ let val = sfs_init_ptr(&|x| unsafe {
sqfs_xattr_reader_read_value(*xattr_reader, *key, x)
- }, "Couldn't read xattr value")?, libc_free);
+ }, "Couldn't read xattr value", libc_free)?;
if unsafe { (**key).type_ } as u32 & SQFS_XATTR_TYPE_SQFS_XATTR_PREFIX_MASK == category as u32 {
unsafe {
let keyvec = (**key).key.as_slice((**key).size as usize + prefixlen)[prefixlen..].to_vec();
@@ -376,7 +374,7 @@ impl<'a> Node<'a> {
pub fn parent(&self) -> Result<Self> {
self.path.as_ref().map(|path| {
let ppath = path.parent().unwrap_or(&Path::new(""));
- self.container.get(&os_to_string(ppath.as_os_str())?)
+ self.container.get_exists(&os_to_string(ppath.as_os_str())?)
}).ok_or(SquashfsError::NoPath)?
}
@@ -398,7 +396,7 @@ impl<'a> Node<'a> {
if !visited.insert(target.clone()) {
return Err(SquashfsError::LinkLoop(target));
}
- cur = Box::new(cur.container.get_path(&target)?);
+ cur = Box::new(cur.container.get_exists(&target)?);
}
_ => return Ok(*cur),
}
@@ -415,7 +413,8 @@ impl<'a> Node<'a> {
}
pub fn into_owned_file(self) -> Result<OwnedFile<'a>> {
- Ok(OwnedFile { handle: OwningHandle::try_new(Box::new(self), |x| unsafe { (*x).as_file().map(|x| Box::new(x)) })? })
+ let resolved = self.resolve()?;
+ Ok(OwnedFile { handle: OwningHandle::try_new(Box::new(resolved), |x| unsafe { (*x).as_file().map(|x| Box::new(x)) })? })
}
pub fn as_dir(&self) -> Result<Dir> {
@@ -426,7 +425,8 @@ impl<'a> Node<'a> {
}
pub fn into_owned_dir(self) -> Result<OwnedDir<'a>> {
- Ok(OwnedDir { handle: OwningHandle::try_new(Box::new(self), |x| unsafe { (*x).as_dir().map(|x| Box::new(x)) })? })
+ let resolved = self.resolve()?;
+ Ok(OwnedDir { handle: OwningHandle::try_new(Box::new(resolved), |x| unsafe { (*x).as_dir().map(|x| Box::new(x)) })? })
}
pub fn uid(&self) -> Result<u32> {
@@ -472,41 +472,44 @@ pub struct Archive {
file: ManagedPointer<sqfs_file_t>,
superblock: sqfs_super_t,
compressor_config: sqfs_compressor_config_t,
+ mmap: (std::fs::File, MemoryMap),
}
impl Archive {
pub fn new<T: AsRef<Path>>(path: T) -> Result<Self> {
let cpath = CString::new(os_to_string(path.as_ref().as_os_str())?)?;
- let file = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ let file = sfs_init_check_null(&|| unsafe {
sqfs_open_file(cpath.as_ptr(), SQFS_FILE_OPEN_FLAGS_SQFS_FILE_OPEN_READ_ONLY)
- }, &format!("Couldn't open input file {}", path.as_ref().display()))?, sfs_destroy);
+ }, &format!("Couldn't open input file {}", path.as_ref().display()), sfs_destroy)?;
let superblock = sfs_init(&|x| unsafe {
sqfs_super_read(x, *file)
}, "Couldn't read archive superblock")?;
let compressor_config = sfs_init(&|x| unsafe {
sqfs_compressor_config_init(x, superblock.compression_id as u32, superblock.block_size as u64, SQFS_COMP_FLAG_SQFS_COMP_FLAG_UNCOMPRESS as u16)
}, "Couldn't read archive compressor config")?;
- Ok(Self { path: path.as_ref().to_path_buf(), file: file, superblock: superblock, compressor_config: compressor_config })
+ let os_file = std::fs::File::open(&path)?;
+ let map = MemoryMap::new(superblock.bytes_used as usize, &vec![MapOption::MapReadable, MapOption::MapFd(os_file.as_raw_fd())])?;
+ Ok(Self { path: path.as_ref().to_path_buf(), file: file, superblock: superblock, compressor_config: compressor_config, mmap: (os_file, map) })
}
fn compressor(&self) -> Result<ManagedPointer<sqfs_compressor_t>> {
- Ok(ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+ Ok(sfs_init_ptr(&|x| unsafe {
sqfs_compressor_create(&self.compressor_config, x)
- }, "Couldn't create compressor")?, sfs_destroy))
+ }, "Couldn't create compressor", sfs_destroy)?)
}
fn meta_reader(&self, compressor: &ManagedPointer<sqfs_compressor_t>, bounds: Option<(u64, u64)>) -> Result<ManagedPointer<sqfs_meta_reader_t>> {
let range = bounds.unwrap_or((0, self.superblock.bytes_used));
- Ok(ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ Ok(sfs_init_check_null(&|| unsafe {
sqfs_meta_reader_create(*self.file, **compressor, range.0, range.1)
- }, "Couldn't create metadata reader")?, sfs_destroy))
+ }, "Couldn't create metadata reader", sfs_destroy)?)
}
fn id_lookup(&self, idx: u16) -> Result<u32> {
// TODO Consider chaching the ID table to make lookups more efficient
- let mut id_table = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ let mut id_table = sfs_init_check_null(&|| unsafe {
sqfs_id_table_create(0)
- }, "Couldn't create ID table")?, sfs_destroy);
+ }, "Couldn't create ID table", sfs_destroy)?;
let compressor = self.compressor()?;
unsafe { sfs_check(sqfs_id_table_read(*id_table, *self.file, &self.superblock, *compressor), "Couldn't read ID table")?; }
Ok(sfs_init(&|x| unsafe {
@@ -518,28 +521,32 @@ impl Archive {
self.superblock.inode_count
}
- pub fn get_path<T: AsRef<Path>>(&self, path: T) -> Result<Node> {
+ pub fn get_exists<T: AsRef<Path>>(&self, path: T) -> Result<Node> {
let compressor = self.compressor()?;
- let dir_reader = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ let dir_reader = sfs_init_check_null(&|| unsafe {
sqfs_dir_reader_create(&self.superblock, *compressor, *self.file, 0)
- }, "Couldn't create directory reader")?, sfs_destroy);
- let root = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+ }, "Couldn't create directory reader", sfs_destroy)?;
+ let root = sfs_init_ptr(&|x| unsafe {
sqfs_dir_reader_get_root_inode(*dir_reader, x)
- }, "Couldn't get filesystem root")?, libc_free);
+ }, "Couldn't get filesystem root", libc_free)?;
let pathbuf = dumb_canonicalize(path.as_ref());
if &pathbuf == Path::new("/") {
Node::new(&self, root, Some(pathbuf))
}
else {
let cpath = CString::new(os_to_string(pathbuf.as_os_str())?)?;
- let inode = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+ let inode = sfs_init_ptr(&|x| unsafe {
sqfs_dir_reader_find_by_path(*dir_reader, *root, cpath.as_ptr(), x)
- }, &format!("Unable to access path {}", path.as_ref().display()))?, libc_free);
+ }, &format!("Unable to access path {}", path.as_ref().display()), libc_free)?;
Node::new(&self, inode, Some(pathbuf))
}
}
- pub fn get_id(&self, id: u64) -> Result<Node> {
+ pub fn get<T: AsRef<Path>>(&self, path: T) -> Result<Option<Node>> {
+ enoent_ok(self.get_exists(path))
+ }
+
+ pub fn get_id(&self, id: u64) -> Result<Node> { // TODO Return Result<Option<Node>> here as well
if self.superblock.flags & SQFS_SUPER_FLAGS_SQFS_FLAG_EXPORTABLE as u16 == 0 { Err(SquashfsError::Unsupported("inode indexing".to_string()))?; }
if id <= 0 || id > self.superblock.inode_count as u64 { Err(SquashfsError::Range(id, self.superblock.inode_count as u64))? }
let compressor = self.compressor()?;
@@ -556,31 +563,32 @@ impl Archive {
sfs_check(sqfs_meta_reader_read(*export_reader, &mut noderef as *mut u64 as *mut libc::c_void, 8), "Couldn't read inode reference")?;
}
let (block, offset) = unpack_meta_ref(noderef);
- let inode = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+ let inode = sfs_init_ptr(&|x| unsafe {
sqfs_meta_reader_read_inode(*export_reader, &self.superblock, block, offset, x)
- }, "Couldn't read inode")?, libc_free);
+ }, "Couldn't read inode", libc_free)?;
Node::new(&self, inode, None)
}
- pub fn get(&self, path: &str) -> Result<Node> {
- self.get_path(Path::new(path))
+ fn map_range(&self, start: usize, len: usize) -> &[u8] {
+ let map = &self.mmap.1;
+ unsafe { std::slice::from_raw_parts(map.data().offset(start as isize), len) }
}
- pub fn names_from_dirent_refs(&mut self, dirent_refs: &[u64]) -> Result<Vec<String>> {
+ /*pub fn names_from_dirent_refs(&mut self, dirent_refs: &[u64]) -> Result<Vec<String>> {
let compressor = self.compressor()?;
let meta_reader = self.meta_reader(&compressor, None)?; // TODO Set bounds
let mut ret = Vec::with_capacity(dirent_refs.len());
for dirent_ref in dirent_refs {
let (block, offset) = unpack_meta_ref(*dirent_ref);
unsafe { sfs_check(sqfs_meta_reader_seek(*meta_reader, block, offset), "Couldn't seek to directory entry")?; }
- let entry = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+ let entry = sfs_init_ptr(&|x| unsafe {
sqfs_meta_reader_read_dir_ent(*meta_reader, x)
- }, "Couldn't read directory entry by reference")?, libc_free);
+ }, "Couldn't read directory entry by reference", libc_free)?;
let name_bytes = unsafe { (**entry).name.as_slice((**entry).size as usize + 1) };
ret.push(String::from_utf8(name_bytes.to_vec())?);
}
Ok(ret)
- }
+ }*/
}
unsafe impl Send for Archive { }
diff --git a/src/write.rs b/src/write.rs
index 23579b2..096bc5e 100644
--- a/src/write.rs
+++ b/src/write.rs
@@ -2,7 +2,7 @@ use std::cell::RefCell;
use std::collections::{BTreeMap, HashMap};
use std::ffi::{CStr, CString, OsStr, OsString};
use std::io::Read;
-use std::path::Path;
+use std::path::{Path, PathBuf};
use std::time::SystemTime;
use bindings::*;
use super::*;
@@ -30,16 +30,26 @@ pub enum SourceData {
}
pub struct Source {
- data: SourceData,
- xattrs: HashMap<OsString, Vec<u8>>,
- uid: u32,
- gid: u32,
- mode: u16,
- modified: u32,
- flags: u32,
+ pub data: SourceData,
+ pub uid: u32,
+ pub gid: u32,
+ pub mode: u16,
+ pub modified: u32,
+ pub xattrs: HashMap<OsString, Vec<u8>>,
+ pub flags: u32,
}
-fn copy_metadata(src: &ManagedPointer<sqfs_inode_generic_t>, dst: &mut ManagedPointer<sqfs_inode_generic_t>) {
+fn file_xattrs(path: &Path) -> Result<HashMap<OsString, Vec<u8>>> {
+ xattr::list(path)?.map(|attr| {
+ match xattr::get(path, attr.clone()) {
+ Err(e) => panic!(), // TODO Panics
+ Ok(None) => panic!(), //Err(anyhow!("Couldn't retrieve xattr \"{:?}\" reported to be present", attr)),
+ Ok(Some(value)) => Ok((attr, value))
+ }
+ }).collect()
+}
+
+fn copy_metadata(src: &ManagedPointer<sqfs_inode_generic_t>, dst: &mut ManagedPointer<sqfs_inode_generic_t>) -> Result<()> {
fn nlink_ref(inode: &ManagedPointer<sqfs_inode_generic_t>) -> Option<&u32> {
unimplemented!();
}
@@ -49,12 +59,17 @@ fn copy_metadata(src: &ManagedPointer<sqfs_inode_generic_t>, dst: &mut ManagedPo
dst_base.gid_idx = src_base.gid_idx;
dst_base.mod_time = src_base.mod_time;
dst_base.inode_number = src_base.inode_number;
- // TODO xattr_idx, uid, git, mode, mtime, link_count
+ let mut xattr_idx: u32 = 0;
+ unsafe {
+ sfs_check(sqfs_inode_get_xattr_index(**src, &mut xattr_idx), "Couldn't get xattr index")?;
+ sfs_check(sqfs_inode_set_xattr_index(**dst, xattr_idx), "Couldn't set xattr index")?;
+ }
+ Ok(())
}
impl Source {
- pub fn new(data: SourceData, xattrs: HashMap<OsString, Vec<u8>>, parent: u32, flags: u32) -> Self { // TODO Parent not necessary?
- Self { data: data, xattrs: xattrs, uid: 1000, gid: 1001, mode: 0x1ff, modified: 0, flags: flags }
+ pub fn defaults(data: SourceData) -> Self {
+ Self { data: data, uid: 0, gid: 0, mode: 0x1ff, modified: 0, xattrs: HashMap::new(), flags: 0 }
}
fn devno(maj: u32, min: u32) -> u32 {
@@ -128,7 +143,11 @@ struct IntermediateNode {
inode: Box<ManagedPointer<sqfs_inode_generic_t>>,
dir_children: Option<Box<dyn Iterator<Item=(OsString, u32)>>>,
pos: u64,
- parent: u32, // TODO Calculate rather than requiring
+}
+
+pub struct SourceFile {
+ pub path: PathBuf,
+ pub content: Source,
}
pub struct Writer {
@@ -148,48 +167,48 @@ pub struct Writer {
}
impl Writer {
- pub fn open(path: &Path) -> Result<Self> {
- let cpath = CString::new(os_to_string(path.as_os_str())?)?;
+ pub fn open<T: AsRef<Path>>(path: T) -> Result<Self> {
+ let cpath = CString::new(os_to_string(path.as_ref().as_os_str())?)?;
let block_size = SQFS_DEFAULT_BLOCK_SIZE as u64;
- let num_workers = 1;
+ let num_workers = 4; // TODO Get from core count
let compressor_id = SQFS_COMPRESSOR_SQFS_COMP_ZSTD;
- let now = 0; // TODO Get current timestamp
- let outfile = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_secs() as u32;
+ let outfile = sfs_init_check_null(&|| unsafe {
sqfs_open_file(cpath.as_ptr(), SQFS_FILE_OPEN_FLAGS_SQFS_FILE_OPEN_OVERWRITE)
- }, &format!("Couldn't open output file {}", path.display()))?, sfs_destroy);
+ }, &format!("Couldn't open output file {}", path.as_ref().display()), sfs_destroy)?;
let compressor_config = sfs_init(&|x| unsafe {
sqfs_compressor_config_init(x, compressor_id, block_size, 0)
}, "Couldn't create compressor config")?;
- let compressor = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+ let compressor = sfs_init_ptr(&|x| unsafe {
sqfs_compressor_create(&compressor_config, x)
- }, "Couldn't create compressor")?, sfs_destroy);
+ }, "Couldn't create compressor", sfs_destroy)?;
let superblock = sfs_init(&|x| unsafe {
sqfs_super_init(x, block_size, now, compressor_id)
}, "Couldn't create superblock")?;
- let frag_table = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ let frag_table = sfs_init_check_null(&|| unsafe {
sqfs_frag_table_create(0)
- }, "Couldn't create fragment table")?, sfs_destroy);
- let block_writer = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ }, "Couldn't create fragment table", sfs_destroy)?;
+ let block_writer = sfs_init_check_null(&|| unsafe {
sqfs_block_writer_create(*outfile, 4096, 0)
- }, "Couldn't create block writer")?, sfs_destroy);
- let block_processor = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ }, "Couldn't create block writer", sfs_destroy)?;
+ let block_processor = sfs_init_check_null(&|| unsafe {
sqfs_block_processor_create(block_size, *compressor, num_workers, 10 * num_workers as u64, *block_writer, *frag_table)
- }, "Couldn't create block processor")?, sfs_destroy);
- let id_table = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ }, "Couldn't create block processor", sfs_destroy)?;
+ let id_table = sfs_init_check_null(&|| unsafe {
sqfs_id_table_create(0)
- }, "Couldn't create ID table")?, sfs_destroy);
- let xattr_writer = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ }, "Couldn't create ID table", sfs_destroy)?;
+ let xattr_writer = sfs_init_check_null(&|| unsafe {
sqfs_xattr_writer_create(0)
- }, "Couldn't create xattr writer")?, sfs_destroy);
- let inode_writer = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ }, "Couldn't create xattr writer", sfs_destroy)?;
+ let inode_writer = sfs_init_check_null(&|| unsafe {
sqfs_meta_writer_create(*outfile, *compressor, 0)
- }, "Couldn't create inode metadata writer")?, sfs_destroy);
- let dirent_writer = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
- sqfs_meta_writer_create(*outfile, *compressor, SQFS_META_WRITER_FLAGS_SQFS_META_WRITER_KEEP_IN_MEMORY) // TODO Untangle so we don't have to keep in memory
- }, "Couldn't create directory entry metadata writer")?, sfs_destroy);
- let dir_writer = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ }, "Couldn't create inode metadata writer", sfs_destroy)?;
+ let dirent_writer = sfs_init_check_null(&|| unsafe {
+ sqfs_meta_writer_create(*outfile, *compressor, SQFS_META_WRITER_FLAGS_SQFS_META_WRITER_KEEP_IN_MEMORY)
+ }, "Couldn't create directory entry metadata writer", sfs_destroy)?;
+ let dir_writer = sfs_init_check_null(&|| unsafe {
sqfs_dir_writer_create(*dirent_writer, SQFS_DIR_WRITER_CREATE_FLAGS_SQFS_DIR_WRITER_CREATE_EXPORT_TABLE)
- }, "Couldn't create directory writer")?, sfs_destroy);
+ }, "Couldn't create directory writer", sfs_destroy)?;
unsafe {
sfs_check(sqfs_super_write(&superblock, *outfile), "Couldn't write archive superblock")?;
sfs_check((**compressor).write_options.expect("Compressor doesn't provide write_options")(*compressor, *outfile), "Couldn't write compressor options")?;
@@ -212,24 +231,26 @@ impl Writer {
}
fn mode_from_inode(inode: &ManagedPointer<sqfs_inode_generic_t>) -> u16 {
- let typenums = vec![ // TODO Lazy static
- (SQFS_INODE_TYPE_SQFS_INODE_DIR, S_IFDIR),
- (SQFS_INODE_TYPE_SQFS_INODE_FILE, S_IFREG),
- (SQFS_INODE_TYPE_SQFS_INODE_SLINK, S_IFLNK),
- (SQFS_INODE_TYPE_SQFS_INODE_BDEV, S_IFBLK),
- (SQFS_INODE_TYPE_SQFS_INODE_CDEV, S_IFCHR),
- (SQFS_INODE_TYPE_SQFS_INODE_FIFO, S_IFIFO),
- (SQFS_INODE_TYPE_SQFS_INODE_SOCKET, S_IFSOCK),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_DIR, S_IFDIR),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_FILE, S_IFREG),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_SLINK, S_IFLNK),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_BDEV, S_IFBLK),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_CDEV, S_IFCHR),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_FIFO, S_IFIFO),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_SOCKET, S_IFSOCK),
- ].into_iter().collect::<HashMap<u32, u32>>();
+ lazy_static! {
+ static ref TYPENUMS: HashMap<u32, u32> = vec![
+ (SQFS_INODE_TYPE_SQFS_INODE_DIR, S_IFDIR),
+ (SQFS_INODE_TYPE_SQFS_INODE_FILE, S_IFREG),
+ (SQFS_INODE_TYPE_SQFS_INODE_SLINK, S_IFLNK),
+ (SQFS_INODE_TYPE_SQFS_INODE_BDEV, S_IFBLK),
+ (SQFS_INODE_TYPE_SQFS_INODE_CDEV, S_IFCHR),
+ (SQFS_INODE_TYPE_SQFS_INODE_FIFO, S_IFIFO),
+ (SQFS_INODE_TYPE_SQFS_INODE_SOCKET, S_IFSOCK),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_DIR, S_IFDIR),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_FILE, S_IFREG),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_SLINK, S_IFLNK),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_BDEV, S_IFBLK),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_CDEV, S_IFCHR),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_FIFO, S_IFIFO),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_SOCKET, S_IFSOCK),
+ ].into_iter().collect();
+ }
let base = unsafe { (***inode).base };
- typenums[&(base.type_ as u32)] as u16 | base.mode
+ TYPENUMS[&(base.type_ as u32)] as u16 | base.mode
}
fn outfile_size(&self) -> u64 {
@@ -261,22 +282,22 @@ impl Writer {
sfs_check(sqfs_xattr_writer_begin(*self.xattr_writer, 0), "Couldn't start writing xattrs")?;
for (key, value) in &source.xattrs {
let ckey = CString::new(os_to_string(key)?)?;
- sfs_check(sqfs_xattr_writer_add(*self.xattr_writer, ckey.as_ptr() as *const i8, value as &[u8] as *const [u8] as *const libc::c_void, value.len() as u64), "Couldn't add xattr")?;
+ sfs_check(sqfs_xattr_writer_add(*self.xattr_writer, ckey.as_ptr() as *const i8, value.as_ptr() as *const libc::c_void, value.len() as u64), "Couldn't add xattr")?;
}
let xattr_idx = unsafe { sfs_init(&|x| sqfs_xattr_writer_end(*self.xattr_writer, x), "Couldn't finish writing xattrs")? };
let mut base = &mut (***inode).base;
base.mode = source.mode;
sqfs_inode_set_xattr_index(**inode, xattr_idx);
- sfs_check(sqfs_id_table_id_to_index(*self.id_table, source.uid, &mut base.uid_idx), "Couldn't set inode UID");
- sfs_check(sqfs_id_table_id_to_index(*self.id_table, source.gid, &mut base.gid_idx), "Couldn't set inode GID");
+ sfs_check(sqfs_id_table_id_to_index(*self.id_table, source.uid, &mut base.uid_idx), "Couldn't set inode UID")?;
+ sfs_check(sqfs_id_table_id_to_index(*self.id_table, source.gid, &mut base.gid_idx), "Couldn't set inode GID")?;
base.mod_time = source.modified;
- base.inode_number = self.nodes.len() as u32 + 1;;
+ base.inode_number = self.nodes.len() as u32 + 1;
}
let dir_children = match source.data {
SourceData::Dir(children) => Some(children),
_ => None,
};
- self.nodes.push(RefCell::new(IntermediateNode { inode: inode, dir_children: dir_children, pos: 0, parent: 0 }));
+ self.nodes.push(RefCell::new(IntermediateNode { inode: inode, dir_children: dir_children, pos: 0 }));
Ok(self.nodes.len() as u32)
}
@@ -287,20 +308,21 @@ impl Writer {
for raw_node in &self.nodes {
let mut node = raw_node.borrow_mut();
// TODO Handle extended inodes properly
- // TODO What happens if a dir tries to include itself as a child? Probably a RefCell borrow panic.
let id = (***node.inode).base.inode_number;
if let Some(children) = node.dir_children.take() {
sfs_check(sqfs_dir_writer_begin(*self.dir_writer, 0), "Couldn't start writing directory")?;
// For each child, need: name, ID, reference, mode
for (name, child_id) in children { // TODO Check that children are sorted
- if child_id >= id { panic!("Tried to write directory {} before child {}", id, child_id) } // TODO Allocate error
+ if child_id >= id { Err(SquashfsError::WriteOrder(id, child_id))?; }
let child_node = &self.nodes[child_id as usize - 1].borrow();
let child = child_node.inode.as_ref();
let child_ref = child_node.pos;
sfs_check(sqfs_dir_writer_add_entry(*self.dir_writer, CString::new(os_to_string(&name)?)?.as_ptr(), child_id, child_ref, Self::mode_from_inode(&child)), "Couldn't add directory entry")?;
}
sfs_check(sqfs_dir_writer_end(*self.dir_writer), "Couldn't finish writing directory")?;
- let mut ret = Box::new(ManagedPointer::new(sfs_init_check_null(&|| sqfs_dir_writer_create_inode(*self.dir_writer, 0, 0, node.parent), "Couldn't get inode for directory")?, libc_free));
+ let mut ret = Box::new(sfs_init_check_null(&|| {
+ sqfs_dir_writer_create_inode(*self.dir_writer, 0, 0, 0) // TODO Populate the parent inode number (how?)
+ }, "Couldn't get inode for directory", libc_free)?);
copy_metadata(&*node.inode, &mut ret);
node.inode = ret;
}
@@ -316,13 +338,12 @@ impl Writer {
sfs_check(sqfs_meta_writer_flush(*self.dirent_writer), "Couldn't flush directory entries")?;
self.superblock.directory_table_start = self.outfile_size();
sfs_check(sqfs_meta_write_write_to_file(*self.dirent_writer), "Couldn't write directory entries")?;
- (self.superblock).inode_count = self.nodes.len() as u32;
+ self.superblock.inode_count = self.nodes.len() as u32;
sfs_check(sqfs_frag_table_write(*self.frag_table, *self.outfile, &mut self.superblock, *self.compressor), "Couldn't write fragment table")?;
sfs_check(sqfs_dir_writer_write_export_table(*self.dir_writer, *self.outfile, *self.compressor, self.nodes.len() as u32, root_ref, &mut self.superblock), "Couldn't write export table")?;
sfs_check(sqfs_id_table_write(*self.id_table, *self.outfile, &mut self.superblock, *self.compressor), "Couldn't write ID table")?;
sfs_check(sqfs_xattr_writer_flush(*self.xattr_writer, *self.outfile, &mut self.superblock, *self.compressor), "Couldn't write xattr table")?;
self.superblock.bytes_used = self.outfile_size();
- self.superblock.modification_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_secs() as u32;
sfs_check(sqfs_super_write(&self.superblock, *self.outfile), "Couldn't rewrite archive superblock")?;
let padding: Vec<u8> = vec![0; PAD_TO - self.outfile_size() as usize % PAD_TO];
sfs_check((**self.outfile).write_at.expect("File does not provide write_at")(*self.outfile, self.outfile_size(), &padding as &[u8] as *const [u8] as *const libc::c_void, padding.len() as u64), "Couldn't pad file");
@@ -330,13 +351,13 @@ impl Writer {
Ok(())
}
- pub fn add_tree<T: AsRef<Path>>(&mut self, root: T, callback: &Fn(Source) -> Result<Source>) -> Result<()> {
+ /*pub fn add_tree<P: AsRef<Path>>(&mut self, root: P, callback: &mut FnMut(SourceFile) -> std::result::Result<Vec<SourceFile>, BoxedError>) -> Result<()> {
let mut childmap: HashMap<PathBuf, BTreeMap<OsString, u32>> = HashMap::new();
for step in WalkDir::new(root.as_ref()).follow_links(false).contents_first(true) {
match step {
Ok(entry) => {
// TODO Consider adding Unix-specific functionality with graceful degradation
- // TODO Catch all errors except add() and continue
+ // TODO Catch all errors except from add() and continue
let metadata = entry.metadata().unwrap();
let mtime = metadata.modified()?.duration_since(SystemTime::UNIX_EPOCH)?.as_secs() as u32;
let data = if metadata.file_type().is_dir() {
@@ -349,13 +370,29 @@ impl Writer {
SourceData::Symlink(std::fs::read_link(entry.path())?.into_os_string())
}
else {
- panic!("Unknown or unsupported file type"); // TODO Error
+ Err(SquashfsError::WriteType(metadata.file_type()))?;
+ unreachable!();
+ };
+ let candidate = if cfg!(linux) {
+ use std::os::linux::fs::MetadataExt;
+ Source { data: data, xattrs: file_xattrs(entry.path())?, uid: metadata.st_uid(), gid: metadata.st_gid(), mode: (metadata.st_mode() & !S_IFMT) as u16, modified: mtime, flags: 0 }
+ }
+ else {
+ Source { data: data, xattrs: HashMap::new(), uid: 0, gid: 0, mode: 0x1ff, modified: mtime, flags: 0 }
};
- let id = self.add(callback(Source { data: data, xattrs: HashMap::new(), uid: 0, gid: 0, mode: 0x1ff, modified: mtime, flags: 0 })?)?;
- if let Some(parent) = entry.path().parent() {
- childmap.entry(parent.to_path_buf()).or_insert(BTreeMap::new()).insert(entry.file_name().to_os_string(), id);
+ let candidate_file = SourceFile { path: entry.path().to_path_buf(), content: candidate };
+ for mut result in callback(candidate_file).map_err(|e| SquashfsError::WrappedError(e))? {
+ if let SourceData::Dir(children) = &mut result.content.data {
+ let mut new_children = childmap.remove(&result.path).unwrap_or(BTreeMap::new());
+ new_children.extend(children);
+ result.content.data = SourceData::Dir(Box::new(new_children.into_iter()));
+ }
+ let id = self.add(result.content)?;
+ if let Some(parent) = result.path.parent() {
+ childmap.entry(parent.to_path_buf()).or_insert(BTreeMap::new()).insert(result.path.file_name().unwrap().to_os_string(), id);
+ }
+ println!("{}: {}", id, result.path.display());
}
- println!("{}: {}", id, entry.path().display());
},
Err(e) => {
let path = e.path().map(|x| x.to_string_lossy().into_owned()).unwrap_or("(unknown)".to_string());
@@ -364,5 +401,93 @@ impl Writer {
}
}
Ok(())
+ }*/
+}
+
+pub struct TreeProcessor {
+ root: PathBuf,
+ writer: RefCell<Writer>,
+ childmap: RefCell<HashMap<PathBuf, BTreeMap<OsString, u32>>>,
+}
+
+impl TreeProcessor {
+ pub fn new<P: AsRef<Path>>(writer: Writer, root: P) -> Result<Self> {
+ Ok(Self { root: root.as_ref().to_path_buf(), writer: RefCell::new(writer), childmap: RefCell::new(HashMap::new()) })
+ }
+
+ pub fn add(&self, mut source: SourceFile) -> Result<u32> {
+ let mut childmap = self.childmap.borrow_mut();
+ if let SourceData::Dir(children) = &mut source.content.data {
+ // Pull in any last-minute additions made by the user
+ let mut new_children = childmap.remove(&source.path).unwrap_or(BTreeMap::new());
+ new_children.extend(children);
+ source.content.data = SourceData::Dir(Box::new(new_children.into_iter()));
+ }
+ let id = self.writer.borrow_mut().add(source.content)?;
+ if let Some(parent) = source.path.parent() {
+ childmap.entry(parent.to_path_buf()).or_insert(BTreeMap::new()).insert(source.path.file_name().expect("Path from walkdir has no file name").to_os_string(), id);
+ }
+ Ok(id)
+ }
+
+ pub fn finish(&self) -> Result<()> {
+ self.writer.borrow_mut().finish()
+ }
+
+ fn make_source(&self, entry: DirEntry) -> Result<Source> {
+ // TODO Consider adding Unix-specific functionality with graceful degradation
+ // TODO Catch all errors except add() and continue
+ let metadata = entry.metadata().unwrap();
+ let mtime = metadata.modified()?.duration_since(SystemTime::UNIX_EPOCH)?.as_secs() as u32;
+ let data = if metadata.file_type().is_dir() {
+ SourceData::Dir(Box::new(self.childmap.borrow_mut().remove(&entry.path().to_path_buf()).unwrap_or(BTreeMap::new()).into_iter()))
+ }
+ else if metadata.file_type().is_file() {
+ SourceData::File(Box::new(std::fs::File::open(entry.path())?))
+ }
+ else if metadata.file_type().is_symlink() {
+ SourceData::Symlink(std::fs::read_link(entry.path())?.into_os_string())
+ }
+ else {
+ Err(SquashfsError::WriteType(metadata.file_type()))?;
+ unreachable!();
+ };
+ let source = if cfg!(linux) {
+ use std::os::linux::fs::MetadataExt;
+ Source { data: data, xattrs: file_xattrs(entry.path())?, uid: metadata.st_uid(), gid: metadata.st_gid(), mode: (metadata.st_mode() & !S_IFMT) as u16, modified: mtime, flags: 0 }
+ }
+ else {
+ Source { data: data, xattrs: HashMap::new(), uid: 0, gid: 0, mode: 0x1ff, modified: mtime, flags: 0 }
+ };
+ Ok(source)
+ }
+
+ pub fn iter<'a>(&'a self) -> TreeIterator<'a> {
+ let tree = WalkDir::new(&self.root).follow_links(false).contents_first(true);
+ TreeIterator { processor: self, tree: tree.into_iter() }
+ }
+}
+
+pub struct TreeIterator<'a> {
+ processor: &'a TreeProcessor,
+ tree: walkdir::IntoIter,
+}
+
+impl<'a> std::iter::Iterator for TreeIterator<'a> {
+ type Item = Result<SourceFile>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ match self.tree.next() {
+ None => None,
+ Some(Ok(entry)) => {
+ let path = entry.path().to_path_buf();
+ Some(self.processor.make_source(entry).map(|source| SourceFile { path: path, content: source }))
+ }
+ Some(Err(e)) => {
+ let path = e.path().map(|x| x.to_string_lossy().into_owned()).unwrap_or("(unknown)".to_string());
+ eprintln!("Not processing {}: {}", path, e.to_string());
+ self.next()
+ }
+ }
}
}