aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatthew Schauer <matthew.schauer@e10x.net>2020-07-15 16:48:44 -0700
committerMatthew Schauer <matthew.schauer@e10x.net>2020-07-15 16:48:44 -0700
commit04bb366c625eaaa2415413a8aad5aada07e008a2 (patch)
tree2447d143b5f2ab9738c77bf826039f391cff3427
parent259afaba3d749b02bafab7b56d7b3e3ec93d44ee (diff)
Very messy but essentially working writer implementation
-rw-r--r--Cargo.toml4
-rw-r--r--src/lib.rs551
-rw-r--r--src/read.rs587
-rw-r--r--src/write.rs368
-rw-r--r--wrapper.h1
5 files changed, 1001 insertions, 510 deletions
diff --git a/Cargo.toml b/Cargo.toml
index aa370dd..0ccd44a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -9,7 +9,9 @@ bindgen = "0.53.1"
[dependencies]
libc = "0.2"
-thiserror = "1.0"
+mmap = "0.1"
num-traits = "0.2"
num-derive = "0.3"
owning_ref = "0.4"
+thiserror = "1.0"
+walkdir = "2.3"
diff --git a/src/lib.rs b/src/lib.rs
index d76adde..bd975e0 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,7 +1,19 @@
extern crate libc;
+extern crate mmap;
extern crate num_derive;
extern crate num_traits;
extern crate owning_ref;
+extern crate walkdir;
+
+// TODO Use AsRef<Path> rather than Path for public interfaces
+use std::mem::MaybeUninit;
+use std::ffi::{OsStr, OsString};
+use std::path::PathBuf;
+use std::ptr;
+use bindings::*;
+use num_derive::FromPrimitive;
+use num_traits::FromPrimitive;
+use thiserror::Error;
mod bindings {
#![allow(non_camel_case_types)]
@@ -11,19 +23,8 @@ mod bindings {
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
}
-use std::collections::{HashMap, HashSet};
-use std::ffi::{CStr, CString, OsString};
-use std::io;
-use std::io::{Read, Seek};
-use std::mem::MaybeUninit;
-use std::path::{Path, PathBuf, Component};
-use std::ptr;
-use std::sync::{Arc, Mutex};
-use bindings::*;
-use num_derive::FromPrimitive;
-use num_traits::FromPrimitive;
-use owning_ref::OwningHandle;
-use thiserror::Error;
+pub mod read;
+pub mod write;
#[derive(Error, Debug, FromPrimitive)]
#[repr(i32)]
@@ -62,8 +63,11 @@ pub enum SquashfsError {
#[error("Tried to copy an object that can't be copied")] Copy,
#[error("Tried to get parent of a node with an unknown path")] NoPath,
#[error("Inode index {0} is not within limits 1..{1}")] Range(u64, u64),
- #[error("Couldn't read file contents from archive: {0}")] Read(#[from] std::io::Error),
+ #[error("Couldn't read file: {0}")] Read(#[from] std::io::Error),
#[error("The filesystem does not support the feature: {0}")] Unsupported(String),
+ #[error("Memory mapping failed: {0}")] Mmap(#[from] mmap::MapError),
+ #[error("Couldn't get the current system time: {0}")] Time(#[from] std::time::SystemTimeError),
+ #[error("Refusing to create empty archive")] Empty,
}
type Result<T> = std::result::Result<T, SquashfsError>;
@@ -78,6 +82,7 @@ fn sfs_check(code: i32, desc: &str) -> Result<i32> {
}
}
+// TODO Make these three functions return ManagedPointer rather than requiring the caller to do the wrapping
fn sfs_init<T>(init: &dyn Fn(*mut T) -> i32, err: &str) -> Result<T> {
let mut ret: MaybeUninit<T> = MaybeUninit::uninit();
sfs_check(init(ret.as_mut_ptr()), err)?;
@@ -91,8 +96,8 @@ fn sfs_init_ptr<T>(init: &dyn Fn(*mut *mut T) -> i32, err: &str) -> Result<*mut
else { Ok(ret) }
}
-fn sfs_init_check_null<T>(init: &dyn Fn() -> Result<*mut T>, err: &str) -> Result<*mut T> {
- let ret = init()?;
+fn sfs_init_check_null<T>(init: &dyn Fn() -> *mut T, err: &str) -> Result<*mut T> {
+ let ret = init();
if ret.is_null() { Err(SquashfsError::LibraryNullError(err.to_string())) }
else { Ok(ret) }
}
@@ -100,7 +105,7 @@ fn sfs_init_check_null<T>(init: &dyn Fn() -> Result<*mut T>, err: &str) -> Resul
fn sfs_destroy<T>(x: *mut T) {
unsafe {
let obj = x as *mut sqfs_object_t;
- ((*obj).destroy.expect("Squashfs object did not provide a destory callback"))(obj);
+ ((*obj).destroy.expect("Squashfs object did not provide a destroy callback"))(obj);
}
}
@@ -108,28 +113,23 @@ fn libc_free<T>(x: *mut T) {
unsafe { libc::free(x as *mut _ as *mut libc::c_void); }
}
-// Canonicalize without requiring the path to actually exist in the filesystem
-fn dumb_canonicalize(path: &Path) -> PathBuf {
- let mut ret = PathBuf::new();
- for part in path.components() {
- match part {
- Component::Prefix(_) => panic!("What is this, Windows?"),
- Component::CurDir => (),
- Component::RootDir => ret.clear(),
- Component::ParentDir => { ret.pop(); },
- Component::Normal(p) => ret.push(p),
- }
- }
- ret
+fn rust_dealloc<T>(x: *mut T) {
+ unsafe { std::alloc::dealloc(x as *mut u8, std::alloc::Layout::new::<T>()) }
+}
+
+fn unpack_meta_ref(meta_ref: u64) -> (u64, u64) {
+ (meta_ref >> 16 & 0xffffffff, meta_ref & 0xffff)
}
-fn path_to_string(path: &Path) -> Result<String> {
- Ok(path.to_str().ok_or_else(|| SquashfsError::OsUtf8(path.as_os_str().to_os_string()))?.to_string())
+fn os_to_string(s: &OsStr) -> Result<String> {
+ Ok(s.to_str().ok_or_else(|| SquashfsError::OsUtf8(s.to_os_string()))?.to_string())
}
const NO_XATTRS: u32 = 0xffffffff;
const LOCK_ERR: &str = "A thread panicked while holding a lock"; // Because poisoned locks only happen when a thread panics, we probably want to panic too.
const LINK_MAX: i32 = 1000;
+const BLOCK_BUF_SIZE: usize = 4096;
+const PAD_TO: usize = 4096;
struct ManagedPointer<T> {
ptr: *mut T,
@@ -137,6 +137,10 @@ struct ManagedPointer<T> {
}
impl<T> ManagedPointer<T> {
+ fn null(destroy: fn(*mut T)) -> Self {
+ Self { ptr: ptr::null_mut(), destroy: destroy }
+ }
+
fn new(ptr: *mut T, destroy: fn(*mut T)) -> Self {
Self { ptr: ptr, destroy: destroy }
}
@@ -154,6 +158,12 @@ impl<T> std::ops::Deref for ManagedPointer<T> {
}
}
+impl<T> std::ops::DerefMut for ManagedPointer<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.ptr
+ }
+}
+
impl<T> Drop for ManagedPointer<T> {
fn drop(&mut self) {
(self.destroy)(**self)
@@ -165,480 +175,3 @@ impl<T> std::fmt::Debug for ManagedPointer<T> {
write!(f, "ManagedPointer({:?})", self.ptr)
}
}
-
-#[derive(Debug)]
-pub struct Dir<'a> {
- node: &'a Node<'a>,
- compressor: ManagedPointer<sqfs_compressor_t>,
- reader: Mutex<ManagedPointer<sqfs_dir_reader_t>>,
-}
-
-impl<'a> Dir<'a> {
- fn new(node: &'a Node) -> Result<Self> {
- let compressor = node.container.compressor()?;
- let reader = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
- Ok(sqfs_dir_reader_create(&*node.container.superblock, *compressor, *node.container.file, 0))
- }, "Couldn't create directory reader")?, sfs_destroy);
- unsafe { sfs_check(sqfs_dir_reader_open_dir(*reader, node.inode.as_const(), 0), "Couldn't open directory")?; }
- Ok(Self { node: node, compressor: compressor, reader: Mutex::new(reader) })
- }
-
- pub fn reset(&mut self) {
- unsafe { sqfs_dir_reader_rewind(**self.reader.lock().expect(LOCK_ERR)); }
- }
-
- fn read<'b>(&'b self) -> Result<Node<'a>> {
- let locked_reader = self.reader.lock().expect(LOCK_ERR);
- let entry = sfs_init_ptr(&|x| unsafe {
- sqfs_dir_reader_read(**locked_reader, x)
- }, "Couldn't read directory entries")?;
- let name_bytes = unsafe { (*entry).name.as_slice((*entry).size as usize + 1) };
- let name = String::from_utf8(name_bytes.to_vec())?;
- let node = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
- sqfs_dir_reader_get_inode(**locked_reader, x)
- }, "Couldn't read directory entry inode")?, libc_free);
- Node::new(self.node.container, node, self.node.path.as_ref().map(|path| path.join(name)))
- }
-
- pub fn child(&self, name: &str) -> Result<Node> {
- unsafe { sfs_check(sqfs_dir_reader_find(**self.reader.lock().expect(LOCK_ERR), CString::new(name)?.as_ptr()), &format!("Couldn't find child \"{}\"", name))? };
- self.read()
- }
-}
-
-impl<'a> std::iter::Iterator for Dir<'a> {
- type Item = Node<'a>;
-
- fn next(&mut self) -> Option<Self::Item> {
- self.read().ok()
- }
-}
-
-#[derive(Debug)]
-pub struct File<'a> {
- node: &'a Node<'a>,
- compressor: ManagedPointer<sqfs_compressor_t>,
- reader: Mutex<ManagedPointer<sqfs_data_reader_t>>,
- offset: Mutex<u64>,
-}
-
-impl<'a> File<'a> {
- fn new(node: &'a Node) -> Result<Self> {
- let compressor = node.container.compressor()?;
- let reader = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
- Ok(sqfs_data_reader_create(*node.container.file, node.container.superblock.block_size as u64, *compressor, 0))
- }, "Couldn't create data reader")?, sfs_destroy);
- unsafe { sfs_check(sqfs_data_reader_load_fragment_table(*reader, &*node.container.superblock), "Couldn't load fragment table")? };
- Ok(Self { node: node, compressor: compressor, reader: Mutex::new(reader), offset: Mutex::new(0) })
- }
-
- pub fn size(&self) -> u64 {
- let mut ret: u64 = 0;
- unsafe { sqfs_inode_get_file_size(self.node.inode.as_const(), &mut ret) };
- ret
- }
-
- pub fn to_bytes(&mut self) -> Result<Vec<u8>> {
- let mut ret = Vec::with_capacity(self.size() as usize);
- self.read_to_end(&mut ret)?;
- Ok(ret)
- }
-
- pub fn to_string(&mut self) -> Result<String> {
- let mut ret = String::with_capacity(self.size() as usize);
- self.read_to_string(&mut ret)?;
- Ok(ret)
- }
-}
-
-impl<'a> Read for File<'a> {
- fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
- let mut locked_offset = self.offset.lock().expect(LOCK_ERR);
- if *locked_offset >= self.size() { Ok(0) }
- else {
- let locked_reader = self.reader.lock().expect(LOCK_ERR);
- let res = unsafe { sfs_check(sqfs_data_reader_read(**locked_reader, self.node.inode.as_const(), *locked_offset, buf.as_mut_ptr() as *mut libc::c_void, buf.len() as u32), "Couldn't read file content").map_err(|e| io::Error::new(io::ErrorKind::Other, e))? };
- *locked_offset += res as u64;
- Ok(res as usize)
- }
- }
-}
-
-impl<'a> Seek for File<'a> {
- fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
- let mut locked_offset = self.offset.lock().expect(LOCK_ERR);
- let newoff = match pos {
- io::SeekFrom::Start(off) => off as i64,
- io::SeekFrom::End(off) => self.size() as i64 + off,
- io::SeekFrom::Current(off) => *locked_offset as i64 + off,
- };
- if newoff < 0 {
- Err(io::Error::new(io::ErrorKind::Other, "Attempted to seek before beginning of file"))
- }
- else {
- *locked_offset = newoff as u64;
- Ok(*locked_offset)
- }
- }
-}
-
-#[derive(Debug)]
-pub enum Data<'a> {
- File(File<'a>),
- Dir(Dir<'a>),
- Symlink(String),
- BlockDev(u32, u32),
- CharDev(u32, u32),
- Fifo,
- Socket,
-}
-
-impl<'a> Data<'a> {
- fn new(node: &'a Node) -> Result<Self> {
- unsafe fn arr_to_string<'a, T>(arr: &bindings::__IncompleteArrayField<T>, len: usize) -> String {
- let slice = std::slice::from_raw_parts(arr.as_ptr() as *const u8, len);
- String::from_utf8_lossy(slice).into_owned()
- }
- fn get_dev_nums(dev: u32) -> (u32, u32) {
- ((dev & 0xfff00) >> 8, (dev & 0xff) | ((dev >> 12) & 0xfff00))
- }
- match unsafe { (***node.inode).base.type_ } as u32 {
- SQFS_INODE_TYPE_SQFS_INODE_DIR | SQFS_INODE_TYPE_SQFS_INODE_EXT_DIR => Ok(Self::Dir(Dir::new(node)?)),
- SQFS_INODE_TYPE_SQFS_INODE_FILE | SQFS_INODE_TYPE_SQFS_INODE_EXT_FILE => Ok(Self::File(File::new(node)?)),
- SQFS_INODE_TYPE_SQFS_INODE_SLINK => Ok(unsafe {
- Self::Symlink(arr_to_string(&(***node.inode).extra, (***node.inode).data.slink.target_size as usize))
- }),
- SQFS_INODE_TYPE_SQFS_INODE_EXT_SLINK => Ok(unsafe {
- Self::Symlink(arr_to_string(&(***node.inode).extra, (***node.inode).data.slink_ext.target_size as usize))
- }),
- SQFS_INODE_TYPE_SQFS_INODE_BDEV => Ok(unsafe {
- let (maj, min) = get_dev_nums((***node.inode).data.dev.devno);
- Self::BlockDev(maj, min)
- }),
- SQFS_INODE_TYPE_SQFS_INODE_EXT_BDEV => Ok(unsafe {
- let (maj, min) = get_dev_nums((***node.inode).data.dev.devno);
- Self::BlockDev(maj, min)
- }),
- SQFS_INODE_TYPE_SQFS_INODE_CDEV => Ok(unsafe {
- let (maj, min) = get_dev_nums((***node.inode).data.dev.devno);
- Self::CharDev(maj, min)
- }),
- SQFS_INODE_TYPE_SQFS_INODE_EXT_CDEV => Ok(unsafe {
- let (maj, min) = get_dev_nums((***node.inode).data.dev.devno);
- Self::CharDev(maj, min)
- }),
- SQFS_INODE_TYPE_SQFS_INODE_FIFO | SQFS_INODE_TYPE_SQFS_INODE_EXT_FIFO => Ok(Self::Fifo),
- SQFS_INODE_TYPE_SQFS_INODE_SOCKET | SQFS_INODE_TYPE_SQFS_INODE_EXT_SOCKET => Ok(Self::Socket),
- _ => Err(SquashfsError::LibraryReturnError("Unsupported inode type".to_string())),
- }
- }
-
- fn name(&self) -> String {
- match self {
- Data::File(_) => "regular file",
- Data::Dir(_) => "directory",
- Data::Symlink(_) => "symbolic link",
- Data::BlockDev(_, _) => "block device",
- Data::CharDev(_, _) => "character device",
- Data::Fifo => "named pipe",
- Data::Socket => "socket",
- }.to_string()
- }
-}
-
-#[repr(u32)]
-#[derive(Clone, Copy)]
-pub enum XattrType {
- User = SQFS_XATTR_TYPE_SQFS_XATTR_USER,
- Trusted = SQFS_XATTR_TYPE_SQFS_XATTR_TRUSTED,
- Security = SQFS_XATTR_TYPE_SQFS_XATTR_SECURITY,
-}
-
-pub struct OwnedFile<'a> {
- handle: OwningHandle<Box<Node<'a>>, Box<File<'a>>>,
-}
-
-impl<'a> Read for OwnedFile<'a> {
- fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
- (*self.handle).read(buf)
- }
-}
-
-impl<'a> Seek for OwnedFile<'a> {
- fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
- (*self.handle).seek(pos)
- }
-}
-
-impl<'a> std::ops::Deref for OwnedFile<'a> {
- type Target = File<'a>;
-
- fn deref(&self) -> &Self::Target {
- self.handle.deref()
- }
-}
-
-impl<'a> std::ops::DerefMut for OwnedFile<'a> {
- fn deref_mut(&mut self) -> &mut Self::Target {
- self.handle.deref_mut()
- }
-}
-
-pub struct OwnedDir<'a> {
- handle: OwningHandle<Box<Node<'a>>, Box<Dir<'a>>>,
-}
-
-impl<'a> std::iter::Iterator for OwnedDir<'a> {
- type Item = Node<'a>;
-
- fn next(&mut self) -> Option<Self::Item> {
- (*self.handle).next()
- }
-}
-
-impl<'a> std::ops::Deref for OwnedDir<'a> {
- type Target = Dir<'a>;
-
- fn deref(&self) -> &Self::Target {
- self.handle.deref()
- }
-}
-
-impl<'a> std::ops::DerefMut for OwnedDir<'a> {
- fn deref_mut(&mut self) -> &mut Self::Target {
- self.handle.deref_mut()
- }
-}
-
-pub struct Node<'a> {
- container: &'a Archive,
- path: Option<PathBuf>,
- inode: Arc<ManagedPointer<sqfs_inode_generic_t>>,
-}
-
-impl<'a> Node<'a> {
- fn new(container: &'a Archive, inode: ManagedPointer<sqfs_inode_generic_t>, path: Option<PathBuf>) -> Result<Self> {
- Ok(Self { container: container, path: path, inode: Arc::new(inode) })
- }
-
- pub fn xattrs(&self, category: XattrType) -> Result<HashMap<Vec<u8>, Vec<u8>>> {
- if self.container.superblock.flags & SQFS_SUPER_FLAGS_SQFS_FLAG_NO_XATTRS as u16 != 0 { Ok(HashMap::new()) }
- else {
- let compressor = self.container.compressor()?;
- let xattr_reader = unsafe {
- let ret = ManagedPointer::new(sqfs_xattr_reader_create(0), sfs_destroy);
- sfs_check(sqfs_xattr_reader_load(*ret, &*self.container.superblock, *self.container.file, *compressor), "Couldn't create xattr reader")?;
- ret
- };
- let mut xattr_idx: u32 = NO_XATTRS;
- unsafe { sfs_check(sqfs_inode_get_xattr_index(self.inode.as_const(), &mut xattr_idx), "Couldn't get xattr index")? };
- let desc = sfs_init(&|x| unsafe {
- sqfs_xattr_reader_get_desc(*xattr_reader, xattr_idx, x)
- }, "Couldn't get xattr descriptor")?;
- let mut ret: HashMap<Vec<u8>, Vec<u8>> = HashMap::new();
- unsafe { sfs_check(sqfs_xattr_reader_seek_kv(*xattr_reader, &desc), "Couldn't seek to xattr location")? };
- for _ in 0..desc.count {
- let prefixlen = unsafe { CStr::from_ptr(sqfs_get_xattr_prefix(category as u32)).to_bytes().len() };
- let key = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
- sqfs_xattr_reader_read_key(*xattr_reader, x)
- }, "Couldn't read xattr key")?, libc_free);
- if unsafe { (**key).type_ } as u32 & SQFS_XATTR_TYPE_SQFS_XATTR_FLAG_OOL != 0 {
- unimplemented!()
- }
- let val = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
- sqfs_xattr_reader_read_value(*xattr_reader, *key, x)
- }, "Couldn't read xattr value")?, libc_free);
- if unsafe { (**key).type_ } as u32 & SQFS_XATTR_TYPE_SQFS_XATTR_PREFIX_MASK == category as u32 {
- unsafe {
- let keyvec = (**key).key.as_slice((**key).size as usize + prefixlen)[prefixlen..].to_vec();
- let valvec = (**val).value.as_slice((**val).size as usize).to_vec();
- ret.insert(keyvec, valvec);
- }
- }
- }
- Ok(ret)
- }
- }
-
- pub fn id(&self) -> u32 {
- unsafe { (***self.inode).base.inode_number }
- }
-
- pub fn data(&self) -> Result<Data> {
- Data::new(&self)
- }
-
- pub fn path(&self) -> Option<&Path> {
- self.path.as_ref().map(|path| path.as_path())
- }
-
- fn path_string(&self) -> String {
- match &self.path {
- Some(path) => path.display().to_string(), //path_to_string(&path),
- None => "<unknown>".to_string(),
- }
- }
-
- pub fn name(&self) -> Option<String> {
- self.path.as_ref().map(|path| path.file_name().map(|x| x.to_string_lossy().to_string()).unwrap_or("/".to_string()))
- }
-
- pub fn parent(&self) -> Result<Self> {
- self.path.as_ref().map(|path| {
- let ppath = path.parent().unwrap_or(&Path::new(""));
- self.container.get(&path_to_string(ppath)?)
- }).ok_or(SquashfsError::NoPath)?
- }
-
- pub fn resolve(&self) -> Result<Self> {
- let mut visited = HashSet::new();
- let mut cur = Box::new(self.clone());
- let mut i = 0;
- loop {
- match cur.data()? {
- Data::Symlink(targetstr) => {
- let rawtarget = PathBuf::from(targetstr);
- let target = match cur.path {
- Some(path) => path.parent().unwrap_or(&Path::new("")).join(rawtarget),
- None => match rawtarget.is_absolute() {
- true => rawtarget,
- false => Err(SquashfsError::NoPath)?,
- }
- };
- if !visited.insert(target.clone()) {
- return Err(SquashfsError::LinkLoop(target));
- }
- cur = Box::new(cur.container.get_path(&target)?);
- }
- _ => return Ok(*cur),
- }
- i += 1;
- if i > LINK_MAX { Err(SquashfsError::LinkChain(LINK_MAX))?; }
- }
- }
-
- pub fn as_file(&self) -> Result<File> {
- match self.data()? {
- Data::File(f) => Ok(f),
- other => Err(SquashfsError::WrongType(self.path_string(), other.name(), "regular file".to_string())),
- }
- }
-
- pub fn into_owned_file(self) -> Result<OwnedFile<'a>> {
- Ok(OwnedFile { handle: OwningHandle::try_new(Box::new(self), |x| unsafe { (*x).as_file().map(|x| Box::new(x)) })? })
- }
-
- pub fn as_dir(&self) -> Result<Dir> {
- match self.data()? {
- Data::Dir(d) => Ok(d),
- other => Err(SquashfsError::WrongType(self.path_string(), other.name(), "directory".to_string())),
- }
- }
-
- pub fn into_owned_dir(self) -> Result<OwnedDir<'a>> {
- Ok(OwnedDir { handle: OwningHandle::try_new(Box::new(self), |x| unsafe { (*x).as_dir().map(|x| Box::new(x)) })? })
- }
-}
-
-impl<'a> std::clone::Clone for Node<'a> {
- fn clone(&self) -> Self {
- Self { container: self.container, path: self.path.clone(), inode: self.inode.clone() }
- }
-}
-
-impl<'a> std::fmt::Display for Node<'a> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "{} at {}", self.data().map(|x| x.name()).unwrap_or("inaccessible file".to_string()), self.path_string())
- }
-}
-
-impl<'a> std::fmt::Debug for Node<'a> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "Node({:?})", self.path)
- }
-}
-
-pub struct Archive {
- file: ManagedPointer<sqfs_file_t>,
- superblock: Box<sqfs_super_t>,
- compressor_config: Box<sqfs_compressor_config_t>,
-}
-
-impl Archive {
- pub fn new(path: &Path) -> Result<Self> {
- let cpath = CString::new(path_to_string(&path)?)?;
- let file = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
- Ok(sqfs_open_file(cpath.as_ptr(), SQFS_FILE_OPEN_FLAGS_SQFS_FILE_OPEN_READ_ONLY))
- }, &format!("Couldn't open input file {}", path.display()))?, sfs_destroy);
- let superblock = Box::new(sfs_init(&|x| unsafe {
- sqfs_super_read(x, *file)
- }, "Couldn't read archive superblock")?);
- let compressor_config = Box::new(sfs_init(&|x| unsafe {
- sqfs_compressor_config_init(x, superblock.compression_id as u32, superblock.block_size as u64, SQFS_COMP_FLAG_SQFS_COMP_FLAG_UNCOMPRESS as u16)
- }, "Couldn't read archive compressor config")?);
- Ok(Self { file: file, superblock: superblock, compressor_config: compressor_config })
- }
-
- fn compressor(&self) -> Result<ManagedPointer<sqfs_compressor_t>> {
- Ok(ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
- sqfs_compressor_create(&*self.compressor_config, x)
- }, "Couldn't create compressor")?, sfs_destroy))
- }
-
- pub fn size(&self) -> u32 {
- self.superblock.inode_count
- }
-
- pub fn get_path(&self, path: &Path) -> Result<Node> {
- let compressor = self.compressor()?;
- let dir_reader = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
- Ok(sqfs_dir_reader_create(&*self.superblock, *compressor, *self.file, 0))
- }, "Couldn't create directory reader")?, sfs_destroy);
- let root = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
- sqfs_dir_reader_get_root_inode(*dir_reader, x)
- }, "Couldn't get filesystem root")?, libc_free);
- let pathbuf = dumb_canonicalize(path);
- if &pathbuf == Path::new("/") {
- Node::new(&self, root, Some(pathbuf))
- }
- else {
- let cpath = CString::new(path_to_string(&pathbuf)?)?;
- let inode = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
- sqfs_dir_reader_find_by_path(*dir_reader, *root, cpath.as_ptr(), x)
- }, &format!("Unable to access path {}", path.display()))?, libc_free);
- Node::new(&self, inode, Some(pathbuf))
- }
- }
-
- pub fn get_id(&self, id: u64) -> Result<Node> {
- if self.superblock.flags & SQFS_SUPER_FLAGS_SQFS_FLAG_EXPORTABLE as u16 == 0 { Err(SquashfsError::Unsupported("inode indexing".to_string()))?; }
- if id <= 0 || id > self.superblock.inode_count as u64 { Err(SquashfsError::Range(id, self.superblock.inode_count as u64))? }
- let compressor = self.compressor()?;
- let export_reader = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
- Ok(sqfs_meta_reader_create(*self.file, *compressor, 0, self.superblock.bytes_used)) // It would be nice to be able to set reasonable limits here.
- }, "Couldn't create export table reader")?, sfs_destroy);
- let (block, offset) = ((id - 1) * 8 / self.superblock.block_size as u64, (id - 1) * 8 % self.superblock.block_size as u64);
- let block_start: u64 = sfs_init(&|x| unsafe {
- let read_at = (**self.file).read_at.expect("File object does not implement read_at");
- read_at(*self.file, self.superblock.export_table_start + block, x as *mut libc::c_void, 8)
- }, "Couldn't read inode table")?;
-
- let mut noderef: u64 = 0;
- unsafe {
- sfs_check(sqfs_meta_reader_seek(*export_reader, block_start, offset), "Couldn't seek to inode reference")?;
- sfs_check(sqfs_meta_reader_read(*export_reader, &mut noderef as *mut u64 as *mut libc::c_void, 8), "Couldn't read inode reference")?;
- }
- let (block, offset) = (noderef >> 16 & 0xffffffff, noderef & 0xffff);
- let inode = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
- sqfs_meta_reader_read_inode(*export_reader, &*self.superblock, block as u64, offset as u64, x)
- }, "Couldn't read inode")?, libc_free);
- Node::new(&self, inode, None)
- }
-
- pub fn get(&self, path: &str) -> Result<Node> {
- self.get_path(Path::new(path))
- }
-}
-
-unsafe impl Send for Archive { }
-unsafe impl Sync for Archive { }
diff --git a/src/read.rs b/src/read.rs
new file mode 100644
index 0000000..6ba57f4
--- /dev/null
+++ b/src/read.rs
@@ -0,0 +1,587 @@
+use std::collections::{HashMap, HashSet};
+use std::ffi::{CStr, CString, OsStr, OsString};
+use std::io;
+use std::io::{Read, Seek};
+use std::path::{Path, PathBuf, Component};
+use std::os::unix::io::AsRawFd; // TODO Is there a way to mmap cross-platform?
+use std::sync::{Arc, Mutex};
+use bindings::*;
+use super::*;
+use mmap::{MemoryMap, MapOption};
+use owning_ref::OwningHandle;
+use thiserror::Error;
+
+// Canonicalize without requiring the path to actually exist in the filesystem
+fn dumb_canonicalize(path: &Path) -> PathBuf {
+ let mut ret = PathBuf::new();
+ for part in path.components() {
+ match part {
+ Component::Prefix(_) => panic!("What is this, Windows?"),
+ Component::CurDir => (),
+ Component::RootDir => ret.clear(),
+ Component::ParentDir => { ret.pop(); },
+ Component::Normal(p) => ret.push(p),
+ }
+ }
+ ret
+}
+
+#[derive(Debug)]
+pub struct Dir<'a> {
+ node: &'a Node<'a>,
+ compressor: ManagedPointer<sqfs_compressor_t>,
+ reader: Mutex<ManagedPointer<sqfs_dir_reader_t>>,
+}
+
+impl<'a> Dir<'a> {
+ fn new(node: &'a Node) -> Result<Self> {
+ let compressor = node.container.compressor()?;
+ let reader = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ sqfs_dir_reader_create(&node.container.superblock, *compressor, *node.container.file, 0)
+ }, "Couldn't create directory reader")?, sfs_destroy);
+ unsafe { sfs_check(sqfs_dir_reader_open_dir(*reader, node.inode.as_const(), 0), "Couldn't open directory")?; }
+ Ok(Self { node: node, compressor: compressor, reader: Mutex::new(reader) })
+ }
+
+ pub fn reset(&mut self) {
+ unsafe { sqfs_dir_reader_rewind(**self.reader.lock().expect(LOCK_ERR)); }
+ }
+
+ fn read<'b>(&'b self) -> Result<Node<'a>> {
+ let locked_reader = self.reader.lock().expect(LOCK_ERR);
+ let entry = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+ sqfs_dir_reader_read(**locked_reader, x)
+ }, "Couldn't read directory entries")?, libc_free);
+ let name_bytes = unsafe { (**entry).name.as_slice((**entry).size as usize + 1) };
+ let name = String::from_utf8(name_bytes.to_vec())?;
+ let node = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+ sqfs_dir_reader_get_inode(**locked_reader, x)
+ }, "Couldn't read directory entry inode")?, libc_free);
+ Node::new(self.node.container, node, self.node.path.as_ref().map(|path| path.join(name)))
+ }
+
+ pub fn child(&self, name: &str) -> Result<Node> {
+ unsafe { sfs_check(sqfs_dir_reader_find(**self.reader.lock().expect(LOCK_ERR), CString::new(name)?.as_ptr()), &format!("Couldn't find child \"{}\"", name))? };
+ self.read()
+ }
+}
+
+impl<'a> std::iter::Iterator for Dir<'a> {
+ type Item = Node<'a>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.read().ok()
+ }
+}
+
+pub struct File<'a> {
+ node: &'a Node<'a>,
+ compressor: ManagedPointer<sqfs_compressor_t>,
+ reader: Mutex<ManagedPointer<sqfs_data_reader_t>>,
+ offset: Mutex<u64>,
+ mmap: Option<(std::fs::File, MemoryMap)>, // TODO Probably not thread-safe
+}
+
+impl<'a> File<'a> {
+ fn new(node: &'a Node) -> Result<Self> {
+ let compressor = node.container.compressor()?;
+ let reader = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+ sqfs_data_reader_create(*node.container.file, node.container.superblock.block_size as u64, *compressor, 0)
+ }, "Couldn't create data reader")?, sfs_destroy);
+ unsafe { sfs_check(sqfs_data_reader_load_fragment_table(*reader, &node.container.superblock), "Couldn't load fragment table")? };
+ Ok(Self { node: node, compressor: compressor, reader: Mutex::new(reader), offset: Mutex::new(0), mmap: None })
+ }
+
+ pub fn size(&self) -> u64 {
+ let mut ret: u64 = 0;
+ unsafe { sqfs_inode_get_file_size(self.node.inode.as_const(), &mut ret) };
+ ret
+ }
+
+ pub fn to_bytes(&mut self) -> Result<Vec<u8>> {
+ let mut ret = Vec::with_capacity(self.size() as usize);
+ self.read_to_end(&mut ret)?;
+ Ok(ret)
+ }
+
+ pub fn to_string(&mut self) -> Result<String> {
+ let mut ret = String::with_capacity(self.size() as usize);
+ self.read_to_string(&mut ret)?;
+ Ok(ret)
+ }
+
+ pub fn mmap<'b>(&'b mut self) -> Result<Option<&'b [u8]>> {
+ let inode = unsafe { &***self.node.inode };
+ let (start, frag_idx) = unsafe {
+ match inode.base.type_ as u32 {
+ SQFS_INODE_TYPE_SQFS_INODE_FILE => (inode.data.file.blocks_start as u64, inode.data.file.fragment_index),
+ SQFS_INODE_TYPE_SQFS_INODE_EXT_FILE => (inode.data.file_ext.blocks_start, inode.data.file_ext.fragment_idx),
+ _ => panic!("File is not a file")
+ }
+ };
+ let block_count = unsafe { inode.payload_bytes_used / std::mem::size_of::<sqfs_u32>() as u32 };
+ println!("File starts at byte {} ({})", start, MemoryMap::granularity());
+ if block_count == 0 || start == 0 || frag_idx != 0xffffffff { return Ok(None); }
+ let block_sizes = unsafe { inode.extra.as_slice(block_count as usize) };
+ if block_sizes.iter().any(|x| x & 0x00800000 == 0) { return Ok(None); }
+ if self.mmap.is_none() {
+ let file = std::fs::File::open(&self.node.container.path)?;
+ let map = MemoryMap::new(self.size() as usize, &vec![MapOption::MapReadable, MapOption::MapFd(file.as_raw_fd()), MapOption::MapOffset(start as usize)])?;
+ self.mmap = Some((file, map));
+ }
+ let map = &self.mmap.as_ref().expect("Just-filled mmap is empty").1;
+ println!("{:?} bytes at {:?}", map.len(), map.data());
+ unsafe { Ok(Some(std::slice::from_raw_parts(map.data(), map.len()))) }
+ }
+}
+
+impl<'a> Read for File<'a> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let mut locked_offset = self.offset.lock().expect(LOCK_ERR);
+ if *locked_offset >= self.size() { Ok(0) }
+ else {
+ let locked_reader = self.reader.lock().expect(LOCK_ERR);
+ let res = unsafe { sfs_check(sqfs_data_reader_read(**locked_reader, self.node.inode.as_const(), *locked_offset, buf.as_mut_ptr() as *mut libc::c_void, buf.len() as u32), "Couldn't read file content").map_err(|e| io::Error::new(io::ErrorKind::Other, e))? };
+ *locked_offset += res as u64;
+ Ok(res as usize)
+ }
+ }
+}
+
+impl<'a> Seek for File<'a> {
+ fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
+ let mut locked_offset = self.offset.lock().expect(LOCK_ERR);
+ let newoff = match pos {
+ io::SeekFrom::Start(off) => off as i64,
+ io::SeekFrom::End(off) => self.size() as i64 + off,
+ io::SeekFrom::Current(off) => *locked_offset as i64 + off,
+ };
+ if newoff < 0 {
+ Err(io::Error::new(io::ErrorKind::Other, "Attempted to seek before beginning of file"))
+ }
+ else {
+ *locked_offset = newoff as u64;
+ Ok(*locked_offset)
+ }
+ }
+}
+
+impl<'a> std::fmt::Debug for File<'a> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "File at {:?}", self.node)
+ }
+}
+
+#[derive(Debug)]
+pub enum Data<'a> {
+ File(File<'a>),
+ Dir(Dir<'a>),
+ Symlink(String),
+ BlockDev(u32, u32),
+ CharDev(u32, u32),
+ Fifo,
+ Socket,
+}
+
+impl<'a> Data<'a> {
+ fn new(node: &'a Node) -> Result<Self> {
+ unsafe fn arr_to_string<'a, T>(arr: &bindings::__IncompleteArrayField<T>, len: usize) -> String {
+ let slice = std::slice::from_raw_parts(arr.as_ptr() as *const u8, len);
+ String::from_utf8_lossy(slice).into_owned()
+ }
+ fn get_dev_nums(dev: u32) -> (u32, u32) {
+ ((dev & 0xfff00) >> 8, (dev & 0xff) | ((dev >> 12) & 0xfff00))
+ }
+ match unsafe { (***node.inode).base.type_ } as u32 {
+ SQFS_INODE_TYPE_SQFS_INODE_DIR | SQFS_INODE_TYPE_SQFS_INODE_EXT_DIR => Ok(Self::Dir(Dir::new(node)?)),
+ SQFS_INODE_TYPE_SQFS_INODE_FILE | SQFS_INODE_TYPE_SQFS_INODE_EXT_FILE => Ok(Self::File(File::new(node)?)),
+ SQFS_INODE_TYPE_SQFS_INODE_SLINK => Ok(unsafe {
+ Self::Symlink(arr_to_string(&(***node.inode).extra, (***node.inode).data.slink.target_size as usize))
+ }),
+ SQFS_INODE_TYPE_SQFS_INODE_EXT_SLINK => Ok(unsafe {
+ Self::Symlink(arr_to_string(&(***node.inode).extra, (***node.inode).data.slink_ext.target_size as usize))
+ }),
+ SQFS_INODE_TYPE_SQFS_INODE_BDEV => Ok(unsafe {
+ let (maj, min) = get_dev_nums((***node.inode).data.dev.devno);
+ Self::BlockDev(maj, min)
+ }),
+ SQFS_INODE_TYPE_SQFS_INODE_EXT_BDEV => Ok(unsafe {
+ let (maj, min) = get_dev_nums((***node.inode).data.dev_ext.devno);
+ Self::BlockDev(maj, min)
+ }),
+ SQFS_INODE_TYPE_SQFS_INODE_CDEV => Ok(unsafe {
+ let (maj, min) = get_dev_nums((***node.inode).data.dev.devno);
+ Self::CharDev(maj, min)
+ }),
+ SQFS_INODE_TYPE_SQFS_INODE_EXT_CDEV => Ok(unsafe {
+ let (maj, min) = get_dev_nums((***node.inode).data.dev_ext.devno);
+ Self::CharDev(maj, min)
+ }),
+ SQFS_INODE_TYPE_SQFS_INODE_FIFO | SQFS_INODE_TYPE_SQFS_INODE_EXT_FIFO => Ok(Self::Fifo),
+ SQFS_INODE_TYPE_SQFS_INODE_SOCKET | SQFS_INODE_TYPE_SQFS_INODE_EXT_SOCKET => Ok(Self::Socket),
+ _ => Err(SquashfsError::LibraryReturnError("Unsupported inode type".to_string())),
+ }
+ }
+
+ fn name(&self) -> String {
+ match self {
+ Data::File(_) => "regular file",
+ Data::Dir(_) => "directory",
+ Data::Symlink(_) => "symbolic link",
+ Data::BlockDev(_, _) => "block device",
+ Data::CharDev(_, _) => "character device",
+ Data::Fifo => "named pipe",
+ Data::Socket => "socket",
+ }.to_string()
+ }
+}
+
+/// Namespace of an extended attribute, mirroring the `SQFS_XATTR_TYPE_*`
+/// constants from libsquashfs.
+#[repr(u32)]
+#[derive(Clone, Copy)]
+pub enum XattrType {
+	User = SQFS_XATTR_TYPE_SQFS_XATTR_USER,
+	Trusted = SQFS_XATTR_TYPE_SQFS_XATTR_TRUSTED,
+	Security = SQFS_XATTR_TYPE_SQFS_XATTR_SECURITY,
+}
+
+/// A `File` that owns the `Node` it was opened from, so the caller does not
+/// have to keep the `Node` alive separately.  Created by
+/// `Node::into_owned_file`.
+pub struct OwnedFile<'a> {
+	handle: OwningHandle<Box<Node<'a>>, Box<File<'a>>>,
+}
+
+impl<'a> Read for OwnedFile<'a> {
+	// Delegate to the wrapped `File`'s Read implementation.
+	fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+		(*self.handle).read(buf)
+	}
+}
+
+impl<'a> Seek for OwnedFile<'a> {
+	// Delegate to the wrapped `File`'s Seek implementation.
+	fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
+		(*self.handle).seek(pos)
+	}
+}
+
+// Allow an `OwnedFile` to be used anywhere a `&File` is expected.
+impl<'a> std::ops::Deref for OwnedFile<'a> {
+	type Target = File<'a>;
+
+	fn deref(&self) -> &Self::Target {
+		self.handle.deref()
+	}
+}
+
+// Mutable counterpart of the Deref impl above.
+impl<'a> std::ops::DerefMut for OwnedFile<'a> {
+	fn deref_mut(&mut self) -> &mut Self::Target {
+		self.handle.deref_mut()
+	}
+}
+
+/// A `Dir` that owns the `Node` it was opened from, so the caller does not
+/// have to keep the `Node` alive separately.  Created by
+/// `Node::into_owned_dir`.
+pub struct OwnedDir<'a> {
+	handle: OwningHandle<Box<Node<'a>>, Box<Dir<'a>>>,
+}
+
+// Iterating an `OwnedDir` yields the directory's child nodes, delegating to
+// the wrapped `Dir`.
+impl<'a> std::iter::Iterator for OwnedDir<'a> {
+	type Item = Node<'a>;
+
+	fn next(&mut self) -> Option<Self::Item> {
+		(*self.handle).next()
+	}
+}
+
+// Allow an `OwnedDir` to be used anywhere a `&Dir` is expected.
+impl<'a> std::ops::Deref for OwnedDir<'a> {
+	type Target = Dir<'a>;
+
+	fn deref(&self) -> &Self::Target {
+		self.handle.deref()
+	}
+}
+
+// Mutable counterpart of the Deref impl above.
+impl<'a> std::ops::DerefMut for OwnedDir<'a> {
+	fn deref_mut(&mut self) -> &mut Self::Target {
+		self.handle.deref_mut()
+	}
+}
+
+/// A single entry (file, directory, device node, ...) within an `Archive`.
+pub struct Node<'a> {
+	container: &'a Archive,
+	// None when the node was reached by inode ID rather than by path
+	// (see `Archive::get_id`).
+	path: Option<PathBuf>,
+	// Shared so that cloning a Node does not re-read the inode.
+	inode: Arc<ManagedPointer<sqfs_inode_generic_t>>,
+}
+
+impl<'a> Node<'a> {
+	/// Wrap a raw libsquashfs inode in a `Node` bound to its containing archive.
+	fn new(container: &'a Archive, inode: ManagedPointer<sqfs_inode_generic_t>, path: Option<PathBuf>) -> Result<Self> {
+		Ok(Self { container: container, path: path, inode: Arc::new(inode) })
+	}
+
+	/// Read this node's extended attributes in the given namespace,
+	/// returning raw key/value byte pairs with the namespace prefix
+	/// stripped from each key.
+	pub fn xattrs(&self, category: XattrType) -> Result<HashMap<Vec<u8>, Vec<u8>>> {
+		// Archives written with the no-xattrs flag contain none by definition.
+		if self.container.superblock.flags & SQFS_SUPER_FLAGS_SQFS_FLAG_NO_XATTRS as u16 != 0 { Ok(HashMap::new()) }
+		else {
+			let compressor = self.container.compressor()?;
+			let xattr_reader = unsafe {
+				let ret = ManagedPointer::new(sqfs_xattr_reader_create(0), sfs_destroy);
+				sfs_check(sqfs_xattr_reader_load(*ret, &self.container.superblock, *self.container.file, *compressor), "Couldn't create xattr reader")?;
+				ret
+			};
+			let mut xattr_idx: u32 = NO_XATTRS;
+			unsafe { sfs_check(sqfs_inode_get_xattr_index(self.inode.as_const(), &mut xattr_idx), "Couldn't get xattr index")? };
+			let desc = sfs_init(&|x| unsafe {
+				sqfs_xattr_reader_get_desc(*xattr_reader, xattr_idx, x)
+			}, "Couldn't get xattr descriptor")?;
+			let mut ret: HashMap<Vec<u8>, Vec<u8>> = HashMap::new();
+			unsafe { sfs_check(sqfs_xattr_reader_seek_kv(*xattr_reader, &desc), "Couldn't seek to xattr location")? };
+			for _ in 0..desc.count {
+				// Length of the namespace prefix (e.g. "user.") that is
+				// stored at the start of every key in this category.
+				let prefixlen = unsafe { CStr::from_ptr(sqfs_get_xattr_prefix(category as u32)).to_bytes().len() };
+				let key = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+					sqfs_xattr_reader_read_key(*xattr_reader, x)
+				}, "Couldn't read xattr key")?, libc_free);
+				// Out-of-line xattr values are not handled yet.
+				if unsafe { (**key).type_ } as u32 & SQFS_XATTR_TYPE_SQFS_XATTR_FLAG_OOL != 0 {
+					unimplemented!()
+				}
+				let val = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+					sqfs_xattr_reader_read_value(*xattr_reader, *key, x)
+				}, "Couldn't read xattr value")?, libc_free);
+				// Only keep entries belonging to the requested namespace.
+				if unsafe { (**key).type_ } as u32 & SQFS_XATTR_TYPE_SQFS_XATTR_PREFIX_MASK == category as u32 {
+					unsafe {
+						let keyvec = (**key).key.as_slice((**key).size as usize + prefixlen)[prefixlen..].to_vec();
+						let valvec = (**val).value.as_slice((**val).size as usize).to_vec();
+						ret.insert(keyvec, valvec);
+					}
+				}
+			}
+			Ok(ret)
+		}
+	}
+
+	/// This node's inode number within the archive.
+	pub fn id(&self) -> u32 {
+		unsafe { (***self.inode).base.inode_number }
+	}
+
+	/// Decode the type-specific contents of this node.
+	pub fn data(&self) -> Result<Data> {
+		Data::new(&self)
+	}
+
+	/// The path this node was reached by, if any (nodes looked up by inode
+	/// ID have no path).
+	pub fn path(&self) -> Option<&Path> {
+		self.path.as_ref().map(|path| path.as_path())
+	}
+
+	// Display form of the path, used in error and Display messages.
+	fn path_string(&self) -> String {
+		match &self.path {
+			Some(path) => path.display().to_string(),
+			None => "<unknown>".to_string(),
+		}
+	}
+
+	/// The final path component, or "/" when the path has no file name
+	/// (the root); `None` if this node has no known path.
+	pub fn name(&self) -> Option<String> {
+		self.path.as_ref().map(|path| path.file_name().map(|x| x.to_string_lossy().to_string()).unwrap_or("/".to_string()))
+	}
+
+	/// Look up this node's parent directory; fails with `NoPath` when this
+	/// node's path is unknown.
+	pub fn parent(&self) -> Result<Self> {
+		self.path.as_ref().map(|path| {
+			let ppath = path.parent().unwrap_or(&Path::new(""));
+			self.container.get(&os_to_string(ppath.as_os_str())?)
+		}).ok_or(SquashfsError::NoPath)?
+	}
+
+	/// Follow symlinks until a non-link node is reached, detecting loops
+	/// (`LinkLoop`) and over-long chains (`LinkChain` after `LINK_MAX` hops).
+	pub fn resolve(&self) -> Result<Self> {
+		let mut visited = HashSet::new();
+		let mut cur = Box::new(self.clone());
+		let mut i = 0;
+		loop {
+			match cur.data()? {
+				Data::Symlink(targetstr) => {
+					// Relative targets are resolved against the link's parent
+					// directory; absolute targets stand alone.
+					let rawtarget = PathBuf::from(targetstr);
+					let target = match cur.path {
+						Some(path) => path.parent().unwrap_or(&Path::new("")).join(rawtarget),
+						None => match rawtarget.is_absolute() {
+							true => rawtarget,
+							false => Err(SquashfsError::NoPath)?,
+						}
+					};
+					if !visited.insert(target.clone()) {
+						return Err(SquashfsError::LinkLoop(target));
+					}
+					cur = Box::new(cur.container.get_path(&target)?);
+				}
+				_ => return Ok(*cur),
+			}
+			i += 1;
+			if i > LINK_MAX { Err(SquashfsError::LinkChain(LINK_MAX))?; }
+		}
+	}
+
+	/// View this node as a regular file, or fail with `WrongType`.
+	pub fn as_file(&self) -> Result<File> {
+		match self.data()? {
+			Data::File(f) => Ok(f),
+			other => Err(SquashfsError::WrongType(self.path_string(), other.name(), "regular file".to_string())),
+		}
+	}
+
+	/// Consume the node, returning a `File` that keeps the node alive.
+	pub fn into_owned_file(self) -> Result<OwnedFile<'a>> {
+		Ok(OwnedFile { handle: OwningHandle::try_new(Box::new(self), |x| unsafe { (*x).as_file().map(|x| Box::new(x)) })? })
+	}
+
+	/// View this node as a directory, or fail with `WrongType`.
+	pub fn as_dir(&self) -> Result<Dir> {
+		match self.data()? {
+			Data::Dir(d) => Ok(d),
+			other => Err(SquashfsError::WrongType(self.path_string(), other.name(), "directory".to_string())),
+		}
+	}
+
+	/// Consume the node, returning a `Dir` that keeps the node alive.
+	pub fn into_owned_dir(self) -> Result<OwnedDir<'a>> {
+		Ok(OwnedDir { handle: OwningHandle::try_new(Box::new(self), |x| unsafe { (*x).as_dir().map(|x| Box::new(x)) })? })
+	}
+
+	/// Owner UID, resolved through the archive's ID table.
+	pub fn uid(&self) -> Result<u32> {
+		let idx = unsafe { (***self.inode).base.uid_idx };
+		self.container.id_lookup(idx)
+	}
+
+	/// Owner GID, resolved through the archive's ID table.
+	pub fn gid(&self) -> Result<u32> {
+		let idx = unsafe { (***self.inode).base.gid_idx };
+		self.container.id_lookup(idx)
+	}
+
+	/// Unix permission bits.
+	pub fn mode(&self) -> u16 {
+		unsafe { (***self.inode).base.mode }
+	}
+
+	/// Modification time, in seconds since the Unix epoch.
+	pub fn mtime(&self) -> u32 {
+		unsafe { (***self.inode).base.mod_time }
+	}
+}
+
+impl<'a> std::clone::Clone for Node<'a> {
+	/// Cheap clone: the underlying inode is shared via `Arc`, so only the
+	/// optional path buffer is deep-copied.
+	fn clone(&self) -> Self {
+		Node {
+			container: self.container,
+			path: self.path.clone(),
+			inode: Arc::clone(&self.inode),
+		}
+	}
+}
+
+impl<'a> std::fmt::Display for Node<'a> {
+	// Renders e.g. "regular file at /bin/ls"; nodes whose data can't be
+	// decoded render as "inaccessible file", unknown paths as "<unknown>".
+	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+		write!(f, "{} at {}", self.data().map(|x| x.name()).unwrap_or("inaccessible file".to_string()), self.path_string())
+	}
+}
+
+impl<'a> std::fmt::Debug for Node<'a> {
+	// Debug output shows only the (optional) path, not the raw inode.
+	fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+		write!(f, "Node({:?})", self.path)
+	}
+}
+
+/// A squashfs archive opened for reading.
+pub struct Archive {
+	path: PathBuf,
+	file: ManagedPointer<sqfs_file_t>,
+	// Parsed superblock; consulted for flags, table locations, and sizes.
+	superblock: sqfs_super_t,
+	// Used to construct a fresh decompressor per operation (see compressor()).
+	compressor_config: sqfs_compressor_config_t,
+}
+
+impl Archive {
+	/// Open the squashfs archive at `path` read-only, parsing its
+	/// superblock and compressor configuration.
+	pub fn new<T: AsRef<Path>>(path: T) -> Result<Self> {
+		let cpath = CString::new(os_to_string(path.as_ref().as_os_str())?)?;
+		let file = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_open_file(cpath.as_ptr(), SQFS_FILE_OPEN_FLAGS_SQFS_FILE_OPEN_READ_ONLY)
+		}, &format!("Couldn't open input file {}", path.as_ref().display()))?, sfs_destroy);
+		let superblock = sfs_init(&|x| unsafe {
+			sqfs_super_read(x, *file)
+		}, "Couldn't read archive superblock")?;
+		let compressor_config = sfs_init(&|x| unsafe {
+			sqfs_compressor_config_init(x, superblock.compression_id as u32, superblock.block_size as u64, SQFS_COMP_FLAG_SQFS_COMP_FLAG_UNCOMPRESS as u16)
+		}, "Couldn't read archive compressor config")?;
+		Ok(Self { path: path.as_ref().to_path_buf(), file: file, superblock: superblock, compressor_config: compressor_config })
+	}
+
+	// Build a decompressor matching this archive's compressor config.
+	fn compressor(&self) -> Result<ManagedPointer<sqfs_compressor_t>> {
+		Ok(ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+			sqfs_compressor_create(&self.compressor_config, x)
+		}, "Couldn't create compressor")?, sfs_destroy))
+	}
+
+	// Create a metadata reader restricted to `bounds` (start, size), or to
+	// the whole archive when no bounds are given.
+	fn meta_reader(&self, compressor: &ManagedPointer<sqfs_compressor_t>, bounds: Option<(u64, u64)>) -> Result<ManagedPointer<sqfs_meta_reader_t>> {
+		let range = bounds.unwrap_or((0, self.superblock.bytes_used));
+		Ok(ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_meta_reader_create(*self.file, **compressor, range.0, range.1)
+		}, "Couldn't create metadata reader")?, sfs_destroy))
+	}
+
+	// Translate an index in the archive's ID table into a UID/GID value.
+	fn id_lookup(&self, idx: u16) -> Result<u32> {
+		// TODO Consider caching the ID table to make lookups more efficient
+		let mut id_table = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_id_table_create(0)
+		}, "Couldn't create ID table")?, sfs_destroy);
+		let compressor = self.compressor()?;
+		unsafe { sfs_check(sqfs_id_table_read(*id_table, *self.file, &self.superblock, *compressor), "Couldn't read ID table")?; }
+		Ok(sfs_init(&|x| unsafe {
+			sqfs_id_table_index_to_id(*id_table, idx, x)
+		}, "Couldn't get ID from ID table")?)
+	}
+
+	/// Total number of inodes in the archive.
+	pub fn size(&self) -> u32 {
+		self.superblock.inode_count
+	}
+
+	/// Look up a node by path within the archive.
+	pub fn get_path<T: AsRef<Path>>(&self, path: T) -> Result<Node> {
+		let compressor = self.compressor()?;
+		let dir_reader = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_dir_reader_create(&self.superblock, *compressor, *self.file, 0)
+		}, "Couldn't create directory reader")?, sfs_destroy);
+		let root = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+			sqfs_dir_reader_get_root_inode(*dir_reader, x)
+		}, "Couldn't get filesystem root")?, libc_free);
+		let pathbuf = dumb_canonicalize(path.as_ref());
+		// The root has no directory entry of its own, so it is special-cased.
+		if &pathbuf == Path::new("/") {
+			Node::new(&self, root, Some(pathbuf))
+		}
+		else {
+			let cpath = CString::new(os_to_string(pathbuf.as_os_str())?)?;
+			let inode = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+				sqfs_dir_reader_find_by_path(*dir_reader, *root, cpath.as_ptr(), x)
+			}, &format!("Unable to access path {}", path.as_ref().display()))?, libc_free);
+			Node::new(&self, inode, Some(pathbuf))
+		}
+	}
+
+	/// Look up a node by its (1-based) inode number via the export table.
+	/// Fails with `Unsupported` unless the archive was written with the
+	/// "exportable" flag, and with `Range` for out-of-range IDs.
+	pub fn get_id(&self, id: u64) -> Result<Node> {
+		if self.superblock.flags & SQFS_SUPER_FLAGS_SQFS_FLAG_EXPORTABLE as u16 == 0 { Err(SquashfsError::Unsupported("inode indexing".to_string()))?; }
+		if id <= 0 || id > self.superblock.inode_count as u64 { Err(SquashfsError::Range(id, self.superblock.inode_count as u64))? }
+		let compressor = self.compressor()?;
+		let export_reader = self.meta_reader(&compressor, None)?; // TODO Would be nice if we could set bounds for this
+		// Each export-table entry is an 8-byte inode reference; locate the
+		// metadata block holding the entry for `id` and the offset within it.
+		let (block, offset) = ((id - 1) * 8 / self.superblock.block_size as u64, (id - 1) * 8 % self.superblock.block_size as u64);
+		let block_start: u64 = sfs_init(&|x| unsafe {
+			let read_at = (**self.file).read_at.expect("File object does not implement read_at");
+			read_at(*self.file, self.superblock.export_table_start + block, x as *mut libc::c_void, 8)
+		}, "Couldn't read inode table")?;
+
+		let mut noderef: u64 = 0;
+		unsafe {
+			sfs_check(sqfs_meta_reader_seek(*export_reader, block_start, offset), "Couldn't seek to inode reference")?;
+			sfs_check(sqfs_meta_reader_read(*export_reader, &mut noderef as *mut u64 as *mut libc::c_void, 8), "Couldn't read inode reference")?;
+		}
+		let (block, offset) = unpack_meta_ref(noderef);
+		let inode = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+			sqfs_meta_reader_read_inode(*export_reader, &self.superblock, block, offset, x)
+		}, "Couldn't read inode")?, libc_free);
+		// No path is known when resolving by ID.
+		Node::new(&self, inode, None)
+	}
+
+	/// Convenience wrapper around `get_path` for string paths.
+	pub fn get(&self, path: &str) -> Result<Node> {
+		self.get_path(Path::new(path))
+	}
+
+	/// Resolve a batch of directory-entry references into entry names.
+	pub fn names_from_dirent_refs(&mut self, dirent_refs: &[u64]) -> Result<Vec<String>> {
+		let compressor = self.compressor()?;
+		let meta_reader = self.meta_reader(&compressor, None)?; // TODO Set bounds
+		let mut ret = Vec::with_capacity(dirent_refs.len());
+		for dirent_ref in dirent_refs {
+			let (block, offset) = unpack_meta_ref(*dirent_ref);
+			unsafe { sfs_check(sqfs_meta_reader_seek(*meta_reader, block, offset), "Couldn't seek to directory entry")?; }
+			let entry = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+				sqfs_meta_reader_read_dir_ent(*meta_reader, x)
+			}, "Couldn't read directory entry by reference")?, libc_free);
+			// NOTE(review): reads size + 1 bytes -- presumably the on-disk
+			// dirent size is stored minus one; confirm against the spec.
+			let name_bytes = unsafe { (**entry).name.as_slice((**entry).size as usize + 1) };
+			ret.push(String::from_utf8(name_bytes.to_vec())?);
+		}
+		Ok(ret)
+	}
+}
+
+// SAFETY(review): asserts that the raw libsquashfs handles held by `Archive`
+// may be moved to and shared between threads.  This is sound only if
+// libsquashfs performs no unsynchronized internal mutation during the read
+// operations used here -- TODO confirm against the library's thread-safety
+// guarantees.
+unsafe impl Send for Archive { }
+unsafe impl Sync for Archive { }
diff --git a/src/write.rs b/src/write.rs
new file mode 100644
index 0000000..23579b2
--- /dev/null
+++ b/src/write.rs
@@ -0,0 +1,368 @@
+use std::cell::RefCell;
+use std::collections::{BTreeMap, HashMap};
+use std::ffi::{CStr, CString, OsStr, OsString};
+use std::io::Read;
+use std::path::Path;
+use std::time::SystemTime;
+use bindings::*;
+use super::*;
+use super::SquashfsError;
+use thiserror::Error;
+use walkdir::{DirEntry, WalkDir};
+
+/// Bit flags controlling how data blocks are written, re-exported from the
+/// `SQFS_BLK_*` constants.  (Module-as-namespace: the names deliberately
+/// mirror the C constants rather than Rust const conventions.)
+pub mod BlockFlags {
+	pub const DontCompress: u32 = super::SQFS_BLK_FLAGS_SQFS_BLK_DONT_COMPRESS;
+	pub const BlockAlign: u32 = super::SQFS_BLK_FLAGS_SQFS_BLK_ALIGN;
+	pub const DontFragment: u32 = super::SQFS_BLK_FLAGS_SQFS_BLK_DONT_FRAGMENT;
+	pub const DontDeduplicate: u32 = super::SQFS_BLK_FLAGS_SQFS_BLK_DONT_DEDUPLICATE;
+	pub const IgnoreSparse: u32 = super::SQFS_BLK_FLAGS_SQFS_BLK_IGNORE_SPARSE;
+	pub const DontHash: u32 = super::SQFS_BLK_FLAGS_SQFS_BLK_DONT_HASH;
+}
+
+/// The type-specific content of a filesystem object to be added to an
+/// archive.
+pub enum SourceData {
+	/// Contents of a regular file, streamed from the reader.
+	File(Box<dyn Read>),
+	/// Directory entries as (name, inode ID) pairs; the IDs must have been
+	/// returned by earlier `Writer::add` calls.
+	Dir(Box<dyn Iterator<Item=(OsString, u32)>>),
+	/// Symlink target path.
+	Symlink(OsString),
+	/// Block device: (major, minor).
+	BlockDev(u32, u32),
+	/// Character device: (major, minor).
+	CharDev(u32, u32),
+	Fifo,
+	Socket,
+}
+
+/// A filesystem object plus the metadata needed to store it in an archive.
+pub struct Source {
+	data: SourceData,
+	// Extended attribute key -> raw value pairs.
+	xattrs: HashMap<OsString, Vec<u8>>,
+	uid: u32,
+	gid: u32,
+	// Permission bits only; the file-type bits come from `data`.
+	mode: u16,
+	// Modification time, seconds since the Unix epoch.
+	modified: u32,
+	// BlockFlags values applied to this object's data blocks.
+	flags: u32,
+}
+
+/// Copy the common base metadata (mode, owner indices, mtime, inode number)
+/// from one inode to another.  Used when a freshly created inode must take
+/// over from an earlier placeholder (e.g. directory inodes in
+/// `Writer::finish`).
+fn copy_metadata(src: &ManagedPointer<sqfs_inode_generic_t>, dst: &mut ManagedPointer<sqfs_inode_generic_t>) {
+	// Placeholder for locating the link-count field of an arbitrary inode;
+	// currently unused and unimplemented.
+	fn nlink_ref(inode: &ManagedPointer<sqfs_inode_generic_t>) -> Option<&u32> {
+		unimplemented!();
+	}
+	let (src_base, dst_base) = unsafe { (&(***src).base, &mut (***dst).base) };
+	dst_base.mode = src_base.mode;
+	dst_base.uid_idx = src_base.uid_idx;
+	dst_base.gid_idx = src_base.gid_idx;
+	dst_base.mod_time = src_base.mod_time;
+	dst_base.inode_number = src_base.inode_number;
+	// TODO xattr_idx, uid, gid, mode, mtime, link_count
+}
+
+impl Source {
+	/// Wrap `data` with the metadata needed to build an inode.
+	///
+	/// NOTE(review): `uid`, `gid`, `mode`, and `modified` are currently
+	/// hard-coded placeholders, and `parent` is accepted but unused --
+	/// confirm before relying on ownership/permissions in the output.
+	pub fn new(data: SourceData, xattrs: HashMap<OsString, Vec<u8>>, parent: u32, flags: u32) -> Self { // TODO Parent not necessary?
+		Self { data: data, xattrs: xattrs, uid: 1000, gid: 1001, mode: 0x1ff, modified: 0, flags: flags }
+	}
+
+	/// Pack major/minor device numbers into the squashfs `devno` encoding.
+	///
+	/// Layout (inverse of `get_dev_nums` on the read side, and the same as
+	/// the Linux `new_encode_dev` scheme): low byte = minor bits 0-7,
+	/// bits 8-19 = major, bits 20-31 = minor bits 8-19.
+	fn devno(maj: u32, min: u32) -> u32 {
+		// Fixed: the high minor bits must be shifted left by 12 (so that the
+		// reader's `(dev >> 12) & 0xfff00` recovers them); the previous
+		// `<< 20` produced values the reader could not decode.
+		((min & 0xfff00) << 12) | ((maj & 0xfff) << 8) | (min & 0xff)
+	}
+
+	// TODO Handle hard links
+	/// Build a libsquashfs inode describing this source.  Regular files get
+	/// only a bare inode here -- their block data is produced separately by
+	/// the block processor (see `Writer::add`).
+	fn to_inode(&self, link_count: u32) -> Result<ManagedPointer<sqfs_inode_generic_t>> {
+		// Allocate a generic inode with `extra` additional payload bytes
+		// (used for the symlink target) and tag it with `kind`.
+		fn create_inode(kind: SQFS_INODE_TYPE, extra: usize) -> ManagedPointer<sqfs_inode_generic_t> {
+			use std::alloc::{alloc, Layout};
+			use std::mem::{align_of, size_of};
+			unsafe {
+				let layout = Layout::from_size_align_unchecked(size_of::<sqfs_inode_generic_t>() + extra, align_of::<sqfs_inode_generic_t>());
+				let ret = alloc(layout) as *mut sqfs_inode_generic_t;
+				(*ret).base.type_ = kind as u16;
+				ManagedPointer::new(ret, rust_dealloc)
+			}
+		}
+		let ret = unsafe {
+			match &self.data {
+				SourceData::File(_) => {
+					let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_FILE, 0);
+					ret
+				},
+				SourceData::Dir(_) => {
+					let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_DIR, 0);
+					(**ret).data.dir.nlink = link_count;
+					ret
+				},
+				SourceData::Symlink(dest_os) => {
+					// The target path is stored inline after the inode struct.
+					let dest = os_to_string(&dest_os)?.into_bytes();
+					let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_SLINK, dest.len());
+					let mut data = &mut (**ret).data.slink;
+					data.nlink = link_count;
+					data.target_size = dest.len() as u32;
+					let dest_field = std::mem::transmute::<_, &mut [u8]>((**ret).extra.as_mut_slice(dest.len()));
+					dest_field.copy_from_slice(dest.as_slice());
+					ret
+				},
+				SourceData::BlockDev(maj, min) => {
+					let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_BDEV, 0);
+					let mut data = &mut (**ret).data.dev;
+					data.nlink = link_count;
+					data.devno = Self::devno(*maj, *min);
+					ret
+				},
+				SourceData::CharDev(maj, min) => {
+					let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_CDEV, 0);
+					let mut data = &mut (**ret).data.dev;
+					data.nlink = link_count;
+					data.devno = Self::devno(*maj, *min);
+					ret
+				},
+				SourceData::Fifo => {
+					let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_FIFO, 0);
+					(**ret).data.ipc.nlink = link_count;
+					ret
+				},
+				SourceData::Socket => {
+					let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_SOCKET, 0);
+					(**ret).data.ipc.nlink = link_count;
+					ret
+				},
+			}
+		};
+		Ok(ret)
+	}
+}
+
+// Bookkeeping for an inode between `Writer::add` and `Writer::finish`.
+struct IntermediateNode {
+	inode: Box<ManagedPointer<sqfs_inode_generic_t>>,
+	// Some for directories until their entries are written in finish().
+	dir_children: Option<Box<dyn Iterator<Item=(OsString, u32)>>>,
+	// Inode reference (metablock position << 16 | offset) once written.
+	pos: u64,
+	parent: u32, // TODO Calculate rather than requiring
+}
+
+/// Incrementally builds a squashfs archive: call `add` for every object
+/// (children before their parent directory, root last), then `finish`.
+pub struct Writer {
+	outfile: ManagedPointer<sqfs_file_t>,
+	compressor_config: sqfs_compressor_config_t,
+	compressor: ManagedPointer<sqfs_compressor_t>,
+	superblock: sqfs_super_t,
+	block_writer: ManagedPointer<sqfs_block_writer_t>,
+	block_processor: ManagedPointer<sqfs_block_processor_t>,
+	frag_table: ManagedPointer<sqfs_frag_table_t>,
+	id_table: ManagedPointer<sqfs_id_table_t>,
+	xattr_writer: ManagedPointer<sqfs_xattr_writer_t>,
+	inode_writer: ManagedPointer<sqfs_meta_writer_t>,
+	dirent_writer: ManagedPointer<sqfs_meta_writer_t>,
+	dir_writer: ManagedPointer<sqfs_dir_writer_t>,
+	// One entry per added object, indexed by inode number - 1.
+	nodes: Vec<RefCell<IntermediateNode>>,
+}
+
+impl Writer {
+	/// Create a writer for a new archive at `path`: opens the output file,
+	/// sets up the compressor, superblock, and all table/metadata writers,
+	/// and writes a provisional superblock.  `finish` must be called to
+	/// produce a valid archive.
+	pub fn open(path: &Path) -> Result<Self> {
+		let cpath = CString::new(os_to_string(path.as_os_str())?)?;
+		let block_size = SQFS_DEFAULT_BLOCK_SIZE as u64;
+		let num_workers = 1;
+		let compressor_id = SQFS_COMPRESSOR_SQFS_COMP_ZSTD;
+		let now = 0; // TODO Get current timestamp
+		let outfile = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_open_file(cpath.as_ptr(), SQFS_FILE_OPEN_FLAGS_SQFS_FILE_OPEN_OVERWRITE)
+		}, &format!("Couldn't open output file {}", path.display()))?, sfs_destroy);
+		let compressor_config = sfs_init(&|x| unsafe {
+			sqfs_compressor_config_init(x, compressor_id, block_size, 0)
+		}, "Couldn't create compressor config")?;
+		let compressor = ManagedPointer::new(sfs_init_ptr(&|x| unsafe {
+			sqfs_compressor_create(&compressor_config, x)
+		}, "Couldn't create compressor")?, sfs_destroy);
+		let superblock = sfs_init(&|x| unsafe {
+			sqfs_super_init(x, block_size, now, compressor_id)
+		}, "Couldn't create superblock")?;
+		let frag_table = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_frag_table_create(0)
+		}, "Couldn't create fragment table")?, sfs_destroy);
+		let block_writer = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_block_writer_create(*outfile, 4096, 0)
+		}, "Couldn't create block writer")?, sfs_destroy);
+		let block_processor = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_block_processor_create(block_size, *compressor, num_workers, 10 * num_workers as u64, *block_writer, *frag_table)
+		}, "Couldn't create block processor")?, sfs_destroy);
+		let id_table = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_id_table_create(0)
+		}, "Couldn't create ID table")?, sfs_destroy);
+		let xattr_writer = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_xattr_writer_create(0)
+		}, "Couldn't create xattr writer")?, sfs_destroy);
+		let inode_writer = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_meta_writer_create(*outfile, *compressor, 0)
+		}, "Couldn't create inode metadata writer")?, sfs_destroy);
+		let dirent_writer = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_meta_writer_create(*outfile, *compressor, SQFS_META_WRITER_FLAGS_SQFS_META_WRITER_KEEP_IN_MEMORY) // TODO Untangle so we don't have to keep in memory
+		}, "Couldn't create directory entry metadata writer")?, sfs_destroy);
+		let dir_writer = ManagedPointer::new(sfs_init_check_null(&|| unsafe {
+			sqfs_dir_writer_create(*dirent_writer, SQFS_DIR_WRITER_CREATE_FLAGS_SQFS_DIR_WRITER_CREATE_EXPORT_TABLE)
+		}, "Couldn't create directory writer")?, sfs_destroy);
+		unsafe {
+			// Provisional superblock; rewritten with real offsets in finish().
+			sfs_check(sqfs_super_write(&superblock, *outfile), "Couldn't write archive superblock")?;
+			sfs_check((**compressor).write_options.expect("Compressor doesn't provide write_options")(*compressor, *outfile), "Couldn't write compressor options")?;
+		}
+		Ok(Self {
+			outfile: outfile,
+			compressor_config: compressor_config,
+			compressor: compressor,
+			superblock: superblock,
+			block_writer: block_writer,
+			block_processor: block_processor,
+			frag_table: frag_table,
+			id_table: id_table,
+			xattr_writer: xattr_writer,
+			inode_writer: inode_writer,
+			dirent_writer: dirent_writer,
+			dir_writer: dir_writer,
+			nodes: vec![],
+		})
+	}
+
+	// Reconstruct the st_mode-style type-plus-permission bits for an inode,
+	// mapping its squashfs type tag to the matching S_IF* constant.
+	fn mode_from_inode(inode: &ManagedPointer<sqfs_inode_generic_t>) -> u16 {
+		let typenums = vec![ // TODO Lazy static
+			(SQFS_INODE_TYPE_SQFS_INODE_DIR, S_IFDIR),
+			(SQFS_INODE_TYPE_SQFS_INODE_FILE, S_IFREG),
+			(SQFS_INODE_TYPE_SQFS_INODE_SLINK, S_IFLNK),
+			(SQFS_INODE_TYPE_SQFS_INODE_BDEV, S_IFBLK),
+			(SQFS_INODE_TYPE_SQFS_INODE_CDEV, S_IFCHR),
+			(SQFS_INODE_TYPE_SQFS_INODE_FIFO, S_IFIFO),
+			(SQFS_INODE_TYPE_SQFS_INODE_SOCKET, S_IFSOCK),
+			(SQFS_INODE_TYPE_SQFS_INODE_EXT_DIR, S_IFDIR),
+			(SQFS_INODE_TYPE_SQFS_INODE_EXT_FILE, S_IFREG),
+			(SQFS_INODE_TYPE_SQFS_INODE_EXT_SLINK, S_IFLNK),
+			(SQFS_INODE_TYPE_SQFS_INODE_EXT_BDEV, S_IFBLK),
+			(SQFS_INODE_TYPE_SQFS_INODE_EXT_CDEV, S_IFCHR),
+			(SQFS_INODE_TYPE_SQFS_INODE_EXT_FIFO, S_IFIFO),
+			(SQFS_INODE_TYPE_SQFS_INODE_EXT_SOCKET, S_IFSOCK),
+		].into_iter().collect::<HashMap<u32, u32>>();
+		let base = unsafe { (***inode).base };
+		typenums[&(base.type_ as u32)] as u16 | base.mode
+	}
+
+	// Current size of the output file, i.e. the offset of the next write.
+	fn outfile_size(&self) -> u64 {
+		unsafe { (**self.outfile).get_size.expect("Superblock doesn't provide get_size")(*self.outfile) }
+	}
+
+	// TODO Minimize unsafe blocks
+	/// Add one filesystem object to the archive, returning its inode
+	/// number.  Children must be added before the directory containing them;
+	/// the returned IDs are what a `SourceData::Dir` iterator must yield.
+	pub fn add(&mut self, mut source: Source) -> Result<u32> {
+		let flags = source.flags;
+		let nlink = 1; // TODO Handle hard links
+		// Regular files stream their contents through the block processor,
+		// which allocates the inode itself; all other types are built by
+		// Source::to_inode.
+		let mut inode = match source.data {
+			SourceData::File(ref mut reader) => {
+				let mut ret = Box::new(ManagedPointer::null(libc_free));
+				unsafe {
+					sfs_check(sqfs_block_processor_begin_file(*self.block_processor, &mut **ret, ptr::null_mut(), flags), "Couldn't begin writing file")?;
+					let mut buf = vec![0; BLOCK_BUF_SIZE];
+					loop {
+						let rdsize = reader.read(&mut buf)? as u64;
+						if rdsize == 0 { break; }
+						sfs_check(sqfs_block_processor_append(*self.block_processor, &buf as &[u8] as *const [u8] as *const libc::c_void, rdsize), "Couldn't write file data block")?;
+					}
+					sfs_check(sqfs_block_processor_end_file(*self.block_processor), "Couldn't finish writing file")?;
+				}
+				ret
+			},
+			_ => Box::new(source.to_inode(nlink)?),
+		};
+		unsafe {
+			sfs_check(sqfs_xattr_writer_begin(*self.xattr_writer, 0), "Couldn't start writing xattrs")?;
+			for (key, value) in &source.xattrs {
+				let ckey = CString::new(os_to_string(key)?)?;
+				sfs_check(sqfs_xattr_writer_add(*self.xattr_writer, ckey.as_ptr() as *const i8, value as &[u8] as *const [u8] as *const libc::c_void, value.len() as u64), "Couldn't add xattr")?;
+			}
+			// Fixed: removed a redundant nested `unsafe` block here.
+			let xattr_idx = sfs_init(&|x| sqfs_xattr_writer_end(*self.xattr_writer, x), "Couldn't finish writing xattrs")?;
+			let mut base = &mut (***inode).base;
+			base.mode = source.mode;
+			sqfs_inode_set_xattr_index(**inode, xattr_idx);
+			// Fixed: these two calls previously discarded their Results, so
+			// ID-table failures were silently ignored.
+			sfs_check(sqfs_id_table_id_to_index(*self.id_table, source.uid, &mut base.uid_idx), "Couldn't set inode UID")?;
+			sfs_check(sqfs_id_table_id_to_index(*self.id_table, source.gid, &mut base.gid_idx), "Couldn't set inode GID")?;
+			base.mod_time = source.modified;
+			base.inode_number = self.nodes.len() as u32 + 1;
+		}
+		// Directories keep their child iterator around until finish().
+		let dir_children = match source.data {
+			SourceData::Dir(children) => Some(children),
+			_ => None,
+		};
+		self.nodes.push(RefCell::new(IntermediateNode { inode: inode, dir_children: dir_children, pos: 0, parent: 0 }));
+		Ok(self.nodes.len() as u32)
+	}
+
+	/// Write out all deferred metadata (inodes, directories, fragment, ID,
+	/// export, and xattr tables), rewrite the superblock with the real
+	/// offsets, and pad the file to a PAD_TO boundary.
+	pub fn finish(&mut self) -> Result<()> {
+		unsafe {
+			sfs_check(sqfs_block_processor_finish(*self.block_processor), "Failed finishing block processing")?;
+			self.superblock.inode_table_start = self.outfile_size();
+			for raw_node in &self.nodes {
+				let mut node = raw_node.borrow_mut();
+				// TODO Handle extended inodes properly
+				// TODO What happens if a dir tries to include itself as a child? Probably a RefCell borrow panic.
+				let id = (***node.inode).base.inode_number;
+				if let Some(children) = node.dir_children.take() {
+					sfs_check(sqfs_dir_writer_begin(*self.dir_writer, 0), "Couldn't start writing directory")?;
+					// For each child, need: name, ID, reference, mode
+					for (name, child_id) in children { // TODO Check that children are sorted
+						if child_id >= id { panic!("Tried to write directory {} before child {}", id, child_id) } // TODO Allocate error
+						let child_node = &self.nodes[child_id as usize - 1].borrow();
+						let child = child_node.inode.as_ref();
+						let child_ref = child_node.pos;
+						sfs_check(sqfs_dir_writer_add_entry(*self.dir_writer, CString::new(os_to_string(&name)?)?.as_ptr(), child_id, child_ref, Self::mode_from_inode(&child)), "Couldn't add directory entry")?;
+					}
+					sfs_check(sqfs_dir_writer_end(*self.dir_writer), "Couldn't finish writing directory")?;
+					// Replace the placeholder inode with one describing the
+					// directory just written, preserving its base metadata.
+					let mut ret = Box::new(ManagedPointer::new(sfs_init_check_null(&|| sqfs_dir_writer_create_inode(*self.dir_writer, 0, 0, node.parent), "Couldn't get inode for directory")?, libc_free));
+					copy_metadata(&*node.inode, &mut ret);
+					node.inode = ret;
+				}
+				// Record this inode's reference (metablock position << 16 |
+				// offset) before writing it out, for use by parents above.
+				let (mut block, mut offset) = (0, 0);
+				sqfs_meta_writer_get_position(*self.inode_writer, &mut block, &mut offset);
+				node.pos = block << 16 | offset as u64;
+				sfs_check(sqfs_meta_writer_write_inode(*self.inode_writer, **node.inode), "Couldn't write inode")?;
+			}
+
+			// The root directory must have been the last node added.
+			let root_ref = self.nodes.last().ok_or(SquashfsError::Empty)?.borrow().pos;
+			self.superblock.root_inode_ref = root_ref;
+			sfs_check(sqfs_meta_writer_flush(*self.inode_writer), "Couldn't flush inodes")?;
+			sfs_check(sqfs_meta_writer_flush(*self.dirent_writer), "Couldn't flush directory entries")?;
+			self.superblock.directory_table_start = self.outfile_size();
+			sfs_check(sqfs_meta_write_write_to_file(*self.dirent_writer), "Couldn't write directory entries")?;
+			self.superblock.inode_count = self.nodes.len() as u32;
+			sfs_check(sqfs_frag_table_write(*self.frag_table, *self.outfile, &mut self.superblock, *self.compressor), "Couldn't write fragment table")?;
+			sfs_check(sqfs_dir_writer_write_export_table(*self.dir_writer, *self.outfile, *self.compressor, self.nodes.len() as u32, root_ref, &mut self.superblock), "Couldn't write export table")?;
+			sfs_check(sqfs_id_table_write(*self.id_table, *self.outfile, &mut self.superblock, *self.compressor), "Couldn't write ID table")?;
+			sfs_check(sqfs_xattr_writer_flush(*self.xattr_writer, *self.outfile, &mut self.superblock, *self.compressor), "Couldn't write xattr table")?;
+			self.superblock.bytes_used = self.outfile_size();
+			self.superblock.modification_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_secs() as u32;
+			sfs_check(sqfs_super_write(&self.superblock, *self.outfile), "Couldn't rewrite archive superblock")?;
+			// Fixed: pad only when the file isn't already aligned (the old
+			// expression appended a full PAD_TO bytes in that case) and
+			// propagate write errors instead of discarding the Result.
+			let pad_len = (PAD_TO - self.outfile_size() as usize % PAD_TO) % PAD_TO;
+			if pad_len > 0 {
+				let padding: Vec<u8> = vec![0; pad_len];
+				sfs_check((**self.outfile).write_at.expect("File does not provide write_at")(*self.outfile, self.outfile_size(), &padding as &[u8] as *const [u8] as *const libc::c_void, padding.len() as u64), "Couldn't pad file")?;
+			}
+		}
+		Ok(())
+	}
+
+	/// Walk the directory tree rooted at `root` and add every entry,
+	/// children before parents, calling `callback` on each entry's `Source`
+	/// before it is added (e.g. to set ownership or xattrs).
+	pub fn add_tree<T: AsRef<Path>>(&mut self, root: T, callback: &dyn Fn(Source) -> Result<Source>) -> Result<()> {
+		// Maps each directory to the (name -> inode ID) entries discovered
+		// so far; BTreeMap keeps a directory's children sorted by name.
+		let mut childmap: HashMap<PathBuf, BTreeMap<OsString, u32>> = HashMap::new();
+		for step in WalkDir::new(root.as_ref()).follow_links(false).contents_first(true) {
+			match step {
+				Ok(entry) => {
+					// TODO Consider adding Unix-specific functionality with graceful degradation
+					// TODO Catch all errors except add() and continue
+					let metadata = entry.metadata().unwrap();
+					let mtime = metadata.modified()?.duration_since(SystemTime::UNIX_EPOCH)?.as_secs() as u32;
+					let data = if metadata.file_type().is_dir() {
+						SourceData::Dir(Box::new(childmap.remove(&entry.path().to_path_buf()).unwrap_or(BTreeMap::new()).into_iter()))
+					}
+					else if metadata.file_type().is_file() {
+						SourceData::File(Box::new(std::fs::File::open(entry.path())?))
+					}
+					else if metadata.file_type().is_symlink() {
+						SourceData::Symlink(std::fs::read_link(entry.path())?.into_os_string())
+					}
+					else {
+						panic!("Unknown or unsupported file type"); // TODO Error
+					};
+					let id = self.add(callback(Source { data: data, xattrs: HashMap::new(), uid: 0, gid: 0, mode: 0x1ff, modified: mtime, flags: 0 })?)?;
+					// Register this entry with its parent so the parent's
+					// Dir iterator can reference it when the parent is added.
+					if let Some(parent) = entry.path().parent() {
+						childmap.entry(parent.to_path_buf()).or_insert(BTreeMap::new()).insert(entry.file_name().to_os_string(), id);
+					}
+					println!("{}: {}", id, entry.path().display());
+				},
+				Err(e) => {
+					let path = e.path().map(|x| x.to_string_lossy().into_owned()).unwrap_or("(unknown)".to_string());
+					eprintln!("Not processing {}: {}", path, e.to_string());
+				}
+			}
+		}
+		Ok(())
+	}
+}
diff --git a/wrapper.h b/wrapper.h
index 97f6a81..bd3c125 100644
--- a/wrapper.h
+++ b/wrapper.h
@@ -1,3 +1,4 @@
+#include <sys/stat.h>
#include <sqfs/block.h>
#include <sqfs/block_processor.h>
#include <sqfs/block_writer.h>