-rw-r--r--  build.rs     |   20
-rw-r--r--  src/lib.rs   |  275
-rw-r--r--  src/read.rs  | 1603
-rw-r--r--  src/write.rs | 1330
4 files changed, 1938 insertions(+), 1290 deletions(-)
diff --git a/build.rs b/build.rs
index 0cd0f14..fc0c243 100644
--- a/build.rs
+++ b/build.rs
@@ -5,16 +5,18 @@ use std::path::PathBuf;
#[cfg(not(feature = "hermetic"))]
fn main() {
- println!("cargo:rustc-link-lib=squashfs");
- println!("cargo:rerun-if-changed=wrapper.h");
- let bindings = bindgen::Builder::default()
- .header("wrapper.h")
- .parse_callbacks(Box::new(bindgen::CargoCallbacks))
- .generate()
- .expect("Failed to generate SquashFS bindings");
- bindings.write_to_file(PathBuf::from(env::var("OUT_DIR").unwrap()).join("bindings.rs")).expect("Failed to write SquashFS bindings");
+ println!("cargo:rustc-link-lib=squashfs");
+ println!("cargo:rerun-if-changed=wrapper.h");
+ let bindings = bindgen::Builder::default()
+ .header("wrapper.h")
+ .parse_callbacks(Box::new(bindgen::CargoCallbacks))
+ .generate()
+ .expect("Failed to generate SquashFS bindings");
+ bindings
+ .write_to_file(PathBuf::from(env::var("OUT_DIR").unwrap()).join("bindings.rs"))
+ .expect("Failed to write SquashFS bindings");
}
// Don't generate bindings or linking directives if we're building hermetically
#[cfg(feature = "hermetic")]
-fn main() { }
+fn main() {}
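The `hermetic` feature toggles between generating bindings at build time and shipping pre-generated ones (the `mod bindings` switch in src/lib.rs below keys off the same feature). The manifest itself is not part of this diff; the following is a hypothetical Cargo.toml sketch of how such a feature and build dependency would plausibly be declared, for illustration only:

    # Hypothetical Cargo.toml excerpt; the real manifest is not shown in this diff.
    [features]
    # When enabled, build.rs skips bindgen and the link directive, and the
    # checked-in src/bindings.rs is used instead (see the cfg attributes above).
    hermetic = []

    [build-dependencies]
    bindgen = "0.59"   # some bindgen version; the exact requirement is an assumption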
diff --git a/src/lib.rs b/src/lib.rs
index 1640a31..8af1bb9 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -20,7 +20,8 @@
//!
//! [squashfs-tools-ng]: https://github.com/AgentD/squashfs-tools-ng/
-#[macro_use] extern crate lazy_static;
+#[macro_use]
+extern crate lazy_static;
extern crate libc;
extern crate memmap;
extern crate num_derive;
@@ -29,21 +30,21 @@ extern crate owning_ref;
extern crate walkdir;
extern crate xattr;
-use std::mem::MaybeUninit;
+use num_derive::FromPrimitive;
+use num_traits::FromPrimitive;
use std::ffi::{OsStr, OsString};
+use std::mem::MaybeUninit;
use std::path::PathBuf;
use std::ptr;
-use num_derive::FromPrimitive;
-use num_traits::FromPrimitive;
use thiserror::Error;
#[cfg(not(feature = "hermetic"))]
mod bindings {
- #![allow(non_camel_case_types)]
- #![allow(non_snake_case)]
- #![allow(non_upper_case_globals)]
- #![allow(dead_code)]
- include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
+ #![allow(non_camel_case_types)]
+ #![allow(non_snake_case)]
+ #![allow(non_upper_case_globals)]
+ #![allow(dead_code)]
+ include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
}
#[cfg(feature = "hermetic")]
mod bindings;
@@ -63,23 +64,40 @@ type BoxedError = Box<dyn std::error::Error + std::marker::Send + std::marker::S
#[derive(Error, Debug, FromPrimitive)]
#[repr(i32)]
pub enum LibError {
- #[error("Failed to allocate memory")] Alloc = SQFS_ERROR_SQFS_ERROR_ALLOC,
- #[error("Generic I/O failure")] Io = SQFS_ERROR_SQFS_ERROR_IO,
- #[error("Compressor failed to extract data")] Compressor = SQFS_ERROR_SQFS_ERROR_COMPRESSOR,
- #[error("Internal error")] Internal = SQFS_ERROR_SQFS_ERROR_INTERNAL,
- #[error("Archive file appears to be corrupted")] Corrupted = SQFS_ERROR_SQFS_ERROR_CORRUPTED,
- #[error("Unsupported feature used")] Unsupported = SQFS_ERROR_SQFS_ERROR_UNSUPPORTED,
- #[error("Archive would overflow memory")] Overflow = SQFS_ERROR_SQFS_ERROR_OVERFLOW,
- #[error("Out-of-bounds access attempted")] OutOfBounds = SQFS_ERROR_SQFS_ERROR_OUT_OF_BOUNDS,
- #[error("Superblock magic number incorrect")] SuperMagic = SQFS_ERROR_SFQS_ERROR_SUPER_MAGIC,
- #[error("Unsupported archive version")] SuperVersion = SQFS_ERROR_SFQS_ERROR_SUPER_VERSION,
- #[error("Archive block size is invalid")] SuperBlockSize = SQFS_ERROR_SQFS_ERROR_SUPER_BLOCK_SIZE,
- #[error("Not a directory")] NotDir = SQFS_ERROR_SQFS_ERROR_NOT_DIR,
- #[error("Path does not exist")] NoEntry = SQFS_ERROR_SQFS_ERROR_NO_ENTRY,
- #[error("Hard link loop detected")] LinkLoop = SQFS_ERROR_SQFS_ERROR_LINK_LOOP,
- #[error("Not a regular file")] NotFile = SQFS_ERROR_SQFS_ERROR_NOT_FILE,
- #[error("Invalid argument passed")] ArgInvalid = SQFS_ERROR_SQFS_ERROR_ARG_INVALID,
- #[error("Library operations performed in incorrect order")] Sequence = SQFS_ERROR_SQFS_ERROR_SEQUENCE,
+ #[error("Failed to allocate memory")]
+ Alloc = SQFS_ERROR_SQFS_ERROR_ALLOC,
+ #[error("Generic I/O failure")]
+ Io = SQFS_ERROR_SQFS_ERROR_IO,
+ #[error("Compressor failed to extract data")]
+ Compressor = SQFS_ERROR_SQFS_ERROR_COMPRESSOR,
+ #[error("Internal error")]
+ Internal = SQFS_ERROR_SQFS_ERROR_INTERNAL,
+ #[error("Archive file appears to be corrupted")]
+ Corrupted = SQFS_ERROR_SQFS_ERROR_CORRUPTED,
+ #[error("Unsupported feature used")]
+ Unsupported = SQFS_ERROR_SQFS_ERROR_UNSUPPORTED,
+ #[error("Archive would overflow memory")]
+ Overflow = SQFS_ERROR_SQFS_ERROR_OVERFLOW,
+ #[error("Out-of-bounds access attempted")]
+ OutOfBounds = SQFS_ERROR_SQFS_ERROR_OUT_OF_BOUNDS,
+ #[error("Superblock magic number incorrect")]
+ SuperMagic = SQFS_ERROR_SFQS_ERROR_SUPER_MAGIC,
+ #[error("Unsupported archive version")]
+ SuperVersion = SQFS_ERROR_SFQS_ERROR_SUPER_VERSION,
+ #[error("Archive block size is invalid")]
+ SuperBlockSize = SQFS_ERROR_SQFS_ERROR_SUPER_BLOCK_SIZE,
+ #[error("Not a directory")]
+ NotDir = SQFS_ERROR_SQFS_ERROR_NOT_DIR,
+ #[error("Path does not exist")]
+ NoEntry = SQFS_ERROR_SQFS_ERROR_NO_ENTRY,
+ #[error("Hard link loop detected")]
+ LinkLoop = SQFS_ERROR_SQFS_ERROR_LINK_LOOP,
+ #[error("Not a regular file")]
+ NotFile = SQFS_ERROR_SQFS_ERROR_NOT_FILE,
+ #[error("Invalid argument passed")]
+ ArgInvalid = SQFS_ERROR_SQFS_ERROR_ARG_INVALID,
+ #[error("Library operations performed in incorrect order")]
+ Sequence = SQFS_ERROR_SQFS_ERROR_SEQUENCE,
}
/// Errors encountered while reading or writing an archive.
@@ -88,67 +106,98 @@ pub enum LibError {
/// operation.
#[derive(Error, Debug)]
pub enum SquashfsError {
- #[error("Input contains an invalid null character")] NullInput(#[from] std::ffi::NulError),
- #[error("Encoded string is not valid UTF-8")] Utf8(#[from] std::string::FromUtf8Error),
- #[error("OS string is not valid UTF-8")] OsUtf8(OsString),
- #[error("{0}: {1}")] LibraryError(String, LibError),
- #[error("{0}: Unknown error {1} in SquashFS library")] UnknownLibraryError(String, i32),
- #[error("{0}: Squashfs library did not return expected value")] LibraryReturnError(String),
- #[error("{0}")] LibraryNullError(String),
- #[error("Symbolic link chain exceeds {0} elements")] LinkChain(i32), // Can I use a const in the formatting string?
- #[error("Symbolic link loop detected containing {0}")] LinkLoop(PathBuf),
- #[error("Dangling symbolic link from {0} to {1}")] DanglingLink(PathBuf, PathBuf),
- #[error("{0} is type {1}, not {2}")] WrongType(String, String, String),
- #[error("Tried to copy an object that can't be copied")] Copy,
- #[error("Tried to get parent of a node with an unknown path")] NoPath,
- #[error("Inode index {0} is not within limits 1..{1}")] Range(u64, u64),
- #[error("Couldn't read file: {0}")] Read(#[from] std::io::Error),
- #[error("The filesystem does not support the feature: {0}")] Unsupported(String),
- #[error("Memory mapping failed: {0}")] Mmap(std::io::Error),
- #[error("Couldn't get the current system time: {0}")] Time(#[from] std::time::SystemTimeError),
- #[error("Refusing to create empty archive")] Empty,
- #[error("Tried to write parent directory before child node {0}")] WriteOrder(u32),
- #[error("Tried to write unknown or unsupported file type")] WriteType(std::fs::FileType),
- #[error("Callback returned an error")] WrappedError(BoxedError),
- #[error("Failed to retrieve xattrs for {0}: {1}")] Xattr(PathBuf, std::io::Error),
- #[error("Tried to add files to a writer that was already finished")] Finished,
- #[error("Internal error: {0}")] Internal(String),
+ #[error("Input contains an invalid null character")]
+ NullInput(#[from] std::ffi::NulError),
+ #[error("Encoded string is not valid UTF-8")]
+ Utf8(#[from] std::string::FromUtf8Error),
+ #[error("OS string is not valid UTF-8")]
+ OsUtf8(OsString),
+ #[error("{0}: {1}")]
+ LibraryError(String, LibError),
+ #[error("{0}: Unknown error {1} in SquashFS library")]
+ UnknownLibraryError(String, i32),
+ #[error("{0}: Squashfs library did not return expected value")]
+ LibraryReturnError(String),
+ #[error("{0}")]
+ LibraryNullError(String),
+ #[error("Symbolic link chain exceeds {0} elements")]
+ LinkChain(i32), // Can I use a const in the formatting string?
+ #[error("Symbolic link loop detected containing {0}")]
+ LinkLoop(PathBuf),
+ #[error("Dangling symbolic link from {0} to {1}")]
+ DanglingLink(PathBuf, PathBuf),
+ #[error("{0} is type {1}, not {2}")]
+ WrongType(String, String, String),
+ #[error("Tried to copy an object that can't be copied")]
+ Copy,
+ #[error("Tried to get parent of a node with an unknown path")]
+ NoPath,
+ #[error("Inode index {0} is not within limits 1..{1}")]
+ Range(u64, u64),
+ #[error("Couldn't read file: {0}")]
+ Read(#[from] std::io::Error),
+ #[error("The filesystem does not support the feature: {0}")]
+ Unsupported(String),
+ #[error("Memory mapping failed: {0}")]
+ Mmap(std::io::Error),
+ #[error("Couldn't get the current system time: {0}")]
+ Time(#[from] std::time::SystemTimeError),
+ #[error("Refusing to create empty archive")]
+ Empty,
+ #[error("Tried to write parent directory before child node {0}")]
+ WriteOrder(u32),
+ #[error("Tried to write unknown or unsupported file type")]
+ WriteType(std::fs::FileType),
+ #[error("Callback returned an error")]
+ WrappedError(BoxedError),
+ #[error("Failed to retrieve xattrs for {0}: {1}")]
+ Xattr(PathBuf, std::io::Error),
+ #[error("Tried to add files to a writer that was already finished")]
+ Finished,
+ #[error("Internal error: {0}")]
+ Internal(String),
}
/// Result type returned by SquashFS library operations.
pub type Result<T> = std::result::Result<T, SquashfsError>;
fn sfs_check(code: i32, desc: &str) -> Result<i32> {
- match code {
- i if i >= 0 => Ok(i),
- i => match FromPrimitive::from_i32(i) {
- Some(e) => Err(SquashfsError::LibraryError(desc.to_string(), e)),
- None => Err(SquashfsError::UnknownLibraryError(desc.to_string(), i)),
- }
- }
+ match code {
+ i if i >= 0 => Ok(i),
+ i => match FromPrimitive::from_i32(i) {
+ Some(e) => Err(SquashfsError::LibraryError(desc.to_string(), e)),
+ None => Err(SquashfsError::UnknownLibraryError(desc.to_string(), i)),
+ },
+ }
}
fn sfs_destroy<T>(x: *mut T) {
- unsafe {
- let obj = x as *mut sqfs_object_t;
- ((*obj).destroy.expect("SquashFS object did not provide a destroy callback"))(obj);
- }
+ unsafe {
+ let obj = x as *mut sqfs_object_t;
+ ((*obj)
+ .destroy
+ .expect("SquashFS object did not provide a destroy callback"))(obj);
+ }
}
fn libc_free<T>(x: *mut T) {
- unsafe { libc::free(x as *mut _ as *mut libc::c_void); }
+ unsafe {
+ libc::free(x as *mut _ as *mut libc::c_void);
+ }
}
fn rust_dealloc<T>(x: *mut T) {
- unsafe { std::alloc::dealloc(x as *mut u8, std::alloc::Layout::new::<T>()) }
+ unsafe { std::alloc::dealloc(x as *mut u8, std::alloc::Layout::new::<T>()) }
}
fn unpack_meta_ref(meta_ref: u64) -> (u64, u64) {
- (meta_ref >> 16 & 0xffffffff, meta_ref & 0xffff)
+ (meta_ref >> 16 & 0xffffffff, meta_ref & 0xffff)
}
fn os_to_string(s: &OsStr) -> Result<String> {
- Ok(s.to_str().ok_or_else(|| SquashfsError::OsUtf8(s.to_os_string()))?.to_string())
+ Ok(s.to_str()
+ .ok_or_else(|| SquashfsError::OsUtf8(s.to_os_string()))?
+ .to_string())
}
const NO_XATTRS: u32 = 0xffffffff;
@@ -158,65 +207,85 @@ const BLOCK_BUF_SIZE: usize = 4096;
const PAD_TO: usize = 4096;
struct ManagedPointer<T> {
- ptr: *mut T,
- destroy: fn(*mut T),
+ ptr: *mut T,
+ destroy: fn(*mut T),
}
impl<T> ManagedPointer<T> {
- fn null(destroy: fn(*mut T)) -> Self {
- Self { ptr: ptr::null_mut(), destroy: destroy }
- }
+ fn null(destroy: fn(*mut T)) -> Self {
+ Self {
+ ptr: ptr::null_mut(),
+ destroy: destroy,
+ }
+ }
+
+ fn new(ptr: *mut T, destroy: fn(*mut T)) -> Self {
+ Self {
+ ptr: ptr,
+ destroy: destroy,
+ }
+ }
- fn new(ptr: *mut T, destroy: fn(*mut T)) -> Self {
- Self { ptr: ptr, destroy: destroy }
- }
-
- fn as_const(&self) -> *const T {
- self.ptr as *const T
- }
+ fn as_const(&self) -> *const T {
+ self.ptr as *const T
+ }
}
impl<T> std::ops::Deref for ManagedPointer<T> {
- type Target = *mut T;
+ type Target = *mut T;
- fn deref(&self) -> &Self::Target {
- &self.ptr
- }
+ fn deref(&self) -> &Self::Target {
+ &self.ptr
+ }
}
impl<T> std::ops::DerefMut for ManagedPointer<T> {
- fn deref_mut(&mut self) -> &mut Self::Target {
- &mut self.ptr
- }
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.ptr
+ }
}
impl<T> Drop for ManagedPointer<T> {
- fn drop(&mut self) {
- (self.destroy)(**self)
- }
+ fn drop(&mut self) {
+ (self.destroy)(**self)
+ }
}
impl<T> std::fmt::Debug for ManagedPointer<T> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "ManagedPointer({:?})", self.ptr)
- }
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "ManagedPointer({:?})", self.ptr)
+ }
}
fn sfs_init<T>(init: &dyn Fn(*mut T) -> i32, err: &str) -> Result<T> {
- let mut ret: MaybeUninit<T> = MaybeUninit::uninit();
- sfs_check(init(ret.as_mut_ptr()), err)?;
- Ok(unsafe { ret.assume_init() })
+ let mut ret: MaybeUninit<T> = MaybeUninit::uninit();
+ sfs_check(init(ret.as_mut_ptr()), err)?;
+ Ok(unsafe { ret.assume_init() })
}
-fn sfs_init_ptr<T>(init: &dyn Fn(*mut *mut T) -> i32, err: &str, destroy: fn(*mut T)) -> Result<ManagedPointer<T>> {
- let mut ret: *mut T = ptr::null_mut();
- sfs_check(init(&mut ret), err)?;
- if ret.is_null() { Err(SquashfsError::LibraryReturnError(err.to_string())) }
- else { Ok(ManagedPointer::new(ret, destroy)) }
+fn sfs_init_ptr<T>(
+ init: &dyn Fn(*mut *mut T) -> i32,
+ err: &str,
+ destroy: fn(*mut T),
+) -> Result<ManagedPointer<T>> {
+ let mut ret: *mut T = ptr::null_mut();
+ sfs_check(init(&mut ret), err)?;
+ if ret.is_null() {
+ Err(SquashfsError::LibraryReturnError(err.to_string()))
+ } else {
+ Ok(ManagedPointer::new(ret, destroy))
+ }
}
-fn sfs_init_check_null<T>(init: &dyn Fn() -> *mut T, err: &str, destroy: fn(*mut T)) -> Result<ManagedPointer<T>> {
- let ret = init();
- if ret.is_null() { Err(SquashfsError::LibraryNullError(err.to_string())) }
- else { Ok(ManagedPointer::new(ret, destroy)) }
+fn sfs_init_check_null<T>(
+ init: &dyn Fn() -> *mut T,
+ err: &str,
+ destroy: fn(*mut T),
+) -> Result<ManagedPointer<T>> {
+ let ret = init();
+ if ret.is_null() {
+ Err(SquashfsError::LibraryNullError(err.to_string()))
+ } else {
+ Ok(ManagedPointer::new(ret, destroy))
+ }
}
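Taken together, `ManagedPointer`, `sfs_check`, and the three `sfs_init*` helpers wrap the libsquashfs constructor convention (an integer status code plus an out-parameter, or a possibly-null return) in RAII. A minimal sketch of the intended call pattern, using a made-up binding as a stand-in for any generated constructor:

    // Hypothetical sketch: `sqfs_widget_t` and `sqfs_widget_create` stand in
    // for any bindgen-generated type whose constructor fills an out-pointer
    // and returns a negative SQFS_ERROR code on failure.
    fn open_widget() -> Result<ManagedPointer<sqfs_widget_t>> {
        sfs_init_ptr(
            &|out| unsafe { sqfs_widget_create(out, 0) },
            "Couldn't create widget",
            sfs_destroy, // sqfs objects carry their own destroy callback
        )
    }
    // The returned ManagedPointer frees the C object on drop, so early
    // returns via `?` cannot leak it.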
diff --git a/src/read.rs b/src/read.rs
index 17f2595..02b8141 100644
--- a/src/read.rs
+++ b/src/read.rs
@@ -17,72 +17,82 @@
//! },
//! }
+use super::*;
+use memmap::{Mmap, MmapOptions};
+use owning_ref::OwningHandle;
use std::collections::{HashMap, HashSet};
use std::ffi::{CStr, CString};
use std::io;
use std::io::{Read, Seek};
use std::mem::ManuallyDrop;
use std::ops::{Deref, DerefMut};
-use std::path::{Path, PathBuf, Component};
+use std::path::{Component, Path, PathBuf};
use std::sync::{Arc, Mutex};
-use super::*;
-use memmap::{Mmap, MmapOptions};
-use owning_ref::OwningHandle;
// Canonicalize without requiring the path to actually exist in the filesystem
fn dumb_canonicalize(path: &Path) -> PathBuf {
- let mut ret = PathBuf::new();
- for part in path.components() {
- match part {
- Component::Prefix(_) => (),
- Component::CurDir => (),
- Component::RootDir => ret.clear(),
- Component::ParentDir => { ret.pop(); },
- Component::Normal(p) => ret.push(p),
- }
- }
- ret
+ let mut ret = PathBuf::new();
+ for part in path.components() {
+ match part {
+ Component::Prefix(_) => (),
+ Component::CurDir => (),
+ Component::RootDir => ret.clear(),
+ Component::ParentDir => {
+ ret.pop();
+ }
+ Component::Normal(p) => ret.push(p),
+ }
+ }
+ ret
}
// Pass errors through, but convert missing file errors to None
fn enoent_ok<T>(t: Result<T>) -> Result<Option<T>> {
- match t {
- Ok(ret) => Ok(Some(ret)),
- Err(SquashfsError::LibraryError(_, LibError::NoEntry)) => Ok(None),
- Err(e) => Err(e),
- }
+ match t {
+ Ok(ret) => Ok(Some(ret)),
+ Err(SquashfsError::LibraryError(_, LibError::NoEntry)) => Ok(None),
+ Err(e) => Err(e),
+ }
}
// Wrapper for leasing objects from a pool
struct Leased<'a, T> {
- pool: &'a Mutex<Vec<T>>,
- data: ManuallyDrop<T>,
+ pool: &'a Mutex<Vec<T>>,
+ data: ManuallyDrop<T>,
}
impl<'a, T> Leased<'a, T> {
- pub fn new(pool: &'a Mutex<Vec<T>>, data: T) -> Self {
- Self { pool, data: ManuallyDrop::new(data) }
- }
+ pub fn new(pool: &'a Mutex<Vec<T>>, data: T) -> Self {
+ Self {
+ pool,
+ data: ManuallyDrop::new(data),
+ }
+ }
}
impl<'a, T> Deref for Leased<'a, T> {
- type Target = T;
+ type Target = T;
- fn deref(&self) -> &Self::Target {
- &self.data
- }
+ fn deref(&self) -> &Self::Target {
+ &self.data
+ }
}
impl<'a, T> DerefMut for Leased<'a, T> {
- fn deref_mut(&mut self) -> &mut Self::Target {
- &mut self.data
- }
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.data
+ }
}
impl<'a, T> Drop for Leased<'a, T> {
- fn drop(&mut self) {
- unsafe { self.pool.lock().expect(LOCK_ERR).push(ManuallyDrop::take(&mut self.data)); }
- }
+ fn drop(&mut self) {
+ unsafe {
+ self.pool
+ .lock()
+ .expect(LOCK_ERR)
+ .push(ManuallyDrop::take(&mut self.data));
+ }
+ }
}
/// A directory in the archive.
@@ -102,89 +112,163 @@ impl<'a, T> Drop for Leased<'a, T> {
/// }
#[derive(Debug)]
pub struct Dir<'a> {
- node: &'a Node<'a>,
- // TODO I believe this unread field is necessary so we don't drop the compressor instance that
- // the reader uses. 1) Verify this. 2) Can we represent this dependency in such a way the
- // Rust won't warn about not using this field?
- compressor: ManagedPointer<sqfs_compressor_t>,
- reader: Mutex<ManagedPointer<sqfs_dir_reader_t>>,
+ node: &'a Node<'a>,
+ // TODO I believe this unread field is necessary so we don't drop the compressor instance that
+ // the reader uses. 1) Verify this. 2) Can we represent this dependency in such a way that
+ // Rust won't warn about this field being unused?
+ compressor: ManagedPointer<sqfs_compressor_t>,
+ reader: Mutex<ManagedPointer<sqfs_dir_reader_t>>,
}
impl<'a> Dir<'a> {
- fn new(node: &'a Node) -> Result<Self> {
- let compressor = node.container.compressor()?;
- let reader = sfs_init_check_null(&|| unsafe {
- sqfs_dir_reader_create(&node.container.superblock, *compressor, *node.container.file, 0)
- }, "Couldn't create directory reader", sfs_destroy)?;
- unsafe { sfs_check(sqfs_dir_reader_open_dir(*reader, node.inode.as_const(), 0), "Couldn't open directory")?; }
- Ok(Self {node, compressor, reader: Mutex::new(reader) })
- }
-
- /// Reset the directory reader to the beginning of the directory.
- ///
- /// If the directory has been partially or completely iterated through, this will put it back
- /// to the beginning so that it can be read again.
- pub fn reset(&mut self) {
- unsafe { sqfs_dir_reader_rewind(**self.reader.lock().expect(LOCK_ERR)); }
- }
-
- fn read<'b>(&'b self) -> Result<Option<Node<'a>>> {
- let locked_reader = self.reader.lock().expect(LOCK_ERR);
- let mut raw_entry: *mut sqfs_dir_entry_t = ptr::null_mut();
- if sfs_check(unsafe { sqfs_dir_reader_read(**locked_reader, &mut raw_entry) }, "Couldn't read directory entries")? > 0 { Ok(None) }
- else if raw_entry.is_null() { Err(SquashfsError::LibraryReturnError("Couldn't read directory entries".to_string()))? }
- else {
- let entry = ManagedPointer::new(raw_entry, libc_free);
- let name_bytes = unsafe { (**entry).name.as_slice((**entry).size as usize + 1) };
- let name = String::from_utf8(name_bytes.to_vec())?;
- let node = sfs_init_ptr(&|x| unsafe {
- sqfs_dir_reader_get_inode(**locked_reader, x)
- }, "Couldn't read directory entry inode", libc_free)?;
- Ok(Some(Node::new(self.node.container, node, self.node.path.as_ref().map(|path| path.join(name)))?))
- }
- }
-
- /// Select a child inside the directory by name.
- ///
- /// This will return `Ok(None)` if the child does not exist, or an `Err` if the lookup could
- /// not be performed.
- pub fn child(&self, name: &str) -> Result<Option<Node>> {
- match unsafe { enoent_ok(sfs_check(sqfs_dir_reader_find(**self.reader.lock().expect(LOCK_ERR), CString::new(name)?.as_ptr()), &format!("Couldn't find child \"{}\"", name)))? } {
- None => Ok(None),
- Some(_) => Ok(self.read()?),
- }
- }
+ fn new(node: &'a Node) -> Result<Self> {
+ let compressor = node.container.compressor()?;
+ let reader = sfs_init_check_null(
+ &|| unsafe {
+ sqfs_dir_reader_create(
+ &node.container.superblock,
+ *compressor,
+ *node.container.file,
+ 0,
+ )
+ },
+ "Couldn't create directory reader",
+ sfs_destroy,
+ )?;
+ unsafe {
+ sfs_check(
+ sqfs_dir_reader_open_dir(*reader, node.inode.as_const(), 0),
+ "Couldn't open directory",
+ )?;
+ }
+ Ok(Self {
+ node,
+ compressor,
+ reader: Mutex::new(reader),
+ })
+ }
+
+ /// Reset the directory reader to the beginning of the directory.
+ ///
+ /// If the directory has been partially or completely iterated through, this will put it back
+ /// to the beginning so that it can be read again.
+ pub fn reset(&mut self) {
+ unsafe {
+ sqfs_dir_reader_rewind(**self.reader.lock().expect(LOCK_ERR));
+ }
+ }
+
+ fn read<'b>(&'b self) -> Result<Option<Node<'a>>> {
+ let locked_reader = self.reader.lock().expect(LOCK_ERR);
+ let mut raw_entry: *mut sqfs_dir_entry_t = ptr::null_mut();
+ if sfs_check(
+ unsafe { sqfs_dir_reader_read(**locked_reader, &mut raw_entry) },
+ "Couldn't read directory entries",
+ )? > 0
+ {
+ Ok(None)
+ } else if raw_entry.is_null() {
+ Err(SquashfsError::LibraryReturnError(
+ "Couldn't read directory entries".to_string(),
+ ))?
+ } else {
+ let entry = ManagedPointer::new(raw_entry, libc_free);
+ let name_bytes = unsafe { (**entry).name.as_slice((**entry).size as usize + 1) };
+ let name = String::from_utf8(name_bytes.to_vec())?;
+ let node = sfs_init_ptr(
+ &|x| unsafe { sqfs_dir_reader_get_inode(**locked_reader, x) },
+ "Couldn't read directory entry inode",
+ libc_free,
+ )?;
+ Ok(Some(Node::new(
+ self.node.container,
+ node,
+ self.node.path.as_ref().map(|path| path.join(name)),
+ )?))
+ }
+ }
+
+ /// Select a child inside the directory by name.
+ ///
+ /// This will return `Ok(None)` if the child does not exist, or an `Err` if the lookup could
+ /// not be performed.
+ pub fn child(&self, name: &str) -> Result<Option<Node>> {
+ match unsafe {
+ enoent_ok(sfs_check(
+ sqfs_dir_reader_find(
+ **self.reader.lock().expect(LOCK_ERR),
+ CString::new(name)?.as_ptr(),
+ ),
+ &format!("Couldn't find child \"{}\"", name),
+ ))?
+ } {
+ None => Ok(None),
+ Some(_) => Ok(self.read()?),
+ }
+ }
}
impl<'a> std::iter::Iterator for Dir<'a> {
- type Item = Result<Node<'a>>;
+ type Item = Result<Node<'a>>;
- fn next(&mut self) -> Option<Self::Item> {
- self.read().transpose()
- }
+ fn next(&mut self) -> Option<Self::Item> {
+ self.read().transpose()
+ }
}
struct DataReader {
- #[allow(dead_code)] compressor: ManagedPointer<sqfs_compressor_t>, // Referenced by `reader`
- reader: ManagedPointer<sqfs_data_reader_t>,
+ #[allow(dead_code)]
+ compressor: ManagedPointer<sqfs_compressor_t>, // Referenced by `reader`
+ reader: ManagedPointer<sqfs_data_reader_t>,
}
impl<'a> DataReader {
- fn new(archive: &'a Archive) -> Result<Self> {
- let compressor = archive.compressor()?;
- let reader = sfs_init_check_null(&|| unsafe {
- sqfs_data_reader_create(*archive.file, archive.superblock.block_size as u64, *compressor, 0)
- }, "Couldn't create data reader", sfs_destroy)?;
- unsafe { sfs_check(sqfs_data_reader_load_fragment_table(*reader, &archive.superblock), "Couldn't load fragment table")? };
- Ok(Self { compressor: compressor, reader: reader })
- }
-
- fn read(&self, inode: &ManagedPointer<sqfs_inode_generic_t>, offset: u64, buf: &mut [u8]) -> io::Result<u64> {
- Ok(unsafe { sfs_check(
- sqfs_data_reader_read(*self.reader, inode.as_const(), offset, buf.as_mut_ptr() as *mut libc::c_void, buf.len() as u32),
- "Couldn't read file content"
- ).map_err(|e| io::Error::new(io::ErrorKind::Other, e))? } as u64)
- }
+ fn new(archive: &'a Archive) -> Result<Self> {
+ let compressor = archive.compressor()?;
+ let reader = sfs_init_check_null(
+ &|| unsafe {
+ sqfs_data_reader_create(
+ *archive.file,
+ archive.superblock.block_size as usize,
+ *compressor,
+ 0,
+ )
+ },
+ "Couldn't create data reader",
+ sfs_destroy,
+ )?;
+ unsafe {
+ sfs_check(
+ sqfs_data_reader_load_fragment_table(*reader, &archive.superblock),
+ "Couldn't load fragment table",
+ )?
+ };
+ Ok(Self {
+ compressor: compressor,
+ reader: reader,
+ })
+ }
+
+ fn read(
+ &self,
+ inode: &ManagedPointer<sqfs_inode_generic_t>,
+ offset: u64,
+ buf: &mut [u8],
+ ) -> io::Result<u64> {
+ Ok(unsafe {
+ sfs_check(
+ sqfs_data_reader_read(
+ *self.reader,
+ inode.as_const(),
+ offset,
+ buf.as_mut_ptr() as *mut libc::c_void,
+ buf.len() as u32,
+ ),
+ "Couldn't read file content",
+ )
+ .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?
+ } as u64)
+ }
}
/// A file in the archive.
@@ -204,102 +288,126 @@ impl<'a> DataReader {
/// file.seek(SeekFrom::End(-10))?;
/// file.read(&mut buf)?;
pub struct File<'a> {
- node: &'a Node<'a>,
- offset: Mutex<u64>,
+ node: &'a Node<'a>,
+ offset: Mutex<u64>,
}
impl<'a> File<'a> {
- fn new(node: &'a Node) -> Result<Self> {
- Ok(Self { node: node, offset: Mutex::new(0) })
- }
-
- /// Retrieve the size of the file in bytes.
- pub fn size(&self) -> u64 {
- let mut ret: u64 = 0;
- unsafe { sqfs_inode_get_file_size(self.node.inode.as_const(), &mut ret) };
- ret
- }
-
- /// Retrieve the entire contents of the file in the form of a byte Vec.
- pub fn to_bytes(&mut self) -> Result<Vec<u8>> {
- let mut ret = Vec::with_capacity(self.size() as usize);
- self.read_to_end(&mut ret)?;
- Ok(ret)
- }
-
- /// Retrieve the entire contents of the file in the form of a String.
- ///
- /// This calls [`Read::read_to_string`] under the hood. Consequently, a UTF-8 error
- /// will be raised if the entire file is not valid UTF-8.
- pub fn to_string(&mut self) -> Result<String> {
- let mut ret = String::with_capacity(self.size() as usize);
- self.read_to_string(&mut ret)?;
- Ok(ret)
- }
-
- /// Map a file into memory for fast parallel random access.
- ///
- /// This uses `mmap` to map the file into memory. **It will fail** and return `None` if the
- /// file is compressed or fragmented. If the [`DontCompress`](write::BlockFlags::DontCompress)
- /// and [`DontFragment`](write::BlockFlags::DontFragment) options are set for a file at
- /// archive creation time, it will be added to the archive in one contiguous unmodified chunk.
- /// This is necessary because `mmap` provides a view into a file exactly as it is on-disk;
- /// there is no opportunity for the library to apply decompression or other transformations
- /// when mapping.
- ///
- /// let map = file.mmap().expect("File is not mmappable");
- /// println!("{}", str::from_utf8(map)?);
- pub fn mmap<'b>(&'b mut self) -> Option<&'b [u8]> {
- let inode = unsafe { &***self.node.inode };
- let (start, frag_idx) = unsafe {
- match inode.base.type_ as u32 {
- SQFS_INODE_TYPE_SQFS_INODE_FILE => (inode.data.file.blocks_start as u64, inode.data.file.fragment_index),
- SQFS_INODE_TYPE_SQFS_INODE_EXT_FILE => (inode.data.file_ext.blocks_start, inode.data.file_ext.fragment_idx),
- _ => panic!("File is not a file")
- }
- };
- let block_count = inode.payload_bytes_used / std::mem::size_of::<sqfs_u32>() as u32;
- if block_count == 0 || frag_idx != 0xffffffff { return None; }
- let block_sizes = unsafe { inode.extra.as_slice(block_count as usize) };
- if block_sizes.iter().any(|x| x & 0x01000000 == 0) { return None; }
- Some(self.node.container.map_range(start as usize, self.size() as usize))
- }
+ fn new(node: &'a Node) -> Result<Self> {
+ Ok(Self {
+ node: node,
+ offset: Mutex::new(0),
+ })
+ }
+
+ /// Retrieve the size of the file in bytes.
+ pub fn size(&self) -> u64 {
+ let mut ret: u64 = 0;
+ unsafe { sqfs_inode_get_file_size(self.node.inode.as_const(), &mut ret) };
+ ret
+ }
+
+ /// Retrieve the entire contents of the file in the form of a byte Vec.
+ pub fn to_bytes(&mut self) -> Result<Vec<u8>> {
+ let mut ret = Vec::with_capacity(self.size() as usize);
+ self.read_to_end(&mut ret)?;
+ Ok(ret)
+ }
+
+ /// Retrieve the entire contents of the file in the form of a String.
+ ///
+ /// This calls [`Read::read_to_string`] under the hood. Consequently, a UTF-8 error
+ /// will be raised if the entire file is not valid UTF-8.
+ pub fn to_string(&mut self) -> Result<String> {
+ let mut ret = String::with_capacity(self.size() as usize);
+ self.read_to_string(&mut ret)?;
+ Ok(ret)
+ }
+
+ /// Map a file into memory for fast parallel random access.
+ ///
+ /// This uses `mmap` to map the file into memory. **It will fail** and return `None` if the
+ /// file is compressed or fragmented. If the [`DontCompress`](write::BlockFlags::DontCompress)
+ /// and [`DontFragment`](write::BlockFlags::DontFragment) options are set for a file at
+ /// archive creation time, it will be added to the archive in one contiguous unmodified chunk.
+ /// This is necessary because `mmap` provides a view into a file exactly as it is on-disk;
+ /// there is no opportunity for the library to apply decompression or other transformations
+ /// when mapping.
+ ///
+ /// let map = file.mmap().expect("File is not mmappable");
+ /// println!("{}", str::from_utf8(map)?);
+ pub fn mmap<'b>(&'b mut self) -> Option<&'b [u8]> {
+ let inode = unsafe { &***self.node.inode };
+ let (start, frag_idx) = unsafe {
+ match inode.base.type_ as u32 {
+ SQFS_INODE_TYPE_SQFS_INODE_FILE => (
+ inode.data.file.blocks_start as u64,
+ inode.data.file.fragment_index,
+ ),
+ SQFS_INODE_TYPE_SQFS_INODE_EXT_FILE => (
+ inode.data.file_ext.blocks_start,
+ inode.data.file_ext.fragment_idx,
+ ),
+ _ => panic!("File is not a file"),
+ }
+ };
+ let block_count = inode.payload_bytes_used / std::mem::size_of::<sqfs_u32>() as u32;
+ if block_count == 0 || frag_idx != 0xffffffff {
+ return None;
+ }
+ let block_sizes = unsafe { inode.extra.as_slice(block_count as usize) };
+ if block_sizes.iter().any(|x| x & 0x01000000 == 0) {
+ return None;
+ }
+ Some(
+ self.node
+ .container
+ .map_range(start as usize, self.size() as usize),
+ )
+ }
}
impl<'a> Read for File<'a> {
- fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
- let mut locked_offset = self.offset.lock().expect(LOCK_ERR);
- if *locked_offset >= self.size() { Ok(0) }
- else {
- let res = self.node.container.data_reader().unwrap().read(&self.node.inode, *locked_offset, buf)?;
- *locked_offset += res;
- Ok(res as usize)
- }
- }
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let mut locked_offset = self.offset.lock().expect(LOCK_ERR);
+ if *locked_offset >= self.size() {
+ Ok(0)
+ } else {
+ let res = self.node.container.data_reader().unwrap().read(
+ &self.node.inode,
+ *locked_offset,
+ buf,
+ )?;
+ *locked_offset += res;
+ Ok(res as usize)
+ }
+ }
}
impl<'a> Seek for File<'a> {
- fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
- let mut locked_offset = self.offset.lock().expect(LOCK_ERR);
- let newoff = match pos {
- io::SeekFrom::Start(off) => off as i64,
- io::SeekFrom::End(off) => self.size() as i64 + off,
- io::SeekFrom::Current(off) => *locked_offset as i64 + off,
- };
- if newoff < 0 {
- Err(io::Error::new(io::ErrorKind::Other, "Attempted to seek before beginning of file"))
- }
- else {
- *locked_offset = newoff as u64;
- Ok(*locked_offset)
- }
- }
+ fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
+ let mut locked_offset = self.offset.lock().expect(LOCK_ERR);
+ let newoff = match pos {
+ io::SeekFrom::Start(off) => off as i64,
+ io::SeekFrom::End(off) => self.size() as i64 + off,
+ io::SeekFrom::Current(off) => *locked_offset as i64 + off,
+ };
+ if newoff < 0 {
+ Err(io::Error::new(
+ io::ErrorKind::Other,
+ "Attempted to seek before beginning of file",
+ ))
+ } else {
+ *locked_offset = newoff as u64;
+ Ok(*locked_offset)
+ }
+ }
}
impl<'a> std::fmt::Debug for File<'a> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "File at {:?}", self.node)
- }
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "File at {:?}", self.node)
+ }
}
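Because `File` implements both `Read` and `Seek` (the impls above), the standard library's I/O combinators compose with it directly. A brief sketch that reads the final sixteen bytes of an archived file, assuming `node` is a `Node` known to be a regular file:

    use std::io::{Read, Seek, SeekFrom};

    // Sketch: grab the tail of a file through the Read + Seek impls above.
    let mut file = node.as_file()?;
    file.seek(SeekFrom::End(-16))?; // seeking before byte 0 returns an error
    let mut tail = Vec::new();
    file.read_to_end(&mut tail)?;
    // `tail` now holds the last 16 bytes (for files at least that long).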
/// Enum type for the various kinds of data that an inode can be.
@@ -311,92 +419,110 @@ impl<'a> std::fmt::Debug for File<'a> {
/// [`as_file`](Node::as_file) methods to bypass `Data` completely.
#[derive(Debug)]
pub enum Data<'a> {
- /// A regular file, containing a [`File`] object that can be used to extract the file contents.
- File(File<'a>),
+ /// A regular file, containing a [`File`] object that can be used to extract the file contents.
+ File(File<'a>),
- /// A directory, containing a [`Dir`] that can be used to access the directory's children.
- Dir(Dir<'a>),
+ /// A directory, containing a [`Dir`] that can be used to access the directory's children.
+ Dir(Dir<'a>),
- /// A symbolic link, containing the target of the link as a [`PathBuf`].
- Symlink(PathBuf),
+ /// A symbolic link, containing the target of the link as a [`PathBuf`].
+ Symlink(PathBuf),
- /// A block device file, containing the device's major and minor numbers.
- BlockDev(u32, u32),
+ /// A block device file, containing the device's major and minor numbers.
+ BlockDev(u32, u32),
- /// A character device file, containing the device's major and minor numbers.
- CharDev(u32, u32),
+ /// A character device file, containing the device's major and minor numbers.
+ CharDev(u32, u32),
- /// A named pipe.
- Fifo,
+ /// A named pipe.
+ Fifo,
- /// A socket.
- Socket,
+ /// A socket.
+ Socket,
}
impl<'a> Data<'a> {
- fn new(node: &'a Node) -> Result<Self> {
- unsafe fn arr_to_string<'a, T>(arr: &bindings::__IncompleteArrayField<T>, len: usize) -> String {
- let slice = std::slice::from_raw_parts(arr.as_ptr() as *const u8, len);
- String::from_utf8_lossy(slice).into_owned()
- }
- fn get_dev_nums(dev: u32) -> (u32, u32) {
- ((dev & 0xfff00) >> 8, (dev & 0xff) | ((dev >> 12) & 0xfff00))
- }
- match unsafe { (***node.inode).base.type_ } as u32 {
- SQFS_INODE_TYPE_SQFS_INODE_DIR | SQFS_INODE_TYPE_SQFS_INODE_EXT_DIR => Ok(Self::Dir(Dir::new(node)?)),
- SQFS_INODE_TYPE_SQFS_INODE_FILE | SQFS_INODE_TYPE_SQFS_INODE_EXT_FILE => Ok(Self::File(File::new(node)?)),
- SQFS_INODE_TYPE_SQFS_INODE_SLINK => Ok(unsafe {
- let path_str = arr_to_string(&(***node.inode).extra, (***node.inode).data.slink.target_size as usize);
- Self::Symlink(PathBuf::from(path_str))
- }),
- SQFS_INODE_TYPE_SQFS_INODE_EXT_SLINK => Ok(unsafe {
- let path_str = arr_to_string(&(***node.inode).extra, (***node.inode).data.slink_ext.target_size as usize);
- Self::Symlink(PathBuf::from(path_str))
- }),
- SQFS_INODE_TYPE_SQFS_INODE_BDEV => Ok(unsafe {
- let (maj, min) = get_dev_nums((***node.inode).data.dev.devno);
- Self::BlockDev(maj, min)
- }),
- SQFS_INODE_TYPE_SQFS_INODE_EXT_BDEV => Ok(unsafe {
- let (maj, min) = get_dev_nums((***node.inode).data.dev_ext.devno);
- Self::BlockDev(maj, min)
- }),
- SQFS_INODE_TYPE_SQFS_INODE_CDEV => Ok(unsafe {
- let (maj, min) = get_dev_nums((***node.inode).data.dev.devno);
- Self::CharDev(maj, min)
- }),
- SQFS_INODE_TYPE_SQFS_INODE_EXT_CDEV => Ok(unsafe {
- let (maj, min) = get_dev_nums((***node.inode).data.dev_ext.devno);
- Self::CharDev(maj, min)
- }),
- SQFS_INODE_TYPE_SQFS_INODE_FIFO | SQFS_INODE_TYPE_SQFS_INODE_EXT_FIFO => Ok(Self::Fifo),
- SQFS_INODE_TYPE_SQFS_INODE_SOCKET | SQFS_INODE_TYPE_SQFS_INODE_EXT_SOCKET => Ok(Self::Socket),
- _ => Err(SquashfsError::LibraryReturnError("Unsupported inode type".to_string())),
- }
- }
-
- /// Get a human-readable English name for the type of file represented by this object, intended
- /// primarily for debugging.
- pub fn name(&self) -> String {
- match self {
- Data::File(_) => "regular file",
- Data::Dir(_) => "directory",
- Data::Symlink(_) => "symbolic link",
- Data::BlockDev(_, _) => "block device",
- Data::CharDev(_, _) => "character device",
- Data::Fifo => "named pipe",
- Data::Socket => "socket",
- }.to_string()
- }
+ fn new(node: &'a Node) -> Result<Self> {
+ unsafe fn arr_to_string<'a, T>(
+ arr: &bindings::__IncompleteArrayField<T>,
+ len: usize,
+ ) -> String {
+ let slice = std::slice::from_raw_parts(arr.as_ptr() as *const u8, len);
+ String::from_utf8_lossy(slice).into_owned()
+ }
+ fn get_dev_nums(dev: u32) -> (u32, u32) {
+ ((dev & 0xfff00) >> 8, (dev & 0xff) | ((dev >> 12) & 0xfff00))
+ }
+ match unsafe { (***node.inode).base.type_ } as u32 {
+ SQFS_INODE_TYPE_SQFS_INODE_DIR | SQFS_INODE_TYPE_SQFS_INODE_EXT_DIR => {
+ Ok(Self::Dir(Dir::new(node)?))
+ }
+ SQFS_INODE_TYPE_SQFS_INODE_FILE | SQFS_INODE_TYPE_SQFS_INODE_EXT_FILE => {
+ Ok(Self::File(File::new(node)?))
+ }
+ SQFS_INODE_TYPE_SQFS_INODE_SLINK => Ok(unsafe {
+ let path_str = arr_to_string(
+ &(***node.inode).extra,
+ (***node.inode).data.slink.target_size as usize,
+ );
+ Self::Symlink(PathBuf::from(path_str))
+ }),
+ SQFS_INODE_TYPE_SQFS_INODE_EXT_SLINK => Ok(unsafe {
+ let path_str = arr_to_string(
+ &(***node.inode).extra,
+ (***node.inode).data.slink_ext.target_size as usize,
+ );
+ Self::Symlink(PathBuf::from(path_str))
+ }),
+ SQFS_INODE_TYPE_SQFS_INODE_BDEV => Ok(unsafe {
+ let (maj, min) = get_dev_nums((***node.inode).data.dev.devno);
+ Self::BlockDev(maj, min)
+ }),
+ SQFS_INODE_TYPE_SQFS_INODE_EXT_BDEV => Ok(unsafe {
+ let (maj, min) = get_dev_nums((***node.inode).data.dev_ext.devno);
+ Self::BlockDev(maj, min)
+ }),
+ SQFS_INODE_TYPE_SQFS_INODE_CDEV => Ok(unsafe {
+ let (maj, min) = get_dev_nums((***node.inode).data.dev.devno);
+ Self::CharDev(maj, min)
+ }),
+ SQFS_INODE_TYPE_SQFS_INODE_EXT_CDEV => Ok(unsafe {
+ let (maj, min) = get_dev_nums((***node.inode).data.dev_ext.devno);
+ Self::CharDev(maj, min)
+ }),
+ SQFS_INODE_TYPE_SQFS_INODE_FIFO | SQFS_INODE_TYPE_SQFS_INODE_EXT_FIFO => Ok(Self::Fifo),
+ SQFS_INODE_TYPE_SQFS_INODE_SOCKET | SQFS_INODE_TYPE_SQFS_INODE_EXT_SOCKET => {
+ Ok(Self::Socket)
+ }
+ _ => Err(SquashfsError::LibraryReturnError(
+ "Unsupported inode type".to_string(),
+ )),
+ }
+ }
+
+ /// Get a human-readable English name for the type of file represented by this object, intended
+ /// primarily for debugging.
+ pub fn name(&self) -> String {
+ match self {
+ Data::File(_) => "regular file",
+ Data::Dir(_) => "directory",
+ Data::Symlink(_) => "symbolic link",
+ Data::BlockDev(_, _) => "block device",
+ Data::CharDev(_, _) => "character device",
+ Data::Fifo => "named pipe",
+ Data::Socket => "socket",
+ }
+ .to_string()
+ }
}
/// Represents the namespace of extended attributes.
#[repr(u32)]
#[derive(Clone, Copy)]
pub enum XattrType {
- User = SQFS_XATTR_TYPE_SQFS_XATTR_USER,
- Trusted = SQFS_XATTR_TYPE_SQFS_XATTR_TRUSTED,
- Security = SQFS_XATTR_TYPE_SQFS_XATTR_SECURITY,
+ User = SQFS_XATTR_TYPE_SQFS_XATTR_USER,
+ Trusted = SQFS_XATTR_TYPE_SQFS_XATTR_TRUSTED,
+ Security = SQFS_XATTR_TYPE_SQFS_XATTR_SECURITY,
}
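`Data` gives an exhaustive view of an inode, so a single match replaces chains of `is_file`/`is_dir` probes. A minimal dispatch sketch over `node.data()?`, using the variants and `name()` helper defined above:

    // Sketch: branch once on the kind of data an inode holds.
    match node.data()? {
        Data::File(f) => println!("regular file, {} bytes", f.size()),
        Data::Symlink(target) => println!("symlink -> {}", target.display()),
        Data::BlockDev(maj, min) | Data::CharDev(maj, min) => {
            println!("device node {}:{}", maj, min)
        }
        other => println!("{}", other.name()), // "directory", "named pipe", "socket"
    }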
/// An object packaging a [`File`] with the [`Node`] from which it was constructed.
@@ -411,33 +537,33 @@ pub enum XattrType {
///
/// Create an `OwnedFile` using [`Node::into_owned_file`].
pub struct OwnedFile<'a> {
- handle: OwningHandle<Box<Node<'a>>, Box<File<'a>>>,
+ handle: OwningHandle<Box<Node<'a>>, Box<File<'a>>>,
}
impl<'a> Read for OwnedFile<'a> {
- fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
- (*self.handle).read(buf)
- }
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ (*self.handle).read(buf)
+ }
}
impl<'a> Seek for OwnedFile<'a> {
- fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
- (*self.handle).seek(pos)
- }
+ fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
+ (*self.handle).seek(pos)
+ }
}
impl<'a> std::ops::Deref for OwnedFile<'a> {
- type Target = File<'a>;
+ type Target = File<'a>;
- fn deref(&self) -> &Self::Target {
- self.handle.deref()
- }
+ fn deref(&self) -> &Self::Target {
+ self.handle.deref()
+ }
}
impl<'a> std::ops::DerefMut for OwnedFile<'a> {
- fn deref_mut(&mut self) -> &mut Self::Target {
- self.handle.deref_mut()
- }
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.handle.deref_mut()
+ }
}
/// An object packaging a [`Dir`] with the [`Node`] from which it was constructed.
@@ -450,29 +576,29 @@ impl<'a> std::ops::DerefMut for OwnedFile<'a> {
/// like `Dir`. It also implements [`Deref`](std::ops::Deref) and [`DerefMut`](std::ops::DerefMut)
/// to allow access to the internal `Dir`.
pub struct OwnedDir<'a> {
- handle: OwningHandle<Box<Node<'a>>, Box<Dir<'a>>>,
+ handle: OwningHandle<Box<Node<'a>>, Box<Dir<'a>>>,
}
impl<'a> std::iter::Iterator for OwnedDir<'a> {
- type Item = Result<Node<'a>>;
+ type Item = Result<Node<'a>>;
- fn next(&mut self) -> Option<Self::Item> {
- (*self.handle).next()
- }
+ fn next(&mut self) -> Option<Self::Item> {
+ (*self.handle).next()
+ }
}
impl<'a> std::ops::Deref for OwnedDir<'a> {
- type Target = Dir<'a>;
+ type Target = Dir<'a>;
- fn deref(&self) -> &Self::Target {
- self.handle.deref()
- }
+ fn deref(&self) -> &Self::Target {
+ self.handle.deref()
+ }
}
impl<'a> std::ops::DerefMut for OwnedDir<'a> {
- fn deref_mut(&mut self) -> &mut Self::Target {
- self.handle.deref_mut()
- }
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.handle.deref_mut()
+ }
}
/// Information about a single node in the directory tree.
@@ -487,393 +613,556 @@ impl<'a> std::ops::DerefMut for OwnedDir<'a> {
/// [`path`](Self::path) and [`parent`](Self::parent), will fail. For this reason, it is generally
/// recommended to get nodes by path when possible.
pub struct Node<'a> {
- container: &'a Archive,
- path: Option<PathBuf>,
- inode: Arc<ManagedPointer<sqfs_inode_generic_t>>,
+ container: &'a Archive,
+ path: Option<PathBuf>,
+ inode: Arc<ManagedPointer<sqfs_inode_generic_t>>,
}
impl<'a> Node<'a> {
- fn new(container: &'a Archive, inode: ManagedPointer<sqfs_inode_generic_t>, path: Option<PathBuf>) -> Result<Self> {
- Ok(Self { container: container, path: path, inode: Arc::new(inode) })
- }
-
- /// Get a node's extended attributes in a given namespace as a map of byte Vecs.
- pub fn xattrs(&self, category: XattrType) -> Result<HashMap<Vec<u8>, Vec<u8>>> {
- if self.container.superblock.flags & SQFS_SUPER_FLAGS_SQFS_FLAG_NO_XATTRS as u16 != 0 { Ok(HashMap::new()) }
- // TODO The following line reflects what I think is a bug. I have a non-xattr archive
- // created by mksquashfs, which does not have the above flag set but has the below table
- // offset of -1. This workaround allows us to check both cases until I get around to
- // figuring out what's going on.
- else if self.container.superblock.xattr_id_table_start == 0xffffffffffffffff { Ok(HashMap::new()) }
- else {
- let compressor = self.container.compressor()?;
- let xattr_reader = sfs_init_check_null(&|| unsafe {
- sqfs_xattr_reader_create(0)
- }, "Coudn't create xattr reader", sfs_destroy)?;
- unsafe { sfs_check(sqfs_xattr_reader_load(*xattr_reader, &self.container.superblock, *self.container.file, *compressor), "Couldn't load xattr reader")?; }
- let mut xattr_idx: u32 = NO_XATTRS;
- unsafe { sfs_check(sqfs_inode_get_xattr_index(self.inode.as_const(), &mut xattr_idx), "Couldn't get xattr index")?; }
- let desc = sfs_init(&|x| unsafe {
- sqfs_xattr_reader_get_desc(*xattr_reader, xattr_idx, x)
- }, "Couldn't get xattr descriptor")?;
- let mut ret: HashMap<Vec<u8>, Vec<u8>> = HashMap::new();
- unsafe { sfs_check(sqfs_xattr_reader_seek_kv(*xattr_reader, &desc), "Couldn't seek to xattr location")? };
- for _ in 0..desc.count {
- let prefixlen = unsafe { CStr::from_ptr(sqfs_get_xattr_prefix(category as u32)).to_bytes().len() };
- let key = sfs_init_ptr(&|x| unsafe {
- sqfs_xattr_reader_read_key(*xattr_reader, x)
- }, "Couldn't read xattr key", libc_free)?;
- let val = sfs_init_ptr(&|x| unsafe {
- sqfs_xattr_reader_read_value(*xattr_reader, *key, x)
- }, "Couldn't read xattr value", libc_free)?;
- if unsafe { (**key).type_ } as u32 & SQFS_XATTR_TYPE_SQFS_XATTR_PREFIX_MASK == category as u32 {
- unsafe {
- let keyvec = (**key).key.as_slice((**key).size as usize + prefixlen)[prefixlen..].to_vec();
- let valvec = (**val).value.as_slice((**val).size as usize).to_vec();
- ret.insert(keyvec, valvec);
- }
- }
- }
- Ok(ret)
- }
- }
-
- /// Get the inode number of a node.
- ///
- /// This can be used to cheaply compare nodes for equality or can be later used with
- /// [`get_id`](Archive::get_id) to retrieve nodes without traversing the directory tree.
- pub fn id(&self) -> u32 {
- unsafe { (***self.inode).base.inode_number }
- }
-
- /// Retrieve the data stored at the node.
- pub fn data(&self) -> Result<Data> {
- Data::new(&self)
- }
-
- /// Get the absolute path to the node in the archive.
- ///
- /// If the node was obtained in a way that did not provide path information, this will return
- /// `None`. If the node was retrieved using [`Archive::get`], this should return `Some`.
- pub fn path(&self) -> Option<&Path> {
- self.path.as_ref().map(|path| path.as_path())
- }
-
- fn path_string(&self) -> String {
- match &self.path {
- Some(path) => path.display().to_string(), //os_to_string(path.as_os_str()),
- None => "<unknown>".to_string(),
- }
- }
-
- /// A convenience method to retrieve the file name of the node from its path.
- ///
- /// As with [`path`](Self::path), if the node does not have embedded path information, this
- /// will return `None`.
- pub fn name(&self) -> Option<String> {
- self.path.as_ref().map(|path| path.file_name().map(|x| x.to_string_lossy().to_string()).unwrap_or("/".to_string()))
- }
-
- /// Get the parent directory node of the current node.
- ///
- /// If the node is the root of the tree, it will return a copy of itself. If this node was
- /// created without path information, it will raise a [`NoPath`](SquashfsError::NoPath) error.
- pub fn parent(&self) -> Result<Self> {
- self.path.as_ref().map(|path| {
- let ppath = path.parent().unwrap_or(&Path::new(""));
- self.container.get_exists(&os_to_string(ppath.as_os_str())?)
- }).ok_or(SquashfsError::NoPath)?
- }
-
- /// Resolve symbolic links to their targets, raising an error if a target does not exist.
- ///
- /// This works the same way as [`resolve`](Self::resolve), except that an error is raised if
- /// any link in the chain of symbolic links points at a path that does not exist.
- pub fn resolve_exists(&self) -> Result<Self> {
- let mut visited = HashSet::new();
- let mut cur = Box::new(self.clone());
- let mut i = 0;
- loop {
- match cur.data()? {
- Data::Symlink(targetstr) => {
- let rawtarget = PathBuf::from(targetstr);
- let target = match cur.path {
- Some(path) => path.parent().unwrap_or(&Path::new("")).join(rawtarget),
- None => match rawtarget.is_absolute() {
- true => rawtarget,
- false => Err(SquashfsError::NoPath)?,
- }
- };
- if !visited.insert(target.clone()) {
- return Err(SquashfsError::LinkLoop(target));
- }
- cur = Box::new(cur.container.get_exists(&target)?);
- }
- _ => return Ok(*cur),
- }
- i += 1;
- if i > LINK_MAX { Err(SquashfsError::LinkChain(LINK_MAX))?; }
- }
- }
-
- /// Resolve symbolic links to their targets.
- ///
- /// This follows the chain of symbolic links starting at the current node all the way to the
- /// end, returning the final node, which is guaranteed not to be a symbolic link. If any link
- /// in the chain points at a path that does not exist, it returns `Ok(None)`. If the current
- /// node is not a sybmolic link, this returns a copy of itself.
- pub fn resolve(&self) -> Result<Option<Self>> {
- enoent_ok(self.resolve_exists())
- }
-
- /// Return true if the current `Node` is a file.
- ///
- /// This does *not* resolve symbolic links, and will return `false` when called on nodes that
- /// are symbolic links to files.
- pub fn is_file(&self) -> Result<bool> {
- match self.data()? {
- Data::File(_) => Ok(true),
- _ => Ok(false),
- }
- }
-
- /// Fetch the [`File`] object from the current `Node`.
- ///
- /// This is essentially a shortcut for `if let Data::File(file) = self.data()`. If this node
- /// is not a regular file, this will return an error. This does *not* resolve symbolic links;
- /// the caller should call [`resolve`](Self::resolve) first if the node could be a link.
- pub fn as_file(&self) -> Result<File> {
- match self.data()? {
- Data::File(f) => Ok(f),
- other => Err(SquashfsError::WrongType(self.path_string(), other.name(), "regular file".to_string())),
- }
- }
-
- /// Convert the `Node` into an [`OwnedFile`].
- ///
- /// This resolves symbolic links. If the current node is not a regular file or a link to one,
- /// it will return an error.
- ///
- /// let archive = Archive::new("archive.sfs")?;
- /// let mut buf = String::new();
- /// archive.get("/file.txt")?.unwrap().into_owned_file()?.read_to_string(&mut buf)?;
- pub fn into_owned_file(self) -> Result<OwnedFile<'a>> {
- let resolved = self.resolve_exists()?;
- Ok(OwnedFile { handle: OwningHandle::try_new(Box::new(resolved), |x| unsafe { (*x).as_file().map(|x| Box::new(x)) })? })
- }
-
- /// Return true if the current `Node` is a directory.
- pub fn is_dir(&self) -> Result<bool> {
- match self.data()? {
- Data::Dir(_) => Ok(true),
- _ => Ok(false),
- }
- }
-
- /// Fetch the [`Dir`] object from the current `Node`.
- ///
- /// This is essentially a shortcut for `if let Data::Dir(dir) = self.data()`. If this node is
- /// not a directory, it will return an error. This does *not* resolve symbolic links; the
- /// caller should call [`resolve`](Self::resolve) first if the node could be a link.
- pub fn as_dir(&self) -> Result<Dir> {
- match self.data()? {
- Data::Dir(d) => Ok(d),
- other => Err(SquashfsError::WrongType(self.path_string(), other.name(), "directory".to_string())),
- }
- }
-
- /// Convert the `Node` into an [`OwnedDir`].
- ///
- /// This resolves symbolic links. If the current node is not a directory or a link to one, it
- /// will return an error.
- ///
- /// let archive = Archive::new("archive.sfs")?;
- /// for child in archive.get("/dir")?.unwrap().into_owned_dir()? {
- /// println!("{}", child?.name());
- /// }
- pub fn into_owned_dir(self) -> Result<OwnedDir<'a>> {
- let resolved = self.resolve_exists()?;
- Ok(OwnedDir { handle: OwningHandle::try_new(Box::new(resolved), |x| unsafe { (*x).as_dir().map(|x| Box::new(x)) })? })
- }
-
- /// Get the UID of the `Node`.
- pub fn uid(&self) -> Result<u32> {
- let idx = unsafe { (***self.inode).base.uid_idx };
- self.container.id_lookup(idx)
- }
-
- /// Get the GID of the `Node`.
- pub fn gid(&self) -> Result<u32> {
- let idx = unsafe { (***self.inode).base.gid_idx };
- self.container.id_lookup(idx)
- }
-
- /// Get the file mode of the `Node`.
- pub fn mode(&self) -> u16 {
- unsafe { (***self.inode).base.mode }
- }
-
- /// Get the modification time of the `Node` as a UNIX timestamp.
- pub fn mtime(&self) -> u32 {
- unsafe { (***self.inode).base.mod_time }
- }
+ fn new(
+ container: &'a Archive,
+ inode: ManagedPointer<sqfs_inode_generic_t>,
+ path: Option<PathBuf>,
+ ) -> Result<Self> {
+ Ok(Self {
+ container: container,
+ path: path,
+ inode: Arc::new(inode),
+ })
+ }
+
+ /// Get a node's extended attributes in a given namespace as a map of byte Vecs.
+ pub fn xattrs(&self, category: XattrType) -> Result<HashMap<Vec<u8>, Vec<u8>>> {
+ if self.container.superblock.flags & SQFS_SUPER_FLAGS_SQFS_FLAG_NO_XATTRS as u16 != 0 {
+ Ok(HashMap::new())
+ }
+ // TODO The following line reflects what I think is a bug. I have a non-xattr archive
+ // created by mksquashfs, which does not have the above flag set but has the below table
+ // offset of -1. This workaround allows us to check both cases until I get around to
+ // figuring out what's going on.
+ else if self.container.superblock.xattr_id_table_start == 0xffffffffffffffff {
+ Ok(HashMap::new())
+ } else {
+ let compressor = self.container.compressor()?;
+ let xattr_reader = sfs_init_check_null(
+ &|| unsafe { sqfs_xattr_reader_create(0) },
+ "Coudn't create xattr reader",
+ sfs_destroy,
+ )?;
+ unsafe {
+ sfs_check(
+ sqfs_xattr_reader_load(
+ *xattr_reader,
+ &self.container.superblock,
+ *self.container.file,
+ *compressor,
+ ),
+ "Couldn't load xattr reader",
+ )?;
+ }
+ let mut xattr_idx: u32 = NO_XATTRS;
+ unsafe {
+ sfs_check(
+ sqfs_inode_get_xattr_index(self.inode.as_const(), &mut xattr_idx),
+ "Couldn't get xattr index",
+ )?;
+ }
+ let desc = sfs_init(
+ &|x| unsafe { sqfs_xattr_reader_get_desc(*xattr_reader, xattr_idx, x) },
+ "Couldn't get xattr descriptor",
+ )?;
+ let mut ret: HashMap<Vec<u8>, Vec<u8>> = HashMap::new();
+ unsafe {
+ sfs_check(
+ sqfs_xattr_reader_seek_kv(*xattr_reader, &desc),
+ "Couldn't seek to xattr location",
+ )?
+ };
+ for _ in 0..desc.count {
+ let prefixlen = unsafe {
+ CStr::from_ptr(sqfs_get_xattr_prefix(category as u32))
+ .to_bytes()
+ .len()
+ };
+ let key = sfs_init_ptr(
+ &|x| unsafe { sqfs_xattr_reader_read_key(*xattr_reader, x) },
+ "Couldn't read xattr key",
+ libc_free,
+ )?;
+ let val = sfs_init_ptr(
+ &|x| unsafe { sqfs_xattr_reader_read_value(*xattr_reader, *key, x) },
+ "Couldn't read xattr value",
+ libc_free,
+ )?;
+ if unsafe { (**key).type_ } as u32 & SQFS_XATTR_TYPE_SQFS_XATTR_PREFIX_MASK
+ == category as u32
+ {
+ unsafe {
+ let keyvec = (**key).key.as_slice((**key).size as usize + prefixlen)
+ [prefixlen..]
+ .to_vec();
+ let valvec = (**val).value.as_slice((**val).size as usize).to_vec();
+ ret.insert(keyvec, valvec);
+ }
+ }
+ }
+ Ok(ret)
+ }
+ }
+
+ /// Get the inode number of a node.
+ ///
+ /// This can be used to cheaply compare nodes for equality or can be later used with
+ /// [`get_id`](Archive::get_id) to retrieve nodes without traversing the directory tree.
+ pub fn id(&self) -> u32 {
+ unsafe { (***self.inode).base.inode_number }
+ }
+
+ /// Retrieve the data stored at the node.
+ pub fn data(&self) -> Result<Data> {
+ Data::new(&self)
+ }
+
+ /// Get the absolute path to the node in the archive.
+ ///
+ /// If the node was obtained in a way that did not provide path information, this will return
+ /// `None`. If the node was retrieved using [`Archive::get`], this should return `Some`.
+ pub fn path(&self) -> Option<&Path> {
+ self.path.as_ref().map(|path| path.as_path())
+ }
+
+ fn path_string(&self) -> String {
+ match &self.path {
+ Some(path) => path.display().to_string(), //os_to_string(path.as_os_str()),
+ None => "<unknown>".to_string(),
+ }
+ }
+
+ /// A convenience method to retrieve the file name of the node from its path.
+ ///
+ /// As with [`path`](Self::path), if the node does not have embedded path information, this
+ /// will return `None`.
+ pub fn name(&self) -> Option<String> {
+ self.path.as_ref().map(|path| {
+ path.file_name()
+ .map(|x| x.to_string_lossy().to_string())
+ .unwrap_or("/".to_string())
+ })
+ }
+
+ /// Get the parent directory node of the current node.
+ ///
+ /// If the node is the root of the tree, it will return a copy of itself. If this node was
+ /// created without path information, it will raise a [`NoPath`](SquashfsError::NoPath) error.
+ pub fn parent(&self) -> Result<Self> {
+ self.path
+ .as_ref()
+ .map(|path| {
+ let ppath = path.parent().unwrap_or(&Path::new(""));
+ self.container.get_exists(&os_to_string(ppath.as_os_str())?)
+ })
+ .ok_or(SquashfsError::NoPath)?
+ }
+
+ /// Resolve symbolic links to their targets, raising an error if a target does not exist.
+ ///
+ /// This works the same way as [`resolve`](Self::resolve), except that an error is raised if
+ /// any link in the chain of symbolic links points at a path that does not exist.
+ pub fn resolve_exists(&self) -> Result<Self> {
+ let mut visited = HashSet::new();
+ let mut cur = Box::new(self.clone());
+ let mut i = 0;
+ loop {
+ match cur.data()? {
+ Data::Symlink(targetstr) => {
+ let rawtarget = PathBuf::from(targetstr);
+ let target = match cur.path {
+ Some(path) => path.parent().unwrap_or(&Path::new("")).join(rawtarget),
+ None => match rawtarget.is_absolute() {
+ true => rawtarget,
+ false => Err(SquashfsError::NoPath)?,
+ },
+ };
+ if !visited.insert(target.clone()) {
+ return Err(SquashfsError::LinkLoop(target));
+ }
+ cur = Box::new(cur.container.get_exists(&target)?);
+ }
+ _ => return Ok(*cur),
+ }
+ i += 1;
+ if i > LINK_MAX {
+ Err(SquashfsError::LinkChain(LINK_MAX))?;
+ }
+ }
+ }
+
+ /// Resolve symbolic links to their targets.
+ ///
+ /// This follows the chain of symbolic links starting at the current node all the way to the
+ /// end, returning the final node, which is guaranteed not to be a symbolic link. If any link
+ /// in the chain points at a path that does not exist, it returns `Ok(None)`. If the current
+ /// node is not a symbolic link, this returns a copy of itself.
+ pub fn resolve(&self) -> Result<Option<Self>> {
+ enoent_ok(self.resolve_exists())
+ }
+
+ /// Return true if the current `Node` is a file.
+ ///
+ /// This does *not* resolve symbolic links, and will return `false` when called on nodes that
+ /// are symbolic links to files.
+ pub fn is_file(&self) -> Result<bool> {
+ match self.data()? {
+ Data::File(_) => Ok(true),
+ _ => Ok(false),
+ }
+ }
+
+ /// Fetch the [`File`] object from the current `Node`.
+ ///
+ /// This is essentially a shortcut for `if let Data::File(file) = self.data()`. If this node
+ /// is not a regular file, this will return an error. This does *not* resolve symbolic links;
+ /// the caller should call [`resolve`](Self::resolve) first if the node could be a link.
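+ ///
+ /// A sketch (assuming `node` is a regular file and `std::io::Read` is in scope):
+ ///
+ /// ```ignore
+ /// let mut contents = Vec::new();
+ /// node.as_file()?.read_to_end(&mut contents)?;
+ /// ```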
+ pub fn as_file(&self) -> Result<File> {
+ match self.data()? {
+ Data::File(f) => Ok(f),
+ other => Err(SquashfsError::WrongType(
+ self.path_string(),
+ other.name(),
+ "regular file".to_string(),
+ )),
+ }
+ }
+
+ /// Convert the `Node` into an [`OwnedFile`].
+ ///
+ /// This resolves symbolic links. If the current node is not a regular file or a link to one,
+ /// it will return an error.
+ ///
+ /// ```ignore
+ /// let archive = Archive::new("archive.sfs")?;
+ /// let mut buf = String::new();
+ /// archive.get("/file.txt")?.unwrap().into_owned_file()?.read_to_string(&mut buf)?;
+ /// ```
+ pub fn into_owned_file(self) -> Result<OwnedFile<'a>> {
+ let resolved = self.resolve_exists()?;
+ Ok(OwnedFile {
+ handle: OwningHandle::try_new(Box::new(resolved), |x| unsafe {
+ (*x).as_file().map(|x| Box::new(x))
+ })?,
+ })
+ }
+
+ /// Return true if the current `Node` is a directory.
+ pub fn is_dir(&self) -> Result<bool> {
+ match self.data()? {
+ Data::Dir(_) => Ok(true),
+ _ => Ok(false),
+ }
+ }
+
+ /// Fetch the [`Dir`] object from the current `Node`.
+ ///
+ /// This is essentially a shortcut for `if let Data::Dir(dir) = self.data()`. If this node is
+ /// not a directory, it will return an error. This does *not* resolve symbolic links; the
+ /// caller should call [`resolve`](Self::resolve) first if the node could be a link.
+ pub fn as_dir(&self) -> Result<Dir> {
+ match self.data()? {
+ Data::Dir(d) => Ok(d),
+ other => Err(SquashfsError::WrongType(
+ self.path_string(),
+ other.name(),
+ "directory".to_string(),
+ )),
+ }
+ }
+
+ /// Convert the `Node` into an [`OwnedDir`].
+ ///
+ /// This resolves symbolic links. If the current node is not a directory or a link to one, it
+ /// will return an error.
+ ///
+ /// ```ignore
+ /// let archive = Archive::new("archive.sfs")?;
+ /// for child in archive.get("/dir")?.unwrap().into_owned_dir()? {
+ ///     println!("{:?}", child?.name());
+ /// }
+ /// ```
+ pub fn into_owned_dir(self) -> Result<OwnedDir<'a>> {
+ let resolved = self.resolve_exists()?;
+ Ok(OwnedDir {
+ handle: OwningHandle::try_new(Box::new(resolved), |x| unsafe {
+ (*x).as_dir().map(|x| Box::new(x))
+ })?,
+ })
+ }
+
+ /// Get the UID of the `Node`.
+ pub fn uid(&self) -> Result<u32> {
+ let idx = unsafe { (***self.inode).base.uid_idx };
+ self.container.id_lookup(idx)
+ }
+
+ /// Get the GID of the `Node`.
+ pub fn gid(&self) -> Result<u32> {
+ let idx = unsafe { (***self.inode).base.gid_idx };
+ self.container.id_lookup(idx)
+ }
+
+ /// Get the file mode of the `Node`.
+ pub fn mode(&self) -> u16 {
+ unsafe { (***self.inode).base.mode }
+ }
+
+ /// Get the modification time of the `Node` as a UNIX timestamp.
+ pub fn mtime(&self) -> u32 {
+ unsafe { (***self.inode).base.mod_time }
+ }
}
impl<'a> std::clone::Clone for Node<'a> {
- fn clone(&self) -> Self {
- Self { container: self.container, path: self.path.clone(), inode: self.inode.clone() }
- }
-
+ fn clone(&self) -> Self {
+ Self {
+ container: self.container,
+ path: self.path.clone(),
+ inode: self.inode.clone(),
+ }
+ }
}
impl<'a> std::fmt::Display for Node<'a> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "{} at {}", self.data().map(|x| x.name()).unwrap_or("inaccessible file".to_string()), self.path_string())
- }
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(
+ f,
+ "{} at {}",
+ self.data()
+ .map(|x| x.name())
+ .unwrap_or("inaccessible file".to_string()),
+ self.path_string()
+ )
+ }
}
impl<'a> std::fmt::Debug for Node<'a> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "Node({:?})", self.path)
- }
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "Node({:?})", self.path)
+ }
}
/// An open SquashFS archive.
pub struct Archive {
- path: PathBuf,
- file: ManagedPointer<sqfs_file_t>,
- superblock: sqfs_super_t,
- compressor_config: sqfs_compressor_config_t,
- mmap: (std::fs::File, Mmap),
- data_readers: Mutex<Vec<DataReader>>,
+ path: PathBuf,
+ file: ManagedPointer<sqfs_file_t>,
+ superblock: sqfs_super_t,
+ compressor_config: sqfs_compressor_config_t,
+ mmap: (std::fs::File, Mmap),
+ data_readers: Mutex<Vec<DataReader>>,
}
impl Archive {
- /// Open an existing archive for reading.
- pub fn new<T: AsRef<Path>>(path: T) -> Result<Self> {
- let cpath = CString::new(os_to_string(path.as_ref().as_os_str())?)?;
- let file = sfs_init_check_null(&|| unsafe {
- sqfs_open_file(cpath.as_ptr(), SQFS_FILE_OPEN_FLAGS_SQFS_FILE_OPEN_READ_ONLY)
- }, &format!("Couldn't open input file {}", path.as_ref().display()), sfs_destroy)?;
- let superblock = sfs_init(&|x| unsafe {
- sqfs_super_read(x, *file)
- }, "Couldn't read archive superblock")?;
- let compressor_config = sfs_init(&|x| unsafe {
- sqfs_compressor_config_init(x, superblock.compression_id as u32, superblock.block_size as u64, SQFS_COMP_FLAG_SQFS_COMP_FLAG_UNCOMPRESS as u16)
- }, "Couldn't read archive compressor config")?;
- let os_file = std::fs::File::open(&path)?;
- let map = unsafe { MmapOptions::new().map(&os_file).map_err(|e| SquashfsError::Mmap(e))? };
- //let map = MemoryMap::new(superblock.bytes_used as usize, &vec![MapOption::MapReadable, MapOption::MapFd(os_file.as_raw_fd())])?;
- Ok(Self { path: path.as_ref().to_path_buf(), file: file, superblock: superblock, compressor_config: compressor_config, mmap: (os_file, map), data_readers: Mutex::new(vec![]) })
- }
-
- fn compressor(&self) -> Result<ManagedPointer<sqfs_compressor_t>> {
- Ok(sfs_init_ptr(&|x| unsafe {
- sqfs_compressor_create(&self.compressor_config, x)
- }, "Couldn't create compressor", sfs_destroy)?)
- }
-
- fn meta_reader(&self, compressor: &ManagedPointer<sqfs_compressor_t>, bounds: Option<(u64, u64)>) -> Result<ManagedPointer<sqfs_meta_reader_t>> {
- let range = bounds.unwrap_or((0, self.superblock.bytes_used));
- Ok(sfs_init_check_null(&|| unsafe {
- sqfs_meta_reader_create(*self.file, **compressor, range.0, range.1)
- }, "Couldn't create metadata reader", sfs_destroy)?)
- }
-
- fn data_reader(&self) -> Result<Leased<DataReader>> {
- let mut locked_readers = self.data_readers.lock().expect(LOCK_ERR);
- let ret = match locked_readers.pop() {
- Some(reader) => reader,
- None => { DataReader::new(&self)? },
- };
- Ok(Leased::new(&self.data_readers, ret))
- }
-
- fn id_lookup(&self, idx: u16) -> Result<u32> {
- let id_table = sfs_init_check_null(&|| unsafe {
- sqfs_id_table_create(0)
- }, "Couldn't create ID table", sfs_destroy)?;
- let compressor = self.compressor()?;
- unsafe { sfs_check(sqfs_id_table_read(*id_table, *self.file, &self.superblock, *compressor), "Couldn't read ID table")?; }
- Ok(sfs_init(&|x| unsafe {
- sqfs_id_table_index_to_id(*id_table, idx, x)
- }, "Couldn't get ID from ID table")?)
- }
-
- /// Retrieve the path with that was used to open the archive.
- pub fn path(&self) -> &Path {
- &self.path
- }
-
- /// Get the number of inodes in the archive.
- pub fn size(&self) -> u32 {
- self.superblock.inode_count
- }
-
- /// Get the [`Node`] located at the given path, raising an error if it does not exist.
- pub fn get_exists<T: AsRef<Path>>(&self, path: T) -> Result<Node> {
- let compressor = self.compressor()?;
- let dir_reader = sfs_init_check_null(&|| unsafe {
- sqfs_dir_reader_create(&self.superblock, *compressor, *self.file, 0)
- }, "Couldn't create directory reader", sfs_destroy)?;
- let root = sfs_init_ptr(&|x| unsafe {
- sqfs_dir_reader_get_root_inode(*dir_reader, x)
- }, "Couldn't get filesystem root", libc_free)?;
- let pathbuf = dumb_canonicalize(path.as_ref());
- if &pathbuf == Path::new("/") {
- Node::new(&self, root, Some(pathbuf))
- }
- else {
- let cpath = CString::new(os_to_string(pathbuf.as_os_str())?)?;
- let inode = sfs_init_ptr(&|x| unsafe {
- sqfs_dir_reader_find_by_path(*dir_reader, *root, cpath.as_ptr(), x)
- }, &format!("Unable to access path {}", path.as_ref().display()), libc_free)?;
- Node::new(&self, inode, Some(pathbuf))
- }
- }
-
- /// Get the [`Node`] located at the given path in the archive.
- ///
- /// If the path is not present, `Ok(None)` will be returned.
- pub fn get<T: AsRef<Path>>(&self, path: T) -> Result<Option<Node>> {
- enoent_ok(self.get_exists(path))
- }
-
- /// Get a node from the archive by its inode number.
- ///
- /// Each inode in an archive has a unique ID. If the archive was created with the "exportable"
- /// option (intended for exporting over NFS), it is efficient to look up inodes by their IDs.
- /// If this archive is not exportable, [`SquashfsError::Unsupported`] will be raised. A `Node`
- /// obtained in this way will lack path information, and as such operations like getting its
- /// file name or parent will fail.
- pub fn get_id(&self, id: u64) -> Result<Node> {
- if self.superblock.flags & SQFS_SUPER_FLAGS_SQFS_FLAG_EXPORTABLE as u16 == 0 { Err(SquashfsError::Unsupported("inode indexing".to_string()))?; }
- if id <= 0 || id > self.superblock.inode_count as u64 { Err(SquashfsError::Range(id, self.superblock.inode_count as u64))? }
- let compressor = self.compressor()?;
- let export_reader = self.meta_reader(&compressor, None)?; // Would be nice if we could set bounds for this
- let (block, offset) = ((id - 1) / 1024, (id - 1) % 1024 * 8);
- let block_start: u64 = sfs_init(&|x| unsafe {
- let read_at = (**self.file).read_at.expect("File object does not implement read_at");
- read_at(*self.file, self.superblock.export_table_start + block * 8, x as *mut libc::c_void, 8)
- }, "Couldn't read inode table")?;
-
- let mut noderef: u64 = 0;
- unsafe {
- sfs_check(sqfs_meta_reader_seek(*export_reader, block_start, offset), "Couldn't seek to inode reference")?;
- sfs_check(sqfs_meta_reader_read(*export_reader, &mut noderef as *mut u64 as *mut libc::c_void, 8), "Couldn't read inode reference")?;
- }
- let (block, offset) = unpack_meta_ref(noderef);
- println!("Node {} at block {}, offset {}", id, block, offset);
- let inode = sfs_init_ptr(&|x| unsafe {
- sqfs_meta_reader_read_inode(*export_reader, &self.superblock, block, offset, x)
- }, "Couldn't read inode", libc_free)?;
- Node::new(&self, inode, None)
- }
-
- fn map_range(&self, start: usize, len: usize) -> &[u8] {
- &(self.mmap.1)[start..start + len]
- }
-}
-
-unsafe impl Send for Archive { }
-unsafe impl Sync for Archive { }
+ /// Open an existing archive for reading.
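+ ///
+ /// A minimal sketch (the path is illustrative):
+ ///
+ /// ```ignore
+ /// let archive = Archive::new("archive.sfs")?;
+ /// println!("{} inodes", archive.size());
+ /// ```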
+ pub fn new<T: AsRef<Path>>(path: T) -> Result<Self> {
+ let cpath = CString::new(os_to_string(path.as_ref().as_os_str())?)?;
+ let file = sfs_init_check_null(
+ &|| unsafe {
+ sqfs_open_file(
+ cpath.as_ptr(),
+ SQFS_FILE_OPEN_FLAGS_SQFS_FILE_OPEN_READ_ONLY,
+ )
+ },
+ &format!("Couldn't open input file {}", path.as_ref().display()),
+ sfs_destroy,
+ )?;
+ let superblock = sfs_init(
+ &|x| unsafe { sqfs_super_read(x, *file) },
+ "Couldn't read archive superblock",
+ )?;
+ let compressor_config = sfs_init(
+ &|x| unsafe {
+ sqfs_compressor_config_init(
+ x,
+ superblock.compression_id as u32,
+ superblock.block_size as usize,
+ SQFS_COMP_FLAG_SQFS_COMP_FLAG_UNCOMPRESS as u16,
+ )
+ },
+ "Couldn't read archive compressor config",
+ )?;
+ let os_file = std::fs::File::open(&path)?;
+ let map = unsafe {
+ MmapOptions::new()
+ .map(&os_file)
+ .map_err(|e| SquashfsError::Mmap(e))?
+ };
+ //let map = MemoryMap::new(superblock.bytes_used as usize, &vec![MapOption::MapReadable, MapOption::MapFd(os_file.as_raw_fd())])?;
+ Ok(Self {
+ path: path.as_ref().to_path_buf(),
+ file: file,
+ superblock: superblock,
+ compressor_config: compressor_config,
+ mmap: (os_file, map),
+ data_readers: Mutex::new(vec![]),
+ })
+ }
+
+ fn compressor(&self) -> Result<ManagedPointer<sqfs_compressor_t>> {
+ Ok(sfs_init_ptr(
+ &|x| unsafe { sqfs_compressor_create(&self.compressor_config, x) },
+ "Couldn't create compressor",
+ sfs_destroy,
+ )?)
+ }
+
+ fn meta_reader(
+ &self,
+ compressor: &ManagedPointer<sqfs_compressor_t>,
+ bounds: Option<(u64, u64)>,
+ ) -> Result<ManagedPointer<sqfs_meta_reader_t>> {
+ let range = bounds.unwrap_or((0, self.superblock.bytes_used));
+ Ok(sfs_init_check_null(
+ &|| unsafe { sqfs_meta_reader_create(*self.file, **compressor, range.0, range.1) },
+ "Couldn't create metadata reader",
+ sfs_destroy,
+ )?)
+ }
+
+ fn data_reader(&self) -> Result<Leased<DataReader>> {
+ let mut locked_readers = self.data_readers.lock().expect(LOCK_ERR);
+ let ret = match locked_readers.pop() {
+ Some(reader) => reader,
+ None => DataReader::new(&self)?,
+ };
+ Ok(Leased::new(&self.data_readers, ret))
+ }
+
+ fn id_lookup(&self, idx: u16) -> Result<u32> {
+ let id_table = sfs_init_check_null(
+ &|| unsafe { sqfs_id_table_create(0) },
+ "Couldn't create ID table",
+ sfs_destroy,
+ )?;
+ let compressor = self.compressor()?;
+ unsafe {
+ sfs_check(
+ sqfs_id_table_read(*id_table, *self.file, &self.superblock, *compressor),
+ "Couldn't read ID table",
+ )?;
+ }
+ Ok(sfs_init(
+ &|x| unsafe { sqfs_id_table_index_to_id(*id_table, idx, x) },
+ "Couldn't get ID from ID table",
+ )?)
+ }
+
+ /// Retrieve the path that was used to open the archive.
+ pub fn path(&self) -> &Path {
+ &self.path
+ }
+
+ /// Get the number of inodes in the archive.
+ pub fn size(&self) -> u32 {
+ self.superblock.inode_count
+ }
+
+ /// Get the [`Node`] located at the given path, raising an error if it does not exist.
+ pub fn get_exists<T: AsRef<Path>>(&self, path: T) -> Result<Node> {
+ let compressor = self.compressor()?;
+ let dir_reader = sfs_init_check_null(
+ &|| unsafe { sqfs_dir_reader_create(&self.superblock, *compressor, *self.file, 0) },
+ "Couldn't create directory reader",
+ sfs_destroy,
+ )?;
+ let root = sfs_init_ptr(
+ &|x| unsafe { sqfs_dir_reader_get_root_inode(*dir_reader, x) },
+ "Couldn't get filesystem root",
+ libc_free,
+ )?;
+ let pathbuf = dumb_canonicalize(path.as_ref());
+ if &pathbuf == Path::new("/") {
+ Node::new(&self, root, Some(pathbuf))
+ } else {
+ let cpath = CString::new(os_to_string(pathbuf.as_os_str())?)?;
+ let inode = sfs_init_ptr(
+ &|x| unsafe { sqfs_dir_reader_find_by_path(*dir_reader, *root, cpath.as_ptr(), x) },
+ &format!("Unable to access path {}", path.as_ref().display()),
+ libc_free,
+ )?;
+ Node::new(&self, inode, Some(pathbuf))
+ }
+ }
+
+ /// Get the [`Node`] located at the given path in the archive.
+ ///
+ /// If the path is not present, `Ok(None)` will be returned.
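+ ///
+ /// For example (the path is illustrative):
+ ///
+ /// ```ignore
+ /// if let Some(node) = archive.get("/etc/hostname")? {
+ ///     println!("found {}", node);
+ /// }
+ /// ```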
+ pub fn get<T: AsRef<Path>>(&self, path: T) -> Result<Option<Node>> {
+ enoent_ok(self.get_exists(path))
+ }
+
+ /// Get a node from the archive by its inode number.
+ ///
+ /// Each inode in an archive has a unique ID. If the archive was created with the "exportable"
+ /// option (intended for exporting over NFS), it is efficient to look up inodes by their IDs.
+ /// If this archive is not exportable, [`SquashfsError::Unsupported`] will be raised. A `Node`
+ /// obtained in this way will lack path information, and as such operations like getting its
+ /// file name or parent will fail.
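+ ///
+ /// A sketch, assuming an archive written with an export table:
+ ///
+ /// ```ignore
+ /// let node = archive.get_id(1)?;
+ /// assert_eq!(node.id(), 1);
+ /// assert!(node.path().is_none());
+ /// ```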
+ pub fn get_id(&self, id: u64) -> Result<Node> {
+ if self.superblock.flags & SQFS_SUPER_FLAGS_SQFS_FLAG_EXPORTABLE as u16 == 0 {
+ Err(SquashfsError::Unsupported("inode indexing".to_string()))?;
+ }
+ if id <= 0 || id > self.superblock.inode_count.into() {
+ Err(SquashfsError::Range(id, self.superblock.inode_count as u64))?
+ }
+ let compressor = self.compressor()?;
+ let export_reader = self.meta_reader(&compressor, None)?; // Would be nice if we could set bounds for this
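+ // The export table stores one 8-byte inode reference per inode; an uncompressed
+ // metadata block is 8 KiB, so each block holds 1024 references.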
+ let (block, offset) = ((id - 1) / 1024, (id - 1) % 1024 * 8);
+ let block_start: u64 = sfs_init(
+ &|x| unsafe {
+ let read_at = (**self.file)
+ .read_at
+ .expect("File object does not implement read_at");
+ read_at(
+ *self.file,
+ self.superblock.export_table_start + block * 8,
+ x as *mut libc::c_void,
+ 8,
+ )
+ },
+ "Couldn't read inode table",
+ )?;
+
+ let mut noderef: u64 = 0;
+ unsafe {
+ sfs_check(
+ sqfs_meta_reader_seek(*export_reader, block_start, offset as usize),
+ "Couldn't seek to inode reference",
+ )?;
+ sfs_check(
+ sqfs_meta_reader_read(
+ *export_reader,
+ &mut noderef as *mut u64 as *mut libc::c_void,
+ 8,
+ ),
+ "Couldn't read inode reference",
+ )?;
+ }
+ let (block, offset) = unpack_meta_ref(noderef);
+ println!("Node {} at block {}, offset {}", id, block, offset);
+ let inode = sfs_init_ptr(
+ &|x| unsafe {
+ sqfs_meta_reader_read_inode(
+ *export_reader,
+ &self.superblock,
+ block,
+ offset as usize,
+ x,
+ )
+ },
+ "Couldn't read inode",
+ libc_free,
+ )?;
+ Node::new(&self, inode, None)
+ }
+
+ fn map_range(&self, start: usize, len: usize) -> &[u8] {
+ &(self.mmap.1)[start..start + len]
+ }
+}
+
+unsafe impl Send for Archive {}
+unsafe impl Sync for Archive {}
diff --git a/src/write.rs b/src/write.rs
index 49e9f72..3620803 100644
--- a/src/write.rs
+++ b/src/write.rs
@@ -21,6 +21,8 @@
//! entries. I hope to fix this some day, and in the meantime it has not caused problems in the
//! ways I have used the resultant files.
+use super::SquashfsError;
+use super::*;
use std::cell::RefCell;
use std::collections::{BTreeMap, HashMap};
use std::ffi::{CString, OsString};
@@ -28,8 +30,6 @@ use std::io::Read;
use std::path::{Path, PathBuf};
use std::sync::{Mutex, RwLock};
use std::time::SystemTime;
-use super::*;
-use super::SquashfsError;
use walkdir::{DirEntry, WalkDir};
/// Flags to fine-tune how an entry is added to the archive.
@@ -38,46 +38,46 @@ use walkdir::{DirEntry, WalkDir};
/// [`Source`] object.
#[repr(u32)]
pub enum BlockFlags {
- /// Don't compress file data.
- ///
- /// By default, files are compressed, and the compressed version is stored in the archive if it
- /// is smaller than the uncompressed version. Setting this flag will force the file to be
- /// stored uncompressed.
- DontCompress = super::SQFS_BLK_FLAGS_SQFS_BLK_DONT_COMPRESS,
-
- /// Align the file data to device blocks.
- ///
- /// If set, padding will be added before and after this file's data blocks so that it is
- /// aligned to the blocks of the underlying disk.
- BlockAlign = super::SQFS_BLK_FLAGS_SQFS_BLK_ALIGN,
-
- /// Store the tail of the file in a regular data block rather than a fragment block.
- ///
- /// The compressed content of a file to be written to an archive is split into equally-sized
- /// blocks and stored as "data blocks". The final chunk is usually smaller than the rest, so
- /// these final chunks are collected from multiple files are collected and stored together in
- /// separate "fragment blocks" as an optimization. If there is a reason for the entire file's
- /// contents to be stored together, fragmentation can be disabled using this flag.
- DontFragment = super::SQFS_BLK_FLAGS_SQFS_BLK_DONT_FRAGMENT,
-
- /// Don't deduplicated data blocks for this file.
- ///
- /// If two files contain an identical data block, the block will be stored only once and both
- /// files' block indices will point to this single block. The user can force all blocks of a
- /// file to be stored by setting this flag.
- DontDeduplicate = super::SQFS_BLK_FLAGS_SQFS_BLK_DONT_DEDUPLICATE,
-
- /// Don't elide sparse blocks.
- ///
- /// If a block of a file contains only zeros, it will not be stored at all and the file's block
- /// index will mark that the block is all-zero. This behavior can be disabled so that a zero
- /// block will be written by setting this flag.
- IgnoreSparse = super::SQFS_BLK_FLAGS_SQFS_BLK_IGNORE_SPARSE,
-
- /// Don't compute block checksums for this file.
- ///
- /// Each data block is checksummed to verify data integrity unless this flag is set.
- DontHash = super::SQFS_BLK_FLAGS_SQFS_BLK_DONT_HASH,
+ /// Don't compress file data.
+ ///
+ /// By default, files are compressed, and the compressed version is stored in the archive if it
+ /// is smaller than the uncompressed version. Setting this flag will force the file to be
+ /// stored uncompressed.
+ DontCompress = super::SQFS_BLK_FLAGS_SQFS_BLK_DONT_COMPRESS,
+
+ /// Align the file data to device blocks.
+ ///
+ /// If set, padding will be added before and after this file's data blocks so that it is
+ /// aligned to the blocks of the underlying disk.
+ BlockAlign = super::SQFS_BLK_FLAGS_SQFS_BLK_ALIGN,
+
+ /// Store the tail of the file in a regular data block rather than a fragment block.
+ ///
+ /// The compressed content of a file to be written to an archive is split into equally-sized
+ /// blocks and stored as "data blocks". The final chunk is usually smaller than the rest, so
+ /// these final chunks are collected from multiple files are collected and stored together in
+ /// separate "fragment blocks" as an optimization. If there is a reason for the entire file's
+ /// contents to be stored together, fragmentation can be disabled using this flag.
+ DontFragment = super::SQFS_BLK_FLAGS_SQFS_BLK_DONT_FRAGMENT,
+
+ /// Don't deduplicate data blocks for this file.
+ ///
+ /// If two files contain an identical data block, the block will be stored only once and both
+ /// files' block indices will point to this single block. The user can force all blocks of a
+ /// file to be stored by setting this flag.
+ DontDeduplicate = super::SQFS_BLK_FLAGS_SQFS_BLK_DONT_DEDUPLICATE,
+
+ /// Don't elide sparse blocks.
+ ///
+ /// If a block of a file contains only zeros, it will not be stored at all and the file's block
+ /// index will mark that the block is all-zero. This behavior can be disabled so that a zero
+ /// block will be written by setting this flag.
+ IgnoreSparse = super::SQFS_BLK_FLAGS_SQFS_BLK_IGNORE_SPARSE,
+
+ /// Don't compute block checksums for this file.
+ ///
+ /// Each data block is checksummed to verify data integrity unless this flag is set.
+ DontHash = super::SQFS_BLK_FLAGS_SQFS_BLK_DONT_HASH,
}
/// Represents the data of a filesystem object that can be added to an archive.
@@ -85,38 +85,38 @@ pub enum BlockFlags {
/// When creating the archive, this object is read from a [`Source`] (which additionally describes
/// the filesystem attributes of the node) and used to set the type and contents of the node.
pub enum SourceData {
- /// Create a file with the provided contents.
- ///
- /// The contained object will be read and its contents placed in the file written to the
- /// archive.
- File(Box<dyn Read + Sync + Send>),
-
- /// Create a directory with the given chidren.
- ///
- /// The creator must provide an iterator over [`OsString`] and `u32`, which respectively
- /// represent the name and inode number of each child of this directory. This is one of the
- /// hardest parts about writing archive contents -- all children of each directory must be
- /// written before the directory itself, so that the inode numbers of the children are known.
- /// [`TreeProcessor`] facilitates this by performing a post-order traversal of a filesystem,
- /// ensuring that files are written in the correct order.
- Dir(Box<dyn Iterator<Item=(OsString, u32)> + Sync + Send>),
-
- /// Create a symbolic link to the given path.
- ///
- /// It is not required for the target of the symlink to exist.
- Symlink(PathBuf),
-
- /// Create a block device file with the given major and minor device numbers.
- BlockDev(u32, u32),
-
- /// Create a character device file with the given major and minor device numbers.
- CharDev(u32, u32),
-
- /// Create a named pipe.
- Fifo,
-
- /// Create a socket.
- Socket,
+ /// Create a file with the provided contents.
+ ///
+ /// The contained object will be read and its contents placed in the file written to the
+ /// archive.
+ File(Box<dyn Read + Sync + Send>),
+
+ /// Create a directory with the given children.
+ ///
+ /// The creator must provide an iterator over [`OsString`] and `u32`, which respectively
+ /// represent the name and inode number of each child of this directory. This is one of the
+ /// hardest parts about writing archive contents -- all children of each directory must be
+ /// written before the directory itself, so that the inode numbers of the children are known.
+ /// [`TreeProcessor`] facilitates this by performing a post-order traversal of a filesystem,
+ /// ensuring that files are written in the correct order.
+ Dir(Box<dyn Iterator<Item = (OsString, u32)> + Sync + Send>),
+
+ /// Create a symbolic link to the given path.
+ ///
+ /// It is not required for the target of the symlink to exist.
+ Symlink(PathBuf),
+
+ /// Create a block device file with the given major and minor device numbers.
+ BlockDev(u32, u32),
+
+ /// Create a character device file with the given major and minor device numbers.
+ CharDev(u32, u32),
+
+ /// Create a named pipe.
+ Fifo,
+
+ /// Create a socket.
+ Socket,
}
/// A single node to be added to the SquashFS archive.
@@ -131,122 +131,152 @@ pub enum SourceData {
/// This object is designed to be constructed by the user by setting all fields to the appropriate
/// values.
pub struct Source {
- /// The type of the node and the data it contains.
- pub data: SourceData,
+ /// The type of the node and the data it contains.
+ pub data: SourceData,
- /// The UID of the file.
- pub uid: u32,
+ /// The UID of the file.
+ pub uid: u32,
- /// The GID of the file.
- pub gid: u32,
+ /// The GID of the file.
+ pub gid: u32,
- /// The file mode.
- pub mode: u16,
+ /// The file mode.
+ pub mode: u16,
- /// The modification time of the file as a Unix timestamp.
- pub modified: u32,
+ /// The modification time of the file as a Unix timestamp.
+ pub modified: u32,
- /// Extended attributes on the node. Each one must start with a valid xattr namespace (such as
- /// "user.", and the values can be arbitrary byte strings.
- pub xattrs: HashMap<OsString, Vec<u8>>,
+ /// Extended attributes on the node. Each one must start with a valid xattr namespace (such as
+ /// "user."), and the values can be arbitrary byte strings.
+ pub xattrs: HashMap<OsString, Vec<u8>>,
- /// [`BlockFlags`] to set on the node to control how its contents are archived. Multiple flags
- /// can be combined using `|`.
- pub flags: u32,
+ /// [`BlockFlags`] to set on the node to control how its contents are archived. Multiple flags
+ /// can be combined using `|`.
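+ ///
+ /// For example (a sketch):
+ ///
+ /// ```ignore
+ /// source.flags = BlockFlags::DontCompress as u32 | BlockFlags::DontFragment as u32;
+ /// ```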
+ pub flags: u32,
}
fn file_xattrs(path: &Path) -> Result<HashMap<OsString, Vec<u8>>> {
- xattr::list(path)?.map(|attr| {
- let value = xattr::get(path, attr.clone()).map_err(|e| SquashfsError::Xattr(path.to_path_buf(), e))?
- .expect(&format!("Could not retrieve xattr {:?} reported to be present", attr));
- Ok((attr, value))
- }).collect()
+ xattr::list(path)?
+ .map(|attr| {
+ let value = xattr::get(path, attr.clone())
+ .map_err(|e| SquashfsError::Xattr(path.to_path_buf(), e))?
+ .expect(&format!(
+ "Could not retrieve xattr {:?} reported to be present",
+ attr
+ ));
+ Ok((attr, value))
+ })
+ .collect()
}
-fn copy_metadata(src: &ManagedPointer<sqfs_inode_generic_t>, dst: &mut ManagedPointer<sqfs_inode_generic_t>) -> Result<()> {
- let (src_base, dst_base) = unsafe { (&(***src).base, &mut (***dst).base) };
- dst_base.mode = src_base.mode;
- dst_base.uid_idx = src_base.uid_idx;
- dst_base.gid_idx = src_base.gid_idx;
- dst_base.mod_time = src_base.mod_time;
- dst_base.inode_number = src_base.inode_number;
- let mut xattr_idx: u32 = 0;
- unsafe {
- sfs_check(sqfs_inode_get_xattr_index(**src, &mut xattr_idx), "Couldn't get xattr index")?;
- sfs_check(sqfs_inode_set_xattr_index(**dst, xattr_idx), "Couldn't set xattr index")?;
- }
- Ok(())
+fn copy_metadata(
+ src: &ManagedPointer<sqfs_inode_generic_t>,
+ dst: &mut ManagedPointer<sqfs_inode_generic_t>,
+) -> Result<()> {
+ let (src_base, dst_base) = unsafe { (&(***src).base, &mut (***dst).base) };
+ dst_base.mode = src_base.mode;
+ dst_base.uid_idx = src_base.uid_idx;
+ dst_base.gid_idx = src_base.gid_idx;
+ dst_base.mod_time = src_base.mod_time;
+ dst_base.inode_number = src_base.inode_number;
+ let mut xattr_idx: u32 = 0;
+ unsafe {
+ sfs_check(
+ sqfs_inode_get_xattr_index(**src, &mut xattr_idx),
+ "Couldn't get xattr index",
+ )?;
+ sfs_check(
+ sqfs_inode_set_xattr_index(**dst, xattr_idx),
+ "Couldn't set xattr index",
+ )?;
+ }
+ Ok(())
}
impl Source {
- /// Construct a `Source` from a `SourceData`, using defaults for all metadata fields.
- pub fn defaults(data: SourceData) -> Self {
- Self { data: data, uid: 0, gid: 0, mode: 0x1ff, modified: 0, xattrs: HashMap::new(), flags: 0 }
- }
-
- fn devno(maj: u32, min: u32) -> u32 {
- ((min & 0xfff00) << 20) | ((maj & 0xfff) << 8) | (min & 0xff)
- }
-
- unsafe fn to_inode(&self, link_count: u32) -> Result<ManagedPointer<sqfs_inode_generic_t>> {
- unsafe fn create_inode(kind: SQFS_INODE_TYPE, extra: usize) -> ManagedPointer<sqfs_inode_generic_t> {
- use std::alloc::{alloc, Layout};
- use std::mem::{align_of, size_of};
- let layout = Layout::from_size_align_unchecked(size_of::<sqfs_inode_generic_t>() + extra, align_of::<sqfs_inode_generic_t>());
- let ret = alloc(layout) as *mut sqfs_inode_generic_t;
- (*ret).base.type_ = kind as u16;
- ManagedPointer::new(ret, rust_dealloc)
- }
- let ret = match &self.data {
- SourceData::File(_) => create_inode(SQFS_INODE_TYPE_SQFS_INODE_FILE, 0),
- SourceData::Dir(_) => {
- let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_DIR, 0);
- (**ret).data.dir.nlink = link_count;
- ret
- },
- SourceData::Symlink(dest_os) => {
- let dest = os_to_string(dest_os.as_os_str())?.into_bytes();
- let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_SLINK, dest.len());
- let mut data = &mut (**ret).data.slink;
- data.nlink = link_count;
- data.target_size = dest.len() as u32;
- let dest_field = std::mem::transmute::<_, &mut [u8]>((**ret).extra.as_mut_slice(dest.len()));
- dest_field.copy_from_slice(dest.as_slice());
- ret
- },
- SourceData::BlockDev(maj, min) => {
- let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_BDEV, 0);
- let mut data = &mut (**ret).data.dev;
- data.nlink = link_count;
- data.devno = Self::devno(*maj, *min);
- ret
- },
- SourceData::CharDev(maj, min) => {
- let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_CDEV, 0);
- let mut data = &mut (**ret).data.dev;
- data.nlink = link_count;
- data.devno = Self::devno(*maj, *min);
- ret
- },
- SourceData::Fifo => {
- let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_FIFO, 0);
- (**ret).data.ipc.nlink = link_count;
- ret
- },
- SourceData::Socket => {
- let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_SOCKET, 0);
- (**ret).data.ipc.nlink = link_count;
- ret
- },
- };
- Ok(ret)
- }
+ /// Construct a `Source` from a `SourceData`, using defaults for all metadata fields.
+ pub fn defaults(data: SourceData) -> Self {
+ Self {
+ data: data,
+ uid: 0,
+ gid: 0,
+ mode: 0x1ff,
+ modified: 0,
+ xattrs: HashMap::new(),
+ flags: 0,
+ }
+ }
+
+ fn devno(maj: u32, min: u32) -> u32 {
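+ // Pack major and minor numbers into a single device number: the low byte of the
+ // minor in bits 0-7, the major in bits 8-19, and the remaining minor bits above.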
+ ((min & 0xfff00) << 20) | ((maj & 0xfff) << 8) | (min & 0xff)
+ }
+
+ unsafe fn to_inode(&self, link_count: u32) -> Result<ManagedPointer<sqfs_inode_generic_t>> {
+ unsafe fn create_inode(
+ kind: SQFS_INODE_TYPE,
+ extra: usize,
+ ) -> ManagedPointer<sqfs_inode_generic_t> {
+ use std::alloc::{alloc, Layout};
+ use std::mem::{align_of, size_of};
+ let layout = Layout::from_size_align_unchecked(
+ size_of::<sqfs_inode_generic_t>() + extra,
+ align_of::<sqfs_inode_generic_t>(),
+ );
+ let ret = alloc(layout) as *mut sqfs_inode_generic_t;
+ (*ret).base.type_ = kind as u16;
+ ManagedPointer::new(ret, rust_dealloc)
+ }
+ let ret = match &self.data {
+ SourceData::File(_) => create_inode(SQFS_INODE_TYPE_SQFS_INODE_FILE, 0),
+ SourceData::Dir(_) => {
+ let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_DIR, 0);
+ (**ret).data.dir.nlink = link_count;
+ ret
+ }
+ SourceData::Symlink(dest_os) => {
+ let dest = os_to_string(dest_os.as_os_str())?.into_bytes();
+ let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_SLINK, dest.len());
+ let mut data = &mut (**ret).data.slink;
+ data.nlink = link_count;
+ data.target_size = dest.len() as u32;
+ let dest_field =
+ std::mem::transmute::<_, &mut [u8]>((**ret).extra.as_mut_slice(dest.len()));
+ dest_field.copy_from_slice(dest.as_slice());
+ ret
+ }
+ SourceData::BlockDev(maj, min) => {
+ let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_BDEV, 0);
+ let mut data = &mut (**ret).data.dev;
+ data.nlink = link_count;
+ data.devno = Self::devno(*maj, *min);
+ ret
+ }
+ SourceData::CharDev(maj, min) => {
+ let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_CDEV, 0);
+ let mut data = &mut (**ret).data.dev;
+ data.nlink = link_count;
+ data.devno = Self::devno(*maj, *min);
+ ret
+ }
+ SourceData::Fifo => {
+ let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_FIFO, 0);
+ (**ret).data.ipc.nlink = link_count;
+ ret
+ }
+ SourceData::Socket => {
+ let mut ret = create_inode(SQFS_INODE_TYPE_SQFS_INODE_SOCKET, 0);
+ (**ret).data.ipc.nlink = link_count;
+ ret
+ }
+ };
+ Ok(ret)
+ }
}
struct IntermediateNode {
- inode: Box<ManagedPointer<sqfs_inode_generic_t>>,
- dir_children: Option<Box<dyn Iterator<Item=(OsString, u32)> + Sync + Send>>,
- pos: u64,
+ inode: Box<ManagedPointer<sqfs_inode_generic_t>>,
+ dir_children: Option<Box<dyn Iterator<Item = (OsString, u32)> + Sync + Send>>,
+ pos: u64,
}
/// A [`Source`] bundled with the path where it should be located.
@@ -259,18 +289,21 @@ struct IntermediateNode {
/// unnecessary, [`defaults`](Self::defaults) can be used to conveniently construct a `SourceFile`
/// from a [`PathBuf`] and [`SourceData`].
pub struct SourceFile {
- pub path: PathBuf,
- pub content: Source,
+ pub path: PathBuf,
+ pub content: Source,
}
impl SourceFile {
- /// Wrap a `SourceData` in a new `Source`, using defaults for all metadata fields.
- ///
- /// This sets UID and GID to 0 and permissions to 0o777, gives a null modification time and no
- /// xattrs, and sets no flags.
- pub fn defaults(path: PathBuf, data: SourceData) -> Self {
- Self { path: path, content: Source::defaults(data) }
- }
+ /// Wrap a `SourceData` in a new `Source`, using defaults for all metadata fields.
+ ///
+ /// This sets UID and GID to 0 and permissions to 0o777, gives a null modification time and no
+ /// xattrs, and sets no flags.
+ pub fn defaults(path: PathBuf, data: SourceData) -> Self {
+ Self {
+ path: path,
+ content: Source::defaults(data),
+ }
+ }
}
/// A basic SquashFS writer.
@@ -295,261 +328,463 @@ impl SourceFile {
/// writer.add(Source::defaults(SourceData::Dir(Box::new(ids.into_iter()))))?;
/// writer.finish()?;
pub struct Writer {
- outfile: ManagedPointer<sqfs_file_t>,
- #[allow(dead_code)] compressor_config: sqfs_compressor_config_t, // Referenced by `compressor`
- compressor: ManagedPointer<sqfs_compressor_t>,
- superblock: sqfs_super_t,
- #[allow(dead_code)] block_writer: ManagedPointer<sqfs_block_writer_t>, // Referenced by `block_processor`
- block_processor: Mutex<ManagedPointer<sqfs_block_processor_t>>,
- frag_table: ManagedPointer<sqfs_frag_table_t>,
- id_table: Mutex<ManagedPointer<sqfs_id_table_t>>,
- xattr_writer: Mutex<ManagedPointer<sqfs_xattr_writer_t>>,
- inode_writer: ManagedPointer<sqfs_meta_writer_t>,
- dirent_writer: ManagedPointer<sqfs_meta_writer_t>,
- dir_writer: ManagedPointer<sqfs_dir_writer_t>,
- nodes: Mutex<Vec<RefCell<IntermediateNode>>>,
- finished: RwLock<bool>,
+ outfile: ManagedPointer<sqfs_file_t>,
+ #[allow(dead_code)]
+ compressor_config: sqfs_compressor_config_t, // Referenced by `compressor`
+ compressor: ManagedPointer<sqfs_compressor_t>,
+ superblock: sqfs_super_t,
+ #[allow(dead_code)]
+ block_writer: ManagedPointer<sqfs_block_writer_t>, // Referenced by `block_processor`
+ block_processor: Mutex<ManagedPointer<sqfs_block_processor_t>>,
+ frag_table: ManagedPointer<sqfs_frag_table_t>,
+ id_table: Mutex<ManagedPointer<sqfs_id_table_t>>,
+ xattr_writer: Mutex<ManagedPointer<sqfs_xattr_writer_t>>,
+ inode_writer: ManagedPointer<sqfs_meta_writer_t>,
+ dirent_writer: ManagedPointer<sqfs_meta_writer_t>,
+ dir_writer: ManagedPointer<sqfs_dir_writer_t>,
+ nodes: Mutex<Vec<RefCell<IntermediateNode>>>,
+ finished: RwLock<bool>,
}
impl Writer {
- /// Open a new output file for writing.
- ///
- /// If the file exists, it will be overwritten.
- pub fn open<T: AsRef<Path>>(path: T) -> Result<Self> {
- let cpath = CString::new(os_to_string(path.as_ref().as_os_str())?)?;
- let block_size = SQFS_DEFAULT_BLOCK_SIZE as u64;
- let num_workers = num_cpus::get() as u32;
- let compressor_id = SQFS_COMPRESSOR_SQFS_COMP_ZSTD;
- let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_secs() as u32;
- let outfile = sfs_init_check_null(&|| unsafe {
- sqfs_open_file(cpath.as_ptr(), SQFS_FILE_OPEN_FLAGS_SQFS_FILE_OPEN_OVERWRITE)
- }, &format!("Couldn't open output file {}", path.as_ref().display()), sfs_destroy)?;
- let compressor_config = sfs_init(&|x| unsafe {
- sqfs_compressor_config_init(x, compressor_id, block_size, 0)
- }, "Couldn't create compressor config")?;
- let compressor = sfs_init_ptr(&|x| unsafe {
- sqfs_compressor_create(&compressor_config, x)
- }, "Couldn't create compressor", sfs_destroy)?;
- let superblock = sfs_init(&|x| unsafe {
- sqfs_super_init(x, block_size, now, compressor_id)
- }, "Couldn't create superblock")?;
- let frag_table = sfs_init_check_null(&|| unsafe {
- sqfs_frag_table_create(0)
- }, "Couldn't create fragment table", sfs_destroy)?;
- let block_writer = sfs_init_check_null(&|| unsafe {
- sqfs_block_writer_create(*outfile, 4096, 0)
- }, "Couldn't create block writer", sfs_destroy)?;
- let block_processor = Mutex::new(sfs_init_check_null(&|| unsafe {
- sqfs_block_processor_create(block_size, *compressor, num_workers, 10 * num_workers as u64, *block_writer, *frag_table)
- }, "Couldn't create block processor", sfs_destroy)?);
- let id_table = Mutex::new(sfs_init_check_null(&|| unsafe {
- sqfs_id_table_create(0)
- }, "Couldn't create ID table", sfs_destroy)?);
- let xattr_writer = Mutex::new(sfs_init_check_null(&|| unsafe {
- sqfs_xattr_writer_create(0)
- }, "Couldn't create xattr writer", sfs_destroy)?);
- let inode_writer = sfs_init_check_null(&|| unsafe {
- sqfs_meta_writer_create(*outfile, *compressor, 0)
- }, "Couldn't create inode metadata writer", sfs_destroy)?;
- let dirent_writer = sfs_init_check_null(&|| unsafe {
- sqfs_meta_writer_create(*outfile, *compressor, SQFS_META_WRITER_FLAGS_SQFS_META_WRITER_KEEP_IN_MEMORY)
- }, "Couldn't create directory entry metadata writer", sfs_destroy)?;
- let dir_writer = sfs_init_check_null(&|| unsafe {
- sqfs_dir_writer_create(*dirent_writer, SQFS_DIR_WRITER_CREATE_FLAGS_SQFS_DIR_WRITER_CREATE_EXPORT_TABLE)
- }, "Couldn't create directory writer", sfs_destroy)?;
- unsafe {
- sfs_check(sqfs_super_write(&superblock, *outfile), "Couldn't write archive superblock")?;
- sfs_check((**compressor).write_options.expect("Compressor doesn't provide write_options")(*compressor, *outfile), "Couldn't write compressor options")?;
- }
- Ok(Self {
- outfile: outfile,
- compressor_config: compressor_config,
- compressor: compressor,
- superblock: superblock,
- block_writer: block_writer,
- block_processor: block_processor,
- frag_table: frag_table,
- id_table: id_table,
- xattr_writer: xattr_writer,
- inode_writer: inode_writer,
- dirent_writer: dirent_writer,
- dir_writer: dir_writer,
- nodes: Mutex::new(vec![]),
- finished: RwLock::new(false),
- })
- }
-
- fn mode_from_inode(inode: &ManagedPointer<sqfs_inode_generic_t>) -> u16 {
- lazy_static! {
- static ref TYPENUMS: HashMap<u32, u32> = vec![
- (SQFS_INODE_TYPE_SQFS_INODE_DIR, S_IFDIR),
- (SQFS_INODE_TYPE_SQFS_INODE_FILE, S_IFREG),
- (SQFS_INODE_TYPE_SQFS_INODE_SLINK, S_IFLNK),
- (SQFS_INODE_TYPE_SQFS_INODE_BDEV, S_IFBLK),
- (SQFS_INODE_TYPE_SQFS_INODE_CDEV, S_IFCHR),
- (SQFS_INODE_TYPE_SQFS_INODE_FIFO, S_IFIFO),
- (SQFS_INODE_TYPE_SQFS_INODE_SOCKET, S_IFSOCK),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_DIR, S_IFDIR),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_FILE, S_IFREG),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_SLINK, S_IFLNK),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_BDEV, S_IFBLK),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_CDEV, S_IFCHR),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_FIFO, S_IFIFO),
- (SQFS_INODE_TYPE_SQFS_INODE_EXT_SOCKET, S_IFSOCK),
- ].into_iter().collect();
- }
- let base = unsafe { (***inode).base };
- TYPENUMS[&(base.type_ as u32)] as u16 | base.mode
- }
-
- fn outfile_size(&self) -> u64 {
- unsafe { (**self.outfile).get_size.expect("Superblock doesn't provide get_size")(*self.outfile) }
- }
-
- /// Add the provided `Source` to the archive.
- ///
- /// This writes file data and xattrs to the archive directly, while storing directory tree
- /// information to write when `finish` is called.
- ///
- /// The returned value is the inode number of the added `Source`. If the file is to be added
- /// to a directory (that is, almost always), this number needs to be stored so that it can be
- /// provided when the directory is added. In the current implementation, inode numbers start
- /// at 1 for the first file and count steadily upward, but this behavior may change without
- /// warning.
- pub fn add(&mut self, mut source: Source) -> Result<u32> {
- let finished = self.finished.read().expect("Poisoned lock");
- if *finished { Err(SquashfsError::Finished)?; }
- let flags = source.flags;
- let nlink = 1; // TODO Handle hard links
- let mut inode = unsafe {
- match source.data {
- SourceData::File(ref mut reader) => {
- let mut ret = Box::new(ManagedPointer::null(libc_free));
- let block_processor = self.block_processor.lock().expect("Poisoned lock");
- sfs_check(sqfs_block_processor_begin_file(**block_processor, &mut **ret, ptr::null_mut(), flags), "Couldn't begin writing file")?;
- let mut buf = vec![0; BLOCK_BUF_SIZE];
- loop {
- let rdsize = reader.read(&mut buf)? as u64;
- if rdsize == 0 { break; }
- sfs_check(sqfs_block_processor_append(**block_processor, &buf as &[u8] as *const [u8] as *const libc::c_void, rdsize), "Couldn't write file data block")?;
- }
- sfs_check(sqfs_block_processor_end_file(**block_processor), "Couldn't finish writing file")?;
- ret
- },
- _ => Box::new(source.to_inode(nlink)?),
- }
- };
- unsafe {
- let xattr_writer = self.xattr_writer.lock().expect("Poisoned lock");
- sfs_check(sqfs_xattr_writer_begin(**xattr_writer, 0), "Couldn't start writing xattrs")?;
- for (key, value) in &source.xattrs {
- let ckey = CString::new(os_to_string(key)?)?;
- sfs_check(sqfs_xattr_writer_add(**xattr_writer, ckey.as_ptr() as *const i8, value.as_ptr() as *const libc::c_void, value.len() as u64), "Couldn't add xattr")?;
- }
- let xattr_idx = sfs_init(&|x| sqfs_xattr_writer_end(**xattr_writer, x), "Couldn't finish writing xattrs")?;
- let mut base = &mut (***inode).base;
- base.mode = source.mode;
- sqfs_inode_set_xattr_index(**inode, xattr_idx);
- let id_table = self.id_table.lock().expect("Poisoned lock");
- sfs_check(sqfs_id_table_id_to_index(**id_table, source.uid, &mut base.uid_idx), "Couldn't set inode UID")?;
- sfs_check(sqfs_id_table_id_to_index(**id_table, source.gid, &mut base.gid_idx), "Couldn't set inode GID")?;
- base.mod_time = source.modified;
- }
- let dir_children = match source.data {
- SourceData::Dir(children) => Some(children),
- _ => None,
- };
- let mut nodes = self.nodes.lock().expect("Poisoned lock");
- let nodenum = nodes.len() as u32 + 1;
- unsafe { (***inode).base.inode_number = nodenum; }
- nodes.push(RefCell::new(IntermediateNode { inode: inode, dir_children: dir_children, pos: 0 }));
- Ok(nodenum)
- }
-
- /// Finish writing the archive and flush all contents to disk.
- ///
- /// It is an error to call `add` after this has been run.
- pub fn finish(&mut self) -> Result<()> {
- *self.finished.write().expect("Poisoned lock") = true;
- let nodes = self.nodes.lock().expect("Poisoned lock");
- unsafe {
- sfs_check(sqfs_block_processor_finish(**self.block_processor.lock().expect("Poisoned lock")), "Failed finishing block processing")?;
- self.superblock.inode_table_start = self.outfile_size();
- for raw_node in &*nodes {
- let mut node = raw_node.borrow_mut();
- let id = (***node.inode).base.inode_number;
- if let Some(children) = node.dir_children.take() {
- sfs_check(sqfs_dir_writer_begin(*self.dir_writer, 0), "Couldn't start writing directory")?;
- // For each child, need: name, ID, reference, mode
- for (name, child_id) in children { // On disk children need to be sorted -- I think the library takes care of this
- if child_id >= id { Err(SquashfsError::WriteOrder(child_id))?; }
- let child_node = &nodes[child_id as usize - 1].borrow();
- let child = child_node.inode.as_ref();
- let child_ref = child_node.pos;
- sfs_check(sqfs_dir_writer_add_entry(*self.dir_writer, CString::new(os_to_string(&name)?)?.as_ptr(), child_id, child_ref, Self::mode_from_inode(&child)), "Couldn't add directory entry")?;
- }
- sfs_check(sqfs_dir_writer_end(*self.dir_writer), "Couldn't finish writing directory")?;
- let mut ret = Box::new(sfs_init_check_null(&|| {
- sqfs_dir_writer_create_inode(*self.dir_writer, 0, 0, 0) // TODO Populate the parent inode number (how?)
- }, "Couldn't get inode for directory", libc_free)?);
- copy_metadata(&*node.inode, &mut ret)?;
- node.inode = ret;
- }
- let (mut block, mut offset) = (0, 0);
- sqfs_meta_writer_get_position(*self.inode_writer, &mut block, &mut offset);
- node.pos = block << 16 | offset as u64;
- sfs_check(sqfs_meta_writer_write_inode(*self.inode_writer, **node.inode), "Couldn't write inode")?;
- }
-
- let root_ref = nodes.last().ok_or(SquashfsError::Empty)?.borrow().pos;
- self.superblock.root_inode_ref = root_ref;
- sfs_check(sqfs_meta_writer_flush(*self.inode_writer), "Couldn't flush inodes")?;
- sfs_check(sqfs_meta_writer_flush(*self.dirent_writer), "Couldn't flush directory entries")?;
- self.superblock.directory_table_start = self.outfile_size();
- sfs_check(sqfs_meta_write_write_to_file(*self.dirent_writer), "Couldn't write directory entries")?;
- self.superblock.inode_count = nodes.len() as u32;
- sfs_check(sqfs_frag_table_write(*self.frag_table, *self.outfile, &mut self.superblock, *self.compressor), "Couldn't write fragment table")?;
- sfs_check(sqfs_dir_writer_write_export_table(*self.dir_writer, *self.outfile, *self.compressor, nodes.len() as u32, root_ref, &mut self.superblock), "Couldn't write export table")?;
- sfs_check(sqfs_id_table_write(**self.id_table.lock().expect("Poisoned lock"), *self.outfile, &mut self.superblock, *self.compressor), "Couldn't write ID table")?;
- sfs_check(sqfs_xattr_writer_flush(**self.xattr_writer.lock().expect("Poisoned lock"), *self.outfile, &mut self.superblock, *self.compressor), "Couldn't write xattr table")?;
- self.superblock.bytes_used = self.outfile_size();
- sfs_check(sqfs_super_write(&self.superblock, *self.outfile), "Couldn't rewrite archive superblock")?;
- let padding: Vec<u8> = vec![0; PAD_TO - self.outfile_size() as usize % PAD_TO];
- sfs_check((**self.outfile).write_at.expect("File does not provide write_at")(*self.outfile, self.outfile_size(), &padding as &[u8] as *const [u8] as *const libc::c_void, padding.len() as u64), "Couldn't pad file")?;
- }
- Ok(())
- }
+ /// Open a new output file for writing.
+ ///
+ /// If the file exists, it will be overwritten.
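+ ///
+ /// A minimal sketch (the path is illustrative):
+ ///
+ /// ```ignore
+ /// let mut writer = Writer::open("archive.sfs")?;
+ /// ```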
+ pub fn open<T: AsRef<Path>>(path: T) -> Result<Self> {
+ let cpath = CString::new(os_to_string(path.as_ref().as_os_str())?)?;
+ let block_size = SQFS_DEFAULT_BLOCK_SIZE as usize;
+ let num_workers = num_cpus::get() as u32;
+ let compressor_id = SQFS_COMPRESSOR_SQFS_COMP_ZSTD;
+ let now = SystemTime::now()
+ .duration_since(SystemTime::UNIX_EPOCH)?
+ .as_secs() as u32;
+ let outfile = sfs_init_check_null(
+ &|| unsafe {
+ sqfs_open_file(
+ cpath.as_ptr(),
+ SQFS_FILE_OPEN_FLAGS_SQFS_FILE_OPEN_OVERWRITE,
+ )
+ },
+ &format!("Couldn't open output file {}", path.as_ref().display()),
+ sfs_destroy,
+ )?;
+ let compressor_config = sfs_init(
+ &|x| unsafe { sqfs_compressor_config_init(x, compressor_id, block_size, 0) },
+ "Couldn't create compressor config",
+ )?;
+ let compressor = sfs_init_ptr(
+ &|x| unsafe { sqfs_compressor_create(&compressor_config, x) },
+ "Couldn't create compressor",
+ sfs_destroy,
+ )?;
+ let superblock = sfs_init(
+ &|x| unsafe { sqfs_super_init(x, block_size, now, compressor_id) },
+ "Couldn't create superblock",
+ )?;
+ let frag_table = sfs_init_check_null(
+ &|| unsafe { sqfs_frag_table_create(0) },
+ "Couldn't create fragment table",
+ sfs_destroy,
+ )?;
+ let block_writer = sfs_init_check_null(
+ &|| unsafe { sqfs_block_writer_create(*outfile, 4096, 0) },
+ "Couldn't create block writer",
+ sfs_destroy,
+ )?;
+ let block_processor = Mutex::new(sfs_init_check_null(
+ &|| unsafe {
+ sqfs_block_processor_create(
+ block_size,
+ *compressor,
+ num_workers,
+ 10 * num_workers as usize,
+ *block_writer,
+ *frag_table,
+ )
+ },
+ "Couldn't create block processor",
+ sfs_destroy,
+ )?);
+ let id_table = Mutex::new(sfs_init_check_null(
+ &|| unsafe { sqfs_id_table_create(0) },
+ "Couldn't create ID table",
+ sfs_destroy,
+ )?);
+ let xattr_writer = Mutex::new(sfs_init_check_null(
+ &|| unsafe { sqfs_xattr_writer_create(0) },
+ "Couldn't create xattr writer",
+ sfs_destroy,
+ )?);
+ let inode_writer = sfs_init_check_null(
+ &|| unsafe { sqfs_meta_writer_create(*outfile, *compressor, 0) },
+ "Couldn't create inode metadata writer",
+ sfs_destroy,
+ )?;
+ let dirent_writer = sfs_init_check_null(
+ &|| unsafe {
+ sqfs_meta_writer_create(
+ *outfile,
+ *compressor,
+ SQFS_META_WRITER_FLAGS_SQFS_META_WRITER_KEEP_IN_MEMORY,
+ )
+ },
+ "Couldn't create directory entry metadata writer",
+ sfs_destroy,
+ )?;
+ let dir_writer = sfs_init_check_null(
+ &|| unsafe {
+ sqfs_dir_writer_create(
+ *dirent_writer,
+ SQFS_DIR_WRITER_CREATE_FLAGS_SQFS_DIR_WRITER_CREATE_EXPORT_TABLE,
+ )
+ },
+ "Couldn't create directory writer",
+ sfs_destroy,
+ )?;
+ unsafe {
+ sfs_check(
+ sqfs_super_write(&superblock, *outfile),
+ "Couldn't write archive superblock",
+ )?;
+ sfs_check(
+ (**compressor)
+ .write_options
+ .expect("Compressor doesn't provide write_options")(
+ *compressor, *outfile
+ ),
+ "Couldn't write compressor options",
+ )?;
+ }
+ Ok(Self {
+ outfile: outfile,
+ compressor_config: compressor_config,
+ compressor: compressor,
+ superblock: superblock,
+ block_writer: block_writer,
+ block_processor: block_processor,
+ frag_table: frag_table,
+ id_table: id_table,
+ xattr_writer: xattr_writer,
+ inode_writer: inode_writer,
+ dirent_writer: dirent_writer,
+ dir_writer: dir_writer,
+ nodes: Mutex::new(vec![]),
+ finished: RwLock::new(false),
+ })
+ }
+
+ fn mode_from_inode(inode: &ManagedPointer<sqfs_inode_generic_t>) -> u16 {
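+ // Map the on-disk inode type to its S_IF* file-type bits, then OR in the
+ // permission bits stored in the inode itself.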
+ lazy_static! {
+ static ref TYPENUMS: HashMap<u32, u32> = vec![
+ (SQFS_INODE_TYPE_SQFS_INODE_DIR, S_IFDIR),
+ (SQFS_INODE_TYPE_SQFS_INODE_FILE, S_IFREG),
+ (SQFS_INODE_TYPE_SQFS_INODE_SLINK, S_IFLNK),
+ (SQFS_INODE_TYPE_SQFS_INODE_BDEV, S_IFBLK),
+ (SQFS_INODE_TYPE_SQFS_INODE_CDEV, S_IFCHR),
+ (SQFS_INODE_TYPE_SQFS_INODE_FIFO, S_IFIFO),
+ (SQFS_INODE_TYPE_SQFS_INODE_SOCKET, S_IFSOCK),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_DIR, S_IFDIR),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_FILE, S_IFREG),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_SLINK, S_IFLNK),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_BDEV, S_IFBLK),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_CDEV, S_IFCHR),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_FIFO, S_IFIFO),
+ (SQFS_INODE_TYPE_SQFS_INODE_EXT_SOCKET, S_IFSOCK),
+ ]
+ .into_iter()
+ .collect();
+ }
+ let base = unsafe { (***inode).base };
+ TYPENUMS[&(base.type_ as u32)] as u16 | base.mode
+ }
+
+ fn outfile_size(&self) -> u64 {
+ unsafe {
+ (**self.outfile)
+ .get_size
+ .expect("Superblock doesn't provide get_size")(*self.outfile)
+ }
+ }
+
+ /// Add the provided `Source` to the archive.
+ ///
+ /// This writes file data and xattrs to the archive directly, while storing directory tree
+ /// information to write when `finish` is called.
+ ///
+ /// The returned value is the inode number of the added `Source`. If the file is to be added
+ /// to a directory (as is almost always the case), this number needs to be stored so that it can be
+ /// provided when the directory is added. In the current implementation, inode numbers start
+ /// at 1 for the first file and count steadily upward, but this behavior may change without
+ /// warning.
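+ ///
+ /// A sketch of writing one file and then its parent directory (names are illustrative):
+ ///
+ /// ```ignore
+ /// let file_id = writer.add(Source::defaults(SourceData::File(Box::new(&b"hello"[..]))))?;
+ /// let children = vec![(OsString::from("hello.txt"), file_id)];
+ /// writer.add(Source::defaults(SourceData::Dir(Box::new(children.into_iter()))))?;
+ /// writer.finish()?;
+ /// ```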
+ pub fn add(&mut self, mut source: Source) -> Result<u32> {
+ let finished = self.finished.read().expect("Poisoned lock");
+ if *finished {
+ Err(SquashfsError::Finished)?;
+ }
+ let flags = source.flags;
+ let nlink = 1; // TODO Handle hard links
+ let mut inode = unsafe {
+ match source.data {
+ SourceData::File(ref mut reader) => {
+ let mut ret = Box::new(ManagedPointer::null(libc_free));
+ let block_processor = self.block_processor.lock().expect("Poisoned lock");
+ sfs_check(
+ sqfs_block_processor_begin_file(
+ **block_processor,
+ &mut **ret,
+ ptr::null_mut(),
+ flags,
+ ),
+ "Couldn't begin writing file",
+ )?;
+ let mut buf = vec![0; BLOCK_BUF_SIZE];
+ loop {
+ let rdsize = reader.read(&mut buf)?;
+ if rdsize == 0 {
+ break;
+ }
+ sfs_check(
+ sqfs_block_processor_append(
+ **block_processor,
+ &buf as &[u8] as *const [u8] as *const libc::c_void,
+ rdsize,
+ ),
+ "Couldn't write file data block",
+ )?;
+ }
+ sfs_check(
+ sqfs_block_processor_end_file(**block_processor),
+ "Couldn't finish writing file",
+ )?;
+ ret
+ }
+ _ => Box::new(source.to_inode(nlink)?),
+ }
+ };
+ unsafe {
+ let xattr_writer = self.xattr_writer.lock().expect("Poisoned lock");
+ sfs_check(
+ sqfs_xattr_writer_begin(**xattr_writer, 0),
+ "Couldn't start writing xattrs",
+ )?;
+ for (key, value) in &source.xattrs {
+ let ckey = CString::new(os_to_string(key)?)?;
+ sfs_check(
+ sqfs_xattr_writer_add(
+ **xattr_writer,
+ ckey.as_ptr() as *const i8,
+ value.as_ptr() as *const libc::c_void,
+ value.len(),
+ ),
+ "Couldn't add xattr",
+ )?;
+ }
+ let xattr_idx = sfs_init(
+ &|x| sqfs_xattr_writer_end(**xattr_writer, x),
+ "Couldn't finish writing xattrs",
+ )?;
+ let mut base = &mut (***inode).base;
+ base.mode = source.mode;
+ sqfs_inode_set_xattr_index(**inode, xattr_idx);
+ let id_table = self.id_table.lock().expect("Poisoned lock");
+ sfs_check(
+ sqfs_id_table_id_to_index(**id_table, source.uid, &mut base.uid_idx),
+ "Couldn't set inode UID",
+ )?;
+ sfs_check(
+ sqfs_id_table_id_to_index(**id_table, source.gid, &mut base.gid_idx),
+ "Couldn't set inode GID",
+ )?;
+ base.mod_time = source.modified;
+ }
+ let dir_children = match source.data {
+ SourceData::Dir(children) => Some(children),
+ _ => None,
+ };
+ let mut nodes = self.nodes.lock().expect("Poisoned lock");
+ let nodenum = nodes.len() as u32 + 1;
+ unsafe {
+ (***inode).base.inode_number = nodenum;
+ }
+ nodes.push(RefCell::new(IntermediateNode {
+ inode: inode,
+ dir_children: dir_children,
+ pos: 0,
+ }));
+ Ok(nodenum)
+ }
+
+ /// Finish writing the archive and flush all contents to disk.
+ ///
+ /// It is an error to call `add` after this has been run.
+ pub fn finish(&mut self) -> Result<()> {
+ *self.finished.write().expect("Poisoned lock") = true;
+ let nodes = self.nodes.lock().expect("Poisoned lock");
+ unsafe {
+ sfs_check(
+ sqfs_block_processor_finish(**self.block_processor.lock().expect("Poisoned lock")),
+ "Failed finishing block processing",
+ )?;
+ self.superblock.inode_table_start = self.outfile_size();
+ for raw_node in &*nodes {
+ let mut node = raw_node.borrow_mut();
+ let id = (***node.inode).base.inode_number;
+ if let Some(children) = node.dir_children.take() {
+ sfs_check(
+ sqfs_dir_writer_begin(*self.dir_writer, 0),
+ "Couldn't start writing directory",
+ )?;
+ // For each child, need: name, ID, reference, mode
+ for (name, child_id) in children {
+ // On disk children need to be sorted -- I think the library takes care of this
+ if child_id >= id {
+ Err(SquashfsError::WriteOrder(child_id))?;
+ }
+ let child_node = &nodes[child_id as usize - 1].borrow();
+ let child = child_node.inode.as_ref();
+ let child_ref = child_node.pos;
+ sfs_check(
+ sqfs_dir_writer_add_entry(
+ *self.dir_writer,
+ CString::new(os_to_string(&name)?)?.as_ptr(),
+ child_id,
+ child_ref,
+ Self::mode_from_inode(&child),
+ ),
+ "Couldn't add directory entry",
+ )?;
+ }
+ sfs_check(
+ sqfs_dir_writer_end(*self.dir_writer),
+ "Couldn't finish writing directory",
+ )?;
+ let mut ret = Box::new(sfs_init_check_null(
+ &|| {
+ sqfs_dir_writer_create_inode(*self.dir_writer, 0, 0, 0)
+ // TODO Populate the parent inode number (how?)
+ },
+ "Couldn't get inode for directory",
+ libc_free,
+ )?);
+ copy_metadata(&*node.inode, &mut ret)?;
+ node.inode = ret;
+ }
+ let (mut block, mut offset) = (0, 0);
+ sqfs_meta_writer_get_position(*self.inode_writer, &mut block, &mut offset);
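+ // An inode reference packs the metadata block's position in the file in the
+ // upper 48 bits and the byte offset within the decompressed block in the low 16.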
+ node.pos = block << 16 | offset as u64;
+ sfs_check(
+ sqfs_meta_writer_write_inode(*self.inode_writer, **node.inode),
+ "Couldn't write inode",
+ )?;
+ }
+
+ let root_ref = nodes.last().ok_or(SquashfsError::Empty)?.borrow().pos;
+ self.superblock.root_inode_ref = root_ref;
+ sfs_check(
+ sqfs_meta_writer_flush(*self.inode_writer),
+ "Couldn't flush inodes",
+ )?;
+ sfs_check(
+ sqfs_meta_writer_flush(*self.dirent_writer),
+ "Couldn't flush directory entries",
+ )?;
+ self.superblock.directory_table_start = self.outfile_size();
+ sfs_check(
+ sqfs_meta_write_write_to_file(*self.dirent_writer),
+ "Couldn't write directory entries",
+ )?;
+ self.superblock.inode_count = nodes.len() as u32;
+ sfs_check(
+ sqfs_frag_table_write(
+ *self.frag_table,
+ *self.outfile,
+ &mut self.superblock,
+ *self.compressor,
+ ),
+ "Couldn't write fragment table",
+ )?;
+ sfs_check(
+ sqfs_dir_writer_write_export_table(
+ *self.dir_writer,
+ *self.outfile,
+ *self.compressor,
+ nodes.len() as u32,
+ root_ref,
+ &mut self.superblock,
+ ),
+ "Couldn't write export table",
+ )?;
+ sfs_check(
+ sqfs_id_table_write(
+ **self.id_table.lock().expect("Poisoned lock"),
+ *self.outfile,
+ &mut self.superblock,
+ *self.compressor,
+ ),
+ "Couldn't write ID table",
+ )?;
+ sfs_check(
+ sqfs_xattr_writer_flush(
+ **self.xattr_writer.lock().expect("Poisoned lock"),
+ *self.outfile,
+ &mut self.superblock,
+ *self.compressor,
+ ),
+ "Couldn't write xattr table",
+ )?;
+ self.superblock.bytes_used = self.outfile_size();
+ sfs_check(
+ sqfs_super_write(&self.superblock, *self.outfile),
+ "Couldn't rewrite archive superblock",
+ )?;
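+            // Pad the archive out to the next PAD_TO boundary; the trailing
+            // `% PAD_TO` makes the padding zero bytes when already aligned.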
+            let padding: Vec<u8> =
+                vec![0; (PAD_TO - self.outfile_size() as usize % PAD_TO) % PAD_TO];
+ sfs_check(
+ (**self.outfile)
+ .write_at
+ .expect("File does not provide write_at")(
+ *self.outfile,
+ self.outfile_size(),
+ &padding as &[u8] as *const [u8] as *const libc::c_void,
+ padding.len(),
+ ),
+ "Couldn't pad file",
+ )?;
+ }
+ Ok(())
+ }
}
-unsafe impl Sync for Writer { }
-unsafe impl Send for Writer { }
+unsafe impl Sync for Writer {}
+unsafe impl Send for Writer {}
enum ChildMapEntry {
- Accumulating(BTreeMap<OsString, u32>),
- Done,
+ Accumulating(BTreeMap<OsString, u32>),
+ Done,
}
impl ChildMapEntry {
- fn new() -> Self {
- Self::Accumulating(BTreeMap::new())
- }
-
- fn add(&mut self, name: OsString, id: u32) -> Result<()> {
- match self {
- Self::Done => Err(SquashfsError::WriteOrder(id))?,
- Self::Accumulating(children) => {
- children.insert(name, id);
- Ok(())
- },
- }
- }
-
- fn finish(&mut self) -> Result<BTreeMap<OsString, u32>> {
- match std::mem::replace(self, Self::Done) {
- Self::Done => Err(SquashfsError::Internal("Tried to finish directory in tree processor multiple times".to_string()))?,
- Self::Accumulating(children) => Ok(children),
- }
- }
+ fn new() -> Self {
+ Self::Accumulating(BTreeMap::new())
+ }
+
+ fn add(&mut self, name: OsString, id: u32) -> Result<()> {
+ match self {
+ Self::Done => Err(SquashfsError::WriteOrder(id))?,
+ Self::Accumulating(children) => {
+ children.insert(name, id);
+ Ok(())
+ }
+ }
+ }
+
+ fn finish(&mut self) -> Result<BTreeMap<OsString, u32>> {
+ match std::mem::replace(self, Self::Done) {
+ Self::Done => Err(SquashfsError::Internal(
+ "Tried to finish directory in tree processor multiple times".to_string(),
+ ))?,
+ Self::Accumulating(children) => Ok(children),
+ }
+ }
}
/// Tool to help create an archive from a directory in the filesystem.
@@ -582,85 +817,132 @@ impl ChildMapEntry {
/// parent directories. If this happens, [`WriteOrder`](SquashfsError::WriteOrder) will be
/// raised and the node will not be added.
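+///
+/// A typical loop looks like the following sketch (the `squashfs_ng` paths and
+/// the file names are assumptions for illustration):
+///
+/// ```no_run
+/// use squashfs_ng::write::TreeProcessor;
+///
+/// let processor = TreeProcessor::new("/tmp/archive.sfs")?;
+/// for entry in processor.iter("/some/dir") {
+///     // Entries arrive children-first, so each one can be inspected or
+///     // modified before it is added.
+///     processor.add(entry?)?;
+/// }
+/// processor.finish()?;
+/// # Ok::<(), squashfs_ng::SquashfsError>(())
+/// ```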
pub struct TreeProcessor {
- writer: Mutex<Writer>,
- childmap: Mutex<HashMap<PathBuf, ChildMapEntry>>,
+ writer: Mutex<Writer>,
+ childmap: Mutex<HashMap<PathBuf, ChildMapEntry>>,
}
impl TreeProcessor {
- /// Create a new `TreeProcessor` for an output file.
- pub fn new<P: AsRef<Path>>(outfile: P) -> Result<Self> {
- let writer = Writer::open(outfile)?;
- Ok(Self { writer: Mutex::new(writer), childmap: Mutex::new(HashMap::new()) })
- }
-
- /// Add a new file to the archive.
- ///
- /// It is not recommended to call this on `SourceFile`s that were not yielded by `iter`.
- pub fn add(&self, mut source: SourceFile) -> Result<u32> {
- let mut childmap = self.childmap.lock().expect("Poisoned lock");
- if let SourceData::Dir(old_children) = &mut source.content.data {
- let mut children = childmap.entry(source.path.clone()).or_insert(ChildMapEntry::new()).finish()?;
- children.extend(old_children);
- source.content.data = SourceData::Dir(Box::new(children.into_iter()));
- }
- let id = self.writer.lock().expect("Poisoned lock").add(source.content)?;
- if let Some(parent) = source.path.parent() {
- childmap.entry(parent.to_path_buf()).or_insert(ChildMapEntry::new()).add(source.path.file_name().expect("Path from walkdir has no file name").to_os_string(), id)?;
- }
- Ok(id)
- }
-
- /// Finish writing the archive.
- pub fn finish(&self) -> Result<()> {
- self.writer.lock().expect("Poisoned lock").finish()
- }
-
- fn make_source(&self, entry: DirEntry) -> Result<Source> {
- let metadata = entry.metadata().unwrap();
- let mtime = metadata.modified()?.duration_since(SystemTime::UNIX_EPOCH)?.as_secs() as u32;
- let data = if metadata.file_type().is_dir() {
- SourceData::Dir(Box::new(BTreeMap::new().into_iter()))
- }
- else if metadata.file_type().is_file() {
- SourceData::File(Box::new(std::fs::File::open(entry.path())?))
- }
- else if metadata.file_type().is_symlink() {
- SourceData::Symlink(std::fs::read_link(entry.path())?)
- }
- else {
- Err(SquashfsError::WriteType(metadata.file_type()))?;
- unreachable!();
- };
- let source = if cfg!(linux) {
- use std::os::linux::fs::MetadataExt;
- Source { data: data, xattrs: file_xattrs(entry.path())?, uid: metadata.st_uid(), gid: metadata.st_gid(), mode: (metadata.st_mode() & !S_IFMT) as u16, modified: mtime, flags: 0 }
- }
- else if cfg!(unix) {
- use std::os::unix::fs::MetadataExt;
- Source { data: data, xattrs: HashMap::new(), uid: metadata.uid(), gid: metadata.gid(), mode: (metadata.mode() & 0x1ff) as u16, modified: mtime, flags: 0 }
- }
- else {
- Source { data: data, xattrs: HashMap::new(), uid: 0, gid: 0, mode: 0x1ff, modified: mtime, flags: 0 }
- };
- Ok(source)
- }
-
- /// Create an iterator over a directory tree, yielding them in a form suitable to pass to
- /// `add`.
- pub fn iter<'a, P: AsRef<Path>>(&'a self, root: P) -> TreeIterator<'a> {
- let tree = WalkDir::new(root).follow_links(false).contents_first(true);
- TreeIterator { processor: self, tree: tree.into_iter() }
- }
-
- /// Add an entire directory tree to the archive, then finish it.
- ///
- /// This is the most basic, bare-bones way to create a full archive from an existing directory
- /// tree. This offers no way to customize the archive or handle errors gracefully.
- pub fn process<P: AsRef<Path>>(self, root: P) -> Result<()> {
- for entry in self.iter(root) { self.add(entry?)?; }
- self.finish()?;
- Ok(())
- }
+ /// Create a new `TreeProcessor` for an output file.
+ pub fn new<P: AsRef<Path>>(outfile: P) -> Result<Self> {
+ let writer = Writer::open(outfile)?;
+ Ok(Self {
+ writer: Mutex::new(writer),
+ childmap: Mutex::new(HashMap::new()),
+ })
+ }
+
+ /// Add a new file to the archive.
+ ///
+ /// It is not recommended to call this on `SourceFile`s that were not yielded by `iter`.
+ pub fn add(&self, mut source: SourceFile) -> Result<u32> {
+ let mut childmap = self.childmap.lock().expect("Poisoned lock");
+ if let SourceData::Dir(old_children) = &mut source.content.data {
+ let mut children = childmap
+ .entry(source.path.clone())
+ .or_insert(ChildMapEntry::new())
+ .finish()?;
+ children.extend(old_children);
+ source.content.data = SourceData::Dir(Box::new(children.into_iter()));
+ }
+ let id = self
+ .writer
+ .lock()
+ .expect("Poisoned lock")
+ .add(source.content)?;
+ if let Some(parent) = source.path.parent() {
+ childmap
+ .entry(parent.to_path_buf())
+ .or_insert(ChildMapEntry::new())
+ .add(
+ source
+ .path
+ .file_name()
+ .expect("Path from walkdir has no file name")
+ .to_os_string(),
+ id,
+ )?;
+ }
+ Ok(id)
+ }
+
+ /// Finish writing the archive.
+ pub fn finish(&self) -> Result<()> {
+ self.writer.lock().expect("Poisoned lock").finish()
+ }
+
+ fn make_source(&self, entry: DirEntry) -> Result<Source> {
+ let metadata = entry.metadata().unwrap();
+ let mtime = metadata
+ .modified()?
+ .duration_since(SystemTime::UNIX_EPOCH)?
+ .as_secs() as u32;
+ let data = if metadata.file_type().is_dir() {
+ SourceData::Dir(Box::new(BTreeMap::new().into_iter()))
+ } else if metadata.file_type().is_file() {
+ SourceData::File(Box::new(std::fs::File::open(entry.path())?))
+ } else if metadata.file_type().is_symlink() {
+ SourceData::Symlink(std::fs::read_link(entry.path())?)
+ } else {
+ Err(SquashfsError::WriteType(metadata.file_type()))?;
+ unreachable!();
+ };
+        let source = if cfg!(target_os = "linux") {
+ use std::os::linux::fs::MetadataExt;
+ Source {
+ data: data,
+ xattrs: file_xattrs(entry.path())?,
+ uid: metadata.st_uid(),
+ gid: metadata.st_gid(),
+ mode: (metadata.st_mode() & !S_IFMT) as u16,
+ modified: mtime,
+ flags: 0,
+ }
+ } else if cfg!(unix) {
+ use std::os::unix::fs::MetadataExt;
+ Source {
+ data: data,
+ xattrs: HashMap::new(),
+ uid: metadata.uid(),
+ gid: metadata.gid(),
+ mode: (metadata.mode() & 0x1ff) as u16,
+ modified: mtime,
+ flags: 0,
+ }
+ } else {
+ Source {
+ data: data,
+ xattrs: HashMap::new(),
+ uid: 0,
+ gid: 0,
+ mode: 0x1ff,
+ modified: mtime,
+ flags: 0,
+ }
+ };
+ Ok(source)
+ }
+
+    /// Create an iterator over a directory tree, yielding its entries in a form
+    /// suitable to pass to `add`.
+ pub fn iter<'a, P: AsRef<Path>>(&'a self, root: P) -> TreeIterator<'a> {
+ let tree = WalkDir::new(root).follow_links(false).contents_first(true);
+ TreeIterator {
+ processor: self,
+ tree: tree.into_iter(),
+ }
+ }
+
+ /// Add an entire directory tree to the archive, then finish it.
+ ///
+    /// This is the most basic, bare-bones way to create a full archive from an existing directory
+    /// tree. It offers no way to customize the archive or handle errors gracefully.
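+    ///
+    /// A one-line sketch (file names are assumptions for illustration):
+    ///
+    /// ```no_run
+    /// use squashfs_ng::write::TreeProcessor;
+    /// TreeProcessor::new("/tmp/archive.sfs")?.process("/some/dir")?;
+    /// # Ok::<(), squashfs_ng::SquashfsError>(())
+    /// ```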
+ pub fn process<P: AsRef<Path>>(self, root: P) -> Result<()> {
+ for entry in self.iter(root) {
+ self.add(entry?)?;
+ }
+ self.finish()?;
+ Ok(())
+ }
}
/// An iterator yielding the nodes in a directory tree in a way suitable for archiving.
@@ -668,25 +950,31 @@ impl TreeProcessor {
/// This is created by a [`TreeProcessor`] and the items yielded are intended to be
/// [`add`](TreeProcessor::add)ed to it.
pub struct TreeIterator<'a> {
- processor: &'a TreeProcessor,
- tree: walkdir::IntoIter,
+ processor: &'a TreeProcessor,
+ tree: walkdir::IntoIter,
}
impl<'a> std::iter::Iterator for TreeIterator<'a> {
- type Item = Result<SourceFile>;
-
- fn next(&mut self) -> Option<Self::Item> {
- match self.tree.next() {
- None => None,
- Some(Ok(entry)) => {
- let path = entry.path().to_path_buf();
- Some(self.processor.make_source(entry).map(|source| SourceFile { path: path, content: source }))
- },
- Some(Err(e)) => {
- let path = e.path().map(|x| x.to_string_lossy().into_owned()).unwrap_or("(unknown)".to_string());
- eprintln!("Not processing {}: {}", path, e.to_string());
- self.next()
- },
- }
- }
+ type Item = Result<SourceFile>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ match self.tree.next() {
+ None => None,
+ Some(Ok(entry)) => {
+ let path = entry.path().to_path_buf();
+ Some(self.processor.make_source(entry).map(|source| SourceFile {
+ path: path,
+ content: source,
+ }))
+ }
+ Some(Err(e)) => {
+ let path = e
+ .path()
+ .map(|x| x.to_string_lossy().into_owned())
+ .unwrap_or("(unknown)".to_string());
+ eprintln!("Not processing {}: {}", path, e.to_string());
+ self.next()
+ }
+ }
+ }
}