aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorValentin Popov <valentin@popov.link>2026-02-10 01:58:16 +0300
committerValentin Popov <valentin@popov.link>2026-02-10 01:58:16 +0300
commite08b5f3853784e2fb8dc016d4a149c1a2282f127 (patch)
tree0308096ae68dde3977bf18d360064638043257fc
parent5a97f2e42910f552cde0cda3561f4259cd200147 (diff)
downloadfparkan-e08b5f3853784e2fb8dc016d4a149c1a2282f127.tar.xz
fparkan-e08b5f3853784e2fb8dc016d4a149c1a2282f127.zip
feat: add initial implementation of nres and rsli crates
- Created Cargo.toml for the rsli crate with flate2 dependency.
- Implemented ResourceData enum for handling borrowed and owned byte slices.
- Added OutputBuffer trait and its Vec<u8> implementation for writing data.
- Defined a comprehensive Error enum for error handling in the library.
- Developed the Library struct to manage resource entries and provide methods for loading and unpacking resources.
- Implemented various packing methods and decompression algorithms, including LZSS and Deflate.
- Added tests for validating the functionality of the rsli library against sample data.
-rw-r--r--.gitea/workflows/test.yml2
-rw-r--r--crates/nres/Cargo.toml6
-rw-r--r--crates/nres/src/data.rs43
-rw-r--r--crates/nres/src/error.rs99
-rw-r--r--crates/nres/src/lib.rs863
-rw-r--r--crates/rsli/Cargo.toml7
-rw-r--r--crates/rsli/src/data.rs41
-rw-r--r--crates/rsli/src/error.rs129
-rw-r--r--crates/rsli/src/lib.rs1165
9 files changed, 2354 insertions, 1 deletions
diff --git a/.gitea/workflows/test.yml b/.gitea/workflows/test.yml
index 516aae8..cf314cb 100644
--- a/.gitea/workflows/test.yml
+++ b/.gitea/workflows/test.yml
@@ -24,4 +24,4 @@ jobs:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@stable
- name: Cargo test
- run: cargo test --workspace --all-features
+ run: cargo test --workspace --all-features -- --nocapture
diff --git a/crates/nres/Cargo.toml b/crates/nres/Cargo.toml
new file mode 100644
index 0000000..77921df
--- /dev/null
+++ b/crates/nres/Cargo.toml
@@ -0,0 +1,6 @@
+[package]
+name = "nres"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
diff --git a/crates/nres/src/data.rs b/crates/nres/src/data.rs
new file mode 100644
index 0000000..bb9e778
--- /dev/null
+++ b/crates/nres/src/data.rs
@@ -0,0 +1,43 @@
+use std::io;
+
+/// Resource payload that can be either borrowed from mapped bytes or owned.
+#[derive(Clone, Debug)]
+pub enum ResourceData<'a> {
+ Borrowed(&'a [u8]),
+ Owned(Vec<u8>),
+}
+
+impl<'a> ResourceData<'a> {
+ pub fn as_slice(&self) -> &[u8] {
+ match self {
+ Self::Borrowed(slice) => slice,
+ Self::Owned(buf) => buf.as_slice(),
+ }
+ }
+
+ pub fn into_owned(self) -> Vec<u8> {
+ match self {
+ Self::Borrowed(slice) => slice.to_vec(),
+ Self::Owned(buf) => buf,
+ }
+ }
+}
+
+impl AsRef<[u8]> for ResourceData<'_> {
+ fn as_ref(&self) -> &[u8] {
+ self.as_slice()
+ }
+}
+
+/// Output sink used by `read_into`/`load_into` APIs.
+pub trait OutputBuffer {
+ fn write_exact(&mut self, data: &[u8]) -> io::Result<()>;
+}
+
+impl OutputBuffer for Vec<u8> {
+ fn write_exact(&mut self, data: &[u8]) -> io::Result<()> {
+ self.clear();
+ self.extend_from_slice(data);
+ Ok(())
+ }
+}
diff --git a/crates/nres/src/error.rs b/crates/nres/src/error.rs
new file mode 100644
index 0000000..a6f078f
--- /dev/null
+++ b/crates/nres/src/error.rs
@@ -0,0 +1,99 @@
+use core::fmt;
+
+#[derive(Debug)]
+#[non_exhaustive]
+pub enum Error {
+ Io(std::io::Error),
+
+ InvalidMagic {
+ got: [u8; 4],
+ },
+ UnsupportedVersion {
+ got: u32,
+ },
+ TotalSizeMismatch {
+ header: u32,
+ actual: u64,
+ },
+
+ InvalidEntryCount {
+ got: i32,
+ },
+ DirectoryOutOfBounds {
+ directory_offset: u64,
+ directory_len: u64,
+ file_len: u64,
+ },
+
+ EntryIdOutOfRange {
+ id: u32,
+ entry_count: u32,
+ },
+ EntryDataOutOfBounds {
+ id: u32,
+ offset: u64,
+ size: u32,
+ directory_offset: u64,
+ },
+ NameTooLong {
+ got: usize,
+ max: usize,
+ },
+ NameContainsNul,
+ BadNameEncoding,
+
+ IntegerOverflow,
+
+ RawModeDisallowsOperation(&'static str),
+}
+
+impl From<std::io::Error> for Error {
+ fn from(value: std::io::Error) -> Self {
+ Self::Io(value)
+ }
+}
+
+impl fmt::Display for Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Error::Io(e) => write!(f, "I/O error: {e}"),
+ Error::InvalidMagic { got } => write!(f, "invalid NRes magic: {got:02X?}"),
+ Error::UnsupportedVersion { got } => {
+ write!(f, "unsupported NRes version: {got:#x}")
+ }
+ Error::TotalSizeMismatch { header, actual } => {
+ write!(f, "NRes total_size mismatch: header={header}, actual={actual}")
+ }
+ Error::InvalidEntryCount { got } => write!(f, "invalid entry_count: {got}"),
+ Error::DirectoryOutOfBounds {
+ directory_offset,
+ directory_len,
+ file_len,
+ } => write!(
+ f,
+ "directory out of bounds: off={directory_offset}, len={directory_len}, file={file_len}"
+ ),
+ Error::EntryIdOutOfRange { id, entry_count } => {
+ write!(f, "entry id out of range: id={id}, count={entry_count}")
+ }
+ Error::EntryDataOutOfBounds {
+ id,
+ offset,
+ size,
+ directory_offset,
+ } => write!(
+ f,
+ "entry data out of bounds: id={id}, off={offset}, size={size}, dir_off={directory_offset}"
+ ),
+ Error::NameTooLong { got, max } => write!(f, "name too long: {got} > {max}"),
+ Error::NameContainsNul => write!(f, "name contains NUL byte"),
+ Error::BadNameEncoding => write!(f, "bad name encoding"),
+ Error::IntegerOverflow => write!(f, "integer overflow"),
+ Error::RawModeDisallowsOperation(op) => {
+ write!(f, "operation not allowed in raw mode: {op}")
+ }
+ }
+ }
+}
+
+impl std::error::Error for Error {}
diff --git a/crates/nres/src/lib.rs b/crates/nres/src/lib.rs
new file mode 100644
index 0000000..2005ba3
--- /dev/null
+++ b/crates/nres/src/lib.rs
@@ -0,0 +1,863 @@
+pub mod data;
+pub mod error;
+
+use crate::data::{OutputBuffer, ResourceData};
+use crate::error::Error;
+use core::ops::Range;
+use std::cmp::Ordering;
+use std::fs::{self, OpenOptions as FsOpenOptions};
+use std::io::Write;
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+use std::time::{SystemTime, UNIX_EPOCH};
+
+pub type Result<T> = core::result::Result<T, Error>;
+
+#[derive(Clone, Debug, Default)]
+pub struct OpenOptions {
+ pub raw_mode: bool,
+ pub sequential_hint: bool,
+ pub prefetch_pages: bool,
+}
+
+#[derive(Clone, Debug)]
+pub enum OpenMode {
+ ReadOnly,
+ ReadWrite,
+}
+
+impl Default for OpenMode {
+ fn default() -> Self {
+ Self::ReadOnly
+ }
+}
+
+pub struct Archive {
+ bytes: Arc<[u8]>,
+ entries: Vec<EntryRecord>,
+ raw_mode: bool,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub struct EntryId(pub u32);
+
+#[derive(Clone, Debug)]
+pub struct EntryMeta {
+ pub kind: u32,
+ pub attr1: u32,
+ pub attr2: u32,
+ pub attr3: u32,
+ pub name: String,
+ pub data_offset: u64,
+ pub data_size: u32,
+ pub sort_index: u32,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct EntryRef<'a> {
+ pub id: EntryId,
+ pub meta: &'a EntryMeta,
+}
+
+#[derive(Clone, Debug)]
+struct EntryRecord {
+ meta: EntryMeta,
+ name_raw: [u8; 36],
+}
+
+impl Archive {
+ pub fn open_path(path: impl AsRef<Path>) -> Result<Self> {
+ Self::open_path_with(path, OpenMode::ReadOnly, OpenOptions::default())
+ }
+
+ pub fn open_path_with(
+ path: impl AsRef<Path>,
+ _mode: OpenMode,
+ opts: OpenOptions,
+ ) -> Result<Self> {
+ let bytes = fs::read(path.as_ref())?;
+ let arc: Arc<[u8]> = Arc::from(bytes.into_boxed_slice());
+ Self::open_bytes(arc, opts)
+ }
+
+ pub fn open_bytes(bytes: Arc<[u8]>, opts: OpenOptions) -> Result<Self> {
+ let (entries, _) = parse_archive(&bytes, opts.raw_mode)?;
+ if opts.prefetch_pages {
+ prefetch_pages(&bytes);
+ }
+ Ok(Self {
+ bytes,
+ entries,
+ raw_mode: opts.raw_mode,
+ })
+ }
+
+ pub fn entry_count(&self) -> usize {
+ self.entries.len()
+ }
+
+ pub fn entries(&self) -> impl Iterator<Item = EntryRef<'_>> {
+ self.entries
+ .iter()
+ .enumerate()
+ .map(|(idx, entry)| EntryRef {
+ id: EntryId(idx as u32),
+ meta: &entry.meta,
+ })
+ }
+
+ pub fn find(&self, name: &str) -> Option<EntryId> {
+ if self.entries.is_empty() {
+ return None;
+ }
+
+ if !self.raw_mode {
+ let mut low = 0usize;
+ let mut high = self.entries.len();
+ while low < high {
+ let mid = low + (high - low) / 2;
+ let target_idx = self.entries[mid].meta.sort_index as usize;
+ if target_idx >= self.entries.len() {
+ break;
+ }
+ let cmp = cmp_name_case_insensitive(
+ name.as_bytes(),
+ entry_name_bytes(&self.entries[target_idx].name_raw),
+ );
+ match cmp {
+ Ordering::Less => high = mid,
+ Ordering::Greater => low = mid + 1,
+ Ordering::Equal => return Some(EntryId(target_idx as u32)),
+ }
+ }
+ }
+
+ self.entries.iter().enumerate().find_map(|(idx, entry)| {
+ if cmp_name_case_insensitive(name.as_bytes(), entry_name_bytes(&entry.name_raw))
+ == Ordering::Equal
+ {
+ Some(EntryId(idx as u32))
+ } else {
+ None
+ }
+ })
+ }
+
+ pub fn get(&self, id: EntryId) -> Option<EntryRef<'_>> {
+ let idx = usize::try_from(id.0).ok()?;
+ let entry = self.entries.get(idx)?;
+ Some(EntryRef {
+ id,
+ meta: &entry.meta,
+ })
+ }
+
+ pub fn read(&self, id: EntryId) -> Result<ResourceData<'_>> {
+ let range = self.entry_range(id)?;
+ Ok(ResourceData::Borrowed(&self.bytes[range]))
+ }
+
+ pub fn read_into(&self, id: EntryId, out: &mut dyn OutputBuffer) -> Result<usize> {
+ let range = self.entry_range(id)?;
+ out.write_exact(&self.bytes[range.clone()])?;
+ Ok(range.len())
+ }
+
+ pub fn raw_slice(&self, id: EntryId) -> Result<Option<&[u8]>> {
+ let range = self.entry_range(id)?;
+ Ok(Some(&self.bytes[range]))
+ }
+
+ pub fn edit_path(path: impl AsRef<Path>) -> Result<Editor> {
+ let path_buf = path.as_ref().to_path_buf();
+ let bytes = fs::read(&path_buf)?;
+ let arc: Arc<[u8]> = Arc::from(bytes.into_boxed_slice());
+ let (entries, _) = parse_archive(&arc, false)?;
+ let mut editable = Vec::with_capacity(entries.len());
+ for entry in &entries {
+ let range = checked_range(entry.meta.data_offset, entry.meta.data_size, arc.len())?;
+ editable.push(EditableEntry {
+ meta: entry.meta.clone(),
+ name_raw: entry.name_raw,
+ data: arc[range].to_vec(),
+ });
+ }
+ Ok(Editor {
+ path: path_buf,
+ entries: editable,
+ })
+ }
+
+ fn entry_range(&self, id: EntryId) -> Result<Range<usize>> {
+ let idx = usize::try_from(id.0).map_err(|_| Error::IntegerOverflow)?;
+ let Some(entry) = self.entries.get(idx) else {
+ return Err(Error::EntryIdOutOfRange {
+ id: id.0,
+ entry_count: self.entries.len().try_into().unwrap_or(u32::MAX),
+ });
+ };
+ checked_range(
+ entry.meta.data_offset,
+ entry.meta.data_size,
+ self.bytes.len(),
+ )
+ }
+}
+
+pub struct Editor {
+ path: PathBuf,
+ entries: Vec<EditableEntry>,
+}
+
+#[derive(Clone, Debug)]
+struct EditableEntry {
+ meta: EntryMeta,
+ name_raw: [u8; 36],
+ data: Vec<u8>,
+}
+
+#[derive(Clone, Debug)]
+pub struct NewEntry<'a> {
+ pub kind: u32,
+ pub attr1: u32,
+ pub attr2: u32,
+ pub attr3: u32,
+ pub name: &'a str,
+ pub data: &'a [u8],
+}
+
+impl Editor {
+ pub fn entries(&self) -> impl Iterator<Item = EntryRef<'_>> {
+ self.entries
+ .iter()
+ .enumerate()
+ .map(|(idx, entry)| EntryRef {
+ id: EntryId(idx as u32),
+ meta: &entry.meta,
+ })
+ }
+
+ pub fn add(&mut self, entry: NewEntry<'_>) -> Result<EntryId> {
+ let name_raw = encode_name_field(entry.name)?;
+ let id_u32 = u32::try_from(self.entries.len()).map_err(|_| Error::IntegerOverflow)?;
+ let data_size = u32::try_from(entry.data.len()).map_err(|_| Error::IntegerOverflow)?;
+ self.entries.push(EditableEntry {
+ meta: EntryMeta {
+ kind: entry.kind,
+ attr1: entry.attr1,
+ attr2: entry.attr2,
+ attr3: entry.attr3,
+ name: decode_name(entry_name_bytes(&name_raw)),
+ data_offset: 0,
+ data_size,
+ sort_index: 0,
+ },
+ name_raw,
+ data: entry.data.to_vec(),
+ });
+ Ok(EntryId(id_u32))
+ }
+
+ pub fn replace_data(&mut self, id: EntryId, data: &[u8]) -> Result<()> {
+ let idx = usize::try_from(id.0).map_err(|_| Error::IntegerOverflow)?;
+ let Some(entry) = self.entries.get_mut(idx) else {
+ return Err(Error::EntryIdOutOfRange {
+ id: id.0,
+ entry_count: self.entries.len().try_into().unwrap_or(u32::MAX),
+ });
+ };
+ entry.meta.data_size = u32::try_from(data.len()).map_err(|_| Error::IntegerOverflow)?;
+ entry.data.clear();
+ entry.data.extend_from_slice(data);
+ Ok(())
+ }
+
+ pub fn remove(&mut self, id: EntryId) -> Result<()> {
+ let idx = usize::try_from(id.0).map_err(|_| Error::IntegerOverflow)?;
+ if idx >= self.entries.len() {
+ return Err(Error::EntryIdOutOfRange {
+ id: id.0,
+ entry_count: self.entries.len().try_into().unwrap_or(u32::MAX),
+ });
+ }
+ self.entries.remove(idx);
+ Ok(())
+ }
+
+ pub fn commit(mut self) -> Result<()> {
+ let count_u32 = u32::try_from(self.entries.len()).map_err(|_| Error::IntegerOverflow)?;
+ let mut out = Vec::new();
+ out.resize(16, 0);
+
+ for entry in &mut self.entries {
+ entry.meta.data_offset =
+ u64::try_from(out.len()).map_err(|_| Error::IntegerOverflow)?;
+ entry.meta.data_size =
+ u32::try_from(entry.data.len()).map_err(|_| Error::IntegerOverflow)?;
+ out.extend_from_slice(&entry.data);
+ let padding = (8 - (out.len() % 8)) % 8;
+ if padding > 0 {
+ out.resize(out.len() + padding, 0);
+ }
+ }
+
+ let mut sort_order: Vec<usize> = (0..self.entries.len()).collect();
+ sort_order.sort_by(|a, b| {
+ cmp_name_case_insensitive(
+ entry_name_bytes(&self.entries[*a].name_raw),
+ entry_name_bytes(&self.entries[*b].name_raw),
+ )
+ });
+
+ for (idx, entry) in self.entries.iter_mut().enumerate() {
+ entry.meta.sort_index =
+ u32::try_from(sort_order[idx]).map_err(|_| Error::IntegerOverflow)?;
+ }
+
+ for entry in &self.entries {
+ let data_offset_u32 =
+ u32::try_from(entry.meta.data_offset).map_err(|_| Error::IntegerOverflow)?;
+ push_u32(&mut out, entry.meta.kind);
+ push_u32(&mut out, entry.meta.attr1);
+ push_u32(&mut out, entry.meta.attr2);
+ push_u32(&mut out, entry.meta.data_size);
+ push_u32(&mut out, entry.meta.attr3);
+ out.extend_from_slice(&entry.name_raw);
+ push_u32(&mut out, data_offset_u32);
+ push_u32(&mut out, entry.meta.sort_index);
+ }
+
+ let total_size_u32 = u32::try_from(out.len()).map_err(|_| Error::IntegerOverflow)?;
+ out[0..4].copy_from_slice(b"NRes");
+ out[4..8].copy_from_slice(&0x100_u32.to_le_bytes());
+ out[8..12].copy_from_slice(&count_u32.to_le_bytes());
+ out[12..16].copy_from_slice(&total_size_u32.to_le_bytes());
+
+ write_atomic(&self.path, &out)
+ }
+}
+
+fn parse_archive(bytes: &[u8], raw_mode: bool) -> Result<(Vec<EntryRecord>, u64)> {
+ if raw_mode {
+ let data_size = u32::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?;
+ let entry = EntryRecord {
+ meta: EntryMeta {
+ kind: 0,
+ attr1: 0,
+ attr2: 0,
+ attr3: 0,
+ name: String::from("RAW"),
+ data_offset: 0,
+ data_size,
+ sort_index: 0,
+ },
+ name_raw: {
+ let mut name = [0u8; 36];
+ let bytes_name = b"RAW";
+ name[..bytes_name.len()].copy_from_slice(bytes_name);
+ name
+ },
+ };
+ return Ok((vec![entry], bytes.len() as u64));
+ }
+
+ if bytes.len() < 16 {
+ let mut got = [0u8; 4];
+ let copy_len = bytes.len().min(4);
+ got[..copy_len].copy_from_slice(&bytes[..copy_len]);
+ return Err(Error::InvalidMagic { got });
+ }
+
+ let mut magic = [0u8; 4];
+ magic.copy_from_slice(&bytes[0..4]);
+ if &magic != b"NRes" {
+ return Err(Error::InvalidMagic { got: magic });
+ }
+
+ let version = read_u32(bytes, 4)?;
+ if version != 0x100 {
+ return Err(Error::UnsupportedVersion { got: version });
+ }
+
+ let entry_count_i32 = i32::from_le_bytes(
+ bytes[8..12]
+ .try_into()
+ .map_err(|_| Error::IntegerOverflow)?,
+ );
+ if entry_count_i32 < 0 {
+ return Err(Error::InvalidEntryCount {
+ got: entry_count_i32,
+ });
+ }
+ let entry_count = usize::try_from(entry_count_i32).map_err(|_| Error::IntegerOverflow)?;
+
+ let total_size = read_u32(bytes, 12)?;
+ let actual_size = u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?;
+ if u64::from(total_size) != actual_size {
+ return Err(Error::TotalSizeMismatch {
+ header: total_size,
+ actual: actual_size,
+ });
+ }
+
+ let directory_len = u64::try_from(entry_count)
+ .map_err(|_| Error::IntegerOverflow)?
+ .checked_mul(64)
+ .ok_or(Error::IntegerOverflow)?;
+ let directory_offset =
+ u64::from(total_size)
+ .checked_sub(directory_len)
+ .ok_or(Error::DirectoryOutOfBounds {
+ directory_offset: 0,
+ directory_len,
+ file_len: actual_size,
+ })?;
+
+ if directory_offset < 16 || directory_offset + directory_len > actual_size {
+ return Err(Error::DirectoryOutOfBounds {
+ directory_offset,
+ directory_len,
+ file_len: actual_size,
+ });
+ }
+
+ let mut entries = Vec::with_capacity(entry_count);
+ for index in 0..entry_count {
+ let base = usize::try_from(directory_offset)
+ .map_err(|_| Error::IntegerOverflow)?
+ .checked_add(index.checked_mul(64).ok_or(Error::IntegerOverflow)?)
+ .ok_or(Error::IntegerOverflow)?;
+
+ let kind = read_u32(bytes, base)?;
+ let attr1 = read_u32(bytes, base + 4)?;
+ let attr2 = read_u32(bytes, base + 8)?;
+ let data_size = read_u32(bytes, base + 12)?;
+ let attr3 = read_u32(bytes, base + 16)?;
+
+ let mut name_raw = [0u8; 36];
+ let name_slice = bytes
+ .get(base + 20..base + 56)
+ .ok_or(Error::IntegerOverflow)?;
+ name_raw.copy_from_slice(name_slice);
+
+ let name_bytes = entry_name_bytes(&name_raw);
+ if name_bytes.len() > 35 {
+ return Err(Error::NameTooLong {
+ got: name_bytes.len(),
+ max: 35,
+ });
+ }
+
+ let data_offset = u64::from(read_u32(bytes, base + 56)?);
+ let sort_index = read_u32(bytes, base + 60)?;
+
+ let end = data_offset
+ .checked_add(u64::from(data_size))
+ .ok_or(Error::IntegerOverflow)?;
+ if data_offset < 16 || end > directory_offset {
+ return Err(Error::EntryDataOutOfBounds {
+ id: u32::try_from(index).map_err(|_| Error::IntegerOverflow)?,
+ offset: data_offset,
+ size: data_size,
+ directory_offset,
+ });
+ }
+
+ entries.push(EntryRecord {
+ meta: EntryMeta {
+ kind,
+ attr1,
+ attr2,
+ attr3,
+ name: decode_name(name_bytes),
+ data_offset,
+ data_size,
+ sort_index,
+ },
+ name_raw,
+ });
+ }
+
+ Ok((entries, directory_offset))
+}
+
+fn checked_range(offset: u64, size: u32, bytes_len: usize) -> Result<Range<usize>> {
+ let start = usize::try_from(offset).map_err(|_| Error::IntegerOverflow)?;
+ let len = usize::try_from(size).map_err(|_| Error::IntegerOverflow)?;
+ let end = start.checked_add(len).ok_or(Error::IntegerOverflow)?;
+ if end > bytes_len {
+ return Err(Error::IntegerOverflow);
+ }
+ Ok(start..end)
+}
+
+fn read_u32(bytes: &[u8], offset: usize) -> Result<u32> {
+ let data = bytes
+ .get(offset..offset + 4)
+ .ok_or(Error::IntegerOverflow)?;
+ let arr: [u8; 4] = data.try_into().map_err(|_| Error::IntegerOverflow)?;
+ Ok(u32::from_le_bytes(arr))
+}
+
+fn push_u32(out: &mut Vec<u8>, value: u32) {
+ out.extend_from_slice(&value.to_le_bytes());
+}
+
+fn encode_name_field(name: &str) -> Result<[u8; 36]> {
+ let bytes = name.as_bytes();
+ if bytes.contains(&0) {
+ return Err(Error::NameContainsNul);
+ }
+ if bytes.len() > 35 {
+ return Err(Error::NameTooLong {
+ got: bytes.len(),
+ max: 35,
+ });
+ }
+
+ let mut out = [0u8; 36];
+ out[..bytes.len()].copy_from_slice(bytes);
+ Ok(out)
+}
+
+fn entry_name_bytes(raw: &[u8; 36]) -> &[u8] {
+ let len = raw.iter().position(|&b| b == 0).unwrap_or(raw.len());
+ &raw[..len]
+}
+
+fn decode_name(name: &[u8]) -> String {
+ name.iter().map(|b| char::from(*b)).collect()
+}
+
+fn cmp_name_case_insensitive(a: &[u8], b: &[u8]) -> Ordering {
+ let mut idx = 0usize;
+ let min_len = a.len().min(b.len());
+ while idx < min_len {
+ let left = ascii_lower(a[idx]);
+ let right = ascii_lower(b[idx]);
+ if left != right {
+ return left.cmp(&right);
+ }
+ idx += 1;
+ }
+ a.len().cmp(&b.len())
+}
+
+fn ascii_lower(value: u8) -> u8 {
+ if value.is_ascii_uppercase() {
+ value + 32
+ } else {
+ value
+ }
+}
+
+fn prefetch_pages(bytes: &[u8]) {
+ use std::sync::atomic::{compiler_fence, Ordering};
+
+ let mut cursor = 0usize;
+ let mut sink = 0u8;
+ while cursor < bytes.len() {
+ sink ^= bytes[cursor];
+ cursor = cursor.saturating_add(4096);
+ }
+ compiler_fence(Ordering::SeqCst);
+ let _ = sink;
+}
+
+fn write_atomic(path: &Path, content: &[u8]) -> Result<()> {
+ let file_name = path
+ .file_name()
+ .and_then(|name| name.to_str())
+ .unwrap_or("archive");
+ let parent = path.parent().unwrap_or_else(|| Path::new("."));
+
+ let mut temp_path = None;
+ for attempt in 0..128u32 {
+ let name = format!(
+ ".{}.tmp.{}.{}.{}",
+ file_name,
+ std::process::id(),
+ unix_time_nanos(),
+ attempt
+ );
+ let candidate = parent.join(name);
+ let opened = FsOpenOptions::new()
+ .create_new(true)
+ .write(true)
+ .open(&candidate);
+ if let Ok(mut file) = opened {
+ file.write_all(content)?;
+ file.sync_all()?;
+ temp_path = Some((candidate, file));
+ break;
+ }
+ }
+
+ let Some((tmp_path, mut file)) = temp_path else {
+ return Err(Error::Io(std::io::Error::new(
+ std::io::ErrorKind::AlreadyExists,
+ "failed to create temporary file for atomic write",
+ )));
+ };
+
+ file.flush()?;
+ drop(file);
+
+ match fs::rename(&tmp_path, path) {
+ Ok(()) => Ok(()),
+ Err(rename_err) => {
+ if path.exists() {
+ fs::remove_file(path)?;
+ fs::rename(&tmp_path, path)?;
+ Ok(())
+ } else {
+ let _ = fs::remove_file(&tmp_path);
+ Err(Error::Io(rename_err))
+ }
+ }
+ }
+}
+
+fn unix_time_nanos() -> u128 {
+ match SystemTime::now().duration_since(UNIX_EPOCH) {
+ Ok(duration) => duration.as_nanos(),
+ Err(_) => 0,
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::any::Any;
+ use std::fs;
+ use std::panic::{catch_unwind, AssertUnwindSafe};
+
+ fn collect_files_recursive(root: &Path, out: &mut Vec<PathBuf>) {
+ let Ok(entries) = fs::read_dir(root) else {
+ return;
+ };
+ for entry in entries.flatten() {
+ let path = entry.path();
+ if path.is_dir() {
+ collect_files_recursive(&path, out);
+ } else if path.is_file() {
+ out.push(path);
+ }
+ }
+ }
+
+ fn nres_test_files() -> Vec<PathBuf> {
+ let root = Path::new(env!("CARGO_MANIFEST_DIR"))
+ .join("..")
+ .join("..")
+ .join("testdata")
+ .join("nres");
+ let mut files = Vec::new();
+ collect_files_recursive(&root, &mut files);
+ files.sort();
+ files
+ .into_iter()
+ .filter(|path| {
+ fs::read(path)
+ .map(|data| data.get(0..4) == Some(b"NRes"))
+ .unwrap_or(false)
+ })
+ .collect()
+ }
+
+ fn make_temp_copy(original: &Path, bytes: &[u8]) -> PathBuf {
+ let mut path = std::env::temp_dir();
+ let file_name = original
+ .file_name()
+ .and_then(|v| v.to_str())
+ .unwrap_or("archive");
+ path.push(format!(
+ "nres-test-{}-{}-{}",
+ std::process::id(),
+ unix_time_nanos(),
+ file_name
+ ));
+ fs::write(&path, bytes).expect("failed to create temp file");
+ path
+ }
+
+ fn panic_message(payload: Box<dyn Any + Send>) -> String {
+ let any = payload.as_ref();
+ if let Some(message) = any.downcast_ref::<String>() {
+ return message.clone();
+ }
+ if let Some(message) = any.downcast_ref::<&str>() {
+ return (*message).to_string();
+ }
+ String::from("panic without message")
+ }
+
+ #[test]
+ fn nres_read_and_roundtrip_all_files() {
+ let files = nres_test_files();
+ assert!(!files.is_empty(), "testdata/nres contains no NRes archives");
+
+ let checked = files.len();
+ let mut success = 0usize;
+ let mut failures = Vec::new();
+
+ for path in files {
+ let display_path = path.display().to_string();
+ let result = catch_unwind(AssertUnwindSafe(|| {
+ let original = fs::read(&path).expect("failed to read archive");
+ let archive = Archive::open_path(&path)
+ .unwrap_or_else(|err| panic!("failed to open {}: {err}", path.display()));
+
+ let count = archive.entry_count();
+ assert_eq!(
+ count,
+ archive.entries().count(),
+ "entry count mismatch: {}",
+ path.display()
+ );
+
+ for idx in 0..count {
+ let id = EntryId(idx as u32);
+ let entry = archive
+ .get(id)
+ .unwrap_or_else(|| panic!("missing entry #{idx} in {}", path.display()));
+
+ let payload = archive.read(id).unwrap_or_else(|err| {
+ panic!("read failed for {} entry #{idx}: {err}", path.display())
+ });
+
+ let mut out = Vec::new();
+ let written = archive.read_into(id, &mut out).unwrap_or_else(|err| {
+ panic!(
+ "read_into failed for {} entry #{idx}: {err}",
+ path.display()
+ )
+ });
+ assert_eq!(
+ written,
+ payload.as_slice().len(),
+ "size mismatch in {} entry #{idx}",
+ path.display()
+ );
+ assert_eq!(
+ out.as_slice(),
+ payload.as_slice(),
+ "payload mismatch in {} entry #{idx}",
+ path.display()
+ );
+
+ let raw = archive
+ .raw_slice(id)
+ .unwrap_or_else(|err| {
+ panic!(
+ "raw_slice failed for {} entry #{idx}: {err}",
+ path.display()
+ )
+ })
+ .expect("raw_slice must return Some for file-backed archive");
+ assert_eq!(
+ raw,
+ payload.as_slice(),
+ "raw slice mismatch in {} entry #{idx}",
+ path.display()
+ );
+
+ let found = archive.find(&entry.meta.name).unwrap_or_else(|| {
+ panic!(
+ "find failed for name '{}' in {}",
+ entry.meta.name,
+ path.display()
+ )
+ });
+ let found_meta = archive.get(found).expect("find returned invalid id");
+ assert!(
+ found_meta.meta.name.eq_ignore_ascii_case(&entry.meta.name),
+ "find returned unrelated entry in {}",
+ path.display()
+ );
+ }
+
+ let temp_copy = make_temp_copy(&path, &original);
+ let mut editor = Archive::edit_path(&temp_copy)
+ .unwrap_or_else(|err| panic!("edit_path failed for {}: {err}", path.display()));
+
+ for idx in 0..count {
+ let data = archive
+ .read(EntryId(idx as u32))
+ .unwrap_or_else(|err| {
+ panic!(
+ "read before replace failed for {} entry #{idx}: {err}",
+ path.display()
+ )
+ })
+ .into_owned();
+ editor
+ .replace_data(EntryId(idx as u32), &data)
+ .unwrap_or_else(|err| {
+ panic!(
+ "replace_data failed for {} entry #{idx}: {err}",
+ path.display()
+ )
+ });
+ }
+
+ editor
+ .commit()
+ .unwrap_or_else(|err| panic!("commit failed for {}: {err}", path.display()));
+ let rebuilt = fs::read(&temp_copy).expect("failed to read rebuilt archive");
+ let _ = fs::remove_file(&temp_copy);
+
+ assert_eq!(
+ original,
+ rebuilt,
+ "byte-to-byte roundtrip mismatch for {}",
+ path.display()
+ );
+ }));
+
+ match result {
+ Ok(()) => success += 1,
+ Err(payload) => {
+ failures.push(format!("{}: {}", display_path, panic_message(payload)));
+ }
+ }
+ }
+
+ let failed = failures.len();
+ eprintln!(
+ "NRes summary: checked={}, success={}, failed={}",
+ checked, success, failed
+ );
+ if !failures.is_empty() {
+ panic!(
+ "NRes validation failed.\nsummary: checked={}, success={}, failed={}\n{}",
+ checked,
+ success,
+ failed,
+ failures.join("\n")
+ );
+ }
+ }
+
+ #[test]
+ fn nres_raw_mode_exposes_whole_file() {
+ let files = nres_test_files();
+ let first = files.first().expect("testdata/nres has no archives");
+ let original = fs::read(first).expect("failed to read archive");
+ let arc: Arc<[u8]> = Arc::from(original.clone().into_boxed_slice());
+
+ let archive = Archive::open_bytes(
+ arc,
+ OpenOptions {
+ raw_mode: true,
+ sequential_hint: false,
+ prefetch_pages: false,
+ },
+ )
+ .expect("raw mode open failed");
+
+ assert_eq!(archive.entry_count(), 1);
+ let data = archive.read(EntryId(0)).expect("raw read failed");
+ assert_eq!(data.as_slice(), original.as_slice());
+ }
+}
diff --git a/crates/rsli/Cargo.toml b/crates/rsli/Cargo.toml
new file mode 100644
index 0000000..6f89e0a
--- /dev/null
+++ b/crates/rsli/Cargo.toml
@@ -0,0 +1,7 @@
+[package]
+name = "rsli"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+flate2 = { version = "1", default-features = false, features = ["rust_backend"] }
diff --git a/crates/rsli/src/data.rs b/crates/rsli/src/data.rs
new file mode 100644
index 0000000..daa5592
--- /dev/null
+++ b/crates/rsli/src/data.rs
@@ -0,0 +1,41 @@
+use std::io;
+
+#[derive(Clone, Debug)]
+pub enum ResourceData<'a> {
+ Borrowed(&'a [u8]),
+ Owned(Vec<u8>),
+}
+
+impl<'a> ResourceData<'a> {
+ pub fn as_slice(&self) -> &[u8] {
+ match self {
+ Self::Borrowed(slice) => slice,
+ Self::Owned(buf) => buf.as_slice(),
+ }
+ }
+
+ pub fn into_owned(self) -> Vec<u8> {
+ match self {
+ Self::Borrowed(slice) => slice.to_vec(),
+ Self::Owned(buf) => buf,
+ }
+ }
+}
+
+impl AsRef<[u8]> for ResourceData<'_> {
+ fn as_ref(&self) -> &[u8] {
+ self.as_slice()
+ }
+}
+
+pub trait OutputBuffer {
+ fn write_exact(&mut self, data: &[u8]) -> io::Result<()>;
+}
+
+impl OutputBuffer for Vec<u8> {
+ fn write_exact(&mut self, data: &[u8]) -> io::Result<()> {
+ self.clear();
+ self.extend_from_slice(data);
+ Ok(())
+ }
+}
diff --git a/crates/rsli/src/error.rs b/crates/rsli/src/error.rs
new file mode 100644
index 0000000..056a13b
--- /dev/null
+++ b/crates/rsli/src/error.rs
@@ -0,0 +1,129 @@
+use core::fmt;
+
+#[derive(Debug)]
+#[non_exhaustive]
+pub enum Error {
+ Io(std::io::Error),
+
+ InvalidMagic {
+ got: [u8; 2],
+ },
+ UnsupportedVersion {
+ got: u8,
+ },
+ InvalidEntryCount {
+ got: i16,
+ },
+
+ EntryTableOutOfBounds {
+ table_offset: u64,
+ table_len: u64,
+ file_len: u64,
+ },
+ EntryTableDecryptFailed,
+ CorruptEntryTable(&'static str),
+
+ EntryIdOutOfRange {
+ id: u32,
+ entry_count: u32,
+ },
+ EntryDataOutOfBounds {
+ id: u32,
+ offset: u64,
+ size: u32,
+ file_len: u64,
+ },
+
+ AoTrailerInvalid,
+ MediaOverlayOutOfBounds {
+ overlay: u32,
+ file_len: u64,
+ },
+
+ UnsupportedMethod {
+ raw: u32,
+ },
+ PackedSizePastEof {
+ id: u32,
+ offset: u64,
+ packed_size: u32,
+ file_len: u64,
+ },
+ DeflateEofPlusOneQuirkRejected {
+ id: u32,
+ },
+
+ DecompressionFailed(&'static str),
+ OutputSizeMismatch {
+ expected: u32,
+ got: u32,
+ },
+
+ IntegerOverflow,
+}
+
+impl From<std::io::Error> for Error {
+ fn from(value: std::io::Error) -> Self {
+ Self::Io(value)
+ }
+}
+
+impl fmt::Display for Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Error::Io(e) => write!(f, "I/O error: {e}"),
+ Error::InvalidMagic { got } => write!(f, "invalid RsLi magic: {got:02X?}"),
+ Error::UnsupportedVersion { got } => write!(f, "unsupported RsLi version: {got:#x}"),
+ Error::InvalidEntryCount { got } => write!(f, "invalid entry_count: {got}"),
+ Error::EntryTableOutOfBounds {
+ table_offset,
+ table_len,
+ file_len,
+ } => write!(
+ f,
+ "entry table out of bounds: off={table_offset}, len={table_len}, file={file_len}"
+ ),
+ Error::EntryTableDecryptFailed => write!(f, "failed to decrypt entry table"),
+ Error::CorruptEntryTable(s) => write!(f, "corrupt entry table: {s}"),
+ Error::EntryIdOutOfRange { id, entry_count } => {
+ write!(f, "entry id out of range: id={id}, count={entry_count}")
+ }
+ Error::EntryDataOutOfBounds {
+ id,
+ offset,
+ size,
+ file_len,
+ } => write!(
+ f,
+ "entry data out of bounds: id={id}, off={offset}, size={size}, file={file_len}"
+ ),
+ Error::AoTrailerInvalid => write!(f, "invalid AO trailer"),
+ Error::MediaOverlayOutOfBounds { overlay, file_len } => {
+ write!(
+ f,
+ "media overlay out of bounds: overlay={overlay}, file={file_len}"
+ )
+ }
+ Error::UnsupportedMethod { raw } => write!(f, "unsupported packing method: {raw:#x}"),
+ Error::PackedSizePastEof {
+ id,
+ offset,
+ packed_size,
+ file_len,
+ } => write!(
+ f,
+ "packed range past EOF: id={id}, off={offset}, size={packed_size}, file={file_len}"
+ ),
+ Error::DeflateEofPlusOneQuirkRejected { id } => {
+ write!(f, "deflate EOF+1 quirk rejected for entry {id}")
+ }
+ Error::DecompressionFailed(s) => write!(f, "decompression failed: {s}"),
+ Error::OutputSizeMismatch { expected, got } => {
+ write!(f, "output size mismatch: expected={expected}, got={got}")
+ }
+ Error::IntegerOverflow => write!(f, "integer overflow"),
+ }
+ }
+}
+
+impl std::error::Error for Error {}
diff --git a/crates/rsli/src/lib.rs b/crates/rsli/src/lib.rs
new file mode 100644
index 0000000..0c55b25
--- /dev/null
+++ b/crates/rsli/src/lib.rs
@@ -0,0 +1,1165 @@
+pub mod data;
+pub mod error;
+
+use crate::data::{OutputBuffer, ResourceData};
+use crate::error::Error;
+use flate2::read::{DeflateDecoder, ZlibDecoder};
+use std::cmp::Ordering;
+use std::fs;
+use std::io::Read;
+use std::path::Path;
+use std::sync::Arc;
+
+pub type Result<T> = core::result::Result<T, Error>;
+
/// Parser toggles for accommodating known archive quirks.
#[derive(Clone, Debug)]
pub struct OpenOptions {
    /// Honor a trailing 6-byte "AO" record at EOF whose payload is added as
    /// a global offset to every entry's data position.
    pub allow_ao_trailer: bool,
    /// Accept deflate entries whose declared packed range ends exactly one
    /// byte past EOF by trimming the final byte instead of failing.
    pub allow_deflate_eof_plus_one: bool,
}

// Both quirk accommodations are enabled by default.
impl Default for OpenOptions {
    fn default() -> Self {
        Self {
            allow_ao_trailer: true,
            allow_deflate_eof_plus_one: true,
        }
    }
}
+
/// A parsed RsLi archive: the full file bytes plus the decoded entry table.
pub struct Library {
    // Entire archive file; entry payloads are sub-slices of this buffer.
    bytes: Arc<[u8]>,
    entries: Vec<EntryRecord>,
    // The fields below retain raw parse artifacts so the test-only
    // `rebuild_from_parsed_metadata` can reconstruct the file byte-for-byte.
    #[cfg(test)]
    header_raw: [u8; 32],
    #[cfg(test)]
    table_plain_original: Vec<u8>,
    #[cfg(test)]
    xor_seed: u32,
    #[cfg(test)]
    source_size: usize,
    #[cfg(test)]
    trailer_raw: Option<[u8; 6]>,
}
+
/// Index of an entry within a `Library` (its position in the entry table).
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EntryId(pub u32);

/// Decoded metadata from one 32-byte entry-table row.
#[derive(Clone, Debug)]
pub struct EntryMeta {
    pub name: String,
    /// Raw flags word; bits masked by 0x1E0 encode the packing method.
    pub flags: i32,
    pub method: PackMethod,
    /// Absolute payload offset (raw table offset plus any AO overlay).
    pub data_offset: u64,
    pub packed_size: u32,
    pub unpacked_size: u32,
}
+
/// Packing algorithm for an entry's payload, decoded from the flag bits.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum PackMethod {
    None,
    XorOnly,
    Lzss,
    XorLzss,
    LzssHuffman,
    XorLzssHuffman,
    Deflate,
    /// Unrecognized raw method bits; decoding such an entry fails with
    /// `Error::UnsupportedMethod`.
    Unknown(u32),
}

/// Borrowed view of one entry: its id plus a reference to its metadata.
#[derive(Copy, Clone, Debug)]
pub struct EntryRef<'a> {
    pub id: EntryId,
    pub meta: &'a EntryMeta,
}

/// An entry's still-compressed payload together with its metadata, as
/// produced by `Library::load_packed`.
pub struct PackedResource {
    pub meta: EntryMeta,
    pub packed: Vec<u8>,
}
+
/// Internal per-entry state: public metadata plus the raw fields needed for
/// name lookup, decryption and bounds-checked slicing.
#[derive(Clone, Debug)]
struct EntryRecord {
    meta: EntryMeta,
    // Fixed 12-byte name field, NUL-padded.
    name_raw: [u8; 12],
    // Permutation index used by the sorted-name binary search in `find`.
    sort_to_original: i16,
    // XOR key, derived from `sort_to_original` — TODO confirm against the
    // format's reference behaviour.
    key16: u16,
    #[cfg(test)]
    data_offset_raw: u32,
    packed_size_declared: u32,
    // Bytes actually present in the file; may be one less than declared when
    // the deflate EOF+1 quirk is tolerated.
    packed_size_available: usize,
    effective_offset: usize,
}
+
+impl Library {
+ pub fn open_path(path: impl AsRef<Path>) -> Result<Self> {
+ Self::open_path_with(path, OpenOptions::default())
+ }
+
+ pub fn open_path_with(path: impl AsRef<Path>, opts: OpenOptions) -> Result<Self> {
+ let bytes = fs::read(path.as_ref())?;
+ let arc: Arc<[u8]> = Arc::from(bytes.into_boxed_slice());
+ parse_library(arc, opts)
+ }
+
+ pub fn entry_count(&self) -> usize {
+ self.entries.len()
+ }
+
+ pub fn entries(&self) -> impl Iterator<Item = EntryRef<'_>> {
+ self.entries
+ .iter()
+ .enumerate()
+ .map(|(idx, entry)| EntryRef {
+ id: EntryId(idx as u32),
+ meta: &entry.meta,
+ })
+ }
+
    /// Looks up an entry by name (the query is ASCII upper-cased first,
    /// presumably matching an upper-case on-disk convention — TODO confirm).
    ///
    /// Tries a binary search over the name-sorted permutation stored in
    /// `sort_to_original`; if the permutation looks invalid the search bails
    /// out and an exhaustive linear scan is used as the fallback.
    pub fn find(&self, name: &str) -> Option<EntryId> {
        if self.entries.is_empty() {
            return None;
        }

        let query = name.to_ascii_uppercase();
        let query_bytes = query.as_bytes();

        let mut low = 0usize;
        let mut high = self.entries.len();
        while low < high {
            let mid = low + (high - low) / 2;
            // `sort_to_original` at sorted position `mid` holds the original
            // table index of the mid-th name in sorted order.
            let idx = self.entries[mid].sort_to_original;
            if idx < 0 {
                break;
            }
            let idx = usize::try_from(idx).ok()?;
            if idx >= self.entries.len() {
                break;
            }

            let cmp = cmp_c_string(query_bytes, c_name_bytes(&self.entries[idx].name_raw));
            match cmp {
                Ordering::Less => high = mid,
                Ordering::Greater => low = mid + 1,
                Ordering::Equal => return Some(EntryId(idx as u32)),
            }
        }

        // Fallback: linear scan covers archives whose sort table is corrupt
        // or only partially valid.
        self.entries.iter().enumerate().find_map(|(idx, entry)| {
            if cmp_c_string(query_bytes, c_name_bytes(&entry.name_raw)) == Ordering::Equal {
                Some(EntryId(idx as u32))
            } else {
                None
            }
        })
    }
+
+ pub fn get(&self, id: EntryId) -> Option<EntryRef<'_>> {
+ let idx = usize::try_from(id.0).ok()?;
+ let entry = self.entries.get(idx)?;
+ Some(EntryRef {
+ id,
+ meta: &entry.meta,
+ })
+ }
+
+ pub fn load(&self, id: EntryId) -> Result<Vec<u8>> {
+ let entry = self.entry_by_id(id)?;
+ let packed = self.packed_slice(entry)?;
+ decode_payload(
+ packed,
+ entry.meta.method,
+ entry.key16,
+ entry.meta.unpacked_size,
+ )
+ }
+
+ pub fn load_into(&self, id: EntryId, out: &mut dyn OutputBuffer) -> Result<usize> {
+ let decoded = self.load(id)?;
+ out.write_exact(&decoded)?;
+ Ok(decoded.len())
+ }
+
+ pub fn load_packed(&self, id: EntryId) -> Result<PackedResource> {
+ let entry = self.entry_by_id(id)?;
+ let packed = self.packed_slice(entry)?.to_vec();
+ Ok(PackedResource {
+ meta: entry.meta.clone(),
+ packed,
+ })
+ }
+
+ pub fn unpack(&self, packed: &PackedResource) -> Result<Vec<u8>> {
+ let key16 = self.resolve_key_for_meta(&packed.meta).unwrap_or(0);
+
+ let method = packed.meta.method;
+ if needs_xor_key(method) && self.resolve_key_for_meta(&packed.meta).is_none() {
+ return Err(Error::CorruptEntryTable(
+ "cannot resolve XOR key for packed resource",
+ ));
+ }
+
+ decode_payload(&packed.packed, method, key16, packed.meta.unpacked_size)
+ }
+
    /// Zero-copy fast path: for uncompressed entries returns a borrow of the
    /// archive bytes; every other method falls back to a full owned decode.
    pub fn load_fast(&self, id: EntryId) -> Result<ResourceData<'_>> {
        let entry = self.entry_by_id(id)?;
        if entry.meta.method == PackMethod::None {
            let packed = self.packed_slice(entry)?;
            let size =
                usize::try_from(entry.meta.unpacked_size).map_err(|_| Error::IntegerOverflow)?;
            // The stored slice must cover the declared unpacked size.
            if packed.len() < size {
                return Err(Error::OutputSizeMismatch {
                    expected: entry.meta.unpacked_size,
                    got: u32::try_from(packed.len()).unwrap_or(u32::MAX),
                });
            }
            return Ok(ResourceData::Borrowed(&packed[..size]));
        }
        Ok(ResourceData::Owned(self.load(id)?))
    }
+
    /// Resolves an `EntryId` to its internal record, reporting ids that fall
    /// outside the table.
    fn entry_by_id(&self, id: EntryId) -> Result<&EntryRecord> {
        let idx = usize::try_from(id.0).map_err(|_| Error::IntegerOverflow)?;
        self.entries
            .get(idx)
            .ok_or_else(|| Error::EntryIdOutOfRange {
                id: id.0,
                entry_count: self.entries.len().try_into().unwrap_or(u32::MAX),
            })
    }

    /// Returns the file slice holding the entry's (possibly quirk-trimmed)
    /// packed bytes. Ranges were validated during parsing, so failure here
    /// is unexpected.
    // NOTE(review): the error always reports `id: 0` because the entry's id
    // is not available in this helper — consider threading the id through.
    fn packed_slice<'a>(&'a self, entry: &EntryRecord) -> Result<&'a [u8]> {
        let start = entry.effective_offset;
        let end = start
            .checked_add(entry.packed_size_available)
            .ok_or(Error::IntegerOverflow)?;
        self.bytes
            .get(start..end)
            .ok_or(Error::EntryDataOutOfBounds {
                id: 0,
                offset: u64::try_from(start).unwrap_or(u64::MAX),
                size: entry.packed_size_declared,
                file_len: u64::try_from(self.bytes.len()).unwrap_or(u64::MAX),
            })
    }
+
+ fn resolve_key_for_meta(&self, meta: &EntryMeta) -> Option<u16> {
+ self.entries
+ .iter()
+ .find(|entry| {
+ entry.meta.name == meta.name
+ && entry.meta.flags == meta.flags
+ && entry.meta.data_offset == meta.data_offset
+ && entry.meta.packed_size == meta.packed_size
+ && entry.meta.unpacked_size == meta.unpacked_size
+ && entry.meta.method == meta.method
+ })
+ .map(|entry| entry.key16)
+ }
+
    /// Test-only inverse of parsing: reconstructs the original archive bytes
    /// from the retained header, the re-encrypted entry table, every packed
    /// payload and the optional AO trailer. Used to verify byte-for-byte
    /// roundtrips against the source file.
    #[cfg(test)]
    fn rebuild_from_parsed_metadata(&self) -> Result<Vec<u8>> {
        // The trailer (if any) is appended last, so rebuild the body first.
        let trailer_len = usize::from(self.trailer_raw.is_some()) * 6;
        let pre_trailer_size = self
            .source_size
            .checked_sub(trailer_len)
            .ok_or(Error::IntegerOverflow)?;

        let count = self.entries.len();
        let table_len = count.checked_mul(32).ok_or(Error::IntegerOverflow)?;
        let table_end = 32usize
            .checked_add(table_len)
            .ok_or(Error::IntegerOverflow)?;
        if pre_trailer_size < table_end {
            return Err(Error::EntryTableOutOfBounds {
                table_offset: 32,
                table_len: u64::try_from(table_len).map_err(|_| Error::IntegerOverflow)?,
                file_len: u64::try_from(pre_trailer_size).map_err(|_| Error::IntegerOverflow)?,
            });
        }

        // Header + re-encrypted table (XOR with the low 16 bits of the seed,
        // matching the decryption in `parse_library`).
        let mut out = vec![0u8; pre_trailer_size];
        out[0..32].copy_from_slice(&self.header_raw);
        let encrypted_table =
            xor_stream(&self.table_plain_original, (self.xor_seed & 0xFFFF) as u16);
        out[32..table_end].copy_from_slice(&encrypted_table);

        // Track which byte positions have been written so overlapping
        // payloads are only accepted when they agree byte-for-byte.
        let mut occupied = vec![false; pre_trailer_size];
        for byte in occupied.iter_mut().take(table_end) {
            *byte = true;
        }

        for (idx, entry) in self.entries.iter().enumerate() {
            let packed = self.load_packed(EntryId(idx as u32))?.packed;
            let start =
                usize::try_from(entry.data_offset_raw).map_err(|_| Error::IntegerOverflow)?;
            for (offset, byte) in packed.iter().copied().enumerate() {
                let pos = start.checked_add(offset).ok_or(Error::IntegerOverflow)?;
                if pos >= out.len() {
                    return Err(Error::PackedSizePastEof {
                        id: idx as u32,
                        offset: u64::from(entry.data_offset_raw),
                        packed_size: entry.packed_size_declared,
                        file_len: u64::try_from(out.len()).map_err(|_| Error::IntegerOverflow)?,
                    });
                }
                if occupied[pos] && out[pos] != byte {
                    return Err(Error::CorruptEntryTable("packed payload overlap conflict"));
                }
                out[pos] = byte;
                occupied[pos] = true;
            }
        }

        if let Some(trailer) = self.trailer_raw {
            out.extend_from_slice(&trailer);
        }
        Ok(out)
    }
+}
+
/// Parses an in-memory RsLi archive: validates the 32-byte header, decrypts
/// the entry table, applies the optional AO overlay, and bounds-checks every
/// entry's packed range (with the deflate EOF+1 quirk handled per `opts`).
fn parse_library(bytes: Arc<[u8]>, opts: OpenOptions) -> Result<Library> {
    if bytes.len() < 32 {
        return Err(Error::EntryTableOutOfBounds {
            table_offset: 32,
            table_len: 0,
            file_len: u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?,
        });
    }

    let mut header_raw = [0u8; 32];
    header_raw.copy_from_slice(&bytes[0..32]);

    // Magic is "NL"; byte 3 is the version. NOTE(review): byte 2 is never
    // validated here even though the test corpus filters on "NL\0\x01" —
    // confirm whether byte 2 must be zero.
    if &bytes[0..2] != b"NL" {
        let mut got = [0u8; 2];
        got.copy_from_slice(&bytes[0..2]);
        return Err(Error::InvalidMagic { got });
    }
    if bytes[3] != 0x01 {
        return Err(Error::UnsupportedVersion { got: bytes[3] });
    }

    let entry_count = i16::from_le_bytes([bytes[4], bytes[5]]);
    if entry_count < 0 {
        return Err(Error::InvalidEntryCount { got: entry_count });
    }
    let count = usize::try_from(entry_count).map_err(|_| Error::IntegerOverflow)?;

    // Only the low 16 bits of the seed feed the XOR keystream.
    let xor_seed = u32::from_le_bytes([bytes[20], bytes[21], bytes[22], bytes[23]]);

    // Entry table: 32 bytes per entry, immediately after the header.
    let table_len = count.checked_mul(32).ok_or(Error::IntegerOverflow)?;
    let table_offset = 32usize;
    let table_end = table_offset
        .checked_add(table_len)
        .ok_or(Error::IntegerOverflow)?;
    if table_end > bytes.len() {
        return Err(Error::EntryTableOutOfBounds {
            table_offset: u64::try_from(table_offset).map_err(|_| Error::IntegerOverflow)?,
            table_len: u64::try_from(table_len).map_err(|_| Error::IntegerOverflow)?,
            file_len: u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?,
        });
    }

    let table_enc = &bytes[table_offset..table_end];
    let table_plain_original = xor_stream(table_enc, (xor_seed & 0xFFFF) as u16);
    if table_plain_original.len() != table_len {
        return Err(Error::EntryTableDecryptFailed);
    }

    let (overlay, trailer_raw) = parse_ao_trailer(&bytes, opts.allow_ao_trailer)?;
    #[cfg(not(test))]
    let _ = trailer_raw;

    let mut entries = Vec::with_capacity(count);
    for idx in 0..count {
        let row = &table_plain_original[idx * 32..(idx + 1) * 32];

        let mut name_raw = [0u8; 12];
        name_raw.copy_from_slice(&row[0..12]);

        let flags_signed = i16::from_le_bytes([row[16], row[17]]);
        let sort_to_original = i16::from_le_bytes([row[18], row[19]]);
        let unpacked_size = u32::from_le_bytes([row[20], row[21], row[22], row[23]]);
        let data_offset_raw = u32::from_le_bytes([row[24], row[25], row[26], row[27]]);
        let packed_size_declared = u32::from_le_bytes([row[28], row[29], row[30], row[31]]);

        // Method lives in flag bits 5..=8 (mask 0x1E0).
        let method_raw = (flags_signed as u16 as u32) & 0x1E0;
        let method = parse_method(method_raw);

        // The AO overlay shifts every raw data offset.
        let effective_offset_u64 = u64::from(data_offset_raw)
            .checked_add(u64::from(overlay))
            .ok_or(Error::IntegerOverflow)?;
        let effective_offset =
            usize::try_from(effective_offset_u64).map_err(|_| Error::IntegerOverflow)?;

        let packed_size_usize =
            usize::try_from(packed_size_declared).map_err(|_| Error::IntegerOverflow)?;
        let mut packed_size_available = packed_size_usize;

        let end = effective_offset_u64
            .checked_add(u64::from(packed_size_declared))
            .ok_or(Error::IntegerOverflow)?;
        let file_len_u64 = u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?;

        if end > file_len_u64 {
            // Deflate entries (0x100) that overrun EOF by exactly one byte
            // are a known quirk: optionally trim the missing byte.
            if method_raw == 0x100 && end == file_len_u64 + 1 {
                if opts.allow_deflate_eof_plus_one {
                    packed_size_available = packed_size_available
                        .checked_sub(1)
                        .ok_or(Error::IntegerOverflow)?;
                } else {
                    return Err(Error::DeflateEofPlusOneQuirkRejected { id: idx as u32 });
                }
            } else {
                return Err(Error::PackedSizePastEof {
                    id: idx as u32,
                    offset: effective_offset_u64,
                    packed_size: packed_size_declared,
                    file_len: file_len_u64,
                });
            }
        }

        let available_end = effective_offset
            .checked_add(packed_size_available)
            .ok_or(Error::IntegerOverflow)?;
        if available_end > bytes.len() {
            return Err(Error::EntryDataOutOfBounds {
                id: idx as u32,
                offset: effective_offset_u64,
                size: packed_size_declared,
                file_len: file_len_u64,
            });
        }

        let name = decode_name(c_name_bytes(&name_raw));

        entries.push(EntryRecord {
            meta: EntryMeta {
                name,
                flags: i32::from(flags_signed),
                method,
                data_offset: effective_offset_u64,
                packed_size: packed_size_declared,
                unpacked_size,
            },
            name_raw,
            sort_to_original,
            key16: sort_to_original as u16,
            #[cfg(test)]
            data_offset_raw,
            packed_size_declared,
            packed_size_available,
            effective_offset,
        });
    }

    // 0xABBA marks archives whose sort permutation is already on disk; it is
    // only validated. Otherwise the permutation (and hence key16 — TODO
    // confirm the key semantics for non-presorted archives) is recomputed.
    let presorted_flag = u16::from_le_bytes([bytes[14], bytes[15]]);
    if presorted_flag == 0xABBA {
        for entry in &entries {
            let idx = i32::from(entry.sort_to_original);
            if idx < 0 || usize::try_from(idx).map_err(|_| Error::IntegerOverflow)? >= count {
                return Err(Error::CorruptEntryTable(
                    "sort_to_original is not a valid permutation index",
                ));
            }
        }
    } else {
        let mut sorted: Vec<usize> = (0..count).collect();
        sorted.sort_by(|a, b| {
            cmp_c_string(
                c_name_bytes(&entries[*a].name_raw),
                c_name_bytes(&entries[*b].name_raw),
            )
        });
        for (idx, entry) in entries.iter_mut().enumerate() {
            entry.sort_to_original =
                i16::try_from(sorted[idx]).map_err(|_| Error::IntegerOverflow)?;
            entry.key16 = entry.sort_to_original as u16;
        }
    }

    #[cfg(test)]
    let source_size = bytes.len();

    Ok(Library {
        bytes,
        entries,
        #[cfg(test)]
        header_raw,
        #[cfg(test)]
        table_plain_original,
        #[cfg(test)]
        xor_seed,
        #[cfg(test)]
        source_size,
        #[cfg(test)]
        trailer_raw,
    })
}
+
/// Detects an optional 6-byte "AO" trailer at the very end of the file and
/// returns `(overlay, raw_trailer)`. The overlay is a little-endian u32 that
/// shifts every entry's data offset. Absence of a trailer is not an error;
/// an overlay pointing past the file is.
fn parse_ao_trailer(bytes: &[u8], allow: bool) -> Result<(u32, Option<[u8; 6]>)> {
    if !allow || bytes.len() < 6 {
        return Ok((0, None));
    }

    if &bytes[bytes.len() - 6..bytes.len() - 4] != b"AO" {
        return Ok((0, None));
    }

    let mut trailer = [0u8; 6];
    trailer.copy_from_slice(&bytes[bytes.len() - 6..]);
    let overlay = u32::from_le_bytes([trailer[2], trailer[3], trailer[4], trailer[5]]);

    if u64::from(overlay) > u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)? {
        return Err(Error::MediaOverlayOutOfBounds {
            overlay,
            file_len: u64::try_from(bytes.len()).map_err(|_| Error::IntegerOverflow)?,
        });
    }

    Ok((overlay, Some(trailer)))
}
+
+fn parse_method(raw: u32) -> PackMethod {
+ match raw {
+ 0x000 => PackMethod::None,
+ 0x020 => PackMethod::XorOnly,
+ 0x040 => PackMethod::Lzss,
+ 0x060 => PackMethod::XorLzss,
+ 0x080 => PackMethod::LzssHuffman,
+ 0x0A0 => PackMethod::XorLzssHuffman,
+ 0x100 => PackMethod::Deflate,
+ other => PackMethod::Unknown(other),
+ }
+}
+
/// Dispatches a packed payload to the decoder for its method and verifies the
/// result is exactly `unpacked_size` bytes long.
///
/// `key16` is only consulted by the XOR-based methods; others ignore it.
fn decode_payload(
    packed: &[u8],
    method: PackMethod,
    key16: u16,
    unpacked_size: u32,
) -> Result<Vec<u8>> {
    let expected = usize::try_from(unpacked_size).map_err(|_| Error::IntegerOverflow)?;

    let out = match method {
        PackMethod::None => {
            if packed.len() < expected {
                return Err(Error::OutputSizeMismatch {
                    expected: unpacked_size,
                    got: u32::try_from(packed.len()).unwrap_or(u32::MAX),
                });
            }
            // Stored uncompressed: copy the declared prefix verbatim.
            packed[..expected].to_vec()
        }
        PackMethod::XorOnly => {
            if packed.len() < expected {
                return Err(Error::OutputSizeMismatch {
                    expected: unpacked_size,
                    got: u32::try_from(packed.len()).unwrap_or(u32::MAX),
                });
            }
            xor_stream(&packed[..expected], key16)
        }
        PackMethod::Lzss => lzss_decompress_simple(packed, expected)?,
        PackMethod::XorLzss => {
            // XOR layer is applied over the whole compressed stream first.
            let decrypted = xor_stream(packed, key16);
            lzss_decompress_simple(&decrypted, expected)?
        }
        PackMethod::LzssHuffman => lzss_huffman_decompress(packed, expected)?,
        PackMethod::XorLzssHuffman => {
            let decrypted = xor_stream(packed, key16);
            lzss_huffman_decompress(&decrypted, expected)?
        }
        PackMethod::Deflate => decode_deflate(packed)?,
        PackMethod::Unknown(raw) => return Err(Error::UnsupportedMethod { raw }),
    };

    // Enforce the declared size even for decoders that stop on their own.
    if out.len() != expected {
        return Err(Error::OutputSizeMismatch {
            expected: unpacked_size,
            got: u32::try_from(out.len()).unwrap_or(u32::MAX),
        });
    }

    Ok(out)
}
+
/// Inflates a deflate payload: tries raw deflate first, then falls back to a
/// zlib-wrapped stream before reporting failure.
fn decode_deflate(packed: &[u8]) -> Result<Vec<u8>> {
    let mut out = Vec::new();
    let mut decoder = DeflateDecoder::new(packed);
    if decoder.read_to_end(&mut out).is_ok() {
        return Ok(out);
    }

    // Raw deflate failed — some archives store a zlib header; retry.
    out.clear();
    let mut zlib = ZlibDecoder::new(packed);
    zlib.read_to_end(&mut out)
        .map_err(|_| Error::DecompressionFailed("deflate"))?;
    Ok(out)
}
+
/// Applies the archive's two-byte rolling XOR cipher to `data`.
///
/// The keystream depends only on `key16` (never on the data), so applying
/// the same key twice restores the original bytes — the transform is its
/// own inverse, which is how both encryption and decryption use it.
fn xor_stream(data: &[u8], key16: u16) -> Vec<u8> {
    let [mut lo, mut hi] = key16.to_le_bytes();

    data.iter()
        .map(|&byte| {
            lo = hi ^ lo.wrapping_shl(1);
            let masked = byte ^ lo;
            hi = lo ^ (hi >> 1);
            masked
        })
        .collect()
}
+
/// Classic LZSS decompression with a 4 KiB space-initialized ring buffer.
///
/// Control bytes supply 8 flag bits (LSB first): flag 1 = literal byte,
/// flag 0 = (offset, length) back-reference into the ring buffer. Truncated
/// input stops the loop; the final size check turns that into an error.
fn lzss_decompress_simple(data: &[u8], expected_size: usize) -> Result<Vec<u8>> {
    let mut ring = [0x20u8; 0x1000];
    // 0xFEE is the conventional LZSS start position (N - F in the original
    // Okumura layout).
    let mut ring_pos = 0xFEEusize;
    let mut out = Vec::with_capacity(expected_size);
    let mut in_pos = 0usize;

    let mut control = 0u8;
    let mut bits_left = 0u8;

    while out.len() < expected_size {
        if bits_left == 0 {
            let Some(byte) = data.get(in_pos).copied() else {
                break;
            };
            control = byte;
            in_pos += 1;
            bits_left = 8;
        }

        if (control & 1) != 0 {
            // Literal: copy one byte and record it in the ring buffer.
            let Some(byte) = data.get(in_pos).copied() else {
                break;
            };
            in_pos += 1;

            out.push(byte);
            ring[ring_pos] = byte;
            ring_pos = (ring_pos + 1) & 0x0FFF;
        } else {
            // Back-reference: 12-bit ring offset, 4-bit length (+3 bias).
            let (Some(low), Some(high)) =
                (data.get(in_pos).copied(), data.get(in_pos + 1).copied())
            else {
                break;
            };
            in_pos += 2;

            let offset = usize::from(low) | (usize::from(high & 0xF0) << 4);
            let length = usize::from((high & 0x0F) + 3);

            for step in 0..length {
                let byte = ring[(offset + step) & 0x0FFF];
                out.push(byte);
                ring[ring_pos] = byte;
                ring_pos = (ring_pos + 1) & 0x0FFF;
                if out.len() >= expected_size {
                    break;
                }
            }
        }

        control >>= 1;
        bits_left -= 1;
    }

    if out.len() != expected_size {
        return Err(Error::DecompressionFailed("lzss-simple"));
    }

    Ok(out)
}
+
// Parameters for the LZSS + adaptive Huffman decoder (LZHUF-style layout).
const LZH_N: usize = 4096; // ring buffer size
const LZH_F: usize = 60; // lookahead window — presumably max match length; TODO confirm
const LZH_THRESHOLD: usize = 2; // shortest encoded match length bias
const LZH_N_CHAR: usize = 256 - LZH_THRESHOLD + LZH_F; // leaf symbols: literals + match lengths
const LZH_T: usize = LZH_N_CHAR * 2 - 1; // total nodes in the Huffman tree
const LZH_R: usize = LZH_T - 1; // root node index
const LZH_MAX_FREQ: u16 = 0x8000; // frequency ceiling that triggers a tree rebuild
+
/// Decompresses an LZSS + adaptive-Huffman (LZHUF-style) stream into exactly
/// `expected_size` bytes.
fn lzss_huffman_decompress(data: &[u8], expected_size: usize) -> Result<Vec<u8>> {
    let mut decoder = LzhDecoder::new(data);
    decoder.decode(expected_size)
}
+
/// State for the LZSS + adaptive Huffman decoder.
struct LzhDecoder<'a> {
    bit_reader: BitReader<'a>,
    // LZSS ring buffer, space-initialized.
    text: [u8; LZH_N],
    // Adaptive Huffman tree: node frequencies, parent links and child links.
    freq: [u16; LZH_T + 1],
    parent: [usize; LZH_T + LZH_N_CHAR],
    son: [usize; LZH_T],
    // Static tables decoding the upper 6 bits of a match position.
    d_code: [u8; 256],
    d_len: [u8; 256],
    ring_pos: usize,
}
+
impl<'a> LzhDecoder<'a> {
    /// Builds a decoder in the canonical LZHUF start state: ring buffer full
    /// of spaces, balanced initial Huffman tree, static position tables.
    fn new(data: &'a [u8]) -> Self {
        let mut decoder = Self {
            bit_reader: BitReader::new(data),
            text: [0x20u8; LZH_N],
            freq: [0u16; LZH_T + 1],
            parent: [0usize; LZH_T + LZH_N_CHAR],
            son: [0usize; LZH_T],
            d_code: [0u8; 256],
            d_len: [0u8; 256],
            ring_pos: LZH_N - LZH_F,
        };
        decoder.init_tables();
        decoder.start_huff();
        decoder
    }

    /// Main decode loop: symbols < 256 are literals; larger symbols encode a
    /// match length (symbol - 253) whose position follows in the stream.
    fn decode(&mut self, expected_size: usize) -> Result<Vec<u8>> {
        let mut out = Vec::with_capacity(expected_size);

        while out.len() < expected_size {
            let c = self.decode_char();
            if c < 256 {
                let byte = c as u8;
                out.push(byte);
                self.text[self.ring_pos] = byte;
                self.ring_pos = (self.ring_pos + 1) & (LZH_N - 1);
            } else {
                // Back-reference relative to the current ring position.
                let mut offset = self.decode_position();
                offset = (self.ring_pos.wrapping_sub(offset).wrapping_sub(1)) & (LZH_N - 1);
                let mut length = c.saturating_sub(253);

                while length > 0 && out.len() < expected_size {
                    let byte = self.text[offset];
                    out.push(byte);
                    self.text[self.ring_pos] = byte;
                    self.ring_pos = (self.ring_pos + 1) & (LZH_N - 1);
                    offset = (offset + 1) & (LZH_N - 1);
                    length -= 1;
                }
            }
        }

        if out.len() != expected_size {
            return Err(Error::DecompressionFailed("lzss-huffman"));
        }
        Ok(out)
    }

    /// Fills the static position-decoding tables: `d_code` maps a peeked
    /// byte to the upper bits of a position, `d_len` to the code length.
    fn init_tables(&mut self) {
        let d_code_group_counts = [1usize, 3, 8, 12, 24, 16];
        let d_len_group_counts = [32usize, 48, 64, 48, 48, 16];

        let mut group_index = 0u8;
        let mut idx = 0usize;
        let mut run = 32usize;
        for count in d_code_group_counts {
            for _ in 0..count {
                for _ in 0..run {
                    self.d_code[idx] = group_index;
                    idx += 1;
                }
                group_index = group_index.wrapping_add(1);
            }
            run >>= 1;
        }

        let mut len = 3u8;
        idx = 0;
        for count in d_len_group_counts {
            for _ in 0..count {
                self.d_len[idx] = len;
                idx += 1;
            }
            len = len.saturating_add(1);
        }
    }

    /// Builds the initial balanced Huffman tree: one leaf per symbol with
    /// frequency 1, internal nodes summing their children, root at LZH_R.
    fn start_huff(&mut self) {
        for i in 0..LZH_N_CHAR {
            self.freq[i] = 1;
            self.son[i] = i + LZH_T;
            self.parent[i + LZH_T] = i;
        }

        let mut i = 0usize;
        let mut j = LZH_N_CHAR;
        while j <= LZH_R {
            self.freq[j] = self.freq[i].saturating_add(self.freq[i + 1]);
            self.son[j] = i;
            self.parent[i] = j;
            self.parent[i + 1] = j;
            i += 2;
            j += 1;
        }

        // Sentinel keeps the swap search in `update` from running past freq.
        self.freq[LZH_T] = u16::MAX;
        self.parent[LZH_R] = 0;
    }

    /// Walks the tree from the root, one input bit per level, until reaching
    /// a leaf; then bumps that symbol's frequency.
    fn decode_char(&mut self) -> usize {
        let mut node = self.son[LZH_R];
        while node < LZH_T {
            let bit = usize::from(self.bit_reader.read_bit_or_zero());
            node = self.son[node + bit];
        }

        let c = node - LZH_T;
        self.update(c);
        c
    }

    /// Decodes a match position: 8 peeked bits select the upper 6 bits via
    /// `d_code` and the remaining code length via `d_len`; the low 6 bits of
    /// the peeked byte complete the position.
    fn decode_position(&mut self) -> usize {
        let i = self.bit_reader.read_bits_or_zero(8) as usize;
        let mut c = usize::from(self.d_code[i]) << 6;
        let mut j = usize::from(self.d_len[i]).saturating_sub(2);

        while j > 0 {
            j -= 1;
            c |= usize::from(self.bit_reader.read_bit_or_zero()) << j;
        }

        c | (i & 0x3F)
    }

    /// Adaptive update: increments frequencies from the symbol's leaf to the
    /// root, swapping nodes to restore the sibling ordering invariant.
    fn update(&mut self, c: usize) {
        if self.freq[LZH_R] == LZH_MAX_FREQ {
            self.reconstruct();
        }

        let mut current = self.parent[c + LZH_T];
        loop {
            self.freq[current] = self.freq[current].saturating_add(1);
            let freq = self.freq[current];

            // If ordering is violated, find the furthest node the increased
            // frequency now outranks and swap subtrees with it.
            if current + 1 < self.freq.len() && freq > self.freq[current + 1] {
                let mut swap_idx = current + 1;
                while swap_idx + 1 < self.freq.len() && freq > self.freq[swap_idx + 1] {
                    swap_idx += 1;
                }

                self.freq.swap(current, swap_idx);

                let left = self.son[current];
                let right = self.son[swap_idx];
                self.son[current] = right;
                self.son[swap_idx] = left;

                self.parent[left] = swap_idx;
                if left < LZH_T {
                    // Internal node: its sibling child shares the parent.
                    self.parent[left + 1] = swap_idx;
                }

                self.parent[right] = current;
                if right < LZH_T {
                    self.parent[right + 1] = current;
                }

                current = swap_idx;
            }

            current = self.parent[current];
            if current == 0 {
                break;
            }
        }
    }

    /// Rebuilds the whole tree when the root frequency saturates: halves all
    /// leaf frequencies, then re-links internal nodes in sorted order.
    fn reconstruct(&mut self) {
        // Collect the leaves, halving their frequencies (rounding up).
        let mut j = 0usize;
        for i in 0..LZH_T {
            if self.son[i] >= LZH_T {
                self.freq[j] = (self.freq[i].saturating_add(1)) / 2;
                self.son[j] = self.son[i];
                j += 1;
            }
        }

        // Rebuild internal nodes, inserting each sum at its sorted slot.
        let mut i = 0usize;
        let mut current = LZH_N_CHAR;
        while current < LZH_T {
            let sum = self.freq[i].saturating_add(self.freq[i + 1]);
            self.freq[current] = sum;

            let mut insert_at = current;
            while insert_at > 0 && sum < self.freq[insert_at - 1] {
                insert_at -= 1;
            }

            for move_idx in (insert_at..current).rev() {
                self.freq[move_idx + 1] = self.freq[move_idx];
                self.son[move_idx + 1] = self.son[move_idx];
            }

            self.freq[insert_at] = sum;
            self.son[insert_at] = i;

            i += 2;
            current += 1;
        }

        // Recompute every parent link from the child links.
        for idx in 0..LZH_T {
            let node = self.son[idx];
            self.parent[node] = idx;
            if node < LZH_T {
                self.parent[node + 1] = idx;
            }
        }

        self.freq[LZH_T] = u16::MAX;
        self.parent[LZH_R] = 0;
    }
}
+
/// MSB-first bit reader over a byte slice.
///
/// Reads past the end of the input yield zero bits instead of failing,
/// matching the tolerant behaviour the LZH decoder relies on.
struct BitReader<'a> {
    data: &'a [u8],
    byte_pos: usize,
    bit_mask: u8,
}

impl<'a> BitReader<'a> {
    fn new(data: &'a [u8]) -> Self {
        Self {
            data,
            byte_pos: 0,
            bit_mask: 0x80,
        }
    }

    /// Returns the next bit (most significant first), or 0 once exhausted.
    fn read_bit_or_zero(&mut self) -> u8 {
        let byte = match self.data.get(self.byte_pos) {
            Some(&b) => b,
            None => return 0,
        };

        let bit = u8::from(byte & self.bit_mask != 0);
        self.bit_mask >>= 1;
        if self.bit_mask == 0 {
            self.bit_mask = 0x80;
            self.byte_pos = self.byte_pos.saturating_add(1);
        }
        bit
    }

    /// Reads `bits` bits MSB-first into a `u32`; missing bits read as 0.
    fn read_bits_or_zero(&mut self, bits: usize) -> u32 {
        (0..bits).fold(0u32, |acc, _| (acc << 1) | u32::from(self.read_bit_or_zero()))
    }
}
+
/// Decodes raw name bytes as Latin-1: every byte maps to the Unicode code
/// point of the same value.
fn decode_name(name: &[u8]) -> String {
    name.iter().copied().map(char::from).collect()
}
+
/// Returns the portion of a fixed 12-byte name field before the first NUL;
/// a field with no NUL yields all 12 bytes.
fn c_name_bytes(raw: &[u8; 12]) -> &[u8] {
    match raw.iter().position(|&b| b == 0) {
        Some(len) => &raw[..len],
        None => &raw[..],
    }
}
+
/// Compares two NUL-stripped name byte strings.
///
/// The required ordering — byte-wise lexicographic with length as the
/// tie-breaker — is exactly what `Ord` already defines for slices, so the
/// hand-rolled comparison loop is replaced by a delegation to `slice::cmp`.
fn cmp_c_string(a: &[u8], b: &[u8]) -> Ordering {
    a.cmp(b)
}
+
+fn needs_xor_key(method: PackMethod) -> bool {
+ matches!(
+ method,
+ PackMethod::XorOnly | PackMethod::XorLzss | PackMethod::XorLzssHuffman
+ )
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::any::Any;
+ use std::panic::{catch_unwind, AssertUnwindSafe};
+ use std::path::PathBuf;
+
+ fn collect_files_recursive(root: &Path, out: &mut Vec<PathBuf>) {
+ let Ok(entries) = fs::read_dir(root) else {
+ return;
+ };
+ for entry in entries.flatten() {
+ let path = entry.path();
+ if path.is_dir() {
+ collect_files_recursive(&path, out);
+ } else if path.is_file() {
+ out.push(path);
+ }
+ }
+ }
+
    /// Collects candidate archives under `<workspace>/testdata/rsli`, keeping
    /// only files that start with the RsLi signature bytes `NL\0\x01`.
    fn rsli_test_files() -> Vec<PathBuf> {
        let root = Path::new(env!("CARGO_MANIFEST_DIR"))
            .join("..")
            .join("..")
            .join("testdata")
            .join("rsli");
        let mut files = Vec::new();
        collect_files_recursive(&root, &mut files);
        // Sort for deterministic test ordering across platforms.
        files.sort();
        files
            .into_iter()
            .filter(|path| {
                fs::read(path)
                    .map(|data| data.get(0..4) == Some(b"NL\0\x01"))
                    .unwrap_or(false)
            })
            .collect()
    }
+
+ fn panic_message(payload: Box<dyn Any + Send>) -> String {
+ let any = payload.as_ref();
+ if let Some(message) = any.downcast_ref::<String>() {
+ return message.clone();
+ }
+ if let Some(message) = any.downcast_ref::<&str>() {
+ return (*message).to_string();
+ }
+ String::from("panic without message")
+ }
+
    /// End-to-end validation over every archive in the test corpus: each
    /// entry is decoded via all public load paths (which must agree), looked
    /// up by name, and finally the archive is rebuilt byte-for-byte from the
    /// parsed metadata. Per-file panics are caught so one bad archive does
    /// not hide failures in the others; a summary is printed at the end.
    #[test]
    fn rsli_read_unpack_and_repack_all_files() {
        let files = rsli_test_files();
        assert!(!files.is_empty(), "testdata/rsli contains no RsLi archives");

        let checked = files.len();
        let mut success = 0usize;
        let mut failures = Vec::new();

        for path in files {
            let display_path = path.display().to_string();
            // Each archive is validated inside catch_unwind so assertion
            // failures are collected rather than aborting the whole test.
            let result = catch_unwind(AssertUnwindSafe(|| {
                let original = fs::read(&path).expect("failed to read archive");
                let library = Library::open_path(&path)
                    .unwrap_or_else(|err| panic!("failed to open {}: {err}", path.display()));

                let count = library.entry_count();
                assert_eq!(
                    count,
                    library.entries().count(),
                    "entry count mismatch: {}",
                    path.display()
                );

                for idx in 0..count {
                    let id = EntryId(idx as u32);
                    let meta_ref = library
                        .get(id)
                        .unwrap_or_else(|| panic!("missing entry #{idx} in {}", path.display()));

                    // `load` is the reference decode all other paths must match.
                    let loaded = library.load(id).unwrap_or_else(|err| {
                        panic!("load failed for {} entry #{idx}: {err}", path.display())
                    });

                    let packed = library.load_packed(id).unwrap_or_else(|err| {
                        panic!(
                            "load_packed failed for {} entry #{idx}: {err}",
                            path.display()
                        )
                    });
                    let unpacked = library.unpack(&packed).unwrap_or_else(|err| {
                        panic!("unpack failed for {} entry #{idx}: {err}", path.display())
                    });
                    assert_eq!(
                        loaded,
                        unpacked,
                        "load != unpack in {} entry #{idx}",
                        path.display()
                    );

                    let mut out = Vec::new();
                    let written = library.load_into(id, &mut out).unwrap_or_else(|err| {
                        panic!(
                            "load_into failed for {} entry #{idx}: {err}",
                            path.display()
                        )
                    });
                    assert_eq!(
                        written,
                        loaded.len(),
                        "load_into size mismatch in {} entry #{idx}",
                        path.display()
                    );
                    assert_eq!(
                        out,
                        loaded,
                        "load_into payload mismatch in {} entry #{idx}",
                        path.display()
                    );

                    let fast = library.load_fast(id).unwrap_or_else(|err| {
                        panic!(
                            "load_fast failed for {} entry #{idx}: {err}",
                            path.display()
                        )
                    });
                    assert_eq!(
                        fast.as_slice(),
                        loaded.as_slice(),
                        "load_fast mismatch in {} entry #{idx}",
                        path.display()
                    );

                    // Name lookup may return a different entry carrying the
                    // same (duplicate) name, so only the name is compared.
                    let found = library.find(&meta_ref.meta.name).unwrap_or_else(|| {
                        panic!(
                            "find failed for '{}' in {}",
                            meta_ref.meta.name,
                            path.display()
                        )
                    });
                    let found_meta = library.get(found).expect("find returned invalid entry id");
                    assert_eq!(
                        found_meta.meta.name,
                        meta_ref.meta.name,
                        "find returned a different entry in {}",
                        path.display()
                    );
                }

                // Full byte-for-byte roundtrip through the rebuilder.
                let rebuilt = library
                    .rebuild_from_parsed_metadata()
                    .unwrap_or_else(|err| panic!("rebuild failed for {}: {err}", path.display()));
                assert_eq!(
                    rebuilt,
                    original,
                    "byte-to-byte roundtrip mismatch for {}",
                    path.display()
                );
            }));

            match result {
                Ok(()) => success += 1,
                Err(payload) => {
                    failures.push(format!("{}: {}", display_path, panic_message(payload)));
                }
            }
        }

        let failed = failures.len();
        eprintln!(
            "RsLi summary: checked={}, success={}, failed={}",
            checked, success, failed
        );
        if !failures.is_empty() {
            panic!(
                "RsLi validation failed.\nsummary: checked={}, success={}, failed={}\n{}",
                checked,
                success,
                failed,
                failures.join("\n")
            );
        }
    }
+}