author    Valentin Popov <valentin@popov.link>  2024-01-08 00:21:28 +0300
committer Valentin Popov <valentin@popov.link>  2024-01-08 00:21:28 +0300
commit    1b6a04ca5504955c571d1c97504fb45ea0befee4 (patch)
tree      7579f518b23313e8a9748a88ab6173d5e030b227 /vendor/exr/src/meta
parent    5ecd8cf2cba827454317368b68571df0d13d7842 (diff)
Initial vendor packages
Signed-off-by: Valentin Popov <valentin@popov.link>
Diffstat (limited to 'vendor/exr/src/meta')
-rw-r--r--  vendor/exr/src/meta/attribute.rs  2226
-rw-r--r--  vendor/exr/src/meta/header.rs     1197
-rw-r--r--  vendor/exr/src/meta/mod.rs         821
3 files changed, 4244 insertions, 0 deletions
diff --git a/vendor/exr/src/meta/attribute.rs b/vendor/exr/src/meta/attribute.rs
new file mode 100644
index 0000000..5b71e82
--- /dev/null
+++ b/vendor/exr/src/meta/attribute.rs
@@ -0,0 +1,2226 @@
+
+//! Contains all meta data attributes.
+//! Each layer can have any number of [`Attribute`]s, including custom attributes.
+
+use smallvec::SmallVec;
+
+
+/// Contains one of all possible attributes.
+/// Includes a variant for custom attributes.
+#[derive(Debug, Clone, PartialEq)]
+pub enum AttributeValue {
+
+ /// Channel meta data.
+ ChannelList(ChannelList),
+
+ /// Color space definition.
+ Chromaticities(Chromaticities),
+
+ /// Compression method of this layer.
+ Compression(Compression),
+
+ /// This image is an environment map.
+ EnvironmentMap(EnvironmentMap),
+
+ /// Film roll information.
+ KeyCode(KeyCode),
+
+ /// Order of the blocks in the file.
+ LineOrder(LineOrder),
+
+ /// A 3x3 matrix of floats.
+ Matrix3x3(Matrix3x3),
+
+ /// A 4x4 matrix of floats.
+ Matrix4x4(Matrix4x4),
+
+ /// 8-bit rgba Preview of the image.
+ Preview(Preview),
+
+ /// An integer dividend and divisor.
+ Rational(Rational),
+
+ /// Deep or flat and tiled or scan line.
+ BlockType(BlockType),
+
+ /// List of texts.
+ TextVector(Vec<Text>),
+
+ /// How to tile up the image.
+ TileDescription(TileDescription),
+
+ /// Timepoint and more.
+ TimeCode(TimeCode),
+
+ /// A string of byte-chars.
+ Text(Text),
+
+ /// 64-bit float
+ F64(f64),
+
+ /// 32-bit float
+ F32(f32),
+
+ /// 32-bit signed integer
+ I32(i32),
+
+ /// 2D integer rectangle.
+ IntegerBounds(IntegerBounds),
+
+ /// 2D float rectangle.
+ FloatRect(FloatRect),
+
+ /// 2D integer vector.
+ IntVec2(Vec2<i32>),
+
+ /// 2D float vector.
+ FloatVec2(Vec2<f32>),
+
+ /// 3D integer vector.
+ IntVec3((i32, i32, i32)),
+
+ /// 3D float vector.
+ FloatVec3((f32, f32, f32)),
+
+ /// A custom attribute.
+ /// Contains the type name of this value.
+ Custom {
+
+ /// The name of the type this attribute is an instance of.
+ kind: Text,
+
+ /// The raw bytes of the value, stored in little-endian byte order.
+ /// Use the `exr::io::Data` trait to extract binary values from this vector.
+ bytes: Vec<u8>
+ },
+}
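+
+// A minimal sketch of how these variants are used. The type name "myCustomType"
+// and the raw bytes are illustrative placeholders only.
+#[test]
+fn sketch_attribute_value_variants() {
+ let built_in = AttributeValue::I32(7);
+
+ let custom = AttributeValue::Custom {
+ kind: Text::new_or_panic("myCustomType"),
+ bytes: vec![0, 0, 0, 1],
+ };
+
+ assert_eq!(built_in.byte_size(), 4); // a single little-endian i32
+ assert_eq!(custom.byte_size(), 4); // custom attributes store their raw bytes directly
+ assert_eq!(custom.kind_name(), "myCustomType".as_bytes());
+}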
+
+/// A byte array with each byte being a char.
+/// This is not UTF-8 and must be constructed from a standard string.
+// TODO is this ascii? use a rust ascii crate?
+#[derive(Clone, PartialEq, Ord, PartialOrd, Default)] // hash implemented manually
+pub struct Text {
+ bytes: TextBytes,
+}
+
+/// Contains time information for this frame within a sequence.
+/// Also defines methods to encode this information into a
+/// `TV60`, `TV50` or `Film24` bit sequence, packed into `u32`.
+///
+/// Satisfies the [SMPTE standard 12M-1999](https://en.wikipedia.org/wiki/SMPTE_timecode).
+/// For more in-depth information, see [philrees.co.uk/timecode](http://www.philrees.co.uk/articles/timecode.htm).
+#[derive(Copy, Debug, Clone, Eq, PartialEq, Hash, Default)]
+pub struct TimeCode {
+
+ /// Hours 0 - 23 are valid.
+ pub hours: u8,
+
+ /// Minutes 0 - 59 are valid.
+ pub minutes: u8,
+
+ /// Seconds 0 - 59 are valid.
+ pub seconds: u8,
+
+ /// Frame Indices 0 - 29 are valid.
+ pub frame: u8,
+
+ /// Whether this is a drop frame.
+ pub drop_frame: bool,
+
+ /// Whether this is a color frame.
+ pub color_frame: bool,
+
+ /// Field Phase.
+ pub field_phase: bool,
+
+ /// Flags for `TimeCode.binary_groups`.
+ pub binary_group_flags: [bool; 3],
+
+ /// The user-defined control codes.
+ /// Every entry in this array can use at most 4 bits.
+ /// This results in a maximum value of 15, including 0, for each `u8`.
+ pub binary_groups: [u8; 8]
+}
+
+/// Layer type, specifying the block type and whether the data is deep.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
+pub enum BlockType {
+
+ /// Corresponds to the string value `scanlineimage`.
+ ScanLine,
+
+ /// Corresponds to the string value `tiledimage`.
+ Tile,
+
+ /// Corresponds to the string value `deepscanline`.
+ DeepScanLine,
+
+ /// Corresponds to the string value `deeptile`.
+ DeepTile,
+}
+
+/// The string literals used to represent a `BlockType` in a file.
+pub mod block_type_strings {
+
+ /// Type attribute text value of flat scan lines
+ pub const SCAN_LINE: &'static [u8] = b"scanlineimage";
+
+ /// Type attribute text value of flat tiles
+ pub const TILE: &'static [u8] = b"tiledimage";
+
+ /// Type attribute text value of deep scan lines
+ pub const DEEP_SCAN_LINE: &'static [u8] = b"deepscanline";
+
+ /// Type attribute text value of deep tiles
+ pub const DEEP_TILE: &'static [u8] = b"deeptile";
+}
+
+
+pub use crate::compression::Compression;
+
+/// The integer rectangle describing where a layer is placed on the infinite 2D global space.
+pub type DataWindow = IntegerBounds;
+
+/// The integer rectangle limiting which part of the infinite 2D global space should be displayed.
+pub type DisplayWindow = IntegerBounds;
+
+/// An integer dividend and divisor, together forming a ratio.
+pub type Rational = (i32, u32);
+
+/// A float matrix with four rows and four columns.
+pub type Matrix4x4 = [f32; 4*4];
+
+/// A float matrix with three rows and three columns.
+pub type Matrix3x3 = [f32; 3*3];
+
+/// A rectangular section anywhere in 2D integer space.
+/// Valid from the minimum coordinate (inclusive) `-1,073,741,822`
+/// to the maximum coordinate (inclusive) `1,073,741,822`, which is `i32::MAX/2 - 1`.
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Default, Hash)]
+pub struct IntegerBounds {
+
+ /// The top left corner of this rectangle.
+ /// The rectangle includes this pixel if the size is not zero.
+ pub position: Vec2<i32>,
+
+ /// How many pixels to include in this rectangle.
+ /// Extends to the right and downwards.
+ /// Does not include the actual boundary, just like `Vec::len()`.
+ pub size: Vec2<usize>,
+}
+
+/// A rectangular section anywhere in 2D float space.
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub struct FloatRect {
+
+ /// The top left corner location of the rectangle (inclusive)
+ pub min: Vec2<f32>,
+
+ /// The bottom right corner location of the rectangle (inclusive)
+ pub max: Vec2<f32>
+}
+
+/// A list of channels. Channels must be sorted alphabetically.
+#[derive(Clone, Debug, Eq, PartialEq, Hash)]
+pub struct ChannelList {
+
+ /// The channels in this list.
+ pub list: SmallVec<[ChannelDescription; 5]>,
+
+ /// The number of bytes that one pixel in this image needs.
+ // FIXME this needs to account for subsampling anywhere?
+ pub bytes_per_pixel: usize, // FIXME only makes sense for flat images!
+
+ /// The sample type of all channels, if all channels have the same type.
+ pub uniform_sample_type: Option<SampleType>,
+}
+
+/// A single channel in a layer.
+/// Does not contain the actual pixel data,
+/// but instead merely describes it.
+#[derive(Clone, Debug, Eq, PartialEq, Hash)]
+pub struct ChannelDescription {
+
+ /// One of "R", "G", or "B" most of the time.
+ pub name: Text,
+
+ /// U32, F16 or F32.
+ pub sample_type: SampleType,
+
+ /// This attribute only tells lossy compression methods
+ /// whether this value should be quantized exponentially or linearly.
+ ///
+ /// Should be `false` for red, green, or blue channels.
+ /// Should be `true` for hue, chroma, saturation, or alpha channels.
+ pub quantize_linearly: bool,
+
+ /// How many of the samples are skipped compared to the other channels in this layer.
+ ///
+ /// Can be used for chroma subsampling for manual lossy data compression.
+ /// Values other than 1 are allowed only in flat, scan-line based images.
+ /// If an image is deep or tiled, x and y sampling rates for all of its channels must be 1.
+ pub sampling: Vec2<usize>,
+}
+
+/// The type of samples in this channel.
+#[derive(Clone, Debug, Eq, PartialEq, Copy, Hash)]
+pub enum SampleType {
+
+ /// This channel contains 32-bit unsigned int values.
+ U32,
+
+ /// This channel contains 16-bit float values.
+ F16,
+
+ /// This channel contains 32-bit float values.
+ F32,
+}
+
+/// The color space of the pixels.
+///
+/// If a file doesn't have a chromaticities attribute, display software
+/// should assume that the file's primaries and the white point match `Rec. ITU-R BT.709-3`.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub struct Chromaticities {
+
+ /// "Red" location on the CIE XY chromaticity diagram.
+ pub red: Vec2<f32>,
+
+ /// "Green" location on the CIE XY chromaticity diagram.
+ pub green: Vec2<f32>,
+
+ /// "Blue" location on the CIE XY chromaticity diagram.
+ pub blue: Vec2<f32>,
+
+ /// "White" location on the CIE XY chromaticity diagram.
+ pub white: Vec2<f32>
+}
+
+/// If this attribute is present, it describes
+/// how this texture should be projected onto an environment.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
+pub enum EnvironmentMap {
+
+ /// This image is an environment map projected like a world map.
+ LatitudeLongitude,
+
+ /// This image contains the six sides of a cube.
+ Cube,
+}
+
+/// Uniquely identifies a motion picture film frame.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
+pub struct KeyCode {
+
+ /// Identifies a film manufacturer.
+ pub film_manufacturer_code: i32,
+
+ /// Identifies a film type.
+ pub film_type: i32,
+
+ /// Specifies the film roll prefix.
+ pub film_roll_prefix: i32,
+
+ /// Specifies the film count.
+ pub count: i32,
+
+ /// Specifies the perforation offset.
+ pub perforation_offset: i32,
+
+ /// Specifies the perforation count of each single frame.
+ pub perforations_per_frame: i32,
+
+ /// Specifies the perforation count of each single film.
+ pub perforations_per_count: i32,
+}
+
+/// In what order the `Block`s of pixel data appear in a file.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
+pub enum LineOrder {
+
+ /// The blocks in the file are ordered in descending rows from left to right.
+ /// When compressing in parallel, this option requires potentially large amounts of memory.
+ /// In that case, use `LineOrder::Unspecified` for best performance.
+ Increasing,
+
+ /// The blocks in the file are ordered in ascending rows from right to left.
+ /// When compressing in parallel, this option requires potentially large amounts of memory.
+ /// In that case, use `LineOrder::Unspecified` for best performance.
+ Decreasing,
+
+ /// The blocks are not ordered in a specific way inside the file.
+ /// In multi-core file writing, this option offers the best performance.
+ Unspecified,
+}
+
+/// A small `rgba` image of `i8` values that approximates the real exr image.
+// TODO is this linear?
+#[derive(Clone, Eq, PartialEq)]
+pub struct Preview {
+
+ /// The dimensions of the preview image.
+ pub size: Vec2<usize>,
+
+ /// An array with a length of 4 × width × height.
+ /// The pixels are stored in `LineOrder::Increasing`.
+ /// Each pixel consists of the four `u8` values red, green, blue, alpha.
+ pub pixel_data: Vec<i8>,
+}
+
+/// Describes how the layer is divided into tiles.
+/// Specifies the size of each tile in the image
+/// and whether this image contains multiple resolution levels.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
+pub struct TileDescription {
+
+ /// The size of each tile.
+ /// Stays the same number of pixels across all levels.
+ pub tile_size: Vec2<usize>,
+
+ /// Whether to also store smaller versions of the image.
+ pub level_mode: LevelMode,
+
+ /// Whether to round up or down when calculating Mip/Rip levels.
+ pub rounding_mode: RoundingMode,
+}
+
+/// Whether to also store increasingly smaller versions of the original image.
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
+pub enum LevelMode {
+
+ /// Only a single level.
+ Singular,
+
+ /// Levels with a similar aspect ratio.
+ MipMap,
+
+ /// Levels with all possible aspect ratios.
+ RipMap,
+}
+
+
+/// The raw bytes that make up a string in an exr file.
+/// Each `u8` is a single char.
+// will mostly be "R", "G", "B" or "deepscanlineimage"
+pub type TextBytes = SmallVec<[u8; 24]>;
+
+/// A byte slice, interpreted as text
+pub type TextSlice = [u8];
+
+
+use crate::io::*;
+use crate::meta::{sequence_end};
+use crate::error::*;
+use crate::math::{RoundingMode, Vec2};
+use half::f16;
+use std::convert::{TryFrom};
+use std::borrow::Borrow;
+use std::hash::{Hash, Hasher};
+use bit_field::BitField;
+
+
+fn invalid_type() -> Error {
+ Error::invalid("attribute type mismatch")
+}
+
+
+impl Text {
+
+ /// Create a `Text` from an `str` reference.
+ /// Returns `None` if this string contains unsupported chars.
+ pub fn new_or_none(string: impl AsRef<str>) -> Option<Self> {
+ let vec : Option<TextBytes> = string.as_ref().chars()
+ .map(|character| u8::try_from(character as u64).ok())
+ .collect();
+
+ vec.map(Self::from_bytes_unchecked)
+ }
+
+ /// Create a `Text` from an `str` reference.
+ /// Panics if this string contains unsupported chars.
+ pub fn new_or_panic(string: impl AsRef<str>) -> Self {
+ Self::new_or_none(string).expect("exr::Text contains unsupported characters")
+ }
+
+ /// Create a `Text` from a slice of bytes,
+ /// without checking any of the bytes.
+ pub fn from_slice_unchecked(text: &TextSlice) -> Self {
+ Self::from_bytes_unchecked(SmallVec::from_slice(text))
+ }
+
+ /// Create a `Text` from the specified bytes object,
+ /// without checking any of the bytes.
+ pub fn from_bytes_unchecked(bytes: TextBytes) -> Self {
+ Text { bytes }
+ }
+
+ /// The internal ASCII bytes this text is made of.
+ pub fn as_slice(&self) -> &TextSlice {
+ self.bytes.as_slice()
+ }
+
+ /// Check whether this string is valid, adjusting `long_names` if required.
+ /// If `long_names` is not provided, text length will be entirely unchecked.
+ pub fn validate(&self, null_terminated: bool, long_names: Option<&mut bool>) -> UnitResult {
+ Self::validate_bytes(self.as_slice(), null_terminated, long_names)
+ }
+
+ /// Check whether some bytes are valid, adjusting `long_names` if required.
+ /// If `long_names` is not provided, text length will be entirely unchecked.
+ pub fn validate_bytes(text: &TextSlice, null_terminated: bool, long_names: Option<&mut bool>) -> UnitResult {
+ if null_terminated && text.is_empty() {
+ return Err(Error::invalid("text must not be empty"));
+ }
+
+ if let Some(long) = long_names {
+ if text.len() >= 256 { return Err(Error::invalid("text must not be longer than 255")); }
+ if text.len() >= 32 { *long = true; }
+ }
+
+ Ok(())
+ }
+
+ /// The byte count this string would occupy if it were encoded as a null-terminated string.
+ pub fn null_terminated_byte_size(&self) -> usize {
+ self.bytes.len() + sequence_end::byte_size()
+ }
+
+ /// The byte count this string would occupy if it were encoded as a size-prefixed string.
+ pub fn i32_sized_byte_size(&self) -> usize {
+ self.bytes.len() + i32::BYTE_SIZE
+ }
+
+ /// Write the length of a string and then the contents with that length.
+ pub fn write_i32_sized<W: Write>(&self, write: &mut W) -> UnitResult {
+ debug_assert!(self.validate( false, None).is_ok(), "text size bug");
+ i32::write(usize_to_i32(self.bytes.len()), write)?;
+ Self::write_unsized_bytes(self.bytes.as_slice(), write)
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ fn write_unsized_bytes<W: Write>(bytes: &[u8], write: &mut W) -> UnitResult {
+ u8::write_slice(write, bytes)?;
+ Ok(())
+ }
+
+ /// Read the length of a string and then the contents with that length.
+ pub fn read_i32_sized<R: Read>(read: &mut R, max_size: usize) -> Result<Self> {
+ let size = i32_to_usize(i32::read(read)?, "vector size")?;
+ Ok(Text::from_bytes_unchecked(SmallVec::from_vec(u8::read_vec(read, size, 1024, Some(max_size), "text attribute length")?)))
+ }
+
+ /// Read the contents with that length.
+ pub fn read_sized<R: Read>(read: &mut R, size: usize) -> Result<Self> {
+ const SMALL_SIZE: usize = 24;
+
+ // for small strings, read into small vec without heap allocation
+ if size <= SMALL_SIZE {
+ let mut buffer = [0_u8; SMALL_SIZE];
+ let data = &mut buffer[..size];
+
+ read.read_exact(data)?;
+ Ok(Text::from_bytes_unchecked(SmallVec::from_slice(data)))
+ }
+
+ // for large strings, read a dynamic vec of arbitrary size
+ else {
+ Ok(Text::from_bytes_unchecked(SmallVec::from_vec(u8::read_vec(read, size, 1024, None, "text attribute length")?)))
+ }
+ }
+
+ /// Write the string contents and a null-terminator.
+ pub fn write_null_terminated<W: Write>(&self, write: &mut W) -> UnitResult {
+ Self::write_null_terminated_bytes(self.as_slice(), write)
+ }
+
+ /// Write the string contents and a null-terminator.
+ fn write_null_terminated_bytes<W: Write>(bytes: &[u8], write: &mut W) -> UnitResult {
+ debug_assert!(!bytes.is_empty(), "text is empty bug"); // required to avoid mixup with "sequence_end"
+
+ Text::write_unsized_bytes(bytes, write)?;
+ sequence_end::write(write)?;
+ Ok(())
+ }
+
+ /// Read a string until the null-terminator is found. Then skips the null-terminator.
+ pub fn read_null_terminated<R: Read>(read: &mut R, max_len: usize) -> Result<Self> {
+ let mut bytes = smallvec![ u8::read(read)? ]; // null-terminated strings are always at least 1 byte
+
+ loop {
+ match u8::read(read)? {
+ 0 => break,
+ non_terminator => bytes.push(non_terminator),
+ }
+
+ if bytes.len() > max_len {
+ return Err(Error::invalid("text too long"))
+ }
+ }
+
+ Ok(Text { bytes })
+ }
+
+ /// Allows any text length since it is only used for attribute values,
+ /// but not attribute names, attribute type names, or channel names.
+ fn read_vec_of_i32_sized(
+ read: &mut PeekRead<impl Read>,
+ total_byte_size: usize
+ ) -> Result<Vec<Text>>
+ {
+ let mut result = Vec::with_capacity(2);
+
+ // length of the text-vector can be inferred from attribute size
+ let mut processed_bytes = 0;
+
+ while processed_bytes < total_byte_size {
+ let text = Text::read_i32_sized(read, total_byte_size)?;
+ processed_bytes += ::std::mem::size_of::<i32>(); // size i32 of the text
+ processed_bytes += text.bytes.len();
+ result.push(text);
+ }
+
+ // the expected byte size did not match the actual text byte size
+ if processed_bytes != total_byte_size {
+ return Err(Error::invalid("text array byte size"))
+ }
+
+ Ok(result)
+ }
+
+ /// Allows any text length since it is only used for attribute values,
+ /// but not attribute names, attribute type names, or channel names.
+ fn write_vec_of_i32_sized_texts<W: Write>(write: &mut W, texts: &[Text]) -> UnitResult {
+ // length of the text-vector can be inferred from attribute size
+ for text in texts {
+ text.write_i32_sized(write)?;
+ }
+
+ Ok(())
+ }
+
+ /// The underlying bytes that represent this text.
+ pub fn bytes(&self) -> &[u8] {
+ self.bytes.as_slice()
+ }
+
+ /// Iterate over the individual chars in this text, similar to `String::chars()`.
+ /// Does not do any heap-allocation but borrows from this instance instead.
+ pub fn chars(&self) -> impl '_ + Iterator<Item = char> {
+ self.bytes.iter().map(|&byte| byte as char)
+ }
+
+ /// Compare this `exr::Text` with a plain `&str`.
+ pub fn eq(&self, string: &str) -> bool {
+ string.chars().eq(self.chars())
+ }
+
+ /// Compare this `exr::Text` with a plain `&str` ignoring capitalization.
+ pub fn eq_case_insensitive(&self, string: &str) -> bool {
+ // this is technically not working for a "turkish i", but those cannot be encoded in exr files anyways
+ let self_chars = self.chars().map(|char| char.to_ascii_lowercase());
+ let string_chars = string.chars().flat_map(|ch| ch.to_lowercase());
+
+ string_chars.eq(self_chars)
+ }
+}
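+
+// A minimal usage sketch for `Text`, relying only on the constructors and
+// comparison helpers defined above; the sample strings are arbitrary.
+#[test]
+fn sketch_text_usage() {
+ let channel_name = Text::new_or_none("R").expect("plain ascii is always supported");
+
+ assert!(channel_name.eq("R"));
+ assert!(channel_name.eq_case_insensitive("r"));
+ assert_eq!(channel_name.to_string(), "R");
+
+ // characters above 255 cannot be represented as single bytes
+ assert!(Text::new_or_none("π").is_none());
+}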
+
+impl PartialEq<str> for Text {
+ fn eq(&self, other: &str) -> bool {
+ self.eq(other)
+ }
+}
+
+impl PartialEq<Text> for str {
+ fn eq(&self, other: &Text) -> bool {
+ other.eq(self)
+ }
+}
+
+impl Eq for Text {}
+
+impl Borrow<TextSlice> for Text {
+ fn borrow(&self) -> &TextSlice {
+ self.as_slice()
+ }
+}
+
+// forwarding implementation. guarantees `text.borrow().hash() == text.hash()` (required for Borrow)
+impl Hash for Text {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.bytes.hash(state)
+ }
+}
+
+impl Into<String> for Text {
+ fn into(self) -> String {
+ self.to_string()
+ }
+}
+
+impl<'s> From<&'s str> for Text {
+
+ /// Panics if the string contains an unsupported character
+ fn from(str: &'s str) -> Self {
+ Self::new_or_panic(str)
+ }
+}
+
+
+/* TODO (currently conflicts with From<&str>)
+impl<'s> TryFrom<&'s str> for Text {
+ type Error = String;
+
+ fn try_from(value: &'s str) -> std::result::Result<Self, Self::Error> {
+ Text::new_or_none(value)
+ .ok_or_else(|| format!(
+ "exr::Text does not support all characters in the string `{}`",
+ value
+ ))
+ }
+}*/
+
+
+impl ::std::fmt::Debug for Text {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ write!(f, "exr::Text(\"{}\")", self)
+ }
+}
+
+// automatically implements to_string for us
+impl ::std::fmt::Display for Text {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ use std::fmt::Write;
+
+ for &byte in self.bytes.iter() {
+ f.write_char(byte as char)?;
+ }
+
+ Ok(())
+ }
+}
+
+
+impl ChannelList {
+
+ /// Does not validate channel order.
+ pub fn new(channels: SmallVec<[ChannelDescription; 5]>) -> Self {
+ let uniform_sample_type = {
+ if let Some(first) = channels.first() {
+ let has_uniform_types = channels.iter().skip(1)
+ .all(|chan| chan.sample_type == first.sample_type);
+
+ if has_uniform_types { Some(first.sample_type) } else { None }
+ }
+ else { None }
+ };
+
+ ChannelList {
+ bytes_per_pixel: channels.iter().map(|channel| channel.sample_type.bytes_per_sample()).sum(),
+ list: channels, uniform_sample_type,
+ }
+ }
+
+ /// Iterate over the channels, adding to each channel the byte offset of the channel's sample type.
+ /// Assumes the internal channel list is properly sorted.
+ pub fn channels_with_byte_offset(&self) -> impl Iterator<Item=(usize, &ChannelDescription)> {
+ self.list.iter().scan(0, |byte_position, channel|{
+ let previous_position = *byte_position;
+ *byte_position += channel.sample_type.bytes_per_sample();
+ Some((previous_position, channel))
+ })
+ }
+
+ /// Return the index of the channel with the exact name, case sensitive, or none.
+ /// Potentially uses less than linear time.
+ pub fn find_index_of_channel(&self, exact_name: &Text) -> Option<usize> {
+ self.list.binary_search_by_key(&exact_name.bytes(), |chan| chan.name.bytes()).ok()
+ }
+
+ // TODO use this in compression methods
+ /*pub fn pixel_section_indices(&self, bounds: IntegerBounds) -> impl '_ + Iterator<Item=(&Channel, usize, usize)> {
+ (bounds.position.y() .. bounds.end().y()).flat_map(|y| {
+ self.list
+ .filter(|channel| mod_p(y, usize_to_i32(channel.sampling.1)) == 0)
+ .flat_map(|channel|{
+ (bounds.position.x() .. bounds.end().x())
+ .filter(|x| mod_p(*x, usize_to_i32(channel.sampling.0)) == 0)
+ .map(|x| (channel, x, y))
+ })
+ })
+ }*/
+}
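+
+// A minimal sketch of building a channel list. The names "B", "G", "R" follow the
+// alphabetical ordering required above; the sample type is arbitrary.
+#[test]
+fn sketch_channel_list_construction() {
+ let channels = ChannelList::new(smallvec![
+ ChannelDescription::named("B", SampleType::F16),
+ ChannelDescription::named("G", SampleType::F16),
+ ChannelDescription::named("R", SampleType::F16),
+ ]);
+
+ assert_eq!(channels.bytes_per_pixel, 3 * 2); // three half-float channels
+ assert_eq!(channels.uniform_sample_type, Some(SampleType::F16));
+ assert_eq!(channels.find_index_of_channel(&Text::new_or_panic("G")), Some(1));
+}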
+
+impl BlockType {
+
+ /// The corresponding attribute type name literal
+ const TYPE_NAME: &'static [u8] = type_names::TEXT;
+
+ /// Return a `BlockType` object from the specified attribute text value.
+ pub fn parse(text: Text) -> Result<Self> {
+ match text.as_slice() {
+ block_type_strings::SCAN_LINE => Ok(BlockType::ScanLine),
+ block_type_strings::TILE => Ok(BlockType::Tile),
+
+ block_type_strings::DEEP_SCAN_LINE => Ok(BlockType::DeepScanLine),
+ block_type_strings::DEEP_TILE => Ok(BlockType::DeepTile),
+
+ _ => Err(Error::invalid("block type attribute value")),
+ }
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write(&self, write: &mut impl Write) -> UnitResult {
+ u8::write_slice(write, self.to_text_bytes())?;
+ Ok(())
+ }
+
+ /// Returns the raw attribute text value this type is represented by in a file.
+ pub fn to_text_bytes(&self) -> &[u8] {
+ match self {
+ BlockType::ScanLine => block_type_strings::SCAN_LINE,
+ BlockType::Tile => block_type_strings::TILE,
+ BlockType::DeepScanLine => block_type_strings::DEEP_SCAN_LINE,
+ BlockType::DeepTile => block_type_strings::DEEP_TILE,
+ }
+ }
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size(&self) -> usize {
+ self.to_text_bytes().len()
+ }
+}
+
+
+impl IntegerBounds {
+
+ /// Create a box with no size located at (0,0).
+ pub fn zero() -> Self {
+ Self::from_dimensions(Vec2(0, 0))
+ }
+
+ /// Create a box with a size starting at zero.
+ pub fn from_dimensions(size: impl Into<Vec2<usize>>) -> Self {
+ Self::new(Vec2(0,0), size)
+ }
+
+ /// Create a box with a size and an origin point.
+ pub fn new(start: impl Into<Vec2<i32>>, size: impl Into<Vec2<usize>>) -> Self {
+ Self { position: start.into(), size: size.into() }
+ }
+
+ /// Returns the top-right coordinate of the rectangle.
+ /// The row and column described by this vector are not included in the rectangle,
+ /// just like `Vec::len()`.
+ pub fn end(self) -> Vec2<i32> {
+ self.position + self.size.to_i32() // panics if the result exceeds the i32 range
+ }
+
+ /// Returns the maximum coordinate that a value in this rectangle may have.
+ pub fn max(self) -> Vec2<i32> {
+ self.end() - Vec2(1,1)
+ }
+
+ /// Validate this instance.
+ pub fn validate(&self, max_size: Option<Vec2<usize>>) -> UnitResult {
+ if let Some(max_size) = max_size {
+ if self.size.width() > max_size.width() || self.size.height() > max_size.height() {
+ return Err(Error::invalid("window attribute dimension value"));
+ }
+ }
+
+ let min_i64 = Vec2(self.position.x() as i64, self.position.y() as i64);
+
+ let max_i64 = Vec2(
+ self.position.x() as i64 + self.size.width() as i64,
+ self.position.y() as i64 + self.size.height() as i64,
+ );
+
+ Self::validate_min_max_u64(min_i64, max_i64)
+ }
+
+ fn validate_min_max_u64(min: Vec2<i64>, max: Vec2<i64>) -> UnitResult {
+ let max_box_size_as_i64 = (i32::MAX / 2) as i64; // as defined in the original c++ library
+
+ if max.x() >= max_box_size_as_i64
+ || max.y() >= max_box_size_as_i64
+ || min.x() <= -max_box_size_as_i64
+ || min.y() <= -max_box_size_as_i64
+ {
+ return Err(Error::invalid("window size exceeding integer maximum"));
+ }
+
+ Ok(())
+ }
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size() -> usize {
+ 4 * i32::BYTE_SIZE
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
+ let Vec2(x_min, y_min) = self.position;
+ let Vec2(x_max, y_max) = self.max();
+
+ x_min.write(write)?;
+ y_min.write(write)?;
+ x_max.write(write)?;
+ y_max.write(write)?;
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ let x_min = i32::read(read)?;
+ let y_min = i32::read(read)?;
+ let x_max = i32::read(read)?;
+ let y_max = i32::read(read)?;
+
+ let min = Vec2(x_min.min(x_max), y_min.min(y_max));
+ let max = Vec2(x_min.max(x_max), y_min.max(y_max));
+
+ // prevent addition overflow
+ Self::validate_min_max_u64(
+ Vec2(min.x() as i64, min.y() as i64),
+ Vec2(max.x() as i64, max.y() as i64),
+ )?;
+
+ // add one to max because the max is inclusive, but the size is not
+ let size = Vec2(max.x() + 1 - min.x(), max.y() + 1 - min.y());
+ let size = size.to_usize("box coordinates")?;
+
+ Ok(IntegerBounds { position: min, size })
+ }
+
+ /// Create a new rectangle which is offset by the specified origin.
+ pub fn with_origin(self, origin: Vec2<i32>) -> Self { // TODO rename to "move" or "translate"?
+ IntegerBounds { position: self.position + origin, .. self }
+ }
+
+ /// Returns whether the specified rectangle is equal to or inside this rectangle.
+ pub fn contains(self, subset: Self) -> bool {
+ subset.position.x() >= self.position.x()
+ && subset.position.y() >= self.position.y()
+ && subset.end().x() <= self.end().x()
+ && subset.end().y() <= self.end().y()
+ }
+}
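+
+// A minimal sketch of the exclusive-`end` versus inclusive-`max` convention
+// documented above; the coordinates are arbitrary.
+#[test]
+fn sketch_integer_bounds_coordinates() {
+ let bounds = IntegerBounds::new(Vec2(1, 2), Vec2(10, 20));
+
+ assert_eq!(bounds.end(), Vec2(11, 22)); // first coordinate outside the rectangle
+ assert_eq!(bounds.max(), Vec2(10, 21)); // last coordinate still inside the rectangle
+
+ assert!(bounds.contains(IntegerBounds::new(Vec2(1, 2), Vec2(10, 20))));
+ assert!(!bounds.contains(IntegerBounds::new(Vec2(0, 2), Vec2(10, 20))));
+}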
+
+
+impl FloatRect {
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size() -> usize {
+ 4 * f32::BYTE_SIZE
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
+ self.min.x().write(write)?;
+ self.min.y().write(write)?;
+ self.max.x().write(write)?;
+ self.max.y().write(write)?;
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ let x_min = f32::read(read)?;
+ let y_min = f32::read(read)?;
+ let x_max = f32::read(read)?;
+ let y_max = f32::read(read)?;
+
+ Ok(FloatRect {
+ min: Vec2(x_min, y_min),
+ max: Vec2(x_max, y_max)
+ })
+ }
+}
+
+impl SampleType {
+
+ /// How many bytes a single sample takes up.
+ pub fn bytes_per_sample(&self) -> usize {
+ match self {
+ SampleType::F16 => f16::BYTE_SIZE,
+ SampleType::F32 => f32::BYTE_SIZE,
+ SampleType::U32 => u32::BYTE_SIZE,
+ }
+ }
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size() -> usize {
+ i32::BYTE_SIZE
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
+ match *self {
+ SampleType::U32 => 0_i32,
+ SampleType::F16 => 1_i32,
+ SampleType::F32 => 2_i32,
+ }.write(write)?;
+
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ // there's definitely going to be more than 255 different pixel types in the future
+ Ok(match i32::read(read)? {
+ 0 => SampleType::U32,
+ 1 => SampleType::F16,
+ 2 => SampleType::F32,
+ _ => return Err(Error::invalid("pixel type attribute value")),
+ })
+ }
+}
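+
+// A minimal round-trip sketch of the encoding above, assuming an in-memory `Vec<u8>`
+// as the byte stream: sample types are stored as little-endian i32 tags.
+#[test]
+fn sketch_sample_type_round_trip() {
+ let mut bytes = Vec::new();
+ SampleType::F16.write(&mut bytes).expect("in-memory write");
+ assert_eq!(bytes, [1, 0, 0, 0]); // F16 is encoded as the i32 value 1
+
+ let decoded = SampleType::read(&mut bytes.as_slice()).expect("in-memory read");
+ assert_eq!(decoded, SampleType::F16);
+}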
+
+impl ChannelDescription {
+ /// Choose whether to compress samples linearly or not, based on the channel name.
+ /// Luminance-based channels will be compressed differently than linear data such as alpha.
+ pub fn guess_quantization_linearity(name: &Text) -> bool {
+ !(
+ name.eq_case_insensitive("R") || name.eq_case_insensitive("G") ||
+ name.eq_case_insensitive("B") || name.eq_case_insensitive("L") ||
+ name.eq_case_insensitive("Y") || name.eq_case_insensitive("X") ||
+ name.eq_case_insensitive("Z")
+ )
+ }
+
+ /// Create a new channel with the specified properties and a sampling rate of (1,1).
+ /// Automatically chooses the linearity for compression based on the channel name.
+ pub fn named(name: impl Into<Text>, sample_type: SampleType) -> Self {
+ let name = name.into();
+ let linearity = Self::guess_quantization_linearity(&name);
+ Self::new(name, sample_type, linearity)
+ }
+
+ /*pub fn from_name<T: Into<Sample> + Default>(name: impl Into<Text>) -> Self {
+ Self::named(name, T::default().into().sample_type())
+ }*/
+
+ /// Create a new channel with the specified properties and a sampling rate of (1,1).
+ pub fn new(name: impl Into<Text>, sample_type: SampleType, quantize_linearly: bool) -> Self {
+ Self { name: name.into(), sample_type, quantize_linearly, sampling: Vec2(1, 1) }
+ }
+
+ /// The count of pixels this channel contains, respecting subsampling.
+ // FIXME this must be used everywhere
+ pub fn subsampled_pixels(&self, dimensions: Vec2<usize>) -> usize {
+ self.subsampled_resolution(dimensions).area()
+ }
+
+ /// The resolution of this channel, respecting subsampling.
+ pub fn subsampled_resolution(&self, dimensions: Vec2<usize>) -> Vec2<usize> {
+ dimensions / self.sampling
+ }
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size(&self) -> usize {
+ self.name.null_terminated_byte_size()
+ + SampleType::byte_size()
+ + 1 // is_linear
+ + 3 // reserved bytes
+ + 2 * u32::BYTE_SIZE // sampling x, y
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
+ Text::write_null_terminated(&self.name, write)?;
+ self.sample_type.write(write)?;
+
+ match self.quantize_linearly {
+ false => 0_u8,
+ true => 1_u8,
+ }.write(write)?;
+
+ i8::write_slice(write, &[0_i8, 0_i8, 0_i8])?;
+ i32::write(usize_to_i32(self.sampling.x()), write)?;
+ i32::write(usize_to_i32(self.sampling.y()), write)?;
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ let name = Text::read_null_terminated(read, 256)?;
+ let sample_type = SampleType::read(read)?;
+
+ let is_linear = match u8::read(read)? {
+ 1 => true,
+ 0 => false,
+ _ => return Err(Error::invalid("channel linearity attribute value")),
+ };
+
+ let mut reserved = [0_i8; 3];
+ i8::read_slice(read, &mut reserved)?;
+
+ let x_sampling = i32_to_usize(i32::read(read)?, "x channel sampling")?;
+ let y_sampling = i32_to_usize(i32::read(read)?, "y channel sampling")?;
+
+ Ok(ChannelDescription {
+ name, sample_type,
+ quantize_linearly: is_linear,
+ sampling: Vec2(x_sampling, y_sampling),
+ })
+ }
+
+ /// Validate this instance.
+ pub fn validate(&self, allow_sampling: bool, data_window: IntegerBounds, strict: bool) -> UnitResult {
+ self.name.validate(true, None)?; // TODO spec says this does not affect `requirements.long_names` but is that true?
+
+ if self.sampling.x() == 0 || self.sampling.y() == 0 {
+ return Err(Error::invalid("zero sampling factor"));
+ }
+
+ if strict && !allow_sampling && self.sampling != Vec2(1,1) {
+ return Err(Error::invalid("subsampling is only allowed in flat scan line images"));
+ }
+
+ if data_window.position.x() % self.sampling.x() as i32 != 0 || data_window.position.y() % self.sampling.y() as i32 != 0 {
+ return Err(Error::invalid("channel sampling factor not dividing data window position"));
+ }
+
+ if data_window.size.x() % self.sampling.x() != 0 || data_window.size.y() % self.sampling.y() != 0 {
+ return Err(Error::invalid("channel sampling factor not dividing data window size"));
+ }
+
+ if self.sampling != Vec2(1,1) {
+ // TODO this must only be implemented in the crate::image module and child modules,
+ // should not be too difficult
+
+ return Err(Error::unsupported("channel subsampling not supported yet"));
+ }
+
+ Ok(())
+ }
+}
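+
+// A minimal sketch of the linearity heuristic above: color and luminance channels
+// are quantized exponentially, anything else (such as alpha) linearly.
+#[test]
+fn sketch_channel_linearity_guess() {
+ assert!(!ChannelDescription::guess_quantization_linearity(&"R".into()));
+ assert!(!ChannelDescription::guess_quantization_linearity(&"y".into()));
+ assert!(ChannelDescription::guess_quantization_linearity(&"A".into()));
+
+ let alpha = ChannelDescription::named("A", SampleType::F16);
+ assert!(alpha.quantize_linearly);
+ assert_eq!(alpha.sampling, Vec2(1, 1));
+}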
+
+impl ChannelList {
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size(&self) -> usize {
+ self.list.iter().map(ChannelDescription::byte_size).sum::<usize>() + sequence_end::byte_size()
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ /// Assumes channels are sorted alphabetically and all values are validated.
+ pub fn write(&self, write: &mut impl Write) -> UnitResult {
+ for channel in &self.list {
+ channel.write(write)?;
+ }
+
+ sequence_end::write(write)?;
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read(read: &mut PeekRead<impl Read>) -> Result<Self> {
+ let mut channels = SmallVec::new();
+ while !sequence_end::has_come(read)? {
+ channels.push(ChannelDescription::read(read)?);
+ }
+
+ Ok(ChannelList::new(channels))
+ }
+
+ /// Check if channels are valid and sorted.
+ pub fn validate(&self, allow_sampling: bool, data_window: IntegerBounds, strict: bool) -> UnitResult {
+ let mut iter = self.list.iter().map(|chan| chan.validate(allow_sampling, data_window, strict).map(|_| &chan.name));
+ let mut previous = iter.next().ok_or(Error::invalid("at least one channel is required"))??;
+
+ for result in iter {
+ let value = result?;
+ if strict && previous == value { return Err(Error::invalid("channel names are not unique")); }
+ else if previous > value { return Err(Error::invalid("channel names are not sorted alphabetically")); }
+ else { previous = value; }
+ }
+
+ Ok(())
+ }
+}
+
+fn u8_to_decimal32(binary: u8) -> u32 {
+ let units = binary as u32 % 10;
+ let tens = (binary as u32 / 10) % 10;
+ units | (tens << 4)
+}
+
+// assumes value fits into u8
+fn u8_from_decimal32(coded: u32) -> u8 {
+ ((coded & 0x0f) + 10 * ((coded >> 4) & 0x0f)) as u8
+}
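+
+// A worked example of the binary-coded-decimal helpers above:
+// 29 becomes low nibble 9 and high nibble 2, that is 0x29, and back again.
+#[test]
+fn sketch_decimal32_round_trip() {
+ assert_eq!(u8_to_decimal32(29), 0x29);
+ assert_eq!(u8_from_decimal32(0x29), 29);
+ assert_eq!(u8_from_decimal32(u8_to_decimal32(59)), 59);
+}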
+
+// https://github.com/AcademySoftwareFoundation/openexr/blob/master/src/lib/OpenEXR/ImfTimeCode.cpp
+impl TimeCode {
+
+ /// Number of bytes this would consume in an exr file.
+ pub const BYTE_SIZE: usize = 2 * u32::BYTE_SIZE;
+
+ /// Returns an error if this time code is considered invalid.
+ pub fn validate(&self, strict: bool) -> UnitResult {
+ if strict {
+ if self.frame > 29 { Err(Error::invalid("time code frame larger than 29")) }
+ else if self.seconds > 59 { Err(Error::invalid("time code seconds larger than 59")) }
+ else if self.minutes > 59 { Err(Error::invalid("time code minutes larger than 59")) }
+ else if self.hours > 23 { Err(Error::invalid("time code hours larger than 23")) }
+ else if self.binary_groups.iter().any(|&group| group > 15) {
+ Err(Error::invalid("time code binary group value too large for 3 bits"))
+ }
+ else { Ok(()) }
+ }
+ else { Ok(()) }
+ }
+
+
+ /// Pack the SMPTE time code into a u32 value, according to TV60 packing.
+ /// This is the encoding which is used within a binary exr file.
+ pub fn pack_time_as_tv60_u32(&self) -> Result<u32> {
+ // validate strictly to prevent set_bit panic! below
+ self.validate(true)?;
+
+ Ok(*0_u32
+ .set_bits(0..6, u8_to_decimal32(self.frame))
+ .set_bit(6, self.drop_frame)
+ .set_bit(7, self.color_frame)
+ .set_bits(8..15, u8_to_decimal32(self.seconds))
+ .set_bit(15, self.field_phase)
+ .set_bits(16..23, u8_to_decimal32(self.minutes))
+ .set_bit(23, self.binary_group_flags[0])
+ .set_bits(24..30, u8_to_decimal32(self.hours))
+ .set_bit(30, self.binary_group_flags[1])
+ .set_bit(31, self.binary_group_flags[2])
+ )
+ }
+
+ /// Unpack a time code from one TV60 encoded u32 value and the encoded user data.
+ /// This is the encoding which is used within a binary exr file.
+ pub fn from_tv60_time(tv60_time: u32, user_data: u32) -> Self {
+ Self {
+ frame: u8_from_decimal32(tv60_time.get_bits(0..6)), // cast cannot fail, as these are less than 8 bits
+ drop_frame: tv60_time.get_bit(6),
+ color_frame: tv60_time.get_bit(7),
+ seconds: u8_from_decimal32(tv60_time.get_bits(8..15)), // cast cannot fail, as these are less than 8 bits
+ field_phase: tv60_time.get_bit(15),
+ minutes: u8_from_decimal32(tv60_time.get_bits(16..23)), // cast cannot fail, as these are less than 8 bits
+ hours: u8_from_decimal32(tv60_time.get_bits(24..30)), // cast cannot fail, as these are less than 8 bits
+ binary_group_flags: [
+ tv60_time.get_bit(23),
+ tv60_time.get_bit(30),
+ tv60_time.get_bit(31),
+ ],
+
+ binary_groups: Self::unpack_user_data_from_u32(user_data)
+ }
+ }
+
+ /// Pack the SMPTE time code into a u32 value, according to TV50 packing.
+ /// This encoding does not support the `drop_frame` flag, it will be lost.
+ pub fn pack_time_as_tv50_u32(&self) -> Result<u32> {
+ Ok(*self.pack_time_as_tv60_u32()?
+
+ // swap some fields by replacing some bits in the packed u32
+ .set_bit(6, false)
+ .set_bit(15, self.binary_group_flags[0])
+ .set_bit(30, self.binary_group_flags[1])
+ .set_bit(23, self.binary_group_flags[2])
+ .set_bit(31, self.field_phase)
+ )
+ }
+
+ /// Unpack a time code from one TV50 encoded u32 value and the encoded user data.
+ /// This encoding does not support the `drop_frame` flag, it will always be false.
+ pub fn from_tv50_time(tv50_time: u32, user_data: u32) -> Self {
+ Self {
+ drop_frame: false, // do not use bit [6]
+
+ // swap some fields:
+ field_phase: tv50_time.get_bit(31),
+ binary_group_flags: [
+ tv50_time.get_bit(15),
+ tv50_time.get_bit(30),
+ tv50_time.get_bit(23),
+ ],
+
+ .. Self::from_tv60_time(tv50_time, user_data)
+ }
+ }
+
+
+ /// Pack the SMPTE time code into a u32 value, according to FILM24 packing.
+ /// This encoding does not support the `drop_frame` and `color_frame` flags, they will be lost.
+ pub fn pack_time_as_film24_u32(&self) -> Result<u32> {
+ Ok(*self.pack_time_as_tv60_u32()?
+ .set_bit(6, false)
+ .set_bit(7, false)
+ )
+ }
+
+ /// Unpack a time code from one FILM24 encoded u32 value and the encoded user data.
+ /// This encoding does not support the `drop_frame` and `color_frame` flags; they will always be `false`.
+ pub fn from_film24_time(film24_time: u32, user_data: u32) -> Self {
+ Self {
+ drop_frame: false, // bit [6]
+ color_frame: false, // bit [7]
+ .. Self::from_tv60_time(film24_time, user_data)
+ }
+ }
+
+
+ // in rust, group index starts at zero, not at one.
+ fn user_data_bit_indices(group_index: usize) -> std::ops::Range<usize> {
+ let min_bit = 4 * group_index;
+ min_bit .. min_bit + 4 // +4, not +3, as `Range` is exclusive
+ }
+
+ /// Pack the user data `u8` array into one u32.
+ /// User data values are clamped to the valid range (maximum value is 15).
+ pub fn pack_user_data_as_u32(&self) -> u32 {
+ let packed = self.binary_groups.iter().enumerate().fold(0_u32, |mut packed, (group_index, group_value)|
+ *packed.set_bits(Self::user_data_bit_indices(group_index), *group_value.min(&15) as u32)
+ );
+
+ debug_assert_eq!(Self::unpack_user_data_from_u32(packed), self.binary_groups, "round trip user data encoding");
+ packed
+ }
+
+ // Unpack the encoded u32 user data to an array of bytes, each byte having a value from 0 to 15.
+ fn unpack_user_data_from_u32(user_data: u32) -> [u8; 8] {
+ (0..8).map(|group_index| user_data.get_bits(Self::user_data_bit_indices(group_index)) as u8)
+ .collect::<SmallVec<[u8;8]>>().into_inner().expect("array index bug")
+ }
+
+
+ /// Write this time code to the byte stream, encoded as TV60 integers.
+ /// Returns an `Error::Invalid` if the fields are out of the allowed range.
+ pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
+ self.pack_time_as_tv60_u32()?.write(write)?; // will validate
+ self.pack_user_data_as_u32().write(write)?;
+ Ok(())
+ }
+
+ /// Read the time code, without validating, extracting from TV60 integers.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ let time_and_flags = u32::read(read)?;
+ let user_data = u32::read(read)?;
+ Ok(Self::from_tv60_time(time_and_flags, user_data))
+ }
+}
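+
+// A minimal round-trip sketch for the TV60 packing above; the field values are
+// arbitrary but within the ranges accepted by `validate`.
+#[test]
+fn sketch_time_code_tv60_round_trip() {
+ let code = TimeCode {
+ hours: 12, minutes: 34, seconds: 56, frame: 11,
+ drop_frame: false, color_frame: true, field_phase: false,
+ binary_group_flags: [false, true, false],
+ binary_groups: [0, 1, 2, 3, 4, 5, 6, 7],
+ };
+
+ let packed_time = code.pack_time_as_tv60_u32().expect("valid time code");
+ let packed_user_data = code.pack_user_data_as_u32();
+ assert_eq!(TimeCode::from_tv60_time(packed_time, packed_user_data), code);
+}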
+
+impl Chromaticities {
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size() -> usize {
+ 8 * f32::BYTE_SIZE
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
+ self.red.x().write(write)?;
+ self.red.y().write(write)?;
+
+ self.green.x().write(write)?;
+ self.green.y().write(write)?;
+
+ self.blue.x().write(write)?;
+ self.blue.y().write(write)?;
+
+ self.white.x().write(write)?;
+ self.white.y().write(write)?;
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ Ok(Chromaticities {
+ red: Vec2(f32::read(read)?, f32::read(read)?),
+ green: Vec2(f32::read(read)?, f32::read(read)?),
+ blue: Vec2(f32::read(read)?, f32::read(read)?),
+ white: Vec2(f32::read(read)?, f32::read(read)?),
+ })
+ }
+}
+
+impl Compression {
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size() -> usize { u8::BYTE_SIZE }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(self, write: &mut W) -> UnitResult {
+ use self::Compression::*;
+ match self {
+ Uncompressed => 0_u8,
+ RLE => 1_u8,
+ ZIP1 => 2_u8,
+ ZIP16 => 3_u8,
+ PIZ => 4_u8,
+ PXR24 => 5_u8,
+ B44 => 6_u8,
+ B44A => 7_u8,
+ DWAA(_) => 8_u8,
+ DWAB(_) => 9_u8,
+ }.write(write)?;
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ use self::Compression::*;
+ Ok(match u8::read(read)? {
+ 0 => Uncompressed,
+ 1 => RLE,
+ 2 => ZIP1,
+ 3 => ZIP16,
+ 4 => PIZ,
+ 5 => PXR24,
+ 6 => B44,
+ 7 => B44A,
+ 8 => DWAA(None),
+ 9 => DWAB(None),
+ _ => return Err(Error::unsupported("unknown compression method")),
+ })
+ }
+}
+
+impl EnvironmentMap {
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size() -> usize {
+ u8::BYTE_SIZE
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(self, write: &mut W) -> UnitResult {
+ use self::EnvironmentMap::*;
+ match self {
+ LatitudeLongitude => 0_u8,
+ Cube => 1_u8
+ }.write(write)?;
+
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ use self::EnvironmentMap::*;
+ Ok(match u8::read(read)? {
+ 0 => LatitudeLongitude,
+ 1 => Cube,
+ _ => return Err(Error::invalid("environment map attribute value")),
+ })
+ }
+}
+
+impl KeyCode {
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size() -> usize {
+ 7 * i32::BYTE_SIZE // seven i32 fields, matching `write` and `read` below
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
+ self.film_manufacturer_code.write(write)?;
+ self.film_type.write(write)?;
+ self.film_roll_prefix.write(write)?;
+ self.count.write(write)?;
+ self.perforation_offset.write(write)?;
+ self.perforations_per_frame.write(write)?;
+ self.perforations_per_count.write(write)?;
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ Ok(KeyCode {
+ film_manufacturer_code: i32::read(read)?,
+ film_type: i32::read(read)?,
+ film_roll_prefix: i32::read(read)?,
+ count: i32::read(read)?,
+ perforation_offset: i32::read(read)?,
+ perforations_per_frame: i32::read(read)?,
+ perforations_per_count: i32::read(read)?,
+ })
+ }
+}
+
+impl LineOrder {
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size() -> usize {
+ u8::BYTE_SIZE
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(self, write: &mut W) -> UnitResult {
+ use self::LineOrder::*;
+ match self {
+ Increasing => 0_u8,
+ Decreasing => 1_u8,
+ Unspecified => 2_u8,
+ }.write(write)?;
+
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ use self::LineOrder::*;
+ Ok(match u8::read(read)? {
+ 0 => Increasing,
+ 1 => Decreasing,
+ 2 => Unspecified,
+ _ => return Err(Error::invalid("line order attribute value")),
+ })
+ }
+}
+
+
+
+
+impl Preview {
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size(&self) -> usize {
+ 2 * u32::BYTE_SIZE + self.pixel_data.len()
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
+ u32::write(self.size.width() as u32, write)?;
+ u32::write(self.size.height() as u32, write)?;
+
+ i8::write_slice(write, &self.pixel_data)?;
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ let width = u32::read(read)? as usize;
+ let height = u32::read(read)? as usize;
+
+ if let Some(pixel_count) = width.checked_mul(height) {
+ // Multiply by the number of bytes per pixel.
+ if let Some(byte_count) = pixel_count.checked_mul(4) {
+ let pixel_data = i8::read_vec(
+ read,
+ byte_count,
+ 1024 * 1024 * 4,
+ None,
+ "preview attribute pixel count",
+ )?;
+
+ let preview = Preview {
+ size: Vec2(width, height),
+ pixel_data,
+ };
+
+ return Ok(preview);
+ }
+ }
+
+ return Err(Error::invalid(
+ format!("Overflow while calculating preview image Attribute size \
+ (width: {}, height: {}).",
+ width,
+ height)));
+ }
+
+ /// Validate this instance.
+ pub fn validate(&self, strict: bool) -> UnitResult {
+ if strict && (self.size.area() * 4 != self.pixel_data.len()) {
+ return Err(Error::invalid("preview dimensions do not match content length"))
+ }
+
+ Ok(())
+ }
+}
+
+impl ::std::fmt::Debug for Preview {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ write!(f, "Preview ({}x{} px)", self.size.width(), self.size.height())
+ }
+}
+
+impl TileDescription {
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size() -> usize {
+ 2 * u32::BYTE_SIZE + 1 // size x,y + (level mode + rounding mode)
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
+ u32::write(self.tile_size.width() as u32, write)?;
+ u32::write(self.tile_size.height() as u32, write)?;
+
+ let level_mode = match self.level_mode {
+ LevelMode::Singular => 0_u8,
+ LevelMode::MipMap => 1_u8,
+ LevelMode::RipMap => 2_u8,
+ };
+
+ let rounding_mode = match self.rounding_mode {
+ RoundingMode::Down => 0_u8,
+ RoundingMode::Up => 1_u8,
+ };
+
+ let mode: u8 = level_mode + (rounding_mode * 16);
+ mode.write(write)?;
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ let x_size = u32::read(read)? as usize;
+ let y_size = u32::read(read)? as usize;
+
+ let mode = u8::read(read)?;
+
+ // wow you really saved that one byte here
+ // mode = level_mode + (rounding_mode * 16)
+ let level_mode = mode & 0b00001111; // wow that works
+ let rounding_mode = mode >> 4; // wow that works
+
+ let level_mode = match level_mode {
+ 0 => LevelMode::Singular,
+ 1 => LevelMode::MipMap,
+ 2 => LevelMode::RipMap,
+ _ => return Err(Error::invalid("tile description level mode")),
+ };
+
+ let rounding_mode = match rounding_mode {
+ 0 => RoundingMode::Down,
+ 1 => RoundingMode::Up,
+ _ => return Err(Error::invalid("tile description rounding mode")),
+ };
+
+ Ok(TileDescription { tile_size: Vec2(x_size, y_size), level_mode, rounding_mode, })
+ }
+
+ /// Validate this instance.
+ pub fn validate(&self) -> UnitResult {
+ let max = i32::MAX as i64 / 2;
+
+ if self.tile_size.width() == 0 || self.tile_size.height() == 0
+ || self.tile_size.width() as i64 >= max || self.tile_size.height() as i64 >= max
+ {
+ return Err(Error::invalid("tile size"))
+ }
+
+ Ok(())
+ }
+}
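+
+// A worked example of the packed mode byte described in `read` above: the level mode
+// occupies the low nibble and the rounding mode the high nibble (level + rounding * 16).
+// An in-memory `Vec<u8>` stands in for the byte stream.
+#[test]
+fn sketch_tile_description_round_trip() {
+ let tiles = TileDescription {
+ tile_size: Vec2(64, 64),
+ level_mode: LevelMode::MipMap,
+ rounding_mode: RoundingMode::Up,
+ };
+
+ let mut bytes = Vec::new();
+ tiles.write(&mut bytes).expect("in-memory write");
+ assert_eq!(bytes[8], 1 + 16); // MipMap = 1, Up = 1, packed as 0b0001_0001
+
+ let decoded = TileDescription::read(&mut bytes.as_slice()).expect("in-memory read");
+ assert_eq!(decoded, tiles);
+}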
+
+
+/// Number of bytes this attribute would consume in an exr file.
+// TODO instead of pre calculating byte size, write to a tmp buffer whose length is inspected before actually writing?
+pub fn byte_size(name: &Text, value: &AttributeValue) -> usize {
+ name.null_terminated_byte_size()
+ + value.kind_name().len() + sequence_end::byte_size()
+ + i32::BYTE_SIZE // serialized byte size
+ + value.byte_size()
+}
+
+/// Without validation, write this attribute to the byte stream.
+pub fn write<W: Write>(name: &[u8], value: &AttributeValue, write: &mut W) -> UnitResult {
+ Text::write_null_terminated_bytes(name, write)?;
+ Text::write_null_terminated_bytes(value.kind_name(), write)?;
+ i32::write(value.byte_size() as i32, write)?;
+ value.write(write)
+}
+
+/// Read the attribute without validating. The result may be `Ok` even if this single attribute is invalid.
+pub fn read(read: &mut PeekRead<impl Read>, max_size: usize) -> Result<(Text, Result<AttributeValue>)> {
+ let name = Text::read_null_terminated(read, max_size)?;
+ let kind = Text::read_null_terminated(read, max_size)?;
+ let size = i32_to_usize(i32::read(read)?, "attribute size")?;
+ let value = AttributeValue::read(read, kind, size)?;
+ Ok((name, value))
+}
+
+/// Validate this attribute.
+pub fn validate(name: &Text, value: &AttributeValue, long_names: &mut bool, allow_sampling: bool, data_window: IntegerBounds, strict: bool) -> UnitResult {
+ name.validate(true, Some(long_names))?; // only name text has length restriction
+ value.validate(allow_sampling, data_window, strict) // attribute value text length is never restricted
+}
+
+
+impl AttributeValue {
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size(&self) -> usize {
+ use self::AttributeValue::*;
+
+ match *self {
+ IntegerBounds(_) => self::IntegerBounds::byte_size(),
+ FloatRect(_) => self::FloatRect::byte_size(),
+
+ I32(_) => i32::BYTE_SIZE,
+ F32(_) => f32::BYTE_SIZE,
+ F64(_) => f64::BYTE_SIZE,
+
+ Rational(_) => { i32::BYTE_SIZE + u32::BYTE_SIZE },
+ TimeCode(_) => self::TimeCode::BYTE_SIZE,
+
+ IntVec2(_) => { 2 * i32::BYTE_SIZE },
+ FloatVec2(_) => { 2 * f32::BYTE_SIZE },
+ IntVec3(_) => { 3 * i32::BYTE_SIZE },
+ FloatVec3(_) => { 3 * f32::BYTE_SIZE },
+
+ ChannelList(ref channels) => channels.byte_size(),
+ Chromaticities(_) => self::Chromaticities::byte_size(),
+ Compression(_) => self::Compression::byte_size(),
+ EnvironmentMap(_) => self::EnvironmentMap::byte_size(),
+
+ KeyCode(_) => self::KeyCode::byte_size(),
+ LineOrder(_) => self::LineOrder::byte_size(),
+
+ Matrix3x3(ref value) => value.len() * f32::BYTE_SIZE,
+ Matrix4x4(ref value) => value.len() * f32::BYTE_SIZE,
+
+ Preview(ref value) => value.byte_size(),
+
+ // attribute value texts never have limited size.
+ // also, don't serialize size, as it can be inferred from attribute size
+ Text(ref value) => value.bytes.len(),
+
+ TextVector(ref value) => value.iter().map(self::Text::i32_sized_byte_size).sum(),
+ TileDescription(_) => self::TileDescription::byte_size(),
+ Custom { ref bytes, .. } => bytes.len(),
+ BlockType(ref kind) => kind.byte_size()
+ }
+ }
+
+ /// The exr name string of the type that an attribute can have.
+ pub fn kind_name(&self) -> &[u8] {
+ use self::AttributeValue::*;
+ use self::type_names as ty;
+
+ match *self {
+ IntegerBounds(_) => ty::I32BOX2,
+ FloatRect(_) => ty::F32BOX2,
+ I32(_) => ty::I32,
+ F32(_) => ty::F32,
+ F64(_) => ty::F64,
+ Rational(_) => ty::RATIONAL,
+ TimeCode(_) => ty::TIME_CODE,
+ IntVec2(_) => ty::I32VEC2,
+ FloatVec2(_) => ty::F32VEC2,
+ IntVec3(_) => ty::I32VEC3,
+ FloatVec3(_) => ty::F32VEC3,
+ ChannelList(_) => ty::CHANNEL_LIST,
+ Chromaticities(_) => ty::CHROMATICITIES,
+ Compression(_) => ty::COMPRESSION,
+ EnvironmentMap(_) => ty::ENVIRONMENT_MAP,
+ KeyCode(_) => ty::KEY_CODE,
+ LineOrder(_) => ty::LINE_ORDER,
+ Matrix3x3(_) => ty::F32MATRIX3X3,
+ Matrix4x4(_) => ty::F32MATRIX4X4,
+ Preview(_) => ty::PREVIEW,
+ Text(_) => ty::TEXT,
+ TextVector(_) => ty::TEXT_VECTOR,
+ TileDescription(_) => ty::TILES,
+ Custom { ref kind, .. } => &kind.bytes,
+ BlockType(_) => super::BlockType::TYPE_NAME,
+ }
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(&self, write: &mut W) -> UnitResult {
+ use self::AttributeValue::*;
+ match *self {
+ IntegerBounds(value) => value.write(write)?,
+ FloatRect(value) => value.write(write)?,
+
+ I32(value) => value.write(write)?,
+ F32(value) => value.write(write)?,
+ F64(value) => value.write(write)?,
+
+ Rational((a, b)) => { a.write(write)?; b.write(write)?; },
+ TimeCode(codes) => { codes.write(write)?; },
+
+ IntVec2(Vec2(x, y)) => { x.write(write)?; y.write(write)?; },
+ FloatVec2(Vec2(x, y)) => { x.write(write)?; y.write(write)?; },
+ IntVec3((x, y, z)) => { x.write(write)?; y.write(write)?; z.write(write)?; },
+ FloatVec3((x, y, z)) => { x.write(write)?; y.write(write)?; z.write(write)?; },
+
+ ChannelList(ref channels) => channels.write(write)?,
+ Chromaticities(ref value) => value.write(write)?,
+ Compression(value) => value.write(write)?,
+ EnvironmentMap(value) => value.write(write)?,
+
+ KeyCode(value) => value.write(write)?,
+ LineOrder(value) => value.write(write)?,
+
+ Matrix3x3(mut value) => f32::write_slice(write, &mut value)?,
+ Matrix4x4(mut value) => f32::write_slice(write, &mut value)?,
+
+ Preview(ref value) => { value.write(write)?; },
+
+ // attribute value texts never have limited size.
+ // also, don't serialize size, as it can be inferred from attribute size
+ Text(ref value) => u8::write_slice(write, value.bytes.as_slice())?,
+
+ TextVector(ref value) => self::Text::write_vec_of_i32_sized_texts(write, value)?,
+ TileDescription(ref value) => value.write(write)?,
+ Custom { ref bytes, .. } => u8::write_slice(write, &bytes)?, // write.write(&bytes).map(|_| ()),
+ BlockType(kind) => kind.write(write)?
+ };
+
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ /// Returns `Ok(Ok(attribute))` for valid attributes.
+ /// Returns `Ok(Err(Error))` for invalid attributes from a valid byte source.
+ /// Returns `Err(Error)` for invalid byte sources, for example for invalid files.
+ pub fn read(read: &mut PeekRead<impl Read>, kind: Text, byte_size: usize) -> Result<Result<Self>> {
+ use self::AttributeValue::*;
+ use self::type_names as ty;
+
+ // always read bytes
+ let attribute_bytes = u8::read_vec(read, byte_size, 128, None, "attribute value size")?;
+ // TODO no allocation for small attributes // : SmallVec<[u8; 64]> = smallvec![0; byte_size];
+
+ let parse_attribute = move || {
+ let reader = &mut attribute_bytes.as_slice();
+
+ Ok(match kind.bytes.as_slice() {
+ ty::I32BOX2 => IntegerBounds(self::IntegerBounds::read(reader)?),
+ ty::F32BOX2 => FloatRect(self::FloatRect::read(reader)?),
+
+ ty::I32 => I32(i32::read(reader)?),
+ ty::F32 => F32(f32::read(reader)?),
+ ty::F64 => F64(f64::read(reader)?),
+
+ ty::RATIONAL => Rational({
+ let a = i32::read(reader)?;
+ let b = u32::read(reader)?;
+ (a, b)
+ }),
+
+ ty::TIME_CODE => TimeCode(self::TimeCode::read(reader)?),
+
+ ty::I32VEC2 => IntVec2({
+ let a = i32::read(reader)?;
+ let b = i32::read(reader)?;
+ Vec2(a, b)
+ }),
+
+ ty::F32VEC2 => FloatVec2({
+ let a = f32::read(reader)?;
+ let b = f32::read(reader)?;
+ Vec2(a, b)
+ }),
+
+ ty::I32VEC3 => IntVec3({
+ let a = i32::read(reader)?;
+ let b = i32::read(reader)?;
+ let c = i32::read(reader)?;
+ (a, b, c)
+ }),
+
+ ty::F32VEC3 => FloatVec3({
+ let a = f32::read(reader)?;
+ let b = f32::read(reader)?;
+ let c = f32::read(reader)?;
+ (a, b, c)
+ }),
+
+ ty::CHANNEL_LIST => ChannelList(self::ChannelList::read(&mut PeekRead::new(attribute_bytes.as_slice()))?),
+ ty::CHROMATICITIES => Chromaticities(self::Chromaticities::read(reader)?),
+ ty::COMPRESSION => Compression(self::Compression::read(reader)?),
+ ty::ENVIRONMENT_MAP => EnvironmentMap(self::EnvironmentMap::read(reader)?),
+
+ ty::KEY_CODE => KeyCode(self::KeyCode::read(reader)?),
+ ty::LINE_ORDER => LineOrder(self::LineOrder::read(reader)?),
+
+ ty::F32MATRIX3X3 => Matrix3x3({
+ let mut result = [0.0_f32; 9];
+ f32::read_slice(reader, &mut result)?;
+ result
+ }),
+
+ ty::F32MATRIX4X4 => Matrix4x4({
+ let mut result = [0.0_f32; 16];
+ f32::read_slice(reader, &mut result)?;
+ result
+ }),
+
+ ty::PREVIEW => Preview(self::Preview::read(reader)?),
+ ty::TEXT => Text(self::Text::read_sized(reader, byte_size)?),
+
+ // the number of strings can be inferred from the total attribute size
+ ty::TEXT_VECTOR => TextVector(self::Text::read_vec_of_i32_sized(
+ &mut PeekRead::new(attribute_bytes.as_slice()),
+ byte_size
+ )?),
+
+ ty::TILES => TileDescription(self::TileDescription::read(reader)?),
+
+ _ => Custom { kind: kind.clone(), bytes: attribute_bytes.clone() } // TODO no clone
+ })
+ };
+
+ Ok(parse_attribute())
+ }
+
+ /// Validate this instance.
+ pub fn validate(&self, allow_sampling: bool, data_window: IntegerBounds, strict: bool) -> UnitResult {
+ use self::AttributeValue::*;
+
+ match *self {
+ ChannelList(ref channels) => channels.validate(allow_sampling, data_window, strict)?,
+ TileDescription(ref value) => value.validate()?,
+ Preview(ref value) => value.validate(strict)?,
+ TimeCode(ref time_code) => time_code.validate(strict)?,
+
+ TextVector(ref vec) => if strict && vec.is_empty() {
+ return Err(Error::invalid("text vector may not be empty"))
+ },
+
+ _ => {}
+ };
+
+ Ok(())
+ }
+
+
+ /// Return `Ok(i32)` if this attribute is an i32.
+ pub fn to_i32(&self) -> Result<i32> {
+ match *self {
+ AttributeValue::I32(value) => Ok(value),
+ _ => Err(invalid_type())
+ }
+ }
+
+ /// Return `Ok(f32)` if this attribute is an f32.
+ pub fn to_f32(&self) -> Result<f32> {
+ match *self {
+ AttributeValue::F32(value) => Ok(value),
+ _ => Err(invalid_type())
+ }
+ }
+
+ /// Return `Ok(Text)` if this attribute is a text.
+ pub fn into_text(self) -> Result<Text> {
+ match self {
+ AttributeValue::Text(value) => Ok(value),
+ _ => Err(invalid_type())
+ }
+ }
+
+ /// Return `Ok(Text)` if this attribute is a text.
+ pub fn to_text(&self) -> Result<&Text> {
+ match self {
+ AttributeValue::Text(value) => Ok(value),
+ _ => Err(invalid_type())
+ }
+ }
+
+ /// Return `Ok(Chromaticities)` if this attribute is a chromaticities attribute.
+ pub fn to_chromaticities(&self) -> Result<Chromaticities> {
+ match *self {
+ AttributeValue::Chromaticities(value) => Ok(value),
+ _ => Err(invalid_type())
+ }
+ }
+
+ /// Return `Ok(TimeCode)` if this attribute is a time code.
+ pub fn to_time_code(&self) -> Result<TimeCode> {
+ match *self {
+ AttributeValue::TimeCode(value) => Ok(value),
+ _ => Err(invalid_type())
+ }
+ }
+}
+
+
+
+/// Contains string literals identifying the type of an attribute.
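+/// For example, a 32-bit float attribute is stored with the type name `b"float"` (`F32` below),
+/// and a text attribute is stored with the type name `b"string"` (`TEXT` below).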
+pub mod type_names {
+ macro_rules! define_attribute_type_names {
+ ( $($name: ident : $value: expr),* ) => {
+ $(
+ /// The byte-string name of this attribute type as it appears in an exr file.
+ pub const $name: &'static [u8] = $value;
+ )*
+ };
+ }
+
+ define_attribute_type_names! {
+ I32BOX2: b"box2i",
+ F32BOX2: b"box2f",
+ I32: b"int",
+ F32: b"float",
+ F64: b"double",
+ RATIONAL: b"rational",
+ TIME_CODE: b"timecode",
+ I32VEC2: b"v2i",
+ F32VEC2: b"v2f",
+ I32VEC3: b"v3i",
+ F32VEC3: b"v3f",
+ CHANNEL_LIST: b"chlist",
+ CHROMATICITIES: b"chromaticities",
+ COMPRESSION: b"compression",
+ ENVIRONMENT_MAP:b"envmap",
+ KEY_CODE: b"keycode",
+ LINE_ORDER: b"lineOrder",
+ F32MATRIX3X3: b"m33f",
+ F32MATRIX4X4: b"m44f",
+ PREVIEW: b"preview",
+ TEXT: b"string",
+ TEXT_VECTOR: b"stringvector",
+ TILES: b"tiledesc"
+ }
+}
+
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use ::std::io::Cursor;
+ use rand::{random, thread_rng, Rng};
+
+ #[test]
+ fn text_ord() {
+ for _ in 0..1024 {
+ let text1 = Text::from_bytes_unchecked((0..4).map(|_| rand::random::<u8>()).collect());
+ let text2 = Text::from_bytes_unchecked((0..4).map(|_| rand::random::<u8>()).collect());
+
+ assert_eq!(text1.to_string().cmp(&text2.to_string()), text1.cmp(&text2), "in text {:?} vs {:?}", text1, text2);
+ }
+ }
+
+ #[test]
+ fn rounding_up(){
+ let round_up = RoundingMode::Up;
+ assert_eq!(round_up.divide(10, 10), 1, "divide equal");
+ assert_eq!(round_up.divide(10, 2), 5, "divide even");
+ assert_eq!(round_up.divide(10, 5), 2, "divide even");
+
+ assert_eq!(round_up.divide(8, 5), 2, "round up");
+ assert_eq!(round_up.divide(10, 3), 4, "round up");
+ assert_eq!(round_up.divide(100, 50), 2, "divide even");
+ assert_eq!(round_up.divide(100, 49), 3, "round up");
+ }
+
+ #[test]
+ fn rounding_down(){
+ let round_down = RoundingMode::Down;
+ assert_eq!(round_down.divide(8, 5), 1, "round down");
+ assert_eq!(round_down.divide(10, 3), 3, "round down");
+ assert_eq!(round_down.divide(100, 50), 2, "divide even");
+ assert_eq!(round_down.divide(100, 49), 2, "round down");
+ assert_eq!(round_down.divide(100, 51), 1, "round down");
+ }
+
+ #[test]
+ fn tile_description_write_read_roundtrip(){
+ let tiles = [
+ TileDescription {
+ tile_size: Vec2(31, 7),
+ level_mode: LevelMode::MipMap,
+ rounding_mode: RoundingMode::Down,
+ },
+
+ TileDescription {
+ tile_size: Vec2(0, 0),
+ level_mode: LevelMode::Singular,
+ rounding_mode: RoundingMode::Up,
+ },
+
+ TileDescription {
+ tile_size: Vec2(4294967294, 4294967295),
+ level_mode: LevelMode::RipMap,
+ rounding_mode: RoundingMode::Down,
+ },
+ ];
+
+ for tile in &tiles {
+ let mut bytes = Vec::new();
+ tile.write(&mut bytes).unwrap();
+
+ let new_tile = TileDescription::read(&mut Cursor::new(bytes)).unwrap();
+ assert_eq!(*tile, new_tile, "tile round trip");
+ }
+ }
+
+ #[test]
+ fn attribute_write_read_roundtrip_and_byte_size(){
+ let attributes = [
+ (
+ Text::from("greeting"),
+ AttributeValue::Text(Text::from("hello")),
+ ),
+ (
+ Text::from("age"),
+ AttributeValue::I32(923),
+ ),
+ (
+ Text::from("leg count"),
+ AttributeValue::F64(9.114939599234),
+ ),
+ (
+ Text::from("rabbit area"),
+ AttributeValue::FloatRect(FloatRect {
+ min: Vec2(23.4234, 345.23),
+ max: Vec2(68623.0, 3.12425926538),
+ }),
+ ),
+ (
+ Text::from("rabbit area int"),
+ AttributeValue::IntegerBounds(IntegerBounds {
+ position: Vec2(23, 345),
+ size: Vec2(68623, 3),
+ }),
+ ),
+ (
+ Text::from("rabbit area int"),
+ AttributeValue::IntegerBounds(IntegerBounds {
+ position: Vec2(-(i32::MAX / 2 - 1), -(i32::MAX / 2 - 1)),
+ size: Vec2(i32::MAX as usize - 2, i32::MAX as usize - 2),
+ }),
+ ),
+ (
+ Text::from("rabbit area int 2"),
+ AttributeValue::IntegerBounds(IntegerBounds {
+ position: Vec2(0, 0),
+ size: Vec2(i32::MAX as usize / 2 - 1, i32::MAX as usize / 2 - 1),
+ }),
+ ),
+ (
+ Text::from("tests are difficult"),
+ AttributeValue::TextVector(vec![
+ Text::from("sdoifjpsdv"),
+ Text::from("sdoifjpsdvxxxx"),
+ Text::from("sdoifjasd"),
+ Text::from("sdoifj"),
+ Text::from("sdoifjddddddddasdasd"),
+ ]),
+ ),
+ (
+ Text::from("what should we eat tonight"),
+ AttributeValue::Preview(Preview {
+ size: Vec2(10, 30),
+ pixel_data: vec![31; 10 * 30 * 4],
+ }),
+ ),
+ (
+ Text::from("leg count, again"),
+ AttributeValue::ChannelList(ChannelList::new(smallvec![
+ ChannelDescription {
+ name: Text::from("Green"),
+ sample_type: SampleType::F16,
+ quantize_linearly: false,
+ sampling: Vec2(1,2)
+ },
+ ChannelDescription {
+ name: Text::from("Red"),
+ sample_type: SampleType::F32,
+ quantize_linearly: true,
+ sampling: Vec2(1,2)
+ },
+ ChannelDescription {
+ name: Text::from("Purple"),
+ sample_type: SampleType::U32,
+ quantize_linearly: false,
+ sampling: Vec2(0,0)
+ }
+ ],
+ )),
+ ),
+ ];
+
+ for (name, value) in &attributes {
+ let mut bytes = Vec::new();
+ super::write(name.as_slice(), value, &mut bytes).unwrap();
+ assert_eq!(super::byte_size(name, value), bytes.len(), "attribute.byte_size() for {:?}", (name, value));
+
+ let new_attribute = super::read(&mut PeekRead::new(Cursor::new(bytes)), 300).unwrap();
+ assert_eq!((name.clone(), value.clone()), (new_attribute.0, new_attribute.1.unwrap()), "attribute round trip");
+ }
+
+
+ {
+ let (name, value) = (
+ Text::from("asdkaspfokpaosdkfpaokswdpoakpsfokaposdkf"),
+ AttributeValue::I32(0),
+ );
+
+ let mut long_names = false;
+ super::validate(&name, &value, &mut long_names, false, IntegerBounds::zero(), false).unwrap();
+ assert!(long_names);
+ }
+
+ {
+ let (name, value) = (
+ Text::from("sdöksadöofkaspdolkpöasolfkcöalsod,kfcöaslodkcpöasolkfposdöksadöofkaspdolkpöasolfkcöalsod,kfcöaslodkcpöasolkfposdöksadöofkaspdolkpöasolfkcöalsod,kfcöaslodkcpöasolkfposdöksadöofkaspdolkpöasolfkcöalsod,kfcöaslodkcpöasolkfposdöksadöofkaspdolkpöasolfkcöalsod,kfcöaslodkcpöasolkfposdöksadöofkaspdolkpöasolfkcöalsod,kfcöaslodkcpöasolkfpo"),
+ AttributeValue::I32(0),
+ );
+
+ super::validate(&name, &value, &mut false, false, IntegerBounds::zero(), false).expect_err("name length check failed");
+ }
+ }
+
+ #[test]
+ fn time_code_pack(){
+ let mut rng = thread_rng();
+
+ let codes = std::iter::repeat_with(|| TimeCode {
+ hours: rng.gen_range(0 .. 24),
+ minutes: rng.gen_range(0 .. 60),
+ seconds: rng.gen_range(0 .. 60),
+ frame: rng.gen_range(0 .. 29),
+ drop_frame: random(),
+ color_frame: random(),
+ field_phase: random(),
+ binary_group_flags: [random(),random(),random()],
+ binary_groups: std::iter::repeat_with(|| rng.gen_range(0 .. 16)).take(8)
+ .collect::<SmallVec<[u8;8]>>().into_inner().unwrap()
+ });
+
+ for code in codes.take(500) {
+ code.validate(true).expect("invalid timecode test input");
+
+ { // through tv60 packing, roundtrip
+ let packed_tv60 = code.pack_time_as_tv60_u32().expect("invalid timecode test input");
+ let packed_user = code.pack_user_data_as_u32();
+ assert_eq!(TimeCode::from_tv60_time(packed_tv60, packed_user), code);
+ }
+
+ { // through bytes, roundtrip
+ let mut bytes = Vec::<u8>::new();
+ code.write(&mut bytes).unwrap();
+ let decoded = TimeCode::read(&mut bytes.as_slice()).unwrap();
+ assert_eq!(code, decoded);
+ }
+
+ {
+ let tv50_code = TimeCode {
+ drop_frame: false, // apparently, tv50 does not support drop frame, so do not use this value
+ .. code
+ };
+
+ let packed_tv50 = code.pack_time_as_tv50_u32().expect("invalid timecode test input");
+ let packed_user = code.pack_user_data_as_u32();
+ assert_eq!(TimeCode::from_tv50_time(packed_tv50, packed_user), tv50_code);
+ }
+
+ {
+ let film24_code = TimeCode {
+ // apparently, film24 does not support some flags, so do not use those values
+ color_frame: false,
+ drop_frame: false,
+ .. code
+ };
+
+ let packed_film24 = code.pack_time_as_film24_u32().expect("invalid timecode test input");
+ let packed_user = code.pack_user_data_as_u32();
+ assert_eq!(TimeCode::from_film24_time(packed_film24, packed_user), film24_code);
+ }
+ }
+ }
+
+}
diff --git a/vendor/exr/src/meta/header.rs b/vendor/exr/src/meta/header.rs
new file mode 100644
index 0000000..b322b18
--- /dev/null
+++ b/vendor/exr/src/meta/header.rs
@@ -0,0 +1,1197 @@
+
+//! Contains collections of common attributes.
+//! Defines some data types that list all standard attributes.
+
+use std::collections::HashMap;
+use crate::meta::attribute::*; // FIXME shouldn't this need some more imports????
+use crate::meta::*;
+use crate::math::Vec2;
+
+// TODO rename header to LayerDescription!
+
+/// Describes a single layer in a file.
+/// A file can have any number of layers.
+/// The meta data contains one header per layer.
+#[derive(Clone, Debug, PartialEq)]
+pub struct Header {
+
+ /// List of channels in this layer.
+ pub channels: ChannelList,
+
+ /// How the pixel data of all channels in this layer is compressed. May be `Compression::Uncompressed`.
+ pub compression: Compression,
+
+ /// Describes how the pixels of this layer are divided into smaller blocks.
+ /// A single block can be loaded without processing all bytes of a file.
+ ///
+ /// Also describes whether a file contains multiple resolution levels: mip maps or rip maps.
+ /// This allows loading not the full resolution, but the smallest sensible resolution.
+ //
+ // Required if file contains deep data or multiple layers.
+ // Note: This value must agree with the version field's tile bit and deep data bit.
+ // In this crate, this attribute will always have a value, for simplicity.
+ pub blocks: BlockDescription,
+
+ /// In what order the tiles of this header occur in the file.
+ pub line_order: LineOrder,
+
+ /// The resolution of this layer. Equivalent to the size of the `DataWindow`.
+ pub layer_size: Vec2<usize>,
+
+ /// Whether this layer contains deep data.
+ pub deep: bool,
+
+ /// This library supports only deep data version 1.
+ pub deep_data_version: Option<i32>,
+
+ /// Number of chunks, that is, scan line blocks or tiles, that this image has been divided into.
+ /// This number is calculated once at the beginning
+ /// of the read process or when creating a header object.
+ ///
+ /// This value includes all chunks of all resolution levels.
+ ///
+ ///
+ /// __Warning__
+ /// _This value is relied upon. You should probably use `Header::with_encoding`,
+ /// which automatically updates the chunk count._
+ pub chunk_count: usize,
+
+ // Required for deep data (deepscanline and deeptile) layers.
+ // Note: Since the value of "maxSamplesPerPixel"
+ // may be unknown at the time of opening the
+ // file, the value -1 is written to the file to
+ // indicate an unknown value. When the file is
+ // closed, this will be overwritten with the correct value.
+ // If file writing does not complete
+ // correctly due to an error, the value -1 will
+ // remain. In this case, the value must be derived
+ // by decoding each chunk in the layer.
+ /// Maximum number of samples in a single pixel in a deep image.
+ pub max_samples_per_pixel: Option<usize>,
+
+ /// Includes mandatory fields like pixel aspect or display window
+ /// which must be the same for all layers.
+ pub shared_attributes: ImageAttributes,
+
+ /// Does not include the attributes required for reading the file contents.
+ /// Excludes standard fields that must be the same for all headers.
+ pub own_attributes: LayerAttributes,
+}
+
+/// Includes mandatory fields like pixel aspect or display window
+/// which must be the same for all layers.
+/// For more attributes, see struct `LayerAttributes`.
+#[derive(Clone, PartialEq, Debug)]
+pub struct ImageAttributes {
+
+ /// The rectangle anywhere in the global infinite 2D space
+ /// that clips all contents of the file.
+ pub display_window: IntegerBounds,
+
+ /// Aspect ratio of each pixel in this header.
+ pub pixel_aspect: f32,
+
+ /// The chromaticities attribute of the image. See the `Chromaticities` type.
+ pub chromaticities: Option<Chromaticities>,
+
+ /// The time code of the image.
+ pub time_code: Option<TimeCode>,
+
+ /// Contains custom attributes.
+ /// Does not contain the attributes already present in the `ImageAttributes`.
+ /// Contains only attributes that are standardized to be the same for all headers: chromaticities and time codes.
+ pub other: HashMap<Text, AttributeValue>,
+}
+
+/// Does not include the attributes required for reading the file contents.
+/// Excludes standard fields that must be the same for all headers.
+/// For more attributes, see struct `ImageAttributes`.
+#[derive(Clone, PartialEq)]
+pub struct LayerAttributes {
+
+ /// The name of this layer.
+ /// Required if this file contains deep data or multiple layers.
+ // As this is an attribute value, it is not restricted in length and may even be empty
+ pub layer_name: Option<Text>,
+
+ /// The top left corner of the rectangle that positions this layer
+ /// within the global infinite 2D space of the whole file.
+ /// This represents the position of the `DataWindow`.
+ pub layer_position: Vec2<i32>,
+
+ /// Part of the perspective projection. Default should be `(0, 0)`.
+ // TODO same for all layers?
+ pub screen_window_center: Vec2<f32>,
+
+ // TODO same for all layers?
+ /// Part of the perspective projection. Default should be `1`.
+ pub screen_window_width: f32,
+
+ /// The white luminance of the colors.
+ /// Defines the luminance, in candelas per square meter (nits), of the RGB value `(1, 1, 1)`.
+ // If the chromaticities and the whiteLuminance of an RGB image are
+ // known, then it is possible to convert the image's pixels from RGB
+ // to CIE XYZ tristimulus values (see function RGBtoXYZ() in header
+ // file ImfChromaticities.h).
+ pub white_luminance: Option<f32>,
+
+ /// The adopted neutral of the colors. Specifies the CIE (x,y) chromaticity coordinates that should
+ /// be considered neutral during color rendering. Pixels in the image
+ /// whose CIE (x,y) chromaticity coordinates match the adopted neutral value should
+ /// be mapped to neutral values on the given display.
+ pub adopted_neutral: Option<Vec2<f32>>,
+
+ /// Name of the color transform function that is applied for rendering the image.
+ pub rendering_transform_name: Option<Text>,
+
+ /// Name of the color transform function that computes the look modification of the image.
+ pub look_modification_transform_name: Option<Text>,
+
+ /// The horizontal density, in pixels per inch.
+ /// The image's vertical output density can be computed using `horizontal_density * pixel_aspect_ratio`.
+ pub horizontal_density: Option<f32>,
+
+ /// Name of the owner.
+ pub owner: Option<Text>,
+
+ /// Additional textual information.
+ pub comments: Option<Text>,
+
+ /// The date of image creation, in `YYYY:MM:DD hh:mm:ss` format.
+ // TODO parse!
+ pub capture_date: Option<Text>,
+
+ /// Time offset from UTC.
+ pub utc_offset: Option<f32>,
+
+ /// Geographical image location.
+ pub longitude: Option<f32>,
+
+ /// Geographical image location.
+ pub latitude: Option<f32>,
+
+ /// Geographical image location.
+ pub altitude: Option<f32>,
+
+ /// Camera focus in meters.
+ pub focus: Option<f32>,
+
+ /// Exposure time in seconds.
+ pub exposure: Option<f32>,
+
+ /// Camera aperture measured in f-stops. Equals the focal length
+ /// of the lens divided by the diameter of the iris opening.
+ pub aperture: Option<f32>,
+
+ /// ISO speed of the camera sensor.
+ pub iso_speed: Option<f32>,
+
+ /// If this is an environment map, specifies how to interpret it.
+ pub environment_map: Option<EnvironmentMap>,
+
+ /// Identifies film manufacturer, film type, film roll and frame position within the roll.
+ pub film_key_code: Option<KeyCode>,
+
+ /// Specifies how texture map images are extrapolated.
+ /// Values can be `black`, `clamp`, `periodic`, or `mirror`.
+ pub wrap_mode_name: Option<Text>,
+
+ /// Frames per second if this is a frame in a sequence.
+ pub frames_per_second: Option<Rational>,
+
+ /// Specifies the view names for multi-view (for example, stereo) image files.
+ pub multi_view_names: Option<Vec<Text>>,
+
+ /// The matrix that transforms 3D points from the world to the camera coordinate space.
+ /// Left-handed coordinate system, y up, z forward.
+ pub world_to_camera: Option<Matrix4x4>,
+
+ /// The matrix that transforms 3D points from the world to the "Normalized Device Coordinate" space.
+ /// Left-handed coordinate system, y up, z forward.
+ pub world_to_normalized_device: Option<Matrix4x4>,
+
+ /// Specifies whether the pixels in a deep image are sorted and non-overlapping.
+ pub deep_image_state: Option<Rational>,
+
+ /// If the image was cropped, contains the original data window.
+ pub original_data_window: Option<IntegerBounds>,
+
+ /// An 8-bit rgba image representing the rendered image.
+ pub preview: Option<Preview>,
+
+ /// Name of the view, which is typically either `"right"` or `"left"` for a stereoscopic image.
+ pub view_name: Option<Text>,
+
+ /// The name of the software that produced this image.
+ pub software_name: Option<Text>,
+
+ /// The near clip plane of the virtual camera projection.
+ pub near_clip_plane: Option<f32>,
+
+ /// The far clip plane of the virtual camera projection.
+ pub far_clip_plane: Option<f32>,
+
+ /// The field of view angle, along the horizontal axis, in degrees.
+ pub horizontal_field_of_view: Option<f32>,
+
+ /// The field of view angle, along the vertical axis, in degrees.
+ pub vertical_field_of_view: Option<f32>,
+
+ /// Contains custom attributes.
+ /// Does not contain the attributes already present in the `Header` or `LayerAttributes` struct.
+ /// Does not contain attributes that are standardized to be the same for all layers: no chromaticities and no time codes.
+ pub other: HashMap<Text, AttributeValue>,
+}
+
+
+impl LayerAttributes {
+
+ /// Create default layer attributes with a data position of zero.
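+ ///
+ /// A minimal usage sketch (not compiled), also using `with_position` defined below:
+ /// ```ignore
+ /// let attributes = LayerAttributes::named(Text::from("rendered")).with_position(Vec2(32, 32));
+ /// ```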
+ pub fn named(layer_name: impl Into<Text>) -> Self {
+ Self {
+ layer_name: Some(layer_name.into()),
+ .. Self::default()
+ }
+ }
+
+ /// Set the data position of this layer.
+ pub fn with_position(self, data_position: Vec2<i32>) -> Self {
+ Self { layer_position: data_position, ..self }
+ }
+
+ /// Set all common camera projection attributes at once.
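+ ///
+ /// A sketch of a possible call (not compiled; the two matrices are hypothetical
+ /// `Matrix4x4` values, that is, `[f32; 16]` arrays provided by the caller):
+ /// ```ignore
+ /// let attributes = LayerAttributes::default().with_camera_frustum(
+ ///     world_to_camera_matrix,            // hypothetical [f32; 16]
+ ///     world_to_normalized_device_matrix, // hypothetical [f32; 16]
+ ///     Vec2(90.0_f32, 60.0_f32),          // horizontal and vertical field of view, in degrees
+ ///     0.1 .. 1000.0,                     // near and far clip planes
+ /// );
+ /// ```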
+ pub fn with_camera_frustum(
+ self,
+ world_to_camera: Matrix4x4,
+ world_to_normalized_device: Matrix4x4,
+ field_of_view: impl Into<Vec2<f32>>,
+ depth_clip_range: std::ops::Range<f32>,
+ ) -> Self
+ {
+ let fov = field_of_view.into();
+
+ Self {
+ world_to_normalized_device: Some(world_to_normalized_device),
+ world_to_camera: Some(world_to_camera),
+ horizontal_field_of_view: Some(fov.x()),
+ vertical_field_of_view: Some(fov.y()),
+ near_clip_plane: Some(depth_clip_range.start),
+ far_clip_plane: Some(depth_clip_range.end),
+ ..self
+ }
+ }
+}
+
+impl ImageAttributes {
+
+ /// Set the display position and size of this image.
+ pub fn new(display_window: IntegerBounds) -> Self {
+ Self {
+ pixel_aspect: 1.0,
+ chromaticities: None,
+ time_code: None,
+ other: Default::default(),
+ display_window,
+ }
+ }
+
+ /// Set the display position to zero and use the specified size for this image.
+ pub fn with_size(size: impl Into<Vec2<usize>>) -> Self {
+ Self::new(IntegerBounds::from_dimensions(size))
+ }
+}
+
+
+
+
+impl Header {
+
+ /// Create a new Header with the specified name, display window and channels.
+ /// Use `Header::with_encoding` and the similar methods to add further properties to the header.
+ ///
+ /// The other settings are left to their default values:
+ /// - RLE compression
+ /// - display window equal to data window
+ /// - tiles (64 x 64 px)
+ /// - unspecified line order
+ /// - no custom attributes
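+ ///
+ /// A minimal construction sketch (not compiled), using the channel description
+ /// fields as defined in this crate:
+ /// ```ignore
+ /// let header = Header::new(
+ ///     Text::from("main"),
+ ///     Vec2(1024_usize, 768_usize),
+ ///     smallvec![ChannelDescription {
+ ///         name: Text::from("G"),
+ ///         sample_type: SampleType::F16,
+ ///         quantize_linearly: false,
+ ///         sampling: Vec2(1, 1),
+ ///     }],
+ /// );
+ /// ```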
+ pub fn new(name: Text, data_size: impl Into<Vec2<usize>>, channels: SmallVec<[ChannelDescription; 5]>) -> Self {
+ let data_size: Vec2<usize> = data_size.into();
+
+ let compression = Compression::RLE;
+ let blocks = BlockDescription::Tiles(TileDescription {
+ tile_size: Vec2(64, 64),
+ level_mode: LevelMode::Singular,
+ rounding_mode: RoundingMode::Down
+ });
+
+ Self {
+ layer_size: data_size,
+ compression,
+ blocks,
+
+ channels: ChannelList::new(channels),
+ line_order: LineOrder::Unspecified,
+
+ shared_attributes: ImageAttributes::with_size(data_size),
+ own_attributes: LayerAttributes::named(name),
+
+ chunk_count: compute_chunk_count(compression, data_size, blocks),
+
+ deep: false,
+ deep_data_version: None,
+ max_samples_per_pixel: None,
+ }
+ }
+
+ /// Set the display window, that is, the global clipping rectangle.
+ /// __Must be the same for all headers of a file.__
+ pub fn with_display_window(mut self, display_window: IntegerBounds) -> Self {
+ self.shared_attributes.display_window = display_window;
+ self
+ }
+
+ /// Set the offset of this layer.
+ pub fn with_position(mut self, position: Vec2<i32>) -> Self {
+ self.own_attributes.layer_position = position;
+ self
+ }
+
+ /// Set compression, tiling, and line order. Automatically computes chunk count.
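+ ///
+ /// A minimal sketch (not compiled), switching an existing header to
+ /// RLE-compressed scan line blocks with increasing line order:
+ /// ```ignore
+ /// let header = header.with_encoding(
+ ///     Compression::RLE,
+ ///     BlockDescription::ScanLines,
+ ///     LineOrder::Increasing,
+ /// );
+ /// ```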
+ pub fn with_encoding(self, compression: Compression, blocks: BlockDescription, line_order: LineOrder) -> Self {
+ Self {
+ chunk_count: compute_chunk_count(compression, self.layer_size, blocks),
+ compression, blocks, line_order,
+ .. self
+ }
+ }
+
+ /// Set **all** attributes of the header that are not shared with all other headers in the image.
+ pub fn with_attributes(self, own_attributes: LayerAttributes) -> Self {
+ Self { own_attributes, .. self }
+ }
+
+ /// Set **all** attributes of the header that are shared with all other headers in the image.
+ pub fn with_shared_attributes(self, shared_attributes: ImageAttributes) -> Self {
+ Self { shared_attributes, .. self }
+ }
+
+ /// Iterate over all blocks, in the order specified by the headers line order attribute.
+ /// Unspecified line order is treated as increasing line order.
+ /// Also enumerates the index of each block in the header, as if it were sorted in increasing line order.
+ pub fn enumerate_ordered_blocks(&self) -> impl Iterator<Item=(usize, TileIndices)> + Send {
+ let increasing_y = self.blocks_increasing_y_order().enumerate();
+
+ // TODO without box?
+ let ordered: Box<dyn Send + Iterator<Item=(usize, TileIndices)>> = {
+ if self.line_order == LineOrder::Decreasing { Box::new(increasing_y.rev()) }
+ else { Box::new(increasing_y) }
+ };
+
+ ordered
+ }
+
+ /*/// Iterate over all blocks, in the order specified by the headers line order attribute.
+ /// Also includes an index of the block if it were `LineOrder::Increasing`, starting at zero for this header.
+ pub fn enumerate_ordered_blocks(&self) -> impl Iterator<Item = (usize, TileIndices)> + Send {
+ let increasing_y = self.blocks_increasing_y_order().enumerate();
+
+ let ordered: Box<dyn Send + Iterator<Item = (usize, TileIndices)>> = {
+ if self.line_order == LineOrder::Decreasing {
+ Box::new(increasing_y.rev()) // TODO without box?
+ }
+ else {
+ Box::new(increasing_y)
+ }
+ };
+
+ ordered
+ }*/
+
+ /// Iterate over all tile indices in this header in `LineOrder::Increasing` order.
+ pub fn blocks_increasing_y_order(&self) -> impl Iterator<Item = TileIndices> + ExactSizeIterator + DoubleEndedIterator {
+ fn tiles_of(image_size: Vec2<usize>, tile_size: Vec2<usize>, level_index: Vec2<usize>) -> impl Iterator<Item=TileIndices> {
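+ // yields (block index, block size) along one axis; the last block may be smaller than `block_size`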
+ fn divide_and_rest(total_size: usize, block_size: usize) -> impl Iterator<Item=(usize, usize)> {
+ let block_count = compute_block_count(total_size, block_size);
+ (0..block_count).map(move |block_index| (
+ block_index, calculate_block_size(total_size, block_size, block_index).expect("block size calculation bug")
+ ))
+ }
+
+ divide_and_rest(image_size.height(), tile_size.height()).flat_map(move |(y_index, tile_height)|{
+ divide_and_rest(image_size.width(), tile_size.width()).map(move |(x_index, tile_width)|{
+ TileIndices {
+ size: Vec2(tile_width, tile_height),
+ location: TileCoordinates { tile_index: Vec2(x_index, y_index), level_index, },
+ }
+ })
+ })
+ }
+
+ let vec: Vec<TileIndices> = {
+ if let BlockDescription::Tiles(tiles) = self.blocks {
+ match tiles.level_mode {
+ LevelMode::Singular => {
+ tiles_of(self.layer_size, tiles.tile_size, Vec2(0, 0)).collect()
+ },
+ LevelMode::MipMap => {
+ mip_map_levels(tiles.rounding_mode, self.layer_size)
+ .flat_map(move |(level_index, level_size)|{
+ tiles_of(level_size, tiles.tile_size, Vec2(level_index, level_index))
+ })
+ .collect()
+ },
+ LevelMode::RipMap => {
+ rip_map_levels(tiles.rounding_mode, self.layer_size)
+ .flat_map(move |(level_index, level_size)| {
+ tiles_of(level_size, tiles.tile_size, level_index)
+ })
+ .collect()
+ }
+ }
+ }
+ else {
+ let tiles = Vec2(self.layer_size.0, self.compression.scan_lines_per_block());
+ tiles_of(self.layer_size, tiles, Vec2(0, 0)).collect()
+ }
+ };
+
+ vec.into_iter() // TODO without collect
+ }
+
+ /* TODO
+ /// The block indices of this header, ordered as they would appear in the file.
+ pub fn ordered_block_indices<'s>(&'s self, layer_index: usize) -> impl 's + Iterator<Item=BlockIndex> {
+ self.enumerate_ordered_blocks().map(|(chunk_index, tile)|{
+ let data_indices = self.get_absolute_block_pixel_coordinates(tile.location).expect("tile coordinate bug");
+
+ BlockIndex {
+ layer: layer_index,
+ level: tile.location.level_index,
+ pixel_position: data_indices.position.to_usize("data indices start").expect("data index bug"),
+ pixel_size: data_indices.size,
+ }
+ })
+ }*/
+
+ // TODO reuse this function everywhere
+ /// The default pixel resolution of a single block (tile or scan line block).
+ /// Not all blocks have this size, because they may be cut off at the end of the image.
+ pub fn max_block_pixel_size(&self) -> Vec2<usize> {
+ match self.blocks {
+ BlockDescription::ScanLines => Vec2(self.layer_size.0, self.compression.scan_lines_per_block()),
+ BlockDescription::Tiles(tiles) => tiles.tile_size,
+ }
+ }
+
+ /// Calculate the position of a block in the global infinite 2D space of a file. May be negative.
+ pub fn get_block_data_window_pixel_coordinates(&self, tile: TileCoordinates) -> Result<IntegerBounds> {
+ let data = self.get_absolute_block_pixel_coordinates(tile)?;
+ Ok(data.with_origin(self.own_attributes.layer_position))
+ }
+
+ /// Calculate the pixel index rectangle inside this header. Is not negative. Starts at `0`.
+ pub fn get_absolute_block_pixel_coordinates(&self, tile: TileCoordinates) -> Result<IntegerBounds> {
+ if let BlockDescription::Tiles(tiles) = self.blocks {
+ let Vec2(data_width, data_height) = self.layer_size;
+
+ let data_width = compute_level_size(tiles.rounding_mode, data_width, tile.level_index.x());
+ let data_height = compute_level_size(tiles.rounding_mode, data_height, tile.level_index.y());
+ let absolute_tile_coordinates = tile.to_data_indices(tiles.tile_size, Vec2(data_width, data_height))?;
+
+ if absolute_tile_coordinates.position.x() as i64 >= data_width as i64 || absolute_tile_coordinates.position.y() as i64 >= data_height as i64 {
+ return Err(Error::invalid("data block tile index"))
+ }
+
+ Ok(absolute_tile_coordinates)
+ }
+ else { // this is a scanline image
+ debug_assert_eq!(tile.tile_index.0, 0, "block index calculation bug");
+
+ let (y, height) = calculate_block_position_and_size(
+ self.layer_size.height(),
+ self.compression.scan_lines_per_block(),
+ tile.tile_index.y()
+ )?;
+
+ Ok(IntegerBounds {
+ position: Vec2(0, usize_to_i32(y)),
+ size: Vec2(self.layer_size.width(), height)
+ })
+ }
+
+ // TODO deep data?
+ }
+
+ /// Return the tile index, converting scan line block coordinates to tile indices.
+ /// Starts at `0` and is not negative.
+ pub fn get_block_data_indices(&self, block: &CompressedBlock) -> Result<TileCoordinates> {
+ Ok(match block {
+ CompressedBlock::Tile(ref tile) => {
+ tile.coordinates
+ },
+
+ CompressedBlock::ScanLine(ref block) => {
+ let size = self.compression.scan_lines_per_block() as i32;
+
+ let diff = block.y_coordinate.checked_sub(self.own_attributes.layer_position.y()).ok_or(Error::invalid("invalid header"))?;
+ let y = diff.checked_div(size).ok_or(Error::invalid("invalid header"))?;
+
+ if y < 0 {
+ return Err(Error::invalid("scan block y coordinate"));
+ }
+
+ TileCoordinates {
+ tile_index: Vec2(0, y as usize),
+ level_index: Vec2(0, 0)
+ }
+ },
+
+ _ => return Err(Error::unsupported("deep data not supported yet"))
+ })
+ }
+
+ /// Computes the absolute tile coordinate data indices, which start at `0`.
+ pub fn get_scan_line_block_tile_coordinates(&self, block_y_coordinate: i32) -> Result<TileCoordinates> {
+ let size = self.compression.scan_lines_per_block() as i32;
+
+ let diff = block_y_coordinate.checked_sub(self.own_attributes.layer_position.1).ok_or(Error::invalid("invalid header"))?;
+ let y = diff.checked_div(size).ok_or(Error::invalid("invalid header"))?;
+
+ if y < 0 {
+ return Err(Error::invalid("scan block y coordinate"));
+ }
+
+ Ok(TileCoordinates {
+ tile_index: Vec2(0, y as usize),
+ level_index: Vec2(0, 0)
+ })
+ }
+
+ /// Maximum byte length of an uncompressed or compressed block, used for validation.
+ pub fn max_block_byte_size(&self) -> usize {
+ self.channels.bytes_per_pixel * match self.blocks {
+ BlockDescription::Tiles(tiles) => tiles.tile_size.area(),
+ BlockDescription::ScanLines => self.compression.scan_lines_per_block() * self.layer_size.width()
+ // TODO What about deep data???
+ }
+ }
+
+ /// Returns the number of bytes that the pixels of this header will require
+ /// when stored without compression. Respects multi-resolution levels and subsampling.
+ pub fn total_pixel_bytes(&self) -> usize {
+ assert!(!self.deep);
+
+ let pixel_count_of_levels = |size: Vec2<usize>| -> usize {
+ match self.blocks {
+ BlockDescription::ScanLines => size.area(),
+ BlockDescription::Tiles(tile_description) => match tile_description.level_mode {
+ LevelMode::Singular => size.area(),
+
+ LevelMode::MipMap => mip_map_levels(tile_description.rounding_mode, size)
+ .map(|(_, size)| size.area()).sum(),
+
+ LevelMode::RipMap => rip_map_levels(tile_description.rounding_mode, size)
+ .map(|(_, size)| size.area()).sum(),
+ }
+ }
+ };
+
+ self.channels.list.iter()
+ .map(|channel: &ChannelDescription|
+ pixel_count_of_levels(channel.subsampled_resolution(self.layer_size)) * channel.sample_type.bytes_per_sample()
+ )
+ .sum()
+
+ }
+
+ /// Approximates the maximum number of bytes that the pixels of this header will consume in a file.
+ /// Due to compression, the actual byte size may be smaller.
+ pub fn max_pixel_file_bytes(&self) -> usize {
+ assert!(!self.deep);
+
+ self.chunk_count * 64 // at most 64 bytes overhead for each chunk (header index, tile description, chunk size, and more)
+ + self.total_pixel_bytes()
+ }
+
+ /// Validate this instance.
+ pub fn validate(&self, is_multilayer: bool, long_names: &mut bool, strict: bool) -> UnitResult {
+
+ self.data_window().validate(None)?;
+ self.shared_attributes.display_window.validate(None)?;
+
+ if strict {
+ if is_multilayer {
+ if self.own_attributes.layer_name.is_none() {
+ return Err(missing_attribute("layer name for multi layer file"));
+ }
+ }
+
+ if self.blocks == BlockDescription::ScanLines && self.line_order == LineOrder::Unspecified {
+ return Err(Error::invalid("unspecified line order in scan line images"));
+ }
+
+ if self.layer_size == Vec2(0, 0) {
+ return Err(Error::invalid("empty data window"));
+ }
+
+ if self.shared_attributes.display_window.size == Vec2(0,0) {
+ return Err(Error::invalid("empty display window"));
+ }
+
+ if !self.shared_attributes.pixel_aspect.is_normal() || self.shared_attributes.pixel_aspect < 1.0e-6 || self.shared_attributes.pixel_aspect > 1.0e6 {
+ return Err(Error::invalid("pixel aspect ratio"));
+ }
+
+ if self.own_attributes.screen_window_width < 0.0 {
+ return Err(Error::invalid("screen window width"));
+ }
+ }
+
+ let allow_subsampling = !self.deep && self.blocks == BlockDescription::ScanLines;
+ self.channels.validate(allow_subsampling, self.data_window(), strict)?;
+
+ for (name, value) in &self.shared_attributes.other {
+ attribute::validate(name, value, long_names, allow_subsampling, self.data_window(), strict)?;
+ }
+
+ for (name, value) in &self.own_attributes.other {
+ attribute::validate(name, value, long_names, allow_subsampling, self.data_window(), strict)?;
+ }
+
+ // this is only to check whether someone tampered with our precious values, to avoid writing an invalid file
+ if self.chunk_count != compute_chunk_count(self.compression, self.layer_size, self.blocks) {
+ return Err(Error::invalid("chunk count attribute")); // TODO this may be an expensive check?
+ }
+
+ // check if attribute names appear twice
+ if strict {
+ for (name, _) in &self.shared_attributes.other {
+ if self.own_attributes.other.contains_key(name) {
+ return Err(Error::invalid(format!("duplicate attribute name: `{}`", name)));
+ }
+ }
+
+ for &reserved in header::standard_names::ALL.iter() {
+ let name = Text::from_bytes_unchecked(SmallVec::from_slice(reserved));
+ if self.own_attributes.other.contains_key(&name) || self.shared_attributes.other.contains_key(&name) {
+ return Err(Error::invalid(format!(
+ "attribute name `{}` is reserved and cannot be custom",
+ Text::from_bytes_unchecked(reserved.into())
+ )));
+ }
+ }
+ }
+
+ if self.deep {
+ if strict {
+ if self.own_attributes.layer_name.is_none() {
+ return Err(missing_attribute("layer name for deep file"));
+ }
+
+ if self.max_samples_per_pixel.is_none() {
+ return Err(Error::invalid("missing max samples per pixel attribute for deepdata"));
+ }
+ }
+
+ match self.deep_data_version {
+ Some(1) => {},
+ Some(_) => return Err(Error::unsupported("deep data version")),
+ None => return Err(missing_attribute("deep data version")),
+ }
+
+ if !self.compression.supports_deep_data() {
+ return Err(Error::invalid("compression method does not support deep data"));
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Read the headers without validating them.
+ pub fn read_all(read: &mut PeekRead<impl Read>, version: &Requirements, pedantic: bool) -> Result<Headers> {
+ if !version.is_multilayer() {
+ Ok(smallvec![ Header::read(read, version, pedantic)? ])
+ }
+ else {
+ let mut headers = SmallVec::new();
+
+ while !sequence_end::has_come(read)? {
+ headers.push(Header::read(read, version, pedantic)?);
+ }
+
+ Ok(headers)
+ }
+ }
+
+ /// Without validation, write the headers to the byte stream.
+ pub fn write_all(headers: &[Header], write: &mut impl Write, is_multilayer: bool) -> UnitResult {
+ for header in headers {
+ header.write(write)?;
+ }
+
+ if is_multilayer {
+ sequence_end::write(write)?;
+ }
+
+ Ok(())
+ }
+
+ /// Read the value without validating.
+ pub fn read(read: &mut PeekRead<impl Read>, requirements: &Requirements, pedantic: bool) -> Result<Self> {
+ let max_string_len = if requirements.has_long_names { 256 } else { 32 }; // TODO DRY this information
+
+ // these required attributes will be filled when encountered while parsing
+ let mut tiles = None;
+ let mut block_type = None;
+ let mut version = None;
+ let mut chunk_count = None;
+ let mut max_samples_per_pixel = None;
+ let mut channels = None;
+ let mut compression = None;
+ let mut data_window = None;
+ let mut display_window = None;
+ let mut line_order = None;
+
+ let mut dwa_compression_level = None;
+
+ let mut layer_attributes = LayerAttributes::default();
+ let mut image_attributes = ImageAttributes::new(IntegerBounds::zero());
+
+ // read each attribute in this header
+ while !sequence_end::has_come(read)? {
+ let (attribute_name, value) = attribute::read(read, max_string_len)?;
+
+ // if the attribute value itself is ok, record it
+ match value {
+ Ok(value) => {
+ use crate::meta::header::standard_names as name;
+ use crate::meta::attribute::AttributeValue::*;
+
+ // if the attribute is a required attribute, set the corresponding variable directly.
+ // otherwise, add the attribute to the vector of custom attributes
+
+ // the following attributes will only be set if the type matches the commonly used type for that attribute
+ match (attribute_name.as_slice(), value) {
+ (name::BLOCK_TYPE, Text(value)) => block_type = Some(attribute::BlockType::parse(value)?),
+ (name::TILES, TileDescription(value)) => tiles = Some(value),
+ (name::CHANNELS, ChannelList(value)) => channels = Some(value),
+ (name::COMPRESSION, Compression(value)) => compression = Some(value),
+ (name::DATA_WINDOW, IntegerBounds(value)) => data_window = Some(value),
+ (name::DISPLAY_WINDOW, IntegerBounds(value)) => display_window = Some(value),
+ (name::LINE_ORDER, LineOrder(value)) => line_order = Some(value),
+ (name::DEEP_DATA_VERSION, I32(value)) => version = Some(value),
+
+ (name::MAX_SAMPLES, I32(value)) => max_samples_per_pixel = Some(
+ i32_to_usize(value, "max sample count")?
+ ),
+
+ (name::CHUNKS, I32(value)) => chunk_count = Some(
+ i32_to_usize(value, "chunk count")?
+ ),
+
+ (name::NAME, Text(value)) => layer_attributes.layer_name = Some(value),
+ (name::WINDOW_CENTER, FloatVec2(value)) => layer_attributes.screen_window_center = value,
+ (name::WINDOW_WIDTH, F32(value)) => layer_attributes.screen_window_width = value,
+
+ (name::WHITE_LUMINANCE, F32(value)) => layer_attributes.white_luminance = Some(value),
+ (name::ADOPTED_NEUTRAL, FloatVec2(value)) => layer_attributes.adopted_neutral = Some(value),
+ (name::RENDERING_TRANSFORM, Text(value)) => layer_attributes.rendering_transform_name = Some(value),
+ (name::LOOK_MOD_TRANSFORM, Text(value)) => layer_attributes.look_modification_transform_name = Some(value),
+ (name::X_DENSITY, F32(value)) => layer_attributes.horizontal_density = Some(value),
+
+ (name::OWNER, Text(value)) => layer_attributes.owner = Some(value),
+ (name::COMMENTS, Text(value)) => layer_attributes.comments = Some(value),
+ (name::CAPTURE_DATE, Text(value)) => layer_attributes.capture_date = Some(value),
+ (name::UTC_OFFSET, F32(value)) => layer_attributes.utc_offset = Some(value),
+ (name::LONGITUDE, F32(value)) => layer_attributes.longitude = Some(value),
+ (name::LATITUDE, F32(value)) => layer_attributes.latitude = Some(value),
+ (name::ALTITUDE, F32(value)) => layer_attributes.altitude = Some(value),
+ (name::FOCUS, F32(value)) => layer_attributes.focus = Some(value),
+ (name::EXPOSURE_TIME, F32(value)) => layer_attributes.exposure = Some(value),
+ (name::APERTURE, F32(value)) => layer_attributes.aperture = Some(value),
+ (name::ISO_SPEED, F32(value)) => layer_attributes.iso_speed = Some(value),
+ (name::ENVIRONMENT_MAP, EnvironmentMap(value)) => layer_attributes.environment_map = Some(value),
+ (name::KEY_CODE, KeyCode(value)) => layer_attributes.film_key_code = Some(value),
+ (name::WRAP_MODES, Text(value)) => layer_attributes.wrap_mode_name = Some(value),
+ (name::FRAMES_PER_SECOND, Rational(value)) => layer_attributes.frames_per_second = Some(value),
+ (name::MULTI_VIEW, TextVector(value)) => layer_attributes.multi_view_names = Some(value),
+ (name::WORLD_TO_CAMERA, Matrix4x4(value)) => layer_attributes.world_to_camera = Some(value),
+ (name::WORLD_TO_NDC, Matrix4x4(value)) => layer_attributes.world_to_normalized_device = Some(value),
+ (name::DEEP_IMAGE_STATE, Rational(value)) => layer_attributes.deep_image_state = Some(value),
+ (name::ORIGINAL_DATA_WINDOW, IntegerBounds(value)) => layer_attributes.original_data_window = Some(value),
+ (name::DWA_COMPRESSION_LEVEL, F32(value)) => dwa_compression_level = Some(value),
+ (name::PREVIEW, Preview(value)) => layer_attributes.preview = Some(value),
+ (name::VIEW, Text(value)) => layer_attributes.view_name = Some(value),
+
+ (name::NEAR, F32(value)) => layer_attributes.near_clip_plane = Some(value),
+ (name::FAR, F32(value)) => layer_attributes.far_clip_plane = Some(value),
+ (name::FOV_X, F32(value)) => layer_attributes.horizontal_field_of_view = Some(value),
+ (name::FOV_Y, F32(value)) => layer_attributes.vertical_field_of_view = Some(value),
+ (name::SOFTWARE, Text(value)) => layer_attributes.software_name = Some(value),
+
+ (name::PIXEL_ASPECT, F32(value)) => image_attributes.pixel_aspect = value,
+ (name::TIME_CODE, TimeCode(value)) => image_attributes.time_code = Some(value),
+ (name::CHROMATICITIES, Chromaticities(value)) => image_attributes.chromaticities = Some(value),
+
+ // insert unknown attributes of these types into image attributes,
+ // as these must be the same for all headers
+ (_, value @ Chromaticities(_)) |
+ (_, value @ TimeCode(_)) => {
+ image_attributes.other.insert(attribute_name, value);
+ },
+
+ // insert unknown attributes into layer attributes
+ (_, value) => {
+ layer_attributes.other.insert(attribute_name, value);
+ },
+
+ }
+ },
+
+ // if the attribute value itself is invalid, but the rest of the image may still be readable,
+ // only abort reading the image when pedantic mode is requested
+ Err(error) => {
+ if pedantic { return Err(error); }
+ }
+ }
+ }
+
+ // construct compression with parameters from properties
+ let compression = match (dwa_compression_level, compression) {
+ (Some(level), Some(Compression::DWAA(_))) => Some(Compression::DWAA(Some(level))),
+ (Some(level), Some(Compression::DWAB(_))) => Some(Compression::DWAB(Some(level))),
+ (_, other) => other,
+ // FIXME dwa compression level gets lost if any other compression is used later in the process
+ };
+
+ let compression = compression.ok_or(missing_attribute("compression"))?;
+ image_attributes.display_window = display_window.ok_or(missing_attribute("display window"))?;
+
+ let data_window = data_window.ok_or(missing_attribute("data window"))?;
+ data_window.validate(None)?; // validate now to avoid errors when computing the chunk_count
+ layer_attributes.layer_position = data_window.position;
+
+
+ // validate now to avoid errors when computing the chunk_count
+ if let Some(tiles) = tiles { tiles.validate()?; }
+ let blocks = match block_type {
+ None if requirements.is_single_layer_and_tiled => {
+ BlockDescription::Tiles(tiles.ok_or(missing_attribute("tiles"))?)
+ },
+ Some(BlockType::Tile) | Some(BlockType::DeepTile) => {
+ BlockDescription::Tiles(tiles.ok_or(missing_attribute("tiles"))?)
+ },
+
+ _ => BlockDescription::ScanLines,
+ };
+
+ let computed_chunk_count = compute_chunk_count(compression, data_window.size, blocks);
+ if chunk_count.is_some() && pedantic && chunk_count != Some(computed_chunk_count) {
+ return Err(Error::invalid("chunk count not matching data size"));
+ }
+
+ let header = Header {
+ compression,
+
+ // always compute ourselves, because we cannot trust anyone out there 😱
+ chunk_count: computed_chunk_count,
+
+ layer_size: data_window.size,
+
+ shared_attributes: image_attributes,
+ own_attributes: layer_attributes,
+
+ channels: channels.ok_or(missing_attribute("channels"))?,
+ line_order: line_order.unwrap_or(LineOrder::Unspecified),
+
+ blocks,
+ max_samples_per_pixel,
+ deep_data_version: version,
+ deep: block_type == Some(BlockType::DeepScanLine) || block_type == Some(BlockType::DeepTile),
+ };
+
+ Ok(header)
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write(&self, write: &mut impl Write) -> UnitResult {
+
+ macro_rules! write_attributes {
+ ( $($name: ident : $variant: ident = $value: expr),* ) => { $(
+ attribute::write($name, & $variant ($value .clone()), write)?; // TODO without clone
+ )* };
+ }
+
+ macro_rules! write_optional_attributes {
+ ( $($name: ident : $variant: ident = $value: expr),* ) => { $(
+ if let Some(value) = $value {
+ attribute::write($name, & $variant (value.clone()), write)?; // TODO without clone
+ };
+ )* };
+ }
+
+ use crate::meta::header::standard_names::*;
+ use AttributeValue::*;
+
+ let (block_type, tiles) = match self.blocks {
+ BlockDescription::ScanLines => (attribute::BlockType::ScanLine, None),
+ BlockDescription::Tiles(tiles) => (attribute::BlockType::Tile, Some(tiles))
+ };
+
+ fn usize_as_i32(value: usize) -> AttributeValue {
+ I32(i32::try_from(value).expect("usize exceeds i32 range"))
+ }
+
+ write_optional_attributes!(
+ TILES: TileDescription = &tiles,
+ DEEP_DATA_VERSION: I32 = &self.deep_data_version,
+ MAX_SAMPLES: usize_as_i32 = &self.max_samples_per_pixel
+ );
+
+ write_attributes!(
+ // chunks is not actually required, but always computed in this library anyway
+ CHUNKS: usize_as_i32 = &self.chunk_count,
+
+ BLOCK_TYPE: BlockType = &block_type,
+ CHANNELS: ChannelList = &self.channels,
+ COMPRESSION: Compression = &self.compression,
+ LINE_ORDER: LineOrder = &self.line_order,
+ DATA_WINDOW: IntegerBounds = &self.data_window(),
+
+ DISPLAY_WINDOW: IntegerBounds = &self.shared_attributes.display_window,
+ PIXEL_ASPECT: F32 = &self.shared_attributes.pixel_aspect,
+
+ WINDOW_CENTER: FloatVec2 = &self.own_attributes.screen_window_center,
+ WINDOW_WIDTH: F32 = &self.own_attributes.screen_window_width
+ );
+
+ write_optional_attributes!(
+ NAME: Text = &self.own_attributes.layer_name,
+ WHITE_LUMINANCE: F32 = &self.own_attributes.white_luminance,
+ ADOPTED_NEUTRAL: FloatVec2 = &self.own_attributes.adopted_neutral,
+ RENDERING_TRANSFORM: Text = &self.own_attributes.rendering_transform_name,
+ LOOK_MOD_TRANSFORM: Text = &self.own_attributes.look_modification_transform_name,
+ X_DENSITY: F32 = &self.own_attributes.horizontal_density,
+ OWNER: Text = &self.own_attributes.owner,
+ COMMENTS: Text = &self.own_attributes.comments,
+ CAPTURE_DATE: Text = &self.own_attributes.capture_date,
+ UTC_OFFSET: F32 = &self.own_attributes.utc_offset,
+ LONGITUDE: F32 = &self.own_attributes.longitude,
+ LATITUDE: F32 = &self.own_attributes.latitude,
+ ALTITUDE: F32 = &self.own_attributes.altitude,
+ FOCUS: F32 = &self.own_attributes.focus,
+ EXPOSURE_TIME: F32 = &self.own_attributes.exposure,
+ APERTURE: F32 = &self.own_attributes.aperture,
+ ISO_SPEED: F32 = &self.own_attributes.iso_speed,
+ ENVIRONMENT_MAP: EnvironmentMap = &self.own_attributes.environment_map,
+ KEY_CODE: KeyCode = &self.own_attributes.film_key_code,
+ TIME_CODE: TimeCode = &self.shared_attributes.time_code,
+ WRAP_MODES: Text = &self.own_attributes.wrap_mode_name,
+ FRAMES_PER_SECOND: Rational = &self.own_attributes.frames_per_second,
+ MULTI_VIEW: TextVector = &self.own_attributes.multi_view_names,
+ WORLD_TO_CAMERA: Matrix4x4 = &self.own_attributes.world_to_camera,
+ WORLD_TO_NDC: Matrix4x4 = &self.own_attributes.world_to_normalized_device,
+ DEEP_IMAGE_STATE: Rational = &self.own_attributes.deep_image_state,
+ ORIGINAL_DATA_WINDOW: IntegerBounds = &self.own_attributes.original_data_window,
+ CHROMATICITIES: Chromaticities = &self.shared_attributes.chromaticities,
+ PREVIEW: Preview = &self.own_attributes.preview,
+ VIEW: Text = &self.own_attributes.view_name,
+ NEAR: F32 = &self.own_attributes.near_clip_plane,
+ FAR: F32 = &self.own_attributes.far_clip_plane,
+ FOV_X: F32 = &self.own_attributes.horizontal_field_of_view,
+ FOV_Y: F32 = &self.own_attributes.vertical_field_of_view,
+ SOFTWARE: Text = &self.own_attributes.software_name
+ );
+
+ // dwa compression writes its parameters as a separate attribute.
+ match self.compression {
+ attribute::Compression::DWAA(Some(level)) |
+ attribute::Compression::DWAB(Some(level)) =>
+ attribute::write(DWA_COMPRESSION_LEVEL, &F32(level), write)?,
+
+ _ => {}
+ };
+
+
+ for (name, value) in &self.shared_attributes.other {
+ attribute::write(name.as_slice(), value, write)?;
+ }
+
+ for (name, value) in &self.own_attributes.other {
+ attribute::write(name.as_slice(), value, write)?;
+ }
+
+ sequence_end::write(write)?;
+ Ok(())
+ }
+
+ /// The rectangle describing the bounding box of this layer
+ /// within the infinite global 2D space of the file.
+ pub fn data_window(&self) -> IntegerBounds {
+ IntegerBounds::new(self.own_attributes.layer_position, self.layer_size)
+ }
+}
+
+
+
+/// Collection of standard attribute names, which are reserved and may not be used for custom attributes.
+pub mod standard_names {
+ macro_rules! define_required_attribute_names {
+ ( $($name: ident : $value: expr),* ) => {
+
+ /// A list containing all reserved names.
+ pub const ALL: &'static [&'static [u8]] = &[
+ $( $value ),*
+ ];
+
+ $(
+ /// The byte-string name of this required attribute as it appears in an exr file.
+ pub const $name: &'static [u8] = $value;
+ )*
+ };
+ }
+
+ define_required_attribute_names! {
+ TILES: b"tiles",
+ NAME: b"name",
+ BLOCK_TYPE: b"type",
+ DEEP_DATA_VERSION: b"version",
+ CHUNKS: b"chunkCount",
+ MAX_SAMPLES: b"maxSamplesPerPixel",
+ CHANNELS: b"channels",
+ COMPRESSION: b"compression",
+ DATA_WINDOW: b"dataWindow",
+ DISPLAY_WINDOW: b"displayWindow",
+ LINE_ORDER: b"lineOrder",
+ PIXEL_ASPECT: b"pixelAspectRatio",
+ WINDOW_CENTER: b"screenWindowCenter",
+ WINDOW_WIDTH: b"screenWindowWidth",
+ WHITE_LUMINANCE: b"whiteLuminance",
+ ADOPTED_NEUTRAL: b"adoptedNeutral",
+ RENDERING_TRANSFORM: b"renderingTransform",
+ LOOK_MOD_TRANSFORM: b"lookModTransform",
+ X_DENSITY: b"xDensity",
+ OWNER: b"owner",
+ COMMENTS: b"comments",
+ CAPTURE_DATE: b"capDate",
+ UTC_OFFSET: b"utcOffset",
+ LONGITUDE: b"longitude",
+ LATITUDE: b"latitude",
+ ALTITUDE: b"altitude",
+ FOCUS: b"focus",
+ EXPOSURE_TIME: b"expTime",
+ APERTURE: b"aperture",
+ ISO_SPEED: b"isoSpeed",
+ ENVIRONMENT_MAP: b"envmap",
+ KEY_CODE: b"keyCode",
+ TIME_CODE: b"timeCode",
+ WRAP_MODES: b"wrapmodes",
+ FRAMES_PER_SECOND: b"framesPerSecond",
+ MULTI_VIEW: b"multiView",
+ WORLD_TO_CAMERA: b"worldToCamera",
+ WORLD_TO_NDC: b"worldToNDC",
+ DEEP_IMAGE_STATE: b"deepImageState",
+ ORIGINAL_DATA_WINDOW: b"originalDataWindow",
+ DWA_COMPRESSION_LEVEL: b"dwaCompressionLevel",
+ PREVIEW: b"preview",
+ VIEW: b"view",
+ CHROMATICITIES: b"chromaticities",
+ NEAR: b"near",
+ FAR: b"far",
+ FOV_X: b"fieldOfViewHorizontal",
+ FOV_Y: b"fieldOfViewVertical",
+ SOFTWARE: b"software"
+ }
+}
+
+
+impl Default for LayerAttributes {
+ fn default() -> Self {
+ Self {
+ layer_position: Vec2(0, 0),
+ screen_window_center: Vec2(0.0, 0.0),
+ screen_window_width: 1.0,
+ layer_name: None,
+ white_luminance: None,
+ adopted_neutral: None,
+ rendering_transform_name: None,
+ look_modification_transform_name: None,
+ horizontal_density: None,
+ owner: None,
+ comments: None,
+ capture_date: None,
+ utc_offset: None,
+ longitude: None,
+ latitude: None,
+ altitude: None,
+ focus: None,
+ exposure: None,
+ aperture: None,
+ iso_speed: None,
+ environment_map: None,
+ film_key_code: None,
+ wrap_mode_name: None,
+ frames_per_second: None,
+ multi_view_names: None,
+ world_to_camera: None,
+ world_to_normalized_device: None,
+ deep_image_state: None,
+ original_data_window: None,
+ preview: None,
+ view_name: None,
+ software_name: None,
+ near_clip_plane: None,
+ far_clip_plane: None,
+ horizontal_field_of_view: None,
+ vertical_field_of_view: None,
+ other: Default::default()
+ }
+ }
+}
+
+impl std::fmt::Debug for LayerAttributes {
+ fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let default_self = Self::default();
+
+ let mut debug = formatter.debug_struct("LayerAttributes (default values omitted)");
+
+ // always debug the following field
+ debug.field("name", &self.layer_name);
+
+ macro_rules! debug_non_default_fields {
+ ( $( $name: ident ),* ) => { $(
+
+ if self.$name != default_self.$name {
+ debug.field(stringify!($name), &self.$name);
+ }
+
+ )* };
+ }
+
+ // only debug these fields if they are not the default value
+ debug_non_default_fields! {
+ screen_window_center, screen_window_width,
+ white_luminance, adopted_neutral, horizontal_density,
+ rendering_transform_name, look_modification_transform_name,
+ owner, comments,
+ capture_date, utc_offset,
+ longitude, latitude, altitude,
+ focus, exposure, aperture, iso_speed,
+ environment_map, film_key_code, wrap_mode_name,
+ frames_per_second, multi_view_names,
+ world_to_camera, world_to_normalized_device,
+ deep_image_state, original_data_window,
+ preview, view_name,
+ vertical_field_of_view, horizontal_field_of_view,
+ near_clip_plane, far_clip_plane, software_name
+ }
+
+ for (name, value) in &self.other {
+ debug.field(&format!("\"{}\"", name), value);
+ }
+
+ // debug.finish_non_exhaustive() TODO
+ debug.finish()
+ }
+}
diff --git a/vendor/exr/src/meta/mod.rs b/vendor/exr/src/meta/mod.rs
new file mode 100644
index 0000000..0c36af8
--- /dev/null
+++ b/vendor/exr/src/meta/mod.rs
@@ -0,0 +1,821 @@
+
+//! Describes all meta data possible in an exr file.
+//! Contains functionality to read and write meta data from bytes.
+//! Browse the `exr::image` module to get started with the high-level interface.
+
+pub mod attribute;
+pub mod header;
+
+
+use crate::io::*;
+use ::smallvec::SmallVec;
+use self::attribute::*;
+use crate::block::chunk::{TileCoordinates, CompressedBlock};
+use crate::error::*;
+use std::fs::File;
+use std::io::{BufReader};
+use crate::math::*;
+use std::collections::{HashSet};
+use std::convert::TryFrom;
+use crate::meta::header::{Header};
+use crate::block::{BlockIndex, UncompressedBlock};
+
+
+// TODO rename MetaData to ImageInfo?
+
+/// Contains the complete meta data of an exr image.
+/// Defines how the image is split up in the file,
+/// the number and type of images and channels,
+/// and various other attributes.
+/// The usage of custom attributes is encouraged.
+#[derive(Debug, Clone, PartialEq)]
+pub struct MetaData {
+
+ /// Some flags summarizing the features that must be supported to decode the file.
+ pub requirements: Requirements,
+
+ /// One header to describe each layer in this file.
+ // TODO rename to layer descriptions?
+ pub headers: Headers,
+}
+
+
+/// List of `Header`s.
+pub type Headers = SmallVec<[Header; 3]>;
+
+/// List of `OffsetTable`s.
+pub type OffsetTables = SmallVec<[OffsetTable; 3]>;
+
+
+/// The offset table is an ordered list of indices referencing pixel data in the exr file.
+/// For each pixel tile in the image, an index exists, which points to the byte-location
+/// of the corresponding pixel data in the file. That index can be used to load specific
+/// portions of an image without processing all bytes in a file. For each header,
+/// an offset table exists with its indices ordered by `LineOrder::Increasing`.
+// If the multipart bit is unset and the chunkCount attribute is not present,
+// the number of entries in the chunk table is computed using the
+// dataWindow, tileDesc, and compression attribute.
+//
+// If the multipart bit is set, the header must contain a
+// chunkCount attribute, that contains the length of the offset table.
+pub type OffsetTable = Vec<u64>;
+
+
+/// A summary of requirements that must be met to read this exr file.
+/// Used to determine whether this file can be read by a given reader.
+/// It includes the OpenEXR version number. This library aims to support version `2.0`.
+#[derive(Clone, Copy, Eq, PartialEq, Debug, Hash)]
+pub struct Requirements {
+
+ /// This library supports reading versions 1 and 2, and writing version 2.
+ // TODO write version 1 for simple images
+ pub file_format_version: u8,
+
+ /// If true, this image has tiled blocks and contains only a single layer.
+ /// If false, and the file is neither deep nor multi-layer, this is a single-layer image with scan line blocks.
+ pub is_single_layer_and_tiled: bool,
+
+ // in c or bad c++ this might have been relevant (omg is he allowed to say that)
+ /// Whether this file has strings with a length greater than 31.
+ /// Strings can never be longer than 255.
+ pub has_long_names: bool,
+
+ /// This image contains at least one layer with deep data.
+ pub has_deep_data: bool,
+
+ /// Whether this file contains multiple layers.
+ pub has_multiple_layers: bool,
+}
+
+
+/// Locates a rectangular section of pixels in an image.
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
+pub struct TileIndices {
+
+ /// Index of the tile.
+ pub location: TileCoordinates,
+
+ /// Pixel size of the tile.
+ pub size: Vec2<usize>,
+}
+
+/// How the image pixels are split up into separate blocks.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum BlockDescription {
+
+ /// The image is divided into scan line blocks.
+ /// The number of scan lines in a block depends on the compression method.
+ ScanLines,
+
+ /// The image is divided into tile blocks.
+ /// Also specifies the size of each tile in the image
+ /// and whether this image contains multiple resolution levels.
+ Tiles(TileDescription)
+}
+
+
+/*impl TileIndices {
+ pub fn cmp(&self, other: &Self) -> Ordering {
+ match self.location.level_index.1.cmp(&other.location.level_index.1) {
+ Ordering::Equal => {
+ match self.location.level_index.0.cmp(&other.location.level_index.0) {
+ Ordering::Equal => {
+ match self.location.tile_index.1.cmp(&other.location.tile_index.1) {
+ Ordering::Equal => {
+ self.location.tile_index.0.cmp(&other.location.tile_index.0)
+ },
+
+ other => other,
+ }
+ },
+
+ other => other
+ }
+ },
+
+ other => other
+ }
+ }
+}*/
+
+impl BlockDescription {
+
+ /// Whether this image is tiled. If false, this image is divided into scan line blocks.
+ pub fn has_tiles(&self) -> bool {
+ match self {
+ BlockDescription::Tiles { .. } => true,
+ _ => false
+ }
+ }
+}
+
+
+
+
+
+/// The first four bytes of each exr file.
+/// Used to abort reading non-exr files.
+pub mod magic_number {
+ use super::*;
+
+ /// The first four bytes of each exr file.
+ pub const BYTES: [u8; 4] = [0x76, 0x2f, 0x31, 0x01];
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write(write: &mut impl Write) -> Result<()> {
+ u8::write_slice(write, &self::BYTES)
+ }
+
+ /// Consumes four bytes from the reader and returns whether the file may be an exr file.
+ // TODO check if exr before allocating BufRead
+ pub fn is_exr(read: &mut impl Read) -> Result<bool> {
+ let mut magic_num = [0; 4];
+ u8::read_slice(read, &mut magic_num)?;
+ Ok(magic_num == self::BYTES)
+ }
+
+ /// Validate this image. If it is an exr file, return `Ok(())`.
+ pub fn validate_exr(read: &mut impl Read) -> UnitResult {
+ if self::is_exr(read)? {
+ Ok(())
+
+ } else {
+ Err(Error::invalid("file identifier missing"))
+ }
+ }
+}
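+
+// A minimal sketch of the check above, assuming nothing beyond the items of this module:
+// the canonical four signature bytes are accepted, arbitrary bytes are rejected.
+#[cfg(test)]
+#[test]
+fn magic_number_detection_sketch() {
+ // reading the canonical signature reports an exr file
+ assert!(magic_number::is_exr(&mut magic_number::BYTES.as_slice()).unwrap());
+
+ // four arbitrary bytes are not mistaken for an exr file
+ assert!(!magic_number::is_exr(&mut [0_u8; 4].as_slice()).unwrap());
+}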
+
+/// A `0_u8` at the end of a sequence.
+pub mod sequence_end {
+ use super::*;
+
+ /// Number of bytes this would consume in an exr file.
+ pub fn byte_size() -> usize {
+ 1
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(write: &mut W) -> UnitResult {
+ 0_u8.write(write)
+ }
+
+ /// Peeks the next byte. If it is zero, consumes the byte and returns true.
+ pub fn has_come(read: &mut PeekRead<impl Read>) -> Result<bool> {
+ Ok(read.skip_if_eq(0)?)
+ }
+}
+
+fn missing_attribute(name: &str) -> Error {
+ Error::invalid(format!("missing or invalid {} attribute", name))
+}
+
+
+/// Compute the number of tiles required to contain all values.
+pub fn compute_block_count(full_res: usize, tile_size: usize) -> usize {
+ // round up, because if the image is not evenly divisible by the tiles,
+ // we add another tile at the end (which is only partially used)
+ RoundingMode::Up.divide(full_res, tile_size)
+}
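+
+// A minimal sketch of the rounding described above, assuming a hypothetical
+// 2000 pixel dimension split into 64 pixel tiles.
+#[cfg(test)]
+#[test]
+fn block_count_rounds_up_sketch() {
+ // 2000 / 64 = 31.25, so a 32nd, only partially used block is required
+ assert_eq!(compute_block_count(2000, 64), 32);
+
+ // evenly divisible dimensions need no extra block
+ assert_eq!(compute_block_count(2048, 64), 32);
+}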
+
+/// Compute the start position and size of a block inside a dimension.
+#[inline]
+pub fn calculate_block_position_and_size(total_size: usize, block_size: usize, block_index: usize) -> Result<(usize, usize)> {
+ let block_position = block_size * block_index;
+
+ Ok((
+ block_position,
+ calculate_block_size(total_size, block_size, block_position)?
+ ))
+}
+
+/// Calculate the size of a single block. If this is the last block and the dimension
+/// is not evenly divisible, only the remaining size is returned, which is smaller than the default block size.
+// TODO use this method everywhere instead of convoluted formulas
+#[inline]
+pub fn calculate_block_size(total_size: usize, block_size: usize, block_position: usize) -> Result<usize> {
+ if block_position >= total_size {
+ return Err(Error::invalid("block index"))
+ }
+
+ if block_position + block_size <= total_size {
+ Ok(block_size)
+ }
+ else {
+ Ok(total_size - block_position)
+ }
+}
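+
+// A minimal sketch of the two helpers above, reusing the hypothetical
+// 2000 pixel dimension and 64 pixel blocks from the previous sketch.
+#[cfg(test)]
+#[test]
+fn block_position_and_size_sketch() {
+ // the first block starts at zero and has the full block size
+ assert_eq!(calculate_block_position_and_size(2000, 64, 0).unwrap(), (0, 64));
+
+ // the last (32nd) block starts at 31 * 64 = 1984 and only contains the remaining 16 values
+ assert_eq!(calculate_block_position_and_size(2000, 64, 31).unwrap(), (1984, 16));
+
+ // a block index past the end of the dimension is rejected
+ assert!(calculate_block_position_and_size(2000, 64, 32).is_err());
+}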
+
+
+/// Calculate the number of mip levels for a given resolution.
+// TODO this should be cached? log2 may be very expensive
+pub fn compute_level_count(round: RoundingMode, full_res: usize) -> usize {
+ usize::try_from(round.log2(u32::try_from(full_res).unwrap())).unwrap() + 1
+}
+
+/// Calculate the size of a single mip level by index.
+// TODO this should be cached? log2 may be very expensive
+pub fn compute_level_size(round: RoundingMode, full_res: usize, level_index: usize) -> usize {
+ assert!(level_index < std::mem::size_of::<usize>() * 8, "largest level size exceeds maximum integer value");
+ round.divide(full_res, 1 << level_index).max(1)
+}
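+
+// A minimal sketch of the level math above for a power-of-two resolution,
+// where both rounding modes agree: a 16 pixel dimension has five mip levels.
+#[cfg(test)]
+#[test]
+fn level_count_and_size_sketch() {
+ assert_eq!(compute_level_count(RoundingMode::Down, 16), 5);
+
+ let sizes: Vec<usize> = (0..5)
+ .map(|level| compute_level_size(RoundingMode::Down, 16, level))
+ .collect();
+
+ // each level halves the previous one, down to a single value
+ assert_eq!(sizes, vec![16, 8, 4, 2, 1]);
+}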
+
+/// Iterates over all rip map level resolutions of a given size, including the indices of each level.
+/// The order of iteration conforms to `LineOrder::Increasing`.
+// TODO cache these?
+// TODO compute these directly instead of summing up an iterator?
+pub fn rip_map_levels(round: RoundingMode, max_resolution: Vec2<usize>) -> impl Iterator<Item=(Vec2<usize>, Vec2<usize>)> {
+ rip_map_indices(round, max_resolution).map(move |level_indices|{
+ // TODO progressively divide instead??
+ let width = compute_level_size(round, max_resolution.width(), level_indices.x());
+ let height = compute_level_size(round, max_resolution.height(), level_indices.y());
+ (level_indices, Vec2(width, height))
+ })
+}
+
+/// Iterates over all mip map level resolutions of a given size, including the indices of each level.
+/// The order of iteration conforms to `LineOrder::Increasing`.
+// TODO cache all these level values when computing table offset size??
+// TODO compute these directly instead of summing up an iterator?
+pub fn mip_map_levels(round: RoundingMode, max_resolution: Vec2<usize>) -> impl Iterator<Item=(usize, Vec2<usize>)> {
+ mip_map_indices(round, max_resolution)
+ .map(move |level_index|{
+ // TODO progressively divide instead??
+ let width = compute_level_size(round, max_resolution.width(), level_index);
+ let height = compute_level_size(round, max_resolution.height(), level_index);
+ (level_index, Vec2(width, height))
+ })
+}
+
+/// Iterates over all rip map level indices of a given size.
+/// The order of iteration conforms to `LineOrder::Increasing`.
+pub fn rip_map_indices(round: RoundingMode, max_resolution: Vec2<usize>) -> impl Iterator<Item=Vec2<usize>> {
+ let (width, height) = (
+ compute_level_count(round, max_resolution.width()),
+ compute_level_count(round, max_resolution.height())
+ );
+
+ (0..height).flat_map(move |y_level|{
+ (0..width).map(move |x_level|{
+ Vec2(x_level, y_level)
+ })
+ })
+}
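+
+// A minimal sketch of the rip map iteration order above: for a hypothetical
+// 4 x 2 resolution there are 3 horizontal and 2 vertical levels, iterated row by row.
+#[cfg(test)]
+#[test]
+fn rip_map_index_order_sketch() {
+ let indices: Vec<Vec2<usize>> = rip_map_indices(RoundingMode::Down, Vec2(4, 2)).collect();
+
+ assert_eq!(indices, vec![
+ Vec2(0, 0), Vec2(1, 0), Vec2(2, 0),
+ Vec2(0, 1), Vec2(1, 1), Vec2(2, 1),
+ ]);
+}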
+
+/// Iterates over all mip map level indices of a given size.
+/// The order of iteration conforms to `LineOrder::Increasing`.
+pub fn mip_map_indices(round: RoundingMode, max_resolution: Vec2<usize>) -> impl Iterator<Item=usize> {
+ 0..compute_level_count(round, max_resolution.width().max(max_resolution.height()))
+}
+
+/// Compute the number of chunks that an image is divided into. May be an expensive operation.
+// If not multilayer and chunkCount not present,
+// the number of entries in the chunk table is computed
+// using the dataWindow and tileDesc attributes and the compression format
+pub fn compute_chunk_count(compression: Compression, data_size: Vec2<usize>, blocks: BlockDescription) -> usize {
+
+ if let BlockDescription::Tiles(tiles) = blocks {
+ let round = tiles.rounding_mode;
+ let Vec2(tile_width, tile_height) = tiles.tile_size;
+
+ // TODO cache all these level values??
+ use crate::meta::attribute::LevelMode::*;
+ match tiles.level_mode {
+ Singular => {
+ let tiles_x = compute_block_count(data_size.width(), tile_width);
+ let tiles_y = compute_block_count(data_size.height(), tile_height);
+ tiles_x * tiles_y
+ }
+
+ MipMap => {
+ mip_map_levels(round, data_size).map(|(_, Vec2(level_width, level_height))| {
+ compute_block_count(level_width, tile_width) * compute_block_count(level_height, tile_height)
+ }).sum()
+ },
+
+ RipMap => {
+ rip_map_levels(round, data_size).map(|(_, Vec2(level_width, level_height))| {
+ compute_block_count(level_width, tile_width) * compute_block_count(level_height, tile_height)
+ }).sum()
+ }
+ }
+ }
+
+ // scan line blocks never have mip maps
+ else {
+ compute_block_count(data_size.height(), compression.scan_lines_per_block())
+ }
+}
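+
+// A minimal sketch of the chunk counting above, assuming the same 2000 x 333 pixel
+// layer size as the tests at the end of this file, plus hypothetical 64 x 64 tiles
+// without mip maps for the tiled case.
+#[cfg(test)]
+#[test]
+fn chunk_count_sketch() {
+ // uncompressed scan line blocks contain a single line each, so one chunk per scan line
+ assert_eq!(
+ compute_chunk_count(Compression::Uncompressed, Vec2(2000, 333), BlockDescription::ScanLines),
+ 333
+ );
+
+ // 32 tile columns (2000 / 64, rounded up) times 6 tile rows (333 / 64, rounded up)
+ let tiles = BlockDescription::Tiles(TileDescription {
+ tile_size: Vec2(64, 64),
+ level_mode: LevelMode::Singular,
+ rounding_mode: RoundingMode::Down,
+ });
+
+ assert_eq!(compute_chunk_count(Compression::Uncompressed, Vec2(2000, 333), tiles), 32 * 6);
+}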
+
+
+
+impl MetaData {
+
+ /// Read the exr meta data from a file.
+ /// Use `read_from_unbuffered` instead if you do not have a file.
+ /// Does not validate the meta data.
+ #[must_use]
+ pub fn read_from_file(path: impl AsRef<::std::path::Path>, pedantic: bool) -> Result<Self> {
+ Self::read_from_unbuffered(File::open(path)?, pedantic)
+ }
+
+ /// Buffer the reader and then read the exr meta data from it.
+ /// Use `read_from_buffered` if your reader is an in-memory reader.
+ /// Use `read_from_file` if you have a file path.
+ /// Does not validate the meta data.
+ #[must_use]
+ pub fn read_from_unbuffered(unbuffered: impl Read, pedantic: bool) -> Result<Self> {
+ Self::read_from_buffered(BufReader::new(unbuffered), pedantic)
+ }
+
+ /// Read the exr meta data from a reader.
+ /// Use `read_from_file` if you have a file path.
+ /// Use `read_from_unbuffered` if this is not an in-memory reader.
+ /// Does not validate the meta data.
+ #[must_use]
+ pub fn read_from_buffered(buffered: impl Read, pedantic: bool) -> Result<Self> {
+ let mut read = PeekRead::new(buffered);
+ MetaData::read_unvalidated_from_buffered_peekable(&mut read, pedantic)
+ }
+
+ /// Does __not validate__ the meta data completely.
+ #[must_use]
+ pub(crate) fn read_unvalidated_from_buffered_peekable(read: &mut PeekRead<impl Read>, pedantic: bool) -> Result<Self> {
+ magic_number::validate_exr(read)?;
+
+ let requirements = Requirements::read(read)?;
+
+ // do this check now in order to fast-fail for newer versions and features than version 2
+ requirements.validate()?;
+
+ let headers = Header::read_all(read, &requirements, pedantic)?;
+
+ // TODO check if supporting requirements 2 always implies supporting requirements 1
+ Ok(MetaData { requirements, headers })
+ }
+
+ /// Reads the meta data and validates it.
+ #[must_use]
+ pub(crate) fn read_validated_from_buffered_peekable(
+ read: &mut PeekRead<impl Read>, pedantic: bool
+ ) -> Result<Self> {
+ let meta_data = Self::read_unvalidated_from_buffered_peekable(read, !pedantic)?;
+ MetaData::validate(meta_data.headers.as_slice(), pedantic)?;
+ Ok(meta_data)
+ }
+
+ /// Validates the meta data and writes it to the stream.
+ /// If pedantic, throws errors for files that may produce errors in other exr readers.
+ /// Returns the automatically detected minimum requirement flags.
+ pub(crate) fn write_validating_to_buffered(write: &mut impl Write, headers: &[Header], pedantic: bool) -> Result<Requirements> {
+ // pedantic validation to not allow slightly invalid files
+ // that still could be read correctly in theory
+ let minimal_requirements = Self::validate(headers, pedantic)?;
+
+ magic_number::write(write)?;
+ minimal_requirements.write(write)?;
+ Header::write_all(headers, write, minimal_requirements.has_multiple_layers)?;
+ Ok(minimal_requirements)
+ }
+
+ /// Read one offset table from the reader for each header.
+ pub fn read_offset_tables(read: &mut PeekRead<impl Read>, headers: &Headers) -> Result<OffsetTables> {
+ headers.iter()
+ .map(|header| u64::read_vec(read, header.chunk_count, u16::MAX as usize, None, "offset table size"))
+ .collect()
+ }
+
+ /// Skip the offset tables by advancing the reader by the required byte count.
+ // TODO use seek for large (probably all) tables!
+ pub fn skip_offset_tables(read: &mut PeekRead<impl Read>, headers: &Headers) -> Result<usize> {
+ let chunk_count: usize = headers.iter().map(|header| header.chunk_count).sum();
+ crate::io::skip_bytes(read, chunk_count * u64::BYTE_SIZE)?; // TODO this should seek for large tables
+ Ok(chunk_count)
+ }
+
+ /// This iterator tells you the block indices of all blocks that must be in the image.
+ /// The order of the blocks depends on the `LineOrder` attribute
+ /// (unspecified line order is treated the same as increasing line order).
+ /// The blocks written to the file must be exactly in this order,
+ /// except for when the `LineOrder` is unspecified.
+ /// The index represents the block index, in increasing line order, within the header.
+ pub fn enumerate_ordered_header_block_indices(&self) -> impl '_ + Iterator<Item=(usize, BlockIndex)> {
+ crate::block::enumerate_ordered_header_block_indices(&self.headers)
+ }
+
+ /// Go through all the block indices in the correct order and call the specified closure for each of these blocks.
+ /// That way, the block indices are filled with real block data and returned as an iterator.
+ /// The closure returns an `UncompressedBlock` for each block index.
+ pub fn collect_ordered_blocks<'s>(&'s self, mut get_block: impl 's + FnMut(BlockIndex) -> UncompressedBlock)
+ -> impl 's + Iterator<Item=(usize, UncompressedBlock)>
+ {
+ self.enumerate_ordered_header_block_indices().map(move |(index_in_header, block_index)|{
+ (index_in_header, get_block(block_index))
+ })
+ }
+
+ /// Go through all the block indices in the correct order and call the specified closure for each of these blocks.
+ /// That way, the block indices are filled with real block data and returned as an iterator.
+ /// The closure returns the byte data for each block index.
+ pub fn collect_ordered_block_data<'s>(&'s self, mut get_block_data: impl 's + FnMut(BlockIndex) -> Vec<u8>)
+ -> impl 's + Iterator<Item=(usize, UncompressedBlock)>
+ {
+ self.collect_ordered_blocks(move |block_index|
+ UncompressedBlock { index: block_index, data: get_block_data(block_index) }
+ )
+ }
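+
+ // A minimal usage sketch of the method above, assuming a hypothetical
+ // `encode_block` function that produces the little-endian byte data of one block:
+ //
+ // let blocks: Vec<(usize, UncompressedBlock)> = meta
+ // .collect_ordered_block_data(|index| encode_block(index))
+ // .collect();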
+
+ /// Validates this meta data. Returns the minimal possible requirements.
+ pub fn validate(headers: &[Header], pedantic: bool) -> Result<Requirements> {
+ if headers.len() == 0 {
+ return Err(Error::invalid("at least one layer is required"));
+ }
+
+ let deep = false; // TODO deep data
+ let is_multilayer = headers.len() > 1;
+ let first_header_has_tiles = headers.iter().next()
+ .map_or(false, |header| header.blocks.has_tiles());
+
+ let mut minimal_requirements = Requirements {
+ // according to the spec, version 2 should only be necessary if `is_multilayer || deep`.
+ // but the current open exr library does not support images with version 1, so always use version 2.
+ file_format_version: 2,
+
+ // start as low as possible, later increasing if required
+ has_long_names: false,
+
+ is_single_layer_and_tiled: !is_multilayer && first_header_has_tiles,
+ has_multiple_layers: is_multilayer,
+ has_deep_data: deep,
+ };
+
+ for header in headers {
+ if header.deep { // TODO deep data (and then remove this check)
+ return Err(Error::unsupported("deep data not supported yet"));
+ }
+
+ header.validate(is_multilayer, &mut minimal_requirements.has_long_names, pedantic)?;
+ }
+
+ // TODO validation fn!
+ /*if let Some(max) = max_pixel_bytes {
+ let byte_size: usize = headers.iter()
+ .map(|header| header.total_pixel_bytes())
+ .sum();
+
+ if byte_size > max {
+ return Err(Error::invalid("image larger than specified maximum"));
+ }
+ }*/
+
+ if pedantic { // check for duplicate header names
+ let mut header_names = HashSet::with_capacity(headers.len());
+ for header in headers {
+ if !header_names.insert(&header.own_attributes.layer_name) {
+ return Err(Error::invalid(format!(
+ "duplicate layer name: `{}`",
+ header.own_attributes.layer_name.as_ref().expect("header validation bug")
+ )));
+ }
+ }
+ }
+
+ if pedantic {
+ let must_share = headers.iter().flat_map(|header| header.own_attributes.other.iter())
+ .any(|(_, value)| value.to_chromaticities().is_ok() || value.to_time_code().is_ok());
+
+ if must_share {
+ return Err(Error::invalid("chromaticities and time code attributes must must not exist in own attributes but shared instead"));
+ }
+ }
+
+ if pedantic && headers.len() > 1 { // check for attributes that should not differ in between headers
+ let first_header = headers.first().expect("header count validation bug");
+ let first_header_attributes = &first_header.shared_attributes;
+
+ for header in &headers[1..] {
+ if &header.shared_attributes != first_header_attributes {
+ return Err(Error::invalid("display window, pixel aspect, chromaticities, and time code attributes must be equal for all headers"))
+ }
+ }
+ }
+
+ debug_assert!(minimal_requirements.validate().is_ok(), "inferred requirements are invalid");
+ Ok(minimal_requirements)
+ }
+}
+
+
+
+
+impl Requirements {
+
+ // this is actually used for control flow, as the number of headers may be 1 in a multilayer file
+ /// Is this file declared to contain multiple layers?
+ pub fn is_multilayer(&self) -> bool {
+ self.has_multiple_layers
+ }
+
+ /// Read the value without validating.
+ pub fn read<R: Read>(read: &mut R) -> Result<Self> {
+ use ::bit_field::BitField;
+
+ let version_and_flags = u32::read(read)?;
+
+ // the least significant bits contain the file format version number;
+ // masking the lowest four bits is sufficient for the supported versions 1 and 2
+ let version = (version_and_flags & 0x000F) as u8;
+
+ // the 24 most significant bits are treated as a set of boolean flags
+ let is_single_tile = version_and_flags.get_bit(9);
+ let has_long_names = version_and_flags.get_bit(10);
+ let has_deep_data = version_and_flags.get_bit(11);
+ let has_multiple_layers = version_and_flags.get_bit(12);
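+
+ // for example, a hypothetical value of 0x0000_1002 decodes to
+ // file format version 2 with only the multilayer bit (bit 12) set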
+
+ // all remaining bits except 9, 10, 11 and 12 are reserved and should be 0
+ // if a file has any of these bits set to 1, it means this file contains
+ // a feature that we don't support
+ let unknown_flags = version_and_flags >> 13; // all flags excluding the lowest 13 bits parsed above
+
+ if unknown_flags != 0 { // TODO test if this correctly detects unsupported files
+ return Err(Error::unsupported("too new file feature flags"));
+ }
+
+ let version = Requirements {
+ file_format_version: version,
+ is_single_layer_and_tiled: is_single_tile, has_long_names,
+ has_deep_data, has_multiple_layers,
+ };
+
+ Ok(version)
+ }
+
+ /// Without validation, write this instance to the byte stream.
+ pub fn write<W: Write>(self, write: &mut W) -> UnitResult {
+ use ::bit_field::BitField;
+
+ // the 8 least significant bits contain the file format version number
+ // and the flags are set to 0
+ let mut version_and_flags = self.file_format_version as u32;
+
+ // the 24 most significant bits are treated as a set of boolean flags
+ version_and_flags.set_bit(9, self.is_single_layer_and_tiled);
+ version_and_flags.set_bit(10, self.has_long_names);
+ version_and_flags.set_bit(11, self.has_deep_data);
+ version_and_flags.set_bit(12, self.has_multiple_layers);
+ // all remaining bits except 9, 10, 11 and 12 are reserved and should be 0
+
+ version_and_flags.write(write)?;
+ Ok(())
+ }
+
+ /// Validate this instance.
+ pub fn validate(&self) -> UnitResult {
+ if self.file_format_version == 2 {
+
+ match (
+ self.is_single_layer_and_tiled, self.has_deep_data, self.has_multiple_layers,
+ self.file_format_version
+ ) {
+ // Single-part scan line. One normal scan line image.
+ (false, false, false, 1..=2) => Ok(()),
+
+ // Single-part tile. One normal tiled image.
+ (true, false, false, 1..=2) => Ok(()),
+
+ // Multi-part (new in 2.0).
+ // Multiple normal images (scan line and/or tiled).
+ (false, false, true, 2) => Ok(()),
+
+ // Single-part deep data (new in 2.0).
+ // One deep tile or deep scan line part
+ (false, true, false, 2) => Ok(()),
+
+ // Multi-part deep data (new in 2.0).
+ // Multiple parts (any combination of:
+ // tiles, scan lines, deep tiles and/or deep scan lines).
+ (false, true, true, 2) => Ok(()),
+
+ _ => Err(Error::invalid("file feature flags"))
+ }
+ }
+ else {
+ Err(Error::unsupported("file versions other than 2.0 are not supported"))
+ }
+ }
+}
+
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use crate::meta::header::{ImageAttributes, LayerAttributes};
+
+ #[test]
+ fn round_trip_requirements() {
+ let requirements = Requirements {
+ file_format_version: 2,
+ is_single_layer_and_tiled: true,
+ has_long_names: false,
+ has_deep_data: true,
+ has_multiple_layers: false
+ };
+
+ let mut data: Vec<u8> = Vec::new();
+ requirements.write(&mut data).unwrap();
+ let read = Requirements::read(&mut data.as_slice()).unwrap();
+ assert_eq!(requirements, read);
+ }
+
+ #[test]
+ fn round_trip(){
+ let header = Header {
+ channels: ChannelList::new(smallvec![
+ ChannelDescription {
+ name: Text::from("main"),
+ sample_type: SampleType::U32,
+ quantize_linearly: false,
+ sampling: Vec2(1, 1)
+ }
+ ],
+ ),
+ compression: Compression::Uncompressed,
+ line_order: LineOrder::Increasing,
+ deep_data_version: Some(1),
+ chunk_count: compute_chunk_count(Compression::Uncompressed, Vec2(2000, 333), BlockDescription::ScanLines),
+ max_samples_per_pixel: Some(4),
+ shared_attributes: ImageAttributes {
+ pixel_aspect: 3.0,
+ .. ImageAttributes::new(IntegerBounds {
+ position: Vec2(2,1),
+ size: Vec2(11, 9)
+ })
+ },
+
+ blocks: BlockDescription::ScanLines,
+ deep: false,
+ layer_size: Vec2(2000, 333),
+ own_attributes: LayerAttributes {
+ layer_name: Some(Text::from("test name lol")),
+ layer_position: Vec2(3, -5),
+ screen_window_center: Vec2(0.3, 99.0),
+ screen_window_width: 0.19,
+ .. Default::default()
+ }
+ };
+
+ let meta = MetaData {
+ requirements: Requirements {
+ file_format_version: 2,
+ is_single_layer_and_tiled: false,
+ has_long_names: false,
+ has_deep_data: false,
+ has_multiple_layers: false
+ },
+ headers: smallvec![ header ],
+ };
+
+
+ let mut data: Vec<u8> = Vec::new();
+ MetaData::write_validating_to_buffered(&mut data, meta.headers.as_slice(), true).unwrap();
+ let meta2 = MetaData::read_from_buffered(data.as_slice(), false).unwrap();
+ MetaData::validate(meta2.headers.as_slice(), true).unwrap();
+ assert_eq!(meta, meta2);
+ }
+
+ #[test]
+ fn infer_low_requirements() {
+ let header_version_1_short_names = Header {
+ channels: ChannelList::new(smallvec![
+ ChannelDescription {
+ name: Text::from("main"),
+ sample_type: SampleType::U32,
+ quantize_linearly: false,
+ sampling: Vec2(1, 1)
+ }
+ ],
+ ),
+ compression: Compression::Uncompressed,
+ line_order: LineOrder::Increasing,
+ deep_data_version: Some(1),
+ chunk_count: compute_chunk_count(Compression::Uncompressed, Vec2(2000, 333), BlockDescription::ScanLines),
+ max_samples_per_pixel: Some(4),
+ shared_attributes: ImageAttributes {
+ pixel_aspect: 3.0,
+ .. ImageAttributes::new(IntegerBounds {
+ position: Vec2(2,1),
+ size: Vec2(11, 9)
+ })
+ },
+ blocks: BlockDescription::ScanLines,
+ deep: false,
+ layer_size: Vec2(2000, 333),
+ own_attributes: LayerAttributes {
+ other: vec![
+ (Text::try_from("x").unwrap(), AttributeValue::F32(3.0)),
+ (Text::try_from("y").unwrap(), AttributeValue::F32(-1.0)),
+ ].into_iter().collect(),
+ .. Default::default()
+ }
+ };
+
+ let low_requirements = MetaData::validate(
+ &[header_version_1_short_names], true
+ ).unwrap();
+
+ assert_eq!(low_requirements.has_long_names, false);
+ assert_eq!(low_requirements.file_format_version, 2); // always have version 2
+ assert_eq!(low_requirements.has_deep_data, false);
+ assert_eq!(low_requirements.has_multiple_layers, false);
+ }
+
+ #[test]
+ fn infer_high_requirements() {
+ let header_version_2_long_names = Header {
+ channels: ChannelList::new(
+ smallvec![
+ ChannelDescription {
+ name: Text::new_or_panic("main"),
+ sample_type: SampleType::U32,
+ quantize_linearly: false,
+ sampling: Vec2(1, 1)
+ }
+ ],
+ ),
+ compression: Compression::Uncompressed,
+ line_order: LineOrder::Increasing,
+ deep_data_version: Some(1),
+ chunk_count: compute_chunk_count(Compression::Uncompressed, Vec2(2000, 333), BlockDescription::ScanLines),
+ max_samples_per_pixel: Some(4),
+ shared_attributes: ImageAttributes {
+ pixel_aspect: 3.0,
+ .. ImageAttributes::new(IntegerBounds {
+ position: Vec2(2,1),
+ size: Vec2(11, 9)
+ })
+ },
+ blocks: BlockDescription::ScanLines,
+ deep: false,
+ layer_size: Vec2(2000, 333),
+ own_attributes: LayerAttributes {
+ layer_name: Some(Text::new_or_panic("oasdasoidfj")),
+ other: vec![
+ (Text::new_or_panic("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"), AttributeValue::F32(3.0)),
+ (Text::new_or_panic("y"), AttributeValue::F32(-1.0)),
+ ].into_iter().collect(),
+ .. Default::default()
+ }
+ };
+
+ let mut layer_2 = header_version_2_long_names.clone();
+ layer_2.own_attributes.layer_name = Some(Text::new_or_panic("anythingelse"));
+
+ let low_requirements = MetaData::validate(
+ &[header_version_2_long_names, layer_2], true
+ ).unwrap();
+
+ assert_eq!(low_requirements.has_long_names, true);
+ assert_eq!(low_requirements.file_format_version, 2);
+ assert_eq!(low_requirements.has_deep_data, false);
+ assert_eq!(low_requirements.has_multiple_layers, true);
+ }
+}
+