path: root/vendor/exr/src/image
Diffstat (limited to 'vendor/exr/src/image')
-rw-r--r--  vendor/exr/src/image/channel_groups.rs          267
-rw-r--r--  vendor/exr/src/image/crop.rs                    801
-rw-r--r--  vendor/exr/src/image/mod.rs                    1326
-rw-r--r--  vendor/exr/src/image/pixel_vec.rs                97
-rw-r--r--  vendor/exr/src/image/read/any_channels.rs       128
-rw-r--r--  vendor/exr/src/image/read/image.rs              209
-rw-r--r--  vendor/exr/src/image/read/layers.rs             204
-rw-r--r--  vendor/exr/src/image/read/levels.rs             219
-rw-r--r--  vendor/exr/src/image/read/mod.rs                207
-rw-r--r--  vendor/exr/src/image/read/samples.rs            122
-rw-r--r--  vendor/exr/src/image/read/specific_channels.rs  463
-rw-r--r--  vendor/exr/src/image/recursive.rs               178
-rw-r--r--  vendor/exr/src/image/write/channels.rs          407
-rw-r--r--  vendor/exr/src/image/write/layers.rs            188
-rw-r--r--  vendor/exr/src/image/write/mod.rs               184
-rw-r--r--  vendor/exr/src/image/write/samples.rs           205
16 files changed, 5205 insertions, 0 deletions
diff --git a/vendor/exr/src/image/channel_groups.rs b/vendor/exr/src/image/channel_groups.rs
new file mode 100644
index 0000000..7d74375
--- /dev/null
+++ b/vendor/exr/src/image/channel_groups.rs
@@ -0,0 +1,267 @@
+
+use std::collections::HashMap;
+use crate::image::write::channels::{WritableChannels, ChannelsWriter};
+use crate::meta::attribute::{LevelMode, ChannelList, Text, TextSlice, ChannelInfo};
+use crate::meta::header::Header;
+use crate::image::read::layers::{ReadChannels, ChannelsReader};
+use crate::block::{BlockIndex, UncompressedBlock};
+use crate::block::lines::{collect_uncompressed_block_from_lines, LineIndex};
+use std::io::{Cursor, Read};
+use crate::error::{Result, UnitResult};
+use crate::block::chunk::TileCoordinates;
+use crate::prelude::SmallVec;
+
+
+
+
+
+pub struct ChannelGroups<ChannelGroup> {
+ channel_group: Option<ChannelGroup>,
+ children: HashMap<Text, Self>
+}
+
+
+impl<ChannelGroup> ChannelGroups<ChannelGroup> {
+
+
+ // pub fn visit_groups_mut(&mut self, visitor: impl Fn(&mut Channels)) {
+ // }
+
+
+
+ pub fn groups(&self) -> SmallVec<[&ChannelGroup; 12]> {
+        let children = self.children.values().flat_map(|group| group.groups());
+ self.channel_group.iter().chain(children).collect()
+ }
+
+ pub fn lookup_group(&self, group_name: &TextSlice) -> Option<&ChannelGroup> {
+        let dot_index = group_name.iter().position(|&byte| byte == b'.');
+        if let Some(dot_index) = dot_index {
+            let child_name = &group_name[dot_index + 1 ..];
+            let group_name = &group_name[.. dot_index];
+            self.children.get(group_name)
+                .and_then(|child| child.lookup_group(child_name))
+        }
+        else {
+            self.channel_group.as_ref()
+ }
+ }
+
+
+ /*pub fn insert_group(&mut self, full_name: &TextSlice, value: ChannelGroup) {
+ let dot_index = full_name.iter().position('.');
+ if let Some(dot_index) = dot_index {
+ let group_name = &group_name[.. dot_index];
+ let name_rest = &group_name[dot_index + 1 ..];
+
+ self.children.entry(Text::from_slice_unchecked(group_name))
+ .or_insert(|| );
+
+ // self.children.insert(Text::from_slice_unchecked(group_name), value)
+ // .and_then(|child| child.lookup(name_rest));
+ }
+ else {
+ self.channel_group.lookup(name);
+ }
+ }*/
+
+    pub fn map<T>(self, mut mapper: impl FnMut(ChannelGroup) -> T) -> ChannelGroups<T> {
+        fn map_recursively<Group, T>(groups: ChannelGroups<Group>, mapper: &mut impl FnMut(Group) -> T) -> ChannelGroups<T> {
+            ChannelGroups {
+                channel_group: groups.channel_group.map(|group| mapper(group)),
+                children: groups.children.into_iter()
+                    .map(|(name, child)| (name, map_recursively(child, &mut *mapper)))
+                    .collect(),
+            }
+        }
+        map_recursively(self, &mut mapper)
+    }
+}
+
+
+pub fn parse_channel_list_groups<T>(channels: impl Iterator<Item=(Text, T)>)
+    -> ChannelGroups<SmallVec<[(Text, T); 4]>>
+{
+    fn insert_into_groups<T>(groups: &mut ChannelGroups<SmallVec<[(Text, T); 4]>>, name: Text, value: T) {
+        let dot_index = name.as_slice().iter().position(|&byte| byte == b'.');
+
+ if let Some(dot_index) = dot_index {
+ // insert into child group
+
+ let group_name = Text::from_slice_unchecked(&name.as_slice()[.. dot_index]);
+ let child_channel = Text::from_slice_unchecked(&name.as_slice()[dot_index + 1 ..]);
+
+ let child_group = groups.children.entry(group_name)
+ .or_insert(ChannelGroups { channel_group: None, children: Default::default() });
+
+ insert_into_groups(child_group, child_channel, value);
+ }
+
+ else {
+ // insert directly into group
+
+ if groups.channel_group.is_none() {
+ groups.channel_group = Some(SmallVec::new());
+ }
+
+            groups.channel_group.as_mut().unwrap().push((name, value));
+ }
+ }
+
+ let mut result = ChannelGroups { channel_group: None, children: HashMap::default() };
+ for (name, value) in channels { insert_into_groups(&mut result, name, value); }
+ result
+}
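+
+// For example, the flat channel list `[("albedo.R", a), ("albedo.G", b), ("depth", c)]`
+// is grouped into a root node whose own `channel_group` contains `("depth", c)`,
+// plus one child group named `"albedo"` whose `channel_group` contains `("R", a)` and `("G", b)`.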
+
+
+impl<'slf, ChannelGroup> WritableChannels<'slf> for ChannelGroups<ChannelGroup>
+ where ChannelGroup: WritableChannels<'slf>
+{
+ fn infer_channel_list(&self) -> ChannelList {
+ // TODO what about empty groups with NO channels??
+
+ let child_channels = self.children.iter().flat_map(|(group_name, child)| {
+ let mut child_channels = child.infer_channel_list().list;
+ for channel in &mut child_channels { channel.name.push_front(group_name) };
+ child_channels
+ });
+
+ let mut own_channels = self.channel_group
+            .as_ref().map(|chans| chans.infer_channel_list().list)
+ .unwrap_or_default();
+
+ own_channels.extend(child_channels);
+ own_channels.sort_unstable(); // TODO only once at end
+ ChannelList::new(own_channels) // might be empty, but will be checked in MetaData::validate()
+ }
+
+    fn level_mode(&self) -> LevelMode {
+        let all_groups = self.groups();
+
+        let mode = all_groups.first()
+            .expect("empty channel groups (check failed)") // TODO only happens for empty channels, right? panic maybe?
+            .level_mode();
+
+        debug_assert!(
+            all_groups.iter().all(|group| group.level_mode() == mode),
+            "level mode must be equal for all legacy channel groups"
+        );
+
+        mode
+    }
+
+    type Writer = GroupChannelsWriter<ChannelGroup::Writer>;
+
+ fn create_writer(&'slf self, header: &Header) -> Self::Writer {
+ let channels = header.channels.list.iter()
+ .map(|channel_info|{
+ // hashmap order is not guaranteed? so look up each channel group manually instead of generating new
+ let channels = self.lookup_group(channel_info.name.as_slice())
+ .expect("channels not found bug");
+
+ channels.create_writer(header) // channel_info.name.clone()
+ })
+ .collect();
+
+ GroupChannelsWriter { channels_list: channels }
+ }
+}
+
+struct GroupChannelsWriter<ChannelGroupWriter> {
+    channels_list: Vec<ChannelGroupWriter>,
+}
+
+impl<Channels> ChannelsWriter for GroupChannelsWriter<Channels> where Channels: ChannelsWriter {
+ fn extract_uncompressed_block(&self, header: &Header, block: BlockIndex) -> Vec<u8> {
+ let mut blocks_per_channel: Vec<Cursor<Vec<u8>>> = self
+ .channels_list.iter()
+ .map(|channels| Cursor::new(channels.extract_uncompressed_block(header, block)))
+ .collect();
+
+ UncompressedBlock::uncompressed_block_from_lines(header, block, |line|{
+ let channel_reader = &mut blocks_per_channel[line.location.channel]; // TODO subsampling
+
+ // read from specific channel into total byte block
+ // this assumes that the lines in the callback are iterated in strictly increasing order
+ // because each channel reader is consumed
+ channel_reader.read_exact(line.value)
+ .expect("collecting grouped channel byte block failed");
+ })
+ }
+}
+
+
+struct ReadChannelGroups<ReadChannelGroup> {
+ read_channels: ReadChannelGroup
+}
+
+struct ChannelGroupsReader<ChannelGroupReader> {
+ channels: ChannelGroups<usize>,
+ indexed_channels: Vec<ChannelGroupReader>,
+}
+
+impl<'s, ReadChannelGroup> ReadChannels<'s> for ReadChannelGroups<ReadChannelGroup>
+ where ReadChannelGroup: ReadChannels<'s>
+{
+ type Reader = ChannelGroupsReader<ReadChannelGroup::Reader>;
+
+ fn create_channels_reader(&'s self, header: &Header) -> Result<Self::Reader> {
+ let swap = |(a,b)| (b,a);
+ let channel_groups = parse_channel_list_groups(
+ header.channels.list.iter().enumerate().map(swap)
+ );
+
+ let mut indexed_channels = Vec::new();
+ let channel_groups = channel_groups.map(|channels| {
+
+ let mut channels_header = header.clone(); // TODO no clone?
+            channels_header.channels = ChannelList::new(channels.iter().map(|(name, index)|{
+                let mut channel_info = header.channels.list[*index].clone();
+                channel_info.name = name.clone();
+                channel_info
+            }).collect()); // FIXME does not comply to `header.chunk_count` and that stuff?? change ReadChannels fn signature?
+
+            indexed_channels.push(self.read_channels.create_channels_reader(&channels_header));
+
+            // FIXME this is not the original order
+            indexed_channels.len() - 1
+ });
+
+ Ok(ChannelGroupsReader {
+ channels: channel_groups,
+ indexed_channels,
+ })
+
+ /*Ok(ChannelGroupsReader {
+ channels: header.channels.list.iter().map(|channel| {
+ let mut channels_header = header.clone();
+
+ let reader = self.read_channels.create_channels_reader(&channels_header);
+ (channels_header, reader)
+ }).collect(),
+ })*/
+ }
+}
+
+impl<ChannelGroupReader> ChannelsReader for ChannelGroupsReader<ChannelGroupReader> where ChannelGroupReader: ChannelsReader {
+ type Channels = ChannelGroups<ChannelGroupReader::Channels>;
+
+ fn filter_block(&self, tile: (usize, &TileCoordinates)) -> bool {
+ self.indexed_channels.iter().any(|channel| channel.filter_block(tile))
+ }
+
+ fn read_block(&mut self, header: &Header, block: UncompressedBlock) -> UnitResult {
+ block.for_lines(|line|{
+
+ })
+ }
+
+ fn into_channels(self) -> Self::Channels {
+
+ }
+}
\ No newline at end of file
diff --git a/vendor/exr/src/image/crop.rs b/vendor/exr/src/image/crop.rs
new file mode 100644
index 0000000..63aadbf
--- /dev/null
+++ b/vendor/exr/src/image/crop.rs
@@ -0,0 +1,801 @@
+//! Crop away unwanted pixels. Includes automatic detection of bounding rectangle.
+//! Currently does not support deep data and resolution levels.
+
+use crate::meta::attribute::{IntegerBounds, LevelMode, ChannelList};
+use crate::math::{Vec2, RoundingMode};
+use crate::image::{Layer, FlatSamples, SpecificChannels, AnyChannels, FlatSamplesPixel, AnyChannel};
+use crate::image::write::channels::{GetPixel, WritableChannels, ChannelsWriter};
+use crate::meta::header::{LayerAttributes, Header};
+use crate::block::BlockIndex;
+
+/// Something that has a two-dimensional rectangular shape
+pub trait GetBounds {
+
+ /// The bounding rectangle of this pixel grid.
+ fn bounds(&self) -> IntegerBounds;
+}
+
+/// Inspect the pixels in this image to determine where to crop some away
+pub trait InspectSample: GetBounds {
+
+ /// The type of pixel in this pixel grid.
+ type Sample;
+
+ /// Index is not in world coordinates, but within the data window.
+ /// Position `(0,0)` always represents the top left pixel.
+ fn inspect_sample(&self, local_index: Vec2<usize>) -> Self::Sample;
+}
+
+/// Crop some pixels away when specifying a smaller rectangle
+pub trait Crop: Sized {
+
+ /// The type of this image after cropping (probably the same as before)
+ type Cropped;
+
+ /// Crop the image to exclude unwanted pixels.
+    /// Panics for invalid bounds (larger than the previous bounds).
+    /// The bounds are specified in absolute coordinates.
+    /// Does not reduce the allocation size of the current image, but instead only adjusts a few boundary numbers.
+ /// Use `reallocate_cropped()` on the return value to actually reduce the memory footprint.
+ fn crop(self, bounds: IntegerBounds) -> Self::Cropped;
+
+ /// Reduce your image to a smaller part, usually to save memory.
+ /// Crop if bounds are specified, return the original if no bounds are specified.
+    /// Does not reduce the allocation size of the current image, but instead only adjusts a few boundary numbers.
+ /// Use `reallocate_cropped()` on the return value to actually reduce the memory footprint.
+ fn try_crop(self, bounds: Option<IntegerBounds>) -> CropResult<Self::Cropped, Self> {
+ match bounds {
+ Some(bounds) => CropResult::Cropped(self.crop(bounds)),
+ None => CropResult::Empty { original: self },
+ }
+ }
+}
+
+/// Cropping an image fails if the image is fully transparent.
+/// Use [`or_crop_to_1x1_if_empty`] or [`or_none_if_empty`] to obtain a normal image again.
+#[must_use]
+#[derive(Debug, Clone, Copy, Eq, PartialEq)]
+pub enum CropResult<Cropped, Old> {
+
+ /// The image contained some pixels and has been cropped or left untouched
+ Cropped (Cropped),
+
+ /// All pixels in the image would be discarded, removing the whole image
+ Empty {
+
+ /// The fully discarded image which caused the cropping to fail
+ original: Old
+ }
+}
+
+/// Crop away unwanted pixels from the border if they match the specified rule.
+pub trait CropWhere<Sample>: Sized {
+
+ /// The type of the cropped image (probably the same as the original image).
+ type Cropped;
+
+ /// Crop away unwanted pixels from the border if they match the specified rule.
+    /// Does not reduce the allocation size of the current image, but instead only adjusts a few boundary numbers.
+ /// Use `reallocate_cropped()` on the return value to actually reduce the memory footprint.
+ fn crop_where(self, discard_if: impl Fn(Sample) -> bool) -> CropResult<Self::Cropped, Self>;
+
+ /// Crop away unwanted pixels from the border if they match the specified color.
+ /// If you want discard based on a rule, use `crop_where` with a closure instead.
+    /// Does not reduce the allocation size of the current image, but instead only adjusts a few boundary numbers.
+ /// Use `reallocate_cropped()` on the return value to actually reduce the memory footprint.
+ fn crop_where_eq(self, discard_color: impl Into<Sample>) -> CropResult<Self::Cropped, Self> where Sample: PartialEq;
+
+ /// Convert this data to cropped data without discarding any pixels.
+ fn crop_nowhere(self) -> Self::Cropped;
+}
+
+impl<Channels> Crop for Layer<Channels> {
+ type Cropped = Layer<CroppedChannels<Channels>>;
+
+ fn crop(self, bounds: IntegerBounds) -> Self::Cropped {
+ CroppedChannels::crop_layer(bounds, self)
+ }
+}
+
+impl<T> CropWhere<T::Sample> for T where T: Crop + InspectSample {
+ type Cropped = <Self as Crop>::Cropped;
+
+ fn crop_where(self, discard_if: impl Fn(T::Sample) -> bool) -> CropResult<Self::Cropped, Self> {
+ let smaller_bounds = {
+ let keep_if = |position| !discard_if(self.inspect_sample(position));
+ try_find_smaller_bounds(self.bounds(), keep_if)
+ };
+
+ self.try_crop(smaller_bounds)
+ }
+
+ fn crop_where_eq(self, discard_color: impl Into<T::Sample>) -> CropResult<Self::Cropped, Self> where T::Sample: PartialEq {
+ let discard_color: T::Sample = discard_color.into();
+ self.crop_where(|sample| sample == discard_color)
+ }
+
+ fn crop_nowhere(self) -> Self::Cropped {
+ let current_bounds = self.bounds();
+ self.crop(current_bounds)
+ }
+}
+
+/// A smaller window into an existing pixel storage
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct CroppedChannels<Channels> {
+
+ /// The uncropped pixel storage
+ pub full_channels: Channels,
+
+ /// The uncropped pixel storage bounds
+ pub full_bounds: IntegerBounds,
+
+ /// The cropped pixel storage bounds
+ pub cropped_bounds: IntegerBounds,
+}
+
+impl<Channels> CroppedChannels<Channels> {
+
+ /// Wrap a layer in a cropped view with adjusted bounds, but without reallocating your pixels
+ pub fn crop_layer(new_bounds: IntegerBounds, layer: Layer<Channels>) -> Layer<CroppedChannels<Channels>> {
+ Layer {
+ channel_data: CroppedChannels {
+ cropped_bounds: new_bounds,
+ full_bounds: layer.absolute_bounds(),
+ full_channels: layer.channel_data,
+ },
+
+ size: new_bounds.size,
+
+ attributes: LayerAttributes {
+ layer_position: new_bounds.position,
+ .. layer.attributes
+ },
+
+ encoding: layer.encoding
+ }
+ }
+}
+
+// TODO make cropped view readable if you only need a specific section of the image?
+
+// make cropped view writable:
+
+impl<'slf, Channels:'slf> WritableChannels<'slf> for CroppedChannels<Channels> where Channels: WritableChannels<'slf> {
+ fn infer_channel_list(&self) -> ChannelList {
+ self.full_channels.infer_channel_list() // no need for adjustments, as the layer content already reflects the changes
+ }
+
+ fn infer_level_modes(&self) -> (LevelMode, RoundingMode) {
+ self.full_channels.infer_level_modes()
+ }
+
+ type Writer = CroppedWriter<Channels::Writer>;
+
+ fn create_writer(&'slf self, header: &Header) -> Self::Writer {
+ let offset = (self.cropped_bounds.position - self.full_bounds.position)
+ .to_usize("invalid cropping bounds for cropped view").unwrap();
+
+ CroppedWriter { channels: self.full_channels.create_writer(header), offset }
+ }
+}
+
+/// A writer for the cropped view layer
+#[derive(Debug, Clone, PartialEq)]
+pub struct CroppedWriter<ChannelsWriter> {
+ channels: ChannelsWriter,
+ offset: Vec2<usize>
+}
+
+impl<'c, Channels> ChannelsWriter for CroppedWriter<Channels> where Channels: ChannelsWriter {
+ fn extract_uncompressed_block(&self, header: &Header, block: BlockIndex) -> Vec<u8> {
+ let block = BlockIndex {
+ pixel_position: block.pixel_position + self.offset,
+ .. block
+ };
+
+ self.channels.extract_uncompressed_block(header, block)
+ }
+}
+
+impl<Samples, Channels> InspectSample for Layer<SpecificChannels<Samples, Channels>> where Samples: GetPixel {
+ type Sample = Samples::Pixel;
+ fn inspect_sample(&self, local_index: Vec2<usize>) -> Samples::Pixel {
+ self.channel_data.pixels.get_pixel(local_index)
+ }
+}
+
+impl InspectSample for Layer<AnyChannels<FlatSamples>> {
+ type Sample = FlatSamplesPixel;
+
+ fn inspect_sample(&self, local_index: Vec2<usize>) -> FlatSamplesPixel {
+ self.sample_vec_at(local_index)
+ }
+}
+
+// ALGORITHM IDEA: for arbitrary channels, find the most desired channel,
+// and process that first, keeping the processed bounds as starting point for the other layers
+
+/// Realize a cropped view of the original data,
+/// by actually removing the unwanted original pixels,
+/// reducing the memory consumption.
+/// Currently not supported for `SpecificChannels`.
+pub trait ApplyCroppedView {
+
+ /// The simpler type after cropping is realized
+ type Reallocated;
+
+ /// Make the cropping real by reallocating the underlying storage,
+ /// with the goal of reducing total memory usage.
+ /// Currently not supported for `SpecificChannels`.
+ fn reallocate_cropped(self) -> Self::Reallocated;
+}
+
+impl ApplyCroppedView for Layer<CroppedChannels<AnyChannels<FlatSamples>>> {
+ type Reallocated = Layer<AnyChannels<FlatSamples>>;
+
+ fn reallocate_cropped(self) -> Self::Reallocated {
+ let cropped_absolute_bounds = self.channel_data.cropped_bounds;
+ let cropped_relative_bounds = cropped_absolute_bounds.with_origin(-self.channel_data.full_bounds.position);
+
+ assert!(self.absolute_bounds().contains(cropped_absolute_bounds), "bounds not valid for layer dimensions");
+ assert!(cropped_relative_bounds.size.area() > 0, "the cropped image would be empty");
+
+ Layer {
+ channel_data: if cropped_relative_bounds.size == self.channel_data.full_bounds.size {
+                assert_eq!(cropped_absolute_bounds.position, self.channel_data.full_bounds.position, "the crop bounds have the same size, but a different position");
+
+ // the cropping would not remove any pixels
+ self.channel_data.full_channels
+ }
+ else {
+ let start_x = cropped_relative_bounds.position.x() as usize; // safe, because just checked above
+ let start_y = cropped_relative_bounds.position.y() as usize; // safe, because just checked above
+ let x_range = start_x .. start_x + cropped_relative_bounds.size.width();
+ let old_width = self.channel_data.full_bounds.size.width();
+ let new_height = cropped_relative_bounds.size.height();
+
+ let channels = self.channel_data.full_channels.list.into_iter().map(|channel: AnyChannel<FlatSamples>| {
+ fn crop_samples<T:Copy>(samples: Vec<T>, old_width: usize, new_height: usize, x_range: std::ops::Range<usize>, y_start: usize) -> Vec<T> {
+ let filtered_lines = samples.chunks_exact(old_width).skip(y_start).take(new_height);
+ let trimmed_lines = filtered_lines.map(|line| &line[x_range.clone()]);
+ trimmed_lines.flatten().map(|x|*x).collect() // TODO does this use memcpy?
+ }
+
+ let samples = match channel.sample_data {
+ FlatSamples::F16(samples) => FlatSamples::F16(crop_samples(
+ samples, old_width, new_height, x_range.clone(), start_y
+ )),
+
+ FlatSamples::F32(samples) => FlatSamples::F32(crop_samples(
+ samples, old_width, new_height, x_range.clone(), start_y
+ )),
+
+ FlatSamples::U32(samples) => FlatSamples::U32(crop_samples(
+ samples, old_width, new_height, x_range.clone(), start_y
+ )),
+ };
+
+ AnyChannel { sample_data: samples, ..channel }
+ }).collect();
+
+ AnyChannels { list: channels }
+ },
+
+ attributes: self.attributes,
+ encoding: self.encoding,
+ size: self.size,
+ }
+ }
+}
+
+
+
+/// Return the smallest bounding rectangle including all pixels that satisfy the predicate.
+/// Worst case: Fully transparent image, visits each pixel once.
+/// Best case: Fully opaque image, visits two pixels.
+/// Returns `None` if the image is fully transparent.
+/// Returns `[(0,0), size]` if the image is fully opaque.
+/// Designed as a cache-friendly linear search, optimized for row-major image vectors.
+pub fn try_find_smaller_bounds(current_bounds: IntegerBounds, pixel_at: impl Fn(Vec2<usize>) -> bool) -> Option<IntegerBounds> {
+ assert_ne!(current_bounds.size.area(), 0, "cannot find smaller bounds of an image with zero width or height");
+ let Vec2(width, height) = current_bounds.size;
+
+ // scans top to bottom (left to right)
+ let first_top_left_pixel = (0 .. height)
+ .flat_map(|y| (0 .. width).map(move |x| Vec2(x,y)))
+ .find(|&position| pixel_at(position))?; // return none if no pixel should be kept
+
+ // scans bottom to top (right to left)
+ let first_bottom_right_pixel = (first_top_left_pixel.y() + 1 .. height) // excluding the top line
+ .flat_map(|y| (0 .. width).map(move |x| Vec2(x, y))) // x search cannot start at first_top.x, because this must catch all bottom pixels
+ .rev().find(|&position| pixel_at(position))
+ .unwrap_or(first_top_left_pixel); // did not find any at bottom, but we know top has some pixel
+
+ // now we know exactly how much we can throw away top and bottom,
+ // but we don't know exactly about left or right
+ let top = first_top_left_pixel.y();
+ let bottom = first_bottom_right_pixel.y();
+
+    // we only know some arbitrary left and right bounds, which we need to refine,
+    // because the actual image contents might be wider than the corner points.
+ // we know that we do not need to look in the center between min x and max x,
+ // as these must be included in any case.
+ let mut min_left_x = first_top_left_pixel.x().min(first_bottom_right_pixel.x());
+ let mut max_right_x = first_bottom_right_pixel.x().max(first_top_left_pixel.x());
+
+ // requires for loop, because bounds change while searching
+ for y in top ..= bottom {
+
+ // escape the loop if there is nothing left to crop
+ if min_left_x == 0 && max_right_x == width - 1 { break; }
+
+ // search from right image edge towards image center, until known max x, for existing pixels,
+ // possibly including some pixels that would have been cropped otherwise
+ if max_right_x != width - 1 {
+ max_right_x = (max_right_x + 1 .. width).rev() // excluding current max
+ .find(|&x| pixel_at(Vec2(x, y)))
+ .unwrap_or(max_right_x);
+ }
+
+ // search from left image edge towards image center, until known min x, for existing pixels,
+ // possibly including some pixels that would have been cropped otherwise
+ if min_left_x != 0 {
+ min_left_x = (0 .. min_left_x) // excluding current min
+ .find(|&x| pixel_at(Vec2(x, y)))
+ .unwrap_or(min_left_x);
+ }
+ }
+
+ // TODO add 1px margin to avoid interpolation issues?
+ let local_start = Vec2(min_left_x, top);
+ let local_end = Vec2(max_right_x + 1, bottom + 1);
+ Some(IntegerBounds::new(
+ current_bounds.position + local_start.to_i32(),
+ local_end - local_start
+ ))
+}
+
+impl<S> GetBounds for Layer<S> {
+ fn bounds(&self) -> IntegerBounds {
+ self.absolute_bounds()
+ }
+}
+
+impl<Cropped, Original> CropResult<Cropped, Original> {
+
+ /// If the image was fully empty, return `None`, otherwise return `Some(cropped_image)`.
+ pub fn or_none_if_empty(self) -> Option<Cropped> {
+ match self {
+ CropResult::Cropped (cropped) => Some(cropped),
+ CropResult::Empty { .. } => None,
+ }
+ }
+
+ /// If the image was fully empty, crop to one single pixel of all the transparent pixels instead,
+ /// leaving the layer intact while reducing memory usage.
+ pub fn or_crop_to_1x1_if_empty(self) -> Cropped where Original: Crop<Cropped=Cropped> + GetBounds {
+ match self {
+ CropResult::Cropped (cropped) => cropped,
+ CropResult::Empty { original } => {
+ let bounds = original.bounds();
+ if bounds.size == Vec2(0,0) { panic!("layer has width and height of zero") }
+ original.crop(IntegerBounds::new(bounds.position, Vec2(1,1)))
+ },
+ }
+ }
+}
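+
+
+// A minimal usage sketch of the traits above (illustrative only, not called anywhere in this crate):
+// discard fully-zero border pixels of a flat layer, fall back to a single pixel if everything
+// is zero, and then reallocate the storage to actually free the cropped-away memory.
+#[allow(dead_code)]
+fn crop_zero_borders(layer: Layer<AnyChannels<FlatSamples>>) -> Layer<AnyChannels<FlatSamples>> {
+    layer
+        .crop_where(|pixel: FlatSamplesPixel| pixel.iter().all(|sample| sample.to_f32() == 0.0))
+        .or_crop_to_1x1_if_empty() // keep a 1x1 layer instead of failing on an empty result
+        .reallocate_cropped() // copy the remaining pixels into a smaller allocation
+}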
+
+
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn find_bounds() {
+ fn find_bounds(offset: Vec2<i32>, lines: &Vec<Vec<i32>>) -> IntegerBounds {
+ if let Some(first_line) = lines.first() {
+ assert!(lines.iter().all(|line| line.len() == first_line.len()), "invalid test input");
+ IntegerBounds::new(offset, (first_line.len(), lines.len()))
+ }
+ else {
+ IntegerBounds::new(offset, (0,0))
+ }
+ }
+
+ fn assert_found_smaller_bounds(offset: Vec2<i32>, uncropped_lines: Vec<Vec<i32>>, expected_cropped_lines: Vec<Vec<i32>>) {
+ let old_bounds = find_bounds(offset, &uncropped_lines);
+
+ let found_bounds = try_find_smaller_bounds(
+ old_bounds,
+ |position| uncropped_lines[position.y()][position.x()] != 0
+ ).unwrap();
+
+ let found_bounds = found_bounds.with_origin(-offset); // make indices local
+
+ let cropped_lines: Vec<Vec<i32>> =
+ uncropped_lines[found_bounds.position.y() as usize .. found_bounds.end().y() as usize]
+ .iter().map(|uncropped_line|{
+ uncropped_line[found_bounds.position.x() as usize .. found_bounds.end().x() as usize].to_vec()
+ }).collect();
+
+ assert_eq!(cropped_lines, expected_cropped_lines);
+ }
+
+ assert_found_smaller_bounds(
+ Vec2(-3,-3),
+
+ vec![
+ vec![ 2, 3, 4 ],
+ vec![ 2, 3, 4 ],
+ ],
+
+ vec![
+ vec![ 2, 3, 4 ],
+ vec![ 2, 3, 4 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(-3,-3),
+
+ vec![
+ vec![ 2 ],
+ ],
+
+ vec![
+ vec![ 2 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(-3,-3),
+
+ vec![
+ vec![ 0 ],
+ vec![ 2 ],
+ vec![ 0 ],
+ vec![ 0 ],
+ ],
+
+ vec![
+ vec![ 2 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(-3,-3),
+
+ vec![
+ vec![ 0, 0, 0, 3, 0 ],
+ ],
+
+ vec![
+ vec![ 3 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(3,3),
+
+ vec![
+ vec![ 0, 1, 1, 2, 1, 0 ],
+ vec![ 0, 1, 3, 1, 1, 0 ],
+ vec![ 0, 1, 1, 1, 1, 0 ],
+ ],
+
+ vec![
+ vec![ 1, 1, 2, 1 ],
+ vec![ 1, 3, 1, 1 ],
+ vec![ 1, 1, 1, 1 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(3,3),
+
+ vec![
+ vec![ 0, 0, 0, 0 ],
+ vec![ 1, 1, 2, 1 ],
+ vec![ 1, 3, 1, 1 ],
+ vec![ 1, 1, 1, 1 ],
+ vec![ 0, 0, 0, 0 ],
+ ],
+
+ vec![
+ vec![ 1, 1, 2, 1 ],
+ vec![ 1, 3, 1, 1 ],
+ vec![ 1, 1, 1, 1 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(3,3),
+
+ vec![
+ vec![ 0, 1, 1, 2, 1, 0 ],
+ vec![ 0, 0, 3, 1, 0, 0 ],
+ vec![ 0, 1, 1, 1, 1, 0 ],
+ ],
+
+ vec![
+ vec![ 1, 1, 2, 1 ],
+ vec![ 0, 3, 1, 0 ],
+ vec![ 1, 1, 1, 1 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(3,3),
+
+ vec![
+ vec![ 0, 0, 1, 2, 0, 0 ],
+ vec![ 0, 1, 3, 1, 1, 0 ],
+ vec![ 0, 0, 1, 1, 0, 0 ],
+ ],
+
+ vec![
+ vec![ 0, 1, 2, 0 ],
+ vec![ 1, 3, 1, 1 ],
+ vec![ 0, 1, 1, 0 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(1,3),
+
+ vec![
+ vec![ 1, 0, 0, 0, ],
+ vec![ 0, 0, 0, 0, ],
+ vec![ 0, 0, 0, 0, ],
+ ],
+
+ vec![
+ vec![ 1 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(1,3),
+
+ vec![
+ vec![ 0, 0, 0, 0, ],
+ vec![ 0, 1, 0, 0, ],
+ vec![ 0, 0, 0, 0, ],
+ ],
+
+ vec![
+ vec![ 1 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(-1,-3),
+
+ vec![
+ vec![ 0, 0, 0, 0, ],
+ vec![ 0, 0, 0, 1, ],
+ vec![ 0, 0, 0, 0, ],
+ ],
+
+ vec![
+ vec![ 1 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(-1,-3),
+
+ vec![
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 1, 1, 1, 0, 0 ],
+ vec![ 0, 0, 1, 1, 1, 0, 0 ],
+ vec![ 0, 0, 1, 1, 1, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ ],
+
+ vec![
+ vec![ 1, 1, 1 ],
+ vec![ 1, 1, 1 ],
+ vec![ 1, 1, 1 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(1000,-300),
+
+ vec![
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 1, 1, 1, 0, 0 ],
+ vec![ 0, 1, 1, 1, 1, 1, 0 ],
+ vec![ 0, 0, 1, 1, 1, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ ],
+
+ vec![
+ vec![ 0, 1, 1, 1, 0 ],
+ vec![ 1, 1, 1, 1, 1 ],
+ vec![ 0, 1, 1, 1, 0 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(-10,-300),
+
+ vec![
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 1, 0, 1, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 1, 0, 1, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ ],
+
+ vec![
+ vec![ 1, 0, 1 ],
+ vec![ 0, 0, 0 ],
+ vec![ 1, 0, 1 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(-10,-300),
+
+ vec![
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 1, 0, 1, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ ],
+
+ vec![
+ vec![ 1, 0, 1 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(-10,-300),
+
+ vec![
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 1, 0, 0, 0 ],
+ vec![ 0, 0, 0, 2, 0, 0, 0 ],
+ vec![ 0, 0, 3, 3, 3, 0, 0 ],
+ vec![ 0, 0, 0, 4, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ ],
+
+ vec![
+ vec![ 0, 1, 0 ],
+ vec![ 0, 2, 0 ],
+ vec![ 3, 3, 3 ],
+ vec![ 0, 4, 0 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(-10,-300),
+
+ vec![
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 1, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 1, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ ],
+
+ vec![
+ vec![ 0, 0, 1 ],
+ vec![ 0, 0, 0 ],
+ vec![ 0, 0, 0 ],
+ vec![ 1, 0, 0 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(-10,-300),
+
+ vec![
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 1, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 1, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ ],
+
+ vec![
+ vec![ 1, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 1 ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(-10,-300),
+
+ vec![
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 1, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ vec![ 0, 0, 1, 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0, 0, 0, 0 ],
+ ],
+
+ vec![
+ vec![ 1 ],
+ vec![ 0 ],
+ vec![ 0 ],
+ vec![ 1 ],
+ ]
+ );
+
+
+ assert_found_smaller_bounds(
+ Vec2(-1,-3),
+
+ vec![
+ vec![ 0, 0, 1, 0, ],
+ vec![ 0, 0, 0, 1, ],
+ vec![ 0, 0, 0, 0, ],
+ ],
+
+ vec![
+ vec![ 1, 0, ],
+ vec![ 0, 1, ],
+ ]
+ );
+
+ assert_found_smaller_bounds(
+ Vec2(-1,-3),
+
+ vec![
+ vec![ 1, 0, 0, 0, ],
+ vec![ 0, 1, 0, 0, ],
+ vec![ 0, 0, 0, 0, ],
+ vec![ 0, 0, 0, 0, ],
+ ],
+
+ vec![
+ vec![ 1, 0, ],
+ vec![ 0, 1, ],
+ ]
+ );
+ }
+
+
+ #[test]
+ fn find_no_bounds() {
+ let pixels = vec![
+ vec![ 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0 ],
+ vec![ 0, 0, 0, 0 ],
+ ];
+
+ let bounds = try_find_smaller_bounds(
+ IntegerBounds::new((0,0), (4,3)),
+ |position| pixels[position.y()][position.x()] != 0
+ );
+
+ assert_eq!(bounds, None)
+ }
+
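+
+    // sanity check: a fully opaque image keeps its full bounds,
+    // as documented on `try_find_smaller_bounds`
+    #[test]
+    fn find_full_bounds_when_opaque() {
+        let bounds = try_find_smaller_bounds(
+            IntegerBounds::new((1,2), (3,2)),
+            |_position| true // keep every pixel
+        );
+
+        assert_eq!(bounds, Some(IntegerBounds::new((1,2), (3,2))))
+    }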
+}
+
+
+
+
diff --git a/vendor/exr/src/image/mod.rs b/vendor/exr/src/image/mod.rs
new file mode 100644
index 0000000..db75050
--- /dev/null
+++ b/vendor/exr/src/image/mod.rs
@@ -0,0 +1,1326 @@
+
+//! Data structures that represent a complete exr image.
+//! Contains generic structs that must be nested to obtain a complete image type.
+//!
+//!
+//! For example, an rgba image containing multiple layers
+//! can be represented using `Image<Layers<SpecificChannels<MyPixelStorage>>>`.
+//! An image containing a single layer with arbitrary channels and no deep data
+//! can be represented using `Image<Layer<AnyChannels<FlatSamples>>>`.
+//!
+//!
+//! These and other predefined types are included in this module as
+//! 1. `PixelImage`: A single layer, fixed set of arbitrary channels.
+//! 1. `PixelLayersImage`: Multiple layers, fixed set of arbitrary channels.
+//! 1. `RgbaImage`: A single layer, fixed set of channels: rgb, optional a.
+//! 1. `RgbaLayersImage`: Multiple layers, fixed set of channels: rgb, optional a.
+//! 1. `FlatImage`: Multiple layers, any channels, no deep data.
+//! 1. `AnyImage`: All supported data (multiple layers, arbitrary channels, no deep data yet)
+//!
+//! You can also use your own types inside an image,
+//! for example if you want to use a custom sample storage.
+//!
+//! This is the high-level interface for the pixels of an image.
+//! See `exr::blocks` module for a low-level interface.
+
+pub mod read;
+pub mod write;
+pub mod crop;
+pub mod pixel_vec;
+pub mod recursive;
+// pub mod channel_groups;
+
+
+use crate::meta::header::{ImageAttributes, LayerAttributes};
+use crate::meta::attribute::{Text, LineOrder};
+use half::f16;
+use crate::math::{Vec2, RoundingMode};
+use crate::compression::Compression;
+use smallvec::{SmallVec};
+use crate::error::Error;
+
+/// Don't do anything
+pub(crate) fn ignore_progress(_progress: f64){}
+
+/// This image type contains all supported exr features and can represent almost any image.
+/// It currently does not support deep data yet.
+pub type AnyImage = Image<Layers<AnyChannels<Levels<FlatSamples>>>>;
+
+/// This image type contains the most common exr features and can represent almost any plain image.
+/// Does not contain resolution levels. Does not support deep data.
+pub type FlatImage = Image<Layers<AnyChannels<FlatSamples>>>;
+
+/// This image type contains multiple layers, with each layer containing a user-defined type of pixels.
+pub type PixelLayersImage<Storage, Channels> = Image<Layers<SpecificChannels<Storage, Channels>>>;
+
+/// This image type contains a single layer containing a user-defined type of pixels.
+pub type PixelImage<Storage, Channels> = Image<Layer<SpecificChannels<Storage, Channels>>>;
+
+/// This image type contains multiple layers, with each layer containing a user-defined type of rgba pixels.
+pub type RgbaLayersImage<Storage> = PixelLayersImage<Storage, RgbaChannels>;
+
+/// This image type contains a single layer containing a user-defined type of rgba pixels.
+pub type RgbaImage<Storage> = PixelImage<Storage, RgbaChannels>;
+
+/// Contains information about the channels in an rgba image, in the order `(red, green, blue, alpha)`.
+/// The alpha channel is not required. May be `None` if the image did not contain an alpha channel.
+pub type RgbaChannels = (ChannelDescription, ChannelDescription, ChannelDescription, Option<ChannelDescription>);
+
+/// Contains information about the channels in an rgb image, in the order `(red, green, blue)`.
+pub type RgbChannels = (ChannelDescription, ChannelDescription, ChannelDescription);
+
+/// The complete exr image.
+/// The `Layers` parameter can be either a single `Layer` or a list of `Layers`.
+#[derive(Debug, Clone, PartialEq)]
+pub struct Image<Layers> {
+
+ /// Attributes that apply to the whole image file.
+ /// These attributes appear in each layer of the file.
+ /// Excludes technical meta data.
+ /// Each layer in this image also has its own attributes.
+ pub attributes: ImageAttributes,
+
+ /// The layers contained in the image file.
+ /// Can be either a single `Layer` or a list of layers.
+ pub layer_data: Layers,
+}
+
+/// A list of layers. `Channels` can be `SpecificChannels` or `AnyChannels`.
+pub type Layers<Channels> = SmallVec<[Layer<Channels>; 2]>;
+
+/// A single Layer, including fancy attributes and compression settings.
+/// `Channels` can be either `SpecificChannels` or `AnyChannels`
+#[derive(Debug, Clone, PartialEq)]
+pub struct Layer<Channels> {
+
+ /// The actual pixel data. Either `SpecificChannels` or `AnyChannels`
+ pub channel_data: Channels,
+
+ /// Attributes that apply to this layer.
+ /// May still contain attributes that should be considered global for an image file.
+ /// Excludes technical meta data: Does not contain data window size, line order, tiling, or compression attributes.
+ /// The image also has attributes, which do not differ per layer.
+ pub attributes: LayerAttributes,
+
+ /// The pixel resolution of this layer.
+ /// See `layer.attributes` for more attributes, like for example layer position.
+ pub size: Vec2<usize>,
+
+ /// How the pixels are split up and compressed.
+ pub encoding: Encoding
+}
+
+/// How the pixels are split up and compressed.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub struct Encoding {
+
+ /// How the pixel data of all channels in this layer is compressed. May be `Compression::Uncompressed`.
+ /// See `layer.attributes` for more attributes.
+ pub compression: Compression,
+
+ /// Describes how the pixels of this layer are divided into smaller blocks.
+ /// Either splits the image into its scan lines or splits the image into tiles of the specified size.
+ /// A single block can be loaded without processing all bytes of a file.
+ pub blocks: Blocks,
+
+ /// In what order the tiles of this header occur in the file.
+ /// Does not change any actual image orientation.
+ /// See `layer.attributes` for more attributes.
+ pub line_order: LineOrder,
+}
+
+/// How the image pixels are split up into separate blocks.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Blocks {
+
+ /// The image is divided into scan line blocks.
+ /// The number of scan lines in a block depends on the compression method.
+ ScanLines,
+
+ /// The image is divided into tile blocks.
+ /// Also specifies the size of each tile in the image
+ /// and whether this image contains multiple resolution levels.
+ ///
+ /// The inner `Vec2` describes the size of each tile.
+ /// Stays the same number of pixels across all levels.
+ Tiles (Vec2<usize>)
+}
+
+
+/// A grid of pixels. The pixels are written to your custom pixel storage.
+/// `PixelStorage` can be anything, from a flat `Vec<f16>` to `Vec<Vec<AnySample>>`, as desired.
+/// In order to write this image to a file, your `PixelStorage` must implement [`GetPixel`].
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct SpecificChannels<Pixels, ChannelsDescription> {
+
+ /// A description of the channels in the file, as opposed to the channels in memory.
+ /// Should always be a tuple containing `ChannelDescription`s, one description for each channel.
+ pub channels: ChannelsDescription, // TODO this is awkward. can this be not a type parameter please? maybe vec<option<chan_info>> ??
+
+ /// Your custom pixel storage
+ // TODO should also support `Levels<YourStorage>`, where levels are desired!
+ pub pixels: Pixels, // TODO rename to "pixels"?
+}
+
+
+/// A dynamic list of arbitrary channels.
+/// `Samples` can currently only be `FlatSamples` or `Levels<FlatSamples>`.
+#[derive(Debug, Clone, PartialEq)]
+pub struct AnyChannels<Samples> {
+
+ /// This list must be sorted alphabetically, by channel name.
+    /// Use `AnyChannels::sort` for automatic sorting.
+ pub list: SmallVec<[AnyChannel<Samples>; 4]>
+}
+
+/// A single arbitrary channel.
+/// `Samples` can currently only be `FlatSamples` or `Levels<FlatSamples>`
+#[derive(Debug, Clone, PartialEq)]
+pub struct AnyChannel<Samples> {
+
+ /// One of "R", "G", or "B" most of the time.
+ pub name: Text,
+
+ /// The actual pixel data.
+ /// Can be `FlatSamples` or `Levels<FlatSamples>`.
+ pub sample_data: Samples,
+
+ /// This attribute only tells lossy compression methods
+ /// whether this value should be quantized exponentially or linearly.
+ ///
+ /// Should be `false` for red, green, blue and luma channels, as they are not perceived linearly.
+ /// Should be `true` for hue, chroma, saturation, and alpha channels.
+ pub quantize_linearly: bool,
+
+ /// How many of the samples are skipped compared to the other channels in this layer.
+ ///
+ /// Can be used for chroma subsampling for manual lossy data compression.
+ /// Values other than 1 are allowed only in flat, scan-line based images.
+ /// If an image is deep or tiled, the sampling rates for all of its channels must be 1.
+ pub sampling: Vec2<usize>,
+}
+
+/// One or multiple resolution levels of the same image.
+/// `Samples` can be `FlatSamples`.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum Levels<Samples> {
+
+ /// A single image without smaller versions of itself.
+ /// If you only want to handle exclusively this case, use `Samples` directly, and not `Levels<Samples>`.
+ Singular(Samples),
+
+ /// Contains uniformly scaled smaller versions of the original.
+ Mip
+ {
+ /// Whether to round up or down when calculating Mip/Rip levels.
+ rounding_mode: RoundingMode,
+
+ /// The smaller versions of the original.
+ level_data: LevelMaps<Samples>
+ },
+
+ /// Contains any possible combination of smaller versions of the original.
+ Rip
+ {
+ /// Whether to round up or down when calculating Mip/Rip levels.
+ rounding_mode: RoundingMode,
+
+ /// The smaller versions of the original.
+ level_data: RipMaps<Samples>
+ },
+}
+
+/// A list of resolution levels. `Samples` can currently only be `FlatSamples`.
+// or `DeepAndFlatSamples` (not yet implemented).
+pub type LevelMaps<Samples> = Vec<Samples>;
+
+/// In addition to the full resolution image,
+/// this layer also contains smaller versions,
+/// and each smaller version has further versions with varying aspect ratios.
+/// `Samples` can currently only be `FlatSamples`.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct RipMaps<Samples> {
+
+ /// A flattened list containing the individual levels
+ pub map_data: LevelMaps<Samples>,
+
+ /// The number of levels that were generated along the x-axis and y-axis.
+ pub level_count: Vec2<usize>,
+}
+
+
+// TODO deep data
+/*#[derive(Clone, PartialEq)]
+pub enum DeepAndFlatSamples {
+ Deep(DeepSamples),
+ Flat(FlatSamples)
+}*/
+
+/// A vector of non-deep values (one value per pixel per channel).
+/// Stores row after row in a single vector.
+/// The precision of all values is either `f16`, `f32` or `u32`.
+///
+/// Since this is close to the pixel layout in the byte file,
+/// this will most likely be the fastest storage.
+/// Using a different storage, for example `SpecificChannels`,
+/// will probably be slower.
+#[derive(Clone, PartialEq)] // debug is implemented manually
+pub enum FlatSamples {
+
+ /// A vector of non-deep `f16` values.
+ F16(Vec<f16>),
+
+ /// A vector of non-deep `f32` values.
+ F32(Vec<f32>),
+
+ /// A vector of non-deep `u32` values.
+ U32(Vec<u32>),
+}
+
+
+/*#[derive(Clone, PartialEq)]
+pub enum DeepSamples {
+ F16(Vec<Vec<f16>>),
+ F32(Vec<Vec<f32>>),
+ U32(Vec<Vec<u32>>),
+}*/
+
+use crate::block::samples::*;
+use crate::meta::attribute::*;
+use crate::error::Result;
+use crate::block::samples::Sample;
+use crate::image::write::channels::*;
+use crate::image::write::layers::WritableLayers;
+use crate::image::write::samples::{WritableSamples};
+use crate::meta::{mip_map_levels, rip_map_levels};
+use crate::io::Data;
+use crate::image::recursive::{NoneMore, Recursive, IntoRecursive};
+use std::marker::PhantomData;
+use std::ops::Not;
+use crate::image::validate_results::{ValidationOptions};
+
+
+impl<Channels> Layer<Channels> {
+ /// Sometimes called "data window"
+ pub fn absolute_bounds(&self) -> IntegerBounds {
+ IntegerBounds::new(self.attributes.layer_position, self.size)
+ }
+}
+
+
+impl<SampleStorage, Channels> SpecificChannels<SampleStorage, Channels> {
+ /// Create some pixels with channel information.
+ /// The `Channels` must be a tuple containing either `ChannelDescription` or `Option<ChannelDescription>`.
+ /// The length of the tuple dictates the number of channels in the sample storage.
+ pub fn new(channels: Channels, source_samples: SampleStorage) -> Self
+ where
+ SampleStorage: GetPixel,
+ SampleStorage::Pixel: IntoRecursive,
+ Channels: Sync + Clone + IntoRecursive,
+ <Channels as IntoRecursive>::Recursive: WritableChannelsDescription<<SampleStorage::Pixel as IntoRecursive>::Recursive>,
+ {
+ SpecificChannels { channels, pixels: source_samples }
+ }
+}
+
+/// Convert this type into one of the known sample types.
+/// Also specify the preferred native type, which dictates the default sample type in the image.
+pub trait IntoSample: IntoNativeSample {
+
+ /// The native sample types that this type should be converted to.
+ const PREFERRED_SAMPLE_TYPE: SampleType;
+}
+
+impl IntoSample for f16 { const PREFERRED_SAMPLE_TYPE: SampleType = SampleType::F16; }
+impl IntoSample for f32 { const PREFERRED_SAMPLE_TYPE: SampleType = SampleType::F32; }
+impl IntoSample for u32 { const PREFERRED_SAMPLE_TYPE: SampleType = SampleType::U32; }
+
+/// Used to construct a `SpecificChannels`.
+/// Call `with_channel` as many times as desired,
+/// and then call `with_pixels` to define the colors.
+#[derive(Debug)]
+pub struct SpecificChannelsBuilder<RecursiveChannels, RecursivePixel> {
+ channels: RecursiveChannels,
+ px: PhantomData<RecursivePixel>
+}
+
+/// This check can be executed at compile time
+/// if the channel names are `&'static str` and the compiler is smart enough.
+pub trait CheckDuplicates {
+
+ /// Check for duplicate channel names.
+ fn already_contains(&self, name: &Text) -> bool;
+}
+
+impl CheckDuplicates for NoneMore {
+ fn already_contains(&self, _: &Text) -> bool { false }
+}
+
+impl<Inner: CheckDuplicates> CheckDuplicates for Recursive<Inner, ChannelDescription> {
+ fn already_contains(&self, name: &Text) -> bool {
+ &self.value.name == name || self.inner.already_contains(name)
+ }
+}
+
+impl SpecificChannels<(),()>
+{
+ /// Start building some specific channels. On the result of this function,
+ /// call `with_named_channel` as many times as desired,
+ /// and then call `with_pixels` to define the colors.
+ pub fn build() -> SpecificChannelsBuilder<NoneMore, NoneMore> {
+ SpecificChannelsBuilder { channels: NoneMore, px: Default::default() }
+ }
+}
+
+impl<RecursiveChannels: CheckDuplicates, RecursivePixel> SpecificChannelsBuilder<RecursiveChannels, RecursivePixel>
+{
+ /// Add another channel to this image. Does not add the actual pixels,
+ /// but instead only declares the presence of the channel.
+ /// Panics if the name contains unsupported characters.
+ /// Panics if a channel with the same name already exists.
+ /// Use `Text::new_or_none()` to manually handle these cases.
+ /// Use `with_channel_details` instead if you want to specify more options than just the name of the channel.
+ /// The generic parameter can usually be inferred from the closure in `with_pixels`.
+ pub fn with_channel<Sample: IntoSample>(self, name: impl Into<Text>)
+ -> SpecificChannelsBuilder<Recursive<RecursiveChannels, ChannelDescription>, Recursive<RecursivePixel, Sample>>
+ {
+ self.with_channel_details::<Sample>(ChannelDescription::named(name, Sample::PREFERRED_SAMPLE_TYPE))
+ }
+
+ /// Add another channel to this image. Does not add the actual pixels,
+ /// but instead only declares the presence of the channel.
+ /// Use `with_channel` instead if you only want to specify the name of the channel.
+ /// Panics if a channel with the same name already exists.
+ /// The generic parameter can usually be inferred from the closure in `with_pixels`.
+ pub fn with_channel_details<Sample: Into<Sample>>(self, channel: ChannelDescription)
+ -> SpecificChannelsBuilder<Recursive<RecursiveChannels, ChannelDescription>, Recursive<RecursivePixel, Sample>>
+ {
+ // duplicate channel names are checked later, but also check now to make sure there are no problems with the `SpecificChannelsWriter`
+ assert!(self.channels.already_contains(&channel.name).not(), "channel name `{}` is duplicate", channel.name);
+
+ SpecificChannelsBuilder {
+ channels: Recursive::new(self.channels, channel),
+ px: PhantomData::default()
+ }
+ }
+
+ /// Specify the actual pixel contents of the image.
+ /// You can pass a closure that returns a color for each pixel (`Fn(Vec2<usize>) -> Pixel`),
+ /// or you can pass your own image if it implements `GetPixel`.
+ /// The pixel type must be a tuple with the correct number of entries, depending on the number of channels.
+ /// The tuple entries can be either `f16`, `f32`, `u32` or `Sample`.
+ /// Use `with_pixel_fn` instead of this function, to get extra type safety for your pixel closure.
+ pub fn with_pixels<Pixels>(self, get_pixel: Pixels) -> SpecificChannels<Pixels, RecursiveChannels>
+ where Pixels: GetPixel, <Pixels as GetPixel>::Pixel: IntoRecursive<Recursive=RecursivePixel>,
+ {
+ SpecificChannels {
+ channels: self.channels,
+ pixels: get_pixel
+ }
+ }
+
+ /// Specify the contents of the image.
+ /// The pixel type must be a tuple with the correct number of entries, depending on the number of channels.
+ /// The tuple entries can be either `f16`, `f32`, `u32` or `Sample`.
+ /// Use `with_pixels` instead of this function, if you want to pass an object that is not a closure.
+ ///
+ /// Usually, the compiler can infer the type of the pixel (for example, `f16,f32,f32`) from the closure.
+ /// If that's not possible, you can specify the type of the channels
+    /// when declaring the channel (for example, `with_channel::<f32>("R")`).
+ pub fn with_pixel_fn<Pixel, Pixels>(self, get_pixel: Pixels) -> SpecificChannels<Pixels, RecursiveChannels>
+ where Pixels: Sync + Fn(Vec2<usize>) -> Pixel, Pixel: IntoRecursive<Recursive=RecursivePixel>,
+ {
+ SpecificChannels {
+ channels: self.channels,
+ pixels: get_pixel
+ }
+ }
+}
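+
+// For example, a two-channel pixel description could be declared as follows
+// (sketch only; the closure stands in for your own pixel source):
+//
+//     SpecificChannels::build()
+//         .with_channel::<f32>("X")
+//         .with_channel::<f32>("Y")
+//         .with_pixel_fn(|position: Vec2<usize>| (position.x() as f32, position.y() as f32))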
+
+impl<SampleStorage> SpecificChannels<
+ SampleStorage, (ChannelDescription, ChannelDescription, ChannelDescription, ChannelDescription)
+>
+{
+
+ /// Create an image with red, green, blue, and alpha channels.
+ /// You can pass a closure that returns a color for each pixel (`Fn(Vec2<usize>) -> (R,G,B,A)`),
+ /// or you can pass your own image if it implements `GetPixel<Pixel=(R,G,B,A)>`.
+ /// Each of `R`, `G`, `B` and `A` can be either `f16`, `f32`, `u32`, or `Sample`.
+ pub fn rgba<R, G, B, A>(source_samples: SampleStorage) -> Self
+ where R: IntoSample, G: IntoSample,
+ B: IntoSample, A: IntoSample,
+ SampleStorage: GetPixel<Pixel=(R, G, B, A)>
+ {
+ SpecificChannels {
+ channels: (
+ ChannelDescription::named("R", R::PREFERRED_SAMPLE_TYPE),
+ ChannelDescription::named("G", G::PREFERRED_SAMPLE_TYPE),
+ ChannelDescription::named("B", B::PREFERRED_SAMPLE_TYPE),
+ ChannelDescription::named("A", A::PREFERRED_SAMPLE_TYPE),
+ ),
+ pixels: source_samples
+ }
+ }
+}
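+
+// For example, rgba pixels can be generated directly from a closure over pixel positions
+// (sketch only; any other `GetPixel` implementor works the same way):
+//
+//     SpecificChannels::rgba(|position: Vec2<usize>| (
+//         position.x() as f32 / 2048.0, // red
+//         position.y() as f32 / 2048.0, // green
+//         0.5_f32,                      // blue
+//         f16::from_f32(1.0)            // alpha
+//     ))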
+
+impl<SampleStorage> SpecificChannels<
+ SampleStorage, (ChannelDescription, ChannelDescription, ChannelDescription)
+>
+{
+
+ /// Create an image with red, green, and blue channels.
+ /// You can pass a closure that returns a color for each pixel (`Fn(Vec2<usize>) -> (R,G,B)`),
+ /// or you can pass your own image if it implements `GetPixel<Pixel=(R,G,B)>`.
+ /// Each of `R`, `G` and `B` can be either `f16`, `f32`, `u32`, or `Sample`.
+ pub fn rgb<R, G, B>(source_samples: SampleStorage) -> Self
+ where R: IntoSample, G: IntoSample, B: IntoSample,
+ SampleStorage: GetPixel<Pixel=(R, G, B)>
+ {
+ SpecificChannels {
+ channels: (
+ ChannelDescription::named("R", R::PREFERRED_SAMPLE_TYPE),
+ ChannelDescription::named("G", G::PREFERRED_SAMPLE_TYPE),
+ ChannelDescription::named("B", B::PREFERRED_SAMPLE_TYPE),
+ ),
+ pixels: source_samples
+ }
+ }
+}
+
+
+/// A list of samples representing a single pixel.
+/// Does not heap allocate for images with 8 or fewer channels.
+pub type FlatSamplesPixel = SmallVec<[Sample; 8]>;
+
+// TODO also deep samples?
+impl Layer<AnyChannels<FlatSamples>> {
+
+ /// Use `samples_at` if you can borrow from this layer
+ pub fn sample_vec_at(&self, position: Vec2<usize>) -> FlatSamplesPixel {
+ self.samples_at(position).collect()
+ }
+
+ /// Lookup all channels of a single pixel in the image
+ pub fn samples_at(&self, position: Vec2<usize>) -> FlatSampleIterator<'_> {
+ FlatSampleIterator {
+ layer: self,
+ channel_index: 0,
+ position
+ }
+ }
+}
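+
+// For example, all channel samples of the pixel at x = 10, y = 20 can be collected
+// with `layer.sample_vec_at(Vec2(10, 20))`, or iterated lazily via `layer.samples_at(Vec2(10, 20))`.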
+
+/// Iterate over all channels of a single pixel in the image
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub struct FlatSampleIterator<'s> {
+ layer: &'s Layer<AnyChannels<FlatSamples>>,
+ channel_index: usize,
+ position: Vec2<usize>,
+}
+
+impl Iterator for FlatSampleIterator<'_> {
+ type Item = Sample;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.channel_index < self.layer.channel_data.list.len() {
+ let channel = &self.layer.channel_data.list[self.channel_index];
+ let sample = channel.sample_data.value_by_flat_index(self.position.flat_index_for_size(self.layer.size));
+ self.channel_index += 1;
+ Some(sample)
+ }
+ else { None }
+ }
+
+ fn nth(&mut self, pos: usize) -> Option<Self::Item> {
+ self.channel_index += pos;
+ self.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let remaining = self.layer.channel_data.list.len().saturating_sub(self.channel_index);
+ (remaining, Some(remaining))
+ }
+}
+
+impl ExactSizeIterator for FlatSampleIterator<'_> {}
+
+impl<SampleData> AnyChannels<SampleData>{
+
+ /// A new list of arbitrary channels. Sorts the list to make it alphabetically stable.
+ pub fn sort(mut list: SmallVec<[AnyChannel<SampleData>; 4]>) -> Self {
+ list.sort_unstable_by_key(|channel| channel.name.clone()); // TODO no clone?
+ Self { list }
+ }
+}
+
+// FIXME check content size of layer somewhere??? before writing?
+impl<LevelSamples> Levels<LevelSamples> {
+
+ /// Get a resolution level by index, sorted by size, decreasing.
+ pub fn get_level(&self, level: Vec2<usize>) -> Result<&LevelSamples> {
+ match self {
+ Levels::Singular(block) => {
+ debug_assert_eq!(level, Vec2(0,0), "singular image cannot write leveled blocks bug");
+ Ok(block)
+ },
+
+ Levels::Mip { level_data, .. } => {
+ debug_assert_eq!(level.x(), level.y(), "mip map levels must be equal on x and y bug");
+ level_data.get(level.x()).ok_or(Error::invalid("block mip level index"))
+ },
+
+ Levels::Rip { level_data, .. } => {
+ level_data.get_by_level(level).ok_or(Error::invalid("block rip level index"))
+ }
+ }
+ }
+
+ /// Get a resolution level by index, sorted by size, decreasing.
+ // TODO storage order for RIP maps?
+ pub fn get_level_mut(&mut self, level: Vec2<usize>) -> Result<&mut LevelSamples> {
+ match self {
+ Levels::Singular(ref mut block) => {
+ debug_assert_eq!(level, Vec2(0,0), "singular image cannot write leveled blocks bug");
+ Ok(block)
+ },
+
+ Levels::Mip { level_data, .. } => {
+ debug_assert_eq!(level.x(), level.y(), "mip map levels must be equal on x and y bug");
+ level_data.get_mut(level.x()).ok_or(Error::invalid("block mip level index"))
+ },
+
+ Levels::Rip { level_data, .. } => {
+ level_data.get_by_level_mut(level).ok_or(Error::invalid("block rip level index"))
+ }
+ }
+ }
+
+ /// Get a slice of all resolution levels, sorted by size, decreasing.
+ pub fn levels_as_slice(&self) -> &[LevelSamples] {
+ match self {
+ Levels::Singular(data) => std::slice::from_ref(data),
+ Levels::Mip { level_data, .. } => level_data,
+ Levels::Rip { level_data, .. } => &level_data.map_data,
+ }
+ }
+
+ /// Get a mutable slice of all resolution levels, sorted by size, decreasing.
+ pub fn levels_as_slice_mut(&mut self) -> &mut [LevelSamples] {
+ match self {
+ Levels::Singular(data) => std::slice::from_mut(data),
+ Levels::Mip { level_data, .. } => level_data,
+ Levels::Rip { level_data, .. } => &mut level_data.map_data,
+ }
+ }
+
+ // TODO simplify working with levels in general! like level_size_by_index and such
+
+ /*pub fn levels_with_size(&self, rounding: RoundingMode, max_resolution: Vec2<usize>) -> Vec<(Vec2<usize>, &S)> {
+ match self {
+ Levels::Singular(ref data) => vec![ (max_resolution, data) ],
+ Levels::Mip(ref maps) => mip_map_levels(rounding, max_resolution).map(|(_index, size)| size).zip(maps).collect(),
+ Levels::Rip(ref rip_maps) => rip_map_levels(rounding, max_resolution).map(|(_index, size)| size).zip(&rip_maps.map_data).collect(),
+ }
+ }*/
+
+ /// Whether this stores multiple resolution levels.
+ pub fn level_mode(&self) -> LevelMode {
+ match self {
+ Levels::Singular(_) => LevelMode::Singular,
+ Levels::Mip { .. } => LevelMode::MipMap,
+ Levels::Rip { .. } => LevelMode::RipMap,
+ }
+ }
+}
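+
+// For example, the full-resolution level of any `Levels` value is `levels.levels_as_slice()[0]`,
+// since the slice is sorted by size, decreasing.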
+
+impl<Samples> RipMaps<Samples> {
+
+ /// Flatten the 2D level index to a one dimensional index.
+ pub fn get_level_index(&self, level: Vec2<usize>) -> usize {
+ level.flat_index_for_size(self.level_count)
+ }
+
+ /// Return a level by level index. Level `0` has the largest resolution.
+ pub fn get_by_level(&self, level: Vec2<usize>) -> Option<&Samples> {
+ self.map_data.get(self.get_level_index(level))
+ }
+
+ /// Return a mutable level reference by level index. Level `0` has the largest resolution.
+ pub fn get_by_level_mut(&mut self, level: Vec2<usize>) -> Option<&mut Samples> {
+ let index = self.get_level_index(level);
+ self.map_data.get_mut(index)
+ }
+}
+
+impl FlatSamples {
+
+ /// The number of samples in the image. Should be the width times the height.
+ /// Might vary when subsampling is used.
+ pub fn len(&self) -> usize {
+ match self {
+ FlatSamples::F16(vec) => vec.len(),
+ FlatSamples::F32(vec) => vec.len(),
+ FlatSamples::U32(vec) => vec.len(),
+ }
+ }
+
+ /// Views all samples in this storage as f32.
+    /// Matches on the underlying sample type once for every sample;
+    /// do the matching yourself once per image if performance is critical. Does not allocate.
+ pub fn values_as_f32<'s>(&'s self) -> impl 's + Iterator<Item = f32> {
+ self.values().map(|sample| sample.to_f32())
+ }
+
+ /// All samples in this storage as iterator.
+    /// Matches on the underlying sample type once for every sample;
+    /// do the matching yourself once per image if performance is critical. Does not allocate.
+ pub fn values<'s>(&'s self) -> impl 's + Iterator<Item = Sample> {
+ (0..self.len()).map(move |index| self.value_by_flat_index(index))
+ }
+
+    /// Look up a single value by flat index.
+    /// The flat index can be obtained using `Vec2::flat_index_for_size`,
+    /// which computes the index in a flattened array of pixel rows.
+ pub fn value_by_flat_index(&self, index: usize) -> Sample {
+ match self {
+ FlatSamples::F16(vec) => Sample::F16(vec[index]),
+ FlatSamples::F32(vec) => Sample::F32(vec[index]),
+ FlatSamples::U32(vec) => Sample::U32(vec[index]),
+ }
+ }
+}
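+
+// Example (a sketch, not part of the crate): inspecting flat samples without matching
+// on the sample type yourself, using the iterators defined above:
+//
+//     fn average_value(samples: &FlatSamples) -> f32 {
+//         let count = samples.len().max(1) as f32;
+//         // converts each sample to f32 on the fly, without allocating
+//         samples.values_as_f32().sum::<f32>() / count
+//     }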
+
+
+impl<'s, ChannelData:'s> Layer<ChannelData> {
+
+ /// Create a layer with the specified size, attributes, encoding and channels.
+ /// The channels can be either `SpecificChannels` or `AnyChannels`.
+ pub fn new(
+ dimensions: impl Into<Vec2<usize>>,
+ attributes: LayerAttributes,
+ encoding: Encoding,
+ channels: ChannelData
+ ) -> Self
+ where ChannelData: WritableChannels<'s>
+ {
+ Layer { channel_data: channels, attributes, size: dimensions.into(), encoding }
+ }
+
+    // TODO: add a test for this
+ /// Panics for images with Scanline encoding.
+ pub fn levels_with_resolution<'l, L>(&self, levels: &'l Levels<L>) -> Box<dyn 'l + Iterator<Item=(&'l L, Vec2<usize>)>> {
+ match levels {
+ Levels::Singular(level) => Box::new(std::iter::once((level, self.size))),
+
+ Levels::Mip { rounding_mode, level_data } => Box::new(level_data.iter().zip(
+ mip_map_levels(*rounding_mode, self.size)
+ .map(|(_index, size)| size)
+ )),
+
+ Levels::Rip { rounding_mode, level_data } => Box::new(level_data.map_data.iter().zip(
+ rip_map_levels(*rounding_mode, self.size)
+ .map(|(_index, size)| size)
+ )),
+ }
+ }
+}
+
+impl Encoding {
+
+ /// No compression. Massive space requirements.
+ /// Fast, because it minimizes data shuffling and reallocation.
+ pub const UNCOMPRESSED: Encoding = Encoding {
+ compression: Compression::Uncompressed,
+ blocks: Blocks::ScanLines, // longest lines, faster memcpy
+ line_order: LineOrder::Increasing // presumably fastest?
+ };
+
+ /// Run-length encoding with tiles of 64x64 pixels. This is the recommended default encoding.
+ /// Almost as fast as uncompressed data, but optimizes single-colored areas such as mattes and masks.
+ pub const FAST_LOSSLESS: Encoding = Encoding {
+ compression: Compression::RLE,
+ blocks: Blocks::Tiles(Vec2(64, 64)), // optimize for RLE compression
+ line_order: LineOrder::Unspecified
+ };
+
+ /// ZIP compression with blocks of 16 lines. Slow, but produces small files without visible artefacts.
+ pub const SMALL_LOSSLESS: Encoding = Encoding {
+ compression: Compression::ZIP16,
+ blocks: Blocks::ScanLines, // largest possible, but also with high probability of parallel workers
+ line_order: LineOrder::Increasing
+ };
+
+ /// PIZ compression with tiles of 256x256 pixels. Small images, not too slow.
+ pub const SMALL_FAST_LOSSLESS: Encoding = Encoding {
+ compression: Compression::PIZ,
+ blocks: Blocks::Tiles(Vec2(256, 256)),
+ line_order: LineOrder::Unspecified
+ };
+}
+
+impl Default for Encoding {
+ fn default() -> Self { Encoding::FAST_LOSSLESS }
+}
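+
+// Example (a sketch, not part of the crate): choosing one of the encoding presets
+// when constructing a layer. Here, `channels` stands for any value implementing
+// `WritableChannels`, for example an `AnyChannels` collection:
+//
+//     let layer = Layer::new(
+//         (1024, 1024),                 // layer resolution
+//         LayerAttributes::default(),   // no special layer attributes
+//         Encoding::SMALL_LOSSLESS,     // trade speed for smaller files
+//         channels,
+//     );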
+
+impl<'s, LayerData: 's> Image<LayerData> where LayerData: WritableLayers<'s> {
+    /// Create an image with one or multiple layers. The layer data can be a single `Layer`, a `Layers` small vector, a `Vec<Layer>`, or a `&[Layer]`.
+ pub fn new(image_attributes: ImageAttributes, layer_data: LayerData) -> Self {
+ Image { attributes: image_attributes, layer_data }
+ }
+}
+
+// explorable constructor alias
+impl<'s, Channels: 's> Image<Layers<Channels>> where Channels: WritableChannels<'s> {
+ /// Create an image with multiple layers. The layer can be a `Vec<Layer>` or `Layers` (a small vector).
+ pub fn from_layers(image_attributes: ImageAttributes, layer_data: impl Into<Layers<Channels>>) -> Self {
+ Self::new(image_attributes, layer_data.into())
+ }
+}
+
+
+impl<'s, ChannelData:'s> Image<Layer<ChannelData>> where ChannelData: WritableChannels<'s> {
+
+    /// Derives the display position and size (the image attributes) from the position and size of the given layer.
+ pub fn from_layer(layer: Layer<ChannelData>) -> Self {
+ let bounds = IntegerBounds::new(layer.attributes.layer_position, layer.size);
+ Self::new(ImageAttributes::new(bounds), layer)
+ }
+
+ /// Uses empty attributes.
+ pub fn from_encoded_channels(size: impl Into<Vec2<usize>>, encoding: Encoding, channels: ChannelData) -> Self {
+ // layer name is not required for single-layer images
+ Self::from_layer(Layer::new(size, LayerAttributes::default(), encoding, channels))
+ }
+
+ /// Uses empty attributes and fast compression.
+ pub fn from_channels(size: impl Into<Vec2<usize>>, channels: ChannelData) -> Self {
+ Self::from_encoded_channels(size, Encoding::default(), channels)
+ }
+}
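+
+// Example (a sketch, not part of the crate): building a single-layer rgb image from a
+// flattened pixel vector and writing it to an in-memory buffer, mirroring the
+// round-trip test in `validate_results` below:
+//
+//     use std::io::Cursor;
+//
+//     let pixels = vec![(0.1_f32, 0.2_f32, 0.3_f32); 4];
+//     let image = Image::from_channels(
+//         (2, 2),                                                    // resolution
+//         SpecificChannels::rgb(PixelVec::new(Vec2(2, 2), pixels)),  // rgb pixel storage
+//     );
+//
+//     let mut bytes = Vec::new();
+//     image.write().to_buffered(Cursor::new(&mut bytes)).unwrap();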
+
+
+impl Image<NoneMore> {
+
+ /// Create an empty image, to be filled with layers later on. Add at least one layer to obtain a valid image.
+ /// Call `with_layer(another_layer)` for each layer you want to add to this image.
+ pub fn empty(attributes: ImageAttributes) -> Self { Self { attributes, layer_data: NoneMore } }
+}
+
+impl<'s, InnerLayers: 's> Image<InnerLayers> where
+ InnerLayers: WritableLayers<'s>,
+{
+    /// Add another layer to this image. The channel type of the new layer does
+    /// not have to match the channel types of the existing layers in this image.
+ pub fn with_layer<NewChannels>(self, layer: Layer<NewChannels>)
+ -> Image<Recursive<InnerLayers, Layer<NewChannels>>>
+ where NewChannels: 's + WritableChannels<'s>
+ {
+ Image {
+ attributes: self.attributes,
+ layer_data: Recursive::new(self.layer_data, layer)
+ }
+ }
+}
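+
+// Example (a sketch, not part of the crate): collecting layers of different channel
+// types into one image, starting from an empty image. Here, `attributes` is an
+// `ImageAttributes` value and `layer_a`, `layer_b` are previously constructed layers:
+//
+//     let image = Image::empty(attributes)
+//         .with_layer(layer_a)   // e.g. a `Layer<AnyChannels<..>>`
+//         .with_layer(layer_b);  // e.g. a `Layer<SpecificChannels<..>>`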
+
+
+impl<'s, SampleData: 's> AnyChannel<SampleData> {
+
+ /// Create a new channel without subsampling.
+ ///
+ /// Automatically flags this channel for specialized compression
+ /// if the name is "R", "G", "B", "Y", or "L",
+ /// as they typically encode values that are perceived non-linearly.
+ /// Construct the value yourself using `AnyChannel { .. }`, if you want to control this flag.
+ pub fn new(name: impl Into<Text>, sample_data: SampleData) -> Self where SampleData: WritableSamples<'s> {
+ let name: Text = name.into();
+
+ AnyChannel {
+ quantize_linearly: ChannelDescription::guess_quantization_linearity(&name),
+ name, sample_data,
+ sampling: Vec2(1, 1),
+ }
+ }
+
+ /*/// This is the same as `AnyChannel::new()`, but additionally ensures that the closure type is correct.
+ pub fn from_closure<V>(name: Text, sample_data: S) -> Self
+ where S: Sync + Fn(Vec2<usize>) -> V, V: InferSampleType + Data
+ {
+ Self::new(name, sample_data)
+ }*/
+}
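+
+// Example (a sketch, not part of the crate): creating arbitrary channels from raw
+// sample vectors and collecting them into a sorted `AnyChannels` list, assuming the
+// `SmallVec` re-export from the prelude:
+//
+//     // two channels for a 3 x 2 pixel layer
+//     let luma  = AnyChannel::new("Y", FlatSamples::F32(vec![0.5; 6]));
+//     let depth = AnyChannel::new("Z", FlatSamples::F32(vec![1.0; 6]));
+//     let channels = AnyChannels::sort(SmallVec::from_vec(vec![luma, depth]));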
+
+impl std::fmt::Debug for FlatSamples {
+ fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ if self.len() <= 6 {
+ match self {
+ FlatSamples::F16(vec) => vec.fmt(formatter),
+ FlatSamples::F32(vec) => vec.fmt(formatter),
+ FlatSamples::U32(vec) => vec.fmt(formatter),
+ }
+ }
+ else {
+ match self {
+ FlatSamples::F16(vec) => write!(formatter, "[f16; {}]", vec.len()),
+ FlatSamples::F32(vec) => write!(formatter, "[f32; {}]", vec.len()),
+ FlatSamples::U32(vec) => write!(formatter, "[u32; {}]", vec.len()),
+ }
+ }
+ }
+}
+
+
+
+/// Compare the result of a round trip test with the original data.
+/// Supports lossy compression methods.
+// #[cfg(test)] TODO do not ship this code
+pub mod validate_results {
+ use crate::prelude::*;
+ use smallvec::Array;
+ use crate::prelude::recursive::*;
+ use crate::image::write::samples::WritableSamples;
+ use std::ops::Not;
+ use crate::block::samples::IntoNativeSample;
+
+
+ /// Compare two objects, but with a few special quirks.
+ /// Intended mainly for unit testing.
+ pub trait ValidateResult {
+
+        /// Compare self with the result of a round trip. Panics if they are not considered equal.
+        ///
+        /// For lossy compression methods, this uses approximate equality.
+        /// This method is not symmetrical: call it on the original image,
+        /// passing the round-trip result as the argument.
+        /// Intended for unit testing.
+        ///
+        /// Warning: If you use `SpecificChannels`, the comparison might be inaccurate
+        /// for images with mixed compression methods. This is to be used with `AnyChannels` mainly.
+ fn assert_equals_result(&self, result: &Self) {
+ self.validate_result(result, ValidationOptions::default(), || String::new()).unwrap();
+ }
+
+        /// Compare self with the other.
+        /// Exceptional behaviour:
+        /// - Any two NaN values are considered equal, regardless of bit representation.
+        /// - If `allow_lossy` is set, any two values that differ only by a small amount are considered equal.
+        /// - If `nan_converted_to_zero` is set, and __self is NaN/Infinite while the other value is zero, they are considered equal__
+        ///   (because some compression methods replace NaN with zero).
+        ///
+        /// This method is not symmetrical: it compares the original (self) with a possibly lossy result.
+ fn validate_result(
+ &self, lossy_result: &Self,
+ options: ValidationOptions,
+ // this is a lazy string, because constructing a string is only necessary in the case of an error,
+ // but eats up memory and allocation time every time. this was measured.
+ context: impl Fn() -> String
+ ) -> ValidationResult;
+ }
+
+ /// Whether to do accurate or approximate comparison.
+ #[derive(Default, Debug, Eq, PartialEq, Hash, Copy, Clone)]
+ pub struct ValidationOptions {
+ allow_lossy: bool,
+ nan_converted_to_zero: bool,
+ }
+
+ /// If invalid, contains the error message.
+ pub type ValidationResult = std::result::Result<(), String>;
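+
+    // Example (a sketch, not part of the crate): comparing an original image with the
+    // result of a write/read round trip, as the unit tests at the end of this module do.
+    // Here, `reloaded` stands for the image obtained by writing `original` and reading it back:
+    //
+    //     // panics with a path such as "| image > layer data > channel `R`" on mismatch,
+    //     // tolerating small differences where the compression method is lossy
+    //     original.assert_equals_result(&reloaded);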
+
+
+ impl<C> ValidateResult for Image<C> where C: ValidateResult {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ if self.attributes != other.attributes { Err(location() + "| image > attributes") }
+ else { self.layer_data.validate_result(&other.layer_data, options, || location() + "| image > layer data") }
+ }
+ }
+
+ impl<S> ValidateResult for Layer<AnyChannels<S>>
+ where AnyChannel<S>: ValidateResult, S: for<'a> WritableSamples<'a>
+ {
+ fn validate_result(&self, other: &Self, _overridden: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ let location = || format!("{} (layer `{:?}`)", location(), self.attributes.layer_name);
+ if self.attributes != other.attributes { Err(location() + " > attributes") }
+ else if self.encoding != other.encoding { Err(location() + " > encoding") }
+ else if self.size != other.size { Err(location() + " > size") }
+ else if self.channel_data.list.len() != other.channel_data.list.len() { Err(location() + " > channel count") }
+ else {
+ for (own_chan, other_chan) in self.channel_data.list.iter().zip(other.channel_data.list.iter()) {
+ own_chan.validate_result(
+ other_chan,
+
+ ValidationOptions {
+ // no tolerance for lossless channels
+ allow_lossy: other.encoding.compression
+ .is_lossless_for(other_chan.sample_data.sample_type()).not(),
+
+ // consider nan and zero equal if the compression method does not support nan
+ nan_converted_to_zero: other.encoding.compression.supports_nan().not()
+ },
+
+ || format!("{} > channel `{}`", location(), own_chan.name)
+ )?;
+ }
+ Ok(())
+ }
+ }
+ }
+
+ impl<Px, Desc> ValidateResult for Layer<SpecificChannels<Px, Desc>>
+ where SpecificChannels<Px, Desc>: ValidateResult
+ {
+ /// This does an approximate comparison for all channels,
+ /// even if some channels can be compressed without loss.
+ fn validate_result(&self, other: &Self, _overridden: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ let location = || format!("{} (layer `{:?}`)", location(), self.attributes.layer_name);
+
+ // TODO dedup with above
+ if self.attributes != other.attributes { Err(location() + " > attributes") }
+ else if self.encoding != other.encoding { Err(location() + " > encoding") }
+ else if self.size != other.size { Err(location() + " > size") }
+ else {
+ let options = ValidationOptions {
+                    // no tolerance for lossless channels
+                    // PXR24 only loses data for f32 values, B44 only for f16, not for any other types
+                    allow_lossy: other.encoding.compression.may_loose_data(), // TODO check specific channel sample types
+
+ // consider nan and zero equal if the compression method does not support nan
+ nan_converted_to_zero: other.encoding.compression.supports_nan().not()
+ };
+
+ self.channel_data.validate_result(&other.channel_data, options, || location() + " > channel_data")?;
+ Ok(())
+ }
+ }
+ }
+
+ impl<S> ValidateResult for AnyChannels<S> where S: ValidateResult {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ self.list.validate_result(&other.list, options, location)
+ }
+ }
+
+ impl<S> ValidateResult for AnyChannel<S> where S: ValidateResult {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ if self.name != other.name { Err(location() + " > name") }
+ else if self.quantize_linearly != other.quantize_linearly { Err(location() + " > quantize_linearly") }
+ else if self.sampling != other.sampling { Err(location() + " > sampling") }
+ else {
+ self.sample_data.validate_result(&other.sample_data, options, || location() + " > sample_data")
+ }
+ }
+ }
+
+ impl<Pxs, Chans> ValidateResult for SpecificChannels<Pxs, Chans> where Pxs: ValidateResult, Chans: Eq {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ if self.channels != other.channels { Err(location() + " > specific channels") }
+ else { self.pixels.validate_result(&other.pixels, options, || location() + " > specific pixels") }
+ }
+ }
+
+ impl<S> ValidateResult for Levels<S> where S: ValidateResult {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ self.levels_as_slice().validate_result(&other.levels_as_slice(), options, || location() + " > levels")
+ }
+ }
+
+ impl ValidateResult for FlatSamples {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ use FlatSamples::*;
+ match (self, other) {
+ (F16(values), F16(other_values)) => values.as_slice().validate_result(&other_values.as_slice(), options, ||location() + " > f16 samples"),
+ (F32(values), F32(other_values)) => values.as_slice().validate_result(&other_values.as_slice(), options, ||location() + " > f32 samples"),
+ (U32(values), U32(other_values)) => values.as_slice().validate_result(&other_values.as_slice(), options, ||location() + " > u32 samples"),
+ (own, other) => Err(format!("{}: samples type mismatch. expected {:?}, found {:?}", location(), own.sample_type(), other.sample_type()))
+ }
+ }
+ }
+
+ impl<T> ValidateResult for &[T] where T: ValidateResult {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ if self.len() != other.len() { Err(location() + " count") }
+ else {
+ for (index, (slf, other)) in self.iter().zip(other.iter()).enumerate() {
+ slf.validate_result(other, options, ||format!("{} element [{}] of {}", location(), index, self.len()))?;
+ }
+ Ok(())
+ }
+ }
+ }
+
+ impl<A: Array> ValidateResult for SmallVec<A> where A::Item: ValidateResult {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ self.as_slice().validate_result(&other.as_slice(), options, location)
+ }
+ }
+
+ impl<A> ValidateResult for Vec<A> where A: ValidateResult {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ self.as_slice().validate_result(&other.as_slice(), options, location)
+ }
+ }
+
+    impl<A,B,C,D> ValidateResult for (A, B, C, D) where A: Clone + ValidateResult, B: Clone + ValidateResult, C: Clone + ValidateResult, D: Clone + ValidateResult {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ self.clone().into_recursive().validate_result(&other.clone().into_recursive(), options, location)
+ }
+ }
+
+    impl<A,B,C> ValidateResult for (A, B, C) where A: Clone + ValidateResult, B: Clone + ValidateResult, C: Clone + ValidateResult {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ self.clone().into_recursive().validate_result(&other.clone().into_recursive(), options, location)
+ }
+ }
+
+    // (low priority because it is only used in the tests)
+ /*TODO
+ impl<Tuple> SimilarToLossy for Tuple where
+ Tuple: Clone + IntoRecursive,
+ <Tuple as IntoRecursive>::Recursive: SimilarToLossy,
+ {
+ fn similar_to_lossy(&self, other: &Self, max_difference: f32) -> bool {
+ self.clone().into_recursive().similar_to_lossy(&other.clone().into_recursive(), max_difference)
+ } // TODO no clone?
+ }*/
+
+
+ // implement for recursive types
+ impl ValidateResult for NoneMore {
+ fn validate_result(&self, _: &Self, _: ValidationOptions, _: impl Fn()->String) -> ValidationResult { Ok(()) }
+ }
+
+ impl<Inner, T> ValidateResult for Recursive<Inner, T> where Inner: ValidateResult, T: ValidateResult {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ self.value.validate_result(&other.value, options, &location).and_then(|()|
+ self.inner.validate_result(&other.inner, options, &location)
+ )
+ }
+ }
+
+ impl<S> ValidateResult for Option<S> where S: ValidateResult {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ match (self, other) {
+ (None, None) => Ok(()),
+ (Some(value), Some(other)) => value.validate_result(other, options, location),
+ _ => Err(location() + ": option mismatch")
+ }
+ }
+ }
+
+ impl ValidateResult for f32 {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ if self == other || (self.is_nan() && other.is_nan()) || (options.nan_converted_to_zero && !self.is_normal() && *other == 0.0) {
+ return Ok(());
+ }
+
+ if options.allow_lossy {
+ let epsilon = 0.06;
+ let max_difference = 0.1;
+
+ let adaptive_threshold = epsilon * (self.abs() + other.abs());
+ let tolerance = adaptive_threshold.max(max_difference);
+ let difference = (self - other).abs();
+
+ return if difference <= tolerance { Ok(()) }
+ else { Err(format!("{}: expected ~{}, found {} (adaptive tolerance {})", location(), self, other, tolerance)) };
+ }
+
+ Err(format!("{}: expected exactly {}, found {}", location(), self, other))
+ }
+ }
+
+ impl ValidateResult for f16 {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ if self.to_bits() == other.to_bits() { Ok(()) } else {
+ self.to_f32().validate_result(&other.to_f32(), options, location)
+ }
+ }
+ }
+
+ impl ValidateResult for u32 {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ if self == other { Ok(()) } else { // todo to float conversion resulting in nan/infinity?
+ self.to_f32().validate_result(&other.to_f32(), options, location)
+ }
+ }
+ }
+
+ impl ValidateResult for Sample {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn()->String) -> ValidationResult {
+ use Sample::*;
+ match (self, other) {
+ (F16(a), F16(b)) => a.validate_result(b, options, ||location() + " (f16)"),
+ (F32(a), F32(b)) => a.validate_result(b, options, ||location() + " (f32)"),
+ (U32(a), U32(b)) => a.validate_result(b, options, ||location() + " (u32)"),
+ (_,_) => Err(location() + ": sample type mismatch")
+ }
+ }
+ }
+
+
+ #[cfg(test)]
+ mod test_value_result {
+ use std::f32::consts::*;
+ use std::io::Cursor;
+ use crate::image::pixel_vec::PixelVec;
+ use crate::image::validate_results::{ValidateResult, ValidationOptions};
+ use crate::meta::attribute::LineOrder::Increasing;
+ use crate::image::{FlatSamples};
+
+ fn expect_valid<T>(original: &T, result: &T, allow_lossy: bool, nan_converted_to_zero: bool) where T: ValidateResult {
+ original.validate_result(
+ result,
+ ValidationOptions { allow_lossy, nan_converted_to_zero },
+ || String::new()
+ ).unwrap();
+ }
+
+ fn expect_invalid<T>(original: &T, result: &T, allow_lossy: bool, nan_converted_to_zero: bool) where T: ValidateResult {
+ assert!(original.validate_result(
+ result,
+ ValidationOptions { allow_lossy, nan_converted_to_zero },
+ || String::new()
+ ).is_err());
+ }
+
+ #[test]
+ fn test_f32(){
+ let original:&[f32] = &[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, -20.4, f32::NAN];
+ let lossy:&[f32] = &[0.0, 0.2, 0.2, 0.3, 0.4, 0.5, -20.5, f32::NAN];
+
+ expect_valid(&original, &original, true, true);
+ expect_valid(&original, &original, true, false);
+ expect_valid(&original, &original, false, true);
+ expect_valid(&original, &original, false, false);
+
+ expect_invalid(&original, &lossy, false, false);
+ expect_valid(&original, &lossy, true, false);
+
+ expect_invalid(&original, &&original[..original.len()-2], true, true);
+
+ // test relative comparison with some large values
+ expect_valid(&1_000_f32, &1_001_f32, true, false);
+ expect_invalid(&1_000_f32, &1_200_f32, true, false);
+
+ expect_valid(&10_000_f32, &10_100_f32, true, false);
+ expect_invalid(&10_000_f32, &12_000_f32, true, false);
+
+ expect_valid(&33_120_f32, &30_120_f32, true, false);
+ expect_invalid(&33_120_f32, &20_120_f32, true, false);
+ }
+
+ #[test]
+ fn test_nan(){
+ let original:&[f32] = &[ 0.0, f32::NAN, f32::NAN ];
+ let lossy:&[f32] = &[ 0.0, f32::NAN, 0.0 ];
+
+ expect_valid(&original, &lossy, true, true);
+ expect_invalid(&lossy, &original, true, true);
+
+ expect_valid(&lossy, &lossy, true, true);
+ expect_valid(&lossy, &lossy, false, true);
+ }
+
+ #[test]
+ fn test_error(){
+
+ fn print_error<T: ValidateResult>(original: &T, lossy: &T, allow_lossy: bool){
+ let message = original
+ .validate_result(
+ &lossy,
+ ValidationOptions { allow_lossy, .. Default::default() },
+ || String::new() // type_name::<T>().to_string()
+ )
+ .unwrap_err();
+
+ println!("message: {}", message);
+ }
+
+ let original:&[f32] = &[ 0.0, f32::NAN, f32::NAN ];
+ let lossy:&[f32] = &[ 0.0, f32::NAN, 0.0 ];
+ print_error(&original, &lossy, false);
+
+ print_error(&2.0, &1.0, true);
+ print_error(&2.0, &1.0, false);
+
+ print_error(&FlatSamples::F32(vec![0.1,0.1]), &FlatSamples::F32(vec![0.1,0.2]), false);
+ print_error(&FlatSamples::U32(vec![0,0]), &FlatSamples::F32(vec![0.1,0.2]), false);
+
+ {
+ let image = crate::prelude::read_all_data_from_file("tests/images/valid/openexr/MultiResolution/Kapaa.exr").unwrap();
+
+ let mut mutated = image.clone();
+ let samples = mutated.layer_data.first_mut().unwrap()
+ .channel_data.list.first_mut().unwrap().sample_data.levels_as_slice_mut().first_mut().unwrap();
+
+ match samples {
+ FlatSamples::F16(vals) => vals[100] = vals[1],
+ FlatSamples::F32(vals) => vals[100] = vals[1],
+ FlatSamples::U32(vals) => vals[100] = vals[1],
+ }
+
+ print_error(&image, &mutated, false);
+ }
+
+ // TODO check out more nested behaviour!
+ }
+
+ #[test]
+ fn test_uncompressed(){
+ use crate::prelude::*;
+
+ let original_pixels: [(f32,f32,f32); 4] = [
+ (0.0, -1.1, PI),
+ (0.0, -1.1, TAU),
+ (0.0, -1.1, f32::EPSILON),
+ (f32::NAN, 10000.1, -1024.009),
+ ];
+
+ let mut file_bytes = Vec::new();
+ let original_image = Image::from_encoded_channels(
+ (2,2),
+ Encoding {
+ compression: Compression::Uncompressed,
+ line_order: Increasing, // FIXME unspecified may be optimized to increasing, which destroys test eq
+ .. Encoding::default()
+ },
+ SpecificChannels::rgb(PixelVec::new(Vec2(2,2), original_pixels.to_vec()))
+ );
+
+ original_image.write().to_buffered(Cursor::new(&mut file_bytes)).unwrap();
+
+ let lossy_image = read().no_deep_data().largest_resolution_level()
+ .rgb_channels(PixelVec::<(f32,f32,f32)>::constructor, PixelVec::set_pixel)
+ .first_valid_layer().all_attributes().from_buffered(Cursor::new(&file_bytes)).unwrap();
+
+ original_image.assert_equals_result(&original_image);
+ lossy_image.assert_equals_result(&lossy_image);
+ original_image.assert_equals_result(&lossy_image);
+ lossy_image.assert_equals_result(&original_image);
+ }
+
+ #[test]
+ fn test_compiles(){
+ use crate::prelude::*;
+
+ fn accepts_validatable_value(_: &impl ValidateResult){}
+
+ let object: Levels<FlatSamples> = Levels::Singular(FlatSamples::F32(Vec::default()));
+ accepts_validatable_value(&object);
+
+ let object: AnyChannels<Levels<FlatSamples>> = AnyChannels::sort(SmallVec::default());
+ accepts_validatable_value(&object);
+
+ let layer: Layer<AnyChannels<Levels<FlatSamples>>> = Layer::new((0,0), Default::default(), Default::default(), object);
+ accepts_validatable_value(&layer);
+
+ let layers: Layers<AnyChannels<Levels<FlatSamples>>> = Default::default();
+ accepts_validatable_value(&layers);
+
+ let object: Image<Layer<AnyChannels<Levels<FlatSamples>>>> = Image::from_layer(layer);
+ object.assert_equals_result(&object);
+ }
+ }
+}
+
+
diff --git a/vendor/exr/src/image/pixel_vec.rs b/vendor/exr/src/image/pixel_vec.rs
new file mode 100644
index 0000000..3447bf2
--- /dev/null
+++ b/vendor/exr/src/image/pixel_vec.rs
@@ -0,0 +1,97 @@
+
+//! Provides a predefined pixel storage.
+//! Currently only contains a simple flattened vector storage.
+//! Use the associated functions `PixelVec::<YourPixelTuple>::constructor` and
+//! `PixelVec::<YourPixelTuple>::set_pixel` for reading into a predefined pixel vector.
+//! Use the function `PixelVec::new` to create a pixel vector which can be written to a file.
+
+use super::*;
+
+/// Store all pixels in a single flattened array.
+/// Each pixel is converted to the type `T`, which is typically a tuple of samples.
+/// This supports all the sample types: `f16`, `f32`, and `u32`.
+///
+/// The flattened vector contains all rows one after another.
+/// In each row, for each pixel, its red, green, blue, and then alpha
+/// samples are stored one after another.
+///
+/// Use `PixelVec.compute_pixel_index(position)`
+/// to compute the flat index of a specific pixel.
+#[derive(Eq, PartialEq, Clone)]
+pub struct PixelVec<T> {
+
+ /// The resolution of this layer.
+ pub resolution: Vec2<usize>,
+
+ /// The flattened vector contains all rows one after another.
+ /// In each row, for each pixel, its red, green, blue, and then alpha
+ /// samples are stored one after another.
+ ///
+    /// Use `PixelVec::compute_pixel_index(position)`
+    /// to compute the flat index of a specific pixel.
+ pub pixels: Vec<T>,
+}
+
+impl<Pixel> PixelVec<Pixel> {
+
+ /// Create a new flattened pixel storage, filled with default pixels.
+    /// Accepts an unused `Channels` parameter, so that this function matches the pixel-storage constructor signature expected by the reading functions and can be passed as a function reference instead of a closure.
+ pub fn constructor<Channels>(resolution: Vec2<usize>, _: &Channels) -> Self where Pixel: Default + Clone {
+ PixelVec { resolution, pixels: vec![Pixel::default(); resolution.area()] }
+ }
+
+ /// Examine a pixel of a `PixelVec<T>` image.
+ /// Can usually be used as a function reference instead of calling it directly.
+ #[inline]
+ pub fn get_pixel(&self, position: Vec2<usize>) -> &Pixel where Pixel: Sync {
+ &self.pixels[self.compute_pixel_index(position)]
+ }
+
+ /// Update a pixel of a `PixelVec<T>` image.
+ /// Can usually be used as a function reference instead of calling it directly.
+ #[inline]
+ pub fn set_pixel(&mut self, position: Vec2<usize>, pixel: Pixel) {
+ let index = self.compute_pixel_index(position);
+ self.pixels[index] = pixel;
+ }
+
+ /// Create a new flattened pixel storage, checking the length of the provided pixels vector.
+ pub fn new(resolution: impl Into<Vec2<usize>>, pixels: Vec<Pixel>) -> Self {
+ let size = resolution.into();
+ assert_eq!(size.area(), pixels.len(), "expected {} samples, but vector length is {}", size.area(), pixels.len());
+ Self { resolution: size, pixels }
+ }
+
+    /// Compute the flat index of a specific pixel.
+    /// The computed index can be used with `PixelVec.pixels[index]`.
+    /// Panics for invalid pixel coordinates.
+ #[inline]
+ pub fn compute_pixel_index(&self, position: Vec2<usize>) -> usize {
+ position.flat_index_for_size(self.resolution)
+ }
+}
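+
+// Example (a sketch, not part of the crate): creating a pixel vector for writing and
+// looking a pixel up again through its flat index:
+//
+//     let resolution = Vec2(2, 2);
+//     let pixels = vec![(0.0_f32, 0.0_f32, 0.0_f32); resolution.area()];
+//     let storage = PixelVec::new(resolution, pixels);
+//
+//     let index = storage.compute_pixel_index(Vec2(1, 0));  // pixel at x=1, y=0
+//     assert_eq!(storage.pixels[index], (0.0, 0.0, 0.0));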
+
+use crate::image::validate_results::{ValidateResult, ValidationOptions, ValidationResult};
+
+impl<Px> ValidateResult for PixelVec<Px> where Px: ValidateResult {
+ fn validate_result(&self, other: &Self, options: ValidationOptions, location: impl Fn() -> String) -> ValidationResult {
+ if self.resolution != other.resolution { Err(location() + " > resolution") }
+ else { self.pixels.as_slice().validate_result(&other.pixels.as_slice(), options, || location() + " > pixels") }
+ }
+}
+
+impl<Px> GetPixel for PixelVec<Px> where Px: Clone + Sync {
+ type Pixel = Px;
+ fn get_pixel(&self, position: Vec2<usize>) -> Self::Pixel {
+ self.get_pixel(position).clone()
+ }
+}
+
+use std::fmt::*;
+
+impl<T> Debug for PixelVec<T> {
+ #[inline] fn fmt(&self, formatter: &mut Formatter<'_>) -> std::fmt::Result {
+ write!(formatter, "[{}; {}]", std::any::type_name::<T>(), self.pixels.len())
+ }
+}
+
diff --git a/vendor/exr/src/image/read/any_channels.rs b/vendor/exr/src/image/read/any_channels.rs
new file mode 100644
index 0000000..054a7c3
--- /dev/null
+++ b/vendor/exr/src/image/read/any_channels.rs
@@ -0,0 +1,128 @@
+//! How to read arbitrary channels.
+
+use crate::image::*;
+use crate::meta::header::{Header};
+use crate::error::{Result, UnitResult};
+use crate::block::UncompressedBlock;
+use crate::block::lines::{LineRef};
+use crate::math::Vec2;
+use crate::meta::attribute::{Text, ChannelDescription};
+use crate::image::read::layers::{ReadChannels, ChannelsReader};
+use crate::block::chunk::TileCoordinates;
+
+/// A template that creates an [AnyChannelsReader] for each layer in the image.
+/// This loads all channels for each layer.
+/// The `ReadSamples` can, for example, be [ReadFlatSamples] or [ReadAllLevels<ReadFlatSamples>].
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct ReadAnyChannels<ReadSamples> {
+
+ /// The sample reading specification
+ pub read_samples: ReadSamples
+}
+
+/// A template that creates a new [`SampleReader`] for each channel in each layer.
+pub trait ReadSamples {
+
+ /// The type of the temporary samples reader
+ type Reader: SamplesReader;
+
+ /// Create a single reader for a single channel of a layer
+ fn create_sample_reader(&self, header: &Header, channel: &ChannelDescription) -> Result<Self::Reader>;
+}
+
+/// Processes pixel blocks from a file and accumulates them into a collection of arbitrary channels.
+/// Loads all channels for each layer.
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct AnyChannelsReader<SamplesReader> {
+
+ /// Stores a separate sample reader per channel in the layer
+ sample_channels_reader: SmallVec<[AnyChannelReader<SamplesReader>; 4]>,
+}
+
+/// Processes pixel blocks from a file and accumulates them into a single arbitrary channel.
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct AnyChannelReader<SamplesReader> {
+
+ /// The custom reader that accumulates the pixel data for a single channel
+ samples: SamplesReader,
+
+ /// Temporarily accumulated meta data.
+ name: Text,
+
+ /// Temporarily accumulated meta data.
+ sampling_rate: Vec2<usize>,
+
+ /// Temporarily accumulated meta data.
+ quantize_linearly: bool,
+}
+
+/// Processes pixel blocks from a file and accumulates them into a single pixel channel.
+/// For example, stores thousands of "Red" pixel values for a single layer.
+pub trait SamplesReader {
+
+ /// The type of resulting sample storage
+ type Samples;
+
+ /// Specify whether a single block of pixels should be loaded from the file
+ fn filter_block(&self, tile: TileCoordinates) -> bool;
+
+ /// Load a single pixel line, which has not been filtered, into the reader, accumulating the sample data
+ fn read_line(&mut self, line: LineRef<'_>) -> UnitResult;
+
+ /// Deliver the final accumulated sample storage for the image
+ fn into_samples(self) -> Self::Samples;
+}
+
+
+impl<'s, S: 's + ReadSamples> ReadChannels<'s> for ReadAnyChannels<S> {
+ type Reader = AnyChannelsReader<S::Reader>;
+
+ fn create_channels_reader(&self, header: &Header) -> Result<Self::Reader> {
+ let samples: Result<_> = header.channels.list.iter()
+ .map(|channel: &ChannelDescription| Ok(AnyChannelReader {
+ samples: self.read_samples.create_sample_reader(header, channel)?,
+ name: channel.name.clone(),
+ sampling_rate: channel.sampling,
+ quantize_linearly: channel.quantize_linearly
+ }))
+ .collect();
+
+ Ok(AnyChannelsReader { sample_channels_reader: samples? })
+ }
+}
+
+impl<S: SamplesReader> ChannelsReader for AnyChannelsReader<S> {
+ type Channels = AnyChannels<S::Samples>;
+
+ fn filter_block(&self, tile: TileCoordinates) -> bool {
+ self.sample_channels_reader.iter().any(|channel| channel.samples.filter_block(tile))
+ }
+
+ fn read_block(&mut self, header: &Header, decompressed: UncompressedBlock) -> UnitResult {
+ /*for (bytes, line) in LineIndex::lines_in_block(decompressed.index, header) {
+ let channel = self.sample_channels_reader.get_mut(line.channel).unwrap();
+ channel.samples.read_line(LineSlice { location: line, value: &decompressed.data[bytes] })?;
+ }
+
+ Ok(())*/
+ for line in decompressed.lines(&header.channels) {
+ self.sample_channels_reader[line.location.channel].samples.read_line(line)?;
+ }
+
+ Ok(())
+ }
+
+ fn into_channels(self) -> Self::Channels {
+ AnyChannels { // not using `new()` as the channels are already sorted
+ list: self.sample_channels_reader.into_iter()
+ .map(|channel| AnyChannel {
+ sample_data: channel.samples.into_samples(),
+
+ name: channel.name,
+ quantize_linearly: channel.quantize_linearly,
+ sampling: channel.sampling_rate
+ })
+ .collect()
+ }
+ }
+}
diff --git a/vendor/exr/src/image/read/image.rs b/vendor/exr/src/image/read/image.rs
new file mode 100644
index 0000000..fce2f52
--- /dev/null
+++ b/vendor/exr/src/image/read/image.rs
@@ -0,0 +1,209 @@
+//! The last wrapper of image readers, finally containing the [`from_file(path)`] method.
+//! This completes the builder and reads a complete image.
+
+use crate::image::*;
+use crate::meta::header::{Header, ImageAttributes};
+use crate::error::{Result, UnitResult};
+use crate::block::{UncompressedBlock, BlockIndex};
+use crate::block::chunk::TileCoordinates;
+use std::path::Path;
+use std::io::{Read, BufReader};
+use std::io::Seek;
+use crate::meta::MetaData;
+use crate::block::reader::ChunksReader;
+
+/// Specify whether to read the image in parallel,
+/// whether to use pedantic error handling,
+/// and a callback for the reading progress.
+#[derive(Debug, Clone)]
+pub struct ReadImage<OnProgress, ReadLayers> {
+ on_progress: OnProgress,
+ read_layers: ReadLayers,
+ pedantic: bool,
+ parallel: bool,
+}
+
+impl<F, L> ReadImage<F, L> where F: FnMut(f64)
+{
+ /// Uses relaxed error handling and parallel decompression.
+ pub fn new(read_layers: L, on_progress: F) -> Self {
+ Self {
+ on_progress, read_layers,
+ pedantic: false, parallel: true,
+ }
+ }
+
+ /// Specify that any missing or unusual information should result in an error.
+ /// Otherwise, `exrs` will try to compute or ignore missing information.
+ ///
+ /// If pedantic is true, then an error will be returned as soon as anything is missing in the file,
+ /// or two values in the image contradict each other. If pedantic is false,
+ /// then only fatal errors will be thrown. By default, reading an image is not pedantic,
+ /// which means that slightly invalid files might still be readable.
+ /// For example, if some attribute is missing but can be recomputed, this flag decides whether an error is thrown.
+    /// As another example, if the pedantic flag is true and there are bytes left over after the decompression
+    /// algorithm has finished, an error is thrown, because this should not happen and something might be wrong with the file.
+    /// If your application might be the target of attacks, or if you want to emulate the original C++ library,
+    /// you might want to switch to pedantic reading.
+ pub fn pedantic(self) -> Self { Self { pedantic: true, ..self } }
+
+ /// Specify that multiple pixel blocks should never be decompressed using multiple threads at once.
+ /// This might be slower but uses less memory and less synchronization.
+ pub fn non_parallel(self) -> Self { Self { parallel: false, ..self } }
+
+ /// Specify a function to be called regularly throughout the loading process.
+ /// Replaces all previously specified progress functions in this reader.
+ pub fn on_progress<OnProgress>(self, on_progress: OnProgress) -> ReadImage<OnProgress, L>
+ where OnProgress: FnMut(f64)
+ {
+ ReadImage {
+ on_progress,
+ read_layers: self.read_layers,
+ pedantic: self.pedantic,
+ parallel: self.parallel
+ }
+ }
+
+
+ /// Read the exr image from a file.
+    /// Use [`ReadImage::from_unbuffered`] instead, if you do not have a file.
+ #[inline]
+ #[must_use]
+ pub fn from_file<Layers>(self, path: impl AsRef<Path>) -> Result<Image<Layers>>
+ where for<'s> L: ReadLayers<'s, Layers = Layers>
+ {
+ self.from_unbuffered(std::fs::File::open(path)?)
+ }
+
+ /// Buffer the reader and then read the exr image from it.
+    /// Use [`ReadImage::from_buffered`] instead, if your reader is an in-memory reader.
+    /// Use [`ReadImage::from_file`] instead, if you have a file path.
+ #[inline]
+ #[must_use]
+ pub fn from_unbuffered<Layers>(self, unbuffered: impl Read + Seek) -> Result<Image<Layers>>
+ where for<'s> L: ReadLayers<'s, Layers = Layers>
+ {
+ self.from_buffered(BufReader::new(unbuffered))
+ }
+
+ /// Read the exr image from a buffered reader.
+    /// Use [`ReadImage::from_file`] instead, if you have a file path.
+    /// Use [`ReadImage::from_unbuffered`] instead, if this is not an in-memory reader.
+ // TODO Use Parallel<> Wrapper to only require sendable byte source where parallel decompression is required
+ #[must_use]
+ pub fn from_buffered<Layers>(self, buffered: impl Read + Seek) -> Result<Image<Layers>>
+ where for<'s> L: ReadLayers<'s, Layers = Layers>
+ {
+ let chunks = crate::block::read(buffered, self.pedantic)?;
+ self.from_chunks(chunks)
+ }
+
+ /// Read the exr image from an initialized chunks reader
+ /// that has already extracted the meta data from the file.
+    /// Use [`ReadImage::from_file`] instead, if you have a file path.
+    /// Use [`ReadImage::from_buffered`] instead, if this is an in-memory reader.
+ // TODO Use Parallel<> Wrapper to only require sendable byte source where parallel decompression is required
+ #[must_use]
+ pub fn from_chunks<Layers>(mut self, chunks_reader: crate::block::reader::Reader<impl Read + Seek>) -> Result<Image<Layers>>
+ where for<'s> L: ReadLayers<'s, Layers = Layers>
+ {
+ let Self { pedantic, parallel, ref mut on_progress, ref mut read_layers } = self;
+
+ let layers_reader = read_layers.create_layers_reader(chunks_reader.headers())?;
+ let mut image_collector = ImageWithAttributesReader::new(chunks_reader.headers(), layers_reader)?;
+
+ let block_reader = chunks_reader
+ .filter_chunks(pedantic, |meta, tile, block| {
+ image_collector.filter_block(meta, tile, block)
+ })?
+ .on_progress(on_progress);
+
+ // TODO propagate send requirement further upwards
+ if parallel {
+ block_reader.decompress_parallel(pedantic, |meta_data, block|{
+ image_collector.read_block(&meta_data.headers, block)
+ })?;
+ }
+ else {
+ block_reader.decompress_sequential(pedantic, |meta_data, block|{
+ image_collector.read_block(&meta_data.headers, block)
+ })?;
+ }
+
+ Ok(image_collector.into_image())
+ }
+}
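+
+// Example (a sketch, not part of the crate): customizing this final reading stage.
+// The builder chain before `.all_attributes()` is defined in the other `image::read`
+// modules; the file name is hypothetical:
+//
+//     let image = read()
+//         .no_deep_data().largest_resolution_level()
+//         .all_channels().all_layers().all_attributes()
+//         .pedantic()                                        // fail on any inconsistency
+//         .non_parallel()                                    // single-threaded decompression
+//         .on_progress(|progress| println!("{:.1} %", progress * 100.0))
+//         .from_file("image.exr")
+//         .unwrap();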
+
+/// Processes blocks from a file and collects them into a complete `Image`.
+#[derive(Debug, Clone, PartialEq)]
+pub struct ImageWithAttributesReader<L> {
+ image_attributes: ImageAttributes,
+ layers_reader: L,
+}
+
+impl<L> ImageWithAttributesReader<L> where L: LayersReader {
+
+ /// A new image reader with image attributes.
+ pub fn new(headers: &[Header], layers_reader: L) -> Result<Self>
+ {
+ Ok(ImageWithAttributesReader {
+ image_attributes: headers.first().as_ref().expect("invalid headers").shared_attributes.clone(),
+ layers_reader,
+ })
+ }
+
+ /// Specify whether a single block of pixels should be loaded from the file
+ fn filter_block(&self, meta: &MetaData, tile: TileCoordinates, block: BlockIndex) -> bool {
+ self.layers_reader.filter_block(meta, tile, block)
+ }
+
+ /// Load a single pixel block, which has not been filtered, into the reader, accumulating the image
+ fn read_block(&mut self, headers: &[Header], block: UncompressedBlock) -> UnitResult {
+ self.layers_reader.read_block(headers, block)
+ }
+
+ /// Deliver the complete accumulated image
+ fn into_image(self) -> Image<L::Layers> {
+ Image {
+ attributes: self.image_attributes,
+ layer_data: self.layers_reader.into_layers()
+ }
+ }
+}
+
+
+/// A template that creates a `LayerReader` for each layer in the file.
+pub trait ReadLayers<'s> {
+
+ /// The type of the resulting Layers
+ type Layers;
+
+ /// The type of the temporary layer reader
+ type Reader: LayersReader<Layers = Self::Layers>;
+
+ /// Create a single reader for a single layer
+ fn create_layers_reader(&'s self, headers: &[Header]) -> Result<Self::Reader>;
+
+ /// Specify that all attributes should be read from an image.
+ /// Use `from_file(path)` on the return value of this method to actually decode an image.
+ fn all_attributes(self) -> ReadImage<fn(f64), Self> where Self: Sized {
+ ReadImage::new(self, ignore_progress)
+ }
+}
+
+/// Processes pixel blocks from a file and accumulates them into a single image layer.
+pub trait LayersReader {
+
+ /// The type of resulting layers
+ type Layers;
+
+ /// Specify whether a single block of pixels should be loaded from the file
+ fn filter_block(&self, meta: &MetaData, tile: TileCoordinates, block: BlockIndex) -> bool;
+
+ /// Load a single pixel block, which has not been filtered, into the reader, accumulating the layer
+ fn read_block(&mut self, headers: &[Header], block: UncompressedBlock) -> UnitResult;
+
+ /// Deliver the final accumulated layers for the image
+ fn into_layers(self) -> Self::Layers;
+}
+
diff --git a/vendor/exr/src/image/read/layers.rs b/vendor/exr/src/image/read/layers.rs
new file mode 100644
index 0000000..75159c2
--- /dev/null
+++ b/vendor/exr/src/image/read/layers.rs
@@ -0,0 +1,204 @@
+//! How to read either a single or a list of layers.
+
+use crate::image::*;
+use crate::meta::header::{Header, LayerAttributes};
+use crate::error::{Result, UnitResult, Error};
+use crate::block::{UncompressedBlock, BlockIndex};
+use crate::math::Vec2;
+use crate::image::read::image::{ReadLayers, LayersReader};
+use crate::block::chunk::TileCoordinates;
+use crate::meta::MetaData;
+
+/// Specify to read all layers, aborting if any one is invalid.
+/// The `ReadChannels` can be, for example, [`ReadRgbaChannels`] or [`ReadAnyChannels<ReadFlatSamples>`].
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct ReadAllLayers<ReadChannels> {
+
+ /// The channel reading specification
+ pub read_channels: ReadChannels,
+}
+
+/// Specify to read only the first layer which meets the previously specified requirements
+// FIXME do not throw error on deep data but just skip it!
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct ReadFirstValidLayer<ReadChannels> {
+
+ /// The channel reading specification
+ pub read_channels: ReadChannels,
+}
+
+/// A template that creates a [`ChannelsReader`] once for all channels per layer.
+pub trait ReadChannels<'s> {
+
+ /// The type of the temporary channels reader
+ type Reader: ChannelsReader;
+
+ /// Create a single reader for all channels of a specific layer
+ fn create_channels_reader(&'s self, header: &Header) -> Result<Self::Reader>;
+
+
+ /// Read only the first layer which meets the previously specified requirements
+ /// For example, skips layers with deep data, if specified earlier.
+ /// Aborts if the image contains no layers.
+ // TODO test if this filters non-deep layers while ignoring deep data layers!
+ fn first_valid_layer(self) -> ReadFirstValidLayer<Self> where Self:Sized { ReadFirstValidLayer { read_channels: self } }
+
+// FIXME do not throw error on deep data but just skip it!
+
+
+ /// Reads all layers, including an empty list. Aborts if any of the layers are invalid,
+ /// even if only one of the layers contains unexpected data.
+ fn all_layers(self) -> ReadAllLayers<Self> where Self:Sized { ReadAllLayers { read_channels: self } }
+
+ // TODO pub fn all_valid_layers(self) -> ReadAllValidLayers<Self> { ReadAllValidLayers { read_channels: self } }
+}
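+
+// Example (a sketch, not part of the crate): the two layer selection strategies,
+// reading either all layers or only the first valid one (the file name is hypothetical):
+//
+//     // strict: load every layer, failing if any single layer is unexpected
+//     let all_layers = read().no_deep_data().largest_resolution_level()
+//         .all_channels().all_layers().all_attributes()
+//         .from_file("multi_layer.exr").unwrap();
+//
+//     // lenient: load only the first layer that meets the requirements
+//     let first_layer = read().no_deep_data().largest_resolution_level()
+//         .all_channels().first_valid_layer().all_attributes()
+//         .from_file("multi_layer.exr").unwrap();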
+
+
+/// Processes pixel blocks from a file and accumulates them into a list of layers.
+/// For example, `ChannelsReader` can be
+/// [`SpecificChannelsReader`] or [`AnyChannelsReader<FlatSamplesReader>`].
+#[derive(Debug, Clone, PartialEq)]
+pub struct AllLayersReader<ChannelsReader> {
+ layer_readers: SmallVec<[LayerReader<ChannelsReader>; 2]>, // TODO unpack struct?
+}
+
+/// Processes pixel blocks from a file and accumulates them into a single layer, using only the first.
+/// For example, `ChannelsReader` can be
+/// `SpecificChannelsReader` or `AnyChannelsReader<FlatSamplesReader>`.
+#[derive(Debug, Clone, PartialEq)]
+pub struct FirstValidLayerReader<ChannelsReader> {
+ layer_reader: LayerReader<ChannelsReader>,
+ layer_index: usize,
+}
+
+/// Processes pixel blocks from a file and accumulates them into a single layer.
+/// For example, `ChannelsReader` can be
+/// `SpecificChannelsReader` or `AnyChannelsReader<FlatSamplesReader>`.
+#[derive(Debug, Clone, PartialEq)]
+pub struct LayerReader<ChannelsReader> {
+ channels_reader: ChannelsReader,
+ attributes: LayerAttributes,
+ size: Vec2<usize>,
+ encoding: Encoding
+}
+
+/// Processes pixel blocks from a file and accumulates them into multiple channels per layer.
+pub trait ChannelsReader {
+
+ /// The type of the resulting channel collection
+ type Channels;
+
+ /// Specify whether a single block of pixels should be loaded from the file
+ fn filter_block(&self, tile: TileCoordinates) -> bool;
+
+ /// Load a single pixel block, which has not been filtered, into the reader, accumulating the channel data
+ fn read_block(&mut self, header: &Header, block: UncompressedBlock) -> UnitResult;
+
+ /// Deliver the final accumulated channel collection for the image
+ fn into_channels(self) -> Self::Channels;
+}
+
+
+impl<C> LayerReader<C> {
+ fn new(header: &Header, channels_reader: C) -> Result<Self> {
+ Ok(LayerReader {
+ channels_reader,
+ attributes: header.own_attributes.clone(),
+ size: header.layer_size,
+ encoding: Encoding {
+ compression: header.compression,
+ line_order: header.line_order,
+ blocks: match header.blocks {
+ crate::meta::BlockDescription::ScanLines => Blocks::ScanLines,
+ crate::meta::BlockDescription::Tiles(TileDescription { tile_size, .. }) => Blocks::Tiles(tile_size)
+ },
+ },
+ })
+ }
+}
+
+impl<'s, C> ReadLayers<'s> for ReadAllLayers<C> where C: ReadChannels<'s> {
+ type Layers = Layers<<C::Reader as ChannelsReader>::Channels>;
+ type Reader = AllLayersReader<C::Reader>;
+
+ fn create_layers_reader(&'s self, headers: &[Header]) -> Result<Self::Reader> {
+ let readers: Result<_> = headers.iter()
+ .map(|header| LayerReader::new(header, self.read_channels.create_channels_reader(header)?))
+ .collect();
+
+ Ok(AllLayersReader {
+ layer_readers: readers?
+ })
+ }
+}
+
+impl<C> LayersReader for AllLayersReader<C> where C: ChannelsReader {
+ type Layers = Layers<C::Channels>;
+
+ fn filter_block(&self, _: &MetaData, tile: TileCoordinates, block: BlockIndex) -> bool {
+ let layer = self.layer_readers.get(block.layer).expect("invalid layer index argument");
+ layer.channels_reader.filter_block(tile)
+ }
+
+ fn read_block(&mut self, headers: &[Header], block: UncompressedBlock) -> UnitResult {
+ self.layer_readers
+ .get_mut(block.index.layer).expect("invalid layer index argument")
+ .channels_reader.read_block(headers.get(block.index.layer).expect("invalid header index in block"), block)
+ }
+
+ fn into_layers(self) -> Self::Layers {
+ self.layer_readers
+ .into_iter()
+ .map(|layer| Layer {
+ channel_data: layer.channels_reader.into_channels(),
+ attributes: layer.attributes,
+ size: layer.size,
+ encoding: layer.encoding
+ })
+ .collect()
+ }
+}
+
+
+impl<'s, C> ReadLayers<'s> for ReadFirstValidLayer<C> where C: ReadChannels<'s> {
+ type Layers = Layer<<C::Reader as ChannelsReader>::Channels>;
+ type Reader = FirstValidLayerReader<C::Reader>;
+
+ fn create_layers_reader(&'s self, headers: &[Header]) -> Result<Self::Reader> {
+ headers.iter().enumerate()
+ .flat_map(|(index, header)|
+ self.read_channels.create_channels_reader(header)
+ .and_then(|reader| Ok(FirstValidLayerReader {
+ layer_reader: LayerReader::new(header, reader)?,
+ layer_index: index
+ }))
+ .ok()
+ )
+ .next()
+ .ok_or(Error::invalid("no layer in the image matched your specified requirements"))
+ }
+}
+
+
+impl<C> LayersReader for FirstValidLayerReader<C> where C: ChannelsReader {
+ type Layers = Layer<C::Channels>;
+
+ fn filter_block(&self, _: &MetaData, tile: TileCoordinates, block: BlockIndex) -> bool {
+ block.layer == self.layer_index && self.layer_reader.channels_reader.filter_block(tile)
+ }
+
+ fn read_block(&mut self, headers: &[Header], block: UncompressedBlock) -> UnitResult {
+ debug_assert_eq!(block.index.layer, self.layer_index, "block should have been filtered out");
+ self.layer_reader.channels_reader.read_block(&headers[self.layer_index], block)
+ }
+
+ fn into_layers(self) -> Self::Layers {
+ Layer {
+ channel_data: self.layer_reader.channels_reader.into_channels(),
+ attributes: self.layer_reader.attributes,
+ size: self.layer_reader.size,
+ encoding: self.layer_reader.encoding
+ }
+ }
+}
+
diff --git a/vendor/exr/src/image/read/levels.rs b/vendor/exr/src/image/read/levels.rs
new file mode 100644
index 0000000..5705903
--- /dev/null
+++ b/vendor/exr/src/image/read/levels.rs
@@ -0,0 +1,219 @@
+//! How to read a set of resolution levels.
+
+use crate::meta::*;
+use crate::image::*;
+use crate::error::*;
+use crate::meta::attribute::*;
+use crate::image::read::any_channels::*;
+use crate::block::chunk::TileCoordinates;
+use crate::image::read::specific_channels::*;
+use crate::image::recursive::*;
+use crate::math::Vec2;
+use crate::block::lines::LineRef;
+use crate::block::samples::*;
+use crate::meta::header::{Header};
+
+
+// Note: In the resulting image, the `FlatSamples` are placed
+// directly inside the channels, without `LargestLevel<>` indirection
+/// Specify to read only the highest resolution level, skipping all smaller variations.
+/// The sample storage can be [`ReadFlatSamples`].
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct ReadLargestLevel<DeepOrFlatSamples> {
+
+ /// The sample reading specification
+ pub read_samples: DeepOrFlatSamples
+}
+
+
+// FIXME rgba levels???
+
+// Read the largest level, directly, without intermediate structs
+impl<DeepOrFlatSamples> ReadLargestLevel<DeepOrFlatSamples> {
+
+ /// Read all arbitrary channels in each layer.
+ pub fn all_channels(self) -> ReadAnyChannels<DeepOrFlatSamples> { ReadAnyChannels { read_samples: self.read_samples } } // Instead of Self, the `FlatSamples` are used directly
+
+ /// Read only layers that contain rgba channels. Skips any other channels in the layer.
+ /// The alpha channel will contain the value `1.0` if no alpha channel can be found in the image.
+ ///
+ /// Using two closures, define how to store the pixels.
+ /// The first closure creates an image, and the second closure inserts a single pixel.
+ /// The type of the pixel can be defined by the second closure;
+ /// it must be a tuple containing four values, each being either `f16`, `f32`, `u32` or `Sample`.
+ ///
+ /// Throws an error for images with deep data or subsampling.
+ /// Use `specific_channels` or `all_channels` if you want to read something other than rgba.
+ pub fn rgba_channels<R,G,B,A, Create, Set, Pixels>(
+ self, create_pixels: Create, set_pixel: Set
+ ) -> CollectPixels<
+ ReadOptionalChannel<ReadRequiredChannel<ReadRequiredChannel<ReadRequiredChannel<NoneMore, R>, G>, B>, A>,
+ (R, G, B, A), Pixels, Create, Set
+ >
+ where
+ R: FromNativeSample, G: FromNativeSample, B: FromNativeSample, A: FromNativeSample,
+ Create: Fn(Vec2<usize>, &RgbaChannels) -> Pixels,
+ Set: Fn(&mut Pixels, Vec2<usize>, (R,G,B,A)),
+ {
+ self.specific_channels()
+ .required("R").required("G").required("B")
+ .optional("A", A::from_f32(1.0))
+ .collect_pixels(create_pixels, set_pixel)
+ }
+
+ /// Read only layers that contain rgb channels. Skips any other channels in the layer.
+ ///
+ /// Using two closures, define how to store the pixels.
+ /// The first closure creates an image, and the second closure inserts a single pixel.
+ /// The type of the pixel can be defined by the second closure;
+ /// it must be a tuple containing three values, each being either `f16`, `f32`, `u32` or `Sample`.
+ ///
+ /// Throws an error for images with deep data or subsampling.
+ /// Use `specific_channels` or `all_channels` if you want to read something other than rgb.
+ pub fn rgb_channels<R,G,B, Create, Set, Pixels>(
+ self, create_pixels: Create, set_pixel: Set
+ ) -> CollectPixels<
+ ReadRequiredChannel<ReadRequiredChannel<ReadRequiredChannel<NoneMore, R>, G>, B>,
+ (R, G, B), Pixels, Create, Set
+ >
+ where
+ R: FromNativeSample, G: FromNativeSample, B: FromNativeSample,
+ Create: Fn(Vec2<usize>, &RgbChannels) -> Pixels,
+ Set: Fn(&mut Pixels, Vec2<usize>, (R,G,B)),
+ {
+ self.specific_channels()
+ .required("R").required("G").required("B")
+ .collect_pixels(create_pixels, set_pixel)
+ }
+
+ /// Read only layers that contain the specified channels, skipping any other channels in the layer.
+ /// Further specify which channels should be included by calling `.required("ChannelName")`
+ /// or `.optional("ChannelName", default_value)` on the result of this function.
+ /// Call `collect_pixels` afterwards to define the pixel container for your set of channels.
+ ///
+ /// Throws an error for images with deep data or subsampling.
+ pub fn specific_channels(self) -> ReadZeroChannels {
+ ReadZeroChannels { }
+ }
+}
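+
+// Example (a sketch, not part of the crate): reading rgb pixels of the largest level
+// into the flattened `PixelVec` storage, as the round-trip test in `validate_results`
+// does (the file name is hypothetical):
+//
+//     let image = read().no_deep_data().largest_resolution_level()
+//         .rgb_channels(PixelVec::<(f32, f32, f32)>::constructor, PixelVec::set_pixel)
+//         .first_valid_layer().all_attributes()
+//         .from_file("scene.exr").unwrap();
+//
+//     // the pixel type is chosen by the closures; here it is `(f32, f32, f32)`
+//     let first_pixel = image.layer_data.channel_data.pixels.get_pixel(Vec2(0, 0));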
+
+/// Specify to read all contained resolution levels from the image, if any.
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct ReadAllLevels<DeepOrFlatSamples> {
+
+ /// The sample reading specification
+ pub read_samples: DeepOrFlatSamples
+}
+
+impl<ReadDeepOrFlatSamples> ReadAllLevels<ReadDeepOrFlatSamples> {
+
+ /// Read all arbitrary channels in each layer.
+ pub fn all_channels(self) -> ReadAnyChannels<Self> { ReadAnyChannels { read_samples: self } }
+
+ // TODO specific channels for multiple resolution levels
+
+}
+
+/*pub struct ReadLevels<S> {
+ read_samples: S,
+}*/
+
+/// Processes pixel blocks from a file and accumulates them into multiple levels per channel.
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct AllLevelsReader<SamplesReader> {
+ levels: Levels<SamplesReader>,
+}
+
+/// A template that creates a [`SamplesReader`] once for each resolution level.
+pub trait ReadSamplesLevel {
+
+ /// The type of the temporary level reader
+ type Reader: SamplesReader;
+
+ /// Create a single reader for a single resolution level
+ fn create_samples_level_reader(&self, header: &Header, channel: &ChannelDescription, level: Vec2<usize>, resolution: Vec2<usize>) -> Result<Self::Reader>;
+}
+
+
+impl<S: ReadSamplesLevel> ReadSamples for ReadAllLevels<S> {
+ type Reader = AllLevelsReader<S::Reader>;
+
+ fn create_sample_reader(&self, header: &Header, channel: &ChannelDescription) -> Result<Self::Reader> {
+ let data_size = header.layer_size / channel.sampling;
+
+ let levels = {
+ if let crate::meta::BlockDescription::Tiles(tiles) = &header.blocks {
+ match tiles.level_mode {
+ LevelMode::Singular => Levels::Singular(self.read_samples.create_samples_level_reader(header, channel, Vec2(0,0), header.layer_size)?),
+
+ LevelMode::MipMap => Levels::Mip {
+ rounding_mode: tiles.rounding_mode,
+ level_data: {
+ let round = tiles.rounding_mode;
+ let maps: Result<LevelMaps<S::Reader>> = mip_map_levels(round, data_size)
+ .map(|(index, level_size)| self.read_samples.create_samples_level_reader(header, channel, Vec2(index, index), level_size))
+ .collect();
+
+ maps?
+ },
+ },
+
+ // TODO put this into Levels::new(..) ?
+ LevelMode::RipMap => Levels::Rip {
+ rounding_mode: tiles.rounding_mode,
+ level_data: {
+ let round = tiles.rounding_mode;
+ let level_count_x = compute_level_count(round, data_size.width());
+ let level_count_y = compute_level_count(round, data_size.height());
+ let maps: Result<LevelMaps<S::Reader>> = rip_map_levels(round, data_size)
+ .map(|(index, level_size)| self.read_samples.create_samples_level_reader(header, channel, index, level_size))
+ .collect();
+
+ RipMaps {
+ map_data: maps?,
+ level_count: Vec2(level_count_x, level_count_y)
+ }
+ },
+ },
+ }
+ }
+
+ // scan line blocks never have mip maps
+ else {
+ Levels::Singular(self.read_samples.create_samples_level_reader(header, channel, Vec2(0, 0), data_size)?)
+ }
+ };
+
+ Ok(AllLevelsReader { levels })
+ }
+}
+
+
+impl<S: SamplesReader> SamplesReader for AllLevelsReader<S> {
+ type Samples = Levels<S::Samples>;
+
+ fn filter_block(&self, _: TileCoordinates) -> bool {
+ true
+ }
+
+ fn read_line(&mut self, line: LineRef<'_>) -> UnitResult {
+ self.levels.get_level_mut(line.location.level)?.read_line(line)
+ }
+
+ fn into_samples(self) -> Self::Samples {
+ match self.levels {
+ Levels::Singular(level) => Levels::Singular(level.into_samples()),
+ Levels::Mip { rounding_mode, level_data } => Levels::Mip {
+ rounding_mode, level_data: level_data.into_iter().map(|s| s.into_samples()).collect(),
+ },
+
+ Levels::Rip { rounding_mode, level_data } => Levels::Rip {
+ rounding_mode,
+ level_data: RipMaps {
+ level_count: level_data.level_count,
+ map_data: level_data.map_data.into_iter().map(|s| s.into_samples()).collect(),
+ }
+ },
+ }
+ }
+}
diff --git a/vendor/exr/src/image/read/mod.rs b/vendor/exr/src/image/read/mod.rs
new file mode 100644
index 0000000..c03fc90
--- /dev/null
+++ b/vendor/exr/src/image/read/mod.rs
@@ -0,0 +1,207 @@
+
+//! Read an exr image.
+//!
+//! For great flexibility and customization, use the `read()` function.
+//! The return value of the `read()` function must be further customized before reading a file.
+
+//!
+//! For very simple applications, you can alternatively use one of these functions:
+//!
+//! 1. `read_first_rgba_layer_from_file(path, your_constructor, your_pixel_setter)`:
+//! You specify how to store the pixels.
+//! The first layer containing rgba channels is then loaded from the file.
+//! Fails if no rgba layer can be found.
+//!
+//! 1. `read_all_rgba_layers_from_file(path, your_constructor, your_pixel_setter)`:
+//! You specify how to store the pixels.
+//! All layers containing rgba channels are then loaded from the file.
+//! Fails if any layer in the image does not contain rgba channels.
+//!
+//! 1. `read_first_flat_layer_from_file(path)`:
+//! The first layer containing non-deep data with arbitrary channels is loaded from the file.
+//! Fails if no non-deep layer can be found.
+//!
+//! 1. `read_all_flat_layers_from_file(path)`:
+//! All layers containing non-deep data with arbitrary channels are loaded from the file.
+//! Fails if any layer in the image contains deep data.
+//!
+//! 1. `read_all_data_from_file(path)`:
+//! All layers with arbitrary channels and all resolution levels are extracted from the file.
+//!
+//!     Note: Deep data is not supported yet; this function currently fails
+//!     if any layer in the image contains deep data.
+//!
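+//!
+//! As a minimal sketch, the simplest of these functions can be called like this
+//! (the file name is only an example):
+//!
+//! ```no_run
+//! let image = exr::image::read::read_first_flat_layer_from_file("image.exr").unwrap();
+//! ```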
+
+// The following three stages are internally used to read an image.
+// 1. `ReadImage` - The specification. Contains everything the user wants to tell us about loading an image.
+// The data in this structure will be instantiated and might be borrowed.
+// 2. `ImageReader` - The temporary reader. Based on the specification of the blueprint,
+// a reader is instantiated, once for each layer.
+// This data structure accumulates the image data from the file.
+// It also owns temporary data and references the blueprint.
+// 3. `Image` - The clean image. The accumulated data from the Reader
+// is converted to the clean image structure, without temporary data.
+
+pub mod image;
+pub mod layers;
+pub mod any_channels;
+pub mod levels;
+pub mod samples;
+pub mod specific_channels;
+
+use crate::error::{Result};
+use crate::image::read::samples::{ReadFlatSamples};
+use std::path::Path;
+use crate::image::{AnyImage, AnyChannels, FlatSamples, Image, Layer, FlatImage, PixelLayersImage, RgbaChannels};
+use crate::image::read::image::ReadLayers;
+use crate::image::read::layers::ReadChannels;
+use crate::math::Vec2;
+use crate::prelude::{PixelImage};
+use crate::block::samples::FromNativeSample;
+
+
+/// All resolution levels, all channels, all layers.
+/// Does not support deep data yet. Uses parallel decompression and relaxed error handling.
+/// Inspect the source code of this function if you need customization.
+pub fn read_all_data_from_file(path: impl AsRef<Path>) -> Result<AnyImage> {
+ read()
+ .no_deep_data() // TODO deep data
+ .all_resolution_levels()
+ .all_channels()
+ .all_layers()
+ .all_attributes()
+ .from_file(path)
+}
+
+// FIXME do not throw error on deep data but just skip it!
+/// No deep data, no resolution levels, all channels, all layers.
+/// Uses parallel decompression and relaxed error handling.
+/// Inspect the source code of this function if you need customization.
+pub fn read_all_flat_layers_from_file(path: impl AsRef<Path>) -> Result<FlatImage> {
+ read()
+ .no_deep_data()
+ .largest_resolution_level()
+ .all_channels()
+ .all_layers()
+ .all_attributes()
+ .from_file(path)
+}
+
+/// No deep data, no resolution levels, all channels, first layer.
+/// Uses parallel decompression and relaxed error handling.
+/// Inspect the source code of this function if you need customization.
+pub fn read_first_flat_layer_from_file(path: impl AsRef<Path>) -> Result<Image<Layer<AnyChannels<FlatSamples>>>> {
+ read()
+ .no_deep_data()
+ .largest_resolution_level()
+ .all_channels()
+ .first_valid_layer()
+ .all_attributes()
+ .from_file(path)
+}
+
+/// No deep data, no resolution levels, rgba channels, all layers.
+/// If any layer does not contain rgba channels, this function returns an error.
+/// Uses parallel decompression and relaxed error handling.
+/// `Create` and `Set` can be closures, see the examples for more information.
+/// Inspect the source code of this function if you need customization.
+/// The alpha channel will contain the value `1.0` if no alpha channel can be found in the image.
+///
+/// Using two closures, define how to store the pixels.
+/// The first closure creates an image, and the second closure inserts a single pixel.
+/// The type of the pixel can be defined by the second closure;
+/// it must be a tuple containing four values, each being either `f16`, `f32`, `u32` or `Sample`.
+// FIXME Set and Create should not need to be static
+pub fn read_all_rgba_layers_from_file<R,G,B,A, Set:'static, Create:'static, Pixels: 'static>(
+ path: impl AsRef<Path>, create: Create, set_pixel: Set
+)
+ -> Result<PixelLayersImage<Pixels, RgbaChannels>>
+ where
+ R: FromNativeSample, G: FromNativeSample, B: FromNativeSample, A: FromNativeSample,
+ Create: Fn(Vec2<usize>, &RgbaChannels) -> Pixels, // TODO type alias? CreateRgbaPixels<Pixels=Pixels>,
+ Set: Fn(&mut Pixels, Vec2<usize>, (R,G,B,A)),
+{
+ read()
+ .no_deep_data()
+ .largest_resolution_level()
+ .rgba_channels(create, set_pixel)
+ .all_layers()
+ .all_attributes()
+ .from_file(path)
+}
+
+/// No deep data, no resolution levels, rgba channels, choosing the first layer with rgba channels.
+/// Uses parallel decompression and relaxed error handling.
+/// `Create` and `Set` can be closures, see the examples for more information.
+/// Inspect the source code of this function if you need customization.
+/// The alpha channel will contain the value `1.0` if no alpha channel can be found in the image.
+///
+/// Using two closures, define how to store the pixels.
+/// The first closure creates an image, and the second closure inserts a single pixel.
+/// The type of the pixel can be defined by the second closure;
+/// it must be a tuple containing four values, each being either `f16`, `f32`, `u32` or `Sample`.
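+///
+/// A sketch of a typical call; the pixel container `Vec<Vec<[f32; 4]>>` and the
+/// file name are only examples:
+///
+/// ```no_run
+/// use exr::prelude::*;
+///
+/// let image = read_first_rgba_layer_from_file(
+///     "image.exr",
+///
+///     // create the pixel storage: one row of rgba values per scan line
+///     |resolution, _channels| vec![vec![[0.0, 0.0, 0.0, 0.0]; resolution.width()]; resolution.height()],
+///
+///     // insert a single pixel into that storage
+///     |pixels, position, (r, g, b, a): (f32, f32, f32, f32)| pixels[position.y()][position.x()] = [r, g, b, a],
+/// ).unwrap();
+/// ```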
+// FIXME Set and Create should not need to be static
+pub fn read_first_rgba_layer_from_file<R,G,B,A, Set:'static, Create:'static, Pixels: 'static>(
+ path: impl AsRef<Path>, create: Create, set_pixel: Set
+)
+ -> Result<PixelImage<Pixels, RgbaChannels>>
+ where
+ R: FromNativeSample, G: FromNativeSample, B: FromNativeSample, A: FromNativeSample,
+ Create: Fn(Vec2<usize>, &RgbaChannels) -> Pixels, // TODO type alias? CreateRgbaPixels<Pixels=Pixels>,
+ Set: Fn(&mut Pixels, Vec2<usize>, (R,G,B,A)),
+{
+ read()
+ .no_deep_data()
+ .largest_resolution_level()
+ .rgba_channels(create, set_pixel)
+ .first_valid_layer()
+ .all_attributes()
+ .from_file(path)
+}
+
+
+/// Utilizes the builder pattern to configure an image reader. This is the initial struct.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub struct ReadBuilder;
+
+/// Create a reader which can be used to load an exr image.
+/// Allows you to exactly specify how to load the image, for example:
+///
+/// ```no_run
+/// use exr::prelude::*;
+///
+/// // the type of this image depends on the chosen options
+/// let image = read()
+/// .no_deep_data() // (currently required)
+/// .largest_resolution_level() // or `all_resolution_levels()`
+/// .all_channels() // or `rgba_channels(constructor, setter)`
+/// .all_layers() // or `first_valid_layer()`
+/// .all_attributes() // (currently required)
+/// .on_progress(|progress| println!("progress: {:.1}", progress*100.0)) // optional
+/// .from_file("image.exr").unwrap(); // or `from_buffered(my_byte_slice)`
+/// ```
+///
+/// You can alternatively use one of the following simpler functions:
+/// 1. `read_first_flat_layer_from_file`
+/// 1. `read_all_rgba_layers_from_file`
+/// 1. `read_all_flat_layers_from_file`
+/// 1. `read_all_data_from_file`
+///
+// TODO not panic but skip deep layers!
+pub fn read() -> ReadBuilder { ReadBuilder }
+
+impl ReadBuilder {
+
+ /// Specify to handle only one sample per channel, disabling "deep data".
+ // TODO not panic but skip deep layers!
+ pub fn no_deep_data(self) -> ReadFlatSamples { ReadFlatSamples }
+
+ // pub fn any_resolution_levels() -> ReadBuilder<> {}
+
+ // TODO
+ // e. g. `let sum = reader.any_channels_with(|sample, sum| sum += sample)`
+ // e. g. `let floats = reader.any_channels_with(|sample, f32_samples| f32_samples[index] = sample as f32)`
+ // pub fn no_deep_data_with <S> (self, storage: S) -> FlatSamplesWith<S> { }
+
+ // pub fn flat_and_deep_data(self) -> ReadAnySamples { ReadAnySamples }
+}
diff --git a/vendor/exr/src/image/read/samples.rs b/vendor/exr/src/image/read/samples.rs
new file mode 100644
index 0000000..e03c3cc
--- /dev/null
+++ b/vendor/exr/src/image/read/samples.rs
@@ -0,0 +1,122 @@
+//! How to read samples (a grid of `f32`, `f16` or `u32` values).
+
+use crate::image::*;
+use crate::meta::header::{Header};
+use crate::error::{Result, UnitResult};
+use crate::block::lines::LineRef;
+use crate::math::Vec2;
+use crate::meta::attribute::{ChannelDescription, SampleType};
+use crate::image::read::any_channels::{SamplesReader, ReadSamples};
+use crate::image::read::levels::{ReadSamplesLevel, ReadAllLevels, ReadLargestLevel};
+use crate::block::chunk::TileCoordinates;
+// use crate::image::read::layers::ReadChannels;
+
+/// Specify to read only flat samples and no "deep data"
+// FIXME do not throw error on deep data but just skip it!
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub struct ReadFlatSamples;
+// pub struct ReadAnySamples;
+
+impl ReadFlatSamples {
+
+ // TODO
+ // e. g. `let sum = reader.any_channels_with(|sample, sum| sum += sample)`
+ // pub fn any_channels_with <S> (self, storage: S) -> { }
+
+ /// Specify to read only the highest resolution level, skipping all smaller variations.
+ pub fn largest_resolution_level(self) -> ReadLargestLevel<Self> { ReadLargestLevel { read_samples: self } }
+
+ /// Specify to read all contained resolution levels from the image, if any.
+ pub fn all_resolution_levels(self) -> ReadAllLevels<Self> { ReadAllLevels { read_samples: self } }
+
+ // TODO pub fn specific_resolution_level<F: Fn(&[Vec2<usize>])->usize >(self, select_level: F) -> ReadLevelBy<Self> { ReadAllLevels { read_samples: self } }
+}
+
+
+/*pub struct AnySamplesReader { TODO
+ resolution: Vec2<usize>,
+ samples: DeepAndFlatSamples
+}*/
+
+/// Processes pixel blocks from a file and accumulates them into a grid of samples, for example "Red" or "Alpha".
+#[derive(Debug, Clone, PartialEq)]
+pub struct FlatSamplesReader {
+ level: Vec2<usize>,
+ resolution: Vec2<usize>,
+ samples: FlatSamples
+}
+
+
+// only used when the samples are stored directly inside a channel, without resolution levels
+impl ReadSamples for ReadFlatSamples {
+ type Reader = FlatSamplesReader;
+
+ fn create_sample_reader(&self, header: &Header, channel: &ChannelDescription) -> Result<Self::Reader> {
+ self.create_samples_level_reader(header, channel, Vec2(0, 0), header.layer_size)
+ }
+}
+
+impl ReadSamplesLevel for ReadFlatSamples {
+ type Reader = FlatSamplesReader;
+
+ fn create_samples_level_reader(&self, _header: &Header, channel: &ChannelDescription, level: Vec2<usize>, resolution: Vec2<usize>) -> Result<Self::Reader> {
+ Ok(FlatSamplesReader {
+ level, resolution, // TODO sampling
+ samples: match channel.sample_type {
+ SampleType::F16 => FlatSamples::F16(vec![f16::ZERO; resolution.area()]),
+ SampleType::F32 => FlatSamples::F32(vec![0.0; resolution.area()]),
+ SampleType::U32 => FlatSamples::U32(vec![0; resolution.area()]),
+ }
+ })
+ }
+}
+
+
+impl SamplesReader for FlatSamplesReader {
+ type Samples = FlatSamples;
+
+ fn filter_block(&self, tile: TileCoordinates) -> bool {
+ tile.level_index == self.level
+ }
+
+ fn read_line(&mut self, line: LineRef<'_>) -> UnitResult {
+ let index = line.location;
+ let resolution = self.resolution;
+
+        // the index is generated by us and must always be correct
+ debug_assert_eq!(index.level, self.level, "line should have been filtered");
+ debug_assert!(index.position.x() + index.sample_count <= resolution.width(), "line index calculation bug");
+ debug_assert!(index.position.y() < resolution.height(), "line index calculation bug");
+ debug_assert_ne!(resolution.0, 0, "sample size bug");
+
+ let start_index = index.position.y() * resolution.width() + index.position.x();
+ let end_index = start_index + index.sample_count;
+
+ debug_assert!(
+ start_index < end_index && end_index <= self.samples.len(),
+ "for resolution {:?}, this is an invalid line: {:?}",
+ self.resolution, line.location
+ );
+
+ match &mut self.samples {
+ FlatSamples::F16(samples) =>
+ line.read_samples_into_slice(&mut samples[start_index .. end_index])
+ .expect("writing line bytes failed"),
+
+ FlatSamples::F32(samples) =>
+ line.read_samples_into_slice(&mut samples[start_index .. end_index])
+ .expect("writing line bytes failed"),
+
+ FlatSamples::U32(samples) =>
+ line.read_samples_into_slice(&mut samples[start_index .. end_index])
+ .expect("writing line bytes failed"),
+ }
+
+ Ok(())
+ }
+
+ fn into_samples(self) -> FlatSamples {
+ self.samples
+ }
+}
+
diff --git a/vendor/exr/src/image/read/specific_channels.rs b/vendor/exr/src/image/read/specific_channels.rs
new file mode 100644
index 0000000..375691c
--- /dev/null
+++ b/vendor/exr/src/image/read/specific_channels.rs
@@ -0,0 +1,463 @@
+//! How to read an arbitrary but specific selection of channels.
+//! This is not a zero-cost abstraction.
+
+use crate::image::recursive::*;
+use crate::block::samples::*;
+use crate::image::*;
+use crate::math::*;
+use crate::meta::header::*;
+use crate::error::*;
+use crate::block::UncompressedBlock;
+use crate::image::read::layers::{ChannelsReader, ReadChannels};
+use crate::block::chunk::TileCoordinates;
+
+use std::marker::PhantomData;
+use crate::io::Read;
+
+
+/// An object to which one more channel reader can be attached.
+/// Call `required` or `optional` on this object to declare another channel to be read from the file.
+/// Finally, call `collect_pixels` to define how the previously declared pixels should be stored.
+pub trait ReadSpecificChannel: Sized + CheckDuplicates {
+
+ /// A separate internal reader for the pixels. Will be of type `Recursive<_, SampleReader<_>>`,
+ /// depending on the pixels of the specific channel combination.
+ type RecursivePixelReader: RecursivePixelReader;
+
+ /// Create a separate internal reader for the pixels of the specific channel combination.
+ fn create_recursive_reader(&self, channels: &ChannelList) -> Result<Self::RecursivePixelReader>;
+
+ /// Plan to read an additional channel from the image, with the specified name.
+ /// If the channel cannot be found in the image when the image is read, the image will not be loaded.
+ /// The generic parameter can usually be inferred from the closure in `collect_pixels`.
+ fn required<Sample>(self, channel_name: impl Into<Text>) -> ReadRequiredChannel<Self, Sample> {
+ let channel_name = channel_name.into();
+ assert!(self.already_contains(&channel_name).not(), "a channel with the name `{}` is already defined", channel_name);
+ ReadRequiredChannel { channel_name, previous_channels: self, px: Default::default() }
+ }
+
+ /// Plan to read an additional channel from the image, with the specified name.
+ /// If the file does not contain this channel, the specified default sample will be returned instead.
+    /// You can find out whether the channel was present in the file by
+    /// checking the presence of the optional channel description before instantiating your own image.
+ /// The generic parameter can usually be inferred from the closure in `collect_pixels`.
+ fn optional<Sample>(self, channel_name: impl Into<Text>, default_sample: Sample)
+ -> ReadOptionalChannel<Self, Sample>
+ {
+ let channel_name = channel_name.into();
+ assert!(self.already_contains(&channel_name).not(), "a channel with the name `{}` is already defined", channel_name);
+ ReadOptionalChannel { channel_name, previous_channels: self, default_sample }
+ }
+
+ /// Using two closures, define how to store the pixels.
+ /// The first closure creates an image, and the second closure inserts a single pixel.
+ /// The type of the pixel can be defined by the second closure;
+ /// it must be a tuple containing `f16`, `f32`, `u32` or `Sample` values.
+ /// See the examples for more information.
+ fn collect_pixels<Pixel, PixelStorage, CreatePixels, SetPixel>(
+ self, create_pixels: CreatePixels, set_pixel: SetPixel
+ ) -> CollectPixels<Self, Pixel, PixelStorage, CreatePixels, SetPixel>
+ where
+ <Self::RecursivePixelReader as RecursivePixelReader>::RecursivePixel: IntoTuple<Pixel>,
+ <Self::RecursivePixelReader as RecursivePixelReader>::RecursiveChannelDescriptions: IntoNonRecursive,
+ CreatePixels: Fn(
+ Vec2<usize>,
+ &<<Self::RecursivePixelReader as RecursivePixelReader>::RecursiveChannelDescriptions as IntoNonRecursive>::NonRecursive
+ ) -> PixelStorage,
+ SetPixel: Fn(&mut PixelStorage, Vec2<usize>, Pixel),
+ {
+ CollectPixels { read_channels: self, set_pixel, create_pixels, px: Default::default() }
+ }
+}
+
+/// A reader containing sub-readers for reading the pixel content of an image.
+pub trait RecursivePixelReader {
+
+ /// The channel descriptions from the image.
+ /// Will be converted to a tuple before being stored in `SpecificChannels<_, ChannelDescriptions>`.
+ type RecursiveChannelDescriptions;
+
+ /// Returns the channel descriptions based on the channels in the file.
+ fn get_descriptions(&self) -> Self::RecursiveChannelDescriptions;
+
+ /// The pixel type. Will be converted to a tuple at the end of the process.
+ type RecursivePixel: Copy + Default + 'static;
+
+ /// Read the line of pixels.
+ fn read_pixels<'s, FullPixel>(
+ &self, bytes: &'s[u8], pixels: &mut [FullPixel],
+ get_pixel: impl Fn(&mut FullPixel) -> &mut Self::RecursivePixel
+ );
+}
+
+// does not use the generic `Recursive` struct to reduce the number of angle brackets in the public api
+/// Used to read another specific channel from an image.
+/// Contains the previous `ReadChannels` objects.
+#[derive(Clone, Debug)]
+pub struct ReadOptionalChannel<ReadChannels, Sample> {
+ previous_channels: ReadChannels,
+ channel_name: Text,
+ default_sample: Sample,
+}
+
+// does not use the generic `Recursive` struct to reduce the number of angle brackets in the public api
+/// Used to read another specific channel from an image.
+/// Contains the previous `ReadChannels` objects.
+#[derive(Clone, Debug)]
+pub struct ReadRequiredChannel<ReadChannels, Sample> {
+ previous_channels: ReadChannels,
+ channel_name: Text,
+ px: PhantomData<Sample>,
+}
+
+/// Specifies how to collect all the specified channels into a number of individual pixels.
+#[derive(Copy, Clone, Debug)]
+pub struct CollectPixels<ReadChannels, Pixel, PixelStorage, CreatePixels, SetPixel> {
+ read_channels: ReadChannels,
+ create_pixels: CreatePixels,
+ set_pixel: SetPixel,
+ px: PhantomData<(Pixel, PixelStorage)>,
+}
+
+impl<Inner: CheckDuplicates, Sample> CheckDuplicates for ReadRequiredChannel<Inner, Sample> {
+ fn already_contains(&self, name: &Text) -> bool {
+ &self.channel_name == name || self.previous_channels.already_contains(name)
+ }
+}
+
+impl<Inner: CheckDuplicates, Sample> CheckDuplicates for ReadOptionalChannel<Inner, Sample> {
+ fn already_contains(&self, name: &Text) -> bool {
+ &self.channel_name == name || self.previous_channels.already_contains(name)
+ }
+}
+
+impl<'s, InnerChannels, Pixel, PixelStorage, CreatePixels, SetPixel: 's>
+ReadChannels<'s> for CollectPixels<InnerChannels, Pixel, PixelStorage, CreatePixels, SetPixel>
+ where
+ InnerChannels: ReadSpecificChannel,
+ <InnerChannels::RecursivePixelReader as RecursivePixelReader>::RecursivePixel: IntoTuple<Pixel>,
+ <InnerChannels::RecursivePixelReader as RecursivePixelReader>::RecursiveChannelDescriptions: IntoNonRecursive,
+ CreatePixels: Fn(Vec2<usize>, &<<InnerChannels::RecursivePixelReader as RecursivePixelReader>::RecursiveChannelDescriptions as IntoNonRecursive>::NonRecursive) -> PixelStorage,
+ SetPixel: Fn(&mut PixelStorage, Vec2<usize>, Pixel),
+{
+ type Reader = SpecificChannelsReader<
+ PixelStorage, &'s SetPixel,
+ InnerChannels::RecursivePixelReader,
+ Pixel,
+ >;
+
+ fn create_channels_reader(&'s self, header: &Header) -> Result<Self::Reader> {
+ if header.deep { return Err(Error::invalid("`SpecificChannels` does not support deep data yet")) }
+
+ let pixel_reader = self.read_channels.create_recursive_reader(&header.channels)?;
+ let channel_descriptions = pixel_reader.get_descriptions().into_non_recursive();// TODO not call this twice
+
+ let create = &self.create_pixels;
+ let pixel_storage = create(header.layer_size, &channel_descriptions);
+
+ Ok(SpecificChannelsReader {
+ set_pixel: &self.set_pixel,
+ pixel_storage,
+ pixel_reader,
+ px: Default::default()
+ })
+ }
+}
+
+/// The reader that holds the temporary data that is required to read some specified channels.
+#[derive(Copy, Clone, Debug)]
+pub struct SpecificChannelsReader<PixelStorage, SetPixel, PixelReader, Pixel> {
+ set_pixel: SetPixel,
+ pixel_storage: PixelStorage,
+ pixel_reader: PixelReader,
+ px: PhantomData<Pixel>
+}
+
+impl<PixelStorage, SetPixel, PxReader, Pixel>
+ChannelsReader for SpecificChannelsReader<PixelStorage, SetPixel, PxReader, Pixel>
+ where PxReader: RecursivePixelReader,
+ PxReader::RecursivePixel: IntoTuple<Pixel>,
+ PxReader::RecursiveChannelDescriptions: IntoNonRecursive,
+ SetPixel: Fn(&mut PixelStorage, Vec2<usize>, Pixel),
+{
+ type Channels = SpecificChannels<PixelStorage, <PxReader::RecursiveChannelDescriptions as IntoNonRecursive>::NonRecursive>;
+
+ fn filter_block(&self, tile: TileCoordinates) -> bool { tile.is_largest_resolution_level() } // TODO all levels
+
+ fn read_block(&mut self, header: &Header, block: UncompressedBlock) -> UnitResult {
+ let mut pixels = vec![PxReader::RecursivePixel::default(); block.index.pixel_size.width()]; // TODO allocate once in self
+
+ let byte_lines = block.data.chunks_exact(header.channels.bytes_per_pixel * block.index.pixel_size.width());
+ debug_assert_eq!(byte_lines.len(), block.index.pixel_size.height(), "invalid block lines split");
+
+ for (y_offset, line_bytes) in byte_lines.enumerate() { // TODO sampling
+ // this two-step copy method should be very cache friendly in theory, and also reduce sample_type lookup count
+ self.pixel_reader.read_pixels(line_bytes, &mut pixels, |px| px);
+
+ for (x_offset, pixel) in pixels.iter().enumerate() {
+ let set_pixel = &self.set_pixel;
+ set_pixel(&mut self.pixel_storage, block.index.pixel_position + Vec2(x_offset, y_offset), pixel.into_tuple());
+ }
+ }
+
+ Ok(())
+ }
+
+ fn into_channels(self) -> Self::Channels {
+ SpecificChannels { channels: self.pixel_reader.get_descriptions().into_non_recursive(), pixels: self.pixel_storage }
+ }
+}
+
+
+/// Read zero channels from an image. Call `required` or `optional` on this object
+/// to read as many channels as desired.
+pub type ReadZeroChannels = NoneMore;
+
+impl ReadSpecificChannel for NoneMore {
+ type RecursivePixelReader = NoneMore;
+ fn create_recursive_reader(&self, _: &ChannelList) -> Result<Self::RecursivePixelReader> { Ok(NoneMore) }
+}
+
+impl<DefaultSample, ReadChannels> ReadSpecificChannel for ReadOptionalChannel<ReadChannels, DefaultSample>
+ where ReadChannels: ReadSpecificChannel, DefaultSample: FromNativeSample + 'static,
+{
+ type RecursivePixelReader = Recursive<ReadChannels::RecursivePixelReader, OptionalSampleReader<DefaultSample>>;
+
+ fn create_recursive_reader(&self, channels: &ChannelList) -> Result<Self::RecursivePixelReader> {
+ debug_assert!(self.previous_channels.already_contains(&self.channel_name).not(), "duplicate channel name: {}", self.channel_name);
+
+ let inner_samples_reader = self.previous_channels.create_recursive_reader(channels)?;
+ let reader = channels.channels_with_byte_offset()
+ .find(|(_, channel)| channel.name == self.channel_name)
+ .map(|(channel_byte_offset, channel)| SampleReader {
+ channel_byte_offset, channel: channel.clone(),
+ px: Default::default()
+ });
+
+ Ok(Recursive::new(inner_samples_reader, OptionalSampleReader {
+ reader, default_sample: self.default_sample,
+ }))
+ }
+}
+
+impl<Sample, ReadChannels> ReadSpecificChannel for ReadRequiredChannel<ReadChannels, Sample>
+ where ReadChannels: ReadSpecificChannel, Sample: FromNativeSample + 'static
+{
+ type RecursivePixelReader = Recursive<ReadChannels::RecursivePixelReader, SampleReader<Sample>>;
+
+ fn create_recursive_reader(&self, channels: &ChannelList) -> Result<Self::RecursivePixelReader> {
+ let previous_samples_reader = self.previous_channels.create_recursive_reader(channels)?;
+ let (channel_byte_offset, channel) = channels.channels_with_byte_offset()
+ .find(|(_, channel)| channel.name == self.channel_name)
+ .ok_or_else(|| Error::invalid(format!(
+ "layer does not contain all of your specified channels (`{}` is missing)",
+ self.channel_name
+ )))?;
+
+ Ok(Recursive::new(previous_samples_reader, SampleReader { channel_byte_offset, channel: channel.clone(), px: Default::default() }))
+ }
+}
+
+/// Reader for a single channel. Generic over the concrete sample type (f16, f32, u32).
+#[derive(Clone, Debug)]
+pub struct SampleReader<Sample> {
+
+    /// The byte offset of this channel within a single pixel.
+    /// Must be multiplied with the line width (in pixels) to locate this channel's samples within a line.
+ channel_byte_offset: usize,
+
+ channel: ChannelDescription,
+ px: PhantomData<Sample>
+}
+
+/// Reader for a single channel. Generic over the concrete sample type (f16, f32, u32).
+/// Can also skip reading a channel if it could not be found in the image.
+#[derive(Clone, Debug)]
+pub struct OptionalSampleReader<DefaultSample> {
+ reader: Option<SampleReader<DefaultSample>>,
+ default_sample: DefaultSample,
+}
+
+impl<Sample: FromNativeSample> SampleReader<Sample> {
+ fn read_own_samples<'s, FullPixel>(
+ &self, bytes: &'s[u8], pixels: &mut [FullPixel],
+ get_sample: impl Fn(&mut FullPixel) -> &mut Sample
+ ){
+ let start_index = pixels.len() * self.channel_byte_offset;
+ let byte_count = pixels.len() * self.channel.sample_type.bytes_per_sample();
+ let mut own_bytes_reader = &mut &bytes[start_index .. start_index + byte_count]; // TODO check block size somewhere
+ let mut samples_out = pixels.iter_mut().map(|pixel| get_sample(pixel));
+
+ // match the type once for the whole line, not on every single sample
+ match self.channel.sample_type {
+ SampleType::F16 => read_and_convert_all_samples_batched(
+ &mut own_bytes_reader, &mut samples_out,
+ Sample::from_f16s
+ ),
+
+ SampleType::F32 => read_and_convert_all_samples_batched(
+ &mut own_bytes_reader, &mut samples_out,
+ Sample::from_f32s
+ ),
+
+ SampleType::U32 => read_and_convert_all_samples_batched(
+ &mut own_bytes_reader, &mut samples_out,
+ Sample::from_u32s
+ ),
+ }
+
+ debug_assert!(samples_out.next().is_none(), "not all samples have been converted");
+ debug_assert!(own_bytes_reader.is_empty(), "bytes left after reading all samples");
+ }
+}
+
+
+/// Does the same as `convert_batch(in_bytes.chunks().map(From::from_bytes))`, but vectorized.
+/// Reads the samples for one line, using the sample type specified in the file,
+/// and then converts those to the desired sample types.
+/// Uses batches to allow vectorization, converting multiple values with one instruction.
+fn read_and_convert_all_samples_batched<'t, From, To>(
+ mut in_bytes: impl Read,
+ out_samples: &mut impl ExactSizeIterator<Item=&'t mut To>,
+ convert_batch: fn(&[From], &mut [To])
+) where From: Data + Default + Copy, To: 't + Default + Copy
+{
+ // this is not a global! why is this warning triggered?
+ #[allow(non_upper_case_globals)]
+ const batch_size: usize = 16;
+
+ let total_sample_count = out_samples.len();
+ let batch_count = total_sample_count / batch_size;
+ let remaining_samples_count = total_sample_count % batch_size;
+
+ let len_error_msg = "sample count was miscalculated";
+ let byte_error_msg = "error when reading from in-memory slice";
+
+ // write samples from a given slice to the output iterator. should be inlined.
+ let output_n_samples = &mut move |samples: &[To]| {
+ for converted_sample in samples {
+ *out_samples.next().expect(len_error_msg) = *converted_sample;
+ }
+ };
+
+ // read samples from the byte source into a given slice. should be inlined.
+ // todo: use #[inline] when available
+ // error[E0658]: attributes on expressions are experimental,
+ // see issue #15701 <https://github.com/rust-lang/rust/issues/15701> for more information
+ let read_n_samples = &mut move |samples: &mut [From]| {
+ Data::read_slice(&mut in_bytes, samples).expect(byte_error_msg);
+ };
+
+ // temporary arrays with fixed size, operations should be vectorized within these arrays
+ let mut source_samples_batch: [From; batch_size] = Default::default();
+ let mut desired_samples_batch: [To; batch_size] = Default::default();
+
+ // first convert all whole batches, size statically known to be 16 element arrays
+ for _ in 0 .. batch_count {
+ read_n_samples(&mut source_samples_batch);
+ convert_batch(source_samples_batch.as_slice(), desired_samples_batch.as_mut_slice());
+ output_n_samples(&desired_samples_batch);
+ }
+
+ // then convert a partial remaining batch, size known only at runtime
+ if remaining_samples_count != 0 {
+ let source_samples_batch = &mut source_samples_batch[..remaining_samples_count];
+ let desired_samples_batch = &mut desired_samples_batch[..remaining_samples_count];
+
+ read_n_samples(source_samples_batch);
+ convert_batch(source_samples_batch, desired_samples_batch);
+ output_n_samples(desired_samples_batch);
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn equals_naive_f32(){
+ for total_array_size in [3, 7, 30, 41, 120, 10_423] {
+ let input_f32s = (0..total_array_size).map(|_| rand::random::<f32>()).collect::<Vec<f32>>();
+ let in_f32s_bytes = input_f32s.iter().cloned().flat_map(f32::to_le_bytes).collect::<Vec<u8>>();
+
+ let mut out_f16_samples_batched = vec![
+ f16::from_f32(rand::random::<f32>());
+ total_array_size
+ ];
+
+ read_and_convert_all_samples_batched(
+ &mut in_f32s_bytes.as_slice(),
+ &mut out_f16_samples_batched.iter_mut(),
+ f16::from_f32s
+ );
+
+ let out_f16_samples_naive = input_f32s.iter()
+ .cloned().map(f16::from_f32);
+
+ assert!(out_f16_samples_naive.eq(out_f16_samples_batched));
+ }
+ }
+}
+
+
+impl RecursivePixelReader for NoneMore {
+ type RecursiveChannelDescriptions = NoneMore;
+ fn get_descriptions(&self) -> Self::RecursiveChannelDescriptions { NoneMore }
+
+ type RecursivePixel = NoneMore;
+
+ fn read_pixels<'s, FullPixel>(
+ &self, _: &'s[u8], _: &mut [FullPixel],
+ _: impl Fn(&mut FullPixel) -> &mut NoneMore
+ ){}
+}
+
+impl<Sample, InnerReader: RecursivePixelReader>
+ RecursivePixelReader
+ for Recursive<InnerReader, SampleReader<Sample>>
+ where Sample: FromNativeSample + 'static
+{
+ type RecursiveChannelDescriptions = Recursive<InnerReader::RecursiveChannelDescriptions, ChannelDescription>;
+ fn get_descriptions(&self) -> Self::RecursiveChannelDescriptions { Recursive::new(self.inner.get_descriptions(), self.value.channel.clone()) }
+
+ type RecursivePixel = Recursive<InnerReader::RecursivePixel, Sample>;
+
+ fn read_pixels<'s, FullPixel>(
+ &self, bytes: &'s[u8], pixels: &mut [FullPixel],
+ get_pixel: impl Fn(&mut FullPixel) -> &mut Self::RecursivePixel
+ ) {
+ self.value.read_own_samples(bytes, pixels, |px| &mut get_pixel(px).value);
+ self.inner.read_pixels(bytes, pixels, |px| &mut get_pixel(px).inner);
+ }
+}
+
+impl<Sample, InnerReader: RecursivePixelReader>
+RecursivePixelReader
+for Recursive<InnerReader, OptionalSampleReader<Sample>>
+ where Sample: FromNativeSample + 'static
+{
+ type RecursiveChannelDescriptions = Recursive<InnerReader::RecursiveChannelDescriptions, Option<ChannelDescription>>;
+ fn get_descriptions(&self) -> Self::RecursiveChannelDescriptions { Recursive::new(
+ self.inner.get_descriptions(), self.value.reader.as_ref().map(|reader| reader.channel.clone())
+ ) }
+
+ type RecursivePixel = Recursive<InnerReader::RecursivePixel, Sample>;
+
+ fn read_pixels<'s, FullPixel>(
+ &self, bytes: &'s[u8], pixels: &mut [FullPixel],
+ get_pixel: impl Fn(&mut FullPixel) -> &mut Self::RecursivePixel
+ ) {
+ if let Some(reader) = &self.value.reader {
+ reader.read_own_samples(bytes, pixels, |px| &mut get_pixel(px).value);
+ }
+ else {
+ // if this channel is optional and was not found in the file, fill the default sample
+ for pixel in pixels.iter_mut() {
+ get_pixel(pixel).value = self.value.default_sample;
+ }
+ }
+
+ self.inner.read_pixels(bytes, pixels, |px| &mut get_pixel(px).inner);
+ }
+}
+
+
diff --git a/vendor/exr/src/image/recursive.rs b/vendor/exr/src/image/recursive.rs
new file mode 100644
index 0000000..25a980f
--- /dev/null
+++ b/vendor/exr/src/image/recursive.rs
@@ -0,0 +1,178 @@
+//! A generic wrapper which can be used to represent recursive types.
+//! Supports conversion from and to tuples of the same size.
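+//!
+//! A small sketch of the intended conversions (assuming this module is reachable
+//! as `exr::image::recursive`):
+//!
+//! ```
+//! use exr::image::recursive::*;
+//!
+//! // a tuple is converted into a nested `Recursive` chain, and back into a tuple
+//! let recursive = (1_u32, 2.0_f32).into_recursive();
+//! assert_eq!(recursive, Recursive::new(Recursive::new(NoneMore, 1_u32), 2.0_f32));
+//! assert_eq!(recursive.into_non_recursive(), (1_u32, 2.0_f32));
+//! ```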
+
+/// No more recursion. Can be used within any `Recursive<NoneMore, YourValue>` type.
+#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
+pub struct NoneMore;
+
+/// A recursive type-level linked list of `Value` entries.
+/// Mainly used to represent an arbitrary number of channels.
+/// The recursive architecture removes the need to implement traits for many different tuples.
+#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
+pub struct Recursive<Inner, Value> {
+ /// The remaining values of this linked list,
+ /// probably either `NoneMore` or another instance of the same `Recursive<Inner - 1, Value>`.
+ pub inner: Inner,
+
+    /// The value stored in this node of the linked list.
+ pub value: Value,
+}
+
+impl<Inner, Value> Recursive<Inner, Value> {
+ /// Create a new recursive type. Equivalent to the manual constructor, but less verbose.
+ pub fn new(inner: Inner, value: Value) -> Self { Self { inner, value } }
+}
+
+/// Convert this recursive type into a tuple.
+/// This is convenient, as a tuple requires less typing than the equivalent nested `Recursive` type.
+/// A type might or might not be convertible to the specified `Tuple` type.
+pub trait IntoTuple<Tuple> {
+ /// Convert this recursive type to a nice tuple.
+ fn into_tuple(self) -> Tuple;
+}
+
+/// Convert this recursive type into a tuple.
+/// This is convenient, as a tuple requires less typing than the equivalent nested `Recursive` type.
+/// A type will be converted to the specified `Self::NonRecursive` type.
+pub trait IntoNonRecursive {
+ /// The resulting tuple type.
+ type NonRecursive;
+
+ /// Convert this recursive type to a nice tuple.
+ fn into_non_recursive(self) -> Self::NonRecursive;
+}
+
+/// Create a recursive type from this tuple.
+pub trait IntoRecursive {
+ /// The recursive type resulting from this tuple.
+ type Recursive;
+
+ /// Create a recursive type from this tuple.
+ fn into_recursive(self) -> Self::Recursive;
+}
+
+impl IntoRecursive for NoneMore {
+ type Recursive = Self;
+ fn into_recursive(self) -> Self::Recursive { self }
+}
+
+impl<Inner: IntoRecursive, Value> IntoRecursive for Recursive<Inner, Value> {
+ type Recursive = Recursive<Inner::Recursive, Value>;
+ fn into_recursive(self) -> Self::Recursive { Recursive::new(self.inner.into_recursive(), self.value) }
+}
+
+// Automatically implement IntoTuple so we have to generate less code in the macros
+impl<I: IntoNonRecursive> IntoTuple<I::NonRecursive> for I {
+ fn into_tuple(self) -> <I as IntoNonRecursive>::NonRecursive {
+ self.into_non_recursive()
+ }
+}
+
+// Implement the traits for the empty tuple; the macro does not handle that case
+impl IntoRecursive for () {
+ type Recursive = NoneMore;
+ fn into_recursive(self) -> Self::Recursive { NoneMore }
+}
+
+impl IntoNonRecursive for NoneMore {
+ type NonRecursive = ();
+
+ fn into_non_recursive(self) -> Self::NonRecursive {
+ ()
+ }
+}
+
+/// Generates the recursive type corresponding to this tuple:
+/// ```nocheck
+/// gen_recursive_type!(A, B, C)
+/// => Recursive<Recursive<Recursive<NoneMore, A>, B>, C>
+/// ```
+macro_rules! gen_recursive_type {
+ () => { NoneMore };
+ ($last:ident $(,$not_last:ident)*) => {
+ Recursive<gen_recursive_type!($($not_last),*), $last>
+ };
+}
+
+/// Generates the recursive value corresponding to the given indices:
+/// ```nocheck
+/// gen_recursive_value(self; 1, 0)
+/// => Recursive { inner: Recursive { inner: NoneMore, value: self.0 }, value: self.1 }
+/// ```
+macro_rules! gen_recursive_value {
+ ($self:ident;) => { NoneMore };
+ ($self:ident; $last:tt $(,$not_last:tt)*) => {
+ Recursive { inner: gen_recursive_value!($self; $($not_last),*), value: $self.$last }
+ };
+}
+
+/// Generates the into_tuple value corresponding to the given type names:
+/// ```nocheck
+/// gen_tuple_value(self; A, B, C)
+/// => (self.inner.inner.value, self.inner.value, self.value)
+/// ```
+macro_rules! gen_tuple_value {
+ ($self:ident; $($all:ident),* ) => {
+ gen_tuple_value!(@ $self; (); $($all),* )
+ };
+
+ (@ $self:ident; ($($state:expr),*);) => { ($($state .value,)*) };
+ (@ $self:ident; ($($state:expr),*); $last:ident $(,$not_last:ident)* ) => {
+ gen_tuple_value!(@ $self; ($($state .inner,)* $self); $($not_last),* )
+ };
+}
+
+/// Generate the trait implementations given a sequence of type names in both directions and the indices backwards:
+/// ```nocheck
+/// generate_single(A, B, C; C, B, A; 2, 1, 0)
+/// ```
+macro_rules! generate_single {
+ ( $($name_fwd:ident),* ; $($name_back:ident),* ; $($index_back:tt),*) => {
+ impl<$($name_fwd),*> IntoNonRecursive for gen_recursive_type!($($name_back),*) {
+ type NonRecursive = ($($name_fwd,)*);
+ fn into_non_recursive(self) -> Self::NonRecursive {
+ gen_tuple_value!(self; $($name_fwd),*)
+ }
+ }
+
+ impl<$($name_fwd),*> IntoRecursive for ($($name_fwd,)*) {
+ type Recursive = gen_recursive_type!($($name_back),*);
+ fn into_recursive(self) -> Self::Recursive {
+ gen_recursive_value!(self; $($index_back),*)
+ }
+ }
+ };
+}
+
+generate_single!(A; A; 0);
+generate_single!(A,B; B,A; 1,0);
+generate_single!(A,B,C; C,B,A; 2,1,0);
+generate_single!(A,B,C,D; D,C,B,A; 3,2,1,0);
+generate_single!(A,B,C,D,E; E,D,C,B,A; 4,3,2,1,0);
+generate_single!(A,B,C,D,E,F; F,E,D,C,B,A; 5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G; G,F,E,D,C,B,A; 6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H; H,G,F,E,D,C,B,A; 7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I; I,H,G,F,E,D,C,B,A; 8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J; J,I,H,G,F,E,D,C,B,A; 9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K; K,J,I,H,G,F,E,D,C,B,A; 10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L; L,K,J,I,H,G,F,E,D,C,B,A; 11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M; M,L,K,J,I,H,G,F,E,D,C,B,A; 12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N; N,M,L,K,J,I,H,G,F,E,D,C,B,A; 13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O; O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P; P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q; Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R; R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S; S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T; T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U; U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V; V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W; W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X; X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y; Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z; Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,A1; A1,Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,A1,B1; B1,A1,Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,A1,B1,C1; C1,B1,A1,Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,A1,B1,C1,D1; D1,C1,B1,A1,Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,A1,B1,C1,D1,E1; E1,D1,C1,B1,A1,Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
+generate_single!(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,A1,B1,C1,D1,E1,F1; F1,E1,D1,C1,B1,A1,Z,Y,X,W,V,U,T,S,R,Q,P,O,N,M,L,K,J,I,H,G,F,E,D,C,B,A; 31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
diff --git a/vendor/exr/src/image/write/channels.rs b/vendor/exr/src/image/write/channels.rs
new file mode 100644
index 0000000..2450f09
--- /dev/null
+++ b/vendor/exr/src/image/write/channels.rs
@@ -0,0 +1,407 @@
+//! How to write arbitrary channels and rgb channels.
+
+use crate::prelude::*;
+use crate::io::*;
+use crate::math::*;
+use crate::meta::{header::*, attribute::*};
+use crate::block::*;
+use crate::image::recursive::*;
+use crate::block::samples::*;
+use crate::image::write::samples::*;
+
+use std::marker::PhantomData;
+
+
+/// Enables an image containing this list of channels to be written to a file.
+pub trait WritableChannels<'slf> {
+
+    /// Generate the file meta data for this list of channels
+ fn infer_channel_list(&self) -> ChannelList;
+
+    /// Generate the file meta data describing whether and how resolution levels should be stored in the file
+ fn infer_level_modes(&self) -> (LevelMode, RoundingMode);
+
+ /// The type of temporary writer
+ type Writer: ChannelsWriter;
+
+ /// Create a temporary writer for this list of channels
+ fn create_writer(&'slf self, header: &Header) -> Self::Writer;
+}
+
+/// A temporary writer for a list of channels
+pub trait ChannelsWriter: Sync {
+
+ /// Deliver a block of pixels, containing all channel data, to be stored in the file
+ fn extract_uncompressed_block(&self, header: &Header, block: BlockIndex) -> Vec<u8>; // TODO return uncompressed block?
+}
+
+
+/// Define how to get a pixel from your custom pixel storage.
+/// Can be a closure of type [`Sync + Fn(Vec2<usize>) -> YourPixel`].
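+///
+/// For example, a sketch of a closure-based pixel source used for writing;
+/// the resolution, pixel values and file name are only examples:
+///
+/// ```no_run
+/// use exr::prelude::*;
+///
+/// // the closure is queried once per pixel position and returns an rgba tuple
+/// let channels = SpecificChannels::rgba(|position: Vec2<usize>| (
+///     position.x() as f32 / 2048.0, // red
+///     position.y() as f32 / 2048.0, // green
+///     0.5_f32,                      // blue
+///     1.0_f32,                      // alpha
+/// ));
+///
+/// Image::from_channels((2048, 2048), channels)
+///     .write().to_file("gradient.exr").unwrap();
+/// ```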
+pub trait GetPixel: Sync {
+
+ /// The pixel tuple containing `f32`, `f16`, `u32` and `Sample` values.
+ /// The length of the tuple must match the number of channels in the image.
+ type Pixel;
+
+ /// Inspect a single pixel at the requested position.
+ /// Will be called exactly once for each pixel in the image.
+ /// The position will not exceed the image dimensions.
+ /// Might be called from multiple threads at the same time.
+ fn get_pixel(&self, position: Vec2<usize>) -> Self::Pixel;
+}
+
+impl<F, P> GetPixel for F where F: Sync + Fn(Vec2<usize>) -> P {
+ type Pixel = P;
+ fn get_pixel(&self, position: Vec2<usize>) -> P { self(position) }
+}
+
+impl<'samples, Samples> WritableChannels<'samples> for AnyChannels<Samples>
+ where Samples: 'samples + WritableSamples<'samples>
+{
+ fn infer_channel_list(&self) -> ChannelList {
+ ChannelList::new(self.list.iter().map(|channel| ChannelDescription {
+ name: channel.name.clone(),
+ sample_type: channel.sample_data.sample_type(),
+ quantize_linearly: channel.quantize_linearly,
+ sampling: channel.sampling
+ }).collect())
+ }
+
+ fn infer_level_modes(&self) -> (LevelMode, RoundingMode) {
+ let mode = self.list.iter().next().expect("zero channels in list").sample_data.infer_level_modes();
+
+ debug_assert!(
+ std::iter::repeat(mode).zip(self.list.iter().skip(1))
+ .all(|(first, other)| other.sample_data.infer_level_modes() == first),
+
+ "level mode must be the same across all levels (do not nest resolution levels!)"
+ );
+
+ mode
+ }
+
+ type Writer = AnyChannelsWriter<Samples::Writer>;
+ fn create_writer(&'samples self, header: &Header) -> Self::Writer {
+ let channels = self.list.iter()
+ .map(|chan| chan.sample_data.create_samples_writer(header))
+ .collect();
+
+ AnyChannelsWriter { channels }
+ }
+}
+
+/// A temporary writer for an arbitrary list of channels
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct AnyChannelsWriter<SamplesWriter> {
+ channels: SmallVec<[SamplesWriter; 4]>
+}
+
+impl<Samples> ChannelsWriter for AnyChannelsWriter<Samples> where Samples: SamplesWriter {
+ fn extract_uncompressed_block(&self, header: &Header, block_index: BlockIndex) -> Vec<u8> {
+ UncompressedBlock::collect_block_data_from_lines(&header.channels, block_index, |line_ref| {
+ self.channels[line_ref.location.channel].extract_line(line_ref)
+ })
+ }
+}
+
+
+
+
+
+
+impl<'c, Channels, Storage>
+WritableChannels<'c> for SpecificChannels<Storage, Channels>
+where
+ Storage: 'c + GetPixel,
+ Storage::Pixel: IntoRecursive,
+ Channels: 'c + Sync + Clone + IntoRecursive,
+ <Channels as IntoRecursive>::Recursive: WritableChannelsDescription<<Storage::Pixel as IntoRecursive>::Recursive>,
+{
+ fn infer_channel_list(&self) -> ChannelList {
+ let mut vec = self.channels.clone().into_recursive().channel_descriptions_list();
+ vec.sort_unstable_by_key(|channel:&ChannelDescription| channel.name.clone()); // TODO no clone?
+
+ debug_assert!(
+ // check for equal neighbors in sorted vec
+ vec.iter().zip(vec.iter().skip(1)).all(|(prev, next)| prev.name != next.name),
+ "specific channels contain duplicate channel names"
+ );
+
+ ChannelList::new(vec)
+ }
+
+ fn infer_level_modes(&self) -> (LevelMode, RoundingMode) {
+ (LevelMode::Singular, RoundingMode::Down) // TODO
+ }
+
+ type Writer = SpecificChannelsWriter<
+ 'c,
+ <<Channels as IntoRecursive>::Recursive as WritableChannelsDescription<<Storage::Pixel as IntoRecursive>::Recursive>>::RecursiveWriter,
+ Storage,
+ Channels
+ >;
+
+ fn create_writer(&'c self, header: &Header) -> Self::Writer {
+ SpecificChannelsWriter {
+ channels: self,
+ recursive_channel_writer: self.channels.clone().into_recursive().create_recursive_writer(&header.channels),
+ }
+ }
+}
+
+
+
+/// A temporary writer for a layer of specific channels, some of which may be optional
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct SpecificChannelsWriter<'channels, PixelWriter, Storage, Channels> {
+ channels: &'channels SpecificChannels<Storage, Channels>, // TODO this need not be a reference?? impl writer for specific_channels directly?
+ recursive_channel_writer: PixelWriter,
+}
+
+
+impl<'channels, PxWriter, Storage, Channels> ChannelsWriter
+for SpecificChannelsWriter<'channels, PxWriter, Storage, Channels>
+ where
+ Channels: Sync,
+ Storage: GetPixel,
+ Storage::Pixel: IntoRecursive,
+ PxWriter: Sync + RecursivePixelWriter<<Storage::Pixel as IntoRecursive>::Recursive>,
+{
+ fn extract_uncompressed_block(&self, header: &Header, block_index: BlockIndex) -> Vec<u8> {
+ let block_bytes = block_index.pixel_size.area() * header.channels.bytes_per_pixel;
+ let mut block_bytes = vec![0_u8; block_bytes];
+
+ let width = block_index.pixel_size.0;
+ let line_bytes = width * header.channels.bytes_per_pixel;
+ let byte_lines = block_bytes.chunks_exact_mut(line_bytes);
+ assert_eq!(byte_lines.len(), block_index.pixel_size.height(), "invalid block line splits");
+
+ //dbg!(width, line_bytes, header.channels.bytes_per_pixel, byte_lines.len());
+
+ let mut pixel_line = Vec::with_capacity(width);
+
+ for (y, line_bytes) in byte_lines.enumerate() {
+ pixel_line.clear();
+ pixel_line.extend((0 .. width).map(|x|
+ self.channels.pixels.get_pixel(block_index.pixel_position + Vec2(x, y)).into_recursive()
+ ));
+
+ self.recursive_channel_writer.write_pixels(line_bytes, pixel_line.as_slice(), |px| px);
+ }
+
+ block_bytes
+ }
+}
+
+/// A tuple containing either `ChannelDescription` or `Option<ChannelDescription>` entries.
+/// Use an `Option` if you want to dynamically omit a single channel (probably only for roundtrip tests).
+/// The number of entries must match the number of channels.
+pub trait WritableChannelsDescription<Pixel>: Sync {
+
+ /// A type that has a recursive entry for each channel in the image,
+ /// which must accept the desired pixel type.
+ type RecursiveWriter: RecursivePixelWriter<Pixel>;
+
+ /// Create the temporary writer, accepting the sorted list of channels from `channel_descriptions_list`.
+ fn create_recursive_writer(&self, channels: &ChannelList) -> Self::RecursiveWriter;
+
+ /// Return all the channels that should actually end up in the image, in any order.
+ fn channel_descriptions_list(&self) -> SmallVec<[ChannelDescription; 5]>;
+}
+
+impl WritableChannelsDescription<NoneMore> for NoneMore {
+ type RecursiveWriter = NoneMore;
+ fn create_recursive_writer(&self, _: &ChannelList) -> Self::RecursiveWriter { NoneMore }
+ fn channel_descriptions_list(&self) -> SmallVec<[ChannelDescription; 5]> { SmallVec::new() }
+}
+
+impl<InnerDescriptions, InnerPixel, Sample: IntoNativeSample>
+ WritableChannelsDescription<Recursive<InnerPixel, Sample>>
+ for Recursive<InnerDescriptions, ChannelDescription>
+ where InnerDescriptions: WritableChannelsDescription<InnerPixel>
+{
+ type RecursiveWriter = RecursiveWriter<InnerDescriptions::RecursiveWriter, Sample>;
+
+ fn create_recursive_writer(&self, channels: &ChannelList) -> Self::RecursiveWriter {
+ // this linear lookup is required because the order of the channels changed, due to alphabetical sorting
+ let (start_byte_offset, target_sample_type) = channels.channels_with_byte_offset()
+ .find(|(_offset, channel)| channel.name == self.value.name)
+ .map(|(offset, channel)| (offset, channel.sample_type))
+ .expect("a channel has not been put into channel list");
+
+ Recursive::new(self.inner.create_recursive_writer(channels), SampleWriter {
+ start_byte_offset, target_sample_type,
+ px: PhantomData::default()
+ })
+ }
+
+ fn channel_descriptions_list(&self) -> SmallVec<[ChannelDescription; 5]> {
+ let mut inner_list = self.inner.channel_descriptions_list();
+ inner_list.push(self.value.clone());
+ inner_list
+ }
+}
+
+impl<InnerDescriptions, InnerPixel, Sample: IntoNativeSample>
+WritableChannelsDescription<Recursive<InnerPixel, Sample>>
+for Recursive<InnerDescriptions, Option<ChannelDescription>>
+ where InnerDescriptions: WritableChannelsDescription<InnerPixel>
+{
+ type RecursiveWriter = OptionalRecursiveWriter<InnerDescriptions::RecursiveWriter, Sample>;
+
+ fn create_recursive_writer(&self, channels: &ChannelList) -> Self::RecursiveWriter {
+ // this linear lookup is required because the order of the channels changed, due to alphabetical sorting
+
+ let channel = self.value.as_ref().map(|required_channel|
+ channels.channels_with_byte_offset()
+ .find(|(_offset, channel)| channel == &required_channel)
+ .map(|(offset, channel)| (offset, channel.sample_type))
+ .expect("a channel has not been put into channel list")
+ );
+
+ Recursive::new(
+ self.inner.create_recursive_writer(channels),
+ channel.map(|(start_byte_offset, target_sample_type)| SampleWriter {
+ start_byte_offset, target_sample_type,
+ px: PhantomData::default(),
+ })
+ )
+ }
+
+ fn channel_descriptions_list(&self) -> SmallVec<[ChannelDescription; 5]> {
+ let mut inner_list = self.inner.channel_descriptions_list();
+ if let Some(value) = &self.value { inner_list.push(value.clone()); }
+ inner_list
+ }
+}
+
+/// Write pixels to a slice of bytes. The top level writer contains all the other channels,
+/// the innermost writer is `NoneMore`.
+pub trait RecursivePixelWriter<Pixel>: Sync {
+
+ /// Write pixels to a slice of bytes. Recursively do this for all channels.
+ fn write_pixels<FullPixel>(&self, bytes: &mut [u8], pixels: &[FullPixel], get_pixel: impl Fn(&FullPixel) -> &Pixel);
+}
+
+type RecursiveWriter<Inner, Sample> = Recursive<Inner, SampleWriter<Sample>>;
+type OptionalRecursiveWriter<Inner, Sample> = Recursive<Inner, Option<SampleWriter<Sample>>>;
+
+/// Write the pixels of a single channel, unconditionally. Generic over the concrete sample type (f16, f32, u32).
+#[derive(Debug, Clone)]
+pub struct SampleWriter<Sample> {
+ target_sample_type: SampleType,
+ start_byte_offset: usize,
+ px: PhantomData<Sample>,
+}
+
+impl<Sample> SampleWriter<Sample> where Sample: IntoNativeSample {
+ fn write_own_samples(&self, bytes: &mut [u8], samples: impl ExactSizeIterator<Item=Sample>) {
+ let byte_start_index = samples.len() * self.start_byte_offset;
+ let byte_count = samples.len() * self.target_sample_type.bytes_per_sample();
+ let ref mut byte_writer = &mut bytes[byte_start_index..byte_start_index + byte_count];
+
+ let write_error_msg = "invalid memory buffer length when writing";
+
+ // match outside the loop to avoid matching on every single sample
+ match self.target_sample_type {
+ // TODO does this boil down to a `memcpy` where the sample type equals the type parameter?
+ SampleType::F16 => for sample in samples { sample.to_f16().write(byte_writer).expect(write_error_msg); },
+ SampleType::F32 => for sample in samples { sample.to_f32().write(byte_writer).expect(write_error_msg); },
+ SampleType::U32 => for sample in samples { sample.to_u32().write(byte_writer).expect(write_error_msg); },
+ };
+
+ debug_assert!(byte_writer.is_empty(), "all samples are written, but more were expected");
+ }
+}
+
+impl RecursivePixelWriter<NoneMore> for NoneMore {
+ fn write_pixels<FullPixel>(&self, _: &mut [u8], _: &[FullPixel], _: impl Fn(&FullPixel) -> &NoneMore) {}
+}
+
+impl<Inner, InnerPixel, Sample: IntoNativeSample>
+ RecursivePixelWriter<Recursive<InnerPixel, Sample>>
+ for RecursiveWriter<Inner, Sample>
+ where Inner: RecursivePixelWriter<InnerPixel>
+{
+ // TODO impl exact size iterator <item = Self::Pixel>
+ fn write_pixels<FullPixel>(&self, bytes: &mut [u8], pixels: &[FullPixel], get_pixel: impl Fn(&FullPixel) -> &Recursive<InnerPixel, Sample>){
+ self.value.write_own_samples(bytes, pixels.iter().map(|px| get_pixel(px).value));
+ self.inner.write_pixels(bytes, pixels, |px| &get_pixel(px).inner);
+ }
+}
+
+impl<Inner, InnerPixel, Sample> RecursivePixelWriter<Recursive<InnerPixel, Sample>>
+ for OptionalRecursiveWriter<Inner, Sample>
+ where Inner: RecursivePixelWriter<InnerPixel>,
+ Sample: IntoNativeSample
+{
+ fn write_pixels<FullPixel>(&self, bytes: &mut [u8], pixels: &[FullPixel], get_pixel: impl Fn(&FullPixel) -> &Recursive<InnerPixel, Sample>) {
+ if let Some(writer) = &self.value {
+ writer.write_own_samples(bytes, pixels.iter().map(|px| get_pixel(px).value));
+ }
+
+ self.inner.write_pixels(bytes, pixels, |px| &get_pixel(px).inner);
+ }
+}
+
+
+
+
+
+
+
+#[cfg(test)]
+pub mod test {
+ use crate::image::write::channels::WritableChannels;
+ use crate::image::SpecificChannels;
+ use crate::prelude::{f16};
+ use crate::meta::attribute::{ChannelDescription, SampleType};
+ use crate::image::pixel_vec::PixelVec;
+
+ #[test]
+ fn compiles(){
+ let x = 3_f32;
+ let y = f16::from_f32(4.0);
+ let z = 2_u32;
+ let s = 1.3_f32;
+ let px = (x,y,z,s);
+
+ assert_is_writable_channels(
+ SpecificChannels::rgba(|_pos| px)
+ );
+
+ assert_is_writable_channels(SpecificChannels::rgba(
+ PixelVec::new((3, 2), vec![px, px, px, px, px, px])
+ ));
+
+ let px = (2333_u32, 4_f32);
+ assert_is_writable_channels(
+ SpecificChannels::build()
+ .with_channel("A")
+ .with_channel("C")
+ .with_pixels(PixelVec::new((3, 2), vec![px, px, px, px, px, px]))
+ );
+
+ let px = (3_f32, f16::ONE, 2333_u32, 4_f32);
+ assert_is_writable_channels(SpecificChannels::new(
+ (
+ ChannelDescription::named("x", SampleType::F32),
+ ChannelDescription::named("y", SampleType::F16),
+ Some(ChannelDescription::named("z", SampleType::U32)),
+ Some(ChannelDescription::named("p", SampleType::F32)),
+ ),
+
+ PixelVec::new((3, 2), vec![px, px, px, px, px, px])
+ ));
+
+
+
+ fn assert_is_writable_channels<'s>(_channels: impl WritableChannels<'s>){}
+
+ }
+}
+
+
+
+
diff --git a/vendor/exr/src/image/write/layers.rs b/vendor/exr/src/image/write/layers.rs
new file mode 100644
index 0000000..85648ff
--- /dev/null
+++ b/vendor/exr/src/image/write/layers.rs
@@ -0,0 +1,188 @@
+//! How to write either a single or a list of layers.
+
+use crate::meta::header::{ImageAttributes, Header};
+use crate::meta::{Headers, compute_chunk_count};
+use crate::block::BlockIndex;
+use crate::image::{Layers, Layer};
+use crate::meta::attribute::{TileDescription};
+use crate::prelude::{SmallVec};
+use crate::image::write::channels::{WritableChannels, ChannelsWriter};
+use crate::image::recursive::{Recursive, NoneMore};
+
+/// Enables an image containing this list of layers to be written to a file.
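+///
+/// The writing pipeline uses this trait in two steps: `infer_headers` produces the file
+/// meta data, and `create_writer` then borrows the layers to extract pixel blocks.
+/// A rough sketch (the `FlatImage` value is a placeholder):
+/// ```no_run
+/// # use exr::prelude::*;
+/// # use exr::image::write::layers::WritableLayers;
+/// # let image: FlatImage = unimplemented!();
+/// let headers = image.layer_data.infer_headers(&image.attributes);
+/// let layers_writer = image.layer_data.create_writer(&headers);
+/// ```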
+pub trait WritableLayers<'slf> {
+
+ /// Generate the file meta data for this list of layers
+ fn infer_headers(&self, image_attributes: &ImageAttributes) -> Headers;
+
+ /// The type of temporary writer
+ type Writer: LayersWriter;
+
+ /// Create a temporary writer for this list of layers
+ fn create_writer(&'slf self, headers: &[Header]) -> Self::Writer;
+}
+
+/// A temporary writer for a single layer or a list of layers
+pub trait LayersWriter: Sync {
+
+ /// Deliver a block of pixels from a single layer to be stored in the file
+ fn extract_uncompressed_block(&self, headers: &[Header], block: BlockIndex) -> Vec<u8>;
+}
+
+/// A temporary writer for an arbitrary list of layers
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct AllLayersWriter<ChannelsWriter> {
+ layers: SmallVec<[LayerWriter<ChannelsWriter>; 2]>
+}
+
+/// A temporary writer for a single layer
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct LayerWriter<ChannelsWriter> {
+ channels: ChannelsWriter, // impl ChannelsWriter
+}
+
+// impl for smallvec
+impl<'slf, Channels: 'slf> WritableLayers<'slf> for Layers<Channels> where Channels: WritableChannels<'slf> {
+ fn infer_headers(&self, image_attributes: &ImageAttributes) -> Headers {
+ slice_infer_headers(self.as_slice(), image_attributes)
+ }
+
+ type Writer = AllLayersWriter<Channels::Writer>;
+ fn create_writer(&'slf self, headers: &[Header]) -> Self::Writer {
+ slice_create_writer(self.as_slice(), headers)
+ }
+}
+
+fn slice_infer_headers<'slf, Channels:'slf + WritableChannels<'slf>>(
+ slice: &[Layer<Channels>], image_attributes: &ImageAttributes
+) -> Headers
+{
+ slice.iter().map(|layer| layer.infer_headers(image_attributes).remove(0)).collect() // TODO no array-vs-first
+}
+
+fn slice_create_writer<'slf, Channels:'slf + WritableChannels<'slf>>(
+ slice: &'slf [Layer<Channels>], headers: &[Header]
+) -> AllLayersWriter<Channels::Writer>
+{
+ AllLayersWriter {
+ layers: slice.iter().zip(headers.chunks_exact(1)) // TODO no array-vs-first
+ .map(|(layer, header)| layer.create_writer(header))
+ .collect()
+ }
+}
+
+
+impl<'slf, Channels: WritableChannels<'slf>> WritableLayers<'slf> for Layer<Channels> {
+ fn infer_headers(&self, image_attributes: &ImageAttributes) -> Headers {
+ let blocks = match self.encoding.blocks {
+ crate::image::Blocks::ScanLines => crate::meta::BlockDescription::ScanLines,
+ crate::image::Blocks::Tiles(tile_size) => {
+ let (level_mode, rounding_mode) = self.channel_data.infer_level_modes();
+ crate::meta::BlockDescription::Tiles(TileDescription { level_mode, rounding_mode, tile_size, })
+ },
+ };
+
+ let chunk_count = compute_chunk_count(
+ self.encoding.compression, self.size, blocks
+ );
+
+ let header = Header {
+ channels: self.channel_data.infer_channel_list(),
+ compression: self.encoding.compression,
+
+ blocks,
+ chunk_count,
+
+ line_order: self.encoding.line_order,
+ layer_size: self.size,
+ shared_attributes: image_attributes.clone(),
+ own_attributes: self.attributes.clone(),
+
+
+ deep: false, // TODO deep data
+ deep_data_version: None,
+ max_samples_per_pixel: None,
+ };
+
+ smallvec![ header ] // TODO no array-vs-first
+ }
+
+ type Writer = LayerWriter</*'l,*/ Channels::Writer>;
+ fn create_writer(&'slf self, headers: &[Header]) -> Self::Writer {
+ let channels = self.channel_data
+ .create_writer(headers.first().expect("inferred header error")); // TODO no array-vs-first
+
+ LayerWriter { channels }
+ }
+}
+
+impl<C> LayersWriter for AllLayersWriter<C> where C: ChannelsWriter {
+ fn extract_uncompressed_block(&self, headers: &[Header], block: BlockIndex) -> Vec<u8> {
+ self.layers[block.layer].extract_uncompressed_block(std::slice::from_ref(&headers[block.layer]), block) // TODO no array-vs-first
+ }
+}
+
+impl<C> LayersWriter for LayerWriter<C> where C: ChannelsWriter {
+ fn extract_uncompressed_block(&self, headers: &[Header], block: BlockIndex) -> Vec<u8> {
+ self.channels.extract_uncompressed_block(headers.first().expect("invalid inferred header"), block) // TODO no array-vs-first
+ }
+}
+
+
+
+
+
+impl<'slf> WritableLayers<'slf> for NoneMore {
+ fn infer_headers(&self, _: &ImageAttributes) -> Headers { SmallVec::new() }
+
+ type Writer = NoneMore;
+ fn create_writer(&'slf self, _: &[Header]) -> Self::Writer { NoneMore }
+}
+
+impl<'slf, InnerLayers, Channels> WritableLayers<'slf> for Recursive<InnerLayers, Layer<Channels>>
+ where InnerLayers: WritableLayers<'slf>, Channels: WritableChannels<'slf>
+{
+ fn infer_headers(&self, image_attributes: &ImageAttributes) -> Headers {
+ let mut headers = self.inner.infer_headers(image_attributes);
+ headers.push(self.value.infer_headers(image_attributes).remove(0)); // TODO no unwrap
+ headers
+ }
+
+ type Writer = RecursiveLayersWriter<InnerLayers::Writer, Channels::Writer>;
+
+ fn create_writer(&'slf self, headers: &[Header]) -> Self::Writer {
+ let (own_header, inner_headers) = headers.split_last()
+ .expect("header has not been inferred correctly");
+
+ let layer_index = inner_headers.len();
+ RecursiveLayersWriter {
+ inner: self.inner.create_writer(inner_headers),
+ value: (layer_index, self.value.create_writer(std::slice::from_ref(own_header))) // TODO no slice
+ }
+ }
+}
+
+type RecursiveLayersWriter<InnerLayersWriter, ChannelsWriter> = Recursive<InnerLayersWriter, (usize, LayerWriter<ChannelsWriter>)>;
+
+impl LayersWriter for NoneMore {
+ fn extract_uncompressed_block(&self, _: &[Header], _: BlockIndex) -> Vec<u8> {
+ panic!("recursive length mismatch bug");
+ }
+}
+
+impl<InnerLayersWriter, Channels> LayersWriter for RecursiveLayersWriter<InnerLayersWriter, Channels>
+ where InnerLayersWriter: LayersWriter, Channels: ChannelsWriter
+{
+ fn extract_uncompressed_block(&self, headers: &[Header], block: BlockIndex) -> Vec<u8> {
+ let (layer_index, layer) = &self.value;
+ if *layer_index == block.layer {
+ let header = headers.get(*layer_index).expect("layer index bug");
+ layer.extract_uncompressed_block(std::slice::from_ref(header), block) // TODO no slice?
+ }
+ else {
+ self.inner.extract_uncompressed_block(headers, block)
+ }
+ }
+}
+
+
diff --git a/vendor/exr/src/image/write/mod.rs b/vendor/exr/src/image/write/mod.rs
new file mode 100644
index 0000000..3c20060
--- /dev/null
+++ b/vendor/exr/src/image/write/mod.rs
@@ -0,0 +1,184 @@
+
+//! Write an exr image to a file.
+//!
+//! First, call `my_image.write()`. The resulting value can be customized, like this:
+//! ```no_run
+//! use exr::prelude::*;
+//! # let my_image: FlatImage = unimplemented!();
+//!
+//! my_image.write()
+//! .on_progress(|progress| println!("progress: {:.1}", progress*100.0))
+//! .to_file("image.exr").unwrap();
+//! ```
+//!
+
+pub mod layers;
+pub mod samples;
+pub mod channels;
+
+
+
+use crate::meta::Headers;
+use crate::error::UnitResult;
+use std::io::{Seek, BufWriter};
+use crate::io::Write;
+use crate::image::{Image, ignore_progress, SpecificChannels, IntoSample};
+use crate::image::write::layers::{WritableLayers, LayersWriter};
+use crate::math::Vec2;
+use crate::block::writer::ChunksWriter;
+
+/// An oversimplified function for "just write the damn file already" use cases.
+/// Have a look at the examples to see how you can write an image with more flexibility (it's not that hard).
+/// Use `write_rgb_file` if you do not need an alpha channel.
+///
+/// Each of `R`, `G`, `B` and `A` can be either `f16`, `f32`, `u32`, or `Sample`.
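+///
+/// A minimal sketch of calling this function (file name and colors are placeholders):
+/// ```no_run
+/// # use exr::image::write::write_rgba_file;
+/// // write a 128x64 image where every pixel is the same half-transparent orange
+/// write_rgba_file("minimal_rgba.exr", 128, 64, |_x, _y| (0.8_f32, 0.4_f32, 0.1_f32, 0.5_f32)).unwrap();
+/// ```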
+// TODO explain pixel tuple f32,f16,u32
+pub fn write_rgba_file<R,G,B,A>(
+ path: impl AsRef<std::path::Path>, width: usize, height: usize,
+ colors: impl Sync + Fn(usize, usize) -> (R, G, B, A)
+) -> UnitResult
+ where R: IntoSample, G: IntoSample, B: IntoSample, A: IntoSample,
+{
+ let channels = SpecificChannels::rgba(|Vec2(x,y)| colors(x,y));
+ Image::from_channels((width, height), channels).write().to_file(path)
+}
+
+/// An oversimplified function for "just write the damn file already" use cases.
+/// Have a look at the examples to see how you can write an image with more flexibility (it's not that hard).
+/// Use `write_rgba_file` if you also need to write an alpha channel.
+///
+/// Each of `R`, `G`, and `B` can be either `f16`, `f32`, `u32`, or `Sample`.
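+///
+/// A minimal sketch of calling this function (file name and gradient are placeholders):
+/// ```no_run
+/// # use exr::image::write::write_rgb_file;
+/// // write a 64x64 image with a simple horizontal gradient
+/// write_rgb_file("gradient_rgb.exr", 64, 64, |x, _y| {
+///     let brightness = x as f32 / 64.0;
+///     (brightness, brightness, 0.2_f32)
+/// }).unwrap();
+/// ```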
+// TODO explain pixel tuple f32,f16,u32
+pub fn write_rgb_file<R,G,B>(
+ path: impl AsRef<std::path::Path>, width: usize, height: usize,
+ colors: impl Sync + Fn(usize, usize) -> (R, G, B)
+) -> UnitResult
+ where R: IntoSample, G: IntoSample, B: IntoSample
+{
+ let channels = SpecificChannels::rgb(|Vec2(x,y)| colors(x,y));
+ Image::from_channels((width, height), channels).write().to_file(path)
+}
+
+
+
+/// Enables an image to be written to a file. Call `image.write()` where this trait is implemented.
+pub trait WritableImage<'img, WritableLayers>: Sized {
+
+ /// Create a temporary writer which can be configured and used to write the image to a file.
+ fn write(self) -> WriteImageWithOptions<'img, WritableLayers, fn(f64)>;
+}
+
+impl<'img, WritableLayers> WritableImage<'img, WritableLayers> for &'img Image<WritableLayers> {
+ fn write(self) -> WriteImageWithOptions<'img, WritableLayers, fn(f64)> {
+ WriteImageWithOptions {
+ image: self,
+ check_compatibility: true,
+ parallel: true,
+ on_progress: ignore_progress
+ }
+ }
+}
+
+/// A temporary writer which can be configured and used to write an image to a file.
+// temporary writer with options
+#[derive(Debug, Clone, PartialEq)]
+pub struct WriteImageWithOptions<'img, Layers, OnProgress> {
+ image: &'img Image<Layers>,
+ on_progress: OnProgress,
+ check_compatibility: bool,
+ parallel: bool,
+}
+
+
+impl<'img, L, F> WriteImageWithOptions<'img, L, F>
+ where L: WritableLayers<'img>, F: FnMut(f64)
+{
+ /// Generate file meta data for this image. The meta data structure is close to the data in the file.
+ pub fn infer_meta_data(&self) -> Headers { // TODO this should perform all validity checks? and none after that?
+ self.image.layer_data.infer_headers(&self.image.attributes)
+ }
+
+ /// Do not compress multiple pixel blocks on multiple threads at once.
+ /// Might use less memory and synchronization, but will be slower in most situations.
+ pub fn non_parallel(self) -> Self { Self { parallel: false, ..self } }
+
+ /// Skip some checks that ensure a file can be opened by other exr software.
+ /// For example, it is no longer checked that no two headers or two attributes have the same name,
+ /// which might be an expensive check for images with an exorbitant number of headers.
+ ///
+ /// If you write an uncompressed file and need maximum speed, disabling the checks might save a millisecond,
+ /// provided you know that your file is valid anyway. I do not recommend this though,
+ /// as the file might not be readable by any other exr library after that.
+ /// __It is then up to you not to produce an invalid file.__
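+ ///
+ /// A sketch of chaining the writer options (the file name is a placeholder):
+ /// ```no_run
+ /// # use exr::prelude::*;
+ /// # let image: FlatImage = unimplemented!();
+ /// image.write()
+ ///     .skip_compatibility_checks()
+ ///     .non_parallel()
+ ///     .to_file("image.exr").unwrap();
+ /// ```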
+ pub fn skip_compatibility_checks(self) -> Self { Self { check_compatibility: false, ..self } }
+
+ /// Specify a function to be called regularly throughout the writing process.
+ /// Replaces all previously specified progress functions in this writer.
+ pub fn on_progress<OnProgress>(self, on_progress: OnProgress) -> WriteImageWithOptions<'img, L, OnProgress>
+ where OnProgress: FnMut(f64)
+ {
+ WriteImageWithOptions {
+ on_progress,
+ image: self.image,
+ check_compatibility: self.check_compatibility,
+ parallel: self.parallel
+ }
+ }
+
+ /// Write the exr image to a file.
+ /// Use `to_unbuffered` instead, if you do not have a file.
+ /// If an error occurs, attempts to delete the partially written file.
+ #[inline]
+ #[must_use]
+ pub fn to_file(self, path: impl AsRef<std::path::Path>) -> UnitResult {
+ crate::io::attempt_delete_file_on_write_error(path.as_ref(), move |write|
+ self.to_unbuffered(write)
+ )
+ }
+
+ /// Buffer the writer and then write the exr image to it.
+ /// Use `to_buffered` instead, if your writer is an in-memory buffer.
+ /// Use `to_file` instead, if you have a file path.
+ /// If your writer cannot seek, you can write to an in-memory vector of bytes first, using `to_buffered`.
+ #[inline]
+ #[must_use]
+ pub fn to_unbuffered(self, unbuffered: impl Write + Seek) -> UnitResult {
+ self.to_buffered(BufWriter::new(unbuffered))
+ }
+
+ /// Write the exr image to a writer.
+ /// Use `to_file` instead, if you have a file path.
+ /// Use `to_unbuffered` instead, if this is not an in-memory writer.
+ /// If your writer cannot seek, you can write to an in-memory vector of bytes first.
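+ ///
+ /// For example, the image can be written into an in-memory buffer instead of a file
+ /// (a sketch; the image value is a placeholder):
+ /// ```no_run
+ /// # use exr::prelude::*;
+ /// # let image: FlatImage = unimplemented!();
+ /// let mut buffer = std::io::Cursor::new(Vec::new());
+ /// image.write().to_buffered(&mut buffer).unwrap();
+ /// ```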
+ #[must_use]
+ pub fn to_buffered(self, write: impl Write + Seek) -> UnitResult {
+ let headers = self.infer_meta_data();
+ let layers = self.image.layer_data.create_writer(&headers);
+
+ crate::block::write(
+ write, headers, self.check_compatibility,
+ move |meta, chunk_writer|{
+
+ let blocks = meta.collect_ordered_block_data(|block_index|
+ layers.extract_uncompressed_block(&meta.headers, block_index)
+ );
+
+ let chunk_writer = chunk_writer.on_progress(self.on_progress);
+ if self.parallel { chunk_writer.compress_all_blocks_parallel(&meta, blocks)?; }
+ else { chunk_writer.compress_all_blocks_sequential(&meta, blocks)?; }
+ /*let blocks_writer = chunk_writer.as_blocks_writer(&meta);
+
+ // TODO propagate send requirement further upwards
+ if self.parallel {
+ blocks_writer.compress_all_blocks_parallel(blocks)?;
+ }
+ else {
+ blocks_writer.compress_all_blocks_sequential(blocks)?;
+ }*/
+
+ Ok(())
+ }
+ )
+ }
+}
+
diff --git a/vendor/exr/src/image/write/samples.rs b/vendor/exr/src/image/write/samples.rs
new file mode 100644
index 0000000..e74105b
--- /dev/null
+++ b/vendor/exr/src/image/write/samples.rs
@@ -0,0 +1,205 @@
+//! How to write samples (a grid of `f32`, `f16` or `u32` values).
+
+use crate::meta::attribute::{LevelMode, SampleType, TileDescription};
+use crate::meta::header::Header;
+use crate::block::lines::LineRefMut;
+use crate::image::{FlatSamples, Levels, RipMaps};
+use crate::math::{Vec2, RoundingMode};
+use crate::meta::{rip_map_levels, mip_map_levels, rip_map_indices, mip_map_indices, BlockDescription};
+
+/// Enable an image with this sample grid to be written to a file.
+/// Can also contain multiple resolution levels.
+/// Usually contained within `Channels`.
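+///
+/// For example, a flat grid of `f32` values is itself a writable sample storage
+/// (a sketch; the six values stand for a hypothetical 3x2 level):
+/// ```no_run
+/// use exr::image::FlatSamples;
+/// use exr::image::write::samples::WritableSamples;
+///
+/// let samples = FlatSamples::F32(vec![0.5; 6]);
+/// let sample_type = samples.sample_type();                  // SampleType::F32
+/// let (level_mode, rounding) = samples.infer_level_modes(); // (Singular, Down)
+/// ```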
+pub trait WritableSamples<'slf> {
+ // fn is_deep(&self) -> bool;
+
+ /// Generate the file meta data regarding the number type of this storage
+ fn sample_type(&self) -> SampleType;
+
+ /// Generate the file meta data regarding resolution levels
+ fn infer_level_modes(&self) -> (LevelMode, RoundingMode);
+
+ /// The type of the temporary writer for this sample storage
+ type Writer: SamplesWriter;
+
+ /// Create a temporary writer for this sample storage
+ fn create_samples_writer(&'slf self, header: &Header) -> Self::Writer;
+}
+
+/// Enable an image with this single level sample grid to be written to a file.
+/// Only contained within `Levels`.
+pub trait WritableLevel<'slf> {
+
+ /// Generate the file meta data regarding the number type of these samples
+ fn sample_type(&self) -> SampleType;
+
+ /// The type of the temporary writer for this single level of samples
+ type Writer: SamplesWriter;
+
+ /// Create a temporary writer for this single level of samples
+ fn create_level_writer(&'slf self, size: Vec2<usize>) -> Self::Writer;
+}
+
+/// A temporary writer for one or more resolution levels containing samples
+pub trait SamplesWriter: Sync {
+
+ /// Deliver a single short horizontal list of samples for a specific channel.
+ fn extract_line(&self, line: LineRefMut<'_>);
+}
+
+/// A temporary writer for a predefined non-deep sample storage
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub struct FlatSamplesWriter<'samples> {
+ resolution: Vec2<usize>, // respects resolution level
+ samples: &'samples FlatSamples
+}
+
+
+
+// used if no layers are used and the flat samples are directly inside the channels
+impl<'samples> WritableSamples<'samples> for FlatSamples {
+ fn sample_type(&self) -> SampleType {
+ match self {
+ FlatSamples::F16(_) => SampleType::F16,
+ FlatSamples::F32(_) => SampleType::F32,
+ FlatSamples::U32(_) => SampleType::U32,
+ }
+ }
+
+ fn infer_level_modes(&self) -> (LevelMode, RoundingMode) { (LevelMode::Singular, RoundingMode::Down) }
+
+ type Writer = FlatSamplesWriter<'samples>; //&'s FlatSamples;
+ fn create_samples_writer(&'samples self, header: &Header) -> Self::Writer {
+ FlatSamplesWriter {
+ resolution: header.layer_size,
+ samples: self
+ }
+ }
+}
+
+// used if layers are used and the flat samples are inside the levels
+impl<'samples> WritableLevel<'samples> for FlatSamples {
+ fn sample_type(&self) -> SampleType {
+ match self {
+ FlatSamples::F16(_) => SampleType::F16,
+ FlatSamples::F32(_) => SampleType::F32,
+ FlatSamples::U32(_) => SampleType::U32,
+ }
+ }
+
+ type Writer = FlatSamplesWriter<'samples>;
+ fn create_level_writer(&'samples self, size: Vec2<usize>) -> Self::Writer {
+ FlatSamplesWriter {
+ resolution: size,
+ samples: self
+ }
+ }
+}
+
+impl<'samples> SamplesWriter for FlatSamplesWriter<'samples> {
+ fn extract_line(&self, line: LineRefMut<'_>) {
+ let image_width = self.resolution.width(); // header.layer_size.width();
+ debug_assert_ne!(image_width, 0, "image width calculation bug");
+
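+ // samples are stored in row-major order, so the first sample of this line
+ // sits at `y * width + x` within the flat sample vector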
+ let start_index = line.location.position.y() * image_width + line.location.position.x();
+ let end_index = start_index + line.location.sample_count;
+
+ debug_assert!(
+ start_index < end_index && end_index <= self.samples.len(),
+ "for resolution {:?}, this is an invalid line: {:?}",
+ self.resolution, line.location
+ );
+
+ match self.samples {
+ FlatSamples::F16(samples) => line.write_samples_from_slice(&samples[start_index .. end_index]),
+ FlatSamples::F32(samples) => line.write_samples_from_slice(&samples[start_index .. end_index]),
+ FlatSamples::U32(samples) => line.write_samples_from_slice(&samples[start_index .. end_index]),
+ }.expect("writing line bytes failed");
+ }
+}
+
+
+impl<'samples, LevelSamples> WritableSamples<'samples> for Levels<LevelSamples>
+ where LevelSamples: WritableLevel<'samples>
+{
+ fn sample_type(&self) -> SampleType {
+ let sample_type = self.levels_as_slice().first().expect("no levels found").sample_type();
+
+ debug_assert!(
+ self.levels_as_slice().iter().skip(1).all(|ty| ty.sample_type() == sample_type),
+ "sample types must be the same across all levels"
+ );
+
+ sample_type
+ }
+
+ fn infer_level_modes(&self) -> (LevelMode, RoundingMode) {
+ match self {
+ Levels::Singular(_) => (LevelMode::Singular, RoundingMode::Down),
+ Levels::Mip { rounding_mode, .. } => (LevelMode::MipMap, *rounding_mode),
+ Levels::Rip { rounding_mode, .. } => (LevelMode::RipMap, *rounding_mode),
+ }
+ }
+
+ type Writer = LevelsWriter<LevelSamples::Writer>;
+ fn create_samples_writer(&'samples self, header: &Header) -> Self::Writer {
+ let rounding = match header.blocks {
+ BlockDescription::Tiles(TileDescription { rounding_mode, .. }) => Some(rounding_mode),
+ BlockDescription::ScanLines => None,
+ };
+
+ LevelsWriter {
+ levels: match self {
+ Levels::Singular(level) => Levels::Singular(level.create_level_writer(header.layer_size)),
+ Levels::Mip { level_data, rounding_mode } => {
+ debug_assert_eq!(
+ level_data.len(),
+ mip_map_indices(rounding.expect("mip maps only with tiles"), header.layer_size).count(),
+ "invalid mip map count"
+ );
+
+ Levels::Mip { // TODO store level size in image??
+ rounding_mode: *rounding_mode,
+ level_data: level_data.iter()
+ .zip(mip_map_levels(rounding.expect("mip maps only with tiles"), header.layer_size))
+ // .map(|level| level.create_samples_writer(header))
+ .map(|(level, (_level_index, level_size))| level.create_level_writer(level_size))
+ .collect()
+ }
+ },
+ Levels::Rip { level_data, rounding_mode } => {
+ debug_assert_eq!(level_data.map_data.len(), level_data.level_count.area(), "invalid rip level count");
+ debug_assert_eq!(
+ level_data.map_data.len(),
+ rip_map_indices(rounding.expect("rip maps only with tiles"), header.layer_size).count(),
+ "invalid rip map count"
+ );
+
+ Levels::Rip {
+ rounding_mode: *rounding_mode,
+ level_data: RipMaps {
+ level_count: level_data.level_count,
+ map_data: level_data.map_data.iter()
+ .zip(rip_map_levels(rounding.expect("rip maps only with tiles"), header.layer_size))
+ .map(|(level, (_level_index, level_size))| level.create_level_writer(level_size))
+ .collect(),
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+/// A temporary writer for multiple resolution levels
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct LevelsWriter<SamplesWriter> {
+ levels: Levels<SamplesWriter>,
+}
+
+impl<Samples> SamplesWriter for LevelsWriter<Samples> where Samples: SamplesWriter {
+ fn extract_line(&self, line: LineRefMut<'_>) {
+ self.levels.get_level(line.location.level).expect("invalid level index") // TODO compute level size from line index??
+ .extract_line(line)
+ }
+}