Diffstat (limited to 'vendor/png/src')
-rw-r--r-- | vendor/png/src/chunk.rs | 98
-rw-r--r-- | vendor/png/src/common.rs | 808
-rw-r--r-- | vendor/png/src/decoder/mod.rs | 961
-rw-r--r-- | vendor/png/src/decoder/stream.rs | 1576
-rw-r--r-- | vendor/png/src/decoder/zlib.rs | 212
-rw-r--r-- | vendor/png/src/encoder.rs | 2389
-rw-r--r-- | vendor/png/src/filter.rs | 801
-rw-r--r-- | vendor/png/src/lib.rs | 81
-rw-r--r-- | vendor/png/src/srgb.rs | 30
-rw-r--r-- | vendor/png/src/text_metadata.rs | 586
-rw-r--r-- | vendor/png/src/traits.rs | 43
-rw-r--r-- | vendor/png/src/utils.rs | 463
12 files changed, 0 insertions, 8048 deletions
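The diff below removes the vendored `png` crate sources wholesale. For orientation, the public decoding API defined in these files (`Decoder::new`, `Decoder::read_info`, `Reader::output_buffer_size`, `Reader::next_frame`, `OutputInfo::buffer_size`) is used roughly as in the following sketch; the input file name is a placeholder and error handling is simplified to `Box<dyn Error>`:

use std::error::Error;
use std::fs::File;

fn main() -> Result<(), Box<dyn Error>> {
    // Parse the PNG signature and all metadata chunks up to the first IDAT.
    let decoder = png::Decoder::new(File::open("example.png")?);
    let mut reader = decoder.read_info()?;

    // Allocate a buffer large enough for one deinterlaced, transformed frame.
    let mut buf = vec![0u8; reader.output_buffer_size()];

    // Decode the next (or only) frame; `info` describes what was written.
    let info = reader.next_frame(&mut buf)?;
    let frame = &buf[..info.buffer_size()];
    println!(
        "{}x{} {:?}/{:?}, {} bytes",
        info.width, info.height, info.color_type, info.bit_depth, frame.len()
    );
    Ok(())
}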
diff --git a/vendor/png/src/chunk.rs b/vendor/png/src/chunk.rs deleted file mode 100644 index 39578a4..0000000 --- a/vendor/png/src/chunk.rs +++ /dev/null @@ -1,98 +0,0 @@ -//! Chunk types and functions -#![allow(dead_code)] -#![allow(non_upper_case_globals)] -use core::fmt; - -#[derive(Clone, Copy, PartialEq, Eq, Hash)] -pub struct ChunkType(pub [u8; 4]); - -// -- Critical chunks -- - -/// Image header -pub const IHDR: ChunkType = ChunkType(*b"IHDR"); -/// Palette -pub const PLTE: ChunkType = ChunkType(*b"PLTE"); -/// Image data -pub const IDAT: ChunkType = ChunkType(*b"IDAT"); -/// Image trailer -pub const IEND: ChunkType = ChunkType(*b"IEND"); - -// -- Ancillary chunks -- - -/// Transparency -pub const tRNS: ChunkType = ChunkType(*b"tRNS"); -/// Background colour -pub const bKGD: ChunkType = ChunkType(*b"bKGD"); -/// Image last-modification time -pub const tIME: ChunkType = ChunkType(*b"tIME"); -/// Physical pixel dimensions -pub const pHYs: ChunkType = ChunkType(*b"pHYs"); -/// Source system's pixel chromaticities -pub const cHRM: ChunkType = ChunkType(*b"cHRM"); -/// Source system's gamma value -pub const gAMA: ChunkType = ChunkType(*b"gAMA"); -/// sRGB color space chunk -pub const sRGB: ChunkType = ChunkType(*b"sRGB"); -/// ICC profile chunk -pub const iCCP: ChunkType = ChunkType(*b"iCCP"); -/// Latin-1 uncompressed textual data -pub const tEXt: ChunkType = ChunkType(*b"tEXt"); -/// Latin-1 compressed textual data -pub const zTXt: ChunkType = ChunkType(*b"zTXt"); -/// UTF-8 textual data -pub const iTXt: ChunkType = ChunkType(*b"iTXt"); - -// -- Extension chunks -- - -/// Animation control -pub const acTL: ChunkType = ChunkType(*b"acTL"); -/// Frame control -pub const fcTL: ChunkType = ChunkType(*b"fcTL"); -/// Frame data -pub const fdAT: ChunkType = ChunkType(*b"fdAT"); - -// -- Chunk type determination -- - -/// Returns true if the chunk is critical. -pub fn is_critical(ChunkType(type_): ChunkType) -> bool { - type_[0] & 32 == 0 -} - -/// Returns true if the chunk is private. -pub fn is_private(ChunkType(type_): ChunkType) -> bool { - type_[1] & 32 != 0 -} - -/// Checks whether the reserved bit of the chunk name is set. -/// If it is set the chunk name is invalid. -pub fn reserved_set(ChunkType(type_): ChunkType) -> bool { - type_[2] & 32 != 0 -} - -/// Returns true if the chunk is safe to copy if unknown. -pub fn safe_to_copy(ChunkType(type_): ChunkType) -> bool { - type_[3] & 32 != 0 -} - -impl fmt::Debug for ChunkType { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - struct DebugType([u8; 4]); - - impl fmt::Debug for DebugType { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for &c in &self.0[..] { - write!(f, "{}", char::from(c).escape_debug())?; - } - Ok(()) - } - } - - f.debug_struct("ChunkType") - .field("type", &DebugType(self.0)) - .field("critical", &is_critical(*self)) - .field("private", &is_private(*self)) - .field("reserved", &reserved_set(*self)) - .field("safecopy", &safe_to_copy(*self)) - .finish() - } -} diff --git a/vendor/png/src/common.rs b/vendor/png/src/common.rs deleted file mode 100644 index 6e5dbff..0000000 --- a/vendor/png/src/common.rs +++ /dev/null @@ -1,808 +0,0 @@ -//! Common types shared between the encoder and decoder -use crate::text_metadata::{EncodableTextChunk, ITXtChunk, TEXtChunk, ZTXtChunk}; -use crate::{chunk, encoder}; -use io::Write; -use std::{borrow::Cow, convert::TryFrom, fmt, io}; - -/// Describes how a pixel is encoded. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(u8)] -pub enum ColorType { - /// 1 grayscale sample. - Grayscale = 0, - /// 1 red sample, 1 green sample, 1 blue sample. - Rgb = 2, - /// 1 sample for the palette index. - Indexed = 3, - /// 1 grayscale sample, then 1 alpha sample. - GrayscaleAlpha = 4, - /// 1 red sample, 1 green sample, 1 blue sample, and finally, 1 alpha sample. - Rgba = 6, -} - -impl ColorType { - /// Returns the number of samples used per pixel encoded in this way. - pub fn samples(self) -> usize { - self.samples_u8().into() - } - - pub(crate) fn samples_u8(self) -> u8 { - use self::ColorType::*; - match self { - Grayscale | Indexed => 1, - Rgb => 3, - GrayscaleAlpha => 2, - Rgba => 4, - } - } - - /// u8 -> Self. Temporary solution until Rust provides a canonical one. - pub fn from_u8(n: u8) -> Option<ColorType> { - match n { - 0 => Some(ColorType::Grayscale), - 2 => Some(ColorType::Rgb), - 3 => Some(ColorType::Indexed), - 4 => Some(ColorType::GrayscaleAlpha), - 6 => Some(ColorType::Rgba), - _ => None, - } - } - - pub(crate) fn checked_raw_row_length(self, depth: BitDepth, width: u32) -> Option<usize> { - // No overflow can occur in 64 bits, we multiply 32-bit with 5 more bits. - let bits = u64::from(width) * u64::from(self.samples_u8()) * u64::from(depth.into_u8()); - TryFrom::try_from(1 + (bits + 7) / 8).ok() - } - - pub(crate) fn raw_row_length_from_width(self, depth: BitDepth, width: u32) -> usize { - let samples = width as usize * self.samples(); - 1 + match depth { - BitDepth::Sixteen => samples * 2, - BitDepth::Eight => samples, - subbyte => { - let samples_per_byte = 8 / subbyte as usize; - let whole = samples / samples_per_byte; - let fract = usize::from(samples % samples_per_byte > 0); - whole + fract - } - } - } - - pub(crate) fn is_combination_invalid(self, bit_depth: BitDepth) -> bool { - // Section 11.2.2 of the PNG standard disallows several combinations - // of bit depth and color type - ((bit_depth == BitDepth::One || bit_depth == BitDepth::Two || bit_depth == BitDepth::Four) - && (self == ColorType::Rgb - || self == ColorType::GrayscaleAlpha - || self == ColorType::Rgba)) - || (bit_depth == BitDepth::Sixteen && self == ColorType::Indexed) - } -} - -/// Bit depth of the PNG file. -/// Specifies the number of bits per sample. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(u8)] -pub enum BitDepth { - One = 1, - Two = 2, - Four = 4, - Eight = 8, - Sixteen = 16, -} - -/// Internal count of bytes per pixel. -/// This is used for filtering which never uses sub-byte units. This essentially reduces the number -/// of possible byte chunk lengths to a very small set of values appropriate to be defined as an -/// enum. -#[derive(Debug, Clone, Copy)] -#[repr(u8)] -pub(crate) enum BytesPerPixel { - One = 1, - Two = 2, - Three = 3, - Four = 4, - Six = 6, - Eight = 8, -} - -impl BitDepth { - /// u8 -> Self. Temporary solution until Rust provides a canonical one. 
- pub fn from_u8(n: u8) -> Option<BitDepth> { - match n { - 1 => Some(BitDepth::One), - 2 => Some(BitDepth::Two), - 4 => Some(BitDepth::Four), - 8 => Some(BitDepth::Eight), - 16 => Some(BitDepth::Sixteen), - _ => None, - } - } - - pub(crate) fn into_u8(self) -> u8 { - self as u8 - } -} - -/// Pixel dimensions information -#[derive(Clone, Copy, Debug)] -pub struct PixelDimensions { - /// Pixels per unit, X axis - pub xppu: u32, - /// Pixels per unit, Y axis - pub yppu: u32, - /// Either *Meter* or *Unspecified* - pub unit: Unit, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(u8)] -/// Physical unit of the pixel dimensions -pub enum Unit { - Unspecified = 0, - Meter = 1, -} - -impl Unit { - /// u8 -> Self. Temporary solution until Rust provides a canonical one. - pub fn from_u8(n: u8) -> Option<Unit> { - match n { - 0 => Some(Unit::Unspecified), - 1 => Some(Unit::Meter), - _ => None, - } - } -} - -/// How to reset buffer of an animated png (APNG) at the end of a frame. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(u8)] -pub enum DisposeOp { - /// Leave the buffer unchanged. - None = 0, - /// Clear buffer with the background color. - Background = 1, - /// Reset the buffer to the state before the current frame. - Previous = 2, -} - -impl DisposeOp { - /// u8 -> Self. Using enum_primitive or transmute is probably the right thing but this will do for now. - pub fn from_u8(n: u8) -> Option<DisposeOp> { - match n { - 0 => Some(DisposeOp::None), - 1 => Some(DisposeOp::Background), - 2 => Some(DisposeOp::Previous), - _ => None, - } - } -} - -impl fmt::Display for DisposeOp { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let name = match *self { - DisposeOp::None => "DISPOSE_OP_NONE", - DisposeOp::Background => "DISPOSE_OP_BACKGROUND", - DisposeOp::Previous => "DISPOSE_OP_PREVIOUS", - }; - write!(f, "{}", name) - } -} - -/// How pixels are written into the buffer. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(u8)] -pub enum BlendOp { - /// Pixels overwrite the value at their position. - Source = 0, - /// The new pixels are blended into the current state based on alpha. - Over = 1, -} - -impl BlendOp { - /// u8 -> Self. Using enum_primitive or transmute is probably the right thing but this will do for now. 
- pub fn from_u8(n: u8) -> Option<BlendOp> { - match n { - 0 => Some(BlendOp::Source), - 1 => Some(BlendOp::Over), - _ => None, - } - } -} - -impl fmt::Display for BlendOp { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let name = match *self { - BlendOp::Source => "BLEND_OP_SOURCE", - BlendOp::Over => "BLEND_OP_OVER", - }; - write!(f, "{}", name) - } -} - -/// Frame control information -#[derive(Clone, Copy, Debug)] -pub struct FrameControl { - /// Sequence number of the animation chunk, starting from 0 - pub sequence_number: u32, - /// Width of the following frame - pub width: u32, - /// Height of the following frame - pub height: u32, - /// X position at which to render the following frame - pub x_offset: u32, - /// Y position at which to render the following frame - pub y_offset: u32, - /// Frame delay fraction numerator - pub delay_num: u16, - /// Frame delay fraction denominator - pub delay_den: u16, - /// Type of frame area disposal to be done after rendering this frame - pub dispose_op: DisposeOp, - /// Type of frame area rendering for this frame - pub blend_op: BlendOp, -} - -impl Default for FrameControl { - fn default() -> FrameControl { - FrameControl { - sequence_number: 0, - width: 0, - height: 0, - x_offset: 0, - y_offset: 0, - delay_num: 1, - delay_den: 30, - dispose_op: DisposeOp::None, - blend_op: BlendOp::Source, - } - } -} - -impl FrameControl { - pub fn set_seq_num(&mut self, s: u32) { - self.sequence_number = s; - } - - pub fn inc_seq_num(&mut self, i: u32) { - self.sequence_number += i; - } - - pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> { - let mut data = [0u8; 26]; - data[..4].copy_from_slice(&self.sequence_number.to_be_bytes()); - data[4..8].copy_from_slice(&self.width.to_be_bytes()); - data[8..12].copy_from_slice(&self.height.to_be_bytes()); - data[12..16].copy_from_slice(&self.x_offset.to_be_bytes()); - data[16..20].copy_from_slice(&self.y_offset.to_be_bytes()); - data[20..22].copy_from_slice(&self.delay_num.to_be_bytes()); - data[22..24].copy_from_slice(&self.delay_den.to_be_bytes()); - data[24] = self.dispose_op as u8; - data[25] = self.blend_op as u8; - - encoder::write_chunk(w, chunk::fcTL, &data) - } -} - -/// Animation control information -#[derive(Clone, Copy, Debug)] -pub struct AnimationControl { - /// Number of frames - pub num_frames: u32, - /// Number of times to loop this APNG. 0 indicates infinite looping. - pub num_plays: u32, -} - -impl AnimationControl { - pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> { - let mut data = [0; 8]; - data[..4].copy_from_slice(&self.num_frames.to_be_bytes()); - data[4..].copy_from_slice(&self.num_plays.to_be_bytes()); - encoder::write_chunk(w, chunk::acTL, &data) - } -} - -/// The type and strength of applied compression. -#[derive(Debug, Clone, Copy)] -pub enum Compression { - /// Default level - Default, - /// Fast minimal compression - Fast, - /// Higher compression level - /// - /// Best in this context isn't actually the highest possible level - /// the encoder can do, but is meant to emulate the `Best` setting in the `Flate2` - /// library. 
- Best, - #[deprecated( - since = "0.17.6", - note = "use one of the other compression levels instead, such as 'fast'" - )] - Huffman, - #[deprecated( - since = "0.17.6", - note = "use one of the other compression levels instead, such as 'fast'" - )] - Rle, -} - -impl Default for Compression { - fn default() -> Self { - Self::Default - } -} - -/// An unsigned integer scaled version of a floating point value, -/// equivalent to an integer quotient with fixed denominator (100_000)). -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct ScaledFloat(u32); - -impl ScaledFloat { - const SCALING: f32 = 100_000.0; - - /// Gets whether the value is within the clamped range of this type. - pub fn in_range(value: f32) -> bool { - value >= 0.0 && (value * Self::SCALING).floor() <= std::u32::MAX as f32 - } - - /// Gets whether the value can be exactly converted in round-trip. - #[allow(clippy::float_cmp)] // Stupid tool, the exact float compare is _the entire point_. - pub fn exact(value: f32) -> bool { - let there = Self::forward(value); - let back = Self::reverse(there); - value == back - } - - fn forward(value: f32) -> u32 { - (value.max(0.0) * Self::SCALING).floor() as u32 - } - - fn reverse(encoded: u32) -> f32 { - encoded as f32 / Self::SCALING - } - - /// Slightly inaccurate scaling and quantization. - /// Clamps the value into the representable range if it is negative or too large. - pub fn new(value: f32) -> Self { - Self(Self::forward(value)) - } - - /// Fully accurate construction from a value scaled as per specification. - pub fn from_scaled(val: u32) -> Self { - Self(val) - } - - /// Get the accurate encoded value. - pub fn into_scaled(self) -> u32 { - self.0 - } - - /// Get the unscaled value as a floating point. - pub fn into_value(self) -> f32 { - Self::reverse(self.0) - } - - pub(crate) fn encode_gama<W: Write>(self, w: &mut W) -> encoder::Result<()> { - encoder::write_chunk(w, chunk::gAMA, &self.into_scaled().to_be_bytes()) - } -} - -/// Chromaticities of the color space primaries -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct SourceChromaticities { - pub white: (ScaledFloat, ScaledFloat), - pub red: (ScaledFloat, ScaledFloat), - pub green: (ScaledFloat, ScaledFloat), - pub blue: (ScaledFloat, ScaledFloat), -} - -impl SourceChromaticities { - pub fn new(white: (f32, f32), red: (f32, f32), green: (f32, f32), blue: (f32, f32)) -> Self { - SourceChromaticities { - white: (ScaledFloat::new(white.0), ScaledFloat::new(white.1)), - red: (ScaledFloat::new(red.0), ScaledFloat::new(red.1)), - green: (ScaledFloat::new(green.0), ScaledFloat::new(green.1)), - blue: (ScaledFloat::new(blue.0), ScaledFloat::new(blue.1)), - } - } - - #[rustfmt::skip] - pub fn to_be_bytes(self) -> [u8; 32] { - let white_x = self.white.0.into_scaled().to_be_bytes(); - let white_y = self.white.1.into_scaled().to_be_bytes(); - let red_x = self.red.0.into_scaled().to_be_bytes(); - let red_y = self.red.1.into_scaled().to_be_bytes(); - let green_x = self.green.0.into_scaled().to_be_bytes(); - let green_y = self.green.1.into_scaled().to_be_bytes(); - let blue_x = self.blue.0.into_scaled().to_be_bytes(); - let blue_y = self.blue.1.into_scaled().to_be_bytes(); - [ - white_x[0], white_x[1], white_x[2], white_x[3], - white_y[0], white_y[1], white_y[2], white_y[3], - red_x[0], red_x[1], red_x[2], red_x[3], - red_y[0], red_y[1], red_y[2], red_y[3], - green_x[0], green_x[1], green_x[2], green_x[3], - green_y[0], green_y[1], green_y[2], green_y[3], - blue_x[0], blue_x[1], blue_x[2], blue_x[3], - blue_y[0], blue_y[1], 
blue_y[2], blue_y[3], - ] - } - - pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> { - encoder::write_chunk(w, chunk::cHRM, &self.to_be_bytes()) - } -} - -/// The rendering intent for an sRGB image. -/// -/// Presence of this data also indicates that the image conforms to the sRGB color space. -#[repr(u8)] -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum SrgbRenderingIntent { - /// For images preferring good adaptation to the output device gamut at the expense of colorimetric accuracy, such as photographs. - Perceptual = 0, - /// For images requiring colour appearance matching (relative to the output device white point), such as logos. - RelativeColorimetric = 1, - /// For images preferring preservation of saturation at the expense of hue and lightness, such as charts and graphs. - Saturation = 2, - /// For images requiring preservation of absolute colorimetry, such as previews of images destined for a different output device (proofs). - AbsoluteColorimetric = 3, -} - -impl SrgbRenderingIntent { - pub(crate) fn into_raw(self) -> u8 { - self as u8 - } - - pub(crate) fn from_raw(raw: u8) -> Option<Self> { - match raw { - 0 => Some(SrgbRenderingIntent::Perceptual), - 1 => Some(SrgbRenderingIntent::RelativeColorimetric), - 2 => Some(SrgbRenderingIntent::Saturation), - 3 => Some(SrgbRenderingIntent::AbsoluteColorimetric), - _ => None, - } - } - - pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> { - encoder::write_chunk(w, chunk::sRGB, &[self.into_raw()]) - } -} - -/// PNG info struct -#[derive(Clone, Debug)] -#[non_exhaustive] -pub struct Info<'a> { - pub width: u32, - pub height: u32, - pub bit_depth: BitDepth, - /// How colors are stored in the image. - pub color_type: ColorType, - pub interlaced: bool, - /// The image's `tRNS` chunk, if present; contains the alpha channel of the image's palette, 1 byte per entry. - pub trns: Option<Cow<'a, [u8]>>, - pub pixel_dims: Option<PixelDimensions>, - /// The image's `PLTE` chunk, if present; contains the RGB channels (in that order) of the image's palettes, 3 bytes per entry (1 per channel). - pub palette: Option<Cow<'a, [u8]>>, - /// The contents of the image's gAMA chunk, if present. - /// Prefer `source_gamma` to also get the derived replacement gamma from sRGB chunks. - pub gama_chunk: Option<ScaledFloat>, - /// The contents of the image's `cHRM` chunk, if present. - /// Prefer `source_chromaticities` to also get the derived replacements from sRGB chunks. - pub chrm_chunk: Option<SourceChromaticities>, - - pub frame_control: Option<FrameControl>, - pub animation_control: Option<AnimationControl>, - pub compression: Compression, - /// Gamma of the source system. - /// Set by both `gAMA` as well as to a replacement by `sRGB` chunk. - pub source_gamma: Option<ScaledFloat>, - /// Chromaticities of the source system. - /// Set by both `cHRM` as well as to a replacement by `sRGB` chunk. - pub source_chromaticities: Option<SourceChromaticities>, - /// The rendering intent of an SRGB image. - /// - /// Presence of this value also indicates that the image conforms to the SRGB color space. - pub srgb: Option<SrgbRenderingIntent>, - /// The ICC profile for the image. 
- pub icc_profile: Option<Cow<'a, [u8]>>, - /// tEXt field - pub uncompressed_latin1_text: Vec<TEXtChunk>, - /// zTXt field - pub compressed_latin1_text: Vec<ZTXtChunk>, - /// iTXt field - pub utf8_text: Vec<ITXtChunk>, -} - -impl Default for Info<'_> { - fn default() -> Info<'static> { - Info { - width: 0, - height: 0, - bit_depth: BitDepth::Eight, - color_type: ColorType::Grayscale, - interlaced: false, - palette: None, - trns: None, - gama_chunk: None, - chrm_chunk: None, - pixel_dims: None, - frame_control: None, - animation_control: None, - // Default to `deflate::Compression::Fast` and `filter::FilterType::Sub` - // to maintain backward compatible output. - compression: Compression::Fast, - source_gamma: None, - source_chromaticities: None, - srgb: None, - icc_profile: None, - uncompressed_latin1_text: Vec::new(), - compressed_latin1_text: Vec::new(), - utf8_text: Vec::new(), - } - } -} - -impl Info<'_> { - /// A utility constructor for a default info with width and height. - pub fn with_size(width: u32, height: u32) -> Self { - Info { - width, - height, - ..Default::default() - } - } - - /// Size of the image, width then height. - pub fn size(&self) -> (u32, u32) { - (self.width, self.height) - } - - /// Returns true if the image is an APNG image. - pub fn is_animated(&self) -> bool { - self.frame_control.is_some() && self.animation_control.is_some() - } - - /// Returns the frame control information of the image. - pub fn animation_control(&self) -> Option<&AnimationControl> { - self.animation_control.as_ref() - } - - /// Returns the frame control information of the current frame - pub fn frame_control(&self) -> Option<&FrameControl> { - self.frame_control.as_ref() - } - - /// Returns the number of bits per pixel. - pub fn bits_per_pixel(&self) -> usize { - self.color_type.samples() * self.bit_depth as usize - } - - /// Returns the number of bytes per pixel. - pub fn bytes_per_pixel(&self) -> usize { - // If adjusting this for expansion or other transformation passes, remember to keep the old - // implementation for bpp_in_prediction, which is internal to the png specification. - self.color_type.samples() * ((self.bit_depth as usize + 7) >> 3) - } - - /// Return the number of bytes for this pixel used in prediction. - /// - /// Some filters use prediction, over the raw bytes of a scanline. Where a previous pixel is - /// require for such forms the specification instead references previous bytes. That is, for - /// a gray pixel of bit depth 2, the pixel used in prediction is actually 4 pixels prior. This - /// has the consequence that the number of possible values is rather small. To make this fact - /// more obvious in the type system and the optimizer we use an explicit enum here. - pub(crate) fn bpp_in_prediction(&self) -> BytesPerPixel { - match self.bytes_per_pixel() { - 1 => BytesPerPixel::One, - 2 => BytesPerPixel::Two, - 3 => BytesPerPixel::Three, - 4 => BytesPerPixel::Four, - 6 => BytesPerPixel::Six, // Only rgb×16bit - 8 => BytesPerPixel::Eight, // Only rgba×16bit - _ => unreachable!("Not a possible byte rounded pixel width"), - } - } - - /// Returns the number of bytes needed for one deinterlaced image. - pub fn raw_bytes(&self) -> usize { - self.height as usize * self.raw_row_length() - } - - /// Returns the number of bytes needed for one deinterlaced row. 
- pub fn raw_row_length(&self) -> usize { - self.raw_row_length_from_width(self.width) - } - - pub(crate) fn checked_raw_row_length(&self) -> Option<usize> { - self.color_type - .checked_raw_row_length(self.bit_depth, self.width) - } - - /// Returns the number of bytes needed for one deinterlaced row of width `width`. - pub fn raw_row_length_from_width(&self, width: u32) -> usize { - self.color_type - .raw_row_length_from_width(self.bit_depth, width) - } - - /// Encode this header to the writer. - /// - /// Note that this does _not_ include the PNG signature, it starts with the IHDR chunk and then - /// includes other chunks that were added to the header. - pub fn encode<W: Write>(&self, mut w: W) -> encoder::Result<()> { - // Encode the IHDR chunk - let mut data = [0; 13]; - data[..4].copy_from_slice(&self.width.to_be_bytes()); - data[4..8].copy_from_slice(&self.height.to_be_bytes()); - data[8] = self.bit_depth as u8; - data[9] = self.color_type as u8; - data[12] = self.interlaced as u8; - encoder::write_chunk(&mut w, chunk::IHDR, &data)?; - // Encode the pHYs chunk - if let Some(pd) = self.pixel_dims { - let mut phys_data = [0; 9]; - phys_data[0..4].copy_from_slice(&pd.xppu.to_be_bytes()); - phys_data[4..8].copy_from_slice(&pd.yppu.to_be_bytes()); - match pd.unit { - Unit::Meter => phys_data[8] = 1, - Unit::Unspecified => phys_data[8] = 0, - } - encoder::write_chunk(&mut w, chunk::pHYs, &phys_data)?; - } - - if let Some(p) = &self.palette { - encoder::write_chunk(&mut w, chunk::PLTE, p)?; - }; - - if let Some(t) = &self.trns { - encoder::write_chunk(&mut w, chunk::tRNS, t)?; - } - - // If specified, the sRGB information overrides the source gamma and chromaticities. - if let Some(srgb) = &self.srgb { - let gamma = crate::srgb::substitute_gamma(); - let chromaticities = crate::srgb::substitute_chromaticities(); - srgb.encode(&mut w)?; - gamma.encode_gama(&mut w)?; - chromaticities.encode(&mut w)?; - } else { - if let Some(gma) = self.source_gamma { - gma.encode_gama(&mut w)? - } - if let Some(chrms) = self.source_chromaticities { - chrms.encode(&mut w)?; - } - } - if let Some(actl) = self.animation_control { - actl.encode(&mut w)?; - } - - for text_chunk in &self.uncompressed_latin1_text { - text_chunk.encode(&mut w)?; - } - - for text_chunk in &self.compressed_latin1_text { - text_chunk.encode(&mut w)?; - } - - for text_chunk in &self.utf8_text { - text_chunk.encode(&mut w)?; - } - - Ok(()) - } -} - -impl BytesPerPixel { - pub(crate) fn into_usize(self) -> usize { - self as usize - } -} - -bitflags! { - /// Output transformations - /// - /// Many flags from libpng are not yet supported. A PR discussing/adding them would be nice. 
- /// - #[doc = " - ```c - /// Discard the alpha channel - const STRIP_ALPHA = 0x0002; // read only - /// Expand 1; 2 and 4-bit samples to bytes - const PACKING = 0x0004; // read and write - /// Change order of packed pixels to LSB first - const PACKSWAP = 0x0008; // read and write - /// Invert monochrome images - const INVERT_MONO = 0x0020; // read and write - /// Normalize pixels to the sBIT depth - const SHIFT = 0x0040; // read and write - /// Flip RGB to BGR; RGBA to BGRA - const BGR = 0x0080; // read and write - /// Flip RGBA to ARGB or GA to AG - const SWAP_ALPHA = 0x0100; // read and write - /// Byte-swap 16-bit samples - const SWAP_ENDIAN = 0x0200; // read and write - /// Change alpha from opacity to transparency - const INVERT_ALPHA = 0x0400; // read and write - const STRIP_FILLER = 0x0800; // write only - const STRIP_FILLER_BEFORE = 0x0800; // write only - const STRIP_FILLER_AFTER = 0x1000; // write only - const GRAY_TO_RGB = 0x2000; // read only - const EXPAND_16 = 0x4000; // read only - /// Similar to STRIP_16 but in libpng considering gamma? - /// Not entirely sure the documentation says it is more - /// accurate but doesn't say precisely how. - const SCALE_16 = 0x8000; // read only - ``` - "] - pub struct Transformations: u32 { - /// No transformation - const IDENTITY = 0x00000; // read and write */ - /// Strip 16-bit samples to 8 bits - const STRIP_16 = 0x00001; // read only */ - /// Expand paletted images to RGB; expand grayscale images of - /// less than 8-bit depth to 8-bit depth; and expand tRNS chunks - /// to alpha channels. - const EXPAND = 0x00010; // read only */ - /// Expand paletted images to include an alpha channel. Implies `EXPAND`. - const ALPHA = 0x10000; // read only */ - } -} - -impl Transformations { - /// Transform every input to 8bit grayscale or color. - /// - /// This sets `EXPAND` and `STRIP_16` which is similar to the default transformation used by - /// this library prior to `0.17`. - pub fn normalize_to_color8() -> Transformations { - Transformations::EXPAND | Transformations::STRIP_16 - } -} - -/// Instantiate the default transformations, the identity transform. -impl Default for Transformations { - fn default() -> Transformations { - Transformations::IDENTITY - } -} - -#[derive(Debug)] -pub struct ParameterError { - inner: ParameterErrorKind, -} - -#[derive(Debug)] -pub(crate) enum ParameterErrorKind { - /// A provided buffer must be have the exact size to hold the image data. Where the buffer can - /// be allocated by the caller, they must ensure that it has a minimum size as hinted previously. - /// Even though the size is calculated from image data, this does counts as a parameter error - /// because they must react to a value produced by this library, which can have been subjected - /// to limits. - ImageBufferSize { expected: usize, actual: usize }, - /// A bit like return `None` from an iterator. - /// We use it to differentiate between failing to seek to the next image in a sequence and the - /// absence of a next image. This is an error of the caller because they should have checked - /// the number of images by inspecting the header data returned when opening the image. This - /// library will perform the checks necessary to ensure that data was accurate or error with a - /// format error otherwise. 
- PolledAfterEndOfImage, -} - -impl From<ParameterErrorKind> for ParameterError { - fn from(inner: ParameterErrorKind) -> Self { - ParameterError { inner } - } -} - -impl fmt::Display for ParameterError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use ParameterErrorKind::*; - match self.inner { - ImageBufferSize { expected, actual } => { - write!(fmt, "wrong data size, expected {} got {}", expected, actual) - } - PolledAfterEndOfImage => write!(fmt, "End of image has been reached"), - } - } -} diff --git a/vendor/png/src/decoder/mod.rs b/vendor/png/src/decoder/mod.rs deleted file mode 100644 index 09772fe..0000000 --- a/vendor/png/src/decoder/mod.rs +++ /dev/null @@ -1,961 +0,0 @@ -mod stream; -mod zlib; - -pub use self::stream::{DecodeOptions, Decoded, DecodingError, StreamingDecoder}; -use self::stream::{FormatErrorInner, CHUNCK_BUFFER_SIZE}; - -use std::io::{BufRead, BufReader, Read}; -use std::mem; -use std::ops::Range; - -use crate::chunk; -use crate::common::{ - BitDepth, BytesPerPixel, ColorType, Info, ParameterErrorKind, Transformations, -}; -use crate::filter::{unfilter, FilterType}; -use crate::utils; - -/* -pub enum InterlaceHandling { - /// Outputs the raw rows - RawRows, - /// Fill missing the pixels from the existing ones - Rectangle, - /// Only fill the needed pixels - Sparkle -} -*/ - -/// Output info. -/// -/// This describes one particular frame of the image that was written into the output buffer. -#[derive(Debug, PartialEq, Eq)] -pub struct OutputInfo { - /// The pixel width of this frame. - pub width: u32, - /// The pixel height of this frame. - pub height: u32, - /// The chosen output color type. - pub color_type: ColorType, - /// The chosen output bit depth. - pub bit_depth: BitDepth, - /// The byte count of each scan line in the image. - pub line_size: usize, -} - -impl OutputInfo { - /// Returns the size needed to hold a decoded frame - /// If the output buffer was larger then bytes after this count should be ignored. They may - /// still have been changed. - pub fn buffer_size(&self) -> usize { - self.line_size * self.height as usize - } -} - -#[derive(Clone, Copy, Debug)] -/// Limits on the resources the `Decoder` is allowed too use -pub struct Limits { - /// maximum number of bytes the decoder is allowed to allocate, default is 64Mib - pub bytes: usize, -} - -impl Default for Limits { - fn default() -> Limits { - Limits { - bytes: 1024 * 1024 * 64, - } - } -} - -/// PNG Decoder -pub struct Decoder<R: Read> { - read_decoder: ReadDecoder<R>, - /// Output transformations - transform: Transformations, - /// Limits on resources the Decoder is allowed to use - limits: Limits, -} - -/// A row of data with interlace information attached. -#[derive(Clone, Copy, Debug)] -pub struct InterlacedRow<'data> { - data: &'data [u8], - interlace: InterlaceInfo, -} - -impl<'data> InterlacedRow<'data> { - pub fn data(&self) -> &'data [u8] { - self.data - } - - pub fn interlace(&self) -> InterlaceInfo { - self.interlace - } -} - -/// PNG (2003) specifies two interlace modes, but reserves future extensions. -#[derive(Clone, Copy, Debug)] -pub enum InterlaceInfo { - /// the null method means no interlacing - Null, - /// Adam7 derives its name from doing 7 passes over the image, only decoding a subset of all pixels in each pass. 
- /// The following table shows pictorially what parts of each 8x8 area of the image is found in each pass: - /// - /// 1 6 4 6 2 6 4 6 - /// 7 7 7 7 7 7 7 7 - /// 5 6 5 6 5 6 5 6 - /// 7 7 7 7 7 7 7 7 - /// 3 6 4 6 3 6 4 6 - /// 7 7 7 7 7 7 7 7 - /// 5 6 5 6 5 6 5 6 - /// 7 7 7 7 7 7 7 7 - Adam7 { pass: u8, line: u32, width: u32 }, -} - -/// A row of data without interlace information. -#[derive(Clone, Copy, Debug)] -pub struct Row<'data> { - data: &'data [u8], -} - -impl<'data> Row<'data> { - pub fn data(&self) -> &'data [u8] { - self.data - } -} - -impl<R: Read> Decoder<R> { - /// Create a new decoder configuration with default limits. - pub fn new(r: R) -> Decoder<R> { - Decoder::new_with_limits(r, Limits::default()) - } - - /// Create a new decoder configuration with custom limits. - pub fn new_with_limits(r: R, limits: Limits) -> Decoder<R> { - Decoder { - read_decoder: ReadDecoder { - reader: BufReader::with_capacity(CHUNCK_BUFFER_SIZE, r), - decoder: StreamingDecoder::new(), - at_eof: false, - }, - transform: Transformations::IDENTITY, - limits, - } - } - - /// Create a new decoder configuration with custom `DecodeOptions`. - pub fn new_with_options(r: R, decode_options: DecodeOptions) -> Decoder<R> { - Decoder { - read_decoder: ReadDecoder { - reader: BufReader::with_capacity(CHUNCK_BUFFER_SIZE, r), - decoder: StreamingDecoder::new_with_options(decode_options), - at_eof: false, - }, - transform: Transformations::IDENTITY, - limits: Limits::default(), - } - } - - /// Limit resource usage. - /// - /// Note that your allocations, e.g. when reading into a pre-allocated buffer, are __NOT__ - /// considered part of the limits. Nevertheless, required intermediate buffers such as for - /// singular lines is checked against the limit. - /// - /// Note that this is a best-effort basis. - /// - /// ``` - /// use std::fs::File; - /// use png::{Decoder, Limits}; - /// // This image is 32×32, 1bit per pixel. The reader buffers one row which requires 4 bytes. - /// let mut limits = Limits::default(); - /// limits.bytes = 3; - /// let mut decoder = Decoder::new_with_limits(File::open("tests/pngsuite/basi0g01.png").unwrap(), limits); - /// assert!(decoder.read_info().is_err()); - /// - /// // This image is 32x32 pixels, so the decoder will allocate less than 10Kib - /// let mut limits = Limits::default(); - /// limits.bytes = 10*1024; - /// let mut decoder = Decoder::new_with_limits(File::open("tests/pngsuite/basi0g01.png").unwrap(), limits); - /// assert!(decoder.read_info().is_ok()); - /// ``` - pub fn set_limits(&mut self, limits: Limits) { - self.limits = limits; - } - - /// Read the PNG header and return the information contained within. - /// - /// Most image metadata will not be read until `read_info` is called, so those fields will be - /// None or empty. 
- pub fn read_header_info(&mut self) -> Result<&Info, DecodingError> { - let mut buf = Vec::new(); - while self.read_decoder.info().is_none() { - buf.clear(); - if self.read_decoder.decode_next(&mut buf)?.is_none() { - return Err(DecodingError::Format( - FormatErrorInner::UnexpectedEof.into(), - )); - } - } - Ok(self.read_decoder.info().unwrap()) - } - - /// Reads all meta data until the first IDAT chunk - pub fn read_info(mut self) -> Result<Reader<R>, DecodingError> { - self.read_header_info()?; - - let mut reader = Reader { - decoder: self.read_decoder, - bpp: BytesPerPixel::One, - subframe: SubframeInfo::not_yet_init(), - fctl_read: 0, - next_frame: SubframeIdx::Initial, - prev: Vec::new(), - current: Vec::new(), - scan_start: 0, - transform: self.transform, - scratch_buffer: Vec::new(), - limits: self.limits, - }; - - // Check if the decoding buffer of a single raw line has a valid size. - if reader.info().checked_raw_row_length().is_none() { - return Err(DecodingError::LimitsExceeded); - } - - // Check if the output buffer has a valid size. - let (width, height) = reader.info().size(); - let (color, depth) = reader.output_color_type(); - let rowlen = color - .checked_raw_row_length(depth, width) - .ok_or(DecodingError::LimitsExceeded)? - - 1; - let height: usize = - std::convert::TryFrom::try_from(height).map_err(|_| DecodingError::LimitsExceeded)?; - if rowlen.checked_mul(height).is_none() { - return Err(DecodingError::LimitsExceeded); - } - - reader.read_until_image_data()?; - Ok(reader) - } - - /// Set the allowed and performed transformations. - /// - /// A transformation is a pre-processing on the raw image data modifying content or encoding. - /// Many options have an impact on memory or CPU usage during decoding. - pub fn set_transformations(&mut self, transform: Transformations) { - self.transform = transform; - } - - /// Set the decoder to ignore all text chunks while parsing. - /// - /// eg. - /// ``` - /// use std::fs::File; - /// use png::Decoder; - /// let mut decoder = Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap()); - /// decoder.set_ignore_text_chunk(true); - /// assert!(decoder.read_info().is_ok()); - /// ``` - pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) { - self.read_decoder - .decoder - .set_ignore_text_chunk(ignore_text_chunk); - } - - /// Set the decoder to ignore and not verify the Adler-32 checksum - /// and CRC code. - pub fn ignore_checksums(&mut self, ignore_checksums: bool) { - self.read_decoder - .decoder - .set_ignore_adler32(ignore_checksums); - self.read_decoder.decoder.set_ignore_crc(ignore_checksums); - } -} - -struct ReadDecoder<R: Read> { - reader: BufReader<R>, - decoder: StreamingDecoder, - at_eof: bool, -} - -impl<R: Read> ReadDecoder<R> { - /// Returns the next decoded chunk. If the chunk is an ImageData chunk, its contents are written - /// into image_data. - fn decode_next(&mut self, image_data: &mut Vec<u8>) -> Result<Option<Decoded>, DecodingError> { - while !self.at_eof { - let (consumed, result) = { - let buf = self.reader.fill_buf()?; - if buf.is_empty() { - return Err(DecodingError::Format( - FormatErrorInner::UnexpectedEof.into(), - )); - } - self.decoder.update(buf, image_data)? 
- }; - self.reader.consume(consumed); - match result { - Decoded::Nothing => (), - Decoded::ImageEnd => self.at_eof = true, - result => return Ok(Some(result)), - } - } - Ok(None) - } - - fn finish_decoding(&mut self) -> Result<(), DecodingError> { - while !self.at_eof { - let buf = self.reader.fill_buf()?; - if buf.is_empty() { - return Err(DecodingError::Format( - FormatErrorInner::UnexpectedEof.into(), - )); - } - let (consumed, event) = self.decoder.update(buf, &mut vec![])?; - self.reader.consume(consumed); - match event { - Decoded::Nothing => (), - Decoded::ImageEnd => self.at_eof = true, - // ignore more data - Decoded::ChunkComplete(_, _) | Decoded::ChunkBegin(_, _) | Decoded::ImageData => {} - Decoded::ImageDataFlushed => return Ok(()), - Decoded::PartialChunk(_) => {} - new => unreachable!("{:?}", new), - } - } - - Err(DecodingError::Format( - FormatErrorInner::UnexpectedEof.into(), - )) - } - - fn info(&self) -> Option<&Info> { - self.decoder.info.as_ref() - } -} - -/// PNG reader (mostly high-level interface) -/// -/// Provides a high level that iterates over lines or whole images. -pub struct Reader<R: Read> { - decoder: ReadDecoder<R>, - bpp: BytesPerPixel, - subframe: SubframeInfo, - /// Number of frame control chunks read. - /// By the APNG specification the total number must equal the count specified in the animation - /// control chunk. The IDAT image _may_ have such a chunk applying to it. - fctl_read: u32, - next_frame: SubframeIdx, - /// Previous raw line - prev: Vec<u8>, - /// Current raw line - current: Vec<u8>, - /// Start index of the current scan line. - scan_start: usize, - /// Output transformations - transform: Transformations, - /// This buffer is only used so that `next_row` and `next_interlaced_row` can return reference - /// to a byte slice. In a future version of this library, this buffer will be removed and - /// `next_row` and `next_interlaced_row` will write directly into a user provided output buffer. - scratch_buffer: Vec<u8>, - /// How resources we can spend (for example, on allocation). - limits: Limits, -} - -/// The subframe specific information. -/// -/// In APNG the frames are constructed by combining previous frame and a new subframe (through a -/// combination of `dispose_op` and `overlay_op`). These sub frames specify individual dimension -/// information and reuse the global interlace options. This struct encapsulates the state of where -/// in a particular IDAT-frame or subframe we are. -struct SubframeInfo { - width: u32, - height: u32, - rowlen: usize, - interlace: InterlaceIter, - consumed_and_flushed: bool, -} - -#[derive(Clone)] -enum InterlaceIter { - None(Range<u32>), - Adam7(utils::Adam7Iterator), -} - -/// Denote a frame as given by sequence numbers. -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -enum SubframeIdx { - /// The initial frame in an IDAT chunk without fcTL chunk applying to it. - /// Note that this variant precedes `Some` as IDAT frames precede fdAT frames and all fdAT - /// frames must have a fcTL applying to it. - Initial, - /// An IDAT frame with fcTL or an fdAT frame. - Some(u32), - /// The past-the-end index. - End, -} - -impl<R: Read> Reader<R> { - /// Reads all meta data until the next frame data starts. - /// Requires IHDR before the IDAT and fcTL before fdAT. - fn read_until_image_data(&mut self) -> Result<(), DecodingError> { - loop { - // This is somewhat ugly. The API requires us to pass a buffer to decode_next but we - // know that we will stop before reading any image data from the stream. 
Thus pass an - // empty buffer and assert that remains empty. - let mut buf = Vec::new(); - let state = self.decoder.decode_next(&mut buf)?; - assert!(buf.is_empty()); - - match state { - Some(Decoded::ChunkBegin(_, chunk::IDAT)) - | Some(Decoded::ChunkBegin(_, chunk::fdAT)) => break, - Some(Decoded::FrameControl(_)) => { - self.subframe = SubframeInfo::new(self.info()); - // The next frame is the one to which this chunk applies. - self.next_frame = SubframeIdx::Some(self.fctl_read); - // TODO: what about overflow here? That would imply there are more fctl chunks - // than can be specified in the animation control but also that we have read - // several gigabytes of data. - self.fctl_read += 1; - } - None => { - return Err(DecodingError::Format( - FormatErrorInner::MissingImageData.into(), - )) - } - // Ignore all other chunk events. Any other chunk may be between IDAT chunks, fdAT - // chunks and their control chunks. - _ => {} - } - } - - let info = self - .decoder - .info() - .ok_or(DecodingError::Format(FormatErrorInner::MissingIhdr.into()))?; - self.bpp = info.bpp_in_prediction(); - self.subframe = SubframeInfo::new(info); - - // Allocate output buffer. - let buflen = self.output_line_size(self.subframe.width); - if buflen > self.limits.bytes { - return Err(DecodingError::LimitsExceeded); - } - - self.prev.clear(); - self.prev.resize(self.subframe.rowlen, 0); - - Ok(()) - } - - /// Get information on the image. - /// - /// The structure will change as new frames of an animated image are decoded. - pub fn info(&self) -> &Info { - self.decoder.info().unwrap() - } - - /// Decodes the next frame into `buf`. - /// - /// Note that this decodes raw subframes that need to be mixed according to blend-op and - /// dispose-op by the caller. - /// - /// The caller must always provide a buffer large enough to hold a complete frame (the APNG - /// specification restricts subframes to the dimensions given in the image header). The region - /// that has been written be checked afterwards by calling `info` after a successful call and - /// inspecting the `frame_control` data. This requirement may be lifted in a later version of - /// `png`. - /// - /// Output lines will be written in row-major, packed matrix with width and height of the read - /// frame (or subframe), all samples are in big endian byte order where this matters. - pub fn next_frame(&mut self, buf: &mut [u8]) -> Result<OutputInfo, DecodingError> { - let subframe_idx = match self.decoder.info().unwrap().frame_control() { - None => SubframeIdx::Initial, - Some(_) => SubframeIdx::Some(self.fctl_read - 1), - }; - - if self.next_frame == SubframeIdx::End { - return Err(DecodingError::Parameter( - ParameterErrorKind::PolledAfterEndOfImage.into(), - )); - } else if self.next_frame != subframe_idx { - // Advance until we've read the info / fcTL for this frame. - self.read_until_image_data()?; - } - - if buf.len() < self.output_buffer_size() { - return Err(DecodingError::Parameter( - ParameterErrorKind::ImageBufferSize { - expected: buf.len(), - actual: self.output_buffer_size(), - } - .into(), - )); - } - - let (color_type, bit_depth) = self.output_color_type(); - let output_info = OutputInfo { - width: self.subframe.width, - height: self.subframe.height, - color_type, - bit_depth, - line_size: self.output_line_size(self.subframe.width), - }; - - self.current.clear(); - self.scan_start = 0; - let width = self.info().width; - if self.info().interlaced { - while let Some(InterlacedRow { - data: row, - interlace, - .. 
- }) = self.next_interlaced_row()? - { - let (line, pass) = match interlace { - InterlaceInfo::Adam7 { line, pass, .. } => (line, pass), - InterlaceInfo::Null => unreachable!("expected interlace information"), - }; - let samples = color_type.samples() as u8; - utils::expand_pass(buf, width, row, pass, line, samples * (bit_depth as u8)); - } - } else { - for row in buf - .chunks_exact_mut(output_info.line_size) - .take(self.subframe.height as usize) - { - self.next_interlaced_row_impl(self.subframe.rowlen, row)?; - } - } - - // Advance over the rest of data for this (sub-)frame. - if !self.subframe.consumed_and_flushed { - self.decoder.finish_decoding()?; - } - - // Advance our state to expect the next frame. - let past_end_subframe = self - .info() - .animation_control() - .map(|ac| ac.num_frames) - .unwrap_or(0); - self.next_frame = match self.next_frame { - SubframeIdx::End => unreachable!("Next frame called when already at image end"), - // Reached the end of non-animated image. - SubframeIdx::Initial if past_end_subframe == 0 => SubframeIdx::End, - // An animated image, expecting first subframe. - SubframeIdx::Initial => SubframeIdx::Some(0), - // This was the last subframe, slightly fuzzy condition in case of programmer error. - SubframeIdx::Some(idx) if past_end_subframe <= idx + 1 => SubframeIdx::End, - // Expecting next subframe. - SubframeIdx::Some(idx) => SubframeIdx::Some(idx + 1), - }; - - Ok(output_info) - } - - /// Returns the next processed row of the image - pub fn next_row(&mut self) -> Result<Option<Row>, DecodingError> { - self.next_interlaced_row() - .map(|v| v.map(|v| Row { data: v.data })) - } - - /// Returns the next processed row of the image - pub fn next_interlaced_row(&mut self) -> Result<Option<InterlacedRow>, DecodingError> { - let (rowlen, interlace) = match self.next_pass() { - Some((rowlen, interlace)) => (rowlen, interlace), - None => return Ok(None), - }; - - let width = if let InterlaceInfo::Adam7 { width, .. } = interlace { - width - } else { - self.subframe.width - }; - let output_line_size = self.output_line_size(width); - - // TODO: change the interface of `next_interlaced_row` to take an output buffer instead of - // making us return a reference to a buffer that we own. - let mut output_buffer = mem::take(&mut self.scratch_buffer); - output_buffer.resize(output_line_size, 0u8); - let ret = self.next_interlaced_row_impl(rowlen, &mut output_buffer); - self.scratch_buffer = output_buffer; - ret?; - - Ok(Some(InterlacedRow { - data: &self.scratch_buffer[..output_line_size], - interlace, - })) - } - - /// Fetch the next interlaced row and filter it according to our own transformations. - fn next_interlaced_row_impl( - &mut self, - rowlen: usize, - output_buffer: &mut [u8], - ) -> Result<(), DecodingError> { - self.next_raw_interlaced_row(rowlen)?; - let row = &self.prev[1..rowlen]; - - // Apply transformations and write resulting data to buffer. 
- let (color_type, bit_depth, trns) = { - let info = self.info(); - ( - info.color_type, - info.bit_depth as u8, - info.trns.is_some() || self.transform.contains(Transformations::ALPHA), - ) - }; - let expand = self.transform.contains(Transformations::EXPAND) - || self.transform.contains(Transformations::ALPHA); - let strip16 = bit_depth == 16 && self.transform.contains(Transformations::STRIP_16); - let info = self.decoder.info().unwrap(); - let trns = if trns { - Some(info.trns.as_deref()) - } else { - None - }; - match (color_type, trns) { - (ColorType::Indexed, _) if expand => { - output_buffer[..row.len()].copy_from_slice(row); - expand_paletted(output_buffer, info, trns)?; - } - (ColorType::Grayscale | ColorType::GrayscaleAlpha, _) if bit_depth < 8 && expand => { - output_buffer[..row.len()].copy_from_slice(row); - expand_gray_u8(output_buffer, info, trns) - } - (ColorType::Grayscale | ColorType::Rgb, Some(trns)) if expand => { - let channels = color_type.samples(); - if bit_depth == 8 { - utils::expand_trns_line(row, output_buffer, trns, channels); - } else if strip16 { - utils::expand_trns_and_strip_line16(row, output_buffer, trns, channels); - } else { - assert_eq!(bit_depth, 16); - utils::expand_trns_line16(row, output_buffer, trns, channels); - } - } - ( - ColorType::Grayscale | ColorType::GrayscaleAlpha | ColorType::Rgb | ColorType::Rgba, - _, - ) if strip16 => { - for i in 0..row.len() / 2 { - output_buffer[i] = row[2 * i]; - } - } - _ => output_buffer.copy_from_slice(row), - } - - Ok(()) - } - - /// Returns the color type and the number of bits per sample - /// of the data returned by `Reader::next_row` and Reader::frames`. - pub fn output_color_type(&self) -> (ColorType, BitDepth) { - use crate::common::ColorType::*; - let t = self.transform; - let info = self.info(); - if t == Transformations::IDENTITY { - (info.color_type, info.bit_depth) - } else { - let bits = match info.bit_depth as u8 { - 16 if t.intersects(Transformations::STRIP_16) => 8, - n if n < 8 - && (t.contains(Transformations::EXPAND) - || t.contains(Transformations::ALPHA)) => - { - 8 - } - n => n, - }; - let color_type = - if t.contains(Transformations::EXPAND) || t.contains(Transformations::ALPHA) { - let has_trns = info.trns.is_some() || t.contains(Transformations::ALPHA); - match info.color_type { - Grayscale if has_trns => GrayscaleAlpha, - Rgb if has_trns => Rgba, - Indexed if has_trns => Rgba, - Indexed => Rgb, - ct => ct, - } - } else { - info.color_type - }; - (color_type, BitDepth::from_u8(bits).unwrap()) - } - } - - /// Returns the number of bytes required to hold a deinterlaced image frame - /// that is decoded using the given input transformations. - pub fn output_buffer_size(&self) -> usize { - let (width, height) = self.info().size(); - let size = self.output_line_size(width); - size * height as usize - } - - /// Returns the number of bytes required to hold a deinterlaced row. 
- pub fn output_line_size(&self, width: u32) -> usize { - let (color, depth) = self.output_color_type(); - color.raw_row_length_from_width(depth, width) - 1 - } - - fn next_pass(&mut self) -> Option<(usize, InterlaceInfo)> { - match self.subframe.interlace { - InterlaceIter::Adam7(ref mut adam7) => { - let last_pass = adam7.current_pass(); - let (pass, line, width) = adam7.next()?; - let rowlen = self.info().raw_row_length_from_width(width); - if last_pass != pass { - self.prev.clear(); - self.prev.resize(rowlen, 0u8); - } - Some((rowlen, InterlaceInfo::Adam7 { pass, line, width })) - } - InterlaceIter::None(ref mut height) => { - let _ = height.next()?; - Some((self.subframe.rowlen, InterlaceInfo::Null)) - } - } - } - - /// Write the next raw interlaced row into `self.prev`. - /// - /// The scanline is filtered against the previous scanline according to the specification. - fn next_raw_interlaced_row(&mut self, rowlen: usize) -> Result<(), DecodingError> { - // Read image data until we have at least one full row (but possibly more than one). - while self.current.len() - self.scan_start < rowlen { - if self.subframe.consumed_and_flushed { - return Err(DecodingError::Format( - FormatErrorInner::NoMoreImageData.into(), - )); - } - - // Clear the current buffer before appending more data. - if self.scan_start > 0 { - self.current.drain(..self.scan_start).for_each(drop); - self.scan_start = 0; - } - - match self.decoder.decode_next(&mut self.current)? { - Some(Decoded::ImageData) => {} - Some(Decoded::ImageDataFlushed) => { - self.subframe.consumed_and_flushed = true; - } - None => { - return Err(DecodingError::Format( - if self.current.is_empty() { - FormatErrorInner::NoMoreImageData - } else { - FormatErrorInner::UnexpectedEndOfChunk - } - .into(), - )); - } - _ => (), - } - } - - // Get a reference to the current row and point scan_start to the next one. - let row = &mut self.current[self.scan_start..]; - self.scan_start += rowlen; - - // Unfilter the row. - let filter = FilterType::from_u8(row[0]).ok_or(DecodingError::Format( - FormatErrorInner::UnknownFilterMethod(row[0]).into(), - ))?; - unfilter(filter, self.bpp, &self.prev[1..rowlen], &mut row[1..rowlen]); - - // Save the current row for the next pass. - self.prev[..rowlen].copy_from_slice(&row[..rowlen]); - - Ok(()) - } -} - -impl SubframeInfo { - fn not_yet_init() -> Self { - SubframeInfo { - width: 0, - height: 0, - rowlen: 0, - interlace: InterlaceIter::None(0..0), - consumed_and_flushed: false, - } - } - - fn new(info: &Info) -> Self { - // The apng fctnl overrides width and height. - // All other data is set by the main info struct. - let (width, height) = if let Some(fc) = info.frame_control { - (fc.width, fc.height) - } else { - (info.width, info.height) - }; - - let interlace = if info.interlaced { - InterlaceIter::Adam7(utils::Adam7Iterator::new(width, height)) - } else { - InterlaceIter::None(0..height) - }; - - SubframeInfo { - width, - height, - rowlen: info.raw_row_length_from_width(width), - interlace, - consumed_and_flushed: false, - } - } -} - -fn expand_paletted( - buffer: &mut [u8], - info: &Info, - trns: Option<Option<&[u8]>>, -) -> Result<(), DecodingError> { - if let Some(palette) = info.palette.as_ref() { - if let BitDepth::Sixteen = info.bit_depth { - // This should have been caught earlier but let's check again. Can't hurt. 
- Err(DecodingError::Format( - FormatErrorInner::InvalidColorBitDepth { - color_type: ColorType::Indexed, - bit_depth: BitDepth::Sixteen, - } - .into(), - )) - } else { - let black = [0, 0, 0]; - if let Some(trns) = trns { - let trns = trns.unwrap_or(&[]); - // > The tRNS chunk shall not contain more alpha values than there are palette - // entries, but a tRNS chunk may contain fewer values than there are palette - // entries. In this case, the alpha value for all remaining palette entries is - // assumed to be 255. - // - // It seems, accepted reading is to fully *ignore* an invalid tRNS as if it were - // completely empty / all pixels are non-transparent. - let trns = if trns.len() <= palette.len() / 3 { - trns - } else { - &[] - }; - - utils::unpack_bits(buffer, 4, info.bit_depth as u8, |i, chunk| { - let (rgb, a) = ( - palette - .get(3 * i as usize..3 * i as usize + 3) - .unwrap_or(&black), - *trns.get(i as usize).unwrap_or(&0xFF), - ); - chunk[0] = rgb[0]; - chunk[1] = rgb[1]; - chunk[2] = rgb[2]; - chunk[3] = a; - }); - } else { - utils::unpack_bits(buffer, 3, info.bit_depth as u8, |i, chunk| { - let rgb = palette - .get(3 * i as usize..3 * i as usize + 3) - .unwrap_or(&black); - chunk[0] = rgb[0]; - chunk[1] = rgb[1]; - chunk[2] = rgb[2]; - }) - } - Ok(()) - } - } else { - Err(DecodingError::Format( - FormatErrorInner::PaletteRequired.into(), - )) - } -} - -fn expand_gray_u8(buffer: &mut [u8], info: &Info, trns: Option<Option<&[u8]>>) { - let rescale = true; - let scaling_factor = if rescale { - (255) / ((1u16 << info.bit_depth as u8) - 1) as u8 - } else { - 1 - }; - if let Some(trns) = trns { - utils::unpack_bits(buffer, 2, info.bit_depth as u8, |pixel, chunk| { - chunk[1] = if let Some(trns) = trns { - if pixel == trns[0] { - 0 - } else { - 0xFF - } - } else { - 0xFF - }; - chunk[0] = pixel * scaling_factor - }) - } else { - utils::unpack_bits(buffer, 1, info.bit_depth as u8, |val, chunk| { - chunk[0] = val * scaling_factor - }) - } -} - -#[cfg(test)] -mod tests { - use super::Decoder; - use std::io::{BufRead, Read, Result}; - use std::mem::discriminant; - - /// A reader that reads at most `n` bytes. 
- struct SmalBuf<R: BufRead> { - inner: R, - cap: usize, - } - - impl<R: BufRead> SmalBuf<R> { - fn new(inner: R, cap: usize) -> Self { - SmalBuf { inner, cap } - } - } - - impl<R: BufRead> Read for SmalBuf<R> { - fn read(&mut self, buf: &mut [u8]) -> Result<usize> { - let len = buf.len().min(self.cap); - self.inner.read(&mut buf[..len]) - } - } - - impl<R: BufRead> BufRead for SmalBuf<R> { - fn fill_buf(&mut self) -> Result<&[u8]> { - let buf = self.inner.fill_buf()?; - let len = buf.len().min(self.cap); - Ok(&buf[..len]) - } - - fn consume(&mut self, amt: usize) { - assert!(amt <= self.cap); - self.inner.consume(amt) - } - } - - #[test] - fn no_data_dup_on_finish() { - const IMG: &[u8] = include_bytes!(concat!( - env!("CARGO_MANIFEST_DIR"), - "/tests/bugfixes/x_issue#214.png" - )); - - let mut normal = Decoder::new(IMG).read_info().unwrap(); - - let mut buffer = vec![0; normal.output_buffer_size()]; - let normal = normal.next_frame(&mut buffer).unwrap_err(); - - let smal = Decoder::new(SmalBuf::new(IMG, 1)) - .read_info() - .unwrap() - .next_frame(&mut buffer) - .unwrap_err(); - - assert_eq!(discriminant(&normal), discriminant(&smal)); - } -} diff --git a/vendor/png/src/decoder/stream.rs b/vendor/png/src/decoder/stream.rs deleted file mode 100644 index f5df6e9..0000000 --- a/vendor/png/src/decoder/stream.rs +++ /dev/null @@ -1,1576 +0,0 @@ -extern crate crc32fast; - -use std::convert::From; -use std::default::Default; -use std::error; -use std::fmt; -use std::io; -use std::{borrow::Cow, cmp::min}; - -use crc32fast::Hasher as Crc32; - -use super::zlib::ZlibStream; -use crate::chunk::{self, ChunkType, IDAT, IEND, IHDR}; -use crate::common::{ - AnimationControl, BitDepth, BlendOp, ColorType, DisposeOp, FrameControl, Info, ParameterError, - PixelDimensions, ScaledFloat, SourceChromaticities, Unit, -}; -use crate::text_metadata::{ITXtChunk, TEXtChunk, TextDecodingError, ZTXtChunk}; -use crate::traits::ReadBytesExt; - -/// TODO check if these size are reasonable -pub const CHUNCK_BUFFER_SIZE: usize = 32 * 1024; - -/// Determines if checksum checks should be disabled globally. -/// -/// This is used only in fuzzing. `afl` automatically adds `--cfg fuzzing` to RUSTFLAGS which can -/// be used to detect that build. -const CHECKSUM_DISABLED: bool = cfg!(fuzzing); - -#[derive(Debug)] -enum U32Value { - // CHUNKS - Length, - Type(u32), - Crc(ChunkType), -} - -#[derive(Debug)] -enum State { - Signature(u8, [u8; 7]), - U32Byte3(U32Value, u32), - U32Byte2(U32Value, u32), - U32Byte1(U32Value, u32), - U32(U32Value), - ReadChunk(ChunkType), - PartialChunk(ChunkType), - DecodeData(ChunkType, usize), -} - -#[derive(Debug)] -/// Result of the decoding process -pub enum Decoded { - /// Nothing decoded yet - Nothing, - Header(u32, u32, BitDepth, ColorType, bool), - ChunkBegin(u32, ChunkType), - ChunkComplete(u32, ChunkType), - PixelDimensions(PixelDimensions), - AnimationControl(AnimationControl), - FrameControl(FrameControl), - /// Decoded raw image data. - ImageData, - /// The last of a consecutive chunk of IDAT was done. - /// This is distinct from ChunkComplete which only marks that some IDAT chunk was completed but - /// not that no additional IDAT chunk follows. - ImageDataFlushed, - PartialChunk(ChunkType), - ImageEnd, -} - -/// Any kind of error during PNG decoding. -/// -/// This enumeration provides a very rough analysis on the origin of the failure. That is, each -/// variant corresponds to one kind of actor causing the error. 
It should not be understood as
-/// direct blame, but it can inform the search for a root cause or indicate whether such a search is required.
-#[derive(Debug)]
-pub enum DecodingError {
-    /// An error in IO of the underlying reader.
-    IoError(io::Error),
-    /// The input image was not a valid PNG.
-    ///
-    /// There isn't a lot that can be done here, except that if the program itself was responsible
-    /// for creating this image, the generator should be investigated. This is internally
-    /// implemented with a large enum. If you are interested in accessing some of the more exact
-    /// information on the variant, we can discuss it in an issue.
-    Format(FormatError),
-    /// An interface was used incorrectly.
-    ///
-    /// This is used in cases where it's expected that the programmer might trip up and stability
-    /// could be affected. For example when:
-    ///
-    /// * The decoder is polled for more animation frames despite being done (or not being animated
-    ///   in the first place).
-    /// * The output buffer does not have the required size.
-    ///
-    /// As a rough guideline for introducing new variants: part of the requirements is dynamically
-    /// derived from the (untrusted) input data while the other half is from the caller. In the
-    /// above cases the number of frames respectively the required buffer size is determined by the
-    /// file, while the number of calls respectively the provided buffer comes from the caller.
-    ///
-    /// If you're an application you might want to signal that a bug report is appreciated.
-    Parameter(ParameterError),
-    /// The image would have required exceeding the limits configured with the decoder.
-    ///
-    /// Note that your allocations, e.g. when reading into a pre-allocated buffer, are __NOT__
-    /// considered part of the limits. Nevertheless, required intermediate buffers such as for
-    /// single lines are checked against the limit.
-    ///
-    /// Note that this is enforced on a best-effort basis.
-    LimitsExceeded,
-}
-
-#[derive(Debug)]
-pub struct FormatError {
-    inner: FormatErrorInner,
-}
-
-#[derive(Debug)]
-pub(crate) enum FormatErrorInner {
-    /// Bad framing.
-    CrcMismatch {
-        /// Stored CRC32 value
-        crc_val: u32,
-        /// Calculated CRC32 sum
-        crc_sum: u32,
-        /// The chunk type that has the CRC mismatch.
-        chunk: ChunkType,
-    },
-    /// Not a PNG, the magic signature is missing.
-    InvalidSignature,
-    /// End of file, within a chunk event.
-    UnexpectedEof,
-    /// End of file, while expecting more image data.
-    UnexpectedEndOfChunk,
-    // Errors of chunk level ordering, missing chunks etc.
-    /// IHDR must occur.
-    MissingIhdr,
-    /// fcTL must occur if an animated chunk occurs.
-    MissingFctl,
-    /// Image data that was indicated in IHDR or acTL is missing.
-    MissingImageData,
-    /// 4.3., must be first.
-    ChunkBeforeIhdr {
-        kind: ChunkType,
-    },
-    /// 4.3., some chunks must be before IDAT.
-    AfterIdat {
-        kind: ChunkType,
-    },
-    /// 4.3., some chunks must be before PLTE.
-    AfterPlte {
-        kind: ChunkType,
-    },
-    /// 4.3., some chunks must be between PLTE and IDAT.
-    OutsidePlteIdat {
-        kind: ChunkType,
-    },
-    /// 4.3., some chunks must be unique.
-    DuplicateChunk {
-        kind: ChunkType,
-    },
-    /// Specifically for fdAT there is an embedded sequence number for chunks.
-    ApngOrder {
-        /// The sequence number in the chunk.
-        present: u32,
-        /// The one that should have been present.
-        expected: u32,
-    },
-    // Errors specific to particular chunk data to be validated.
-    /// The palette did not contain even a single entry.
-    ShortPalette {
-        expected: usize,
-        len: usize,
-    },
-    /// A palettized image did not have a palette.
-    PaletteRequired,
-    /// The color-depth combination is not valid according to Table 11.1.
-    InvalidColorBitDepth {
-        color_type: ColorType,
-        bit_depth: BitDepth,
-    },
-    ColorWithBadTrns(ColorType),
-    InvalidBitDepth(u8),
-    InvalidColorType(u8),
-    InvalidDisposeOp(u8),
-    InvalidBlendOp(u8),
-    InvalidUnit(u8),
-    /// The rendering intent of the sRGB chunk is invalid.
-    InvalidSrgbRenderingIntent(u8),
-    UnknownCompressionMethod(u8),
-    UnknownFilterMethod(u8),
-    UnknownInterlaceMethod(u8),
-    /// The subframe is not in bounds of the image.
-    /// TODO: fields with relevant data.
-    BadSubFrameBounds {},
-    // Errors specific to the IDAT/fdAT chunks.
-    /// The compression of the data stream was faulty.
-    CorruptFlateStream {
-        err: fdeflate::DecompressionError,
-    },
-    /// The image data chunk was too short for the expected pixel count.
-    NoMoreImageData,
-    /// Bad text encoding.
-    BadTextEncoding(TextDecodingError),
-}
-
-impl error::Error for DecodingError {
-    fn cause(&self) -> Option<&(dyn error::Error + 'static)> {
-        match self {
-            DecodingError::IoError(err) => Some(err),
-            _ => None,
-        }
-    }
-}
-
-impl fmt::Display for DecodingError {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-        use self::DecodingError::*;
-        match self {
-            IoError(err) => write!(fmt, "{}", err),
-            Parameter(desc) => write!(fmt, "{}", &desc),
-            Format(desc) => write!(fmt, "{}", desc),
-            LimitsExceeded => write!(fmt, "limits are exceeded"),
-        }
-    }
-}
-
-impl fmt::Display for FormatError {
-    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
-        use FormatErrorInner::*;
-        match &self.inner {
-            CrcMismatch {
-                crc_val,
-                crc_sum,
-                chunk,
-                ..
-            } => write!(
-                fmt,
-                "CRC error: expected 0x{:x} have 0x{:x} while decoding {:?} chunk.",
-                crc_val, crc_sum, chunk
-            ),
-            MissingIhdr => write!(fmt, "IHDR chunk missing"),
-            MissingFctl => write!(fmt, "fcTL chunk missing before fdAT chunk."),
-            MissingImageData => write!(fmt, "IDAT or fDAT chunk is missing."),
-            ChunkBeforeIhdr { kind } => write!(fmt, "{:?} chunk appeared before IHDR chunk", kind),
-            AfterIdat { kind } => write!(fmt, "Chunk {:?} is invalid after IDAT chunk.", kind),
-            AfterPlte { kind } => write!(fmt, "Chunk {:?} is invalid after PLTE chunk.", kind),
-            OutsidePlteIdat { kind } => write!(
-                fmt,
-                "Chunk {:?} must appear between PLTE and IDAT chunks.",
-                kind
-            ),
-            DuplicateChunk { kind } => write!(fmt, "Chunk {:?} must appear at most once.", kind),
-            ApngOrder { present, expected } => write!(
-                fmt,
-                "Sequence is not in order, expected #{} got #{}.",
-                expected, present,
-            ),
-            ShortPalette { expected, len } => write!(
-                fmt,
-                "Not enough palette entries, expected {} got {}.",
-                expected, len
-            ),
-            PaletteRequired => write!(fmt, "Missing palette of indexed image."),
-            InvalidColorBitDepth {
-                color_type,
-                bit_depth,
-            } => write!(
-                fmt,
-                "Invalid color/depth combination in header: {:?}/{:?}",
-                color_type, bit_depth,
-            ),
-            ColorWithBadTrns(color_type) => write!(
-                fmt,
-                "Transparency chunk found for color type {:?}.",
-                color_type
-            ),
-            InvalidBitDepth(nr) => write!(fmt, "Invalid bit depth {}.", nr),
-            InvalidColorType(nr) => write!(fmt, "Invalid color type {}.", nr),
-            InvalidDisposeOp(nr) => write!(fmt, "Invalid dispose op {}.", nr),
-            InvalidBlendOp(nr) => write!(fmt, "Invalid blend op {}.", nr),
-            InvalidUnit(nr) => write!(fmt, "Invalid physical pixel size unit {}.", nr),
-            InvalidSrgbRenderingIntent(nr) => write!(fmt, "Invalid sRGB rendering intent {}.", nr),
-            UnknownCompressionMethod(nr) => write!(fmt, "Unknown
compression method {}.", nr), - UnknownFilterMethod(nr) => write!(fmt, "Unknown filter method {}.", nr), - UnknownInterlaceMethod(nr) => write!(fmt, "Unknown interlace method {}.", nr), - BadSubFrameBounds {} => write!(fmt, "Sub frame is out-of-bounds."), - InvalidSignature => write!(fmt, "Invalid PNG signature."), - UnexpectedEof => write!(fmt, "Unexpected end of data before image end."), - UnexpectedEndOfChunk => write!(fmt, "Unexpected end of data within a chunk."), - NoMoreImageData => write!(fmt, "IDAT or fDAT chunk is has not enough data for image."), - CorruptFlateStream { err } => { - write!(fmt, "Corrupt deflate stream. ")?; - write!(fmt, "{:?}", err) - } - // TODO: Wrap more info in the enum variant - BadTextEncoding(tde) => { - match tde { - TextDecodingError::Unrepresentable => { - write!(fmt, "Unrepresentable data in tEXt chunk.") - } - TextDecodingError::InvalidKeywordSize => { - write!(fmt, "Keyword empty or longer than 79 bytes.") - } - TextDecodingError::MissingNullSeparator => { - write!(fmt, "No null separator in tEXt chunk.") - } - TextDecodingError::InflationError => { - write!(fmt, "Invalid compressed text data.") - } - TextDecodingError::OutOfDecompressionSpace => { - write!(fmt, "Out of decompression space. Try with a larger limit.") - } - TextDecodingError::InvalidCompressionMethod => { - write!(fmt, "Using an unrecognized byte as compression method.") - } - TextDecodingError::InvalidCompressionFlag => { - write!(fmt, "Using a flag that is not 0 or 255 as a compression flag for iTXt chunk.") - } - TextDecodingError::MissingCompressionFlag => { - write!(fmt, "No compression flag in the iTXt chunk.") - } - } - } - } - } -} - -impl From<io::Error> for DecodingError { - fn from(err: io::Error) -> DecodingError { - DecodingError::IoError(err) - } -} - -impl From<FormatError> for DecodingError { - fn from(err: FormatError) -> DecodingError { - DecodingError::Format(err) - } -} - -impl From<FormatErrorInner> for FormatError { - fn from(inner: FormatErrorInner) -> Self { - FormatError { inner } - } -} - -impl From<DecodingError> for io::Error { - fn from(err: DecodingError) -> io::Error { - match err { - DecodingError::IoError(err) => err, - err => io::Error::new(io::ErrorKind::Other, err.to_string()), - } - } -} - -impl From<TextDecodingError> for DecodingError { - fn from(tbe: TextDecodingError) -> Self { - DecodingError::Format(FormatError { - inner: FormatErrorInner::BadTextEncoding(tbe), - }) - } -} - -/// Decoder configuration options -#[derive(Clone)] -pub struct DecodeOptions { - ignore_adler32: bool, - ignore_crc: bool, - ignore_text_chunk: bool, -} - -impl Default for DecodeOptions { - fn default() -> Self { - Self { - ignore_adler32: true, - ignore_crc: false, - ignore_text_chunk: false, - } - } -} - -impl DecodeOptions { - /// When set, the decoder will not compute and verify the Adler-32 checksum. - /// - /// Defaults to `true`. - pub fn set_ignore_adler32(&mut self, ignore_adler32: bool) { - self.ignore_adler32 = ignore_adler32; - } - - /// When set, the decoder will not compute and verify the CRC code. - /// - /// Defaults to `false`. - pub fn set_ignore_crc(&mut self, ignore_crc: bool) { - self.ignore_crc = ignore_crc; - } - - /// Flag to ignore computing and verifying the Adler-32 checksum and CRC - /// code. - pub fn set_ignore_checksums(&mut self, ignore_checksums: bool) { - self.ignore_adler32 = ignore_checksums; - self.ignore_crc = ignore_checksums; - } - - /// Ignore text chunks while decoding. - /// - /// Defaults to `false`. 
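(Editor's note: an illustrative sketch, not part of the original source, of how these options are wired together. The setter documented above is defined immediately below, `StreamingDecoder::new_with_options` and `update` appear further down in this file, `png_bytes` is an assumed input slice, and error handling is elided inside a function returning `Result<_, DecodingError>`.)

    let mut options = DecodeOptions::default();
    options.set_ignore_checksums(true);  // skip CRC and Adler-32 verification
    options.set_ignore_text_chunk(true); // drop tEXt/zTXt/iTXt metadata

    let mut decoder = StreamingDecoder::new_with_options(options);
    let mut image_data = Vec::new();
    let mut input: &[u8] = png_bytes;    // assumed: the raw bytes of a PNG file
    while !input.is_empty() {
        let (consumed, decoded) = decoder.update(input, &mut image_data)?;
        input = &input[consumed..];
        if let Decoded::ImageEnd = decoded {
            break; // stop once IEND has been seen
        }
    }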
- pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) { - self.ignore_text_chunk = ignore_text_chunk; - } -} - -/// PNG StreamingDecoder (low-level interface) -/// -/// By default, the decoder does not verify Adler-32 checksum computation. To -/// enable checksum verification, set it with [`StreamingDecoder::set_ignore_adler32`] -/// before starting decompression. -pub struct StreamingDecoder { - state: Option<State>, - current_chunk: ChunkState, - /// The inflater state handling consecutive `IDAT` and `fdAT` chunks. - inflater: ZlibStream, - /// The complete image info read from all prior chunks. - pub(crate) info: Option<Info<'static>>, - /// The animation chunk sequence number. - current_seq_no: Option<u32>, - /// Stores where in decoding an `fdAT` chunk we are. - apng_seq_handled: bool, - have_idat: bool, - decode_options: DecodeOptions, -} - -struct ChunkState { - /// The type of the current chunk. - /// Relevant for `IDAT` and `fdAT` which aggregate consecutive chunks of their own type. - type_: ChunkType, - - /// Partial crc until now. - crc: Crc32, - - /// Remaining bytes to be read. - remaining: u32, - - /// Non-decoded bytes in the chunk. - raw_bytes: Vec<u8>, -} - -impl StreamingDecoder { - /// Creates a new StreamingDecoder - /// - /// Allocates the internal buffers. - pub fn new() -> StreamingDecoder { - StreamingDecoder::new_with_options(DecodeOptions::default()) - } - - pub fn new_with_options(decode_options: DecodeOptions) -> StreamingDecoder { - let mut inflater = ZlibStream::new(); - inflater.set_ignore_adler32(decode_options.ignore_adler32); - - StreamingDecoder { - state: Some(State::Signature(0, [0; 7])), - current_chunk: ChunkState::default(), - inflater, - info: None, - current_seq_no: None, - apng_seq_handled: false, - have_idat: false, - decode_options, - } - } - - /// Resets the StreamingDecoder - pub fn reset(&mut self) { - self.state = Some(State::Signature(0, [0; 7])); - self.current_chunk.crc = Crc32::new(); - self.current_chunk.remaining = 0; - self.current_chunk.raw_bytes.clear(); - self.inflater.reset(); - self.info = None; - self.current_seq_no = None; - self.apng_seq_handled = false; - self.have_idat = false; - } - - /// Provides access to the inner `info` field - pub fn info(&self) -> Option<&Info<'static>> { - self.info.as_ref() - } - - pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) { - self.decode_options.set_ignore_text_chunk(ignore_text_chunk); - } - - /// Return whether the decoder is set to ignore the Adler-32 checksum. - pub fn ignore_adler32(&self) -> bool { - self.inflater.ignore_adler32() - } - - /// Set whether to compute and verify the Adler-32 checksum during - /// decompression. Return `true` if the flag was successfully set. - /// - /// The decoder defaults to `true`. - /// - /// This flag cannot be modified after decompression has started until the - /// [`StreamingDecoder`] is reset. - pub fn set_ignore_adler32(&mut self, ignore_adler32: bool) -> bool { - self.inflater.set_ignore_adler32(ignore_adler32) - } - - /// Set whether to compute and verify the Adler-32 checksum during - /// decompression. - /// - /// The decoder defaults to `false`. - pub fn set_ignore_crc(&mut self, ignore_crc: bool) { - self.decode_options.set_ignore_crc(ignore_crc) - } - - /// Low level StreamingDecoder interface. - /// - /// Allows to stream partial data to the encoder. Returns a tuple containing the bytes that have - /// been consumed from the input buffer and the current decoding result. 
If the decoded chunk - /// was an image data chunk, it also appends the read data to `image_data`. - pub fn update( - &mut self, - mut buf: &[u8], - image_data: &mut Vec<u8>, - ) -> Result<(usize, Decoded), DecodingError> { - let len = buf.len(); - while !buf.is_empty() && self.state.is_some() { - match self.next_state(buf, image_data) { - Ok((bytes, Decoded::Nothing)) => buf = &buf[bytes..], - Ok((bytes, result)) => { - buf = &buf[bytes..]; - return Ok((len - buf.len(), result)); - } - Err(err) => return Err(err), - } - } - Ok((len - buf.len(), Decoded::Nothing)) - } - - fn next_state<'a>( - &'a mut self, - buf: &[u8], - image_data: &mut Vec<u8>, - ) -> Result<(usize, Decoded), DecodingError> { - use self::State::*; - - let current_byte = buf[0]; - - // Driver should ensure that state is never None - let state = self.state.take().unwrap(); - - match state { - Signature(i, mut signature) if i < 7 => { - signature[i as usize] = current_byte; - self.state = Some(Signature(i + 1, signature)); - Ok((1, Decoded::Nothing)) - } - Signature(_, signature) - if signature == [137, 80, 78, 71, 13, 10, 26] && current_byte == 10 => - { - self.state = Some(U32(U32Value::Length)); - Ok((1, Decoded::Nothing)) - } - Signature(..) => Err(DecodingError::Format( - FormatErrorInner::InvalidSignature.into(), - )), - U32Byte3(type_, mut val) => { - use self::U32Value::*; - val |= u32::from(current_byte); - match type_ { - Length => { - self.state = Some(U32(Type(val))); - Ok((1, Decoded::Nothing)) - } - Type(length) => { - let type_str = ChunkType([ - (val >> 24) as u8, - (val >> 16) as u8, - (val >> 8) as u8, - val as u8, - ]); - if type_str != self.current_chunk.type_ - && (self.current_chunk.type_ == IDAT - || self.current_chunk.type_ == chunk::fdAT) - { - self.current_chunk.type_ = type_str; - self.inflater.finish_compressed_chunks(image_data)?; - self.inflater.reset(); - self.state = Some(U32Byte3(Type(length), val & !0xff)); - return Ok((0, Decoded::ImageDataFlushed)); - } - self.current_chunk.type_ = type_str; - if !self.decode_options.ignore_crc { - self.current_chunk.crc.reset(); - self.current_chunk.crc.update(&type_str.0); - } - self.current_chunk.remaining = length; - self.apng_seq_handled = false; - self.current_chunk.raw_bytes.clear(); - self.state = Some(ReadChunk(type_str)); - Ok((1, Decoded::ChunkBegin(length, type_str))) - } - Crc(type_str) => { - // If ignore_crc is set, do not calculate CRC. 
We set - // sum=val so that it short-circuits to true in the next - // if-statement block - let sum = if self.decode_options.ignore_crc { - val - } else { - self.current_chunk.crc.clone().finalize() - }; - - if val == sum || CHECKSUM_DISABLED { - self.state = Some(State::U32(U32Value::Length)); - if type_str == IEND { - Ok((1, Decoded::ImageEnd)) - } else { - Ok((1, Decoded::ChunkComplete(val, type_str))) - } - } else { - Err(DecodingError::Format( - FormatErrorInner::CrcMismatch { - crc_val: val, - crc_sum: sum, - chunk: type_str, - } - .into(), - )) - } - } - } - } - U32Byte2(type_, val) => { - self.state = Some(U32Byte3(type_, val | u32::from(current_byte) << 8)); - Ok((1, Decoded::Nothing)) - } - U32Byte1(type_, val) => { - self.state = Some(U32Byte2(type_, val | u32::from(current_byte) << 16)); - Ok((1, Decoded::Nothing)) - } - U32(type_) => { - self.state = Some(U32Byte1(type_, u32::from(current_byte) << 24)); - Ok((1, Decoded::Nothing)) - } - PartialChunk(type_str) => { - match type_str { - IDAT => { - self.have_idat = true; - self.state = Some(DecodeData(type_str, 0)); - Ok((0, Decoded::PartialChunk(type_str))) - } - chunk::fdAT => { - let data_start; - if let Some(seq_no) = self.current_seq_no { - if !self.apng_seq_handled { - data_start = 4; - let mut buf = &self.current_chunk.raw_bytes[..]; - let next_seq_no = buf.read_be()?; - if next_seq_no != seq_no + 1 { - return Err(DecodingError::Format( - FormatErrorInner::ApngOrder { - present: next_seq_no, - expected: seq_no + 1, - } - .into(), - )); - } - self.current_seq_no = Some(next_seq_no); - self.apng_seq_handled = true; - } else { - data_start = 0; - } - } else { - return Err(DecodingError::Format( - FormatErrorInner::MissingFctl.into(), - )); - } - self.state = Some(DecodeData(type_str, data_start)); - Ok((0, Decoded::PartialChunk(type_str))) - } - // Handle other chunks - _ => { - if self.current_chunk.remaining == 0 { - // complete chunk - Ok((0, self.parse_chunk(type_str)?)) - } else { - // Make sure we have room to read more of the chunk. - // We need it fully before parsing. 
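                        // Editor's note: reserve_current_chunk() (defined further down) grows this
                        // buffer roughly geometrically, but caps its capacity at about 1 MiB
                        // (0x10_0000 bytes); a single non-IDAT/fdAT chunk needing more than that
                        // aborts decoding with DecodingError::LimitsExceeded.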
- self.reserve_current_chunk()?; - - self.state = Some(ReadChunk(type_str)); - Ok((0, Decoded::PartialChunk(type_str))) - } - } - } - } - ReadChunk(type_str) => { - // The _previous_ event wanted to return the contents of raw_bytes, and let the - // caller consume it, - if self.current_chunk.remaining == 0 { - self.state = Some(U32(U32Value::Crc(type_str))); - Ok((0, Decoded::Nothing)) - } else { - let ChunkState { - crc, - remaining, - raw_bytes, - type_: _, - } = &mut self.current_chunk; - - let buf_avail = raw_bytes.capacity() - raw_bytes.len(); - let bytes_avail = min(buf.len(), buf_avail); - let n = min(*remaining, bytes_avail as u32); - if buf_avail == 0 { - self.state = Some(PartialChunk(type_str)); - Ok((0, Decoded::Nothing)) - } else { - let buf = &buf[..n as usize]; - if !self.decode_options.ignore_crc { - crc.update(buf); - } - raw_bytes.extend_from_slice(buf); - - *remaining -= n; - if *remaining == 0 { - self.state = Some(PartialChunk(type_str)); - } else { - self.state = Some(ReadChunk(type_str)); - } - Ok((n as usize, Decoded::Nothing)) - } - } - } - DecodeData(type_str, mut n) => { - let chunk_len = self.current_chunk.raw_bytes.len(); - let chunk_data = &self.current_chunk.raw_bytes[n..]; - let c = self.inflater.decompress(chunk_data, image_data)?; - n += c; - if n == chunk_len && c == 0 { - self.current_chunk.raw_bytes.clear(); - self.state = Some(ReadChunk(type_str)); - Ok((0, Decoded::ImageData)) - } else { - self.state = Some(DecodeData(type_str, n)); - Ok((0, Decoded::ImageData)) - } - } - } - } - - fn reserve_current_chunk(&mut self) -> Result<(), DecodingError> { - // FIXME: use limits, also do so in iccp/zlib decompression. - const MAX: usize = 0x10_0000; - let buffer = &mut self.current_chunk.raw_bytes; - - // Double if necessary, but no more than until the limit is reached. - let reserve_size = MAX.saturating_sub(buffer.capacity()).min(buffer.len()); - buffer.reserve_exact(reserve_size); - - if buffer.capacity() == buffer.len() { - Err(DecodingError::LimitsExceeded) - } else { - Ok(()) - } - } - - fn parse_chunk(&mut self, type_str: ChunkType) -> Result<Decoded, DecodingError> { - self.state = Some(State::U32(U32Value::Crc(type_str))); - if self.info.is_none() && type_str != IHDR { - return Err(DecodingError::Format( - FormatErrorInner::ChunkBeforeIhdr { kind: type_str }.into(), - )); - } - match match type_str { - IHDR => self.parse_ihdr(), - chunk::PLTE => self.parse_plte(), - chunk::tRNS => self.parse_trns(), - chunk::pHYs => self.parse_phys(), - chunk::gAMA => self.parse_gama(), - chunk::acTL => self.parse_actl(), - chunk::fcTL => self.parse_fctl(), - chunk::cHRM => self.parse_chrm(), - chunk::sRGB => self.parse_srgb(), - chunk::iCCP => self.parse_iccp(), - chunk::tEXt if !self.decode_options.ignore_text_chunk => self.parse_text(), - chunk::zTXt if !self.decode_options.ignore_text_chunk => self.parse_ztxt(), - chunk::iTXt if !self.decode_options.ignore_text_chunk => self.parse_itxt(), - _ => Ok(Decoded::PartialChunk(type_str)), - } { - Err(err) => { - // Borrow of self ends here, because Decoding error does not borrow self. 
- self.state = None; - Err(err) - } - ok => ok, - } - } - - fn parse_fctl(&mut self) -> Result<Decoded, DecodingError> { - let mut buf = &self.current_chunk.raw_bytes[..]; - let next_seq_no = buf.read_be()?; - - // Assuming that fcTL is required before *every* fdAT-sequence - self.current_seq_no = Some(if let Some(seq_no) = self.current_seq_no { - if next_seq_no != seq_no + 1 { - return Err(DecodingError::Format( - FormatErrorInner::ApngOrder { - expected: seq_no + 1, - present: next_seq_no, - } - .into(), - )); - } - next_seq_no - } else { - if next_seq_no != 0 { - return Err(DecodingError::Format( - FormatErrorInner::ApngOrder { - expected: 0, - present: next_seq_no, - } - .into(), - )); - } - 0 - }); - self.inflater.reset(); - let fc = FrameControl { - sequence_number: next_seq_no, - width: buf.read_be()?, - height: buf.read_be()?, - x_offset: buf.read_be()?, - y_offset: buf.read_be()?, - delay_num: buf.read_be()?, - delay_den: buf.read_be()?, - dispose_op: { - let dispose_op = buf.read_be()?; - match DisposeOp::from_u8(dispose_op) { - Some(dispose_op) => dispose_op, - None => { - return Err(DecodingError::Format( - FormatErrorInner::InvalidDisposeOp(dispose_op).into(), - )) - } - } - }, - blend_op: { - let blend_op = buf.read_be()?; - match BlendOp::from_u8(blend_op) { - Some(blend_op) => blend_op, - None => { - return Err(DecodingError::Format( - FormatErrorInner::InvalidBlendOp(blend_op).into(), - )) - } - } - }, - }; - self.info.as_ref().unwrap().validate(&fc)?; - self.info.as_mut().unwrap().frame_control = Some(fc); - Ok(Decoded::FrameControl(fc)) - } - - fn parse_actl(&mut self) -> Result<Decoded, DecodingError> { - if self.have_idat { - Err(DecodingError::Format( - FormatErrorInner::AfterIdat { kind: chunk::acTL }.into(), - )) - } else { - let mut buf = &self.current_chunk.raw_bytes[..]; - let actl = AnimationControl { - num_frames: buf.read_be()?, - num_plays: buf.read_be()?, - }; - self.info.as_mut().unwrap().animation_control = Some(actl); - Ok(Decoded::AnimationControl(actl)) - } - } - - fn parse_plte(&mut self) -> Result<Decoded, DecodingError> { - let info = self.info.as_mut().unwrap(); - if info.palette.is_some() { - // Only one palette is allowed - Err(DecodingError::Format( - FormatErrorInner::DuplicateChunk { kind: chunk::PLTE }.into(), - )) - } else { - info.palette = Some(Cow::Owned(self.current_chunk.raw_bytes.clone())); - Ok(Decoded::Nothing) - } - } - - fn parse_trns(&mut self) -> Result<Decoded, DecodingError> { - let info = self.info.as_mut().unwrap(); - if info.trns.is_some() { - return Err(DecodingError::Format( - FormatErrorInner::DuplicateChunk { kind: chunk::PLTE }.into(), - )); - } - let (color_type, bit_depth) = { (info.color_type, info.bit_depth as u8) }; - let mut vec = self.current_chunk.raw_bytes.clone(); - let len = vec.len(); - match color_type { - ColorType::Grayscale => { - if len < 2 { - return Err(DecodingError::Format( - FormatErrorInner::ShortPalette { expected: 2, len }.into(), - )); - } - if bit_depth < 16 { - vec[0] = vec[1]; - vec.truncate(1); - } - info.trns = Some(Cow::Owned(vec)); - Ok(Decoded::Nothing) - } - ColorType::Rgb => { - if len < 6 { - return Err(DecodingError::Format( - FormatErrorInner::ShortPalette { expected: 6, len }.into(), - )); - } - if bit_depth < 16 { - vec[0] = vec[1]; - vec[1] = vec[3]; - vec[2] = vec[5]; - vec.truncate(3); - } - info.trns = Some(Cow::Owned(vec)); - Ok(Decoded::Nothing) - } - ColorType::Indexed => { - // The transparency chunk must be after the palette chunk and - // before the data chunk. 
- if info.palette.is_none() { - return Err(DecodingError::Format( - FormatErrorInner::AfterPlte { kind: chunk::tRNS }.into(), - )); - } else if self.have_idat { - return Err(DecodingError::Format( - FormatErrorInner::OutsidePlteIdat { kind: chunk::tRNS }.into(), - )); - } - - info.trns = Some(Cow::Owned(vec)); - Ok(Decoded::Nothing) - } - c => Err(DecodingError::Format( - FormatErrorInner::ColorWithBadTrns(c).into(), - )), - } - } - - fn parse_phys(&mut self) -> Result<Decoded, DecodingError> { - let info = self.info.as_mut().unwrap(); - if self.have_idat { - Err(DecodingError::Format( - FormatErrorInner::AfterIdat { kind: chunk::pHYs }.into(), - )) - } else if info.pixel_dims.is_some() { - Err(DecodingError::Format( - FormatErrorInner::DuplicateChunk { kind: chunk::pHYs }.into(), - )) - } else { - let mut buf = &self.current_chunk.raw_bytes[..]; - let xppu = buf.read_be()?; - let yppu = buf.read_be()?; - let unit = buf.read_be()?; - let unit = match Unit::from_u8(unit) { - Some(unit) => unit, - None => { - return Err(DecodingError::Format( - FormatErrorInner::InvalidUnit(unit).into(), - )) - } - }; - let pixel_dims = PixelDimensions { xppu, yppu, unit }; - info.pixel_dims = Some(pixel_dims); - Ok(Decoded::PixelDimensions(pixel_dims)) - } - } - - fn parse_chrm(&mut self) -> Result<Decoded, DecodingError> { - let info = self.info.as_mut().unwrap(); - if self.have_idat { - Err(DecodingError::Format( - FormatErrorInner::AfterIdat { kind: chunk::cHRM }.into(), - )) - } else if info.chrm_chunk.is_some() { - Err(DecodingError::Format( - FormatErrorInner::DuplicateChunk { kind: chunk::cHRM }.into(), - )) - } else { - let mut buf = &self.current_chunk.raw_bytes[..]; - let white_x: u32 = buf.read_be()?; - let white_y: u32 = buf.read_be()?; - let red_x: u32 = buf.read_be()?; - let red_y: u32 = buf.read_be()?; - let green_x: u32 = buf.read_be()?; - let green_y: u32 = buf.read_be()?; - let blue_x: u32 = buf.read_be()?; - let blue_y: u32 = buf.read_be()?; - - let source_chromaticities = SourceChromaticities { - white: ( - ScaledFloat::from_scaled(white_x), - ScaledFloat::from_scaled(white_y), - ), - red: ( - ScaledFloat::from_scaled(red_x), - ScaledFloat::from_scaled(red_y), - ), - green: ( - ScaledFloat::from_scaled(green_x), - ScaledFloat::from_scaled(green_y), - ), - blue: ( - ScaledFloat::from_scaled(blue_x), - ScaledFloat::from_scaled(blue_y), - ), - }; - - info.chrm_chunk = Some(source_chromaticities); - // Ignore chromaticities if sRGB profile is used. - if info.srgb.is_none() { - info.source_chromaticities = Some(source_chromaticities); - } - - Ok(Decoded::Nothing) - } - } - - fn parse_gama(&mut self) -> Result<Decoded, DecodingError> { - let info = self.info.as_mut().unwrap(); - if self.have_idat { - Err(DecodingError::Format( - FormatErrorInner::AfterIdat { kind: chunk::gAMA }.into(), - )) - } else if info.gama_chunk.is_some() { - Err(DecodingError::Format( - FormatErrorInner::DuplicateChunk { kind: chunk::gAMA }.into(), - )) - } else { - let mut buf = &self.current_chunk.raw_bytes[..]; - let source_gamma: u32 = buf.read_be()?; - let source_gamma = ScaledFloat::from_scaled(source_gamma); - - info.gama_chunk = Some(source_gamma); - // Ignore chromaticities if sRGB profile is used. 
- if info.srgb.is_none() { - info.source_gamma = Some(source_gamma); - } - - Ok(Decoded::Nothing) - } - } - - fn parse_srgb(&mut self) -> Result<Decoded, DecodingError> { - let info = self.info.as_mut().unwrap(); - if self.have_idat { - Err(DecodingError::Format( - FormatErrorInner::AfterIdat { kind: chunk::acTL }.into(), - )) - } else if info.srgb.is_some() { - Err(DecodingError::Format( - FormatErrorInner::DuplicateChunk { kind: chunk::sRGB }.into(), - )) - } else { - let mut buf = &self.current_chunk.raw_bytes[..]; - let raw: u8 = buf.read_be()?; // BE is is nonsense for single bytes, but this way the size is checked. - let rendering_intent = crate::SrgbRenderingIntent::from_raw(raw).ok_or_else(|| { - FormatError::from(FormatErrorInner::InvalidSrgbRenderingIntent(raw)) - })?; - - // Set srgb and override source gamma and chromaticities. - info.srgb = Some(rendering_intent); - info.source_gamma = Some(crate::srgb::substitute_gamma()); - info.source_chromaticities = Some(crate::srgb::substitute_chromaticities()); - Ok(Decoded::Nothing) - } - } - - fn parse_iccp(&mut self) -> Result<Decoded, DecodingError> { - let info = self.info.as_mut().unwrap(); - if self.have_idat { - Err(DecodingError::Format( - FormatErrorInner::AfterIdat { kind: chunk::iCCP }.into(), - )) - } else if info.icc_profile.is_some() { - Err(DecodingError::Format( - FormatErrorInner::DuplicateChunk { kind: chunk::iCCP }.into(), - )) - } else { - let mut buf = &self.current_chunk.raw_bytes[..]; - - // read profile name - let _: u8 = buf.read_be()?; - for _ in 1..80 { - let raw: u8 = buf.read_be()?; - if raw == 0 { - break; - } - } - - match buf.read_be()? { - // compression method - 0u8 => (), - n => { - return Err(DecodingError::Format( - FormatErrorInner::UnknownCompressionMethod(n).into(), - )) - } - } - - let mut profile = Vec::new(); - let mut inflater = ZlibStream::new(); - while !buf.is_empty() { - let consumed_bytes = inflater.decompress(buf, &mut profile)?; - if profile.len() > 8000000 { - // TODO: this should use Limits.bytes - return Err(DecodingError::LimitsExceeded); - } - buf = &buf[consumed_bytes..]; - } - inflater.finish_compressed_chunks(&mut profile)?; - - info.icc_profile = Some(Cow::Owned(profile)); - Ok(Decoded::Nothing) - } - } - - fn parse_ihdr(&mut self) -> Result<Decoded, DecodingError> { - if self.info.is_some() { - return Err(DecodingError::Format( - FormatErrorInner::DuplicateChunk { kind: IHDR }.into(), - )); - } - let mut buf = &self.current_chunk.raw_bytes[..]; - let width = buf.read_be()?; - let height = buf.read_be()?; - let bit_depth = buf.read_be()?; - let bit_depth = match BitDepth::from_u8(bit_depth) { - Some(bits) => bits, - None => { - return Err(DecodingError::Format( - FormatErrorInner::InvalidBitDepth(bit_depth).into(), - )) - } - }; - let color_type = buf.read_be()?; - let color_type = match ColorType::from_u8(color_type) { - Some(color_type) => { - if color_type.is_combination_invalid(bit_depth) { - return Err(DecodingError::Format( - FormatErrorInner::InvalidColorBitDepth { - color_type, - bit_depth, - } - .into(), - )); - } else { - color_type - } - } - None => { - return Err(DecodingError::Format( - FormatErrorInner::InvalidColorType(color_type).into(), - )) - } - }; - match buf.read_be()? { - // compression method - 0u8 => (), - n => { - return Err(DecodingError::Format( - FormatErrorInner::UnknownCompressionMethod(n).into(), - )) - } - } - match buf.read_be()? 
{ - // filter method - 0u8 => (), - n => { - return Err(DecodingError::Format( - FormatErrorInner::UnknownFilterMethod(n).into(), - )) - } - } - let interlaced = match buf.read_be()? { - 0u8 => false, - 1 => true, - n => { - return Err(DecodingError::Format( - FormatErrorInner::UnknownInterlaceMethod(n).into(), - )) - } - }; - - self.info = Some(Info { - width, - height, - bit_depth, - color_type, - interlaced, - ..Default::default() - }); - - Ok(Decoded::Header( - width, height, bit_depth, color_type, interlaced, - )) - } - - fn split_keyword(buf: &[u8]) -> Result<(&[u8], &[u8]), DecodingError> { - let null_byte_index = buf - .iter() - .position(|&b| b == 0) - .ok_or_else(|| DecodingError::from(TextDecodingError::MissingNullSeparator))?; - - if null_byte_index == 0 || null_byte_index > 79 { - return Err(DecodingError::from(TextDecodingError::InvalidKeywordSize)); - } - - Ok((&buf[..null_byte_index], &buf[null_byte_index + 1..])) - } - - fn parse_text(&mut self) -> Result<Decoded, DecodingError> { - let buf = &self.current_chunk.raw_bytes[..]; - - let (keyword_slice, value_slice) = Self::split_keyword(buf)?; - - self.info - .as_mut() - .unwrap() - .uncompressed_latin1_text - .push(TEXtChunk::decode(keyword_slice, value_slice).map_err(DecodingError::from)?); - - Ok(Decoded::Nothing) - } - - fn parse_ztxt(&mut self) -> Result<Decoded, DecodingError> { - let buf = &self.current_chunk.raw_bytes[..]; - - let (keyword_slice, value_slice) = Self::split_keyword(buf)?; - - let compression_method = *value_slice - .first() - .ok_or_else(|| DecodingError::from(TextDecodingError::InvalidCompressionMethod))?; - - let text_slice = &value_slice[1..]; - - self.info.as_mut().unwrap().compressed_latin1_text.push( - ZTXtChunk::decode(keyword_slice, compression_method, text_slice) - .map_err(DecodingError::from)?, - ); - - Ok(Decoded::Nothing) - } - - fn parse_itxt(&mut self) -> Result<Decoded, DecodingError> { - let buf = &self.current_chunk.raw_bytes[..]; - - let (keyword_slice, value_slice) = Self::split_keyword(buf)?; - - let compression_flag = *value_slice - .first() - .ok_or_else(|| DecodingError::from(TextDecodingError::MissingCompressionFlag))?; - - let compression_method = *value_slice - .get(1) - .ok_or_else(|| DecodingError::from(TextDecodingError::InvalidCompressionMethod))?; - - let second_null_byte_index = value_slice[2..] - .iter() - .position(|&b| b == 0) - .ok_or_else(|| DecodingError::from(TextDecodingError::MissingNullSeparator))? - + 2; - - let language_tag_slice = &value_slice[2..second_null_byte_index]; - - let third_null_byte_index = value_slice[second_null_byte_index + 1..] - .iter() - .position(|&b| b == 0) - .ok_or_else(|| DecodingError::from(TextDecodingError::MissingNullSeparator))? 
- + (second_null_byte_index + 1); - - let translated_keyword_slice = - &value_slice[second_null_byte_index + 1..third_null_byte_index]; - - let text_slice = &value_slice[third_null_byte_index + 1..]; - - self.info.as_mut().unwrap().utf8_text.push( - ITXtChunk::decode( - keyword_slice, - compression_flag, - compression_method, - language_tag_slice, - translated_keyword_slice, - text_slice, - ) - .map_err(DecodingError::from)?, - ); - - Ok(Decoded::Nothing) - } -} - -impl Info<'_> { - fn validate(&self, fc: &FrameControl) -> Result<(), DecodingError> { - // Validate mathematically: fc.width + fc.x_offset <= self.width - let in_x_bounds = Some(fc.width) <= self.width.checked_sub(fc.x_offset); - // Validate mathematically: fc.height + fc.y_offset <= self.height - let in_y_bounds = Some(fc.height) <= self.height.checked_sub(fc.y_offset); - - if !in_x_bounds || !in_y_bounds { - return Err(DecodingError::Format( - // TODO: do we want to display the bad bounds? - FormatErrorInner::BadSubFrameBounds {}.into(), - )); - } - - Ok(()) - } -} - -impl Default for StreamingDecoder { - fn default() -> Self { - Self::new() - } -} - -impl Default for ChunkState { - fn default() -> Self { - ChunkState { - type_: ChunkType([0; 4]), - crc: Crc32::new(), - remaining: 0, - raw_bytes: Vec::with_capacity(CHUNCK_BUFFER_SIZE), - } - } -} - -#[cfg(test)] -mod tests { - use super::ScaledFloat; - use super::SourceChromaticities; - use std::fs::File; - - #[test] - fn image_gamma() -> Result<(), ()> { - fn trial(path: &str, expected: Option<ScaledFloat>) { - let decoder = crate::Decoder::new(File::open(path).unwrap()); - let reader = decoder.read_info().unwrap(); - let actual: Option<ScaledFloat> = reader.info().source_gamma; - assert!(actual == expected); - } - trial("tests/pngsuite/f00n0g08.png", None); - trial("tests/pngsuite/f00n2c08.png", None); - trial("tests/pngsuite/f01n0g08.png", None); - trial("tests/pngsuite/f01n2c08.png", None); - trial("tests/pngsuite/f02n0g08.png", None); - trial("tests/pngsuite/f02n2c08.png", None); - trial("tests/pngsuite/f03n0g08.png", None); - trial("tests/pngsuite/f03n2c08.png", None); - trial("tests/pngsuite/f04n0g08.png", None); - trial("tests/pngsuite/f04n2c08.png", None); - trial("tests/pngsuite/f99n0g04.png", None); - trial("tests/pngsuite/tm3n3p02.png", None); - trial("tests/pngsuite/g03n0g16.png", Some(ScaledFloat::new(0.35))); - trial("tests/pngsuite/g03n2c08.png", Some(ScaledFloat::new(0.35))); - trial("tests/pngsuite/g03n3p04.png", Some(ScaledFloat::new(0.35))); - trial("tests/pngsuite/g04n0g16.png", Some(ScaledFloat::new(0.45))); - trial("tests/pngsuite/g04n2c08.png", Some(ScaledFloat::new(0.45))); - trial("tests/pngsuite/g04n3p04.png", Some(ScaledFloat::new(0.45))); - trial("tests/pngsuite/g05n0g16.png", Some(ScaledFloat::new(0.55))); - trial("tests/pngsuite/g05n2c08.png", Some(ScaledFloat::new(0.55))); - trial("tests/pngsuite/g05n3p04.png", Some(ScaledFloat::new(0.55))); - trial("tests/pngsuite/g07n0g16.png", Some(ScaledFloat::new(0.7))); - trial("tests/pngsuite/g07n2c08.png", Some(ScaledFloat::new(0.7))); - trial("tests/pngsuite/g07n3p04.png", Some(ScaledFloat::new(0.7))); - trial("tests/pngsuite/g10n0g16.png", Some(ScaledFloat::new(1.0))); - trial("tests/pngsuite/g10n2c08.png", Some(ScaledFloat::new(1.0))); - trial("tests/pngsuite/g10n3p04.png", Some(ScaledFloat::new(1.0))); - trial("tests/pngsuite/g25n0g16.png", Some(ScaledFloat::new(2.5))); - trial("tests/pngsuite/g25n2c08.png", Some(ScaledFloat::new(2.5))); - trial("tests/pngsuite/g25n3p04.png", 
Some(ScaledFloat::new(2.5))); - Ok(()) - } - - #[test] - fn image_source_chromaticities() -> Result<(), ()> { - fn trial(path: &str, expected: Option<SourceChromaticities>) { - let decoder = crate::Decoder::new(File::open(path).unwrap()); - let reader = decoder.read_info().unwrap(); - let actual: Option<SourceChromaticities> = reader.info().source_chromaticities; - assert!(actual == expected); - } - trial( - "tests/pngsuite/ccwn2c08.png", - Some(SourceChromaticities::new( - (0.3127, 0.3290), - (0.64, 0.33), - (0.30, 0.60), - (0.15, 0.06), - )), - ); - trial( - "tests/pngsuite/ccwn3p08.png", - Some(SourceChromaticities::new( - (0.3127, 0.3290), - (0.64, 0.33), - (0.30, 0.60), - (0.15, 0.06), - )), - ); - trial("tests/pngsuite/basi0g01.png", None); - trial("tests/pngsuite/basi0g02.png", None); - trial("tests/pngsuite/basi0g04.png", None); - trial("tests/pngsuite/basi0g08.png", None); - trial("tests/pngsuite/basi0g16.png", None); - trial("tests/pngsuite/basi2c08.png", None); - trial("tests/pngsuite/basi2c16.png", None); - trial("tests/pngsuite/basi3p01.png", None); - trial("tests/pngsuite/basi3p02.png", None); - trial("tests/pngsuite/basi3p04.png", None); - trial("tests/pngsuite/basi3p08.png", None); - trial("tests/pngsuite/basi4a08.png", None); - trial("tests/pngsuite/basi4a16.png", None); - trial("tests/pngsuite/basi6a08.png", None); - trial("tests/pngsuite/basi6a16.png", None); - trial("tests/pngsuite/basn0g01.png", None); - trial("tests/pngsuite/basn0g02.png", None); - trial("tests/pngsuite/basn0g04.png", None); - trial("tests/pngsuite/basn0g08.png", None); - trial("tests/pngsuite/basn0g16.png", None); - trial("tests/pngsuite/basn2c08.png", None); - trial("tests/pngsuite/basn2c16.png", None); - trial("tests/pngsuite/basn3p01.png", None); - trial("tests/pngsuite/basn3p02.png", None); - trial("tests/pngsuite/basn3p04.png", None); - trial("tests/pngsuite/basn3p08.png", None); - trial("tests/pngsuite/basn4a08.png", None); - trial("tests/pngsuite/basn4a16.png", None); - trial("tests/pngsuite/basn6a08.png", None); - trial("tests/pngsuite/basn6a16.png", None); - trial("tests/pngsuite/bgai4a08.png", None); - trial("tests/pngsuite/bgai4a16.png", None); - trial("tests/pngsuite/bgan6a08.png", None); - trial("tests/pngsuite/bgan6a16.png", None); - trial("tests/pngsuite/bgbn4a08.png", None); - trial("tests/pngsuite/bggn4a16.png", None); - trial("tests/pngsuite/bgwn6a08.png", None); - trial("tests/pngsuite/bgyn6a16.png", None); - trial("tests/pngsuite/cdfn2c08.png", None); - trial("tests/pngsuite/cdhn2c08.png", None); - trial("tests/pngsuite/cdsn2c08.png", None); - trial("tests/pngsuite/cdun2c08.png", None); - trial("tests/pngsuite/ch1n3p04.png", None); - trial("tests/pngsuite/ch2n3p08.png", None); - trial("tests/pngsuite/cm0n0g04.png", None); - trial("tests/pngsuite/cm7n0g04.png", None); - trial("tests/pngsuite/cm9n0g04.png", None); - trial("tests/pngsuite/cs3n2c16.png", None); - trial("tests/pngsuite/cs3n3p08.png", None); - trial("tests/pngsuite/cs5n2c08.png", None); - trial("tests/pngsuite/cs5n3p08.png", None); - trial("tests/pngsuite/cs8n2c08.png", None); - trial("tests/pngsuite/cs8n3p08.png", None); - trial("tests/pngsuite/ct0n0g04.png", None); - trial("tests/pngsuite/ct1n0g04.png", None); - trial("tests/pngsuite/cten0g04.png", None); - trial("tests/pngsuite/ctfn0g04.png", None); - trial("tests/pngsuite/ctgn0g04.png", None); - trial("tests/pngsuite/cthn0g04.png", None); - trial("tests/pngsuite/ctjn0g04.png", None); - trial("tests/pngsuite/ctzn0g04.png", None); - 
trial("tests/pngsuite/f00n0g08.png", None); - trial("tests/pngsuite/f00n2c08.png", None); - trial("tests/pngsuite/f01n0g08.png", None); - trial("tests/pngsuite/f01n2c08.png", None); - trial("tests/pngsuite/f02n0g08.png", None); - trial("tests/pngsuite/f02n2c08.png", None); - trial("tests/pngsuite/f03n0g08.png", None); - trial("tests/pngsuite/f03n2c08.png", None); - trial("tests/pngsuite/f04n0g08.png", None); - trial("tests/pngsuite/f04n2c08.png", None); - trial("tests/pngsuite/f99n0g04.png", None); - trial("tests/pngsuite/g03n0g16.png", None); - trial("tests/pngsuite/g03n2c08.png", None); - trial("tests/pngsuite/g03n3p04.png", None); - trial("tests/pngsuite/g04n0g16.png", None); - trial("tests/pngsuite/g04n2c08.png", None); - trial("tests/pngsuite/g04n3p04.png", None); - trial("tests/pngsuite/g05n0g16.png", None); - trial("tests/pngsuite/g05n2c08.png", None); - trial("tests/pngsuite/g05n3p04.png", None); - trial("tests/pngsuite/g07n0g16.png", None); - trial("tests/pngsuite/g07n2c08.png", None); - trial("tests/pngsuite/g07n3p04.png", None); - trial("tests/pngsuite/g10n0g16.png", None); - trial("tests/pngsuite/g10n2c08.png", None); - trial("tests/pngsuite/g10n3p04.png", None); - trial("tests/pngsuite/g25n0g16.png", None); - trial("tests/pngsuite/g25n2c08.png", None); - trial("tests/pngsuite/g25n3p04.png", None); - trial("tests/pngsuite/oi1n0g16.png", None); - trial("tests/pngsuite/oi1n2c16.png", None); - trial("tests/pngsuite/oi2n0g16.png", None); - trial("tests/pngsuite/oi2n2c16.png", None); - trial("tests/pngsuite/oi4n0g16.png", None); - trial("tests/pngsuite/oi4n2c16.png", None); - trial("tests/pngsuite/oi9n0g16.png", None); - trial("tests/pngsuite/oi9n2c16.png", None); - trial("tests/pngsuite/PngSuite.png", None); - trial("tests/pngsuite/pp0n2c16.png", None); - trial("tests/pngsuite/pp0n6a08.png", None); - trial("tests/pngsuite/ps1n0g08.png", None); - trial("tests/pngsuite/ps1n2c16.png", None); - trial("tests/pngsuite/ps2n0g08.png", None); - trial("tests/pngsuite/ps2n2c16.png", None); - trial("tests/pngsuite/s01i3p01.png", None); - trial("tests/pngsuite/s01n3p01.png", None); - trial("tests/pngsuite/s02i3p01.png", None); - trial("tests/pngsuite/s02n3p01.png", None); - trial("tests/pngsuite/s03i3p01.png", None); - trial("tests/pngsuite/s03n3p01.png", None); - trial("tests/pngsuite/s04i3p01.png", None); - trial("tests/pngsuite/s04n3p01.png", None); - trial("tests/pngsuite/s05i3p02.png", None); - trial("tests/pngsuite/s05n3p02.png", None); - trial("tests/pngsuite/s06i3p02.png", None); - trial("tests/pngsuite/s06n3p02.png", None); - trial("tests/pngsuite/s07i3p02.png", None); - trial("tests/pngsuite/s07n3p02.png", None); - trial("tests/pngsuite/s08i3p02.png", None); - trial("tests/pngsuite/s08n3p02.png", None); - trial("tests/pngsuite/s09i3p02.png", None); - trial("tests/pngsuite/s09n3p02.png", None); - trial("tests/pngsuite/s32i3p04.png", None); - trial("tests/pngsuite/s32n3p04.png", None); - trial("tests/pngsuite/s33i3p04.png", None); - trial("tests/pngsuite/s33n3p04.png", None); - trial("tests/pngsuite/s34i3p04.png", None); - trial("tests/pngsuite/s34n3p04.png", None); - trial("tests/pngsuite/s35i3p04.png", None); - trial("tests/pngsuite/s35n3p04.png", None); - trial("tests/pngsuite/s36i3p04.png", None); - trial("tests/pngsuite/s36n3p04.png", None); - trial("tests/pngsuite/s37i3p04.png", None); - trial("tests/pngsuite/s37n3p04.png", None); - trial("tests/pngsuite/s38i3p04.png", None); - trial("tests/pngsuite/s38n3p04.png", None); - trial("tests/pngsuite/s39i3p04.png", None); - 
trial("tests/pngsuite/s39n3p04.png", None); - trial("tests/pngsuite/s40i3p04.png", None); - trial("tests/pngsuite/s40n3p04.png", None); - trial("tests/pngsuite/tbbn0g04.png", None); - trial("tests/pngsuite/tbbn2c16.png", None); - trial("tests/pngsuite/tbbn3p08.png", None); - trial("tests/pngsuite/tbgn2c16.png", None); - trial("tests/pngsuite/tbgn3p08.png", None); - trial("tests/pngsuite/tbrn2c08.png", None); - trial("tests/pngsuite/tbwn0g16.png", None); - trial("tests/pngsuite/tbwn3p08.png", None); - trial("tests/pngsuite/tbyn3p08.png", None); - trial("tests/pngsuite/tm3n3p02.png", None); - trial("tests/pngsuite/tp0n0g08.png", None); - trial("tests/pngsuite/tp0n2c08.png", None); - trial("tests/pngsuite/tp0n3p08.png", None); - trial("tests/pngsuite/tp1n3p08.png", None); - trial("tests/pngsuite/z00n2c08.png", None); - trial("tests/pngsuite/z03n2c08.png", None); - trial("tests/pngsuite/z06n2c08.png", None); - Ok(()) - } -} diff --git a/vendor/png/src/decoder/zlib.rs b/vendor/png/src/decoder/zlib.rs deleted file mode 100644 index 2953c95..0000000 --- a/vendor/png/src/decoder/zlib.rs +++ /dev/null @@ -1,212 +0,0 @@ -use super::{stream::FormatErrorInner, DecodingError, CHUNCK_BUFFER_SIZE}; - -use fdeflate::Decompressor; - -/// Ergonomics wrapper around `miniz_oxide::inflate::stream` for zlib compressed data. -pub(super) struct ZlibStream { - /// Current decoding state. - state: Box<fdeflate::Decompressor>, - /// If there has been a call to decompress already. - started: bool, - /// A buffer of compressed data. - /// We use this for a progress guarantee. The data in the input stream is chunked as given by - /// the underlying stream buffer. We will not read any more data until the current buffer has - /// been fully consumed. The zlib decompression can not fully consume all the data when it is - /// in the middle of the stream, it will treat full symbols and maybe the last bytes need to be - /// treated in a special way. The exact reason isn't as important but the interface does not - /// promise us this. Now, the complication is that the _current_ chunking information of PNG - /// alone is not enough to determine this as indeed the compressed stream is the concatenation - /// of all consecutive `IDAT`/`fdAT` chunks. We would need to inspect the next chunk header. - /// - /// Thus, there needs to be a buffer that allows fully clearing a chunk so that the next chunk - /// type can be inspected. - in_buffer: Vec<u8>, - /// The logical start of the `in_buffer`. - in_pos: usize, - /// Remaining buffered decoded bytes. - /// The decoder sometimes wants inspect some already finished bytes for further decoding. So we - /// keep a total of 32KB of decoded data available as long as more data may be appended. - out_buffer: Vec<u8>, - /// The cursor position in the output stream as a buffer index. - out_pos: usize, - /// Ignore and do not calculate the Adler-32 checksum. Defaults to `true`. - /// - /// This flag overrides `TINFL_FLAG_COMPUTE_ADLER32`. - /// - /// This flag should not be modified after decompression has started. 
- ignore_adler32: bool, -} - -impl ZlibStream { - pub(crate) fn new() -> Self { - ZlibStream { - state: Box::new(Decompressor::new()), - started: false, - in_buffer: Vec::with_capacity(CHUNCK_BUFFER_SIZE), - in_pos: 0, - out_buffer: vec![0; 2 * CHUNCK_BUFFER_SIZE], - out_pos: 0, - ignore_adler32: true, - } - } - - pub(crate) fn reset(&mut self) { - self.started = false; - self.in_buffer.clear(); - self.in_pos = 0; - self.out_buffer.clear(); - self.out_pos = 0; - *self.state = Decompressor::new(); - } - - /// Set the `ignore_adler32` flag and return `true` if the flag was - /// successfully set. - /// - /// The default is `true`. - /// - /// This flag cannot be modified after decompression has started until the - /// [ZlibStream] is reset. - pub(crate) fn set_ignore_adler32(&mut self, flag: bool) -> bool { - if !self.started { - self.ignore_adler32 = flag; - true - } else { - false - } - } - - /// Return the `ignore_adler32` flag. - pub(crate) fn ignore_adler32(&self) -> bool { - self.ignore_adler32 - } - - /// Fill the decoded buffer as far as possible from `data`. - /// On success returns the number of consumed input bytes. - pub(crate) fn decompress( - &mut self, - data: &[u8], - image_data: &mut Vec<u8>, - ) -> Result<usize, DecodingError> { - self.prepare_vec_for_appending(); - - if !self.started && self.ignore_adler32 { - self.state.ignore_adler32(); - } - - let in_data = if self.in_buffer.is_empty() { - data - } else { - &self.in_buffer[self.in_pos..] - }; - - let (mut in_consumed, out_consumed) = self - .state - .read(in_data, self.out_buffer.as_mut_slice(), self.out_pos, false) - .map_err(|err| { - DecodingError::Format(FormatErrorInner::CorruptFlateStream { err }.into()) - })?; - - if !self.in_buffer.is_empty() { - self.in_pos += in_consumed; - in_consumed = 0; - } - - if self.in_buffer.len() == self.in_pos { - self.in_buffer.clear(); - self.in_pos = 0; - } - - if in_consumed == 0 { - self.in_buffer.extend_from_slice(data); - in_consumed = data.len(); - } - - self.started = true; - self.out_pos += out_consumed; - self.transfer_finished_data(image_data); - - Ok(in_consumed) - } - - /// Called after all consecutive IDAT chunks were handled. - /// - /// The compressed stream can be split on arbitrary byte boundaries. This enables some cleanup - /// within the decompressor and flushing additional data which may have been kept back in case - /// more data were passed to it. - pub(crate) fn finish_compressed_chunks( - &mut self, - image_data: &mut Vec<u8>, - ) -> Result<(), DecodingError> { - if !self.started { - return Ok(()); - } - - let tail = self.in_buffer.split_off(0); - let tail = &tail[self.in_pos..]; - - let mut start = 0; - loop { - self.prepare_vec_for_appending(); - - let (in_consumed, out_consumed) = self - .state - .read( - &tail[start..], - self.out_buffer.as_mut_slice(), - self.out_pos, - true, - ) - .map_err(|err| { - DecodingError::Format(FormatErrorInner::CorruptFlateStream { err }.into()) - })?; - - start += in_consumed; - self.out_pos += out_consumed; - - if self.state.is_done() { - self.out_buffer.truncate(self.out_pos); - image_data.append(&mut self.out_buffer); - return Ok(()); - } else { - let transferred = self.transfer_finished_data(image_data); - assert!( - transferred > 0 || in_consumed > 0 || out_consumed > 0, - "No more forward progress made in stream decoding." - ); - } - } - } - - /// Resize the vector to allow allocation of more data. 
- fn prepare_vec_for_appending(&mut self) { - if self.out_buffer.len().saturating_sub(self.out_pos) >= CHUNCK_BUFFER_SIZE { - return; - } - - let buffered_len = self.decoding_size(self.out_buffer.len()); - debug_assert!(self.out_buffer.len() <= buffered_len); - self.out_buffer.resize(buffered_len, 0u8); - } - - fn decoding_size(&self, len: usize) -> usize { - // Allocate one more chunk size than currently or double the length while ensuring that the - // allocation is valid and that any cursor within it will be valid. - len - // This keeps the buffer size a power-of-two, required by miniz_oxide. - .saturating_add(CHUNCK_BUFFER_SIZE.max(len)) - // Ensure all buffer indices are valid cursor positions. - // Note: both cut off and zero extension give correct results. - .min(u64::max_value() as usize) - // Ensure the allocation request is valid. - // TODO: maximum allocation limits? - .min(isize::max_value() as usize) - } - - fn transfer_finished_data(&mut self, image_data: &mut Vec<u8>) -> usize { - let safe = self.out_pos.saturating_sub(CHUNCK_BUFFER_SIZE); - // TODO: allocation limits. - image_data.extend(self.out_buffer.drain(..safe)); - self.out_pos -= safe; - safe - } -} diff --git a/vendor/png/src/encoder.rs b/vendor/png/src/encoder.rs deleted file mode 100644 index 812bcaa..0000000 --- a/vendor/png/src/encoder.rs +++ /dev/null @@ -1,2389 +0,0 @@ -use borrow::Cow; -use io::{Read, Write}; -use ops::{Deref, DerefMut}; -use std::{borrow, error, fmt, io, mem, ops, result}; - -use crc32fast::Hasher as Crc32; -use flate2::write::ZlibEncoder; - -use crate::chunk::{self, ChunkType}; -use crate::common::{ - AnimationControl, BitDepth, BlendOp, BytesPerPixel, ColorType, Compression, DisposeOp, - FrameControl, Info, ParameterError, ParameterErrorKind, PixelDimensions, ScaledFloat, -}; -use crate::filter::{filter, AdaptiveFilterType, FilterType}; -use crate::text_metadata::{ - EncodableTextChunk, ITXtChunk, TEXtChunk, TextEncodingError, ZTXtChunk, -}; -use crate::traits::WriteBytesExt; - -pub type Result<T> = result::Result<T, EncodingError>; - -#[derive(Debug)] -pub enum EncodingError { - IoError(io::Error), - Format(FormatError), - Parameter(ParameterError), - LimitsExceeded, -} - -#[derive(Debug)] -pub struct FormatError { - inner: FormatErrorKind, -} - -#[derive(Debug)] -enum FormatErrorKind { - ZeroWidth, - ZeroHeight, - InvalidColorCombination(BitDepth, ColorType), - NoPalette, - // TODO: wait, what? 
- WrittenTooMuch(usize), - NotAnimated, - OutOfBounds, - EndReached, - ZeroFrames, - MissingFrames, - MissingData(usize), - Unrecoverable, - BadTextEncoding(TextEncodingError), -} - -impl error::Error for EncodingError { - fn cause(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - EncodingError::IoError(err) => Some(err), - _ => None, - } - } -} - -impl fmt::Display for EncodingError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { - use self::EncodingError::*; - match self { - IoError(err) => write!(fmt, "{}", err), - Format(desc) => write!(fmt, "{}", desc), - Parameter(desc) => write!(fmt, "{}", desc), - LimitsExceeded => write!(fmt, "Limits are exceeded."), - } - } -} - -impl fmt::Display for FormatError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> { - use FormatErrorKind::*; - match self.inner { - ZeroWidth => write!(fmt, "Zero width not allowed"), - ZeroHeight => write!(fmt, "Zero height not allowed"), - ZeroFrames => write!(fmt, "Zero frames not allowed"), - InvalidColorCombination(depth, color) => write!( - fmt, - "Invalid combination of bit-depth '{:?}' and color-type '{:?}'", - depth, color - ), - NoPalette => write!(fmt, "can't write indexed image without palette"), - WrittenTooMuch(index) => write!(fmt, "wrong data size, got {} bytes too many", index), - NotAnimated => write!(fmt, "not an animation"), - OutOfBounds => write!( - fmt, - "the dimension and position go over the frame boundaries" - ), - EndReached => write!(fmt, "all the frames have been already written"), - MissingFrames => write!(fmt, "there are still frames to be written"), - MissingData(n) => write!(fmt, "there are still {} bytes to be written", n), - Unrecoverable => write!( - fmt, - "a previous error put the writer into an unrecoverable state" - ), - BadTextEncoding(tee) => match tee { - TextEncodingError::Unrepresentable => write!( - fmt, - "The text metadata cannot be encoded into valid ISO 8859-1" - ), - TextEncodingError::InvalidKeywordSize => write!(fmt, "Invalid keyword size"), - TextEncodingError::CompressionError => { - write!(fmt, "Unable to compress text metadata") - } - }, - } - } -} - -impl From<io::Error> for EncodingError { - fn from(err: io::Error) -> EncodingError { - EncodingError::IoError(err) - } -} - -impl From<EncodingError> for io::Error { - fn from(err: EncodingError) -> io::Error { - io::Error::new(io::ErrorKind::Other, err.to_string()) - } -} - -// Private impl. -impl From<FormatErrorKind> for FormatError { - fn from(kind: FormatErrorKind) -> Self { - FormatError { inner: kind } - } -} - -impl From<TextEncodingError> for EncodingError { - fn from(tee: TextEncodingError) -> Self { - EncodingError::Format(FormatError { - inner: FormatErrorKind::BadTextEncoding(tee), - }) - } -} - -/// PNG Encoder. -/// -/// This configures the PNG format options such as animation chunks, palette use, color types, -/// auxiliary chunks etc. -/// -/// FIXME: Configuring APNG might be easier (less individual errors) if we had an _adapter_ which -/// borrows this mutably but guarantees that `info.frame_control` is not `None`. -pub struct Encoder<'a, W: Write> { - w: W, - info: Info<'a>, - options: Options, -} - -/// Decoding options, internal type, forwarded to the Writer. 
-#[derive(Default)] -struct Options { - filter: FilterType, - adaptive_filter: AdaptiveFilterType, - sep_def_img: bool, - validate_sequence: bool, -} - -impl<'a, W: Write> Encoder<'a, W> { - pub fn new(w: W, width: u32, height: u32) -> Encoder<'static, W> { - Encoder { - w, - info: Info::with_size(width, height), - options: Options::default(), - } - } - - /// Specify that the image is animated. - /// - /// `num_frames` controls how many frames the animation has, while - /// `num_plays` controls how many times the animation should be - /// repeated until it stops, if it's zero then it will repeat - /// infinitely. - /// - /// When this method is returns successfully then the images written will be encoded as fdAT - /// chunks, except for the first image that is still encoded as `IDAT`. You can control if the - /// first frame should be treated as an animation frame with [`Encoder::set_sep_def_img()`]. - /// - /// This method returns an error if `num_frames` is 0. - pub fn set_animated(&mut self, num_frames: u32, num_plays: u32) -> Result<()> { - if num_frames == 0 { - return Err(EncodingError::Format(FormatErrorKind::ZeroFrames.into())); - } - - let actl = AnimationControl { - num_frames, - num_plays, - }; - - let fctl = FrameControl { - sequence_number: 0, - width: self.info.width, - height: self.info.height, - ..Default::default() - }; - - self.info.animation_control = Some(actl); - self.info.frame_control = Some(fctl); - Ok(()) - } - - /// Mark the first animated frame as a 'separate default image'. - /// - /// In APNG each animated frame is preceded by a special control chunk, `fcTL`. It's up to the - /// encoder to decide if the first image, the standard `IDAT` data, should be part of the - /// animation by emitting this chunk or by not doing so. A default image that is _not_ part of - /// the animation is often interpreted as a thumbnail. - /// - /// This method will return an error when animation control was not configured - /// (which is done by calling [`Encoder::set_animated`]). - pub fn set_sep_def_img(&mut self, sep_def_img: bool) -> Result<()> { - if self.info.animation_control.is_some() { - self.options.sep_def_img = sep_def_img; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Sets the raw byte contents of the PLTE chunk. This method accepts - /// both borrowed and owned byte data. - pub fn set_palette<T: Into<Cow<'a, [u8]>>>(&mut self, palette: T) { - self.info.palette = Some(palette.into()); - } - - /// Sets the raw byte contents of the tRNS chunk. This method accepts - /// both borrowed and owned byte data. - pub fn set_trns<T: Into<Cow<'a, [u8]>>>(&mut self, trns: T) { - self.info.trns = Some(trns.into()); - } - - /// Set the display gamma of the source system on which the image was generated or last edited. - pub fn set_source_gamma(&mut self, source_gamma: ScaledFloat) { - self.info.source_gamma = Some(source_gamma); - } - - /// Set the chromaticities for the source system's display channels (red, green, blue) and the whitepoint - /// of the source system on which the image was generated or last edited. - pub fn set_source_chromaticities( - &mut self, - source_chromaticities: super::SourceChromaticities, - ) { - self.info.source_chromaticities = Some(source_chromaticities); - } - - /// Mark the image data as conforming to the SRGB color space with the specified rendering intent. - /// - /// Matching source gamma and chromaticities chunks are added automatically. 
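    /// (Per the PNG specification's recommendation for sRGB images these are a
    /// gAMA value of 45455, i.e. 1/2.2, and the standard Rec. 709 / D65
    /// chromaticities.)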
- /// Any manually specified source gamma or chromaticities will be ignored. - pub fn set_srgb(&mut self, rendering_intent: super::SrgbRenderingIntent) { - self.info.srgb = Some(rendering_intent); - } - - /// Start encoding by writing the header data. - /// - /// The remaining data can be supplied by methods on the returned [`Writer`]. - pub fn write_header(self) -> Result<Writer<W>> { - Writer::new(self.w, PartialInfo::new(&self.info), self.options).init(&self.info) - } - - /// Set the color of the encoded image. - /// - /// These correspond to the color types in the png IHDR data that will be written. The length - /// of the image data that is later supplied must match the color type, otherwise an error will - /// be emitted. - pub fn set_color(&mut self, color: ColorType) { - self.info.color_type = color; - } - - /// Set the indicated depth of the image data. - pub fn set_depth(&mut self, depth: BitDepth) { - self.info.bit_depth = depth; - } - - /// Set compression parameters. - /// - /// Accepts a `Compression` or any type that can transform into a `Compression`. Notably `deflate::Compression` and - /// `deflate::CompressionOptions` which "just work". - pub fn set_compression(&mut self, compression: Compression) { - self.info.compression = compression; - } - - /// Set the used filter type. - /// - /// The default filter is [`FilterType::Sub`] which provides a basic prediction algorithm for - /// sample values based on the previous. For a potentially better compression ratio, at the - /// cost of more complex processing, try out [`FilterType::Paeth`]. - /// - /// [`FilterType::Sub`]: enum.FilterType.html#variant.Sub - /// [`FilterType::Paeth`]: enum.FilterType.html#variant.Paeth - pub fn set_filter(&mut self, filter: FilterType) { - self.options.filter = filter; - } - - /// Set the adaptive filter type. - /// - /// Adaptive filtering attempts to select the best filter for each line - /// based on heuristics which minimize the file size for compression rather - /// than use a single filter for the entire image. The default method is - /// [`AdaptiveFilterType::NonAdaptive`]. - /// - /// [`AdaptiveFilterType::NonAdaptive`]: enum.AdaptiveFilterType.html - pub fn set_adaptive_filter(&mut self, adaptive_filter: AdaptiveFilterType) { - self.options.adaptive_filter = adaptive_filter; - } - - /// Set the fraction of time every frame is going to be displayed, in seconds. - /// - /// *Note that this parameter can be set for each individual frame after - /// [`Encoder::write_header`] is called. (see [`Writer::set_frame_delay`])* - /// - /// If the denominator is 0, it is to be treated as if it were 100 - /// (that is, the numerator then specifies 1/100ths of a second). - /// If the the value of the numerator is 0 the decoder should render the next frame - /// as quickly as possible, though viewers may impose a reasonable lower bound. - /// - /// The default value is 0 for both the numerator and denominator. - /// - /// This method will return an error if the image is not animated. 
- /// (see [`set_animated`]) - /// - /// [`write_header`]: struct.Encoder.html#method.write_header - /// [`set_animated`]: struct.Encoder.html#method.set_animated - /// [`Writer::set_frame_delay`]: struct.Writer#method.set_frame_delay - pub fn set_frame_delay(&mut self, numerator: u16, denominator: u16) -> Result<()> { - if let Some(ref mut fctl) = self.info.frame_control { - fctl.delay_den = denominator; - fctl.delay_num = numerator; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the blend operation for every frame. - /// - /// The blend operation specifies whether the frame is to be alpha blended - /// into the current output buffer content, or whether it should completely - /// replace its region in the output buffer. - /// - /// *Note that this parameter can be set for each individual frame after - /// [`write_header`] is called. (see [`Writer::set_blend_op`])* - /// - /// See the [`BlendOp`] documentation for the possible values and their effects. - /// - /// *Note that for the first frame the two blend modes are functionally - /// equivalent due to the clearing of the output buffer at the beginning - /// of each play.* - /// - /// The default value is [`BlendOp::Source`]. - /// - /// This method will return an error if the image is not animated. - /// (see [`set_animated`]) - /// - /// [`BlendOP`]: enum.BlendOp.html - /// [`BlendOP::Source`]: enum.BlendOp.html#variant.Source - /// [`write_header`]: struct.Encoder.html#method.write_header - /// [`set_animated`]: struct.Encoder.html#method.set_animated - /// [`Writer::set_blend_op`]: struct.Writer#method.set_blend_op - pub fn set_blend_op(&mut self, op: BlendOp) -> Result<()> { - if let Some(ref mut fctl) = self.info.frame_control { - fctl.blend_op = op; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the dispose operation for every frame. - /// - /// The dispose operation specifies how the output buffer should be changed - /// at the end of the delay (before rendering the next frame) - /// - /// *Note that this parameter can be set for each individual frame after - /// [`write_header`] is called (see [`Writer::set_dispose_op`])* - /// - /// See the [`DisposeOp`] documentation for the possible values and their effects. - /// - /// *Note that if the first frame uses [`DisposeOp::Previous`] - /// it will be treated as [`DisposeOp::Background`].* - /// - /// The default value is [`DisposeOp::None`]. - /// - /// This method will return an error if the image is not animated. 
- /// (see [`set_animated`]) - /// - /// [`DisposeOp`]: ../common/enum.BlendOp.html - /// [`DisposeOp::Previous`]: ../common/enum.BlendOp.html#variant.Previous - /// [`DisposeOp::Background`]: ../common/enum.BlendOp.html#variant.Background - /// [`DisposeOp::None`]: ../common/enum.BlendOp.html#variant.None - /// [`write_header`]: struct.Encoder.html#method.write_header - /// [`set_animated`]: struct.Encoder.html#method.set_animated - /// [`Writer::set_dispose_op`]: struct.Writer#method.set_dispose_op - pub fn set_dispose_op(&mut self, op: DisposeOp) -> Result<()> { - if let Some(ref mut fctl) = self.info.frame_control { - fctl.dispose_op = op; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - pub fn set_pixel_dims(&mut self, pixel_dims: Option<PixelDimensions>) { - self.info.pixel_dims = pixel_dims - } - /// Convenience function to add tEXt chunks to [`Info`] struct - pub fn add_text_chunk(&mut self, keyword: String, text: String) -> Result<()> { - let text_chunk = TEXtChunk::new(keyword, text); - self.info.uncompressed_latin1_text.push(text_chunk); - Ok(()) - } - - /// Convenience function to add zTXt chunks to [`Info`] struct - pub fn add_ztxt_chunk(&mut self, keyword: String, text: String) -> Result<()> { - let text_chunk = ZTXtChunk::new(keyword, text); - self.info.compressed_latin1_text.push(text_chunk); - Ok(()) - } - - /// Convenience function to add iTXt chunks to [`Info`] struct - /// - /// This function only sets the `keyword` and `text` field of the iTXt chunk. - /// To set the other fields, create a [`ITXtChunk`] directly, and then encode it to the output stream. - pub fn add_itxt_chunk(&mut self, keyword: String, text: String) -> Result<()> { - let text_chunk = ITXtChunk::new(keyword, text); - self.info.utf8_text.push(text_chunk); - Ok(()) - } - - /// Validate the written image sequence. - /// - /// When validation is turned on (it's turned off by default) then attempts to write more than - /// one `IDAT` image or images beyond the number of frames indicated in the animation control - /// chunk will fail and return an error result instead. Attempts to [finish][finish] the image - /// with missing frames will also return an error. - /// - /// [finish]: StreamWriter::finish - /// - /// (It's possible to circumvent these checks by writing raw chunks instead.) - pub fn validate_sequence(&mut self, validate: bool) { - self.options.validate_sequence = validate; - } -} - -/// PNG writer -/// -/// Progresses through the image by writing images, frames, or raw individual chunks. This is -/// constructed through [`Encoder::write_header()`]. -/// -/// FIXME: Writing of animated chunks might be clearer if we had an _adapter_ that you would call -/// to guarantee the next image to be prefaced with a fcTL-chunk, and all other chunks would be -/// guaranteed to be `IDAT`/not affected by APNG's frame control. -pub struct Writer<W: Write> { - /// The underlying writer. - w: W, - /// The local version of the `Info` struct. - info: PartialInfo, - /// Global encoding options. - options: Options, - /// The total number of image frames, counting all consecutive IDAT and fdAT chunks. - images_written: u64, - /// The total number of animation frames, that is equivalent to counting fcTL chunks. - animation_written: u32, - /// A flag to note when the IEND chunk was already added. - /// This is only set on code paths that drop `Self` to control the destructor. 
- iend_written: bool, -} - -/// Contains the subset of attributes of [Info] needed for [Writer] to function -struct PartialInfo { - width: u32, - height: u32, - bit_depth: BitDepth, - color_type: ColorType, - frame_control: Option<FrameControl>, - animation_control: Option<AnimationControl>, - compression: Compression, - has_palette: bool, -} - -impl PartialInfo { - fn new(info: &Info) -> Self { - PartialInfo { - width: info.width, - height: info.height, - bit_depth: info.bit_depth, - color_type: info.color_type, - frame_control: info.frame_control, - animation_control: info.animation_control, - compression: info.compression, - has_palette: info.palette.is_some(), - } - } - - fn bpp_in_prediction(&self) -> BytesPerPixel { - // Passthrough - self.to_info().bpp_in_prediction() - } - - fn raw_row_length(&self) -> usize { - // Passthrough - self.to_info().raw_row_length() - } - - fn raw_row_length_from_width(&self, width: u32) -> usize { - // Passthrough - self.to_info().raw_row_length_from_width(width) - } - - /// Converts this partial info to an owned Info struct, - /// setting missing values to their defaults - fn to_info(&self) -> Info<'static> { - Info { - width: self.width, - height: self.height, - bit_depth: self.bit_depth, - color_type: self.color_type, - frame_control: self.frame_control, - animation_control: self.animation_control, - compression: self.compression, - ..Default::default() - } - } -} - -const DEFAULT_BUFFER_LENGTH: usize = 4 * 1024; - -pub(crate) fn write_chunk<W: Write>(mut w: W, name: chunk::ChunkType, data: &[u8]) -> Result<()> { - w.write_be(data.len() as u32)?; - w.write_all(&name.0)?; - w.write_all(data)?; - let mut crc = Crc32::new(); - crc.update(&name.0); - crc.update(data); - w.write_be(crc.finalize())?; - Ok(()) -} - -impl<W: Write> Writer<W> { - fn new(w: W, info: PartialInfo, options: Options) -> Writer<W> { - Writer { - w, - info, - options, - images_written: 0, - animation_written: 0, - iend_written: false, - } - } - - fn init(mut self, info: &Info<'_>) -> Result<Self> { - if self.info.width == 0 { - return Err(EncodingError::Format(FormatErrorKind::ZeroWidth.into())); - } - - if self.info.height == 0 { - return Err(EncodingError::Format(FormatErrorKind::ZeroHeight.into())); - } - - if self - .info - .color_type - .is_combination_invalid(self.info.bit_depth) - { - return Err(EncodingError::Format( - FormatErrorKind::InvalidColorCombination(self.info.bit_depth, self.info.color_type) - .into(), - )); - } - - self.w.write_all(&[137, 80, 78, 71, 13, 10, 26, 10])?; // PNG signature - info.encode(&mut self.w)?; - - Ok(self) - } - - /// Write a raw chunk of PNG data. - /// - /// The chunk will have its CRC calculated and correctly. The data is not filtered in any way, - /// but the chunk needs to be short enough to have its length encoded correctly. - pub fn write_chunk(&mut self, name: ChunkType, data: &[u8]) -> Result<()> { - use std::convert::TryFrom; - - if u32::try_from(data.len()).map_or(true, |length| length > i32::MAX as u32) { - let kind = FormatErrorKind::WrittenTooMuch(data.len() - i32::MAX as usize); - return Err(EncodingError::Format(kind.into())); - } - - write_chunk(&mut self.w, name, data) - } - - pub fn write_text_chunk<T: EncodableTextChunk>(&mut self, text_chunk: &T) -> Result<()> { - text_chunk.encode(&mut self.w) - } - - /// Check if we should allow writing another image. 
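    ///
    /// When sequence validation is enabled, a non-animated image may only
    /// contain a single `IDAT` image, while an animated image accepts further
    /// writes as long as `frame_control` is still set, i.e. while animation
    /// frames remain to be written.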
- fn validate_new_image(&self) -> Result<()> { - if !self.options.validate_sequence { - return Ok(()); - } - - match self.info.animation_control { - None => { - if self.images_written == 0 { - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::EndReached.into())) - } - } - Some(_) => { - if self.info.frame_control.is_some() { - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::EndReached.into())) - } - } - } - } - - fn validate_sequence_done(&self) -> Result<()> { - if !self.options.validate_sequence { - return Ok(()); - } - - if (self.info.animation_control.is_some() && self.info.frame_control.is_some()) - || self.images_written == 0 - { - Err(EncodingError::Format(FormatErrorKind::MissingFrames.into())) - } else { - Ok(()) - } - } - - const MAX_IDAT_CHUNK_LEN: u32 = std::u32::MAX >> 1; - #[allow(non_upper_case_globals)] - const MAX_fdAT_CHUNK_LEN: u32 = (std::u32::MAX >> 1) - 4; - - /// Writes the next image data. - pub fn write_image_data(&mut self, data: &[u8]) -> Result<()> { - if self.info.color_type == ColorType::Indexed && !self.info.has_palette { - return Err(EncodingError::Format(FormatErrorKind::NoPalette.into())); - } - - self.validate_new_image()?; - - let width: usize; - let height: usize; - if let Some(ref mut fctl) = self.info.frame_control { - width = fctl.width as usize; - height = fctl.height as usize; - } else { - width = self.info.width as usize; - height = self.info.height as usize; - } - - let in_len = self.info.raw_row_length_from_width(width as u32) - 1; - let data_size = in_len * height; - if data_size != data.len() { - return Err(EncodingError::Parameter( - ParameterErrorKind::ImageBufferSize { - expected: data_size, - actual: data.len(), - } - .into(), - )); - } - - let prev = vec![0; in_len]; - let mut prev = prev.as_slice(); - - let bpp = self.info.bpp_in_prediction(); - let filter_method = self.options.filter; - let adaptive_method = self.options.adaptive_filter; - - let zlib_encoded = match self.info.compression { - Compression::Fast => { - let mut compressor = fdeflate::Compressor::new(std::io::Cursor::new(Vec::new()))?; - - let mut current = vec![0; in_len + 1]; - for line in data.chunks(in_len) { - let filter_type = filter( - filter_method, - adaptive_method, - bpp, - prev, - line, - &mut current[1..], - ); - - current[0] = filter_type as u8; - compressor.write_data(¤t)?; - prev = line; - } - - let compressed = compressor.finish()?.into_inner(); - if compressed.len() - > fdeflate::StoredOnlyCompressor::<()>::compressed_size((in_len + 1) * height) - { - // Write uncompressed data since the result from fast compression would take - // more space than that. - // - // We always use FilterType::NoFilter here regardless of the filter method - // requested by the user. Doing filtering again would only add performance - // cost for both encoding and subsequent decoding, without improving the - // compression ratio. 
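                // Rough arithmetic behind that comparison: a stored deflate
                // stream costs about 5 bytes of block header per 64 KiB of
                // payload plus the 6-byte zlib wrapper, so the stored-only size
                // is roughly `payload + 5 * payload.div_ceil(65535) + 6`, where
                // `payload = (in_len + 1) * height` (one filter byte per row
                // included). Storing wins whenever the fast compressor produced
                // more than that.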
- let mut compressor = - fdeflate::StoredOnlyCompressor::new(std::io::Cursor::new(Vec::new()))?; - for line in data.chunks(in_len) { - compressor.write_data(&[0])?; - compressor.write_data(line)?; - } - compressor.finish()?.into_inner() - } else { - compressed - } - } - _ => { - let mut current = vec![0; in_len]; - - let mut zlib = ZlibEncoder::new(Vec::new(), self.info.compression.to_options()); - for line in data.chunks(in_len) { - let filter_type = filter( - filter_method, - adaptive_method, - bpp, - prev, - line, - &mut current, - ); - - zlib.write_all(&[filter_type as u8])?; - zlib.write_all(¤t)?; - prev = line; - } - zlib.finish()? - } - }; - - match self.info.frame_control { - None => { - self.write_zlib_encoded_idat(&zlib_encoded)?; - } - Some(_) if self.should_skip_frame_control_on_default_image() => { - self.write_zlib_encoded_idat(&zlib_encoded)?; - } - Some(ref mut fctl) => { - fctl.encode(&mut self.w)?; - fctl.sequence_number = fctl.sequence_number.wrapping_add(1); - self.animation_written += 1; - - // If the default image is the first frame of an animation, it's still an IDAT. - if self.images_written == 0 { - self.write_zlib_encoded_idat(&zlib_encoded)?; - } else { - let buff_size = zlib_encoded.len().min(Self::MAX_fdAT_CHUNK_LEN as usize); - let mut alldata = vec![0u8; 4 + buff_size]; - for chunk in zlib_encoded.chunks(Self::MAX_fdAT_CHUNK_LEN as usize) { - alldata[..4].copy_from_slice(&fctl.sequence_number.to_be_bytes()); - alldata[4..][..chunk.len()].copy_from_slice(chunk); - write_chunk(&mut self.w, chunk::fdAT, &alldata[..4 + chunk.len()])?; - fctl.sequence_number = fctl.sequence_number.wrapping_add(1); - } - } - } - } - - self.increment_images_written(); - - Ok(()) - } - - fn increment_images_written(&mut self) { - self.images_written = self.images_written.saturating_add(1); - - if let Some(actl) = self.info.animation_control { - if actl.num_frames <= self.animation_written { - // If we've written all animation frames, all following will be normal image chunks. - self.info.frame_control = None; - } - } - } - - fn write_iend(&mut self) -> Result<()> { - self.iend_written = true; - self.write_chunk(chunk::IEND, &[]) - } - - fn should_skip_frame_control_on_default_image(&self) -> bool { - self.options.sep_def_img && self.images_written == 0 - } - - fn write_zlib_encoded_idat(&mut self, zlib_encoded: &[u8]) -> Result<()> { - for chunk in zlib_encoded.chunks(Self::MAX_IDAT_CHUNK_LEN as usize) { - self.write_chunk(chunk::IDAT, chunk)?; - } - Ok(()) - } - - /// Set the used filter type for the following frames. - /// - /// The default filter is [`FilterType::Sub`] which provides a basic prediction algorithm for - /// sample values based on the previous. For a potentially better compression ratio, at the - /// cost of more complex processing, try out [`FilterType::Paeth`]. - /// - /// [`FilterType::Sub`]: enum.FilterType.html#variant.Sub - /// [`FilterType::Paeth`]: enum.FilterType.html#variant.Paeth - pub fn set_filter(&mut self, filter: FilterType) { - self.options.filter = filter; - } - - /// Set the adaptive filter type for the following frames. - /// - /// Adaptive filtering attempts to select the best filter for each line - /// based on heuristics which minimize the file size for compression rather - /// than use a single filter for the entire image. The default method is - /// [`AdaptiveFilterType::NonAdaptive`]. 
- /// - /// [`AdaptiveFilterType::NonAdaptive`]: enum.AdaptiveFilterType.html - pub fn set_adaptive_filter(&mut self, adaptive_filter: AdaptiveFilterType) { - self.options.adaptive_filter = adaptive_filter; - } - - /// Set the fraction of time the following frames are going to be displayed, - /// in seconds - /// - /// If the denominator is 0, it is to be treated as if it were 100 - /// (that is, the numerator then specifies 1/100ths of a second). - /// If the the value of the numerator is 0 the decoder should render the next frame - /// as quickly as possible, though viewers may impose a reasonable lower bound. - /// - /// This method will return an error if the image is not animated. - pub fn set_frame_delay(&mut self, numerator: u16, denominator: u16) -> Result<()> { - if let Some(ref mut fctl) = self.info.frame_control { - fctl.delay_den = denominator; - fctl.delay_num = numerator; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the dimension of the following frames. - /// - /// This function will return an error when: - /// - The image is not an animated; - /// - /// - The selected dimension, considering also the current frame position, - /// goes outside the image boundaries; - /// - /// - One or both the width and height are 0; - /// - // ??? TODO ??? - // - The next frame is the default image - pub fn set_frame_dimension(&mut self, width: u32, height: u32) -> Result<()> { - if let Some(ref mut fctl) = self.info.frame_control { - if Some(width) > self.info.width.checked_sub(fctl.x_offset) - || Some(height) > self.info.height.checked_sub(fctl.y_offset) - { - return Err(EncodingError::Format(FormatErrorKind::OutOfBounds.into())); - } else if width == 0 { - return Err(EncodingError::Format(FormatErrorKind::ZeroWidth.into())); - } else if height == 0 { - return Err(EncodingError::Format(FormatErrorKind::ZeroHeight.into())); - } - fctl.width = width; - fctl.height = height; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the position of the following frames. - /// - /// An error will be returned if: - /// - The image is not animated; - /// - /// - The selected position, considering also the current frame dimension, - /// goes outside the image boundaries; - /// - // ??? TODO ??? - // - The next frame is the default image - pub fn set_frame_position(&mut self, x: u32, y: u32) -> Result<()> { - if let Some(ref mut fctl) = self.info.frame_control { - if Some(x) > self.info.width.checked_sub(fctl.width) - || Some(y) > self.info.height.checked_sub(fctl.height) - { - return Err(EncodingError::Format(FormatErrorKind::OutOfBounds.into())); - } - fctl.x_offset = x; - fctl.y_offset = y; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the frame dimension to occupy all the image, starting from - /// the current position. - /// - /// To reset the frame to the full image size [`reset_frame_position`] - /// should be called first. - /// - /// This method will return an error if the image is not animated. - /// - /// [`reset_frame_position`]: struct.Writer.html#method.reset_frame_position - pub fn reset_frame_dimension(&mut self) -> Result<()> { - if let Some(ref mut fctl) = self.info.frame_control { - fctl.width = self.info.width - fctl.x_offset; - fctl.height = self.info.height - fctl.y_offset; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the frame position to (0, 0). 
- /// - /// Equivalent to calling [`set_frame_position(0, 0)`]. - /// - /// This method will return an error if the image is not animated. - /// - /// [`set_frame_position(0, 0)`]: struct.Writer.html#method.set_frame_position - pub fn reset_frame_position(&mut self) -> Result<()> { - if let Some(ref mut fctl) = self.info.frame_control { - fctl.x_offset = 0; - fctl.y_offset = 0; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the blend operation for the following frames. - /// - /// The blend operation specifies whether the frame is to be alpha blended - /// into the current output buffer content, or whether it should completely - /// replace its region in the output buffer. - /// - /// See the [`BlendOp`] documentation for the possible values and their effects. - /// - /// *Note that for the first frame the two blend modes are functionally - /// equivalent due to the clearing of the output buffer at the beginning - /// of each play.* - /// - /// This method will return an error if the image is not animated. - /// - /// [`BlendOP`]: enum.BlendOp.html - pub fn set_blend_op(&mut self, op: BlendOp) -> Result<()> { - if let Some(ref mut fctl) = self.info.frame_control { - fctl.blend_op = op; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the dispose operation for the following frames. - /// - /// The dispose operation specifies how the output buffer should be changed - /// at the end of the delay (before rendering the next frame) - /// - /// See the [`DisposeOp`] documentation for the possible values and their effects. - /// - /// *Note that if the first frame uses [`DisposeOp::Previous`] - /// it will be treated as [`DisposeOp::Background`].* - /// - /// This method will return an error if the image is not animated. - /// - /// [`DisposeOp`]: ../common/enum.BlendOp.html - /// [`DisposeOp::Previous`]: ../common/enum.BlendOp.html#variant.Previous - /// [`DisposeOp::Background`]: ../common/enum.BlendOp.html#variant.Background - pub fn set_dispose_op(&mut self, op: DisposeOp) -> Result<()> { - if let Some(ref mut fctl) = self.info.frame_control { - fctl.dispose_op = op; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Create a stream writer. - /// - /// This allows you to create images that do not fit in memory. The default - /// chunk size is 4K, use `stream_writer_with_size` to set another chunk - /// size. - /// - /// This borrows the writer which allows for manually appending additional - /// chunks after the image data has been written. - pub fn stream_writer(&mut self) -> Result<StreamWriter<W>> { - self.stream_writer_with_size(DEFAULT_BUFFER_LENGTH) - } - - /// Create a stream writer with custom buffer size. - /// - /// See [`stream_writer`]. - /// - /// [`stream_writer`]: #fn.stream_writer - pub fn stream_writer_with_size(&mut self, size: usize) -> Result<StreamWriter<W>> { - StreamWriter::new(ChunkOutput::Borrowed(self), size) - } - - /// Turn this into a stream writer for image data. - /// - /// This allows you to create images that do not fit in memory. The default - /// chunk size is 4K, use `stream_writer_with_size` to set another chunk - /// size. - pub fn into_stream_writer(self) -> Result<StreamWriter<'static, W>> { - self.into_stream_writer_with_size(DEFAULT_BUFFER_LENGTH) - } - - /// Turn this into a stream writer with custom buffer size. - /// - /// See [`into_stream_writer`]. 
- /// - /// [`into_stream_writer`]: #fn.into_stream_writer - pub fn into_stream_writer_with_size(self, size: usize) -> Result<StreamWriter<'static, W>> { - StreamWriter::new(ChunkOutput::Owned(self), size) - } - - /// Consume the stream writer with validation. - /// - /// Unlike a simple drop this ensures that the final chunk was written correctly. When other - /// validation options (chunk sequencing) had been turned on in the configuration then it will - /// also do a check on their correctness _before_ writing the final chunk. - pub fn finish(mut self) -> Result<()> { - self.validate_sequence_done()?; - self.write_iend()?; - self.w.flush()?; - - // Explicitly drop `self` just for clarity. - drop(self); - Ok(()) - } -} - -impl<W: Write> Drop for Writer<W> { - fn drop(&mut self) { - if !self.iend_written { - let _ = self.write_iend(); - } - } -} - -enum ChunkOutput<'a, W: Write> { - Borrowed(&'a mut Writer<W>), - Owned(Writer<W>), -} - -// opted for deref for practical reasons -impl<'a, W: Write> Deref for ChunkOutput<'a, W> { - type Target = Writer<W>; - - fn deref(&self) -> &Self::Target { - match self { - ChunkOutput::Borrowed(writer) => writer, - ChunkOutput::Owned(writer) => writer, - } - } -} - -impl<'a, W: Write> DerefMut for ChunkOutput<'a, W> { - fn deref_mut(&mut self) -> &mut Self::Target { - match self { - ChunkOutput::Borrowed(writer) => writer, - ChunkOutput::Owned(writer) => writer, - } - } -} - -/// This writer is used between the actual writer and the -/// ZlibEncoder and has the job of packaging the compressed -/// data into a PNG chunk, based on the image metadata -/// -/// Currently the way it works is that the specified buffer -/// will hold one chunk at the time and buffer the incoming -/// data until `flush` is called or the maximum chunk size -/// is reached. -/// -/// The maximum chunk is the smallest between the selected buffer size -/// and `u32::MAX >> 1` (`0x7fffffff` or `2147483647` dec) -/// -/// When a chunk has to be flushed the length (that is now known) -/// and the CRC will be written at the correct locations in the chunk. -struct ChunkWriter<'a, W: Write> { - writer: ChunkOutput<'a, W>, - buffer: Vec<u8>, - /// keeps track of where the last byte was written - index: usize, - curr_chunk: ChunkType, -} - -impl<'a, W: Write> ChunkWriter<'a, W> { - fn new(writer: ChunkOutput<'a, W>, buf_len: usize) -> ChunkWriter<'a, W> { - // currently buf_len will determine the size of each chunk - // the len is capped to the maximum size every chunk can hold - // (this wont ever overflow an u32) - // - // TODO (maybe): find a way to hold two chunks at a time if `usize` - // is 64 bits. 
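        // The PNG format stores a chunk's length in a four-byte field whose
        // most significant bit must be zero, so a single chunk body can never
        // exceed 2^31 - 1 bytes; that is where the cap below comes from.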
- const CAP: usize = std::u32::MAX as usize >> 1; - let curr_chunk = if writer.images_written == 0 { - chunk::IDAT - } else { - chunk::fdAT - }; - ChunkWriter { - writer, - buffer: vec![0; CAP.min(buf_len)], - index: 0, - curr_chunk, - } - } - - /// Returns the size of each scanline for the next frame - /// paired with the size of the whole frame - /// - /// This is used by the `StreamWriter` to know when the scanline ends - /// so it can filter compress it and also to know when to start - /// the next one - fn next_frame_info(&self) -> (usize, usize) { - let wrt = self.writer.deref(); - - let width: usize; - let height: usize; - if let Some(fctl) = wrt.info.frame_control { - width = fctl.width as usize; - height = fctl.height as usize; - } else { - width = wrt.info.width as usize; - height = wrt.info.height as usize; - } - - let in_len = wrt.info.raw_row_length_from_width(width as u32) - 1; - let data_size = in_len * height; - - (in_len, data_size) - } - - /// NOTE: this bypasses the internal buffer so the flush method should be called before this - /// in the case there is some data left in the buffer when this is called, it will panic - fn write_header(&mut self) -> Result<()> { - assert_eq!(self.index, 0, "Called when not flushed"); - let wrt = self.writer.deref_mut(); - - self.curr_chunk = if wrt.images_written == 0 { - chunk::IDAT - } else { - chunk::fdAT - }; - - match wrt.info.frame_control { - Some(_) if wrt.should_skip_frame_control_on_default_image() => {} - Some(ref mut fctl) => { - fctl.encode(&mut wrt.w)?; - fctl.sequence_number += 1; - } - _ => {} - } - - Ok(()) - } - - /// Set the `FrameControl` for the following frame - /// - /// It will ignore the `sequence_number` of the parameter - /// as it is updated internally. - fn set_fctl(&mut self, f: FrameControl) { - if let Some(ref mut fctl) = self.writer.info.frame_control { - // Ignore the sequence number - *fctl = FrameControl { - sequence_number: fctl.sequence_number, - ..f - }; - } else { - panic!("This function must be called on an animated PNG") - } - } - - /// Flushes the current chunk - fn flush_inner(&mut self) -> io::Result<()> { - if self.index > 0 { - // flush the chunk and reset everything - write_chunk( - &mut self.writer.w, - self.curr_chunk, - &self.buffer[..self.index], - )?; - - self.index = 0; - } - Ok(()) - } -} - -impl<'a, W: Write> Write for ChunkWriter<'a, W> { - fn write(&mut self, mut data: &[u8]) -> io::Result<usize> { - if data.is_empty() { - return Ok(0); - } - - // index == 0 means a chunk has been flushed out - if self.index == 0 { - let wrt = self.writer.deref_mut(); - - // Prepare the next animated frame, if any. 
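            // Animated frames are written as fdAT chunks, whose body starts
            // with a four-byte APNG sequence number before the image data, so
            // those four bytes are reserved here; plain IDAT chunks have no
            // such prefix and `index` stays at 0 for them.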
- let no_fctl = wrt.should_skip_frame_control_on_default_image(); - if wrt.info.frame_control.is_some() && !no_fctl { - let fctl = wrt.info.frame_control.as_mut().unwrap(); - self.buffer[0..4].copy_from_slice(&fctl.sequence_number.to_be_bytes()); - fctl.sequence_number += 1; - self.index = 4; - } - } - - // Cap the buffer length to the maximum number of bytes that can't still - // be added to the current chunk - let written = data.len().min(self.buffer.len() - self.index); - data = &data[..written]; - - self.buffer[self.index..][..written].copy_from_slice(data); - self.index += written; - - // if the maximum data for this chunk as been reached it needs to be flushed - if self.index == self.buffer.len() { - self.flush_inner()?; - } - - Ok(written) - } - - fn flush(&mut self) -> io::Result<()> { - self.flush_inner() - } -} - -impl<W: Write> Drop for ChunkWriter<'_, W> { - fn drop(&mut self) { - let _ = self.flush(); - } -} - -// TODO: find a better name -// -/// This enum is used to be allow the `StreamWriter` to keep -/// its inner `ChunkWriter` without wrapping it inside a -/// `ZlibEncoder`. This is used in the case that between the -/// change of state that happens when the last write of a frame -/// is performed an error occurs, which obviously has to be returned. -/// This creates the problem of where to store the writer before -/// exiting the function, and this is where `Wrapper` comes in. -/// -/// Unfortunately the `ZlibWriter` can't be used because on the -/// write following the error, `finish` would be called and that -/// would write some data even if 0 bytes where compressed. -/// -/// If the `finish` function fails then there is nothing much to -/// do as the `ChunkWriter` would get lost so the `Unrecoverable` -/// variant is used to signal that. -enum Wrapper<'a, W: Write> { - Chunk(ChunkWriter<'a, W>), - Zlib(ZlibEncoder<ChunkWriter<'a, W>>), - Unrecoverable, - /// This is used in-between, should never be matched - None, -} - -impl<'a, W: Write> Wrapper<'a, W> { - /// Like `Option::take` this returns the `Wrapper` contained - /// in `self` and replaces it with `Wrapper::None` - fn take(&mut self) -> Wrapper<'a, W> { - let mut swap = Wrapper::None; - mem::swap(self, &mut swap); - swap - } -} - -/// Streaming PNG writer -/// -/// This may silently fail in the destructor, so it is a good idea to call -/// [`finish`](#method.finish) or [`flush`] before dropping. -/// -/// [`flush`]: https://doc.rust-lang.org/stable/std/io/trait.Write.html#tymethod.flush -pub struct StreamWriter<'a, W: Write> { - /// The option here is needed in order to access the inner `ChunkWriter` in-between - /// each frame, which is needed for writing the fcTL chunks between each frame - writer: Wrapper<'a, W>, - prev_buf: Vec<u8>, - curr_buf: Vec<u8>, - /// Amount of data already written - index: usize, - /// length of the current scanline - line_len: usize, - /// size of the frame (width * height * sample_size) - to_write: usize, - - width: u32, - height: u32, - - bpp: BytesPerPixel, - filter: FilterType, - adaptive_filter: AdaptiveFilterType, - fctl: Option<FrameControl>, - compression: Compression, -} - -impl<'a, W: Write> StreamWriter<'a, W> { - fn new(writer: ChunkOutput<'a, W>, buf_len: usize) -> Result<StreamWriter<'a, W>> { - let PartialInfo { - width, - height, - frame_control: fctl, - compression, - .. 
- } = writer.info; - - let bpp = writer.info.bpp_in_prediction(); - let in_len = writer.info.raw_row_length() - 1; - let filter = writer.options.filter; - let adaptive_filter = writer.options.adaptive_filter; - let prev_buf = vec![0; in_len]; - let curr_buf = vec![0; in_len]; - - let mut chunk_writer = ChunkWriter::new(writer, buf_len); - let (line_len, to_write) = chunk_writer.next_frame_info(); - chunk_writer.write_header()?; - let zlib = ZlibEncoder::new(chunk_writer, compression.to_options()); - - Ok(StreamWriter { - writer: Wrapper::Zlib(zlib), - index: 0, - prev_buf, - curr_buf, - bpp, - filter, - width, - height, - adaptive_filter, - line_len, - to_write, - fctl, - compression, - }) - } - - /// Set the used filter type for the next frame. - /// - /// The default filter is [`FilterType::Sub`] which provides a basic prediction algorithm for - /// sample values based on the previous. For a potentially better compression ratio, at the - /// cost of more complex processing, try out [`FilterType::Paeth`]. - /// - /// [`FilterType::Sub`]: enum.FilterType.html#variant.Sub - /// [`FilterType::Paeth`]: enum.FilterType.html#variant.Paeth - pub fn set_filter(&mut self, filter: FilterType) { - self.filter = filter; - } - - /// Set the adaptive filter type for the next frame. - /// - /// Adaptive filtering attempts to select the best filter for each line - /// based on heuristics which minimize the file size for compression rather - /// than use a single filter for the entire image. The default method is - /// [`AdaptiveFilterType::NonAdaptive`]. - /// - /// [`AdaptiveFilterType::NonAdaptive`]: enum.AdaptiveFilterType.html - pub fn set_adaptive_filter(&mut self, adaptive_filter: AdaptiveFilterType) { - self.adaptive_filter = adaptive_filter; - } - - /// Set the fraction of time the following frames are going to be displayed, - /// in seconds - /// - /// If the denominator is 0, it is to be treated as if it were 100 - /// (that is, the numerator then specifies 1/100ths of a second). - /// If the the value of the numerator is 0 the decoder should render the next frame - /// as quickly as possible, though viewers may impose a reasonable lower bound. - /// - /// This method will return an error if the image is not animated. - pub fn set_frame_delay(&mut self, numerator: u16, denominator: u16) -> Result<()> { - if let Some(ref mut fctl) = self.fctl { - fctl.delay_den = denominator; - fctl.delay_num = numerator; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the dimension of the following frames. 
- /// - /// This function will return an error when: - /// - The image is not an animated; - /// - /// - The selected dimension, considering also the current frame position, - /// goes outside the image boundaries; - /// - /// - One or both the width and height are 0; - /// - pub fn set_frame_dimension(&mut self, width: u32, height: u32) -> Result<()> { - if let Some(ref mut fctl) = self.fctl { - if Some(width) > self.width.checked_sub(fctl.x_offset) - || Some(height) > self.height.checked_sub(fctl.y_offset) - { - return Err(EncodingError::Format(FormatErrorKind::OutOfBounds.into())); - } else if width == 0 { - return Err(EncodingError::Format(FormatErrorKind::ZeroWidth.into())); - } else if height == 0 { - return Err(EncodingError::Format(FormatErrorKind::ZeroHeight.into())); - } - fctl.width = width; - fctl.height = height; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the position of the following frames. - /// - /// An error will be returned if: - /// - The image is not animated; - /// - /// - The selected position, considering also the current frame dimension, - /// goes outside the image boundaries; - /// - pub fn set_frame_position(&mut self, x: u32, y: u32) -> Result<()> { - if let Some(ref mut fctl) = self.fctl { - if Some(x) > self.width.checked_sub(fctl.width) - || Some(y) > self.height.checked_sub(fctl.height) - { - return Err(EncodingError::Format(FormatErrorKind::OutOfBounds.into())); - } - fctl.x_offset = x; - fctl.y_offset = y; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the frame dimension to occupy all the image, starting from - /// the current position. - /// - /// To reset the frame to the full image size [`reset_frame_position`] - /// should be called first. - /// - /// This method will return an error if the image is not animated. - /// - /// [`reset_frame_position`]: struct.Writer.html#method.reset_frame_position - pub fn reset_frame_dimension(&mut self) -> Result<()> { - if let Some(ref mut fctl) = self.fctl { - fctl.width = self.width - fctl.x_offset; - fctl.height = self.height - fctl.y_offset; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the frame position to (0, 0). - /// - /// Equivalent to calling [`set_frame_position(0, 0)`]. - /// - /// This method will return an error if the image is not animated. - /// - /// [`set_frame_position(0, 0)`]: struct.Writer.html#method.set_frame_position - pub fn reset_frame_position(&mut self) -> Result<()> { - if let Some(ref mut fctl) = self.fctl { - fctl.x_offset = 0; - fctl.y_offset = 0; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the blend operation for the following frames. - /// - /// The blend operation specifies whether the frame is to be alpha blended - /// into the current output buffer content, or whether it should completely - /// replace its region in the output buffer. - /// - /// See the [`BlendOp`] documentation for the possible values and their effects. - /// - /// *Note that for the first frame the two blend modes are functionally - /// equivalent due to the clearing of the output buffer at the beginning - /// of each play.* - /// - /// This method will return an error if the image is not animated. 
- /// - /// [`BlendOP`]: enum.BlendOp.html - pub fn set_blend_op(&mut self, op: BlendOp) -> Result<()> { - if let Some(ref mut fctl) = self.fctl { - fctl.blend_op = op; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - /// Set the dispose operation for the following frames. - /// - /// The dispose operation specifies how the output buffer should be changed - /// at the end of the delay (before rendering the next frame) - /// - /// See the [`DisposeOp`] documentation for the possible values and their effects. - /// - /// *Note that if the first frame uses [`DisposeOp::Previous`] - /// it will be treated as [`DisposeOp::Background`].* - /// - /// This method will return an error if the image is not animated. - /// - /// [`DisposeOp`]: ../common/enum.BlendOp.html - /// [`DisposeOp::Previous`]: ../common/enum.BlendOp.html#variant.Previous - /// [`DisposeOp::Background`]: ../common/enum.BlendOp.html#variant.Background - pub fn set_dispose_op(&mut self, op: DisposeOp) -> Result<()> { - if let Some(ref mut fctl) = self.fctl { - fctl.dispose_op = op; - Ok(()) - } else { - Err(EncodingError::Format(FormatErrorKind::NotAnimated.into())) - } - } - - pub fn finish(mut self) -> Result<()> { - if self.to_write > 0 { - let err = FormatErrorKind::MissingData(self.to_write).into(); - return Err(EncodingError::Format(err)); - } - - // TODO: call `writer.finish` somehow? - self.flush()?; - - if let Wrapper::Chunk(wrt) = self.writer.take() { - wrt.writer.validate_sequence_done()?; - } - - Ok(()) - } - - /// Flushes the buffered chunk, checks if it was the last frame, - /// writes the next frame header and gets the next frame scanline size - /// and image size. - /// NOTE: This method must only be called when the writer is the variant Chunk(_) - fn new_frame(&mut self) -> Result<()> { - let wrt = match &mut self.writer { - Wrapper::Chunk(wrt) => wrt, - Wrapper::Unrecoverable => { - let err = FormatErrorKind::Unrecoverable.into(); - return Err(EncodingError::Format(err)); - } - Wrapper::Zlib(_) => unreachable!("never called on a half-finished frame"), - Wrapper::None => unreachable!(), - }; - wrt.flush()?; - wrt.writer.validate_new_image()?; - - if let Some(fctl) = self.fctl { - wrt.set_fctl(fctl); - } - let (scansize, size) = wrt.next_frame_info(); - self.line_len = scansize; - self.to_write = size; - - wrt.write_header()?; - wrt.writer.increment_images_written(); - - // now it can be taken because the next statements cannot cause any errors - match self.writer.take() { - Wrapper::Chunk(wrt) => { - let encoder = ZlibEncoder::new(wrt, self.compression.to_options()); - self.writer = Wrapper::Zlib(encoder); - } - _ => unreachable!(), - }; - - Ok(()) - } -} - -impl<'a, W: Write> Write for StreamWriter<'a, W> { - fn write(&mut self, mut data: &[u8]) -> io::Result<usize> { - if let Wrapper::Unrecoverable = self.writer { - let err = FormatErrorKind::Unrecoverable.into(); - return Err(EncodingError::Format(err).into()); - } - - if data.is_empty() { - return Ok(0); - } - - if self.to_write == 0 { - match self.writer.take() { - Wrapper::Zlib(wrt) => match wrt.finish() { - Ok(chunk) => self.writer = Wrapper::Chunk(chunk), - Err(err) => { - self.writer = Wrapper::Unrecoverable; - return Err(err); - } - }, - chunk @ Wrapper::Chunk(_) => self.writer = chunk, - Wrapper::Unrecoverable => unreachable!(), - Wrapper::None => unreachable!(), - }; - - // Transition Wrapper::Chunk to Wrapper::Zlib. 
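            // `new_frame` flushes the finished chunk, validates that another
            // image may be written, emits the next fcTL header when animated,
            // and wraps the chunk writer in a fresh `ZlibEncoder`.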
- self.new_frame()?; - } - - let written = data.read(&mut self.curr_buf[..self.line_len][self.index..])?; - self.index += written; - self.to_write -= written; - - if self.index == self.line_len { - // TODO: reuse this buffer between rows. - let mut filtered = vec![0; self.curr_buf.len()]; - let filter_type = filter( - self.filter, - self.adaptive_filter, - self.bpp, - &self.prev_buf, - &self.curr_buf, - &mut filtered, - ); - // This can't fail as the other variant is used only to allow the zlib encoder to finish - let wrt = match &mut self.writer { - Wrapper::Zlib(wrt) => wrt, - _ => unreachable!(), - }; - - wrt.write_all(&[filter_type as u8])?; - wrt.write_all(&filtered)?; - mem::swap(&mut self.prev_buf, &mut self.curr_buf); - self.index = 0; - } - - Ok(written) - } - - fn flush(&mut self) -> io::Result<()> { - match &mut self.writer { - Wrapper::Zlib(wrt) => wrt.flush()?, - Wrapper::Chunk(wrt) => wrt.flush()?, - // This handles both the case where we entered an unrecoverable state after zlib - // decoding failure and after a panic while we had taken the chunk/zlib reader. - Wrapper::Unrecoverable | Wrapper::None => { - let err = FormatErrorKind::Unrecoverable.into(); - return Err(EncodingError::Format(err).into()); - } - } - - if self.index > 0 { - let err = FormatErrorKind::WrittenTooMuch(self.index).into(); - return Err(EncodingError::Format(err).into()); - } - - Ok(()) - } -} - -impl<W: Write> Drop for StreamWriter<'_, W> { - fn drop(&mut self) { - let _ = self.flush(); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::Decoder; - - use rand::{thread_rng, Rng}; - use std::fs::File; - use std::io::{Cursor, Write}; - use std::{cmp, io}; - - #[test] - fn roundtrip() { - // More loops = more random testing, but also more test wait time - for _ in 0..10 { - for path in glob::glob("tests/pngsuite/*.png") - .unwrap() - .map(|r| r.unwrap()) - { - if path.file_name().unwrap().to_str().unwrap().starts_with('x') { - // x* files are expected to fail to decode - continue; - } - eprintln!("{}", path.display()); - // Decode image - let decoder = Decoder::new(File::open(path).unwrap()); - let mut reader = decoder.read_info().unwrap(); - let mut buf = vec![0; reader.output_buffer_size()]; - let info = reader.next_frame(&mut buf).unwrap(); - // Encode decoded image - let mut out = Vec::new(); - { - let mut wrapper = RandomChunkWriter { - rng: thread_rng(), - w: &mut out, - }; - - let mut encoder = Encoder::new(&mut wrapper, info.width, info.height); - encoder.set_color(info.color_type); - encoder.set_depth(info.bit_depth); - if let Some(palette) = &reader.info().palette { - encoder.set_palette(palette.clone()); - } - let mut encoder = encoder.write_header().unwrap(); - encoder.write_image_data(&buf).unwrap(); - } - // Decode encoded decoded image - let decoder = Decoder::new(&*out); - let mut reader = decoder.read_info().unwrap(); - let mut buf2 = vec![0; reader.output_buffer_size()]; - reader.next_frame(&mut buf2).unwrap(); - // check if the encoded image is ok: - assert_eq!(buf, buf2); - } - } - } - - #[test] - fn roundtrip_stream() { - // More loops = more random testing, but also more test wait time - for _ in 0..10 { - for path in glob::glob("tests/pngsuite/*.png") - .unwrap() - .map(|r| r.unwrap()) - { - if path.file_name().unwrap().to_str().unwrap().starts_with('x') { - // x* files are expected to fail to decode - continue; - } - // Decode image - let decoder = Decoder::new(File::open(path).unwrap()); - let mut reader = decoder.read_info().unwrap(); - let mut buf = vec![0; 
reader.output_buffer_size()]; - let info = reader.next_frame(&mut buf).unwrap(); - // Encode decoded image - let mut out = Vec::new(); - { - let mut wrapper = RandomChunkWriter { - rng: thread_rng(), - w: &mut out, - }; - - let mut encoder = Encoder::new(&mut wrapper, info.width, info.height); - encoder.set_color(info.color_type); - encoder.set_depth(info.bit_depth); - if let Some(palette) = &reader.info().palette { - encoder.set_palette(palette.clone()); - } - let mut encoder = encoder.write_header().unwrap(); - let mut stream_writer = encoder.stream_writer().unwrap(); - - let mut outer_wrapper = RandomChunkWriter { - rng: thread_rng(), - w: &mut stream_writer, - }; - - outer_wrapper.write_all(&buf).unwrap(); - } - // Decode encoded decoded image - let decoder = Decoder::new(&*out); - let mut reader = decoder.read_info().unwrap(); - let mut buf2 = vec![0; reader.output_buffer_size()]; - reader.next_frame(&mut buf2).unwrap(); - // check if the encoded image is ok: - assert_eq!(buf, buf2); - } - } - } - - #[test] - fn image_palette() -> Result<()> { - for &bit_depth in &[1u8, 2, 4, 8] { - // Do a reference decoding, choose a fitting palette image from pngsuite - let path = format!("tests/pngsuite/basn3p0{}.png", bit_depth); - let decoder = Decoder::new(File::open(&path).unwrap()); - let mut reader = decoder.read_info().unwrap(); - - let mut decoded_pixels = vec![0; reader.output_buffer_size()]; - let info = reader.info(); - assert_eq!( - info.width as usize * info.height as usize * usize::from(bit_depth), - decoded_pixels.len() * 8 - ); - let info = reader.next_frame(&mut decoded_pixels).unwrap(); - let indexed_data = decoded_pixels; - - let palette = reader.info().palette.as_ref().unwrap(); - let mut out = Vec::new(); - { - let mut encoder = Encoder::new(&mut out, info.width, info.height); - encoder.set_depth(BitDepth::from_u8(bit_depth).unwrap()); - encoder.set_color(ColorType::Indexed); - encoder.set_palette(palette.as_ref()); - - let mut writer = encoder.write_header().unwrap(); - writer.write_image_data(&indexed_data).unwrap(); - } - - // Decode re-encoded image - let decoder = Decoder::new(&*out); - let mut reader = decoder.read_info().unwrap(); - let mut redecoded = vec![0; reader.output_buffer_size()]; - reader.next_frame(&mut redecoded).unwrap(); - // check if the encoded image is ok: - assert_eq!(indexed_data, redecoded); - } - Ok(()) - } - - #[test] - fn expect_error_on_wrong_image_len() -> Result<()> { - let width = 10; - let height = 10; - - let output = vec![0u8; 1024]; - let writer = Cursor::new(output); - let mut encoder = Encoder::new(writer, width as u32, height as u32); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Rgb); - let mut png_writer = encoder.write_header()?; - - let correct_image_size = width * height * 3; - let image = vec![0u8; correct_image_size + 1]; - let result = png_writer.write_image_data(image.as_ref()); - assert!(result.is_err()); - - Ok(()) - } - - #[test] - fn expect_error_on_empty_image() -> Result<()> { - let output = vec![0u8; 1024]; - let mut writer = Cursor::new(output); - - let encoder = Encoder::new(&mut writer, 0, 0); - assert!(encoder.write_header().is_err()); - - let encoder = Encoder::new(&mut writer, 100, 0); - assert!(encoder.write_header().is_err()); - - let encoder = Encoder::new(&mut writer, 0, 100); - assert!(encoder.write_header().is_err()); - - Ok(()) - } - - #[test] - fn expect_error_on_invalid_bit_depth_color_type_combination() -> Result<()> { - let output = vec![0u8; 1024]; - let mut writer = 
Cursor::new(output); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::One); - encoder.set_color(ColorType::Rgb); - assert!(encoder.write_header().is_err()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::One); - encoder.set_color(ColorType::GrayscaleAlpha); - assert!(encoder.write_header().is_err()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::One); - encoder.set_color(ColorType::Rgba); - assert!(encoder.write_header().is_err()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Two); - encoder.set_color(ColorType::Rgb); - assert!(encoder.write_header().is_err()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Two); - encoder.set_color(ColorType::GrayscaleAlpha); - assert!(encoder.write_header().is_err()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Two); - encoder.set_color(ColorType::Rgba); - assert!(encoder.write_header().is_err()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Four); - encoder.set_color(ColorType::Rgb); - assert!(encoder.write_header().is_err()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Four); - encoder.set_color(ColorType::GrayscaleAlpha); - assert!(encoder.write_header().is_err()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Four); - encoder.set_color(ColorType::Rgba); - assert!(encoder.write_header().is_err()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Sixteen); - encoder.set_color(ColorType::Indexed); - assert!(encoder.write_header().is_err()); - - Ok(()) - } - - #[test] - fn can_write_header_with_valid_bit_depth_color_type_combination() -> Result<()> { - let output = vec![0u8; 1024]; - let mut writer = Cursor::new(output); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::One); - encoder.set_color(ColorType::Grayscale); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::One); - encoder.set_color(ColorType::Indexed); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Two); - encoder.set_color(ColorType::Grayscale); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Two); - encoder.set_color(ColorType::Indexed); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Four); - encoder.set_color(ColorType::Grayscale); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Four); - encoder.set_color(ColorType::Indexed); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Grayscale); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Rgb); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Indexed); - 
assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::GrayscaleAlpha); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Rgba); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Sixteen); - encoder.set_color(ColorType::Grayscale); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Sixteen); - encoder.set_color(ColorType::Rgb); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Sixteen); - encoder.set_color(ColorType::GrayscaleAlpha); - assert!(encoder.write_header().is_ok()); - - let mut encoder = Encoder::new(&mut writer, 1, 1); - encoder.set_depth(BitDepth::Sixteen); - encoder.set_color(ColorType::Rgba); - assert!(encoder.write_header().is_ok()); - - Ok(()) - } - - #[test] - fn all_filters_roundtrip() -> io::Result<()> { - let pixel: Vec<_> = (0..48).collect(); - - let roundtrip = |filter: FilterType| -> io::Result<()> { - let mut buffer = vec![]; - let mut encoder = Encoder::new(&mut buffer, 4, 4); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Rgb); - encoder.set_filter(filter); - encoder.write_header()?.write_image_data(&pixel)?; - - let decoder = crate::Decoder::new(Cursor::new(buffer)); - let mut reader = decoder.read_info()?; - let info = reader.info(); - assert_eq!(info.width, 4); - assert_eq!(info.height, 4); - let mut dest = vec![0; pixel.len()]; - reader.next_frame(&mut dest)?; - assert_eq!(dest, pixel, "Deviation with filter type {:?}", filter); - - Ok(()) - }; - - roundtrip(FilterType::NoFilter)?; - roundtrip(FilterType::Sub)?; - roundtrip(FilterType::Up)?; - roundtrip(FilterType::Avg)?; - roundtrip(FilterType::Paeth)?; - - Ok(()) - } - - #[test] - fn some_gamma_roundtrip() -> io::Result<()> { - let pixel: Vec<_> = (0..48).collect(); - - let roundtrip = |gamma: Option<ScaledFloat>| -> io::Result<()> { - let mut buffer = vec![]; - let mut encoder = Encoder::new(&mut buffer, 4, 4); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Rgb); - encoder.set_filter(FilterType::Avg); - if let Some(gamma) = gamma { - encoder.set_source_gamma(gamma); - } - encoder.write_header()?.write_image_data(&pixel)?; - - let decoder = crate::Decoder::new(Cursor::new(buffer)); - let mut reader = decoder.read_info()?; - assert_eq!( - reader.info().source_gamma, - gamma, - "Deviation with gamma {:?}", - gamma - ); - let mut dest = vec![0; pixel.len()]; - let info = reader.next_frame(&mut dest)?; - assert_eq!(info.width, 4); - assert_eq!(info.height, 4); - - Ok(()) - }; - - roundtrip(None)?; - roundtrip(Some(ScaledFloat::new(0.35)))?; - roundtrip(Some(ScaledFloat::new(0.45)))?; - roundtrip(Some(ScaledFloat::new(0.55)))?; - roundtrip(Some(ScaledFloat::new(0.7)))?; - roundtrip(Some(ScaledFloat::new(1.0)))?; - roundtrip(Some(ScaledFloat::new(2.5)))?; - - Ok(()) - } - - #[test] - fn write_image_chunks_beyond_first() -> Result<()> { - let width = 10; - let height = 10; - - let output = vec![0u8; 1024]; - let writer = Cursor::new(output); - - // Not an animation but we should still be able to write multiple images - // See issue: <https://github.com/image-rs/image-png/issues/301> - // This is technically all valid png 
so there is no issue with correctness. - let mut encoder = Encoder::new(writer, width, height); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Grayscale); - let mut png_writer = encoder.write_header()?; - - for _ in 0..3 { - let correct_image_size = (width * height) as usize; - let image = vec![0u8; correct_image_size]; - png_writer.write_image_data(image.as_ref())?; - } - - Ok(()) - } - - #[test] - fn image_validate_sequence_without_animation() -> Result<()> { - let width = 10; - let height = 10; - - let output = vec![0u8; 1024]; - let writer = Cursor::new(output); - - let mut encoder = Encoder::new(writer, width, height); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Grayscale); - encoder.validate_sequence(true); - let mut png_writer = encoder.write_header()?; - - let correct_image_size = (width * height) as usize; - let image = vec![0u8; correct_image_size]; - png_writer.write_image_data(image.as_ref())?; - - assert!(png_writer.write_image_data(image.as_ref()).is_err()); - Ok(()) - } - - #[test] - fn image_validate_animation() -> Result<()> { - let width = 10; - let height = 10; - - let output = vec![0u8; 1024]; - let writer = Cursor::new(output); - let correct_image_size = (width * height) as usize; - let image = vec![0u8; correct_image_size]; - - let mut encoder = Encoder::new(writer, width, height); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Grayscale); - encoder.set_animated(1, 0)?; - encoder.validate_sequence(true); - let mut png_writer = encoder.write_header()?; - - png_writer.write_image_data(image.as_ref())?; - - Ok(()) - } - - #[test] - fn image_validate_animation2() -> Result<()> { - let width = 10; - let height = 10; - - let output = vec![0u8; 1024]; - let writer = Cursor::new(output); - let correct_image_size = (width * height) as usize; - let image = vec![0u8; correct_image_size]; - - let mut encoder = Encoder::new(writer, width, height); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Grayscale); - encoder.set_animated(2, 0)?; - encoder.validate_sequence(true); - let mut png_writer = encoder.write_header()?; - - png_writer.write_image_data(image.as_ref())?; - png_writer.write_image_data(image.as_ref())?; - png_writer.finish()?; - - Ok(()) - } - - #[test] - fn image_validate_animation_sep_def_image() -> Result<()> { - let width = 10; - let height = 10; - - let output = vec![0u8; 1024]; - let writer = Cursor::new(output); - let correct_image_size = (width * height) as usize; - let image = vec![0u8; correct_image_size]; - - let mut encoder = Encoder::new(writer, width, height); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Grayscale); - encoder.set_animated(1, 0)?; - encoder.set_sep_def_img(true)?; - encoder.validate_sequence(true); - let mut png_writer = encoder.write_header()?; - - png_writer.write_image_data(image.as_ref())?; - png_writer.write_image_data(image.as_ref())?; - png_writer.finish()?; - - Ok(()) - } - - #[test] - fn image_validate_missing_image() -> Result<()> { - let width = 10; - let height = 10; - - let output = vec![0u8; 1024]; - let writer = Cursor::new(output); - - let mut encoder = Encoder::new(writer, width, height); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Grayscale); - encoder.validate_sequence(true); - let png_writer = encoder.write_header()?; - - assert!(png_writer.finish().is_err()); - Ok(()) - } - - #[test] - fn image_validate_missing_animated_frame() -> Result<()> { - let width = 10; - let height = 10; - - 
let output = vec![0u8; 1024]; - let writer = Cursor::new(output); - let correct_image_size = (width * height) as usize; - let image = vec![0u8; correct_image_size]; - - let mut encoder = Encoder::new(writer, width, height); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Grayscale); - encoder.set_animated(2, 0)?; - encoder.validate_sequence(true); - let mut png_writer = encoder.write_header()?; - - png_writer.write_image_data(image.as_ref())?; - assert!(png_writer.finish().is_err()); - - Ok(()) - } - - #[test] - fn issue_307_stream_validation() -> Result<()> { - let output = vec![0u8; 1024]; - let mut cursor = Cursor::new(output); - - let encoder = Encoder::new(&mut cursor, 1, 1); // Create a 1-pixel image - let mut writer = encoder.write_header()?; - let mut stream = writer.stream_writer()?; - - let written = stream.write(&[1, 2, 3, 4])?; - assert_eq!(written, 1); - stream.finish()?; - drop(writer); - - { - cursor.set_position(0); - let mut decoder = Decoder::new(cursor).read_info().expect("A valid image"); - let mut buffer = [0u8; 1]; - decoder.next_frame(&mut buffer[..]).expect("Valid read"); - assert_eq!(buffer, [1]); - } - - Ok(()) - } - - #[test] - fn stream_filtering() -> Result<()> { - let output = vec![0u8; 1024]; - let mut cursor = Cursor::new(output); - - let mut encoder = Encoder::new(&mut cursor, 8, 8); - encoder.set_color(ColorType::Rgba); - encoder.set_filter(FilterType::Paeth); - let mut writer = encoder.write_header()?; - let mut stream = writer.stream_writer()?; - - for _ in 0..8 { - let written = stream.write(&[1; 32])?; - assert_eq!(written, 32); - } - stream.finish()?; - drop(writer); - - { - cursor.set_position(0); - let mut decoder = Decoder::new(cursor).read_info().expect("A valid image"); - let mut buffer = [0u8; 256]; - decoder.next_frame(&mut buffer[..]).expect("Valid read"); - assert_eq!(buffer, [1; 256]); - } - - Ok(()) - } - - #[test] - #[cfg(all(unix, not(target_pointer_width = "32")))] - fn exper_error_on_huge_chunk() -> Result<()> { - // Okay, so we want a proper 4 GB chunk but not actually spend the memory for reserving it. - // Let's rely on overcommit? Otherwise we got the rather dumb option of mmap-ing /dev/zero. - let empty = vec![0; 1usize << 31]; - let writer = Cursor::new(vec![0u8; 1024]); - - let mut encoder = Encoder::new(writer, 10, 10); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Grayscale); - let mut png_writer = encoder.write_header()?; - - assert!(png_writer.write_chunk(chunk::fdAT, &empty).is_err()); - Ok(()) - } - - #[test] - #[cfg(all(unix, not(target_pointer_width = "32")))] - fn exper_error_on_non_u32_chunk() -> Result<()> { - // Okay, so we want a proper 4 GB chunk but not actually spend the memory for reserving it. - // Let's rely on overcommit? Otherwise we got the rather dumb option of mmap-ing /dev/zero. 
- let empty = vec![0; 1usize << 32]; - let writer = Cursor::new(vec![0u8; 1024]); - - let mut encoder = Encoder::new(writer, 10, 10); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Grayscale); - let mut png_writer = encoder.write_header()?; - - assert!(png_writer.write_chunk(chunk::fdAT, &empty).is_err()); - Ok(()) - } - - #[test] - fn finish_drops_inner_writer() -> Result<()> { - struct NoWriter<'flag>(&'flag mut bool); - - impl Write for NoWriter<'_> { - fn write(&mut self, buf: &[u8]) -> io::Result<usize> { - Ok(buf.len()) - } - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } - } - impl Drop for NoWriter<'_> { - fn drop(&mut self) { - *self.0 = true; - } - } - - let mut flag = false; - - { - let mut encoder = Encoder::new(NoWriter(&mut flag), 10, 10); - encoder.set_depth(BitDepth::Eight); - encoder.set_color(ColorType::Grayscale); - - let mut writer = encoder.write_header()?; - writer.write_image_data(&[0; 100])?; - writer.finish()?; - } - - assert!(flag, "PNG finished but writer was not dropped"); - Ok(()) - } - - /// A Writer that only writes a few bytes at a time - struct RandomChunkWriter<R: Rng, W: Write> { - rng: R, - w: W, - } - - impl<R: Rng, W: Write> Write for RandomChunkWriter<R, W> { - fn write(&mut self, buf: &[u8]) -> io::Result<usize> { - // choose a random length to write - let len = cmp::min(self.rng.gen_range(1..50), buf.len()); - - self.w.write(&buf[0..len]) - } - - fn flush(&mut self) -> io::Result<()> { - self.w.flush() - } - } -} - -/// Mod to encapsulate the converters depending on the `deflate` crate. -/// -/// Since this only contains trait impls, there is no need to make this public, they are simply -/// available when the mod is compiled as well. -impl Compression { - fn to_options(self) -> flate2::Compression { - #[allow(deprecated)] - match self { - Compression::Default => flate2::Compression::default(), - Compression::Fast => flate2::Compression::fast(), - Compression::Best => flate2::Compression::best(), - #[allow(deprecated)] - Compression::Huffman => flate2::Compression::none(), - #[allow(deprecated)] - Compression::Rle => flate2::Compression::none(), - } - } -} diff --git a/vendor/png/src/filter.rs b/vendor/png/src/filter.rs deleted file mode 100644 index b561e4e..0000000 --- a/vendor/png/src/filter.rs +++ /dev/null @@ -1,801 +0,0 @@ -use core::convert::TryInto; - -use crate::common::BytesPerPixel; - -/// The byte level filter applied to scanlines to prepare them for compression. -/// -/// Compression in general benefits from repetitive data. The filter is a content-aware method of -/// compressing the range of occurring byte values to help the compression algorithm. Note that -/// this does not operate on pixels but on raw bytes of a scanline. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(u8)] -pub enum FilterType { - NoFilter = 0, - Sub = 1, - Up = 2, - Avg = 3, - Paeth = 4, -} - -impl Default for FilterType { - fn default() -> Self { - FilterType::Sub - } -} - -impl FilterType { - /// u8 -> Self. Temporary solution until Rust provides a canonical one. - pub fn from_u8(n: u8) -> Option<FilterType> { - match n { - 0 => Some(FilterType::NoFilter), - 1 => Some(FilterType::Sub), - 2 => Some(FilterType::Up), - 3 => Some(FilterType::Avg), - 4 => Some(FilterType::Paeth), - _ => None, - } - } -} - -/// The filtering method for preprocessing scanline data before compression. -/// -/// Adaptive filtering performs additional computation in an attempt to maximize -/// the compression of the data. 
[`NonAdaptive`] filtering is the default. -/// -/// [`NonAdaptive`]: enum.AdaptiveFilterType.html#variant.NonAdaptive -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(u8)] -pub enum AdaptiveFilterType { - Adaptive, - NonAdaptive, -} - -impl Default for AdaptiveFilterType { - fn default() -> Self { - AdaptiveFilterType::NonAdaptive - } -} - -fn filter_paeth_decode(a: u8, b: u8, c: u8) -> u8 { - // Decoding seems to optimize better with this algorithm - let pa = (i16::from(b) - i16::from(c)).abs(); - let pb = (i16::from(a) - i16::from(c)).abs(); - let pc = ((i16::from(a) - i16::from(c)) + (i16::from(b) - i16::from(c))).abs(); - - let mut out = a; - let mut min = pa; - - if pb < min { - min = pb; - out = b; - } - if pc < min { - out = c; - } - - out -} - -fn filter_paeth(a: u8, b: u8, c: u8) -> u8 { - // This is an optimized version of the Paeth filter from the PNG specification, proposed by - // Luca Versari for [FPNGE](https://www.lucaversari.it/FJXL_and_FPNGE.pdf). It operates - // entirely on unsigned 8-bit quantities, making it more conducive to vectorization. - // - // p = a + b - c - // pa = |p - a| = |a + b - c - a| = |b - c| = max(b, c) - min(b, c) - // pb = |p - b| = |a + b - c - b| = |a - c| = max(a, c) - min(a, c) - // pc = |p - c| = |a + b - c - c| = |(b - c) + (a - c)| = ... - // - // Further optimizing the calculation of `pc` is a bit trickier. However, notice that: - // - // a > c && b > c - // ==> (a - c) > 0 && (b - c) > 0 - // ==> pc > (a - c) && pc > (b - c) - // ==> pc > |a - c| && pc > |b - c| - // ==> pc > pb && pc > pa - // - // Meaning that if `c` is smaller than `a` and `b`, the value of `pc` is irrelevant. Similar - // reasoning applies if `c` is larger than the other two inputs. Assuming that `c >= b` and - // `c <= a` or vice versa: - // - // pc = ||b - c| - |a - c|| = |pa - pb| = max(pa, pb) - min(pa, pb) - // - let pa = b.max(c) - c.min(b); - let pb = a.max(c) - c.min(a); - let pc = if (a < c) == (c < b) { - pa.max(pb) - pa.min(pb) - } else { - 255 - }; - - if pa <= pb && pa <= pc { - a - } else if pb <= pc { - b - } else { - c - } -} - -pub(crate) fn unfilter( - filter: FilterType, - tbpp: BytesPerPixel, - previous: &[u8], - current: &mut [u8], -) { - use self::FilterType::*; - - // [2023/01 @okaneco] - Notes on optimizing decoding filters - // - // Links: - // [PR]: https://github.com/image-rs/image-png/pull/382 - // [SWAR]: http://aggregate.org/SWAR/over.html - // [AVG]: http://aggregate.org/MAGIC/#Average%20of%20Integers - // - // #382 heavily refactored and optimized the following filters, making the - // implementation nonobvious. These comments function as a summary of that - // PR with an explanation of the choices made below. - // - // #382 originally started with trying to optimize using a technique called - // SWAR, SIMD Within a Register. SWAR uses regular integer types like `u32` - // and `u64` as SIMD registers to perform vertical operations in parallel, - // usually involving bit-twiddling. This allowed each `BytesPerPixel` (bpp) - // pixel to be decoded in parallel: 3bpp and 4bpp in a `u32`, 6bpp and 8bpp - // in a `u64`. 
The `Sub` filter looked like the following code block, `Avg` - // was similar but used a bitwise average method from [AVG]: - // ``` - // // See "Unpartitioned Operations With Correction Code" from [SWAR] - // fn swar_add_u32(x: u32, y: u32) -> u32 { - // // 7-bit addition so there's no carry over the most significant bit - // let n = (x & 0x7f7f7f7f) + (y & 0x7f7f7f7f); // 0x7F = 0b_0111_1111 - // // 1-bit parity/XOR addition to fill in the missing MSB - // n ^ (x ^ y) & 0x80808080 // 0x80 = 0b_1000_0000 - // } - // - // let mut prev = - // u32::from_ne_bytes([current[0], current[1], current[2], current[3]]); - // for chunk in current[4..].chunks_exact_mut(4) { - // let cur = u32::from_ne_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]); - // let new_chunk = swar_add_u32(cur, prev); - // chunk.copy_from_slice(&new_chunk.to_ne_bytes()); - // prev = new_chunk; - // } - // ``` - // While this provided a measurable increase, @fintelia found that this idea - // could be taken even further by unrolling the chunks component-wise and - // avoiding unnecessary byte-shuffling by using byte arrays instead of - // `u32::from|to_ne_bytes`. The bitwise operations were no longer necessary - // so they were reverted to their obvious arithmetic equivalent. Lastly, - // `TryInto` was used instead of `copy_from_slice`. The `Sub` code now - // looked like this (with asserts to remove `0..bpp` bounds checks): - // ``` - // assert!(len > 3); - // let mut prev = [current[0], current[1], current[2], current[3]]; - // for chunk in current[4..].chunks_exact_mut(4) { - // let new_chunk = [ - // chunk[0].wrapping_add(prev[0]), - // chunk[1].wrapping_add(prev[1]), - // chunk[2].wrapping_add(prev[2]), - // chunk[3].wrapping_add(prev[3]), - // ]; - // *TryInto::<&mut [u8; 4]>::try_into(chunk).unwrap() = new_chunk; - // prev = new_chunk; - // } - // ``` - // The compiler was able to optimize the code to be even faster and this - // method even sped up Paeth filtering! Assertions were experimentally - // added within loop bodies which produced better instructions but no - // difference in speed. Finally, the code was refactored to remove manual - // slicing and start the previous pixel chunks with arrays of `[0; N]`. - // ``` - // let mut prev = [0; 4]; - // for chunk in current.chunks_exact_mut(4) { - // let new_chunk = [ - // chunk[0].wrapping_add(prev[0]), - // chunk[1].wrapping_add(prev[1]), - // chunk[2].wrapping_add(prev[2]), - // chunk[3].wrapping_add(prev[3]), - // ]; - // *TryInto::<&mut [u8; 4]>::try_into(chunk).unwrap() = new_chunk; - // prev = new_chunk; - // } - // ``` - // While we're not manually bit-twiddling anymore, a possible takeaway from - // this is to "think in SWAR" when dealing with small byte arrays. Unrolling - // array operations and performing them component-wise may unlock previously - // unavailable optimizations from the compiler, even when using the - // `chunks_exact` methods for their potential auto-vectorization benefits. 
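// [Editorial annotation, not part of the original vendored source.] Two tiny worked
// examples of what the arms of the `match` below reconstruct, assuming one byte per
// pixel: with `Sub`, a filtered row [5, 3, 2] unfilters to [5, 8, 10], because each
// byte becomes the wrapping sum of itself and the already-reconstructed byte one
// pixel to its left. With `Paeth` and a = 10 (left), b = 20 (above), c = 25
// (upper-left), the spec predictor p = a + b - c = 5 gives pa = 5, pb = 15, pc = 20,
// so the predictor is `a`; `filter_paeth_decode(10, 20, 25)` therefore returns 10,
// and the filtered byte is wrapping-added to that prediction.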
- match filter { - NoFilter => {} - Sub => match tbpp { - BytesPerPixel::One => { - current.iter_mut().reduce(|&mut prev, curr| { - *curr = curr.wrapping_add(prev); - curr - }); - } - BytesPerPixel::Two => { - let mut prev = [0; 2]; - for chunk in current.chunks_exact_mut(2) { - let new_chunk = [ - chunk[0].wrapping_add(prev[0]), - chunk[1].wrapping_add(prev[1]), - ]; - *TryInto::<&mut [u8; 2]>::try_into(chunk).unwrap() = new_chunk; - prev = new_chunk; - } - } - BytesPerPixel::Three => { - let mut prev = [0; 3]; - for chunk in current.chunks_exact_mut(3) { - let new_chunk = [ - chunk[0].wrapping_add(prev[0]), - chunk[1].wrapping_add(prev[1]), - chunk[2].wrapping_add(prev[2]), - ]; - *TryInto::<&mut [u8; 3]>::try_into(chunk).unwrap() = new_chunk; - prev = new_chunk; - } - } - BytesPerPixel::Four => { - let mut prev = [0; 4]; - for chunk in current.chunks_exact_mut(4) { - let new_chunk = [ - chunk[0].wrapping_add(prev[0]), - chunk[1].wrapping_add(prev[1]), - chunk[2].wrapping_add(prev[2]), - chunk[3].wrapping_add(prev[3]), - ]; - *TryInto::<&mut [u8; 4]>::try_into(chunk).unwrap() = new_chunk; - prev = new_chunk; - } - } - BytesPerPixel::Six => { - let mut prev = [0; 6]; - for chunk in current.chunks_exact_mut(6) { - let new_chunk = [ - chunk[0].wrapping_add(prev[0]), - chunk[1].wrapping_add(prev[1]), - chunk[2].wrapping_add(prev[2]), - chunk[3].wrapping_add(prev[3]), - chunk[4].wrapping_add(prev[4]), - chunk[5].wrapping_add(prev[5]), - ]; - *TryInto::<&mut [u8; 6]>::try_into(chunk).unwrap() = new_chunk; - prev = new_chunk; - } - } - BytesPerPixel::Eight => { - let mut prev = [0; 8]; - for chunk in current.chunks_exact_mut(8) { - let new_chunk = [ - chunk[0].wrapping_add(prev[0]), - chunk[1].wrapping_add(prev[1]), - chunk[2].wrapping_add(prev[2]), - chunk[3].wrapping_add(prev[3]), - chunk[4].wrapping_add(prev[4]), - chunk[5].wrapping_add(prev[5]), - chunk[6].wrapping_add(prev[6]), - chunk[7].wrapping_add(prev[7]), - ]; - *TryInto::<&mut [u8; 8]>::try_into(chunk).unwrap() = new_chunk; - prev = new_chunk; - } - } - }, - Up => { - for (curr, &above) in current.iter_mut().zip(previous) { - *curr = curr.wrapping_add(above); - } - } - Avg => match tbpp { - BytesPerPixel::One => { - let mut lprev = [0; 1]; - for (chunk, above) in current.chunks_exact_mut(1).zip(previous.chunks_exact(1)) { - let new_chunk = - [chunk[0].wrapping_add(((above[0] as u16 + lprev[0] as u16) / 2) as u8)]; - *TryInto::<&mut [u8; 1]>::try_into(chunk).unwrap() = new_chunk; - lprev = new_chunk; - } - } - BytesPerPixel::Two => { - let mut lprev = [0; 2]; - for (chunk, above) in current.chunks_exact_mut(2).zip(previous.chunks_exact(2)) { - let new_chunk = [ - chunk[0].wrapping_add(((above[0] as u16 + lprev[0] as u16) / 2) as u8), - chunk[1].wrapping_add(((above[1] as u16 + lprev[1] as u16) / 2) as u8), - ]; - *TryInto::<&mut [u8; 2]>::try_into(chunk).unwrap() = new_chunk; - lprev = new_chunk; - } - } - BytesPerPixel::Three => { - let mut lprev = [0; 3]; - for (chunk, above) in current.chunks_exact_mut(3).zip(previous.chunks_exact(3)) { - let new_chunk = [ - chunk[0].wrapping_add(((above[0] as u16 + lprev[0] as u16) / 2) as u8), - chunk[1].wrapping_add(((above[1] as u16 + lprev[1] as u16) / 2) as u8), - chunk[2].wrapping_add(((above[2] as u16 + lprev[2] as u16) / 2) as u8), - ]; - *TryInto::<&mut [u8; 3]>::try_into(chunk).unwrap() = new_chunk; - lprev = new_chunk; - } - } - BytesPerPixel::Four => { - let mut lprev = [0; 4]; - for (chunk, above) in current.chunks_exact_mut(4).zip(previous.chunks_exact(4)) { - let new_chunk = [ - 
chunk[0].wrapping_add(((above[0] as u16 + lprev[0] as u16) / 2) as u8), - chunk[1].wrapping_add(((above[1] as u16 + lprev[1] as u16) / 2) as u8), - chunk[2].wrapping_add(((above[2] as u16 + lprev[2] as u16) / 2) as u8), - chunk[3].wrapping_add(((above[3] as u16 + lprev[3] as u16) / 2) as u8), - ]; - *TryInto::<&mut [u8; 4]>::try_into(chunk).unwrap() = new_chunk; - lprev = new_chunk; - } - } - BytesPerPixel::Six => { - let mut lprev = [0; 6]; - for (chunk, above) in current.chunks_exact_mut(6).zip(previous.chunks_exact(6)) { - let new_chunk = [ - chunk[0].wrapping_add(((above[0] as u16 + lprev[0] as u16) / 2) as u8), - chunk[1].wrapping_add(((above[1] as u16 + lprev[1] as u16) / 2) as u8), - chunk[2].wrapping_add(((above[2] as u16 + lprev[2] as u16) / 2) as u8), - chunk[3].wrapping_add(((above[3] as u16 + lprev[3] as u16) / 2) as u8), - chunk[4].wrapping_add(((above[4] as u16 + lprev[4] as u16) / 2) as u8), - chunk[5].wrapping_add(((above[5] as u16 + lprev[5] as u16) / 2) as u8), - ]; - *TryInto::<&mut [u8; 6]>::try_into(chunk).unwrap() = new_chunk; - lprev = new_chunk; - } - } - BytesPerPixel::Eight => { - let mut lprev = [0; 8]; - for (chunk, above) in current.chunks_exact_mut(8).zip(previous.chunks_exact(8)) { - let new_chunk = [ - chunk[0].wrapping_add(((above[0] as u16 + lprev[0] as u16) / 2) as u8), - chunk[1].wrapping_add(((above[1] as u16 + lprev[1] as u16) / 2) as u8), - chunk[2].wrapping_add(((above[2] as u16 + lprev[2] as u16) / 2) as u8), - chunk[3].wrapping_add(((above[3] as u16 + lprev[3] as u16) / 2) as u8), - chunk[4].wrapping_add(((above[4] as u16 + lprev[4] as u16) / 2) as u8), - chunk[5].wrapping_add(((above[5] as u16 + lprev[5] as u16) / 2) as u8), - chunk[6].wrapping_add(((above[6] as u16 + lprev[6] as u16) / 2) as u8), - chunk[7].wrapping_add(((above[7] as u16 + lprev[7] as u16) / 2) as u8), - ]; - *TryInto::<&mut [u8; 8]>::try_into(chunk).unwrap() = new_chunk; - lprev = new_chunk; - } - } - }, - Paeth => { - // Paeth filter pixels: - // C B D - // A X - match tbpp { - BytesPerPixel::One => { - let mut a_bpp = [0; 1]; - let mut c_bpp = [0; 1]; - for (chunk, b_bpp) in current.chunks_exact_mut(1).zip(previous.chunks_exact(1)) - { - let new_chunk = [chunk[0] - .wrapping_add(filter_paeth_decode(a_bpp[0], b_bpp[0], c_bpp[0]))]; - *TryInto::<&mut [u8; 1]>::try_into(chunk).unwrap() = new_chunk; - a_bpp = new_chunk; - c_bpp = b_bpp.try_into().unwrap(); - } - } - BytesPerPixel::Two => { - let mut a_bpp = [0; 2]; - let mut c_bpp = [0; 2]; - for (chunk, b_bpp) in current.chunks_exact_mut(2).zip(previous.chunks_exact(2)) - { - let new_chunk = [ - chunk[0] - .wrapping_add(filter_paeth_decode(a_bpp[0], b_bpp[0], c_bpp[0])), - chunk[1] - .wrapping_add(filter_paeth_decode(a_bpp[1], b_bpp[1], c_bpp[1])), - ]; - *TryInto::<&mut [u8; 2]>::try_into(chunk).unwrap() = new_chunk; - a_bpp = new_chunk; - c_bpp = b_bpp.try_into().unwrap(); - } - } - BytesPerPixel::Three => { - let mut a_bpp = [0; 3]; - let mut c_bpp = [0; 3]; - for (chunk, b_bpp) in current.chunks_exact_mut(3).zip(previous.chunks_exact(3)) - { - let new_chunk = [ - chunk[0] - .wrapping_add(filter_paeth_decode(a_bpp[0], b_bpp[0], c_bpp[0])), - chunk[1] - .wrapping_add(filter_paeth_decode(a_bpp[1], b_bpp[1], c_bpp[1])), - chunk[2] - .wrapping_add(filter_paeth_decode(a_bpp[2], b_bpp[2], c_bpp[2])), - ]; - *TryInto::<&mut [u8; 3]>::try_into(chunk).unwrap() = new_chunk; - a_bpp = new_chunk; - c_bpp = b_bpp.try_into().unwrap(); - } - } - BytesPerPixel::Four => { - let mut a_bpp = [0; 4]; - let mut c_bpp = [0; 4]; - for (chunk, b_bpp) 
in current.chunks_exact_mut(4).zip(previous.chunks_exact(4)) - { - let new_chunk = [ - chunk[0] - .wrapping_add(filter_paeth_decode(a_bpp[0], b_bpp[0], c_bpp[0])), - chunk[1] - .wrapping_add(filter_paeth_decode(a_bpp[1], b_bpp[1], c_bpp[1])), - chunk[2] - .wrapping_add(filter_paeth_decode(a_bpp[2], b_bpp[2], c_bpp[2])), - chunk[3] - .wrapping_add(filter_paeth_decode(a_bpp[3], b_bpp[3], c_bpp[3])), - ]; - *TryInto::<&mut [u8; 4]>::try_into(chunk).unwrap() = new_chunk; - a_bpp = new_chunk; - c_bpp = b_bpp.try_into().unwrap(); - } - } - BytesPerPixel::Six => { - let mut a_bpp = [0; 6]; - let mut c_bpp = [0; 6]; - for (chunk, b_bpp) in current.chunks_exact_mut(6).zip(previous.chunks_exact(6)) - { - let new_chunk = [ - chunk[0] - .wrapping_add(filter_paeth_decode(a_bpp[0], b_bpp[0], c_bpp[0])), - chunk[1] - .wrapping_add(filter_paeth_decode(a_bpp[1], b_bpp[1], c_bpp[1])), - chunk[2] - .wrapping_add(filter_paeth_decode(a_bpp[2], b_bpp[2], c_bpp[2])), - chunk[3] - .wrapping_add(filter_paeth_decode(a_bpp[3], b_bpp[3], c_bpp[3])), - chunk[4] - .wrapping_add(filter_paeth_decode(a_bpp[4], b_bpp[4], c_bpp[4])), - chunk[5] - .wrapping_add(filter_paeth_decode(a_bpp[5], b_bpp[5], c_bpp[5])), - ]; - *TryInto::<&mut [u8; 6]>::try_into(chunk).unwrap() = new_chunk; - a_bpp = new_chunk; - c_bpp = b_bpp.try_into().unwrap(); - } - } - BytesPerPixel::Eight => { - let mut a_bpp = [0; 8]; - let mut c_bpp = [0; 8]; - for (chunk, b_bpp) in current.chunks_exact_mut(8).zip(previous.chunks_exact(8)) - { - let new_chunk = [ - chunk[0] - .wrapping_add(filter_paeth_decode(a_bpp[0], b_bpp[0], c_bpp[0])), - chunk[1] - .wrapping_add(filter_paeth_decode(a_bpp[1], b_bpp[1], c_bpp[1])), - chunk[2] - .wrapping_add(filter_paeth_decode(a_bpp[2], b_bpp[2], c_bpp[2])), - chunk[3] - .wrapping_add(filter_paeth_decode(a_bpp[3], b_bpp[3], c_bpp[3])), - chunk[4] - .wrapping_add(filter_paeth_decode(a_bpp[4], b_bpp[4], c_bpp[4])), - chunk[5] - .wrapping_add(filter_paeth_decode(a_bpp[5], b_bpp[5], c_bpp[5])), - chunk[6] - .wrapping_add(filter_paeth_decode(a_bpp[6], b_bpp[6], c_bpp[6])), - chunk[7] - .wrapping_add(filter_paeth_decode(a_bpp[7], b_bpp[7], c_bpp[7])), - ]; - *TryInto::<&mut [u8; 8]>::try_into(chunk).unwrap() = new_chunk; - a_bpp = new_chunk; - c_bpp = b_bpp.try_into().unwrap(); - } - } - } - } - } -} - -fn filter_internal( - method: FilterType, - bpp: usize, - len: usize, - previous: &[u8], - current: &[u8], - output: &mut [u8], -) -> FilterType { - use self::FilterType::*; - - // This value was chosen experimentally based on what acheived the best performance. The - // Rust compiler does auto-vectorization, and 32-bytes per loop iteration seems to enable - // the fastest code when doing so. 
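// [Editorial annotation, not part of the original vendored source.] With CHUNK_SIZE = 32,
// each iteration of the chunked loops below processes 32 filtered bytes at a time (for
// example, eight RGBA8 pixels); the trailing `remainder()` loops then handle rows whose
// filtered span is not a multiple of 32 bytes.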
- const CHUNK_SIZE: usize = 32; - - match method { - NoFilter => { - output.copy_from_slice(current); - NoFilter - } - Sub => { - let mut out_chunks = output[bpp..].chunks_exact_mut(CHUNK_SIZE); - let mut cur_chunks = current[bpp..].chunks_exact(CHUNK_SIZE); - let mut prev_chunks = current[..len - bpp].chunks_exact(CHUNK_SIZE); - - for ((out, cur), prev) in (&mut out_chunks).zip(&mut cur_chunks).zip(&mut prev_chunks) { - for i in 0..CHUNK_SIZE { - out[i] = cur[i].wrapping_sub(prev[i]); - } - } - - for ((out, cur), &prev) in out_chunks - .into_remainder() - .iter_mut() - .zip(cur_chunks.remainder()) - .zip(prev_chunks.remainder()) - { - *out = cur.wrapping_sub(prev); - } - - output[..bpp].copy_from_slice(¤t[..bpp]); - Sub - } - Up => { - let mut out_chunks = output.chunks_exact_mut(CHUNK_SIZE); - let mut cur_chunks = current.chunks_exact(CHUNK_SIZE); - let mut prev_chunks = previous.chunks_exact(CHUNK_SIZE); - - for ((out, cur), prev) in (&mut out_chunks).zip(&mut cur_chunks).zip(&mut prev_chunks) { - for i in 0..CHUNK_SIZE { - out[i] = cur[i].wrapping_sub(prev[i]); - } - } - - for ((out, cur), &prev) in out_chunks - .into_remainder() - .iter_mut() - .zip(cur_chunks.remainder()) - .zip(prev_chunks.remainder()) - { - *out = cur.wrapping_sub(prev); - } - Up - } - Avg => { - let mut out_chunks = output[bpp..].chunks_exact_mut(CHUNK_SIZE); - let mut cur_chunks = current[bpp..].chunks_exact(CHUNK_SIZE); - let mut cur_minus_bpp_chunks = current[..len - bpp].chunks_exact(CHUNK_SIZE); - let mut prev_chunks = previous[bpp..].chunks_exact(CHUNK_SIZE); - - for (((out, cur), cur_minus_bpp), prev) in (&mut out_chunks) - .zip(&mut cur_chunks) - .zip(&mut cur_minus_bpp_chunks) - .zip(&mut prev_chunks) - { - for i in 0..CHUNK_SIZE { - // Bitwise average of two integers without overflow and - // without converting to a wider bit-width. 
See: - // http://aggregate.org/MAGIC/#Average%20of%20Integers - // If this is unrolled by component, consider reverting to - // `((cur_minus_bpp[i] as u16 + prev[i] as u16) / 2) as u8` - out[i] = cur[i].wrapping_sub( - (cur_minus_bpp[i] & prev[i]) + ((cur_minus_bpp[i] ^ prev[i]) >> 1), - ); - } - } - - for (((out, cur), &cur_minus_bpp), &prev) in out_chunks - .into_remainder() - .iter_mut() - .zip(cur_chunks.remainder()) - .zip(cur_minus_bpp_chunks.remainder()) - .zip(prev_chunks.remainder()) - { - *out = cur.wrapping_sub((cur_minus_bpp & prev) + ((cur_minus_bpp ^ prev) >> 1)); - } - - for i in 0..bpp { - output[i] = current[i].wrapping_sub(previous[i] / 2); - } - Avg - } - Paeth => { - let mut out_chunks = output[bpp..].chunks_exact_mut(CHUNK_SIZE); - let mut cur_chunks = current[bpp..].chunks_exact(CHUNK_SIZE); - let mut a_chunks = current[..len - bpp].chunks_exact(CHUNK_SIZE); - let mut b_chunks = previous[bpp..].chunks_exact(CHUNK_SIZE); - let mut c_chunks = previous[..len - bpp].chunks_exact(CHUNK_SIZE); - - for ((((out, cur), a), b), c) in (&mut out_chunks) - .zip(&mut cur_chunks) - .zip(&mut a_chunks) - .zip(&mut b_chunks) - .zip(&mut c_chunks) - { - for i in 0..CHUNK_SIZE { - out[i] = cur[i].wrapping_sub(filter_paeth(a[i], b[i], c[i])); - } - } - - for ((((out, cur), &a), &b), &c) in out_chunks - .into_remainder() - .iter_mut() - .zip(cur_chunks.remainder()) - .zip(a_chunks.remainder()) - .zip(b_chunks.remainder()) - .zip(c_chunks.remainder()) - { - *out = cur.wrapping_sub(filter_paeth(a, b, c)); - } - - for i in 0..bpp { - output[i] = current[i].wrapping_sub(filter_paeth(0, previous[i], 0)); - } - Paeth - } - } -} - -pub(crate) fn filter( - method: FilterType, - adaptive: AdaptiveFilterType, - bpp: BytesPerPixel, - previous: &[u8], - current: &[u8], - output: &mut [u8], -) -> FilterType { - use FilterType::*; - let bpp = bpp.into_usize(); - let len = current.len(); - - match adaptive { - AdaptiveFilterType::NonAdaptive => { - filter_internal(method, bpp, len, previous, current, output) - } - AdaptiveFilterType::Adaptive => { - let mut min_sum: u64 = u64::MAX; - let mut filter_choice = FilterType::NoFilter; - for &filter in [Sub, Up, Avg, Paeth].iter() { - filter_internal(filter, bpp, len, previous, current, output); - let sum = sum_buffer(output); - if sum <= min_sum { - min_sum = sum; - filter_choice = filter; - } - } - - if filter_choice != Paeth { - filter_internal(filter_choice, bpp, len, previous, current, output); - } - filter_choice - } - } -} - -// Helper function for Adaptive filter buffer summation -fn sum_buffer(buf: &[u8]) -> u64 { - const CHUNK_SIZE: usize = 32; - - let mut buf_chunks = buf.chunks_exact(CHUNK_SIZE); - let mut sum = 0_u64; - - for chunk in &mut buf_chunks { - // At most, `acc` can be `32 * (i8::MIN as u8) = 32 * 128 = 4096`. 
- let mut acc = 0; - for &b in chunk { - acc += u64::from((b as i8).unsigned_abs()); - } - sum = sum.saturating_add(acc); - } - - let mut acc = 0; - for &b in buf_chunks.remainder() { - acc += u64::from((b as i8).unsigned_abs()); - } - - sum.saturating_add(acc) -} - -#[cfg(test)] -mod test { - use super::{filter, unfilter, AdaptiveFilterType, BytesPerPixel, FilterType}; - use core::iter; - - #[test] - fn roundtrip() { - // A multiple of 8, 6, 4, 3, 2, 1 - const LEN: u8 = 240; - let previous: Vec<_> = iter::repeat(1).take(LEN.into()).collect(); - let current: Vec<_> = (0..LEN).collect(); - let expected = current.clone(); - let adaptive = AdaptiveFilterType::NonAdaptive; - - let roundtrip = |kind, bpp: BytesPerPixel| { - let mut output = vec![0; LEN.into()]; - filter(kind, adaptive, bpp, &previous, ¤t, &mut output); - unfilter(kind, bpp, &previous, &mut output); - assert_eq!( - output, expected, - "Filtering {:?} with {:?} does not roundtrip", - bpp, kind - ); - }; - - let filters = [ - FilterType::NoFilter, - FilterType::Sub, - FilterType::Up, - FilterType::Avg, - FilterType::Paeth, - ]; - - let bpps = [ - BytesPerPixel::One, - BytesPerPixel::Two, - BytesPerPixel::Three, - BytesPerPixel::Four, - BytesPerPixel::Six, - BytesPerPixel::Eight, - ]; - - for &filter in filters.iter() { - for &bpp in bpps.iter() { - roundtrip(filter, bpp); - } - } - } - - #[test] - fn roundtrip_ascending_previous_line() { - // A multiple of 8, 6, 4, 3, 2, 1 - const LEN: u8 = 240; - let previous: Vec<_> = (0..LEN).collect(); - let current: Vec<_> = (0..LEN).collect(); - let expected = current.clone(); - let adaptive = AdaptiveFilterType::NonAdaptive; - - let roundtrip = |kind, bpp: BytesPerPixel| { - let mut output = vec![0; LEN.into()]; - filter(kind, adaptive, bpp, &previous, ¤t, &mut output); - unfilter(kind, bpp, &previous, &mut output); - assert_eq!( - output, expected, - "Filtering {:?} with {:?} does not roundtrip", - bpp, kind - ); - }; - - let filters = [ - FilterType::NoFilter, - FilterType::Sub, - FilterType::Up, - FilterType::Avg, - FilterType::Paeth, - ]; - - let bpps = [ - BytesPerPixel::One, - BytesPerPixel::Two, - BytesPerPixel::Three, - BytesPerPixel::Four, - BytesPerPixel::Six, - BytesPerPixel::Eight, - ]; - - for &filter in filters.iter() { - for &bpp in bpps.iter() { - roundtrip(filter, bpp); - } - } - } - - #[test] - // This tests that converting u8 to i8 doesn't overflow when taking the - // absolute value for adaptive filtering: -128_i8.abs() will panic in debug - // or produce garbage in release mode. The sum of 0..=255u8 should equal the - // sum of the absolute values of -128_i8..=127, or abs(-128..=0) + 1..=127. - fn sum_buffer_test() { - let sum = (0..=128).sum::<u64>() + (1..=127).sum::<u64>(); - let buf: Vec<u8> = (0_u8..=255).collect(); - - assert_eq!(sum, crate::filter::sum_buffer(&buf)); - } -} diff --git a/vendor/png/src/lib.rs b/vendor/png/src/lib.rs deleted file mode 100644 index b3bb15b..0000000 --- a/vendor/png/src/lib.rs +++ /dev/null @@ -1,81 +0,0 @@ -//! # PNG encoder and decoder -//! -//! This crate contains a PNG encoder and decoder. It supports reading of single lines or whole frames. -//! -//! ## The decoder -//! -//! The most important types for decoding purposes are [`Decoder`](struct.Decoder.html) and -//! [`Reader`](struct.Reader.html). They both wrap a `std::io::Read`. -//! `Decoder` serves as a builder for `Reader`. Calling `Decoder::read_info` reads from the `Read` until the -//! image data is reached. -//! -//! ### Using the decoder -//! ``` -//! 
use std::fs::File; -//! // The decoder is a builder for `Reader` and can be used to set various decoding options -//! // via `Transformations`. The default output transformation is `Transformations::IDENTITY`. -//! let decoder = png::Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap()); -//! let mut reader = decoder.read_info().unwrap(); -//! // Allocate the output buffer. -//! let mut buf = vec![0; reader.output_buffer_size()]; -//! // Read the next frame. An APNG might contain multiple frames. -//! let info = reader.next_frame(&mut buf).unwrap(); -//! // Grab the bytes of the image. -//! let bytes = &buf[..info.buffer_size()]; -//! // Inspect more details of the last read frame. -//! let in_animation = reader.info().frame_control.is_some(); -//! ``` -//! -//! ## Encoder -//! ### Using the encoder -//! -//! ```no_run -//! // For reading and opening files -//! use std::path::Path; -//! use std::fs::File; -//! use std::io::BufWriter; -//! -//! let path = Path::new(r"/path/to/image.png"); -//! let file = File::create(path).unwrap(); -//! let ref mut w = BufWriter::new(file); -//! -//! let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1. -//! encoder.set_color(png::ColorType::Rgba); -//! encoder.set_depth(png::BitDepth::Eight); -//! encoder.set_source_gamma(png::ScaledFloat::from_scaled(45455)); // 1.0 / 2.2, scaled by 100000 -//! encoder.set_source_gamma(png::ScaledFloat::new(1.0 / 2.2)); // 1.0 / 2.2, unscaled, but rounded -//! let source_chromaticities = png::SourceChromaticities::new( // Using unscaled instantiation here -//! (0.31270, 0.32900), -//! (0.64000, 0.33000), -//! (0.30000, 0.60000), -//! (0.15000, 0.06000) -//! ); -//! encoder.set_source_chromaticities(source_chromaticities); -//! let mut writer = encoder.write_header().unwrap(); -//! -//! let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing an RGBA sequence. First pixel is red and second pixel is black. -//! writer.write_image_data(&data).unwrap(); // Save -//! ``` -//! - -#![forbid(unsafe_code)] - -#[macro_use] -extern crate bitflags; - -pub mod chunk; -mod common; -mod decoder; -mod encoder; -mod filter; -mod srgb; -pub mod text_metadata; -mod traits; -mod utils; - -pub use crate::common::*; -pub use crate::decoder::{ - DecodeOptions, Decoded, Decoder, DecodingError, Limits, OutputInfo, Reader, StreamingDecoder, -}; -pub use crate::encoder::{Encoder, EncodingError, StreamWriter, Writer}; -pub use crate::filter::{AdaptiveFilterType, FilterType}; diff --git a/vendor/png/src/srgb.rs b/vendor/png/src/srgb.rs deleted file mode 100644 index 2780e42..0000000 --- a/vendor/png/src/srgb.rs +++ /dev/null @@ -1,30 +0,0 @@ -use crate::{ScaledFloat, SourceChromaticities}; - -/// Get the gamma that should be substituted for images conforming to the sRGB color space. -pub fn substitute_gamma() -> ScaledFloat { - // Value taken from https://www.w3.org/TR/2003/REC-PNG-20031110/#11sRGB - ScaledFloat::from_scaled(45455) -} - -/// Get the chromaticities that should be substituted for images conforming to the sRGB color space. 
-pub fn substitute_chromaticities() -> SourceChromaticities { - // Values taken from https://www.w3.org/TR/2003/REC-PNG-20031110/#11sRGB - SourceChromaticities { - white: ( - ScaledFloat::from_scaled(31270), - ScaledFloat::from_scaled(32900), - ), - red: ( - ScaledFloat::from_scaled(64000), - ScaledFloat::from_scaled(33000), - ), - green: ( - ScaledFloat::from_scaled(30000), - ScaledFloat::from_scaled(60000), - ), - blue: ( - ScaledFloat::from_scaled(15000), - ScaledFloat::from_scaled(6000), - ), - } -} diff --git a/vendor/png/src/text_metadata.rs b/vendor/png/src/text_metadata.rs deleted file mode 100644 index 42f8df3..0000000 --- a/vendor/png/src/text_metadata.rs +++ /dev/null @@ -1,586 +0,0 @@ -//! # Text chunks (tEXt/zTXt/iTXt) structs and functions -//! -//! The [PNG spec](https://www.w3.org/TR/2003/REC-PNG-20031110/#11textinfo) optionally allows for -//! embedded text chunks in the file. They may appear either before or after the image data -//! chunks. There are three kinds of text chunks. -//! - `tEXt`: This has a `keyword` and `text` field, and is ISO 8859-1 encoded. -//! - `zTXt`: This is semantically the same as `tEXt`, i.e. it has the same fields and -//! encoding, but the `text` field is compressed before being written into the PNG file. -//! - `iTXt`: This chunk allows for its `text` field to be any valid UTF-8, and supports -//! compression of the text field as well. -//! -//! The `ISO 8859-1` encoding technically doesn't allow any control characters -//! to be used, but in practice these values are encountered anyway. This can -//! either be the extended `ISO-8859-1` encoding with control characters or the -//! `Windows-1252` encoding. This crate assumes the `ISO-8859-1` encoding is -//! used. -//! -//! ## Reading text chunks -//! -//! As a PNG is decoded, any text chunk encountered is appended to the -//! [`Info`](`crate::common::Info`) struct, in the `uncompressed_latin1_text`, -//! `compressed_latin1_text`, and the `utf8_text` fields depending on whether the encountered -//! chunk is `tEXt`, `zTXt`, or `iTXt`. -//! -//! ``` -//! use std::fs::File; -//! use std::iter::FromIterator; -//! use std::path::PathBuf; -//! -//! // Opening a PNG file that has a zTXt chunk -//! let decoder = png::Decoder::new( -//! File::open(PathBuf::from_iter([ -//! "tests", -//! "text_chunk_examples", -//! "ztxt_example.png", -//! ])) -//! .unwrap(), -//! ); -//! let mut reader = decoder.read_info().unwrap(); -//! // If the text chunk is before the image data frames, `reader.info()` already contains the text. -//! for text_chunk in &reader.info().compressed_latin1_text { -//! println!("{:?}", text_chunk.keyword); // Prints the keyword -//! println!("{:#?}", text_chunk); // Prints out the text chunk. -//! // To get the uncompressed text, use the `get_text` method. -//! println!("{}", text_chunk.get_text().unwrap()); -//! } -//! ``` -//! -//! ## Writing text chunks -//! -//! There are two ways to write text chunks: the first is to add the appropriate text structs directly to the encoder header before the header is written to the file; -//! the second is to use the `write_text_chunk` method to add a text chunk at any point in the stream. -//! -//! ``` -//! # use png::text_metadata::{ITXtChunk, ZTXtChunk}; -//! # use std::env; -//! # use std::fs::File; -//! # use std::io::BufWriter; -//! # use std::iter::FromIterator; -//! # use std::path::PathBuf; -//! # let file = File::create(PathBuf::from_iter(["target", "text_chunk.png"])).unwrap(); -//! # let ref mut w = BufWriter::new(file); -//! 
let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1. -//! encoder.set_color(png::ColorType::Rgba); -//! encoder.set_depth(png::BitDepth::Eight); -//! // Adding text chunks to the header -//! encoder -//! .add_text_chunk( -//! "Testing tEXt".to_string(), -//! "This is a tEXt chunk that will appear before the IDAT chunks.".to_string(), -//! ) -//! .unwrap(); -//! encoder -//! .add_ztxt_chunk( -//! "Testing zTXt".to_string(), -//! "This is a zTXt chunk that is compressed in the png file.".to_string(), -//! ) -//! .unwrap(); -//! encoder -//! .add_itxt_chunk( -//! "Testing iTXt".to_string(), -//! "iTXt chunks support all of UTF8. Example: हिंदी.".to_string(), -//! ) -//! .unwrap(); -//! -//! let mut writer = encoder.write_header().unwrap(); -//! -//! let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing a RGBA sequence. First pixel is red and second pixel is black. -//! writer.write_image_data(&data).unwrap(); // Save -//! -//! // We can add a tEXt/zTXt/iTXt at any point before the encoder is dropped from scope. These chunks will be at the end of the png file. -//! let tail_ztxt_chunk = ZTXtChunk::new("Comment".to_string(), "A zTXt chunk after the image data.".to_string()); -//! writer.write_text_chunk(&tail_ztxt_chunk).unwrap(); -//! -//! // The fields of the text chunk are public, so they can be mutated before being written to the file. -//! let mut tail_itxt_chunk = ITXtChunk::new("Author".to_string(), "सायंतन खान".to_string()); -//! tail_itxt_chunk.compressed = true; -//! tail_itxt_chunk.language_tag = "hi".to_string(); -//! tail_itxt_chunk.translated_keyword = "लेखक".to_string(); -//! writer.write_text_chunk(&tail_itxt_chunk).unwrap(); -//! ``` - -#![warn(missing_docs)] - -use crate::{chunk, encoder, DecodingError, EncodingError}; -use flate2::write::ZlibEncoder; -use flate2::Compression; -use miniz_oxide::inflate::{decompress_to_vec_zlib, decompress_to_vec_zlib_with_limit}; -use std::{convert::TryFrom, io::Write}; - -/// Default decompression limit for compressed text chunks. -pub const DECOMPRESSION_LIMIT: usize = 2097152; // 2 MiB - -/// Text encoding errors that is wrapped by the standard EncodingError type -#[derive(Debug, Clone, Copy)] -pub(crate) enum TextEncodingError { - /// Unrepresentable characters in string - Unrepresentable, - /// Keyword longer than 79 bytes or empty - InvalidKeywordSize, - /// Error encountered while compressing text - CompressionError, -} - -/// Text decoding error that is wrapped by the standard DecodingError type -#[derive(Debug, Clone, Copy)] -pub(crate) enum TextDecodingError { - /// Unrepresentable characters in string - Unrepresentable, - /// Keyword longer than 79 bytes or empty - InvalidKeywordSize, - /// Missing null separator - MissingNullSeparator, - /// Compressed text cannot be uncompressed - InflationError, - /// Needs more space to decompress - OutOfDecompressionSpace, - /// Using an unspecified value for the compression method - InvalidCompressionMethod, - /// Using a byte that is not 0 or 255 as compression flag in iTXt chunk - InvalidCompressionFlag, - /// Missing the compression flag - MissingCompressionFlag, -} - -/// A generalized text chunk trait -pub trait EncodableTextChunk { - /// Encode text chunk as Vec<u8> to a `Write` - fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError>; -} - -/// Struct representing a tEXt chunk -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct TEXtChunk { - /// Keyword field of the tEXt chunk. Needs to be between 1-79 bytes when encoded as Latin-1. 
- pub keyword: String, - /// Text field of tEXt chunk. Can be at most 2GB. - pub text: String, -} - -fn decode_iso_8859_1(text: &[u8]) -> String { - text.iter().map(|&b| b as char).collect() -} - -fn encode_iso_8859_1(text: &str) -> Result<Vec<u8>, TextEncodingError> { - encode_iso_8859_1_iter(text).collect() -} - -fn encode_iso_8859_1_into(buf: &mut Vec<u8>, text: &str) -> Result<(), TextEncodingError> { - for b in encode_iso_8859_1_iter(text) { - buf.push(b?); - } - Ok(()) -} - -fn encode_iso_8859_1_iter(text: &str) -> impl Iterator<Item = Result<u8, TextEncodingError>> + '_ { - text.chars() - .map(|c| u8::try_from(c as u32).map_err(|_| TextEncodingError::Unrepresentable)) -} - -fn decode_ascii(text: &[u8]) -> Result<&str, TextDecodingError> { - if text.is_ascii() { - // `from_utf8` cannot panic because we're already checked that `text` is ASCII-7. - // And this is the only safe way to get ASCII-7 string from `&[u8]`. - Ok(std::str::from_utf8(text).expect("unreachable")) - } else { - Err(TextDecodingError::Unrepresentable) - } -} - -impl TEXtChunk { - /// Constructs a new TEXtChunk. - /// Not sure whether it should take &str or String. - pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self { - Self { - keyword: keyword.into(), - text: text.into(), - } - } - - /// Decodes a slice of bytes to a String using Latin-1 decoding. - /// The decoder runs in strict mode, and any decoding errors are passed along to the caller. - pub(crate) fn decode( - keyword_slice: &[u8], - text_slice: &[u8], - ) -> Result<Self, TextDecodingError> { - if keyword_slice.is_empty() || keyword_slice.len() > 79 { - return Err(TextDecodingError::InvalidKeywordSize); - } - - Ok(Self { - keyword: decode_iso_8859_1(keyword_slice), - text: decode_iso_8859_1(text_slice), - }) - } -} - -impl EncodableTextChunk for TEXtChunk { - /// Encodes TEXtChunk to a Writer. The keyword and text are separated by a byte of zeroes. - fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> { - let mut data = encode_iso_8859_1(&self.keyword)?; - - if data.is_empty() || data.len() > 79 { - return Err(TextEncodingError::InvalidKeywordSize.into()); - } - - data.push(0); - - encode_iso_8859_1_into(&mut data, &self.text)?; - - encoder::write_chunk(w, chunk::tEXt, &data) - } -} - -/// Struct representing a zTXt chunk -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ZTXtChunk { - /// Keyword field of the tEXt chunk. Needs to be between 1-79 bytes when encoded as Latin-1. - pub keyword: String, - /// Text field of zTXt chunk. It is compressed by default, but can be uncompressed if necessary. - text: OptCompressed, -} - -/// Private enum encoding the compressed and uncompressed states of zTXt/iTXt text field. -#[derive(Clone, Debug, PartialEq, Eq)] -enum OptCompressed { - /// Compressed version of text field. Can be at most 2GB. - Compressed(Vec<u8>), - /// Uncompressed text field. - Uncompressed(String), -} - -impl ZTXtChunk { - /// Creates a new ZTXt chunk. 
- pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self { - Self { - keyword: keyword.into(), - text: OptCompressed::Uncompressed(text.into()), - } - } - - pub(crate) fn decode( - keyword_slice: &[u8], - compression_method: u8, - text_slice: &[u8], - ) -> Result<Self, TextDecodingError> { - if keyword_slice.is_empty() || keyword_slice.len() > 79 { - return Err(TextDecodingError::InvalidKeywordSize); - } - - if compression_method != 0 { - return Err(TextDecodingError::InvalidCompressionMethod); - } - - Ok(Self { - keyword: decode_iso_8859_1(keyword_slice), - text: OptCompressed::Compressed(text_slice.to_vec()), - }) - } - - /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `DECOMPRESSION_LIMIT` bytes. - pub fn decompress_text(&mut self) -> Result<(), DecodingError> { - self.decompress_text_with_limit(DECOMPRESSION_LIMIT) - } - - /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `limit` bytes. - pub fn decompress_text_with_limit(&mut self, limit: usize) -> Result<(), DecodingError> { - match &self.text { - OptCompressed::Compressed(v) => { - let uncompressed_raw = match decompress_to_vec_zlib_with_limit(&v[..], limit) { - Ok(s) => s, - Err(err) if err.status == miniz_oxide::inflate::TINFLStatus::HasMoreOutput => { - return Err(DecodingError::from( - TextDecodingError::OutOfDecompressionSpace, - )); - } - Err(_) => { - return Err(DecodingError::from(TextDecodingError::InflationError)); - } - }; - self.text = OptCompressed::Uncompressed(decode_iso_8859_1(&uncompressed_raw)); - } - OptCompressed::Uncompressed(_) => {} - }; - Ok(()) - } - - /// Decompresses the inner text, and returns it as a `String`. - /// If decompression would use more than 2 MiB, first call `decompress_text_with_limit`, and then this method. - pub fn get_text(&self) -> Result<String, DecodingError> { - match &self.text { - OptCompressed::Compressed(v) => { - let uncompressed_raw = decompress_to_vec_zlib(&v[..]) - .map_err(|_| DecodingError::from(TextDecodingError::InflationError))?; - Ok(decode_iso_8859_1(&uncompressed_raw)) - } - OptCompressed::Uncompressed(s) => Ok(s.clone()), - } - } - - /// Compresses the inner text, mutating its own state. - pub fn compress_text(&mut self) -> Result<(), EncodingError> { - match &self.text { - OptCompressed::Uncompressed(s) => { - let uncompressed_raw = encode_iso_8859_1(s)?; - let mut encoder = ZlibEncoder::new(Vec::new(), Compression::fast()); - encoder - .write_all(&uncompressed_raw) - .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?; - self.text = OptCompressed::Compressed( - encoder - .finish() - .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?, - ); - } - OptCompressed::Compressed(_) => {} - } - - Ok(()) - } -} - -impl EncodableTextChunk for ZTXtChunk { - fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> { - let mut data = encode_iso_8859_1(&self.keyword)?; - - if data.is_empty() || data.len() > 79 { - return Err(TextEncodingError::InvalidKeywordSize.into()); - } - - // Null separator - data.push(0); - - // Compression method: the only valid value is 0, as of 2021. - data.push(0); - - match &self.text { - OptCompressed::Compressed(v) => { - data.extend_from_slice(&v[..]); - } - OptCompressed::Uncompressed(s) => { - // This code may have a bug. Check for correctness. 
- let uncompressed_raw = encode_iso_8859_1(s)?; - let mut encoder = ZlibEncoder::new(data, Compression::fast()); - encoder - .write_all(&uncompressed_raw) - .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?; - data = encoder - .finish() - .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?; - } - }; - - encoder::write_chunk(w, chunk::zTXt, &data) - } -} - -/// Struct encoding an iTXt chunk -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ITXtChunk { - /// The keyword field. This needs to be between 1-79 bytes when encoded as Latin-1. - pub keyword: String, - /// Indicates whether the text will be (or was) compressed in the PNG. - pub compressed: bool, - /// A hyphen separated list of languages that the keyword is translated to. This is ASCII-7 encoded. - pub language_tag: String, - /// Translated keyword. This is UTF-8 encoded. - pub translated_keyword: String, - /// Text field of iTXt chunk. It is compressed by default, but can be uncompressed if necessary. - text: OptCompressed, -} - -impl ITXtChunk { - /// Constructs a new iTXt chunk. Leaves all but keyword and text to default values. - pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self { - Self { - keyword: keyword.into(), - compressed: false, - language_tag: "".to_string(), - translated_keyword: "".to_string(), - text: OptCompressed::Uncompressed(text.into()), - } - } - - pub(crate) fn decode( - keyword_slice: &[u8], - compression_flag: u8, - compression_method: u8, - language_tag_slice: &[u8], - translated_keyword_slice: &[u8], - text_slice: &[u8], - ) -> Result<Self, TextDecodingError> { - if keyword_slice.is_empty() || keyword_slice.len() > 79 { - return Err(TextDecodingError::InvalidKeywordSize); - } - let keyword = decode_iso_8859_1(keyword_slice); - - let compressed = match compression_flag { - 0 => false, - 1 => true, - _ => return Err(TextDecodingError::InvalidCompressionFlag), - }; - - if compressed && compression_method != 0 { - return Err(TextDecodingError::InvalidCompressionMethod); - } - - let language_tag = decode_ascii(language_tag_slice)?.to_owned(); - - let translated_keyword = std::str::from_utf8(translated_keyword_slice) - .map_err(|_| TextDecodingError::Unrepresentable)? - .to_string(); - let text = if compressed { - OptCompressed::Compressed(text_slice.to_vec()) - } else { - OptCompressed::Uncompressed( - String::from_utf8(text_slice.to_vec()) - .map_err(|_| TextDecodingError::Unrepresentable)?, - ) - }; - - Ok(Self { - keyword, - compressed, - language_tag, - translated_keyword, - text, - }) - } - - /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `DECOMPRESSION_LIMIT` bytes. - pub fn decompress_text(&mut self) -> Result<(), DecodingError> { - self.decompress_text_with_limit(DECOMPRESSION_LIMIT) - } - - /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `limit` bytes. 
- pub fn decompress_text_with_limit(&mut self, limit: usize) -> Result<(), DecodingError> { - match &self.text { - OptCompressed::Compressed(v) => { - let uncompressed_raw = match decompress_to_vec_zlib_with_limit(&v[..], limit) { - Ok(s) => s, - Err(err) if err.status == miniz_oxide::inflate::TINFLStatus::HasMoreOutput => { - return Err(DecodingError::from( - TextDecodingError::OutOfDecompressionSpace, - )); - } - Err(_) => { - return Err(DecodingError::from(TextDecodingError::InflationError)); - } - }; - self.text = OptCompressed::Uncompressed( - String::from_utf8(uncompressed_raw) - .map_err(|_| TextDecodingError::Unrepresentable)?, - ); - } - OptCompressed::Uncompressed(_) => {} - }; - Ok(()) - } - - /// Decompresses the inner text, and returns it as a `String`. - /// If decompression takes more than 2 MiB, try `decompress_text_with_limit` followed by this method. - pub fn get_text(&self) -> Result<String, DecodingError> { - match &self.text { - OptCompressed::Compressed(v) => { - let uncompressed_raw = decompress_to_vec_zlib(&v[..]) - .map_err(|_| DecodingError::from(TextDecodingError::InflationError))?; - String::from_utf8(uncompressed_raw) - .map_err(|_| TextDecodingError::Unrepresentable.into()) - } - OptCompressed::Uncompressed(s) => Ok(s.clone()), - } - } - - /// Compresses the inner text, mutating its own state. - pub fn compress_text(&mut self) -> Result<(), EncodingError> { - match &self.text { - OptCompressed::Uncompressed(s) => { - let uncompressed_raw = s.as_bytes(); - let mut encoder = ZlibEncoder::new(Vec::new(), Compression::fast()); - encoder - .write_all(uncompressed_raw) - .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?; - self.text = OptCompressed::Compressed( - encoder - .finish() - .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?, - ); - } - OptCompressed::Compressed(_) => {} - } - - Ok(()) - } -} - -impl EncodableTextChunk for ITXtChunk { - fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> { - // Keyword - let mut data = encode_iso_8859_1(&self.keyword)?; - - if data.is_empty() || data.len() > 79 { - return Err(TextEncodingError::InvalidKeywordSize.into()); - } - - // Null separator - data.push(0); - - // Compression flag - if self.compressed { - data.push(1); - } else { - data.push(0); - } - - // Compression method - data.push(0); - - // Language tag - if !self.language_tag.is_ascii() { - return Err(EncodingError::from(TextEncodingError::Unrepresentable)); - } - data.extend(self.language_tag.as_bytes()); - - // Null separator - data.push(0); - - // Translated keyword - data.extend_from_slice(self.translated_keyword.as_bytes()); - - // Null separator - data.push(0); - - // Text - if self.compressed { - match &self.text { - OptCompressed::Compressed(v) => { - data.extend_from_slice(&v[..]); - } - OptCompressed::Uncompressed(s) => { - let uncompressed_raw = s.as_bytes(); - let mut encoder = ZlibEncoder::new(data, Compression::fast()); - encoder - .write_all(uncompressed_raw) - .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?; - data = encoder - .finish() - .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?; - } - } - } else { - match &self.text { - OptCompressed::Compressed(v) => { - let uncompressed_raw = decompress_to_vec_zlib(&v[..]) - .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?; - data.extend_from_slice(&uncompressed_raw[..]); - } - OptCompressed::Uncompressed(s) => { - data.extend_from_slice(s.as_bytes()); - } - } - } - - 
encoder::write_chunk(w, chunk::iTXt, &data) - } -} diff --git a/vendor/png/src/traits.rs b/vendor/png/src/traits.rs deleted file mode 100644 index ffc10e7..0000000 --- a/vendor/png/src/traits.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::io; - -macro_rules! read_bytes_ext { - ($output_type:ty) => { - impl<W: io::Read + ?Sized> ReadBytesExt<$output_type> for W { - #[inline] - fn read_be(&mut self) -> io::Result<$output_type> { - let mut bytes = [0u8; std::mem::size_of::<$output_type>()]; - self.read_exact(&mut bytes)?; - Ok(<$output_type>::from_be_bytes(bytes)) - } - } - }; -} - -macro_rules! write_bytes_ext { - ($input_type:ty) => { - impl<W: io::Write + ?Sized> WriteBytesExt<$input_type> for W { - #[inline] - fn write_be(&mut self, n: $input_type) -> io::Result<()> { - self.write_all(&n.to_be_bytes()) - } - } - }; -} - -/// Read extension to read big endian data -pub trait ReadBytesExt<T>: io::Read { - /// Read `T` from a bytes stream. Most significant byte first. - fn read_be(&mut self) -> io::Result<T>; -} - -/// Write extension to write big endian data -pub trait WriteBytesExt<T>: io::Write { - /// Writes `T` to a bytes stream. Most significant byte first. - fn write_be(&mut self, _: T) -> io::Result<()>; -} - -read_bytes_ext!(u8); -read_bytes_ext!(u16); -read_bytes_ext!(u32); - -write_bytes_ext!(u32); diff --git a/vendor/png/src/utils.rs b/vendor/png/src/utils.rs deleted file mode 100644 index d43753b..0000000 --- a/vendor/png/src/utils.rs +++ /dev/null @@ -1,463 +0,0 @@ -//! Utility functions -use std::iter::{repeat, StepBy}; -use std::ops::Range; - -#[inline(always)] -pub fn unpack_bits<F>(buf: &mut [u8], channels: usize, bit_depth: u8, func: F) -where - F: Fn(u8, &mut [u8]), -{ - // Return early if empty. This enables to subtract `channels` later without overflow. 
- if buf.len() < channels { - return; - } - - let bits = buf.len() / channels * bit_depth as usize; - let extra_bits = bits % 8; - let entries = bits / 8 - + match extra_bits { - 0 => 0, - _ => 1, - }; - let skip = match extra_bits { - 0 => 0, - n => (8 - n) / bit_depth as usize, - }; - let mask = ((1u16 << bit_depth) - 1) as u8; - let i = (0..entries) - .rev() // reverse iterator - .flat_map(|idx| - // this has to be reversed too - (0..8).step_by(bit_depth.into()) - .zip(repeat(idx))) - .skip(skip); - let j = (0..=buf.len() - channels).rev().step_by(channels); - for ((shift, i), j) in i.zip(j) { - let pixel = (buf[i] & (mask << shift)) >> shift; - func(pixel, &mut buf[j..(j + channels)]) - } -} - -pub fn expand_trns_line(input: &[u8], output: &mut [u8], trns: Option<&[u8]>, channels: usize) { - for (input, output) in input - .chunks_exact(channels) - .zip(output.chunks_exact_mut(channels + 1)) - { - output[..channels].copy_from_slice(input); - output[channels] = if Some(input) == trns { 0 } else { 0xFF }; - } -} - -pub fn expand_trns_line16(input: &[u8], output: &mut [u8], trns: Option<&[u8]>, channels: usize) { - for (input, output) in input - .chunks_exact(channels * 2) - .zip(output.chunks_exact_mut(channels * 2 + 2)) - { - output[..channels * 2].copy_from_slice(input); - if Some(input) == trns { - output[channels * 2] = 0; - output[channels * 2 + 1] = 0 - } else { - output[channels * 2] = 0xFF; - output[channels * 2 + 1] = 0xFF - }; - } -} - -pub fn expand_trns_and_strip_line16( - input: &[u8], - output: &mut [u8], - trns: Option<&[u8]>, - channels: usize, -) { - for (input, output) in input - .chunks_exact(channels * 2) - .zip(output.chunks_exact_mut(channels + 1)) - { - for i in 0..channels { - output[i] = input[i * 2]; - } - output[channels] = if Some(input) == trns { 0 } else { 0xFF }; - } -} - -/// This iterator iterates over the different passes of an image Adam7 encoded -/// PNG image -/// The pattern is: -/// 16462646 -/// 77777777 -/// 56565656 -/// 77777777 -/// 36463646 -/// 77777777 -/// 56565656 -/// 77777777 -/// -#[derive(Clone)] -pub(crate) struct Adam7Iterator { - line: u32, - lines: u32, - line_width: u32, - current_pass: u8, - width: u32, - height: u32, -} - -impl Adam7Iterator { - pub fn new(width: u32, height: u32) -> Adam7Iterator { - let mut this = Adam7Iterator { - line: 0, - lines: 0, - line_width: 0, - current_pass: 1, - width, - height, - }; - this.init_pass(); - this - } - - /// Calculates the bounds of the current pass - fn init_pass(&mut self) { - let w = f64::from(self.width); - let h = f64::from(self.height); - let (line_width, lines) = match self.current_pass { - 1 => (w / 8.0, h / 8.0), - 2 => ((w - 4.0) / 8.0, h / 8.0), - 3 => (w / 4.0, (h - 4.0) / 8.0), - 4 => ((w - 2.0) / 4.0, h / 4.0), - 5 => (w / 2.0, (h - 2.0) / 4.0), - 6 => ((w - 1.0) / 2.0, h / 2.0), - 7 => (w, (h - 1.0) / 2.0), - _ => unreachable!(), - }; - self.line_width = line_width.ceil() as u32; - self.lines = lines.ceil() as u32; - self.line = 0; - } - - /// The current pass#. 
- pub fn current_pass(&self) -> u8 { - self.current_pass - } -} - -/// Iterates over the (passes, lines, widths) -impl Iterator for Adam7Iterator { - type Item = (u8, u32, u32); - fn next(&mut self) -> Option<Self::Item> { - if self.line < self.lines && self.line_width > 0 { - let this_line = self.line; - self.line += 1; - Some((self.current_pass, this_line, self.line_width)) - } else if self.current_pass < 7 { - self.current_pass += 1; - self.init_pass(); - self.next() - } else { - None - } - } -} - -fn subbyte_pixels(scanline: &[u8], bits_pp: usize) -> impl Iterator<Item = u8> + '_ { - (0..scanline.len() * 8) - .step_by(bits_pp) - .map(move |bit_idx| { - let byte_idx = bit_idx / 8; - - // sub-byte samples start in the high-order bits - let rem = 8 - bit_idx % 8 - bits_pp; - - match bits_pp { - // evenly divides bytes - 1 => (scanline[byte_idx] >> rem) & 1, - 2 => (scanline[byte_idx] >> rem) & 3, - 4 => (scanline[byte_idx] >> rem) & 15, - _ => unreachable!(), - } - }) -} - -/// Given pass, image width, and line number, produce an iterator of bit positions of pixels to copy -/// from the input scanline to the image buffer. -fn expand_adam7_bits( - pass: u8, - width: usize, - line_no: usize, - bits_pp: usize, -) -> StepBy<Range<usize>> { - let (line_mul, line_off, samp_mul, samp_off) = match pass { - 1 => (8, 0, 8, 0), - 2 => (8, 0, 8, 4), - 3 => (8, 4, 4, 0), - 4 => (4, 0, 4, 2), - 5 => (4, 2, 2, 0), - 6 => (2, 0, 2, 1), - 7 => (2, 1, 1, 0), - _ => panic!("Adam7 pass out of range: {}", pass), - }; - - // the equivalent line number in progressive scan - let prog_line = line_mul * line_no + line_off; - // line width is rounded up to the next byte - let line_width = (width * bits_pp + 7) & !7; - let line_start = prog_line * line_width; - let start = line_start + (samp_off * bits_pp); - let stop = line_start + (width * bits_pp); - - (start..stop).step_by(bits_pp * samp_mul) -} - -/// Expands an Adam 7 pass -pub fn expand_pass( - img: &mut [u8], - width: u32, - scanline: &[u8], - pass: u8, - line_no: u32, - bits_pp: u8, -) { - let width = width as usize; - let line_no = line_no as usize; - let bits_pp = bits_pp as usize; - - // pass is out of range but don't blow up - if pass == 0 || pass > 7 { - return; - } - - let bit_indices = expand_adam7_bits(pass, width, line_no, bits_pp); - - if bits_pp < 8 { - for (pos, px) in bit_indices.zip(subbyte_pixels(scanline, bits_pp)) { - let rem = 8 - pos % 8 - bits_pp; - img[pos / 8] |= px << rem as u8; - } - } else { - let bytes_pp = bits_pp / 8; - - for (bitpos, px) in bit_indices.zip(scanline.chunks(bytes_pp)) { - for (offset, val) in px.iter().enumerate() { - img[bitpos / 8 + offset] = *val; - } - } - } -} - -#[test] -fn test_adam7() { - /* - 1646 - 7777 - 5656 - 7777 - */ - let it = Adam7Iterator::new(4, 4); - let passes: Vec<_> = it.collect(); - assert_eq!( - &*passes, - &[ - (1, 0, 1), - (4, 0, 1), - (5, 0, 2), - (6, 0, 2), - (6, 1, 2), - (7, 0, 4), - (7, 1, 4) - ] - ); -} - -#[test] -fn test_subbyte_pixels() { - let scanline = &[0b10101010, 0b10101010]; - - let pixels = subbyte_pixels(scanline, 1).collect::<Vec<_>>(); - assert_eq!(pixels.len(), 16); - assert_eq!(pixels, [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]); -} - -#[test] -fn test_expand_adam7_bits() { - let width = 32; - let bits_pp = 1; - - let expected = |offset: usize, step: usize, count: usize| { - (0..count) - .map(move |i| step * i + offset) - .collect::<Vec<_>>() - }; - - for line_no in 0..8 { - let start = 8 * line_no * width; - - assert_eq!( - expand_adam7_bits(1, width, line_no, 
bits_pp).collect::<Vec<_>>(), - expected(start, 8, 4) - ); - - let start = start + 4; - - assert_eq!( - expand_adam7_bits(2, width, line_no, bits_pp).collect::<Vec<_>>(), - expected(start, 8, 4) - ); - - let start = (8 * line_no + 4) as usize * width as usize; - - assert_eq!( - expand_adam7_bits(3, width, line_no, bits_pp).collect::<Vec<_>>(), - expected(start, 4, 8) - ); - } - - for line_no in 0..16 { - let start = 4 * line_no * width + 2; - - assert_eq!( - expand_adam7_bits(4, width, line_no, bits_pp).collect::<Vec<_>>(), - expected(start, 4, 8) - ); - - let start = (4 * line_no + 2) * width; - - assert_eq!( - expand_adam7_bits(5, width, line_no, bits_pp).collect::<Vec<_>>(), - expected(start, 2, 16) - ) - } - - for line_no in 0..32 { - let start = 2 * line_no * width + 1; - - assert_eq!( - expand_adam7_bits(6, width, line_no, bits_pp).collect::<Vec<_>>(), - expected(start, 2, 16), - "line_no: {}", - line_no - ); - - let start = (2 * line_no + 1) * width; - - assert_eq!( - expand_adam7_bits(7, width, line_no, bits_pp).collect::<Vec<_>>(), - expected(start, 1, 32) - ); - } -} - -#[test] -fn test_expand_pass_subbyte() { - let mut img = [0u8; 8]; - let width = 8; - let bits_pp = 1; - - expand_pass(&mut img, width, &[0b10000000], 1, 0, bits_pp); - assert_eq!(img, [0b10000000u8, 0, 0, 0, 0, 0, 0, 0]); - - expand_pass(&mut img, width, &[0b10000000], 2, 0, bits_pp); - assert_eq!(img, [0b10001000u8, 0, 0, 0, 0, 0, 0, 0]); - - expand_pass(&mut img, width, &[0b11000000], 3, 0, bits_pp); - assert_eq!(img, [0b10001000u8, 0, 0, 0, 0b10001000, 0, 0, 0]); - - expand_pass(&mut img, width, &[0b11000000], 4, 0, bits_pp); - assert_eq!(img, [0b10101010u8, 0, 0, 0, 0b10001000, 0, 0, 0]); - - expand_pass(&mut img, width, &[0b11000000], 4, 1, bits_pp); - assert_eq!(img, [0b10101010u8, 0, 0, 0, 0b10101010, 0, 0, 0]); - - expand_pass(&mut img, width, &[0b11110000], 5, 0, bits_pp); - assert_eq!(img, [0b10101010u8, 0, 0b10101010, 0, 0b10101010, 0, 0, 0]); - - expand_pass(&mut img, width, &[0b11110000], 5, 1, bits_pp); - assert_eq!( - img, - [0b10101010u8, 0, 0b10101010, 0, 0b10101010, 0, 0b10101010, 0] - ); - - expand_pass(&mut img, width, &[0b11110000], 6, 0, bits_pp); - assert_eq!( - img, - [0b11111111u8, 0, 0b10101010, 0, 0b10101010, 0, 0b10101010, 0] - ); - - expand_pass(&mut img, width, &[0b11110000], 6, 1, bits_pp); - assert_eq!( - img, - [0b11111111u8, 0, 0b11111111, 0, 0b10101010, 0, 0b10101010, 0] - ); - - expand_pass(&mut img, width, &[0b11110000], 6, 2, bits_pp); - assert_eq!( - img, - [0b11111111u8, 0, 0b11111111, 0, 0b11111111, 0, 0b10101010, 0] - ); - - expand_pass(&mut img, width, &[0b11110000], 6, 3, bits_pp); - assert_eq!( - [0b11111111u8, 0, 0b11111111, 0, 0b11111111, 0, 0b11111111, 0], - img - ); - - expand_pass(&mut img, width, &[0b11111111], 7, 0, bits_pp); - assert_eq!( - [ - 0b11111111u8, - 0b11111111, - 0b11111111, - 0, - 0b11111111, - 0, - 0b11111111, - 0 - ], - img - ); - - expand_pass(&mut img, width, &[0b11111111], 7, 1, bits_pp); - assert_eq!( - [ - 0b11111111u8, - 0b11111111, - 0b11111111, - 0b11111111, - 0b11111111, - 0, - 0b11111111, - 0 - ], - img - ); - - expand_pass(&mut img, width, &[0b11111111], 7, 2, bits_pp); - assert_eq!( - [ - 0b11111111u8, - 0b11111111, - 0b11111111, - 0b11111111, - 0b11111111, - 0b11111111, - 0b11111111, - 0 - ], - img - ); - - expand_pass(&mut img, width, &[0b11111111], 7, 3, bits_pp); - assert_eq!( - [ - 0b11111111u8, - 0b11111111, - 0b11111111, - 0b11111111, - 0b11111111, - 0b11111111, - 0b11111111, - 0b11111111 - ], - img - ); -} |
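The deleted `expand_adam7_bits` and `expand_pass` helpers above encode the Adam7 geometry as a per-pass table of `(line_mul, line_off, samp_mul, samp_off)` factors. As a quick illustration of that table, here is a standalone Rust sketch, not part of the removed crate sources; the `PASSES` constant and the `main` wrapper are purely illustrative. It rebuilds the 8x8 interlace pattern quoted in the `Adam7Iterator` doc comment from the same four factors.

// Standalone sketch: reconstruct the Adam7 8x8 interlace pattern from the
// per-pass (line_mul, line_off, samp_mul, samp_off) factors used above.
fn main() {
    // One entry per pass 1-7, copied from the match in expand_adam7_bits.
    const PASSES: [(usize, usize, usize, usize); 7] = [
        (8, 0, 8, 0),
        (8, 0, 8, 4),
        (8, 4, 4, 0),
        (4, 0, 4, 2),
        (4, 2, 2, 0),
        (2, 0, 2, 1),
        (2, 1, 1, 0),
    ];

    // Label each pixel of an 8x8 tile with the pass (1-7) that delivers it:
    // pass `p` covers rows line_off, line_off + line_mul, ... and within each
    // such row the columns samp_off, samp_off + samp_mul, ...
    let mut tile = [[0u8; 8]; 8];
    for (pass, &(line_mul, line_off, samp_mul, samp_off)) in PASSES.iter().enumerate() {
        for row in (line_off..8).step_by(line_mul) {
            for col in (samp_off..8).step_by(samp_mul) {
                tile[row][col] = pass as u8 + 1;
            }
        }
    }

    // Prints 16462646 / 77777777 / 56565656 / 77777777 / 36463646 / ...,
    // matching the pattern in the Adam7Iterator doc comment.
    for row in &tile {
        println!("{}", row.iter().map(|p| p.to_string()).collect::<String>());
    }
}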