Diffstat (limited to 'vendor/image/src/codecs')
44 files changed, 20828 insertions, 0 deletions
diff --git a/vendor/image/src/codecs/avif/decoder.rs b/vendor/image/src/codecs/avif/decoder.rs new file mode 100644 index 0000000..acba4f8 --- /dev/null +++ b/vendor/image/src/codecs/avif/decoder.rs @@ -0,0 +1,177 @@ +//! Decoding of AVIF images. +/// +/// The [AVIF] specification defines an image derivative of the AV1 bitstream, an open video codec. +/// +/// [AVIF]: https://aomediacodec.github.io/av1-avif/ +use std::convert::TryFrom; +use std::error::Error; +use std::io::{self, Cursor, Read}; +use std::marker::PhantomData; +use std::mem; + +use crate::error::DecodingError; +use crate::{ColorType, ImageDecoder, ImageError, ImageFormat, ImageResult}; + +use dav1d::{PixelLayout, PlanarImageComponent}; +use dcv_color_primitives as dcp; +use mp4parse::{read_avif, ParseStrictness}; + +fn error_map<E: Into<Box<dyn Error + Send + Sync>>>(err: E) -> ImageError { + ImageError::Decoding(DecodingError::new(ImageFormat::Avif.into(), err)) +} + +/// AVIF Decoder. +/// +/// Reads one image into the chosen input. +pub struct AvifDecoder<R> { + inner: PhantomData<R>, + picture: dav1d::Picture, + alpha_picture: Option<dav1d::Picture>, + icc_profile: Option<Vec<u8>>, +} + +impl<R: Read> AvifDecoder<R> { + /// Create a new decoder that reads its input from `r`. + pub fn new(mut r: R) -> ImageResult<Self> { + let ctx = read_avif(&mut r, ParseStrictness::Normal).map_err(error_map)?; + let coded = ctx.primary_item_coded_data().unwrap_or_default(); + + let mut primary_decoder = dav1d::Decoder::new(); + primary_decoder + .send_data(coded, None, None, None) + .map_err(error_map)?; + let picture = primary_decoder.get_picture().map_err(error_map)?; + let alpha_item = ctx.alpha_item_coded_data().unwrap_or_default(); + let alpha_picture = if !alpha_item.is_empty() { + let mut alpha_decoder = dav1d::Decoder::new(); + alpha_decoder + .send_data(alpha_item, None, None, None) + .map_err(error_map)?; + Some(alpha_decoder.get_picture().map_err(error_map)?) 
+ } else { + None + }; + let icc_profile = ctx + .icc_colour_information() + .map(|x| x.ok().unwrap_or_default()) + .map(|x| x.to_vec()); + + assert_eq!(picture.bit_depth(), 8); + Ok(AvifDecoder { + inner: PhantomData, + picture, + alpha_picture, + icc_profile, + }) + } +} + +/// Wrapper struct around a `Cursor<Vec<u8>>` +pub struct AvifReader<R>(Cursor<Vec<u8>>, PhantomData<R>); +impl<R> Read for AvifReader<R> { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { + if self.0.position() == 0 && buf.is_empty() { + mem::swap(buf, self.0.get_mut()); + Ok(buf.len()) + } else { + self.0.read_to_end(buf) + } + } +} + +impl<'a, R: 'a + Read> ImageDecoder<'a> for AvifDecoder<R> { + type Reader = AvifReader<R>; + + fn dimensions(&self) -> (u32, u32) { + (self.picture.width(), self.picture.height()) + } + + fn color_type(&self) -> ColorType { + ColorType::Rgba8 + } + + fn icc_profile(&mut self) -> Option<Vec<u8>> { + self.icc_profile.clone() + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + let plane = self.picture.plane(PlanarImageComponent::Y); + Ok(AvifReader( + Cursor::new(plane.as_ref().to_vec()), + PhantomData, + )) + } + + fn read_image(self, buf: &mut [u8]) -> ImageResult<()> { + assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); + + dcp::initialize(); + + if self.picture.pixel_layout() != PixelLayout::I400 { + let pixel_format = match self.picture.pixel_layout() { + PixelLayout::I400 => todo!(), + PixelLayout::I420 => dcp::PixelFormat::I420, + PixelLayout::I422 => dcp::PixelFormat::I422, + PixelLayout::I444 => dcp::PixelFormat::I444, + PixelLayout::Unknown => panic!("Unknown pixel layout"), + }; + let src_format = dcp::ImageFormat { + pixel_format, + color_space: dcp::ColorSpace::Bt601, + num_planes: 3, + }; + let dst_format = dcp::ImageFormat { + pixel_format: dcp::PixelFormat::Rgba, + color_space: dcp::ColorSpace::Lrgb, + num_planes: 1, + }; + let (width, height) = self.dimensions(); + let planes = &[ + self.picture.plane(PlanarImageComponent::Y), + self.picture.plane(PlanarImageComponent::U), + self.picture.plane(PlanarImageComponent::V), + ]; + let src_buffers = planes.iter().map(AsRef::as_ref).collect::<Vec<_>>(); + let strides = &[ + self.picture.stride(PlanarImageComponent::Y) as usize, + self.picture.stride(PlanarImageComponent::U) as usize, + self.picture.stride(PlanarImageComponent::V) as usize, + ]; + let dst_buffers = &mut [&mut buf[..]]; + dcp::convert_image( + width, + height, + &src_format, + Some(strides), + &src_buffers, + &dst_format, + None, + dst_buffers, + ) + .map_err(error_map)?; + } else { + let plane = self.picture.plane(PlanarImageComponent::Y); + buf.copy_from_slice(plane.as_ref()); + } + + if let Some(picture) = self.alpha_picture { + assert_eq!(picture.pixel_layout(), PixelLayout::I400); + let stride = picture.stride(PlanarImageComponent::Y) as usize; + let plane = picture.plane(PlanarImageComponent::Y); + let width = picture.width(); + for (buf, slice) in Iterator::zip( + buf.chunks_exact_mut(width as usize * 4), + plane.as_ref().chunks_exact(stride), + ) { + for i in 0..width as usize { + buf[3 + i * 4] = slice[i]; + } + } + } + + Ok(()) + } +} diff --git a/vendor/image/src/codecs/avif/encoder.rs b/vendor/image/src/codecs/avif/encoder.rs new file mode 100644 index 0000000..7484ff1 --- /dev/null +++ b/vendor/image/src/codecs/avif/encoder.rs @@ -0,0 +1,274 @@ +//! Encoding of AVIF images. 
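A minimal usage sketch for the decoder above, assuming the crate is built with the `avif-decoder` feature (the file name and error handling are illustrative, not taken from this diff):

use image::codecs::avif::AvifDecoder;
use image::ImageDecoder;
use std::fs::File;
use std::io::BufReader;

fn decode_avif() -> image::ImageResult<Vec<u8>> {
    let decoder = AvifDecoder::new(BufReader::new(File::open("sample.avif")?))?;
    // The decoder always reports ColorType::Rgba8, so the output buffer is
    // width * height * 4 bytes.
    let mut pixels = vec![0u8; decoder.total_bytes() as usize];
    decoder.read_image(&mut pixels)?;
    Ok(pixels)
}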
+/// +/// The [AVIF] specification defines an image derivative of the AV1 bitstream, an open video codec. +/// +/// [AVIF]: https://aomediacodec.github.io/av1-avif/ +use std::borrow::Cow; +use std::cmp::min; +use std::io::Write; + +use crate::buffer::ConvertBuffer; +use crate::color::{FromColor, Luma, LumaA, Rgb, Rgba}; +use crate::error::{ + EncodingError, ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind, +}; +use crate::{ColorType, ImageBuffer, ImageEncoder, ImageFormat, Pixel}; +use crate::{ImageError, ImageResult}; + +use bytemuck::{try_cast_slice, try_cast_slice_mut, Pod, PodCastError}; +use num_traits::Zero; +use ravif::{Encoder, Img, RGB8, RGBA8}; +use rgb::AsPixels; + +/// AVIF Encoder. +/// +/// Writes one image into the chosen output. +pub struct AvifEncoder<W> { + inner: W, + encoder: Encoder, +} + +/// An enumeration over supported AVIF color spaces +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[non_exhaustive] +pub enum ColorSpace { + /// sRGB colorspace + Srgb, + /// BT.709 colorspace + Bt709, +} + +impl ColorSpace { + fn to_ravif(self) -> ravif::ColorSpace { + match self { + Self::Srgb => ravif::ColorSpace::RGB, + Self::Bt709 => ravif::ColorSpace::YCbCr, + } + } +} + +enum RgbColor<'buf> { + Rgb8(Img<&'buf [RGB8]>), + Rgba8(Img<&'buf [RGBA8]>), +} + +impl<W: Write> AvifEncoder<W> { + /// Create a new encoder that writes its output to `w`. + pub fn new(w: W) -> Self { + AvifEncoder::new_with_speed_quality(w, 4, 80) // `cavif` uses these defaults + } + + /// Create a new encoder with specified speed and quality, that writes its output to `w`. + /// `speed` accepts a value in the range 0-10, where 0 is the slowest and 10 is the fastest. + /// `quality` accepts a value in the range 0-100, where 0 is the worst and 100 is the best. + pub fn new_with_speed_quality(w: W, speed: u8, quality: u8) -> Self { + // Clamp quality and speed to range + let quality = min(quality, 100); + let speed = min(speed, 10); + + let encoder = Encoder::new() + .with_quality(f32::from(quality)) + .with_alpha_quality(f32::from(quality)) + .with_speed(speed); + + AvifEncoder { inner: w, encoder } + } + + /// Encode with the specified `color_space`. + pub fn with_colorspace(mut self, color_space: ColorSpace) -> Self { + self.encoder = self + .encoder + .with_internal_color_space(color_space.to_ravif()); + self + } + + /// Configures `rayon` thread pool size. + /// The default `None` is to use all threads in the default `rayon` thread pool. + pub fn with_num_threads(mut self, num_threads: Option<usize>) -> Self { + self.encoder = self.encoder.with_num_threads(num_threads); + self + } +} + +impl<W: Write> ImageEncoder for AvifEncoder<W> { + /// Encode image data with the indicated color type. + /// + /// The encoder currently requires all data to be RGBA8, it will be converted internally if + /// necessary. When data is suitably aligned, i.e. u16 channels to two bytes, then the + /// conversion may be more efficient. + fn write_image( + mut self, + data: &[u8], + width: u32, + height: u32, + color: ColorType, + ) -> ImageResult<()> { + self.set_color(color); + // `ravif` needs strongly typed data so let's convert. We can either use a temporarily + // owned version in our own buffer or zero-copy if possible by using the input buffer. + // This requires going through `rgb`. + let mut fallback = vec![]; // This vector is used if we need to do a color conversion. + let result = match Self::encode_as_img(&mut fallback, data, width, height, color)? 
{ + RgbColor::Rgb8(buffer) => self.encoder.encode_rgb(buffer), + RgbColor::Rgba8(buffer) => self.encoder.encode_rgba(buffer), + }; + let data = result.map_err(|err| { + ImageError::Encoding(EncodingError::new(ImageFormat::Avif.into(), err)) + })?; + self.inner.write_all(&data.avif_file)?; + Ok(()) + } +} + +impl<W: Write> AvifEncoder<W> { + // Does not currently do anything. Mirrors behaviour of old config function. + fn set_color(&mut self, _color: ColorType) { + // self.config.color_space = ColorSpace::RGB; + } + + fn encode_as_img<'buf>( + fallback: &'buf mut Vec<u8>, + data: &'buf [u8], + width: u32, + height: u32, + color: ColorType, + ) -> ImageResult<RgbColor<'buf>> { + // Error wrapping utility for color dependent buffer dimensions. + fn try_from_raw<P: Pixel + 'static>( + data: &[P::Subpixel], + width: u32, + height: u32, + ) -> ImageResult<ImageBuffer<P, &[P::Subpixel]>> { + ImageBuffer::from_raw(width, height, data).ok_or_else(|| { + ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + )) + }) + } + + // Convert to target color type using few buffer allocations. + fn convert_into<'buf, P>( + buf: &'buf mut Vec<u8>, + image: ImageBuffer<P, &[P::Subpixel]>, + ) -> Img<&'buf [RGBA8]> + where + P: Pixel + 'static, + Rgba<u8>: FromColor<P>, + { + let (width, height) = image.dimensions(); + // TODO: conversion re-using the target buffer? + let image: ImageBuffer<Rgba<u8>, _> = image.convert(); + *buf = image.into_raw(); + Img::new(buf.as_pixels(), width as usize, height as usize) + } + + // Cast the input slice using few buffer allocations if possible. + // In particular try not to allocate if the caller did the infallible reverse. + fn cast_buffer<Channel>(buf: &[u8]) -> ImageResult<Cow<[Channel]>> + where + Channel: Pod + Zero, + { + match try_cast_slice(buf) { + Ok(slice) => Ok(Cow::Borrowed(slice)), + Err(PodCastError::OutputSliceWouldHaveSlop) => Err(ImageError::Parameter( + ParameterError::from_kind(ParameterErrorKind::DimensionMismatch), + )), + Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned) => { + // Sad, but let's allocate. + // bytemuck checks alignment _before_ slop but size mismatch before this.. + if buf.len() % std::mem::size_of::<Channel>() != 0 { + Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + ))) + } else { + let len = buf.len() / std::mem::size_of::<Channel>(); + let mut data = vec![Channel::zero(); len]; + let view = try_cast_slice_mut::<_, u8>(data.as_mut_slice()).unwrap(); + view.copy_from_slice(buf); + Ok(Cow::Owned(data)) + } + } + Err(err) => { + // Are you trying to encode a ZST?? + Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic(format!("{:?}", err)), + ))) + } + } + } + + match color { + ColorType::Rgb8 => { + // ravif doesn't do any checks but has some asserts, so we do the checks. + let img = try_from_raw::<Rgb<u8>>(data, width, height)?; + // Now, internally ravif uses u32 but it takes usize. We could do some checked + // conversion but instead we use that a non-empty image must be addressable. + if img.pixels().len() == 0 { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + ))); + } + + Ok(RgbColor::Rgb8(Img::new( + rgb::AsPixels::as_pixels(data), + width as usize, + height as usize, + ))) + } + ColorType::Rgba8 => { + // ravif doesn't do any checks but has some asserts, so we do the checks. 
+ let img = try_from_raw::<Rgba<u8>>(data, width, height)?; + // Now, internally ravif uses u32 but it takes usize. We could do some checked + // conversion but instead we use that a non-empty image must be addressable. + if img.pixels().len() == 0 { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + ))); + } + + Ok(RgbColor::Rgba8(Img::new( + rgb::AsPixels::as_pixels(data), + width as usize, + height as usize, + ))) + } + // we need a separate buffer.. + ColorType::L8 => { + let image = try_from_raw::<Luma<u8>>(data, width, height)?; + Ok(RgbColor::Rgba8(convert_into(fallback, image))) + } + ColorType::La8 => { + let image = try_from_raw::<LumaA<u8>>(data, width, height)?; + Ok(RgbColor::Rgba8(convert_into(fallback, image))) + } + // we need to really convert data.. + ColorType::L16 => { + let buffer = cast_buffer(data)?; + let image = try_from_raw::<Luma<u16>>(&buffer, width, height)?; + Ok(RgbColor::Rgba8(convert_into(fallback, image))) + } + ColorType::La16 => { + let buffer = cast_buffer(data)?; + let image = try_from_raw::<LumaA<u16>>(&buffer, width, height)?; + Ok(RgbColor::Rgba8(convert_into(fallback, image))) + } + ColorType::Rgb16 => { + let buffer = cast_buffer(data)?; + let image = try_from_raw::<Rgb<u16>>(&buffer, width, height)?; + Ok(RgbColor::Rgba8(convert_into(fallback, image))) + } + ColorType::Rgba16 => { + let buffer = cast_buffer(data)?; + let image = try_from_raw::<Rgba<u16>>(&buffer, width, height)?; + Ok(RgbColor::Rgba8(convert_into(fallback, image))) + } + // for cases we do not support at all? + _ => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Avif.into(), + UnsupportedErrorKind::Color(color.into()), + ), + )), + } + } +} diff --git a/vendor/image/src/codecs/avif/mod.rs b/vendor/image/src/codecs/avif/mod.rs new file mode 100644 index 0000000..f74217c --- /dev/null +++ b/vendor/image/src/codecs/avif/mod.rs @@ -0,0 +1,14 @@ +//! Encoding of AVIF images. +/// +/// The [AVIF] specification defines an image derivative of the AV1 bitstream, an open video codec. 
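A corresponding encoding sketch, assuming the `avif-encoder` feature (the speed/quality values and output path are arbitrary; `rgba` must hold width * height * 4 bytes):

use image::codecs::avif::AvifEncoder;
use image::{ColorType, ImageEncoder};
use std::fs::File;

fn encode_avif(rgba: &[u8], width: u32, height: u32) -> image::ImageResult<()> {
    // speed is clamped to 0-10 (higher is faster), quality to 0-100 (higher is better).
    let encoder = AvifEncoder::new_with_speed_quality(File::create("out.avif")?, 8, 80);
    encoder.write_image(rgba, width, height, ColorType::Rgba8)
}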
+/// +/// [AVIF]: https://aomediacodec.github.io/av1-avif/ +#[cfg(feature = "avif-decoder")] +pub use self::decoder::AvifDecoder; +#[cfg(feature = "avif-encoder")] +pub use self::encoder::{AvifEncoder, ColorSpace}; + +#[cfg(feature = "avif-decoder")] +mod decoder; +#[cfg(feature = "avif-encoder")] +mod encoder; diff --git a/vendor/image/src/codecs/bmp/decoder.rs b/vendor/image/src/codecs/bmp/decoder.rs new file mode 100644 index 0000000..58c0650 --- /dev/null +++ b/vendor/image/src/codecs/bmp/decoder.rs @@ -0,0 +1,1483 @@ +use std::cmp::{self, Ordering}; +use std::convert::TryFrom; +use std::io::{self, Cursor, Read, Seek, SeekFrom}; +use std::iter::{repeat, Iterator, Rev}; +use std::marker::PhantomData; +use std::slice::ChunksMut; +use std::{error, fmt, mem}; + +use byteorder::{LittleEndian, ReadBytesExt}; + +use crate::color::ColorType; +use crate::error::{ + DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind, +}; +use crate::image::{self, ImageDecoder, ImageDecoderRect, ImageFormat, Progress}; + +const BITMAPCOREHEADER_SIZE: u32 = 12; +const BITMAPINFOHEADER_SIZE: u32 = 40; +const BITMAPV2HEADER_SIZE: u32 = 52; +const BITMAPV3HEADER_SIZE: u32 = 56; +const BITMAPV4HEADER_SIZE: u32 = 108; +const BITMAPV5HEADER_SIZE: u32 = 124; + +static LOOKUP_TABLE_3_BIT_TO_8_BIT: [u8; 8] = [0, 36, 73, 109, 146, 182, 219, 255]; +static LOOKUP_TABLE_4_BIT_TO_8_BIT: [u8; 16] = [ + 0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255, +]; +static LOOKUP_TABLE_5_BIT_TO_8_BIT: [u8; 32] = [ + 0, 8, 16, 25, 33, 41, 49, 58, 66, 74, 82, 90, 99, 107, 115, 123, 132, 140, 148, 156, 165, 173, + 181, 189, 197, 206, 214, 222, 230, 239, 247, 255, +]; +static LOOKUP_TABLE_6_BIT_TO_8_BIT: [u8; 64] = [ + 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 45, 49, 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93, + 97, 101, 105, 109, 113, 117, 121, 125, 130, 134, 138, 142, 146, 150, 154, 158, 162, 166, 170, + 174, 178, 182, 186, 190, 194, 198, 202, 206, 210, 215, 219, 223, 227, 231, 235, 239, 243, 247, + 251, 255, +]; + +static R5_G5_B5_COLOR_MASK: Bitfields = Bitfields { + r: Bitfield { len: 5, shift: 10 }, + g: Bitfield { len: 5, shift: 5 }, + b: Bitfield { len: 5, shift: 0 }, + a: Bitfield { len: 0, shift: 0 }, +}; +const R8_G8_B8_COLOR_MASK: Bitfields = Bitfields { + r: Bitfield { len: 8, shift: 24 }, + g: Bitfield { len: 8, shift: 16 }, + b: Bitfield { len: 8, shift: 8 }, + a: Bitfield { len: 0, shift: 0 }, +}; +const R8_G8_B8_A8_COLOR_MASK: Bitfields = Bitfields { + r: Bitfield { len: 8, shift: 16 }, + g: Bitfield { len: 8, shift: 8 }, + b: Bitfield { len: 8, shift: 0 }, + a: Bitfield { len: 8, shift: 24 }, +}; + +const RLE_ESCAPE: u8 = 0; +const RLE_ESCAPE_EOL: u8 = 0; +const RLE_ESCAPE_EOF: u8 = 1; +const RLE_ESCAPE_DELTA: u8 = 2; + +/// The maximum width/height the decoder will process. 
+const MAX_WIDTH_HEIGHT: i32 = 0xFFFF; + +#[derive(PartialEq, Copy, Clone)] +enum ImageType { + Palette, + RGB16, + RGB24, + RGB32, + RGBA32, + RLE8, + RLE4, + Bitfields16, + Bitfields32, +} + +#[derive(PartialEq)] +enum BMPHeaderType { + Core, + Info, + V2, + V3, + V4, + V5, +} + +#[derive(PartialEq)] +enum FormatFullBytes { + RGB24, + RGB32, + RGBA32, + Format888, +} + +enum Chunker<'a> { + FromTop(ChunksMut<'a, u8>), + FromBottom(Rev<ChunksMut<'a, u8>>), +} + +pub(crate) struct RowIterator<'a> { + chunks: Chunker<'a>, +} + +impl<'a> Iterator for RowIterator<'a> { + type Item = &'a mut [u8]; + + #[inline(always)] + fn next(&mut self) -> Option<&'a mut [u8]> { + match self.chunks { + Chunker::FromTop(ref mut chunks) => chunks.next(), + Chunker::FromBottom(ref mut chunks) => chunks.next(), + } + } +} + +/// All errors that can occur when attempting to parse a BMP +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +enum DecoderError { + // Failed to decompress RLE data. + CorruptRleData, + + /// The bitfield mask interleaves set and unset bits + BitfieldMaskNonContiguous, + /// Bitfield mask invalid (e.g. too long for specified type) + BitfieldMaskInvalid, + /// Bitfield (of the specified width – 16- or 32-bit) mask not present + BitfieldMaskMissing(u32), + /// Bitfield (of the specified width – 16- or 32-bit) masks not present + BitfieldMasksMissing(u32), + + /// BMP's "BM" signature wrong or missing + BmpSignatureInvalid, + /// More than the exactly one allowed plane specified by the format + MoreThanOnePlane, + /// Invalid amount of bits per channel for the specified image type + InvalidChannelWidth(ChannelWidthError, u16), + + /// The width is negative + NegativeWidth(i32), + /// One of the dimensions is larger than a soft limit + ImageTooLarge(i32, i32), + /// The height is `i32::min_value()` + /// + /// General negative heights specify top-down DIBs + InvalidHeight, + + /// Specified image type is invalid for top-down BMPs (i.e. 
is compressed) + ImageTypeInvalidForTopDown(u32), + /// Image type not currently recognized by the decoder + ImageTypeUnknown(u32), + + /// Bitmap header smaller than the core header + HeaderTooSmall(u32), + + /// The palette is bigger than allowed by the bit count of the BMP + PaletteSizeExceeded { + colors_used: u32, + bit_count: u16, + }, +} + +impl fmt::Display for DecoderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DecoderError::CorruptRleData => f.write_str("Corrupt RLE data"), + DecoderError::BitfieldMaskNonContiguous => f.write_str("Non-contiguous bitfield mask"), + DecoderError::BitfieldMaskInvalid => f.write_str("Invalid bitfield mask"), + DecoderError::BitfieldMaskMissing(bb) => { + f.write_fmt(format_args!("Missing {}-bit bitfield mask", bb)) + } + DecoderError::BitfieldMasksMissing(bb) => { + f.write_fmt(format_args!("Missing {}-bit bitfield masks", bb)) + } + DecoderError::BmpSignatureInvalid => f.write_str("BMP signature not found"), + DecoderError::MoreThanOnePlane => f.write_str("More than one plane"), + DecoderError::InvalidChannelWidth(tp, n) => { + f.write_fmt(format_args!("Invalid channel bit count for {}: {}", tp, n)) + } + DecoderError::NegativeWidth(w) => f.write_fmt(format_args!("Negative width ({})", w)), + DecoderError::ImageTooLarge(w, h) => f.write_fmt(format_args!( + "Image too large (one of ({}, {}) > soft limit of {})", + w, h, MAX_WIDTH_HEIGHT + )), + DecoderError::InvalidHeight => f.write_str("Invalid height"), + DecoderError::ImageTypeInvalidForTopDown(tp) => f.write_fmt(format_args!( + "Invalid image type {} for top-down image.", + tp + )), + DecoderError::ImageTypeUnknown(tp) => { + f.write_fmt(format_args!("Unknown image compression type {}", tp)) + } + DecoderError::HeaderTooSmall(s) => { + f.write_fmt(format_args!("Bitmap header too small ({} bytes)", s)) + } + DecoderError::PaletteSizeExceeded { + colors_used, + bit_count, + } => f.write_fmt(format_args!( + "Palette size {} exceeds maximum size for BMP with bit count of {}", + colors_used, bit_count + )), + } + } +} + +impl From<DecoderError> for ImageError { + fn from(e: DecoderError) -> ImageError { + ImageError::Decoding(DecodingError::new(ImageFormat::Bmp.into(), e)) + } +} + +impl error::Error for DecoderError {} + +/// Distinct image types whose saved channel width can be invalid +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +enum ChannelWidthError { + /// RGB + Rgb, + /// 8-bit run length encoding + Rle8, + /// 4-bit run length encoding + Rle4, + /// Bitfields (16- or 32-bit) + Bitfields, +} + +impl fmt::Display for ChannelWidthError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(match self { + ChannelWidthError::Rgb => "RGB", + ChannelWidthError::Rle8 => "RLE8", + ChannelWidthError::Rle4 => "RLE4", + ChannelWidthError::Bitfields => "bitfields", + }) + } +} + +/// Convenience function to check if the combination of width, length and number of +/// channels would result in a buffer that would overflow. 
+fn check_for_overflow(width: i32, length: i32, channels: usize) -> ImageResult<()> { + num_bytes(width, length, channels) + .map(|_| ()) + .ok_or_else(|| { + ImageError::Unsupported(UnsupportedError::from_format_and_kind( + ImageFormat::Bmp.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Image dimensions ({}x{} w/{} channels) are too large", + width, length, channels + )), + )) + }) +} + +/// Calculate how many many bytes a buffer holding a decoded image with these properties would +/// require. Returns `None` if the buffer size would overflow or if one of the sizes are negative. +fn num_bytes(width: i32, length: i32, channels: usize) -> Option<usize> { + if width <= 0 || length <= 0 { + None + } else { + match channels.checked_mul(width as usize) { + Some(n) => n.checked_mul(length as usize), + None => None, + } + } +} + +/// Call the provided function on each row of the provided buffer, returning Err if the provided +/// function returns an error, extends the buffer if it's not large enough. +fn with_rows<F>( + buffer: &mut [u8], + width: i32, + height: i32, + channels: usize, + top_down: bool, + mut func: F, +) -> io::Result<()> +where + F: FnMut(&mut [u8]) -> io::Result<()>, +{ + // An overflow should already have been checked for when this is called, + // though we check anyhow, as it somehow seems to increase performance slightly. + let row_width = channels.checked_mul(width as usize).unwrap(); + let full_image_size = row_width.checked_mul(height as usize).unwrap(); + assert_eq!(buffer.len(), full_image_size); + + if !top_down { + for row in buffer.chunks_mut(row_width).rev() { + func(row)?; + } + } else { + for row in buffer.chunks_mut(row_width) { + func(row)?; + } + } + Ok(()) +} + +fn set_8bit_pixel_run<'a, T: Iterator<Item = &'a u8>>( + pixel_iter: &mut ChunksMut<u8>, + palette: &[[u8; 3]], + indices: T, + n_pixels: usize, +) -> bool { + for idx in indices.take(n_pixels) { + if let Some(pixel) = pixel_iter.next() { + let rgb = palette[*idx as usize]; + pixel[0] = rgb[0]; + pixel[1] = rgb[1]; + pixel[2] = rgb[2]; + } else { + return false; + } + } + true +} + +fn set_4bit_pixel_run<'a, T: Iterator<Item = &'a u8>>( + pixel_iter: &mut ChunksMut<u8>, + palette: &[[u8; 3]], + indices: T, + mut n_pixels: usize, +) -> bool { + for idx in indices { + macro_rules! set_pixel { + ($i:expr) => { + if n_pixels == 0 { + break; + } + if let Some(pixel) = pixel_iter.next() { + let rgb = palette[$i as usize]; + pixel[0] = rgb[0]; + pixel[1] = rgb[1]; + pixel[2] = rgb[2]; + } else { + return false; + } + n_pixels -= 1; + }; + } + set_pixel!(idx >> 4); + set_pixel!(idx & 0xf); + } + true +} + +#[rustfmt::skip] +fn set_2bit_pixel_run<'a, T: Iterator<Item = &'a u8>>( + pixel_iter: &mut ChunksMut<u8>, + palette: &[[u8; 3]], + indices: T, + mut n_pixels: usize, +) -> bool { + for idx in indices { + macro_rules! 
set_pixel { + ($i:expr) => { + if n_pixels == 0 { + break; + } + if let Some(pixel) = pixel_iter.next() { + let rgb = palette[$i as usize]; + pixel[0] = rgb[0]; + pixel[1] = rgb[1]; + pixel[2] = rgb[2]; + } else { + return false; + } + n_pixels -= 1; + }; + } + set_pixel!((idx >> 6) & 0x3u8); + set_pixel!((idx >> 4) & 0x3u8); + set_pixel!((idx >> 2) & 0x3u8); + set_pixel!( idx & 0x3u8); + } + true +} + +fn set_1bit_pixel_run<'a, T: Iterator<Item = &'a u8>>( + pixel_iter: &mut ChunksMut<u8>, + palette: &[[u8; 3]], + indices: T, +) { + for idx in indices { + let mut bit = 0x80; + loop { + if let Some(pixel) = pixel_iter.next() { + let rgb = palette[((idx & bit) != 0) as usize]; + pixel[0] = rgb[0]; + pixel[1] = rgb[1]; + pixel[2] = rgb[2]; + } else { + return; + } + + bit >>= 1; + if bit == 0 { + break; + } + } + } +} + +#[derive(PartialEq, Eq)] +struct Bitfield { + shift: u32, + len: u32, +} + +impl Bitfield { + fn from_mask(mask: u32, max_len: u32) -> ImageResult<Bitfield> { + if mask == 0 { + return Ok(Bitfield { shift: 0, len: 0 }); + } + let mut shift = mask.trailing_zeros(); + let mut len = (!(mask >> shift)).trailing_zeros(); + if len != mask.count_ones() { + return Err(DecoderError::BitfieldMaskNonContiguous.into()); + } + if len + shift > max_len { + return Err(DecoderError::BitfieldMaskInvalid.into()); + } + if len > 8 { + shift += len - 8; + len = 8; + } + Ok(Bitfield { shift, len }) + } + + fn read(&self, data: u32) -> u8 { + let data = data >> self.shift; + match self.len { + 1 => ((data & 0b1) * 0xff) as u8, + 2 => ((data & 0b11) * 0x55) as u8, + 3 => LOOKUP_TABLE_3_BIT_TO_8_BIT[(data & 0b00_0111) as usize], + 4 => LOOKUP_TABLE_4_BIT_TO_8_BIT[(data & 0b00_1111) as usize], + 5 => LOOKUP_TABLE_5_BIT_TO_8_BIT[(data & 0b01_1111) as usize], + 6 => LOOKUP_TABLE_6_BIT_TO_8_BIT[(data & 0b11_1111) as usize], + 7 => ((data & 0x7f) << 1 | (data & 0x7f) >> 6) as u8, + 8 => (data & 0xff) as u8, + _ => panic!(), + } + } +} + +#[derive(PartialEq, Eq)] +struct Bitfields { + r: Bitfield, + g: Bitfield, + b: Bitfield, + a: Bitfield, +} + +impl Bitfields { + fn from_mask( + r_mask: u32, + g_mask: u32, + b_mask: u32, + a_mask: u32, + max_len: u32, + ) -> ImageResult<Bitfields> { + let bitfields = Bitfields { + r: Bitfield::from_mask(r_mask, max_len)?, + g: Bitfield::from_mask(g_mask, max_len)?, + b: Bitfield::from_mask(b_mask, max_len)?, + a: Bitfield::from_mask(a_mask, max_len)?, + }; + if bitfields.r.len == 0 || bitfields.g.len == 0 || bitfields.b.len == 0 { + return Err(DecoderError::BitfieldMaskMissing(max_len).into()); + } + Ok(bitfields) + } +} + +/// A bmp decoder +pub struct BmpDecoder<R> { + reader: R, + + bmp_header_type: BMPHeaderType, + indexed_color: bool, + + width: i32, + height: i32, + data_offset: u64, + top_down: bool, + no_file_header: bool, + add_alpha_channel: bool, + has_loaded_metadata: bool, + image_type: ImageType, + + bit_count: u16, + colors_used: u32, + palette: Option<Vec<[u8; 3]>>, + bitfields: Option<Bitfields>, +} + +enum RLEInsn { + EndOfFile, + EndOfRow, + Delta(u8, u8), + Absolute(u8, Vec<u8>), + PixelRun(u8, u8), +} + +impl<R: Read + Seek> BmpDecoder<R> { + fn new_decoder(reader: R) -> BmpDecoder<R> { + BmpDecoder { + reader, + + bmp_header_type: BMPHeaderType::Info, + indexed_color: false, + + width: 0, + height: 0, + data_offset: 0, + top_down: false, + no_file_header: false, + add_alpha_channel: false, + has_loaded_metadata: false, + image_type: ImageType::Palette, + + bit_count: 0, + colors_used: 0, + palette: None, + bitfields: None, + } + } + + /// Create 
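As a worked example of the bitfield handling above: the R5 G5 B5 red mask 0x7C00 has 10 trailing zeros (shift = 10) and 5 set bits (len = 5), and a stored 5-bit value of 22 is widened through LOOKUP_TABLE_5_BIT_TO_8_BIT rather than a plain shift, so that full scale still maps to 255. Since Bitfield is private, a check like this would have to sit in the decoder's own test module (sketch only):

#[test]
fn r5_red_mask_shift_and_widening() {
    let red = Bitfield::from_mask(0x7C00, 16).unwrap();
    assert_eq!((red.shift, red.len), (10, 5));
    // 5-bit value 22 -> LOOKUP_TABLE_5_BIT_TO_8_BIT[22] == 181 (~ 22/31 * 255).
    assert_eq!(red.read(22 << 10), 181);
}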
a new decoder that decodes from the stream ```r``` + pub fn new(reader: R) -> ImageResult<BmpDecoder<R>> { + let mut decoder = Self::new_decoder(reader); + decoder.read_metadata()?; + Ok(decoder) + } + + /// Create a new decoder that decodes from the stream ```r``` without first + /// reading a BITMAPFILEHEADER. This is useful for decoding the CF_DIB format + /// directly from the Windows clipboard. + pub fn new_without_file_header(reader: R) -> ImageResult<BmpDecoder<R>> { + let mut decoder = Self::new_decoder(reader); + decoder.no_file_header = true; + decoder.read_metadata()?; + Ok(decoder) + } + + #[cfg(feature = "ico")] + pub(crate) fn new_with_ico_format(reader: R) -> ImageResult<BmpDecoder<R>> { + let mut decoder = Self::new_decoder(reader); + decoder.read_metadata_in_ico_format()?; + Ok(decoder) + } + + /// If true, the palette in BMP does not apply to the image even if it is found. + /// In other words, the output image is the indexed color. + pub fn set_indexed_color(&mut self, indexed_color: bool) { + self.indexed_color = indexed_color; + } + + #[cfg(feature = "ico")] + pub(crate) fn reader(&mut self) -> &mut R { + &mut self.reader + } + + fn read_file_header(&mut self) -> ImageResult<()> { + if self.no_file_header { + return Ok(()); + } + let mut signature = [0; 2]; + self.reader.read_exact(&mut signature)?; + + if signature != b"BM"[..] { + return Err(DecoderError::BmpSignatureInvalid.into()); + } + + // The next 8 bytes represent file size, followed the 4 reserved bytes + // We're not interesting these values + self.reader.read_u32::<LittleEndian>()?; + self.reader.read_u32::<LittleEndian>()?; + + self.data_offset = u64::from(self.reader.read_u32::<LittleEndian>()?); + + Ok(()) + } + + /// Read BITMAPCOREHEADER https://msdn.microsoft.com/en-us/library/vs/alm/dd183372(v=vs.85).aspx + /// + /// returns Err if any of the values are invalid. + fn read_bitmap_core_header(&mut self) -> ImageResult<()> { + // As height/width values in BMP files with core headers are only 16 bits long, + // they won't be larger than `MAX_WIDTH_HEIGHT`. + self.width = i32::from(self.reader.read_u16::<LittleEndian>()?); + self.height = i32::from(self.reader.read_u16::<LittleEndian>()?); + + check_for_overflow(self.width, self.height, self.num_channels())?; + + // Number of planes (format specifies that this should be 1). + if self.reader.read_u16::<LittleEndian>()? != 1 { + return Err(DecoderError::MoreThanOnePlane.into()); + } + + self.bit_count = self.reader.read_u16::<LittleEndian>()?; + self.image_type = match self.bit_count { + 1 | 4 | 8 => ImageType::Palette, + 24 => ImageType::RGB24, + _ => { + return Err(DecoderError::InvalidChannelWidth( + ChannelWidthError::Rgb, + self.bit_count, + ) + .into()) + } + }; + + Ok(()) + } + + /// Read BITMAPINFOHEADER https://msdn.microsoft.com/en-us/library/vs/alm/dd183376(v=vs.85).aspx + /// or BITMAPV{2|3|4|5}HEADER. + /// + /// returns Err if any of the values are invalid. + fn read_bitmap_info_header(&mut self) -> ImageResult<()> { + self.width = self.reader.read_i32::<LittleEndian>()?; + self.height = self.reader.read_i32::<LittleEndian>()?; + + // Width can not be negative + if self.width < 0 { + return Err(DecoderError::NegativeWidth(self.width).into()); + } else if self.width > MAX_WIDTH_HEIGHT || self.height > MAX_WIDTH_HEIGHT { + // Limit very large image sizes to avoid OOM issues. Images with these sizes are + // unlikely to be valid anyhow. 
+ return Err(DecoderError::ImageTooLarge(self.width, self.height).into()); + } + + if self.height == i32::min_value() { + return Err(DecoderError::InvalidHeight.into()); + } + + // A negative height indicates a top-down DIB. + if self.height < 0 { + self.height *= -1; + self.top_down = true; + } + + check_for_overflow(self.width, self.height, self.num_channels())?; + + // Number of planes (format specifies that this should be 1). + if self.reader.read_u16::<LittleEndian>()? != 1 { + return Err(DecoderError::MoreThanOnePlane.into()); + } + + self.bit_count = self.reader.read_u16::<LittleEndian>()?; + let image_type_u32 = self.reader.read_u32::<LittleEndian>()?; + + // Top-down dibs can not be compressed. + if self.top_down && image_type_u32 != 0 && image_type_u32 != 3 { + return Err(DecoderError::ImageTypeInvalidForTopDown(image_type_u32).into()); + } + self.image_type = match image_type_u32 { + 0 => match self.bit_count { + 1 | 2 | 4 | 8 => ImageType::Palette, + 16 => ImageType::RGB16, + 24 => ImageType::RGB24, + 32 if self.add_alpha_channel => ImageType::RGBA32, + 32 => ImageType::RGB32, + _ => { + return Err(DecoderError::InvalidChannelWidth( + ChannelWidthError::Rgb, + self.bit_count, + ) + .into()) + } + }, + 1 => match self.bit_count { + 8 => ImageType::RLE8, + _ => { + return Err(DecoderError::InvalidChannelWidth( + ChannelWidthError::Rle8, + self.bit_count, + ) + .into()) + } + }, + 2 => match self.bit_count { + 4 => ImageType::RLE4, + _ => { + return Err(DecoderError::InvalidChannelWidth( + ChannelWidthError::Rle4, + self.bit_count, + ) + .into()) + } + }, + 3 => match self.bit_count { + 16 => ImageType::Bitfields16, + 32 => ImageType::Bitfields32, + _ => { + return Err(DecoderError::InvalidChannelWidth( + ChannelWidthError::Bitfields, + self.bit_count, + ) + .into()) + } + }, + 4 => { + // JPEG compression is not implemented yet. + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Bmp.into(), + UnsupportedErrorKind::GenericFeature("JPEG compression".to_owned()), + ), + )); + } + 5 => { + // PNG compression is not implemented yet. + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Bmp.into(), + UnsupportedErrorKind::GenericFeature("PNG compression".to_owned()), + ), + )); + } + 11 | 12 | 13 => { + // CMYK types are not implemented yet. + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Bmp.into(), + UnsupportedErrorKind::GenericFeature("CMYK format".to_owned()), + ), + )); + } + _ => { + // Unknown compression type. 
+ return Err(DecoderError::ImageTypeUnknown(image_type_u32).into()); + } + }; + + // The next 12 bytes represent data array size in bytes, + // followed the horizontal and vertical printing resolutions + // We will calculate the pixel array size using width & height of image + // We're not interesting the horz or vert printing resolutions + self.reader.read_u32::<LittleEndian>()?; + self.reader.read_u32::<LittleEndian>()?; + self.reader.read_u32::<LittleEndian>()?; + + self.colors_used = self.reader.read_u32::<LittleEndian>()?; + + // The next 4 bytes represent number of "important" colors + // We're not interested in this value, so we'll skip it + self.reader.read_u32::<LittleEndian>()?; + + Ok(()) + } + + fn read_bitmasks(&mut self) -> ImageResult<()> { + let r_mask = self.reader.read_u32::<LittleEndian>()?; + let g_mask = self.reader.read_u32::<LittleEndian>()?; + let b_mask = self.reader.read_u32::<LittleEndian>()?; + + let a_mask = match self.bmp_header_type { + BMPHeaderType::V3 | BMPHeaderType::V4 | BMPHeaderType::V5 => { + self.reader.read_u32::<LittleEndian>()? + } + _ => 0, + }; + + self.bitfields = match self.image_type { + ImageType::Bitfields16 => { + Some(Bitfields::from_mask(r_mask, g_mask, b_mask, a_mask, 16)?) + } + ImageType::Bitfields32 => { + Some(Bitfields::from_mask(r_mask, g_mask, b_mask, a_mask, 32)?) + } + _ => None, + }; + + if self.bitfields.is_some() && a_mask != 0 { + self.add_alpha_channel = true; + } + + Ok(()) + } + + fn read_metadata(&mut self) -> ImageResult<()> { + if !self.has_loaded_metadata { + self.read_file_header()?; + let bmp_header_offset = self.reader.stream_position()?; + let bmp_header_size = self.reader.read_u32::<LittleEndian>()?; + let bmp_header_end = bmp_header_offset + u64::from(bmp_header_size); + + self.bmp_header_type = match bmp_header_size { + BITMAPCOREHEADER_SIZE => BMPHeaderType::Core, + BITMAPINFOHEADER_SIZE => BMPHeaderType::Info, + BITMAPV2HEADER_SIZE => BMPHeaderType::V2, + BITMAPV3HEADER_SIZE => BMPHeaderType::V3, + BITMAPV4HEADER_SIZE => BMPHeaderType::V4, + BITMAPV5HEADER_SIZE => BMPHeaderType::V5, + _ if bmp_header_size < BITMAPCOREHEADER_SIZE => { + // Size of any valid header types won't be smaller than core header type. + return Err(DecoderError::HeaderTooSmall(bmp_header_size).into()); + } + _ => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Bmp.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Unknown bitmap header type (size={})", + bmp_header_size + )), + ), + )) + } + }; + + match self.bmp_header_type { + BMPHeaderType::Core => { + self.read_bitmap_core_header()?; + } + BMPHeaderType::Info + | BMPHeaderType::V2 + | BMPHeaderType::V3 + | BMPHeaderType::V4 + | BMPHeaderType::V5 => { + self.read_bitmap_info_header()?; + } + }; + + match self.image_type { + ImageType::Bitfields16 | ImageType::Bitfields32 => self.read_bitmasks()?, + _ => {} + }; + + self.reader.seek(SeekFrom::Start(bmp_header_end))?; + + match self.image_type { + ImageType::Palette | ImageType::RLE4 | ImageType::RLE8 => self.read_palette()?, + _ => {} + }; + + if self.no_file_header { + // Use the offset of the end of metadata instead of reading a BMP file header. 
+ self.data_offset = self.reader.stream_position()?; + } + + self.has_loaded_metadata = true; + } + Ok(()) + } + + #[cfg(feature = "ico")] + #[doc(hidden)] + pub fn read_metadata_in_ico_format(&mut self) -> ImageResult<()> { + self.no_file_header = true; + self.add_alpha_channel = true; + self.read_metadata()?; + + // The height field in an ICO file is doubled to account for the AND mask + // (whether or not an AND mask is actually present). + self.height /= 2; + Ok(()) + } + + fn get_palette_size(&mut self) -> ImageResult<usize> { + match self.colors_used { + 0 => Ok(1 << self.bit_count), + _ => { + if self.colors_used > 1 << self.bit_count { + return Err(DecoderError::PaletteSizeExceeded { + colors_used: self.colors_used, + bit_count: self.bit_count, + } + .into()); + } + Ok(self.colors_used as usize) + } + } + } + + fn bytes_per_color(&self) -> usize { + match self.bmp_header_type { + BMPHeaderType::Core => 3, + _ => 4, + } + } + + fn read_palette(&mut self) -> ImageResult<()> { + const MAX_PALETTE_SIZE: usize = 256; // Palette indices are u8. + + let bytes_per_color = self.bytes_per_color(); + let palette_size = self.get_palette_size()?; + let max_length = MAX_PALETTE_SIZE * bytes_per_color; + + let length = palette_size * bytes_per_color; + let mut buf = Vec::with_capacity(max_length); + + // Resize and read the palette entries to the buffer. + // We limit the buffer to at most 256 colours to avoid any oom issues as + // 8-bit images can't reference more than 256 indexes anyhow. + buf.resize(cmp::min(length, max_length), 0); + self.reader.by_ref().read_exact(&mut buf)?; + + // Allocate 256 entries even if palette_size is smaller, to prevent corrupt files from + // causing an out-of-bounds array access. + match length.cmp(&max_length) { + Ordering::Greater => { + self.reader + .seek(SeekFrom::Current((length - max_length) as i64))?; + } + Ordering::Less => buf.resize(max_length, 0), + Ordering::Equal => (), + } + + let p: Vec<[u8; 3]> = (0..MAX_PALETTE_SIZE) + .map(|i| { + let b = buf[bytes_per_color * i]; + let g = buf[bytes_per_color * i + 1]; + let r = buf[bytes_per_color * i + 2]; + [r, g, b] + }) + .collect(); + + self.palette = Some(p); + + Ok(()) + } + + /// Get the palette that is embedded in the BMP image, if any. 
+ pub fn get_palette(&self) -> Option<&[[u8; 3]]> { + self.palette.as_ref().map(|vec| &vec[..]) + } + + fn num_channels(&self) -> usize { + if self.indexed_color { + 1 + } else if self.add_alpha_channel { + 4 + } else { + 3 + } + } + + fn rows<'a>(&self, pixel_data: &'a mut [u8]) -> RowIterator<'a> { + let stride = self.width as usize * self.num_channels(); + if self.top_down { + RowIterator { + chunks: Chunker::FromTop(pixel_data.chunks_mut(stride)), + } + } else { + RowIterator { + chunks: Chunker::FromBottom(pixel_data.chunks_mut(stride).rev()), + } + } + } + + fn read_palettized_pixel_data(&mut self, buf: &mut [u8]) -> ImageResult<()> { + let num_channels = self.num_channels(); + let row_byte_length = ((i32::from(self.bit_count) * self.width + 31) / 32 * 4) as usize; + let mut indices = vec![0; row_byte_length]; + let palette = self.palette.as_ref().unwrap(); + let bit_count = self.bit_count; + let reader = &mut self.reader; + let width = self.width as usize; + let skip_palette = self.indexed_color; + + reader.seek(SeekFrom::Start(self.data_offset))?; + + if num_channels == 4 { + buf.chunks_exact_mut(4).for_each(|c| c[3] = 0xFF); + } + + with_rows( + buf, + self.width, + self.height, + num_channels, + self.top_down, + |row| { + reader.read_exact(&mut indices)?; + if skip_palette { + row.clone_from_slice(&indices[0..width]); + } else { + let mut pixel_iter = row.chunks_mut(num_channels); + match bit_count { + 1 => { + set_1bit_pixel_run(&mut pixel_iter, palette, indices.iter()); + } + 2 => { + set_2bit_pixel_run(&mut pixel_iter, palette, indices.iter(), width); + } + 4 => { + set_4bit_pixel_run(&mut pixel_iter, palette, indices.iter(), width); + } + 8 => { + set_8bit_pixel_run(&mut pixel_iter, palette, indices.iter(), width); + } + _ => panic!(), + }; + } + Ok(()) + }, + )?; + + Ok(()) + } + + fn read_16_bit_pixel_data( + &mut self, + buf: &mut [u8], + bitfields: Option<&Bitfields>, + ) -> ImageResult<()> { + let num_channels = self.num_channels(); + let row_padding_len = self.width as usize % 2 * 2; + let row_padding = &mut [0; 2][..row_padding_len]; + let bitfields = match bitfields { + Some(b) => b, + None => self.bitfields.as_ref().unwrap(), + }; + let reader = &mut self.reader; + + reader.seek(SeekFrom::Start(self.data_offset))?; + + with_rows( + buf, + self.width, + self.height, + num_channels, + self.top_down, + |row| { + for pixel in row.chunks_mut(num_channels) { + let data = u32::from(reader.read_u16::<LittleEndian>()?); + + pixel[0] = bitfields.r.read(data); + pixel[1] = bitfields.g.read(data); + pixel[2] = bitfields.b.read(data); + if num_channels == 4 { + if bitfields.a.len != 0 { + pixel[3] = bitfields.a.read(data); + } else { + pixel[3] = 0xFF; + } + } + } + reader.read_exact(row_padding) + }, + )?; + + Ok(()) + } + + /// Read image data from a reader in 32-bit formats that use bitfields. 
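A usage sketch for the indexed-color path above (illustrative only; the file name is arbitrary, and the palette is copied out before `read_image` consumes the decoder):

use image::codecs::bmp::BmpDecoder;
use image::ImageDecoder;
use std::fs::File;
use std::io::BufReader;

fn read_indexed_bmp() -> image::ImageResult<(Vec<u8>, Vec<[u8; 3]>)> {
    let mut decoder = BmpDecoder::new(BufReader::new(File::open("indexed.bmp")?))?;
    decoder.set_indexed_color(true); // report one palette index (L8) per pixel
    let palette = decoder.get_palette().map(|p| p.to_vec()).unwrap_or_default();
    let mut indices = vec![0u8; decoder.total_bytes() as usize];
    decoder.read_image(&mut indices)?;
    Ok((indices, palette))
}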
+ fn read_32_bit_pixel_data(&mut self, buf: &mut [u8]) -> ImageResult<()> { + let num_channels = self.num_channels(); + + let bitfields = self.bitfields.as_ref().unwrap(); + + let reader = &mut self.reader; + reader.seek(SeekFrom::Start(self.data_offset))?; + + with_rows( + buf, + self.width, + self.height, + num_channels, + self.top_down, + |row| { + for pixel in row.chunks_mut(num_channels) { + let data = reader.read_u32::<LittleEndian>()?; + + pixel[0] = bitfields.r.read(data); + pixel[1] = bitfields.g.read(data); + pixel[2] = bitfields.b.read(data); + if num_channels == 4 { + if bitfields.a.len != 0 { + pixel[3] = bitfields.a.read(data); + } else { + pixel[3] = 0xff; + } + } + } + Ok(()) + }, + )?; + + Ok(()) + } + + /// Read image data from a reader where the colours are stored as 8-bit values (24 or 32-bit). + fn read_full_byte_pixel_data( + &mut self, + buf: &mut [u8], + format: &FormatFullBytes, + ) -> ImageResult<()> { + let num_channels = self.num_channels(); + let row_padding_len = match *format { + FormatFullBytes::RGB24 => (4 - (self.width as usize * 3) % 4) % 4, + _ => 0, + }; + let row_padding = &mut [0; 4][..row_padding_len]; + + self.reader.seek(SeekFrom::Start(self.data_offset))?; + + let reader = &mut self.reader; + + with_rows( + buf, + self.width, + self.height, + num_channels, + self.top_down, + |row| { + for pixel in row.chunks_mut(num_channels) { + if *format == FormatFullBytes::Format888 { + reader.read_u8()?; + } + + // Read the colour values (b, g, r). + // Reading 3 bytes and reversing them is significantly faster than reading one + // at a time. + reader.read_exact(&mut pixel[0..3])?; + pixel[0..3].reverse(); + + if *format == FormatFullBytes::RGB32 { + reader.read_u8()?; + } + + // Read the alpha channel if present + if *format == FormatFullBytes::RGBA32 { + reader.read_exact(&mut pixel[3..4])?; + } else if num_channels == 4 { + pixel[3] = 0xFF; + } + } + reader.read_exact(row_padding) + }, + )?; + + Ok(()) + } + + fn read_rle_data(&mut self, buf: &mut [u8], image_type: ImageType) -> ImageResult<()> { + // Seek to the start of the actual image data. + self.reader.seek(SeekFrom::Start(self.data_offset))?; + + let num_channels = self.num_channels(); + let p = self.palette.as_ref().unwrap(); + + // Handling deltas in the RLE scheme means that we need to manually + // iterate through rows and pixels. Even if we didn't have to handle + // deltas, we have to ensure that a single runlength doesn't straddle + // two rows. 
+ let mut row_iter = self.rows(buf); + + while let Some(row) = row_iter.next() { + let mut pixel_iter = row.chunks_mut(num_channels); + + let mut x = 0; + loop { + let instruction = { + let control_byte = self.reader.read_u8()?; + match control_byte { + RLE_ESCAPE => { + let op = self.reader.read_u8()?; + + match op { + RLE_ESCAPE_EOL => RLEInsn::EndOfRow, + RLE_ESCAPE_EOF => RLEInsn::EndOfFile, + RLE_ESCAPE_DELTA => { + let xdelta = self.reader.read_u8()?; + let ydelta = self.reader.read_u8()?; + RLEInsn::Delta(xdelta, ydelta) + } + _ => { + let mut length = op as usize; + if self.image_type == ImageType::RLE4 { + length = (length + 1) / 2; + } + length += length & 1; + let mut buffer = vec![0; length]; + self.reader.read_exact(&mut buffer)?; + RLEInsn::Absolute(op, buffer) + } + } + } + _ => { + let palette_index = self.reader.read_u8()?; + RLEInsn::PixelRun(control_byte, palette_index) + } + } + }; + + match instruction { + RLEInsn::EndOfFile => { + pixel_iter.for_each(|p| p.fill(0)); + row_iter.for_each(|r| r.fill(0)); + return Ok(()); + } + RLEInsn::EndOfRow => { + pixel_iter.for_each(|p| p.fill(0)); + break; + } + RLEInsn::Delta(x_delta, y_delta) => { + // The msdn site on bitmap compression doesn't specify + // what happens to the values skipped when encountering + // a delta code, however IE and the windows image + // preview seems to replace them with black pixels, + // so we stick to that. + + if y_delta > 0 { + // Zero out the remainder of the current row. + pixel_iter.for_each(|p| p.fill(0)); + + // If any full rows are skipped, zero them out. + for _ in 1..y_delta { + let row = row_iter.next().ok_or(DecoderError::CorruptRleData)?; + row.fill(0); + } + + // Set the pixel iterator to the start of the next row. + pixel_iter = row_iter + .next() + .ok_or(DecoderError::CorruptRleData)? + .chunks_mut(num_channels); + + // Zero out the pixels up to the current point in the row. + for _ in 0..x { + pixel_iter + .next() + .ok_or(DecoderError::CorruptRleData)? + .fill(0); + } + } + + for _ in 0..x_delta { + let pixel = pixel_iter.next().ok_or(DecoderError::CorruptRleData)?; + pixel.fill(0); + } + x += x_delta as usize; + } + RLEInsn::Absolute(length, indices) => { + // Absolute mode cannot span rows, so if we run + // out of pixels to process, we should stop + // processing the image. + match image_type { + ImageType::RLE8 => { + if !set_8bit_pixel_run( + &mut pixel_iter, + p, + indices.iter(), + length as usize, + ) { + return Err(DecoderError::CorruptRleData.into()); + } + } + ImageType::RLE4 => { + if !set_4bit_pixel_run( + &mut pixel_iter, + p, + indices.iter(), + length as usize, + ) { + return Err(DecoderError::CorruptRleData.into()); + } + } + _ => unreachable!(), + } + x += length as usize; + } + RLEInsn::PixelRun(n_pixels, palette_index) => { + // A pixel run isn't allowed to span rows, but we + // simply continue on to the next row if we run + // out of pixels to set. + match image_type { + ImageType::RLE8 => { + if !set_8bit_pixel_run( + &mut pixel_iter, + p, + repeat(&palette_index), + n_pixels as usize, + ) { + return Err(DecoderError::CorruptRleData.into()); + } + } + ImageType::RLE4 => { + if !set_4bit_pixel_run( + &mut pixel_iter, + p, + repeat(&palette_index), + n_pixels as usize, + ) { + return Err(DecoderError::CorruptRleData.into()); + } + } + _ => unreachable!(), + } + x += n_pixels as usize; + } + } + } + } + + Ok(()) + } + + /// Read the actual data of the image. 
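To make the RLE scheme above concrete, for RLE8: a non-zero control byte starts a pixel run, so the pair 05 1F paints palette index 0x1F five times. A zero control byte escapes into an instruction: 00 00 ends the row, 00 01 ends the file, 00 02 dx dy skips dx pixels right and dy rows down (the skipped pixels are zero-filled here, matching common viewers), and 00 n for n >= 3 copies n literal palette indices, padded to an even byte count.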
This function is deliberately not public because it + /// cannot be called multiple times without seeking back the underlying reader in between. + pub(crate) fn read_image_data(&mut self, buf: &mut [u8]) -> ImageResult<()> { + match self.image_type { + ImageType::Palette => self.read_palettized_pixel_data(buf), + ImageType::RGB16 => self.read_16_bit_pixel_data(buf, Some(&R5_G5_B5_COLOR_MASK)), + ImageType::RGB24 => self.read_full_byte_pixel_data(buf, &FormatFullBytes::RGB24), + ImageType::RGB32 => self.read_full_byte_pixel_data(buf, &FormatFullBytes::RGB32), + ImageType::RGBA32 => self.read_full_byte_pixel_data(buf, &FormatFullBytes::RGBA32), + ImageType::RLE8 => self.read_rle_data(buf, ImageType::RLE8), + ImageType::RLE4 => self.read_rle_data(buf, ImageType::RLE4), + ImageType::Bitfields16 => match self.bitfields { + Some(_) => self.read_16_bit_pixel_data(buf, None), + None => Err(DecoderError::BitfieldMasksMissing(16).into()), + }, + ImageType::Bitfields32 => match self.bitfields { + Some(R8_G8_B8_COLOR_MASK) => { + self.read_full_byte_pixel_data(buf, &FormatFullBytes::Format888) + } + Some(R8_G8_B8_A8_COLOR_MASK) => { + self.read_full_byte_pixel_data(buf, &FormatFullBytes::RGBA32) + } + Some(_) => self.read_32_bit_pixel_data(buf), + None => Err(DecoderError::BitfieldMasksMissing(32).into()), + }, + } + } +} + +/// Wrapper struct around a `Cursor<Vec<u8>>` +pub struct BmpReader<R>(Cursor<Vec<u8>>, PhantomData<R>); +impl<R> Read for BmpReader<R> { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { + if self.0.position() == 0 && buf.is_empty() { + mem::swap(buf, self.0.get_mut()); + Ok(buf.len()) + } else { + self.0.read_to_end(buf) + } + } +} + +impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for BmpDecoder<R> { + type Reader = BmpReader<R>; + + fn dimensions(&self) -> (u32, u32) { + (self.width as u32, self.height as u32) + } + + fn color_type(&self) -> ColorType { + if self.indexed_color { + ColorType::L8 + } else if self.add_alpha_channel { + ColorType::Rgba8 + } else { + ColorType::Rgb8 + } + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + Ok(BmpReader( + Cursor::new(image::decoder_to_vec(self)?), + PhantomData, + )) + } + + fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> { + assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); + self.read_image_data(buf) + } +} + +impl<'a, R: 'a + Read + Seek> ImageDecoderRect<'a> for BmpDecoder<R> { + fn read_rect_with_progress<F: Fn(Progress)>( + &mut self, + x: u32, + y: u32, + width: u32, + height: u32, + buf: &mut [u8], + progress_callback: F, + ) -> ImageResult<()> { + let start = self.reader.stream_position()?; + image::load_rect( + x, + y, + width, + height, + buf, + progress_callback, + self, + |_, _| Ok(()), + |s, buf| s.read_image_data(buf), + )?; + self.reader.seek(SeekFrom::Start(start))?; + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_bitfield_len() { + for len in 1..9 { + let bitfield = Bitfield { shift: 0, len }; + for i in 0..(1 << len) { + let read = bitfield.read(i); + let calc = (i as f64 / ((1 << len) - 1) as f64 * 255f64).round() as u8; + if read != calc { + println!("len:{} i:{} read:{} calc:{}", len, i, read, calc); + } + assert_eq!(read, calc); + } + } + } + + #[test] + fn read_rect() { + let f = std::fs::File::open("tests/images/bmp/images/Core_8_Bit.bmp").unwrap(); + let mut decoder = super::BmpDecoder::new(f).unwrap(); + + let mut buf: Vec<u8> = vec![0; 8 * 
8 * 3]; + decoder.read_rect(0, 0, 8, 8, &mut *buf).unwrap(); + } + + #[test] + fn read_rle_too_short() { + let data = vec![ + 0x42, 0x4d, 0x04, 0xee, 0xfe, 0xff, 0xff, 0x10, 0xff, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x7c, 0x00, 0x00, 0x00, 0x0c, 0x41, 0x00, 0x00, 0x07, 0x10, 0x00, 0x00, 0x01, 0x00, + 0x04, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, + 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x21, + 0xff, 0x00, 0x66, 0x61, 0x72, 0x62, 0x66, 0x65, 0x6c, 0x64, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xd8, 0xff, 0x00, 0x00, 0x19, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0xff, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, + 0x00, 0x00, 0x00, 0x2d, 0x31, 0x31, 0x35, 0x36, 0x00, 0xff, 0x00, 0x00, 0x52, 0x3a, + 0x37, 0x30, 0x7e, 0x71, 0x63, 0x91, 0x5a, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x35, 0x37, 0x00, 0xff, 0x00, 0x00, 0x52, + 0x3a, 0x37, 0x30, 0x7e, 0x71, 0x63, 0x91, 0x5a, 0x04, 0x05, 0x3c, 0x00, 0x00, 0x11, + 0x00, 0x5d, 0x7a, 0x82, 0xb7, 0xca, 0x2d, 0x31, 0xff, 0xff, 0xc7, 0x95, 0x33, 0x2e, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x00, + 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x66, 0x00, 0x4d, + 0x4d, 0x00, 0x2a, 0x00, + ]; + + let decoder = BmpDecoder::new(Cursor::new(&data)).unwrap(); + let mut buf = vec![0; usize::try_from(decoder.total_bytes()).unwrap()]; + assert!(decoder.read_image(&mut buf).is_ok()); + } + + #[test] + fn test_no_header() { + let tests = [ + "Info_R8_G8_B8.bmp", + "Info_A8_R8_G8_B8.bmp", + "Info_8_Bit.bmp", + "Info_4_Bit.bmp", + "Info_1_Bit.bmp", + ]; + + for name in &tests { + let path = format!("tests/images/bmp/images/{name}"); + let ref_img = crate::open(&path).unwrap(); + let mut data = std::fs::read(&path).unwrap(); + // skip the BITMAPFILEHEADER + let slice = &mut data[14..]; + let decoder = BmpDecoder::new_without_file_header(Cursor::new(slice)).unwrap(); + let no_hdr_img = crate::DynamicImage::from_decoder(decoder).unwrap(); + assert_eq!(ref_img, no_hdr_img); + } + } +} diff --git a/vendor/image/src/codecs/bmp/encoder.rs b/vendor/image/src/codecs/bmp/encoder.rs new file mode 100644 index 0000000..c90c063 --- /dev/null +++ b/vendor/image/src/codecs/bmp/encoder.rs @@ -0,0 +1,388 @@ +use byteorder::{LittleEndian, WriteBytesExt}; +use std::io::{self, Write}; + +use crate::error::{ + EncodingError, ImageError, ImageFormatHint, ImageResult, ParameterError, ParameterErrorKind, +}; +use crate::image::ImageEncoder; +use crate::{color, ImageFormat}; + +const BITMAPFILEHEADER_SIZE: u32 = 14; +const BITMAPINFOHEADER_SIZE: u32 = 40; +const BITMAPV4HEADER_SIZE: u32 = 108; + +/// The representation of a BMP encoder. +pub struct BmpEncoder<'a, W: 'a> { + writer: &'a mut W, +} + +impl<'a, W: Write + 'a> BmpEncoder<'a, W> { + /// Create a new encoder that writes its output to ```w```. + pub fn new(w: &'a mut W) -> Self { + BmpEncoder { writer: w } + } + + /// Encodes the image ```image``` + /// that has dimensions ```width``` and ```height``` + /// and ```ColorType``` ```c```. 
+ pub fn encode( + &mut self, + image: &[u8], + width: u32, + height: u32, + c: color::ColorType, + ) -> ImageResult<()> { + self.encode_with_palette(image, width, height, c, None) + } + + /// Same as ```encode```, but allow a palette to be passed in. + /// The ```palette``` is ignored for color types other than Luma/Luma-with-alpha. + pub fn encode_with_palette( + &mut self, + image: &[u8], + width: u32, + height: u32, + c: color::ColorType, + palette: Option<&[[u8; 3]]>, + ) -> ImageResult<()> { + if palette.is_some() && c != color::ColorType::L8 && c != color::ColorType::La8 { + return Err(ImageError::IoError(io::Error::new( + io::ErrorKind::InvalidInput, + format!( + "Unsupported color type {:?} when using a non-empty palette. Supported types: Gray(8), GrayA(8).", + c + ), + ))); + } + + let bmp_header_size = BITMAPFILEHEADER_SIZE; + + let (dib_header_size, written_pixel_size, palette_color_count) = + get_pixel_info(c, palette)?; + let row_pad_size = (4 - (width * written_pixel_size) % 4) % 4; // each row must be padded to a multiple of 4 bytes + let image_size = width + .checked_mul(height) + .and_then(|v| v.checked_mul(written_pixel_size)) + .and_then(|v| v.checked_add(height * row_pad_size)) + .ok_or_else(|| { + ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + )) + })?; + let palette_size = palette_color_count * 4; // all palette colors are BGRA + let file_size = bmp_header_size + .checked_add(dib_header_size) + .and_then(|v| v.checked_add(palette_size)) + .and_then(|v| v.checked_add(image_size)) + .ok_or_else(|| { + ImageError::Encoding(EncodingError::new( + ImageFormatHint::Exact(ImageFormat::Bmp), + "calculated BMP header size larger than 2^32", + )) + })?; + + // write BMP header + self.writer.write_u8(b'B')?; + self.writer.write_u8(b'M')?; + self.writer.write_u32::<LittleEndian>(file_size)?; // file size + self.writer.write_u16::<LittleEndian>(0)?; // reserved 1 + self.writer.write_u16::<LittleEndian>(0)?; // reserved 2 + self.writer + .write_u32::<LittleEndian>(bmp_header_size + dib_header_size + palette_size)?; // image data offset + + // write DIB header + self.writer.write_u32::<LittleEndian>(dib_header_size)?; + self.writer.write_i32::<LittleEndian>(width as i32)?; + self.writer.write_i32::<LittleEndian>(height as i32)?; + self.writer.write_u16::<LittleEndian>(1)?; // color planes + self.writer + .write_u16::<LittleEndian>((written_pixel_size * 8) as u16)?; // bits per pixel + if dib_header_size >= BITMAPV4HEADER_SIZE { + // Assume BGRA32 + self.writer.write_u32::<LittleEndian>(3)?; // compression method - bitfields + } else { + self.writer.write_u32::<LittleEndian>(0)?; // compression method - no compression + } + self.writer.write_u32::<LittleEndian>(image_size)?; + self.writer.write_i32::<LittleEndian>(0)?; // horizontal ppm + self.writer.write_i32::<LittleEndian>(0)?; // vertical ppm + self.writer.write_u32::<LittleEndian>(palette_color_count)?; + self.writer.write_u32::<LittleEndian>(0)?; // all colors are important + if dib_header_size >= BITMAPV4HEADER_SIZE { + // Assume BGRA32 + self.writer.write_u32::<LittleEndian>(0xff << 16)?; // red mask + self.writer.write_u32::<LittleEndian>(0xff << 8)?; // green mask + self.writer.write_u32::<LittleEndian>(0xff)?; // blue mask + self.writer.write_u32::<LittleEndian>(0xff << 24)?; // alpha mask + self.writer.write_u32::<LittleEndian>(0x73524742)?; // colorspace - sRGB + + // endpoints (3x3) and gamma (3) + for _ in 0..12 { + self.writer.write_u32::<LittleEndian>(0)?; + } + } + + // 
write image data + match c { + color::ColorType::Rgb8 => self.encode_rgb(image, width, height, row_pad_size, 3)?, + color::ColorType::Rgba8 => self.encode_rgba(image, width, height, row_pad_size, 4)?, + color::ColorType::L8 => { + self.encode_gray(image, width, height, row_pad_size, 1, palette)? + } + color::ColorType::La8 => { + self.encode_gray(image, width, height, row_pad_size, 2, palette)? + } + _ => { + return Err(ImageError::IoError(io::Error::new( + io::ErrorKind::InvalidInput, + &get_unsupported_error_message(c)[..], + ))) + } + } + + Ok(()) + } + + fn encode_rgb( + &mut self, + image: &[u8], + width: u32, + height: u32, + row_pad_size: u32, + bytes_per_pixel: u32, + ) -> io::Result<()> { + let width = width as usize; + let height = height as usize; + let x_stride = bytes_per_pixel as usize; + let y_stride = width * x_stride; + for row in (0..height).rev() { + // from the bottom up + let row_start = row * y_stride; + for px in image[row_start..][..y_stride].chunks_exact(x_stride) { + let r = px[0]; + let g = px[1]; + let b = px[2]; + // written as BGR + self.writer.write_all(&[b, g, r])?; + } + self.write_row_pad(row_pad_size)?; + } + + Ok(()) + } + + fn encode_rgba( + &mut self, + image: &[u8], + width: u32, + height: u32, + row_pad_size: u32, + bytes_per_pixel: u32, + ) -> io::Result<()> { + let width = width as usize; + let height = height as usize; + let x_stride = bytes_per_pixel as usize; + let y_stride = width * x_stride; + for row in (0..height).rev() { + // from the bottom up + let row_start = row * y_stride; + for px in image[row_start..][..y_stride].chunks_exact(x_stride) { + let r = px[0]; + let g = px[1]; + let b = px[2]; + let a = px[3]; + // written as BGRA + self.writer.write_all(&[b, g, r, a])?; + } + self.write_row_pad(row_pad_size)?; + } + + Ok(()) + } + + fn encode_gray( + &mut self, + image: &[u8], + width: u32, + height: u32, + row_pad_size: u32, + bytes_per_pixel: u32, + palette: Option<&[[u8; 3]]>, + ) -> io::Result<()> { + // write grayscale palette + if let Some(palette) = palette { + for item in palette { + // each color is written as BGRA, where A is always 0 + self.writer.write_all(&[item[2], item[1], item[0], 0])?; + } + } else { + for val in 0u8..=255 { + // each color is written as BGRA, where A is always 0 and since only grayscale is being written, B = G = R = index + self.writer.write_all(&[val, val, val, 0])?; + } + } + + // write image data + let x_stride = bytes_per_pixel; + let y_stride = width * x_stride; + for row in (0..height).rev() { + // from the bottom up + let row_start = row * y_stride; + for col in 0..width { + let pixel_start = (row_start + (col * x_stride)) as usize; + // color value is equal to the palette index + self.writer.write_u8(image[pixel_start])?; + // alpha is never written as it's not widely supported + } + + self.write_row_pad(row_pad_size)?; + } + + Ok(()) + } + + fn write_row_pad(&mut self, row_pad_size: u32) -> io::Result<()> { + for _ in 0..row_pad_size { + self.writer.write_u8(0)?; + } + + Ok(()) + } +} + +impl<'a, W: Write> ImageEncoder for BmpEncoder<'a, W> { + fn write_image( + mut self, + buf: &[u8], + width: u32, + height: u32, + color_type: color::ColorType, + ) -> ImageResult<()> { + self.encode(buf, width, height, color_type) + } +} + +fn get_unsupported_error_message(c: color::ColorType) -> String { + format!( + "Unsupported color type {:?}. Supported types: RGB(8), RGBA(8), Gray(8), GrayA(8).", + c + ) +} + +/// Returns a tuple representing: (dib header size, written pixel size, palette color count). 
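// Illustrative note (editor's addition, not in the upstream source): for an
// 8-bit grayscale image with no caller-supplied palette this returns
// (BITMAPINFOHEADER_SIZE, 1, 256), i.e. a 40-byte DIB header, one written byte
// per pixel and a full 256-entry grayscale palette. The row padding computed in
// `encode_with_palette` then works on the written pixel size: a 3-pixel-wide
// RGB8 image has 3 * 3 = 9 row bytes, so (4 - 9 % 4) % 4 = 3 pad bytes bring
// each row up to 12 bytes.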
+fn get_pixel_info(c: color::ColorType, palette: Option<&[[u8; 3]]>) -> io::Result<(u32, u32, u32)> { + let sizes = match c { + color::ColorType::Rgb8 => (BITMAPINFOHEADER_SIZE, 3, 0), + color::ColorType::Rgba8 => (BITMAPV4HEADER_SIZE, 4, 0), + color::ColorType::L8 => ( + BITMAPINFOHEADER_SIZE, + 1, + palette.map(|p| p.len()).unwrap_or(256) as u32, + ), + color::ColorType::La8 => ( + BITMAPINFOHEADER_SIZE, + 1, + palette.map(|p| p.len()).unwrap_or(256) as u32, + ), + _ => { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + &get_unsupported_error_message(c)[..], + )) + } + }; + + Ok(sizes) +} + +#[cfg(test)] +mod tests { + use super::super::BmpDecoder; + use super::BmpEncoder; + use crate::color::ColorType; + use crate::image::ImageDecoder; + use std::io::Cursor; + + fn round_trip_image(image: &[u8], width: u32, height: u32, c: ColorType) -> Vec<u8> { + let mut encoded_data = Vec::new(); + { + let mut encoder = BmpEncoder::new(&mut encoded_data); + encoder + .encode(&image, width, height, c) + .expect("could not encode image"); + } + + let decoder = BmpDecoder::new(Cursor::new(&encoded_data)).expect("failed to decode"); + + let mut buf = vec![0; decoder.total_bytes() as usize]; + decoder.read_image(&mut buf).expect("failed to decode"); + buf + } + + #[test] + fn round_trip_single_pixel_rgb() { + let image = [255u8, 0, 0]; // single red pixel + let decoded = round_trip_image(&image, 1, 1, ColorType::Rgb8); + assert_eq!(3, decoded.len()); + assert_eq!(255, decoded[0]); + assert_eq!(0, decoded[1]); + assert_eq!(0, decoded[2]); + } + + #[test] + #[cfg(target_pointer_width = "64")] + fn huge_files_return_error() { + let mut encoded_data = Vec::new(); + let image = vec![0u8; 3 * 40_000 * 40_000]; // 40_000x40_000 pixels, 3 bytes per pixel, allocated on the heap + let mut encoder = BmpEncoder::new(&mut encoded_data); + let result = encoder.encode(&image, 40_000, 40_000, ColorType::Rgb8); + assert!(result.is_err()); + } + + #[test] + fn round_trip_single_pixel_rgba() { + let image = [1, 2, 3, 4]; + let decoded = round_trip_image(&image, 1, 1, ColorType::Rgba8); + assert_eq!(&decoded[..], &image[..]); + } + + #[test] + fn round_trip_3px_rgb() { + let image = [0u8; 3 * 3 * 3]; // 3x3 pixels, 3 bytes per pixel + let _decoded = round_trip_image(&image, 3, 3, ColorType::Rgb8); + } + + #[test] + fn round_trip_gray() { + let image = [0u8, 1, 2]; // 3 pixels + let decoded = round_trip_image(&image, 3, 1, ColorType::L8); + // should be read back as 3 RGB pixels + assert_eq!(9, decoded.len()); + assert_eq!(0, decoded[0]); + assert_eq!(0, decoded[1]); + assert_eq!(0, decoded[2]); + assert_eq!(1, decoded[3]); + assert_eq!(1, decoded[4]); + assert_eq!(1, decoded[5]); + assert_eq!(2, decoded[6]); + assert_eq!(2, decoded[7]); + assert_eq!(2, decoded[8]); + } + + #[test] + fn round_trip_graya() { + let image = [0u8, 0, 1, 0, 2, 0]; // 3 pixels, each with an alpha channel + let decoded = round_trip_image(&image, 1, 3, ColorType::La8); + // should be read back as 3 RGB pixels + assert_eq!(9, decoded.len()); + assert_eq!(0, decoded[0]); + assert_eq!(0, decoded[1]); + assert_eq!(0, decoded[2]); + assert_eq!(1, decoded[3]); + assert_eq!(1, decoded[4]); + assert_eq!(1, decoded[5]); + assert_eq!(2, decoded[6]); + assert_eq!(2, decoded[7]); + assert_eq!(2, decoded[8]); + } +} diff --git a/vendor/image/src/codecs/bmp/mod.rs b/vendor/image/src/codecs/bmp/mod.rs new file mode 100644 index 0000000..549b1cf --- /dev/null +++ b/vendor/image/src/codecs/bmp/mod.rs @@ -0,0 +1,14 @@ +//! 
Decoding and Encoding of BMP Images +//! +//! A decoder and encoder for BMP (Windows Bitmap) images +//! +//! # Related Links +//! * <https://msdn.microsoft.com/en-us/library/windows/desktop/dd183375%28v=vs.85%29.aspx> +//! * <https://en.wikipedia.org/wiki/BMP_file_format> +//! + +pub use self::decoder::BmpDecoder; +pub use self::encoder::BmpEncoder; + +mod decoder; +mod encoder; diff --git a/vendor/image/src/codecs/dds.rs b/vendor/image/src/codecs/dds.rs new file mode 100644 index 0000000..f0a7357 --- /dev/null +++ b/vendor/image/src/codecs/dds.rs @@ -0,0 +1,375 @@ +//! Decoding of DDS images +//! +//! DDS (DirectDraw Surface) is a container format for storing DXT (S3TC) compressed images. +//! +//! # Related Links +//! * <https://docs.microsoft.com/en-us/windows/win32/direct3ddds/dx-graphics-dds-pguide> - Description of the DDS format. + +use std::io::Read; +use std::{error, fmt}; + +use byteorder::{LittleEndian, ReadBytesExt}; + +#[allow(deprecated)] +use crate::codecs::dxt::{DxtDecoder, DxtReader, DxtVariant}; +use crate::color::ColorType; +use crate::error::{ + DecodingError, ImageError, ImageFormatHint, ImageResult, UnsupportedError, UnsupportedErrorKind, +}; +use crate::image::{ImageDecoder, ImageFormat}; + +/// Errors that can occur during decoding and parsing a DDS image +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +enum DecoderError { + /// Wrong DDS channel width + PixelFormatSizeInvalid(u32), + /// Wrong DDS header size + HeaderSizeInvalid(u32), + /// Wrong DDS header flags + HeaderFlagsInvalid(u32), + + /// Invalid DXGI format in DX10 header + DxgiFormatInvalid(u32), + /// Invalid resource dimension + ResourceDimensionInvalid(u32), + /// Invalid flags in DX10 header + Dx10FlagsInvalid(u32), + /// Invalid array size in DX10 header + Dx10ArraySizeInvalid(u32), + + /// DDS "DDS " signature invalid or missing + DdsSignatureInvalid, +} + +impl fmt::Display for DecoderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DecoderError::PixelFormatSizeInvalid(s) => { + f.write_fmt(format_args!("Invalid DDS PixelFormat size: {}", s)) + } + DecoderError::HeaderSizeInvalid(s) => { + f.write_fmt(format_args!("Invalid DDS header size: {}", s)) + } + DecoderError::HeaderFlagsInvalid(fs) => { + f.write_fmt(format_args!("Invalid DDS header flags: {:#010X}", fs)) + } + DecoderError::DxgiFormatInvalid(df) => { + f.write_fmt(format_args!("Invalid DDS DXGI format: {}", df)) + } + DecoderError::ResourceDimensionInvalid(d) => { + f.write_fmt(format_args!("Invalid DDS resource dimension: {}", d)) + } + DecoderError::Dx10FlagsInvalid(fs) => { + f.write_fmt(format_args!("Invalid DDS DX10 header flags: {:#010X}", fs)) + } + DecoderError::Dx10ArraySizeInvalid(s) => { + f.write_fmt(format_args!("Invalid DDS DX10 array size: {}", s)) + } + DecoderError::DdsSignatureInvalid => f.write_str("DDS signature not found"), + } + } +} + +impl From<DecoderError> for ImageError { + fn from(e: DecoderError) -> ImageError { + ImageError::Decoding(DecodingError::new(ImageFormat::Dds.into(), e)) + } +} + +impl error::Error for DecoderError {} + +/// Header used by DDS image files +#[derive(Debug)] +struct Header { + _flags: u32, + height: u32, + width: u32, + _pitch_or_linear_size: u32, + _depth: u32, + _mipmap_count: u32, + pixel_format: PixelFormat, + _caps: u32, + _caps2: u32, +} + +/// Extended DX10 header used by some DDS image files +#[derive(Debug)] +struct DX10Header { + dxgi_format: u32, + resource_dimension: u32, + misc_flag: u32, + array_size: u32, + 
misc_flags_2: u32, +} + +/// DDS pixel format +#[derive(Debug)] +struct PixelFormat { + flags: u32, + fourcc: [u8; 4], + _rgb_bit_count: u32, + _r_bit_mask: u32, + _g_bit_mask: u32, + _b_bit_mask: u32, + _a_bit_mask: u32, +} + +impl PixelFormat { + fn from_reader(r: &mut dyn Read) -> ImageResult<Self> { + let size = r.read_u32::<LittleEndian>()?; + if size != 32 { + return Err(DecoderError::PixelFormatSizeInvalid(size).into()); + } + + Ok(Self { + flags: r.read_u32::<LittleEndian>()?, + fourcc: { + let mut v = [0; 4]; + r.read_exact(&mut v)?; + v + }, + _rgb_bit_count: r.read_u32::<LittleEndian>()?, + _r_bit_mask: r.read_u32::<LittleEndian>()?, + _g_bit_mask: r.read_u32::<LittleEndian>()?, + _b_bit_mask: r.read_u32::<LittleEndian>()?, + _a_bit_mask: r.read_u32::<LittleEndian>()?, + }) + } +} + +impl Header { + fn from_reader(r: &mut dyn Read) -> ImageResult<Self> { + let size = r.read_u32::<LittleEndian>()?; + if size != 124 { + return Err(DecoderError::HeaderSizeInvalid(size).into()); + } + + const REQUIRED_FLAGS: u32 = 0x1 | 0x2 | 0x4 | 0x1000; + const VALID_FLAGS: u32 = 0x1 | 0x2 | 0x4 | 0x8 | 0x1000 | 0x20000 | 0x80000 | 0x800000; + let flags = r.read_u32::<LittleEndian>()?; + if flags & (REQUIRED_FLAGS | !VALID_FLAGS) != REQUIRED_FLAGS { + return Err(DecoderError::HeaderFlagsInvalid(flags).into()); + } + + let height = r.read_u32::<LittleEndian>()?; + let width = r.read_u32::<LittleEndian>()?; + let pitch_or_linear_size = r.read_u32::<LittleEndian>()?; + let depth = r.read_u32::<LittleEndian>()?; + let mipmap_count = r.read_u32::<LittleEndian>()?; + // Skip `dwReserved1` + { + let mut skipped = [0; 4 * 11]; + r.read_exact(&mut skipped)?; + } + let pixel_format = PixelFormat::from_reader(r)?; + let caps = r.read_u32::<LittleEndian>()?; + let caps2 = r.read_u32::<LittleEndian>()?; + // Skip `dwCaps3`, `dwCaps4`, `dwReserved2` (unused) + { + let mut skipped = [0; 4 + 4 + 4]; + r.read_exact(&mut skipped)?; + } + + Ok(Self { + _flags: flags, + height, + width, + _pitch_or_linear_size: pitch_or_linear_size, + _depth: depth, + _mipmap_count: mipmap_count, + pixel_format, + _caps: caps, + _caps2: caps2, + }) + } +} + +impl DX10Header { + fn from_reader(r: &mut dyn Read) -> ImageResult<Self> { + let dxgi_format = r.read_u32::<LittleEndian>()?; + let resource_dimension = r.read_u32::<LittleEndian>()?; + let misc_flag = r.read_u32::<LittleEndian>()?; + let array_size = r.read_u32::<LittleEndian>()?; + let misc_flags_2 = r.read_u32::<LittleEndian>()?; + + let dx10_header = Self { + dxgi_format, + resource_dimension, + misc_flag, + array_size, + misc_flags_2, + }; + dx10_header.validate()?; + + Ok(dx10_header) + } + + fn validate(&self) -> Result<(), ImageError> { + // Note: see https://docs.microsoft.com/en-us/windows/win32/direct3ddds/dds-header-dxt10 for info on valid values + if self.dxgi_format > 132 { + // Invalid format + return Err(DecoderError::DxgiFormatInvalid(self.dxgi_format).into()); + } + + if self.resource_dimension < 2 || self.resource_dimension > 4 { + // Invalid dimension + // Only 1D (2), 2D (3) and 3D (4) resource dimensions are allowed + return Err(DecoderError::ResourceDimensionInvalid(self.resource_dimension).into()); + } + + if self.misc_flag != 0x0 && self.misc_flag != 0x4 { + // Invalid flag + // Only no (0x0) and DDS_RESOURCE_MISC_TEXTURECUBE (0x4) flags are allowed + return Err(DecoderError::Dx10FlagsInvalid(self.misc_flag).into()); + } + + if self.resource_dimension == 4 && self.array_size != 1 { + // Invalid array size + // 3D textures (resource dimension == 4) must 
have an array size of 1 + return Err(DecoderError::Dx10ArraySizeInvalid(self.array_size).into()); + } + + if self.misc_flags_2 > 0x4 { + // Invalid alpha flags + return Err(DecoderError::Dx10FlagsInvalid(self.misc_flags_2).into()); + } + + Ok(()) + } +} + +/// The representation of a DDS decoder +pub struct DdsDecoder<R: Read> { + #[allow(deprecated)] + inner: DxtDecoder<R>, +} + +impl<R: Read> DdsDecoder<R> { + /// Create a new decoder that decodes from the stream `r` + pub fn new(mut r: R) -> ImageResult<Self> { + let mut magic = [0; 4]; + r.read_exact(&mut magic)?; + if magic != b"DDS "[..] { + return Err(DecoderError::DdsSignatureInvalid.into()); + } + + let header = Header::from_reader(&mut r)?; + + if header.pixel_format.flags & 0x4 != 0 { + #[allow(deprecated)] + let variant = match &header.pixel_format.fourcc { + b"DXT1" => DxtVariant::DXT1, + b"DXT3" => DxtVariant::DXT3, + b"DXT5" => DxtVariant::DXT5, + b"DX10" => { + let dx10_header = DX10Header::from_reader(&mut r)?; + // Format equivalents were taken from https://docs.microsoft.com/en-us/windows/win32/direct3d11/texture-block-compression-in-direct3d-11 + // The enum integer values were taken from https://docs.microsoft.com/en-us/windows/win32/api/dxgiformat/ne-dxgiformat-dxgi_format + // DXT1 represents the different BC1 variants, DTX3 represents the different BC2 variants and DTX5 represents the different BC3 variants + match dx10_header.dxgi_format { + 70 | 71 | 72 => DxtVariant::DXT1, // DXGI_FORMAT_BC1_TYPELESS, DXGI_FORMAT_BC1_UNORM or DXGI_FORMAT_BC1_UNORM_SRGB + 73 | 74 | 75 => DxtVariant::DXT3, // DXGI_FORMAT_BC2_TYPELESS, DXGI_FORMAT_BC2_UNORM or DXGI_FORMAT_BC2_UNORM_SRGB + 76 | 77 | 78 => DxtVariant::DXT5, // DXGI_FORMAT_BC3_TYPELESS, DXGI_FORMAT_BC3_UNORM or DXGI_FORMAT_BC3_UNORM_SRGB + _ => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Dds.into(), + UnsupportedErrorKind::GenericFeature(format!( + "DDS DXGI Format {}", + dx10_header.dxgi_format + )), + ), + )) + } + } + } + fourcc => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Dds.into(), + UnsupportedErrorKind::GenericFeature(format!( + "DDS FourCC {:?}", + fourcc + )), + ), + )) + } + }; + + #[allow(deprecated)] + let bytes_per_pixel = variant.color_type().bytes_per_pixel(); + + if crate::utils::check_dimension_overflow(header.width, header.height, bytes_per_pixel) + { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Dds.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Image dimensions ({}x{}) are too large", + header.width, header.height + )), + ), + )); + } + + #[allow(deprecated)] + let inner = DxtDecoder::new(r, header.width, header.height, variant)?; + Ok(Self { inner }) + } else { + // For now, supports only DXT variants + Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Dds.into(), + UnsupportedErrorKind::Format(ImageFormatHint::Name("DDS".to_string())), + ), + )) + } + } +} + +impl<'a, R: 'a + Read> ImageDecoder<'a> for DdsDecoder<R> { + #[allow(deprecated)] + type Reader = DxtReader<R>; + + fn dimensions(&self) -> (u32, u32) { + self.inner.dimensions() + } + + fn color_type(&self) -> ColorType { + self.inner.color_type() + } + + fn scanline_bytes(&self) -> u64 { + self.inner.scanline_bytes() + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + self.inner.into_reader() + } + + fn read_image(self, buf: &mut [u8]) -> ImageResult<()> { + 
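        // All pixel decoding is delegated to the wrapped DxtDecoder; the DDS
        // layer above only parses the container header and selects the variant.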
self.inner.read_image(buf) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn dimension_overflow() { + // A DXT1 header set to 0xFFFF_FFFC width and height (the highest u32%4 == 0) + let header = vec![ + 0x44, 0x44, 0x53, 0x20, 0x7C, 0x0, 0x0, 0x0, 0x7, 0x10, 0x8, 0x0, 0xFC, 0xFF, 0xFF, + 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0x0, 0xC0, 0x12, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, + 0x0, 0x49, 0x4D, 0x41, 0x47, 0x45, 0x4D, 0x41, 0x47, 0x49, 0x43, 0x4B, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, + 0x4, 0x0, 0x0, 0x0, 0x44, 0x58, 0x54, 0x31, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + ]; + + assert!(DdsDecoder::new(&header[..]).is_err()); + } +} diff --git a/vendor/image/src/codecs/dxt.rs b/vendor/image/src/codecs/dxt.rs new file mode 100644 index 0000000..8737fb3 --- /dev/null +++ b/vendor/image/src/codecs/dxt.rs @@ -0,0 +1,869 @@ +//! Decoding of DXT (S3TC) compression +//! +//! DXT is an image format that supports lossy compression +//! +//! # Related Links +//! * <https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_compression_s3tc.txt> - Description of the DXT compression OpenGL extensions. +//! +//! Note: this module only implements bare DXT encoding/decoding, it does not parse formats that can contain DXT files like .dds + +use std::convert::TryFrom; +use std::io::{self, Read, Seek, SeekFrom, Write}; + +use crate::color::ColorType; +use crate::error::{ImageError, ImageResult, ParameterError, ParameterErrorKind}; +use crate::image::{self, ImageDecoder, ImageDecoderRect, ImageReadBuffer, Progress}; + +/// What version of DXT compression are we using? +/// Note that DXT2 and DXT4 are left away as they're +/// just DXT3 and DXT5 with premultiplied alpha +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum DxtVariant { + /// The DXT1 format. 48 bytes of RGB data in a 4x4 pixel square is + /// compressed into an 8 byte block of DXT1 data + DXT1, + /// The DXT3 format. 64 bytes of RGBA data in a 4x4 pixel square is + /// compressed into a 16 byte block of DXT3 data + DXT3, + /// The DXT5 format. 64 bytes of RGBA data in a 4x4 pixel square is + /// compressed into a 16 byte block of DXT5 data + DXT5, +} + +impl DxtVariant { + /// Returns the amount of bytes of raw image data + /// that is encoded in a single DXTn block + fn decoded_bytes_per_block(self) -> usize { + match self { + DxtVariant::DXT1 => 48, + DxtVariant::DXT3 | DxtVariant::DXT5 => 64, + } + } + + /// Returns the amount of bytes per block of encoded DXTn data + fn encoded_bytes_per_block(self) -> usize { + match self { + DxtVariant::DXT1 => 8, + DxtVariant::DXT3 | DxtVariant::DXT5 => 16, + } + } + + /// Returns the color type that is stored in this DXT variant + pub fn color_type(self) -> ColorType { + match self { + DxtVariant::DXT1 => ColorType::Rgb8, + DxtVariant::DXT3 | DxtVariant::DXT5 => ColorType::Rgba8, + } + } +} + +/// DXT decoder +pub struct DxtDecoder<R: Read> { + inner: R, + width_blocks: u32, + height_blocks: u32, + variant: DxtVariant, + row: u32, +} + +impl<R: Read> DxtDecoder<R> { + /// Create a new DXT decoder that decodes from the stream ```r```. 
+ /// As DXT is often stored as raw buffers with the width/height + /// somewhere else the width and height of the image need + /// to be passed in ```width``` and ```height```, as well as the + /// DXT variant in ```variant```. + /// width and height are required to be powers of 2 and at least 4. + /// otherwise an error will be returned + pub fn new( + r: R, + width: u32, + height: u32, + variant: DxtVariant, + ) -> Result<DxtDecoder<R>, ImageError> { + if width % 4 != 0 || height % 4 != 0 { + // TODO: this is actually a bit of a weird case. We could return `DecodingError` but + // it's not really the format that is wrong However, the encoder should surely return + // `EncodingError` so it would be the logical choice for symmetry. + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + ))); + } + let width_blocks = width / 4; + let height_blocks = height / 4; + Ok(DxtDecoder { + inner: r, + width_blocks, + height_blocks, + variant, + row: 0, + }) + } + + fn read_scanline(&mut self, buf: &mut [u8]) -> io::Result<usize> { + assert_eq!(u64::try_from(buf.len()), Ok(self.scanline_bytes())); + + let mut src = + vec![0u8; self.variant.encoded_bytes_per_block() * self.width_blocks as usize]; + self.inner.read_exact(&mut src)?; + match self.variant { + DxtVariant::DXT1 => decode_dxt1_row(&src, buf), + DxtVariant::DXT3 => decode_dxt3_row(&src, buf), + DxtVariant::DXT5 => decode_dxt5_row(&src, buf), + } + self.row += 1; + Ok(buf.len()) + } +} + +// Note that, due to the way that DXT compression works, a scanline is considered to consist out of +// 4 lines of pixels. +impl<'a, R: 'a + Read> ImageDecoder<'a> for DxtDecoder<R> { + type Reader = DxtReader<R>; + + fn dimensions(&self) -> (u32, u32) { + (self.width_blocks * 4, self.height_blocks * 4) + } + + fn color_type(&self) -> ColorType { + self.variant.color_type() + } + + fn scanline_bytes(&self) -> u64 { + self.variant.decoded_bytes_per_block() as u64 * u64::from(self.width_blocks) + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + Ok(DxtReader { + buffer: ImageReadBuffer::new(self.scanline_bytes(), self.total_bytes()), + decoder: self, + }) + } + + fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> { + assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); + + for chunk in buf.chunks_mut(self.scanline_bytes().max(1) as usize) { + self.read_scanline(chunk)?; + } + Ok(()) + } +} + +impl<'a, R: 'a + Read + Seek> ImageDecoderRect<'a> for DxtDecoder<R> { + fn read_rect_with_progress<F: Fn(Progress)>( + &mut self, + x: u32, + y: u32, + width: u32, + height: u32, + buf: &mut [u8], + progress_callback: F, + ) -> ImageResult<()> { + let encoded_scanline_bytes = + self.variant.encoded_bytes_per_block() as u64 * u64::from(self.width_blocks); + + let start = self.inner.stream_position()?; + image::load_rect( + x, + y, + width, + height, + buf, + progress_callback, + self, + |s, scanline| { + s.inner + .seek(SeekFrom::Start(start + scanline * encoded_scanline_bytes))?; + Ok(()) + }, + |s, buf| s.read_scanline(buf).map(|_| ()), + )?; + self.inner.seek(SeekFrom::Start(start))?; + Ok(()) + } +} + +/// DXT reader +pub struct DxtReader<R: Read> { + buffer: ImageReadBuffer, + decoder: DxtDecoder<R>, +} + +impl<R: Read> Read for DxtReader<R> { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + let decoder = &mut self.decoder; + self.buffer.read(buf, |buf| decoder.read_scanline(buf)) + } +} + +/// DXT encoder +pub struct DxtEncoder<W: Write> { + w: W, +} + +impl<W: Write> 
DxtEncoder<W> { + /// Create a new encoder that writes its output to ```w``` + pub fn new(w: W) -> DxtEncoder<W> { + DxtEncoder { w } + } + + /// Encodes the image data ```data``` + /// that has dimensions ```width``` and ```height``` + /// in ```DxtVariant``` ```variant``` + /// data is assumed to be in variant.color_type() + pub fn encode( + mut self, + data: &[u8], + width: u32, + height: u32, + variant: DxtVariant, + ) -> ImageResult<()> { + if width % 4 != 0 || height % 4 != 0 { + // TODO: this is not very idiomatic yet. Should return an EncodingError. + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + ))); + } + let width_blocks = width / 4; + let height_blocks = height / 4; + + let stride = variant.decoded_bytes_per_block(); + + assert!(data.len() >= width_blocks as usize * height_blocks as usize * stride); + + for chunk in data.chunks(width_blocks as usize * stride) { + let data = match variant { + DxtVariant::DXT1 => encode_dxt1_row(chunk), + DxtVariant::DXT3 => encode_dxt3_row(chunk), + DxtVariant::DXT5 => encode_dxt5_row(chunk), + }; + self.w.write_all(&data)?; + } + Ok(()) + } +} + +/** + * Actual encoding/decoding logic below. + */ +use std::mem::swap; + +type Rgb = [u8; 3]; + +/// decodes a 5-bit R, 6-bit G, 5-bit B 16-bit packed color value into 8-bit RGB +/// mapping is done so min/max range values are preserved. So for 5-bit +/// values 0x00 -> 0x00 and 0x1F -> 0xFF +fn enc565_decode(value: u16) -> Rgb { + let red = (value >> 11) & 0x1F; + let green = (value >> 5) & 0x3F; + let blue = (value) & 0x1F; + [ + (red * 0xFF / 0x1F) as u8, + (green * 0xFF / 0x3F) as u8, + (blue * 0xFF / 0x1F) as u8, + ] +} + +/// encodes an 8-bit RGB value into a 5-bit R, 6-bit G, 5-bit B 16-bit packed color value +/// mapping preserves min/max values. It is guaranteed that i == encode(decode(i)) for all i +fn enc565_encode(rgb: Rgb) -> u16 { + let red = (u16::from(rgb[0]) * 0x1F + 0x7E) / 0xFF; + let green = (u16::from(rgb[1]) * 0x3F + 0x7E) / 0xFF; + let blue = (u16::from(rgb[2]) * 0x1F + 0x7E) / 0xFF; + (red << 11) | (green << 5) | blue +} + +/// utility function: squares a value +fn square(a: i32) -> i32 { + a * a +} + +/// returns the squared error between two RGB values +fn diff(a: Rgb, b: Rgb) -> i32 { + square(i32::from(a[0]) - i32::from(b[0])) + + square(i32::from(a[1]) - i32::from(b[1])) + + square(i32::from(a[2]) - i32::from(b[2])) +} + +/* + * Functions for decoding DXT compression + */ + +/// Constructs the DXT5 alpha lookup table from the two alpha entries +/// if alpha0 > alpha1, constructs a table of [a0, a1, 6 linearly interpolated values from a0 to a1] +/// if alpha0 <= alpha1, constructs a table of [a0, a1, 4 linearly interpolated values from a0 to a1, 0, 0xFF] +fn alpha_table_dxt5(alpha0: u8, alpha1: u8) -> [u8; 8] { + let mut table = [alpha0, alpha1, 0, 0, 0, 0, 0, 0xFF]; + if alpha0 > alpha1 { + for i in 2..8u16 { + table[i as usize] = + (((8 - i) * u16::from(alpha0) + (i - 1) * u16::from(alpha1)) / 7) as u8; + } + } else { + for i in 2..6u16 { + table[i as usize] = + (((6 - i) * u16::from(alpha0) + (i - 1) * u16::from(alpha1)) / 5) as u8; + } + } + table +} + +/// decodes an 8-byte dxt color block into the RGB channels of a 16xRGB or 16xRGBA block. 
+/// source should have a length of 8, dest a length of 48 (RGB) or 64 (RGBA) +fn decode_dxt_colors(source: &[u8], dest: &mut [u8], is_dxt1: bool) { + // sanity checks, also enable the compiler to elide all following bound checks + assert!(source.len() == 8 && (dest.len() == 48 || dest.len() == 64)); + // calculate pitch to store RGB values in dest (3 for RGB, 4 for RGBA) + let pitch = dest.len() / 16; + + // extract color data + let color0 = u16::from(source[0]) | (u16::from(source[1]) << 8); + let color1 = u16::from(source[2]) | (u16::from(source[3]) << 8); + let color_table = u32::from(source[4]) + | (u32::from(source[5]) << 8) + | (u32::from(source[6]) << 16) + | (u32::from(source[7]) << 24); + // let color_table = source[4..8].iter().rev().fold(0, |t, &b| (t << 8) | b as u32); + + // decode the colors to rgb format + let mut colors = [[0; 3]; 4]; + colors[0] = enc565_decode(color0); + colors[1] = enc565_decode(color1); + + // determine color interpolation method + if color0 > color1 || !is_dxt1 { + // linearly interpolate the other two color table entries + for i in 0..3 { + colors[2][i] = ((u16::from(colors[0][i]) * 2 + u16::from(colors[1][i]) + 1) / 3) as u8; + colors[3][i] = ((u16::from(colors[0][i]) + u16::from(colors[1][i]) * 2 + 1) / 3) as u8; + } + } else { + // linearly interpolate one other entry, keep the other at 0 + for i in 0..3 { + colors[2][i] = ((u16::from(colors[0][i]) + u16::from(colors[1][i]) + 1) / 2) as u8; + } + } + + // serialize the result. Every color is determined by looking up + // two bits in color_table which identify which color to actually pick from the 4 possible colors + for i in 0..16 { + dest[i * pitch..i * pitch + 3] + .copy_from_slice(&colors[(color_table >> (i * 2)) as usize & 3]); + } +} + +/// Decodes a 16-byte bock of dxt5 data to a 16xRGBA block +fn decode_dxt5_block(source: &[u8], dest: &mut [u8]) { + assert!(source.len() == 16 && dest.len() == 64); + + // extract alpha index table (stored as little endian 64-bit value) + let alpha_table = source[2..8] + .iter() + .rev() + .fold(0, |t, &b| (t << 8) | u64::from(b)); + + // alhpa level decode + let alphas = alpha_table_dxt5(source[0], source[1]); + + // serialize alpha + for i in 0..16 { + dest[i * 4 + 3] = alphas[(alpha_table >> (i * 3)) as usize & 7]; + } + + // handle colors + decode_dxt_colors(&source[8..16], dest, false); +} + +/// Decodes a 16-byte bock of dxt3 data to a 16xRGBA block +fn decode_dxt3_block(source: &[u8], dest: &mut [u8]) { + assert!(source.len() == 16 && dest.len() == 64); + + // extract alpha index table (stored as little endian 64-bit value) + let alpha_table = source[0..8] + .iter() + .rev() + .fold(0, |t, &b| (t << 8) | u64::from(b)); + + // serialize alpha (stored as 4-bit values) + for i in 0..16 { + dest[i * 4 + 3] = ((alpha_table >> (i * 4)) as u8 & 0xF) * 0x11; + } + + // handle colors + decode_dxt_colors(&source[8..16], dest, false); +} + +/// Decodes a 8-byte bock of dxt5 data to a 16xRGB block +fn decode_dxt1_block(source: &[u8], dest: &mut [u8]) { + assert!(source.len() == 8 && dest.len() == 48); + decode_dxt_colors(source, dest, true); +} + +/// Decode a row of DXT1 data to four rows of RGB data. +/// source.len() should be a multiple of 8, otherwise this panics. 
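// Worked size example (editor's addition, not in the upstream source): a
// 16-pixel-wide DXT1 image has 16 / 4 = 4 blocks per row, so one encoded row is
// 4 * 8 = 32 bytes and decodes into 4 * 48 = 192 bytes, i.e. four scanlines of
// 16 RGB8 pixels each.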
+fn decode_dxt1_row(source: &[u8], dest: &mut [u8]) { + assert!(source.len() % 8 == 0); + let block_count = source.len() / 8; + assert!(dest.len() >= block_count * 48); + + // contains the 16 decoded pixels per block + let mut decoded_block = [0u8; 48]; + + for (x, encoded_block) in source.chunks(8).enumerate() { + decode_dxt1_block(encoded_block, &mut decoded_block); + + // copy the values from the decoded block to linewise RGB layout + for line in 0..4 { + let offset = (block_count * line + x) * 12; + dest[offset..offset + 12].copy_from_slice(&decoded_block[line * 12..(line + 1) * 12]); + } + } +} + +/// Decode a row of DXT3 data to four rows of RGBA data. +/// source.len() should be a multiple of 16, otherwise this panics. +fn decode_dxt3_row(source: &[u8], dest: &mut [u8]) { + assert!(source.len() % 16 == 0); + let block_count = source.len() / 16; + assert!(dest.len() >= block_count * 64); + + // contains the 16 decoded pixels per block + let mut decoded_block = [0u8; 64]; + + for (x, encoded_block) in source.chunks(16).enumerate() { + decode_dxt3_block(encoded_block, &mut decoded_block); + + // copy the values from the decoded block to linewise RGB layout + for line in 0..4 { + let offset = (block_count * line + x) * 16; + dest[offset..offset + 16].copy_from_slice(&decoded_block[line * 16..(line + 1) * 16]); + } + } +} + +/// Decode a row of DXT5 data to four rows of RGBA data. +/// source.len() should be a multiple of 16, otherwise this panics. +fn decode_dxt5_row(source: &[u8], dest: &mut [u8]) { + assert!(source.len() % 16 == 0); + let block_count = source.len() / 16; + assert!(dest.len() >= block_count * 64); + + // contains the 16 decoded pixels per block + let mut decoded_block = [0u8; 64]; + + for (x, encoded_block) in source.chunks(16).enumerate() { + decode_dxt5_block(encoded_block, &mut decoded_block); + + // copy the values from the decoded block to linewise RGB layout + for line in 0..4 { + let offset = (block_count * line + x) * 16; + dest[offset..offset + 16].copy_from_slice(&decoded_block[line * 16..(line + 1) * 16]); + } + } +} + +/* + * Functions for encoding DXT compression + */ + +/// Tries to perform the color encoding part of dxt compression +/// the approach taken is simple, it picks unique combinations +/// of the colors present in the block, and attempts to encode the +/// block with each, picking the encoding that yields the least +/// squared error out of all of them. +/// +/// This could probably be faster but is already reasonably fast +/// and a good reference impl to optimize others against. +/// +/// Another way to perform this analysis would be to perform a +/// singular value decomposition of the different colors, and +/// then pick 2 points on this line as the base colors. But +/// this is still rather unwieldy math and has issues +/// with the 3-linear-colors-and-0 case, it's also worse +/// at conserving the original colors. +/// +/// source: should be RGBAx16 or RGBx16 bytes of data, +/// dest 8 bytes of resulting encoded color data +fn encode_dxt_colors(source: &[u8], dest: &mut [u8], is_dxt1: bool) { + // sanity checks and determine stride when parsing the source data + assert!((source.len() == 64 || source.len() == 48) && dest.len() == 8); + let stride = source.len() / 16; + + // reference colors array + let mut colors = [[0u8; 3]; 4]; + + // Put the colors we're going to be processing in an array with pure RGB layout + // note: we reverse the pixel order here. The reason for this is found in the inner quantization loop. 
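    // (Concretely: near the end of this function the index table is built by
    // shifting `chosen_indices` left two bits per pixel, so walking `targets`
    // in reversed source order leaves pixel 0 in the least-significant bits,
    // which is exactly where `decode_dxt_colors` reads it back from.)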
+ let mut targets = [[0u8; 3]; 16]; + for (s, d) in source.chunks(stride).rev().zip(&mut targets) { + *d = [s[0], s[1], s[2]]; + } + + // roundtrip all colors through the r5g6b5 encoding + for rgb in &mut targets { + *rgb = enc565_decode(enc565_encode(*rgb)); + } + + // and deduplicate the set of colors to choose from as the algorithm is O(N^2) in this + let mut colorspace_ = [[0u8; 3]; 16]; + let mut colorspace_len = 0; + for color in &targets { + if !colorspace_[..colorspace_len].contains(color) { + colorspace_[colorspace_len] = *color; + colorspace_len += 1; + } + } + let mut colorspace = &colorspace_[..colorspace_len]; + + // in case of slight gradients it can happen that there's only one entry left in the color table. + // as the resulting banding can be quite bad if we would just left the block at the closest + // encodable color, we have a special path here that tries to emulate the wanted color + // using the linear interpolation between gradients + if colorspace.len() == 1 { + // the base color we got from colorspace reduction + let ref_rgb = colorspace[0]; + // the unreduced color in this block that's the furthest away from the actual block + let mut rgb = targets + .iter() + .cloned() + .max_by_key(|rgb| diff(*rgb, ref_rgb)) + .unwrap(); + // amplify differences by 2.5, which should push them to the next quantized value + // if possible without overshoot + for i in 0..3 { + rgb[i] = + ((i16::from(rgb[i]) - i16::from(ref_rgb[i])) * 5 / 2 + i16::from(ref_rgb[i])) as u8; + } + + // roundtrip it through quantization + let encoded = enc565_encode(rgb); + let rgb = enc565_decode(encoded); + + // in case this didn't land us a different color the best way to represent this field is + // as a single color block + if rgb == ref_rgb { + dest[0] = encoded as u8; + dest[1] = (encoded >> 8) as u8; + + for d in dest.iter_mut().take(8).skip(2) { + *d = 0; + } + return; + } + + // we did find a separate value: add it to the options so after one round of quantization + // we're done + colorspace_[1] = rgb; + colorspace = &colorspace_[..2]; + } + + // block quantization loop: we basically just try every possible combination, returning + // the combination with the least squared error + // stores the best candidate colors + let mut chosen_colors = [[0; 3]; 4]; + // did this index table use the [0,0,0] variant + let mut chosen_use_0 = false; + // error calculated for the last entry + let mut chosen_error = 0xFFFF_FFFFu32; + + // loop through unique permutations of the colorspace, where c1 != c2 + 'search: for (i, &c1) in colorspace.iter().enumerate() { + colors[0] = c1; + + for &c2 in &colorspace[0..i] { + colors[1] = c2; + + if is_dxt1 { + // what's inside here is ran at most 120 times. + for use_0 in 0..2 { + // and 240 times here. + + if use_0 != 0 { + // interpolate one color, set the other to 0 + for i in 0..3 { + colors[2][i] = + ((u16::from(colors[0][i]) + u16::from(colors[1][i]) + 1) / 2) as u8; + } + colors[3] = [0, 0, 0]; + } else { + // interpolate to get 2 more colors + for i in 0..3 { + colors[2][i] = + ((u16::from(colors[0][i]) * 2 + u16::from(colors[1][i]) + 1) / 3) + as u8; + colors[3][i] = + ((u16::from(colors[0][i]) + u16::from(colors[1][i]) * 2 + 1) / 3) + as u8; + } + } + + // calculate the total error if we were to quantize the block with these color combinations + // both these loops have statically known iteration counts and are well vectorizable + // note that the inside of this can be run about 15360 times worst case, i.e. 960 times per + // pixel. 
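                    // (Where those numbers come from: at most 16 unique block
                    // colors give 16 * 15 / 2 = 120 (c1, c2) pairs, doubled to
                    // 240 by the two `use_0` variants; scoring one candidate
                    // palette costs 16 pixels * 4 diffs = 64 calls, and
                    // 240 * 64 = 15360, i.e. 15360 / 16 = 960 per pixel.)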
+ let total_error = targets + .iter() + .map(|t| colors.iter().map(|c| diff(*c, *t) as u32).min().unwrap()) + .sum(); + + // update the match if we found a better one + if total_error < chosen_error { + chosen_colors = colors; + chosen_use_0 = use_0 != 0; + chosen_error = total_error; + + // if we've got a perfect or at most 1 LSB off match, we're done + if total_error < 4 { + break 'search; + } + } + } + } else { + // what's inside here is ran at most 120 times. + + // interpolate to get 2 more colors + for i in 0..3 { + colors[2][i] = + ((u16::from(colors[0][i]) * 2 + u16::from(colors[1][i]) + 1) / 3) as u8; + colors[3][i] = + ((u16::from(colors[0][i]) + u16::from(colors[1][i]) * 2 + 1) / 3) as u8; + } + + // calculate the total error if we were to quantize the block with these color combinations + // both these loops have statically known iteration counts and are well vectorizable + // note that the inside of this can be run about 15360 times worst case, i.e. 960 times per + // pixel. + let total_error = targets + .iter() + .map(|t| colors.iter().map(|c| diff(*c, *t) as u32).min().unwrap()) + .sum(); + + // update the match if we found a better one + if total_error < chosen_error { + chosen_colors = colors; + chosen_error = total_error; + + // if we've got a perfect or at most 1 LSB off match, we're done + if total_error < 4 { + break 'search; + } + } + } + } + } + + // calculate the final indices + // note that targets is already in reverse pixel order, to make the index computation easy. + let mut chosen_indices = 0u32; + for t in &targets { + let (idx, _) = chosen_colors + .iter() + .enumerate() + .min_by_key(|&(_, c)| diff(*c, *t)) + .unwrap(); + chosen_indices = (chosen_indices << 2) | idx as u32; + } + + // encode the colors + let mut color0 = enc565_encode(chosen_colors[0]); + let mut color1 = enc565_encode(chosen_colors[1]); + + // determine encoding. Note that color0 == color1 is impossible at this point + if is_dxt1 { + if color0 > color1 { + if chosen_use_0 { + swap(&mut color0, &mut color1); + // Indexes are packed 2 bits wide, swap index 0/1 but preserve 2/3. + let filter = (chosen_indices & 0xAAAA_AAAA) >> 1; + chosen_indices ^= filter ^ 0x5555_5555; + } + } else if !chosen_use_0 { + swap(&mut color0, &mut color1); + // Indexes are packed 2 bits wide, swap index 0/1 and 2/3. + chosen_indices ^= 0x5555_5555; + } + } + + // encode everything. 
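    // Layout of the finished 8-byte color block: bytes 0..2 are color0 and
    // bytes 2..4 are color1, both little-endian 5-6-5 values, and bytes 4..8
    // are the 32-bit index table, least-significant byte first.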
+ dest[0] = color0 as u8; + dest[1] = (color0 >> 8) as u8; + dest[2] = color1 as u8; + dest[3] = (color1 >> 8) as u8; + for i in 0..4 { + dest[i + 4] = (chosen_indices >> (i * 8)) as u8; + } +} + +/// Encodes a buffer of 16 alpha bytes into a dxt5 alpha index table, +/// where the alpha table they are indexed against is created by +/// calling alpha_table_dxt5(alpha0, alpha1) +/// returns the resulting error and alpha table +fn encode_dxt5_alpha(alpha0: u8, alpha1: u8, alphas: &[u8; 16]) -> (i32, u64) { + // create a table for the given alpha ranges + let table = alpha_table_dxt5(alpha0, alpha1); + let mut indices = 0u64; + let mut total_error = 0i32; + + // least error brute force search + for (i, &a) in alphas.iter().enumerate() { + let (index, error) = table + .iter() + .enumerate() + .map(|(i, &e)| (i, square(i32::from(e) - i32::from(a)))) + .min_by_key(|&(_, e)| e) + .unwrap(); + total_error += error; + indices |= (index as u64) << (i * 3); + } + + (total_error, indices) +} + +/// Encodes a RGBAx16 sequence of bytes to a 16 bytes DXT5 block +fn encode_dxt5_block(source: &[u8], dest: &mut [u8]) { + assert!(source.len() == 64 && dest.len() == 16); + + // perform dxt color encoding + encode_dxt_colors(source, &mut dest[8..16], false); + + // copy out the alpha bytes + let mut alphas = [0; 16]; + for i in 0..16 { + alphas[i] = source[i * 4 + 3]; + } + + // try both alpha compression methods, see which has the least error. + let alpha07 = alphas.iter().cloned().min().unwrap(); + let alpha17 = alphas.iter().cloned().max().unwrap(); + let (error7, indices7) = encode_dxt5_alpha(alpha07, alpha17, &alphas); + + // if all alphas are 0 or 255 it doesn't particularly matter what we do here. + let alpha05 = alphas + .iter() + .cloned() + .filter(|&i| i != 255) + .max() + .unwrap_or(255); + let alpha15 = alphas + .iter() + .cloned() + .filter(|&i| i != 0) + .min() + .unwrap_or(0); + let (error5, indices5) = encode_dxt5_alpha(alpha05, alpha15, &alphas); + + // pick the best one, encode the min/max values + let mut alpha_table = if error5 < error7 { + dest[0] = alpha05; + dest[1] = alpha15; + indices5 + } else { + dest[0] = alpha07; + dest[1] = alpha17; + indices7 + }; + + // encode the alphas + for byte in dest[2..8].iter_mut() { + *byte = alpha_table as u8; + alpha_table >>= 8; + } +} + +/// Encodes a RGBAx16 sequence of bytes into a 16 bytes DXT3 block +fn encode_dxt3_block(source: &[u8], dest: &mut [u8]) { + assert!(source.len() == 64 && dest.len() == 16); + + // perform dxt color encoding + encode_dxt_colors(source, &mut dest[8..16], false); + + // DXT3 alpha compression is very simple, just round towards the nearest value + + // index the alpha values into the 64bit alpha table + let mut alpha_table = 0u64; + for i in 0..16 { + let alpha = u64::from(source[i * 4 + 3]); + let alpha = (alpha + 0x8) / 0x11; + alpha_table |= alpha << (i * 4); + } + + // encode the alpha values + for byte in &mut dest[0..8] { + *byte = alpha_table as u8; + alpha_table >>= 8; + } +} + +/// Encodes a RGBx16 sequence of bytes into a 8 bytes DXT1 block +fn encode_dxt1_block(source: &[u8], dest: &mut [u8]) { + assert!(source.len() == 48 && dest.len() == 8); + + // perform dxt color encoding + encode_dxt_colors(source, dest, true); +} + +/// Decode a row of DXT1 data to four rows of RGBA data. +/// source.len() should be a multiple of 8, otherwise this panics. 
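A short usage sketch of the block encoder defined earlier in this module (an editor's illustration, not part of the vendored source; the `image::codecs::dxt` path and the helper name `compress_tile` are assumptions, and the DXT items appear to be deprecated upstream, given that the DDS decoder imports them under #[allow(deprecated)], so the call may emit warnings):

use image::codecs::dxt::{DxtEncoder, DxtVariant};

// Compress a single 4x4 RGB8 tile (48 input bytes) into one 8-byte DXT1 block.
fn compress_tile(tile: &[u8; 48]) -> Vec<u8> {
    let mut out = Vec::new();
    DxtEncoder::new(&mut out)
        .encode(tile, 4, 4, DxtVariant::DXT1)
        .expect("4x4 is a valid DXT1 block size");
    out // two 5-6-5 endpoint colors followed by the 32-bit index table
}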
+fn encode_dxt1_row(source: &[u8]) -> Vec<u8> { + assert!(source.len() % 48 == 0); + let block_count = source.len() / 48; + + let mut dest = vec![0u8; block_count * 8]; + // contains the 16 decoded pixels per block + let mut decoded_block = [0u8; 48]; + + for (x, encoded_block) in dest.chunks_mut(8).enumerate() { + // copy the values from the decoded block to linewise RGB layout + for line in 0..4 { + let offset = (block_count * line + x) * 12; + decoded_block[line * 12..(line + 1) * 12].copy_from_slice(&source[offset..offset + 12]); + } + + encode_dxt1_block(&decoded_block, encoded_block); + } + dest +} + +/// Decode a row of DXT3 data to four rows of RGBA data. +/// source.len() should be a multiple of 16, otherwise this panics. +fn encode_dxt3_row(source: &[u8]) -> Vec<u8> { + assert!(source.len() % 64 == 0); + let block_count = source.len() / 64; + + let mut dest = vec![0u8; block_count * 16]; + // contains the 16 decoded pixels per block + let mut decoded_block = [0u8; 64]; + + for (x, encoded_block) in dest.chunks_mut(16).enumerate() { + // copy the values from the decoded block to linewise RGB layout + for line in 0..4 { + let offset = (block_count * line + x) * 16; + decoded_block[line * 16..(line + 1) * 16].copy_from_slice(&source[offset..offset + 16]); + } + + encode_dxt3_block(&decoded_block, encoded_block); + } + dest +} + +/// Decode a row of DXT5 data to four rows of RGBA data. +/// source.len() should be a multiple of 16, otherwise this panics. +fn encode_dxt5_row(source: &[u8]) -> Vec<u8> { + assert!(source.len() % 64 == 0); + let block_count = source.len() / 64; + + let mut dest = vec![0u8; block_count * 16]; + // contains the 16 decoded pixels per block + let mut decoded_block = [0u8; 64]; + + for (x, encoded_block) in dest.chunks_mut(16).enumerate() { + // copy the values from the decoded block to linewise RGB layout + for line in 0..4 { + let offset = (block_count * line + x) * 16; + decoded_block[line * 16..(line + 1) * 16].copy_from_slice(&source[offset..offset + 16]); + } + + encode_dxt5_block(&decoded_block, encoded_block); + } + dest +} diff --git a/vendor/image/src/codecs/farbfeld.rs b/vendor/image/src/codecs/farbfeld.rs new file mode 100644 index 0000000..b543ade --- /dev/null +++ b/vendor/image/src/codecs/farbfeld.rs @@ -0,0 +1,400 @@ +//! Decoding of farbfeld images +//! +//! farbfeld is a lossless image format which is easy to parse, pipe and compress. +//! +//! It has the following format: +//! +//! | Bytes | Description | +//! |--------|---------------------------------------------------------| +//! | 8 | "farbfeld" magic value | +//! | 4 | 32-Bit BE unsigned integer (width) | +//! | 4 | 32-Bit BE unsigned integer (height) | +//! | [2222] | 4⋅16-Bit BE unsigned integers [RGBA] / pixel, row-major | +//! +//! The RGB-data should be sRGB for best interoperability and not alpha-premultiplied. +//! +//! # Related Links +//! 
* <https://tools.suckless.org/farbfeld/> - the farbfeld specification + +use std::convert::TryFrom; +use std::i64; +use std::io::{self, Read, Seek, SeekFrom, Write}; + +use byteorder::{BigEndian, ByteOrder, NativeEndian}; + +use crate::color::ColorType; +use crate::error::{ + DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind, +}; +use crate::image::{self, ImageDecoder, ImageDecoderRect, ImageEncoder, ImageFormat, Progress}; + +/// farbfeld Reader +pub struct FarbfeldReader<R: Read> { + width: u32, + height: u32, + inner: R, + /// Relative to the start of the pixel data + current_offset: u64, + cached_byte: Option<u8>, +} + +impl<R: Read> FarbfeldReader<R> { + fn new(mut buffered_read: R) -> ImageResult<FarbfeldReader<R>> { + fn read_dimm<R: Read>(from: &mut R) -> ImageResult<u32> { + let mut buf = [0u8; 4]; + from.read_exact(&mut buf).map_err(|err| { + ImageError::Decoding(DecodingError::new(ImageFormat::Farbfeld.into(), err)) + })?; + Ok(BigEndian::read_u32(&buf)) + } + + let mut magic = [0u8; 8]; + buffered_read.read_exact(&mut magic).map_err(|err| { + ImageError::Decoding(DecodingError::new(ImageFormat::Farbfeld.into(), err)) + })?; + if &magic != b"farbfeld" { + return Err(ImageError::Decoding(DecodingError::new( + ImageFormat::Farbfeld.into(), + format!("Invalid magic: {:02x?}", magic), + ))); + } + + let reader = FarbfeldReader { + width: read_dimm(&mut buffered_read)?, + height: read_dimm(&mut buffered_read)?, + inner: buffered_read, + current_offset: 0, + cached_byte: None, + }; + + if crate::utils::check_dimension_overflow( + reader.width, + reader.height, + // ColorType is always rgba16 + ColorType::Rgba16.bytes_per_pixel(), + ) { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Farbfeld.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Image dimensions ({}x{}) are too large", + reader.width, reader.height + )), + ), + )); + } + + Ok(reader) + } +} + +impl<R: Read> Read for FarbfeldReader<R> { + fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> { + let mut bytes_written = 0; + if let Some(byte) = self.cached_byte.take() { + buf[0] = byte; + buf = &mut buf[1..]; + bytes_written = 1; + self.current_offset += 1; + } + + if buf.len() == 1 { + buf[0] = cache_byte(&mut self.inner, &mut self.cached_byte)?; + bytes_written += 1; + self.current_offset += 1; + } else { + for channel_out in buf.chunks_exact_mut(2) { + consume_channel(&mut self.inner, channel_out)?; + bytes_written += 2; + self.current_offset += 2; + } + } + + Ok(bytes_written) + } +} + +impl<R: Read + Seek> Seek for FarbfeldReader<R> { + fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> { + fn parse_offset(original_offset: u64, end_offset: u64, pos: SeekFrom) -> Option<i64> { + match pos { + SeekFrom::Start(off) => i64::try_from(off) + .ok()? + .checked_sub(i64::try_from(original_offset).ok()?), + SeekFrom::End(off) => { + if off < i64::try_from(end_offset).unwrap_or(i64::MAX) { + None + } else { + Some(i64::try_from(end_offset.checked_sub(original_offset)?).ok()? 
+ off) + } + } + SeekFrom::Current(off) => { + if off < i64::try_from(original_offset).unwrap_or(i64::MAX) { + None + } else { + Some(off) + } + } + } + } + + let original_offset = self.current_offset; + let end_offset = self.width as u64 * self.height as u64 * 2; + let offset_from_current = + parse_offset(original_offset, end_offset, pos).ok_or_else(|| { + io::Error::new( + io::ErrorKind::InvalidInput, + "invalid seek to a negative or overflowing position", + ) + })?; + + // TODO: convert to seek_relative() once that gets stabilised + self.inner.seek(SeekFrom::Current(offset_from_current))?; + self.current_offset = if offset_from_current < 0 { + original_offset.checked_sub(offset_from_current.wrapping_neg() as u64) + } else { + original_offset.checked_add(offset_from_current as u64) + } + .expect("This should've been checked above"); + + if self.current_offset < end_offset && self.current_offset % 2 == 1 { + let curr = self.inner.seek(SeekFrom::Current(-1))?; + cache_byte(&mut self.inner, &mut self.cached_byte)?; + self.inner.seek(SeekFrom::Start(curr))?; + } else { + self.cached_byte = None; + } + + Ok(original_offset) + } +} + +fn consume_channel<R: Read>(from: &mut R, to: &mut [u8]) -> io::Result<()> { + let mut ibuf = [0u8; 2]; + from.read_exact(&mut ibuf)?; + NativeEndian::write_u16(to, BigEndian::read_u16(&ibuf)); + Ok(()) +} + +fn cache_byte<R: Read>(from: &mut R, cached_byte: &mut Option<u8>) -> io::Result<u8> { + let mut obuf = [0u8; 2]; + consume_channel(from, &mut obuf)?; + *cached_byte = Some(obuf[1]); + Ok(obuf[0]) +} + +/// farbfeld decoder +pub struct FarbfeldDecoder<R: Read> { + reader: FarbfeldReader<R>, +} + +impl<R: Read> FarbfeldDecoder<R> { + /// Creates a new decoder that decodes from the stream ```r``` + pub fn new(buffered_read: R) -> ImageResult<FarbfeldDecoder<R>> { + Ok(FarbfeldDecoder { + reader: FarbfeldReader::new(buffered_read)?, + }) + } +} + +impl<'a, R: 'a + Read> ImageDecoder<'a> for FarbfeldDecoder<R> { + type Reader = FarbfeldReader<R>; + + fn dimensions(&self) -> (u32, u32) { + (self.reader.width, self.reader.height) + } + + fn color_type(&self) -> ColorType { + ColorType::Rgba16 + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + Ok(self.reader) + } + + fn scanline_bytes(&self) -> u64 { + 2 + } +} + +impl<'a, R: 'a + Read + Seek> ImageDecoderRect<'a> for FarbfeldDecoder<R> { + fn read_rect_with_progress<F: Fn(Progress)>( + &mut self, + x: u32, + y: u32, + width: u32, + height: u32, + buf: &mut [u8], + progress_callback: F, + ) -> ImageResult<()> { + // A "scanline" (defined as "shortest non-caching read" in the doc) is just one channel in this case + + let start = self.reader.stream_position()?; + image::load_rect( + x, + y, + width, + height, + buf, + progress_callback, + self, + |s, scanline| s.reader.seek(SeekFrom::Start(scanline * 2)).map(|_| ()), + |s, buf| s.reader.read_exact(buf), + )?; + self.reader.seek(SeekFrom::Start(start))?; + Ok(()) + } +} + +/// farbfeld encoder +pub struct FarbfeldEncoder<W: Write> { + w: W, +} + +impl<W: Write> FarbfeldEncoder<W> { + /// Create a new encoder that writes its output to ```w```. The writer should be buffered. 
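A hedged usage sketch for the encoder (an editor's illustration, not part of the vendored source; the `image::codecs::farbfeld::FarbfeldEncoder` path, an enabled `farbfeld` cargo feature and the helper name `one_white_pixel` are assumptions):

use image::codecs::farbfeld::FarbfeldEncoder;

// Encode a single opaque white RGBA16 pixel. `encode` expects the raw samples
// in native endianness and converts them to the big-endian on-disk form.
fn one_white_pixel() -> Vec<u8> {
    let pixel = [u16::MAX; 4];
    // Reinterpret the four native-endian u16 samples as eight bytes.
    let bytes: Vec<u8> = pixel.iter().copied().flat_map(|s| s.to_ne_bytes()).collect();
    let mut out = Vec::new();
    FarbfeldEncoder::new(&mut out)
        .encode(&bytes, 1, 1)
        .expect("a 1x1 farbfeld image should encode");
    out // "farbfeld" magic, two big-endian u32 dimensions, 8 bytes of pixel data
}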
+ pub fn new(buffered_writer: W) -> FarbfeldEncoder<W> { + FarbfeldEncoder { w: buffered_writer } + } + + /// Encodes the image ```data``` (native endian) + /// that has dimensions ```width``` and ```height``` + pub fn encode(self, data: &[u8], width: u32, height: u32) -> ImageResult<()> { + self.encode_impl(data, width, height)?; + Ok(()) + } + + fn encode_impl(mut self, data: &[u8], width: u32, height: u32) -> io::Result<()> { + self.w.write_all(b"farbfeld")?; + + let mut buf = [0u8; 4]; + BigEndian::write_u32(&mut buf, width); + self.w.write_all(&buf)?; + + BigEndian::write_u32(&mut buf, height); + self.w.write_all(&buf)?; + + for channel in data.chunks_exact(2) { + BigEndian::write_u16(&mut buf, NativeEndian::read_u16(channel)); + self.w.write_all(&buf[..2])?; + } + + Ok(()) + } +} + +impl<W: Write> ImageEncoder for FarbfeldEncoder<W> { + fn write_image( + self, + buf: &[u8], + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<()> { + if color_type != ColorType::Rgba16 { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Farbfeld.into(), + UnsupportedErrorKind::Color(color_type.into()), + ), + )); + } + + self.encode(buf, width, height) + } +} + +#[cfg(test)] +mod tests { + use crate::codecs::farbfeld::FarbfeldDecoder; + use crate::ImageDecoderRect; + use byteorder::{ByteOrder, NativeEndian}; + use std::io::{Cursor, Seek, SeekFrom}; + + static RECTANGLE_IN: &[u8] = b"farbfeld\ + \x00\x00\x00\x02\x00\x00\x00\x03\ + \xFF\x01\xFE\x02\xFD\x03\xFC\x04\xFB\x05\xFA\x06\xF9\x07\xF8\x08\ + \xF7\x09\xF6\x0A\xF5\x0B\xF4\x0C\xF3\x0D\xF2\x0E\xF1\x0F\xF0\x10\ + \xEF\x11\xEE\x12\xED\x13\xEC\x14\xEB\x15\xEA\x16\xE9\x17\xE8\x18"; + + #[test] + fn read_rect_1x2() { + static RECTANGLE_OUT: &[u16] = &[ + 0xF30D, 0xF20E, 0xF10F, 0xF010, 0xEB15, 0xEA16, 0xE917, 0xE818, + ]; + + read_rect(1, 1, 1, 2, RECTANGLE_OUT); + } + + #[test] + fn read_rect_2x2() { + static RECTANGLE_OUT: &[u16] = &[ + 0xFF01, 0xFE02, 0xFD03, 0xFC04, 0xFB05, 0xFA06, 0xF907, 0xF808, 0xF709, 0xF60A, 0xF50B, + 0xF40C, 0xF30D, 0xF20E, 0xF10F, 0xF010, + ]; + + read_rect(0, 0, 2, 2, RECTANGLE_OUT); + } + + #[test] + fn read_rect_2x1() { + static RECTANGLE_OUT: &[u16] = &[ + 0xEF11, 0xEE12, 0xED13, 0xEC14, 0xEB15, 0xEA16, 0xE917, 0xE818, + ]; + + read_rect(0, 2, 2, 1, RECTANGLE_OUT); + } + + #[test] + fn read_rect_2x3() { + static RECTANGLE_OUT: &[u16] = &[ + 0xFF01, 0xFE02, 0xFD03, 0xFC04, 0xFB05, 0xFA06, 0xF907, 0xF808, 0xF709, 0xF60A, 0xF50B, + 0xF40C, 0xF30D, 0xF20E, 0xF10F, 0xF010, 0xEF11, 0xEE12, 0xED13, 0xEC14, 0xEB15, 0xEA16, + 0xE917, 0xE818, + ]; + + read_rect(0, 0, 2, 3, RECTANGLE_OUT); + } + + #[test] + fn read_rect_in_stream() { + static RECTANGLE_OUT: &[u16] = &[0xEF11, 0xEE12, 0xED13, 0xEC14]; + + let mut input = vec![]; + input.extend_from_slice(b"This is a 31-byte-long prologue"); + input.extend_from_slice(RECTANGLE_IN); + let mut input_cur = Cursor::new(input); + input_cur.seek(SeekFrom::Start(31)).unwrap(); + + let mut out_buf = [0u8; 64]; + FarbfeldDecoder::new(input_cur) + .unwrap() + .read_rect(0, 2, 1, 1, &mut out_buf) + .unwrap(); + let exp = degenerate_pixels(RECTANGLE_OUT); + assert_eq!(&out_buf[..exp.len()], &exp[..]); + } + + #[test] + fn dimension_overflow() { + let header = b"farbfeld\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"; + + assert!(FarbfeldDecoder::new(Cursor::new(header)).is_err()); + } + + fn read_rect(x: u32, y: u32, width: u32, height: u32, exp_wide: &[u16]) { + let mut out_buf = [0u8; 64]; + FarbfeldDecoder::new(Cursor::new(RECTANGLE_IN)) + 
.unwrap() + .read_rect(x, y, width, height, &mut out_buf) + .unwrap(); + let exp = degenerate_pixels(exp_wide); + assert_eq!(&out_buf[..exp.len()], &exp[..]); + } + + fn degenerate_pixels(exp_wide: &[u16]) -> Vec<u8> { + let mut exp = vec![0u8; exp_wide.len() * 2]; + NativeEndian::write_u16_into(exp_wide, &mut exp); + exp + } +} diff --git a/vendor/image/src/codecs/gif.rs b/vendor/image/src/codecs/gif.rs new file mode 100644 index 0000000..dcbd841 --- /dev/null +++ b/vendor/image/src/codecs/gif.rs @@ -0,0 +1,606 @@ +//! Decoding of GIF Images +//! +//! GIF (Graphics Interchange Format) is an image format that supports lossless compression. +//! +//! # Related Links +//! * <http://www.w3.org/Graphics/GIF/spec-gif89a.txt> - The GIF Specification +//! +//! # Examples +//! ```rust,no_run +//! use image::codecs::gif::{GifDecoder, GifEncoder}; +//! use image::{ImageDecoder, AnimationDecoder}; +//! use std::fs::File; +//! # fn main() -> std::io::Result<()> { +//! // Decode a gif into frames +//! let file_in = File::open("foo.gif")?; +//! let mut decoder = GifDecoder::new(file_in).unwrap(); +//! let frames = decoder.into_frames(); +//! let frames = frames.collect_frames().expect("error decoding gif"); +//! +//! // Encode frames into a gif and save to a file +//! let mut file_out = File::open("out.gif")?; +//! let mut encoder = GifEncoder::new(file_out); +//! encoder.encode_frames(frames.into_iter()); +//! # Ok(()) +//! # } +//! ``` +#![allow(clippy::while_let_loop)] + +use std::convert::TryFrom; +use std::convert::TryInto; +use std::io::{self, Cursor, Read, Write}; +use std::marker::PhantomData; +use std::mem; + +use gif::ColorOutput; +use gif::{DisposalMethod, Frame}; +use num_rational::Ratio; + +use crate::animation; +use crate::color::{ColorType, Rgba}; +use crate::error::{ + DecodingError, EncodingError, ImageError, ImageResult, ParameterError, ParameterErrorKind, + UnsupportedError, UnsupportedErrorKind, +}; +use crate::image::{self, AnimationDecoder, ImageDecoder, ImageFormat}; +use crate::io::Limits; +use crate::traits::Pixel; +use crate::ImageBuffer; + +/// GIF decoder +pub struct GifDecoder<R: Read> { + reader: gif::Decoder<R>, + limits: Limits, +} + +impl<R: Read> GifDecoder<R> { + /// Creates a new decoder that decodes the input steam `r` + pub fn new(r: R) -> ImageResult<GifDecoder<R>> { + let mut decoder = gif::DecodeOptions::new(); + decoder.set_color_output(ColorOutput::RGBA); + + Ok(GifDecoder { + reader: decoder.read_info(r).map_err(ImageError::from_decoding)?, + limits: Limits::default(), + }) + } + + /// Creates a new decoder that decodes the input steam `r`, using limits `limits` + pub fn with_limits(r: R, limits: Limits) -> ImageResult<GifDecoder<R>> { + let mut decoder = gif::DecodeOptions::new(); + decoder.set_color_output(ColorOutput::RGBA); + + Ok(GifDecoder { + reader: decoder.read_info(r).map_err(ImageError::from_decoding)?, + limits, + }) + } +} + +/// Wrapper struct around a `Cursor<Vec<u8>>` +pub struct GifReader<R>(Cursor<Vec<u8>>, PhantomData<R>); +impl<R> Read for GifReader<R> { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { + if self.0.position() == 0 && buf.is_empty() { + mem::swap(buf, self.0.get_mut()); + Ok(buf.len()) + } else { + self.0.read_to_end(buf) + } + } +} + +impl<'a, R: 'a + Read> ImageDecoder<'a> for GifDecoder<R> { + type Reader = GifReader<R>; + + fn dimensions(&self) -> (u32, u32) { + ( + u32::from(self.reader.width()), + 
u32::from(self.reader.height()), + ) + } + + fn color_type(&self) -> ColorType { + ColorType::Rgba8 + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + Ok(GifReader( + Cursor::new(image::decoder_to_vec(self)?), + PhantomData, + )) + } + + fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> { + assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); + + let frame = match self + .reader + .next_frame_info() + .map_err(ImageError::from_decoding)? + { + Some(frame) => FrameInfo::new_from_frame(frame), + None => { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::NoMoreData, + ))) + } + }; + + let (width, height) = self.dimensions(); + + if frame.left == 0 + && frame.width == width + && (frame.top as u64 + frame.height as u64 <= height as u64) + { + // If the frame matches the logical screen, or, as a more general case, + // fits into it and touches its left and right borders, then + // we can directly write it into the buffer without causing line wraparound. + let line_length = usize::try_from(width) + .unwrap() + .checked_mul(self.color_type().bytes_per_pixel() as usize) + .unwrap(); + + // isolate the portion of the buffer to read the frame data into. + // the chunks above and below it are going to be zeroed. + let (blank_top, rest) = + buf.split_at_mut(line_length.checked_mul(frame.top as usize).unwrap()); + let (buf, blank_bottom) = + rest.split_at_mut(line_length.checked_mul(frame.height as usize).unwrap()); + + debug_assert_eq!(buf.len(), self.reader.buffer_size()); + + // this is only necessary in case the buffer is not zeroed + for b in blank_top { + *b = 0; + } + // fill the middle section with the frame data + self.reader + .read_into_buffer(buf) + .map_err(ImageError::from_decoding)?; + // this is only necessary in case the buffer is not zeroed + for b in blank_bottom { + *b = 0; + } + } else { + // If the frame does not match the logical screen, read into an extra buffer + // and 'insert' the frame from left/top to logical screen width/height. + let buffer_size = self.reader.buffer_size(); + + self.limits.reserve_usize(buffer_size)?; + + let mut frame_buffer = vec![0; buffer_size]; + + self.limits.free_usize(buffer_size); + + self.reader + .read_into_buffer(&mut frame_buffer[..]) + .map_err(ImageError::from_decoding)?; + + let frame_buffer = ImageBuffer::from_raw(frame.width, frame.height, frame_buffer); + let image_buffer = ImageBuffer::from_raw(width, height, buf); + + // `buffer_size` uses wrapping arithmetic, thus might not report the + // correct storage requirement if the result does not fit in `usize`. + // `ImageBuffer::from_raw` detects overflow and reports by returning `None`. 
+ if frame_buffer.is_none() || image_buffer.is_none() { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Gif.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Image dimensions ({}, {}) are too large", + frame.width, frame.height + )), + ), + )); + } + + let frame_buffer = frame_buffer.unwrap(); + let mut image_buffer = image_buffer.unwrap(); + + for (x, y, pixel) in image_buffer.enumerate_pixels_mut() { + let frame_x = x.wrapping_sub(frame.left); + let frame_y = y.wrapping_sub(frame.top); + + if frame_x < frame.width && frame_y < frame.height { + *pixel = *frame_buffer.get_pixel(frame_x, frame_y); + } else { + // this is only necessary in case the buffer is not zeroed + *pixel = Rgba([0, 0, 0, 0]); + } + } + } + + Ok(()) + } +} + +struct GifFrameIterator<R: Read> { + reader: gif::Decoder<R>, + + width: u32, + height: u32, + + non_disposed_frame: ImageBuffer<Rgba<u8>, Vec<u8>>, +} + +impl<R: Read> GifFrameIterator<R> { + fn new(decoder: GifDecoder<R>) -> GifFrameIterator<R> { + let (width, height) = decoder.dimensions(); + + // intentionally ignore the background color for web compatibility + + // create the first non disposed frame + let non_disposed_frame = ImageBuffer::from_pixel(width, height, Rgba([0, 0, 0, 0])); + + GifFrameIterator { + reader: decoder.reader, + width, + height, + non_disposed_frame, + } + } +} + +impl<R: Read> Iterator for GifFrameIterator<R> { + type Item = ImageResult<animation::Frame>; + + fn next(&mut self) -> Option<ImageResult<animation::Frame>> { + // begin looping over each frame + + let frame = match self.reader.next_frame_info() { + Ok(frame_info) => { + if let Some(frame) = frame_info { + FrameInfo::new_from_frame(frame) + } else { + // no more frames + return None; + } + } + Err(err) => return Some(Err(ImageError::from_decoding(err))), + }; + + let mut vec = vec![0; self.reader.buffer_size()]; + if let Err(err) = self.reader.read_into_buffer(&mut vec) { + return Some(Err(ImageError::from_decoding(err))); + } + + // create the image buffer from the raw frame. + // `buffer_size` uses wrapping arithmetic, thus might not report the + // correct storage requirement if the result does not fit in `usize`. + // on the other hand, `ImageBuffer::from_raw` detects overflow and + // reports by returning `None`. + let mut frame_buffer = match ImageBuffer::from_raw(frame.width, frame.height, vec) { + Some(frame_buffer) => frame_buffer, + None => { + return Some(Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Gif.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Image dimensions ({}, {}) are too large", + frame.width, frame.height + )), + ), + ))) + } + }; + + // blend the current frame with the non-disposed frame, then update + // the non-disposed frame according to the disposal method. 
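+        // In short: in each emitted frame, fully transparent pixels of the new frame
+        // show the previous composite in `non_disposed_frame`; afterwards
+        // `non_disposed_frame` is updated per the disposal method (Keep/Any keep the
+        // composited pixels, Background resets the covered pixels to transparent
+        // black, Previous leaves it untouched).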
+ fn blend_and_dispose_pixel( + dispose: DisposalMethod, + previous: &mut Rgba<u8>, + current: &mut Rgba<u8>, + ) { + let pixel_alpha = current.channels()[3]; + if pixel_alpha == 0 { + *current = *previous; + } + + match dispose { + DisposalMethod::Any | DisposalMethod::Keep => { + // do not dispose + // (keep pixels from this frame) + // note: the `Any` disposal method is underspecified in the GIF + // spec, but most viewers treat it identically to `Keep` + *previous = *current; + } + DisposalMethod::Background => { + // restore to background color + // (background shows through transparent pixels in the next frame) + *previous = Rgba([0, 0, 0, 0]); + } + DisposalMethod::Previous => { + // restore to previous + // (dispose frames leaving the last none disposal frame) + } + } + } + + // if `frame_buffer`'s frame exactly matches the entire image, then + // use it directly, else create a new buffer to hold the composited + // image. + let image_buffer = if (frame.left, frame.top) == (0, 0) + && (self.width, self.height) == frame_buffer.dimensions() + { + for (x, y, pixel) in frame_buffer.enumerate_pixels_mut() { + let previous_pixel = self.non_disposed_frame.get_pixel_mut(x, y); + blend_and_dispose_pixel(frame.disposal_method, previous_pixel, pixel); + } + frame_buffer + } else { + ImageBuffer::from_fn(self.width, self.height, |x, y| { + let frame_x = x.wrapping_sub(frame.left); + let frame_y = y.wrapping_sub(frame.top); + let previous_pixel = self.non_disposed_frame.get_pixel_mut(x, y); + + if frame_x < frame_buffer.width() && frame_y < frame_buffer.height() { + let mut pixel = *frame_buffer.get_pixel(frame_x, frame_y); + blend_and_dispose_pixel(frame.disposal_method, previous_pixel, &mut pixel); + pixel + } else { + // out of bounds, return pixel from previous frame + *previous_pixel + } + }) + }; + + Some(Ok(animation::Frame::from_parts( + image_buffer, + 0, + 0, + frame.delay, + ))) + } +} + +impl<'a, R: Read + 'a> AnimationDecoder<'a> for GifDecoder<R> { + fn into_frames(self) -> animation::Frames<'a> { + animation::Frames::new(Box::new(GifFrameIterator::new(self))) + } +} + +struct FrameInfo { + left: u32, + top: u32, + width: u32, + height: u32, + disposal_method: DisposalMethod, + delay: animation::Delay, +} + +impl FrameInfo { + fn new_from_frame(frame: &Frame) -> FrameInfo { + FrameInfo { + left: u32::from(frame.left), + top: u32::from(frame.top), + width: u32::from(frame.width), + height: u32::from(frame.height), + disposal_method: frame.dispose, + // frame.delay is in units of 10ms so frame.delay*10 is in ms + delay: animation::Delay::from_ratio(Ratio::new(u32::from(frame.delay) * 10, 1)), + } + } +} + +/// Number of repetitions for a GIF animation +#[derive(Clone, Copy, Debug)] +pub enum Repeat { + /// Finite number of repetitions + Finite(u16), + /// Looping GIF + Infinite, +} + +impl Repeat { + pub(crate) fn to_gif_enum(&self) -> gif::Repeat { + match self { + Repeat::Finite(n) => gif::Repeat::Finite(*n), + Repeat::Infinite => gif::Repeat::Infinite, + } + } +} + +/// GIF encoder. +pub struct GifEncoder<W: Write> { + w: Option<W>, + gif_encoder: Option<gif::Encoder<W>>, + speed: i32, + repeat: Option<Repeat>, +} + +impl<W: Write> GifEncoder<W> { + /// Creates a new GIF encoder with a speed of 1. This prioritizes quality over performance at any cost. + pub fn new(w: W) -> GifEncoder<W> { + Self::new_with_speed(w, 1) + } + + /// Create a new GIF encoder, and has the speed parameter `speed`. 
See + /// [`Frame::from_rgba_speed`](https://docs.rs/gif/latest/gif/struct.Frame.html#method.from_rgba_speed) + /// for more information. + pub fn new_with_speed(w: W, speed: i32) -> GifEncoder<W> { + assert!( + (1..=30).contains(&speed), + "speed needs to be in the range [1, 30]" + ); + GifEncoder { + w: Some(w), + gif_encoder: None, + speed, + repeat: None, + } + } + + /// Set the repeat behaviour of the encoded GIF + pub fn set_repeat(&mut self, repeat: Repeat) -> ImageResult<()> { + if let Some(ref mut encoder) = self.gif_encoder { + encoder + .set_repeat(repeat.to_gif_enum()) + .map_err(ImageError::from_encoding)?; + } + self.repeat = Some(repeat); + Ok(()) + } + + /// Encode a single image. + pub fn encode( + &mut self, + data: &[u8], + width: u32, + height: u32, + color: ColorType, + ) -> ImageResult<()> { + let (width, height) = self.gif_dimensions(width, height)?; + match color { + ColorType::Rgb8 => self.encode_gif(Frame::from_rgb(width, height, data)), + ColorType::Rgba8 => { + self.encode_gif(Frame::from_rgba(width, height, &mut data.to_owned())) + } + _ => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Gif.into(), + UnsupportedErrorKind::Color(color.into()), + ), + )), + } + } + + /// Encode one frame of animation. + pub fn encode_frame(&mut self, img_frame: animation::Frame) -> ImageResult<()> { + let frame = self.convert_frame(img_frame)?; + self.encode_gif(frame) + } + + /// Encodes Frames. + /// Consider using `try_encode_frames` instead to encode an `animation::Frames` like iterator. + pub fn encode_frames<F>(&mut self, frames: F) -> ImageResult<()> + where + F: IntoIterator<Item = animation::Frame>, + { + for img_frame in frames { + self.encode_frame(img_frame)?; + } + Ok(()) + } + + /// Try to encode a collection of `ImageResult<animation::Frame>` objects. + /// Use this function to encode an `animation::Frames` like iterator. + /// Whenever an `Err` item is encountered, that value is returned without further actions. + pub fn try_encode_frames<F>(&mut self, frames: F) -> ImageResult<()> + where + F: IntoIterator<Item = ImageResult<animation::Frame>>, + { + for img_frame in frames { + self.encode_frame(img_frame?)?; + } + Ok(()) + } + + pub(crate) fn convert_frame( + &mut self, + img_frame: animation::Frame, + ) -> ImageResult<Frame<'static>> { + // get the delay before converting img_frame + let frame_delay = img_frame.delay().into_ratio().to_integer(); + // convert img_frame into RgbaImage + let mut rbga_frame = img_frame.into_buffer(); + let (width, height) = self.gif_dimensions(rbga_frame.width(), rbga_frame.height())?; + + // Create the gif::Frame from the animation::Frame + let mut frame = Frame::from_rgba_speed(width, height, &mut rbga_frame, self.speed); + // Saturate the conversion to u16::MAX instead of returning an error as that + // would require a new special cased variant in ParameterErrorKind which most + // likely couldn't be reused for other cases. This isn't a bad trade-off given + // that the current algorithm is already lossy. + frame.delay = (frame_delay / 10).try_into().unwrap_or(std::u16::MAX); + + Ok(frame) + } + + fn gif_dimensions(&self, width: u32, height: u32) -> ImageResult<(u16, u16)> { + fn inner_dimensions(width: u32, height: u32) -> Option<(u16, u16)> { + let width = u16::try_from(width).ok()?; + let height = u16::try_from(height).ok()?; + Some((width, height)) + } + + // TODO: this is not very idiomatic yet. Should return an EncodingError. 
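+        // (GIF stores frame dimensions as u16, so widths or heights above 65_535
+        // simply cannot be represented and are rejected here.)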
+ inner_dimensions(width, height).ok_or_else(|| { + ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + )) + }) + } + + pub(crate) fn encode_gif(&mut self, mut frame: Frame) -> ImageResult<()> { + let gif_encoder; + if let Some(ref mut encoder) = self.gif_encoder { + gif_encoder = encoder; + } else { + let writer = self.w.take().unwrap(); + let mut encoder = gif::Encoder::new(writer, frame.width, frame.height, &[]) + .map_err(ImageError::from_encoding)?; + if let Some(ref repeat) = self.repeat { + encoder + .set_repeat(repeat.to_gif_enum()) + .map_err(ImageError::from_encoding)?; + } + self.gif_encoder = Some(encoder); + gif_encoder = self.gif_encoder.as_mut().unwrap() + } + + frame.dispose = gif::DisposalMethod::Background; + + gif_encoder + .write_frame(&frame) + .map_err(ImageError::from_encoding) + } +} + +impl ImageError { + fn from_decoding(err: gif::DecodingError) -> ImageError { + use gif::DecodingError::*; + match err { + err @ Format(_) => { + ImageError::Decoding(DecodingError::new(ImageFormat::Gif.into(), err)) + } + Io(io_err) => ImageError::IoError(io_err), + } + } + + fn from_encoding(err: gif::EncodingError) -> ImageError { + use gif::EncodingError::*; + match err { + err @ Format(_) => { + ImageError::Encoding(EncodingError::new(ImageFormat::Gif.into(), err)) + } + Io(io_err) => ImageError::IoError(io_err), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn frames_exceeding_logical_screen_size() { + // This is a gif with 10x10 logical screen, but a 16x16 frame + 6px offset inside. + let data = vec![ + 0x47, 0x49, 0x46, 0x38, 0x39, 0x61, 0x0A, 0x00, 0x0A, 0x00, 0xF0, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x0E, 0xFF, 0x1F, 0x21, 0xF9, 0x04, 0x09, 0x64, 0x00, 0x00, 0x00, 0x2C, + 0x06, 0x00, 0x06, 0x00, 0x10, 0x00, 0x10, 0x00, 0x00, 0x02, 0x23, 0x84, 0x8F, 0xA9, + 0xBB, 0xE1, 0xE8, 0x42, 0x8A, 0x0F, 0x50, 0x79, 0xAE, 0xD1, 0xF9, 0x7A, 0xE8, 0x71, + 0x5B, 0x48, 0x81, 0x64, 0xD5, 0x91, 0xCA, 0x89, 0x4D, 0x21, 0x63, 0x89, 0x4C, 0x09, + 0x77, 0xF5, 0x6D, 0x14, 0x00, 0x3B, + ]; + + let decoder = GifDecoder::new(Cursor::new(data)).unwrap(); + let mut buf = vec![0u8; decoder.total_bytes() as usize]; + + assert!(decoder.read_image(&mut buf).is_ok()); + } +} diff --git a/vendor/image/src/codecs/hdr/decoder.rs b/vendor/image/src/codecs/hdr/decoder.rs new file mode 100644 index 0000000..8329d57 --- /dev/null +++ b/vendor/image/src/codecs/hdr/decoder.rs @@ -0,0 +1,1033 @@ +use crate::Primitive; +use num_traits::identities::Zero; +#[cfg(test)] +use std::borrow::Cow; +use std::convert::TryFrom; +use std::io::{self, BufRead, Cursor, Read, Seek}; +use std::iter::Iterator; +use std::marker::PhantomData; +use std::num::{ParseFloatError, ParseIntError}; +use std::path::Path; +use std::{error, fmt, mem}; + +use crate::color::{ColorType, Rgb}; +use crate::error::{ + DecodingError, ImageError, ImageFormatHint, ImageResult, ParameterError, ParameterErrorKind, + UnsupportedError, UnsupportedErrorKind, +}; +use crate::image::{self, ImageDecoder, ImageDecoderRect, ImageFormat, Progress}; + +/// Errors that can occur during decoding and parsing of a HDR image +#[derive(Debug, Clone, PartialEq, Eq)] +enum DecoderError { + /// HDR's "#?RADIANCE" signature wrong or missing + RadianceHdrSignatureInvalid, + /// EOF before end of header + TruncatedHeader, + /// EOF instead of image dimensions + TruncatedDimensions, + + /// A value couldn't be parsed + UnparsableF32(LineType, ParseFloatError), + /// A value couldn't be parsed + UnparsableU32(LineType, 
ParseIntError), + /// Not enough numbers in line + LineTooShort(LineType), + + /// COLORCORR contains too many numbers in strict mode + ExtraneousColorcorrNumbers, + + /// Dimensions line had too few elements + DimensionsLineTooShort(usize, usize), + /// Dimensions line had too many elements + DimensionsLineTooLong(usize), + + /// The length of a scanline (1) wasn't a match for the specified length (2) + WrongScanlineLength(usize, usize), + /// First pixel of a scanline is a run length marker + FirstPixelRlMarker, +} + +impl fmt::Display for DecoderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DecoderError::RadianceHdrSignatureInvalid => { + f.write_str("Radiance HDR signature not found") + } + DecoderError::TruncatedHeader => f.write_str("EOF in header"), + DecoderError::TruncatedDimensions => f.write_str("EOF in dimensions line"), + DecoderError::UnparsableF32(line, pe) => { + f.write_fmt(format_args!("Cannot parse {} value as f32: {}", line, pe)) + } + DecoderError::UnparsableU32(line, pe) => { + f.write_fmt(format_args!("Cannot parse {} value as u32: {}", line, pe)) + } + DecoderError::LineTooShort(line) => { + f.write_fmt(format_args!("Not enough numbers in {}", line)) + } + DecoderError::ExtraneousColorcorrNumbers => f.write_str("Extra numbers in COLORCORR"), + DecoderError::DimensionsLineTooShort(elements, expected) => f.write_fmt(format_args!( + "Dimensions line too short: have {} elements, expected {}", + elements, expected + )), + DecoderError::DimensionsLineTooLong(expected) => f.write_fmt(format_args!( + "Dimensions line too long, expected {} elements", + expected + )), + DecoderError::WrongScanlineLength(len, expected) => f.write_fmt(format_args!( + "Wrong length of decoded scanline: got {}, expected {}", + len, expected + )), + DecoderError::FirstPixelRlMarker => { + f.write_str("First pixel of a scanline shouldn't be run length marker") + } + } + } +} + +impl From<DecoderError> for ImageError { + fn from(e: DecoderError) -> ImageError { + ImageError::Decoding(DecodingError::new(ImageFormat::Hdr.into(), e)) + } +} + +impl error::Error for DecoderError { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + match self { + DecoderError::UnparsableF32(_, err) => Some(err), + DecoderError::UnparsableU32(_, err) => Some(err), + _ => None, + } + } +} + +/// Lines which contain parsable data that can fail +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +enum LineType { + Exposure, + Pixaspect, + Colorcorr, + DimensionsHeight, + DimensionsWidth, +} + +impl fmt::Display for LineType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(match self { + LineType::Exposure => "EXPOSURE", + LineType::Pixaspect => "PIXASPECT", + LineType::Colorcorr => "COLORCORR", + LineType::DimensionsHeight => "height dimension", + LineType::DimensionsWidth => "width dimension", + }) + } +} + +/// Adapter to conform to `ImageDecoder` trait +#[derive(Debug)] +pub struct HdrAdapter<R: Read> { + inner: Option<HdrDecoder<R>>, + // data: Option<Vec<u8>>, + meta: HdrMetadata, +} + +impl<R: BufRead> HdrAdapter<R> { + /// Creates adapter + pub fn new(r: R) -> ImageResult<HdrAdapter<R>> { + let decoder = HdrDecoder::new(r)?; + let meta = decoder.metadata(); + Ok(HdrAdapter { + inner: Some(decoder), + meta, + }) + } + + /// Allows reading old Radiance HDR images + pub fn new_nonstrict(r: R) -> ImageResult<HdrAdapter<R>> { + let decoder = HdrDecoder::with_strictness(r, false)?; + let meta = decoder.metadata(); + 
Ok(HdrAdapter { + inner: Some(decoder), + meta, + }) + } + + /// Read the actual data of the image, and store it in Self::data. + fn read_image_data(&mut self, buf: &mut [u8]) -> ImageResult<()> { + assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); + match self.inner.take() { + Some(decoder) => { + let img: Vec<Rgb<u8>> = decoder.read_image_ldr()?; + for (i, Rgb(data)) in img.into_iter().enumerate() { + buf[(i * 3)..][..3].copy_from_slice(&data); + } + + Ok(()) + } + None => Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::NoMoreData, + ))), + } + } +} + +/// Wrapper struct around a `Cursor<Vec<u8>>` +pub struct HdrReader<R>(Cursor<Vec<u8>>, PhantomData<R>); +impl<R> Read for HdrReader<R> { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { + if self.0.position() == 0 && buf.is_empty() { + mem::swap(buf, self.0.get_mut()); + Ok(buf.len()) + } else { + self.0.read_to_end(buf) + } + } +} + +impl<'a, R: 'a + BufRead> ImageDecoder<'a> for HdrAdapter<R> { + type Reader = HdrReader<R>; + + fn dimensions(&self) -> (u32, u32) { + (self.meta.width, self.meta.height) + } + + fn color_type(&self) -> ColorType { + ColorType::Rgb8 + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + Ok(HdrReader( + Cursor::new(image::decoder_to_vec(self)?), + PhantomData, + )) + } + + fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> { + self.read_image_data(buf) + } +} + +impl<'a, R: 'a + BufRead + Seek> ImageDecoderRect<'a> for HdrAdapter<R> { + fn read_rect_with_progress<F: Fn(Progress)>( + &mut self, + x: u32, + y: u32, + width: u32, + height: u32, + buf: &mut [u8], + progress_callback: F, + ) -> ImageResult<()> { + image::load_rect( + x, + y, + width, + height, + buf, + progress_callback, + self, + |_, _| unreachable!(), + |s, buf| s.read_image_data(buf), + ) + } +} + +/// Radiance HDR file signature +pub const SIGNATURE: &[u8] = b"#?RADIANCE"; +const SIGNATURE_LENGTH: usize = 10; + +/// An Radiance HDR decoder +#[derive(Debug)] +pub struct HdrDecoder<R> { + r: R, + width: u32, + height: u32, + meta: HdrMetadata, +} + +/// Refer to [wikipedia](https://en.wikipedia.org/wiki/RGBE_image_format) +#[repr(C)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub struct Rgbe8Pixel { + /// Color components + pub c: [u8; 3], + /// Exponent + pub e: u8, +} + +/// Creates `Rgbe8Pixel` from components +pub fn rgbe8(r: u8, g: u8, b: u8, e: u8) -> Rgbe8Pixel { + Rgbe8Pixel { c: [r, g, b], e } +} + +impl Rgbe8Pixel { + /// Converts `Rgbe8Pixel` into `Rgb<f32>` linearly + #[inline] + pub fn to_hdr(self) -> Rgb<f32> { + if self.e == 0 { + Rgb([0.0, 0.0, 0.0]) + } else { + // let exp = f32::ldexp(1., self.e as isize - (128 + 8)); // unstable + let exp = f32::exp2(<f32 as From<_>>::from(self.e) - (128.0 + 8.0)); + Rgb([ + exp * <f32 as From<_>>::from(self.c[0]), + exp * <f32 as From<_>>::from(self.c[1]), + exp * <f32 as From<_>>::from(self.c[2]), + ]) + } + } + + /// Converts `Rgbe8Pixel` into `Rgb<T>` with scale=1 and gamma=2.2 + /// + /// color_ldr = (color_hdr*scale)<sup>gamma</sup> + /// + /// # Panic + /// + /// Panics when `T::max_value()` cannot be represented as f32. 
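+    /// (With the default scale and gamma, a linear HDR component of 1.0 maps to
+    /// `T::max_value()`.)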
+ #[inline] + pub fn to_ldr<T: Primitive + Zero>(self) -> Rgb<T> { + self.to_ldr_scale_gamma(1.0, 2.2) + } + + /// Converts `Rgbe8Pixel` into `Rgb<T>` using provided scale and gamma + /// + /// color_ldr = (color_hdr*scale)<sup>gamma</sup> + /// + /// # Panic + /// + /// Panics when `T::max_value()` cannot be represented as f32. + /// Panics when scale or gamma is NaN + #[inline] + pub fn to_ldr_scale_gamma<T: Primitive + Zero>(self, scale: f32, gamma: f32) -> Rgb<T> { + let Rgb(data) = self.to_hdr(); + let (r, g, b) = (data[0], data[1], data[2]); + #[inline] + fn sg<T: Primitive + Zero>(v: f32, scale: f32, gamma: f32) -> T { + let t_max = T::max_value(); + // Disassembly shows that t_max_f32 is compiled into constant + let t_max_f32: f32 = num_traits::NumCast::from(t_max) + .expect("to_ldr_scale_gamma: maximum value of type is not representable as f32"); + let fv = f32::powf(v * scale, gamma) * t_max_f32 + 0.5; + if fv < 0.0 { + T::zero() + } else if fv > t_max_f32 { + t_max + } else { + num_traits::NumCast::from(fv) + .expect("to_ldr_scale_gamma: cannot convert f32 to target type. NaN?") + } + } + Rgb([ + sg(r, scale, gamma), + sg(g, scale, gamma), + sg(b, scale, gamma), + ]) + } +} + +impl<R: BufRead> HdrDecoder<R> { + /// Reads Radiance HDR image header from stream `r` + /// if the header is valid, creates HdrDecoder + /// strict mode is enabled + pub fn new(reader: R) -> ImageResult<HdrDecoder<R>> { + HdrDecoder::with_strictness(reader, true) + } + + /// Reads Radiance HDR image header from stream `reader`, + /// if the header is valid, creates `HdrDecoder`. + /// + /// strict enables strict mode + /// + /// Warning! Reading wrong file in non-strict mode + /// could consume file size worth of memory in the process. + pub fn with_strictness(mut reader: R, strict: bool) -> ImageResult<HdrDecoder<R>> { + let mut attributes = HdrMetadata::new(); + + { + // scope to make borrowck happy + let r = &mut reader; + if strict { + let mut signature = [0; SIGNATURE_LENGTH]; + r.read_exact(&mut signature)?; + if signature != SIGNATURE { + return Err(DecoderError::RadianceHdrSignatureInvalid.into()); + } // no else + // skip signature line ending + read_line_u8(r)?; + } else { + // Old Radiance HDR files (*.pic) don't use signature + // Let them be parsed in non-strict mode + } + // read header data until empty line + loop { + match read_line_u8(r)? { + None => { + // EOF before end of header + return Err(DecoderError::TruncatedHeader.into()); + } + Some(line) => { + if line.is_empty() { + // end of header + break; + } else if line[0] == b'#' { + // line[0] will not panic, line.len() == 0 is false here + // skip comments + continue; + } // no else + // process attribute line + let line = String::from_utf8_lossy(&line[..]); + attributes.update_header_info(&line, strict)?; + } // <= Some(line) + } // match read_line_u8() + } // loop + } // scope to end borrow of reader + // parse dimensions + let (width, height) = match read_line_u8(&mut reader)? { + None => { + // EOF instead of image dimensions + return Err(DecoderError::TruncatedDimensions.into()); + } + Some(dimensions) => { + let dimensions = String::from_utf8_lossy(&dimensions[..]); + parse_dimensions_line(&dimensions, strict)? 
+ } + }; + + // color type is always rgb8 + if crate::utils::check_dimension_overflow(width, height, ColorType::Rgb8.bytes_per_pixel()) + { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Hdr.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Image dimensions ({}x{}) are too large", + width, height + )), + ), + )); + } + + Ok(HdrDecoder { + r: reader, + + width, + height, + meta: HdrMetadata { + width, + height, + ..attributes + }, + }) + } // end with_strictness + + /// Returns file metadata. Refer to `HdrMetadata` for details. + pub fn metadata(&self) -> HdrMetadata { + self.meta.clone() + } + + /// Consumes decoder and returns a vector of RGBE8 pixels + pub fn read_image_native(mut self) -> ImageResult<Vec<Rgbe8Pixel>> { + // Don't read anything if image is empty + if self.width == 0 || self.height == 0 { + return Ok(vec![]); + } + // expression self.width > 0 && self.height > 0 is true from now to the end of this method + let pixel_count = self.width as usize * self.height as usize; + let mut ret = vec![Default::default(); pixel_count]; + for chunk in ret.chunks_mut(self.width as usize) { + read_scanline(&mut self.r, chunk)?; + } + Ok(ret) + } + + /// Consumes decoder and returns a vector of transformed pixels + pub fn read_image_transform<T: Send, F: Send + Sync + Fn(Rgbe8Pixel) -> T>( + mut self, + f: F, + output_slice: &mut [T], + ) -> ImageResult<()> { + assert_eq!( + output_slice.len(), + self.width as usize * self.height as usize + ); + + // Don't read anything if image is empty + if self.width == 0 || self.height == 0 { + return Ok(()); + } + + let chunks_iter = output_slice.chunks_mut(self.width as usize); + + let mut buf = vec![Default::default(); self.width as usize]; + for chunk in chunks_iter { + // read_scanline overwrites the entire buffer or returns an Err, + // so not resetting the buffer here is ok. + read_scanline(&mut self.r, &mut buf[..])?; + for (dst, &pix) in chunk.iter_mut().zip(buf.iter()) { + *dst = f(pix); + } + } + Ok(()) + } + + /// Consumes decoder and returns a vector of `Rgb<u8>` pixels. + /// scale = 1, gamma = 2.2 + pub fn read_image_ldr(self) -> ImageResult<Vec<Rgb<u8>>> { + let mut ret = vec![Rgb([0, 0, 0]); self.width as usize * self.height as usize]; + self.read_image_transform(|pix| pix.to_ldr(), &mut ret[..])?; + Ok(ret) + } + + /// Consumes decoder and returns a vector of `Rgb<f32>` pixels. 
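+    /// The returned values are linear (no gamma is applied) and are not divided
+    /// by the `EXPOSURE` header value; see `metadata()` for that information.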
+ /// + pub fn read_image_hdr(self) -> ImageResult<Vec<Rgb<f32>>> { + let mut ret = vec![Rgb([0.0, 0.0, 0.0]); self.width as usize * self.height as usize]; + self.read_image_transform(|pix| pix.to_hdr(), &mut ret[..])?; + Ok(ret) + } +} + +impl<R: Read> IntoIterator for HdrDecoder<R> { + type Item = ImageResult<Rgbe8Pixel>; + type IntoIter = HdrImageDecoderIterator<R>; + + fn into_iter(self) -> Self::IntoIter { + HdrImageDecoderIterator { + r: self.r, + scanline_cnt: self.height as usize, + buf: vec![Default::default(); self.width as usize], + col: 0, + scanline: 0, + trouble: true, // make first call to `next()` read scanline + error_encountered: false, + } + } +} + +/// Scanline buffered pixel by pixel iterator +pub struct HdrImageDecoderIterator<R: Read> { + r: R, + scanline_cnt: usize, + buf: Vec<Rgbe8Pixel>, // scanline buffer + col: usize, // current position in scanline + scanline: usize, // current scanline + trouble: bool, // optimization, true indicates that we need to check something + error_encountered: bool, +} + +impl<R: Read> HdrImageDecoderIterator<R> { + // Advances counter to the next pixel + #[inline] + fn advance(&mut self) { + self.col += 1; + if self.col == self.buf.len() { + self.col = 0; + self.scanline += 1; + self.trouble = true; + } + } +} + +impl<R: Read> Iterator for HdrImageDecoderIterator<R> { + type Item = ImageResult<Rgbe8Pixel>; + + fn next(&mut self) -> Option<Self::Item> { + if !self.trouble { + let ret = self.buf[self.col]; + self.advance(); + Some(Ok(ret)) + } else { + // some condition is pending + if self.buf.is_empty() || self.scanline == self.scanline_cnt { + // No more pixels + return None; + } // no else + if self.error_encountered { + self.advance(); + // Error was encountered. Keep producing errors. + // ImageError can't implement Clone, so just dump some error + return Some(Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::FailedAlready, + )))); + } // no else + if self.col == 0 { + // fill scanline buffer + match read_scanline(&mut self.r, &mut self.buf[..]) { + Ok(_) => { + // no action required + } + Err(err) => { + self.advance(); + self.error_encountered = true; + self.trouble = true; + return Some(Err(err)); + } + } + } // no else + self.trouble = false; + let ret = self.buf[0]; + self.advance(); + Some(Ok(ret)) + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let total_cnt = self.buf.len() * self.scanline_cnt; + let cur_cnt = self.buf.len() * self.scanline + self.col; + let remaining = total_cnt - cur_cnt; + (remaining, Some(remaining)) + } +} + +impl<R: Read> ExactSizeIterator for HdrImageDecoderIterator<R> {} + +// Precondition: buf.len() > 0 +fn read_scanline<R: Read>(r: &mut R, buf: &mut [Rgbe8Pixel]) -> ImageResult<()> { + assert!(!buf.is_empty()); + let width = buf.len(); + // first 4 bytes in scanline allow to determine compression method + let fb = read_rgbe(r)?; + if fb.c[0] == 2 && fb.c[1] == 2 && fb.c[2] < 128 { + // denormalized pixel value (2,2,<128,_) indicates new per component RLE method + // decode_component guarantees that offset is within 0 .. 
width + // therefore we can skip bounds checking here, but we will not + decode_component(r, width, |offset, value| buf[offset].c[0] = value)?; + decode_component(r, width, |offset, value| buf[offset].c[1] = value)?; + decode_component(r, width, |offset, value| buf[offset].c[2] = value)?; + decode_component(r, width, |offset, value| buf[offset].e = value)?; + } else { + // old RLE method (it was considered old around 1991, should it be here?) + decode_old_rle(r, fb, buf)?; + } + Ok(()) +} + +#[inline(always)] +fn read_byte<R: Read>(r: &mut R) -> io::Result<u8> { + let mut buf = [0u8]; + r.read_exact(&mut buf[..])?; + Ok(buf[0]) +} + +// Guarantees that first parameter of set_component will be within pos .. pos+width +#[inline] +fn decode_component<R: Read, S: FnMut(usize, u8)>( + r: &mut R, + width: usize, + mut set_component: S, +) -> ImageResult<()> { + let mut buf = [0; 128]; + let mut pos = 0; + while pos < width { + // increment position by a number of decompressed values + pos += { + let rl = read_byte(r)?; + if rl <= 128 { + // sanity check + if pos + rl as usize > width { + return Err(DecoderError::WrongScanlineLength(pos + rl as usize, width).into()); + } + // read values + r.read_exact(&mut buf[0..rl as usize])?; + for (offset, &value) in buf[0..rl as usize].iter().enumerate() { + set_component(pos + offset, value); + } + rl as usize + } else { + // run + let rl = rl - 128; + // sanity check + if pos + rl as usize > width { + return Err(DecoderError::WrongScanlineLength(pos + rl as usize, width).into()); + } + // fill with same value + let value = read_byte(r)?; + for offset in 0..rl as usize { + set_component(pos + offset, value); + } + rl as usize + } + }; + } + if pos != width { + return Err(DecoderError::WrongScanlineLength(pos, width).into()); + } + Ok(()) +} + +// Decodes scanline, places it into buf +// Precondition: buf.len() > 0 +// fb - first 4 bytes of scanline +fn decode_old_rle<R: Read>(r: &mut R, fb: Rgbe8Pixel, buf: &mut [Rgbe8Pixel]) -> ImageResult<()> { + assert!(!buf.is_empty()); + let width = buf.len(); + // convenience function. + // returns run length if pixel is a run length marker + #[inline] + fn rl_marker(pix: Rgbe8Pixel) -> Option<usize> { + if pix.c == [1, 1, 1] { + Some(pix.e as usize) + } else { + None + } + } + // first pixel in scanline should not be run length marker + // it is error if it is + if rl_marker(fb).is_some() { + return Err(DecoderError::FirstPixelRlMarker.into()); + } + buf[0] = fb; // set first pixel of scanline + + let mut x_off = 1; // current offset from beginning of a scanline + let mut rl_mult = 1; // current run length multiplier + let mut prev_pixel = fb; + while x_off < width { + let pix = read_rgbe(r)?; + // it's harder to forget to increase x_off if I write this this way. 
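+    // (A pixel of the form (1, 1, 1, n) is such a marker: it repeats the previous
+    // pixel n times, and each additional consecutive marker scales its count by
+    // 256, so (1,1,1,a) followed by (1,1,1,b) expands to a + 256 * b copies.)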
+ x_off += { + if let Some(rl) = rl_marker(pix) { + // rl_mult takes care of consecutive RL markers + let rl = rl * rl_mult; + rl_mult *= 256; + if x_off + rl <= width { + // do run + for b in &mut buf[x_off..x_off + rl] { + *b = prev_pixel; + } + } else { + return Err(DecoderError::WrongScanlineLength(x_off + rl, width).into()); + }; + rl // value to increase x_off by + } else { + rl_mult = 1; // chain of consecutive RL markers is broken + prev_pixel = pix; + buf[x_off] = pix; + 1 // value to increase x_off by + } + }; + } + if x_off != width { + return Err(DecoderError::WrongScanlineLength(x_off, width).into()); + } + Ok(()) +} + +fn read_rgbe<R: Read>(r: &mut R) -> io::Result<Rgbe8Pixel> { + let mut buf = [0u8; 4]; + r.read_exact(&mut buf[..])?; + Ok(Rgbe8Pixel { + c: [buf[0], buf[1], buf[2]], + e: buf[3], + }) +} + +/// Metadata for Radiance HDR image +#[derive(Debug, Clone)] +pub struct HdrMetadata { + /// Width of decoded image. It could be either scanline length, + /// or scanline count, depending on image orientation. + pub width: u32, + /// Height of decoded image. It depends on orientation too. + pub height: u32, + /// Orientation matrix. For standard orientation it is ((1,0),(0,1)) - left to right, top to bottom. + /// First pair tells how resulting pixel coordinates change along a scanline. + /// Second pair tells how they change from one scanline to the next. + pub orientation: ((i8, i8), (i8, i8)), + /// Divide color values by exposure to get to get physical radiance in + /// watts/steradian/m<sup>2</sup> + /// + /// Image may not contain physical data, even if this field is set. + pub exposure: Option<f32>, + /// Divide color values by corresponding tuple member (r, g, b) to get to get physical radiance + /// in watts/steradian/m<sup>2</sup> + /// + /// Image may not contain physical data, even if this field is set. + pub color_correction: Option<(f32, f32, f32)>, + /// Pixel height divided by pixel width + pub pixel_aspect_ratio: Option<f32>, + /// All lines contained in image header are put here. Ordering of lines is preserved. + /// Lines in the form "key=value" are represented as ("key", "value"). 
+ /// All other lines are ("", "line") + pub custom_attributes: Vec<(String, String)>, +} + +impl HdrMetadata { + fn new() -> HdrMetadata { + HdrMetadata { + width: 0, + height: 0, + orientation: ((1, 0), (0, 1)), + exposure: None, + color_correction: None, + pixel_aspect_ratio: None, + custom_attributes: vec![], + } + } + + // Updates header info, in strict mode returns error for malformed lines (no '=' separator) + // unknown attributes are skipped + fn update_header_info(&mut self, line: &str, strict: bool) -> ImageResult<()> { + // split line at first '=' + // old Radiance HDR files (*.pic) feature tabs in key, so vvv trim + let maybe_key_value = split_at_first(line, "=").map(|(key, value)| (key.trim(), value)); + // save all header lines in custom_attributes + match maybe_key_value { + Some((key, val)) => self + .custom_attributes + .push((key.to_owned(), val.to_owned())), + None => self.custom_attributes.push(("".into(), line.to_owned())), + } + // parse known attributes + match maybe_key_value { + Some(("FORMAT", val)) => { + if val.trim() != "32-bit_rle_rgbe" { + // XYZE isn't supported yet + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Hdr.into(), + UnsupportedErrorKind::Format(ImageFormatHint::Name(limit_string_len( + val, 20, + ))), + ), + )); + } + } + Some(("EXPOSURE", val)) => { + match val.trim().parse::<f32>() { + Ok(v) => { + self.exposure = Some(self.exposure.unwrap_or(1.0) * v); // all encountered exposure values should be multiplied + } + Err(parse_error) => { + if strict { + return Err(DecoderError::UnparsableF32( + LineType::Exposure, + parse_error, + ) + .into()); + } // no else, skip this line in non-strict mode + } + }; + } + Some(("PIXASPECT", val)) => { + match val.trim().parse::<f32>() { + Ok(v) => { + self.pixel_aspect_ratio = Some(self.pixel_aspect_ratio.unwrap_or(1.0) * v); + // all encountered exposure values should be multiplied + } + Err(parse_error) => { + if strict { + return Err(DecoderError::UnparsableF32( + LineType::Pixaspect, + parse_error, + ) + .into()); + } // no else, skip this line in non-strict mode + } + }; + } + Some(("COLORCORR", val)) => { + let mut rgbcorr = [1.0, 1.0, 1.0]; + match parse_space_separated_f32(val, &mut rgbcorr, LineType::Colorcorr) { + Ok(extra_numbers) => { + if strict && extra_numbers { + return Err(DecoderError::ExtraneousColorcorrNumbers.into()); + } // no else, just ignore extra numbers + let (rc, gc, bc) = self.color_correction.unwrap_or((1.0, 1.0, 1.0)); + self.color_correction = + Some((rc * rgbcorr[0], gc * rgbcorr[1], bc * rgbcorr[2])); + } + Err(err) => { + if strict { + return Err(err); + } // no else, skip malformed line in non-strict mode + } + } + } + None => { + // old Radiance HDR files (*.pic) contain commands in a header + // just skip them + } + _ => { + // skip unknown attribute + } + } // match attributes + Ok(()) + } +} + +fn parse_space_separated_f32(line: &str, vals: &mut [f32], line_tp: LineType) -> ImageResult<bool> { + let mut nums = line.split_whitespace(); + for val in vals.iter_mut() { + if let Some(num) = nums.next() { + match num.parse::<f32>() { + Ok(v) => *val = v, + Err(err) => return Err(DecoderError::UnparsableF32(line_tp, err).into()), + } + } else { + // not enough numbers in line + return Err(DecoderError::LineTooShort(line_tp).into()); + } + } + Ok(nums.next().is_some()) +} + +// Parses dimension line "-Y height +X width" +// returns (width, height) or error +fn parse_dimensions_line(line: &str, strict: bool) -> ImageResult<(u32, 
u32)> { + const DIMENSIONS_COUNT: usize = 4; + + let mut dim_parts = line.split_whitespace(); + let c1_tag = dim_parts + .next() + .ok_or(DecoderError::DimensionsLineTooShort(0, DIMENSIONS_COUNT))?; + let c1_str = dim_parts + .next() + .ok_or(DecoderError::DimensionsLineTooShort(1, DIMENSIONS_COUNT))?; + let c2_tag = dim_parts + .next() + .ok_or(DecoderError::DimensionsLineTooShort(2, DIMENSIONS_COUNT))?; + let c2_str = dim_parts + .next() + .ok_or(DecoderError::DimensionsLineTooShort(3, DIMENSIONS_COUNT))?; + if strict && dim_parts.next().is_some() { + // extra data in dimensions line + return Err(DecoderError::DimensionsLineTooLong(DIMENSIONS_COUNT).into()); + } // no else + // dimensions line is in the form "-Y 10 +X 20" + // There are 8 possible orientations: +Y +X, +X -Y and so on + match (c1_tag, c2_tag) { + ("-Y", "+X") => { + // Common orientation (left-right, top-down) + // c1_str is height, c2_str is width + let height = c1_str + .parse::<u32>() + .map_err(|pe| DecoderError::UnparsableU32(LineType::DimensionsHeight, pe))?; + let width = c2_str + .parse::<u32>() + .map_err(|pe| DecoderError::UnparsableU32(LineType::DimensionsWidth, pe))?; + Ok((width, height)) + } + _ => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Hdr.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Orientation {} {}", + limit_string_len(c1_tag, 4), + limit_string_len(c2_tag, 4) + )), + ), + )), + } // final expression. Returns value +} + +// Returns string with no more than len+3 characters +fn limit_string_len(s: &str, len: usize) -> String { + let s_char_len = s.chars().count(); + if s_char_len > len { + s.chars().take(len).chain("...".chars()).collect() + } else { + s.into() + } +} + +// Splits string into (before separator, after separator) tuple +// or None if separator isn't found +fn split_at_first<'a>(s: &'a str, separator: &str) -> Option<(&'a str, &'a str)> { + match s.find(separator) { + None | Some(0) => None, + Some(p) if p >= s.len() - separator.len() => None, + Some(p) => Some((&s[..p], &s[(p + separator.len())..])), + } +} + +#[test] +fn split_at_first_test() { + assert_eq!(split_at_first(&Cow::Owned("".into()), "="), None); + assert_eq!(split_at_first(&Cow::Owned("=".into()), "="), None); + assert_eq!(split_at_first(&Cow::Owned("= ".into()), "="), None); + assert_eq!( + split_at_first(&Cow::Owned(" = ".into()), "="), + Some((" ", " ")) + ); + assert_eq!( + split_at_first(&Cow::Owned("EXPOSURE= ".into()), "="), + Some(("EXPOSURE", " ")) + ); + assert_eq!( + split_at_first(&Cow::Owned("EXPOSURE= =".into()), "="), + Some(("EXPOSURE", " =")) + ); + assert_eq!( + split_at_first(&Cow::Owned("EXPOSURE== =".into()), "=="), + Some(("EXPOSURE", " =")) + ); + assert_eq!(split_at_first(&Cow::Owned("EXPOSURE".into()), ""), None); +} + +// Reads input until b"\n" or EOF +// Returns vector of read bytes NOT including end of line characters +// or return None to indicate end of file +fn read_line_u8<R: BufRead>(r: &mut R) -> ::std::io::Result<Option<Vec<u8>>> { + let mut ret = Vec::with_capacity(16); + match r.read_until(b'\n', &mut ret) { + Ok(0) => Ok(None), + Ok(_) => { + if let Some(&b'\n') = ret[..].last() { + let _ = ret.pop(); + } + Ok(Some(ret)) + } + Err(err) => Err(err), + } +} + +#[test] +fn read_line_u8_test() { + let buf: Vec<_> = (&b"One\nTwo\nThree\nFour\n\n\n"[..]).into(); + let input = &mut ::std::io::Cursor::new(buf); + assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"One"[..]); + assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], 
&b"Two"[..]); + assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"Three"[..]); + assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"Four"[..]); + assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b""[..]); + assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b""[..]); + assert_eq!(read_line_u8(input).unwrap(), None); +} + +/// Helper function for reading raw 3-channel f32 images +pub fn read_raw_file<P: AsRef<Path>>(path: P) -> ::std::io::Result<Vec<Rgb<f32>>> { + use byteorder::{LittleEndian as LE, ReadBytesExt}; + use std::fs::File; + use std::io::BufReader; + + let mut r = BufReader::new(File::open(path)?); + let w = r.read_u32::<LE>()? as usize; + let h = r.read_u32::<LE>()? as usize; + let c = r.read_u32::<LE>()? as usize; + assert_eq!(c, 3); + let cnt = w * h; + let mut ret = Vec::with_capacity(cnt); + for _ in 0..cnt { + let cr = r.read_f32::<LE>()?; + let cg = r.read_f32::<LE>()?; + let cb = r.read_f32::<LE>()?; + ret.push(Rgb([cr, cg, cb])); + } + Ok(ret) +} + +#[cfg(test)] +mod test { + use super::*; + use std::io::Cursor; + + #[test] + fn dimension_overflow() { + let data = b"#?RADIANCE\nFORMAT=32-bit_rle_rgbe\n\n -Y 4294967295 +X 4294967295"; + + assert!(HdrAdapter::new(Cursor::new(data)).is_err()); + assert!(HdrAdapter::new_nonstrict(Cursor::new(data)).is_err()); + } +} diff --git a/vendor/image/src/codecs/hdr/encoder.rs b/vendor/image/src/codecs/hdr/encoder.rs new file mode 100644 index 0000000..c3a176d --- /dev/null +++ b/vendor/image/src/codecs/hdr/encoder.rs @@ -0,0 +1,433 @@ +use crate::codecs::hdr::{rgbe8, Rgbe8Pixel, SIGNATURE}; +use crate::color::Rgb; +use crate::error::ImageResult; +use std::cmp::Ordering; +use std::io::{Result, Write}; + +/// Radiance HDR encoder +pub struct HdrEncoder<W: Write> { + w: W, +} + +impl<W: Write> HdrEncoder<W> { + /// Creates encoder + pub fn new(w: W) -> HdrEncoder<W> { + HdrEncoder { w } + } + + /// Encodes the image ```data``` + /// that has dimensions ```width``` and ```height``` + pub fn encode(mut self, data: &[Rgb<f32>], width: usize, height: usize) -> ImageResult<()> { + assert!(data.len() >= width * height); + let w = &mut self.w; + w.write_all(SIGNATURE)?; + w.write_all(b"\n")?; + w.write_all(b"# Rust HDR encoder\n")?; + w.write_all(b"FORMAT=32-bit_rle_rgbe\n\n")?; + w.write_all(format!("-Y {} +X {}\n", height, width).as_bytes())?; + + if !(8..=32_768).contains(&width) { + for &pix in data { + write_rgbe8(w, to_rgbe8(pix))?; + } + } else { + // new RLE marker contains scanline width + let marker = rgbe8(2, 2, (width / 256) as u8, (width % 256) as u8); + // buffers for encoded pixels + let mut bufr = vec![0; width]; + let mut bufg = vec![0; width]; + let mut bufb = vec![0; width]; + let mut bufe = vec![0; width]; + let mut rle_buf = vec![0; width]; + for scanline in data.chunks(width) { + for ((((r, g), b), e), &pix) in bufr + .iter_mut() + .zip(bufg.iter_mut()) + .zip(bufb.iter_mut()) + .zip(bufe.iter_mut()) + .zip(scanline.iter()) + { + let cp = to_rgbe8(pix); + *r = cp.c[0]; + *g = cp.c[1]; + *b = cp.c[2]; + *e = cp.e; + } + write_rgbe8(w, marker)?; // New RLE encoding marker + rle_buf.clear(); + rle_compress(&bufr[..], &mut rle_buf); + w.write_all(&rle_buf[..])?; + rle_buf.clear(); + rle_compress(&bufg[..], &mut rle_buf); + w.write_all(&rle_buf[..])?; + rle_buf.clear(); + rle_compress(&bufb[..], &mut rle_buf); + w.write_all(&rle_buf[..])?; + rle_buf.clear(); + rle_compress(&bufe[..], &mut rle_buf); + w.write_all(&rle_buf[..])?; + } + } + Ok(()) + } +} + +#[derive(Debug, PartialEq, Eq)] +enum 
RunOrNot { + Run(u8, usize), + Norun(usize, usize), +} +use self::RunOrNot::{Norun, Run}; + +const RUN_MAX_LEN: usize = 127; +const NORUN_MAX_LEN: usize = 128; + +struct RunIterator<'a> { + data: &'a [u8], + curidx: usize, +} + +impl<'a> RunIterator<'a> { + fn new(data: &'a [u8]) -> RunIterator<'a> { + RunIterator { data, curidx: 0 } + } +} + +impl<'a> Iterator for RunIterator<'a> { + type Item = RunOrNot; + + fn next(&mut self) -> Option<Self::Item> { + if self.curidx == self.data.len() { + None + } else { + let cv = self.data[self.curidx]; + let crun = self.data[self.curidx..] + .iter() + .take_while(|&&v| v == cv) + .take(RUN_MAX_LEN) + .count(); + let ret = if crun > 2 { + Run(cv, crun) + } else { + Norun(self.curidx, crun) + }; + self.curidx += crun; + Some(ret) + } + } +} + +struct NorunCombineIterator<'a> { + runiter: RunIterator<'a>, + prev: Option<RunOrNot>, +} + +impl<'a> NorunCombineIterator<'a> { + fn new(data: &'a [u8]) -> NorunCombineIterator<'a> { + NorunCombineIterator { + runiter: RunIterator::new(data), + prev: None, + } + } +} + +// Combines sequential noruns produced by RunIterator +impl<'a> Iterator for NorunCombineIterator<'a> { + type Item = RunOrNot; + fn next(&mut self) -> Option<Self::Item> { + loop { + match self.prev.take() { + Some(Run(c, len)) => { + // Just return stored run + return Some(Run(c, len)); + } + Some(Norun(idx, len)) => { + // Let's see if we need to continue norun + match self.runiter.next() { + Some(Norun(_, len1)) => { + // norun continues + let clen = len + len1; // combined length + match clen.cmp(&NORUN_MAX_LEN) { + Ordering::Equal => return Some(Norun(idx, clen)), + Ordering::Greater => { + // combined norun exceeds maximum length. store extra part of norun + self.prev = + Some(Norun(idx + NORUN_MAX_LEN, clen - NORUN_MAX_LEN)); + // then return maximal norun + return Some(Norun(idx, NORUN_MAX_LEN)); + } + Ordering::Less => { + // len + len1 < NORUN_MAX_LEN + self.prev = Some(Norun(idx, len + len1)); + // combine and continue loop + } + } + } + Some(Run(c, len1)) => { + // Run encountered. Store it + self.prev = Some(Run(c, len1)); + return Some(Norun(idx, len)); // and return combined norun + } + None => { + // End of sequence + return Some(Norun(idx, len)); // return combined norun + } + } + } // End match self.prev.take() == Some(NoRun()) + None => { + // No norun to combine + match self.runiter.next() { + Some(Norun(idx, len)) => { + self.prev = Some(Norun(idx, len)); + // store for combine and continue the loop + } + Some(Run(c, len)) => { + // Some run. Just return it + return Some(Run(c, len)); + } + None => { + // That's all, folks + return None; + } + } + } // End match self.prev.take() == None + } // End match + } // End loop + } +} + +// Appends RLE compressed ```data``` to ```rle``` +fn rle_compress(data: &[u8], rle: &mut Vec<u8>) { + rle.clear(); + if data.is_empty() { + rle.push(0); // Technically correct. It means read next 0 bytes. 
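+        // (In this byte-oriented RLE a count of 0..=128 means "copy that many
+        // literal bytes", so a lone 0 is just an empty literal chunk; a count above
+        // 128 means "repeat the following byte count - 128 times".)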
+ return; + } + // Task: split data into chunks of repeating (max 127) and non-repeating bytes (max 128) + // Prepend non-repeating chunk with its length + // Replace repeating byte with (run length + 128) and the byte + for rnr in NorunCombineIterator::new(data) { + match rnr { + Run(c, len) => { + assert!(len <= 127); + rle.push(128u8 + len as u8); + rle.push(c); + } + Norun(idx, len) => { + assert!(len <= 128); + rle.push(len as u8); + rle.extend_from_slice(&data[idx..idx + len]); + } + } + } +} + +fn write_rgbe8<W: Write>(w: &mut W, v: Rgbe8Pixel) -> Result<()> { + w.write_all(&[v.c[0], v.c[1], v.c[2], v.e]) +} + +/// Converts ```Rgb<f32>``` into ```Rgbe8Pixel``` +pub fn to_rgbe8(pix: Rgb<f32>) -> Rgbe8Pixel { + let pix = pix.0; + let mx = f32::max(pix[0], f32::max(pix[1], pix[2])); + if mx <= 0.0 { + Rgbe8Pixel { c: [0, 0, 0], e: 0 } + } else { + // let (frac, exp) = mx.frexp(); // unstable yet + let exp = mx.log2().floor() as i32 + 1; + let mul = f32::powi(2.0, exp); + let mut conv = [0u8; 3]; + for (cv, &sv) in conv.iter_mut().zip(pix.iter()) { + *cv = f32::trunc(sv / mul * 256.0) as u8; + } + Rgbe8Pixel { + c: conv, + e: (exp + 128) as u8, + } + } +} + +#[test] +fn to_rgbe8_test() { + use crate::codecs::hdr::rgbe8; + let test_cases = vec![rgbe8(0, 0, 0, 0), rgbe8(1, 1, 128, 128)]; + for &pix in &test_cases { + assert_eq!(pix, to_rgbe8(pix.to_hdr())); + } + for mc in 128..255 { + // TODO: use inclusive range when stable + let pix = rgbe8(mc, mc, mc, 100); + assert_eq!(pix, to_rgbe8(pix.to_hdr())); + let pix = rgbe8(mc, 0, mc, 130); + assert_eq!(pix, to_rgbe8(pix.to_hdr())); + let pix = rgbe8(0, 0, mc, 140); + assert_eq!(pix, to_rgbe8(pix.to_hdr())); + let pix = rgbe8(1, 0, mc, 150); + assert_eq!(pix, to_rgbe8(pix.to_hdr())); + let pix = rgbe8(1, mc, 10, 128); + assert_eq!(pix, to_rgbe8(pix.to_hdr())); + for c in 0..255 { + // Radiance HDR seems to be pre IEEE 754. 
+ // exponent can be -128 (represented as 0u8), so some colors cannot be represented in normalized f32 + // Let's exclude exponent value of -128 (0u8) from testing + let pix = rgbe8(1, mc, c, if c == 0 { 1 } else { c }); + assert_eq!(pix, to_rgbe8(pix.to_hdr())); + } + } + fn relative_dist(a: Rgb<f32>, b: Rgb<f32>) -> f32 { + // maximal difference divided by maximal value + let max_diff = + a.0.iter() + .zip(b.0.iter()) + .fold(0.0, |diff, (&a, &b)| f32::max(diff, (a - b).abs())); + let max_val = + a.0.iter() + .chain(b.0.iter()) + .fold(0.0, |maxv, &a| f32::max(maxv, a)); + if max_val == 0.0 { + 0.0 + } else { + max_diff / max_val + } + } + let test_values = vec![ + 0.000_001, 0.000_02, 0.000_3, 0.004, 0.05, 0.6, 7.0, 80.0, 900.0, 1_000.0, 20_000.0, + 300_000.0, + ]; + for &r in &test_values { + for &g in &test_values { + for &b in &test_values { + let c1 = Rgb([r, g, b]); + let c2 = to_rgbe8(c1).to_hdr(); + let rel_dist = relative_dist(c1, c2); + // Maximal value is normalized to the range 128..256, thus we have 1/128 precision + assert!( + rel_dist <= 1.0 / 128.0, + "Relative distance ({}) exceeds 1/128 for {:?} and {:?}", + rel_dist, + c1, + c2 + ); + } + } + } +} + +#[test] +fn runiterator_test() { + let data = []; + let mut run_iter = RunIterator::new(&data[..]); + assert_eq!(run_iter.next(), None); + let data = [5]; + let mut run_iter = RunIterator::new(&data[..]); + assert_eq!(run_iter.next(), Some(Norun(0, 1))); + assert_eq!(run_iter.next(), None); + let data = [1, 1]; + let mut run_iter = RunIterator::new(&data[..]); + assert_eq!(run_iter.next(), Some(Norun(0, 2))); + assert_eq!(run_iter.next(), None); + let data = [0, 0, 0]; + let mut run_iter = RunIterator::new(&data[..]); + assert_eq!(run_iter.next(), Some(Run(0u8, 3))); + assert_eq!(run_iter.next(), None); + let data = [0, 0, 1, 1]; + let mut run_iter = RunIterator::new(&data[..]); + assert_eq!(run_iter.next(), Some(Norun(0, 2))); + assert_eq!(run_iter.next(), Some(Norun(2, 2))); + assert_eq!(run_iter.next(), None); + let data = [0, 0, 0, 1, 1]; + let mut run_iter = RunIterator::new(&data[..]); + assert_eq!(run_iter.next(), Some(Run(0u8, 3))); + assert_eq!(run_iter.next(), Some(Norun(3, 2))); + assert_eq!(run_iter.next(), None); + let data = [1, 2, 2, 2]; + let mut run_iter = RunIterator::new(&data[..]); + assert_eq!(run_iter.next(), Some(Norun(0, 1))); + assert_eq!(run_iter.next(), Some(Run(2u8, 3))); + assert_eq!(run_iter.next(), None); + let data = [1, 1, 2, 2, 2]; + let mut run_iter = RunIterator::new(&data[..]); + assert_eq!(run_iter.next(), Some(Norun(0, 2))); + assert_eq!(run_iter.next(), Some(Run(2u8, 3))); + assert_eq!(run_iter.next(), None); + let data = [2; 128]; + let mut run_iter = RunIterator::new(&data[..]); + assert_eq!(run_iter.next(), Some(Run(2u8, 127))); + assert_eq!(run_iter.next(), Some(Norun(127, 1))); + assert_eq!(run_iter.next(), None); + let data = [2; 129]; + let mut run_iter = RunIterator::new(&data[..]); + assert_eq!(run_iter.next(), Some(Run(2u8, 127))); + assert_eq!(run_iter.next(), Some(Norun(127, 2))); + assert_eq!(run_iter.next(), None); + let data = [2; 130]; + let mut run_iter = RunIterator::new(&data[..]); + assert_eq!(run_iter.next(), Some(Run(2u8, 127))); + assert_eq!(run_iter.next(), Some(Run(2u8, 3))); + assert_eq!(run_iter.next(), None); +} + +#[test] +fn noruncombine_test() { + fn a<T>(mut v: Vec<T>, mut other: Vec<T>) -> Vec<T> { + v.append(&mut other); + v + } + + let v = vec![]; + let mut rsi = NorunCombineIterator::new(&v[..]); + assert_eq!(rsi.next(), None); + + let v = vec![1]; 
+ let mut rsi = NorunCombineIterator::new(&v[..]); + assert_eq!(rsi.next(), Some(Norun(0, 1))); + assert_eq!(rsi.next(), None); + + let v = vec![2, 2]; + let mut rsi = NorunCombineIterator::new(&v[..]); + assert_eq!(rsi.next(), Some(Norun(0, 2))); + assert_eq!(rsi.next(), None); + + let v = vec![3, 3, 3]; + let mut rsi = NorunCombineIterator::new(&v[..]); + assert_eq!(rsi.next(), Some(Run(3, 3))); + assert_eq!(rsi.next(), None); + + let v = vec![4, 4, 3, 3, 3]; + let mut rsi = NorunCombineIterator::new(&v[..]); + assert_eq!(rsi.next(), Some(Norun(0, 2))); + assert_eq!(rsi.next(), Some(Run(3, 3))); + assert_eq!(rsi.next(), None); + + let v = vec![40; 400]; + let mut rsi = NorunCombineIterator::new(&v[..]); + assert_eq!(rsi.next(), Some(Run(40, 127))); + assert_eq!(rsi.next(), Some(Run(40, 127))); + assert_eq!(rsi.next(), Some(Run(40, 127))); + assert_eq!(rsi.next(), Some(Run(40, 19))); + assert_eq!(rsi.next(), None); + + let v = a(a(vec![5; 3], vec![6; 129]), vec![7, 3, 7, 10, 255]); + let mut rsi = NorunCombineIterator::new(&v[..]); + assert_eq!(rsi.next(), Some(Run(5, 3))); + assert_eq!(rsi.next(), Some(Run(6, 127))); + assert_eq!(rsi.next(), Some(Norun(130, 7))); + assert_eq!(rsi.next(), None); + + let v = a(a(vec![5; 2], vec![6; 129]), vec![7, 3, 7, 7, 255]); + let mut rsi = NorunCombineIterator::new(&v[..]); + assert_eq!(rsi.next(), Some(Norun(0, 2))); + assert_eq!(rsi.next(), Some(Run(6, 127))); + assert_eq!(rsi.next(), Some(Norun(129, 7))); + assert_eq!(rsi.next(), None); + + let v: Vec<_> = ::std::iter::repeat(()) + .flat_map(|_| (0..2)) + .take(257) + .collect(); + let mut rsi = NorunCombineIterator::new(&v[..]); + assert_eq!(rsi.next(), Some(Norun(0, 128))); + assert_eq!(rsi.next(), Some(Norun(128, 128))); + assert_eq!(rsi.next(), Some(Norun(256, 1))); + assert_eq!(rsi.next(), None); +} diff --git a/vendor/image/src/codecs/hdr/mod.rs b/vendor/image/src/codecs/hdr/mod.rs new file mode 100644 index 0000000..b3325bc --- /dev/null +++ b/vendor/image/src/codecs/hdr/mod.rs @@ -0,0 +1,15 @@ +//! Decoding of Radiance HDR Images +//! +//! A decoder for Radiance HDR images +//! +//! # Related Links +//! +//! * <http://radsite.lbl.gov/radiance/refer/filefmts.pdf> +//! * <http://www.graphics.cornell.edu/~bjw/rgbe/rgbe.c> +//! + +mod decoder; +mod encoder; + +pub use self::decoder::*; +pub use self::encoder::*; diff --git a/vendor/image/src/codecs/ico/decoder.rs b/vendor/image/src/codecs/ico/decoder.rs new file mode 100644 index 0000000..4f02787 --- /dev/null +++ b/vendor/image/src/codecs/ico/decoder.rs @@ -0,0 +1,470 @@ +use byteorder::{LittleEndian, ReadBytesExt}; +use std::convert::TryFrom; +use std::io::{self, Cursor, Read, Seek, SeekFrom}; +use std::marker::PhantomData; +use std::{error, fmt, mem}; + +use crate::color::ColorType; +use crate::error::{ + DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind, +}; +use crate::image::{self, ImageDecoder, ImageFormat}; + +use self::InnerDecoder::*; +use crate::codecs::bmp::BmpDecoder; +use crate::codecs::png::{PngDecoder, PNG_SIGNATURE}; + +/// Errors that can occur during decoding and parsing an ICO image or one of its enclosed images. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +enum DecoderError { + /// The ICO directory is empty + NoEntries, + /// The number of color planes (0 or 1), or the horizontal coordinate of the hotspot for CUR files too big. 
+ IcoEntryTooManyPlanesOrHotspot, + /// The bit depth (may be 0 meaning unspecified), or the vertical coordinate of the hotspot for CUR files too big. + IcoEntryTooManyBitsPerPixelOrHotspot, + + /// The entry is in PNG format and specified a length that is shorter than PNG header. + PngShorterThanHeader, + /// The enclosed PNG is not in RGBA, which is invalid: https://blogs.msdn.microsoft.com/oldnewthing/20101022-00/?p=12473/. + PngNotRgba, + + /// The entry is in BMP format and specified a data size that is not correct for the image and optional mask data. + InvalidDataSize, + + /// The dimensions specified by the entry does not match the dimensions in the header of the enclosed image. + ImageEntryDimensionMismatch { + /// The mismatched subimage's type + format: IcoEntryImageFormat, + /// The dimensions specified by the entry + entry: (u16, u16), + /// The dimensions of the image itself + image: (u32, u32), + }, +} + +impl fmt::Display for DecoderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DecoderError::NoEntries => f.write_str("ICO directory contains no image"), + DecoderError::IcoEntryTooManyPlanesOrHotspot => { + f.write_str("ICO image entry has too many color planes or too large hotspot value") + } + DecoderError::IcoEntryTooManyBitsPerPixelOrHotspot => f.write_str( + "ICO image entry has too many bits per pixel or too large hotspot value", + ), + DecoderError::PngShorterThanHeader => { + f.write_str("Entry specified a length that is shorter than PNG header!") + } + DecoderError::PngNotRgba => f.write_str("The PNG is not in RGBA format!"), + DecoderError::InvalidDataSize => { + f.write_str("ICO image data size did not match expected size") + } + DecoderError::ImageEntryDimensionMismatch { + format, + entry, + image, + } => f.write_fmt(format_args!( + "Entry{:?} and {}{:?} dimensions do not match!", + entry, format, image + )), + } + } +} + +impl From<DecoderError> for ImageError { + fn from(e: DecoderError) -> ImageError { + ImageError::Decoding(DecodingError::new(ImageFormat::Ico.into(), e)) + } +} + +impl error::Error for DecoderError {} + +/// The image formats an ICO may contain +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +enum IcoEntryImageFormat { + /// PNG in ARGB + Png, + /// BMP with optional alpha mask + Bmp, +} + +impl fmt::Display for IcoEntryImageFormat { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(match self { + IcoEntryImageFormat::Png => "PNG", + IcoEntryImageFormat::Bmp => "BMP", + }) + } +} + +impl From<IcoEntryImageFormat> for ImageFormat { + fn from(val: IcoEntryImageFormat) -> Self { + match val { + IcoEntryImageFormat::Png => ImageFormat::Png, + IcoEntryImageFormat::Bmp => ImageFormat::Bmp, + } + } +} + +/// An ico decoder +pub struct IcoDecoder<R: Read> { + selected_entry: DirEntry, + inner_decoder: InnerDecoder<R>, +} + +enum InnerDecoder<R: Read> { + Bmp(BmpDecoder<R>), + Png(Box<PngDecoder<R>>), +} + +#[derive(Clone, Copy, Default)] +struct DirEntry { + width: u8, + height: u8, + // We ignore some header fields as they will be replicated in the PNG, BMP and they are not + // necessary for determining the best_entry. + #[allow(unused)] + color_count: u8, + // Wikipedia has this to say: + // Although Microsoft's technical documentation states that this value must be zero, the icon + // encoder built into .NET (System.Drawing.Icon.Save) sets this value to 255. It appears that + // the operating system ignores this value altogether. 
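+ // We still read the byte so that each directory entry stays 16 bytes long.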
+ #[allow(unused)] + reserved: u8, + + // We ignore some header fields as they will be replicated in the PNG, BMP and they are not + // necessary for determining the best_entry. + #[allow(unused)] + num_color_planes: u16, + bits_per_pixel: u16, + + image_length: u32, + image_offset: u32, +} + +impl<R: Read + Seek> IcoDecoder<R> { + /// Create a new decoder that decodes from the stream ```r``` + pub fn new(mut r: R) -> ImageResult<IcoDecoder<R>> { + let entries = read_entries(&mut r)?; + let entry = best_entry(entries)?; + let decoder = entry.decoder(r)?; + + Ok(IcoDecoder { + selected_entry: entry, + inner_decoder: decoder, + }) + } +} + +fn read_entries<R: Read>(r: &mut R) -> ImageResult<Vec<DirEntry>> { + let _reserved = r.read_u16::<LittleEndian>()?; + let _type = r.read_u16::<LittleEndian>()?; + let count = r.read_u16::<LittleEndian>()?; + (0..count).map(|_| read_entry(r)).collect() +} + +fn read_entry<R: Read>(r: &mut R) -> ImageResult<DirEntry> { + Ok(DirEntry { + width: r.read_u8()?, + height: r.read_u8()?, + color_count: r.read_u8()?, + reserved: r.read_u8()?, + num_color_planes: { + // This may be either the number of color planes (0 or 1), or the horizontal coordinate + // of the hotspot for CUR files. + let num = r.read_u16::<LittleEndian>()?; + if num > 256 { + return Err(DecoderError::IcoEntryTooManyPlanesOrHotspot.into()); + } + num + }, + bits_per_pixel: { + // This may be either the bit depth (may be 0 meaning unspecified), + // or the vertical coordinate of the hotspot for CUR files. + let num = r.read_u16::<LittleEndian>()?; + if num > 256 { + return Err(DecoderError::IcoEntryTooManyBitsPerPixelOrHotspot.into()); + } + num + }, + image_length: r.read_u32::<LittleEndian>()?, + image_offset: r.read_u32::<LittleEndian>()?, + }) +} + +/// Find the entry with the highest (color depth, size). +fn best_entry(mut entries: Vec<DirEntry>) -> ImageResult<DirEntry> { + let mut best = entries.pop().ok_or(DecoderError::NoEntries)?; + + let mut best_score = ( + best.bits_per_pixel, + u32::from(best.real_width()) * u32::from(best.real_height()), + ); + + for entry in entries { + let score = ( + entry.bits_per_pixel, + u32::from(entry.real_width()) * u32::from(entry.real_height()), + ); + if score > best_score { + best = entry; + best_score = score; + } + } + Ok(best) +} + +impl DirEntry { + fn real_width(&self) -> u16 { + match self.width { + 0 => 256, + w => u16::from(w), + } + } + + fn real_height(&self) -> u16 { + match self.height { + 0 => 256, + h => u16::from(h), + } + } + + fn matches_dimensions(&self, width: u32, height: u32) -> bool { + u32::from(self.real_width()) == width.min(256) + && u32::from(self.real_height()) == height.min(256) + } + + fn seek_to_start<R: Read + Seek>(&self, r: &mut R) -> ImageResult<()> { + r.seek(SeekFrom::Start(u64::from(self.image_offset)))?; + Ok(()) + } + + fn is_png<R: Read + Seek>(&self, r: &mut R) -> ImageResult<bool> { + self.seek_to_start(r)?; + + // Read the first 8 bytes to sniff the image. 
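+ // (The PNG signature is exactly 8 bytes: 0x89 "PNG" CR LF 0x1A LF.)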
+ let mut signature = [0u8; 8]; + r.read_exact(&mut signature)?; + + Ok(signature == PNG_SIGNATURE) + } + + fn decoder<R: Read + Seek>(&self, mut r: R) -> ImageResult<InnerDecoder<R>> { + let is_png = self.is_png(&mut r)?; + self.seek_to_start(&mut r)?; + + if is_png { + Ok(Png(Box::new(PngDecoder::new(r)?))) + } else { + Ok(Bmp(BmpDecoder::new_with_ico_format(r)?)) + } + } +} + +/// Wrapper struct around a `Cursor<Vec<u8>>` +pub struct IcoReader<R>(Cursor<Vec<u8>>, PhantomData<R>); +impl<R> Read for IcoReader<R> { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { + if self.0.position() == 0 && buf.is_empty() { + mem::swap(buf, self.0.get_mut()); + Ok(buf.len()) + } else { + self.0.read_to_end(buf) + } + } +} + +impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for IcoDecoder<R> { + type Reader = IcoReader<R>; + + fn dimensions(&self) -> (u32, u32) { + match self.inner_decoder { + Bmp(ref decoder) => decoder.dimensions(), + Png(ref decoder) => decoder.dimensions(), + } + } + + fn color_type(&self) -> ColorType { + match self.inner_decoder { + Bmp(ref decoder) => decoder.color_type(), + Png(ref decoder) => decoder.color_type(), + } + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + Ok(IcoReader( + Cursor::new(image::decoder_to_vec(self)?), + PhantomData, + )) + } + + fn read_image(self, buf: &mut [u8]) -> ImageResult<()> { + assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); + match self.inner_decoder { + Png(decoder) => { + if self.selected_entry.image_length < PNG_SIGNATURE.len() as u32 { + return Err(DecoderError::PngShorterThanHeader.into()); + } + + // Check if the image dimensions match the ones in the image data. + let (width, height) = decoder.dimensions(); + if !self.selected_entry.matches_dimensions(width, height) { + return Err(DecoderError::ImageEntryDimensionMismatch { + format: IcoEntryImageFormat::Png, + entry: ( + self.selected_entry.real_width(), + self.selected_entry.real_height(), + ), + image: (width, height), + } + .into()); + } + + // Embedded PNG images can only be of the 32BPP RGBA format. + // https://blogs.msdn.microsoft.com/oldnewthing/20101022-00/?p=12473/ + if decoder.color_type() != ColorType::Rgba8 { + return Err(DecoderError::PngNotRgba.into()); + } + + decoder.read_image(buf) + } + Bmp(mut decoder) => { + let (width, height) = decoder.dimensions(); + if !self.selected_entry.matches_dimensions(width, height) { + return Err(DecoderError::ImageEntryDimensionMismatch { + format: IcoEntryImageFormat::Bmp, + entry: ( + self.selected_entry.real_width(), + self.selected_entry.real_height(), + ), + image: (width, height), + } + .into()); + } + + // The ICO decoder needs an alpha channel to apply the AND mask. + if decoder.color_type() != ColorType::Rgba8 { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Bmp.into(), + UnsupportedErrorKind::Color(decoder.color_type().into()), + ), + )); + } + + decoder.read_image_data(buf)?; + + let r = decoder.reader(); + let image_end = r.stream_position()?; + let data_end = u64::from(self.selected_entry.image_offset) + + u64::from(self.selected_entry.image_length); + + let mask_row_bytes = ((width + 31) / 32) * 4; + let mask_length = u64::from(mask_row_bytes) * u64::from(height); + + // data_end should be image_end + the mask length (mask_row_bytes * height). 
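+ // The AND mask is 1 bit per pixel, with each row padded to a 32-bit boundary,
+ // hence the `mask_row_bytes` computation above.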
+ // According to + // https://devblogs.microsoft.com/oldnewthing/20101021-00/?p=12483 + // the mask is required, but according to Wikipedia + // https://en.wikipedia.org/wiki/ICO_(file_format) + // the mask is not required. Unfortunately, Wikipedia does not have a citation + // for that claim, so we can't be sure which is correct. + if data_end >= image_end + mask_length { + // If there's an AND mask following the image, read and apply it. + for y in 0..height { + let mut x = 0; + for _ in 0..mask_row_bytes { + // Apply the bits of each byte until we reach the end of the row. + let mask_byte = r.read_u8()?; + for bit in (0..8).rev() { + if x >= width { + break; + } + if mask_byte & (1 << bit) != 0 { + // Set alpha channel to transparent. + buf[((height - y - 1) * width + x) as usize * 4 + 3] = 0; + } + x += 1; + } + } + } + + Ok(()) + } else if data_end == image_end { + // accept images with no mask data + Ok(()) + } else { + Err(DecoderError::InvalidDataSize.into()) + } + } + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + // Test if BMP images without alpha channel inside ICOs don't panic. + // Because the test data is invalid decoding should produce an error. + #[test] + fn bmp_16_with_missing_alpha_channel() { + let data = vec![ + 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x0e, 0x04, 0xc3, 0x7e, 0x00, 0x00, 0x00, 0x00, + 0x7c, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0x01, 0x00, + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x8f, 0xf6, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x20, 0x66, 0x74, 0x83, 0x70, 0x61, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xeb, 0x00, 0x9b, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x47, 0x0d, + 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x62, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0c, + 0x00, 0x00, 0x00, 0xc3, 0x3f, 0x94, 0x61, 0xaa, 0x17, 0x4d, 0x8d, 0x79, 0x1d, 0x8b, + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x2e, 0x28, 0x40, 0xe5, 0x9f, + 0x4b, 0x4d, 0xe9, 0x87, 0xd3, 0xda, 0xd6, 0x89, 0x81, 0xc5, 0xa4, 0xa1, 0x60, 0x98, + 0x31, 0xc7, 0x1d, 0xb6, 0x8f, 0x20, 0xc8, 0x3e, 0xee, 0xd8, 0xe4, 0x8f, 0xee, 0x7b, + 0x48, 0x9b, 0x88, 0x25, 0x13, 0xda, 0xa4, 0x13, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x40, + 0x16, 0x01, 0xff, 0xff, 0xff, 0xff, 0xe9, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xa3, 0x66, 0x64, 0x41, 0x54, 0xa3, 0xa3, 0x00, 0x00, 0x00, 0xb8, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x66, 0x64, 0x41, 0x54, 0xa3, 0xa3, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0xf6, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x66, 
0x74, 0x83, 0x70, 0x61, 0x76, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, + 0xeb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x62, 0x49, + 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xc8, 0x00, 0x02, 0x0c, 0x00, 0xff, 0xff, 0xc6, + 0x84, 0x00, 0x2a, 0x75, 0x03, 0xa3, 0x05, 0xfb, 0xe1, 0x6e, 0xe8, 0x27, 0xd6, 0xd3, + 0x96, 0xc1, 0xe4, 0x30, 0x0c, 0x05, 0xb9, 0xa3, 0x8b, 0x29, 0xda, 0xa4, 0xf1, 0x4d, + 0xf3, 0xb2, 0x98, 0x2b, 0xe6, 0x93, 0x07, 0xf9, 0xca, 0x2b, 0xc2, 0x39, 0x20, 0xba, + 0x7c, 0xa0, 0xb1, 0x43, 0xe6, 0xf9, 0xdc, 0xd1, 0xc2, 0x52, 0xdc, 0x41, 0xc1, 0x2f, + 0x29, 0xf7, 0x46, 0x32, 0xda, 0x1b, 0x72, 0x8c, 0xe6, 0x2b, 0x01, 0xe5, 0x49, 0x21, + 0x89, 0x89, 0xe4, 0x3d, 0xa1, 0xdb, 0x3b, 0x4a, 0x0b, 0x52, 0x86, 0x52, 0x33, 0x9d, + 0xb2, 0xcf, 0x4a, 0x86, 0x53, 0xd7, 0xa9, 0x4b, 0xaf, 0x62, 0x06, 0x49, 0x53, 0x00, + 0xc3, 0x3f, 0x94, 0x61, 0xaa, 0x17, 0x4d, 0x8d, 0x79, 0x1d, 0x8b, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x2e, 0x28, 0x40, 0xe5, 0x9f, 0x4b, 0x4d, 0xe9, + 0x87, 0xd3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe7, 0xc5, 0x00, + 0x02, 0x00, 0x00, 0x00, 0x06, 0x00, 0x0b, 0x00, 0x50, 0x31, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x76, 0x76, 0x01, 0x00, 0x00, 0x00, 0x76, 0x00, + 0x00, 0x23, 0x3f, 0x52, 0x41, 0x44, 0x49, 0x41, 0x4e, 0x43, 0x45, 0x61, 0x50, 0x35, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x4d, 0x47, 0x49, 0x46, 0x38, 0x37, 0x61, 0x05, + 0x50, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc7, 0x37, 0x61, + ]; + + let decoder = IcoDecoder::new(Cursor::new(&data)).unwrap(); + let mut buf = vec![0; usize::try_from(decoder.total_bytes()).unwrap()]; + assert!(decoder.read_image(&mut buf).is_err()); + } +} diff --git a/vendor/image/src/codecs/ico/encoder.rs b/vendor/image/src/codecs/ico/encoder.rs new file mode 100644 index 0000000..dd5961b --- /dev/null +++ b/vendor/image/src/codecs/ico/encoder.rs @@ -0,0 +1,194 @@ +use byteorder::{LittleEndian, WriteBytesExt}; +use std::borrow::Cow; +use std::io::{self, Write}; + +use crate::color::ColorType; +use crate::error::{ImageError, ImageResult, ParameterError, ParameterErrorKind}; +use crate::image::ImageEncoder; + +use crate::codecs::png::PngEncoder; + +// Enum value indicating an ICO image (as opposed to a CUR image): +const ICO_IMAGE_TYPE: u16 = 1; +// The length of an ICO file ICONDIR structure, in bytes: +const ICO_ICONDIR_SIZE: u32 = 6; +// The length of an ICO file DIRENTRY structure, in bytes: +const ICO_DIRENTRY_SIZE: u32 = 16; + +/// ICO encoder +pub struct IcoEncoder<W: Write> { + w: W, +} + +/// An ICO image entry +pub struct IcoFrame<'a> { + // Pre-encoded PNG or BMP + encoded_image: Cow<'a, [u8]>, + // Stored as `0 => 256, n => n` + width: u8, + // Stored as `0 => 256, n => n` + height: u8, + color_type: ColorType, +} + +impl<'a> IcoFrame<'a> { + /// Construct a new `IcoFrame` using a pre-encoded PNG or BMP + /// + /// The `width` and `height` must be between 1 and 256 (inclusive). 
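+ ///
+ /// A minimal sketch of wrapping bytes that were encoded elsewhere (here the
+ /// `png_bytes` buffer is only a placeholder for real PNG data):
+ ///
+ /// ```no_run
+ /// use image::codecs::ico::IcoFrame;
+ /// use image::ColorType;
+ ///
+ /// let png_bytes: Vec<u8> = Vec::new(); // stand-in for a pre-encoded PNG
+ /// let frame = IcoFrame::with_encoded(png_bytes, 32, 32, ColorType::Rgba8)
+ ///     .expect("32x32 is within 1..=256");
+ /// ```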
+ pub fn with_encoded( + encoded_image: impl Into<Cow<'a, [u8]>>, + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<Self> { + let encoded_image = encoded_image.into(); + + if !(1..=256).contains(&width) { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic(format!( + "the image width must be `1..=256`, instead width {} was provided", + width, + )), + ))); + } + + if !(1..=256).contains(&height) { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic(format!( + "the image height must be `1..=256`, instead height {} was provided", + height, + )), + ))); + } + + Ok(Self { + encoded_image, + width: width as u8, + height: height as u8, + color_type, + }) + } + + /// Construct a new `IcoFrame` by encoding `buf` as a PNG + /// + /// The `width` and `height` must be between 1 and 256 (inclusive) + pub fn as_png(buf: &[u8], width: u32, height: u32, color_type: ColorType) -> ImageResult<Self> { + let mut image_data: Vec<u8> = Vec::new(); + PngEncoder::new(&mut image_data).write_image(buf, width, height, color_type)?; + + let frame = Self::with_encoded(image_data, width, height, color_type)?; + Ok(frame) + } +} + +impl<W: Write> IcoEncoder<W> { + /// Create a new encoder that writes its output to ```w```. + pub fn new(w: W) -> IcoEncoder<W> { + IcoEncoder { w } + } + + /// Encodes the image ```image``` that has dimensions ```width``` and + /// ```height``` and ```ColorType``` ```c```. The dimensions of the image + /// must be between 1 and 256 (inclusive) or an error will be returned. + /// + /// Expects data to be big endian. + #[deprecated = "Use `IcoEncoder::write_image` instead. Beware that `write_image` has a different endianness convention"] + pub fn encode(self, data: &[u8], width: u32, height: u32, color: ColorType) -> ImageResult<()> { + let mut image_data: Vec<u8> = Vec::new(); + #[allow(deprecated)] + PngEncoder::new(&mut image_data).encode(data, width, height, color)?; + + let image = IcoFrame::with_encoded(&image_data, width, height, color)?; + self.encode_images(&[image]) + } + + /// Takes some [`IcoFrame`]s and encodes them into an ICO. + /// + /// `images` is a list of images, usually ordered by dimension, which + /// must be between 1 and 65535 (inclusive) in length. + pub fn encode_images(mut self, images: &[IcoFrame<'_>]) -> ImageResult<()> { + if !(1..=usize::from(u16::MAX)).contains(&images.len()) { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic(format!( + "the number of images must be `1..=u16::MAX`, instead {} images were provided", + images.len(), + )), + ))); + } + let num_images = images.len() as u16; + + let mut offset = ICO_ICONDIR_SIZE + (ICO_DIRENTRY_SIZE * (images.len() as u32)); + write_icondir(&mut self.w, num_images)?; + for image in images { + write_direntry( + &mut self.w, + image.width, + image.height, + image.color_type, + offset, + image.encoded_image.len() as u32, + )?; + + offset += image.encoded_image.len() as u32; + } + for image in images { + self.w.write_all(&image.encoded_image)?; + } + Ok(()) + } +} + +impl<W: Write> ImageEncoder for IcoEncoder<W> { + /// Write an ICO image with the specified width, height, and color type. + /// + /// For color types with 16-bit per channel or larger, the contents of `buf` should be in + /// native endian. + /// + /// WARNING: In image 0.23.14 and earlier this method erroneously expected buf to be in big endian. 
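+ ///
+ /// A minimal sketch writing a single 16x16 RGBA frame into an in-memory
+ /// buffer (the all-zero pixel data is made up for illustration):
+ ///
+ /// ```no_run
+ /// use image::codecs::ico::IcoEncoder;
+ /// use image::{ColorType, ImageEncoder};
+ ///
+ /// let pixels = vec![0u8; 16 * 16 * 4];
+ /// let mut out = Vec::new();
+ /// IcoEncoder::new(&mut out)
+ ///     .write_image(&pixels, 16, 16, ColorType::Rgba8)
+ ///     .expect("a single in-range frame should encode");
+ /// ```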
+ fn write_image( + self, + buf: &[u8], + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<()> { + let image = IcoFrame::as_png(buf, width, height, color_type)?; + self.encode_images(&[image]) + } +} + +fn write_icondir<W: Write>(w: &mut W, num_images: u16) -> io::Result<()> { + // Reserved field (must be zero): + w.write_u16::<LittleEndian>(0)?; + // Image type (ICO or CUR): + w.write_u16::<LittleEndian>(ICO_IMAGE_TYPE)?; + // Number of images in the file: + w.write_u16::<LittleEndian>(num_images)?; + Ok(()) +} + +fn write_direntry<W: Write>( + w: &mut W, + width: u8, + height: u8, + color: ColorType, + data_start: u32, + data_size: u32, +) -> io::Result<()> { + // Image dimensions: + w.write_u8(width)?; + w.write_u8(height)?; + // Number of colors in palette (or zero for no palette): + w.write_u8(0)?; + // Reserved field (must be zero): + w.write_u8(0)?; + // Color planes: + w.write_u16::<LittleEndian>(0)?; + // Bits per pixel: + w.write_u16::<LittleEndian>(color.bits_per_pixel())?; + // Image data size, in bytes: + w.write_u32::<LittleEndian>(data_size)?; + // Image data offset, in bytes: + w.write_u32::<LittleEndian>(data_start)?; + Ok(()) +} diff --git a/vendor/image/src/codecs/ico/mod.rs b/vendor/image/src/codecs/ico/mod.rs new file mode 100644 index 0000000..11493ac --- /dev/null +++ b/vendor/image/src/codecs/ico/mod.rs @@ -0,0 +1,14 @@ +//! Decoding and Encoding of ICO files +//! +//! A decoder and encoder for ICO (Windows Icon) image container files. +//! +//! # Related Links +//! * <https://msdn.microsoft.com/en-us/library/ms997538.aspx> +//! * <https://en.wikipedia.org/wiki/ICO_%28file_format%29> + +pub use self::decoder::IcoDecoder; +#[allow(deprecated)] +pub use self::encoder::{IcoEncoder, IcoFrame}; + +mod decoder; +mod encoder; diff --git a/vendor/image/src/codecs/jpeg/decoder.rs b/vendor/image/src/codecs/jpeg/decoder.rs new file mode 100644 index 0000000..9625e33 --- /dev/null +++ b/vendor/image/src/codecs/jpeg/decoder.rs @@ -0,0 +1,1289 @@ +use std::convert::TryFrom; +use std::io::{self, Cursor, Read}; +use std::marker::PhantomData; +use std::mem; + +use crate::color::ColorType; +use crate::error::{ + DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind, +}; +use crate::image::{ImageDecoder, ImageFormat}; + +/// JPEG decoder +pub struct JpegDecoder<R> { + decoder: jpeg::Decoder<R>, + metadata: jpeg::ImageInfo, +} + +impl<R: Read> JpegDecoder<R> { + /// Create a new decoder that decodes from the stream ```r``` + pub fn new(r: R) -> ImageResult<JpegDecoder<R>> { + let mut decoder = jpeg::Decoder::new(r); + + decoder.read_info().map_err(ImageError::from_jpeg)?; + let mut metadata = decoder.info().ok_or_else(|| { + ImageError::Decoding(DecodingError::from_format_hint(ImageFormat::Jpeg.into())) + })?; + + // We convert CMYK data to RGB before returning it to the user. + if metadata.pixel_format == jpeg::PixelFormat::CMYK32 { + metadata.pixel_format = jpeg::PixelFormat::RGB24; + } + + Ok(JpegDecoder { decoder, metadata }) + } + + /// Configure the decoder to scale the image during decoding. + /// + /// This efficiently scales the image by the smallest supported + /// scale factor that produces an image larger than or equal to + /// the requested size in at least one axis. The currently + /// implemented scale factors are 1/8, 1/4, 1/2 and 1. + /// + /// To generate a thumbnail of an exact size, pass the desired + /// size and then scale to the final size using a traditional + /// resampling algorithm. 
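+ ///
+ /// For example, requesting a 200x200 thumbnail of a 1600x1200 JPEG selects
+ /// the 1/8 factor and decodes a 200x150 image, which can then be resized
+ /// exactly with a conventional filter.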
+ /// + /// The size of the image to be loaded, with the scale factor + /// applied, is returned. + pub fn scale( + &mut self, + requested_width: u16, + requested_height: u16, + ) -> ImageResult<(u16, u16)> { + let result = self + .decoder + .scale(requested_width, requested_height) + .map_err(ImageError::from_jpeg)?; + + self.metadata.width = result.0; + self.metadata.height = result.1; + + Ok(result) + } +} + +/// Wrapper struct around a `Cursor<Vec<u8>>` +pub struct JpegReader<R>(Cursor<Vec<u8>>, PhantomData<R>); +impl<R> Read for JpegReader<R> { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { + if self.0.position() == 0 && buf.is_empty() { + mem::swap(buf, self.0.get_mut()); + Ok(buf.len()) + } else { + self.0.read_to_end(buf) + } + } +} + +impl<'a, R: 'a + Read> ImageDecoder<'a> for JpegDecoder<R> { + type Reader = JpegReader<R>; + + fn dimensions(&self) -> (u32, u32) { + ( + u32::from(self.metadata.width), + u32::from(self.metadata.height), + ) + } + + fn color_type(&self) -> ColorType { + ColorType::from_jpeg(self.metadata.pixel_format) + } + + fn icc_profile(&mut self) -> Option<Vec<u8>> { + self.decoder.icc_profile() + } + + fn into_reader(mut self) -> ImageResult<Self::Reader> { + let mut data = self.decoder.decode().map_err(ImageError::from_jpeg)?; + data = match self.decoder.info().unwrap().pixel_format { + jpeg::PixelFormat::CMYK32 => cmyk_to_rgb(&data), + _ => data, + }; + + Ok(JpegReader(Cursor::new(data), PhantomData)) + } + + fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> { + assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); + + let mut data = self.decoder.decode().map_err(ImageError::from_jpeg)?; + data = match self.decoder.info().unwrap().pixel_format { + jpeg::PixelFormat::CMYK32 => cmyk_to_rgb(&data), + _ => data, + }; + + buf.copy_from_slice(&data); + Ok(()) + } +} + +fn cmyk_to_rgb(input: &[u8]) -> Vec<u8> { + let count = input.len() / 4; + let mut output = vec![0; 3 * count]; + + let in_pixels = input[..4 * count].chunks_exact(4); + let out_pixels = output[..3 * count].chunks_exact_mut(3); + + for (pixel, outp) in in_pixels.zip(out_pixels) { + let c = 255 - u16::from(pixel[0]); + let m = 255 - u16::from(pixel[1]); + let y = 255 - u16::from(pixel[2]); + let k = 255 - u16::from(pixel[3]); + // CMY -> RGB + let r = (k * c) / 255; + let g = (k * m) / 255; + let b = (k * y) / 255; + + outp[0] = r as u8; + outp[1] = g as u8; + outp[2] = b as u8; + } + + output +} + +impl ColorType { + fn from_jpeg(pixel_format: jpeg::PixelFormat) -> ColorType { + use jpeg::PixelFormat::*; + match pixel_format { + L8 => ColorType::L8, + L16 => ColorType::L16, + RGB24 => ColorType::Rgb8, + CMYK32 => panic!(), + } + } +} + +impl ImageError { + fn from_jpeg(err: jpeg::Error) -> ImageError { + use jpeg::Error::*; + match err { + err @ Format(_) => { + ImageError::Decoding(DecodingError::new(ImageFormat::Jpeg.into(), err)) + } + Unsupported(desc) => ImageError::Unsupported(UnsupportedError::from_format_and_kind( + ImageFormat::Jpeg.into(), + UnsupportedErrorKind::GenericFeature(format!("{:?}", desc)), + )), + Io(err) => ImageError::IoError(err), + Internal(err) => { + ImageError::Decoding(DecodingError::new(ImageFormat::Jpeg.into(), err)) + } + } + } +} + +#[cfg(test)] +mod tests { + #[cfg(feature = "benchmarks")] + extern crate test; + + use super::cmyk_to_rgb; + #[cfg(feature = "benchmarks")] + use test::Bencher; + + #[cfg(feature = "benchmarks")] + const W: usize = 
256; + #[cfg(feature = "benchmarks")] + const H: usize = 256; + + #[test] + fn cmyk_to_rgb_correct() { + for c in 0..=255 { + for k in 0..=255 { + // Based on R = 255 * (1-C/255) * (1-K/255) + let r = (255.0 - f32::from(c)) * (255.0 - f32::from(k)) / 255.0; + let r_u8 = r as u8; + let convert_r = cmyk_to_rgb(&[c, 0, 0, k])[0]; + let convert_g = cmyk_to_rgb(&[0, c, 0, k])[1]; + let convert_b = cmyk_to_rgb(&[0, 0, c, k])[2]; + + assert_eq!( + convert_r, r_u8, + "c = {}, k = {}, cymk_to_rgb[0] = {}, should be {}", + c, k, convert_r, r_u8 + ); + assert_eq!( + convert_g, r_u8, + "m = {}, k = {}, cymk_to_rgb[1] = {}, should be {}", + c, k, convert_g, r_u8 + ); + assert_eq!( + convert_b, r_u8, + "y = {}, k = {}, cymk_to_rgb[2] = {}, should be {}", + c, k, convert_b, r_u8 + ); + } + } + } + + fn single_pix_correct(cmyk_pix: [u8; 4], rgb_pix_true: [u8; 3]) { + let rgb_pix = cmyk_to_rgb(&cmyk_pix); + assert_eq!( + rgb_pix_true[0], rgb_pix[0], + "With CMYK {:?} expected {:?}, got {:?}", + cmyk_pix, rgb_pix_true, rgb_pix + ); + assert_eq!( + rgb_pix_true[1], rgb_pix[1], + "With CMYK {:?} expected {:?}, got {:?}", + cmyk_pix, rgb_pix_true, rgb_pix + ); + assert_eq!( + rgb_pix_true[2], rgb_pix[2], + "With CMYK {:?} expected {:?}, got {:?}", + cmyk_pix, rgb_pix_true, rgb_pix + ); + } + + #[test] + fn test_assorted_colors() { + let cmyk_pixels = vec![ + [0, 51, 102, 65], + [153, 204, 0, 65], + [0, 0, 0, 67], + [0, 85, 170, 69], + [0, 0, 0, 71], + [0, 0, 0, 73], + [0, 17, 34, 75], + [51, 68, 85, 75], + [102, 119, 136, 75], + [153, 170, 187, 75], + [204, 221, 238, 75], + [0, 0, 0, 77], + [0, 0, 0, 79], + [0, 85, 170, 81], + [0, 0, 0, 83], + [0, 3, 6, 85], + [9, 12, 15, 85], + [18, 21, 24, 85], + [27, 30, 33, 85], + [36, 39, 42, 85], + [45, 48, 51, 85], + [54, 57, 60, 85], + [63, 66, 69, 85], + [72, 75, 78, 85], + [81, 84, 87, 85], + [90, 93, 96, 85], + [99, 102, 105, 85], + [108, 111, 114, 85], + [117, 120, 123, 85], + [126, 129, 132, 85], + [135, 138, 141, 85], + [144, 147, 150, 85], + [153, 156, 159, 85], + [162, 165, 168, 85], + [171, 174, 177, 85], + [180, 183, 186, 85], + [189, 192, 195, 85], + [198, 201, 204, 85], + [207, 210, 213, 85], + [216, 219, 222, 85], + [225, 228, 231, 85], + [234, 237, 240, 85], + [243, 246, 249, 85], + [252, 0, 0, 85], + [0, 85, 170, 87], + [0, 0, 0, 89], + [0, 0, 0, 91], + [0, 85, 170, 93], + [0, 51, 102, 95], + [153, 204, 0, 95], + [0, 0, 0, 97], + [0, 85, 170, 99], + [0, 0, 0, 101], + [0, 0, 0, 103], + [0, 17, 34, 105], + [51, 68, 85, 105], + [102, 119, 136, 105], + [153, 170, 187, 105], + [204, 221, 238, 105], + [0, 0, 0, 107], + [0, 0, 0, 109], + [0, 85, 170, 111], + [0, 0, 0, 113], + [0, 51, 102, 115], + [153, 204, 0, 115], + [0, 85, 170, 117], + [0, 15, 30, 119], + [45, 60, 75, 119], + [90, 105, 120, 119], + [135, 150, 165, 119], + [180, 195, 210, 119], + [225, 240, 0, 119], + [0, 0, 0, 121], + [0, 85, 170, 123], + [0, 51, 102, 125], + [153, 204, 0, 125], + [0, 0, 0, 127], + [0, 0, 0, 128], + [0, 85, 170, 129], + [0, 51, 102, 130], + [153, 204, 0, 130], + [0, 0, 0, 131], + [0, 85, 170, 132], + [0, 0, 0, 133], + [0, 0, 0, 134], + [0, 17, 34, 135], + [51, 68, 85, 135], + [102, 119, 136, 135], + [153, 170, 187, 135], + [204, 221, 238, 135], + [0, 15, 30, 136], + [45, 60, 75, 136], + [90, 105, 120, 136], + [135, 150, 165, 136], + [180, 195, 210, 136], + [225, 240, 0, 136], + [0, 0, 0, 137], + [0, 85, 170, 138], + [0, 0, 0, 139], + [0, 51, 102, 140], + [153, 204, 0, 140], + [0, 85, 170, 141], + [0, 0, 0, 142], + [0, 0, 0, 143], + [0, 85, 170, 144], + [0, 51, 102, 145], + 
[153, 204, 0, 145], + [0, 0, 0, 146], + [0, 85, 170, 147], + [0, 0, 0, 148], + [0, 0, 0, 149], + [0, 17, 34, 150], + [51, 68, 85, 150], + [102, 119, 136, 150], + [153, 170, 187, 150], + [204, 221, 238, 150], + [0, 0, 0, 151], + [0, 0, 0, 152], + [0, 5, 10, 153], + [15, 20, 25, 153], + [30, 35, 40, 153], + [45, 50, 55, 153], + [60, 65, 70, 153], + [75, 80, 85, 153], + [90, 95, 100, 153], + [105, 110, 115, 153], + [120, 125, 130, 153], + [135, 140, 145, 153], + [150, 155, 160, 153], + [165, 170, 175, 153], + [180, 185, 190, 153], + [195, 200, 205, 153], + [210, 215, 220, 153], + [225, 230, 235, 153], + [240, 245, 250, 153], + [0, 0, 0, 154], + [0, 51, 102, 155], + [153, 204, 0, 155], + [0, 85, 170, 156], + [0, 0, 0, 157], + [0, 0, 0, 158], + [0, 85, 170, 159], + [0, 51, 102, 160], + [153, 204, 0, 160], + [0, 0, 0, 161], + [0, 85, 170, 162], + [0, 0, 0, 163], + [0, 0, 0, 164], + [0, 17, 34, 165], + [51, 68, 85, 165], + [102, 119, 136, 165], + [153, 170, 187, 165], + [204, 221, 238, 165], + [0, 0, 0, 166], + [0, 0, 0, 167], + [0, 85, 170, 168], + [0, 0, 0, 169], + [0, 3, 6, 170], + [9, 12, 15, 170], + [18, 21, 24, 170], + [27, 30, 33, 170], + [36, 39, 42, 170], + [45, 48, 51, 170], + [54, 57, 60, 170], + [63, 66, 69, 170], + [72, 75, 78, 170], + [81, 84, 87, 170], + [90, 93, 96, 170], + [99, 102, 105, 170], + [108, 111, 114, 170], + [117, 120, 123, 170], + [126, 129, 132, 170], + [135, 138, 141, 170], + [144, 147, 150, 170], + [153, 156, 159, 170], + [162, 165, 168, 170], + [171, 174, 177, 170], + [180, 183, 186, 170], + [189, 192, 195, 170], + [198, 201, 204, 170], + [207, 210, 213, 170], + [216, 219, 222, 170], + [225, 228, 231, 170], + [234, 237, 240, 170], + [243, 246, 249, 170], + [252, 0, 0, 170], + [0, 85, 170, 171], + [0, 0, 0, 172], + [0, 0, 0, 173], + [0, 85, 170, 174], + [0, 51, 102, 175], + [153, 204, 0, 175], + [0, 0, 0, 176], + [0, 85, 170, 177], + [0, 0, 0, 178], + [0, 0, 0, 179], + [0, 17, 34, 180], + [51, 68, 85, 180], + [102, 119, 136, 180], + [153, 170, 187, 180], + [204, 221, 238, 180], + [0, 0, 0, 181], + [0, 0, 0, 182], + [0, 85, 170, 183], + [0, 0, 0, 184], + [0, 51, 102, 185], + [153, 204, 0, 185], + [0, 85, 170, 186], + [0, 15, 30, 187], + [45, 60, 75, 187], + [90, 105, 120, 187], + [135, 150, 165, 187], + [180, 195, 210, 187], + [225, 240, 0, 187], + [0, 0, 0, 188], + [0, 85, 170, 189], + [0, 51, 102, 190], + [153, 204, 0, 190], + [0, 0, 0, 191], + [0, 85, 170, 192], + [0, 0, 0, 193], + [0, 0, 0, 194], + [0, 17, 34, 195], + [51, 68, 85, 195], + [102, 119, 136, 195], + [153, 170, 187, 195], + [204, 221, 238, 195], + [0, 0, 0, 196], + [0, 0, 0, 197], + [0, 85, 170, 198], + [0, 0, 0, 199], + [0, 51, 102, 200], + [153, 204, 0, 200], + [0, 85, 170, 201], + [0, 0, 0, 202], + [0, 0, 0, 203], + [0, 5, 10, 204], + [15, 20, 25, 204], + [30, 35, 40, 204], + [45, 50, 55, 204], + [60, 65, 70, 204], + [75, 80, 85, 204], + [90, 95, 100, 204], + [105, 110, 115, 204], + [120, 125, 130, 204], + [135, 140, 145, 204], + [150, 155, 160, 204], + [165, 170, 175, 204], + [180, 185, 190, 204], + [195, 200, 205, 204], + [210, 215, 220, 204], + [225, 230, 235, 204], + [240, 245, 250, 204], + [0, 51, 102, 205], + [153, 204, 0, 205], + [0, 0, 0, 206], + [0, 85, 170, 207], + [0, 0, 0, 208], + [0, 0, 0, 209], + [0, 17, 34, 210], + [51, 68, 85, 210], + [102, 119, 136, 210], + [153, 170, 187, 210], + [204, 221, 238, 210], + [0, 0, 0, 211], + [0, 0, 0, 212], + [0, 85, 170, 213], + [0, 0, 0, 214], + [0, 51, 102, 215], + [153, 204, 0, 215], + [0, 85, 170, 216], + [0, 0, 0, 217], + [0, 0, 0, 218], + [0, 
85, 170, 219], + [0, 51, 102, 220], + [153, 204, 0, 220], + [0, 15, 30, 221], + [45, 60, 75, 221], + [90, 105, 120, 221], + [135, 150, 165, 221], + [180, 195, 210, 221], + [225, 240, 0, 221], + [0, 85, 170, 222], + [0, 0, 0, 223], + [0, 0, 0, 224], + [0, 17, 34, 225], + [51, 68, 85, 225], + [102, 119, 136, 225], + [153, 170, 187, 225], + [204, 221, 238, 225], + [0, 0, 0, 226], + [0, 0, 0, 227], + [0, 85, 170, 228], + [0, 0, 0, 229], + [0, 51, 102, 230], + [153, 204, 0, 230], + [0, 85, 170, 231], + [0, 0, 0, 232], + [0, 0, 0, 233], + [0, 85, 170, 234], + [0, 51, 102, 235], + [153, 204, 0, 235], + [0, 0, 0, 236], + [0, 85, 170, 237], + [0, 15, 30, 238], + [45, 60, 75, 238], + [90, 105, 120, 238], + [135, 150, 165, 238], + [180, 195, 210, 238], + [225, 240, 0, 238], + [0, 0, 0, 239], + [0, 17, 34, 240], + [51, 68, 85, 240], + [102, 119, 136, 240], + [153, 170, 187, 240], + [204, 221, 238, 240], + [0, 0, 0, 241], + [0, 0, 0, 242], + [0, 85, 170, 243], + [0, 0, 0, 244], + [0, 51, 102, 245], + [153, 204, 0, 245], + [0, 85, 170, 246], + [0, 0, 0, 247], + [0, 0, 0, 248], + [0, 85, 170, 249], + [0, 51, 102, 250], + [153, 204, 0, 250], + [0, 0, 0, 251], + [0, 85, 170, 252], + [0, 0, 0, 253], + [0, 0, 0, 254], + [5, 15, 25, 102], + [35, 40, 45, 102], + [50, 55, 60, 102], + [65, 70, 75, 102], + [80, 85, 90, 102], + [95, 100, 105, 102], + [110, 115, 120, 102], + [125, 130, 135, 102], + [140, 145, 150, 102], + [155, 160, 165, 102], + [170, 175, 180, 102], + [185, 190, 195, 102], + [200, 205, 210, 102], + [215, 220, 225, 102], + [230, 235, 240, 102], + [245, 250, 0, 102], + [15, 45, 60, 68], + [75, 90, 105, 68], + [120, 135, 150, 68], + [165, 180, 195, 68], + [210, 225, 240, 68], + [17, 34, 51, 45], + [68, 85, 102, 45], + [119, 136, 153, 45], + [170, 187, 204, 45], + [221, 238, 0, 45], + [17, 51, 68, 60], + [85, 102, 119, 60], + [136, 153, 170, 60], + [187, 204, 221, 60], + [238, 0, 0, 60], + [17, 34, 51, 90], + [68, 85, 102, 90], + [119, 136, 153, 90], + [170, 187, 204, 90], + [221, 238, 0, 90], + [17, 34, 51, 120], + [68, 85, 102, 120], + [119, 136, 153, 120], + [170, 187, 204, 120], + [221, 238, 0, 120], + [20, 25, 30, 51], + [35, 40, 45, 51], + [50, 55, 60, 51], + [65, 70, 75, 51], + [80, 85, 90, 51], + [95, 100, 105, 51], + [110, 115, 120, 51], + [125, 130, 135, 51], + [140, 145, 150, 51], + [155, 160, 165, 51], + [170, 175, 180, 51], + [185, 190, 195, 51], + [200, 205, 210, 51], + [215, 220, 225, 51], + [230, 235, 240, 51], + [245, 250, 0, 51], + [45, 60, 75, 17], + [90, 105, 120, 17], + [135, 150, 165, 17], + [180, 195, 210, 17], + [225, 240, 0, 17], + [45, 75, 90, 34], + [105, 120, 135, 34], + [150, 165, 180, 34], + [195, 210, 225, 34], + [240, 0, 0, 34], + [51, 153, 204, 20], + [51, 102, 153, 25], + [204, 0, 0, 25], + [51, 85, 119, 30], + [136, 153, 170, 30], + [187, 204, 221, 30], + [238, 0, 0, 30], + [51, 102, 153, 35], + [204, 0, 0, 35], + [51, 102, 153, 40], + [204, 0, 0, 40], + [51, 102, 153, 50], + [204, 0, 0, 50], + [51, 102, 153, 55], + [204, 0, 0, 55], + [51, 102, 153, 70], + [204, 0, 0, 70], + [51, 102, 153, 80], + [204, 0, 0, 80], + [51, 102, 153, 100], + [204, 0, 0, 100], + [51, 102, 153, 110], + [204, 0, 0, 110], + [65, 67, 69, 0], + [71, 73, 75, 0], + [77, 79, 81, 0], + [83, 85, 87, 0], + [89, 91, 93, 0], + [95, 97, 99, 0], + [101, 103, 105, 0], + [107, 109, 111, 0], + [113, 115, 117, 0], + [119, 121, 123, 0], + [125, 127, 128, 0], + [129, 130, 131, 0], + [132, 133, 134, 0], + [135, 136, 137, 0], + [138, 139, 140, 0], + [141, 142, 143, 0], + [144, 145, 146, 0], + [147, 148, 149, 
0], + [150, 151, 152, 0], + [153, 154, 155, 0], + [156, 157, 158, 0], + [159, 160, 161, 0], + [162, 163, 164, 0], + [165, 166, 167, 0], + [168, 169, 170, 0], + [171, 172, 173, 0], + [174, 175, 176, 0], + [177, 178, 179, 0], + [180, 181, 182, 0], + [183, 184, 185, 0], + [186, 187, 188, 0], + [189, 190, 191, 0], + [192, 193, 194, 0], + [195, 196, 197, 0], + [198, 199, 200, 0], + [201, 202, 203, 0], + [204, 205, 206, 0], + [207, 208, 209, 0], + [210, 211, 212, 0], + [213, 214, 215, 0], + [216, 217, 218, 0], + [219, 220, 221, 0], + [222, 223, 224, 0], + [225, 226, 227, 0], + [228, 229, 230, 0], + [231, 232, 233, 0], + [234, 235, 236, 0], + [237, 238, 239, 0], + [240, 241, 242, 0], + [243, 244, 245, 0], + [246, 247, 248, 0], + [249, 250, 251, 0], + [252, 253, 254, 0], + [68, 85, 102, 15], + [119, 136, 153, 15], + [170, 187, 204, 15], + [221, 238, 0, 15], + [85, 170, 0, 3], + [85, 170, 0, 6], + [85, 170, 0, 9], + [85, 170, 0, 12], + [85, 170, 0, 18], + [85, 170, 0, 21], + [85, 170, 0, 24], + [85, 170, 0, 27], + [85, 170, 0, 33], + [85, 170, 0, 36], + [85, 170, 0, 39], + [85, 170, 0, 42], + [85, 170, 0, 48], + [85, 170, 0, 54], + [85, 170, 0, 57], + [85, 170, 0, 63], + [85, 170, 0, 66], + [85, 170, 0, 72], + [85, 170, 0, 78], + [85, 170, 0, 84], + [85, 170, 0, 96], + [85, 170, 0, 108], + [85, 170, 0, 114], + [85, 170, 0, 126], + [102, 153, 204, 5], + [153, 204, 0, 10], + ]; + let rgb_pixels = vec![ + [190, 152, 114], + [76, 38, 190], + [188, 188, 188], + [186, 124, 62], + [184, 184, 184], + [182, 182, 182], + [180, 168, 156], + [144, 132, 120], + [108, 96, 84], + [72, 60, 48], + [36, 24, 12], + [178, 178, 178], + [176, 176, 176], + [174, 116, 58], + [172, 172, 172], + [170, 168, 166], + [164, 162, 160], + [158, 156, 154], + [152, 150, 148], + [146, 144, 142], + [140, 138, 136], + [134, 132, 130], + [128, 126, 124], + [122, 120, 118], + [116, 114, 112], + [110, 108, 106], + [104, 102, 100], + [98, 96, 94], + [92, 90, 88], + [86, 84, 82], + [80, 78, 76], + [74, 72, 70], + [68, 66, 64], + [62, 60, 58], + [56, 54, 52], + [50, 48, 46], + [44, 42, 40], + [38, 36, 34], + [32, 30, 28], + [26, 24, 22], + [20, 18, 16], + [14, 12, 10], + [8, 6, 4], + [2, 170, 170], + [168, 112, 56], + [166, 166, 166], + [164, 164, 164], + [162, 108, 54], + [160, 128, 96], + [64, 32, 160], + [158, 158, 158], + [156, 104, 52], + [154, 154, 154], + [152, 152, 152], + [150, 140, 130], + [120, 110, 100], + [90, 80, 70], + [60, 50, 40], + [30, 20, 10], + [148, 148, 148], + [146, 146, 146], + [144, 96, 48], + [142, 142, 142], + [140, 112, 84], + [56, 28, 140], + [138, 92, 46], + [136, 128, 120], + [112, 104, 96], + [88, 80, 72], + [64, 56, 48], + [40, 32, 24], + [16, 8, 136], + [134, 134, 134], + [132, 88, 44], + [130, 104, 78], + [52, 26, 130], + [128, 128, 128], + [127, 127, 127], + [126, 84, 42], + [125, 100, 75], + [50, 25, 125], + [124, 124, 124], + [123, 82, 41], + [122, 122, 122], + [121, 121, 121], + [120, 112, 104], + [96, 88, 80], + [72, 64, 56], + [48, 40, 32], + [24, 16, 8], + [119, 112, 105], + [98, 91, 84], + [77, 70, 63], + [56, 49, 42], + [35, 28, 21], + [14, 7, 119], + [118, 118, 118], + [117, 78, 39], + [116, 116, 116], + [115, 92, 69], + [46, 23, 115], + [114, 76, 38], + [113, 113, 113], + [112, 112, 112], + [111, 74, 37], + [110, 88, 66], + [44, 22, 110], + [109, 109, 109], + [108, 72, 36], + [107, 107, 107], + [106, 106, 106], + [105, 98, 91], + [84, 77, 70], + [63, 56, 49], + [42, 35, 28], + [21, 14, 7], + [104, 104, 104], + [103, 103, 103], + [102, 100, 98], + [96, 94, 92], + [90, 88, 86], + [84, 82, 80], + 
[78, 76, 74], + [72, 70, 68], + [66, 64, 62], + [60, 58, 56], + [54, 52, 50], + [48, 46, 44], + [42, 40, 38], + [36, 34, 32], + [30, 28, 26], + [24, 22, 20], + [18, 16, 14], + [12, 10, 8], + [6, 4, 2], + [101, 101, 101], + [100, 80, 60], + [40, 20, 100], + [99, 66, 33], + [98, 98, 98], + [97, 97, 97], + [96, 64, 32], + [95, 76, 57], + [38, 19, 95], + [94, 94, 94], + [93, 62, 31], + [92, 92, 92], + [91, 91, 91], + [90, 84, 78], + [72, 66, 60], + [54, 48, 42], + [36, 30, 24], + [18, 12, 6], + [89, 89, 89], + [88, 88, 88], + [87, 58, 29], + [86, 86, 86], + [85, 84, 83], + [82, 81, 80], + [79, 78, 77], + [76, 75, 74], + [73, 72, 71], + [70, 69, 68], + [67, 66, 65], + [64, 63, 62], + [61, 60, 59], + [58, 57, 56], + [55, 54, 53], + [52, 51, 50], + [49, 48, 47], + [46, 45, 44], + [43, 42, 41], + [40, 39, 38], + [37, 36, 35], + [34, 33, 32], + [31, 30, 29], + [28, 27, 26], + [25, 24, 23], + [22, 21, 20], + [19, 18, 17], + [16, 15, 14], + [13, 12, 11], + [10, 9, 8], + [7, 6, 5], + [4, 3, 2], + [1, 85, 85], + [84, 56, 28], + [83, 83, 83], + [82, 82, 82], + [81, 54, 27], + [80, 64, 48], + [32, 16, 80], + [79, 79, 79], + [78, 52, 26], + [77, 77, 77], + [76, 76, 76], + [75, 70, 65], + [60, 55, 50], + [45, 40, 35], + [30, 25, 20], + [15, 10, 5], + [74, 74, 74], + [73, 73, 73], + [72, 48, 24], + [71, 71, 71], + [70, 56, 42], + [28, 14, 70], + [69, 46, 23], + [68, 64, 60], + [56, 52, 48], + [44, 40, 36], + [32, 28, 24], + [20, 16, 12], + [8, 4, 68], + [67, 67, 67], + [66, 44, 22], + [65, 52, 39], + [26, 13, 65], + [64, 64, 64], + [63, 42, 21], + [62, 62, 62], + [61, 61, 61], + [60, 56, 52], + [48, 44, 40], + [36, 32, 28], + [24, 20, 16], + [12, 8, 4], + [59, 59, 59], + [58, 58, 58], + [57, 38, 19], + [56, 56, 56], + [55, 44, 33], + [22, 11, 55], + [54, 36, 18], + [53, 53, 53], + [52, 52, 52], + [51, 50, 49], + [48, 47, 46], + [45, 44, 43], + [42, 41, 40], + [39, 38, 37], + [36, 35, 34], + [33, 32, 31], + [30, 29, 28], + [27, 26, 25], + [24, 23, 22], + [21, 20, 19], + [18, 17, 16], + [15, 14, 13], + [12, 11, 10], + [9, 8, 7], + [6, 5, 4], + [3, 2, 1], + [50, 40, 30], + [20, 10, 50], + [49, 49, 49], + [48, 32, 16], + [47, 47, 47], + [46, 46, 46], + [45, 42, 39], + [36, 33, 30], + [27, 24, 21], + [18, 15, 12], + [9, 6, 3], + [44, 44, 44], + [43, 43, 43], + [42, 28, 14], + [41, 41, 41], + [40, 32, 24], + [16, 8, 40], + [39, 26, 13], + [38, 38, 38], + [37, 37, 37], + [36, 24, 12], + [35, 28, 21], + [14, 7, 35], + [34, 32, 30], + [28, 26, 24], + [22, 20, 18], + [16, 14, 12], + [10, 8, 6], + [4, 2, 34], + [33, 22, 11], + [32, 32, 32], + [31, 31, 31], + [30, 28, 26], + [24, 22, 20], + [18, 16, 14], + [12, 10, 8], + [6, 4, 2], + [29, 29, 29], + [28, 28, 28], + [27, 18, 9], + [26, 26, 26], + [25, 20, 15], + [10, 5, 25], + [24, 16, 8], + [23, 23, 23], + [22, 22, 22], + [21, 14, 7], + [20, 16, 12], + [8, 4, 20], + [19, 19, 19], + [18, 12, 6], + [17, 16, 15], + [14, 13, 12], + [11, 10, 9], + [8, 7, 6], + [5, 4, 3], + [2, 1, 17], + [16, 16, 16], + [15, 14, 13], + [12, 11, 10], + [9, 8, 7], + [6, 5, 4], + [3, 2, 1], + [14, 14, 14], + [13, 13, 13], + [12, 8, 4], + [11, 11, 11], + [10, 8, 6], + [4, 2, 10], + [9, 6, 3], + [8, 8, 8], + [7, 7, 7], + [6, 4, 2], + [5, 4, 3], + [2, 1, 5], + [4, 4, 4], + [3, 2, 1], + [2, 2, 2], + [1, 1, 1], + [150, 144, 138], + [132, 129, 126], + [123, 120, 117], + [114, 111, 108], + [105, 102, 99], + [96, 93, 90], + [87, 84, 81], + [78, 75, 72], + [69, 66, 63], + [60, 57, 54], + [51, 48, 45], + [42, 39, 36], + [33, 30, 27], + [24, 21, 18], + [15, 12, 9], + [6, 3, 153], + [176, 154, 143], + 
[132, 121, 110], + [99, 88, 77], + [66, 55, 44], + [33, 22, 11], + [196, 182, 168], + [154, 140, 126], + [112, 98, 84], + [70, 56, 42], + [28, 14, 210], + [182, 156, 143], + [130, 117, 104], + [91, 78, 65], + [52, 39, 26], + [13, 195, 195], + [154, 143, 132], + [121, 110, 99], + [88, 77, 66], + [55, 44, 33], + [22, 11, 165], + [126, 117, 108], + [99, 90, 81], + [72, 63, 54], + [45, 36, 27], + [18, 9, 135], + [188, 184, 180], + [176, 172, 168], + [164, 160, 156], + [152, 148, 144], + [140, 136, 132], + [128, 124, 120], + [116, 112, 108], + [104, 100, 96], + [92, 88, 84], + [80, 76, 72], + [68, 64, 60], + [56, 52, 48], + [44, 40, 36], + [32, 28, 24], + [20, 16, 12], + [8, 4, 204], + [196, 182, 168], + [154, 140, 126], + [112, 98, 84], + [70, 56, 42], + [28, 14, 238], + [182, 156, 143], + [130, 117, 104], + [91, 78, 65], + [52, 39, 26], + [13, 221, 221], + [188, 94, 47], + [184, 138, 92], + [46, 230, 230], + [180, 150, 120], + [105, 90, 75], + [60, 45, 30], + [15, 225, 225], + [176, 132, 88], + [44, 220, 220], + [172, 129, 86], + [43, 215, 215], + [164, 123, 82], + [41, 205, 205], + [160, 120, 80], + [40, 200, 200], + [148, 111, 74], + [37, 185, 185], + [140, 105, 70], + [35, 175, 175], + [124, 93, 62], + [31, 155, 155], + [116, 87, 58], + [29, 145, 145], + [190, 188, 186], + [184, 182, 180], + [178, 176, 174], + [172, 170, 168], + [166, 164, 162], + [160, 158, 156], + [154, 152, 150], + [148, 146, 144], + [142, 140, 138], + [136, 134, 132], + [130, 128, 127], + [126, 125, 124], + [123, 122, 121], + [120, 119, 118], + [117, 116, 115], + [114, 113, 112], + [111, 110, 109], + [108, 107, 106], + [105, 104, 103], + [102, 101, 100], + [99, 98, 97], + [96, 95, 94], + [93, 92, 91], + [90, 89, 88], + [87, 86, 85], + [84, 83, 82], + [81, 80, 79], + [78, 77, 76], + [75, 74, 73], + [72, 71, 70], + [69, 68, 67], + [66, 65, 64], + [63, 62, 61], + [60, 59, 58], + [57, 56, 55], + [54, 53, 52], + [51, 50, 49], + [48, 47, 46], + [45, 44, 43], + [42, 41, 40], + [39, 38, 37], + [36, 35, 34], + [33, 32, 31], + [30, 29, 28], + [27, 26, 25], + [24, 23, 22], + [21, 20, 19], + [18, 17, 16], + [15, 14, 13], + [12, 11, 10], + [9, 8, 7], + [6, 5, 4], + [3, 2, 1], + [176, 160, 144], + [128, 112, 96], + [80, 64, 48], + [32, 16, 240], + [168, 84, 252], + [166, 83, 249], + [164, 82, 246], + [162, 81, 243], + [158, 79, 237], + [156, 78, 234], + [154, 77, 231], + [152, 76, 228], + [148, 74, 222], + [146, 73, 219], + [144, 72, 216], + [142, 71, 213], + [138, 69, 207], + [134, 67, 201], + [132, 66, 198], + [128, 64, 192], + [126, 63, 189], + [122, 61, 183], + [118, 59, 177], + [114, 57, 171], + [106, 53, 159], + [98, 49, 147], + [94, 47, 141], + [86, 43, 129], + [150, 100, 50], + [98, 49, 245], + ]; + for (&cmyk_pixel, rgb_pixel) in cmyk_pixels.iter().zip(rgb_pixels) { + single_pix_correct(cmyk_pixel, rgb_pixel); + } + } + + #[cfg(feature = "benchmarks")] + #[bench] + fn bench_cmyk_to_rgb(b: &mut Bencher) { + let mut v = Vec::with_capacity((W * H * 4) as usize); + for c in 0..=255 { + for k in 0..=255 { + v.push(c as u8); + v.push(0); + v.push(0); + v.push(k as u8); + } + } + + b.iter(|| { + cmyk_to_rgb(&v); + }); + } + + #[cfg(feature = "benchmarks")] + #[bench] + fn bench_cmyk_to_rgb_single(b: &mut Bencher) { + b.iter(|| { + cmyk_to_rgb(&[128, 128, 128, 128]); + }); + } +} diff --git a/vendor/image/src/codecs/jpeg/encoder.rs b/vendor/image/src/codecs/jpeg/encoder.rs new file mode 100644 index 0000000..edb2a05 --- /dev/null +++ b/vendor/image/src/codecs/jpeg/encoder.rs @@ -0,0 +1,1074 @@ +#![allow(clippy::too_many_arguments)] 
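+//! Encoding of JPEG images.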
+ +use std::borrow::Cow; +use std::convert::TryFrom; +use std::io::{self, Write}; + +use crate::error::{ + ImageError, ImageResult, ParameterError, ParameterErrorKind, UnsupportedError, + UnsupportedErrorKind, +}; +use crate::image::{ImageEncoder, ImageFormat}; +use crate::utils::clamp; +use crate::{ColorType, GenericImageView, ImageBuffer, Luma, LumaA, Pixel, Rgb, Rgba}; + +use super::entropy::build_huff_lut_const; +use super::transform; +use crate::traits::PixelWithColorType; + +// Markers +// Baseline DCT +static SOF0: u8 = 0xC0; +// Huffman Tables +static DHT: u8 = 0xC4; +// Start of Image (standalone) +static SOI: u8 = 0xD8; +// End of image (standalone) +static EOI: u8 = 0xD9; +// Start of Scan +static SOS: u8 = 0xDA; +// Quantization Tables +static DQT: u8 = 0xDB; +// Application segments start and end +static APP0: u8 = 0xE0; + +// section K.1 +// table K.1 +#[rustfmt::skip] +static STD_LUMA_QTABLE: [u8; 64] = [ + 16, 11, 10, 16, 24, 40, 51, 61, + 12, 12, 14, 19, 26, 58, 60, 55, + 14, 13, 16, 24, 40, 57, 69, 56, + 14, 17, 22, 29, 51, 87, 80, 62, + 18, 22, 37, 56, 68, 109, 103, 77, + 24, 35, 55, 64, 81, 104, 113, 92, + 49, 64, 78, 87, 103, 121, 120, 101, + 72, 92, 95, 98, 112, 100, 103, 99, +]; + +// table K.2 +#[rustfmt::skip] +static STD_CHROMA_QTABLE: [u8; 64] = [ + 17, 18, 24, 47, 99, 99, 99, 99, + 18, 21, 26, 66, 99, 99, 99, 99, + 24, 26, 56, 99, 99, 99, 99, 99, + 47, 66, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, +]; + +// section K.3 +// Code lengths and values for table K.3 +static STD_LUMA_DC_CODE_LENGTHS: [u8; 16] = [ + 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +]; + +static STD_LUMA_DC_VALUES: [u8; 12] = [ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, +]; + +static STD_LUMA_DC_HUFF_LUT: [(u8, u16); 256] = + build_huff_lut_const(&STD_LUMA_DC_CODE_LENGTHS, &STD_LUMA_DC_VALUES); + +// Code lengths and values for table K.4 +static STD_CHROMA_DC_CODE_LENGTHS: [u8; 16] = [ + 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, +]; + +static STD_CHROMA_DC_VALUES: [u8; 12] = [ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, +]; + +static STD_CHROMA_DC_HUFF_LUT: [(u8, u16); 256] = + build_huff_lut_const(&STD_CHROMA_DC_CODE_LENGTHS, &STD_CHROMA_DC_VALUES); + +// Code lengths and values for table k.5 +static STD_LUMA_AC_CODE_LENGTHS: [u8; 16] = [ + 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D, +]; + +static STD_LUMA_AC_VALUES: [u8; 162] = [ + 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, + 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, + 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, + 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, + 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, + 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, + 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, + 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 
0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2, + 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, + 0xF9, 0xFA, +]; + +static STD_LUMA_AC_HUFF_LUT: [(u8, u16); 256] = + build_huff_lut_const(&STD_LUMA_AC_CODE_LENGTHS, &STD_LUMA_AC_VALUES); + +// Code lengths and values for table k.6 +static STD_CHROMA_AC_CODE_LENGTHS: [u8; 16] = [ + 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77, +]; +static STD_CHROMA_AC_VALUES: [u8; 162] = [ + 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, + 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, + 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, + 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, + 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, + 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, + 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, + 0xF9, 0xFA, +]; + +static STD_CHROMA_AC_HUFF_LUT: [(u8, u16); 256] = + build_huff_lut_const(&STD_CHROMA_AC_CODE_LENGTHS, &STD_CHROMA_AC_VALUES); + +static DCCLASS: u8 = 0; +static ACCLASS: u8 = 1; + +static LUMADESTINATION: u8 = 0; +static CHROMADESTINATION: u8 = 1; + +static LUMAID: u8 = 1; +static CHROMABLUEID: u8 = 2; +static CHROMAREDID: u8 = 3; + +/// The permutation of dct coefficients. 
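+/// Maps a position in the zig-zag scan order to its row-major index within the 8x8 block.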
+#[rustfmt::skip] +static UNZIGZAG: [u8; 64] = [ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, +]; + +/// A representation of a JPEG component +#[derive(Copy, Clone)] +struct Component { + /// The Component's identifier + id: u8, + + /// Horizontal sampling factor + h: u8, + + /// Vertical sampling factor + v: u8, + + /// The quantization table selector + tq: u8, + + /// Index to the Huffman DC Table + dc_table: u8, + + /// Index to the AC Huffman Table + ac_table: u8, + + /// The dc prediction of the component + _dc_pred: i32, +} + +pub(crate) struct BitWriter<W> { + w: W, + accumulator: u32, + nbits: u8, +} + +impl<W: Write> BitWriter<W> { + fn new(w: W) -> Self { + BitWriter { + w, + accumulator: 0, + nbits: 0, + } + } + + fn write_bits(&mut self, bits: u16, size: u8) -> io::Result<()> { + if size == 0 { + return Ok(()); + } + + self.nbits += size; + self.accumulator |= u32::from(bits) << (32 - self.nbits) as usize; + + while self.nbits >= 8 { + let byte = self.accumulator >> 24; + self.w.write_all(&[byte as u8])?; + + if byte == 0xFF { + self.w.write_all(&[0x00])?; + } + + self.nbits -= 8; + self.accumulator <<= 8; + } + + Ok(()) + } + + fn pad_byte(&mut self) -> io::Result<()> { + self.write_bits(0x7F, 7) + } + + fn huffman_encode(&mut self, val: u8, table: &[(u8, u16); 256]) -> io::Result<()> { + let (size, code) = table[val as usize]; + + if size > 16 { + panic!("bad huffman value"); + } + + self.write_bits(code, size) + } + + fn write_block( + &mut self, + block: &[i32; 64], + prevdc: i32, + dctable: &[(u8, u16); 256], + actable: &[(u8, u16); 256], + ) -> io::Result<i32> { + // Differential DC encoding + let dcval = block[0]; + let diff = dcval - prevdc; + let (size, value) = encode_coefficient(diff); + + self.huffman_encode(size, dctable)?; + self.write_bits(value, size)?; + + // Figure F.2 + let mut zero_run = 0; + + for &k in &UNZIGZAG[1..] 
{ + if block[k as usize] == 0 { + zero_run += 1; + } else { + while zero_run > 15 { + self.huffman_encode(0xF0, actable)?; + zero_run -= 16; + } + + let (size, value) = encode_coefficient(block[k as usize]); + let symbol = (zero_run << 4) | size; + + self.huffman_encode(symbol, actable)?; + self.write_bits(value, size)?; + + zero_run = 0; + } + } + + if block[UNZIGZAG[63] as usize] == 0 { + self.huffman_encode(0x00, actable)?; + } + + Ok(dcval) + } + + fn write_marker(&mut self, marker: u8) -> io::Result<()> { + self.w.write_all(&[0xFF, marker]) + } + + fn write_segment(&mut self, marker: u8, data: &[u8]) -> io::Result<()> { + self.w.write_all(&[0xFF, marker])?; + self.w.write_all(&(data.len() as u16 + 2).to_be_bytes())?; + self.w.write_all(data) + } +} + +/// Represents a unit in which the density of an image is measured +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum PixelDensityUnit { + /// Represents the absence of a unit, the values indicate only a + /// [pixel aspect ratio](https://en.wikipedia.org/wiki/Pixel_aspect_ratio) + PixelAspectRatio, + + /// Pixels per inch (2.54 cm) + Inches, + + /// Pixels per centimeter + Centimeters, +} + +/// Represents the pixel density of an image +/// +/// For example, a 300 DPI image is represented by: +/// +/// ```rust +/// use image::codecs::jpeg::*; +/// let hdpi = PixelDensity::dpi(300); +/// assert_eq!(hdpi, PixelDensity {density: (300,300), unit: PixelDensityUnit::Inches}) +/// ``` +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct PixelDensity { + /// A couple of values for (Xdensity, Ydensity) + pub density: (u16, u16), + /// The unit in which the density is measured + pub unit: PixelDensityUnit, +} + +impl PixelDensity { + /// Creates the most common pixel density type: + /// the horizontal and the vertical density are equal, + /// and measured in pixels per inch. + pub fn dpi(density: u16) -> Self { + PixelDensity { + density: (density, density), + unit: PixelDensityUnit::Inches, + } + } +} + +impl Default for PixelDensity { + /// Returns a pixel density with a pixel aspect ratio of 1 + fn default() -> Self { + PixelDensity { + density: (1, 1), + unit: PixelDensityUnit::PixelAspectRatio, + } + } +} + +/// The representation of a JPEG encoder +pub struct JpegEncoder<W> { + writer: BitWriter<W>, + + components: Vec<Component>, + tables: Vec<[u8; 64]>, + + luma_dctable: Cow<'static, [(u8, u16); 256]>, + luma_actable: Cow<'static, [(u8, u16); 256]>, + chroma_dctable: Cow<'static, [(u8, u16); 256]>, + chroma_actable: Cow<'static, [(u8, u16); 256]>, + + pixel_density: PixelDensity, +} + +impl<W: Write> JpegEncoder<W> { + /// Create a new encoder that writes its output to ```w``` + pub fn new(w: W) -> JpegEncoder<W> { + JpegEncoder::new_with_quality(w, 75) + } + + /// Create a new encoder that writes its output to ```w```, and has + /// the quality parameter ```quality``` with a value in the range 1-100 + /// where 1 is the worst and 100 is the best. 
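+ ///
+ /// A minimal sketch writing a quality-90 JPEG into an in-memory buffer (the
+ /// 8x8 black RGB image is made up for illustration):
+ ///
+ /// ```no_run
+ /// use image::codecs::jpeg::JpegEncoder;
+ /// use image::ColorType;
+ ///
+ /// let rgb = vec![0u8; 8 * 8 * 3];
+ /// let mut out = Vec::new();
+ /// let mut encoder = JpegEncoder::new_with_quality(&mut out, 90);
+ /// encoder.encode(&rgb, 8, 8, ColorType::Rgb8).expect("8x8 RGB should encode");
+ /// ```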
+ pub fn new_with_quality(w: W, quality: u8) -> JpegEncoder<W> { + let components = vec![ + Component { + id: LUMAID, + h: 1, + v: 1, + tq: LUMADESTINATION, + dc_table: LUMADESTINATION, + ac_table: LUMADESTINATION, + _dc_pred: 0, + }, + Component { + id: CHROMABLUEID, + h: 1, + v: 1, + tq: CHROMADESTINATION, + dc_table: CHROMADESTINATION, + ac_table: CHROMADESTINATION, + _dc_pred: 0, + }, + Component { + id: CHROMAREDID, + h: 1, + v: 1, + tq: CHROMADESTINATION, + dc_table: CHROMADESTINATION, + ac_table: CHROMADESTINATION, + _dc_pred: 0, + }, + ]; + + // Derive our quantization table scaling value using the libjpeg algorithm + let scale = u32::from(clamp(quality, 1, 100)); + let scale = if scale < 50 { + 5000 / scale + } else { + 200 - scale * 2 + }; + + let mut tables = vec![STD_LUMA_QTABLE, STD_CHROMA_QTABLE]; + tables.iter_mut().for_each(|t| { + t.iter_mut().for_each(|v| { + *v = clamp( + (u32::from(*v) * scale + 50) / 100, + 1, + u32::from(u8::max_value()), + ) as u8; + }) + }); + + JpegEncoder { + writer: BitWriter::new(w), + + components, + tables, + + luma_dctable: Cow::Borrowed(&STD_LUMA_DC_HUFF_LUT), + luma_actable: Cow::Borrowed(&STD_LUMA_AC_HUFF_LUT), + chroma_dctable: Cow::Borrowed(&STD_CHROMA_DC_HUFF_LUT), + chroma_actable: Cow::Borrowed(&STD_CHROMA_AC_HUFF_LUT), + + pixel_density: PixelDensity::default(), + } + } + + /// Set the pixel density of the images the encoder will encode. + /// If this method is not called, then a default pixel aspect ratio of 1x1 will be applied, + /// and no DPI information will be stored in the image. + pub fn set_pixel_density(&mut self, pixel_density: PixelDensity) { + self.pixel_density = pixel_density; + } + + /// Encodes the image stored in the raw byte buffer ```image``` + /// that has dimensions ```width``` and ```height``` + /// and ```ColorType``` ```c``` + /// + /// The Image in encoded with subsampling ratio 4:2:2 + pub fn encode( + &mut self, + image: &[u8], + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<()> { + match color_type { + ColorType::L8 => { + let image: ImageBuffer<Luma<_>, _> = + ImageBuffer::from_raw(width, height, image).unwrap(); + self.encode_image(&image) + } + ColorType::La8 => { + let image: ImageBuffer<LumaA<_>, _> = + ImageBuffer::from_raw(width, height, image).unwrap(); + self.encode_image(&image) + } + ColorType::Rgb8 => { + let image: ImageBuffer<Rgb<_>, _> = + ImageBuffer::from_raw(width, height, image).unwrap(); + self.encode_image(&image) + } + ColorType::Rgba8 => { + let image: ImageBuffer<Rgba<_>, _> = + ImageBuffer::from_raw(width, height, image).unwrap(); + self.encode_image(&image) + } + _ => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Jpeg.into(), + UnsupportedErrorKind::Color(color_type.into()), + ), + )), + } + } + + /// Encodes the given image. + /// + /// As a special feature this does not require the whole image to be present in memory at the + /// same time such that it may be computed on the fly, which is why this method exists on this + /// encoder but not on others. Instead the encoder will iterate over 8-by-8 blocks of pixels at + /// a time, inspecting each pixel exactly once. You can rely on this behaviour when calling + /// this method. 
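The `BitWriter` used throughout the encoder packs Huffman codes MSB-first into a 32-bit accumulator and stuffs a 0x00 byte after every emitted 0xFF so that the entropy-coded stream can never be mistaken for a marker. A self-contained sketch of that behaviour (illustrative names, writing into a `Vec<u8>` instead of a generic writer):

struct Bits {
    out: Vec<u8>,
    acc: u32,
    nbits: u8,
}

impl Bits {
    fn write_bits(&mut self, bits: u16, size: u8) {
        if size == 0 {
            return;
        }
        self.nbits += size;
        self.acc |= u32::from(bits) << (32 - self.nbits);
        while self.nbits >= 8 {
            let byte = (self.acc >> 24) as u8;
            self.out.push(byte);
            if byte == 0xFF {
                // Stuff a zero so a decoder never sees a spurious marker prefix.
                self.out.push(0x00);
            }
            self.nbits -= 8;
            self.acc <<= 8;
        }
    }
}

fn main() {
    let mut b = Bits { out: Vec::new(), acc: 0, nbits: 0 };
    b.write_bits(0xFF, 8);  // a full 0xFF byte gets a stuffed 0x00 after it
    b.write_bits(0b101, 3); // three leftover bits stay in the accumulator
    assert_eq!(b.out, vec![0xFF, 0x00]);
    // `pad_byte` in the real encoder writes seven 1-bits to flush such leftovers.
}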
+ /// + /// The Image in encoded with subsampling ratio 4:2:2 + pub fn encode_image<I: GenericImageView>(&mut self, image: &I) -> ImageResult<()> + where + I::Pixel: PixelWithColorType, + { + let n = I::Pixel::CHANNEL_COUNT; + let color_type = I::Pixel::COLOR_TYPE; + let num_components = if n == 1 || n == 2 { 1 } else { 3 }; + + self.writer.write_marker(SOI)?; + + let mut buf = Vec::new(); + + build_jfif_header(&mut buf, self.pixel_density); + self.writer.write_segment(APP0, &buf)?; + + build_frame_header( + &mut buf, + 8, + // TODO: not idiomatic yet. Should be an EncodingError and mention jpg. Further it + // should check dimensions prior to writing. + u16::try_from(image.width()).map_err(|_| { + ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + )) + })?, + u16::try_from(image.height()).map_err(|_| { + ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + )) + })?, + &self.components[..num_components], + ); + self.writer.write_segment(SOF0, &buf)?; + + assert_eq!(self.tables.len(), 2); + let numtables = if num_components == 1 { 1 } else { 2 }; + + for (i, table) in self.tables[..numtables].iter().enumerate() { + build_quantization_segment(&mut buf, 8, i as u8, table); + self.writer.write_segment(DQT, &buf)?; + } + + build_huffman_segment( + &mut buf, + DCCLASS, + LUMADESTINATION, + &STD_LUMA_DC_CODE_LENGTHS, + &STD_LUMA_DC_VALUES, + ); + self.writer.write_segment(DHT, &buf)?; + + build_huffman_segment( + &mut buf, + ACCLASS, + LUMADESTINATION, + &STD_LUMA_AC_CODE_LENGTHS, + &STD_LUMA_AC_VALUES, + ); + self.writer.write_segment(DHT, &buf)?; + + if num_components == 3 { + build_huffman_segment( + &mut buf, + DCCLASS, + CHROMADESTINATION, + &STD_CHROMA_DC_CODE_LENGTHS, + &STD_CHROMA_DC_VALUES, + ); + self.writer.write_segment(DHT, &buf)?; + + build_huffman_segment( + &mut buf, + ACCLASS, + CHROMADESTINATION, + &STD_CHROMA_AC_CODE_LENGTHS, + &STD_CHROMA_AC_VALUES, + ); + self.writer.write_segment(DHT, &buf)?; + } + + build_scan_header(&mut buf, &self.components[..num_components]); + self.writer.write_segment(SOS, &buf)?; + + if color_type.has_color() { + self.encode_rgb(image) + } else { + self.encode_gray(image) + }?; + + self.writer.pad_byte()?; + self.writer.write_marker(EOI)?; + Ok(()) + } + + fn encode_gray<I: GenericImageView>(&mut self, image: &I) -> io::Result<()> { + let mut yblock = [0u8; 64]; + let mut y_dcprev = 0; + let mut dct_yblock = [0i32; 64]; + + for y in (0..image.height()).step_by(8) { + for x in (0..image.width()).step_by(8) { + copy_blocks_gray(image, x, y, &mut yblock); + + // Level shift and fdct + // Coeffs are scaled by 8 + transform::fdct(&yblock, &mut dct_yblock); + + // Quantization + for (i, dct) in dct_yblock.iter_mut().enumerate() { + *dct = ((*dct / 8) as f32 / f32::from(self.tables[0][i])).round() as i32; + } + + let la = &*self.luma_actable; + let ld = &*self.luma_dctable; + + y_dcprev = self.writer.write_block(&dct_yblock, y_dcprev, ld, la)?; + } + } + + Ok(()) + } + + fn encode_rgb<I: GenericImageView>(&mut self, image: &I) -> io::Result<()> { + let mut y_dcprev = 0; + let mut cb_dcprev = 0; + let mut cr_dcprev = 0; + + let mut dct_yblock = [0i32; 64]; + let mut dct_cb_block = [0i32; 64]; + let mut dct_cr_block = [0i32; 64]; + + let mut yblock = [0u8; 64]; + let mut cb_block = [0u8; 64]; + let mut cr_block = [0u8; 64]; + + for y in (0..image.height()).step_by(8) { + for x in (0..image.width()).step_by(8) { + // RGB -> YCbCr + copy_blocks_ycbcr(image, x, y, &mut yblock, &mut 
cb_block, &mut cr_block); + + // Level shift and fdct + // Coeffs are scaled by 8 + transform::fdct(&yblock, &mut dct_yblock); + transform::fdct(&cb_block, &mut dct_cb_block); + transform::fdct(&cr_block, &mut dct_cr_block); + + // Quantization + for i in 0usize..64 { + dct_yblock[i] = + ((dct_yblock[i] / 8) as f32 / f32::from(self.tables[0][i])).round() as i32; + dct_cb_block[i] = ((dct_cb_block[i] / 8) as f32 / f32::from(self.tables[1][i])) + .round() as i32; + dct_cr_block[i] = ((dct_cr_block[i] / 8) as f32 / f32::from(self.tables[1][i])) + .round() as i32; + } + + let la = &*self.luma_actable; + let ld = &*self.luma_dctable; + let cd = &*self.chroma_dctable; + let ca = &*self.chroma_actable; + + y_dcprev = self.writer.write_block(&dct_yblock, y_dcprev, ld, la)?; + cb_dcprev = self.writer.write_block(&dct_cb_block, cb_dcprev, cd, ca)?; + cr_dcprev = self.writer.write_block(&dct_cr_block, cr_dcprev, cd, ca)?; + } + } + + Ok(()) + } +} + +impl<W: Write> ImageEncoder for JpegEncoder<W> { + fn write_image( + mut self, + buf: &[u8], + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<()> { + self.encode(buf, width, height, color_type) + } +} + +fn build_jfif_header(m: &mut Vec<u8>, density: PixelDensity) { + m.clear(); + m.extend_from_slice(b"JFIF"); + m.extend_from_slice(&[ + 0, + 0x01, + 0x02, + match density.unit { + PixelDensityUnit::PixelAspectRatio => 0x00, + PixelDensityUnit::Inches => 0x01, + PixelDensityUnit::Centimeters => 0x02, + }, + ]); + m.extend_from_slice(&density.density.0.to_be_bytes()); + m.extend_from_slice(&density.density.1.to_be_bytes()); + m.extend_from_slice(&[0, 0]); +} + +fn build_frame_header( + m: &mut Vec<u8>, + precision: u8, + width: u16, + height: u16, + components: &[Component], +) { + m.clear(); + + m.push(precision); + m.extend_from_slice(&height.to_be_bytes()); + m.extend_from_slice(&width.to_be_bytes()); + m.push(components.len() as u8); + + for &comp in components.iter() { + let hv = (comp.h << 4) | comp.v; + m.extend_from_slice(&[comp.id, hv, comp.tq]); + } +} + +fn build_scan_header(m: &mut Vec<u8>, components: &[Component]) { + m.clear(); + + m.push(components.len() as u8); + + for &comp in components.iter() { + let tables = (comp.dc_table << 4) | comp.ac_table; + m.extend_from_slice(&[comp.id, tables]); + } + + // spectral start and end, approx. high and low + m.extend_from_slice(&[0, 63, 0]); +} + +fn build_huffman_segment( + m: &mut Vec<u8>, + class: u8, + destination: u8, + numcodes: &[u8; 16], + values: &[u8], +) { + m.clear(); + + let tcth = (class << 4) | destination; + m.push(tcth); + + m.extend_from_slice(numcodes); + + let sum: usize = numcodes.iter().map(|&x| x as usize).sum(); + + assert_eq!(sum, values.len()); + + m.extend_from_slice(values); +} + +fn build_quantization_segment(m: &mut Vec<u8>, precision: u8, identifier: u8, qtable: &[u8; 64]) { + m.clear(); + + let p = if precision == 8 { 0 } else { 1 }; + + let pqtq = (p << 4) | identifier; + m.push(pqtq); + + for &i in &UNZIGZAG[..] 
{ + m.push(qtable[i as usize]); + } +} + +fn encode_coefficient(coefficient: i32) -> (u8, u16) { + let mut magnitude = coefficient.unsigned_abs() as u16; + let mut num_bits = 0u8; + + while magnitude > 0 { + magnitude >>= 1; + num_bits += 1; + } + + let mask = (1 << num_bits as usize) - 1; + + let val = if coefficient < 0 { + (coefficient - 1) as u16 & mask + } else { + coefficient as u16 & mask + }; + + (num_bits, val) +} + +#[inline] +fn rgb_to_ycbcr<P: Pixel>(pixel: P) -> (u8, u8, u8) { + use crate::traits::Primitive; + use num_traits::cast::ToPrimitive; + + let [r, g, b] = pixel.to_rgb().0; + let max: f32 = P::Subpixel::DEFAULT_MAX_VALUE.to_f32().unwrap(); + let r: f32 = r.to_f32().unwrap(); + let g: f32 = g.to_f32().unwrap(); + let b: f32 = b.to_f32().unwrap(); + + // Coefficients from JPEG File Interchange Format (Version 1.02), multiplied for 255 maximum. + let y = 76.245 / max * r + 149.685 / max * g + 29.07 / max * b; + let cb = -43.0185 / max * r - 84.4815 / max * g + 127.5 / max * b + 128.; + let cr = 127.5 / max * r - 106.7685 / max * g - 20.7315 / max * b + 128.; + + (y as u8, cb as u8, cr as u8) +} + +/// Returns the pixel at (x,y) if (x,y) is in the image, +/// otherwise the closest pixel in the image +#[inline] +fn pixel_at_or_near<I: GenericImageView>(source: &I, x: u32, y: u32) -> I::Pixel { + if source.in_bounds(x, y) { + source.get_pixel(x, y) + } else { + source.get_pixel(x.min(source.width() - 1), y.min(source.height() - 1)) + } +} + +fn copy_blocks_ycbcr<I: GenericImageView>( + source: &I, + x0: u32, + y0: u32, + yb: &mut [u8; 64], + cbb: &mut [u8; 64], + crb: &mut [u8; 64], +) { + for y in 0..8 { + for x in 0..8 { + let pixel = pixel_at_or_near(source, x + x0, y + y0); + let (yc, cb, cr) = rgb_to_ycbcr(pixel); + + yb[(y * 8 + x) as usize] = yc; + cbb[(y * 8 + x) as usize] = cb; + crb[(y * 8 + x) as usize] = cr; + } + } +} + +fn copy_blocks_gray<I: GenericImageView>(source: &I, x0: u32, y0: u32, gb: &mut [u8; 64]) { + use num_traits::cast::ToPrimitive; + for y in 0..8 { + for x in 0..8 { + let pixel = pixel_at_or_near(source, x0 + x, y0 + y); + let [luma] = pixel.to_luma().0; + gb[(y * 8 + x) as usize] = luma.to_u8().unwrap(); + } + } +} + +#[cfg(test)] +mod tests { + use std::io::Cursor; + + #[cfg(feature = "benchmarks")] + extern crate test; + #[cfg(feature = "benchmarks")] + use test::Bencher; + + use crate::color::ColorType; + use crate::error::ParameterErrorKind::DimensionMismatch; + use crate::image::ImageDecoder; + use crate::{ImageEncoder, ImageError}; + + use super::super::JpegDecoder; + use super::{ + build_frame_header, build_huffman_segment, build_jfif_header, build_quantization_segment, + build_scan_header, Component, JpegEncoder, PixelDensity, DCCLASS, LUMADESTINATION, + STD_LUMA_DC_CODE_LENGTHS, STD_LUMA_DC_VALUES, + }; + + fn decode(encoded: &[u8]) -> Vec<u8> { + let decoder = JpegDecoder::new(Cursor::new(encoded)).expect("Could not decode image"); + + let mut decoded = vec![0; decoder.total_bytes() as usize]; + decoder + .read_image(&mut decoded) + .expect("Could not decode image"); + decoded + } + + #[test] + fn roundtrip_sanity_check() { + // create a 1x1 8-bit image buffer containing a single red pixel + let img = [255u8, 0, 0]; + + // encode it into a memory buffer + let mut encoded_img = Vec::new(); + { + let encoder = JpegEncoder::new_with_quality(&mut encoded_img, 100); + encoder + .write_image(&img, 1, 1, ColorType::Rgb8) + .expect("Could not encode image"); + } + + // decode it from the memory buffer + { + let decoded = decode(&encoded_img); 
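`encode_coefficient` above implements the JPEG magnitude categories (ITU-T T.81, Annex F): the first element is the number of magnitude bits, the second the bits appended after the Huffman code, with negative values sent as `value - 1` masked to that width. A few worked values through an equivalent standalone helper (illustrative, mirroring the function above):

fn category_and_bits(coefficient: i32) -> (u8, u16) {
    let mut magnitude = coefficient.unsigned_abs() as u16;
    let mut num_bits = 0u8;
    while magnitude > 0 {
        magnitude >>= 1;
        num_bits += 1;
    }
    let mask = (1u16 << num_bits) - 1;
    let value = if coefficient < 0 {
        (coefficient - 1) as u16 & mask
    } else {
        coefficient as u16 & mask
    };
    (num_bits, value)
}

fn main() {
    assert_eq!(category_and_bits(0), (0, 0));     // zero needs no extra bits
    assert_eq!(category_and_bits(3), (2, 0b11));  // positive: the bits are the value itself
    assert_eq!(category_and_bits(-3), (2, 0b00)); // negative: ones'-complement style
    assert_eq!(category_and_bits(-1), (1, 0b0));
    assert_eq!(category_and_bits(5), (3, 0b101));
}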
+ // note that, even with the encode quality set to 100, we do not get the same image + // back. Therefore, we're going to assert that it's at least red-ish: + assert_eq!(3, decoded.len()); + assert!(decoded[0] > 0x80); + assert!(decoded[1] < 0x80); + assert!(decoded[2] < 0x80); + } + } + + #[test] + fn grayscale_roundtrip_sanity_check() { + // create a 2x2 8-bit image buffer containing a white diagonal + let img = [255u8, 0, 0, 255]; + + // encode it into a memory buffer + let mut encoded_img = Vec::new(); + { + let encoder = JpegEncoder::new_with_quality(&mut encoded_img, 100); + encoder + .write_image(&img[..], 2, 2, ColorType::L8) + .expect("Could not encode image"); + } + + // decode it from the memory buffer + { + let decoded = decode(&encoded_img); + // note that, even with the encode quality set to 100, we do not get the same image + // back. Therefore, we're going to assert that the diagonal is at least white-ish: + assert_eq!(4, decoded.len()); + assert!(decoded[0] > 0x80); + assert!(decoded[1] < 0x80); + assert!(decoded[2] < 0x80); + assert!(decoded[3] > 0x80); + } + } + + #[test] + fn jfif_header_density_check() { + let mut buffer = Vec::new(); + build_jfif_header(&mut buffer, PixelDensity::dpi(300)); + assert_eq!( + buffer, + vec![ + b'J', + b'F', + b'I', + b'F', + 0, + 1, + 2, // JFIF version 1.2 + 1, // density is in dpi + 300u16.to_be_bytes()[0], + 300u16.to_be_bytes()[1], + 300u16.to_be_bytes()[0], + 300u16.to_be_bytes()[1], + 0, + 0, // No thumbnail + ] + ); + } + + #[test] + fn test_image_too_large() { + // JPEG cannot encode images larger than 65,535×65,535 + // create a 65,536×1 8-bit black image buffer + let img = [0; 65_536]; + // Try to encode an image that is too large + let mut encoded = Vec::new(); + let encoder = JpegEncoder::new_with_quality(&mut encoded, 100); + let result = encoder.write_image(&img, 65_536, 1, ColorType::L8); + match result { + Err(ImageError::Parameter(err)) => { + assert_eq!(err.kind(), DimensionMismatch) + } + other => { + assert!( + false, + "Encoding an image that is too large should return a DimensionError \ + it returned {:?} instead", + other + ) + } + } + } + + #[test] + fn test_build_jfif_header() { + let mut buf = vec![]; + let density = PixelDensity::dpi(100); + build_jfif_header(&mut buf, density); + assert_eq!( + buf, + [0x4A, 0x46, 0x49, 0x46, 0x00, 0x01, 0x02, 0x01, 0, 100, 0, 100, 0, 0] + ); + } + + #[test] + fn test_build_frame_header() { + let mut buf = vec![]; + let components = vec![ + Component { + id: 1, + h: 1, + v: 1, + tq: 5, + dc_table: 5, + ac_table: 5, + _dc_pred: 0, + }, + Component { + id: 2, + h: 1, + v: 1, + tq: 4, + dc_table: 4, + ac_table: 4, + _dc_pred: 0, + }, + ]; + build_frame_header(&mut buf, 5, 100, 150, &components); + assert_eq!( + buf, + [5, 0, 150, 0, 100, 2, 1, 1 << 4 | 1, 5, 2, 1 << 4 | 1, 4] + ); + } + + #[test] + fn test_build_scan_header() { + let mut buf = vec![]; + let components = vec![ + Component { + id: 1, + h: 1, + v: 1, + tq: 5, + dc_table: 5, + ac_table: 5, + _dc_pred: 0, + }, + Component { + id: 2, + h: 1, + v: 1, + tq: 4, + dc_table: 4, + ac_table: 4, + _dc_pred: 0, + }, + ]; + build_scan_header(&mut buf, &components); + assert_eq!(buf, [2, 1, 5 << 4 | 5, 2, 4 << 4 | 4, 0, 63, 0]); + } + + #[test] + fn test_build_huffman_segment() { + let mut buf = vec![]; + build_huffman_segment( + &mut buf, + DCCLASS, + LUMADESTINATION, + &STD_LUMA_DC_CODE_LENGTHS, + &STD_LUMA_DC_VALUES, + ); + assert_eq!( + buf, + vec![ + 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 
8, 9, + 10, 11 + ] + ); + } + + #[test] + fn test_build_quantization_segment() { + let mut buf = vec![]; + let qtable = [0u8; 64]; + build_quantization_segment(&mut buf, 8, 1, &qtable); + let mut expected = vec![]; + expected.push(0 << 4 | 1); + expected.extend_from_slice(&[0; 64]); + assert_eq!(buf, expected) + } + + #[cfg(feature = "benchmarks")] + #[bench] + fn bench_jpeg_encoder_new(b: &mut Bencher) { + b.iter(|| { + let mut y = vec![]; + let x = JpegEncoder::new(&mut y); + }) + } +} diff --git a/vendor/image/src/codecs/jpeg/entropy.rs b/vendor/image/src/codecs/jpeg/entropy.rs new file mode 100644 index 0000000..5bdcef6 --- /dev/null +++ b/vendor/image/src/codecs/jpeg/entropy.rs @@ -0,0 +1,63 @@ +/// Given an array containing the number of codes of each code length, +/// this function generates the huffman codes lengths and their respective +/// code lengths as specified by the JPEG spec. +const fn derive_codes_and_sizes(bits: &[u8; 16]) -> ([u8; 256], [u16; 256]) { + let mut huffsize = [0u8; 256]; + let mut huffcode = [0u16; 256]; + + let mut k = 0; + + // Annex C.2 + // Figure C.1 + // Generate table of individual code lengths + let mut i = 0; + while i < 16 { + let mut j = 0; + while j < bits[i as usize] { + huffsize[k] = i + 1; + k += 1; + j += 1; + } + i += 1; + } + + huffsize[k] = 0; + + // Annex C.2 + // Figure C.2 + // Generate table of huffman codes + k = 0; + let mut code = 0u16; + let mut size = huffsize[0]; + + while huffsize[k] != 0 { + huffcode[k] = code; + code += 1; + k += 1; + + if huffsize[k] == size { + continue; + } + + // FIXME there is something wrong with this code + let diff = huffsize[k].wrapping_sub(size); + code = if diff < 16 { code << diff as usize } else { 0 }; + + size = size.wrapping_add(diff); + } + + (huffsize, huffcode) +} + +pub(crate) const fn build_huff_lut_const(bits: &[u8; 16], huffval: &[u8]) -> [(u8, u16); 256] { + let mut lut = [(17u8, 0u16); 256]; + let (huffsize, huffcode) = derive_codes_and_sizes(bits); + + let mut i = 0; + while i < huffval.len() { + lut[huffval[i] as usize] = (huffsize[i], huffcode[i]); + i += 1; + } + + lut +} diff --git a/vendor/image/src/codecs/jpeg/mod.rs b/vendor/image/src/codecs/jpeg/mod.rs new file mode 100644 index 0000000..4242733 --- /dev/null +++ b/vendor/image/src/codecs/jpeg/mod.rs @@ -0,0 +1,16 @@ +//! Decoding and Encoding of JPEG Images +//! +//! JPEG (Joint Photographic Experts Group) is an image format that supports lossy compression. +//! This module implements the Baseline JPEG standard. +//! +//! # Related Links +//! * <http://www.w3.org/Graphics/JPEG/itu-t81.pdf> - The JPEG specification +//! + +pub use self::decoder::JpegDecoder; +pub use self::encoder::{JpegEncoder, PixelDensity, PixelDensityUnit}; + +mod decoder; +mod encoder; +mod entropy; +mod transform; diff --git a/vendor/image/src/codecs/jpeg/transform.rs b/vendor/image/src/codecs/jpeg/transform.rs new file mode 100644 index 0000000..1ca01a9 --- /dev/null +++ b/vendor/image/src/codecs/jpeg/transform.rs @@ -0,0 +1,196 @@ +/* +fdct is a Rust translation of jfdctint.c from the +Independent JPEG Group's libjpeg version 9a +obtained from http://www.ijg.org/files/jpegsr9a.zip +It comes with the following conditions of distribution and use: + + In plain English: + + 1. We don't promise that this software works. (But if you find any bugs, + please let us know!) + 2. You can use this software for whatever you want. You don't have to pay us. + 3. You may not pretend that you wrote this software. 
If you use it in a + program, you must acknowledge somewhere in your documentation that + you've used the IJG code. + + In legalese: + + The authors make NO WARRANTY or representation, either express or implied, + with respect to this software, its quality, accuracy, merchantability, or + fitness for a particular purpose. This software is provided "AS IS", and you, + its user, assume the entire risk as to its quality and accuracy. + + This software is copyright (C) 1991-2014, Thomas G. Lane, Guido Vollbeding. + All Rights Reserved except as specified below. + + Permission is hereby granted to use, copy, modify, and distribute this + software (or portions thereof) for any purpose, without fee, subject to these + conditions: + (1) If any part of the source code for this software is distributed, then this + README file must be included, with this copyright and no-warranty notice + unaltered; and any additions, deletions, or changes to the original files + must be clearly indicated in accompanying documentation. + (2) If only executable code is distributed, then the accompanying + documentation must state that "this software is based in part on the work of + the Independent JPEG Group". + (3) Permission for use of this software is granted only if the user accepts + full responsibility for any undesirable consequences; the authors accept + NO LIABILITY for damages of any kind. + + These conditions apply to any software derived from or based on the IJG code, + not just to the unmodified library. If you use our work, you ought to + acknowledge us. + + Permission is NOT granted for the use of any IJG author's name or company name + in advertising or publicity relating to this software or products derived from + it. This software may be referred to only as "the Independent JPEG Group's + software". + + We specifically permit and encourage the use of this software as the basis of + commercial products, provided that all warranty or liability claims are + assumed by the product vendor. +*/ + +static CONST_BITS: i32 = 13; +static PASS1_BITS: i32 = 2; + +static FIX_0_298631336: i32 = 2446; +static FIX_0_390180644: i32 = 3196; +static FIX_0_541196100: i32 = 4433; +static FIX_0_765366865: i32 = 6270; +static FIX_0_899976223: i32 = 7373; +static FIX_1_175875602: i32 = 9633; +static FIX_1_501321110: i32 = 12_299; +static FIX_1_847759065: i32 = 15_137; +static FIX_1_961570560: i32 = 16_069; +static FIX_2_053119869: i32 = 16_819; +static FIX_2_562915447: i32 = 20_995; +static FIX_3_072711026: i32 = 25_172; + +pub(crate) fn fdct(samples: &[u8; 64], coeffs: &mut [i32; 64]) { + // Pass 1: process rows. 
+ // Results are scaled by sqrt(8) compared to a true DCT + // furthermore we scale the results by 2**PASS1_BITS + for y in 0usize..8 { + let y0 = y * 8; + + // Even part + let t0 = i32::from(samples[y0]) + i32::from(samples[y0 + 7]); + let t1 = i32::from(samples[y0 + 1]) + i32::from(samples[y0 + 6]); + let t2 = i32::from(samples[y0 + 2]) + i32::from(samples[y0 + 5]); + let t3 = i32::from(samples[y0 + 3]) + i32::from(samples[y0 + 4]); + + let t10 = t0 + t3; + let t12 = t0 - t3; + let t11 = t1 + t2; + let t13 = t1 - t2; + + let t0 = i32::from(samples[y0]) - i32::from(samples[y0 + 7]); + let t1 = i32::from(samples[y0 + 1]) - i32::from(samples[y0 + 6]); + let t2 = i32::from(samples[y0 + 2]) - i32::from(samples[y0 + 5]); + let t3 = i32::from(samples[y0 + 3]) - i32::from(samples[y0 + 4]); + + // Apply unsigned -> signed conversion + coeffs[y0] = (t10 + t11 - 8 * 128) << PASS1_BITS as usize; + coeffs[y0 + 4] = (t10 - t11) << PASS1_BITS as usize; + + let mut z1 = (t12 + t13) * FIX_0_541196100; + // Add fudge factor here for final descale + z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as usize; + + coeffs[y0 + 2] = (z1 + t12 * FIX_0_765366865) >> (CONST_BITS - PASS1_BITS) as usize; + coeffs[y0 + 6] = (z1 - t13 * FIX_1_847759065) >> (CONST_BITS - PASS1_BITS) as usize; + + // Odd part + let t12 = t0 + t2; + let t13 = t1 + t3; + + let mut z1 = (t12 + t13) * FIX_1_175875602; + // Add fudge factor here for final descale + z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as usize; + + let mut t12 = t12 * (-FIX_0_390180644); + let mut t13 = t13 * (-FIX_1_961570560); + t12 += z1; + t13 += z1; + + let z1 = (t0 + t3) * (-FIX_0_899976223); + let mut t0 = t0 * FIX_1_501321110; + let mut t3 = t3 * FIX_0_298631336; + t0 += z1 + t12; + t3 += z1 + t13; + + let z1 = (t1 + t2) * (-FIX_2_562915447); + let mut t1 = t1 * FIX_3_072711026; + let mut t2 = t2 * FIX_2_053119869; + t1 += z1 + t13; + t2 += z1 + t12; + + coeffs[y0 + 1] = t0 >> (CONST_BITS - PASS1_BITS) as usize; + coeffs[y0 + 3] = t1 >> (CONST_BITS - PASS1_BITS) as usize; + coeffs[y0 + 5] = t2 >> (CONST_BITS - PASS1_BITS) as usize; + coeffs[y0 + 7] = t3 >> (CONST_BITS - PASS1_BITS) as usize; + } + + // Pass 2: process columns + // We remove the PASS1_BITS scaling but leave the results scaled up an + // overall factor of 8 + for x in (0usize..8).rev() { + // Even part + let t0 = coeffs[x] + coeffs[x + 8 * 7]; + let t1 = coeffs[x + 8] + coeffs[x + 8 * 6]; + let t2 = coeffs[x + 8 * 2] + coeffs[x + 8 * 5]; + let t3 = coeffs[x + 8 * 3] + coeffs[x + 8 * 4]; + + // Add fudge factor here for final descale + let t10 = t0 + t3 + (1 << (PASS1_BITS - 1) as usize); + let t12 = t0 - t3; + let t11 = t1 + t2; + let t13 = t1 - t2; + + let t0 = coeffs[x] - coeffs[x + 8 * 7]; + let t1 = coeffs[x + 8] - coeffs[x + 8 * 6]; + let t2 = coeffs[x + 8 * 2] - coeffs[x + 8 * 5]; + let t3 = coeffs[x + 8 * 3] - coeffs[x + 8 * 4]; + + coeffs[x] = (t10 + t11) >> PASS1_BITS as usize; + coeffs[x + 8 * 4] = (t10 - t11) >> PASS1_BITS as usize; + + let mut z1 = (t12 + t13) * FIX_0_541196100; + // Add fudge factor here for final descale + z1 += 1 << (CONST_BITS + PASS1_BITS - 1) as usize; + + coeffs[x + 8 * 2] = (z1 + t12 * FIX_0_765366865) >> (CONST_BITS + PASS1_BITS) as usize; + coeffs[x + 8 * 6] = (z1 - t13 * FIX_1_847759065) >> (CONST_BITS + PASS1_BITS) as usize; + + // Odd part + let t12 = t0 + t2; + let t13 = t1 + t3; + + let mut z1 = (t12 + t13) * FIX_1_175875602; + // Add fudge factor here for final descale + z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as usize; + + let mut t12 = t12 * 
(-FIX_0_390180644); + let mut t13 = t13 * (-FIX_1_961570560); + t12 += z1; + t13 += z1; + + let z1 = (t0 + t3) * (-FIX_0_899976223); + let mut t0 = t0 * FIX_1_501321110; + let mut t3 = t3 * FIX_0_298631336; + t0 += z1 + t12; + t3 += z1 + t13; + + let z1 = (t1 + t2) * (-FIX_2_562915447); + let mut t1 = t1 * FIX_3_072711026; + let mut t2 = t2 * FIX_2_053119869; + t1 += z1 + t13; + t2 += z1 + t12; + + coeffs[x + 8] = t0 >> (CONST_BITS + PASS1_BITS) as usize; + coeffs[x + 8 * 3] = t1 >> (CONST_BITS + PASS1_BITS) as usize; + coeffs[x + 8 * 5] = t2 >> (CONST_BITS + PASS1_BITS) as usize; + coeffs[x + 8 * 7] = t3 >> (CONST_BITS + PASS1_BITS) as usize; + } +} diff --git a/vendor/image/src/codecs/openexr.rs b/vendor/image/src/codecs/openexr.rs new file mode 100644 index 0000000..52d6ba9 --- /dev/null +++ b/vendor/image/src/codecs/openexr.rs @@ -0,0 +1,592 @@ +//! Decoding of OpenEXR (.exr) Images +//! +//! OpenEXR is an image format that is widely used, especially in VFX, +//! because it supports lossless and lossy compression for float data. +//! +//! This decoder only supports RGB and RGBA images. +//! If an image does not contain alpha information, +//! it is defaulted to `1.0` (no transparency). +//! +//! # Related Links +//! * <https://www.openexr.com/documentation.html> - The OpenEXR reference. +//! +//! +//! Current limitations (July 2021): +//! - only pixel type `Rgba32F` and `Rgba16F` are supported +//! - only non-deep rgb/rgba files supported, no conversion from/to YCbCr or similar +//! - only the first non-deep rgb layer is used +//! - only the largest mip map level is used +//! - pixels outside display window are lost +//! - meta data is lost +//! - dwaa/dwab compressed images not supported yet by the exr library +//! - (chroma) subsampling not supported yet by the exr library +use exr::prelude::*; + +use crate::error::{DecodingError, EncodingError, ImageFormatHint}; +use crate::image::decoder_to_vec; +use crate::{ + ColorType, ExtendedColorType, ImageDecoder, ImageEncoder, ImageError, ImageFormat, ImageResult, + Progress, +}; +use std::convert::TryInto; +use std::io::{Cursor, Read, Seek, Write}; + +/// An OpenEXR decoder. Immediately reads the meta data from the file. +#[derive(Debug)] +pub struct OpenExrDecoder<R> { + exr_reader: exr::block::reader::Reader<R>, + + // select a header that is rgb and not deep + header_index: usize, + + // decode either rgb or rgba. + // can be specified to include or discard alpha channels. + // if none, the alpha channel will only be allocated where the file contains data for it. + alpha_preference: Option<bool>, + + alpha_present_in_file: bool, +} + +impl<R: Read + Seek> OpenExrDecoder<R> { + /// Create a decoder. Consumes the first few bytes of the source to extract image dimensions. + /// Assumes the reader is buffered. In most cases, + /// you should wrap your reader in a `BufReader` for best performance. + /// Loads an alpha channel if the file has alpha samples. + /// Use `with_alpha_preference` if you want to load or not load alpha unconditionally. + pub fn new(source: R) -> ImageResult<Self> { + Self::with_alpha_preference(source, None) + } + + /// Create a decoder. Consumes the first few bytes of the source to extract image dimensions. + /// Assumes the reader is buffered. In most cases, + /// you should wrap your reader in a `BufReader` for best performance. + /// If alpha preference is specified, an alpha channel will + /// always be present or always be not present in the returned image. 
+ /// If alpha preference is none, the alpha channel will only be returned if it is found in the file. + pub fn with_alpha_preference(source: R, alpha_preference: Option<bool>) -> ImageResult<Self> { + // read meta data, then wait for further instructions, keeping the file open and ready + let exr_reader = exr::block::read(source, false).map_err(to_image_err)?; + + let header_index = exr_reader + .headers() + .iter() + .position(|header| { + // check if r/g/b exists in the channels + let has_rgb = ["R", "G", "B"] + .iter() + .all(|&required| // alpha will be optional + header.channels.find_index_of_channel(&Text::from(required)).is_some()); + + // we currently dont support deep images, or images with other color spaces than rgb + !header.deep && has_rgb + }) + .ok_or_else(|| { + ImageError::Decoding(DecodingError::new( + ImageFormatHint::Exact(ImageFormat::OpenExr), + "image does not contain non-deep rgb channels", + )) + })?; + + let has_alpha = exr_reader.headers()[header_index] + .channels + .find_index_of_channel(&Text::from("A")) + .is_some(); + + Ok(Self { + alpha_preference, + exr_reader, + header_index, + alpha_present_in_file: has_alpha, + }) + } + + // does not leak exrs-specific meta data into public api, just does it for this module + fn selected_exr_header(&self) -> &exr::meta::header::Header { + &self.exr_reader.meta_data().headers[self.header_index] + } +} + +impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for OpenExrDecoder<R> { + type Reader = Cursor<Vec<u8>>; + + fn dimensions(&self) -> (u32, u32) { + let size = self + .selected_exr_header() + .shared_attributes + .display_window + .size; + (size.width() as u32, size.height() as u32) + } + + fn color_type(&self) -> ColorType { + let returns_alpha = self.alpha_preference.unwrap_or(self.alpha_present_in_file); + if returns_alpha { + ColorType::Rgba32F + } else { + ColorType::Rgb32F + } + } + + fn original_color_type(&self) -> ExtendedColorType { + if self.alpha_present_in_file { + ExtendedColorType::Rgba32F + } else { + ExtendedColorType::Rgb32F + } + } + + /// Use `read_image` instead if possible, + /// as this method creates a whole new buffer just to contain the entire image. + fn into_reader(self) -> ImageResult<Self::Reader> { + Ok(Cursor::new(decoder_to_vec(self)?)) + } + + fn scanline_bytes(&self) -> u64 { + // we cannot always read individual scan lines for every file, + // as the tiles or lines in the file could be in random or reversed order. + // therefore we currently read all lines at once + // Todo: optimize for specific exr.line_order? 
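A usage sketch for the decoder above, going through the crate's generic `DynamicImage::from_decoder` entry point (the path is hypothetical, and the re-export names are assumptions about the public API rather than something shown in this file). The reader is wrapped in a `BufReader` as the constructor docs recommend; `with_alpha_preference(file, Some(false))` would force an RGB result instead:

use std::fs::File;
use std::io::BufReader;

use image::codecs::openexr::OpenExrDecoder;
use image::{DynamicImage, ImageResult};

fn load_exr(path: &str) -> ImageResult<DynamicImage> {
    // Buffering keeps the many small header reads cheap.
    let file = BufReader::new(File::open(path)?);
    let decoder = OpenExrDecoder::new(file)?;
    DynamicImage::from_decoder(decoder)
}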
+ self.total_bytes() + } + + // reads with or without alpha, depending on `self.alpha_preference` and `self.alpha_present_in_file` + fn read_image_with_progress<F: Fn(Progress)>( + self, + unaligned_bytes: &mut [u8], + progress_callback: F, + ) -> ImageResult<()> { + let blocks_in_header = self.selected_exr_header().chunk_count as u64; + let channel_count = self.color_type().channel_count() as usize; + + let display_window = self.selected_exr_header().shared_attributes.display_window; + let data_window_offset = + self.selected_exr_header().own_attributes.layer_position - display_window.position; + + { + // check whether the buffer is large enough for the dimensions of the file + let (width, height) = self.dimensions(); + let bytes_per_pixel = self.color_type().bytes_per_pixel() as usize; + let expected_byte_count = (width as usize) + .checked_mul(height as usize) + .and_then(|size| size.checked_mul(bytes_per_pixel)); + + // if the width and height does not match the length of the bytes, the arguments are invalid + let has_invalid_size_or_overflowed = expected_byte_count + .map(|expected_byte_count| unaligned_bytes.len() != expected_byte_count) + // otherwise, size calculation overflowed, is bigger than memory, + // therefore data is too small, so it is invalid. + .unwrap_or(true); + + if has_invalid_size_or_overflowed { + panic!("byte buffer not large enough for the specified dimensions and f32 pixels"); + } + } + + let result = read() + .no_deep_data() + .largest_resolution_level() + .rgba_channels( + move |_size, _channels| vec![0_f32; display_window.size.area() * channel_count], + move |buffer, index_in_data_window, (r, g, b, a_or_1): (f32, f32, f32, f32)| { + let index_in_display_window = + index_in_data_window.to_i32() + data_window_offset; + + // only keep pixels inside the data window + // TODO filter chunks based on this + if index_in_display_window.x() >= 0 + && index_in_display_window.y() >= 0 + && index_in_display_window.x() < display_window.size.width() as i32 + && index_in_display_window.y() < display_window.size.height() as i32 + { + let index_in_display_window = + index_in_display_window.to_usize("index bug").unwrap(); + let first_f32_index = + index_in_display_window.flat_index_for_size(display_window.size); + + buffer[first_f32_index * channel_count + ..(first_f32_index + 1) * channel_count] + .copy_from_slice(&[r, g, b, a_or_1][0..channel_count]); + + // TODO white point chromaticities + srgb/linear conversion? + } + }, + ) + .first_valid_layer() // TODO select exact layer by self.header_index? + .all_attributes() + .on_progress(|progress| { + progress_callback( + Progress::new( + (progress * blocks_in_header as f64) as u64, + blocks_in_header, + ), // TODO precision errors? + ); + }) + .from_chunks(self.exr_reader) + .map_err(to_image_err)?; + + // TODO this copy is strictly not necessary, but the exr api is a little too simple for reading into a borrowed target slice + + // this cast is safe and works with any alignment, as bytes are copied, and not f32 values. + // note: buffer slice length is checked in the beginning of this function and will be correct at this point + unaligned_bytes.copy_from_slice(bytemuck::cast_slice( + result.layer_data.channel_data.pixels.as_slice(), + )); + Ok(()) + } +} + +/// Write a raw byte buffer of pixels, +/// returning an Error if it has an invalid length. +/// +/// Assumes the writer is buffered. In most cases, +/// you should wrap your writer in a `BufWriter` for best performance. +// private. 
access via `OpenExrEncoder` +fn write_buffer( + mut buffered_write: impl Write + Seek, + unaligned_bytes: &[u8], + width: u32, + height: u32, + color_type: ColorType, +) -> ImageResult<()> { + let width = width as usize; + let height = height as usize; + + { + // check whether the buffer is large enough for the specified dimensions + let expected_byte_count = width + .checked_mul(height) + .and_then(|size| size.checked_mul(color_type.bytes_per_pixel() as usize)); + + // if the width and height does not match the length of the bytes, the arguments are invalid + let has_invalid_size_or_overflowed = expected_byte_count + .map(|expected_byte_count| unaligned_bytes.len() < expected_byte_count) + // otherwise, size calculation overflowed, is bigger than memory, + // therefore data is too small, so it is invalid. + .unwrap_or(true); + + if has_invalid_size_or_overflowed { + return Err(ImageError::Encoding(EncodingError::new( + ImageFormatHint::Exact(ImageFormat::OpenExr), + "byte buffer not large enough for the specified dimensions and f32 pixels", + ))); + } + } + + // bytes might be unaligned so we cannot cast the whole thing, instead lookup each f32 individually + let lookup_f32 = move |f32_index: usize| { + let unaligned_f32_bytes_slice = &unaligned_bytes[f32_index * 4..(f32_index + 1) * 4]; + let f32_bytes_array = unaligned_f32_bytes_slice + .try_into() + .expect("indexing error"); + f32::from_ne_bytes(f32_bytes_array) + }; + + match color_type { + ColorType::Rgb32F => { + exr::prelude::Image // TODO compression method zip?? + ::from_channels( + (width, height), + SpecificChannels::rgb(|pixel: Vec2<usize>| { + let pixel_index = 3 * pixel.flat_index_for_size(Vec2(width, height)); + ( + lookup_f32(pixel_index), + lookup_f32(pixel_index + 1), + lookup_f32(pixel_index + 2), + ) + }), + ) + .write() + // .on_progress(|progress| todo!()) + .to_buffered(&mut buffered_write) + .map_err(to_image_err)?; + } + + ColorType::Rgba32F => { + exr::prelude::Image // TODO compression method zip?? + ::from_channels( + (width, height), + SpecificChannels::rgba(|pixel: Vec2<usize>| { + let pixel_index = 4 * pixel.flat_index_for_size(Vec2(width, height)); + ( + lookup_f32(pixel_index), + lookup_f32(pixel_index + 1), + lookup_f32(pixel_index + 2), + lookup_f32(pixel_index + 3), + ) + }), + ) + .write() + // .on_progress(|progress| todo!()) + .to_buffered(&mut buffered_write) + .map_err(to_image_err)?; + } + + // TODO other color types and channel types + unsupported_color_type => { + return Err(ImageError::Encoding(EncodingError::new( + ImageFormatHint::Exact(ImageFormat::OpenExr), + format!( + "writing color type {:?} not yet supported", + unsupported_color_type + ), + ))) + } + } + + Ok(()) +} + +// TODO is this struct and trait actually used anywhere? +/// A thin wrapper that implements `ImageEncoder` for OpenEXR images. Will behave like `image::codecs::openexr::write_buffer`. +#[derive(Debug)] +pub struct OpenExrEncoder<W>(W); + +impl<W> OpenExrEncoder<W> { + /// Create an `ImageEncoder`. Does not write anything yet. Writing later will behave like `image::codecs::openexr::write_buffer`. + // use constructor, not public field, for future backwards-compatibility + pub fn new(write: W) -> Self { + Self(write) + } +} + +impl<W> ImageEncoder for OpenExrEncoder<W> +where + W: Write + Seek, +{ + /// Writes the complete image. + /// + /// Returns an Error if it has an invalid length. + /// Assumes the writer is buffered. In most cases, + /// you should wrap your writer in a `BufWriter` for best performance. 
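The `lookup_f32` closure above exists because the caller's `&[u8]` carries no alignment guarantee, so it cannot simply be reinterpreted as `&[f32]`; every sample is reassembled from its four native-endian bytes instead. A standalone sketch of the same idea (illustrative names):

use std::convert::TryInto;

fn lookup_f32(unaligned_bytes: &[u8], f32_index: usize) -> f32 {
    let bytes: [u8; 4] = unaligned_bytes[f32_index * 4..(f32_index + 1) * 4]
        .try_into()
        .expect("slice is exactly four bytes long");
    f32::from_ne_bytes(bytes)
}

fn main() {
    let pixels = [0.25_f32, -1.0, 3.5];
    // Serialize as native-endian bytes, the layout `lookup_f32` expects.
    let bytes: Vec<u8> = pixels.iter().flat_map(|f| f.to_ne_bytes()).collect();
    assert_eq!(lookup_f32(&bytes, 1), -1.0);
    assert_eq!(lookup_f32(&bytes, 2), 3.5);
}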
+ fn write_image( + self, + buf: &[u8], + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<()> { + write_buffer(self.0, buf, width, height, color_type) + } +} + +fn to_image_err(exr_error: Error) -> ImageError { + ImageError::Decoding(DecodingError::new( + ImageFormatHint::Exact(ImageFormat::OpenExr), + exr_error.to_string(), + )) +} + +#[cfg(test)] +mod test { + use super::*; + + use std::io::BufReader; + use std::path::{Path, PathBuf}; + + use crate::buffer_::{Rgb32FImage, Rgba32FImage}; + use crate::error::{LimitError, LimitErrorKind}; + use crate::{ImageBuffer, Rgb, Rgba}; + + const BASE_PATH: &[&str] = &[".", "tests", "images", "exr"]; + + /// Write an `Rgb32FImage`. + /// Assumes the writer is buffered. In most cases, + /// you should wrap your writer in a `BufWriter` for best performance. + fn write_rgb_image(write: impl Write + Seek, image: &Rgb32FImage) -> ImageResult<()> { + write_buffer( + write, + bytemuck::cast_slice(image.as_raw().as_slice()), + image.width(), + image.height(), + ColorType::Rgb32F, + ) + } + + /// Write an `Rgba32FImage`. + /// Assumes the writer is buffered. In most cases, + /// you should wrap your writer in a `BufWriter` for best performance. + fn write_rgba_image(write: impl Write + Seek, image: &Rgba32FImage) -> ImageResult<()> { + write_buffer( + write, + bytemuck::cast_slice(image.as_raw().as_slice()), + image.width(), + image.height(), + ColorType::Rgba32F, + ) + } + + /// Read the file from the specified path into an `Rgba32FImage`. + fn read_as_rgba_image_from_file(path: impl AsRef<Path>) -> ImageResult<Rgba32FImage> { + read_as_rgba_image(BufReader::new(std::fs::File::open(path)?)) + } + + /// Read the file from the specified path into an `Rgb32FImage`. + fn read_as_rgb_image_from_file(path: impl AsRef<Path>) -> ImageResult<Rgb32FImage> { + read_as_rgb_image(BufReader::new(std::fs::File::open(path)?)) + } + + /// Read the file from the specified path into an `Rgb32FImage`. + fn read_as_rgb_image(read: impl Read + Seek) -> ImageResult<Rgb32FImage> { + let decoder = OpenExrDecoder::with_alpha_preference(read, Some(false))?; + let (width, height) = decoder.dimensions(); + let buffer: Vec<f32> = decoder_to_vec(decoder)?; + + ImageBuffer::from_raw(width, height, buffer) + // this should be the only reason for the "from raw" call to fail, + // even though such a large allocation would probably cause an error much earlier + .ok_or_else(|| { + ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory)) + }) + } + + /// Read the file from the specified path into an `Rgba32FImage`. 
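A usage sketch for `OpenExrEncoder` through the `ImageEncoder` trait (the output path and the `Rgb32FImage` root re-export are assumptions about the crate's public surface). The writer must be `Write + Seek`, so a `BufWriter<File>` works, and the f32 samples are passed as native-endian bytes, matching what `write_buffer` reads back out:

use std::fs::File;
use std::io::BufWriter;

use image::codecs::openexr::OpenExrEncoder;
use image::{ColorType, ImageEncoder, ImageResult, Rgb32FImage};

fn save_exr(image: &Rgb32FImage, path: &str) -> ImageResult<()> {
    let file = BufWriter::new(File::create(path)?);
    // write_buffer reads samples back with f32::from_ne_bytes, so emit native-endian bytes.
    let bytes: Vec<u8> = image.as_raw().iter().flat_map(|f| f.to_ne_bytes()).collect();
    OpenExrEncoder::new(file).write_image(&bytes, image.width(), image.height(), ColorType::Rgb32F)
}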
+ fn read_as_rgba_image(read: impl Read + Seek) -> ImageResult<Rgba32FImage> { + let decoder = OpenExrDecoder::with_alpha_preference(read, Some(true))?; + let (width, height) = decoder.dimensions(); + let buffer: Vec<f32> = decoder_to_vec(decoder)?; + + ImageBuffer::from_raw(width, height, buffer) + // this should be the only reason for the "from raw" call to fail, + // even though such a large allocation would probably cause an error much earlier + .ok_or_else(|| { + ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory)) + }) + } + + #[test] + fn compare_exr_hdr() { + if cfg!(not(feature = "hdr")) { + eprintln!("warning: to run all the openexr tests, activate the hdr feature flag"); + } + + #[cfg(feature = "hdr")] + { + let folder = BASE_PATH.iter().collect::<PathBuf>(); + let reference_path = folder.clone().join("overexposed gradient.hdr"); + let exr_path = folder + .clone() + .join("overexposed gradient - data window equals display window.exr"); + + let hdr: Vec<Rgb<f32>> = crate::codecs::hdr::HdrDecoder::new(std::io::BufReader::new( + std::fs::File::open(&reference_path).unwrap(), + )) + .unwrap() + .read_image_hdr() + .unwrap(); + + let exr_pixels: Rgb32FImage = read_as_rgb_image_from_file(exr_path).unwrap(); + assert_eq!( + exr_pixels.dimensions().0 * exr_pixels.dimensions().1, + hdr.len() as u32 + ); + + for (expected, found) in hdr.iter().zip(exr_pixels.pixels()) { + for (expected, found) in expected.0.iter().zip(found.0.iter()) { + // the large tolerance seems to be caused by + // the RGBE u8x4 pixel quantization of the hdr image format + assert!( + (expected - found).abs() < 0.1, + "expected {}, found {}", + expected, + found + ); + } + } + } + } + + #[test] + fn roundtrip_rgba() { + let mut next_random = vec![1.0, 0.0, -1.0, -3.14, 27.0, 11.0, 31.0] + .into_iter() + .cycle(); + let mut next_random = move || next_random.next().unwrap(); + + let generated_image: Rgba32FImage = ImageBuffer::from_fn(9, 31, |_x, _y| { + Rgba([next_random(), next_random(), next_random(), next_random()]) + }); + + let mut bytes = vec![]; + write_rgba_image(Cursor::new(&mut bytes), &generated_image).unwrap(); + let decoded_image = read_as_rgba_image(Cursor::new(bytes)).unwrap(); + + debug_assert_eq!(generated_image, decoded_image); + } + + #[test] + fn roundtrip_rgb() { + let mut next_random = vec![1.0, 0.0, -1.0, -3.14, 27.0, 11.0, 31.0] + .into_iter() + .cycle(); + let mut next_random = move || next_random.next().unwrap(); + + let generated_image: Rgb32FImage = ImageBuffer::from_fn(9, 31, |_x, _y| { + Rgb([next_random(), next_random(), next_random()]) + }); + + let mut bytes = vec![]; + write_rgb_image(Cursor::new(&mut bytes), &generated_image).unwrap(); + let decoded_image = read_as_rgb_image(Cursor::new(bytes)).unwrap(); + + debug_assert_eq!(generated_image, decoded_image); + } + + #[test] + fn compare_rgba_rgb() { + let exr_path = BASE_PATH + .iter() + .collect::<PathBuf>() + .join("overexposed gradient - data window equals display window.exr"); + + let rgb: Rgb32FImage = read_as_rgb_image_from_file(&exr_path).unwrap(); + let rgba: Rgba32FImage = read_as_rgba_image_from_file(&exr_path).unwrap(); + + assert_eq!(rgba.dimensions(), rgb.dimensions()); + + for (Rgb(rgb), Rgba(rgba)) in rgb.pixels().zip(rgba.pixels()) { + assert_eq!(rgb, &rgba[..3]); + } + } + + #[test] + fn compare_cropped() { + // like in photoshop, exr images may have layers placed anywhere in a canvas. + // we don't want to load the pixels from the layer, but we want to load the pixels from the canvas. 
+ // a layer might be smaller than the canvas, in that case the canvas should be transparent black + // where no layer was covering it. a layer might also be larger than the canvas, + // these pixels should be discarded. + // + // in this test we want to make sure that an + // auto-cropped image will be reproduced to the original. + + let exr_path = BASE_PATH.iter().collect::<PathBuf>(); + let original = exr_path.clone().join("cropping - uncropped original.exr"); + let cropped = exr_path + .clone() + .join("cropping - data window differs display window.exr"); + + // smoke-check that the exr files are actually not the same + { + let original_exr = read_first_flat_layer_from_file(&original).unwrap(); + let cropped_exr = read_first_flat_layer_from_file(&cropped).unwrap(); + assert_eq!( + original_exr.attributes.display_window, + cropped_exr.attributes.display_window + ); + assert_ne!( + original_exr.layer_data.attributes.layer_position, + cropped_exr.layer_data.attributes.layer_position + ); + assert_ne!(original_exr.layer_data.size, cropped_exr.layer_data.size); + } + + // check that they result in the same image + let original: Rgba32FImage = read_as_rgba_image_from_file(&original).unwrap(); + let cropped: Rgba32FImage = read_as_rgba_image_from_file(&cropped).unwrap(); + assert_eq!(original.dimensions(), cropped.dimensions()); + + // the following is not a simple assert_eq, as in case of an error, + // the whole image would be printed to the console, which takes forever + assert!(original.pixels().zip(cropped.pixels()).all(|(a, b)| a == b)); + } +} diff --git a/vendor/image/src/codecs/png.rs b/vendor/image/src/codecs/png.rs new file mode 100644 index 0000000..b9f98ce --- /dev/null +++ b/vendor/image/src/codecs/png.rs @@ -0,0 +1,778 @@ +//! Decoding and Encoding of PNG Images +//! +//! PNG (Portable Network Graphics) is an image format that supports lossless compression. +//! +//! # Related Links +//! * <http://www.w3.org/TR/PNG/> - The PNG Specification +//! + +use std::convert::TryFrom; +use std::fmt; +use std::io::{self, Read, Write}; + +use num_rational::Ratio; +use png::{BlendOp, DisposeOp}; + +use crate::animation::{Delay, Frame, Frames}; +use crate::color::{Blend, ColorType, ExtendedColorType}; +use crate::error::{ + DecodingError, EncodingError, ImageError, ImageResult, LimitError, LimitErrorKind, + ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind, +}; +use crate::image::{AnimationDecoder, ImageDecoder, ImageEncoder, ImageFormat}; +use crate::io::Limits; +use crate::{DynamicImage, GenericImage, ImageBuffer, Luma, LumaA, Rgb, Rgba, RgbaImage}; + +// http://www.w3.org/TR/PNG-Structure.html +// The first eight bytes of a PNG file always contain the following (decimal) values: +pub(crate) const PNG_SIGNATURE: [u8; 8] = [137, 80, 78, 71, 13, 10, 26, 10]; + +/// Png Reader +/// +/// This reader will try to read the png one row at a time, +/// however for interlaced png files this is not possible and +/// these are therefore read at once. +pub struct PngReader<R: Read> { + reader: png::Reader<R>, + buffer: Vec<u8>, + index: usize, +} + +impl<R: Read> PngReader<R> { + fn new(mut reader: png::Reader<R>) -> ImageResult<PngReader<R>> { + let len = reader.output_buffer_size(); + // Since interlaced images do not come in + // scanline order it is almost impossible to + // read them in a streaming fashion, however + // this shouldn't be a too big of a problem + // as most interlaced images should fit in memory. 
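`PNG_SIGNATURE` above is the fixed eight-byte magic that every PNG stream starts with; sniffing a stream amounts to a prefix comparison like this illustrative helper (not the crate's own format-detection code):

const PNG_SIGNATURE: [u8; 8] = [137, 80, 78, 71, 13, 10, 26, 10];

fn looks_like_png(data: &[u8]) -> bool {
    data.len() >= 8 && data[..8] == PNG_SIGNATURE
}

fn main() {
    assert!(looks_like_png(b"\x89PNG\r\n\x1a\n...rest of file..."));
    assert!(!looks_like_png(b"GIF89a"));
}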
+ let buffer = if reader.info().interlaced { + let mut buffer = vec![0; len]; + reader + .next_frame(&mut buffer) + .map_err(ImageError::from_png)?; + buffer + } else { + Vec::new() + }; + + Ok(PngReader { + reader, + buffer, + index: 0, + }) + } +} + +impl<R: Read> Read for PngReader<R> { + fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> { + // io::Write::write for slice cannot fail + let readed = buf.write(&self.buffer[self.index..]).unwrap(); + + let mut bytes = readed; + self.index += readed; + + while self.index >= self.buffer.len() { + match self.reader.next_row()? { + Some(row) => { + // Faster to copy directly to external buffer + let readed = buf.write(row.data()).unwrap(); + bytes += readed; + + self.buffer = row.data()[readed..].to_owned(); + self.index = 0; + } + None => return Ok(bytes), + } + } + + Ok(bytes) + } + + fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { + let mut bytes = self.buffer.len(); + if buf.is_empty() { + std::mem::swap(&mut self.buffer, buf); + } else { + buf.extend_from_slice(&self.buffer); + self.buffer.clear(); + } + + self.index = 0; + + while let Some(row) = self.reader.next_row()? { + buf.extend_from_slice(row.data()); + bytes += row.data().len(); + } + + Ok(bytes) + } +} + +/// PNG decoder +pub struct PngDecoder<R: Read> { + color_type: ColorType, + reader: png::Reader<R>, +} + +impl<R: Read> PngDecoder<R> { + /// Creates a new decoder that decodes from the stream ```r``` + pub fn new(r: R) -> ImageResult<PngDecoder<R>> { + Self::with_limits(r, Limits::default()) + } + + /// Creates a new decoder that decodes from the stream ```r``` with the given limits. + pub fn with_limits(r: R, limits: Limits) -> ImageResult<PngDecoder<R>> { + limits.check_support(&crate::io::LimitSupport::default())?; + + let max_bytes = usize::try_from(limits.max_alloc.unwrap_or(u64::MAX)).unwrap_or(usize::MAX); + let mut decoder = png::Decoder::new_with_limits(r, png::Limits { bytes: max_bytes }); + + let info = decoder.read_header_info().map_err(ImageError::from_png)?; + limits.check_dimensions(info.width, info.height)?; + + // By default the PNG decoder will scale 16 bpc to 8 bpc, so custom + // transformations must be set. EXPAND preserves the default behavior + // expanding bpc < 8 to 8 bpc. 
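A usage sketch for `with_limits` above (path and caps are illustrative, and the field names are assumed to be those of the crate's `io::Limits`): the dimension limits are checked against the PNG header up front, while `max_alloc` is forwarded to the `png` crate as its byte limit:

use std::fs::File;
use std::io::BufReader;

use image::codecs::png::PngDecoder;
use image::io::Limits;
use image::{DynamicImage, ImageResult};

fn load_png_capped(path: &str) -> ImageResult<DynamicImage> {
    let mut limits = Limits::default();
    limits.max_image_width = Some(8_192);
    limits.max_image_height = Some(8_192);
    limits.max_alloc = Some(64 * 1024 * 1024); // 64 MiB ceiling for decoder allocations
    let decoder = PngDecoder::with_limits(BufReader::new(File::open(path)?), limits)?;
    DynamicImage::from_decoder(decoder)
}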
+ decoder.set_transformations(png::Transformations::EXPAND); + let reader = decoder.read_info().map_err(ImageError::from_png)?; + let (color_type, bits) = reader.output_color_type(); + let color_type = match (color_type, bits) { + (png::ColorType::Grayscale, png::BitDepth::Eight) => ColorType::L8, + (png::ColorType::Grayscale, png::BitDepth::Sixteen) => ColorType::L16, + (png::ColorType::GrayscaleAlpha, png::BitDepth::Eight) => ColorType::La8, + (png::ColorType::GrayscaleAlpha, png::BitDepth::Sixteen) => ColorType::La16, + (png::ColorType::Rgb, png::BitDepth::Eight) => ColorType::Rgb8, + (png::ColorType::Rgb, png::BitDepth::Sixteen) => ColorType::Rgb16, + (png::ColorType::Rgba, png::BitDepth::Eight) => ColorType::Rgba8, + (png::ColorType::Rgba, png::BitDepth::Sixteen) => ColorType::Rgba16, + + (png::ColorType::Grayscale, png::BitDepth::One) => { + return Err(unsupported_color(ExtendedColorType::L1)) + } + (png::ColorType::GrayscaleAlpha, png::BitDepth::One) => { + return Err(unsupported_color(ExtendedColorType::La1)) + } + (png::ColorType::Rgb, png::BitDepth::One) => { + return Err(unsupported_color(ExtendedColorType::Rgb1)) + } + (png::ColorType::Rgba, png::BitDepth::One) => { + return Err(unsupported_color(ExtendedColorType::Rgba1)) + } + + (png::ColorType::Grayscale, png::BitDepth::Two) => { + return Err(unsupported_color(ExtendedColorType::L2)) + } + (png::ColorType::GrayscaleAlpha, png::BitDepth::Two) => { + return Err(unsupported_color(ExtendedColorType::La2)) + } + (png::ColorType::Rgb, png::BitDepth::Two) => { + return Err(unsupported_color(ExtendedColorType::Rgb2)) + } + (png::ColorType::Rgba, png::BitDepth::Two) => { + return Err(unsupported_color(ExtendedColorType::Rgba2)) + } + + (png::ColorType::Grayscale, png::BitDepth::Four) => { + return Err(unsupported_color(ExtendedColorType::L4)) + } + (png::ColorType::GrayscaleAlpha, png::BitDepth::Four) => { + return Err(unsupported_color(ExtendedColorType::La4)) + } + (png::ColorType::Rgb, png::BitDepth::Four) => { + return Err(unsupported_color(ExtendedColorType::Rgb4)) + } + (png::ColorType::Rgba, png::BitDepth::Four) => { + return Err(unsupported_color(ExtendedColorType::Rgba4)) + } + + (png::ColorType::Indexed, bits) => { + return Err(unsupported_color(ExtendedColorType::Unknown(bits as u8))) + } + }; + + Ok(PngDecoder { color_type, reader }) + } + + /// Turn this into an iterator over the animation frames. + /// + /// Reading the complete animation requires more memory than reading the data from the IDAT + /// frame–multiple frame buffers need to be reserved at the same time. We further do not + /// support compositing 16-bit colors. In any case this would be lossy as the interface of + /// animation decoders does not support 16-bit colors. + /// + /// If something is not supported or a limit is violated then the decoding step that requires + /// them will fail and an error will be returned instead of the frame. No further frames will + /// be returned. + pub fn apng(self) -> ApngDecoder<R> { + ApngDecoder::new(self) + } + + /// Returns if the image contains an animation. + /// + /// Note that the file itself decides if the default image is considered to be part of the + /// animation. When it is not the common interpretation is to use it as a thumbnail. + /// + /// If a non-animated image is converted into an `ApngDecoder` then its iterator is empty. 
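A usage sketch for the animation path described above (file path hypothetical; `Frames::collect_frames` and the root re-exports are assumptions about the crate's public API): `apng()` turns the decoder into an `ApngDecoder`, and `into_frames()` yields the composited RGBA frames:

use std::fs::File;
use std::io::BufReader;

use image::codecs::png::PngDecoder;
use image::{AnimationDecoder, Frame, ImageResult};

fn load_apng_frames(path: &str) -> ImageResult<Vec<Frame>> {
    let decoder = PngDecoder::new(BufReader::new(File::open(path)?))?;
    if !decoder.is_apng() {
        return Ok(Vec::new()); // plain PNG: nothing to animate
    }
    decoder.apng().into_frames().collect_frames()
}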
+ pub fn is_apng(&self) -> bool { + self.reader.info().animation_control.is_some() + } +} + +fn unsupported_color(ect: ExtendedColorType) -> ImageError { + ImageError::Unsupported(UnsupportedError::from_format_and_kind( + ImageFormat::Png.into(), + UnsupportedErrorKind::Color(ect), + )) +} + +impl<'a, R: 'a + Read> ImageDecoder<'a> for PngDecoder<R> { + type Reader = PngReader<R>; + + fn dimensions(&self) -> (u32, u32) { + self.reader.info().size() + } + + fn color_type(&self) -> ColorType { + self.color_type + } + + fn icc_profile(&mut self) -> Option<Vec<u8>> { + self.reader.info().icc_profile.as_ref().map(|x| x.to_vec()) + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + PngReader::new(self.reader) + } + + fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> { + use byteorder::{BigEndian, ByteOrder, NativeEndian}; + + assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); + self.reader.next_frame(buf).map_err(ImageError::from_png)?; + // PNG images are big endian. For 16 bit per channel and larger types, + // the buffer may need to be reordered to native endianness per the + // contract of `read_image`. + // TODO: assumes equal channel bit depth. + let bpc = self.color_type().bytes_per_pixel() / self.color_type().channel_count(); + + match bpc { + 1 => (), // No reodering necessary for u8 + 2 => buf.chunks_mut(2).for_each(|c| { + let v = BigEndian::read_u16(c); + NativeEndian::write_u16(c, v) + }), + _ => unreachable!(), + } + Ok(()) + } + + fn scanline_bytes(&self) -> u64 { + let width = self.reader.info().width; + self.reader.output_line_size(width) as u64 + } +} + +/// An [`AnimationDecoder`] adapter of [`PngDecoder`]. +/// +/// See [`PngDecoder::apng`] for more information. +/// +/// [`AnimationDecoder`]: ../trait.AnimationDecoder.html +/// [`PngDecoder`]: struct.PngDecoder.html +/// [`PngDecoder::apng`]: struct.PngDecoder.html#method.apng +pub struct ApngDecoder<R: Read> { + inner: PngDecoder<R>, + /// The current output buffer. + current: RgbaImage, + /// The previous output buffer, used for dispose op previous. + previous: RgbaImage, + /// The dispose op of the current frame. + dispose: DisposeOp, + /// The number of image still expected to be able to load. + remaining: u32, + /// The next (first) image is the thumbnail. + has_thumbnail: bool, +} + +impl<R: Read> ApngDecoder<R> { + fn new(inner: PngDecoder<R>) -> Self { + let (width, height) = inner.dimensions(); + let info = inner.reader.info(); + let remaining = match info.animation_control() { + // The expected number of fcTL in the remaining image. + Some(actl) => actl.num_frames, + None => 0, + }; + // If the IDAT has no fcTL then it is not part of the animation counted by + // num_frames. All following fdAT chunks must be preceded by an fcTL + let has_thumbnail = info.frame_control.is_none(); + ApngDecoder { + inner, + // TODO: should we delay this allocation? At least if we support limits we should. + current: RgbaImage::new(width, height), + previous: RgbaImage::new(width, height), + dispose: DisposeOp::Background, + remaining, + has_thumbnail, + } + } + + // TODO: thumbnail(&mut self) -> Option<impl ImageDecoder<'_>> + + /// Decode one subframe and overlay it on the canvas. + fn mix_next_frame(&mut self) -> Result<Option<&RgbaImage>, ImageError> { + // Remove this image from remaining. + self.remaining = match self.remaining.checked_sub(1) { + None => return Ok(None), + Some(next) => next, + }; + + // Shorten ourselves to 0 in case of error. 
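`read_image` above rewrites every 16-bit sample from PNG's big-endian storage order to native endianness, as the `read_image` contract requires. A standalone sketch of that fix-up (illustrative name):

fn be_samples_to_native(buf: &mut [u8]) {
    for chunk in buf.chunks_exact_mut(2) {
        let v = u16::from_be_bytes([chunk[0], chunk[1]]);
        chunk.copy_from_slice(&v.to_ne_bytes());
    }
}

fn main() {
    let mut buf = [0x01, 0x02, 0xAB, 0xCD];
    be_samples_to_native(&mut buf);
    if cfg!(target_endian = "little") {
        assert_eq!(buf, [0x02, 0x01, 0xCD, 0xAB]); // bytes swapped per sample
    } else {
        assert_eq!(buf, [0x01, 0x02, 0xAB, 0xCD]); // already native order
    }
}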
+ let remaining = self.remaining; + self.remaining = 0; + + // Skip the thumbnail that is not part of the animation. + if self.has_thumbnail { + self.has_thumbnail = false; + let mut buffer = vec![0; self.inner.reader.output_buffer_size()]; + self.inner + .reader + .next_frame(&mut buffer) + .map_err(ImageError::from_png)?; + } + + self.animatable_color_type()?; + + // Dispose of the previous frame. + match self.dispose { + DisposeOp::None => { + self.previous.clone_from(&self.current); + } + DisposeOp::Background => { + self.previous.clone_from(&self.current); + self.current + .pixels_mut() + .for_each(|pixel| *pixel = Rgba([0, 0, 0, 0])); + } + DisposeOp::Previous => { + self.current.clone_from(&self.previous); + } + } + + // Read next frame data. + let mut buffer = vec![0; self.inner.reader.output_buffer_size()]; + self.inner + .reader + .next_frame(&mut buffer) + .map_err(ImageError::from_png)?; + let info = self.inner.reader.info(); + + // Find out how to interpret the decoded frame. + let (width, height, px, py, blend); + match info.frame_control() { + None => { + width = info.width; + height = info.height; + px = 0; + py = 0; + blend = BlendOp::Source; + } + Some(fc) => { + width = fc.width; + height = fc.height; + px = fc.x_offset; + py = fc.y_offset; + blend = fc.blend_op; + self.dispose = fc.dispose_op; + } + }; + + // Turn the data into an rgba image proper. + let source = match self.inner.color_type { + ColorType::L8 => { + let image = ImageBuffer::<Luma<_>, _>::from_raw(width, height, buffer).unwrap(); + DynamicImage::ImageLuma8(image).into_rgba8() + } + ColorType::La8 => { + let image = ImageBuffer::<LumaA<_>, _>::from_raw(width, height, buffer).unwrap(); + DynamicImage::ImageLumaA8(image).into_rgba8() + } + ColorType::Rgb8 => { + let image = ImageBuffer::<Rgb<_>, _>::from_raw(width, height, buffer).unwrap(); + DynamicImage::ImageRgb8(image).into_rgba8() + } + ColorType::Rgba8 => ImageBuffer::<Rgba<_>, _>::from_raw(width, height, buffer).unwrap(), + ColorType::L16 | ColorType::Rgb16 | ColorType::La16 | ColorType::Rgba16 => { + // TODO: to enable remove restriction in `animatable_color_type` method. + unreachable!("16-bit apng not yet support") + } + _ => unreachable!("Invalid png color"), + }; + + match blend { + BlendOp::Source => { + self.current + .copy_from(&source, px, py) + .expect("Invalid png image not detected in png"); + } + BlendOp::Over => { + // TODO: investigate speed, speed-ups, and bounds-checks. + for (x, y, p) in source.enumerate_pixels() { + self.current.get_pixel_mut(x + px, y + py).blend(p); + } + } + } + + // Ok, we can proceed with actually remaining images. + self.remaining = remaining; + // Return composited output buffer. + Ok(Some(&self.current)) + } + + fn animatable_color_type(&self) -> Result<(), ImageError> { + match self.inner.color_type { + ColorType::L8 | ColorType::Rgb8 | ColorType::La8 | ColorType::Rgba8 => Ok(()), + // TODO: do not handle multi-byte colors. Remember to implement it in `mix_next_frame`. 
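+            // Until then, 16-bit color types are rejected here: `mix_next_frame` composites
+            // through an 8-bit `RgbaImage`, which would be lossy for them.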
+            ColorType::L16 | ColorType::Rgb16 | ColorType::La16 | ColorType::Rgba16 => {
+                Err(unsupported_color(self.inner.color_type.into()))
+            }
+            _ => unreachable!("{:?} not a valid png color", self.inner.color_type),
+        }
+    }
+}
+
+impl<'a, R: Read + 'a> AnimationDecoder<'a> for ApngDecoder<R> {
+    fn into_frames(self) -> Frames<'a> {
+        struct FrameIterator<R: Read>(ApngDecoder<R>);
+
+        impl<R: Read> Iterator for FrameIterator<R> {
+            type Item = ImageResult<Frame>;
+
+            fn next(&mut self) -> Option<Self::Item> {
+                let image = match self.0.mix_next_frame() {
+                    Ok(Some(image)) => image.clone(),
+                    Ok(None) => return None,
+                    Err(err) => return Some(Err(err)),
+                };
+
+                let info = self.0.inner.reader.info();
+                let fc = info.frame_control().unwrap();
+                // PNG delays are ratios in seconds.
+                let num = u32::from(fc.delay_num) * 1_000u32;
+                let denom = match fc.delay_den {
+                    // The standard dictates to replace by 100 when the denominator is 0.
+                    0 => 100,
+                    d => u32::from(d),
+                };
+                let delay = Delay::from_ratio(Ratio::new(num, denom));
+                Some(Ok(Frame::from_parts(image, 0, 0, delay)))
+            }
+        }
+
+        Frames::new(Box::new(FrameIterator(self)))
+    }
+}
+
+/// PNG encoder
+pub struct PngEncoder<W: Write> {
+    w: W,
+    compression: CompressionType,
+    filter: FilterType,
+}
+
+/// Compression level of a PNG encoder. The default setting is `Fast`.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[non_exhaustive]
+pub enum CompressionType {
+    /// Default compression level
+    Default,
+    /// Fast, minimal compression
+    Fast,
+    /// High compression level
+    Best,
+    /// Huffman coding compression
+    #[deprecated(note = "use one of the other compression levels instead, such as 'Fast'")]
+    Huffman,
+    /// Run-length encoding compression
+    #[deprecated(note = "use one of the other compression levels instead, such as 'Fast'")]
+    Rle,
+}
+
+/// Filter algorithms used to process image data to improve compression.
+///
+/// The default filter is `Adaptive`.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[non_exhaustive]
+pub enum FilterType {
+    /// No processing done, best used for low bit depth grayscale or data with a
+    /// low color count
+    NoFilter,
+    /// Filters based on previous pixel in the same scanline
+    Sub,
+    /// Filters based on the scanline above
+    Up,
+    /// Filters based on the average of left and right neighbor pixels
+    Avg,
+    /// Algorithm that takes into account the left, upper left, and above pixels
+    Paeth,
+    /// Uses a heuristic to select one of the preceding filters for each
+    /// scanline rather than one filter for the entire image
+    Adaptive,
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[non_exhaustive]
+enum BadPngRepresentation {
+    ColorType(ColorType),
+}
+
+impl<W: Write> PngEncoder<W> {
+    /// Create a new encoder that writes its output to `w`
+    pub fn new(w: W) -> PngEncoder<W> {
+        PngEncoder {
+            w,
+            compression: CompressionType::default(),
+            filter: FilterType::default(),
+        }
+    }
+
+    /// Create a new encoder that writes its output to `w` with `CompressionType` `compression` and
+    /// `FilterType` `filter`.
+    ///
+    /// It is best to view the options as a _hint_ to the implementation on the smallest or fastest
+    /// option for encoding a particular image. That is, using options that map directly to a PNG
+    /// image parameter will use this parameter where possible. But variants that have no direct
+    /// mapping may be interpreted differently in minor versions. The exact output is expressly
+    /// __not__ part of the SemVer stability guarantee.
+ /// + /// Note that it is not optimal to use a single filter type, so an adaptive + /// filter type is selected as the default. The filter which best minimizes + /// file size may change with the type of compression used. + pub fn new_with_quality( + w: W, + compression: CompressionType, + filter: FilterType, + ) -> PngEncoder<W> { + PngEncoder { + w, + compression, + filter, + } + } + + /// Encodes the image `data` that has dimensions `width` and `height` and `ColorType` `c`. + /// + /// Expects data in big endian. + #[deprecated = "Use `PngEncoder::write_image` instead. Beware that `write_image` has a different endianness convention"] + pub fn encode(self, data: &[u8], width: u32, height: u32, color: ColorType) -> ImageResult<()> { + self.encode_inner(data, width, height, color) + } + + fn encode_inner( + self, + data: &[u8], + width: u32, + height: u32, + color: ColorType, + ) -> ImageResult<()> { + let (ct, bits) = match color { + ColorType::L8 => (png::ColorType::Grayscale, png::BitDepth::Eight), + ColorType::L16 => (png::ColorType::Grayscale, png::BitDepth::Sixteen), + ColorType::La8 => (png::ColorType::GrayscaleAlpha, png::BitDepth::Eight), + ColorType::La16 => (png::ColorType::GrayscaleAlpha, png::BitDepth::Sixteen), + ColorType::Rgb8 => (png::ColorType::Rgb, png::BitDepth::Eight), + ColorType::Rgb16 => (png::ColorType::Rgb, png::BitDepth::Sixteen), + ColorType::Rgba8 => (png::ColorType::Rgba, png::BitDepth::Eight), + ColorType::Rgba16 => (png::ColorType::Rgba, png::BitDepth::Sixteen), + _ => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Png.into(), + UnsupportedErrorKind::Color(color.into()), + ), + )) + } + }; + let comp = match self.compression { + CompressionType::Default => png::Compression::Default, + CompressionType::Best => png::Compression::Best, + _ => png::Compression::Fast, + }; + let (filter, adaptive_filter) = match self.filter { + FilterType::NoFilter => ( + png::FilterType::NoFilter, + png::AdaptiveFilterType::NonAdaptive, + ), + FilterType::Sub => (png::FilterType::Sub, png::AdaptiveFilterType::NonAdaptive), + FilterType::Up => (png::FilterType::Up, png::AdaptiveFilterType::NonAdaptive), + FilterType::Avg => (png::FilterType::Avg, png::AdaptiveFilterType::NonAdaptive), + FilterType::Paeth => (png::FilterType::Paeth, png::AdaptiveFilterType::NonAdaptive), + FilterType::Adaptive => (png::FilterType::Sub, png::AdaptiveFilterType::Adaptive), + }; + + let mut encoder = png::Encoder::new(self.w, width, height); + encoder.set_color(ct); + encoder.set_depth(bits); + encoder.set_compression(comp); + encoder.set_filter(filter); + encoder.set_adaptive_filter(adaptive_filter); + let mut writer = encoder + .write_header() + .map_err(|e| ImageError::IoError(e.into()))?; + writer + .write_image_data(data) + .map_err(|e| ImageError::IoError(e.into())) + } +} + +impl<W: Write> ImageEncoder for PngEncoder<W> { + /// Write a PNG image with the specified width, height, and color type. + /// + /// For color types with 16-bit per channel or larger, the contents of `buf` should be in + /// native endian. PngEncoder will automatically convert to big endian as required by the + /// underlying PNG format. + fn write_image( + self, + buf: &[u8], + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<()> { + use byteorder::{BigEndian, ByteOrder, NativeEndian}; + use ColorType::*; + + // PNG images are big endian. 
For 16 bit per channel and larger types, + // the buffer may need to be reordered to big endian per the + // contract of `write_image`. + // TODO: assumes equal channel bit depth. + match color_type { + L8 | La8 | Rgb8 | Rgba8 => { + // No reodering necessary for u8 + self.encode_inner(buf, width, height, color_type) + } + L16 | La16 | Rgb16 | Rgba16 => { + // Because the buffer is immutable and the PNG encoder does not + // yet take Write/Read traits, create a temporary buffer for + // big endian reordering. + let mut reordered = vec![0; buf.len()]; + buf.chunks(2) + .zip(reordered.chunks_mut(2)) + .for_each(|(b, r)| BigEndian::write_u16(r, NativeEndian::read_u16(b))); + self.encode_inner(&reordered, width, height, color_type) + } + _ => Err(ImageError::Encoding(EncodingError::new( + ImageFormat::Png.into(), + BadPngRepresentation::ColorType(color_type), + ))), + } + } +} + +impl ImageError { + fn from_png(err: png::DecodingError) -> ImageError { + use png::DecodingError::*; + match err { + IoError(err) => ImageError::IoError(err), + // The input image was not a valid PNG. + err @ Format(_) => { + ImageError::Decoding(DecodingError::new(ImageFormat::Png.into(), err)) + } + // Other is used when: + // - The decoder is polled for more animation frames despite being done (or not being animated + // in the first place). + // - The output buffer does not have the required size. + err @ Parameter(_) => ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic(err.to_string()), + )), + LimitsExceeded => { + ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory)) + } + } + } +} + +impl Default for CompressionType { + fn default() -> Self { + CompressionType::Fast + } +} + +impl Default for FilterType { + fn default() -> Self { + FilterType::Adaptive + } +} + +impl fmt::Display for BadPngRepresentation { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::ColorType(color_type) => write!( + f, + "The color {:?} can not be represented in PNG.", + color_type + ), + } + } +} + +impl std::error::Error for BadPngRepresentation {} + +#[cfg(test)] +mod tests { + use super::*; + use crate::image::ImageDecoder; + use crate::ImageOutputFormat; + + use std::io::{Cursor, Read}; + + #[test] + fn ensure_no_decoder_off_by_one() { + let dec = PngDecoder::new( + std::fs::File::open("tests/images/png/bugfixes/debug_triangle_corners_widescreen.png") + .unwrap(), + ) + .expect("Unable to read PNG file (does it exist?)"); + + assert_eq![(2000, 1000), dec.dimensions()]; + + assert_eq![ + ColorType::Rgb8, + dec.color_type(), + "Image MUST have the Rgb8 format" + ]; + + let correct_bytes = dec + .into_reader() + .expect("Unable to read file") + .bytes() + .map(|x| x.expect("Unable to read byte")) + .collect::<Vec<u8>>(); + + assert_eq![6_000_000, correct_bytes.len()]; + } + + #[test] + fn underlying_error() { + use std::error::Error; + + let mut not_png = + std::fs::read("tests/images/png/bugfixes/debug_triangle_corners_widescreen.png") + .unwrap(); + not_png[0] = 0; + + let error = PngDecoder::new(¬_png[..]).err().unwrap(); + let _ = error + .source() + .unwrap() + .downcast_ref::<png::DecodingError>() + .expect("Caused by a png error"); + } + + #[test] + fn encode_bad_color_type() { + // regression test for issue #1663 + let image = DynamicImage::new_rgb32f(1, 1); + let mut target = Cursor::new(vec![]); + let _ = image.write_to(&mut target, ImageOutputFormat::Png); + } +} diff --git a/vendor/image/src/codecs/pnm/autobreak.rs 
b/vendor/image/src/codecs/pnm/autobreak.rs new file mode 100644 index 0000000..cea2cd8 --- /dev/null +++ b/vendor/image/src/codecs/pnm/autobreak.rs @@ -0,0 +1,124 @@ +//! Insert line breaks between written buffers when they would overflow the line length. +use std::io; + +// The pnm standard says to insert line breaks after 70 characters. Assumes that no line breaks +// are actually written. We have to be careful to fully commit buffers or not commit them at all, +// otherwise we might insert a newline in the middle of a token. +pub(crate) struct AutoBreak<W: io::Write> { + wrapped: W, + line_capacity: usize, + line: Vec<u8>, + has_newline: bool, + panicked: bool, // see https://github.com/rust-lang/rust/issues/30888 +} + +impl<W: io::Write> AutoBreak<W> { + pub(crate) fn new(writer: W, line_capacity: usize) -> Self { + AutoBreak { + wrapped: writer, + line_capacity, + line: Vec::with_capacity(line_capacity + 1), + has_newline: false, + panicked: false, + } + } + + fn flush_buf(&mut self) -> io::Result<()> { + // from BufWriter + let mut written = 0; + let len = self.line.len(); + let mut ret = Ok(()); + while written < len { + self.panicked = true; + let r = self.wrapped.write(&self.line[written..]); + self.panicked = false; + match r { + Ok(0) => { + ret = Err(io::Error::new( + io::ErrorKind::WriteZero, + "failed to write the buffered data", + )); + break; + } + Ok(n) => written += n, + Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} + Err(e) => { + ret = Err(e); + break; + } + } + } + if written > 0 { + self.line.drain(..written); + } + ret + } +} + +impl<W: io::Write> io::Write for AutoBreak<W> { + fn write(&mut self, buffer: &[u8]) -> io::Result<usize> { + if self.has_newline { + self.flush()?; + self.has_newline = false; + } + + if !self.line.is_empty() && self.line.len() + buffer.len() > self.line_capacity { + self.line.push(b'\n'); + self.has_newline = true; + self.flush()?; + self.has_newline = false; + } + + self.line.extend_from_slice(buffer); + Ok(buffer.len()) + } + + fn flush(&mut self) -> io::Result<()> { + self.flush_buf()?; + self.wrapped.flush() + } +} + +impl<W: io::Write> Drop for AutoBreak<W> { + fn drop(&mut self) { + if !self.panicked { + let _r = self.flush_buf(); + // internal writer flushed automatically by Drop + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::Write; + + #[test] + fn test_aligned_writes() { + let mut output = Vec::new(); + + { + let mut writer = AutoBreak::new(&mut output, 10); + writer.write_all(b"0123456789").unwrap(); + writer.write_all(b"0123456789").unwrap(); + } + + assert_eq!(output.as_slice(), b"0123456789\n0123456789"); + } + + #[test] + fn test_greater_writes() { + let mut output = Vec::new(); + + { + let mut writer = AutoBreak::new(&mut output, 10); + writer.write_all(b"012").unwrap(); + writer.write_all(b"345").unwrap(); + writer.write_all(b"0123456789").unwrap(); + writer.write_all(b"012345678910").unwrap(); + writer.write_all(b"_").unwrap(); + } + + assert_eq!(output.as_slice(), b"012345\n0123456789\n012345678910\n_"); + } +} diff --git a/vendor/image/src/codecs/pnm/decoder.rs b/vendor/image/src/codecs/pnm/decoder.rs new file mode 100644 index 0000000..a495871 --- /dev/null +++ b/vendor/image/src/codecs/pnm/decoder.rs @@ -0,0 +1,1272 @@ +use std::convert::TryFrom; +use std::convert::TryInto; +use std::error; +use std::fmt::{self, Display}; +use std::io::{self, BufRead, Cursor, Read}; +use std::marker::PhantomData; +use std::mem; +use std::num::ParseIntError; +use std::str::{self, FromStr}; + +use 
super::{ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader, PixmapHeader}; +use super::{HeaderRecord, PnmHeader, PnmSubtype, SampleEncoding}; +use crate::color::{ColorType, ExtendedColorType}; +use crate::error::{ + DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind, +}; +use crate::image::{self, ImageDecoder, ImageFormat}; +use crate::utils; + +use byteorder::{BigEndian, ByteOrder, NativeEndian}; + +/// All errors that can occur when attempting to parse a PNM +#[derive(Debug, Clone)] +enum DecoderError { + /// PNM's "P[123456]" signature wrong or missing + PnmMagicInvalid([u8; 2]), + /// Couldn't parse the specified string as an integer from the specified source + UnparsableValue(ErrorDataSource, String, ParseIntError), + + /// More than the exactly one allowed plane specified by the format + NonAsciiByteInHeader(u8), + /// The PAM header contained a non-ASCII byte + NonAsciiLineInPamHeader, + /// A sample string contained a non-ASCII byte + NonAsciiSample, + + /// The byte after the P7 magic was not 0x0A NEWLINE + NotNewlineAfterP7Magic(u8), + /// The PNM header had too few lines + UnexpectedPnmHeaderEnd, + + /// The specified line was specified twice + HeaderLineDuplicated(PnmHeaderLine), + /// The line with the specified ID was not understood + HeaderLineUnknown(String), + /// At least one of the required lines were missing from the header (are `None` here) + /// + /// Same names as [`PnmHeaderLine`](enum.PnmHeaderLine.html) + #[allow(missing_docs)] + HeaderLineMissing { + height: Option<u32>, + width: Option<u32>, + depth: Option<u32>, + maxval: Option<u32>, + }, + + /// Not enough data was provided to the Decoder to decode the image + InputTooShort, + /// Sample raster contained unexpected byte + UnexpectedByteInRaster(u8), + /// Specified sample was out of bounds (e.g. 
>1 in B&W) + SampleOutOfBounds(u8), + /// The image's maxval exceeds 0xFFFF + MaxvalTooBig(u32), + + /// The specified tuple type supports restricted depths and maxvals, those restrictions were not met + InvalidDepthOrMaxval { + tuple_type: ArbitraryTuplType, + depth: u32, + maxval: u32, + }, + /// The specified tuple type supports restricted depths, those restrictions were not met + InvalidDepth { + tuple_type: ArbitraryTuplType, + depth: u32, + }, + /// The tuple type was not recognised by the parser + TupleTypeUnrecognised, + + /// Overflowed the specified value when parsing + Overflow, +} + +impl Display for DecoderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DecoderError::PnmMagicInvalid(magic) => f.write_fmt(format_args!( + "Expected magic constant for PNM: P1..P7, got [{:#04X?}, {:#04X?}]", + magic[0], magic[1] + )), + DecoderError::UnparsableValue(src, data, err) => { + f.write_fmt(format_args!("Error parsing {:?} as {}: {}", data, src, err)) + } + + DecoderError::NonAsciiByteInHeader(c) => { + f.write_fmt(format_args!("Non-ASCII character {:#04X?} in header", c)) + } + DecoderError::NonAsciiLineInPamHeader => f.write_str("Non-ASCII line in PAM header"), + DecoderError::NonAsciiSample => { + f.write_str("Non-ASCII character where sample value was expected") + } + + DecoderError::NotNewlineAfterP7Magic(c) => f.write_fmt(format_args!( + "Expected newline after P7 magic, got {:#04X?}", + c + )), + DecoderError::UnexpectedPnmHeaderEnd => f.write_str("Unexpected end of PNM header"), + + DecoderError::HeaderLineDuplicated(line) => { + f.write_fmt(format_args!("Duplicate {} line", line)) + } + DecoderError::HeaderLineUnknown(identifier) => f.write_fmt(format_args!( + "Unknown header line with identifier {:?}", + identifier + )), + DecoderError::HeaderLineMissing { + height, + width, + depth, + maxval, + } => f.write_fmt(format_args!( + "Missing header line: have height={:?}, width={:?}, depth={:?}, maxval={:?}", + height, width, depth, maxval + )), + + DecoderError::InputTooShort => { + f.write_str("Not enough data was provided to the Decoder to decode the image") + } + DecoderError::UnexpectedByteInRaster(c) => f.write_fmt(format_args!( + "Unexpected character {:#04X?} within sample raster", + c + )), + DecoderError::SampleOutOfBounds(val) => { + f.write_fmt(format_args!("Sample value {} outside of bounds", val)) + } + DecoderError::MaxvalTooBig(maxval) => { + f.write_fmt(format_args!("Image MAXVAL exceeds {}: {}", 0xFFFF, maxval)) + } + + DecoderError::InvalidDepthOrMaxval { + tuple_type, + depth, + maxval, + } => f.write_fmt(format_args!( + "Invalid depth ({}) or maxval ({}) for tuple type {}", + depth, + maxval, + tuple_type.name() + )), + DecoderError::InvalidDepth { tuple_type, depth } => f.write_fmt(format_args!( + "Invalid depth ({}) for tuple type {}", + depth, + tuple_type.name() + )), + DecoderError::TupleTypeUnrecognised => f.write_str("Tuple type not recognized"), + DecoderError::Overflow => f.write_str("Overflow when parsing value"), + } + } +} + +/// Note: should `pnm` be extracted into a separate crate, +/// this will need to be hidden until that crate hits version `1.0`. 
+impl From<DecoderError> for ImageError { + fn from(e: DecoderError) -> ImageError { + ImageError::Decoding(DecodingError::new(ImageFormat::Pnm.into(), e)) + } +} + +impl error::Error for DecoderError { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + match self { + DecoderError::UnparsableValue(_, _, err) => Some(err), + _ => None, + } + } +} + +/// Single-value lines in a PNM header +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +enum PnmHeaderLine { + /// "HEIGHT" + Height, + /// "WIDTH" + Width, + /// "DEPTH" + Depth, + /// "MAXVAL", a.k.a. `maxwhite` + Maxval, +} + +impl Display for PnmHeaderLine { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(match self { + PnmHeaderLine::Height => "HEIGHT", + PnmHeaderLine::Width => "WIDTH", + PnmHeaderLine::Depth => "DEPTH", + PnmHeaderLine::Maxval => "MAXVAL", + }) + } +} + +/// Single-value lines in a PNM header +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +enum ErrorDataSource { + /// One of the header lines + Line(PnmHeaderLine), + /// Value in the preamble + Preamble, + /// Sample/pixel data + Sample, +} + +impl Display for ErrorDataSource { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ErrorDataSource::Line(l) => l.fmt(f), + ErrorDataSource::Preamble => f.write_str("number in preamble"), + ErrorDataSource::Sample => f.write_str("sample"), + } + } +} + +/// Dynamic representation, represents all decodable (sample, depth) combinations. +#[derive(Clone, Copy)] +enum TupleType { + PbmBit, + BWBit, + GrayU8, + GrayU16, + RGBU8, + RGBU16, +} + +trait Sample { + fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize>; + fn from_bytes(bytes: &[u8], row_size: usize, output_buf: &mut [u8]) -> ImageResult<()>; + fn from_ascii(reader: &mut dyn Read, output_buf: &mut [u8]) -> ImageResult<()>; +} + +struct U8; +struct U16; +struct PbmBit; +struct BWBit; + +trait DecodableImageHeader { + fn tuple_type(&self) -> ImageResult<TupleType>; +} + +/// PNM decoder +pub struct PnmDecoder<R> { + reader: R, + header: PnmHeader, + tuple: TupleType, +} + +impl<R: BufRead> PnmDecoder<R> { + /// Create a new decoder that decodes from the stream ```read``` + pub fn new(mut buffered_read: R) -> ImageResult<PnmDecoder<R>> { + let magic = buffered_read.read_magic_constant()?; + + let subtype = match magic { + [b'P', b'1'] => PnmSubtype::Bitmap(SampleEncoding::Ascii), + [b'P', b'2'] => PnmSubtype::Graymap(SampleEncoding::Ascii), + [b'P', b'3'] => PnmSubtype::Pixmap(SampleEncoding::Ascii), + [b'P', b'4'] => PnmSubtype::Bitmap(SampleEncoding::Binary), + [b'P', b'5'] => PnmSubtype::Graymap(SampleEncoding::Binary), + [b'P', b'6'] => PnmSubtype::Pixmap(SampleEncoding::Binary), + [b'P', b'7'] => PnmSubtype::ArbitraryMap, + _ => return Err(DecoderError::PnmMagicInvalid(magic).into()), + }; + + let decoder = match subtype { + PnmSubtype::Bitmap(enc) => PnmDecoder::read_bitmap_header(buffered_read, enc), + PnmSubtype::Graymap(enc) => PnmDecoder::read_graymap_header(buffered_read, enc), + PnmSubtype::Pixmap(enc) => PnmDecoder::read_pixmap_header(buffered_read, enc), + PnmSubtype::ArbitraryMap => PnmDecoder::read_arbitrary_header(buffered_read), + }?; + + if utils::check_dimension_overflow( + decoder.dimensions().0, + decoder.dimensions().1, + decoder.color_type().bytes_per_pixel(), + ) { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Pnm.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Image 
dimensions ({}x{}) are too large", + decoder.dimensions().0, + decoder.dimensions().1 + )), + ), + )); + } + + Ok(decoder) + } + + /// Extract the reader and header after an image has been read. + pub fn into_inner(self) -> (R, PnmHeader) { + (self.reader, self.header) + } + + fn read_bitmap_header(mut reader: R, encoding: SampleEncoding) -> ImageResult<PnmDecoder<R>> { + let header = reader.read_bitmap_header(encoding)?; + Ok(PnmDecoder { + reader, + tuple: TupleType::PbmBit, + header: PnmHeader { + decoded: HeaderRecord::Bitmap(header), + encoded: None, + }, + }) + } + + fn read_graymap_header(mut reader: R, encoding: SampleEncoding) -> ImageResult<PnmDecoder<R>> { + let header = reader.read_graymap_header(encoding)?; + let tuple_type = header.tuple_type()?; + Ok(PnmDecoder { + reader, + tuple: tuple_type, + header: PnmHeader { + decoded: HeaderRecord::Graymap(header), + encoded: None, + }, + }) + } + + fn read_pixmap_header(mut reader: R, encoding: SampleEncoding) -> ImageResult<PnmDecoder<R>> { + let header = reader.read_pixmap_header(encoding)?; + let tuple_type = header.tuple_type()?; + Ok(PnmDecoder { + reader, + tuple: tuple_type, + header: PnmHeader { + decoded: HeaderRecord::Pixmap(header), + encoded: None, + }, + }) + } + + fn read_arbitrary_header(mut reader: R) -> ImageResult<PnmDecoder<R>> { + let header = reader.read_arbitrary_header()?; + let tuple_type = header.tuple_type()?; + Ok(PnmDecoder { + reader, + tuple: tuple_type, + header: PnmHeader { + decoded: HeaderRecord::Arbitrary(header), + encoded: None, + }, + }) + } +} + +trait HeaderReader: BufRead { + /// Reads the two magic constant bytes + fn read_magic_constant(&mut self) -> ImageResult<[u8; 2]> { + let mut magic: [u8; 2] = [0, 0]; + self.read_exact(&mut magic)?; + Ok(magic) + } + + /// Reads a string as well as a single whitespace after it, ignoring comments + fn read_next_string(&mut self) -> ImageResult<String> { + let mut bytes = Vec::new(); + + // pair input bytes with a bool mask to remove comments + let mark_comments = self.bytes().scan(true, |partof, read| { + let byte = match read { + Err(err) => return Some((*partof, Err(err))), + Ok(byte) => byte, + }; + let cur_enabled = *partof && byte != b'#'; + let next_enabled = cur_enabled || (byte == b'\r' || byte == b'\n'); + *partof = next_enabled; + Some((cur_enabled, Ok(byte))) + }); + + for (_, byte) in mark_comments.filter(|e| e.0) { + match byte { + Ok(b'\t') | Ok(b'\n') | Ok(b'\x0b') | Ok(b'\x0c') | Ok(b'\r') | Ok(b' ') => { + if !bytes.is_empty() { + break; // We're done as we already have some content + } + } + Ok(byte) if !byte.is_ascii() => { + return Err(DecoderError::NonAsciiByteInHeader(byte).into()) + } + Ok(byte) => { + bytes.push(byte); + } + Err(_) => break, + } + } + + if bytes.is_empty() { + return Err(ImageError::IoError(io::ErrorKind::UnexpectedEof.into())); + } + + if !bytes.as_slice().is_ascii() { + // We have only filled the buffer with characters for which `byte.is_ascii()` holds. + unreachable!("Non-ASCII character should have returned sooner") + } + + let string = String::from_utf8(bytes) + // We checked the precondition ourselves a few lines before, `bytes.as_slice().is_ascii()`. 
+ .unwrap_or_else(|_| unreachable!("Only ASCII characters should be decoded")); + + Ok(string) + } + + /// Read the next line + fn read_next_line(&mut self) -> ImageResult<String> { + let mut buffer = String::new(); + self.read_line(&mut buffer)?; + Ok(buffer) + } + + fn read_next_u32(&mut self) -> ImageResult<u32> { + let s = self.read_next_string()?; + s.parse::<u32>() + .map_err(|err| DecoderError::UnparsableValue(ErrorDataSource::Preamble, s, err).into()) + } + + fn read_bitmap_header(&mut self, encoding: SampleEncoding) -> ImageResult<BitmapHeader> { + let width = self.read_next_u32()?; + let height = self.read_next_u32()?; + Ok(BitmapHeader { + encoding, + width, + height, + }) + } + + fn read_graymap_header(&mut self, encoding: SampleEncoding) -> ImageResult<GraymapHeader> { + self.read_pixmap_header(encoding).map( + |PixmapHeader { + encoding, + width, + height, + maxval, + }| GraymapHeader { + encoding, + width, + height, + maxwhite: maxval, + }, + ) + } + + fn read_pixmap_header(&mut self, encoding: SampleEncoding) -> ImageResult<PixmapHeader> { + let width = self.read_next_u32()?; + let height = self.read_next_u32()?; + let maxval = self.read_next_u32()?; + Ok(PixmapHeader { + encoding, + width, + height, + maxval, + }) + } + + fn read_arbitrary_header(&mut self) -> ImageResult<ArbitraryHeader> { + fn parse_single_value_line( + line_val: &mut Option<u32>, + rest: &str, + line: PnmHeaderLine, + ) -> ImageResult<()> { + if line_val.is_some() { + Err(DecoderError::HeaderLineDuplicated(line).into()) + } else { + let v = rest.trim().parse().map_err(|err| { + DecoderError::UnparsableValue(ErrorDataSource::Line(line), rest.to_owned(), err) + })?; + *line_val = Some(v); + Ok(()) + } + } + + match self.bytes().next() { + None => return Err(ImageError::IoError(io::ErrorKind::UnexpectedEof.into())), + Some(Err(io)) => return Err(ImageError::IoError(io)), + Some(Ok(b'\n')) => (), + Some(Ok(c)) => return Err(DecoderError::NotNewlineAfterP7Magic(c).into()), + } + + let mut line = String::new(); + let mut height: Option<u32> = None; + let mut width: Option<u32> = None; + let mut depth: Option<u32> = None; + let mut maxval: Option<u32> = None; + let mut tupltype: Option<String> = None; + loop { + line.truncate(0); + let len = self.read_line(&mut line)?; + if len == 0 { + return Err(DecoderError::UnexpectedPnmHeaderEnd.into()); + } + if line.as_bytes()[0] == b'#' { + continue; + } + if !line.is_ascii() { + return Err(DecoderError::NonAsciiLineInPamHeader.into()); + } + #[allow(deprecated)] + let (identifier, rest) = line + .trim_left() + .split_at(line.find(char::is_whitespace).unwrap_or(line.len())); + match identifier { + "ENDHDR" => break, + "HEIGHT" => parse_single_value_line(&mut height, rest, PnmHeaderLine::Height)?, + "WIDTH" => parse_single_value_line(&mut width, rest, PnmHeaderLine::Width)?, + "DEPTH" => parse_single_value_line(&mut depth, rest, PnmHeaderLine::Depth)?, + "MAXVAL" => parse_single_value_line(&mut maxval, rest, PnmHeaderLine::Maxval)?, + "TUPLTYPE" => { + let identifier = rest.trim(); + if tupltype.is_some() { + let appended = tupltype.take().map(|mut v| { + v.push(' '); + v.push_str(identifier); + v + }); + tupltype = appended; + } else { + tupltype = Some(identifier.to_string()); + } + } + _ => return Err(DecoderError::HeaderLineUnknown(identifier.to_string()).into()), + } + } + + let (h, w, d, m) = match (height, width, depth, maxval) { + (Some(h), Some(w), Some(d), Some(m)) => (h, w, d, m), + _ => { + return Err(DecoderError::HeaderLineMissing { + height, + width, + 
depth, + maxval, + } + .into()) + } + }; + + let tupltype = match tupltype { + None => None, + Some(ref t) if t == "BLACKANDWHITE" => Some(ArbitraryTuplType::BlackAndWhite), + Some(ref t) if t == "BLACKANDWHITE_ALPHA" => { + Some(ArbitraryTuplType::BlackAndWhiteAlpha) + } + Some(ref t) if t == "GRAYSCALE" => Some(ArbitraryTuplType::Grayscale), + Some(ref t) if t == "GRAYSCALE_ALPHA" => Some(ArbitraryTuplType::GrayscaleAlpha), + Some(ref t) if t == "RGB" => Some(ArbitraryTuplType::RGB), + Some(ref t) if t == "RGB_ALPHA" => Some(ArbitraryTuplType::RGBAlpha), + Some(other) => Some(ArbitraryTuplType::Custom(other)), + }; + + Ok(ArbitraryHeader { + height: h, + width: w, + depth: d, + maxval: m, + tupltype, + }) + } +} + +impl<R> HeaderReader for R where R: BufRead {} + +/// Wrapper struct around a `Cursor<Vec<u8>>` +pub struct PnmReader<R>(Cursor<Vec<u8>>, PhantomData<R>); +impl<R> Read for PnmReader<R> { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { + if self.0.position() == 0 && buf.is_empty() { + mem::swap(buf, self.0.get_mut()); + Ok(buf.len()) + } else { + self.0.read_to_end(buf) + } + } +} + +impl<'a, R: 'a + Read> ImageDecoder<'a> for PnmDecoder<R> { + type Reader = PnmReader<R>; + + fn dimensions(&self) -> (u32, u32) { + (self.header.width(), self.header.height()) + } + + fn color_type(&self) -> ColorType { + match self.tuple { + TupleType::PbmBit => ColorType::L8, + TupleType::BWBit => ColorType::L8, + TupleType::GrayU8 => ColorType::L8, + TupleType::GrayU16 => ColorType::L16, + TupleType::RGBU8 => ColorType::Rgb8, + TupleType::RGBU16 => ColorType::Rgb16, + } + } + + fn original_color_type(&self) -> ExtendedColorType { + match self.tuple { + TupleType::PbmBit => ExtendedColorType::L1, + TupleType::BWBit => ExtendedColorType::L1, + TupleType::GrayU8 => ExtendedColorType::L8, + TupleType::GrayU16 => ExtendedColorType::L16, + TupleType::RGBU8 => ExtendedColorType::Rgb8, + TupleType::RGBU16 => ExtendedColorType::Rgb16, + } + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + Ok(PnmReader( + Cursor::new(image::decoder_to_vec(self)?), + PhantomData, + )) + } + + fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> { + assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); + match self.tuple { + TupleType::PbmBit => self.read_samples::<PbmBit>(1, buf), + TupleType::BWBit => self.read_samples::<BWBit>(1, buf), + TupleType::RGBU8 => self.read_samples::<U8>(3, buf), + TupleType::RGBU16 => self.read_samples::<U16>(3, buf), + TupleType::GrayU8 => self.read_samples::<U8>(1, buf), + TupleType::GrayU16 => self.read_samples::<U16>(1, buf), + } + } +} + +impl<R: Read> PnmDecoder<R> { + fn read_samples<S: Sample>(&mut self, components: u32, buf: &mut [u8]) -> ImageResult<()> { + match self.subtype().sample_encoding() { + SampleEncoding::Binary => { + let width = self.header.width(); + let height = self.header.height(); + let bytecount = S::bytelen(width, height, components)?; + + let mut bytes = vec![]; + self.reader + .by_ref() + // This conversion is potentially lossy but unlikely and in that case we error + // later anyways. 
+ .take(bytecount as u64) + .read_to_end(&mut bytes)?; + if bytes.len() != bytecount { + return Err(DecoderError::InputTooShort.into()); + } + + let width: usize = width.try_into().map_err(|_| DecoderError::Overflow)?; + let components: usize = + components.try_into().map_err(|_| DecoderError::Overflow)?; + let row_size = width + .checked_mul(components) + .ok_or(DecoderError::Overflow)?; + + S::from_bytes(&bytes, row_size, buf) + } + SampleEncoding::Ascii => self.read_ascii::<S>(buf), + } + } + + fn read_ascii<Basic: Sample>(&mut self, output_buf: &mut [u8]) -> ImageResult<()> { + Basic::from_ascii(&mut self.reader, output_buf) + } + + /// Get the pnm subtype, depending on the magic constant contained in the header + pub fn subtype(&self) -> PnmSubtype { + self.header.subtype() + } +} + +fn read_separated_ascii<T: FromStr<Err = ParseIntError>>(reader: &mut dyn Read) -> ImageResult<T> +where + T::Err: Display, +{ + let is_separator = |v: &u8| matches! { *v, b'\t' | b'\n' | b'\x0b' | b'\x0c' | b'\r' | b' ' }; + + let token = reader + .bytes() + .skip_while(|v| v.as_ref().ok().map(is_separator).unwrap_or(false)) + .take_while(|v| v.as_ref().ok().map(|c| !is_separator(c)).unwrap_or(false)) + .collect::<Result<Vec<u8>, _>>()?; + + if !token.is_ascii() { + return Err(DecoderError::NonAsciiSample.into()); + } + + let string = str::from_utf8(&token) + // We checked the precondition ourselves a few lines before with `token.is_ascii()`. + .unwrap_or_else(|_| unreachable!("Only ASCII characters should be decoded")); + + string.parse().map_err(|err| { + DecoderError::UnparsableValue(ErrorDataSource::Sample, string.to_owned(), err).into() + }) +} + +impl Sample for U8 { + fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize> { + Ok((width * height * samples) as usize) + } + + fn from_bytes(bytes: &[u8], _row_size: usize, output_buf: &mut [u8]) -> ImageResult<()> { + output_buf.copy_from_slice(bytes); + Ok(()) + } + + fn from_ascii(reader: &mut dyn Read, output_buf: &mut [u8]) -> ImageResult<()> { + for b in output_buf { + *b = read_separated_ascii(reader)?; + } + Ok(()) + } +} + +impl Sample for U16 { + fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize> { + Ok((width * height * samples * 2) as usize) + } + + fn from_bytes(bytes: &[u8], _row_size: usize, output_buf: &mut [u8]) -> ImageResult<()> { + output_buf.copy_from_slice(bytes); + for chunk in output_buf.chunks_exact_mut(2) { + let v = BigEndian::read_u16(chunk); + NativeEndian::write_u16(chunk, v); + } + Ok(()) + } + + fn from_ascii(reader: &mut dyn Read, output_buf: &mut [u8]) -> ImageResult<()> { + for chunk in output_buf.chunks_exact_mut(2) { + let v = read_separated_ascii::<u16>(reader)?; + NativeEndian::write_u16(chunk, v); + } + Ok(()) + } +} + +// The image is encoded in rows of bits, high order bits first. Any bits beyond the row bits should +// be ignored. Also, contrary to rgb, black pixels are encoded as a 1 while white is 0. This will +// need to be reversed for the grayscale output. 
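+// For example, the byte 0b0110_1100 in a 6-pixel-wide row expands to the bits
+// 0, 1, 1, 0, 1, 1 (the last two bits are row padding), which become the grayscale samples
+// 255, 0, 0, 255, 0, 0 after inversion; see the `pbm_binary` test below.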
+impl Sample for PbmBit { + fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize> { + let count = width * samples; + let linelen = (count / 8) + ((count % 8) != 0) as u32; + Ok((linelen * height) as usize) + } + + fn from_bytes(bytes: &[u8], row_size: usize, output_buf: &mut [u8]) -> ImageResult<()> { + let mut expanded = utils::expand_bits(1, row_size.try_into().unwrap(), bytes); + for b in expanded.iter_mut() { + *b = !*b; + } + output_buf.copy_from_slice(&expanded); + Ok(()) + } + + fn from_ascii(reader: &mut dyn Read, output_buf: &mut [u8]) -> ImageResult<()> { + let mut bytes = reader.bytes(); + for b in output_buf { + loop { + let byte = bytes + .next() + .ok_or_else::<ImageError, _>(|| DecoderError::InputTooShort.into())??; + match byte { + b'\t' | b'\n' | b'\x0b' | b'\x0c' | b'\r' | b' ' => continue, + b'0' => *b = 255, + b'1' => *b = 0, + c => return Err(DecoderError::UnexpectedByteInRaster(c).into()), + } + break; + } + } + + Ok(()) + } +} + +// Encoded just like a normal U8 but we check the values. +impl Sample for BWBit { + fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize> { + U8::bytelen(width, height, samples) + } + + fn from_bytes(bytes: &[u8], row_size: usize, output_buf: &mut [u8]) -> ImageResult<()> { + U8::from_bytes(bytes, row_size, output_buf)?; + if let Some(val) = output_buf.iter().find(|&val| *val > 1) { + return Err(DecoderError::SampleOutOfBounds(*val).into()); + } + Ok(()) + } + + fn from_ascii(_reader: &mut dyn Read, _output_buf: &mut [u8]) -> ImageResult<()> { + unreachable!("BW bits from anymaps are never encoded as ASCII") + } +} + +impl DecodableImageHeader for BitmapHeader { + fn tuple_type(&self) -> ImageResult<TupleType> { + Ok(TupleType::PbmBit) + } +} + +impl DecodableImageHeader for GraymapHeader { + fn tuple_type(&self) -> ImageResult<TupleType> { + match self.maxwhite { + v if v <= 0xFF => Ok(TupleType::GrayU8), + v if v <= 0xFFFF => Ok(TupleType::GrayU16), + _ => Err(DecoderError::MaxvalTooBig(self.maxwhite).into()), + } + } +} + +impl DecodableImageHeader for PixmapHeader { + fn tuple_type(&self) -> ImageResult<TupleType> { + match self.maxval { + v if v <= 0xFF => Ok(TupleType::RGBU8), + v if v <= 0xFFFF => Ok(TupleType::RGBU16), + _ => Err(DecoderError::MaxvalTooBig(self.maxval).into()), + } + } +} + +impl DecodableImageHeader for ArbitraryHeader { + fn tuple_type(&self) -> ImageResult<TupleType> { + match self.tupltype { + None if self.depth == 1 => Ok(TupleType::GrayU8), + None if self.depth == 2 => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Pnm.into(), + UnsupportedErrorKind::Color(ExtendedColorType::La8), + ), + )), + None if self.depth == 3 => Ok(TupleType::RGBU8), + None if self.depth == 4 => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Pnm.into(), + UnsupportedErrorKind::Color(ExtendedColorType::Rgba8), + ), + )), + + Some(ArbitraryTuplType::BlackAndWhite) if self.maxval == 1 && self.depth == 1 => { + Ok(TupleType::BWBit) + } + Some(ArbitraryTuplType::BlackAndWhite) => Err(DecoderError::InvalidDepthOrMaxval { + tuple_type: ArbitraryTuplType::BlackAndWhite, + maxval: self.maxval, + depth: self.depth, + } + .into()), + + Some(ArbitraryTuplType::Grayscale) if self.depth == 1 && self.maxval <= 0xFF => { + Ok(TupleType::GrayU8) + } + Some(ArbitraryTuplType::Grayscale) if self.depth <= 1 && self.maxval <= 0xFFFF => { + Ok(TupleType::GrayU16) + } + Some(ArbitraryTuplType::Grayscale) => Err(DecoderError::InvalidDepthOrMaxval 
{ + tuple_type: ArbitraryTuplType::Grayscale, + maxval: self.maxval, + depth: self.depth, + } + .into()), + + Some(ArbitraryTuplType::RGB) if self.depth == 3 && self.maxval <= 0xFF => { + Ok(TupleType::RGBU8) + } + Some(ArbitraryTuplType::RGB) if self.depth == 3 && self.maxval <= 0xFFFF => { + Ok(TupleType::RGBU16) + } + Some(ArbitraryTuplType::RGB) => Err(DecoderError::InvalidDepth { + tuple_type: ArbitraryTuplType::RGB, + depth: self.depth, + } + .into()), + + Some(ArbitraryTuplType::BlackAndWhiteAlpha) => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Pnm.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Color type {}", + ArbitraryTuplType::BlackAndWhiteAlpha.name() + )), + ), + )), + Some(ArbitraryTuplType::GrayscaleAlpha) => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Pnm.into(), + UnsupportedErrorKind::Color(ExtendedColorType::La8), + ), + )), + Some(ArbitraryTuplType::RGBAlpha) => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Pnm.into(), + UnsupportedErrorKind::Color(ExtendedColorType::Rgba8), + ), + )), + Some(ArbitraryTuplType::Custom(ref custom)) => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Pnm.into(), + UnsupportedErrorKind::GenericFeature(format!("Tuple type {:?}", custom)), + ), + )), + None => Err(DecoderError::TupleTypeUnrecognised.into()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + /// Tests reading of a valid blackandwhite pam + #[test] + fn pam_blackandwhite() { + let pamdata = b"P7 +WIDTH 4 +HEIGHT 4 +DEPTH 1 +MAXVAL 1 +TUPLTYPE BLACKANDWHITE +# Comment line +ENDHDR +\x01\x00\x00\x01\x01\x00\x00\x01\x01\x00\x00\x01\x01\x00\x00\x01"; + let decoder = PnmDecoder::new(&pamdata[..]).unwrap(); + assert_eq!(decoder.color_type(), ColorType::L8); + assert_eq!(decoder.original_color_type(), ExtendedColorType::L1); + assert_eq!(decoder.dimensions(), (4, 4)); + assert_eq!(decoder.subtype(), PnmSubtype::ArbitraryMap); + + let mut image = vec![0; decoder.total_bytes() as usize]; + decoder.read_image(&mut image).unwrap(); + assert_eq!( + image, + vec![ + 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, + 0x00, 0x01 + ] + ); + match PnmDecoder::new(&pamdata[..]).unwrap().into_inner() { + ( + _, + PnmHeader { + decoded: + HeaderRecord::Arbitrary(ArbitraryHeader { + width: 4, + height: 4, + maxval: 1, + depth: 1, + tupltype: Some(ArbitraryTuplType::BlackAndWhite), + }), + encoded: _, + }, + ) => (), + _ => panic!("Decoded header is incorrect"), + } + } + + /// Tests reading of a valid grayscale pam + #[test] + fn pam_grayscale() { + let pamdata = b"P7 +WIDTH 4 +HEIGHT 4 +DEPTH 1 +MAXVAL 255 +TUPLTYPE GRAYSCALE +# Comment line +ENDHDR +\xde\xad\xbe\xef\xde\xad\xbe\xef\xde\xad\xbe\xef\xde\xad\xbe\xef"; + let decoder = PnmDecoder::new(&pamdata[..]).unwrap(); + assert_eq!(decoder.color_type(), ColorType::L8); + assert_eq!(decoder.dimensions(), (4, 4)); + assert_eq!(decoder.subtype(), PnmSubtype::ArbitraryMap); + + let mut image = vec![0; decoder.total_bytes() as usize]; + decoder.read_image(&mut image).unwrap(); + assert_eq!( + image, + vec![ + 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, + 0xbe, 0xef + ] + ); + match PnmDecoder::new(&pamdata[..]).unwrap().into_inner() { + ( + _, + PnmHeader { + decoded: + HeaderRecord::Arbitrary(ArbitraryHeader { + width: 4, + height: 4, + depth: 1, + maxval: 255, + tupltype: 
Some(ArbitraryTuplType::Grayscale), + }), + encoded: _, + }, + ) => (), + _ => panic!("Decoded header is incorrect"), + } + } + + /// Tests reading of a valid rgb pam + #[test] + fn pam_rgb() { + let pamdata = b"P7 +# Comment line +MAXVAL 255 +TUPLTYPE RGB +DEPTH 3 +WIDTH 2 +HEIGHT 2 +ENDHDR +\xde\xad\xbe\xef\xde\xad\xbe\xef\xde\xad\xbe\xef"; + let decoder = PnmDecoder::new(&pamdata[..]).unwrap(); + assert_eq!(decoder.color_type(), ColorType::Rgb8); + assert_eq!(decoder.dimensions(), (2, 2)); + assert_eq!(decoder.subtype(), PnmSubtype::ArbitraryMap); + + let mut image = vec![0; decoder.total_bytes() as usize]; + decoder.read_image(&mut image).unwrap(); + assert_eq!( + image, + vec![0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef] + ); + match PnmDecoder::new(&pamdata[..]).unwrap().into_inner() { + ( + _, + PnmHeader { + decoded: + HeaderRecord::Arbitrary(ArbitraryHeader { + maxval: 255, + tupltype: Some(ArbitraryTuplType::RGB), + depth: 3, + width: 2, + height: 2, + }), + encoded: _, + }, + ) => (), + _ => panic!("Decoded header is incorrect"), + } + } + + #[test] + fn pbm_binary() { + // The data contains two rows of the image (each line is padded to the full byte). For + // comments on its format, see documentation of `impl SampleType for PbmBit`. + let pbmbinary = [&b"P4 6 2\n"[..], &[0b01101100 as u8, 0b10110111]].concat(); + let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap(); + assert_eq!(decoder.color_type(), ColorType::L8); + assert_eq!(decoder.original_color_type(), ExtendedColorType::L1); + assert_eq!(decoder.dimensions(), (6, 2)); + assert_eq!( + decoder.subtype(), + PnmSubtype::Bitmap(SampleEncoding::Binary) + ); + let mut image = vec![0; decoder.total_bytes() as usize]; + decoder.read_image(&mut image).unwrap(); + assert_eq!(image, vec![255, 0, 0, 255, 0, 0, 0, 255, 0, 0, 255, 0]); + match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() { + ( + _, + PnmHeader { + decoded: + HeaderRecord::Bitmap(BitmapHeader { + encoding: SampleEncoding::Binary, + width: 6, + height: 2, + }), + encoded: _, + }, + ) => (), + _ => panic!("Decoded header is incorrect"), + } + } + + /// A previous infinite loop. + #[test] + fn pbm_binary_ascii_termination() { + use std::io::{BufReader, Cursor, Error, ErrorKind, Read, Result}; + struct FailRead(Cursor<&'static [u8]>); + + impl Read for FailRead { + fn read(&mut self, buf: &mut [u8]) -> Result<usize> { + match self.0.read(buf) { + Ok(n) if n > 0 => Ok(n), + _ => Err(Error::new( + ErrorKind::BrokenPipe, + "Simulated broken pipe error", + )), + } + } + } + + let pbmbinary = BufReader::new(FailRead(Cursor::new(b"P1 1 1\n"))); + + let decoder = PnmDecoder::new(pbmbinary).unwrap(); + let mut image = vec![0; decoder.total_bytes() as usize]; + decoder + .read_image(&mut image) + .expect_err("Image is malformed"); + } + + #[test] + fn pbm_ascii() { + // The data contains two rows of the image (each line is padded to the full byte). For + // comments on its format, see documentation of `impl SampleType for PbmBit`. Tests all + // whitespace characters that should be allowed (the 6 characters according to POSIX). 
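+        // (space, horizontal tab, line feed, vertical tab `\x0b`, form feed `\x0c`, and
+        // carriage return)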
+ let pbmbinary = b"P1 6 2\n 0 1 1 0 1 1\n1 0 1 1 0\t\n\x0b\x0c\r1"; + let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap(); + assert_eq!(decoder.color_type(), ColorType::L8); + assert_eq!(decoder.original_color_type(), ExtendedColorType::L1); + assert_eq!(decoder.dimensions(), (6, 2)); + assert_eq!(decoder.subtype(), PnmSubtype::Bitmap(SampleEncoding::Ascii)); + + let mut image = vec![0; decoder.total_bytes() as usize]; + decoder.read_image(&mut image).unwrap(); + assert_eq!(image, vec![255, 0, 0, 255, 0, 0, 0, 255, 0, 0, 255, 0]); + match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() { + ( + _, + PnmHeader { + decoded: + HeaderRecord::Bitmap(BitmapHeader { + encoding: SampleEncoding::Ascii, + width: 6, + height: 2, + }), + encoded: _, + }, + ) => (), + _ => panic!("Decoded header is incorrect"), + } + } + + #[test] + fn pbm_ascii_nospace() { + // The data contains two rows of the image (each line is padded to the full byte). Notably, + // it is completely within specification for the ascii data not to contain separating + // whitespace for the pbm format or any mix. + let pbmbinary = b"P1 6 2\n011011101101"; + let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap(); + assert_eq!(decoder.color_type(), ColorType::L8); + assert_eq!(decoder.original_color_type(), ExtendedColorType::L1); + assert_eq!(decoder.dimensions(), (6, 2)); + assert_eq!(decoder.subtype(), PnmSubtype::Bitmap(SampleEncoding::Ascii)); + + let mut image = vec![0; decoder.total_bytes() as usize]; + decoder.read_image(&mut image).unwrap(); + assert_eq!(image, vec![255, 0, 0, 255, 0, 0, 0, 255, 0, 0, 255, 0]); + match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() { + ( + _, + PnmHeader { + decoded: + HeaderRecord::Bitmap(BitmapHeader { + encoding: SampleEncoding::Ascii, + width: 6, + height: 2, + }), + encoded: _, + }, + ) => (), + _ => panic!("Decoded header is incorrect"), + } + } + + #[test] + fn pgm_binary() { + // The data contains two rows of the image (each line is padded to the full byte). For + // comments on its format, see documentation of `impl SampleType for PbmBit`. + let elements = (0..16).collect::<Vec<_>>(); + let pbmbinary = [&b"P5 4 4 255\n"[..], &elements].concat(); + let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap(); + assert_eq!(decoder.color_type(), ColorType::L8); + assert_eq!(decoder.dimensions(), (4, 4)); + assert_eq!( + decoder.subtype(), + PnmSubtype::Graymap(SampleEncoding::Binary) + ); + let mut image = vec![0; decoder.total_bytes() as usize]; + decoder.read_image(&mut image).unwrap(); + assert_eq!(image, elements); + match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() { + ( + _, + PnmHeader { + decoded: + HeaderRecord::Graymap(GraymapHeader { + encoding: SampleEncoding::Binary, + width: 4, + height: 4, + maxwhite: 255, + }), + encoded: _, + }, + ) => (), + _ => panic!("Decoded header is incorrect"), + } + } + + #[test] + fn pgm_ascii() { + // The data contains two rows of the image (each line is padded to the full byte). For + // comments on its format, see documentation of `impl SampleType for PbmBit`. 
+ let pbmbinary = b"P2 4 4 255\n 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15"; + let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap(); + assert_eq!(decoder.color_type(), ColorType::L8); + assert_eq!(decoder.dimensions(), (4, 4)); + assert_eq!( + decoder.subtype(), + PnmSubtype::Graymap(SampleEncoding::Ascii) + ); + let mut image = vec![0; decoder.total_bytes() as usize]; + decoder.read_image(&mut image).unwrap(); + assert_eq!(image, (0..16).collect::<Vec<_>>()); + match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() { + ( + _, + PnmHeader { + decoded: + HeaderRecord::Graymap(GraymapHeader { + encoding: SampleEncoding::Ascii, + width: 4, + height: 4, + maxwhite: 255, + }), + encoded: _, + }, + ) => (), + _ => panic!("Decoded header is incorrect"), + } + } + + #[test] + fn dimension_overflow() { + let pamdata = b"P7 +# Comment line +MAXVAL 255 +TUPLTYPE RGB +DEPTH 3 +WIDTH 4294967295 +HEIGHT 4294967295 +ENDHDR +\xde\xad\xbe\xef\xde\xad\xbe\xef\xde\xad\xbe\xef"; + + assert!(PnmDecoder::new(&pamdata[..]).is_err()); + } + + #[test] + fn issue_1508() { + let _ = crate::load_from_memory(b"P391919 16999 1 1 9 919 16999 1 9999 999* 99999 N"); + } + + #[test] + fn issue_1616_overflow() { + let data = vec![ + 80, 54, 10, 52, 50, 57, 52, 56, 50, 57, 52, 56, 35, 56, 10, 52, 10, 48, 10, 12, 12, 56, + ]; + // Validate: we have a header. Note: we might already calculate that this will fail but + // then we could not return information about the header to the caller. + let decoder = PnmDecoder::new(&data[..]).unwrap(); + let mut image = vec![0; decoder.total_bytes() as usize]; + let _ = decoder.read_image(&mut image); + } +} diff --git a/vendor/image/src/codecs/pnm/encoder.rs b/vendor/image/src/codecs/pnm/encoder.rs new file mode 100644 index 0000000..9f823d0 --- /dev/null +++ b/vendor/image/src/codecs/pnm/encoder.rs @@ -0,0 +1,673 @@ +//! Encoding of PNM Images +use std::fmt; +use std::io; + +use std::io::Write; + +use super::AutoBreak; +use super::{ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader, PixmapHeader}; +use super::{HeaderRecord, PnmHeader, PnmSubtype, SampleEncoding}; +use crate::color::{ColorType, ExtendedColorType}; +use crate::error::{ + ImageError, ImageResult, ParameterError, ParameterErrorKind, UnsupportedError, + UnsupportedErrorKind, +}; +use crate::image::{ImageEncoder, ImageFormat}; + +use byteorder::{BigEndian, WriteBytesExt}; + +enum HeaderStrategy { + Dynamic, + Subtype(PnmSubtype), + Chosen(PnmHeader), +} + +#[derive(Clone, Copy)] +pub enum FlatSamples<'a> { + U8(&'a [u8]), + U16(&'a [u16]), +} + +/// Encodes images to any of the `pnm` image formats. +pub struct PnmEncoder<W: Write> { + writer: W, + header: HeaderStrategy, +} + +/// Encapsulate the checking system in the type system. Non of the fields are actually accessed +/// but requiring them forces us to validly construct the struct anyways. +struct CheckedImageBuffer<'a> { + _image: FlatSamples<'a>, + _width: u32, + _height: u32, + _color: ExtendedColorType, +} + +// Check the header against the buffer. Each struct produces the next after a check. 
+struct UncheckedHeader<'a> { + header: &'a PnmHeader, +} + +struct CheckedDimensions<'a> { + unchecked: UncheckedHeader<'a>, + width: u32, + height: u32, +} + +struct CheckedHeaderColor<'a> { + dimensions: CheckedDimensions<'a>, + color: ExtendedColorType, +} + +struct CheckedHeader<'a> { + color: CheckedHeaderColor<'a>, + encoding: TupleEncoding<'a>, + _image: CheckedImageBuffer<'a>, +} + +enum TupleEncoding<'a> { + PbmBits { + samples: FlatSamples<'a>, + width: u32, + }, + Ascii { + samples: FlatSamples<'a>, + }, + Bytes { + samples: FlatSamples<'a>, + }, +} + +impl<W: Write> PnmEncoder<W> { + /// Create new PnmEncoder from the `writer`. + /// + /// The encoded images will have some `pnm` format. If more control over the image type is + /// required, use either one of `with_subtype` or `with_header`. For more information on the + /// behaviour, see `with_dynamic_header`. + pub fn new(writer: W) -> Self { + PnmEncoder { + writer, + header: HeaderStrategy::Dynamic, + } + } + + /// Encode a specific pnm subtype image. + /// + /// The magic number and encoding type will be chosen as provided while the rest of the header + /// data will be generated dynamically. Trying to encode incompatible images (e.g. encoding an + /// RGB image as Graymap) will result in an error. + /// + /// This will overwrite the effect of earlier calls to `with_header` and `with_dynamic_header`. + pub fn with_subtype(self, subtype: PnmSubtype) -> Self { + PnmEncoder { + writer: self.writer, + header: HeaderStrategy::Subtype(subtype), + } + } + + /// Enforce the use of a chosen header. + /// + /// While this option gives the most control over the actual written data, the encoding process + /// will error in case the header data and image parameters do not agree. It is the users + /// obligation to ensure that the width and height are set accordingly, for example. + /// + /// Choose this option if you want a lossless decoding/encoding round trip. + /// + /// This will overwrite the effect of earlier calls to `with_subtype` and `with_dynamic_header`. + pub fn with_header(self, header: PnmHeader) -> Self { + PnmEncoder { + writer: self.writer, + header: HeaderStrategy::Chosen(header), + } + } + + /// Create the header dynamically for each image. + /// + /// This is the default option upon creation of the encoder. With this, most images should be + /// encodable but the specific format chosen is out of the users control. The pnm subtype is + /// chosen arbitrarily by the library. + /// + /// This will overwrite the effect of earlier calls to `with_subtype` and `with_header`. + pub fn with_dynamic_header(self) -> Self { + PnmEncoder { + writer: self.writer, + header: HeaderStrategy::Dynamic, + } + } + + /// Encode an image whose samples are represented as `u8`. + /// + /// Some `pnm` subtypes are incompatible with some color options, a chosen header most + /// certainly with any deviation from the original decoded image. 
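+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (not part of the upstream sources), assuming the crate's public
+    /// `image::codecs::pnm` re-exports; the pixel data is made up for illustration:
+    ///
+    /// ```no_run
+    /// use image::codecs::pnm::{PnmEncoder, PnmSubtype, SampleEncoding};
+    /// use image::ColorType;
+    ///
+    /// // A 2x2 grayscale image, one `u8` sample per pixel.
+    /// let pixels: [u8; 4] = [0, 64, 128, 255];
+    /// let mut out = Vec::new();
+    /// PnmEncoder::new(&mut out)
+    ///     .with_subtype(PnmSubtype::Graymap(SampleEncoding::Ascii))
+    ///     .encode(&pixels[..], 2, 2, ColorType::L8)?;
+    /// # Ok::<(), image::ImageError>(())
+    /// ```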
+ pub fn encode<'s, S>( + &mut self, + image: S, + width: u32, + height: u32, + color: ColorType, + ) -> ImageResult<()> + where + S: Into<FlatSamples<'s>>, + { + let image = image.into(); + match self.header { + HeaderStrategy::Dynamic => { + self.write_dynamic_header(image, width, height, color.into()) + } + HeaderStrategy::Subtype(subtype) => { + self.write_subtyped_header(subtype, image, width, height, color.into()) + } + HeaderStrategy::Chosen(ref header) => Self::write_with_header( + &mut self.writer, + header, + image, + width, + height, + color.into(), + ), + } + } + + /// Choose any valid pnm format that the image can be expressed in and write its header. + /// + /// Returns how the body should be written if successful. + fn write_dynamic_header( + &mut self, + image: FlatSamples, + width: u32, + height: u32, + color: ExtendedColorType, + ) -> ImageResult<()> { + let depth = u32::from(color.channel_count()); + let (maxval, tupltype) = match color { + ExtendedColorType::L1 => (1, ArbitraryTuplType::BlackAndWhite), + ExtendedColorType::L8 => (0xff, ArbitraryTuplType::Grayscale), + ExtendedColorType::L16 => (0xffff, ArbitraryTuplType::Grayscale), + ExtendedColorType::La1 => (1, ArbitraryTuplType::BlackAndWhiteAlpha), + ExtendedColorType::La8 => (0xff, ArbitraryTuplType::GrayscaleAlpha), + ExtendedColorType::La16 => (0xffff, ArbitraryTuplType::GrayscaleAlpha), + ExtendedColorType::Rgb8 => (0xff, ArbitraryTuplType::RGB), + ExtendedColorType::Rgb16 => (0xffff, ArbitraryTuplType::RGB), + ExtendedColorType::Rgba8 => (0xff, ArbitraryTuplType::RGBAlpha), + ExtendedColorType::Rgba16 => (0xffff, ArbitraryTuplType::RGBAlpha), + _ => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Pnm.into(), + UnsupportedErrorKind::Color(color), + ), + )) + } + }; + + let header = PnmHeader { + decoded: HeaderRecord::Arbitrary(ArbitraryHeader { + width, + height, + depth, + maxval, + tupltype: Some(tupltype), + }), + encoded: None, + }; + + Self::write_with_header(&mut self.writer, &header, image, width, height, color) + } + + /// Try to encode the image with the chosen format, give its corresponding pixel encoding type. + fn write_subtyped_header( + &mut self, + subtype: PnmSubtype, + image: FlatSamples, + width: u32, + height: u32, + color: ExtendedColorType, + ) -> ImageResult<()> { + let header = match (subtype, color) { + (PnmSubtype::ArbitraryMap, color) => { + return self.write_dynamic_header(image, width, height, color) + } + (PnmSubtype::Pixmap(encoding), ExtendedColorType::Rgb8) => PnmHeader { + decoded: HeaderRecord::Pixmap(PixmapHeader { + encoding, + width, + height, + maxval: 255, + }), + encoded: None, + }, + (PnmSubtype::Graymap(encoding), ExtendedColorType::L8) => PnmHeader { + decoded: HeaderRecord::Graymap(GraymapHeader { + encoding, + width, + height, + maxwhite: 255, + }), + encoded: None, + }, + (PnmSubtype::Bitmap(encoding), ExtendedColorType::L8) + | (PnmSubtype::Bitmap(encoding), ExtendedColorType::L1) => PnmHeader { + decoded: HeaderRecord::Bitmap(BitmapHeader { + encoding, + width, + height, + }), + encoded: None, + }, + (_, _) => { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic( + "Color type can not be represented in the chosen format".to_owned(), + ), + ))); + } + }; + + Self::write_with_header(&mut self.writer, &header, image, width, height, color) + } + + /// Try to encode the image with the chosen header, checking if values are correct. 
+ /// + /// Returns how the body should be written if successful. + fn write_with_header( + writer: &mut dyn Write, + header: &PnmHeader, + image: FlatSamples, + width: u32, + height: u32, + color: ExtendedColorType, + ) -> ImageResult<()> { + let unchecked = UncheckedHeader { header }; + + unchecked + .check_header_dimensions(width, height)? + .check_header_color(color)? + .check_sample_values(image)? + .write_header(writer)? + .write_image(writer) + } +} + +impl<W: Write> ImageEncoder for PnmEncoder<W> { + fn write_image( + mut self, + buf: &[u8], + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<()> { + self.encode(buf, width, height, color_type) + } +} + +impl<'a> CheckedImageBuffer<'a> { + fn check( + image: FlatSamples<'a>, + width: u32, + height: u32, + color: ExtendedColorType, + ) -> ImageResult<CheckedImageBuffer<'a>> { + let components = color.channel_count() as usize; + let uwidth = width as usize; + let uheight = height as usize; + let expected_len = components + .checked_mul(uwidth) + .and_then(|v| v.checked_mul(uheight)); + if Some(image.len()) != expected_len { + // Image buffer does not correspond to size and colour. + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + ))); + } + Ok(CheckedImageBuffer { + _image: image, + _width: width, + _height: height, + _color: color, + }) + } +} + +impl<'a> UncheckedHeader<'a> { + fn check_header_dimensions( + self, + width: u32, + height: u32, + ) -> ImageResult<CheckedDimensions<'a>> { + if self.header.width() != width || self.header.height() != height { + // Chosen header does not match Image dimensions. + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + ))); + } + + Ok(CheckedDimensions { + unchecked: self, + width, + height, + }) + } +} + +impl<'a> CheckedDimensions<'a> { + // Check color compatibility with the header. This will only error when we are certain that + // the combination is bogus (e.g. combining Pixmap and Palette) but allows uncertain + // combinations (basically a ArbitraryTuplType::Custom with any color of fitting depth). + fn check_header_color(self, color: ExtendedColorType) -> ImageResult<CheckedHeaderColor<'a>> { + let components = u32::from(color.channel_count()); + + match *self.unchecked.header { + PnmHeader { + decoded: HeaderRecord::Bitmap(_), + .. + } => match color { + ExtendedColorType::L1 | ExtendedColorType::L8 | ExtendedColorType::L16 => (), + _ => { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic( + "PBM format only support luma color types".to_owned(), + ), + ))) + } + }, + PnmHeader { + decoded: HeaderRecord::Graymap(_), + .. + } => match color { + ExtendedColorType::L1 | ExtendedColorType::L8 | ExtendedColorType::L16 => (), + _ => { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic( + "PGM format only support luma color types".to_owned(), + ), + ))) + } + }, + PnmHeader { + decoded: HeaderRecord::Pixmap(_), + .. + } => match color { + ExtendedColorType::Rgb8 => (), + _ => { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic( + "PPM format only support ExtendedColorType::Rgb8".to_owned(), + ), + ))) + } + }, + PnmHeader { + decoded: + HeaderRecord::Arbitrary(ArbitraryHeader { + depth, + ref tupltype, + .. + }), + .. 
+ } => match (tupltype, color) { + (&Some(ArbitraryTuplType::BlackAndWhite), ExtendedColorType::L1) => (), + (&Some(ArbitraryTuplType::BlackAndWhiteAlpha), ExtendedColorType::La8) => (), + + (&Some(ArbitraryTuplType::Grayscale), ExtendedColorType::L1) => (), + (&Some(ArbitraryTuplType::Grayscale), ExtendedColorType::L8) => (), + (&Some(ArbitraryTuplType::Grayscale), ExtendedColorType::L16) => (), + (&Some(ArbitraryTuplType::GrayscaleAlpha), ExtendedColorType::La8) => (), + + (&Some(ArbitraryTuplType::RGB), ExtendedColorType::Rgb8) => (), + (&Some(ArbitraryTuplType::RGBAlpha), ExtendedColorType::Rgba8) => (), + + (&None, _) if depth == components => (), + (&Some(ArbitraryTuplType::Custom(_)), _) if depth == components => (), + _ if depth != components => { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic(format!( + "Depth mismatch: header {} vs. color {}", + depth, components + )), + ))) + } + _ => { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic( + "Invalid color type for selected PAM color type".to_owned(), + ), + ))) + } + }, + } + + Ok(CheckedHeaderColor { + dimensions: self, + color, + }) + } +} + +impl<'a> CheckedHeaderColor<'a> { + fn check_sample_values(self, image: FlatSamples<'a>) -> ImageResult<CheckedHeader<'a>> { + let header_maxval = match self.dimensions.unchecked.header.decoded { + HeaderRecord::Bitmap(_) => 1, + HeaderRecord::Graymap(GraymapHeader { maxwhite, .. }) => maxwhite, + HeaderRecord::Pixmap(PixmapHeader { maxval, .. }) => maxval, + HeaderRecord::Arbitrary(ArbitraryHeader { maxval, .. }) => maxval, + }; + + // We trust the image color bit count to be correct at least. + let max_sample = match self.color { + ExtendedColorType::Unknown(n) if n <= 16 => (1 << n) - 1, + ExtendedColorType::L1 => 1, + ExtendedColorType::L8 + | ExtendedColorType::La8 + | ExtendedColorType::Rgb8 + | ExtendedColorType::Rgba8 + | ExtendedColorType::Bgr8 + | ExtendedColorType::Bgra8 => 0xff, + ExtendedColorType::L16 + | ExtendedColorType::La16 + | ExtendedColorType::Rgb16 + | ExtendedColorType::Rgba16 => 0xffff, + _ => { + // Unsupported target color type. + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Pnm.into(), + UnsupportedErrorKind::Color(self.color), + ), + )); + } + }; + + // Avoid the performance heavy check if possible, e.g. if the header has been chosen by us. + if header_maxval < max_sample && !image.all_smaller(header_maxval) { + // Sample value greater than allowed for chosen header. 
+ return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Pnm.into(), + UnsupportedErrorKind::GenericFeature( + "Sample value greater than allowed for chosen header".to_owned(), + ), + ), + )); + } + + let encoding = image.encoding_for(&self.dimensions.unchecked.header.decoded); + + let image = CheckedImageBuffer::check( + image, + self.dimensions.width, + self.dimensions.height, + self.color, + )?; + + Ok(CheckedHeader { + color: self, + encoding, + _image: image, + }) + } +} + +impl<'a> CheckedHeader<'a> { + fn write_header(self, writer: &mut dyn Write) -> ImageResult<TupleEncoding<'a>> { + self.header().write(writer)?; + Ok(self.encoding) + } + + fn header(&self) -> &PnmHeader { + self.color.dimensions.unchecked.header + } +} + +struct SampleWriter<'a>(&'a mut dyn Write); + +impl<'a> SampleWriter<'a> { + fn write_samples_ascii<V>(self, samples: V) -> io::Result<()> + where + V: Iterator, + V::Item: fmt::Display, + { + let mut auto_break_writer = AutoBreak::new(self.0, 70); + for value in samples { + write!(auto_break_writer, "{} ", value)?; + } + auto_break_writer.flush() + } + + fn write_pbm_bits<V>(self, samples: &[V], width: u32) -> io::Result<()> + /* Default gives 0 for all primitives. TODO: replace this with `Zeroable` once it hits stable */ + where + V: Default + Eq + Copy, + { + // The length of an encoded scanline + let line_width = (width - 1) / 8 + 1; + + // We'll be writing single bytes, so buffer + let mut line_buffer = Vec::with_capacity(line_width as usize); + + for line in samples.chunks(width as usize) { + for byte_bits in line.chunks(8) { + let mut byte = 0u8; + for i in 0..8 { + // Black pixels are encoded as 1s + if let Some(&v) = byte_bits.get(i) { + if v == V::default() { + byte |= 1u8 << (7 - i) + } + } + } + line_buffer.push(byte) + } + self.0.write_all(line_buffer.as_slice())?; + line_buffer.clear(); + } + + self.0.flush() + } +} + +impl<'a> FlatSamples<'a> { + fn len(&self) -> usize { + match *self { + FlatSamples::U8(arr) => arr.len(), + FlatSamples::U16(arr) => arr.len(), + } + } + + fn all_smaller(&self, max_val: u32) -> bool { + match *self { + FlatSamples::U8(arr) => arr.iter().any(|&val| u32::from(val) > max_val), + FlatSamples::U16(arr) => arr.iter().any(|&val| u32::from(val) > max_val), + } + } + + fn encoding_for(&self, header: &HeaderRecord) -> TupleEncoding<'a> { + match *header { + HeaderRecord::Bitmap(BitmapHeader { + encoding: SampleEncoding::Binary, + width, + .. + }) => TupleEncoding::PbmBits { + samples: *self, + width, + }, + + HeaderRecord::Bitmap(BitmapHeader { + encoding: SampleEncoding::Ascii, + .. + }) => TupleEncoding::Ascii { samples: *self }, + + HeaderRecord::Arbitrary(_) => TupleEncoding::Bytes { samples: *self }, + + HeaderRecord::Graymap(GraymapHeader { + encoding: SampleEncoding::Ascii, + .. + }) + | HeaderRecord::Pixmap(PixmapHeader { + encoding: SampleEncoding::Ascii, + .. + }) => TupleEncoding::Ascii { samples: *self }, + + HeaderRecord::Graymap(GraymapHeader { + encoding: SampleEncoding::Binary, + .. + }) + | HeaderRecord::Pixmap(PixmapHeader { + encoding: SampleEncoding::Binary, + .. 
+ }) => TupleEncoding::Bytes { samples: *self }, + } + } +} + +impl<'a> From<&'a [u8]> for FlatSamples<'a> { + fn from(samples: &'a [u8]) -> Self { + FlatSamples::U8(samples) + } +} + +impl<'a> From<&'a [u16]> for FlatSamples<'a> { + fn from(samples: &'a [u16]) -> Self { + FlatSamples::U16(samples) + } +} + +impl<'a> TupleEncoding<'a> { + fn write_image(&self, writer: &mut dyn Write) -> ImageResult<()> { + match *self { + TupleEncoding::PbmBits { + samples: FlatSamples::U8(samples), + width, + } => SampleWriter(writer) + .write_pbm_bits(samples, width) + .map_err(ImageError::IoError), + TupleEncoding::PbmBits { + samples: FlatSamples::U16(samples), + width, + } => SampleWriter(writer) + .write_pbm_bits(samples, width) + .map_err(ImageError::IoError), + + TupleEncoding::Bytes { + samples: FlatSamples::U8(samples), + } => writer.write_all(samples).map_err(ImageError::IoError), + TupleEncoding::Bytes { + samples: FlatSamples::U16(samples), + } => samples.iter().try_for_each(|&sample| { + writer + .write_u16::<BigEndian>(sample) + .map_err(ImageError::IoError) + }), + + TupleEncoding::Ascii { + samples: FlatSamples::U8(samples), + } => SampleWriter(writer) + .write_samples_ascii(samples.iter()) + .map_err(ImageError::IoError), + TupleEncoding::Ascii { + samples: FlatSamples::U16(samples), + } => SampleWriter(writer) + .write_samples_ascii(samples.iter()) + .map_err(ImageError::IoError), + } + } +} diff --git a/vendor/image/src/codecs/pnm/header.rs b/vendor/image/src/codecs/pnm/header.rs new file mode 100644 index 0000000..443a701 --- /dev/null +++ b/vendor/image/src/codecs/pnm/header.rs @@ -0,0 +1,354 @@ +use std::{fmt, io}; + +/// The kind of encoding used to store sample values +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum SampleEncoding { + /// Samples are unsigned binary integers in big endian + Binary, + + /// Samples are encoded as decimal ascii strings separated by whitespace + Ascii, +} + +/// Denotes the category of the magic number +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum PnmSubtype { + /// Magic numbers P1 and P4 + Bitmap(SampleEncoding), + + /// Magic numbers P2 and P5 + Graymap(SampleEncoding), + + /// Magic numbers P3 and P6 + Pixmap(SampleEncoding), + + /// Magic number P7 + ArbitraryMap, +} + +/// Stores the complete header data of a file. +/// +/// Internally, provides mechanisms for lossless reencoding. After reading a file with the decoder +/// it is possible to recover the header and construct an encoder. Using the encoder on the just +/// loaded image should result in a byte copy of the original file (for single image pnms without +/// additional trailing data). 
+pub struct PnmHeader { + pub(crate) decoded: HeaderRecord, + pub(crate) encoded: Option<Vec<u8>>, +} + +pub(crate) enum HeaderRecord { + Bitmap(BitmapHeader), + Graymap(GraymapHeader), + Pixmap(PixmapHeader), + Arbitrary(ArbitraryHeader), +} + +/// Header produced by a `pbm` file ("Portable Bit Map") +#[derive(Clone, Copy, Debug)] +pub struct BitmapHeader { + /// Binary or Ascii encoded file + pub encoding: SampleEncoding, + + /// Height of the image file + pub height: u32, + + /// Width of the image file + pub width: u32, +} + +/// Header produced by a `pgm` file ("Portable Gray Map") +#[derive(Clone, Copy, Debug)] +pub struct GraymapHeader { + /// Binary or Ascii encoded file + pub encoding: SampleEncoding, + + /// Height of the image file + pub height: u32, + + /// Width of the image file + pub width: u32, + + /// Maximum sample value within the image + pub maxwhite: u32, +} + +/// Header produced by a `ppm` file ("Portable Pixel Map") +#[derive(Clone, Copy, Debug)] +pub struct PixmapHeader { + /// Binary or Ascii encoded file + pub encoding: SampleEncoding, + + /// Height of the image file + pub height: u32, + + /// Width of the image file + pub width: u32, + + /// Maximum sample value within the image + pub maxval: u32, +} + +/// Header produced by a `pam` file ("Portable Arbitrary Map") +#[derive(Clone, Debug)] +pub struct ArbitraryHeader { + /// Height of the image file + pub height: u32, + + /// Width of the image file + pub width: u32, + + /// Number of color channels + pub depth: u32, + + /// Maximum sample value within the image + pub maxval: u32, + + /// Color interpretation of image pixels + pub tupltype: Option<ArbitraryTuplType>, +} + +/// Standardized tuple type specifiers in the header of a `pam`. +#[derive(Clone, Debug)] +pub enum ArbitraryTuplType { + /// Pixels are either black (0) or white (1) + BlackAndWhite, + + /// Pixels are either black (0) or white (1) and a second alpha channel + BlackAndWhiteAlpha, + + /// Pixels represent the amount of white + Grayscale, + + /// Grayscale with an additional alpha channel + GrayscaleAlpha, + + /// Three channels: Red, Green, Blue + RGB, + + /// Four channels: Red, Green, Blue, Alpha + RGBAlpha, + + /// An image format which is not standardized + Custom(String), +} + +impl ArbitraryTuplType { + pub(crate) fn name(&self) -> &str { + match self { + ArbitraryTuplType::BlackAndWhite => "BLACKANDWHITE", + ArbitraryTuplType::BlackAndWhiteAlpha => "BLACKANDWHITE_ALPHA", + ArbitraryTuplType::Grayscale => "GRAYSCALE", + ArbitraryTuplType::GrayscaleAlpha => "GRAYSCALE_ALPHA", + ArbitraryTuplType::RGB => "RGB", + ArbitraryTuplType::RGBAlpha => "RGB_ALPHA", + ArbitraryTuplType::Custom(custom) => custom, + } + } +} + +impl PnmSubtype { + /// Get the two magic constant bytes corresponding to this format subtype. 
+ pub fn magic_constant(self) -> &'static [u8; 2] { + match self { + PnmSubtype::Bitmap(SampleEncoding::Ascii) => b"P1", + PnmSubtype::Graymap(SampleEncoding::Ascii) => b"P2", + PnmSubtype::Pixmap(SampleEncoding::Ascii) => b"P3", + PnmSubtype::Bitmap(SampleEncoding::Binary) => b"P4", + PnmSubtype::Graymap(SampleEncoding::Binary) => b"P5", + PnmSubtype::Pixmap(SampleEncoding::Binary) => b"P6", + PnmSubtype::ArbitraryMap => b"P7", + } + } + + /// Whether samples are stored as binary or as decimal ascii + pub fn sample_encoding(self) -> SampleEncoding { + match self { + PnmSubtype::ArbitraryMap => SampleEncoding::Binary, + PnmSubtype::Bitmap(enc) => enc, + PnmSubtype::Graymap(enc) => enc, + PnmSubtype::Pixmap(enc) => enc, + } + } +} + +impl PnmHeader { + /// Retrieve the format subtype from which the header was created. + pub fn subtype(&self) -> PnmSubtype { + match self.decoded { + HeaderRecord::Bitmap(BitmapHeader { encoding, .. }) => PnmSubtype::Bitmap(encoding), + HeaderRecord::Graymap(GraymapHeader { encoding, .. }) => PnmSubtype::Graymap(encoding), + HeaderRecord::Pixmap(PixmapHeader { encoding, .. }) => PnmSubtype::Pixmap(encoding), + HeaderRecord::Arbitrary(ArbitraryHeader { .. }) => PnmSubtype::ArbitraryMap, + } + } + + /// The width of the image this header is for. + pub fn width(&self) -> u32 { + match self.decoded { + HeaderRecord::Bitmap(BitmapHeader { width, .. }) => width, + HeaderRecord::Graymap(GraymapHeader { width, .. }) => width, + HeaderRecord::Pixmap(PixmapHeader { width, .. }) => width, + HeaderRecord::Arbitrary(ArbitraryHeader { width, .. }) => width, + } + } + + /// The height of the image this header is for. + pub fn height(&self) -> u32 { + match self.decoded { + HeaderRecord::Bitmap(BitmapHeader { height, .. }) => height, + HeaderRecord::Graymap(GraymapHeader { height, .. }) => height, + HeaderRecord::Pixmap(PixmapHeader { height, .. }) => height, + HeaderRecord::Arbitrary(ArbitraryHeader { height, .. }) => height, + } + } + + /// The biggest value a sample can have. In other words, the colour resolution. + pub fn maximal_sample(&self) -> u32 { + match self.decoded { + HeaderRecord::Bitmap(BitmapHeader { .. }) => 1, + HeaderRecord::Graymap(GraymapHeader { maxwhite, .. }) => maxwhite, + HeaderRecord::Pixmap(PixmapHeader { maxval, .. }) => maxval, + HeaderRecord::Arbitrary(ArbitraryHeader { maxval, .. }) => maxval, + } + } + + /// Retrieve the underlying bitmap header if any + pub fn as_bitmap(&self) -> Option<&BitmapHeader> { + match self.decoded { + HeaderRecord::Bitmap(ref bitmap) => Some(bitmap), + _ => None, + } + } + + /// Retrieve the underlying graymap header if any + pub fn as_graymap(&self) -> Option<&GraymapHeader> { + match self.decoded { + HeaderRecord::Graymap(ref graymap) => Some(graymap), + _ => None, + } + } + + /// Retrieve the underlying pixmap header if any + pub fn as_pixmap(&self) -> Option<&PixmapHeader> { + match self.decoded { + HeaderRecord::Pixmap(ref pixmap) => Some(pixmap), + _ => None, + } + } + + /// Retrieve the underlying arbitrary header if any + pub fn as_arbitrary(&self) -> Option<&ArbitraryHeader> { + match self.decoded { + HeaderRecord::Arbitrary(ref arbitrary) => Some(arbitrary), + _ => None, + } + } + + /// Write the header back into a binary stream + pub fn write(&self, writer: &mut dyn io::Write) -> io::Result<()> { + writer.write_all(self.subtype().magic_constant())?; + match *self { + PnmHeader { + encoded: Some(ref content), + .. 
+ } => writer.write_all(content), + PnmHeader { + decoded: + HeaderRecord::Bitmap(BitmapHeader { + encoding: _encoding, + width, + height, + }), + .. + } => writeln!(writer, "\n{} {}", width, height), + PnmHeader { + decoded: + HeaderRecord::Graymap(GraymapHeader { + encoding: _encoding, + width, + height, + maxwhite, + }), + .. + } => writeln!(writer, "\n{} {} {}", width, height, maxwhite), + PnmHeader { + decoded: + HeaderRecord::Pixmap(PixmapHeader { + encoding: _encoding, + width, + height, + maxval, + }), + .. + } => writeln!(writer, "\n{} {} {}", width, height, maxval), + PnmHeader { + decoded: + HeaderRecord::Arbitrary(ArbitraryHeader { + width, + height, + depth, + maxval, + ref tupltype, + }), + .. + } => { + struct TupltypeWriter<'a>(&'a Option<ArbitraryTuplType>); + impl<'a> fmt::Display for TupltypeWriter<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 { + Some(tt) => writeln!(f, "TUPLTYPE {}", tt.name()), + None => Ok(()), + } + } + } + + writeln!( + writer, + "\nWIDTH {}\nHEIGHT {}\nDEPTH {}\nMAXVAL {}\n{}ENDHDR", + width, + height, + depth, + maxval, + TupltypeWriter(tupltype) + ) + } + } + } +} + +impl From<BitmapHeader> for PnmHeader { + fn from(header: BitmapHeader) -> Self { + PnmHeader { + decoded: HeaderRecord::Bitmap(header), + encoded: None, + } + } +} + +impl From<GraymapHeader> for PnmHeader { + fn from(header: GraymapHeader) -> Self { + PnmHeader { + decoded: HeaderRecord::Graymap(header), + encoded: None, + } + } +} + +impl From<PixmapHeader> for PnmHeader { + fn from(header: PixmapHeader) -> Self { + PnmHeader { + decoded: HeaderRecord::Pixmap(header), + encoded: None, + } + } +} + +impl From<ArbitraryHeader> for PnmHeader { + fn from(header: ArbitraryHeader) -> Self { + PnmHeader { + decoded: HeaderRecord::Arbitrary(header), + encoded: None, + } + } +} diff --git a/vendor/image/src/codecs/pnm/mod.rs b/vendor/image/src/codecs/pnm/mod.rs new file mode 100644 index 0000000..de8612d --- /dev/null +++ b/vendor/image/src/codecs/pnm/mod.rs @@ -0,0 +1,184 @@ +//! Decoding of netpbm image formats (pbm, pgm, ppm and pam). +//! +//! The formats pbm, pgm and ppm are fully supported. The pam decoder recognizes the tuple types +//! `BLACKANDWHITE`, `GRAYSCALE` and `RGB` and explicitly recognizes but rejects their `_ALPHA` +//! variants for now as alpha color types are unsupported. 
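+//!
+//! # Example
+//!
+//! A minimal round-trip sketch (not a guaranteed doctest); it assumes the crate is built with
+//! PNM support so that `image::codecs::pnm` and the `ImageDecoder` trait are reachable, and it
+//! mirrors the round-trip tests at the bottom of this module.
+//!
+//! ```no_run
+//! use image::codecs::pnm::{PnmDecoder, PnmEncoder, PnmSubtype, SampleEncoding};
+//! use image::{ColorType, ImageDecoder};
+//!
+//! // Encode a 2x2 grayscale image as an ASCII "P2" graymap.
+//! let pixels: [u8; 4] = [0, 85, 170, 255];
+//! let mut encoded = Vec::new();
+//! PnmEncoder::new(&mut encoded)
+//!     .with_subtype(PnmSubtype::Graymap(SampleEncoding::Ascii))
+//!     .encode(&pixels[..], 2, 2, ColorType::L8)
+//!     .unwrap();
+//!
+//! // Decode it again and inspect the recovered metadata.
+//! let decoder = PnmDecoder::new(&encoded[..]).unwrap();
+//! assert_eq!(decoder.color_type(), ColorType::L8);
+//! assert_eq!(decoder.dimensions(), (2, 2));
+//! ```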
+use self::autobreak::AutoBreak; +pub use self::decoder::PnmDecoder; +pub use self::encoder::PnmEncoder; +use self::header::HeaderRecord; +pub use self::header::{ + ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader, PixmapHeader, +}; +pub use self::header::{PnmHeader, PnmSubtype, SampleEncoding}; + +mod autobreak; +mod decoder; +mod encoder; +mod header; + +#[cfg(test)] +mod tests { + use super::*; + use crate::color::ColorType; + use crate::image::ImageDecoder; + use byteorder::{ByteOrder, NativeEndian}; + + fn execute_roundtrip_default(buffer: &[u8], width: u32, height: u32, color: ColorType) { + let mut encoded_buffer = Vec::new(); + + { + let mut encoder = PnmEncoder::new(&mut encoded_buffer); + encoder + .encode(buffer, width, height, color) + .expect("Failed to encode the image buffer"); + } + + let (header, loaded_color, loaded_image) = { + let decoder = PnmDecoder::new(&encoded_buffer[..]).unwrap(); + let color_type = decoder.color_type(); + let mut image = vec![0; decoder.total_bytes() as usize]; + decoder + .read_image(&mut image) + .expect("Failed to decode the image"); + let (_, header) = PnmDecoder::new(&encoded_buffer[..]).unwrap().into_inner(); + (header, color_type, image) + }; + + assert_eq!(header.width(), width); + assert_eq!(header.height(), height); + assert_eq!(loaded_color, color); + assert_eq!(loaded_image.as_slice(), buffer); + } + + fn execute_roundtrip_with_subtype( + buffer: &[u8], + width: u32, + height: u32, + color: ColorType, + subtype: PnmSubtype, + ) { + let mut encoded_buffer = Vec::new(); + + { + let mut encoder = PnmEncoder::new(&mut encoded_buffer).with_subtype(subtype); + encoder + .encode(buffer, width, height, color) + .expect("Failed to encode the image buffer"); + } + + let (header, loaded_color, loaded_image) = { + let decoder = PnmDecoder::new(&encoded_buffer[..]).unwrap(); + let color_type = decoder.color_type(); + let mut image = vec![0; decoder.total_bytes() as usize]; + decoder + .read_image(&mut image) + .expect("Failed to decode the image"); + let (_, header) = PnmDecoder::new(&encoded_buffer[..]).unwrap().into_inner(); + (header, color_type, image) + }; + + assert_eq!(header.width(), width); + assert_eq!(header.height(), height); + assert_eq!(header.subtype(), subtype); + assert_eq!(loaded_color, color); + assert_eq!(loaded_image.as_slice(), buffer); + } + + fn execute_roundtrip_u16(buffer: &[u16], width: u32, height: u32, color: ColorType) { + let mut encoded_buffer = Vec::new(); + + { + let mut encoder = PnmEncoder::new(&mut encoded_buffer); + encoder + .encode(buffer, width, height, color) + .expect("Failed to encode the image buffer"); + } + + let (header, loaded_color, loaded_image) = { + let decoder = PnmDecoder::new(&encoded_buffer[..]).unwrap(); + let color_type = decoder.color_type(); + let mut image = vec![0; decoder.total_bytes() as usize]; + decoder + .read_image(&mut image) + .expect("Failed to decode the image"); + let (_, header) = PnmDecoder::new(&encoded_buffer[..]).unwrap().into_inner(); + (header, color_type, image) + }; + + let mut buffer_u8 = vec![0; buffer.len() * 2]; + NativeEndian::write_u16_into(buffer, &mut buffer_u8[..]); + + assert_eq!(header.width(), width); + assert_eq!(header.height(), height); + assert_eq!(loaded_color, color); + assert_eq!(loaded_image, buffer_u8); + } + + #[test] + fn roundtrip_gray() { + #[rustfmt::skip] + let buf: [u8; 16] = [ + 0, 0, 0, 255, + 255, 255, 255, 255, + 255, 0, 255, 0, + 255, 0, 0, 0, + ]; + + execute_roundtrip_default(&buf, 4, 4, ColorType::L8); + 
execute_roundtrip_with_subtype(&buf, 4, 4, ColorType::L8, PnmSubtype::ArbitraryMap); + execute_roundtrip_with_subtype( + &buf, + 4, + 4, + ColorType::L8, + PnmSubtype::Graymap(SampleEncoding::Ascii), + ); + execute_roundtrip_with_subtype( + &buf, + 4, + 4, + ColorType::L8, + PnmSubtype::Graymap(SampleEncoding::Binary), + ); + } + + #[test] + fn roundtrip_rgb() { + #[rustfmt::skip] + let buf: [u8; 27] = [ + 0, 0, 0, + 0, 0, 255, + 0, 255, 0, + 0, 255, 255, + 255, 0, 0, + 255, 0, 255, + 255, 255, 0, + 255, 255, 255, + 255, 255, 255, + ]; + execute_roundtrip_default(&buf, 3, 3, ColorType::Rgb8); + execute_roundtrip_with_subtype(&buf, 3, 3, ColorType::Rgb8, PnmSubtype::ArbitraryMap); + execute_roundtrip_with_subtype( + &buf, + 3, + 3, + ColorType::Rgb8, + PnmSubtype::Pixmap(SampleEncoding::Binary), + ); + execute_roundtrip_with_subtype( + &buf, + 3, + 3, + ColorType::Rgb8, + PnmSubtype::Pixmap(SampleEncoding::Ascii), + ); + } + + #[test] + fn roundtrip_u16() { + let buf: [u16; 6] = [0, 1, 0xFFFF, 0x1234, 0x3412, 0xBEAF]; + + execute_roundtrip_u16(&buf, 6, 1, ColorType::L16); + } +} diff --git a/vendor/image/src/codecs/qoi.rs b/vendor/image/src/codecs/qoi.rs new file mode 100644 index 0000000..214e99b --- /dev/null +++ b/vendor/image/src/codecs/qoi.rs @@ -0,0 +1,104 @@ +//! Decoding and encoding of QOI images + +use crate::{ + error::{DecodingError, EncodingError}, + ColorType, ImageDecoder, ImageEncoder, ImageError, ImageFormat, ImageResult, +}; +use std::io::{Cursor, Read, Write}; + +/// QOI decoder +pub struct QoiDecoder<R> { + decoder: qoi::Decoder<R>, +} + +impl<R> QoiDecoder<R> +where + R: Read, +{ + /// Creates a new decoder that decodes from the stream ```reader``` + pub fn new(reader: R) -> ImageResult<Self> { + let decoder = qoi::Decoder::from_stream(reader).map_err(decoding_error)?; + Ok(Self { decoder }) + } +} + +impl<'a, R: Read + 'a> ImageDecoder<'a> for QoiDecoder<R> { + type Reader = Cursor<Vec<u8>>; + + fn dimensions(&self) -> (u32, u32) { + (self.decoder.header().width, self.decoder.header().height) + } + + fn color_type(&self) -> ColorType { + match self.decoder.header().channels { + qoi::Channels::Rgb => ColorType::Rgb8, + qoi::Channels::Rgba => ColorType::Rgba8, + } + } + + fn into_reader(mut self) -> ImageResult<Self::Reader> { + let buffer = self.decoder.decode_to_vec().map_err(decoding_error)?; + Ok(Cursor::new(buffer)) + } +} + +fn decoding_error(error: qoi::Error) -> ImageError { + ImageError::Decoding(DecodingError::new(ImageFormat::Qoi.into(), error)) +} + +fn encoding_error(error: qoi::Error) -> ImageError { + ImageError::Encoding(EncodingError::new(ImageFormat::Qoi.into(), error)) +} + +/// QOI encoder +pub struct QoiEncoder<W> { + writer: W, +} + +impl<W: Write> QoiEncoder<W> { + /// Creates a new encoder that writes its output to ```writer``` + pub fn new(writer: W) -> Self { + Self { writer } + } +} + +impl<W: Write> ImageEncoder for QoiEncoder<W> { + fn write_image( + mut self, + buf: &[u8], + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<()> { + if !matches!(color_type, ColorType::Rgba8 | ColorType::Rgb8) { + return Err(ImageError::Encoding(EncodingError::new( + ImageFormat::Qoi.into(), + format!("unsupported color type {color_type:?}. 
Supported are Rgba8 and Rgb8."), + ))); + } + + // Encode data in QOI + let data = qoi::encode_to_vec(buf, width, height).map_err(encoding_error)?; + + // Write data to buffer + self.writer.write_all(&data[..])?; + self.writer.flush()?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs::File; + + #[test] + fn decode_test_image() { + let decoder = QoiDecoder::new(File::open("tests/images/qoi/basic-test.qoi").unwrap()) + .expect("Unable to read QOI file"); + + assert_eq!((5, 5), decoder.dimensions()); + assert_eq!(ColorType::Rgba8, decoder.color_type()); + } +} diff --git a/vendor/image/src/codecs/tga/decoder.rs b/vendor/image/src/codecs/tga/decoder.rs new file mode 100644 index 0000000..16243ce --- /dev/null +++ b/vendor/image/src/codecs/tga/decoder.rs @@ -0,0 +1,502 @@ +use super::header::{Header, ImageType, ALPHA_BIT_MASK, SCREEN_ORIGIN_BIT_MASK}; +use crate::{ + color::{ColorType, ExtendedColorType}, + error::{ + ImageError, ImageResult, LimitError, LimitErrorKind, UnsupportedError, UnsupportedErrorKind, + }, + image::{ImageDecoder, ImageFormat, ImageReadBuffer}, +}; +use byteorder::ReadBytesExt; +use std::{ + convert::TryFrom, + io::{self, Read, Seek}, + mem, +}; + +struct ColorMap { + /// sizes in bytes + start_offset: usize, + entry_size: usize, + bytes: Vec<u8>, +} + +impl ColorMap { + pub(crate) fn from_reader( + r: &mut dyn Read, + start_offset: u16, + num_entries: u16, + bits_per_entry: u8, + ) -> ImageResult<ColorMap> { + let bytes_per_entry = (bits_per_entry as usize + 7) / 8; + + let mut bytes = vec![0; bytes_per_entry * num_entries as usize]; + r.read_exact(&mut bytes)?; + + Ok(ColorMap { + entry_size: bytes_per_entry, + start_offset: start_offset as usize, + bytes, + }) + } + + /// Get one entry from the color map + pub(crate) fn get(&self, index: usize) -> Option<&[u8]> { + let entry = self.start_offset + self.entry_size * index; + self.bytes.get(entry..entry + self.entry_size) + } +} + +/// The representation of a TGA decoder +pub struct TgaDecoder<R> { + r: R, + + width: usize, + height: usize, + bytes_per_pixel: usize, + has_loaded_metadata: bool, + + image_type: ImageType, + color_type: ColorType, + original_color_type: Option<ExtendedColorType>, + + header: Header, + color_map: Option<ColorMap>, + + // Used in read_scanline + line_read: Option<usize>, + line_remain_buff: Vec<u8>, +} + +impl<R: Read + Seek> TgaDecoder<R> { + /// Create a new decoder that decodes from the stream `r` + pub fn new(r: R) -> ImageResult<TgaDecoder<R>> { + let mut decoder = TgaDecoder { + r, + + width: 0, + height: 0, + bytes_per_pixel: 0, + has_loaded_metadata: false, + + image_type: ImageType::Unknown, + color_type: ColorType::L8, + original_color_type: None, + + header: Header::default(), + color_map: None, + + line_read: None, + line_remain_buff: Vec::new(), + }; + decoder.read_metadata()?; + Ok(decoder) + } + + fn read_header(&mut self) -> ImageResult<()> { + self.header = Header::from_reader(&mut self.r)?; + self.image_type = ImageType::new(self.header.image_type); + self.width = self.header.image_width as usize; + self.height = self.header.image_height as usize; + self.bytes_per_pixel = (self.header.pixel_depth as usize + 7) / 8; + Ok(()) + } + + fn read_metadata(&mut self) -> ImageResult<()> { + if !self.has_loaded_metadata { + self.read_header()?; + self.read_image_id()?; + self.read_color_map()?; + self.read_color_information()?; + self.has_loaded_metadata = true; + } + Ok(()) + } + + /// Loads the color information for the decoder + /// + /// To keep 
things simple, we won't handle bit depths that aren't divisible + /// by 8 and are larger than 32. + fn read_color_information(&mut self) -> ImageResult<()> { + if self.header.pixel_depth % 8 != 0 || self.header.pixel_depth > 32 { + // Bit depth must be divisible by 8, and must be less than or equal + // to 32. + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Tga.into(), + UnsupportedErrorKind::Color(ExtendedColorType::Unknown( + self.header.pixel_depth, + )), + ), + )); + } + + let num_alpha_bits = self.header.image_desc & ALPHA_BIT_MASK; + + let other_channel_bits = if self.header.map_type != 0 { + self.header.map_entry_size + } else { + if num_alpha_bits > self.header.pixel_depth { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Tga.into(), + UnsupportedErrorKind::Color(ExtendedColorType::Unknown( + self.header.pixel_depth, + )), + ), + )); + } + + self.header.pixel_depth - num_alpha_bits + }; + let color = self.image_type.is_color(); + + match (num_alpha_bits, other_channel_bits, color) { + // really, the encoding is BGR and BGRA, this is fixed + // up with `TgaDecoder::reverse_encoding`. + (0, 32, true) => self.color_type = ColorType::Rgba8, + (8, 24, true) => self.color_type = ColorType::Rgba8, + (0, 24, true) => self.color_type = ColorType::Rgb8, + (8, 8, false) => self.color_type = ColorType::La8, + (0, 8, false) => self.color_type = ColorType::L8, + (8, 0, false) => { + // alpha-only image is treated as L8 + self.color_type = ColorType::L8; + self.original_color_type = Some(ExtendedColorType::A8); + } + _ => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Tga.into(), + UnsupportedErrorKind::Color(ExtendedColorType::Unknown( + self.header.pixel_depth, + )), + ), + )) + } + } + Ok(()) + } + + /// Read the image id field + /// + /// We're not interested in this field, so this function skips it if it + /// is present + fn read_image_id(&mut self) -> ImageResult<()> { + self.r + .seek(io::SeekFrom::Current(i64::from(self.header.id_length)))?; + Ok(()) + } + + fn read_color_map(&mut self) -> ImageResult<()> { + if self.header.map_type == 1 { + // FIXME: we could reverse the map entries, which avoids having to reverse all pixels + // in the final output individually. 
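+ // The color map is a palette of `map_length` entries, each `map_entry_size` bits wide,
+ // starting at `map_origin`; `expand_color_map` later uses it to turn the palette indices
+ // stored in the pixel data into full color values.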
+ self.color_map = Some(ColorMap::from_reader( + &mut self.r, + self.header.map_origin, + self.header.map_length, + self.header.map_entry_size, + )?); + } + Ok(()) + } + + /// Expands indices into its mapped color + fn expand_color_map(&self, pixel_data: &[u8]) -> io::Result<Vec<u8>> { + #[inline] + fn bytes_to_index(bytes: &[u8]) -> usize { + let mut result = 0usize; + for byte in bytes.iter() { + result = result << 8 | *byte as usize; + } + result + } + + let bytes_per_entry = (self.header.map_entry_size as usize + 7) / 8; + let mut result = Vec::with_capacity(self.width * self.height * bytes_per_entry); + + if self.bytes_per_pixel == 0 { + return Err(io::ErrorKind::Other.into()); + } + + let color_map = self + .color_map + .as_ref() + .ok_or_else(|| io::Error::from(io::ErrorKind::Other))?; + + for chunk in pixel_data.chunks(self.bytes_per_pixel) { + let index = bytes_to_index(chunk); + if let Some(color) = color_map.get(index) { + result.extend_from_slice(color); + } else { + return Err(io::ErrorKind::Other.into()); + } + } + + Ok(result) + } + + /// Reads a run length encoded data for given number of bytes + fn read_encoded_data(&mut self, num_bytes: usize) -> io::Result<Vec<u8>> { + let mut pixel_data = Vec::with_capacity(num_bytes); + let mut repeat_buf = Vec::with_capacity(self.bytes_per_pixel); + + while pixel_data.len() < num_bytes { + let run_packet = self.r.read_u8()?; + // If the highest bit in `run_packet` is set, then we repeat pixels + // + // Note: the TGA format adds 1 to both counts because having a count + // of 0 would be pointless. + if (run_packet & 0x80) != 0 { + // high bit set, so we will repeat the data + let repeat_count = ((run_packet & !0x80) + 1) as usize; + self.r + .by_ref() + .take(self.bytes_per_pixel as u64) + .read_to_end(&mut repeat_buf)?; + + // get the repeating pixels from the bytes of the pixel stored in `repeat_buf` + let data = repeat_buf + .iter() + .cycle() + .take(repeat_count * self.bytes_per_pixel); + pixel_data.extend(data); + repeat_buf.clear(); + } else { + // not set, so `run_packet+1` is the number of non-encoded pixels + let num_raw_bytes = (run_packet + 1) as usize * self.bytes_per_pixel; + self.r + .by_ref() + .take(num_raw_bytes as u64) + .read_to_end(&mut pixel_data)?; + } + } + + if pixel_data.len() > num_bytes { + // FIXME: the last packet contained more data than we asked for! + // This is at least a warning. We truncate the data since some methods rely on the + // length to be accurate in the success case. + pixel_data.truncate(num_bytes); + } + + Ok(pixel_data) + } + + /// Reads a run length encoded packet + fn read_all_encoded_data(&mut self) -> ImageResult<Vec<u8>> { + let num_bytes = self.width * self.height * self.bytes_per_pixel; + + Ok(self.read_encoded_data(num_bytes)?) 
+ } + + /// Reads a run length encoded line + fn read_encoded_line(&mut self) -> io::Result<Vec<u8>> { + let line_num_bytes = self.width * self.bytes_per_pixel; + let remain_len = self.line_remain_buff.len(); + + if remain_len >= line_num_bytes { + // `Vec::split_to` if std had it + let bytes = { + let bytes_after = self.line_remain_buff.split_off(line_num_bytes); + mem::replace(&mut self.line_remain_buff, bytes_after) + }; + + return Ok(bytes); + } + + let num_bytes = line_num_bytes - remain_len; + + let line_data = self.read_encoded_data(num_bytes)?; + + let mut pixel_data = Vec::with_capacity(line_num_bytes); + pixel_data.append(&mut self.line_remain_buff); + pixel_data.extend_from_slice(&line_data[..num_bytes]); + + // put the remain data to line_remain_buff. + // expects `self.line_remain_buff` to be empty from + // the above `pixel_data.append` call + debug_assert!(self.line_remain_buff.is_empty()); + self.line_remain_buff + .extend_from_slice(&line_data[num_bytes..]); + + Ok(pixel_data) + } + + /// Reverse from BGR encoding to RGB encoding + /// + /// TGA files are stored in the BGRA encoding. This function swaps + /// the blue and red bytes in the `pixels` array. + fn reverse_encoding_in_output(&mut self, pixels: &mut [u8]) { + // We only need to reverse the encoding of color images + match self.color_type { + ColorType::Rgb8 | ColorType::Rgba8 => { + for chunk in pixels.chunks_mut(self.color_type.bytes_per_pixel().into()) { + chunk.swap(0, 2); + } + } + _ => {} + } + } + + /// Flip the image vertically depending on the screen origin bit + /// + /// The bit in position 5 of the image descriptor byte is the screen origin bit. + /// If it's 1, the origin is in the top left corner. + /// If it's 0, the origin is in the bottom left corner. + /// This function checks the bit, and if it's 0, flips the image vertically. + fn flip_vertically(&mut self, pixels: &mut [u8]) { + if self.is_flipped_vertically() { + if self.height == 0 { + return; + } + + let num_bytes = pixels.len(); + + let width_bytes = num_bytes / self.height; + + // Flip the image vertically. + for vertical_index in 0..(self.height / 2) { + let vertical_target = (self.height - vertical_index) * width_bytes - width_bytes; + + for horizontal_index in 0..width_bytes { + let source = vertical_index * width_bytes + horizontal_index; + let target = vertical_target + horizontal_index; + + pixels.swap(target, source); + } + } + } + } + + /// Check whether the image is vertically flipped + /// + /// The bit in position 5 of the image descriptor byte is the screen origin bit. + /// If it's 1, the origin is in the top left corner. + /// If it's 0, the origin is in the bottom left corner. + /// This function checks the bit, and if it's 0, flips the image vertically. + fn is_flipped_vertically(&self) -> bool { + let screen_origin_bit = SCREEN_ORIGIN_BIT_MASK & self.header.image_desc != 0; + !screen_origin_bit + } + + fn read_scanline(&mut self, buf: &mut [u8]) -> io::Result<usize> { + if let Some(line_read) = self.line_read { + if line_read == self.height { + return Ok(0); + } + } + + // read the pixels from the data region + let mut pixel_data = if self.image_type.is_encoded() { + self.read_encoded_line()? 
+ } else { + let num_raw_bytes = self.width * self.bytes_per_pixel; + let mut buf = vec![0; num_raw_bytes]; + self.r.by_ref().read_exact(&mut buf)?; + buf + }; + + // expand the indices using the color map if necessary + if self.image_type.is_color_mapped() { + pixel_data = self.expand_color_map(&pixel_data)?; + } + self.reverse_encoding_in_output(&mut pixel_data); + + // copy to the output buffer + buf[..pixel_data.len()].copy_from_slice(&pixel_data); + + self.line_read = Some(self.line_read.unwrap_or(0) + 1); + + Ok(pixel_data.len()) + } +} + +impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for TgaDecoder<R> { + type Reader = TGAReader<R>; + + fn dimensions(&self) -> (u32, u32) { + (self.width as u32, self.height as u32) + } + + fn color_type(&self) -> ColorType { + self.color_type + } + + fn original_color_type(&self) -> ExtendedColorType { + self.original_color_type + .unwrap_or_else(|| self.color_type().into()) + } + + fn scanline_bytes(&self) -> u64 { + // This cannot overflow because TGA has a maximum width of u16::MAX_VALUE and + // `bytes_per_pixel` is a u8. + u64::from(self.color_type.bytes_per_pixel()) * self.width as u64 + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + Ok(TGAReader { + buffer: ImageReadBuffer::new(self.scanline_bytes(), self.total_bytes()), + decoder: self, + }) + } + + fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> { + assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); + + // In indexed images, we might need more bytes than pixels to read them. That's nonsensical + // to encode but we'll not want to crash. + let mut fallback_buf = vec![]; + // read the pixels from the data region + let rawbuf = if self.image_type.is_encoded() { + let pixel_data = self.read_all_encoded_data()?; + if self.bytes_per_pixel <= usize::from(self.color_type.bytes_per_pixel()) { + buf[..pixel_data.len()].copy_from_slice(&pixel_data); + &buf[..pixel_data.len()] + } else { + fallback_buf = pixel_data; + &fallback_buf[..] 
+ } + } else { + let num_raw_bytes = self.width * self.height * self.bytes_per_pixel; + if self.bytes_per_pixel <= usize::from(self.color_type.bytes_per_pixel()) { + self.r.by_ref().read_exact(&mut buf[..num_raw_bytes])?; + &buf[..num_raw_bytes] + } else { + fallback_buf.resize(num_raw_bytes, 0u8); + self.r + .by_ref() + .read_exact(&mut fallback_buf[..num_raw_bytes])?; + &fallback_buf[..num_raw_bytes] + } + }; + + // expand the indices using the color map if necessary + if self.image_type.is_color_mapped() { + let pixel_data = self.expand_color_map(rawbuf)?; + // not enough data to fill the buffer, or would overflow the buffer + if pixel_data.len() != buf.len() { + return Err(ImageError::Limits(LimitError::from_kind( + LimitErrorKind::DimensionError, + ))); + } + buf.copy_from_slice(&pixel_data); + } + + self.reverse_encoding_in_output(buf); + + self.flip_vertically(buf); + + Ok(()) + } +} + +pub struct TGAReader<R> { + buffer: ImageReadBuffer, + decoder: TgaDecoder<R>, +} +impl<R: Read + Seek> Read for TGAReader<R> { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + let decoder = &mut self.decoder; + self.buffer.read(buf, |buf| decoder.read_scanline(buf)) + } +} diff --git a/vendor/image/src/codecs/tga/encoder.rs b/vendor/image/src/codecs/tga/encoder.rs new file mode 100644 index 0000000..cf34984 --- /dev/null +++ b/vendor/image/src/codecs/tga/encoder.rs @@ -0,0 +1,215 @@ +use super::header::Header; +use crate::{error::EncodingError, ColorType, ImageEncoder, ImageError, ImageFormat, ImageResult}; +use std::{convert::TryFrom, error, fmt, io::Write}; + +/// Errors that can occur during encoding and saving of a TGA image. +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +enum EncoderError { + /// Invalid TGA width. + WidthInvalid(u32), + + /// Invalid TGA height. + HeightInvalid(u32), +} + +impl fmt::Display for EncoderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + EncoderError::WidthInvalid(s) => f.write_fmt(format_args!("Invalid TGA width: {}", s)), + EncoderError::HeightInvalid(s) => { + f.write_fmt(format_args!("Invalid TGA height: {}", s)) + } + } + } +} + +impl From<EncoderError> for ImageError { + fn from(e: EncoderError) -> ImageError { + ImageError::Encoding(EncodingError::new(ImageFormat::Tga.into(), e)) + } +} + +impl error::Error for EncoderError {} + +/// TGA encoder. +pub struct TgaEncoder<W: Write> { + writer: W, +} + +impl<W: Write> TgaEncoder<W> { + /// Create a new encoder that writes its output to ```w```. + pub fn new(w: W) -> TgaEncoder<W> { + TgaEncoder { writer: w } + } + + /// Encodes the image ```buf``` that has dimensions ```width``` + /// and ```height``` and ```ColorType``` ```color_type```. + /// + /// The dimensions of the image must be between 0 and 65535 (inclusive) or + /// an error will be returned. + pub fn encode( + mut self, + buf: &[u8], + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<()> { + // Validate dimensions. + let width = u16::try_from(width) + .map_err(|_| ImageError::from(EncoderError::WidthInvalid(width)))?; + + let height = u16::try_from(height) + .map_err(|_| ImageError::from(EncoderError::HeightInvalid(height)))?; + + // Write out TGA header. + let header = Header::from_pixel_info(color_type, width, height)?; + header.write_to(&mut self.writer)?; + + // Write out Bgr(a)8 or L(a)8 image data. 
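+ // TGA stores color pixels as BGR(A), so swap the blue and red bytes of each pixel before
+ // writing; grayscale (L8/La8) data is written through unchanged. This mirrors
+ // `reverse_encoding_in_output` in the decoder.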
+ match color_type { + ColorType::Rgb8 | ColorType::Rgba8 => { + let mut image = Vec::from(buf); + + for chunk in image.chunks_mut(usize::from(color_type.bytes_per_pixel())) { + chunk.swap(0, 2); + } + + self.writer.write_all(&image)?; + } + _ => { + self.writer.write_all(buf)?; + } + } + + Ok(()) + } +} + +impl<W: Write> ImageEncoder for TgaEncoder<W> { + fn write_image( + self, + buf: &[u8], + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<()> { + self.encode(buf, width, height, color_type) + } +} + +#[cfg(test)] +mod tests { + use super::{EncoderError, TgaEncoder}; + use crate::{codecs::tga::TgaDecoder, ColorType, ImageDecoder, ImageError}; + use std::{error::Error, io::Cursor}; + + fn round_trip_image(image: &[u8], width: u32, height: u32, c: ColorType) -> Vec<u8> { + let mut encoded_data = Vec::new(); + { + let encoder = TgaEncoder::new(&mut encoded_data); + encoder + .encode(&image, width, height, c) + .expect("could not encode image"); + } + + let decoder = TgaDecoder::new(Cursor::new(&encoded_data)).expect("failed to decode"); + + let mut buf = vec![0; decoder.total_bytes() as usize]; + decoder.read_image(&mut buf).expect("failed to decode"); + buf + } + + #[test] + fn test_image_width_too_large() { + // TGA cannot encode images larger than 65,535×65,535 + // create a 65,536×1 8-bit black image buffer + let size = usize::from(u16::MAX) + 1; + let dimension = size as u32; + let img = vec![0u8; size]; + // Try to encode an image that is too large + let mut encoded = Vec::new(); + let encoder = TgaEncoder::new(&mut encoded); + let result = encoder.encode(&img, dimension, 1, ColorType::L8); + match result { + Err(ImageError::Encoding(err)) => { + let err = err + .source() + .unwrap() + .downcast_ref::<EncoderError>() + .unwrap(); + assert_eq!(*err, EncoderError::WidthInvalid(dimension)); + } + other => panic!( + "Encoding an image that is too wide should return a InvalidWidth \ + it returned {:?} instead", + other + ), + } + } + + #[test] + fn test_image_height_too_large() { + // TGA cannot encode images larger than 65,535×65,535 + // create a 65,536×1 8-bit black image buffer + let size = usize::from(u16::MAX) + 1; + let dimension = size as u32; + let img = vec![0u8; size]; + // Try to encode an image that is too large + let mut encoded = Vec::new(); + let encoder = TgaEncoder::new(&mut encoded); + let result = encoder.encode(&img, 1, dimension, ColorType::L8); + match result { + Err(ImageError::Encoding(err)) => { + let err = err + .source() + .unwrap() + .downcast_ref::<EncoderError>() + .unwrap(); + assert_eq!(*err, EncoderError::HeightInvalid(dimension)); + } + other => panic!( + "Encoding an image that is too tall should return a InvalidHeight \ + it returned {:?} instead", + other + ), + } + } + + #[test] + fn round_trip_single_pixel_rgb() { + let image = [0, 1, 2]; + let decoded = round_trip_image(&image, 1, 1, ColorType::Rgb8); + assert_eq!(decoded.len(), image.len()); + assert_eq!(decoded.as_slice(), image); + } + + #[test] + fn round_trip_single_pixel_rgba() { + let image = [0, 1, 2, 3]; + let decoded = round_trip_image(&image, 1, 1, ColorType::Rgba8); + assert_eq!(decoded.len(), image.len()); + assert_eq!(decoded.as_slice(), image); + } + + #[test] + fn round_trip_gray() { + let image = [0, 1, 2]; + let decoded = round_trip_image(&image, 3, 1, ColorType::L8); + assert_eq!(decoded.len(), image.len()); + assert_eq!(decoded.as_slice(), image); + } + + #[test] + fn round_trip_graya() { + let image = [0, 1, 2, 3, 4, 5]; + let decoded = 
round_trip_image(&image, 1, 3, ColorType::La8); + assert_eq!(decoded.len(), image.len()); + assert_eq!(decoded.as_slice(), image); + } + + #[test] + fn round_trip_3px_rgb() { + let image = [0; 3 * 3 * 3]; // 3x3 pixels, 3 bytes per pixel + let _decoded = round_trip_image(&image, 3, 3, ColorType::Rgb8); + } +} diff --git a/vendor/image/src/codecs/tga/header.rs b/vendor/image/src/codecs/tga/header.rs new file mode 100644 index 0000000..83ba7a3 --- /dev/null +++ b/vendor/image/src/codecs/tga/header.rs @@ -0,0 +1,150 @@ +use crate::{ + error::{UnsupportedError, UnsupportedErrorKind}, + ColorType, ImageError, ImageFormat, ImageResult, +}; +use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; +use std::io::{Read, Write}; + +pub(crate) const ALPHA_BIT_MASK: u8 = 0b1111; +pub(crate) const SCREEN_ORIGIN_BIT_MASK: u8 = 0b10_0000; + +pub(crate) enum ImageType { + NoImageData = 0, + /// Uncompressed images. + RawColorMap = 1, + RawTrueColor = 2, + RawGrayScale = 3, + /// Run length encoded images. + RunColorMap = 9, + RunTrueColor = 10, + RunGrayScale = 11, + Unknown, +} + +impl ImageType { + /// Create a new image type from a u8. + pub(crate) fn new(img_type: u8) -> ImageType { + match img_type { + 0 => ImageType::NoImageData, + + 1 => ImageType::RawColorMap, + 2 => ImageType::RawTrueColor, + 3 => ImageType::RawGrayScale, + + 9 => ImageType::RunColorMap, + 10 => ImageType::RunTrueColor, + 11 => ImageType::RunGrayScale, + + _ => ImageType::Unknown, + } + } + + /// Check if the image format uses colors as opposed to gray scale. + pub(crate) fn is_color(&self) -> bool { + matches! { *self, + ImageType::RawColorMap + | ImageType::RawTrueColor + | ImageType::RunTrueColor + | ImageType::RunColorMap + } + } + + /// Does the image use a color map. + pub(crate) fn is_color_mapped(&self) -> bool { + matches! { *self, ImageType::RawColorMap | ImageType::RunColorMap } + } + + /// Is the image run length encoded. + pub(crate) fn is_encoded(&self) -> bool { + matches! {*self, ImageType::RunColorMap | ImageType::RunTrueColor | ImageType::RunGrayScale } + } +} + +/// Header used by TGA image files. +#[derive(Debug, Default)] +pub(crate) struct Header { + pub(crate) id_length: u8, // length of ID string + pub(crate) map_type: u8, // color map type + pub(crate) image_type: u8, // image type code + pub(crate) map_origin: u16, // starting index of map + pub(crate) map_length: u16, // length of map + pub(crate) map_entry_size: u8, // size of map entries in bits + pub(crate) x_origin: u16, // x-origin of image + pub(crate) y_origin: u16, // y-origin of image + pub(crate) image_width: u16, // width of image + pub(crate) image_height: u16, // height of image + pub(crate) pixel_depth: u8, // bits per pixel + pub(crate) image_desc: u8, // image descriptor +} + +impl Header { + /// Load the header with values from pixel information. 
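+ ///
+ /// `pixel_depth` is the sum of the alpha and color channel bits, and `image_desc` records
+ /// the alpha-bit count together with the upper-left screen-origin flag. A zero width or
+ /// height leaves the default (no image data) header unchanged.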
+ pub(crate) fn from_pixel_info( + color_type: ColorType, + width: u16, + height: u16, + ) -> ImageResult<Self> { + let mut header = Self::default(); + + if width > 0 && height > 0 { + let (num_alpha_bits, other_channel_bits, image_type) = match color_type { + ColorType::Rgba8 => (8, 24, ImageType::RawTrueColor), + ColorType::Rgb8 => (0, 24, ImageType::RawTrueColor), + ColorType::La8 => (8, 8, ImageType::RawGrayScale), + ColorType::L8 => (0, 8, ImageType::RawGrayScale), + _ => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Tga.into(), + UnsupportedErrorKind::Color(color_type.into()), + ), + )) + } + }; + + header.image_type = image_type as u8; + header.image_width = width; + header.image_height = height; + header.pixel_depth = num_alpha_bits + other_channel_bits; + header.image_desc = num_alpha_bits & ALPHA_BIT_MASK; + header.image_desc |= SCREEN_ORIGIN_BIT_MASK; // Upper left origin. + } + + Ok(header) + } + + /// Load the header with values from the reader. + pub(crate) fn from_reader(r: &mut dyn Read) -> ImageResult<Self> { + Ok(Self { + id_length: r.read_u8()?, + map_type: r.read_u8()?, + image_type: r.read_u8()?, + map_origin: r.read_u16::<LittleEndian>()?, + map_length: r.read_u16::<LittleEndian>()?, + map_entry_size: r.read_u8()?, + x_origin: r.read_u16::<LittleEndian>()?, + y_origin: r.read_u16::<LittleEndian>()?, + image_width: r.read_u16::<LittleEndian>()?, + image_height: r.read_u16::<LittleEndian>()?, + pixel_depth: r.read_u8()?, + image_desc: r.read_u8()?, + }) + } + + /// Write out the header values. + pub(crate) fn write_to(&self, w: &mut dyn Write) -> ImageResult<()> { + w.write_u8(self.id_length)?; + w.write_u8(self.map_type)?; + w.write_u8(self.image_type)?; + w.write_u16::<LittleEndian>(self.map_origin)?; + w.write_u16::<LittleEndian>(self.map_length)?; + w.write_u8(self.map_entry_size)?; + w.write_u16::<LittleEndian>(self.x_origin)?; + w.write_u16::<LittleEndian>(self.y_origin)?; + w.write_u16::<LittleEndian>(self.image_width)?; + w.write_u16::<LittleEndian>(self.image_height)?; + w.write_u8(self.pixel_depth)?; + w.write_u8(self.image_desc)?; + Ok(()) + } +} diff --git a/vendor/image/src/codecs/tga/mod.rs b/vendor/image/src/codecs/tga/mod.rs new file mode 100644 index 0000000..fdc2f0c --- /dev/null +++ b/vendor/image/src/codecs/tga/mod.rs @@ -0,0 +1,17 @@ +//! Decoding of TGA Images +//! +//! # Related Links +//! <http://googlesites.inequation.org/tgautilities> + +/// A decoder for TGA images +/// +/// Currently this decoder does not support 8, 15 and 16 bit color images. +pub use self::decoder::TgaDecoder; + +//TODO add 8, 15, 16 bit color support + +pub use self::encoder::TgaEncoder; + +mod decoder; +mod encoder; +mod header; diff --git a/vendor/image/src/codecs/tiff.rs b/vendor/image/src/codecs/tiff.rs new file mode 100644 index 0000000..7c33412 --- /dev/null +++ b/vendor/image/src/codecs/tiff.rs @@ -0,0 +1,353 @@ +//! Decoding and Encoding of TIFF Images +//! +//! TIFF (Tagged Image File Format) is a versatile image format that supports +//! lossless and lossy compression. +//! +//! # Related Links +//! 
* <http://partners.adobe.com/public/developer/tiff/index.html> - The TIFF specification + +extern crate tiff; + +use std::convert::TryFrom; +use std::io::{self, Cursor, Read, Seek, Write}; +use std::marker::PhantomData; +use std::mem; + +use crate::color::{ColorType, ExtendedColorType}; +use crate::error::{ + DecodingError, EncodingError, ImageError, ImageResult, LimitError, LimitErrorKind, + ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind, +}; +use crate::image::{ImageDecoder, ImageEncoder, ImageFormat}; +use crate::utils; + +/// Decoder for TIFF images. +pub struct TiffDecoder<R> +where + R: Read + Seek, +{ + dimensions: (u32, u32), + color_type: ColorType, + + // We only use an Option here so we can call with_limits on the decoder without moving. + inner: Option<tiff::decoder::Decoder<R>>, +} + +impl<R> TiffDecoder<R> +where + R: Read + Seek, +{ + /// Create a new TiffDecoder. + pub fn new(r: R) -> Result<TiffDecoder<R>, ImageError> { + let mut inner = tiff::decoder::Decoder::new(r).map_err(ImageError::from_tiff_decode)?; + + let dimensions = inner.dimensions().map_err(ImageError::from_tiff_decode)?; + let color_type = inner.colortype().map_err(ImageError::from_tiff_decode)?; + match inner.find_tag_unsigned_vec::<u16>(tiff::tags::Tag::SampleFormat) { + Ok(Some(sample_formats)) => { + for format in sample_formats { + check_sample_format(format)?; + } + } + Ok(None) => { /* assume UInt format */ } + Err(other) => return Err(ImageError::from_tiff_decode(other)), + }; + + let color_type = match color_type { + tiff::ColorType::Gray(8) => ColorType::L8, + tiff::ColorType::Gray(16) => ColorType::L16, + tiff::ColorType::GrayA(8) => ColorType::La8, + tiff::ColorType::GrayA(16) => ColorType::La16, + tiff::ColorType::RGB(8) => ColorType::Rgb8, + tiff::ColorType::RGB(16) => ColorType::Rgb16, + tiff::ColorType::RGBA(8) => ColorType::Rgba8, + tiff::ColorType::RGBA(16) => ColorType::Rgba16, + + tiff::ColorType::Palette(n) | tiff::ColorType::Gray(n) => { + return Err(err_unknown_color_type(n)) + } + tiff::ColorType::GrayA(n) => return Err(err_unknown_color_type(n.saturating_mul(2))), + tiff::ColorType::RGB(n) => return Err(err_unknown_color_type(n.saturating_mul(3))), + tiff::ColorType::YCbCr(n) => return Err(err_unknown_color_type(n.saturating_mul(3))), + tiff::ColorType::RGBA(n) | tiff::ColorType::CMYK(n) => { + return Err(err_unknown_color_type(n.saturating_mul(4))) + } + }; + + Ok(TiffDecoder { + dimensions, + color_type, + inner: Some(inner), + }) + } +} + +fn check_sample_format(sample_format: u16) -> Result<(), ImageError> { + match tiff::tags::SampleFormat::from_u16(sample_format) { + Some(tiff::tags::SampleFormat::Uint) => Ok(()), + Some(other) => Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Tiff.into(), + UnsupportedErrorKind::GenericFeature(format!( + "Unhandled TIFF sample format {:?}", + other + )), + ), + )), + None => Err(ImageError::Decoding(DecodingError::from_format_hint( + ImageFormat::Tiff.into(), + ))), + } +} + +fn err_unknown_color_type(value: u8) -> ImageError { + ImageError::Unsupported(UnsupportedError::from_format_and_kind( + ImageFormat::Tiff.into(), + UnsupportedErrorKind::Color(ExtendedColorType::Unknown(value)), + )) +} + +impl ImageError { + fn from_tiff_decode(err: tiff::TiffError) -> ImageError { + match err { + tiff::TiffError::IoError(err) => ImageError::IoError(err), + err @ tiff::TiffError::FormatError(_) + | err @ tiff::TiffError::IntSizeError + | err @ tiff::TiffError::UsageError(_) => { + 
ImageError::Decoding(DecodingError::new(ImageFormat::Tiff.into(), err)) + } + tiff::TiffError::UnsupportedError(desc) => { + ImageError::Unsupported(UnsupportedError::from_format_and_kind( + ImageFormat::Tiff.into(), + UnsupportedErrorKind::GenericFeature(desc.to_string()), + )) + } + tiff::TiffError::LimitsExceeded => { + ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory)) + } + } + } + + fn from_tiff_encode(err: tiff::TiffError) -> ImageError { + match err { + tiff::TiffError::IoError(err) => ImageError::IoError(err), + err @ tiff::TiffError::FormatError(_) + | err @ tiff::TiffError::IntSizeError + | err @ tiff::TiffError::UsageError(_) => { + ImageError::Encoding(EncodingError::new(ImageFormat::Tiff.into(), err)) + } + tiff::TiffError::UnsupportedError(desc) => { + ImageError::Unsupported(UnsupportedError::from_format_and_kind( + ImageFormat::Tiff.into(), + UnsupportedErrorKind::GenericFeature(desc.to_string()), + )) + } + tiff::TiffError::LimitsExceeded => { + ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory)) + } + } + } +} + +/// Wrapper struct around a `Cursor<Vec<u8>>` +pub struct TiffReader<R>(Cursor<Vec<u8>>, PhantomData<R>); +impl<R> Read for TiffReader<R> { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { + if self.0.position() == 0 && buf.is_empty() { + mem::swap(buf, self.0.get_mut()); + Ok(buf.len()) + } else { + self.0.read_to_end(buf) + } + } +} + +impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for TiffDecoder<R> { + type Reader = TiffReader<R>; + + fn dimensions(&self) -> (u32, u32) { + self.dimensions + } + + fn color_type(&self) -> ColorType { + self.color_type + } + + fn icc_profile(&mut self) -> Option<Vec<u8>> { + if let Some(decoder) = &mut self.inner { + decoder.get_tag_u8_vec(tiff::tags::Tag::Unknown(34675)).ok() + } else { + None + } + } + + fn set_limits(&mut self, limits: crate::io::Limits) -> ImageResult<()> { + limits.check_support(&crate::io::LimitSupport::default())?; + + let (width, height) = self.dimensions(); + limits.check_dimensions(width, height)?; + + let max_alloc = limits.max_alloc.unwrap_or(u64::MAX); + let max_intermediate_alloc = max_alloc.saturating_sub(self.total_bytes()); + + let mut tiff_limits: tiff::decoder::Limits = Default::default(); + tiff_limits.decoding_buffer_size = + usize::try_from(max_alloc - max_intermediate_alloc).unwrap_or(usize::MAX); + tiff_limits.intermediate_buffer_size = + usize::try_from(max_intermediate_alloc).unwrap_or(usize::MAX); + tiff_limits.ifd_value_size = tiff_limits.intermediate_buffer_size; + self.inner = Some(self.inner.take().unwrap().with_limits(tiff_limits)); + + Ok(()) + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + let buf = match self + .inner + .unwrap() + .read_image() + .map_err(ImageError::from_tiff_decode)? 
+ { + tiff::decoder::DecodingResult::U8(v) => v, + tiff::decoder::DecodingResult::U16(v) => utils::vec_copy_to_u8(&v), + tiff::decoder::DecodingResult::U32(v) => utils::vec_copy_to_u8(&v), + tiff::decoder::DecodingResult::U64(v) => utils::vec_copy_to_u8(&v), + tiff::decoder::DecodingResult::I8(v) => utils::vec_copy_to_u8(&v), + tiff::decoder::DecodingResult::I16(v) => utils::vec_copy_to_u8(&v), + tiff::decoder::DecodingResult::I32(v) => utils::vec_copy_to_u8(&v), + tiff::decoder::DecodingResult::I64(v) => utils::vec_copy_to_u8(&v), + tiff::decoder::DecodingResult::F32(v) => utils::vec_copy_to_u8(&v), + tiff::decoder::DecodingResult::F64(v) => utils::vec_copy_to_u8(&v), + }; + + Ok(TiffReader(Cursor::new(buf), PhantomData)) + } + + fn read_image(self, buf: &mut [u8]) -> ImageResult<()> { + assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); + match self + .inner + .unwrap() + .read_image() + .map_err(ImageError::from_tiff_decode)? + { + tiff::decoder::DecodingResult::U8(v) => { + buf.copy_from_slice(&v); + } + tiff::decoder::DecodingResult::U16(v) => { + buf.copy_from_slice(bytemuck::cast_slice(&v)); + } + tiff::decoder::DecodingResult::U32(v) => { + buf.copy_from_slice(bytemuck::cast_slice(&v)); + } + tiff::decoder::DecodingResult::U64(v) => { + buf.copy_from_slice(bytemuck::cast_slice(&v)); + } + tiff::decoder::DecodingResult::I8(v) => { + buf.copy_from_slice(bytemuck::cast_slice(&v)); + } + tiff::decoder::DecodingResult::I16(v) => { + buf.copy_from_slice(bytemuck::cast_slice(&v)); + } + tiff::decoder::DecodingResult::I32(v) => { + buf.copy_from_slice(bytemuck::cast_slice(&v)); + } + tiff::decoder::DecodingResult::I64(v) => { + buf.copy_from_slice(bytemuck::cast_slice(&v)); + } + tiff::decoder::DecodingResult::F32(v) => { + buf.copy_from_slice(bytemuck::cast_slice(&v)); + } + tiff::decoder::DecodingResult::F64(v) => { + buf.copy_from_slice(bytemuck::cast_slice(&v)); + } + } + Ok(()) + } +} + +/// Encoder for tiff images +pub struct TiffEncoder<W> { + w: W, +} + +// Utility to simplify and deduplicate error handling during 16-bit encoding. +fn u8_slice_as_u16(buf: &[u8]) -> ImageResult<&[u16]> { + bytemuck::try_cast_slice(buf).map_err(|err| { + // If the buffer is not aligned or the correct length for a u16 slice, err. + // + // `bytemuck::PodCastError` of bytemuck-1.2.0 does not implement + // `Error` and `Display` trait. + // See <https://github.com/Lokathor/bytemuck/issues/22>. + ImageError::Parameter(ParameterError::from_kind(ParameterErrorKind::Generic( + format!("{:?}", err), + ))) + }) +} + +impl<W: Write + Seek> TiffEncoder<W> { + /// Create a new encoder that writes its output to `w` + pub fn new(w: W) -> TiffEncoder<W> { + TiffEncoder { w } + } + + /// Encodes the image `image` that has dimensions `width` and `height` and `ColorType` `c`. + /// + /// 16-bit types assume the buffer is native endian. 
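+ /// Supported color types are `L8`, `Rgb8`, `Rgba8` and their 16-bit counterparts; other color types return an unsupported-color error. + /// Minimal usage sketch, assuming an in-memory target and hypothetical `pixels`/`width`/`height` values: + /// `TiffEncoder::new(std::io::Cursor::new(Vec::new())).encode(&pixels, width, height, ColorType::Rgb8)`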
+ pub fn encode(self, data: &[u8], width: u32, height: u32, color: ColorType) -> ImageResult<()> { + let mut encoder = + tiff::encoder::TiffEncoder::new(self.w).map_err(ImageError::from_tiff_encode)?; + match color { + ColorType::L8 => { + encoder.write_image::<tiff::encoder::colortype::Gray8>(width, height, data) + } + ColorType::Rgb8 => { + encoder.write_image::<tiff::encoder::colortype::RGB8>(width, height, data) + } + ColorType::Rgba8 => { + encoder.write_image::<tiff::encoder::colortype::RGBA8>(width, height, data) + } + ColorType::L16 => encoder.write_image::<tiff::encoder::colortype::Gray16>( + width, + height, + u8_slice_as_u16(data)?, + ), + ColorType::Rgb16 => encoder.write_image::<tiff::encoder::colortype::RGB16>( + width, + height, + u8_slice_as_u16(data)?, + ), + ColorType::Rgba16 => encoder.write_image::<tiff::encoder::colortype::RGBA16>( + width, + height, + u8_slice_as_u16(data)?, + ), + _ => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::Tiff.into(), + UnsupportedErrorKind::Color(color.into()), + ), + )) + } + } + .map_err(ImageError::from_tiff_encode)?; + + Ok(()) + } +} + +impl<W: Write + Seek> ImageEncoder for TiffEncoder<W> { + fn write_image( + self, + buf: &[u8], + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<()> { + self.encode(buf, width, height, color_type) + } +} diff --git a/vendor/image/src/codecs/webp/decoder.rs b/vendor/image/src/codecs/webp/decoder.rs new file mode 100644 index 0000000..9120290 --- /dev/null +++ b/vendor/image/src/codecs/webp/decoder.rs @@ -0,0 +1,399 @@ +use byteorder::{LittleEndian, ReadBytesExt}; +use std::convert::TryFrom; +use std::io::{self, Cursor, Error, Read}; +use std::marker::PhantomData; +use std::{error, fmt, mem}; + +use crate::error::{DecodingError, ImageError, ImageResult, ParameterError, ParameterErrorKind}; +use crate::image::{ImageDecoder, ImageFormat}; +use crate::{color, AnimationDecoder, Frames, Rgba}; + +use super::lossless::{LosslessDecoder, LosslessFrame}; +use super::vp8::{Frame as VP8Frame, Vp8Decoder}; + +use super::extended::{read_extended_header, ExtendedImage}; + +/// All errors that can occur when attempting to parse a WEBP container +#[derive(Debug, Clone, Copy)] +pub(crate) enum DecoderError { + /// RIFF's "RIFF" signature not found or invalid + RiffSignatureInvalid([u8; 4]), + /// WebP's "WEBP" signature not found or invalid + WebpSignatureInvalid([u8; 4]), + /// Chunk Header was incorrect or invalid in its usage + ChunkHeaderInvalid([u8; 4]), +} + +impl fmt::Display for DecoderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + struct SignatureWriter([u8; 4]); + impl fmt::Display for SignatureWriter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "[{:#04X?}, {:#04X?}, {:#04X?}, {:#04X?}]", + self.0[0], self.0[1], self.0[2], self.0[3] + ) + } + } + + match self { + DecoderError::RiffSignatureInvalid(riff) => f.write_fmt(format_args!( + "Invalid RIFF signature: {}", + SignatureWriter(*riff) + )), + DecoderError::WebpSignatureInvalid(webp) => f.write_fmt(format_args!( + "Invalid WebP signature: {}", + SignatureWriter(*webp) + )), + DecoderError::ChunkHeaderInvalid(header) => f.write_fmt(format_args!( + "Invalid Chunk header: {}", + SignatureWriter(*header) + )), + } + } +} + +impl From<DecoderError> for ImageError { + fn from(e: DecoderError) -> ImageError { + ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e)) + } +} + +impl error::Error for DecoderError {} + +/// 
All possible RIFF chunks in a WebP image file +#[allow(clippy::upper_case_acronyms)] +#[derive(Debug, Clone, Copy, PartialEq)] +pub(crate) enum WebPRiffChunk { + RIFF, + WEBP, + VP8, + VP8L, + VP8X, + ANIM, + ANMF, + ALPH, + ICCP, + EXIF, + XMP, +} + +impl WebPRiffChunk { + pub(crate) fn from_fourcc(chunk_fourcc: [u8; 4]) -> ImageResult<Self> { + match &chunk_fourcc { + b"RIFF" => Ok(Self::RIFF), + b"WEBP" => Ok(Self::WEBP), + b"VP8 " => Ok(Self::VP8), + b"VP8L" => Ok(Self::VP8L), + b"VP8X" => Ok(Self::VP8X), + b"ANIM" => Ok(Self::ANIM), + b"ANMF" => Ok(Self::ANMF), + b"ALPH" => Ok(Self::ALPH), + b"ICCP" => Ok(Self::ICCP), + b"EXIF" => Ok(Self::EXIF), + b"XMP " => Ok(Self::XMP), + _ => Err(DecoderError::ChunkHeaderInvalid(chunk_fourcc).into()), + } + } + + pub(crate) fn to_fourcc(&self) -> [u8; 4] { + match self { + Self::RIFF => *b"RIFF", + Self::WEBP => *b"WEBP", + Self::VP8 => *b"VP8 ", + Self::VP8L => *b"VP8L", + Self::VP8X => *b"VP8X", + Self::ANIM => *b"ANIM", + Self::ANMF => *b"ANMF", + Self::ALPH => *b"ALPH", + Self::ICCP => *b"ICCP", + Self::EXIF => *b"EXIF", + Self::XMP => *b"XMP ", + } + } +} + +enum WebPImage { + Lossy(VP8Frame), + Lossless(LosslessFrame), + Extended(ExtendedImage), +} + +/// WebP Image format decoder. Currently only supports lossy RGB images or lossless RGBA images. +pub struct WebPDecoder<R> { + r: R, + image: WebPImage, +} + +impl<R: Read> WebPDecoder<R> { + /// Create a new WebPDecoder from the Reader ```r```. + /// This function takes ownership of the Reader. + pub fn new(r: R) -> ImageResult<WebPDecoder<R>> { + let image = WebPImage::Lossy(Default::default()); + + let mut decoder = WebPDecoder { r, image }; + decoder.read_data()?; + Ok(decoder) + } + + //reads the 12 bytes of the WebP file header + fn read_riff_header(&mut self) -> ImageResult<u32> { + let mut riff = [0; 4]; + self.r.read_exact(&mut riff)?; + if &riff != b"RIFF" { + return Err(DecoderError::RiffSignatureInvalid(riff).into()); + } + + let size = self.r.read_u32::<LittleEndian>()?; + + let mut webp = [0; 4]; + self.r.read_exact(&mut webp)?; + if &webp != b"WEBP" { + return Err(DecoderError::WebpSignatureInvalid(webp).into()); + } + + Ok(size) + } + + //reads the chunk header, decodes the frame and returns the inner decoder + fn read_frame(&mut self) -> ImageResult<WebPImage> { + let chunk = read_chunk(&mut self.r)?; + + match chunk { + Some((cursor, WebPRiffChunk::VP8)) => { + let mut vp8_decoder = Vp8Decoder::new(cursor); + let frame = vp8_decoder.decode_frame()?; + + Ok(WebPImage::Lossy(frame.clone())) + } + Some((cursor, WebPRiffChunk::VP8L)) => { + let mut lossless_decoder = LosslessDecoder::new(cursor); + let frame = lossless_decoder.decode_frame()?; + + Ok(WebPImage::Lossless(frame.clone())) + } + Some((mut cursor, WebPRiffChunk::VP8X)) => { + let info = read_extended_header(&mut cursor)?; + + let image = ExtendedImage::read_extended_chunks(&mut self.r, info)?; + + Ok(WebPImage::Extended(image)) + } + None => Err(ImageError::IoError(Error::from( + io::ErrorKind::UnexpectedEof, + ))), + Some((_, chunk)) => Err(DecoderError::ChunkHeaderInvalid(chunk.to_fourcc()).into()), + } + } + + fn read_data(&mut self) -> ImageResult<()> { + let _size = self.read_riff_header()?; + + let image = self.read_frame()?; + + self.image = image; + + Ok(()) + } + + /// Returns true if the image as described by the bitstream is animated. 
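+ /// Only extended (VP8X) images can contain an animation; plain lossy or lossless bitstreams always report `false`.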
+ pub fn has_animation(&self) -> bool { + match &self.image { + WebPImage::Lossy(_) => false, + WebPImage::Lossless(_) => false, + WebPImage::Extended(extended) => extended.has_animation(), + } + } + + /// Sets the background color if the image is an extended and animated webp. + pub fn set_background_color(&mut self, color: Rgba<u8>) -> ImageResult<()> { + match &mut self.image { + WebPImage::Extended(image) => image.set_background_color(color), + _ => Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic( + "Background color can only be set on animated webp".to_owned(), + ), + ))), + } + } +} + +pub(crate) fn read_len_cursor<R>(r: &mut R) -> ImageResult<Cursor<Vec<u8>>> +where + R: Read, +{ + let unpadded_len = u64::from(r.read_u32::<LittleEndian>()?); + + // RIFF chunks containing an uneven number of bytes append + // an extra 0x00 at the end of the chunk + // + // The addition cannot overflow since we have a u64 that was created from a u32 + let len = unpadded_len + (unpadded_len % 2); + + let mut framedata = Vec::new(); + r.by_ref().take(len).read_to_end(&mut framedata)?; + + //remove padding byte + if unpadded_len % 2 == 1 { + framedata.pop(); + } + + Ok(io::Cursor::new(framedata)) +} + +/// Reads a chunk header FourCC +/// Returns None if and only if we hit end of file reading the four character code of the chunk +/// The inner error is `Err` if and only if the chunk header FourCC is present but unknown +pub(crate) fn read_fourcc<R: Read>(r: &mut R) -> ImageResult<Option<ImageResult<WebPRiffChunk>>> { + let mut chunk_fourcc = [0; 4]; + let result = r.read_exact(&mut chunk_fourcc); + + match result { + Ok(()) => {} + Err(err) => { + if err.kind() == io::ErrorKind::UnexpectedEof { + return Ok(None); + } else { + return Err(err.into()); + } + } + } + + let chunk = WebPRiffChunk::from_fourcc(chunk_fourcc); + Ok(Some(chunk)) +} + +/// Reads a chunk +/// Returns an error if the chunk header is not a valid webp header or some other reading error +/// Returns None if and only if we hit end of file reading the four character code of the chunk +pub(crate) fn read_chunk<R>(r: &mut R) -> ImageResult<Option<(Cursor<Vec<u8>>, WebPRiffChunk)>> +where + R: Read, +{ + if let Some(chunk) = read_fourcc(r)? 
{ + let chunk = chunk?; + let cursor = read_len_cursor(r)?; + Ok(Some((cursor, chunk))) + } else { + Ok(None) + } +} + +/// Wrapper struct around a `Cursor<Vec<u8>>` +pub struct WebpReader<R>(Cursor<Vec<u8>>, PhantomData<R>); +impl<R> Read for WebpReader<R> { + fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { + self.0.read(buf) + } + fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> { + if self.0.position() == 0 && buf.is_empty() { + mem::swap(buf, self.0.get_mut()); + Ok(buf.len()) + } else { + self.0.read_to_end(buf) + } + } +} + +impl<'a, R: 'a + Read> ImageDecoder<'a> for WebPDecoder<R> { + type Reader = WebpReader<R>; + + fn dimensions(&self) -> (u32, u32) { + match &self.image { + WebPImage::Lossy(vp8_frame) => { + (u32::from(vp8_frame.width), u32::from(vp8_frame.height)) + } + WebPImage::Lossless(lossless_frame) => ( + u32::from(lossless_frame.width), + u32::from(lossless_frame.height), + ), + WebPImage::Extended(extended) => extended.dimensions(), + } + } + + fn color_type(&self) -> color::ColorType { + match &self.image { + WebPImage::Lossy(_) => color::ColorType::Rgb8, + WebPImage::Lossless(_) => color::ColorType::Rgba8, + WebPImage::Extended(extended) => extended.color_type(), + } + } + + fn into_reader(self) -> ImageResult<Self::Reader> { + match &self.image { + WebPImage::Lossy(vp8_frame) => { + let mut data = vec![0; vp8_frame.get_buf_size()]; + vp8_frame.fill_rgb(data.as_mut_slice()); + Ok(WebpReader(Cursor::new(data), PhantomData)) + } + WebPImage::Lossless(lossless_frame) => { + let mut data = vec![0; lossless_frame.get_buf_size()]; + lossless_frame.fill_rgba(data.as_mut_slice()); + Ok(WebpReader(Cursor::new(data), PhantomData)) + } + WebPImage::Extended(extended) => { + let mut data = vec![0; extended.get_buf_size()]; + extended.fill_buf(data.as_mut_slice()); + Ok(WebpReader(Cursor::new(data), PhantomData)) + } + } + } + + fn read_image(self, buf: &mut [u8]) -> ImageResult<()> { + assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes())); + + match &self.image { + WebPImage::Lossy(vp8_frame) => { + vp8_frame.fill_rgb(buf); + } + WebPImage::Lossless(lossless_frame) => { + lossless_frame.fill_rgba(buf); + } + WebPImage::Extended(extended) => { + extended.fill_buf(buf); + } + } + Ok(()) + } + + fn icc_profile(&mut self) -> Option<Vec<u8>> { + if let WebPImage::Extended(extended) = &self.image { + extended.icc_profile() + } else { + None + } + } +} + +impl<'a, R: 'a + Read> AnimationDecoder<'a> for WebPDecoder<R> { + fn into_frames(self) -> Frames<'a> { + match self.image { + WebPImage::Lossy(_) | WebPImage::Lossless(_) => { + Frames::new(Box::new(std::iter::empty())) + } + WebPImage::Extended(extended_image) => extended_image.into_frames(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn add_with_overflow_size() { + let bytes = vec![ + 0x52, 0x49, 0x46, 0x46, 0xaf, 0x37, 0x80, 0x47, 0x57, 0x45, 0x42, 0x50, 0x6c, 0x64, + 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xfb, 0x7e, 0x73, 0x00, 0x06, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x40, 0xfb, 0xff, 0xff, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x00, 0x00, 0x00, 0x00, 0x62, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, + 0x49, 0x54, 0x55, 0x50, 0x4c, 0x54, 0x59, 0x50, 0x45, 0x33, 0x37, 0x44, 0x4d, 0x46, + ]; + + let data = std::io::Cursor::new(bytes); + + let _ = WebPDecoder::new(data); + } +} diff --git a/vendor/image/src/codecs/webp/encoder.rs b/vendor/image/src/codecs/webp/encoder.rs 
new file mode 100644 index 0000000..0383046 --- /dev/null +++ b/vendor/image/src/codecs/webp/encoder.rs @@ -0,0 +1,242 @@ +//! Encoding of WebP images. +/// +/// Uses the simple encoding API from the [libwebp] library. +/// +/// [libwebp]: https://developers.google.com/speed/webp/docs/api#simple_encoding_api +use std::io::Write; + +use libwebp::{Encoder, PixelLayout, WebPMemory}; + +use crate::error::{ + EncodingError, ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind, +}; +use crate::flat::SampleLayout; +use crate::{ColorType, ImageEncoder, ImageError, ImageFormat, ImageResult}; + +/// WebP Encoder. +pub struct WebPEncoder<W> { + inner: W, + quality: WebPQuality, +} + +/// WebP encoder quality. +#[derive(Debug, Copy, Clone)] +pub struct WebPQuality(Quality); + +#[derive(Debug, Copy, Clone)] +enum Quality { + Lossless, + Lossy(u8), +} + +impl WebPQuality { + /// Minimum lossy quality value (0). + pub const MIN: u8 = 0; + /// Maximum lossy quality value (100). + pub const MAX: u8 = 100; + /// Default lossy quality (80), providing a balance of quality and file size. + pub const DEFAULT: u8 = 80; + + /// Lossless encoding. + pub fn lossless() -> Self { + Self(Quality::Lossless) + } + + /// Lossy encoding. 0 = low quality, small size; 100 = high quality, large size. + /// + /// Values are clamped from 0 to 100. + pub fn lossy(quality: u8) -> Self { + Self(Quality::Lossy(quality.clamp(Self::MIN, Self::MAX))) + } +} + +impl Default for WebPQuality { + fn default() -> Self { + Self::lossy(WebPQuality::DEFAULT) + } +} + +impl<W: Write> WebPEncoder<W> { + /// Create a new encoder that writes its output to `w`. + /// + /// Defaults to lossy encoding, see [`WebPQuality::DEFAULT`]. + pub fn new(w: W) -> Self { + WebPEncoder::new_with_quality(w, WebPQuality::default()) + } + + /// Create a new encoder with the specified quality, that writes its output to `w`. + pub fn new_with_quality(w: W, quality: WebPQuality) -> Self { + Self { inner: w, quality } + } + + /// Encode image data with the indicated color type. + /// + /// The encoder requires image data be Rgb8 or Rgba8. + pub fn encode( + mut self, + data: &[u8], + width: u32, + height: u32, + color: ColorType, + ) -> ImageResult<()> { + // TODO: convert color types internally? + let layout = match color { + ColorType::Rgb8 => PixelLayout::Rgb, + ColorType::Rgba8 => PixelLayout::Rgba, + _ => { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::WebP.into(), + UnsupportedErrorKind::Color(color.into()), + ), + )) + } + }; + + // Validate dimensions upfront to avoid panics. + if width == 0 + || height == 0 + || !SampleLayout::row_major_packed(color.channel_count(), width, height) + .fits(data.len()) + { + return Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::DimensionMismatch, + ))); + } + + // Call the native libwebp library to encode the image. + let encoder = Encoder::new(data, layout, width, height); + let encoded: WebPMemory = match self.quality.0 { + Quality::Lossless => encoder.encode_lossless(), + Quality::Lossy(quality) => encoder.encode(quality as f32), + }; + + // The simple encoding API in libwebp does not return errors. 
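+ // An empty output buffer is therefore the only observable failure signal, so it is reported as a generic encoding error below.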
+ if encoded.is_empty() { + return Err(ImageError::Encoding(EncodingError::new( + ImageFormat::WebP.into(), + "encoding failed, output empty", + ))); + } + + self.inner.write_all(&encoded)?; + Ok(()) + } +} + +impl<W: Write> ImageEncoder for WebPEncoder<W> { + fn write_image( + self, + buf: &[u8], + width: u32, + height: u32, + color_type: ColorType, + ) -> ImageResult<()> { + self.encode(buf, width, height, color_type) + } +} + +#[cfg(test)] +mod tests { + use crate::codecs::webp::{WebPEncoder, WebPQuality}; + use crate::{ColorType, ImageEncoder}; + + #[test] + fn webp_lossless_deterministic() { + // 1x1 8-bit image buffer containing a single red pixel. + let rgb: &[u8] = &[255, 0, 0]; + let rgba: &[u8] = &[255, 0, 0, 128]; + for (color, img, expected) in [ + ( + ColorType::Rgb8, + rgb, + [ + 82, 73, 70, 70, 28, 0, 0, 0, 87, 69, 66, 80, 86, 80, 56, 76, 15, 0, 0, 0, 47, + 0, 0, 0, 0, 7, 16, 253, 143, 254, 7, 34, 162, 255, 1, 0, + ], + ), + ( + ColorType::Rgba8, + rgba, + [ + 82, 73, 70, 70, 28, 0, 0, 0, 87, 69, 66, 80, 86, 80, 56, 76, 15, 0, 0, 0, 47, + 0, 0, 0, 16, 7, 16, 253, 143, 2, 6, 34, 162, 255, 1, 0, + ], + ), + ] { + // Encode it into a memory buffer. + let mut encoded_img = Vec::new(); + { + let encoder = + WebPEncoder::new_with_quality(&mut encoded_img, WebPQuality::lossless()); + encoder + .write_image(&img, 1, 1, color) + .expect("image encoding failed"); + } + + // WebP encoding should be deterministic. + assert_eq!(encoded_img, expected); + } + } + + #[derive(Debug, Clone)] + struct MockImage { + width: u32, + height: u32, + color: ColorType, + data: Vec<u8>, + } + + impl quickcheck::Arbitrary for MockImage { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + // Limit to small, non-empty images <= 512x512. + let width = u32::arbitrary(g) % 512 + 1; + let height = u32::arbitrary(g) % 512 + 1; + let (color, stride) = if bool::arbitrary(g) { + (ColorType::Rgb8, 3) + } else { + (ColorType::Rgba8, 4) + }; + let size = width * height * stride; + let data: Vec<u8> = (0..size).map(|_| u8::arbitrary(g)).collect(); + MockImage { + width, + height, + color, + data, + } + } + } + + quickcheck! { + fn fuzz_webp_valid_image(image: MockImage, quality: u8) -> bool { + // Check valid images do not panic. + let mut buffer = Vec::<u8>::new(); + for webp_quality in [WebPQuality::lossless(), WebPQuality::lossy(quality)] { + buffer.clear(); + let encoder = WebPEncoder::new_with_quality(&mut buffer, webp_quality); + if !encoder + .write_image(&image.data, image.width, image.height, image.color) + .is_ok() { + return false; + } + } + true + } + + fn fuzz_webp_no_panic(data: Vec<u8>, width: u8, height: u8, quality: u8) -> bool { + // Check random (usually invalid) parameters do not panic. + let mut buffer = Vec::<u8>::new(); + for color in [ColorType::Rgb8, ColorType::Rgba8] { + for webp_quality in [WebPQuality::lossless(), WebPQuality::lossy(quality)] { + buffer.clear(); + let encoder = WebPEncoder::new_with_quality(&mut buffer, webp_quality); + // Ignore errors. 
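+ // Mismatched buffer sizes or zero dimensions should surface as a parameter error from the upfront validation rather than a panic inside libwebp.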
+ let _ = encoder + .write_image(&data, width as u32, height as u32, color); + } + } + true + } + } +} diff --git a/vendor/image/src/codecs/webp/extended.rs b/vendor/image/src/codecs/webp/extended.rs new file mode 100644 index 0000000..3dc6b34 --- /dev/null +++ b/vendor/image/src/codecs/webp/extended.rs @@ -0,0 +1,839 @@ +use std::convert::TryInto; +use std::io::{self, Cursor, Error, Read}; +use std::{error, fmt}; + +use super::decoder::{ + read_chunk, read_fourcc, read_len_cursor, DecoderError::ChunkHeaderInvalid, WebPRiffChunk, +}; +use super::lossless::{LosslessDecoder, LosslessFrame}; +use super::vp8::{Frame as VP8Frame, Vp8Decoder}; +use crate::error::{DecodingError, ParameterError, ParameterErrorKind}; +use crate::image::ImageFormat; +use crate::{ + ColorType, Delay, Frame, Frames, ImageError, ImageResult, Rgb, RgbImage, Rgba, RgbaImage, +}; +use byteorder::{LittleEndian, ReadBytesExt}; + +//all errors that can occur while parsing extended chunks in a WebP file +#[derive(Debug, Clone, Copy)] +enum DecoderError { + // Some bits were invalid + InfoBitsInvalid { name: &'static str, value: u32 }, + // Alpha chunk doesn't match the frame's size + AlphaChunkSizeMismatch, + // Image is too large, either for the platform's pointer size or generally + ImageTooLarge, + // Frame would go out of the canvas + FrameOutsideImage, +} + +impl fmt::Display for DecoderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DecoderError::InfoBitsInvalid { name, value } => f.write_fmt(format_args!( + "Info bits `{}` invalid, received value: {}", + name, value + )), + DecoderError::AlphaChunkSizeMismatch => { + f.write_str("Alpha chunk doesn't match the size of the frame") + } + DecoderError::ImageTooLarge => f.write_str("Image is too large to be decoded"), + DecoderError::FrameOutsideImage => { + f.write_str("Frame is too large and would go outside the image") + } + } + } +} + +impl From<DecoderError> for ImageError { + fn from(e: DecoderError) -> ImageError { + ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e)) + } +} + +impl error::Error for DecoderError {} + +#[derive(Debug, Clone)] +pub(crate) struct WebPExtendedInfo { + _icc_profile: bool, + _alpha: bool, + _exif_metadata: bool, + _xmp_metadata: bool, + _animation: bool, + canvas_width: u32, + canvas_height: u32, + icc_profile: Option<Vec<u8>>, +} + +#[derive(Debug)] +enum ExtendedImageData { + Animation { + frames: Vec<AnimatedFrame>, + anim_info: WebPAnimatedInfo, + }, + Static(WebPStatic), +} + +#[derive(Debug)] +pub(crate) struct ExtendedImage { + info: WebPExtendedInfo, + image: ExtendedImageData, +} + +impl ExtendedImage { + pub(crate) fn dimensions(&self) -> (u32, u32) { + (self.info.canvas_width, self.info.canvas_height) + } + + pub(crate) fn has_animation(&self) -> bool { + self.info._animation + } + + pub(crate) fn icc_profile(&self) -> Option<Vec<u8>> { + self.info.icc_profile.clone() + } + + pub(crate) fn color_type(&self) -> ColorType { + match &self.image { + ExtendedImageData::Animation { frames, .. 
} => &frames[0].image, + ExtendedImageData::Static(image) => image, + } + .color_type() + } + + pub(crate) fn into_frames<'a>(self) -> Frames<'a> { + struct FrameIterator { + image: ExtendedImage, + index: usize, + canvas: RgbaImage, + } + + impl Iterator for FrameIterator { + type Item = ImageResult<Frame>; + + fn next(&mut self) -> Option<Self::Item> { + if let ExtendedImageData::Animation { frames, anim_info } = &self.image.image { + let frame = frames.get(self.index); + match frame { + Some(anim_image) => { + self.index += 1; + ExtendedImage::draw_subimage( + &mut self.canvas, + anim_image, + anim_info.background_color, + ) + } + None => None, + } + } else { + None + } + } + } + + let width = self.info.canvas_width; + let height = self.info.canvas_height; + let background_color = + if let ExtendedImageData::Animation { ref anim_info, .. } = self.image { + anim_info.background_color + } else { + Rgba([0, 0, 0, 0]) + }; + + let frame_iter = FrameIterator { + image: self, + index: 0, + canvas: RgbaImage::from_pixel(width, height, background_color), + }; + + Frames::new(Box::new(frame_iter)) + } + + pub(crate) fn read_extended_chunks<R: Read>( + reader: &mut R, + mut info: WebPExtendedInfo, + ) -> ImageResult<ExtendedImage> { + let mut anim_info: Option<WebPAnimatedInfo> = None; + let mut anim_frames: Vec<AnimatedFrame> = Vec::new(); + let mut static_frame: Option<WebPStatic> = None; + //go until end of file and while chunk headers are valid + while let Some((mut cursor, chunk)) = read_extended_chunk(reader)? { + match chunk { + WebPRiffChunk::EXIF | WebPRiffChunk::XMP => { + //ignore these chunks + } + WebPRiffChunk::ANIM => { + if anim_info.is_none() { + anim_info = Some(Self::read_anim_info(&mut cursor)?); + } + } + WebPRiffChunk::ANMF => { + let frame = read_anim_frame(cursor, info.canvas_width, info.canvas_height)?; + anim_frames.push(frame); + } + WebPRiffChunk::ALPH => { + if static_frame.is_none() { + let alpha_chunk = + read_alpha_chunk(&mut cursor, info.canvas_width, info.canvas_height)?; + + let vp8_frame = read_lossy_with_chunk(reader)?; + + let img = WebPStatic::from_alpha_lossy(alpha_chunk, vp8_frame)?; + + static_frame = Some(img); + } + } + WebPRiffChunk::ICCP => { + let mut icc_profile = Vec::new(); + cursor.read_to_end(&mut icc_profile)?; + info.icc_profile = Some(icc_profile); + } + WebPRiffChunk::VP8 => { + if static_frame.is_none() { + let vp8_frame = read_lossy(cursor)?; + + let img = WebPStatic::from_lossy(vp8_frame)?; + + static_frame = Some(img); + } + } + WebPRiffChunk::VP8L => { + if static_frame.is_none() { + let mut lossless_decoder = LosslessDecoder::new(cursor); + let frame = lossless_decoder.decode_frame()?; + let image = WebPStatic::Lossless(frame.clone()); + + static_frame = Some(image); + } + } + _ => return Err(ChunkHeaderInvalid(chunk.to_fourcc()).into()), + } + } + + let image = if let Some(info) = anim_info { + if anim_frames.is_empty() { + return Err(ImageError::IoError(Error::from( + io::ErrorKind::UnexpectedEof, + ))); + } + ExtendedImageData::Animation { + frames: anim_frames, + anim_info: info, + } + } else if let Some(frame) = static_frame { + ExtendedImageData::Static(frame) + } else { + //reached end of file too early before image data was reached + return Err(ImageError::IoError(Error::from( + io::ErrorKind::UnexpectedEof, + ))); + }; + + let image = ExtendedImage { image, info }; + + Ok(image) + } + + fn read_anim_info<R: Read>(reader: &mut R) -> ImageResult<WebPAnimatedInfo> { + let mut colors: [u8; 4] = [0; 4]; + reader.read_exact(&mut 
colors)?; + + //background color is [blue, green, red, alpha] + let background_color = Rgba([colors[2], colors[1], colors[0], colors[3]]); + + let loop_count = reader.read_u16::<LittleEndian>()?; + + let info = WebPAnimatedInfo { + background_color, + _loop_count: loop_count, + }; + + Ok(info) + } + + fn draw_subimage( + canvas: &mut RgbaImage, + anim_image: &AnimatedFrame, + background_color: Rgba<u8>, + ) -> Option<ImageResult<Frame>> { + let mut buffer = vec![0; anim_image.image.get_buf_size()]; + anim_image.image.fill_buf(&mut buffer); + let has_alpha = anim_image.image.has_alpha(); + let pixel_len: u32 = anim_image.image.color_type().bytes_per_pixel().into(); + + 'x: for x in 0..anim_image.width { + for y in 0..anim_image.height { + let canvas_index: (u32, u32) = (x + anim_image.offset_x, y + anim_image.offset_y); + // Negative offsets are not possible due to unsigned ints + // If we go out of bounds by height, still continue by x + if canvas_index.1 >= canvas.height() { + continue 'x; + } + // If we go out of bounds by width, it doesn't make sense to continue at all + if canvas_index.0 >= canvas.width() { + break 'x; + } + let index: usize = ((y * anim_image.width + x) * pixel_len).try_into().unwrap(); + canvas[canvas_index] = if anim_image.use_alpha_blending && has_alpha { + let buffer: [u8; 4] = buffer[index..][..4].try_into().unwrap(); + ExtendedImage::do_alpha_blending(buffer, canvas[canvas_index]) + } else { + Rgba([ + buffer[index], + buffer[index + 1], + buffer[index + 2], + if has_alpha { buffer[index + 3] } else { 255 }, + ]) + }; + } + } + + let delay = Delay::from_numer_denom_ms(anim_image.duration, 1); + let img = canvas.clone(); + let frame = Frame::from_parts(img, 0, 0, delay); + + if anim_image.dispose { + for x in 0..anim_image.width { + for y in 0..anim_image.height { + let canvas_index = (x + anim_image.offset_x, y + anim_image.offset_y); + canvas[canvas_index] = background_color; + } + } + } + + Some(Ok(frame)) + } + + fn do_alpha_blending(buffer: [u8; 4], canvas: Rgba<u8>) -> Rgba<u8> { + let canvas_alpha = f64::from(canvas[3]); + let buffer_alpha = f64::from(buffer[3]); + let blend_alpha_f64 = buffer_alpha + canvas_alpha * (1.0 - buffer_alpha / 255.0); + //value should be between 0 and 255, this truncates the fractional part + let blend_alpha: u8 = blend_alpha_f64 as u8; + + let blend_rgb: [u8; 3] = if blend_alpha == 0 { + [0, 0, 0] + } else { + let mut rgb = [0u8; 3]; + for i in 0..3 { + let canvas_f64 = f64::from(canvas[i]); + let buffer_f64 = f64::from(buffer[i]); + + let val = (buffer_f64 * buffer_alpha + + canvas_f64 * canvas_alpha * (1.0 - buffer_alpha / 255.0)) + / blend_alpha_f64; + //value should be between 0 and 255, this truncates the fractional part + rgb[i] = val as u8; + } + + rgb + }; + + Rgba([blend_rgb[0], blend_rgb[1], blend_rgb[2], blend_alpha]) + } + + pub(crate) fn fill_buf(&self, buf: &mut [u8]) { + match &self.image { + // will always have at least one frame + ExtendedImageData::Animation { frames, anim_info } => { + let first_frame = &frames[0]; + let (canvas_width, canvas_height) = self.dimensions(); + if canvas_width == first_frame.width && canvas_height == first_frame.height { + first_frame.image.fill_buf(buf); + } else { + let bg_color = match &self.info._alpha { + true => Rgba::from([0, 0, 0, 0]), + false => anim_info.background_color, + }; + let mut canvas = RgbaImage::from_pixel(canvas_width, canvas_height, bg_color); + let _ = ExtendedImage::draw_subimage(&mut canvas, first_frame, bg_color) + .unwrap() + .unwrap(); + 
buf.copy_from_slice(canvas.into_raw().as_slice()); + } + } + ExtendedImageData::Static(image) => { + image.fill_buf(buf); + } + } + } + + pub(crate) fn get_buf_size(&self) -> usize { + match &self.image { + // will always have at least one frame + ExtendedImageData::Animation { frames, .. } => &frames[0].image, + ExtendedImageData::Static(image) => image, + } + .get_buf_size() + } + + pub(crate) fn set_background_color(&mut self, color: Rgba<u8>) -> ImageResult<()> { + match &mut self.image { + ExtendedImageData::Animation { anim_info, .. } => { + anim_info.background_color = color; + Ok(()) + } + _ => Err(ImageError::Parameter(ParameterError::from_kind( + ParameterErrorKind::Generic( + "Background color can only be set on animated webp".to_owned(), + ), + ))), + } + } +} + +#[derive(Debug)] +enum WebPStatic { + LossyWithAlpha(RgbaImage), + LossyWithoutAlpha(RgbImage), + Lossless(LosslessFrame), +} + +impl WebPStatic { + pub(crate) fn from_alpha_lossy( + alpha: AlphaChunk, + vp8_frame: VP8Frame, + ) -> ImageResult<WebPStatic> { + if alpha.data.len() != usize::from(vp8_frame.width) * usize::from(vp8_frame.height) { + return Err(DecoderError::AlphaChunkSizeMismatch.into()); + } + + let size = usize::from(vp8_frame.width).checked_mul(usize::from(vp8_frame.height) * 4); + let mut image_vec = match size { + Some(size) => vec![0u8; size], + None => return Err(DecoderError::ImageTooLarge.into()), + }; + + vp8_frame.fill_rgba(&mut image_vec); + + for y in 0..vp8_frame.height { + for x in 0..vp8_frame.width { + let predictor: u8 = WebPStatic::get_predictor( + x.into(), + y.into(), + vp8_frame.width.into(), + alpha.filtering_method, + &image_vec, + ); + let predictor = u16::from(predictor); + + let alpha_index = usize::from(y) * usize::from(vp8_frame.width) + usize::from(x); + let alpha_val = alpha.data[alpha_index]; + let alpha: u8 = ((predictor + u16::from(alpha_val)) % 256) + .try_into() + .unwrap(); + + let alpha_index = alpha_index * 4 + 3; + image_vec[alpha_index] = alpha; + } + } + + let image = RgbaImage::from_vec(vp8_frame.width.into(), vp8_frame.height.into(), image_vec) + .unwrap(); + + Ok(WebPStatic::LossyWithAlpha(image)) + } + + fn get_predictor( + x: usize, + y: usize, + width: usize, + filtering_method: FilteringMethod, + image_slice: &[u8], + ) -> u8 { + match filtering_method { + FilteringMethod::None => 0, + FilteringMethod::Horizontal => { + if x == 0 && y == 0 { + 0 + } else if x == 0 { + let index = (y - 1) * width + x; + image_slice[index * 4 + 3] + } else { + let index = y * width + x - 1; + image_slice[index * 4 + 3] + } + } + FilteringMethod::Vertical => { + if x == 0 && y == 0 { + 0 + } else if y == 0 { + let index = y * width + x - 1; + image_slice[index * 4 + 3] + } else { + let index = (y - 1) * width + x; + image_slice[index * 4 + 3] + } + } + FilteringMethod::Gradient => { + let (left, top, top_left) = match (x, y) { + (0, 0) => (0, 0, 0), + (0, y) => { + let above_index = (y - 1) * width + x; + let val = image_slice[above_index * 4 + 3]; + (val, val, val) + } + (x, 0) => { + let before_index = y * width + x - 1; + let val = image_slice[before_index * 4 + 3]; + (val, val, val) + } + (x, y) => { + let left_index = y * width + x - 1; + let left = image_slice[left_index * 4 + 3]; + let top_index = (y - 1) * width + x; + let top = image_slice[top_index * 4 + 3]; + let top_left_index = (y - 1) * width + x - 1; + let top_left = image_slice[top_left_index * 4 + 3]; + + (left, top, top_left) + } + }; + + let combination = i16::from(left) + i16::from(top) - 
i16::from(top_left); + i16::clamp(combination, 0, 255).try_into().unwrap() + } + } + } + + pub(crate) fn from_lossy(vp8_frame: VP8Frame) -> ImageResult<WebPStatic> { + let mut image = RgbImage::from_pixel( + vp8_frame.width.into(), + vp8_frame.height.into(), + Rgb([0, 0, 0]), + ); + + vp8_frame.fill_rgb(&mut image); + + Ok(WebPStatic::LossyWithoutAlpha(image)) + } + + pub(crate) fn fill_buf(&self, buf: &mut [u8]) { + match self { + WebPStatic::LossyWithAlpha(image) => { + buf.copy_from_slice(image); + } + WebPStatic::LossyWithoutAlpha(image) => { + buf.copy_from_slice(image); + } + WebPStatic::Lossless(lossless) => { + lossless.fill_rgba(buf); + } + } + } + + pub(crate) fn get_buf_size(&self) -> usize { + match self { + WebPStatic::LossyWithAlpha(rgb_image) => rgb_image.len(), + WebPStatic::LossyWithoutAlpha(rgba_image) => rgba_image.len(), + WebPStatic::Lossless(lossless) => lossless.get_buf_size(), + } + } + + pub(crate) fn color_type(&self) -> ColorType { + if self.has_alpha() { + ColorType::Rgba8 + } else { + ColorType::Rgb8 + } + } + + pub(crate) fn has_alpha(&self) -> bool { + match self { + Self::LossyWithAlpha(..) | Self::Lossless(..) => true, + Self::LossyWithoutAlpha(..) => false, + } + } +} + +#[derive(Debug)] +struct WebPAnimatedInfo { + background_color: Rgba<u8>, + _loop_count: u16, +} + +#[derive(Debug)] +struct AnimatedFrame { + offset_x: u32, + offset_y: u32, + width: u32, + height: u32, + duration: u32, + use_alpha_blending: bool, + dispose: bool, + image: WebPStatic, +} + +/// Reads a chunk, but silently ignores unknown chunks at the end of a file +fn read_extended_chunk<R>(r: &mut R) -> ImageResult<Option<(Cursor<Vec<u8>>, WebPRiffChunk)>> +where + R: Read, +{ + let mut unknown_chunk = Ok(()); + + while let Some(chunk) = read_fourcc(r)? { + let cursor = read_len_cursor(r)?; + match chunk { + Ok(chunk) => return unknown_chunk.and(Ok(Some((cursor, chunk)))), + Err(err) => unknown_chunk = unknown_chunk.and(Err(err)), + } + } + + Ok(None) +} + +pub(crate) fn read_extended_header<R: Read>(reader: &mut R) -> ImageResult<WebPExtendedInfo> { + let chunk_flags = reader.read_u8()?; + + let reserved_first = chunk_flags & 0b11000000; + let icc_profile = chunk_flags & 0b00100000 != 0; + let alpha = chunk_flags & 0b00010000 != 0; + let exif_metadata = chunk_flags & 0b00001000 != 0; + let xmp_metadata = chunk_flags & 0b00000100 != 0; + let animation = chunk_flags & 0b00000010 != 0; + let reserved_second = chunk_flags & 0b00000001; + + let reserved_third = read_3_bytes(reader)?; + + if reserved_first != 0 || reserved_second != 0 || reserved_third != 0 { + let value: u32 = if reserved_first != 0 { + reserved_first.into() + } else if reserved_second != 0 { + reserved_second.into() + } else { + reserved_third + }; + return Err(DecoderError::InfoBitsInvalid { + name: "reserved", + value, + } + .into()); + } + + let canvas_width = read_3_bytes(reader)? + 1; + let canvas_height = read_3_bytes(reader)? 
+ 1; + + //product of canvas dimensions cannot be larger than u32 max + if u32::checked_mul(canvas_width, canvas_height).is_none() { + return Err(DecoderError::ImageTooLarge.into()); + } + + let info = WebPExtendedInfo { + _icc_profile: icc_profile, + _alpha: alpha, + _exif_metadata: exif_metadata, + _xmp_metadata: xmp_metadata, + _animation: animation, + canvas_width, + canvas_height, + icc_profile: None, + }; + + Ok(info) +} + +fn read_anim_frame<R: Read>( + mut reader: R, + canvas_width: u32, + canvas_height: u32, +) -> ImageResult<AnimatedFrame> { + //offsets for the frames are twice the values + let frame_x = read_3_bytes(&mut reader)? * 2; + let frame_y = read_3_bytes(&mut reader)? * 2; + + let frame_width = read_3_bytes(&mut reader)? + 1; + let frame_height = read_3_bytes(&mut reader)? + 1; + + if frame_x + frame_width > canvas_width || frame_y + frame_height > canvas_height { + return Err(DecoderError::FrameOutsideImage.into()); + } + + let duration = read_3_bytes(&mut reader)?; + + let frame_info = reader.read_u8()?; + let reserved = frame_info & 0b11111100; + if reserved != 0 { + return Err(DecoderError::InfoBitsInvalid { + name: "reserved", + value: reserved.into(), + } + .into()); + } + let use_alpha_blending = frame_info & 0b00000010 == 0; + let dispose = frame_info & 0b00000001 != 0; + + //read normal bitstream now + let static_image = read_image(&mut reader, frame_width, frame_height)?; + + let frame = AnimatedFrame { + offset_x: frame_x, + offset_y: frame_y, + width: frame_width, + height: frame_height, + duration, + use_alpha_blending, + dispose, + image: static_image, + }; + + Ok(frame) +} + +fn read_3_bytes<R: Read>(reader: &mut R) -> ImageResult<u32> { + let mut buffer: [u8; 3] = [0; 3]; + reader.read_exact(&mut buffer)?; + let value: u32 = + (u32::from(buffer[2]) << 16) | (u32::from(buffer[1]) << 8) | u32::from(buffer[0]); + Ok(value) +} + +fn read_lossy_with_chunk<R: Read>(reader: &mut R) -> ImageResult<VP8Frame> { + let (cursor, chunk) = + read_chunk(reader)?.ok_or_else(|| Error::from(io::ErrorKind::UnexpectedEof))?; + + if chunk != WebPRiffChunk::VP8 { + return Err(ChunkHeaderInvalid(chunk.to_fourcc()).into()); + } + + read_lossy(cursor) +} + +fn read_lossy(cursor: Cursor<Vec<u8>>) -> ImageResult<VP8Frame> { + let mut vp8_decoder = Vp8Decoder::new(cursor); + let frame = vp8_decoder.decode_frame()?; + + Ok(frame.clone()) +} + +fn read_image<R: Read>(reader: &mut R, width: u32, height: u32) -> ImageResult<WebPStatic> { + let chunk = read_chunk(reader)?; + + match chunk { + Some((cursor, WebPRiffChunk::VP8)) => { + let mut vp8_decoder = Vp8Decoder::new(cursor); + let frame = vp8_decoder.decode_frame()?; + + let img = WebPStatic::from_lossy(frame.clone())?; + + Ok(img) + } + Some((cursor, WebPRiffChunk::VP8L)) => { + let mut lossless_decoder = LosslessDecoder::new(cursor); + let frame = lossless_decoder.decode_frame()?; + + let img = WebPStatic::Lossless(frame.clone()); + + Ok(img) + } + Some((mut cursor, WebPRiffChunk::ALPH)) => { + let alpha_chunk = read_alpha_chunk(&mut cursor, width, height)?; + + let vp8_frame = read_lossy_with_chunk(reader)?; + + let img = WebPStatic::from_alpha_lossy(alpha_chunk, vp8_frame)?; + + Ok(img) + } + None => Err(ImageError::IoError(Error::from( + io::ErrorKind::UnexpectedEof, + ))), + Some((_, chunk)) => Err(ChunkHeaderInvalid(chunk.to_fourcc()).into()), + } +} + +#[derive(Debug)] +struct AlphaChunk { + _preprocessing: bool, + filtering_method: FilteringMethod, + data: Vec<u8>, +} + +#[derive(Debug, Copy, Clone)] +enum FilteringMethod { 
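+ // Alpha-plane prediction filter read from the ALPH chunk header; the matching predictors are implemented in `WebPStatic::get_predictor`.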
+ None, + Horizontal, + Vertical, + Gradient, +} + +fn read_alpha_chunk<R: Read>(reader: &mut R, width: u32, height: u32) -> ImageResult<AlphaChunk> { + let info_byte = reader.read_u8()?; + + let reserved = info_byte & 0b11000000; + let preprocessing = (info_byte & 0b00110000) >> 4; + let filtering = (info_byte & 0b00001100) >> 2; + let compression = info_byte & 0b00000011; + + if reserved != 0 { + return Err(DecoderError::InfoBitsInvalid { + name: "reserved", + value: reserved.into(), + } + .into()); + } + + let preprocessing = match preprocessing { + 0 => false, + 1 => true, + _ => { + return Err(DecoderError::InfoBitsInvalid { + name: "reserved", + value: preprocessing.into(), + } + .into()) + } + }; + + let filtering_method = match filtering { + 0 => FilteringMethod::None, + 1 => FilteringMethod::Horizontal, + 2 => FilteringMethod::Vertical, + 3 => FilteringMethod::Gradient, + _ => unreachable!(), + }; + + let lossless_compression = match compression { + 0 => false, + 1 => true, + _ => { + return Err(DecoderError::InfoBitsInvalid { + name: "lossless compression", + value: compression.into(), + } + .into()) + } + }; + + let mut framedata = Vec::new(); + reader.read_to_end(&mut framedata)?; + + let data = if lossless_compression { + let cursor = io::Cursor::new(framedata); + + let mut decoder = LosslessDecoder::new(cursor); + //this is a potential problem for large images; would require rewriting lossless decoder to use u32 for width and height + let width: u16 = width + .try_into() + .map_err(|_| ImageError::from(DecoderError::ImageTooLarge))?; + let height: u16 = height + .try_into() + .map_err(|_| ImageError::from(DecoderError::ImageTooLarge))?; + let frame = decoder.decode_frame_implicit_dims(width, height)?; + + let mut data = vec![0u8; usize::from(width) * usize::from(height)]; + + frame.fill_green(&mut data); + + data + } else { + framedata + }; + + let chunk = AlphaChunk { + _preprocessing: preprocessing, + filtering_method, + data, + }; + + Ok(chunk) +} diff --git a/vendor/image/src/codecs/webp/huffman.rs b/vendor/image/src/codecs/webp/huffman.rs new file mode 100644 index 0000000..986eee6 --- /dev/null +++ b/vendor/image/src/codecs/webp/huffman.rs @@ -0,0 +1,202 @@ +use std::convert::TryInto; + +use super::lossless::BitReader; +use super::lossless::DecoderError; +use crate::ImageResult; + +/// Rudimentary utility for reading Canonical Huffman Codes. 
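+/// Code lengths are converted to canonical codes and then inserted symbol by symbol into an explicitly stored binary tree.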
+/// Based off https://github.com/webmproject/libwebp/blob/7f8472a610b61ec780ef0a8873cd954ac512a505/src/utils/huffman.c +/// + +const MAX_ALLOWED_CODE_LENGTH: usize = 15; + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum HuffmanTreeNode { + Branch(usize), //offset in vector to children + Leaf(u16), //symbol stored in leaf + Empty, +} + +/// Huffman tree +#[derive(Clone, Debug, Default)] +pub(crate) struct HuffmanTree { + tree: Vec<HuffmanTreeNode>, + max_nodes: usize, + num_nodes: usize, +} + +impl HuffmanTree { + fn is_full(&self) -> bool { + self.num_nodes == self.max_nodes + } + + /// Turns a node from empty into a branch and assigns its children + fn assign_children(&mut self, node_index: usize) -> usize { + let offset_index = self.num_nodes - node_index; + self.tree[node_index] = HuffmanTreeNode::Branch(offset_index); + self.num_nodes += 2; + + offset_index + } + + /// Init a huffman tree + fn init(num_leaves: usize) -> ImageResult<HuffmanTree> { + if num_leaves == 0 { + return Err(DecoderError::HuffmanError.into()); + } + + let max_nodes = 2 * num_leaves - 1; + let tree = vec![HuffmanTreeNode::Empty; max_nodes]; + let num_nodes = 1; + + let tree = HuffmanTree { + tree, + max_nodes, + num_nodes, + }; + + Ok(tree) + } + + /// Converts code lengths to codes + fn code_lengths_to_codes(code_lengths: &[u16]) -> ImageResult<Vec<Option<u16>>> { + let max_code_length = *code_lengths + .iter() + .reduce(|a, b| if a >= b { a } else { b }) + .unwrap(); + + if max_code_length > MAX_ALLOWED_CODE_LENGTH.try_into().unwrap() { + return Err(DecoderError::HuffmanError.into()); + } + + let mut code_length_hist = vec![0; MAX_ALLOWED_CODE_LENGTH + 1]; + + for &length in code_lengths.iter() { + code_length_hist[usize::from(length)] += 1; + } + + code_length_hist[0] = 0; + + let mut curr_code = 0; + let mut next_codes = vec![None; MAX_ALLOWED_CODE_LENGTH + 1]; + + for code_len in 1..=usize::from(max_code_length) { + curr_code = (curr_code + code_length_hist[code_len - 1]) << 1; + next_codes[code_len] = Some(curr_code); + } + + let mut huff_codes = vec![None; code_lengths.len()]; + + for (symbol, &length) in code_lengths.iter().enumerate() { + let length = usize::from(length); + if length > 0 { + huff_codes[symbol] = next_codes[length]; + if let Some(value) = next_codes[length].as_mut() { + *value += 1; + } + } else { + huff_codes[symbol] = None; + } + } + + Ok(huff_codes) + } + + /// Adds a symbol to a huffman tree + fn add_symbol(&mut self, symbol: u16, code: u16, code_length: u16) -> ImageResult<()> { + let mut node_index = 0; + let code = usize::from(code); + + for length in (0..code_length).rev() { + if node_index >= self.max_nodes { + return Err(DecoderError::HuffmanError.into()); + } + + let node = self.tree[node_index]; + + let offset = match node { + HuffmanTreeNode::Empty => { + if self.is_full() { + return Err(DecoderError::HuffmanError.into()); + } + self.assign_children(node_index) + } + HuffmanTreeNode::Leaf(_) => return Err(DecoderError::HuffmanError.into()), + HuffmanTreeNode::Branch(offset) => offset, + }; + + node_index += offset + ((code >> length) & 1); + } + + match self.tree[node_index] { + HuffmanTreeNode::Empty => self.tree[node_index] = HuffmanTreeNode::Leaf(symbol), + HuffmanTreeNode::Leaf(_) => return Err(DecoderError::HuffmanError.into()), + HuffmanTreeNode::Branch(_offset) => return Err(DecoderError::HuffmanError.into()), + } + + Ok(()) + } + + /// Builds a tree implicitly, just from code lengths + pub(crate) fn build_implicit(code_lengths: Vec<u16>) -> 
ImageResult<HuffmanTree> { + let mut num_symbols = 0; + let mut root_symbol = 0; + + for (symbol, length) in code_lengths.iter().enumerate() { + if *length > 0 { + num_symbols += 1; + root_symbol = symbol.try_into().unwrap(); + } + } + + let mut tree = HuffmanTree::init(num_symbols)?; + + if num_symbols == 1 { + tree.add_symbol(root_symbol, 0, 0)?; + } else { + let codes = HuffmanTree::code_lengths_to_codes(&code_lengths)?; + + for (symbol, &length) in code_lengths.iter().enumerate() { + if length > 0 && codes[symbol].is_some() { + tree.add_symbol(symbol.try_into().unwrap(), codes[symbol].unwrap(), length)?; + } + } + } + + Ok(tree) + } + + /// Builds a tree explicitly from lengths, codes and symbols + pub(crate) fn build_explicit( + code_lengths: Vec<u16>, + codes: Vec<u16>, + symbols: Vec<u16>, + ) -> ImageResult<HuffmanTree> { + let mut tree = HuffmanTree::init(symbols.len())?; + + for i in 0..symbols.len() { + tree.add_symbol(symbols[i], codes[i], code_lengths[i])?; + } + + Ok(tree) + } + + /// Reads a symbol using the bitstream + pub(crate) fn read_symbol(&self, bit_reader: &mut BitReader) -> ImageResult<u16> { + let mut index = 0; + let mut node = self.tree[index]; + + while let HuffmanTreeNode::Branch(children_offset) = node { + index += children_offset + bit_reader.read_bits::<usize>(1)?; + node = self.tree[index]; + } + + let symbol = match node { + HuffmanTreeNode::Branch(_) => unreachable!(), + HuffmanTreeNode::Empty => return Err(DecoderError::HuffmanError.into()), + HuffmanTreeNode::Leaf(symbol) => symbol, + }; + + Ok(symbol) + } +} diff --git a/vendor/image/src/codecs/webp/loop_filter.rs b/vendor/image/src/codecs/webp/loop_filter.rs new file mode 100644 index 0000000..312059f --- /dev/null +++ b/vendor/image/src/codecs/webp/loop_filter.rs @@ -0,0 +1,147 @@ +//! 
Does loop filtering on webp lossy images + +use crate::utils::clamp; + +#[inline] +fn c(val: i32) -> i32 { + clamp(val, -128, 127) +} + +//unsigned to signed +#[inline] +fn u2s(val: u8) -> i32 { + i32::from(val) - 128 +} + +//signed to unsigned +#[inline] +fn s2u(val: i32) -> u8 { + (c(val) + 128) as u8 +} + +#[inline] +fn diff(val1: u8, val2: u8) -> u8 { + if val1 > val2 { + val1 - val2 + } else { + val2 - val1 + } +} + +//15.2 +fn common_adjust(use_outer_taps: bool, pixels: &mut [u8], point: usize, stride: usize) -> i32 { + let p1 = u2s(pixels[point - 2 * stride]); + let p0 = u2s(pixels[point - stride]); + let q0 = u2s(pixels[point]); + let q1 = u2s(pixels[point + stride]); + + //value for the outer 2 pixels + let outer = if use_outer_taps { c(p1 - q1) } else { 0 }; + + let mut a = c(outer + 3 * (q0 - p0)); + + let b = (c(a + 3)) >> 3; + + a = (c(a + 4)) >> 3; + + pixels[point] = s2u(q0 - a); + pixels[point - stride] = s2u(p0 + b); + + a +} + +fn simple_threshold(filter_limit: i32, pixels: &[u8], point: usize, stride: usize) -> bool { + i32::from(diff(pixels[point - stride], pixels[point])) * 2 + + i32::from(diff(pixels[point - 2 * stride], pixels[point + stride])) / 2 + <= filter_limit +} + +fn should_filter( + interior_limit: u8, + edge_limit: u8, + pixels: &[u8], + point: usize, + stride: usize, +) -> bool { + simple_threshold(i32::from(edge_limit), pixels, point, stride) + && diff(pixels[point - 4 * stride], pixels[point - 3 * stride]) <= interior_limit + && diff(pixels[point - 3 * stride], pixels[point - 2 * stride]) <= interior_limit + && diff(pixels[point - 2 * stride], pixels[point - stride]) <= interior_limit + && diff(pixels[point + 3 * stride], pixels[point + 2 * stride]) <= interior_limit + && diff(pixels[point + 2 * stride], pixels[point + stride]) <= interior_limit + && diff(pixels[point + stride], pixels[point]) <= interior_limit +} + +fn high_edge_variance(threshold: u8, pixels: &[u8], point: usize, stride: usize) -> bool { + diff(pixels[point - 2 * stride], pixels[point - stride]) > threshold + || diff(pixels[point + stride], pixels[point]) > threshold +} + +//simple filter +//effects 4 pixels on an edge(2 each side) +pub(crate) fn simple_segment(edge_limit: u8, pixels: &mut [u8], point: usize, stride: usize) { + if simple_threshold(i32::from(edge_limit), pixels, point, stride) { + common_adjust(true, pixels, point, stride); + } +} + +//normal filter +//works on the 8 pixels on the edges between subblocks inside a macroblock +pub(crate) fn subblock_filter( + hev_threshold: u8, + interior_limit: u8, + edge_limit: u8, + pixels: &mut [u8], + point: usize, + stride: usize, +) { + if should_filter(interior_limit, edge_limit, pixels, point, stride) { + let hv = high_edge_variance(hev_threshold, pixels, point, stride); + + let a = (common_adjust(hv, pixels, point, stride) + 1) >> 1; + + if !hv { + pixels[point + stride] = s2u(u2s(pixels[point + stride]) - a); + pixels[point - 2 * stride] = s2u(u2s(pixels[point - 2 * stride]) - a); + } + } +} + +//normal filter +//works on the 8 pixels on the edges between macroblocks +pub(crate) fn macroblock_filter( + hev_threshold: u8, + interior_limit: u8, + edge_limit: u8, + pixels: &mut [u8], + point: usize, + stride: usize, +) { + let mut spixels = [0i32; 8]; + for i in 0..8 { + spixels[i] = u2s(pixels[point + i * stride - 4 * stride]); + } + + if should_filter(interior_limit, edge_limit, pixels, point, stride) { + if !high_edge_variance(hev_threshold, pixels, point, stride) { + let w = c(c(spixels[2] - spixels[5]) + 3 * (spixels[4] - 
spixels[3])); + + let mut a = c((27 * w + 63) >> 7); + + pixels[point] = s2u(spixels[4] - a); + pixels[point - stride] = s2u(spixels[3] + a); + + a = c((18 * w + 63) >> 7); + + pixels[point + stride] = s2u(spixels[5] - a); + pixels[point - 2 * stride] = s2u(spixels[2] + a); + + a = c((9 * w + 63) >> 7); + + pixels[point + 2 * stride] = s2u(spixels[6] - a); + pixels[point - 3 * stride] = s2u(spixels[1] + a); + } else { + common_adjust(true, pixels, point, stride); + } + } +} diff --git a/vendor/image/src/codecs/webp/lossless.rs b/vendor/image/src/codecs/webp/lossless.rs new file mode 100644 index 0000000..7271eda --- /dev/null +++ b/vendor/image/src/codecs/webp/lossless.rs @@ -0,0 +1,783 @@ +//! Decoding of lossless WebP images +//! +//! [Lossless spec](https://developers.google.com/speed/webp/docs/webp_lossless_bitstream_specification) +//! + +use std::{ + convert::TryFrom, + convert::TryInto, + error, fmt, + io::Read, + ops::{AddAssign, Shl}, +}; + +use byteorder::ReadBytesExt; + +use crate::{error::DecodingError, ImageError, ImageFormat, ImageResult}; + +use super::huffman::HuffmanTree; +use super::lossless_transform::{add_pixels, TransformType}; + +const CODE_LENGTH_CODES: usize = 19; +const CODE_LENGTH_CODE_ORDER: [usize; CODE_LENGTH_CODES] = [ + 17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +]; + +#[rustfmt::skip] +const DISTANCE_MAP: [(i8, i8); 120] = [ + (0, 1), (1, 0), (1, 1), (-1, 1), (0, 2), (2, 0), (1, 2), (-1, 2), + (2, 1), (-2, 1), (2, 2), (-2, 2), (0, 3), (3, 0), (1, 3), (-1, 3), + (3, 1), (-3, 1), (2, 3), (-2, 3), (3, 2), (-3, 2), (0, 4), (4, 0), + (1, 4), (-1, 4), (4, 1), (-4, 1), (3, 3), (-3, 3), (2, 4), (-2, 4), + (4, 2), (-4, 2), (0, 5), (3, 4), (-3, 4), (4, 3), (-4, 3), (5, 0), + (1, 5), (-1, 5), (5, 1), (-5, 1), (2, 5), (-2, 5), (5, 2), (-5, 2), + (4, 4), (-4, 4), (3, 5), (-3, 5), (5, 3), (-5, 3), (0, 6), (6, 0), + (1, 6), (-1, 6), (6, 1), (-6, 1), (2, 6), (-2, 6), (6, 2), (-6, 2), + (4, 5), (-4, 5), (5, 4), (-5, 4), (3, 6), (-3, 6), (6, 3), (-6, 3), + (0, 7), (7, 0), (1, 7), (-1, 7), (5, 5), (-5, 5), (7, 1), (-7, 1), + (4, 6), (-4, 6), (6, 4), (-6, 4), (2, 7), (-2, 7), (7, 2), (-7, 2), + (3, 7), (-3, 7), (7, 3), (-7, 3), (5, 6), (-5, 6), (6, 5), (-6, 5), + (8, 0), (4, 7), (-4, 7), (7, 4), (-7, 4), (8, 1), (8, 2), (6, 6), + (-6, 6), (8, 3), (5, 7), (-5, 7), (7, 5), (-7, 5), (8, 4), (6, 7), + (-6, 7), (7, 6), (-7, 6), (8, 5), (7, 7), (-7, 7), (8, 6), (8, 7) +]; + +const GREEN: usize = 0; +const RED: usize = 1; +const BLUE: usize = 2; +const ALPHA: usize = 3; +const DIST: usize = 4; + +const HUFFMAN_CODES_PER_META_CODE: usize = 5; + +type HuffmanCodeGroup = [HuffmanTree; HUFFMAN_CODES_PER_META_CODE]; + +const ALPHABET_SIZE: [u16; HUFFMAN_CODES_PER_META_CODE] = [256 + 24, 256, 256, 256, 40]; + +#[inline] +pub(crate) fn subsample_size(size: u16, bits: u8) -> u16 { + ((u32::from(size) + (1u32 << bits) - 1) >> bits) + .try_into() + .unwrap() +} + +#[derive(Debug, Clone, Copy)] +pub(crate) enum DecoderError { + /// Signature of 0x2f not found + LosslessSignatureInvalid(u8), + /// Version Number must be 0 + VersionNumberInvalid(u8), + + /// + InvalidColorCacheBits(u8), + + HuffmanError, + + BitStreamError, + + TransformError, +} + +impl fmt::Display for DecoderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DecoderError::LosslessSignatureInvalid(sig) => { + f.write_fmt(format_args!("Invalid lossless signature: {}", sig)) + } + DecoderError::VersionNumberInvalid(num) => { + f.write_fmt(format_args!("Invalid version 
number: {}", num)) + } + DecoderError::InvalidColorCacheBits(num) => f.write_fmt(format_args!( + "Invalid color cache(must be between 1-11): {}", + num + )), + DecoderError::HuffmanError => f.write_fmt(format_args!("Error building Huffman Tree")), + DecoderError::BitStreamError => { + f.write_fmt(format_args!("Error while reading bitstream")) + } + DecoderError::TransformError => { + f.write_fmt(format_args!("Error while reading or writing transforms")) + } + } + } +} + +impl From<DecoderError> for ImageError { + fn from(e: DecoderError) -> ImageError { + ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e)) + } +} + +impl error::Error for DecoderError {} + +const NUM_TRANSFORM_TYPES: usize = 4; + +//Decodes lossless WebP images +#[derive(Debug)] +pub(crate) struct LosslessDecoder<R> { + r: R, + bit_reader: BitReader, + frame: LosslessFrame, + transforms: [Option<TransformType>; NUM_TRANSFORM_TYPES], + transform_order: Vec<u8>, +} + +impl<R: Read> LosslessDecoder<R> { + /// Create a new decoder + pub(crate) fn new(r: R) -> LosslessDecoder<R> { + LosslessDecoder { + r, + bit_reader: BitReader::new(), + frame: Default::default(), + transforms: [None, None, None, None], + transform_order: Vec::new(), + } + } + + /// Reads the frame + pub(crate) fn decode_frame(&mut self) -> ImageResult<&LosslessFrame> { + let signature = self.r.read_u8()?; + + if signature != 0x2f { + return Err(DecoderError::LosslessSignatureInvalid(signature).into()); + } + + let mut buf = Vec::new(); + self.r.read_to_end(&mut buf)?; + self.bit_reader.init(buf); + + self.frame.width = self.bit_reader.read_bits::<u16>(14)? + 1; + self.frame.height = self.bit_reader.read_bits::<u16>(14)? + 1; + + let _alpha_used = self.bit_reader.read_bits::<u8>(1)?; + + let version_num = self.bit_reader.read_bits::<u8>(3)?; + + if version_num != 0 { + return Err(DecoderError::VersionNumberInvalid(version_num).into()); + } + + let mut data = self.decode_image_stream(self.frame.width, self.frame.height, true)?; + + for &trans_index in self.transform_order.iter().rev() { + let trans = self.transforms[usize::from(trans_index)].as_ref().unwrap(); + trans.apply_transform(&mut data, self.frame.width, self.frame.height)?; + } + + self.frame.buf = data; + Ok(&self.frame) + } + + //used for alpha data in extended decoding + pub(crate) fn decode_frame_implicit_dims( + &mut self, + width: u16, + height: u16, + ) -> ImageResult<&LosslessFrame> { + let mut buf = Vec::new(); + self.r.read_to_end(&mut buf)?; + self.bit_reader.init(buf); + + self.frame.width = width; + self.frame.height = height; + + let mut data = self.decode_image_stream(self.frame.width, self.frame.height, true)?; + + //transform_order is vector of indices(0-3) into transforms in order decoded + for &trans_index in self.transform_order.iter().rev() { + let trans = self.transforms[usize::from(trans_index)].as_ref().unwrap(); + trans.apply_transform(&mut data, self.frame.width, self.frame.height)?; + } + + self.frame.buf = data; + Ok(&self.frame) + } + + /// Reads Image data from the bitstream + /// Can be in any of the 5 roles described in the Specification + /// ARGB Image role has different behaviour to the other 4 + /// xsize and ysize describe the size of the blocks where each block has its own entropy code + fn decode_image_stream( + &mut self, + xsize: u16, + ysize: u16, + is_argb_img: bool, + ) -> ImageResult<Vec<u32>> { + let trans_xsize = if is_argb_img { + self.read_transforms()? 
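+ // Only the top-level ARGB image role carries transforms (per the lossless spec);
+ // they are read up front here, and a colour-indexing transform with a small table
+ // bundles several pixels per byte, so the width this returns (and which the
+ // entropy-coded data below is stored at) can be smaller than `xsize`.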
+ } else { + xsize + }; + + let color_cache_bits = self.read_color_cache()?; + + let color_cache = color_cache_bits.map(|bits| { + let size = 1 << bits; + let cache = vec![0u32; size]; + ColorCache { + color_cache_bits: bits, + color_cache: cache, + } + }); + + let huffman_info = self.read_huffman_codes(is_argb_img, trans_xsize, ysize, color_cache)?; + + //decode data + let data = self.decode_image_data(trans_xsize, ysize, huffman_info)?; + + Ok(data) + } + + /// Reads transforms and their data from the bitstream + fn read_transforms(&mut self) -> ImageResult<u16> { + let mut xsize = self.frame.width; + + while self.bit_reader.read_bits::<u8>(1)? == 1 { + let transform_type_val = self.bit_reader.read_bits::<u8>(2)?; + + if self.transforms[usize::from(transform_type_val)].is_some() { + //can only have one of each transform, error + return Err(DecoderError::TransformError.into()); + } + + self.transform_order.push(transform_type_val); + + let transform_type = match transform_type_val { + 0 => { + //predictor + + let size_bits = self.bit_reader.read_bits::<u8>(3)? + 2; + + let block_xsize = subsample_size(xsize, size_bits); + let block_ysize = subsample_size(self.frame.height, size_bits); + + let data = self.decode_image_stream(block_xsize, block_ysize, false)?; + + TransformType::PredictorTransform { + size_bits, + predictor_data: data, + } + } + 1 => { + //color transform + + let size_bits = self.bit_reader.read_bits::<u8>(3)? + 2; + + let block_xsize = subsample_size(xsize, size_bits); + let block_ysize = subsample_size(self.frame.height, size_bits); + + let data = self.decode_image_stream(block_xsize, block_ysize, false)?; + + TransformType::ColorTransform { + size_bits, + transform_data: data, + } + } + 2 => { + //subtract green + + TransformType::SubtractGreen + } + 3 => { + let color_table_size = self.bit_reader.read_bits::<u16>(8)? + 1; + + let mut color_map = self.decode_image_stream(color_table_size, 1, false)?; + + let bits = if color_table_size <= 2 { + 3 + } else if color_table_size <= 4 { + 2 + } else if color_table_size <= 16 { + 1 + } else { + 0 + }; + xsize = subsample_size(xsize, bits); + + Self::adjust_color_map(&mut color_map); + + TransformType::ColorIndexingTransform { + table_size: color_table_size, + table_data: color_map, + } + } + _ => unreachable!(), + }; + + self.transforms[usize::from(transform_type_val)] = Some(transform_type); + } + + Ok(xsize) + } + + /// Adjusts the color map since it's subtraction coded + fn adjust_color_map(color_map: &mut Vec<u32>) { + for i in 1..color_map.len() { + color_map[i] = add_pixels(color_map[i], color_map[i - 1]); + } + } + + /// Reads huffman codes associated with an image + fn read_huffman_codes( + &mut self, + read_meta: bool, + xsize: u16, + ysize: u16, + color_cache: Option<ColorCache>, + ) -> ImageResult<HuffmanInfo> { + let mut num_huff_groups = 1; + + let mut huffman_bits = 0; + let mut huffman_xsize = 1; + let mut huffman_ysize = 1; + let mut entropy_image = Vec::new(); + + if read_meta && self.bit_reader.read_bits::<u8>(1)? == 1 { + //meta huffman codes + huffman_bits = self.bit_reader.read_bits::<u8>(3)? 
+ 2; + huffman_xsize = subsample_size(xsize, huffman_bits); + huffman_ysize = subsample_size(ysize, huffman_bits); + + entropy_image = self.decode_image_stream(huffman_xsize, huffman_ysize, false)?; + + for pixel in entropy_image.iter_mut() { + let meta_huff_code = (*pixel >> 8) & 0xffff; + + *pixel = meta_huff_code; + + if meta_huff_code >= num_huff_groups { + num_huff_groups = meta_huff_code + 1; + } + } + } + + let mut hufftree_groups = Vec::new(); + + for _i in 0..num_huff_groups { + let mut group: HuffmanCodeGroup = Default::default(); + for j in 0..HUFFMAN_CODES_PER_META_CODE { + let mut alphabet_size = ALPHABET_SIZE[j]; + if j == 0 { + if let Some(color_cache) = color_cache.as_ref() { + alphabet_size += 1 << color_cache.color_cache_bits; + } + } + + let tree = self.read_huffman_code(alphabet_size)?; + group[j] = tree; + } + hufftree_groups.push(group); + } + + let huffman_mask = if huffman_bits == 0 { + !0 + } else { + (1 << huffman_bits) - 1 + }; + + let info = HuffmanInfo { + xsize: huffman_xsize, + _ysize: huffman_ysize, + color_cache, + image: entropy_image, + bits: huffman_bits, + mask: huffman_mask, + huffman_code_groups: hufftree_groups, + }; + + Ok(info) + } + + /// Decodes and returns a single huffman tree + fn read_huffman_code(&mut self, alphabet_size: u16) -> ImageResult<HuffmanTree> { + let simple = self.bit_reader.read_bits::<u8>(1)? == 1; + + if simple { + let num_symbols = self.bit_reader.read_bits::<u8>(1)? + 1; + + let mut code_lengths = vec![u16::from(num_symbols - 1)]; + let mut codes = vec![0]; + let mut symbols = Vec::new(); + + let is_first_8bits = self.bit_reader.read_bits::<u8>(1)?; + symbols.push(self.bit_reader.read_bits::<u16>(1 + 7 * is_first_8bits)?); + + if num_symbols == 2 { + symbols.push(self.bit_reader.read_bits::<u16>(8)?); + code_lengths.push(1); + codes.push(1); + } + + HuffmanTree::build_explicit(code_lengths, codes, symbols) + } else { + let mut code_length_code_lengths = vec![0; CODE_LENGTH_CODES]; + + let num_code_lengths = 4 + self.bit_reader.read_bits::<usize>(4)?; + for i in 0..num_code_lengths { + code_length_code_lengths[CODE_LENGTH_CODE_ORDER[i]] = + self.bit_reader.read_bits(3)?; + } + + let new_code_lengths = + self.read_huffman_code_lengths(code_length_code_lengths, alphabet_size)?; + + HuffmanTree::build_implicit(new_code_lengths) + } + } + + /// Reads huffman code lengths + fn read_huffman_code_lengths( + &mut self, + code_length_code_lengths: Vec<u16>, + num_symbols: u16, + ) -> ImageResult<Vec<u16>> { + let table = HuffmanTree::build_implicit(code_length_code_lengths)?; + + let mut max_symbol = if self.bit_reader.read_bits::<u8>(1)? == 1 { + let length_nbits = 2 + 2 * self.bit_reader.read_bits::<u8>(3)?; + 2 + self.bit_reader.read_bits::<u16>(length_nbits)? 
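+ // When this flag is set the stream gives the number of code-length symbols that
+ // are actually coded (`max_symbol`); the loop below stops after reading that many
+ // and leaves the remaining lengths at zero. Otherwise up to `num_symbols` lengths
+ // are read.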
+ } else { + num_symbols + }; + + let mut code_lengths = vec![0; usize::from(num_symbols)]; + let mut prev_code_len = 8; //default code length + + let mut symbol = 0; + while symbol < num_symbols { + if max_symbol == 0 { + break; + } + max_symbol -= 1; + + let code_len = table.read_symbol(&mut self.bit_reader)?; + + if code_len < 16 { + code_lengths[usize::from(symbol)] = code_len; + symbol += 1; + if code_len != 0 { + prev_code_len = code_len; + } + } else { + let use_prev = code_len == 16; + let slot = code_len - 16; + let extra_bits = match slot { + 0 => 2, + 1 => 3, + 2 => 7, + _ => return Err(DecoderError::BitStreamError.into()), + }; + let repeat_offset = match slot { + 0 | 1 => 3, + 2 => 11, + _ => return Err(DecoderError::BitStreamError.into()), + }; + + let mut repeat = self.bit_reader.read_bits::<u16>(extra_bits)? + repeat_offset; + + if symbol + repeat > num_symbols { + return Err(DecoderError::BitStreamError.into()); + } else { + let length = if use_prev { prev_code_len } else { 0 }; + while repeat > 0 { + repeat -= 1; + code_lengths[usize::from(symbol)] = length; + symbol += 1; + } + } + } + } + + Ok(code_lengths) + } + + /// Decodes the image data using the huffman trees and either of the 3 methods of decoding + fn decode_image_data( + &mut self, + width: u16, + height: u16, + mut huffman_info: HuffmanInfo, + ) -> ImageResult<Vec<u32>> { + let num_values = usize::from(width) * usize::from(height); + let mut data = vec![0; num_values]; + + let huff_index = huffman_info.get_huff_index(0, 0); + let mut tree = &huffman_info.huffman_code_groups[huff_index]; + let mut last_cached = 0; + let mut index = 0; + let mut x = 0; + let mut y = 0; + while index < num_values { + if (x & huffman_info.mask) == 0 { + let index = huffman_info.get_huff_index(x, y); + tree = &huffman_info.huffman_code_groups[index]; + } + + let code = tree[GREEN].read_symbol(&mut self.bit_reader)?; + + //check code + if code < 256 { + //literal, so just use huffman codes and read as argb + let red = tree[RED].read_symbol(&mut self.bit_reader)?; + let blue = tree[BLUE].read_symbol(&mut self.bit_reader)?; + let alpha = tree[ALPHA].read_symbol(&mut self.bit_reader)?; + + data[index] = (u32::from(alpha) << 24) + + (u32::from(red) << 16) + + (u32::from(code) << 8) + + u32::from(blue); + + index += 1; + x += 1; + if x >= width { + x = 0; + y += 1; + } + } else if code < 256 + 24 { + //backward reference, so go back and use that to add image data + let length_symbol = code - 256; + let length = Self::get_copy_distance(&mut self.bit_reader, length_symbol)?; + + let dist_symbol = tree[DIST].read_symbol(&mut self.bit_reader)?; + let dist_code = Self::get_copy_distance(&mut self.bit_reader, dist_symbol)?; + let dist = Self::plane_code_to_distance(width, dist_code); + + if index < dist || num_values - index < length { + return Err(DecoderError::BitStreamError.into()); + } + + for i in 0..length { + data[index + i] = data[index + i - dist]; + } + index += length; + x += u16::try_from(length).unwrap(); + while x >= width { + x -= width; + y += 1; + } + if index < num_values { + let index = huffman_info.get_huff_index(x, y); + tree = &huffman_info.huffman_code_groups[index]; + } + } else { + //color cache, so use previously stored pixels to get this pixel + let key = code - 256 - 24; + + if let Some(color_cache) = huffman_info.color_cache.as_mut() { + //cache old colors + while last_cached < index { + color_cache.insert(data[last_cached]); + last_cached += 1; + } + data[index] = color_cache.lookup(key.into())?; + } else { + 
return Err(DecoderError::BitStreamError.into()); + } + index += 1; + x += 1; + if x >= width { + x = 0; + y += 1; + } + } + } + + Ok(data) + } + + /// Reads color cache data from the bitstream + fn read_color_cache(&mut self) -> ImageResult<Option<u8>> { + if self.bit_reader.read_bits::<u8>(1)? == 1 { + let code_bits = self.bit_reader.read_bits::<u8>(4)?; + + if !(1..=11).contains(&code_bits) { + return Err(DecoderError::InvalidColorCacheBits(code_bits).into()); + } + + Ok(Some(code_bits)) + } else { + Ok(None) + } + } + + /// Gets the copy distance from the prefix code and bitstream + fn get_copy_distance(bit_reader: &mut BitReader, prefix_code: u16) -> ImageResult<usize> { + if prefix_code < 4 { + return Ok(usize::from(prefix_code + 1)); + } + let extra_bits: u8 = ((prefix_code - 2) >> 1).try_into().unwrap(); + let offset = (2 + (usize::from(prefix_code) & 1)) << extra_bits; + + Ok(offset + bit_reader.read_bits::<usize>(extra_bits)? + 1) + } + + /// Gets distance to pixel + fn plane_code_to_distance(xsize: u16, plane_code: usize) -> usize { + if plane_code > 120 { + plane_code - 120 + } else { + let (xoffset, yoffset) = DISTANCE_MAP[plane_code - 1]; + + let dist = i32::from(xoffset) + i32::from(yoffset) * i32::from(xsize); + if dist < 1 { + return 1; + } + dist.try_into().unwrap() + } + } +} + +#[derive(Debug, Clone)] +struct HuffmanInfo { + xsize: u16, + _ysize: u16, + color_cache: Option<ColorCache>, + image: Vec<u32>, + bits: u8, + mask: u16, + huffman_code_groups: Vec<HuffmanCodeGroup>, +} + +impl HuffmanInfo { + fn get_huff_index(&self, x: u16, y: u16) -> usize { + if self.bits == 0 { + return 0; + } + let position = usize::from((y >> self.bits) * self.xsize + (x >> self.bits)); + let meta_huff_code: usize = self.image[position].try_into().unwrap(); + meta_huff_code + } +} + +#[derive(Debug, Clone)] +struct ColorCache { + color_cache_bits: u8, + color_cache: Vec<u32>, +} + +impl ColorCache { + fn insert(&mut self, color: u32) { + let index = (0x1e35a7bdu32.overflowing_mul(color).0) >> (32 - self.color_cache_bits); + self.color_cache[index as usize] = color; + } + + fn lookup(&self, index: usize) -> ImageResult<u32> { + match self.color_cache.get(index) { + Some(&value) => Ok(value), + None => Err(DecoderError::BitStreamError.into()), + } + } +} + +#[derive(Debug, Clone)] +pub(crate) struct BitReader { + buf: Vec<u8>, + index: usize, + bit_count: u8, +} + +impl BitReader { + fn new() -> BitReader { + BitReader { + buf: Vec::new(), + index: 0, + bit_count: 0, + } + } + + fn init(&mut self, buf: Vec<u8>) { + self.buf = buf; + } + + pub(crate) fn read_bits<T>(&mut self, num: u8) -> ImageResult<T> + where + T: num_traits::Unsigned + Shl<u8, Output = T> + AddAssign<T> + From<bool>, + { + let mut value: T = T::zero(); + + for i in 0..num { + if self.buf.len() <= self.index { + return Err(DecoderError::BitStreamError.into()); + } + let bit_true = self.buf[self.index] & (1 << self.bit_count) != 0; + value += T::from(bit_true) << i; + self.bit_count = if self.bit_count == 7 { + self.index += 1; + 0 + } else { + self.bit_count + 1 + }; + } + + Ok(value) + } +} + +#[derive(Debug, Clone, Default)] +pub(crate) struct LosslessFrame { + pub(crate) width: u16, + pub(crate) height: u16, + + pub(crate) buf: Vec<u32>, +} + +impl LosslessFrame { + /// Fills a buffer by converting from argb to rgba + pub(crate) fn fill_rgba(&self, buf: &mut [u8]) { + for (&argb_val, chunk) in self.buf.iter().zip(buf.chunks_exact_mut(4)) { + chunk[0] = ((argb_val >> 16) & 0xff).try_into().unwrap(); + chunk[1] = ((argb_val 
>> 8) & 0xff).try_into().unwrap(); + chunk[2] = (argb_val & 0xff).try_into().unwrap(); + chunk[3] = ((argb_val >> 24) & 0xff).try_into().unwrap(); + } + } + + /// Get buffer size from the image + pub(crate) fn get_buf_size(&self) -> usize { + usize::from(self.width) * usize::from(self.height) * 4 + } + + /// Fills a buffer with just the green values from the lossless decoding + /// Used in extended alpha decoding + pub(crate) fn fill_green(&self, buf: &mut [u8]) { + for (&argb_val, buf_value) in self.buf.iter().zip(buf.iter_mut()) { + *buf_value = ((argb_val >> 8) & 0xff).try_into().unwrap(); + } + } +} + +#[cfg(test)] +mod test { + + use super::BitReader; + + #[test] + fn bit_read_test() { + let mut bit_reader = BitReader::new(); + + //10011100 01000001 11100001 + let buf = vec![0x9C, 0x41, 0xE1]; + + bit_reader.init(buf); + + assert_eq!(bit_reader.read_bits::<u8>(3).unwrap(), 4); //100 + assert_eq!(bit_reader.read_bits::<u8>(2).unwrap(), 3); //11 + assert_eq!(bit_reader.read_bits::<u8>(6).unwrap(), 12); //001100 + assert_eq!(bit_reader.read_bits::<u16>(10).unwrap(), 40); //0000101000 + assert_eq!(bit_reader.read_bits::<u8>(3).unwrap(), 7); //111 + } + + #[test] + fn bit_read_error_test() { + let mut bit_reader = BitReader::new(); + + //01101010 + let buf = vec![0x6A]; + + bit_reader.init(buf); + + assert_eq!(bit_reader.read_bits::<u8>(3).unwrap(), 2); //010 + assert_eq!(bit_reader.read_bits::<u8>(5).unwrap(), 13); //01101 + assert!(bit_reader.read_bits::<u8>(4).is_err()); //error + } +} diff --git a/vendor/image/src/codecs/webp/lossless_transform.rs b/vendor/image/src/codecs/webp/lossless_transform.rs new file mode 100644 index 0000000..f9a82c1 --- /dev/null +++ b/vendor/image/src/codecs/webp/lossless_transform.rs @@ -0,0 +1,464 @@ +use std::convert::TryFrom; +use std::convert::TryInto; + +use super::lossless::subsample_size; +use super::lossless::DecoderError; + +#[derive(Debug, Clone)] +pub(crate) enum TransformType { + PredictorTransform { + size_bits: u8, + predictor_data: Vec<u32>, + }, + ColorTransform { + size_bits: u8, + transform_data: Vec<u32>, + }, + SubtractGreen, + ColorIndexingTransform { + table_size: u16, + table_data: Vec<u32>, + }, +} + +impl TransformType { + /// Applies a transform to the image data + pub(crate) fn apply_transform( + &self, + image_data: &mut Vec<u32>, + width: u16, + height: u16, + ) -> Result<(), DecoderError> { + match self { + TransformType::PredictorTransform { + size_bits, + predictor_data, + } => { + let block_xsize = usize::from(subsample_size(width, *size_bits)); + let width = usize::from(width); + let height = usize::from(height); + + if image_data.len() < width * height { + return Err(DecoderError::TransformError); + } + + //handle top and left borders specially + //this involves ignoring mode and just setting prediction values like this + image_data[0] = add_pixels(image_data[0], 0xff000000); + + for x in 1..width { + image_data[x] = add_pixels(image_data[x], get_left(image_data, x, 0, width)); + } + + for y in 1..height { + image_data[y * width] = + add_pixels(image_data[y * width], get_top(image_data, 0, y, width)); + } + + for y in 1..height { + for x in 1..width { + let block_index = (y >> size_bits) * block_xsize + (x >> size_bits); + + let index = y * width + x; + + let green = (predictor_data[block_index] >> 8) & 0xff; + + match green { + 0 => image_data[index] = add_pixels(image_data[index], 0xff000000), + 1 => { + image_data[index] = + add_pixels(image_data[index], get_left(image_data, x, y, width)) + } + 2 => { + 
image_data[index] = + add_pixels(image_data[index], get_top(image_data, x, y, width)) + } + 3 => { + image_data[index] = add_pixels( + image_data[index], + get_top_right(image_data, x, y, width), + ) + } + 4 => { + image_data[index] = add_pixels( + image_data[index], + get_top_left(image_data, x, y, width), + ) + } + 5 => { + image_data[index] = add_pixels(image_data[index], { + let first = average2( + get_left(image_data, x, y, width), + get_top_right(image_data, x, y, width), + ); + average2(first, get_top(image_data, x, y, width)) + }) + } + 6 => { + image_data[index] = add_pixels( + image_data[index], + average2( + get_left(image_data, x, y, width), + get_top_left(image_data, x, y, width), + ), + ) + } + 7 => { + image_data[index] = add_pixels( + image_data[index], + average2( + get_left(image_data, x, y, width), + get_top(image_data, x, y, width), + ), + ) + } + 8 => { + image_data[index] = add_pixels( + image_data[index], + average2( + get_top_left(image_data, x, y, width), + get_top(image_data, x, y, width), + ), + ) + } + 9 => { + image_data[index] = add_pixels( + image_data[index], + average2( + get_top(image_data, x, y, width), + get_top_right(image_data, x, y, width), + ), + ) + } + 10 => { + image_data[index] = add_pixels(image_data[index], { + let first = average2( + get_left(image_data, x, y, width), + get_top_left(image_data, x, y, width), + ); + let second = average2( + get_top(image_data, x, y, width), + get_top_right(image_data, x, y, width), + ); + average2(first, second) + }) + } + 11 => { + image_data[index] = add_pixels( + image_data[index], + select( + get_left(image_data, x, y, width), + get_top(image_data, x, y, width), + get_top_left(image_data, x, y, width), + ), + ) + } + 12 => { + image_data[index] = add_pixels( + image_data[index], + clamp_add_subtract_full( + get_left(image_data, x, y, width), + get_top(image_data, x, y, width), + get_top_left(image_data, x, y, width), + ), + ) + } + 13 => { + image_data[index] = add_pixels(image_data[index], { + let first = average2( + get_left(image_data, x, y, width), + get_top(image_data, x, y, width), + ); + clamp_add_subtract_half( + first, + get_top_left(image_data, x, y, width), + ) + }) + } + _ => {} + } + } + } + } + TransformType::ColorTransform { + size_bits, + transform_data, + } => { + let block_xsize = usize::from(subsample_size(width, *size_bits)); + let width = usize::from(width); + let height = usize::from(height); + + for y in 0..height { + for x in 0..width { + let block_index = (y >> size_bits) * block_xsize + (x >> size_bits); + + let index = y * width + x; + + let multiplier = + ColorTransformElement::from_color_code(transform_data[block_index]); + + image_data[index] = transform_color(&multiplier, image_data[index]); + } + } + } + TransformType::SubtractGreen => { + let width = usize::from(width); + for y in 0..usize::from(height) { + for x in 0..width { + image_data[y * width + x] = add_green(image_data[y * width + x]); + } + } + } + TransformType::ColorIndexingTransform { + table_size, + table_data, + } => { + let mut new_image_data = + Vec::with_capacity(usize::from(width) * usize::from(height)); + + let table_size = *table_size; + let width_bits: u8 = if table_size <= 2 { + 3 + } else if table_size <= 4 { + 2 + } else if table_size <= 16 { + 1 + } else { + 0 + }; + + let bits_per_pixel = 8 >> width_bits; + let mask = (1 << bits_per_pixel) - 1; + + let mut src = 0; + let width = usize::from(width); + + let pixels_per_byte = 1 << width_bits; + let count_mask = pixels_per_byte - 1; + let mut 
packed_pixels = 0; + + for _y in 0..usize::from(height) { + for x in 0..width { + if (x & count_mask) == 0 { + packed_pixels = (image_data[src] >> 8) & 0xff; + src += 1; + } + + let pixels: usize = (packed_pixels & mask).try_into().unwrap(); + let new_val = if pixels >= table_size.into() { + 0x00000000 + } else { + table_data[pixels] + }; + + new_image_data.push(new_val); + + packed_pixels >>= bits_per_pixel; + } + } + + *image_data = new_image_data; + } + } + + Ok(()) + } +} + +//predictor functions + +/// Adds 2 pixels mod 256 for each pixel +pub(crate) fn add_pixels(a: u32, b: u32) -> u32 { + let new_alpha = ((a >> 24) + (b >> 24)) & 0xff; + let new_red = (((a >> 16) & 0xff) + ((b >> 16) & 0xff)) & 0xff; + let new_green = (((a >> 8) & 0xff) + ((b >> 8) & 0xff)) & 0xff; + let new_blue = ((a & 0xff) + (b & 0xff)) & 0xff; + + (new_alpha << 24) + (new_red << 16) + (new_green << 8) + new_blue +} + +/// Get left pixel +fn get_left(data: &[u32], x: usize, y: usize, width: usize) -> u32 { + data[y * width + x - 1] +} + +/// Get top pixel +fn get_top(data: &[u32], x: usize, y: usize, width: usize) -> u32 { + data[(y - 1) * width + x] +} + +/// Get pixel to top right +fn get_top_right(data: &[u32], x: usize, y: usize, width: usize) -> u32 { + // if x == width - 1 this gets the left most pixel of the current row + // as described in the specification + data[(y - 1) * width + x + 1] +} + +/// Get pixel to top left +fn get_top_left(data: &[u32], x: usize, y: usize, width: usize) -> u32 { + data[(y - 1) * width + x - 1] +} + +/// Get average of 2 pixels +fn average2(a: u32, b: u32) -> u32 { + let mut avg = 0u32; + for i in 0..4 { + let sub_a: u8 = ((a >> (i * 8)) & 0xff).try_into().unwrap(); + let sub_b: u8 = ((b >> (i * 8)) & 0xff).try_into().unwrap(); + avg |= u32::from(sub_average2(sub_a, sub_b)) << (i * 8); + } + avg +} + +/// Get average of 2 bytes +fn sub_average2(a: u8, b: u8) -> u8 { + ((u16::from(a) + u16::from(b)) / 2).try_into().unwrap() +} + +/// Get a specific byte from argb pixel +fn get_byte(val: u32, byte: u8) -> u8 { + ((val >> (byte * 8)) & 0xff).try_into().unwrap() +} + +/// Get byte as i32 for convenience +fn get_byte_i32(val: u32, byte: u8) -> i32 { + i32::from(get_byte(val, byte)) +} + +/// Select left or top byte +fn select(left: u32, top: u32, top_left: u32) -> u32 { + let predict_alpha = get_byte_i32(left, 3) + get_byte_i32(top, 3) - get_byte_i32(top_left, 3); + let predict_red = get_byte_i32(left, 2) + get_byte_i32(top, 2) - get_byte_i32(top_left, 2); + let predict_green = get_byte_i32(left, 1) + get_byte_i32(top, 1) - get_byte_i32(top_left, 1); + let predict_blue = get_byte_i32(left, 0) + get_byte_i32(top, 0) - get_byte_i32(top_left, 0); + + let predict_left = i32::abs(predict_alpha - get_byte_i32(left, 3)) + + i32::abs(predict_red - get_byte_i32(left, 2)) + + i32::abs(predict_green - get_byte_i32(left, 1)) + + i32::abs(predict_blue - get_byte_i32(left, 0)); + let predict_top = i32::abs(predict_alpha - get_byte_i32(top, 3)) + + i32::abs(predict_red - get_byte_i32(top, 2)) + + i32::abs(predict_green - get_byte_i32(top, 1)) + + i32::abs(predict_blue - get_byte_i32(top, 0)); + + if predict_left < predict_top { + left + } else { + top + } +} + +/// Clamp a to [0, 255] +fn clamp(a: i32) -> i32 { + if a < 0 { + 0 + } else if a > 255 { + 255 + } else { + a + } +} + +/// Clamp add subtract full on one part +fn clamp_add_subtract_full_sub(a: i32, b: i32, c: i32) -> i32 { + clamp(a + b - c) +} + +/// Clamp add subtract half on one part +fn clamp_add_subtract_half_sub(a: i32, b: i32) 
-> i32 { + clamp(a + (a - b) / 2) +} + +/// Clamp add subtract full on 3 pixels +fn clamp_add_subtract_full(a: u32, b: u32, c: u32) -> u32 { + let mut value: u32 = 0; + for i in 0..4u8 { + let sub_a: i32 = ((a >> (i * 8)) & 0xff).try_into().unwrap(); + let sub_b: i32 = ((b >> (i * 8)) & 0xff).try_into().unwrap(); + let sub_c: i32 = ((c >> (i * 8)) & 0xff).try_into().unwrap(); + value |= + u32::try_from(clamp_add_subtract_full_sub(sub_a, sub_b, sub_c)).unwrap() << (i * 8); + } + value +} + +/// Clamp add subtract half on 2 pixels +fn clamp_add_subtract_half(a: u32, b: u32) -> u32 { + let mut value = 0; + for i in 0..4u8 { + let sub_a: i32 = ((a >> (i * 8)) & 0xff).try_into().unwrap(); + let sub_b: i32 = ((b >> (i * 8)) & 0xff).try_into().unwrap(); + value |= u32::try_from(clamp_add_subtract_half_sub(sub_a, sub_b)).unwrap() << (i * 8); + } + + value +} + +//color transform + +#[derive(Debug, Clone, Copy)] +struct ColorTransformElement { + green_to_red: u8, + green_to_blue: u8, + red_to_blue: u8, +} + +impl ColorTransformElement { + fn from_color_code(color_code: u32) -> ColorTransformElement { + ColorTransformElement { + green_to_red: (color_code & 0xff).try_into().unwrap(), + green_to_blue: ((color_code >> 8) & 0xff).try_into().unwrap(), + red_to_blue: ((color_code >> 16) & 0xff).try_into().unwrap(), + } + } +} + +/// Does color transform on red and blue transformed by green +fn color_transform(red: u8, blue: u8, green: u8, trans: &ColorTransformElement) -> (u8, u8) { + let mut temp_red = u32::from(red); + let mut temp_blue = u32::from(blue); + + //as does the conversion from u8 to signed two's complement i8 required + temp_red += color_transform_delta(trans.green_to_red as i8, green as i8); + temp_blue += color_transform_delta(trans.green_to_blue as i8, green as i8); + temp_blue += color_transform_delta(trans.red_to_blue as i8, temp_red as i8); + + ( + (temp_red & 0xff).try_into().unwrap(), + (temp_blue & 0xff).try_into().unwrap(), + ) +} + +/// Does color transform on 2 numbers +fn color_transform_delta(t: i8, c: i8) -> u32 { + ((i16::from(t) * i16::from(c)) as u32) >> 5 +} + +// Does color transform on a pixel with a color transform element +fn transform_color(multiplier: &ColorTransformElement, color_value: u32) -> u32 { + let alpha = get_byte(color_value, 3); + let red = get_byte(color_value, 2); + let green = get_byte(color_value, 1); + let blue = get_byte(color_value, 0); + + let (new_red, new_blue) = color_transform(red, blue, green, multiplier); + + (u32::from(alpha) << 24) + + (u32::from(new_red) << 16) + + (u32::from(green) << 8) + + u32::from(new_blue) +} + +//subtract green function + +/// Adds green to red and blue of a pixel +fn add_green(argb: u32) -> u32 { + let red = (argb >> 16) & 0xff; + let green = (argb >> 8) & 0xff; + let blue = argb & 0xff; + + let new_red = (red + green) & 0xff; + let new_blue = (blue + green) & 0xff; + + (argb & 0xff00ff00) | (new_red << 16) | (new_blue) +} diff --git a/vendor/image/src/codecs/webp/mod.rs b/vendor/image/src/codecs/webp/mod.rs new file mode 100644 index 0000000..b38faed --- /dev/null +++ b/vendor/image/src/codecs/webp/mod.rs @@ -0,0 +1,28 @@ +//! 
Decoding and Encoding of WebP Images + +#[cfg(feature = "webp-encoder")] +pub use self::encoder::{WebPEncoder, WebPQuality}; + +#[cfg(feature = "webp-encoder")] +mod encoder; + +#[cfg(feature = "webp")] +pub use self::decoder::WebPDecoder; + +#[cfg(feature = "webp")] +mod decoder; +#[cfg(feature = "webp")] +mod extended; +#[cfg(feature = "webp")] +mod huffman; +#[cfg(feature = "webp")] +mod loop_filter; +#[cfg(feature = "webp")] +mod lossless; +#[cfg(feature = "webp")] +mod lossless_transform; +#[cfg(feature = "webp")] +mod transform; + +#[cfg(feature = "webp")] +pub mod vp8; diff --git a/vendor/image/src/codecs/webp/transform.rs b/vendor/image/src/codecs/webp/transform.rs new file mode 100644 index 0000000..3b3ef5a --- /dev/null +++ b/vendor/image/src/codecs/webp/transform.rs @@ -0,0 +1,77 @@ +static CONST1: i64 = 20091; +static CONST2: i64 = 35468; + +pub(crate) fn idct4x4(block: &mut [i32]) { + // The intermediate results may overflow the types, so we stretch the type. + fn fetch(block: &mut [i32], idx: usize) -> i64 { + i64::from(block[idx]) + } + + for i in 0usize..4 { + let a1 = fetch(block, i) + fetch(block, 8 + i); + let b1 = fetch(block, i) - fetch(block, 8 + i); + + let t1 = (fetch(block, 4 + i) * CONST2) >> 16; + let t2 = fetch(block, 12 + i) + ((fetch(block, 12 + i) * CONST1) >> 16); + let c1 = t1 - t2; + + let t1 = fetch(block, 4 + i) + ((fetch(block, 4 + i) * CONST1) >> 16); + let t2 = (fetch(block, 12 + i) * CONST2) >> 16; + let d1 = t1 + t2; + + block[i] = (a1 + d1) as i32; + block[4 + i] = (b1 + c1) as i32; + block[4 * 3 + i] = (a1 - d1) as i32; + block[4 * 2 + i] = (b1 - c1) as i32; + } + + for i in 0usize..4 { + let a1 = fetch(block, 4 * i) + fetch(block, 4 * i + 2); + let b1 = fetch(block, 4 * i) - fetch(block, 4 * i + 2); + + let t1 = (fetch(block, 4 * i + 1) * CONST2) >> 16; + let t2 = fetch(block, 4 * i + 3) + ((fetch(block, 4 * i + 3) * CONST1) >> 16); + let c1 = t1 - t2; + + let t1 = fetch(block, 4 * i + 1) + ((fetch(block, 4 * i + 1) * CONST1) >> 16); + let t2 = (fetch(block, 4 * i + 3) * CONST2) >> 16; + let d1 = t1 + t2; + + block[4 * i] = ((a1 + d1 + 4) >> 3) as i32; + block[4 * i + 3] = ((a1 - d1 + 4) >> 3) as i32; + block[4 * i + 1] = ((b1 + c1 + 4) >> 3) as i32; + block[4 * i + 2] = ((b1 - c1 + 4) >> 3) as i32; + } +} + +// 14.3 +pub(crate) fn iwht4x4(block: &mut [i32]) { + for i in 0usize..4 { + let a1 = block[i] + block[12 + i]; + let b1 = block[4 + i] + block[8 + i]; + let c1 = block[4 + i] - block[8 + i]; + let d1 = block[i] - block[12 + i]; + + block[i] = a1 + b1; + block[4 + i] = c1 + d1; + block[8 + i] = a1 - b1; + block[12 + i] = d1 - c1; + } + + for i in 0usize..4 { + let a1 = block[4 * i] + block[4 * i + 3]; + let b1 = block[4 * i + 1] + block[4 * i + 2]; + let c1 = block[4 * i + 1] - block[4 * i + 2]; + let d1 = block[4 * i] - block[4 * i + 3]; + + let a2 = a1 + b1; + let b2 = c1 + d1; + let c2 = a1 - b1; + let d2 = d1 - c1; + + block[4 * i] = (a2 + 3) >> 3; + block[4 * i + 1] = (b2 + 3) >> 3; + block[4 * i + 2] = (c2 + 3) >> 3; + block[4 * i + 3] = (d2 + 3) >> 3; + } +} diff --git a/vendor/image/src/codecs/webp/vp8.rs b/vendor/image/src/codecs/webp/vp8.rs new file mode 100644 index 0000000..67b8820 --- /dev/null +++ b/vendor/image/src/codecs/webp/vp8.rs @@ -0,0 +1,2932 @@ +//! An implementation of the VP8 Video Codec +//! +//! This module contains a partial implementation of the +//! VP8 video format as defined in RFC-6386. +//! +//! It decodes Keyframes only. +//! VP8 is the underpinning of the WebP image format +//! +//! # Related Links +//! 
* [rfc-6386](http://tools.ietf.org/html/rfc6386) - The VP8 Data Format and Decoding Guide +//! * [VP8.pdf](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37073.pdf) - An overview +//! of the VP8 format +//! + +use byteorder::{LittleEndian, ReadBytesExt}; +use std::convert::TryInto; +use std::default::Default; +use std::io::Read; +use std::{cmp, error, fmt}; + +use super::loop_filter; +use super::transform; +use crate::error::{ + DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind, +}; +use crate::image::ImageFormat; + +use crate::utils::clamp; + +const MAX_SEGMENTS: usize = 4; +const NUM_DCT_TOKENS: usize = 12; + +// Prediction modes +const DC_PRED: i8 = 0; +const V_PRED: i8 = 1; +const H_PRED: i8 = 2; +const TM_PRED: i8 = 3; +const B_PRED: i8 = 4; + +const B_DC_PRED: i8 = 0; +const B_TM_PRED: i8 = 1; +const B_VE_PRED: i8 = 2; +const B_HE_PRED: i8 = 3; +const B_LD_PRED: i8 = 4; +const B_RD_PRED: i8 = 5; +const B_VR_PRED: i8 = 6; +const B_VL_PRED: i8 = 7; +const B_HD_PRED: i8 = 8; +const B_HU_PRED: i8 = 9; + +// Prediction mode enum +#[repr(i8)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum LumaMode { + /// Predict DC using row above and column to the left. + DC = DC_PRED, + + /// Predict rows using row above. + V = V_PRED, + + /// Predict columns using column to the left. + H = H_PRED, + + /// Propagate second differences. + TM = TM_PRED, + + /// Each Y subblock is independently predicted. + B = B_PRED, +} + +#[repr(i8)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum ChromaMode { + /// Predict DC using row above and column to the left. + DC = DC_PRED, + + /// Predict rows using row above. + V = V_PRED, + + /// Predict columns using column to the left. + H = H_PRED, + + /// Propagate second differences. 
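+ /// (VP8's `TM_PRED`, the "TrueMotion" mode: each pixel is predicted as
+ /// left + above - above-left, clamped to the 0..=255 range.)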
+ TM = TM_PRED, +} + +#[repr(i8)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum IntraMode { + DC = B_DC_PRED, + TM = B_TM_PRED, + VE = B_VE_PRED, + HE = B_HE_PRED, + LD = B_LD_PRED, + RD = B_RD_PRED, + VR = B_VR_PRED, + VL = B_VL_PRED, + HD = B_HD_PRED, + HU = B_HU_PRED, +} + +type Prob = u8; + +static SEGMENT_ID_TREE: [i8; 6] = [2, 4, -0, -1, -2, -3]; + +// Section 11.2 +// Tree for determining the keyframe luma intra prediction modes: +static KEYFRAME_YMODE_TREE: [i8; 8] = [-B_PRED, 2, 4, 6, -DC_PRED, -V_PRED, -H_PRED, -TM_PRED]; + +// Default probabilities for decoding the keyframe luma modes +static KEYFRAME_YMODE_PROBS: [Prob; 4] = [145, 156, 163, 128]; + +// Tree for determining the keyframe B_PRED mode: +static KEYFRAME_BPRED_MODE_TREE: [i8; 18] = [ + -B_DC_PRED, 2, -B_TM_PRED, 4, -B_VE_PRED, 6, 8, 12, -B_HE_PRED, 10, -B_RD_PRED, -B_VR_PRED, + -B_LD_PRED, 14, -B_VL_PRED, 16, -B_HD_PRED, -B_HU_PRED, +]; + +// Probabilities for the BPRED_MODE_TREE +static KEYFRAME_BPRED_MODE_PROBS: [[[u8; 9]; 10]; 10] = [ + [ + [231, 120, 48, 89, 115, 113, 120, 152, 112], + [152, 179, 64, 126, 170, 118, 46, 70, 95], + [175, 69, 143, 80, 85, 82, 72, 155, 103], + [56, 58, 10, 171, 218, 189, 17, 13, 152], + [144, 71, 10, 38, 171, 213, 144, 34, 26], + [114, 26, 17, 163, 44, 195, 21, 10, 173], + [121, 24, 80, 195, 26, 62, 44, 64, 85], + [170, 46, 55, 19, 136, 160, 33, 206, 71], + [63, 20, 8, 114, 114, 208, 12, 9, 226], + [81, 40, 11, 96, 182, 84, 29, 16, 36], + ], + [ + [134, 183, 89, 137, 98, 101, 106, 165, 148], + [72, 187, 100, 130, 157, 111, 32, 75, 80], + [66, 102, 167, 99, 74, 62, 40, 234, 128], + [41, 53, 9, 178, 241, 141, 26, 8, 107], + [104, 79, 12, 27, 217, 255, 87, 17, 7], + [74, 43, 26, 146, 73, 166, 49, 23, 157], + [65, 38, 105, 160, 51, 52, 31, 115, 128], + [87, 68, 71, 44, 114, 51, 15, 186, 23], + [47, 41, 14, 110, 182, 183, 21, 17, 194], + [66, 45, 25, 102, 197, 189, 23, 18, 22], + ], + [ + [88, 88, 147, 150, 42, 46, 45, 196, 205], + [43, 97, 183, 117, 85, 38, 35, 179, 61], + [39, 53, 200, 87, 26, 21, 43, 232, 171], + [56, 34, 51, 104, 114, 102, 29, 93, 77], + [107, 54, 32, 26, 51, 1, 81, 43, 31], + [39, 28, 85, 171, 58, 165, 90, 98, 64], + [34, 22, 116, 206, 23, 34, 43, 166, 73], + [68, 25, 106, 22, 64, 171, 36, 225, 114], + [34, 19, 21, 102, 132, 188, 16, 76, 124], + [62, 18, 78, 95, 85, 57, 50, 48, 51], + ], + [ + [193, 101, 35, 159, 215, 111, 89, 46, 111], + [60, 148, 31, 172, 219, 228, 21, 18, 111], + [112, 113, 77, 85, 179, 255, 38, 120, 114], + [40, 42, 1, 196, 245, 209, 10, 25, 109], + [100, 80, 8, 43, 154, 1, 51, 26, 71], + [88, 43, 29, 140, 166, 213, 37, 43, 154], + [61, 63, 30, 155, 67, 45, 68, 1, 209], + [142, 78, 78, 16, 255, 128, 34, 197, 171], + [41, 40, 5, 102, 211, 183, 4, 1, 221], + [51, 50, 17, 168, 209, 192, 23, 25, 82], + ], + [ + [125, 98, 42, 88, 104, 85, 117, 175, 82], + [95, 84, 53, 89, 128, 100, 113, 101, 45], + [75, 79, 123, 47, 51, 128, 81, 171, 1], + [57, 17, 5, 71, 102, 57, 53, 41, 49], + [115, 21, 2, 10, 102, 255, 166, 23, 6], + [38, 33, 13, 121, 57, 73, 26, 1, 85], + [41, 10, 67, 138, 77, 110, 90, 47, 114], + [101, 29, 16, 10, 85, 128, 101, 196, 26], + [57, 18, 10, 102, 102, 213, 34, 20, 43], + [117, 20, 15, 36, 163, 128, 68, 1, 26], + ], + [ + [138, 31, 36, 171, 27, 166, 38, 44, 229], + [67, 87, 58, 169, 82, 115, 26, 59, 179], + [63, 59, 90, 180, 59, 166, 93, 73, 154], + [40, 40, 21, 116, 143, 209, 34, 39, 175], + [57, 46, 22, 24, 128, 1, 54, 17, 37], + [47, 15, 16, 183, 34, 223, 49, 45, 183], + [46, 17, 33, 183, 6, 98, 15, 32, 183], + [65, 32, 73, 
115, 28, 128, 23, 128, 205], + [40, 3, 9, 115, 51, 192, 18, 6, 223], + [87, 37, 9, 115, 59, 77, 64, 21, 47], + ], + [ + [104, 55, 44, 218, 9, 54, 53, 130, 226], + [64, 90, 70, 205, 40, 41, 23, 26, 57], + [54, 57, 112, 184, 5, 41, 38, 166, 213], + [30, 34, 26, 133, 152, 116, 10, 32, 134], + [75, 32, 12, 51, 192, 255, 160, 43, 51], + [39, 19, 53, 221, 26, 114, 32, 73, 255], + [31, 9, 65, 234, 2, 15, 1, 118, 73], + [88, 31, 35, 67, 102, 85, 55, 186, 85], + [56, 21, 23, 111, 59, 205, 45, 37, 192], + [55, 38, 70, 124, 73, 102, 1, 34, 98], + ], + [ + [102, 61, 71, 37, 34, 53, 31, 243, 192], + [69, 60, 71, 38, 73, 119, 28, 222, 37], + [68, 45, 128, 34, 1, 47, 11, 245, 171], + [62, 17, 19, 70, 146, 85, 55, 62, 70], + [75, 15, 9, 9, 64, 255, 184, 119, 16], + [37, 43, 37, 154, 100, 163, 85, 160, 1], + [63, 9, 92, 136, 28, 64, 32, 201, 85], + [86, 6, 28, 5, 64, 255, 25, 248, 1], + [56, 8, 17, 132, 137, 255, 55, 116, 128], + [58, 15, 20, 82, 135, 57, 26, 121, 40], + ], + [ + [164, 50, 31, 137, 154, 133, 25, 35, 218], + [51, 103, 44, 131, 131, 123, 31, 6, 158], + [86, 40, 64, 135, 148, 224, 45, 183, 128], + [22, 26, 17, 131, 240, 154, 14, 1, 209], + [83, 12, 13, 54, 192, 255, 68, 47, 28], + [45, 16, 21, 91, 64, 222, 7, 1, 197], + [56, 21, 39, 155, 60, 138, 23, 102, 213], + [85, 26, 85, 85, 128, 128, 32, 146, 171], + [18, 11, 7, 63, 144, 171, 4, 4, 246], + [35, 27, 10, 146, 174, 171, 12, 26, 128], + ], + [ + [190, 80, 35, 99, 180, 80, 126, 54, 45], + [85, 126, 47, 87, 176, 51, 41, 20, 32], + [101, 75, 128, 139, 118, 146, 116, 128, 85], + [56, 41, 15, 176, 236, 85, 37, 9, 62], + [146, 36, 19, 30, 171, 255, 97, 27, 20], + [71, 30, 17, 119, 118, 255, 17, 18, 138], + [101, 38, 60, 138, 55, 70, 43, 26, 142], + [138, 45, 61, 62, 219, 1, 81, 188, 64], + [32, 41, 20, 117, 151, 142, 20, 21, 163], + [112, 19, 12, 61, 195, 128, 48, 4, 24], + ], +]; + +// Section 11.4 Tree for determining macroblock the chroma mode +static KEYFRAME_UV_MODE_TREE: [i8; 6] = [-DC_PRED, 2, -V_PRED, 4, -H_PRED, -TM_PRED]; + +// Probabilities for determining macroblock mode +static KEYFRAME_UV_MODE_PROBS: [Prob; 3] = [142, 114, 183]; + +// Section 13.4 +type TokenProbTables = [[[[Prob; NUM_DCT_TOKENS - 1]; 3]; 8]; 4]; + +// Probabilities that a token's probability will be updated +static COEFF_UPDATE_PROBS: TokenProbTables = [ + [ + [ + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255], + [249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255], + [234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255], + [250, 255, 254, 255, 254, 255, 255, 255, 255, 
255, 255], + [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + ], + [ + [ + [217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255], + [234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255], + ], + [ + [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255], + [250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + ], + [ + [ + [186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255], + [234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255], + [251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255], + ], + [ + [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + ], + [ + [ + [248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255], + [248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255], + [246, 253, 253, 255, 255, 255, 255, 
255, 255, 255, 255], + [252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255], + [248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255], + [253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255], + [252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255], + [250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + [ + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], + ], + ], +]; + +// Section 13.5 +// Default Probabilities for tokens +static COEFF_PROBS: TokenProbTables = [ + [ + [ + [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], + [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], + [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], + ], + [ + [253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128], + [189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128], + [106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128], + ], + [ + [1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128], + [181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128], + [78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128], + ], + [ + [1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128], + [184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128], + [77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128], + ], + [ + [1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128], + [170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128], + [37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128], + ], + [ + [1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128], + [207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128], + [102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128], + ], + [ + [1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128], + [177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128], + [80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128], + ], + [ + [1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128], + [246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128], + [255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], + ], + ], + [ + [ + [198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62], + [131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1], + [68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128], + ], + [ + [1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128], + [184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128], + [81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128], + ], + [ + [1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128], + [99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128], + [23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128], + ], + [ + [1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128], + [109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128], + [44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128], + ], + [ + [1, 132, 239, 251, 219, 209, 255, 
165, 128, 128, 128], + [94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128], + [22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128], + ], + [ + [1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128], + [124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128], + [35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128], + ], + [ + [1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128], + [121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128], + [45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128], + ], + [ + [1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128], + [203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128], + [137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128], + ], + ], + [ + [ + [253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128], + [175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128], + [73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128], + ], + [ + [1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128], + [239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128], + [155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128], + ], + [ + [1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128], + [201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128], + [69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128], + ], + [ + [1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128], + [223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128], + [141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128], + ], + [ + [1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128], + [190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128], + [149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128], + ], + [ + [1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128], + [247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128], + [240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128], + ], + [ + [1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128], + [213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128], + [55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128], + ], + [ + [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], + [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], + [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128], + ], + ], + [ + [ + [202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255], + [126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128], + [61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128], + ], + [ + [1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128], + [166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128], + [39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128], + ], + [ + [1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128], + [124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128], + [24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128], + ], + [ + [1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128], + [149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128], + [28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128], + ], + [ + [1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128], + [123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128], + [20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128], + ], + [ + [1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128], + [168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128], + [47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128], + ], + [ + [1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128], + [141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128], + [42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128], + ], + [ + [1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128], + [244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128], + [238, 
1, 255, 128, 128, 128, 128, 128, 128, 128, 128], + ], + ], +]; + +// DCT Tokens +const DCT_0: i8 = 0; +const DCT_1: i8 = 1; +const DCT_2: i8 = 2; +const DCT_3: i8 = 3; +const DCT_4: i8 = 4; +const DCT_CAT1: i8 = 5; +const DCT_CAT2: i8 = 6; +const DCT_CAT3: i8 = 7; +const DCT_CAT4: i8 = 8; +const DCT_CAT5: i8 = 9; +const DCT_CAT6: i8 = 10; +const DCT_EOB: i8 = 11; + +static DCT_TOKEN_TREE: [i8; 22] = [ + -DCT_EOB, 2, -DCT_0, 4, -DCT_1, 6, 8, 12, -DCT_2, 10, -DCT_3, -DCT_4, 14, 16, -DCT_CAT1, + -DCT_CAT2, 18, 20, -DCT_CAT3, -DCT_CAT4, -DCT_CAT5, -DCT_CAT6, +]; + +static PROB_DCT_CAT: [[Prob; 12]; 6] = [ + [159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [165, 145, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [173, 148, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [176, 155, 140, 135, 0, 0, 0, 0, 0, 0, 0, 0], + [180, 157, 141, 134, 130, 0, 0, 0, 0, 0, 0, 0], + [254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0], +]; + +static DCT_CAT_BASE: [u8; 6] = [5, 7, 11, 19, 35, 67]; +static COEFF_BANDS: [u8; 16] = [0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7]; + +#[rustfmt::skip] +static DC_QUANT: [i16; 128] = [ + 4, 5, 6, 7, 8, 9, 10, 10, + 11, 12, 13, 14, 15, 16, 17, 17, + 18, 19, 20, 20, 21, 21, 22, 22, + 23, 23, 24, 25, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 37, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, + 91, 93, 95, 96, 98, 100, 101, 102, + 104, 106, 108, 110, 112, 114, 116, 118, + 122, 124, 126, 128, 130, 132, 134, 136, + 138, 140, 143, 145, 148, 151, 154, 157, +]; + +#[rustfmt::skip] +static AC_QUANT: [i16; 128] = [ + 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 60, + 62, 64, 66, 68, 70, 72, 74, 76, + 78, 80, 82, 84, 86, 88, 90, 92, + 94, 96, 98, 100, 102, 104, 106, 108, + 110, 112, 114, 116, 119, 122, 125, 128, + 131, 134, 137, 140, 143, 146, 149, 152, + 155, 158, 161, 164, 167, 170, 173, 177, + 181, 185, 189, 193, 197, 201, 205, 209, + 213, 217, 221, 225, 229, 234, 239, 245, + 249, 254, 259, 264, 269, 274, 279, 284, +]; + +static ZIGZAG: [u8; 16] = [0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15]; + +/// All errors that can occur when attempting to parse a VP8 codec inside WebP +#[derive(Debug, Clone, Copy)] +enum DecoderError { + /// VP8's `[0x9D, 0x01, 0x2A]` magic not found or invalid + Vp8MagicInvalid([u8; 3]), + + /// Decoder initialisation wasn't provided with enough data + NotEnoughInitData, + + /// At time of writing, only the YUV colour-space encoded as `0` is specified + ColorSpaceInvalid(u8), + /// LUMA prediction mode was not recognised + LumaPredictionModeInvalid(i8), + /// Intra-prediction mode was not recognised + IntraPredictionModeInvalid(i8), + /// Chroma prediction mode was not recognised + ChromaPredictionModeInvalid(i8), +} + +impl fmt::Display for DecoderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DecoderError::Vp8MagicInvalid(tag) => f.write_fmt(format_args!( + "Invalid VP8 magic: [{:#04X?}, {:#04X?}, {:#04X?}]", + tag[0], tag[1], tag[2] + )), + + DecoderError::NotEnoughInitData => { + f.write_str("Expected at least 2 bytes of VP8 decoder initialization data") + } + + DecoderError::ColorSpaceInvalid(cs) => { + f.write_fmt(format_args!("Invalid non-YUV VP8 color 
space {}", cs)) + } + DecoderError::LumaPredictionModeInvalid(pm) => { + f.write_fmt(format_args!("Invalid VP8 LUMA prediction mode {}", pm)) + } + DecoderError::IntraPredictionModeInvalid(i) => { + f.write_fmt(format_args!("Invalid VP8 intra-prediction mode {}", i)) + } + DecoderError::ChromaPredictionModeInvalid(c) => { + f.write_fmt(format_args!("Invalid VP8 chroma prediction mode {}", c)) + } + } + } +} + +impl From<DecoderError> for ImageError { + fn from(e: DecoderError) -> ImageError { + ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e)) + } +} + +impl error::Error for DecoderError {} + +struct BoolReader { + buf: Vec<u8>, + index: usize, + + range: u32, + value: u32, + bit_count: u8, +} + +impl BoolReader { + pub(crate) fn new() -> BoolReader { + BoolReader { + buf: Vec::new(), + range: 0, + value: 0, + bit_count: 0, + index: 0, + } + } + + pub(crate) fn init(&mut self, buf: Vec<u8>) -> ImageResult<()> { + if buf.len() < 2 { + return Err(DecoderError::NotEnoughInitData.into()); + } + + self.buf = buf; + // Direct access safe, since length has just been validated. + self.value = (u32::from(self.buf[0]) << 8) | u32::from(self.buf[1]); + self.index = 2; + self.range = 255; + self.bit_count = 0; + + Ok(()) + } + + pub(crate) fn read_bool(&mut self, probability: u8) -> bool { + let split = 1 + (((self.range - 1) * u32::from(probability)) >> 8); + let bigsplit = split << 8; + + let retval = if self.value >= bigsplit { + self.range -= split; + self.value -= bigsplit; + true + } else { + self.range = split; + false + }; + + while self.range < 128 { + self.value <<= 1; + self.range <<= 1; + self.bit_count += 1; + + if self.bit_count == 8 { + self.bit_count = 0; + + // If no more bits are available, just don't do anything. + // This strategy is suggested in the reference implementation of RFC6386 (p.135) + if self.index < self.buf.len() { + self.value |= u32::from(self.buf[self.index]); + self.index += 1; + } + } + } + + retval + } + + pub(crate) fn read_literal(&mut self, n: u8) -> u8 { + let mut v = 0u8; + let mut n = n; + + while n != 0 { + v = (v << 1) + self.read_bool(128u8) as u8; + n -= 1; + } + + v + } + + pub(crate) fn read_magnitude_and_sign(&mut self, n: u8) -> i32 { + let magnitude = self.read_literal(n); + let sign = self.read_literal(1); + + if sign == 1 { + -i32::from(magnitude) + } else { + i32::from(magnitude) + } + } + + pub(crate) fn read_with_tree(&mut self, tree: &[i8], probs: &[Prob], start: isize) -> i8 { + let mut index = start; + + loop { + let a = self.read_bool(probs[index as usize >> 1]); + let b = index + a as isize; + index = tree[b as usize] as isize; + + if index <= 0 { + break; + } + } + + -index as i8 + } + + pub(crate) fn read_flag(&mut self) -> bool { + 0 != self.read_literal(1) + } +} + +#[derive(Default, Clone, Copy)] +struct MacroBlock { + bpred: [IntraMode; 16], + complexity: [u8; 9], + luma_mode: LumaMode, + chroma_mode: ChromaMode, + segmentid: u8, + coeffs_skipped: bool, +} + +/// A Representation of the last decoded video frame +#[derive(Default, Debug, Clone)] +pub struct Frame { + /// The width of the luma plane + pub width: u16, + + /// The height of the luma plane + pub height: u16, + + /// The luma plane of the frame + pub ybuf: Vec<u8>, + + /// The blue plane of the frame + pub ubuf: Vec<u8>, + + /// The red plane of the frame + pub vbuf: Vec<u8>, + + /// Indicates whether this frame is a keyframe + pub keyframe: bool, + + version: u8, + + /// Indicates whether this frame is intended for display + pub for_display: bool, + + 
// Section 9.2 + /// The pixel type of the frame as defined by Section 9.2 + /// of the VP8 Specification + pub pixel_type: u8, + + // Section 9.4 and 15 + filter_type: bool, //if true uses simple filter // if false uses normal filter + filter_level: u8, + sharpness_level: u8, +} + +impl Frame { + /// Chroma plane is half the size of the Luma plane + fn chroma_width(&self) -> u16 { + (self.width + 1) / 2 + } + + fn chroma_height(&self) -> u16 { + (self.height + 1) / 2 + } + + /// Fills an rgb buffer with the image + pub(crate) fn fill_rgb(&self, buf: &mut [u8]) { + for (index, rgb_chunk) in (0..self.ybuf.len()).zip(buf.chunks_exact_mut(3)) { + let y = index / self.width as usize; + let x = index % self.width as usize; + let chroma_index = self.chroma_width() as usize * (y / 2) + x / 2; + + Frame::fill_single( + self.ybuf[index], + self.ubuf[chroma_index], + self.vbuf[chroma_index], + rgb_chunk, + ); + } + } + + /// Fills an rgba buffer by skipping the alpha values + pub(crate) fn fill_rgba(&self, buf: &mut [u8]) { + for (index, rgba_chunk) in (0..self.ybuf.len()).zip(buf.chunks_exact_mut(4)) { + let y = index / self.width as usize; + let x = index % self.width as usize; + let chroma_index = self.chroma_width() as usize * (y / 2) + x / 2; + + Frame::fill_single( + self.ybuf[index], + self.ubuf[chroma_index], + self.vbuf[chroma_index], + rgba_chunk, + ); + } + } + + /// Conversion values from https://docs.microsoft.com/en-us/windows/win32/medfound/recommended-8-bit-yuv-formats-for-video-rendering#converting-8-bit-yuv-to-rgb888 + fn fill_single(y: u8, u: u8, v: u8, rgb: &mut [u8]) { + let c: i32 = i32::from(y) - 16; + let d: i32 = i32::from(u) - 128; + let e: i32 = i32::from(v) - 128; + + let r: u8 = clamp((298 * c + 409 * e + 128) >> 8, 0, 255) + .try_into() + .unwrap(); + let g: u8 = clamp((298 * c - 100 * d - 208 * e + 128) >> 8, 0, 255) + .try_into() + .unwrap(); + let b: u8 = clamp((298 * c + 516 * d + 128) >> 8, 0, 255) + .try_into() + .unwrap(); + + rgb[0] = r; + rgb[1] = g; + rgb[2] = b; + } + + /// Gets the buffer size + pub fn get_buf_size(&self) -> usize { + self.ybuf.len() * 3 + } +} + +#[derive(Clone, Copy, Default)] +struct Segment { + ydc: i16, + yac: i16, + + y2dc: i16, + y2ac: i16, + + uvdc: i16, + uvac: i16, + + delta_values: bool, + + quantizer_level: i8, + loopfilter_level: i8, +} + +/// VP8 Decoder +/// +/// Only decodes keyframes +pub struct Vp8Decoder<R> { + r: R, + b: BoolReader, + + mbwidth: u16, + mbheight: u16, + macroblocks: Vec<MacroBlock>, + + frame: Frame, + + segments_enabled: bool, + segments_update_map: bool, + segment: [Segment; MAX_SEGMENTS], + + ref_delta: [i32; 4], + mode_delta: [i32; 4], + + partitions: [BoolReader; 8], + num_partitions: u8, + + segment_tree_probs: [Prob; 3], + token_probs: Box<TokenProbTables>, + + // Section 9.10 + prob_intra: Prob, + + // Section 9.11 + prob_skip_false: Option<Prob>, + + top: Vec<MacroBlock>, + left: MacroBlock, + + top_border: Vec<u8>, + left_border: Vec<u8>, +} + +impl<R: Read> Vp8Decoder<R> { + /// Create a new decoder. 
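+ /// No bytes are consumed from `r` here; the stream is only read once `decode_frame` is called.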
+ /// The reader must present a raw vp8 bitstream to the decoder + pub fn new(r: R) -> Vp8Decoder<R> { + let f = Frame::default(); + let s = Segment::default(); + let m = MacroBlock::default(); + + Vp8Decoder { + r, + b: BoolReader::new(), + + mbwidth: 0, + mbheight: 0, + macroblocks: Vec::new(), + + frame: f, + segments_enabled: false, + segments_update_map: false, + segment: [s; MAX_SEGMENTS], + + ref_delta: [0; 4], + mode_delta: [0; 4], + + partitions: [ + BoolReader::new(), + BoolReader::new(), + BoolReader::new(), + BoolReader::new(), + BoolReader::new(), + BoolReader::new(), + BoolReader::new(), + BoolReader::new(), + ], + + num_partitions: 1, + + segment_tree_probs: [255u8; 3], + token_probs: Box::new(COEFF_PROBS), + + // Section 9.10 + prob_intra: 0u8, + + // Section 9.11 + prob_skip_false: None, + + top: Vec::new(), + left: m, + + top_border: Vec::new(), + left_border: Vec::new(), + } + } + + fn update_token_probabilities(&mut self) { + for (i, is) in COEFF_UPDATE_PROBS.iter().enumerate() { + for (j, js) in is.iter().enumerate() { + for (k, ks) in js.iter().enumerate() { + for (t, prob) in ks.iter().enumerate().take(NUM_DCT_TOKENS - 1) { + if self.b.read_bool(*prob) { + let v = self.b.read_literal(8); + self.token_probs[i][j][k][t] = v; + } + } + } + } + } + } + + fn init_partitions(&mut self, n: usize) -> ImageResult<()> { + if n > 1 { + let mut sizes = vec![0; 3 * n - 3]; + self.r.read_exact(sizes.as_mut_slice())?; + + for (i, s) in sizes.chunks(3).enumerate() { + let size = { s } + .read_u24::<LittleEndian>() + .expect("Reading from &[u8] can't fail and the chunk is complete"); + + let mut buf = vec![0; size as usize]; + self.r.read_exact(buf.as_mut_slice())?; + + self.partitions[i].init(buf)?; + } + } + + let mut buf = Vec::new(); + self.r.read_to_end(&mut buf)?; + self.partitions[n - 1].init(buf)?; + + Ok(()) + } + + fn read_quantization_indices(&mut self) { + fn dc_quant(index: i32) -> i16 { + DC_QUANT[clamp(index, 0, 127) as usize] + } + + fn ac_quant(index: i32) -> i16 { + AC_QUANT[clamp(index, 0, 127) as usize] + } + + let yac_abs = self.b.read_literal(7); + let ydc_delta = if self.b.read_flag() { + self.b.read_magnitude_and_sign(4) + } else { + 0 + }; + + let y2dc_delta = if self.b.read_flag() { + self.b.read_magnitude_and_sign(4) + } else { + 0 + }; + + let y2ac_delta = if self.b.read_flag() { + self.b.read_magnitude_and_sign(4) + } else { + 0 + }; + + let uvdc_delta = if self.b.read_flag() { + self.b.read_magnitude_and_sign(4) + } else { + 0 + }; + + let uvac_delta = if self.b.read_flag() { + self.b.read_magnitude_and_sign(4) + } else { + 0 + }; + + let n = if self.segments_enabled { + MAX_SEGMENTS + } else { + 1 + }; + for i in 0usize..n { + let base = i32::from(if !self.segment[i].delta_values { + i16::from(self.segment[i].quantizer_level) + } else { + i16::from(self.segment[i].quantizer_level) + i16::from(yac_abs) + }); + + self.segment[i].ydc = dc_quant(base + ydc_delta); + self.segment[i].yac = ac_quant(base); + + self.segment[i].y2dc = dc_quant(base + y2dc_delta) * 2; + // The intermediate result (max`284*155`) can be larger than the `i16` range. 
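+ // For example, the largest entry in `AC_QUANT` is 284 and 284 * 155 = 44020, which does not
+ // fit in an `i16` (max 32767), so the product is taken in `i32` before converting back.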
+ self.segment[i].y2ac = (i32::from(ac_quant(base + y2ac_delta)) * 155 / 100) as i16; + + self.segment[i].uvdc = dc_quant(base + uvdc_delta); + self.segment[i].uvac = ac_quant(base + uvac_delta); + + if self.segment[i].y2ac < 8 { + self.segment[i].y2ac = 8; + } + + if self.segment[i].uvdc > 132 { + self.segment[i].uvdc = 132; + } + } + } + + fn read_loop_filter_adjustments(&mut self) { + if self.b.read_flag() { + for i in 0usize..4 { + let ref_frame_delta_update_flag = self.b.read_flag(); + + self.ref_delta[i] = if ref_frame_delta_update_flag { + self.b.read_magnitude_and_sign(6) + } else { + 0i32 + }; + } + + for i in 0usize..4 { + let mb_mode_delta_update_flag = self.b.read_flag(); + + self.mode_delta[i] = if mb_mode_delta_update_flag { + self.b.read_magnitude_and_sign(6) + } else { + 0i32 + }; + } + } + } + + fn read_segment_updates(&mut self) { + // Section 9.3 + self.segments_update_map = self.b.read_flag(); + let update_segment_feature_data = self.b.read_flag(); + + if update_segment_feature_data { + let segment_feature_mode = self.b.read_flag(); + + for i in 0usize..MAX_SEGMENTS { + self.segment[i].delta_values = !segment_feature_mode; + } + + for i in 0usize..MAX_SEGMENTS { + let update = self.b.read_flag(); + + self.segment[i].quantizer_level = if update { + self.b.read_magnitude_and_sign(7) + } else { + 0i32 + } as i8; + } + + for i in 0usize..MAX_SEGMENTS { + let update = self.b.read_flag(); + + self.segment[i].loopfilter_level = if update { + self.b.read_magnitude_and_sign(6) + } else { + 0i32 + } as i8; + } + } + + if self.segments_update_map { + for i in 0usize..3 { + let update = self.b.read_flag(); + + self.segment_tree_probs[i] = if update { self.b.read_literal(8) } else { 255 }; + } + } + } + + fn read_frame_header(&mut self) -> ImageResult<()> { + let tag = self.r.read_u24::<LittleEndian>()?; + + self.frame.keyframe = tag & 1 == 0; + self.frame.version = ((tag >> 1) & 7) as u8; + self.frame.for_display = (tag >> 4) & 1 != 0; + + let first_partition_size = tag >> 5; + + if self.frame.keyframe { + let mut tag = [0u8; 3]; + self.r.read_exact(&mut tag)?; + + if tag != [0x9d, 0x01, 0x2a] { + return Err(DecoderError::Vp8MagicInvalid(tag).into()); + } + + let w = self.r.read_u16::<LittleEndian>()?; + let h = self.r.read_u16::<LittleEndian>()?; + + self.frame.width = w & 0x3FFF; + self.frame.height = h & 0x3FFF; + + self.top = init_top_macroblocks(self.frame.width as usize); + // Almost always the first macroblock, except when none exists (i.e. 
`width == 0`) + self.left = self.top.get(0).cloned().unwrap_or_default(); + + self.mbwidth = (self.frame.width + 15) / 16; + self.mbheight = (self.frame.height + 15) / 16; + + self.frame.ybuf = vec![0u8; self.frame.width as usize * self.frame.height as usize]; + self.frame.ubuf = + vec![0u8; self.frame.chroma_width() as usize * self.frame.chroma_height() as usize]; + self.frame.vbuf = + vec![0u8; self.frame.chroma_width() as usize * self.frame.chroma_height() as usize]; + + self.top_border = vec![127u8; self.frame.width as usize + 4 + 16]; + self.left_border = vec![129u8; 1 + 16]; + } + + let mut buf = vec![0; first_partition_size as usize]; + self.r.read_exact(&mut buf)?; + + // initialise binary decoder + self.b.init(buf)?; + + if self.frame.keyframe { + let color_space = self.b.read_literal(1); + self.frame.pixel_type = self.b.read_literal(1); + + if color_space != 0 { + return Err(DecoderError::ColorSpaceInvalid(color_space).into()); + } + } + + self.segments_enabled = self.b.read_flag(); + if self.segments_enabled { + self.read_segment_updates(); + } + + self.frame.filter_type = self.b.read_flag(); + self.frame.filter_level = self.b.read_literal(6); + self.frame.sharpness_level = self.b.read_literal(3); + + let lf_adjust_enable = self.b.read_flag(); + if lf_adjust_enable { + self.read_loop_filter_adjustments(); + } + + self.num_partitions = (1usize << self.b.read_literal(2) as usize) as u8; + let num_partitions = self.num_partitions as usize; + self.init_partitions(num_partitions)?; + + self.read_quantization_indices(); + + if !self.frame.keyframe { + // 9.7 refresh golden frame and altref frame + // FIXME: support this? + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::WebP.into(), + UnsupportedErrorKind::GenericFeature("Non-keyframe frames".to_owned()), + ), + )); + } else { + // Read and discard the refresh_entropy_probs flag; it is irrelevant when only a single keyframe is decoded. + let _ = self.b.read_literal(1); + } + + self.update_token_probabilities(); + + let mb_no_skip_coeff = self.b.read_literal(1); + self.prob_skip_false = if mb_no_skip_coeff == 1 { + Some(self.b.read_literal(8)) + } else { + None + }; + + if !self.frame.keyframe { + // 9.10 remaining frame data + self.prob_intra = 0; + + // FIXME: support this? 
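+ // Inter frames predict from previously decoded reference frames (last, golden and altref),
+ // which this keyframe-only decoder never keeps, so they are rejected here.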
+ return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::WebP.into(), + UnsupportedErrorKind::GenericFeature("Non-keyframe frames".to_owned()), + ), + )); + } else { + // Reset motion vectors + } + + Ok(()) + } + + fn read_macroblock_header(&mut self, mbx: usize) -> ImageResult<MacroBlock> { + let mut mb = MacroBlock::default(); + + if self.segments_enabled && self.segments_update_map { + mb.segmentid = self + .b + .read_with_tree(&SEGMENT_ID_TREE, &self.segment_tree_probs, 0) + as u8; + }; + + mb.coeffs_skipped = if self.prob_skip_false.is_some() { + self.b.read_bool(*self.prob_skip_false.as_ref().unwrap()) + } else { + false + }; + + let inter_predicted = if !self.frame.keyframe { + self.b.read_bool(self.prob_intra) + } else { + false + }; + + if inter_predicted { + return Err(ImageError::Unsupported( + UnsupportedError::from_format_and_kind( + ImageFormat::WebP.into(), + UnsupportedErrorKind::GenericFeature("VP8 inter-prediction".to_owned()), + ), + )); + } + + if self.frame.keyframe { + // intra prediction + let luma = self + .b + .read_with_tree(&KEYFRAME_YMODE_TREE, &KEYFRAME_YMODE_PROBS, 0); + mb.luma_mode = + LumaMode::from_i8(luma).ok_or(DecoderError::LumaPredictionModeInvalid(luma))?; + + match mb.luma_mode.into_intra() { + // `LumaMode::B` - This is predicted individually + None => { + for y in 0usize..4 { + for x in 0usize..4 { + let top = self.top[mbx].bpred[12 + x]; + let left = self.left.bpred[y]; + let intra = self.b.read_with_tree( + &KEYFRAME_BPRED_MODE_TREE, + &KEYFRAME_BPRED_MODE_PROBS[top as usize][left as usize], + 0, + ); + let bmode = IntraMode::from_i8(intra) + .ok_or(DecoderError::IntraPredictionModeInvalid(intra))?; + mb.bpred[x + y * 4] = bmode; + + self.top[mbx].bpred[12 + x] = bmode; + self.left.bpred[y] = bmode; + } + } + } + Some(mode) => { + for i in 0usize..4 { + mb.bpred[12 + i] = mode; + self.left.bpred[i] = mode; + } + } + } + + let chroma = self + .b + .read_with_tree(&KEYFRAME_UV_MODE_TREE, &KEYFRAME_UV_MODE_PROBS, 0); + mb.chroma_mode = ChromaMode::from_i8(chroma) + .ok_or(DecoderError::ChromaPredictionModeInvalid(chroma))?; + } + + self.top[mbx].chroma_mode = mb.chroma_mode; + self.top[mbx].luma_mode = mb.luma_mode; + self.top[mbx].bpred = mb.bpred; + + Ok(mb) + } + + fn intra_predict_luma(&mut self, mbx: usize, mby: usize, mb: &MacroBlock, resdata: &[i32]) { + let stride = 1usize + 16 + 4; + let w = self.frame.width as usize; + let mw = self.mbwidth as usize; + let mut ws = create_border_luma(mbx, mby, mw, &self.top_border, &self.left_border); + + match mb.luma_mode { + LumaMode::V => predict_vpred(&mut ws, 16, 1, 1, stride), + LumaMode::H => predict_hpred(&mut ws, 16, 1, 1, stride), + LumaMode::TM => predict_tmpred(&mut ws, 16, 1, 1, stride), + LumaMode::DC => predict_dcpred(&mut ws, 16, stride, mby != 0, mbx != 0), + LumaMode::B => predict_4x4(&mut ws, stride, &mb.bpred, resdata), + } + + if mb.luma_mode != LumaMode::B { + for y in 0usize..4 { + for x in 0usize..4 { + let i = x + y * 4; + // Create a reference to a [i32; 16] array for add_residue (slices of size 16 do not work). + let rb: &[i32; 16] = resdata[i * 16..][..16].try_into().unwrap(); + let y0 = 1 + y * 4; + let x0 = 1 + x * 4; + + add_residue(&mut ws, rb, y0, x0, stride); + } + } + } + + self.left_border[0] = ws[16]; + + for i in 0usize..16 { + self.top_border[mbx * 16 + i] = ws[16 * stride + 1 + i]; + self.left_border[i + 1] = ws[(i + 1) * stride + 16]; + } + + // Length is the remainder to the border, but maximally the current chunk. 
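+ // e.g. for a frame 100 pixels wide the right-most macroblock column starts at x = 96,
+ // so xlength = min(100 - 96, 16) = 4 and only those four columns are copied into `ybuf`.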
+ let ylength = cmp::min(self.frame.height as usize - mby * 16, 16); + let xlength = cmp::min(self.frame.width as usize - mbx * 16, 16); + + for y in 0usize..ylength { + for x in 0usize..xlength { + self.frame.ybuf[(mby * 16 + y) * w + mbx * 16 + x] = ws[(1 + y) * stride + 1 + x]; + } + } + } + + fn intra_predict_chroma(&mut self, mbx: usize, mby: usize, mb: &MacroBlock, resdata: &[i32]) { + let stride = 1usize + 8; + + let w = self.frame.chroma_width() as usize; + + //8x8 with left top border of 1 + let mut uws = [0u8; (8 + 1) * (8 + 1)]; + let mut vws = [0u8; (8 + 1) * (8 + 1)]; + + let ylength = cmp::min(self.frame.chroma_height() as usize - mby * 8, 8); + let xlength = cmp::min(self.frame.chroma_width() as usize - mbx * 8, 8); + + //left border + for y in 0usize..8 { + let (uy, vy) = if mbx == 0 || y >= ylength { + (129, 129) + } else { + let index = (mby * 8 + y) * w + ((mbx - 1) * 8 + 7); + (self.frame.ubuf[index], self.frame.vbuf[index]) + }; + + uws[(y + 1) * stride] = uy; + vws[(y + 1) * stride] = vy; + } + //top border + for x in 0usize..8 { + let (ux, vx) = if mby == 0 || x >= xlength { + (127, 127) + } else { + let index = ((mby - 1) * 8 + 7) * w + (mbx * 8 + x); + (self.frame.ubuf[index], self.frame.vbuf[index]) + }; + + uws[x + 1] = ux; + vws[x + 1] = vx; + } + + //top left point + let (u1, v1) = if mby == 0 { + (127, 127) + } else if mbx == 0 { + (129, 129) + } else { + let index = ((mby - 1) * 8 + 7) * w + (mbx - 1) * 8 + 7; + if index >= self.frame.ubuf.len() { + (127, 127) + } else { + (self.frame.ubuf[index], self.frame.vbuf[index]) + } + }; + + uws[0] = u1; + vws[0] = v1; + + match mb.chroma_mode { + ChromaMode::DC => { + predict_dcpred(&mut uws, 8, stride, mby != 0, mbx != 0); + predict_dcpred(&mut vws, 8, stride, mby != 0, mbx != 0); + } + ChromaMode::V => { + predict_vpred(&mut uws, 8, 1, 1, stride); + predict_vpred(&mut vws, 8, 1, 1, stride); + } + ChromaMode::H => { + predict_hpred(&mut uws, 8, 1, 1, stride); + predict_hpred(&mut vws, 8, 1, 1, stride); + } + ChromaMode::TM => { + predict_tmpred(&mut uws, 8, 1, 1, stride); + predict_tmpred(&mut vws, 8, 1, 1, stride); + } + } + + for y in 0usize..2 { + for x in 0usize..2 { + let i = x + y * 2; + let urb: &[i32; 16] = resdata[16 * 16 + i * 16..][..16].try_into().unwrap(); + + let y0 = 1 + y * 4; + let x0 = 1 + x * 4; + add_residue(&mut uws, urb, y0, x0, stride); + + let vrb: &[i32; 16] = resdata[20 * 16 + i * 16..][..16].try_into().unwrap(); + + add_residue(&mut vws, vrb, y0, x0, stride); + } + } + + for y in 0usize..ylength { + for x in 0usize..xlength { + self.frame.ubuf[(mby * 8 + y) * w + mbx * 8 + x] = uws[(1 + y) * stride + 1 + x]; + self.frame.vbuf[(mby * 8 + y) * w + mbx * 8 + x] = vws[(1 + y) * stride + 1 + x]; + } + } + } + + fn read_coefficients( + &mut self, + block: &mut [i32], + p: usize, + plane: usize, + complexity: usize, + dcq: i16, + acq: i16, + ) -> bool { + let first = if plane == 0 { 1usize } else { 0usize }; + let probs = &self.token_probs[plane]; + let tree = &DCT_TOKEN_TREE; + + let mut complexity = complexity; + let mut has_coefficients = false; + let mut skip = false; + + for i in first..16usize { + let table = &probs[COEFF_BANDS[i] as usize][complexity]; + + let token = if !skip { + self.partitions[p].read_with_tree(tree, table, 0) + } else { + self.partitions[p].read_with_tree(tree, table, 2) + }; + + let mut abs_value = i32::from(match token { + DCT_EOB => break, + + DCT_0 => { + skip = true; + has_coefficients = true; + complexity = 0; + continue; + } + + literal @ DCT_1..=DCT_4 => 
i16::from(literal), + + category @ DCT_CAT1..=DCT_CAT6 => { + let t = PROB_DCT_CAT[(category - DCT_CAT1) as usize]; + + let mut extra = 0i16; + let mut j = 0; + + while t[j] > 0 { + extra = extra + extra + self.partitions[p].read_bool(t[j]) as i16; + j += 1; + } + + i16::from(DCT_CAT_BASE[(category - DCT_CAT1) as usize]) + extra + } + + c => panic!("unknown token: {}", c), + }); + + skip = false; + + complexity = if abs_value == 0 { + 0 + } else if abs_value == 1 { + 1 + } else { + 2 + }; + + if self.partitions[p].read_bool(128) { + abs_value = -abs_value; + } + + block[ZIGZAG[i] as usize] = + abs_value * i32::from(if ZIGZAG[i] > 0 { acq } else { dcq }); + + has_coefficients = true; + } + + has_coefficients + } + + fn read_residual_data(&mut self, mb: &MacroBlock, mbx: usize, p: usize) -> [i32; 384] { + let sindex = mb.segmentid as usize; + let mut blocks = [0i32; 384]; + let mut plane = if mb.luma_mode == LumaMode::B { 3 } else { 1 }; + + if plane == 1 { + let complexity = self.top[mbx].complexity[0] + self.left.complexity[0]; + let mut block = [0i32; 16]; + let dcq = self.segment[sindex].y2dc; + let acq = self.segment[sindex].y2ac; + let n = self.read_coefficients(&mut block, p, plane, complexity as usize, dcq, acq); + + self.left.complexity[0] = if n { 1 } else { 0 }; + self.top[mbx].complexity[0] = if n { 1 } else { 0 }; + + transform::iwht4x4(&mut block); + + for k in 0usize..16 { + blocks[16 * k] = block[k]; + } + + plane = 0; + } + + for y in 0usize..4 { + let mut left = self.left.complexity[y + 1]; + for x in 0usize..4 { + let i = x + y * 4; + let block = &mut blocks[i * 16..i * 16 + 16]; + + let complexity = self.top[mbx].complexity[x + 1] + left; + let dcq = self.segment[sindex].ydc; + let acq = self.segment[sindex].yac; + + let n = self.read_coefficients(block, p, plane, complexity as usize, dcq, acq); + + if block[0] != 0 || n { + transform::idct4x4(block); + } + + left = if n { 1 } else { 0 }; + self.top[mbx].complexity[x + 1] = if n { 1 } else { 0 }; + } + + self.left.complexity[y + 1] = left; + } + + plane = 2; + + for &j in &[5usize, 7usize] { + for y in 0usize..2 { + let mut left = self.left.complexity[y + j]; + + for x in 0usize..2 { + let i = x + y * 2 + if j == 5 { 16 } else { 20 }; + let block = &mut blocks[i * 16..i * 16 + 16]; + + let complexity = self.top[mbx].complexity[x + j] + left; + let dcq = self.segment[sindex].uvdc; + let acq = self.segment[sindex].uvac; + + let n = self.read_coefficients(block, p, plane, complexity as usize, dcq, acq); + if block[0] != 0 || n { + transform::idct4x4(block); + } + + left = if n { 1 } else { 0 }; + self.top[mbx].complexity[x + j] = if n { 1 } else { 0 }; + } + + self.left.complexity[y + j] = left; + } + } + + blocks + } + + /// Does loop filtering on the macroblock + fn loop_filter(&mut self, mbx: usize, mby: usize, mb: &MacroBlock) { + let luma_w = self.frame.width as usize; + let luma_h = self.frame.height as usize; + let chroma_w = self.frame.chroma_width() as usize; + let chroma_h = self.frame.chroma_height() as usize; + + let (filter_level, interior_limit, hev_threshold) = self.calculate_filter_parameters(mb); + + if filter_level > 0 { + let mbedge_limit = (filter_level + 2) * 2 + interior_limit; + let sub_bedge_limit = (filter_level * 2) + interior_limit; + + let luma_ylength = cmp::min(luma_h - 16 * mby, 16); + let luma_xlength = cmp::min(luma_w - 16 * mbx, 16); + + let chroma_ylength = cmp::min(chroma_h - 8 * mby, 8); + let chroma_xlength = cmp::min(chroma_w - 8 * mbx, 8); + + //filter across left of macroblock + if 
mbx > 0 { + //simple loop filtering + if self.frame.filter_type { + if luma_xlength >= 2 { + for y in 0usize..luma_ylength { + let y0 = mby * 16 + y; + let x0 = mbx * 16; + + loop_filter::simple_segment( + mbedge_limit, + &mut self.frame.ybuf[..], + y0 * luma_w + x0, + 1, + ); + } + } + } else { + if luma_xlength >= 4 { + for y in 0usize..luma_ylength { + let y0 = mby * 16 + y; + let x0 = mbx * 16; + + loop_filter::macroblock_filter( + hev_threshold, + interior_limit, + mbedge_limit, + &mut self.frame.ybuf[..], + y0 * luma_w + x0, + 1, + ); + } + } + + if chroma_xlength >= 4 { + for y in 0usize..chroma_ylength { + let y0 = mby * 8 + y; + let x0 = mbx * 8; + + loop_filter::macroblock_filter( + hev_threshold, + interior_limit, + mbedge_limit, + &mut self.frame.ubuf[..], + y0 * chroma_w + x0, + 1, + ); + loop_filter::macroblock_filter( + hev_threshold, + interior_limit, + mbedge_limit, + &mut self.frame.vbuf[..], + y0 * chroma_w + x0, + 1, + ); + } + } + } + } + + //filter across vertical subblocks in macroblock + if mb.luma_mode == LumaMode::B || !mb.coeffs_skipped { + if self.frame.filter_type { + for x in (4usize..luma_xlength - 1).step_by(4) { + for y in 0..luma_ylength { + let y0 = mby * 16 + y; + let x0 = mbx * 16 + x; + + loop_filter::simple_segment( + sub_bedge_limit, + &mut self.frame.ybuf[..], + y0 * luma_w + x0, + 1, + ); + } + } + } else { + if luma_xlength > 3 { + for x in (4usize..luma_xlength - 3).step_by(4) { + for y in 0..luma_ylength { + let y0 = mby * 16 + y; + let x0 = mbx * 16 + x; + + loop_filter::subblock_filter( + hev_threshold, + interior_limit, + sub_bedge_limit, + &mut self.frame.ybuf[..], + y0 * luma_w + x0, + 1, + ); + } + } + } + + if chroma_xlength == 8 { + for y in 0usize..chroma_ylength { + let y0 = mby * 8 + y; + let x0 = mbx * 8 + 4; + + loop_filter::subblock_filter( + hev_threshold, + interior_limit, + sub_bedge_limit, + &mut self.frame.ubuf[..], + y0 * chroma_w + x0, + 1, + ); + + loop_filter::subblock_filter( + hev_threshold, + interior_limit, + sub_bedge_limit, + &mut self.frame.vbuf[..], + y0 * chroma_w + x0, + 1, + ); + } + } + } + } + + //filter across top of macroblock + if mby > 0 { + if self.frame.filter_type { + if luma_ylength >= 2 { + for x in 0usize..luma_xlength { + let y0 = mby * 16; + let x0 = mbx * 16 + x; + + loop_filter::simple_segment( + mbedge_limit, + &mut self.frame.ybuf[..], + y0 * luma_w + x0, + luma_w, + ); + } + } + } else { + //if bottom macroblock, can only filter if there is 3 pixels below + if luma_ylength >= 4 { + for x in 0usize..luma_xlength { + let y0 = mby * 16; + let x0 = mbx * 16 + x; + + loop_filter::macroblock_filter( + hev_threshold, + interior_limit, + mbedge_limit, + &mut self.frame.ybuf[..], + y0 * luma_w + x0, + luma_w, + ); + } + } + + if chroma_ylength >= 4 { + for x in 0usize..chroma_xlength { + let y0 = mby * 8; + let x0 = mbx * 8 + x; + + loop_filter::macroblock_filter( + hev_threshold, + interior_limit, + mbedge_limit, + &mut self.frame.ubuf[..], + y0 * chroma_w + x0, + chroma_w, + ); + loop_filter::macroblock_filter( + hev_threshold, + interior_limit, + mbedge_limit, + &mut self.frame.vbuf[..], + y0 * chroma_w + x0, + chroma_w, + ); + } + } + } + } + + //filter across horizontal subblock edges within the macroblock + if mb.luma_mode == LumaMode::B || !mb.coeffs_skipped { + if self.frame.filter_type { + for y in (4usize..luma_ylength - 1).step_by(4) { + for x in 0..luma_xlength { + let y0 = mby * 16 + y; + let x0 = mbx * 16 + x; + + loop_filter::simple_segment( + sub_bedge_limit, + &mut self.frame.ybuf[..], 
+ y0 * luma_w + x0, + luma_w, + ); + } + } + } else { + if luma_ylength > 3 { + for y in (4usize..luma_ylength - 3).step_by(4) { + for x in 0..luma_xlength { + let y0 = mby * 16 + y; + let x0 = mbx * 16 + x; + + loop_filter::subblock_filter( + hev_threshold, + interior_limit, + sub_bedge_limit, + &mut self.frame.ybuf[..], + y0 * luma_w + x0, + luma_w, + ); + } + } + } + + if chroma_ylength == 8 { + for x in 0..chroma_xlength { + let y0 = mby * 8 + 4; + let x0 = mbx * 8 + x; + + loop_filter::subblock_filter( + hev_threshold, + interior_limit, + sub_bedge_limit, + &mut self.frame.ubuf[..], + y0 * chroma_w + x0, + chroma_w, + ); + + loop_filter::subblock_filter( + hev_threshold, + interior_limit, + sub_bedge_limit, + &mut self.frame.vbuf[..], + y0 * chroma_w + x0, + chroma_w, + ); + } + } + } + } + } + } + + //return values are the filter level, interior limit and hev threshold + fn calculate_filter_parameters(&self, macroblock: &MacroBlock) -> (u8, u8, u8) { + let segment = self.segment[macroblock.segmentid as usize]; + let mut filter_level = self.frame.filter_level as i32; + + if self.segments_enabled { + if segment.delta_values { + filter_level += i32::from(segment.loopfilter_level); + } else { + filter_level = i32::from(segment.loopfilter_level); + } + } + + filter_level = clamp(filter_level, 0, 63); + + if macroblock.luma_mode == LumaMode::B { + filter_level += self.mode_delta[0]; + } + + let filter_level = clamp(filter_level, 0, 63) as u8; + + //interior limit + let mut interior_limit = filter_level; + + if self.frame.sharpness_level > 0 { + interior_limit >>= if self.frame.sharpness_level > 4 { 2 } else { 1 }; + + if interior_limit > 9 - self.frame.sharpness_level { + interior_limit = 9 - self.frame.sharpness_level; + } + } + + if interior_limit == 0 { + interior_limit = 1; + } + + //high edge variance threshold + let mut hev_threshold = 0; + + #[allow(clippy::collapsible_else_if)] + if self.frame.keyframe { + if filter_level >= 40 { + hev_threshold = 2; + } else { + hev_threshold = 1; + } + } else { + if filter_level >= 40 { + hev_threshold = 3; + } else if filter_level >= 20 { + hev_threshold = 2; + } else if filter_level >= 15 { + hev_threshold = 1; + } + } + + (filter_level, interior_limit, hev_threshold) + } + + /// Decodes the current frame + pub fn decode_frame(&mut self) -> ImageResult<&Frame> { + self.read_frame_header()?; + + for mby in 0..self.mbheight as usize { + let p = mby % self.num_partitions as usize; + self.left = MacroBlock::default(); + + for mbx in 0..self.mbwidth as usize { + let mb = self.read_macroblock_header(mbx)?; + let blocks = if !mb.coeffs_skipped { + self.read_residual_data(&mb, mbx, p) + } else { + if mb.luma_mode != LumaMode::B { + self.left.complexity[0] = 0; + self.top[mbx].complexity[0] = 0; + } + + for i in 1usize..9 { + self.left.complexity[i] = 0; + self.top[mbx].complexity[i] = 0; + } + + [0i32; 384] + }; + + self.intra_predict_luma(mbx, mby, &mb, &blocks); + self.intra_predict_chroma(mbx, mby, &mb, &blocks); + + self.macroblocks.push(mb); + } + + self.left_border = vec![129u8; 1 + 16]; + } + + //do loop filtering + for mby in 0..self.mbheight as usize { + for mbx in 0..self.mbwidth as usize { + let mb = self.macroblocks[mby * self.mbwidth as usize + mbx]; + self.loop_filter(mbx, mby, &mb); + } + } + + Ok(&self.frame) + } +} + +impl LumaMode { + fn from_i8(val: i8) -> Option<Self> { + Some(match val { + DC_PRED => LumaMode::DC, + V_PRED => LumaMode::V, + H_PRED => LumaMode::H, + TM_PRED => LumaMode::TM, + B_PRED => LumaMode::B, + _ => return 
None, + }) + } + + fn into_intra(self) -> Option<IntraMode> { + Some(match self { + LumaMode::DC => IntraMode::DC, + LumaMode::V => IntraMode::VE, + LumaMode::H => IntraMode::HE, + LumaMode::TM => IntraMode::TM, + LumaMode::B => return None, + }) + } +} + +impl Default for LumaMode { + fn default() -> Self { + LumaMode::DC + } +} + +impl ChromaMode { + fn from_i8(val: i8) -> Option<Self> { + Some(match val { + DC_PRED => ChromaMode::DC, + V_PRED => ChromaMode::V, + H_PRED => ChromaMode::H, + TM_PRED => ChromaMode::TM, + _ => return None, + }) + } +} + +impl Default for ChromaMode { + fn default() -> Self { + ChromaMode::DC + } +} + +impl IntraMode { + fn from_i8(val: i8) -> Option<Self> { + Some(match val { + B_DC_PRED => IntraMode::DC, + B_TM_PRED => IntraMode::TM, + B_VE_PRED => IntraMode::VE, + B_HE_PRED => IntraMode::HE, + B_LD_PRED => IntraMode::LD, + B_RD_PRED => IntraMode::RD, + B_VR_PRED => IntraMode::VR, + B_VL_PRED => IntraMode::VL, + B_HD_PRED => IntraMode::HD, + B_HU_PRED => IntraMode::HU, + _ => return None, + }) + } +} + +impl Default for IntraMode { + fn default() -> Self { + IntraMode::DC + } +} + +fn init_top_macroblocks(width: usize) -> Vec<MacroBlock> { + let mb_width = (width + 15) / 16; + + let mb = MacroBlock { + // Section 11.3 #3 + bpred: [IntraMode::DC; 16], + luma_mode: LumaMode::DC, + ..MacroBlock::default() + }; + + vec![mb; mb_width] +} + +fn create_border_luma(mbx: usize, mby: usize, mbw: usize, top: &[u8], left: &[u8]) -> [u8; 357] { + let stride = 1usize + 16 + 4; + let mut ws = [0u8; (1 + 16) * (1 + 16 + 4)]; + + // A + { + let above = &mut ws[1..stride]; + if mby == 0 { + for above in above.iter_mut() { + *above = 127; + } + } else { + for i in 0usize..16 { + above[i] = top[mbx * 16 + i]; + } + + if mbx == mbw - 1 { + for above in above.iter_mut().skip(16) { + *above = top[mbx * 16 + 15]; + } + } else { + for i in 16usize..above.len() { + above[i] = top[mbx * 16 + i]; + } + } + } + } + + for i in 17usize..stride { + ws[4 * stride + i] = ws[i]; + ws[8 * stride + i] = ws[i]; + ws[12 * stride + i] = ws[i]; + } + + // L + if mbx == 0 { + for i in 0usize..16 { + ws[(i + 1) * stride] = 129; + } + } else { + for i in 0usize..16 { + ws[(i + 1) * stride] = left[i + 1]; + } + } + + // P + ws[0] = if mby == 0 { + 127 + } else if mbx == 0 { + 129 + } else { + left[0] + }; + + ws +} + +fn avg3(left: u8, this: u8, right: u8) -> u8 { + let avg = (u16::from(left) + 2 * u16::from(this) + u16::from(right) + 2) >> 2; + avg as u8 +} + +fn avg2(this: u8, right: u8) -> u8 { + let avg = (u16::from(this) + u16::from(right) + 1) >> 1; + avg as u8 +} + +// Only 16 elements from rblock are used to add residue, so it is restricted to 16 elements +// to enable SIMD and other optimizations. 
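+// Each predicted sample is summed with its residue and clamped to the 0..=255 byte range,
+// e.g. 8 + 250 = 258 clamps to 255 and 11 + (-192) = -181 clamps to 0 (see `test_add_residue`).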
+fn add_residue(pblock: &mut [u8], rblock: &[i32; 16], y0: usize, x0: usize, stride: usize) { + let mut pos = y0 * stride + x0; + for row in rblock.chunks(4) { + for (p, &a) in pblock[pos..pos + 4].iter_mut().zip(row.iter()) { + *p = clamp(a + i32::from(*p), 0, 255) as u8; + } + pos += stride; + } +} + +fn predict_4x4(ws: &mut [u8], stride: usize, modes: &[IntraMode], resdata: &[i32]) { + for sby in 0usize..4 { + for sbx in 0usize..4 { + let i = sbx + sby * 4; + let y0 = sby * 4 + 1; + let x0 = sbx * 4 + 1; + + match modes[i] { + IntraMode::TM => predict_tmpred(ws, 4, x0, y0, stride), + IntraMode::VE => predict_bvepred(ws, x0, y0, stride), + IntraMode::HE => predict_bhepred(ws, x0, y0, stride), + IntraMode::DC => predict_bdcpred(ws, x0, y0, stride), + IntraMode::LD => predict_bldpred(ws, x0, y0, stride), + IntraMode::RD => predict_brdpred(ws, x0, y0, stride), + IntraMode::VR => predict_bvrpred(ws, x0, y0, stride), + IntraMode::VL => predict_bvlpred(ws, x0, y0, stride), + IntraMode::HD => predict_bhdpred(ws, x0, y0, stride), + IntraMode::HU => predict_bhupred(ws, x0, y0, stride), + } + + let rb: &[i32; 16] = resdata[i * 16..][..16].try_into().unwrap(); + add_residue(ws, rb, y0, x0, stride); + } + } +} + +fn predict_vpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) { + for y in 0usize..size { + for x in 0usize..size { + a[(x + x0) + stride * (y + y0)] = a[(x + x0) + stride * (y0 + y - 1)]; + } + } +} + +fn predict_hpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) { + for y in 0usize..size { + for x in 0usize..size { + a[(x + x0) + stride * (y + y0)] = a[(x + x0 - 1) + stride * (y0 + y)]; + } + } +} + +fn predict_dcpred(a: &mut [u8], size: usize, stride: usize, above: bool, left: bool) { + let mut sum = 0; + let mut shf = if size == 8 { 2 } else { 3 }; + + if left { + for y in 0usize..size { + sum += u32::from(a[(y + 1) * stride]); + } + + shf += 1; + } + + if above { + for x in 0usize..size { + sum += u32::from(a[x + 1]); + } + + shf += 1; + } + + let dcval = if !left && !above { + 128 + } else { + (sum + (1 << (shf - 1))) >> shf + }; + + for y in 0usize..size { + for x in 0usize..size { + a[(x + 1) + stride * (y + 1)] = dcval as u8; + } + } +} + +fn predict_tmpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) { + for y in 0usize..size { + for x in 0usize..size { + let pred = i32::from(a[(y0 + y) * stride + x0 - 1]) + + i32::from(a[(y0 - 1) * stride + x0 + x]) + - i32::from(a[(y0 - 1) * stride + x0 - 1]); + + a[(x + x0) + stride * (y + y0)] = clamp(pred, 0, 255) as u8; + } + } +} + +fn predict_bdcpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) { + let mut v = 4; + for i in 0usize..4 { + v += u32::from(a[(y0 + i) * stride + x0 - 1]) + u32::from(a[(y0 - 1) * stride + x0 + i]); + } + + v >>= 3; + for y in 0usize..4 { + for x in 0usize..4 { + a[x + x0 + stride * (y + y0)] = v as u8; + } + } +} + +fn topleft_pixel(a: &[u8], x0: usize, y0: usize, stride: usize) -> u8 { + a[(y0 - 1) * stride + x0 - 1] +} + +fn top_pixels(a: &[u8], x0: usize, y0: usize, stride: usize) -> (u8, u8, u8, u8, u8, u8, u8, u8) { + let pos = (y0 - 1) * stride + x0; + let a_slice = &a[pos..pos + 8]; + let a0 = a_slice[0]; + let a1 = a_slice[1]; + let a2 = a_slice[2]; + let a3 = a_slice[3]; + let a4 = a_slice[4]; + let a5 = a_slice[5]; + let a6 = a_slice[6]; + let a7 = a_slice[7]; + + (a0, a1, a2, a3, a4, a5, a6, a7) +} + +fn left_pixels(a: &[u8], x0: usize, y0: usize, stride: usize) -> (u8, u8, u8, u8) { + let l0 = a[y0 * stride + x0 - 1]; + let l1 = a[(y0 + 
1) * stride + x0 - 1]; + let l2 = a[(y0 + 2) * stride + x0 - 1]; + let l3 = a[(y0 + 3) * stride + x0 - 1]; + + (l0, l1, l2, l3) +} + +fn edge_pixels( + a: &[u8], + x0: usize, + y0: usize, + stride: usize, +) -> (u8, u8, u8, u8, u8, u8, u8, u8, u8) { + let pos = (y0 - 1) * stride + x0 - 1; + let a_slice = &a[pos..=pos + 4]; + let e0 = a[pos + 4 * stride]; + let e1 = a[pos + 3 * stride]; + let e2 = a[pos + 2 * stride]; + let e3 = a[pos + stride]; + let e4 = a_slice[0]; + let e5 = a_slice[1]; + let e6 = a_slice[2]; + let e7 = a_slice[3]; + let e8 = a_slice[4]; + + (e0, e1, e2, e3, e4, e5, e6, e7, e8) +} + +fn predict_bvepred(a: &mut [u8], x0: usize, y0: usize, stride: usize) { + let p = topleft_pixel(a, x0, y0, stride); + let (a0, a1, a2, a3, a4, _, _, _) = top_pixels(a, x0, y0, stride); + let avg_1 = avg3(p, a0, a1); + let avg_2 = avg3(a0, a1, a2); + let avg_3 = avg3(a1, a2, a3); + let avg_4 = avg3(a2, a3, a4); + + let avg = [avg_1, avg_2, avg_3, avg_4]; + + let mut pos = y0 * stride + x0; + for _ in 0..4 { + a[pos..=pos + 3].copy_from_slice(&avg); + pos += stride; + } +} + +fn predict_bhepred(a: &mut [u8], x0: usize, y0: usize, stride: usize) { + let p = topleft_pixel(a, x0, y0, stride); + let (l0, l1, l2, l3) = left_pixels(a, x0, y0, stride); + + let avgs = [ + avg3(p, l0, l1), + avg3(l0, l1, l2), + avg3(l1, l2, l3), + avg3(l2, l3, l3), + ]; + + let mut pos = y0 * stride + x0; + for &avg in avgs.iter() { + for a_p in a[pos..=pos + 3].iter_mut() { + *a_p = avg; + } + pos += stride; + } +} + +fn predict_bldpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) { + let (a0, a1, a2, a3, a4, a5, a6, a7) = top_pixels(a, x0, y0, stride); + + let avgs = [ + avg3(a0, a1, a2), + avg3(a1, a2, a3), + avg3(a2, a3, a4), + avg3(a3, a4, a5), + avg3(a4, a5, a6), + avg3(a5, a6, a7), + avg3(a6, a7, a7), + ]; + + let mut pos = y0 * stride + x0; + + for i in 0..4 { + a[pos..=pos + 3].copy_from_slice(&avgs[i..=i + 3]); + pos += stride; + } +} + +fn predict_brdpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) { + let (e0, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(a, x0, y0, stride); + + let avgs = [ + avg3(e0, e1, e2), + avg3(e1, e2, e3), + avg3(e2, e3, e4), + avg3(e3, e4, e5), + avg3(e4, e5, e6), + avg3(e5, e6, e7), + avg3(e6, e7, e8), + ]; + let mut pos = y0 * stride + x0; + + for i in 0..4 { + a[pos..=pos + 3].copy_from_slice(&avgs[3 - i..7 - i]); + pos += stride; + } +} + +fn predict_bvrpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) { + let (_, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(a, x0, y0, stride); + + a[(y0 + 3) * stride + x0] = avg3(e1, e2, e3); + a[(y0 + 2) * stride + x0] = avg3(e2, e3, e4); + a[(y0 + 3) * stride + x0 + 1] = avg3(e3, e4, e5); + a[(y0 + 1) * stride + x0] = avg3(e3, e4, e5); + a[(y0 + 2) * stride + x0 + 1] = avg2(e4, e5); + a[y0 * stride + x0] = avg2(e4, e5); + a[(y0 + 3) * stride + x0 + 2] = avg3(e4, e5, e6); + a[(y0 + 1) * stride + x0 + 1] = avg3(e4, e5, e6); + a[(y0 + 2) * stride + x0 + 2] = avg2(e5, e6); + a[y0 * stride + x0 + 1] = avg2(e5, e6); + a[(y0 + 3) * stride + x0 + 3] = avg3(e5, e6, e7); + a[(y0 + 1) * stride + x0 + 2] = avg3(e5, e6, e7); + a[(y0 + 2) * stride + x0 + 3] = avg2(e6, e7); + a[y0 * stride + x0 + 2] = avg2(e6, e7); + a[(y0 + 1) * stride + x0 + 3] = avg3(e6, e7, e8); + a[y0 * stride + x0 + 3] = avg2(e7, e8); +} + +fn predict_bvlpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) { + let (a0, a1, a2, a3, a4, a5, a6, a7) = top_pixels(a, x0, y0, stride); + + a[y0 * stride + x0] = avg2(a0, a1); + a[(y0 + 1) * stride + x0] = avg3(a0, a1, 
a2); + a[(y0 + 2) * stride + x0] = avg2(a1, a2); + a[y0 * stride + x0 + 1] = avg2(a1, a2); + a[(y0 + 1) * stride + x0 + 1] = avg3(a1, a2, a3); + a[(y0 + 3) * stride + x0] = avg3(a1, a2, a3); + a[(y0 + 2) * stride + x0 + 1] = avg2(a2, a3); + a[y0 * stride + x0 + 2] = avg2(a2, a3); + a[(y0 + 3) * stride + x0 + 1] = avg3(a2, a3, a4); + a[(y0 + 1) * stride + x0 + 2] = avg3(a2, a3, a4); + a[(y0 + 2) * stride + x0 + 2] = avg2(a3, a4); + a[y0 * stride + x0 + 3] = avg2(a3, a4); + a[(y0 + 3) * stride + x0 + 2] = avg3(a3, a4, a5); + a[(y0 + 1) * stride + x0 + 3] = avg3(a3, a4, a5); + a[(y0 + 2) * stride + x0 + 3] = avg3(a4, a5, a6); + a[(y0 + 3) * stride + x0 + 3] = avg3(a5, a6, a7); +} + +fn predict_bhdpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) { + let (e0, e1, e2, e3, e4, e5, e6, e7, _) = edge_pixels(a, x0, y0, stride); + + a[(y0 + 3) * stride + x0] = avg2(e0, e1); + a[(y0 + 3) * stride + x0 + 1] = avg3(e0, e1, e2); + a[(y0 + 2) * stride + x0] = avg2(e1, e2); + a[(y0 + 3) * stride + x0 + 2] = avg2(e1, e2); + a[(y0 + 2) * stride + x0 + 1] = avg3(e1, e2, e3); + a[(y0 + 3) * stride + x0 + 3] = avg3(e1, e2, e3); + a[(y0 + 2) * stride + x0 + 2] = avg2(e2, e3); + a[(y0 + 1) * stride + x0] = avg2(e2, e3); + a[(y0 + 2) * stride + x0 + 3] = avg3(e2, e3, e4); + a[(y0 + 1) * stride + x0 + 1] = avg3(e2, e3, e4); + a[(y0 + 1) * stride + x0 + 2] = avg2(e3, e4); + a[y0 * stride + x0] = avg2(e3, e4); + a[(y0 + 1) * stride + x0 + 3] = avg3(e3, e4, e5); + a[y0 * stride + x0 + 1] = avg3(e3, e4, e5); + a[y0 * stride + x0 + 2] = avg3(e4, e5, e6); + a[y0 * stride + x0 + 3] = avg3(e5, e6, e7); +} + +fn predict_bhupred(a: &mut [u8], x0: usize, y0: usize, stride: usize) { + let (l0, l1, l2, l3) = left_pixels(a, x0, y0, stride); + + a[y0 * stride + x0] = avg2(l0, l1); + a[y0 * stride + x0 + 1] = avg3(l0, l1, l2); + a[y0 * stride + x0 + 2] = avg2(l1, l2); + a[(y0 + 1) * stride + x0] = avg2(l1, l2); + a[y0 * stride + x0 + 3] = avg3(l1, l2, l3); + a[(y0 + 1) * stride + x0 + 1] = avg3(l1, l2, l3); + a[(y0 + 1) * stride + x0 + 2] = avg2(l2, l3); + a[(y0 + 2) * stride + x0] = avg2(l2, l3); + a[(y0 + 1) * stride + x0 + 3] = avg3(l2, l3, l3); + a[(y0 + 2) * stride + x0 + 1] = avg3(l2, l3, l3); + a[(y0 + 2) * stride + x0 + 2] = l3; + a[(y0 + 2) * stride + x0 + 3] = l3; + a[(y0 + 3) * stride + x0] = l3; + a[(y0 + 3) * stride + x0 + 1] = l3; + a[(y0 + 3) * stride + x0 + 2] = l3; + a[(y0 + 3) * stride + x0 + 3] = l3; +} + +#[cfg(test)] +mod test { + + #[cfg(feature = "benchmarks")] + extern crate test; + use super::{ + add_residue, avg2, avg3, edge_pixels, predict_bhepred, predict_bldpred, predict_brdpred, + predict_bvepred, top_pixels, + }; + #[cfg(feature = "benchmarks")] + use super::{predict_4x4, IntraMode}; + #[cfg(feature = "benchmarks")] + use test::{black_box, Bencher}; + + #[cfg(feature = "benchmarks")] + const W: usize = 256; + #[cfg(feature = "benchmarks")] + const H: usize = 256; + + #[cfg(feature = "benchmarks")] + fn make_sample_image() -> Vec<u8> { + let mut v = Vec::with_capacity((W * H * 4) as usize); + for c in 0u8..=255 { + for k in 0u8..=255 { + v.push(c); + v.push(0); + v.push(0); + v.push(k); + } + } + v + } + + #[cfg(feature = "benchmarks")] + #[bench] + fn bench_predict_4x4(b: &mut Bencher) { + let mut v = black_box(make_sample_image()); + + let res_data = vec![1i32; W * H * 4]; + let modes = [ + IntraMode::TM, + IntraMode::VE, + IntraMode::HE, + IntraMode::DC, + IntraMode::LD, + IntraMode::RD, + IntraMode::VR, + IntraMode::VL, + IntraMode::HD, + IntraMode::HU, + IntraMode::TM, + IntraMode::VE, + 
IntraMode::HE, + IntraMode::DC, + IntraMode::LD, + IntraMode::RD, + ]; + + b.iter(|| { + black_box(predict_4x4(&mut v, W * 2, &modes, &res_data)); + }); + } + + #[cfg(feature = "benchmarks")] + #[bench] + fn bench_predict_bvepred(b: &mut Bencher) { + let mut v = make_sample_image(); + + b.iter(|| { + predict_bvepred(black_box(&mut v), 5, 5, W * 2); + }); + } + + #[cfg(feature = "benchmarks")] + #[bench] + fn bench_predict_bldpred(b: &mut Bencher) { + let mut v = black_box(make_sample_image()); + + b.iter(|| { + black_box(predict_bldpred(black_box(&mut v), 5, 5, W * 2)); + }); + } + + #[cfg(feature = "benchmarks")] + #[bench] + fn bench_predict_brdpred(b: &mut Bencher) { + let mut v = black_box(make_sample_image()); + + b.iter(|| { + black_box(predict_brdpred(black_box(&mut v), 5, 5, W * 2)); + }); + } + + #[cfg(feature = "benchmarks")] + #[bench] + fn bench_predict_bhepred(b: &mut Bencher) { + let mut v = black_box(make_sample_image()); + + b.iter(|| { + black_box(predict_bhepred(black_box(&mut v), 5, 5, W * 2)); + }); + } + + #[cfg(feature = "benchmarks")] + #[bench] + fn bench_top_pixels(b: &mut Bencher) { + let v = black_box(make_sample_image()); + + b.iter(|| { + black_box(top_pixels(black_box(&v), 5, 5, W * 2)); + }); + } + + #[cfg(feature = "benchmarks")] + #[bench] + fn bench_edge_pixels(b: &mut Bencher) { + let v = black_box(make_sample_image()); + + b.iter(|| { + black_box(edge_pixels(black_box(&v), 5, 5, W * 2)); + }); + } + + #[test] + fn test_avg2() { + for i in 0u8..=255 { + for j in 0u8..=255 { + let ceil_avg = ((i as f32) + (j as f32)) / 2.0; + let ceil_avg = ceil_avg.ceil() as u8; + assert_eq!( + ceil_avg, + avg2(i, j), + "avg2({}, {}), expected {}, got {}.", + i, + j, + ceil_avg, + avg2(i, j) + ); + } + } + } + + #[test] + fn test_avg2_specific() { + assert_eq!( + 255, + avg2(255, 255), + "avg2(255, 255), expected 255, got {}.", + avg2(255, 255) + ); + assert_eq!(1, avg2(1, 1), "avg2(1, 1), expected 1, got {}.", avg2(1, 1)); + assert_eq!(2, avg2(2, 1), "avg2(2, 1), expected 2, got {}.", avg2(2, 1)); + } + + #[test] + fn test_avg3() { + for i in 0u8..=255 { + for j in 0u8..=255 { + for k in 0u8..=255 { + let floor_avg = ((i as f32) + 2.0 * (j as f32) + { k as f32 } + 2.0) / 4.0; + let floor_avg = floor_avg.floor() as u8; + assert_eq!( + floor_avg, + avg3(i, j, k), + "avg3({}, {}, {}), expected {}, got {}.", + i, + j, + k, + floor_avg, + avg3(i, j, k) + ); + } + } + } + } + + #[test] + fn test_edge_pixels() { + #[rustfmt::skip] + let im = vec![5, 6, 7, 8, 9, + 4, 0, 0, 0, 0, + 3, 0, 0, 0, 0, + 2, 0, 0, 0, 0, + 1, 0, 0, 0, 0]; + let (e0, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(&im, 1, 1, 5); + assert_eq!(e0, 1); + assert_eq!(e1, 2); + assert_eq!(e2, 3); + assert_eq!(e3, 4); + assert_eq!(e4, 5); + assert_eq!(e5, 6); + assert_eq!(e6, 7); + assert_eq!(e7, 8); + assert_eq!(e8, 9); + } + + #[test] + fn test_top_pixels() { + #[rustfmt::skip] + let im = vec![1, 2, 3, 4, 5, 6, 7, 8, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0]; + let (e0, e1, e2, e3, e4, e5, e6, e7) = top_pixels(&im, 0, 1, 8); + assert_eq!(e0, 1); + assert_eq!(e1, 2); + assert_eq!(e2, 3); + assert_eq!(e3, 4); + assert_eq!(e4, 5); + assert_eq!(e5, 6); + assert_eq!(e6, 7); + assert_eq!(e7, 8); + } + + #[test] + fn test_add_residue() { + let mut pblock = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + let rblock = [ + -1, -2, -3, -4, 250, 249, 248, 250, -10, -18, -192, 
-17, -3, 15, 18, 9, + ]; + let expected: [u8; 16] = [0, 0, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 10, 29, 33, 25]; + + add_residue(&mut pblock, &rblock, 0, 0, 4); + + for (&e, &i) in expected.iter().zip(&pblock) { + assert_eq!(e, i); + } + } + + #[test] + fn test_predict_bhepred() { + #[rustfmt::skip] + let expected: Vec<u8> = vec![5, 0, 0, 0, 0, + 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1]; + + #[rustfmt::skip] + let mut im = vec![5, 0, 0, 0, 0, + 4, 0, 0, 0, 0, + 3, 0, 0, 0, 0, + 2, 0, 0, 0, 0, + 1, 0, 0, 0, 0]; + predict_bhepred(&mut im, 1, 1, 5); + for (&e, i) in expected.iter().zip(im) { + assert_eq!(e, i); + } + } + + #[test] + fn test_predict_brdpred() { + #[rustfmt::skip] + let expected: Vec<u8> = vec![5, 6, 7, 8, 9, + 4, 5, 6, 7, 8, + 3, 4, 5, 6, 7, + 2, 3, 4, 5, 6, + 1, 2, 3, 4, 5]; + + #[rustfmt::skip] + let mut im = vec![5, 6, 7, 8, 9, + 4, 0, 0, 0, 0, + 3, 0, 0, 0, 0, + 2, 0, 0, 0, 0, + 1, 0, 0, 0, 0]; + predict_brdpred(&mut im, 1, 1, 5); + for (&e, i) in expected.iter().zip(im) { + assert_eq!(e, i); + } + } + + #[test] + fn test_predict_bldpred() { + #[rustfmt::skip] + let mut im: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0]; + let avg_1 = 2u8; + let avg_2 = 3u8; + let avg_3 = 4u8; + let avg_4 = 5u8; + let avg_5 = 6u8; + let avg_6 = 7u8; + let avg_7 = 8u8; + + predict_bldpred(&mut im, 0, 1, 8); + + assert_eq!(im[8], avg_1); + assert_eq!(im[9], avg_2); + assert_eq!(im[10], avg_3); + assert_eq!(im[11], avg_4); + assert_eq!(im[16], avg_2); + assert_eq!(im[17], avg_3); + assert_eq!(im[18], avg_4); + assert_eq!(im[19], avg_5); + assert_eq!(im[24], avg_3); + assert_eq!(im[25], avg_4); + assert_eq!(im[26], avg_5); + assert_eq!(im[27], avg_6); + assert_eq!(im[32], avg_4); + assert_eq!(im[33], avg_5); + assert_eq!(im[34], avg_6); + assert_eq!(im[35], avg_7); + } + + #[test] + fn test_predict_bvepred() { + #[rustfmt::skip] + let mut im: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0]; + let avg_1 = 2u8; + let avg_2 = 3u8; + let avg_3 = 4u8; + let avg_4 = 5u8; + + predict_bvepred(&mut im, 1, 1, 9); + + assert_eq!(im[10], avg_1); + assert_eq!(im[11], avg_2); + assert_eq!(im[12], avg_3); + assert_eq!(im[13], avg_4); + assert_eq!(im[19], avg_1); + assert_eq!(im[20], avg_2); + assert_eq!(im[21], avg_3); + assert_eq!(im[22], avg_4); + assert_eq!(im[28], avg_1); + assert_eq!(im[29], avg_2); + assert_eq!(im[30], avg_3); + assert_eq!(im[31], avg_4); + assert_eq!(im[37], avg_1); + assert_eq!(im[38], avg_2); + assert_eq!(im[39], avg_3); + assert_eq!(im[40], avg_4); + } +} |
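+// A minimal usage sketch (crate-internal, since `fill_rgb` is `pub(crate)`), assuming `r` is a
+// reader positioned at the start of a raw VP8 keyframe bitstream:
+//
+//     let mut decoder = Vp8Decoder::new(r);
+//     let frame = decoder.decode_frame()?;
+//     let mut rgb = vec![0u8; frame.get_buf_size()];
+//     frame.fill_rgb(&mut rgb);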