path: root/vendor/image/src/codecs/webp
author     Valentin Popov <valentin@popov.link>  2024-01-08 00:21:28 +0300
committer  Valentin Popov <valentin@popov.link>  2024-01-08 00:21:28 +0300
commit     1b6a04ca5504955c571d1c97504fb45ea0befee4 (patch)
tree       7579f518b23313e8a9748a88ab6173d5e030b227 /vendor/image/src/codecs/webp
parent     5ecd8cf2cba827454317368b68571df0d13d7842 (diff)
download   fparkan-1b6a04ca5504955c571d1c97504fb45ea0befee4.tar.xz
           fparkan-1b6a04ca5504955c571d1c97504fb45ea0befee4.zip
Initial vendor packages
Signed-off-by: Valentin Popov <valentin@popov.link>
Diffstat (limited to 'vendor/image/src/codecs/webp')
-rw-r--r--  vendor/image/src/codecs/webp/decoder.rs              399
-rw-r--r--  vendor/image/src/codecs/webp/encoder.rs              242
-rw-r--r--  vendor/image/src/codecs/webp/extended.rs             839
-rw-r--r--  vendor/image/src/codecs/webp/huffman.rs              202
-rw-r--r--  vendor/image/src/codecs/webp/loop_filter.rs          147
-rw-r--r--  vendor/image/src/codecs/webp/lossless.rs             783
-rw-r--r--  vendor/image/src/codecs/webp/lossless_transform.rs   464
-rw-r--r--  vendor/image/src/codecs/webp/mod.rs                   28
-rw-r--r--  vendor/image/src/codecs/webp/transform.rs              77
-rw-r--r--  vendor/image/src/codecs/webp/vp8.rs                  2932
10 files changed, 6113 insertions, 0 deletions
diff --git a/vendor/image/src/codecs/webp/decoder.rs b/vendor/image/src/codecs/webp/decoder.rs
new file mode 100644
index 0000000..9120290
--- /dev/null
+++ b/vendor/image/src/codecs/webp/decoder.rs
@@ -0,0 +1,399 @@
+use byteorder::{LittleEndian, ReadBytesExt};
+use std::convert::TryFrom;
+use std::io::{self, Cursor, Error, Read};
+use std::marker::PhantomData;
+use std::{error, fmt, mem};
+
+use crate::error::{DecodingError, ImageError, ImageResult, ParameterError, ParameterErrorKind};
+use crate::image::{ImageDecoder, ImageFormat};
+use crate::{color, AnimationDecoder, Frames, Rgba};
+
+use super::lossless::{LosslessDecoder, LosslessFrame};
+use super::vp8::{Frame as VP8Frame, Vp8Decoder};
+
+use super::extended::{read_extended_header, ExtendedImage};
+
+/// All errors that can occur when attempting to parse a WEBP container
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum DecoderError {
+ /// RIFF's "RIFF" signature not found or invalid
+ RiffSignatureInvalid([u8; 4]),
+ /// WebP's "WEBP" signature not found or invalid
+ WebpSignatureInvalid([u8; 4]),
+ /// Chunk Header was incorrect or invalid in its usage
+ ChunkHeaderInvalid([u8; 4]),
+}
+
+impl fmt::Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ struct SignatureWriter([u8; 4]);
+ impl fmt::Display for SignatureWriter {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "[{:#04X?}, {:#04X?}, {:#04X?}, {:#04X?}]",
+ self.0[0], self.0[1], self.0[2], self.0[3]
+ )
+ }
+ }
+
+ match self {
+ DecoderError::RiffSignatureInvalid(riff) => f.write_fmt(format_args!(
+ "Invalid RIFF signature: {}",
+ SignatureWriter(*riff)
+ )),
+ DecoderError::WebpSignatureInvalid(webp) => f.write_fmt(format_args!(
+ "Invalid WebP signature: {}",
+ SignatureWriter(*webp)
+ )),
+ DecoderError::ChunkHeaderInvalid(header) => f.write_fmt(format_args!(
+ "Invalid Chunk header: {}",
+ SignatureWriter(*header)
+ )),
+ }
+ }
+}
+
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {}
+
+/// All possible RIFF chunks in a WebP image file
+#[allow(clippy::upper_case_acronyms)]
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub(crate) enum WebPRiffChunk {
+ RIFF,
+ WEBP,
+ VP8,
+ VP8L,
+ VP8X,
+ ANIM,
+ ANMF,
+ ALPH,
+ ICCP,
+ EXIF,
+ XMP,
+}
+
+impl WebPRiffChunk {
+ pub(crate) fn from_fourcc(chunk_fourcc: [u8; 4]) -> ImageResult<Self> {
+ match &chunk_fourcc {
+ b"RIFF" => Ok(Self::RIFF),
+ b"WEBP" => Ok(Self::WEBP),
+ b"VP8 " => Ok(Self::VP8),
+ b"VP8L" => Ok(Self::VP8L),
+ b"VP8X" => Ok(Self::VP8X),
+ b"ANIM" => Ok(Self::ANIM),
+ b"ANMF" => Ok(Self::ANMF),
+ b"ALPH" => Ok(Self::ALPH),
+ b"ICCP" => Ok(Self::ICCP),
+ b"EXIF" => Ok(Self::EXIF),
+ b"XMP " => Ok(Self::XMP),
+ _ => Err(DecoderError::ChunkHeaderInvalid(chunk_fourcc).into()),
+ }
+ }
+
+ pub(crate) fn to_fourcc(&self) -> [u8; 4] {
+ match self {
+ Self::RIFF => *b"RIFF",
+ Self::WEBP => *b"WEBP",
+ Self::VP8 => *b"VP8 ",
+ Self::VP8L => *b"VP8L",
+ Self::VP8X => *b"VP8X",
+ Self::ANIM => *b"ANIM",
+ Self::ANMF => *b"ANMF",
+ Self::ALPH => *b"ALPH",
+ Self::ICCP => *b"ICCP",
+ Self::EXIF => *b"EXIF",
+ Self::XMP => *b"XMP ",
+ }
+ }
+}
+
+enum WebPImage {
+ Lossy(VP8Frame),
+ Lossless(LosslessFrame),
+ Extended(ExtendedImage),
+}
+
+/// WebP Image format decoder. Currently only supports lossy RGB images or lossless RGBA images.
+pub struct WebPDecoder<R> {
+ r: R,
+ image: WebPImage,
+}
+
+impl<R: Read> WebPDecoder<R> {
+ /// Create a new WebPDecoder from the reader `r`.
+ /// This function takes ownership of the reader.
+ pub fn new(r: R) -> ImageResult<WebPDecoder<R>> {
+ let image = WebPImage::Lossy(Default::default());
+
+ let mut decoder = WebPDecoder { r, image };
+ decoder.read_data()?;
+ Ok(decoder)
+ }
+
+ //reads the 12 bytes of the WebP file header
+ fn read_riff_header(&mut self) -> ImageResult<u32> {
+ let mut riff = [0; 4];
+ self.r.read_exact(&mut riff)?;
+ if &riff != b"RIFF" {
+ return Err(DecoderError::RiffSignatureInvalid(riff).into());
+ }
+
+ let size = self.r.read_u32::<LittleEndian>()?;
+
+ let mut webp = [0; 4];
+ self.r.read_exact(&mut webp)?;
+ if &webp != b"WEBP" {
+ return Err(DecoderError::WebpSignatureInvalid(webp).into());
+ }
+
+ Ok(size)
+ }
+
+ //reads the chunk header and decodes the frame into a WebPImage
+ fn read_frame(&mut self) -> ImageResult<WebPImage> {
+ let chunk = read_chunk(&mut self.r)?;
+
+ match chunk {
+ Some((cursor, WebPRiffChunk::VP8)) => {
+ let mut vp8_decoder = Vp8Decoder::new(cursor);
+ let frame = vp8_decoder.decode_frame()?;
+
+ Ok(WebPImage::Lossy(frame.clone()))
+ }
+ Some((cursor, WebPRiffChunk::VP8L)) => {
+ let mut lossless_decoder = LosslessDecoder::new(cursor);
+ let frame = lossless_decoder.decode_frame()?;
+
+ Ok(WebPImage::Lossless(frame.clone()))
+ }
+ Some((mut cursor, WebPRiffChunk::VP8X)) => {
+ let info = read_extended_header(&mut cursor)?;
+
+ let image = ExtendedImage::read_extended_chunks(&mut self.r, info)?;
+
+ Ok(WebPImage::Extended(image))
+ }
+ None => Err(ImageError::IoError(Error::from(
+ io::ErrorKind::UnexpectedEof,
+ ))),
+ Some((_, chunk)) => Err(DecoderError::ChunkHeaderInvalid(chunk.to_fourcc()).into()),
+ }
+ }
+
+ fn read_data(&mut self) -> ImageResult<()> {
+ let _size = self.read_riff_header()?;
+
+ let image = self.read_frame()?;
+
+ self.image = image;
+
+ Ok(())
+ }
+
+ /// Returns true if the image as described by the bitstream is animated.
+ pub fn has_animation(&self) -> bool {
+ match &self.image {
+ WebPImage::Lossy(_) => false,
+ WebPImage::Lossless(_) => false,
+ WebPImage::Extended(extended) => extended.has_animation(),
+ }
+ }
+
+ /// Sets the background color if the image is an extended, animated WebP.
+ pub fn set_background_color(&mut self, color: Rgba<u8>) -> ImageResult<()> {
+ match &mut self.image {
+ WebPImage::Extended(image) => image.set_background_color(color),
+ _ => Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(
+ "Background color can only be set on animated webp".to_owned(),
+ ),
+ ))),
+ }
+ }
+}
+
+pub(crate) fn read_len_cursor<R>(r: &mut R) -> ImageResult<Cursor<Vec<u8>>>
+where
+ R: Read,
+{
+ let unpadded_len = u64::from(r.read_u32::<LittleEndian>()?);
+
+ // RIFF chunks containing an uneven number of bytes append
+ // an extra 0x00 at the end of the chunk
+ //
+ // The addition cannot overflow since we have a u64 that was created from a u32
+ let len = unpadded_len + (unpadded_len % 2);
+
+ let mut framedata = Vec::new();
+ r.by_ref().take(len).read_to_end(&mut framedata)?;
+
+ //remove padding byte
+ if unpadded_len % 2 == 1 {
+ framedata.pop();
+ }
+
+ Ok(io::Cursor::new(framedata))
+}
+
+/// Reads a chunk header FourCC
+/// Returns None if and only if we hit end of file reading the four character code of the chunk
+/// The inner error is `Err` if and only if the chunk header FourCC is present but unknown
+pub(crate) fn read_fourcc<R: Read>(r: &mut R) -> ImageResult<Option<ImageResult<WebPRiffChunk>>> {
+ let mut chunk_fourcc = [0; 4];
+ let result = r.read_exact(&mut chunk_fourcc);
+
+ match result {
+ Ok(()) => {}
+ Err(err) => {
+ if err.kind() == io::ErrorKind::UnexpectedEof {
+ return Ok(None);
+ } else {
+ return Err(err.into());
+ }
+ }
+ }
+
+ let chunk = WebPRiffChunk::from_fourcc(chunk_fourcc);
+ Ok(Some(chunk))
+}
+
+/// Reads a chunk
+/// Returns an error if the chunk header is not a valid webp header or some other reading error
+/// Returns None if and only if we hit end of file reading the four character code of the chunk
+pub(crate) fn read_chunk<R>(r: &mut R) -> ImageResult<Option<(Cursor<Vec<u8>>, WebPRiffChunk)>>
+where
+ R: Read,
+{
+ if let Some(chunk) = read_fourcc(r)? {
+ let chunk = chunk?;
+ let cursor = read_len_cursor(r)?;
+ Ok(Some((cursor, chunk)))
+ } else {
+ Ok(None)
+ }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct WebpReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for WebpReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ if self.0.position() == 0 && buf.is_empty() {
+ mem::swap(buf, self.0.get_mut());
+ Ok(buf.len())
+ } else {
+ self.0.read_to_end(buf)
+ }
+ }
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for WebPDecoder<R> {
+ type Reader = WebpReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ match &self.image {
+ WebPImage::Lossy(vp8_frame) => {
+ (u32::from(vp8_frame.width), u32::from(vp8_frame.height))
+ }
+ WebPImage::Lossless(lossless_frame) => (
+ u32::from(lossless_frame.width),
+ u32::from(lossless_frame.height),
+ ),
+ WebPImage::Extended(extended) => extended.dimensions(),
+ }
+ }
+
+ fn color_type(&self) -> color::ColorType {
+ match &self.image {
+ WebPImage::Lossy(_) => color::ColorType::Rgb8,
+ WebPImage::Lossless(_) => color::ColorType::Rgba8,
+ WebPImage::Extended(extended) => extended.color_type(),
+ }
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ match &self.image {
+ WebPImage::Lossy(vp8_frame) => {
+ let mut data = vec![0; vp8_frame.get_buf_size()];
+ vp8_frame.fill_rgb(data.as_mut_slice());
+ Ok(WebpReader(Cursor::new(data), PhantomData))
+ }
+ WebPImage::Lossless(lossless_frame) => {
+ let mut data = vec![0; lossless_frame.get_buf_size()];
+ lossless_frame.fill_rgba(data.as_mut_slice());
+ Ok(WebpReader(Cursor::new(data), PhantomData))
+ }
+ WebPImage::Extended(extended) => {
+ let mut data = vec![0; extended.get_buf_size()];
+ extended.fill_buf(data.as_mut_slice());
+ Ok(WebpReader(Cursor::new(data), PhantomData))
+ }
+ }
+ }
+
+ fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+
+ match &self.image {
+ WebPImage::Lossy(vp8_frame) => {
+ vp8_frame.fill_rgb(buf);
+ }
+ WebPImage::Lossless(lossless_frame) => {
+ lossless_frame.fill_rgba(buf);
+ }
+ WebPImage::Extended(extended) => {
+ extended.fill_buf(buf);
+ }
+ }
+ Ok(())
+ }
+
+ fn icc_profile(&mut self) -> Option<Vec<u8>> {
+ if let WebPImage::Extended(extended) = &self.image {
+ extended.icc_profile()
+ } else {
+ None
+ }
+ }
+}
+
+impl<'a, R: 'a + Read> AnimationDecoder<'a> for WebPDecoder<R> {
+ fn into_frames(self) -> Frames<'a> {
+ match self.image {
+ WebPImage::Lossy(_) | WebPImage::Lossless(_) => {
+ Frames::new(Box::new(std::iter::empty()))
+ }
+ WebPImage::Extended(extended_image) => extended_image.into_frames(),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn add_with_overflow_size() {
+ let bytes = vec![
+ 0x52, 0x49, 0x46, 0x46, 0xaf, 0x37, 0x80, 0x47, 0x57, 0x45, 0x42, 0x50, 0x6c, 0x64,
+ 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xfb, 0x7e, 0x73, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x40, 0xfb, 0xff, 0xff, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x62, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49,
+ 0x49, 0x54, 0x55, 0x50, 0x4c, 0x54, 0x59, 0x50, 0x45, 0x33, 0x37, 0x44, 0x4d, 0x46,
+ ];
+
+ let data = std::io::Cursor::new(bytes);
+
+ let _ = WebPDecoder::new(data);
+ }
+}
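
For reference (not part of the vendored diff), a minimal decoding sketch against the decoder above, assuming the WebP feature and public re-exports of this vendored `image` version; the `input.webp` path is hypothetical:

use std::fs::File;
use std::io::BufReader;

use image::codecs::webp::WebPDecoder;
use image::ImageDecoder;

fn main() -> image::ImageResult<()> {
    // WebPDecoder::new takes ownership of the reader and parses the RIFF header eagerly.
    let reader = BufReader::new(File::open("input.webp")?);
    let decoder = WebPDecoder::new(reader)?;

    let (width, height) = decoder.dimensions();
    let color = decoder.color_type();

    // read_image asserts that the buffer length equals total_bytes(), so size it exactly.
    let mut pixels = vec![0u8; decoder.total_bytes() as usize];
    decoder.read_image(&mut pixels)?;

    println!("decoded {}x{} pixels as {:?}", width, height, color);
    Ok(())
}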
diff --git a/vendor/image/src/codecs/webp/encoder.rs b/vendor/image/src/codecs/webp/encoder.rs
new file mode 100644
index 0000000..0383046
--- /dev/null
+++ b/vendor/image/src/codecs/webp/encoder.rs
@@ -0,0 +1,242 @@
+//! Encoding of WebP images.
+//!
+//! Uses the simple encoding API from the [libwebp] library.
+//!
+//! [libwebp]: https://developers.google.com/speed/webp/docs/api#simple_encoding_api
+use std::io::Write;
+
+use libwebp::{Encoder, PixelLayout, WebPMemory};
+
+use crate::error::{
+ EncodingError, ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::flat::SampleLayout;
+use crate::{ColorType, ImageEncoder, ImageError, ImageFormat, ImageResult};
+
+/// WebP Encoder.
+pub struct WebPEncoder<W> {
+ inner: W,
+ quality: WebPQuality,
+}
+
+/// WebP encoder quality.
+#[derive(Debug, Copy, Clone)]
+pub struct WebPQuality(Quality);
+
+#[derive(Debug, Copy, Clone)]
+enum Quality {
+ Lossless,
+ Lossy(u8),
+}
+
+impl WebPQuality {
+ /// Minimum lossy quality value (0).
+ pub const MIN: u8 = 0;
+ /// Maximum lossy quality value (100).
+ pub const MAX: u8 = 100;
+ /// Default lossy quality (80), providing a balance of quality and file size.
+ pub const DEFAULT: u8 = 80;
+
+ /// Lossless encoding.
+ pub fn lossless() -> Self {
+ Self(Quality::Lossless)
+ }
+
+ /// Lossy encoding. 0 = low quality, small size; 100 = high quality, large size.
+ ///
+ /// Values are clamped from 0 to 100.
+ pub fn lossy(quality: u8) -> Self {
+ Self(Quality::Lossy(quality.clamp(Self::MIN, Self::MAX)))
+ }
+}
+
+impl Default for WebPQuality {
+ fn default() -> Self {
+ Self::lossy(WebPQuality::DEFAULT)
+ }
+}
+
+impl<W: Write> WebPEncoder<W> {
+ /// Create a new encoder that writes its output to `w`.
+ ///
+ /// Defaults to lossy encoding, see [`WebPQuality::DEFAULT`].
+ pub fn new(w: W) -> Self {
+ WebPEncoder::new_with_quality(w, WebPQuality::default())
+ }
+
+ /// Create a new encoder with the specified quality, that writes its output to `w`.
+ pub fn new_with_quality(w: W, quality: WebPQuality) -> Self {
+ Self { inner: w, quality }
+ }
+
+ /// Encode image data with the indicated color type.
+ ///
+ /// The encoder requires the image data to be Rgb8 or Rgba8.
+ pub fn encode(
+ mut self,
+ data: &[u8],
+ width: u32,
+ height: u32,
+ color: ColorType,
+ ) -> ImageResult<()> {
+ // TODO: convert color types internally?
+ let layout = match color {
+ ColorType::Rgb8 => PixelLayout::Rgb,
+ ColorType::Rgba8 => PixelLayout::Rgba,
+ _ => {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::WebP.into(),
+ UnsupportedErrorKind::Color(color.into()),
+ ),
+ ))
+ }
+ };
+
+ // Validate dimensions upfront to avoid panics.
+ if width == 0
+ || height == 0
+ || !SampleLayout::row_major_packed(color.channel_count(), width, height)
+ .fits(data.len())
+ {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+
+ // Call the native libwebp library to encode the image.
+ let encoder = Encoder::new(data, layout, width, height);
+ let encoded: WebPMemory = match self.quality.0 {
+ Quality::Lossless => encoder.encode_lossless(),
+ Quality::Lossy(quality) => encoder.encode(quality as f32),
+ };
+
+ // The simple encoding API in libwebp does not return errors.
+ if encoded.is_empty() {
+ return Err(ImageError::Encoding(EncodingError::new(
+ ImageFormat::WebP.into(),
+ "encoding failed, output empty",
+ )));
+ }
+
+ self.inner.write_all(&encoded)?;
+ Ok(())
+ }
+}
+
+impl<W: Write> ImageEncoder for WebPEncoder<W> {
+ fn write_image(
+ self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ self.encode(buf, width, height, color_type)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::codecs::webp::{WebPEncoder, WebPQuality};
+ use crate::{ColorType, ImageEncoder};
+
+ #[test]
+ fn webp_lossless_deterministic() {
+ // 1x1 8-bit image buffer containing a single red pixel.
+ let rgb: &[u8] = &[255, 0, 0];
+ let rgba: &[u8] = &[255, 0, 0, 128];
+ for (color, img, expected) in [
+ (
+ ColorType::Rgb8,
+ rgb,
+ [
+ 82, 73, 70, 70, 28, 0, 0, 0, 87, 69, 66, 80, 86, 80, 56, 76, 15, 0, 0, 0, 47,
+ 0, 0, 0, 0, 7, 16, 253, 143, 254, 7, 34, 162, 255, 1, 0,
+ ],
+ ),
+ (
+ ColorType::Rgba8,
+ rgba,
+ [
+ 82, 73, 70, 70, 28, 0, 0, 0, 87, 69, 66, 80, 86, 80, 56, 76, 15, 0, 0, 0, 47,
+ 0, 0, 0, 16, 7, 16, 253, 143, 2, 6, 34, 162, 255, 1, 0,
+ ],
+ ),
+ ] {
+ // Encode it into a memory buffer.
+ let mut encoded_img = Vec::new();
+ {
+ let encoder =
+ WebPEncoder::new_with_quality(&mut encoded_img, WebPQuality::lossless());
+ encoder
+ .write_image(&img, 1, 1, color)
+ .expect("image encoding failed");
+ }
+
+ // WebP encoding should be deterministic.
+ assert_eq!(encoded_img, expected);
+ }
+ }
+
+ #[derive(Debug, Clone)]
+ struct MockImage {
+ width: u32,
+ height: u32,
+ color: ColorType,
+ data: Vec<u8>,
+ }
+
+ impl quickcheck::Arbitrary for MockImage {
+ fn arbitrary(g: &mut quickcheck::Gen) -> Self {
+ // Limit to small, non-empty images <= 512x512.
+ let width = u32::arbitrary(g) % 512 + 1;
+ let height = u32::arbitrary(g) % 512 + 1;
+ let (color, stride) = if bool::arbitrary(g) {
+ (ColorType::Rgb8, 3)
+ } else {
+ (ColorType::Rgba8, 4)
+ };
+ let size = width * height * stride;
+ let data: Vec<u8> = (0..size).map(|_| u8::arbitrary(g)).collect();
+ MockImage {
+ width,
+ height,
+ color,
+ data,
+ }
+ }
+ }
+
+ quickcheck! {
+ fn fuzz_webp_valid_image(image: MockImage, quality: u8) -> bool {
+ // Check valid images do not panic.
+ let mut buffer = Vec::<u8>::new();
+ for webp_quality in [WebPQuality::lossless(), WebPQuality::lossy(quality)] {
+ buffer.clear();
+ let encoder = WebPEncoder::new_with_quality(&mut buffer, webp_quality);
+ if encoder
+ .write_image(&image.data, image.width, image.height, image.color)
+ .is_err() {
+ return false;
+ }
+ }
+ true
+ }
+
+ fn fuzz_webp_no_panic(data: Vec<u8>, width: u8, height: u8, quality: u8) -> bool {
+ // Check random (usually invalid) parameters do not panic.
+ let mut buffer = Vec::<u8>::new();
+ for color in [ColorType::Rgb8, ColorType::Rgba8] {
+ for webp_quality in [WebPQuality::lossless(), WebPQuality::lossy(quality)] {
+ buffer.clear();
+ let encoder = WebPEncoder::new_with_quality(&mut buffer, webp_quality);
+ // Ignore errors.
+ let _ = encoder
+ .write_image(&data, width as u32, height as u32, color);
+ }
+ }
+ true
+ }
+ }
+}
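
For reference (not part of the vendored diff), a minimal encoding sketch against the encoder above, assuming this vendored `image` version is built with its libwebp-backed encoder feature enabled; the pixel values are illustrative:

use image::codecs::webp::{WebPEncoder, WebPQuality};
use image::{ColorType, ImageEncoder};

fn main() -> image::ImageResult<()> {
    // A 2x1 Rgb8 buffer: one red pixel and one green pixel.
    let pixels: [u8; 6] = [255, 0, 0, 0, 255, 0];

    let mut out = Vec::new();
    let encoder = WebPEncoder::new_with_quality(&mut out, WebPQuality::lossy(80));
    encoder.write_image(&pixels, 2, 1, ColorType::Rgb8)?;

    // libwebp wraps the bitstream in a RIFF container, which is what the decoder expects.
    assert_eq!(&out[..4], b"RIFF");
    Ok(())
}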
diff --git a/vendor/image/src/codecs/webp/extended.rs b/vendor/image/src/codecs/webp/extended.rs
new file mode 100644
index 0000000..3dc6b34
--- /dev/null
+++ b/vendor/image/src/codecs/webp/extended.rs
@@ -0,0 +1,839 @@
+use std::convert::TryInto;
+use std::io::{self, Cursor, Error, Read};
+use std::{error, fmt};
+
+use super::decoder::{
+ read_chunk, read_fourcc, read_len_cursor, DecoderError::ChunkHeaderInvalid, WebPRiffChunk,
+};
+use super::lossless::{LosslessDecoder, LosslessFrame};
+use super::vp8::{Frame as VP8Frame, Vp8Decoder};
+use crate::error::{DecodingError, ParameterError, ParameterErrorKind};
+use crate::image::ImageFormat;
+use crate::{
+ ColorType, Delay, Frame, Frames, ImageError, ImageResult, Rgb, RgbImage, Rgba, RgbaImage,
+};
+use byteorder::{LittleEndian, ReadBytesExt};
+
+//all errors that can occur while parsing extended chunks in a WebP file
+#[derive(Debug, Clone, Copy)]
+enum DecoderError {
+ // Some bits were invalid
+ InfoBitsInvalid { name: &'static str, value: u32 },
+ // Alpha chunk doesn't match the frame's size
+ AlphaChunkSizeMismatch,
+ // Image is too large, either for the platform's pointer size or generally
+ ImageTooLarge,
+ // Frame would go out of the canvas
+ FrameOutsideImage,
+}
+
+impl fmt::Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DecoderError::InfoBitsInvalid { name, value } => f.write_fmt(format_args!(
+ "Info bits `{}` invalid, received value: {}",
+ name, value
+ )),
+ DecoderError::AlphaChunkSizeMismatch => {
+ f.write_str("Alpha chunk doesn't match the size of the frame")
+ }
+ DecoderError::ImageTooLarge => f.write_str("Image is too large to be decoded"),
+ DecoderError::FrameOutsideImage => {
+ f.write_str("Frame is too large and would go outside the image")
+ }
+ }
+ }
+}
+
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {}
+
+#[derive(Debug, Clone)]
+pub(crate) struct WebPExtendedInfo {
+ _icc_profile: bool,
+ _alpha: bool,
+ _exif_metadata: bool,
+ _xmp_metadata: bool,
+ _animation: bool,
+ canvas_width: u32,
+ canvas_height: u32,
+ icc_profile: Option<Vec<u8>>,
+}
+
+#[derive(Debug)]
+enum ExtendedImageData {
+ Animation {
+ frames: Vec<AnimatedFrame>,
+ anim_info: WebPAnimatedInfo,
+ },
+ Static(WebPStatic),
+}
+
+#[derive(Debug)]
+pub(crate) struct ExtendedImage {
+ info: WebPExtendedInfo,
+ image: ExtendedImageData,
+}
+
+impl ExtendedImage {
+ pub(crate) fn dimensions(&self) -> (u32, u32) {
+ (self.info.canvas_width, self.info.canvas_height)
+ }
+
+ pub(crate) fn has_animation(&self) -> bool {
+ self.info._animation
+ }
+
+ pub(crate) fn icc_profile(&self) -> Option<Vec<u8>> {
+ self.info.icc_profile.clone()
+ }
+
+ pub(crate) fn color_type(&self) -> ColorType {
+ match &self.image {
+ ExtendedImageData::Animation { frames, .. } => &frames[0].image,
+ ExtendedImageData::Static(image) => image,
+ }
+ .color_type()
+ }
+
+ pub(crate) fn into_frames<'a>(self) -> Frames<'a> {
+ struct FrameIterator {
+ image: ExtendedImage,
+ index: usize,
+ canvas: RgbaImage,
+ }
+
+ impl Iterator for FrameIterator {
+ type Item = ImageResult<Frame>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if let ExtendedImageData::Animation { frames, anim_info } = &self.image.image {
+ let frame = frames.get(self.index);
+ match frame {
+ Some(anim_image) => {
+ self.index += 1;
+ ExtendedImage::draw_subimage(
+ &mut self.canvas,
+ anim_image,
+ anim_info.background_color,
+ )
+ }
+ None => None,
+ }
+ } else {
+ None
+ }
+ }
+ }
+
+ let width = self.info.canvas_width;
+ let height = self.info.canvas_height;
+ let background_color =
+ if let ExtendedImageData::Animation { ref anim_info, .. } = self.image {
+ anim_info.background_color
+ } else {
+ Rgba([0, 0, 0, 0])
+ };
+
+ let frame_iter = FrameIterator {
+ image: self,
+ index: 0,
+ canvas: RgbaImage::from_pixel(width, height, background_color),
+ };
+
+ Frames::new(Box::new(frame_iter))
+ }
+
+ pub(crate) fn read_extended_chunks<R: Read>(
+ reader: &mut R,
+ mut info: WebPExtendedInfo,
+ ) -> ImageResult<ExtendedImage> {
+ let mut anim_info: Option<WebPAnimatedInfo> = None;
+ let mut anim_frames: Vec<AnimatedFrame> = Vec::new();
+ let mut static_frame: Option<WebPStatic> = None;
+ //go until end of file and while chunk headers are valid
+ while let Some((mut cursor, chunk)) = read_extended_chunk(reader)? {
+ match chunk {
+ WebPRiffChunk::EXIF | WebPRiffChunk::XMP => {
+ //ignore these chunks
+ }
+ WebPRiffChunk::ANIM => {
+ if anim_info.is_none() {
+ anim_info = Some(Self::read_anim_info(&mut cursor)?);
+ }
+ }
+ WebPRiffChunk::ANMF => {
+ let frame = read_anim_frame(cursor, info.canvas_width, info.canvas_height)?;
+ anim_frames.push(frame);
+ }
+ WebPRiffChunk::ALPH => {
+ if static_frame.is_none() {
+ let alpha_chunk =
+ read_alpha_chunk(&mut cursor, info.canvas_width, info.canvas_height)?;
+
+ let vp8_frame = read_lossy_with_chunk(reader)?;
+
+ let img = WebPStatic::from_alpha_lossy(alpha_chunk, vp8_frame)?;
+
+ static_frame = Some(img);
+ }
+ }
+ WebPRiffChunk::ICCP => {
+ let mut icc_profile = Vec::new();
+ cursor.read_to_end(&mut icc_profile)?;
+ info.icc_profile = Some(icc_profile);
+ }
+ WebPRiffChunk::VP8 => {
+ if static_frame.is_none() {
+ let vp8_frame = read_lossy(cursor)?;
+
+ let img = WebPStatic::from_lossy(vp8_frame)?;
+
+ static_frame = Some(img);
+ }
+ }
+ WebPRiffChunk::VP8L => {
+ if static_frame.is_none() {
+ let mut lossless_decoder = LosslessDecoder::new(cursor);
+ let frame = lossless_decoder.decode_frame()?;
+ let image = WebPStatic::Lossless(frame.clone());
+
+ static_frame = Some(image);
+ }
+ }
+ _ => return Err(ChunkHeaderInvalid(chunk.to_fourcc()).into()),
+ }
+ }
+
+ let image = if let Some(info) = anim_info {
+ if anim_frames.is_empty() {
+ return Err(ImageError::IoError(Error::from(
+ io::ErrorKind::UnexpectedEof,
+ )));
+ }
+ ExtendedImageData::Animation {
+ frames: anim_frames,
+ anim_info: info,
+ }
+ } else if let Some(frame) = static_frame {
+ ExtendedImageData::Static(frame)
+ } else {
+ //reached end of file too early before image data was reached
+ return Err(ImageError::IoError(Error::from(
+ io::ErrorKind::UnexpectedEof,
+ )));
+ };
+
+ let image = ExtendedImage { image, info };
+
+ Ok(image)
+ }
+
+ fn read_anim_info<R: Read>(reader: &mut R) -> ImageResult<WebPAnimatedInfo> {
+ let mut colors: [u8; 4] = [0; 4];
+ reader.read_exact(&mut colors)?;
+
+ //background color is [blue, green, red, alpha]
+ let background_color = Rgba([colors[2], colors[1], colors[0], colors[3]]);
+
+ let loop_count = reader.read_u16::<LittleEndian>()?;
+
+ let info = WebPAnimatedInfo {
+ background_color,
+ _loop_count: loop_count,
+ };
+
+ Ok(info)
+ }
+
+ fn draw_subimage(
+ canvas: &mut RgbaImage,
+ anim_image: &AnimatedFrame,
+ background_color: Rgba<u8>,
+ ) -> Option<ImageResult<Frame>> {
+ let mut buffer = vec![0; anim_image.image.get_buf_size()];
+ anim_image.image.fill_buf(&mut buffer);
+ let has_alpha = anim_image.image.has_alpha();
+ let pixel_len: u32 = anim_image.image.color_type().bytes_per_pixel().into();
+
+ 'x: for x in 0..anim_image.width {
+ for y in 0..anim_image.height {
+ let canvas_index: (u32, u32) = (x + anim_image.offset_x, y + anim_image.offset_y);
+ // Negative offsets are not possible due to unsigned ints
+ // If we go out of bounds by height, still continue by x
+ if canvas_index.1 >= canvas.height() {
+ continue 'x;
+ }
+ // If we go out of bounds by width, it doesn't make sense to continue at all
+ if canvas_index.0 >= canvas.width() {
+ break 'x;
+ }
+ let index: usize = ((y * anim_image.width + x) * pixel_len).try_into().unwrap();
+ canvas[canvas_index] = if anim_image.use_alpha_blending && has_alpha {
+ let buffer: [u8; 4] = buffer[index..][..4].try_into().unwrap();
+ ExtendedImage::do_alpha_blending(buffer, canvas[canvas_index])
+ } else {
+ Rgba([
+ buffer[index],
+ buffer[index + 1],
+ buffer[index + 2],
+ if has_alpha { buffer[index + 3] } else { 255 },
+ ])
+ };
+ }
+ }
+
+ let delay = Delay::from_numer_denom_ms(anim_image.duration, 1);
+ let img = canvas.clone();
+ let frame = Frame::from_parts(img, 0, 0, delay);
+
+ if anim_image.dispose {
+ for x in 0..anim_image.width {
+ for y in 0..anim_image.height {
+ let canvas_index = (x + anim_image.offset_x, y + anim_image.offset_y);
+ canvas[canvas_index] = background_color;
+ }
+ }
+ }
+
+ Some(Ok(frame))
+ }
+
+ fn do_alpha_blending(buffer: [u8; 4], canvas: Rgba<u8>) -> Rgba<u8> {
+ let canvas_alpha = f64::from(canvas[3]);
+ let buffer_alpha = f64::from(buffer[3]);
+ let blend_alpha_f64 = buffer_alpha + canvas_alpha * (1.0 - buffer_alpha / 255.0);
+ //value should be between 0 and 255, this truncates the fractional part
+ let blend_alpha: u8 = blend_alpha_f64 as u8;
+
+ let blend_rgb: [u8; 3] = if blend_alpha == 0 {
+ [0, 0, 0]
+ } else {
+ let mut rgb = [0u8; 3];
+ for i in 0..3 {
+ let canvas_f64 = f64::from(canvas[i]);
+ let buffer_f64 = f64::from(buffer[i]);
+
+ let val = (buffer_f64 * buffer_alpha
+ + canvas_f64 * canvas_alpha * (1.0 - buffer_alpha / 255.0))
+ / blend_alpha_f64;
+ //value should be between 0 and 255, this truncates the fractional part
+ rgb[i] = val as u8;
+ }
+
+ rgb
+ };
+
+ Rgba([blend_rgb[0], blend_rgb[1], blend_rgb[2], blend_alpha])
+ }
+
+ pub(crate) fn fill_buf(&self, buf: &mut [u8]) {
+ match &self.image {
+ // will always have at least one frame
+ ExtendedImageData::Animation { frames, anim_info } => {
+ let first_frame = &frames[0];
+ let (canvas_width, canvas_height) = self.dimensions();
+ if canvas_width == first_frame.width && canvas_height == first_frame.height {
+ first_frame.image.fill_buf(buf);
+ } else {
+ let bg_color = match &self.info._alpha {
+ true => Rgba::from([0, 0, 0, 0]),
+ false => anim_info.background_color,
+ };
+ let mut canvas = RgbaImage::from_pixel(canvas_width, canvas_height, bg_color);
+ let _ = ExtendedImage::draw_subimage(&mut canvas, first_frame, bg_color)
+ .unwrap()
+ .unwrap();
+ buf.copy_from_slice(canvas.into_raw().as_slice());
+ }
+ }
+ ExtendedImageData::Static(image) => {
+ image.fill_buf(buf);
+ }
+ }
+ }
+
+ pub(crate) fn get_buf_size(&self) -> usize {
+ match &self.image {
+ // will always have at least one frame
+ ExtendedImageData::Animation { frames, .. } => &frames[0].image,
+ ExtendedImageData::Static(image) => image,
+ }
+ .get_buf_size()
+ }
+
+ pub(crate) fn set_background_color(&mut self, color: Rgba<u8>) -> ImageResult<()> {
+ match &mut self.image {
+ ExtendedImageData::Animation { anim_info, .. } => {
+ anim_info.background_color = color;
+ Ok(())
+ }
+ _ => Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(
+ "Background color can only be set on animated webp".to_owned(),
+ ),
+ ))),
+ }
+ }
+}
+
+#[derive(Debug)]
+enum WebPStatic {
+ LossyWithAlpha(RgbaImage),
+ LossyWithoutAlpha(RgbImage),
+ Lossless(LosslessFrame),
+}
+
+impl WebPStatic {
+ pub(crate) fn from_alpha_lossy(
+ alpha: AlphaChunk,
+ vp8_frame: VP8Frame,
+ ) -> ImageResult<WebPStatic> {
+ if alpha.data.len() != usize::from(vp8_frame.width) * usize::from(vp8_frame.height) {
+ return Err(DecoderError::AlphaChunkSizeMismatch.into());
+ }
+
+ let size = usize::from(vp8_frame.width).checked_mul(usize::from(vp8_frame.height) * 4);
+ let mut image_vec = match size {
+ Some(size) => vec![0u8; size],
+ None => return Err(DecoderError::ImageTooLarge.into()),
+ };
+
+ vp8_frame.fill_rgba(&mut image_vec);
+
+ for y in 0..vp8_frame.height {
+ for x in 0..vp8_frame.width {
+ let predictor: u8 = WebPStatic::get_predictor(
+ x.into(),
+ y.into(),
+ vp8_frame.width.into(),
+ alpha.filtering_method,
+ &image_vec,
+ );
+ let predictor = u16::from(predictor);
+
+ let alpha_index = usize::from(y) * usize::from(vp8_frame.width) + usize::from(x);
+ let alpha_val = alpha.data[alpha_index];
+ let alpha: u8 = ((predictor + u16::from(alpha_val)) % 256)
+ .try_into()
+ .unwrap();
+
+ let alpha_index = alpha_index * 4 + 3;
+ image_vec[alpha_index] = alpha;
+ }
+ }
+
+ let image = RgbaImage::from_vec(vp8_frame.width.into(), vp8_frame.height.into(), image_vec)
+ .unwrap();
+
+ Ok(WebPStatic::LossyWithAlpha(image))
+ }
+
+ fn get_predictor(
+ x: usize,
+ y: usize,
+ width: usize,
+ filtering_method: FilteringMethod,
+ image_slice: &[u8],
+ ) -> u8 {
+ match filtering_method {
+ FilteringMethod::None => 0,
+ FilteringMethod::Horizontal => {
+ if x == 0 && y == 0 {
+ 0
+ } else if x == 0 {
+ let index = (y - 1) * width + x;
+ image_slice[index * 4 + 3]
+ } else {
+ let index = y * width + x - 1;
+ image_slice[index * 4 + 3]
+ }
+ }
+ FilteringMethod::Vertical => {
+ if x == 0 && y == 0 {
+ 0
+ } else if y == 0 {
+ let index = y * width + x - 1;
+ image_slice[index * 4 + 3]
+ } else {
+ let index = (y - 1) * width + x;
+ image_slice[index * 4 + 3]
+ }
+ }
+ FilteringMethod::Gradient => {
+ let (left, top, top_left) = match (x, y) {
+ (0, 0) => (0, 0, 0),
+ (0, y) => {
+ let above_index = (y - 1) * width + x;
+ let val = image_slice[above_index * 4 + 3];
+ (val, val, val)
+ }
+ (x, 0) => {
+ let before_index = y * width + x - 1;
+ let val = image_slice[before_index * 4 + 3];
+ (val, val, val)
+ }
+ (x, y) => {
+ let left_index = y * width + x - 1;
+ let left = image_slice[left_index * 4 + 3];
+ let top_index = (y - 1) * width + x;
+ let top = image_slice[top_index * 4 + 3];
+ let top_left_index = (y - 1) * width + x - 1;
+ let top_left = image_slice[top_left_index * 4 + 3];
+
+ (left, top, top_left)
+ }
+ };
+
+ let combination = i16::from(left) + i16::from(top) - i16::from(top_left);
+ i16::clamp(combination, 0, 255).try_into().unwrap()
+ }
+ }
+ }
+
+ pub(crate) fn from_lossy(vp8_frame: VP8Frame) -> ImageResult<WebPStatic> {
+ let mut image = RgbImage::from_pixel(
+ vp8_frame.width.into(),
+ vp8_frame.height.into(),
+ Rgb([0, 0, 0]),
+ );
+
+ vp8_frame.fill_rgb(&mut image);
+
+ Ok(WebPStatic::LossyWithoutAlpha(image))
+ }
+
+ pub(crate) fn fill_buf(&self, buf: &mut [u8]) {
+ match self {
+ WebPStatic::LossyWithAlpha(image) => {
+ buf.copy_from_slice(image);
+ }
+ WebPStatic::LossyWithoutAlpha(image) => {
+ buf.copy_from_slice(image);
+ }
+ WebPStatic::Lossless(lossless) => {
+ lossless.fill_rgba(buf);
+ }
+ }
+ }
+
+ pub(crate) fn get_buf_size(&self) -> usize {
+ match self {
+ WebPStatic::LossyWithAlpha(rgba_image) => rgba_image.len(),
+ WebPStatic::LossyWithoutAlpha(rgb_image) => rgb_image.len(),
+ WebPStatic::Lossless(lossless) => lossless.get_buf_size(),
+ }
+ }
+
+ pub(crate) fn color_type(&self) -> ColorType {
+ if self.has_alpha() {
+ ColorType::Rgba8
+ } else {
+ ColorType::Rgb8
+ }
+ }
+
+ pub(crate) fn has_alpha(&self) -> bool {
+ match self {
+ Self::LossyWithAlpha(..) | Self::Lossless(..) => true,
+ Self::LossyWithoutAlpha(..) => false,
+ }
+ }
+}
+
+#[derive(Debug)]
+struct WebPAnimatedInfo {
+ background_color: Rgba<u8>,
+ _loop_count: u16,
+}
+
+#[derive(Debug)]
+struct AnimatedFrame {
+ offset_x: u32,
+ offset_y: u32,
+ width: u32,
+ height: u32,
+ duration: u32,
+ use_alpha_blending: bool,
+ dispose: bool,
+ image: WebPStatic,
+}
+
+/// Reads a chunk, but silently ignores unknown chunks at the end of a file
+fn read_extended_chunk<R>(r: &mut R) -> ImageResult<Option<(Cursor<Vec<u8>>, WebPRiffChunk)>>
+where
+ R: Read,
+{
+ let mut unknown_chunk = Ok(());
+
+ while let Some(chunk) = read_fourcc(r)? {
+ let cursor = read_len_cursor(r)?;
+ match chunk {
+ Ok(chunk) => return unknown_chunk.and(Ok(Some((cursor, chunk)))),
+ Err(err) => unknown_chunk = unknown_chunk.and(Err(err)),
+ }
+ }
+
+ Ok(None)
+}
+
+pub(crate) fn read_extended_header<R: Read>(reader: &mut R) -> ImageResult<WebPExtendedInfo> {
+ let chunk_flags = reader.read_u8()?;
+
+ let reserved_first = chunk_flags & 0b11000000;
+ let icc_profile = chunk_flags & 0b00100000 != 0;
+ let alpha = chunk_flags & 0b00010000 != 0;
+ let exif_metadata = chunk_flags & 0b00001000 != 0;
+ let xmp_metadata = chunk_flags & 0b00000100 != 0;
+ let animation = chunk_flags & 0b00000010 != 0;
+ let reserved_second = chunk_flags & 0b00000001;
+
+ let reserved_third = read_3_bytes(reader)?;
+
+ if reserved_first != 0 || reserved_second != 0 || reserved_third != 0 {
+ let value: u32 = if reserved_first != 0 {
+ reserved_first.into()
+ } else if reserved_second != 0 {
+ reserved_second.into()
+ } else {
+ reserved_third
+ };
+ return Err(DecoderError::InfoBitsInvalid {
+ name: "reserved",
+ value,
+ }
+ .into());
+ }
+
+ let canvas_width = read_3_bytes(reader)? + 1;
+ let canvas_height = read_3_bytes(reader)? + 1;
+
+ //product of canvas dimensions cannot be larger than u32 max
+ if u32::checked_mul(canvas_width, canvas_height).is_none() {
+ return Err(DecoderError::ImageTooLarge.into());
+ }
+
+ let info = WebPExtendedInfo {
+ _icc_profile: icc_profile,
+ _alpha: alpha,
+ _exif_metadata: exif_metadata,
+ _xmp_metadata: xmp_metadata,
+ _animation: animation,
+ canvas_width,
+ canvas_height,
+ icc_profile: None,
+ };
+
+ Ok(info)
+}
+
+fn read_anim_frame<R: Read>(
+ mut reader: R,
+ canvas_width: u32,
+ canvas_height: u32,
+) -> ImageResult<AnimatedFrame> {
+ //offsets for the frames are twice the values
+ let frame_x = read_3_bytes(&mut reader)? * 2;
+ let frame_y = read_3_bytes(&mut reader)? * 2;
+
+ let frame_width = read_3_bytes(&mut reader)? + 1;
+ let frame_height = read_3_bytes(&mut reader)? + 1;
+
+ if frame_x + frame_width > canvas_width || frame_y + frame_height > canvas_height {
+ return Err(DecoderError::FrameOutsideImage.into());
+ }
+
+ let duration = read_3_bytes(&mut reader)?;
+
+ let frame_info = reader.read_u8()?;
+ let reserved = frame_info & 0b11111100;
+ if reserved != 0 {
+ return Err(DecoderError::InfoBitsInvalid {
+ name: "reserved",
+ value: reserved.into(),
+ }
+ .into());
+ }
+ let use_alpha_blending = frame_info & 0b00000010 == 0;
+ let dispose = frame_info & 0b00000001 != 0;
+
+ //read normal bitstream now
+ let static_image = read_image(&mut reader, frame_width, frame_height)?;
+
+ let frame = AnimatedFrame {
+ offset_x: frame_x,
+ offset_y: frame_y,
+ width: frame_width,
+ height: frame_height,
+ duration,
+ use_alpha_blending,
+ dispose,
+ image: static_image,
+ };
+
+ Ok(frame)
+}
+
+fn read_3_bytes<R: Read>(reader: &mut R) -> ImageResult<u32> {
+ let mut buffer: [u8; 3] = [0; 3];
+ reader.read_exact(&mut buffer)?;
+ let value: u32 =
+ (u32::from(buffer[2]) << 16) | (u32::from(buffer[1]) << 8) | u32::from(buffer[0]);
+ Ok(value)
+}
+
+fn read_lossy_with_chunk<R: Read>(reader: &mut R) -> ImageResult<VP8Frame> {
+ let (cursor, chunk) =
+ read_chunk(reader)?.ok_or_else(|| Error::from(io::ErrorKind::UnexpectedEof))?;
+
+ if chunk != WebPRiffChunk::VP8 {
+ return Err(ChunkHeaderInvalid(chunk.to_fourcc()).into());
+ }
+
+ read_lossy(cursor)
+}
+
+fn read_lossy(cursor: Cursor<Vec<u8>>) -> ImageResult<VP8Frame> {
+ let mut vp8_decoder = Vp8Decoder::new(cursor);
+ let frame = vp8_decoder.decode_frame()?;
+
+ Ok(frame.clone())
+}
+
+fn read_image<R: Read>(reader: &mut R, width: u32, height: u32) -> ImageResult<WebPStatic> {
+ let chunk = read_chunk(reader)?;
+
+ match chunk {
+ Some((cursor, WebPRiffChunk::VP8)) => {
+ let mut vp8_decoder = Vp8Decoder::new(cursor);
+ let frame = vp8_decoder.decode_frame()?;
+
+ let img = WebPStatic::from_lossy(frame.clone())?;
+
+ Ok(img)
+ }
+ Some((cursor, WebPRiffChunk::VP8L)) => {
+ let mut lossless_decoder = LosslessDecoder::new(cursor);
+ let frame = lossless_decoder.decode_frame()?;
+
+ let img = WebPStatic::Lossless(frame.clone());
+
+ Ok(img)
+ }
+ Some((mut cursor, WebPRiffChunk::ALPH)) => {
+ let alpha_chunk = read_alpha_chunk(&mut cursor, width, height)?;
+
+ let vp8_frame = read_lossy_with_chunk(reader)?;
+
+ let img = WebPStatic::from_alpha_lossy(alpha_chunk, vp8_frame)?;
+
+ Ok(img)
+ }
+ None => Err(ImageError::IoError(Error::from(
+ io::ErrorKind::UnexpectedEof,
+ ))),
+ Some((_, chunk)) => Err(ChunkHeaderInvalid(chunk.to_fourcc()).into()),
+ }
+}
+
+#[derive(Debug)]
+struct AlphaChunk {
+ _preprocessing: bool,
+ filtering_method: FilteringMethod,
+ data: Vec<u8>,
+}
+
+#[derive(Debug, Copy, Clone)]
+enum FilteringMethod {
+ None,
+ Horizontal,
+ Vertical,
+ Gradient,
+}
+
+fn read_alpha_chunk<R: Read>(reader: &mut R, width: u32, height: u32) -> ImageResult<AlphaChunk> {
+ let info_byte = reader.read_u8()?;
+
+ let reserved = info_byte & 0b11000000;
+ let preprocessing = (info_byte & 0b00110000) >> 4;
+ let filtering = (info_byte & 0b00001100) >> 2;
+ let compression = info_byte & 0b00000011;
+
+ if reserved != 0 {
+ return Err(DecoderError::InfoBitsInvalid {
+ name: "reserved",
+ value: reserved.into(),
+ }
+ .into());
+ }
+
+ let preprocessing = match preprocessing {
+ 0 => false,
+ 1 => true,
+ _ => {
+ return Err(DecoderError::InfoBitsInvalid {
+ name: "reserved",
+ value: preprocessing.into(),
+ }
+ .into())
+ }
+ };
+
+ let filtering_method = match filtering {
+ 0 => FilteringMethod::None,
+ 1 => FilteringMethod::Horizontal,
+ 2 => FilteringMethod::Vertical,
+ 3 => FilteringMethod::Gradient,
+ _ => unreachable!(),
+ };
+
+ let lossless_compression = match compression {
+ 0 => false,
+ 1 => true,
+ _ => {
+ return Err(DecoderError::InfoBitsInvalid {
+ name: "lossless compression",
+ value: compression.into(),
+ }
+ .into())
+ }
+ };
+
+ let mut framedata = Vec::new();
+ reader.read_to_end(&mut framedata)?;
+
+ let data = if lossless_compression {
+ let cursor = io::Cursor::new(framedata);
+
+ let mut decoder = LosslessDecoder::new(cursor);
+ //this is a potential problem for large images; would require rewriting lossless decoder to use u32 for width and height
+ let width: u16 = width
+ .try_into()
+ .map_err(|_| ImageError::from(DecoderError::ImageTooLarge))?;
+ let height: u16 = height
+ .try_into()
+ .map_err(|_| ImageError::from(DecoderError::ImageTooLarge))?;
+ let frame = decoder.decode_frame_implicit_dims(width, height)?;
+
+ let mut data = vec![0u8; usize::from(width) * usize::from(height)];
+
+ frame.fill_green(&mut data);
+
+ data
+ } else {
+ framedata
+ };
+
+ let chunk = AlphaChunk {
+ _preprocessing: preprocessing,
+ filtering_method,
+ data,
+ };
+
+ Ok(chunk)
+}
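
A self-contained sketch of the alpha "over" blend implemented by `do_alpha_blending` above; it is a simplification for illustration, not the vendored function itself:

fn blend_over(src: [u8; 4], dst: [u8; 4]) -> [u8; 4] {
    // out_a = src_a + dst_a * (1 - src_a / 255); the color channels are weighted the same way.
    let sa = f64::from(src[3]);
    let da = f64::from(dst[3]);
    let out_a = sa + da * (1.0 - sa / 255.0);
    if out_a == 0.0 {
        return [0, 0, 0, 0];
    }
    let mut out = [0u8; 4];
    for i in 0..3 {
        let c = (f64::from(src[i]) * sa + f64::from(dst[i]) * da * (1.0 - sa / 255.0)) / out_a;
        out[i] = c as u8; // truncate the fractional part, as the decoder does
    }
    out[3] = out_a as u8;
    out
}

fn main() {
    // An opaque source pixel replaces the canvas pixel entirely.
    assert_eq!(blend_over([10, 20, 30, 255], [200, 200, 200, 200]), [10, 20, 30, 255]);
    // Blending onto a fully transparent canvas keeps the source pixel unchanged.
    assert_eq!(blend_over([255, 0, 0, 128], [255, 255, 255, 0]), [255, 0, 0, 128]);
}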
diff --git a/vendor/image/src/codecs/webp/huffman.rs b/vendor/image/src/codecs/webp/huffman.rs
new file mode 100644
index 0000000..986eee6
--- /dev/null
+++ b/vendor/image/src/codecs/webp/huffman.rs
@@ -0,0 +1,202 @@
+use std::convert::TryInto;
+
+use super::lossless::BitReader;
+use super::lossless::DecoderError;
+use crate::ImageResult;
+
+/// Rudimentary utility for reading Canonical Huffman Codes.
+/// Based on https://github.com/webmproject/libwebp/blob/7f8472a610b61ec780ef0a8873cd954ac512a505/src/utils/huffman.c
+///
+
+const MAX_ALLOWED_CODE_LENGTH: usize = 15;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum HuffmanTreeNode {
+ Branch(usize), //offset in vector to children
+ Leaf(u16), //symbol stored in leaf
+ Empty,
+}
+
+/// Huffman tree
+#[derive(Clone, Debug, Default)]
+pub(crate) struct HuffmanTree {
+ tree: Vec<HuffmanTreeNode>,
+ max_nodes: usize,
+ num_nodes: usize,
+}
+
+impl HuffmanTree {
+ fn is_full(&self) -> bool {
+ self.num_nodes == self.max_nodes
+ }
+
+ /// Turns a node from empty into a branch and assigns its children
+ fn assign_children(&mut self, node_index: usize) -> usize {
+ let offset_index = self.num_nodes - node_index;
+ self.tree[node_index] = HuffmanTreeNode::Branch(offset_index);
+ self.num_nodes += 2;
+
+ offset_index
+ }
+
+ /// Init a huffman tree
+ fn init(num_leaves: usize) -> ImageResult<HuffmanTree> {
+ if num_leaves == 0 {
+ return Err(DecoderError::HuffmanError.into());
+ }
+
+ let max_nodes = 2 * num_leaves - 1;
+ let tree = vec![HuffmanTreeNode::Empty; max_nodes];
+ let num_nodes = 1;
+
+ let tree = HuffmanTree {
+ tree,
+ max_nodes,
+ num_nodes,
+ };
+
+ Ok(tree)
+ }
+
+ /// Converts code lengths to codes
+ fn code_lengths_to_codes(code_lengths: &[u16]) -> ImageResult<Vec<Option<u16>>> {
+ let max_code_length = *code_lengths
+ .iter()
+ .reduce(|a, b| if a >= b { a } else { b })
+ .unwrap();
+
+ if max_code_length > MAX_ALLOWED_CODE_LENGTH.try_into().unwrap() {
+ return Err(DecoderError::HuffmanError.into());
+ }
+
+ let mut code_length_hist = vec![0; MAX_ALLOWED_CODE_LENGTH + 1];
+
+ for &length in code_lengths.iter() {
+ code_length_hist[usize::from(length)] += 1;
+ }
+
+ code_length_hist[0] = 0;
+
+ let mut curr_code = 0;
+ let mut next_codes = vec![None; MAX_ALLOWED_CODE_LENGTH + 1];
+
+ for code_len in 1..=usize::from(max_code_length) {
+ curr_code = (curr_code + code_length_hist[code_len - 1]) << 1;
+ next_codes[code_len] = Some(curr_code);
+ }
+
+ let mut huff_codes = vec![None; code_lengths.len()];
+
+ for (symbol, &length) in code_lengths.iter().enumerate() {
+ let length = usize::from(length);
+ if length > 0 {
+ huff_codes[symbol] = next_codes[length];
+ if let Some(value) = next_codes[length].as_mut() {
+ *value += 1;
+ }
+ } else {
+ huff_codes[symbol] = None;
+ }
+ }
+
+ Ok(huff_codes)
+ }
+
+ /// Adds a symbol to a huffman tree
+ fn add_symbol(&mut self, symbol: u16, code: u16, code_length: u16) -> ImageResult<()> {
+ let mut node_index = 0;
+ let code = usize::from(code);
+
+ for length in (0..code_length).rev() {
+ if node_index >= self.max_nodes {
+ return Err(DecoderError::HuffmanError.into());
+ }
+
+ let node = self.tree[node_index];
+
+ let offset = match node {
+ HuffmanTreeNode::Empty => {
+ if self.is_full() {
+ return Err(DecoderError::HuffmanError.into());
+ }
+ self.assign_children(node_index)
+ }
+ HuffmanTreeNode::Leaf(_) => return Err(DecoderError::HuffmanError.into()),
+ HuffmanTreeNode::Branch(offset) => offset,
+ };
+
+ node_index += offset + ((code >> length) & 1);
+ }
+
+ match self.tree[node_index] {
+ HuffmanTreeNode::Empty => self.tree[node_index] = HuffmanTreeNode::Leaf(symbol),
+ HuffmanTreeNode::Leaf(_) => return Err(DecoderError::HuffmanError.into()),
+ HuffmanTreeNode::Branch(_offset) => return Err(DecoderError::HuffmanError.into()),
+ }
+
+ Ok(())
+ }
+
+ /// Builds a tree implicitly, just from code lengths
+ pub(crate) fn build_implicit(code_lengths: Vec<u16>) -> ImageResult<HuffmanTree> {
+ let mut num_symbols = 0;
+ let mut root_symbol = 0;
+
+ for (symbol, length) in code_lengths.iter().enumerate() {
+ if *length > 0 {
+ num_symbols += 1;
+ root_symbol = symbol.try_into().unwrap();
+ }
+ }
+
+ let mut tree = HuffmanTree::init(num_symbols)?;
+
+ if num_symbols == 1 {
+ tree.add_symbol(root_symbol, 0, 0)?;
+ } else {
+ let codes = HuffmanTree::code_lengths_to_codes(&code_lengths)?;
+
+ for (symbol, &length) in code_lengths.iter().enumerate() {
+ if length > 0 && codes[symbol].is_some() {
+ tree.add_symbol(symbol.try_into().unwrap(), codes[symbol].unwrap(), length)?;
+ }
+ }
+ }
+
+ Ok(tree)
+ }
+
+ /// Builds a tree explicitly from lengths, codes and symbols
+ pub(crate) fn build_explicit(
+ code_lengths: Vec<u16>,
+ codes: Vec<u16>,
+ symbols: Vec<u16>,
+ ) -> ImageResult<HuffmanTree> {
+ let mut tree = HuffmanTree::init(symbols.len())?;
+
+ for i in 0..symbols.len() {
+ tree.add_symbol(symbols[i], codes[i], code_lengths[i])?;
+ }
+
+ Ok(tree)
+ }
+
+ /// Reads a symbol using the bitstream
+ pub(crate) fn read_symbol(&self, bit_reader: &mut BitReader) -> ImageResult<u16> {
+ let mut index = 0;
+ let mut node = self.tree[index];
+
+ while let HuffmanTreeNode::Branch(children_offset) = node {
+ index += children_offset + bit_reader.read_bits::<usize>(1)?;
+ node = self.tree[index];
+ }
+
+ let symbol = match node {
+ HuffmanTreeNode::Branch(_) => unreachable!(),
+ HuffmanTreeNode::Empty => return Err(DecoderError::HuffmanError.into()),
+ HuffmanTreeNode::Leaf(symbol) => symbol,
+ };
+
+ Ok(symbol)
+ }
+}
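
A self-contained sketch of the canonical code assignment that `code_lengths_to_codes` above performs (shorter codes come first, ties broken by symbol order), independent of the crate-private types:

// Assign canonical Huffman codes from code lengths; length 0 means "symbol unused".
fn canonical_codes(code_lengths: &[u16]) -> Vec<Option<u16>> {
    let max_len = code_lengths.iter().copied().max().unwrap_or(0) as usize;

    // Histogram of code lengths.
    let mut hist = vec![0u16; max_len + 1];
    for &len in code_lengths {
        hist[len as usize] += 1;
    }
    hist[0] = 0;

    // First code of each length: previous first code plus previous count, shifted left once.
    let mut next = vec![0u16; max_len + 1];
    let mut code = 0u16;
    for len in 1..=max_len {
        code = (code + hist[len - 1]) << 1;
        next[len] = code;
    }

    // Hand out codes in symbol order within each length.
    code_lengths
        .iter()
        .map(|&len| {
            if len == 0 {
                None
            } else {
                let c = next[len as usize];
                next[len as usize] += 1;
                Some(c)
            }
        })
        .collect()
}

fn main() {
    // Lengths [2, 1, 3, 3] yield codes 10, 0, 110, 111 (shown MSB-first), which is
    // exactly enough information for the decoder to rebuild the tree from lengths alone.
    assert_eq!(
        canonical_codes(&[2, 1, 3, 3]),
        vec![Some(0b10), Some(0b0), Some(0b110), Some(0b111)]
    );
}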
diff --git a/vendor/image/src/codecs/webp/loop_filter.rs b/vendor/image/src/codecs/webp/loop_filter.rs
new file mode 100644
index 0000000..312059f
--- /dev/null
+++ b/vendor/image/src/codecs/webp/loop_filter.rs
@@ -0,0 +1,147 @@
+//! Does loop filtering on webp lossy images
+
+use crate::utils::clamp;
+
+#[inline]
+fn c(val: i32) -> i32 {
+ clamp(val, -128, 127)
+}
+
+//unsigned to signed
+#[inline]
+fn u2s(val: u8) -> i32 {
+ i32::from(val) - 128
+}
+
+//signed to unsigned
+#[inline]
+fn s2u(val: i32) -> u8 {
+ (c(val) + 128) as u8
+}
+
+#[inline]
+fn diff(val1: u8, val2: u8) -> u8 {
+ if val1 > val2 {
+ val1 - val2
+ } else {
+ val2 - val1
+ }
+}
+
+//RFC 6386, section 15.2 (simple filter adjustment)
+fn common_adjust(use_outer_taps: bool, pixels: &mut [u8], point: usize, stride: usize) -> i32 {
+ let p1 = u2s(pixels[point - 2 * stride]);
+ let p0 = u2s(pixels[point - stride]);
+ let q0 = u2s(pixels[point]);
+ let q1 = u2s(pixels[point + stride]);
+
+ //value for the outer 2 pixels
+ let outer = if use_outer_taps { c(p1 - q1) } else { 0 };
+
+ let mut a = c(outer + 3 * (q0 - p0));
+
+ let b = (c(a + 3)) >> 3;
+
+ a = (c(a + 4)) >> 3;
+
+ pixels[point] = s2u(q0 - a);
+ pixels[point - stride] = s2u(p0 + b);
+
+ a
+}
+
+fn simple_threshold(filter_limit: i32, pixels: &[u8], point: usize, stride: usize) -> bool {
+ i32::from(diff(pixels[point - stride], pixels[point])) * 2
+ + i32::from(diff(pixels[point - 2 * stride], pixels[point + stride])) / 2
+ <= filter_limit
+}
+
+fn should_filter(
+ interior_limit: u8,
+ edge_limit: u8,
+ pixels: &[u8],
+ point: usize,
+ stride: usize,
+) -> bool {
+ simple_threshold(i32::from(edge_limit), pixels, point, stride)
+ && diff(pixels[point - 4 * stride], pixels[point - 3 * stride]) <= interior_limit
+ && diff(pixels[point - 3 * stride], pixels[point - 2 * stride]) <= interior_limit
+ && diff(pixels[point - 2 * stride], pixels[point - stride]) <= interior_limit
+ && diff(pixels[point + 3 * stride], pixels[point + 2 * stride]) <= interior_limit
+ && diff(pixels[point + 2 * stride], pixels[point + stride]) <= interior_limit
+ && diff(pixels[point + stride], pixels[point]) <= interior_limit
+}
+
+fn high_edge_variance(threshold: u8, pixels: &[u8], point: usize, stride: usize) -> bool {
+ diff(pixels[point - 2 * stride], pixels[point - stride]) > threshold
+ || diff(pixels[point + stride], pixels[point]) > threshold
+}
+
+//simple filter
+//affects 4 pixels on an edge (2 on each side)
+pub(crate) fn simple_segment(edge_limit: u8, pixels: &mut [u8], point: usize, stride: usize) {
+ if simple_threshold(i32::from(edge_limit), pixels, point, stride) {
+ common_adjust(true, pixels, point, stride);
+ }
+}
+
+//normal filter
+//works on the 8 pixels on the edges between subblocks inside a macroblock
+pub(crate) fn subblock_filter(
+ hev_threshold: u8,
+ interior_limit: u8,
+ edge_limit: u8,
+ pixels: &mut [u8],
+ point: usize,
+ stride: usize,
+) {
+ if should_filter(interior_limit, edge_limit, pixels, point, stride) {
+ let hv = high_edge_variance(hev_threshold, pixels, point, stride);
+
+ let a = (common_adjust(hv, pixels, point, stride) + 1) >> 1;
+
+ if !hv {
+ pixels[point + stride] = s2u(u2s(pixels[point + stride]) - a);
+ pixels[point - 2 * stride] = s2u(u2s(pixels[point - 2 * stride]) - a);
+ }
+ }
+}
+
+//normal filter
+//works on the 8 pixels on the edges between macroblocks
+pub(crate) fn macroblock_filter(
+ hev_threshold: u8,
+ interior_limit: u8,
+ edge_limit: u8,
+ pixels: &mut [u8],
+ point: usize,
+ stride: usize,
+) {
+ let mut spixels = [0i32; 8];
+ for i in 0..8 {
+ spixels[i] = u2s(pixels[point + i * stride - 4 * stride]);
+ }
+
+ if should_filter(interior_limit, edge_limit, pixels, point, stride) {
+ if !high_edge_variance(hev_threshold, pixels, point, stride) {
+ let w = c(c(spixels[2] - spixels[5]) + 3 * (spixels[4] - spixels[3]));
+
+ let mut a = c((27 * w + 63) >> 7);
+
+ pixels[point] = s2u(spixels[4] - a);
+ pixels[point - stride] = s2u(spixels[3] + a);
+
+ a = c((18 * w + 63) >> 7);
+
+ pixels[point + stride] = s2u(spixels[5] - a);
+ pixels[point - 2 * stride] = s2u(spixels[2] + a);
+
+ a = c((9 * w + 63) >> 7);
+
+ pixels[point + 2 * stride] = s2u(spixels[6] - a);
+ pixels[point - 3 * stride] = s2u(spixels[1] + a);
+ } else {
+ common_adjust(true, pixels, point, stride);
+ }
+ }
+}
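
A small standalone sketch of the simple-filter edge test used above; `p1, p0 | q0, q1` are the two pixels on each side of the edge, and the limit value is illustrative:

fn simple_threshold(filter_limit: i32, p1: u8, p0: u8, q0: u8, q1: u8) -> bool {
    // Filter only when 2*|p0 - q0| + |p1 - q1|/2 stays within the edge limit.
    let d0 = (i32::from(p0) - i32::from(q0)).abs();
    let d1 = (i32::from(p1) - i32::from(q1)).abs();
    d0 * 2 + d1 / 2 <= filter_limit
}

fn main() {
    // A mild step across the edge (differences of 3 and 4) passes a limit of 8: 3*2 + 4/2 = 8.
    assert!(simple_threshold(8, 120, 121, 124, 124));
    // A hard edge (difference of 40) fails the test, so real image detail is left unfiltered.
    assert!(!simple_threshold(8, 100, 100, 140, 140));
}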
diff --git a/vendor/image/src/codecs/webp/lossless.rs b/vendor/image/src/codecs/webp/lossless.rs
new file mode 100644
index 0000000..7271eda
--- /dev/null
+++ b/vendor/image/src/codecs/webp/lossless.rs
@@ -0,0 +1,783 @@
+//! Decoding of lossless WebP images
+//!
+//! [Lossless spec](https://developers.google.com/speed/webp/docs/webp_lossless_bitstream_specification)
+//!
+
+use std::{
+ convert::TryFrom,
+ convert::TryInto,
+ error, fmt,
+ io::Read,
+ ops::{AddAssign, Shl},
+};
+
+use byteorder::ReadBytesExt;
+
+use crate::{error::DecodingError, ImageError, ImageFormat, ImageResult};
+
+use super::huffman::HuffmanTree;
+use super::lossless_transform::{add_pixels, TransformType};
+
+const CODE_LENGTH_CODES: usize = 19;
+const CODE_LENGTH_CODE_ORDER: [usize; CODE_LENGTH_CODES] = [
+ 17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+];
+
+#[rustfmt::skip]
+const DISTANCE_MAP: [(i8, i8); 120] = [
+ (0, 1), (1, 0), (1, 1), (-1, 1), (0, 2), (2, 0), (1, 2), (-1, 2),
+ (2, 1), (-2, 1), (2, 2), (-2, 2), (0, 3), (3, 0), (1, 3), (-1, 3),
+ (3, 1), (-3, 1), (2, 3), (-2, 3), (3, 2), (-3, 2), (0, 4), (4, 0),
+ (1, 4), (-1, 4), (4, 1), (-4, 1), (3, 3), (-3, 3), (2, 4), (-2, 4),
+ (4, 2), (-4, 2), (0, 5), (3, 4), (-3, 4), (4, 3), (-4, 3), (5, 0),
+ (1, 5), (-1, 5), (5, 1), (-5, 1), (2, 5), (-2, 5), (5, 2), (-5, 2),
+ (4, 4), (-4, 4), (3, 5), (-3, 5), (5, 3), (-5, 3), (0, 6), (6, 0),
+ (1, 6), (-1, 6), (6, 1), (-6, 1), (2, 6), (-2, 6), (6, 2), (-6, 2),
+ (4, 5), (-4, 5), (5, 4), (-5, 4), (3, 6), (-3, 6), (6, 3), (-6, 3),
+ (0, 7), (7, 0), (1, 7), (-1, 7), (5, 5), (-5, 5), (7, 1), (-7, 1),
+ (4, 6), (-4, 6), (6, 4), (-6, 4), (2, 7), (-2, 7), (7, 2), (-7, 2),
+ (3, 7), (-3, 7), (7, 3), (-7, 3), (5, 6), (-5, 6), (6, 5), (-6, 5),
+ (8, 0), (4, 7), (-4, 7), (7, 4), (-7, 4), (8, 1), (8, 2), (6, 6),
+ (-6, 6), (8, 3), (5, 7), (-5, 7), (7, 5), (-7, 5), (8, 4), (6, 7),
+ (-6, 7), (7, 6), (-7, 6), (8, 5), (7, 7), (-7, 7), (8, 6), (8, 7)
+];
+
+const GREEN: usize = 0;
+const RED: usize = 1;
+const BLUE: usize = 2;
+const ALPHA: usize = 3;
+const DIST: usize = 4;
+
+const HUFFMAN_CODES_PER_META_CODE: usize = 5;
+
+type HuffmanCodeGroup = [HuffmanTree; HUFFMAN_CODES_PER_META_CODE];
+
+const ALPHABET_SIZE: [u16; HUFFMAN_CODES_PER_META_CODE] = [256 + 24, 256, 256, 256, 40];
+
+#[inline]
+pub(crate) fn subsample_size(size: u16, bits: u8) -> u16 {
+ ((u32::from(size) + (1u32 << bits) - 1) >> bits)
+ .try_into()
+ .unwrap()
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum DecoderError {
+ /// Signature of 0x2f not found
+ LosslessSignatureInvalid(u8),
+ /// Version Number must be 0
+ VersionNumberInvalid(u8),
+
+ ///
+ InvalidColorCacheBits(u8),
+
+ HuffmanError,
+
+ BitStreamError,
+
+ TransformError,
+}
+
+impl fmt::Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DecoderError::LosslessSignatureInvalid(sig) => {
+ f.write_fmt(format_args!("Invalid lossless signature: {}", sig))
+ }
+ DecoderError::VersionNumberInvalid(num) => {
+ f.write_fmt(format_args!("Invalid version number: {}", num))
+ }
+ DecoderError::InvalidColorCacheBits(num) => f.write_fmt(format_args!(
+ "Invalid color cache(must be between 1-11): {}",
+ num
+ )),
+ DecoderError::HuffmanError => f.write_fmt(format_args!("Error building Huffman Tree")),
+ DecoderError::BitStreamError => {
+ f.write_fmt(format_args!("Error while reading bitstream"))
+ }
+ DecoderError::TransformError => {
+ f.write_fmt(format_args!("Error while reading or writing transforms"))
+ }
+ }
+ }
+}
+
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {}
+
+const NUM_TRANSFORM_TYPES: usize = 4;
+
+//Decodes lossless WebP images
+#[derive(Debug)]
+pub(crate) struct LosslessDecoder<R> {
+ r: R,
+ bit_reader: BitReader,
+ frame: LosslessFrame,
+ transforms: [Option<TransformType>; NUM_TRANSFORM_TYPES],
+ transform_order: Vec<u8>,
+}
+
+impl<R: Read> LosslessDecoder<R> {
+ /// Create a new decoder
+ pub(crate) fn new(r: R) -> LosslessDecoder<R> {
+ LosslessDecoder {
+ r,
+ bit_reader: BitReader::new(),
+ frame: Default::default(),
+ transforms: [None, None, None, None],
+ transform_order: Vec::new(),
+ }
+ }
+
+ /// Reads the frame
+ pub(crate) fn decode_frame(&mut self) -> ImageResult<&LosslessFrame> {
+ let signature = self.r.read_u8()?;
+
+ if signature != 0x2f {
+ return Err(DecoderError::LosslessSignatureInvalid(signature).into());
+ }
+
+ let mut buf = Vec::new();
+ self.r.read_to_end(&mut buf)?;
+ self.bit_reader.init(buf);
+
+ self.frame.width = self.bit_reader.read_bits::<u16>(14)? + 1;
+ self.frame.height = self.bit_reader.read_bits::<u16>(14)? + 1;
+
+ let _alpha_used = self.bit_reader.read_bits::<u8>(1)?;
+
+ let version_num = self.bit_reader.read_bits::<u8>(3)?;
+
+ if version_num != 0 {
+ return Err(DecoderError::VersionNumberInvalid(version_num).into());
+ }
+
+ let mut data = self.decode_image_stream(self.frame.width, self.frame.height, true)?;
+
+ for &trans_index in self.transform_order.iter().rev() {
+ let trans = self.transforms[usize::from(trans_index)].as_ref().unwrap();
+ trans.apply_transform(&mut data, self.frame.width, self.frame.height)?;
+ }
+
+ self.frame.buf = data;
+ Ok(&self.frame)
+ }
+
+ //used for alpha data in extended decoding
+ pub(crate) fn decode_frame_implicit_dims(
+ &mut self,
+ width: u16,
+ height: u16,
+ ) -> ImageResult<&LosslessFrame> {
+ let mut buf = Vec::new();
+ self.r.read_to_end(&mut buf)?;
+ self.bit_reader.init(buf);
+
+ self.frame.width = width;
+ self.frame.height = height;
+
+ let mut data = self.decode_image_stream(self.frame.width, self.frame.height, true)?;
+
+ //transform_order is vector of indices(0-3) into transforms in order decoded
+ for &trans_index in self.transform_order.iter().rev() {
+ let trans = self.transforms[usize::from(trans_index)].as_ref().unwrap();
+ trans.apply_transform(&mut data, self.frame.width, self.frame.height)?;
+ }
+
+ self.frame.buf = data;
+ Ok(&self.frame)
+ }
+
+ /// Reads image data from the bitstream
+ /// Can be in any of the 5 roles described in the specification
+ /// The ARGB image role has different behaviour from the other 4
+ /// xsize and ysize describe the size of the blocks, where each block has its own entropy code
+ fn decode_image_stream(
+ &mut self,
+ xsize: u16,
+ ysize: u16,
+ is_argb_img: bool,
+ ) -> ImageResult<Vec<u32>> {
+ let trans_xsize = if is_argb_img {
+ self.read_transforms()?
+ } else {
+ xsize
+ };
+
+ let color_cache_bits = self.read_color_cache()?;
+
+ let color_cache = color_cache_bits.map(|bits| {
+ let size = 1 << bits;
+ let cache = vec![0u32; size];
+ ColorCache {
+ color_cache_bits: bits,
+ color_cache: cache,
+ }
+ });
+
+ let huffman_info = self.read_huffman_codes(is_argb_img, trans_xsize, ysize, color_cache)?;
+
+ //decode data
+ let data = self.decode_image_data(trans_xsize, ysize, huffman_info)?;
+
+ Ok(data)
+ }
+
+ /// Reads transforms and their data from the bitstream
+ fn read_transforms(&mut self) -> ImageResult<u16> {
+ let mut xsize = self.frame.width;
+
+ while self.bit_reader.read_bits::<u8>(1)? == 1 {
+ let transform_type_val = self.bit_reader.read_bits::<u8>(2)?;
+
+ if self.transforms[usize::from(transform_type_val)].is_some() {
+ // can only have one of each transform; a repeated transform is an error
+ return Err(DecoderError::TransformError.into());
+ }
+
+ self.transform_order.push(transform_type_val);
+
+ let transform_type = match transform_type_val {
+ 0 => {
+ //predictor
+
+ let size_bits = self.bit_reader.read_bits::<u8>(3)? + 2;
+
+ let block_xsize = subsample_size(xsize, size_bits);
+ let block_ysize = subsample_size(self.frame.height, size_bits);
+
+ let data = self.decode_image_stream(block_xsize, block_ysize, false)?;
+
+ TransformType::PredictorTransform {
+ size_bits,
+ predictor_data: data,
+ }
+ }
+ 1 => {
+ //color transform
+
+ let size_bits = self.bit_reader.read_bits::<u8>(3)? + 2;
+
+ let block_xsize = subsample_size(xsize, size_bits);
+ let block_ysize = subsample_size(self.frame.height, size_bits);
+
+ let data = self.decode_image_stream(block_xsize, block_ysize, false)?;
+
+ TransformType::ColorTransform {
+ size_bits,
+ transform_data: data,
+ }
+ }
+ 2 => {
+ //subtract green
+
+ TransformType::SubtractGreen
+ }
+ 3 => {
+ let color_table_size = self.bit_reader.read_bits::<u16>(8)? + 1;
+
+ let mut color_map = self.decode_image_stream(color_table_size, 1, false)?;
+
+ let bits = if color_table_size <= 2 {
+ 3
+ } else if color_table_size <= 4 {
+ 2
+ } else if color_table_size <= 16 {
+ 1
+ } else {
+ 0
+ };
+ xsize = subsample_size(xsize, bits);
+
+ Self::adjust_color_map(&mut color_map);
+
+ TransformType::ColorIndexingTransform {
+ table_size: color_table_size,
+ table_data: color_map,
+ }
+ }
+ _ => unreachable!(),
+ };
+
+ self.transforms[usize::from(transform_type_val)] = Some(transform_type);
+ }
+
+ Ok(xsize)
+ }
+
+ /// Adjusts the color map since it's subtraction coded
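+ ///
+ /// Illustrative example: with `add_pixels` wrapping each channel mod 256, a decoded
+ /// map of `[0x10, 0x05, 0x03]` becomes `[0x10, 0x15, 0x18]`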
+ fn adjust_color_map(color_map: &mut Vec<u32>) {
+ for i in 1..color_map.len() {
+ color_map[i] = add_pixels(color_map[i], color_map[i - 1]);
+ }
+ }
+
+ /// Reads huffman codes associated with an image
+ fn read_huffman_codes(
+ &mut self,
+ read_meta: bool,
+ xsize: u16,
+ ysize: u16,
+ color_cache: Option<ColorCache>,
+ ) -> ImageResult<HuffmanInfo> {
+ let mut num_huff_groups = 1;
+
+ let mut huffman_bits = 0;
+ let mut huffman_xsize = 1;
+ let mut huffman_ysize = 1;
+ let mut entropy_image = Vec::new();
+
+ if read_meta && self.bit_reader.read_bits::<u8>(1)? == 1 {
+ //meta huffman codes
+ huffman_bits = self.bit_reader.read_bits::<u8>(3)? + 2;
+ huffman_xsize = subsample_size(xsize, huffman_bits);
+ huffman_ysize = subsample_size(ysize, huffman_bits);
+
+ entropy_image = self.decode_image_stream(huffman_xsize, huffman_ysize, false)?;
+
+ for pixel in entropy_image.iter_mut() {
+ let meta_huff_code = (*pixel >> 8) & 0xffff;
+
+ *pixel = meta_huff_code;
+
+ if meta_huff_code >= num_huff_groups {
+ num_huff_groups = meta_huff_code + 1;
+ }
+ }
+ }
+
+ let mut hufftree_groups = Vec::new();
+
+ for _i in 0..num_huff_groups {
+ let mut group: HuffmanCodeGroup = Default::default();
+ for j in 0..HUFFMAN_CODES_PER_META_CODE {
+ let mut alphabet_size = ALPHABET_SIZE[j];
+ if j == 0 {
+ if let Some(color_cache) = color_cache.as_ref() {
+ alphabet_size += 1 << color_cache.color_cache_bits;
+ }
+ }
+
+ let tree = self.read_huffman_code(alphabet_size)?;
+ group[j] = tree;
+ }
+ hufftree_groups.push(group);
+ }
+
+ let huffman_mask = if huffman_bits == 0 {
+ !0
+ } else {
+ (1 << huffman_bits) - 1
+ };
+
+ let info = HuffmanInfo {
+ xsize: huffman_xsize,
+ _ysize: huffman_ysize,
+ color_cache,
+ image: entropy_image,
+ bits: huffman_bits,
+ mask: huffman_mask,
+ huffman_code_groups: hufftree_groups,
+ };
+
+ Ok(info)
+ }
+
+ /// Decodes and returns a single huffman tree
+ fn read_huffman_code(&mut self, alphabet_size: u16) -> ImageResult<HuffmanTree> {
+ let simple = self.bit_reader.read_bits::<u8>(1)? == 1;
+
+ if simple {
+ let num_symbols = self.bit_reader.read_bits::<u8>(1)? + 1;
+
+ let mut code_lengths = vec![u16::from(num_symbols - 1)];
+ let mut codes = vec![0];
+ let mut symbols = Vec::new();
+
+ let is_first_8bits = self.bit_reader.read_bits::<u8>(1)?;
+ symbols.push(self.bit_reader.read_bits::<u16>(1 + 7 * is_first_8bits)?);
+
+ if num_symbols == 2 {
+ symbols.push(self.bit_reader.read_bits::<u16>(8)?);
+ code_lengths.push(1);
+ codes.push(1);
+ }
+
+ HuffmanTree::build_explicit(code_lengths, codes, symbols)
+ } else {
+ let mut code_length_code_lengths = vec![0; CODE_LENGTH_CODES];
+
+ let num_code_lengths = 4 + self.bit_reader.read_bits::<usize>(4)?;
+ for i in 0..num_code_lengths {
+ code_length_code_lengths[CODE_LENGTH_CODE_ORDER[i]] =
+ self.bit_reader.read_bits(3)?;
+ }
+
+ let new_code_lengths =
+ self.read_huffman_code_lengths(code_length_code_lengths, alphabet_size)?;
+
+ HuffmanTree::build_implicit(new_code_lengths)
+ }
+ }
+
+ /// Reads huffman code lengths
+ fn read_huffman_code_lengths(
+ &mut self,
+ code_length_code_lengths: Vec<u16>,
+ num_symbols: u16,
+ ) -> ImageResult<Vec<u16>> {
+ let table = HuffmanTree::build_implicit(code_length_code_lengths)?;
+
+ let mut max_symbol = if self.bit_reader.read_bits::<u8>(1)? == 1 {
+ let length_nbits = 2 + 2 * self.bit_reader.read_bits::<u8>(3)?;
+ 2 + self.bit_reader.read_bits::<u16>(length_nbits)?
+ } else {
+ num_symbols
+ };
+
+ let mut code_lengths = vec![0; usize::from(num_symbols)];
+ let mut prev_code_len = 8; //default code length
+
+ let mut symbol = 0;
+ while symbol < num_symbols {
+ if max_symbol == 0 {
+ break;
+ }
+ max_symbol -= 1;
+
+ let code_len = table.read_symbol(&mut self.bit_reader)?;
+
+ if code_len < 16 {
+ code_lengths[usize::from(symbol)] = code_len;
+ symbol += 1;
+ if code_len != 0 {
+ prev_code_len = code_len;
+ }
+ } else {
+ let use_prev = code_len == 16;
+ let slot = code_len - 16;
+ let extra_bits = match slot {
+ 0 => 2,
+ 1 => 3,
+ 2 => 7,
+ _ => return Err(DecoderError::BitStreamError.into()),
+ };
+ let repeat_offset = match slot {
+ 0 | 1 => 3,
+ 2 => 11,
+ _ => return Err(DecoderError::BitStreamError.into()),
+ };
+
+ let mut repeat = self.bit_reader.read_bits::<u16>(extra_bits)? + repeat_offset;
+
+ if symbol + repeat > num_symbols {
+ return Err(DecoderError::BitStreamError.into());
+ } else {
+ let length = if use_prev { prev_code_len } else { 0 };
+ while repeat > 0 {
+ repeat -= 1;
+ code_lengths[usize::from(symbol)] = length;
+ symbol += 1;
+ }
+ }
+ }
+ }
+
+ Ok(code_lengths)
+ }
+
+ /// Decodes the image data using the huffman trees and one of the 3 methods of decoding
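+ /// The symbol ranges used below: a code below 256 is a literal green value (followed by
+ /// red, blue and alpha codes), 256..280 selects a back-reference length prefix, and
+ /// anything above that indexes the color cache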
+ fn decode_image_data(
+ &mut self,
+ width: u16,
+ height: u16,
+ mut huffman_info: HuffmanInfo,
+ ) -> ImageResult<Vec<u32>> {
+ let num_values = usize::from(width) * usize::from(height);
+ let mut data = vec![0; num_values];
+
+ let huff_index = huffman_info.get_huff_index(0, 0);
+ let mut tree = &huffman_info.huffman_code_groups[huff_index];
+ let mut last_cached = 0;
+ let mut index = 0;
+ let mut x = 0;
+ let mut y = 0;
+ while index < num_values {
+ if (x & huffman_info.mask) == 0 {
+ let index = huffman_info.get_huff_index(x, y);
+ tree = &huffman_info.huffman_code_groups[index];
+ }
+
+ let code = tree[GREEN].read_symbol(&mut self.bit_reader)?;
+
+ //check code
+ if code < 256 {
+ //literal, so just use huffman codes and read as argb
+ let red = tree[RED].read_symbol(&mut self.bit_reader)?;
+ let blue = tree[BLUE].read_symbol(&mut self.bit_reader)?;
+ let alpha = tree[ALPHA].read_symbol(&mut self.bit_reader)?;
+
+ data[index] = (u32::from(alpha) << 24)
+ + (u32::from(red) << 16)
+ + (u32::from(code) << 8)
+ + u32::from(blue);
+
+ index += 1;
+ x += 1;
+ if x >= width {
+ x = 0;
+ y += 1;
+ }
+ } else if code < 256 + 24 {
+ //backward reference, so go back and use that to add image data
+ let length_symbol = code - 256;
+ let length = Self::get_copy_distance(&mut self.bit_reader, length_symbol)?;
+
+ let dist_symbol = tree[DIST].read_symbol(&mut self.bit_reader)?;
+ let dist_code = Self::get_copy_distance(&mut self.bit_reader, dist_symbol)?;
+ let dist = Self::plane_code_to_distance(width, dist_code);
+
+ if index < dist || num_values - index < length {
+ return Err(DecoderError::BitStreamError.into());
+ }
+
+ for i in 0..length {
+ data[index + i] = data[index + i - dist];
+ }
+ index += length;
+ x += u16::try_from(length).unwrap();
+ while x >= width {
+ x -= width;
+ y += 1;
+ }
+ if index < num_values {
+ let index = huffman_info.get_huff_index(x, y);
+ tree = &huffman_info.huffman_code_groups[index];
+ }
+ } else {
+ //color cache, so use previously stored pixels to get this pixel
+ let key = code - 256 - 24;
+
+ if let Some(color_cache) = huffman_info.color_cache.as_mut() {
+ //cache old colors
+ while last_cached < index {
+ color_cache.insert(data[last_cached]);
+ last_cached += 1;
+ }
+ data[index] = color_cache.lookup(key.into())?;
+ } else {
+ return Err(DecoderError::BitStreamError.into());
+ }
+ index += 1;
+ x += 1;
+ if x >= width {
+ x = 0;
+ y += 1;
+ }
+ }
+ }
+
+ Ok(data)
+ }
+
+ /// Reads color cache data from the bitstream
+ fn read_color_cache(&mut self) -> ImageResult<Option<u8>> {
+ if self.bit_reader.read_bits::<u8>(1)? == 1 {
+ let code_bits = self.bit_reader.read_bits::<u8>(4)?;
+
+ if !(1..=11).contains(&code_bits) {
+ return Err(DecoderError::InvalidColorCacheBits(code_bits).into());
+ }
+
+ Ok(Some(code_bits))
+ } else {
+ Ok(None)
+ }
+ }
+
+ /// Gets the copy distance from the prefix code and bitstream
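+ ///
+ /// Worked example: `prefix_code = 5` gives `extra_bits = 1` and
+ /// `offset = (2 + 1) << 1 = 6`, so the decoded value is 7 or 8 depending on the extra bit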
+ fn get_copy_distance(bit_reader: &mut BitReader, prefix_code: u16) -> ImageResult<usize> {
+ if prefix_code < 4 {
+ return Ok(usize::from(prefix_code + 1));
+ }
+ let extra_bits: u8 = ((prefix_code - 2) >> 1).try_into().unwrap();
+ let offset = (2 + (usize::from(prefix_code) & 1)) << extra_bits;
+
+ Ok(offset + bit_reader.read_bits::<usize>(extra_bits)? + 1)
+ }
+
+ /// Gets distance to pixel
+ fn plane_code_to_distance(xsize: u16, plane_code: usize) -> usize {
+ if plane_code > 120 {
+ plane_code - 120
+ } else {
+ let (xoffset, yoffset) = DISTANCE_MAP[plane_code - 1];
+
+ let dist = i32::from(xoffset) + i32::from(yoffset) * i32::from(xsize);
+ if dist < 1 {
+ return 1;
+ }
+ dist.try_into().unwrap()
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+struct HuffmanInfo {
+ xsize: u16,
+ _ysize: u16,
+ color_cache: Option<ColorCache>,
+ image: Vec<u32>,
+ bits: u8,
+ mask: u16,
+ huffman_code_groups: Vec<HuffmanCodeGroup>,
+}
+
+impl HuffmanInfo {
+ fn get_huff_index(&self, x: u16, y: u16) -> usize {
+ if self.bits == 0 {
+ return 0;
+ }
+ let position = usize::from((y >> self.bits) * self.xsize + (x >> self.bits));
+ let meta_huff_code: usize = self.image[position].try_into().unwrap();
+ meta_huff_code
+ }
+}
+
+#[derive(Debug, Clone)]
+struct ColorCache {
+ color_cache_bits: u8,
+ color_cache: Vec<u32>,
+}
+
+impl ColorCache {
+ fn insert(&mut self, color: u32) {
+ let index = (0x1e35a7bdu32.overflowing_mul(color).0) >> (32 - self.color_cache_bits);
+ self.color_cache[index as usize] = color;
+ }
+
+ fn lookup(&self, index: usize) -> ImageResult<u32> {
+ match self.color_cache.get(index) {
+ Some(&value) => Ok(value),
+ None => Err(DecoderError::BitStreamError.into()),
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct BitReader {
+ buf: Vec<u8>,
+ index: usize,
+ bit_count: u8,
+}
+
+impl BitReader {
+ fn new() -> BitReader {
+ BitReader {
+ buf: Vec::new(),
+ index: 0,
+ bit_count: 0,
+ }
+ }
+
+ fn init(&mut self, buf: Vec<u8>) {
+ self.buf = buf;
+ }
+
+ pub(crate) fn read_bits<T>(&mut self, num: u8) -> ImageResult<T>
+ where
+ T: num_traits::Unsigned + Shl<u8, Output = T> + AddAssign<T> + From<bool>,
+ {
+ let mut value: T = T::zero();
+
+ for i in 0..num {
+ if self.buf.len() <= self.index {
+ return Err(DecoderError::BitStreamError.into());
+ }
+ let bit_true = self.buf[self.index] & (1 << self.bit_count) != 0;
+ value += T::from(bit_true) << i;
+ self.bit_count = if self.bit_count == 7 {
+ self.index += 1;
+ 0
+ } else {
+ self.bit_count + 1
+ };
+ }
+
+ Ok(value)
+ }
+}
+
+#[derive(Debug, Clone, Default)]
+pub(crate) struct LosslessFrame {
+ pub(crate) width: u16,
+ pub(crate) height: u16,
+
+ pub(crate) buf: Vec<u32>,
+}
+
+impl LosslessFrame {
+ /// Fills a buffer by converting from argb to rgba
+ pub(crate) fn fill_rgba(&self, buf: &mut [u8]) {
+ for (&argb_val, chunk) in self.buf.iter().zip(buf.chunks_exact_mut(4)) {
+ chunk[0] = ((argb_val >> 16) & 0xff).try_into().unwrap();
+ chunk[1] = ((argb_val >> 8) & 0xff).try_into().unwrap();
+ chunk[2] = (argb_val & 0xff).try_into().unwrap();
+ chunk[3] = ((argb_val >> 24) & 0xff).try_into().unwrap();
+ }
+ }
+
+ /// Get buffer size from the image
+ pub(crate) fn get_buf_size(&self) -> usize {
+ usize::from(self.width) * usize::from(self.height) * 4
+ }
+
+ /// Fills a buffer with just the green values from the lossless decoding
+ /// Used in extended alpha decoding
+ pub(crate) fn fill_green(&self, buf: &mut [u8]) {
+ for (&argb_val, buf_value) in self.buf.iter().zip(buf.iter_mut()) {
+ *buf_value = ((argb_val >> 8) & 0xff).try_into().unwrap();
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+
+ use super::BitReader;
+
+ #[test]
+ fn bit_read_test() {
+ let mut bit_reader = BitReader::new();
+
+ //10011100 01000001 11100001
+ let buf = vec![0x9C, 0x41, 0xE1];
+
+ bit_reader.init(buf);
+
+ assert_eq!(bit_reader.read_bits::<u8>(3).unwrap(), 4); //100
+ assert_eq!(bit_reader.read_bits::<u8>(2).unwrap(), 3); //11
+ assert_eq!(bit_reader.read_bits::<u8>(6).unwrap(), 12); //001100
+ assert_eq!(bit_reader.read_bits::<u16>(10).unwrap(), 40); //0000101000
+ assert_eq!(bit_reader.read_bits::<u8>(3).unwrap(), 7); //111
+ }
+
+ #[test]
+ fn bit_read_error_test() {
+ let mut bit_reader = BitReader::new();
+
+ //01101010
+ let buf = vec![0x6A];
+
+ bit_reader.init(buf);
+
+ assert_eq!(bit_reader.read_bits::<u8>(3).unwrap(), 2); //010
+ assert_eq!(bit_reader.read_bits::<u8>(5).unwrap(), 13); //01101
+ assert!(bit_reader.read_bits::<u8>(4).is_err()); //error
+ }
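+
+ // The two tests below are minimal sketches, not part of the vendored test suite:
+ // they exercise the ColorCache hash and the ARGB -> RGBA conversion defined above.
+ #[test]
+ fn color_cache_insert_lookup_test() {
+ use super::ColorCache;
+
+ // The cache index of a color is (0x1e35a7bd * color) >> (32 - color_cache_bits).
+ let mut cache = ColorCache {
+ color_cache_bits: 4,
+ color_cache: vec![0u32; 16],
+ };
+ let color = 0xff00ff00u32;
+ cache.insert(color);
+ let index = (0x1e35a7bdu32.overflowing_mul(color).0 >> 28) as usize;
+ assert_eq!(cache.lookup(index).unwrap(), color);
+ }
+
+ #[test]
+ fn lossless_frame_fill_rgba_test() {
+ use super::LosslessFrame;
+
+ // A single 0xAARRGGBB pixel should come out in RGBA byte order.
+ let frame = LosslessFrame {
+ width: 1,
+ height: 1,
+ buf: vec![0x80112233],
+ };
+ let mut out = vec![0u8; frame.get_buf_size()];
+ frame.fill_rgba(&mut out);
+ assert_eq!(out, vec![0x11, 0x22, 0x33, 0x80]);
+ }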
+}
diff --git a/vendor/image/src/codecs/webp/lossless_transform.rs b/vendor/image/src/codecs/webp/lossless_transform.rs
new file mode 100644
index 0000000..f9a82c1
--- /dev/null
+++ b/vendor/image/src/codecs/webp/lossless_transform.rs
@@ -0,0 +1,464 @@
+use std::convert::TryFrom;
+use std::convert::TryInto;
+
+use super::lossless::subsample_size;
+use super::lossless::DecoderError;
+
+#[derive(Debug, Clone)]
+pub(crate) enum TransformType {
+ PredictorTransform {
+ size_bits: u8,
+ predictor_data: Vec<u32>,
+ },
+ ColorTransform {
+ size_bits: u8,
+ transform_data: Vec<u32>,
+ },
+ SubtractGreen,
+ ColorIndexingTransform {
+ table_size: u16,
+ table_data: Vec<u32>,
+ },
+}
+
+impl TransformType {
+ /// Applies a transform to the image data
+ pub(crate) fn apply_transform(
+ &self,
+ image_data: &mut Vec<u32>,
+ width: u16,
+ height: u16,
+ ) -> Result<(), DecoderError> {
+ match self {
+ TransformType::PredictorTransform {
+ size_bits,
+ predictor_data,
+ } => {
+ let block_xsize = usize::from(subsample_size(width, *size_bits));
+ let width = usize::from(width);
+ let height = usize::from(height);
+
+ if image_data.len() < width * height {
+ return Err(DecoderError::TransformError);
+ }
+
+ // The top and left borders are handled specially:
+ // the prediction mode is ignored and fixed prediction values are used instead
+ image_data[0] = add_pixels(image_data[0], 0xff000000);
+
+ for x in 1..width {
+ image_data[x] = add_pixels(image_data[x], get_left(image_data, x, 0, width));
+ }
+
+ for y in 1..height {
+ image_data[y * width] =
+ add_pixels(image_data[y * width], get_top(image_data, 0, y, width));
+ }
+
+ for y in 1..height {
+ for x in 1..width {
+ let block_index = (y >> size_bits) * block_xsize + (x >> size_bits);
+
+ let index = y * width + x;
+
+ let green = (predictor_data[block_index] >> 8) & 0xff;
+
+ match green {
+ 0 => image_data[index] = add_pixels(image_data[index], 0xff000000),
+ 1 => {
+ image_data[index] =
+ add_pixels(image_data[index], get_left(image_data, x, y, width))
+ }
+ 2 => {
+ image_data[index] =
+ add_pixels(image_data[index], get_top(image_data, x, y, width))
+ }
+ 3 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ get_top_right(image_data, x, y, width),
+ )
+ }
+ 4 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ get_top_left(image_data, x, y, width),
+ )
+ }
+ 5 => {
+ image_data[index] = add_pixels(image_data[index], {
+ let first = average2(
+ get_left(image_data, x, y, width),
+ get_top_right(image_data, x, y, width),
+ );
+ average2(first, get_top(image_data, x, y, width))
+ })
+ }
+ 6 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ average2(
+ get_left(image_data, x, y, width),
+ get_top_left(image_data, x, y, width),
+ ),
+ )
+ }
+ 7 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ average2(
+ get_left(image_data, x, y, width),
+ get_top(image_data, x, y, width),
+ ),
+ )
+ }
+ 8 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ average2(
+ get_top_left(image_data, x, y, width),
+ get_top(image_data, x, y, width),
+ ),
+ )
+ }
+ 9 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ average2(
+ get_top(image_data, x, y, width),
+ get_top_right(image_data, x, y, width),
+ ),
+ )
+ }
+ 10 => {
+ image_data[index] = add_pixels(image_data[index], {
+ let first = average2(
+ get_left(image_data, x, y, width),
+ get_top_left(image_data, x, y, width),
+ );
+ let second = average2(
+ get_top(image_data, x, y, width),
+ get_top_right(image_data, x, y, width),
+ );
+ average2(first, second)
+ })
+ }
+ 11 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ select(
+ get_left(image_data, x, y, width),
+ get_top(image_data, x, y, width),
+ get_top_left(image_data, x, y, width),
+ ),
+ )
+ }
+ 12 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ clamp_add_subtract_full(
+ get_left(image_data, x, y, width),
+ get_top(image_data, x, y, width),
+ get_top_left(image_data, x, y, width),
+ ),
+ )
+ }
+ 13 => {
+ image_data[index] = add_pixels(image_data[index], {
+ let first = average2(
+ get_left(image_data, x, y, width),
+ get_top(image_data, x, y, width),
+ );
+ clamp_add_subtract_half(
+ first,
+ get_top_left(image_data, x, y, width),
+ )
+ })
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+ TransformType::ColorTransform {
+ size_bits,
+ transform_data,
+ } => {
+ let block_xsize = usize::from(subsample_size(width, *size_bits));
+ let width = usize::from(width);
+ let height = usize::from(height);
+
+ for y in 0..height {
+ for x in 0..width {
+ let block_index = (y >> size_bits) * block_xsize + (x >> size_bits);
+
+ let index = y * width + x;
+
+ let multiplier =
+ ColorTransformElement::from_color_code(transform_data[block_index]);
+
+ image_data[index] = transform_color(&multiplier, image_data[index]);
+ }
+ }
+ }
+ TransformType::SubtractGreen => {
+ let width = usize::from(width);
+ for y in 0..usize::from(height) {
+ for x in 0..width {
+ image_data[y * width + x] = add_green(image_data[y * width + x]);
+ }
+ }
+ }
+ TransformType::ColorIndexingTransform {
+ table_size,
+ table_data,
+ } => {
+ let mut new_image_data =
+ Vec::with_capacity(usize::from(width) * usize::from(height));
+
+ let table_size = *table_size;
+ let width_bits: u8 = if table_size <= 2 {
+ 3
+ } else if table_size <= 4 {
+ 2
+ } else if table_size <= 16 {
+ 1
+ } else {
+ 0
+ };
+
+ let bits_per_pixel = 8 >> width_bits;
+ let mask = (1 << bits_per_pixel) - 1;
+
+ let mut src = 0;
+ let width = usize::from(width);
+
+ let pixels_per_byte = 1 << width_bits;
+ let count_mask = pixels_per_byte - 1;
+ let mut packed_pixels = 0;
+
+ for _y in 0..usize::from(height) {
+ for x in 0..width {
+ if (x & count_mask) == 0 {
+ packed_pixels = (image_data[src] >> 8) & 0xff;
+ src += 1;
+ }
+
+ let pixels: usize = (packed_pixels & mask).try_into().unwrap();
+ let new_val = if pixels >= table_size.into() {
+ 0x00000000
+ } else {
+ table_data[pixels]
+ };
+
+ new_image_data.push(new_val);
+
+ packed_pixels >>= bits_per_pixel;
+ }
+ }
+
+ *image_data = new_image_data;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+//predictor functions
+
+/// Adds 2 pixels, wrapping each channel modulo 256
+pub(crate) fn add_pixels(a: u32, b: u32) -> u32 {
+ let new_alpha = ((a >> 24) + (b >> 24)) & 0xff;
+ let new_red = (((a >> 16) & 0xff) + ((b >> 16) & 0xff)) & 0xff;
+ let new_green = (((a >> 8) & 0xff) + ((b >> 8) & 0xff)) & 0xff;
+ let new_blue = ((a & 0xff) + (b & 0xff)) & 0xff;
+
+ (new_alpha << 24) + (new_red << 16) + (new_green << 8) + new_blue
+}
+
+/// Get left pixel
+fn get_left(data: &[u32], x: usize, y: usize, width: usize) -> u32 {
+ data[y * width + x - 1]
+}
+
+/// Get top pixel
+fn get_top(data: &[u32], x: usize, y: usize, width: usize) -> u32 {
+ data[(y - 1) * width + x]
+}
+
+/// Get pixel to top right
+fn get_top_right(data: &[u32], x: usize, y: usize, width: usize) -> u32 {
+ // if x == width - 1 this gets the left most pixel of the current row
+ // as described in the specification
+ data[(y - 1) * width + x + 1]
+}
+
+/// Get pixel to top left
+fn get_top_left(data: &[u32], x: usize, y: usize, width: usize) -> u32 {
+ data[(y - 1) * width + x - 1]
+}
+
+/// Get average of 2 pixels
+fn average2(a: u32, b: u32) -> u32 {
+ let mut avg = 0u32;
+ for i in 0..4 {
+ let sub_a: u8 = ((a >> (i * 8)) & 0xff).try_into().unwrap();
+ let sub_b: u8 = ((b >> (i * 8)) & 0xff).try_into().unwrap();
+ avg |= u32::from(sub_average2(sub_a, sub_b)) << (i * 8);
+ }
+ avg
+}
+
+/// Get average of 2 bytes
+fn sub_average2(a: u8, b: u8) -> u8 {
+ ((u16::from(a) + u16::from(b)) / 2).try_into().unwrap()
+}
+
+/// Get a specific byte from argb pixel
+fn get_byte(val: u32, byte: u8) -> u8 {
+ ((val >> (byte * 8)) & 0xff).try_into().unwrap()
+}
+
+/// Get byte as i32 for convenience
+fn get_byte_i32(val: u32, byte: u8) -> i32 {
+ i32::from(get_byte(val, byte))
+}
+
+/// Selects the left or top pixel, whichever is closer to the gradient prediction
+fn select(left: u32, top: u32, top_left: u32) -> u32 {
+ let predict_alpha = get_byte_i32(left, 3) + get_byte_i32(top, 3) - get_byte_i32(top_left, 3);
+ let predict_red = get_byte_i32(left, 2) + get_byte_i32(top, 2) - get_byte_i32(top_left, 2);
+ let predict_green = get_byte_i32(left, 1) + get_byte_i32(top, 1) - get_byte_i32(top_left, 1);
+ let predict_blue = get_byte_i32(left, 0) + get_byte_i32(top, 0) - get_byte_i32(top_left, 0);
+
+ let predict_left = i32::abs(predict_alpha - get_byte_i32(left, 3))
+ + i32::abs(predict_red - get_byte_i32(left, 2))
+ + i32::abs(predict_green - get_byte_i32(left, 1))
+ + i32::abs(predict_blue - get_byte_i32(left, 0));
+ let predict_top = i32::abs(predict_alpha - get_byte_i32(top, 3))
+ + i32::abs(predict_red - get_byte_i32(top, 2))
+ + i32::abs(predict_green - get_byte_i32(top, 1))
+ + i32::abs(predict_blue - get_byte_i32(top, 0));
+
+ if predict_left < predict_top {
+ left
+ } else {
+ top
+ }
+}
+
+/// Clamp a to [0, 255]
+fn clamp(a: i32) -> i32 {
+ if a < 0 {
+ 0
+ } else if a > 255 {
+ 255
+ } else {
+ a
+ }
+}
+
+/// Clamp add subtract full on one part
+fn clamp_add_subtract_full_sub(a: i32, b: i32, c: i32) -> i32 {
+ clamp(a + b - c)
+}
+
+/// Clamp add subtract half on one part
+fn clamp_add_subtract_half_sub(a: i32, b: i32) -> i32 {
+ clamp(a + (a - b) / 2)
+}
+
+/// Clamp add subtract full on 3 pixels
+fn clamp_add_subtract_full(a: u32, b: u32, c: u32) -> u32 {
+ let mut value: u32 = 0;
+ for i in 0..4u8 {
+ let sub_a: i32 = ((a >> (i * 8)) & 0xff).try_into().unwrap();
+ let sub_b: i32 = ((b >> (i * 8)) & 0xff).try_into().unwrap();
+ let sub_c: i32 = ((c >> (i * 8)) & 0xff).try_into().unwrap();
+ value |=
+ u32::try_from(clamp_add_subtract_full_sub(sub_a, sub_b, sub_c)).unwrap() << (i * 8);
+ }
+ value
+}
+
+/// Clamp add subtract half on 2 pixels
+fn clamp_add_subtract_half(a: u32, b: u32) -> u32 {
+ let mut value = 0;
+ for i in 0..4u8 {
+ let sub_a: i32 = ((a >> (i * 8)) & 0xff).try_into().unwrap();
+ let sub_b: i32 = ((b >> (i * 8)) & 0xff).try_into().unwrap();
+ value |= u32::try_from(clamp_add_subtract_half_sub(sub_a, sub_b)).unwrap() << (i * 8);
+ }
+
+ value
+}
+
+//color transform
+
+#[derive(Debug, Clone, Copy)]
+struct ColorTransformElement {
+ green_to_red: u8,
+ green_to_blue: u8,
+ red_to_blue: u8,
+}
+
+impl ColorTransformElement {
+ fn from_color_code(color_code: u32) -> ColorTransformElement {
+ ColorTransformElement {
+ green_to_red: (color_code & 0xff).try_into().unwrap(),
+ green_to_blue: ((color_code >> 8) & 0xff).try_into().unwrap(),
+ red_to_blue: ((color_code >> 16) & 0xff).try_into().unwrap(),
+ }
+ }
+}
+
+/// Applies the color transform to the red and blue channels, using the green channel
+fn color_transform(red: u8, blue: u8, green: u8, trans: &ColorTransformElement) -> (u8, u8) {
+ let mut temp_red = u32::from(red);
+ let mut temp_blue = u32::from(blue);
+
+ // Only the low 8 bits of each sum are kept, making the arithmetic modular,
+ // as is the conversion from u8 to the signed two's complement i8 required below
+ temp_red += color_transform_delta(trans.green_to_red as i8, green as i8);
+ temp_blue += color_transform_delta(trans.green_to_blue as i8, green as i8);
+ temp_blue += color_transform_delta(trans.red_to_blue as i8, temp_red as i8);
+
+ (
+ (temp_red & 0xff).try_into().unwrap(),
+ (temp_blue & 0xff).try_into().unwrap(),
+ )
+}
+
+/// Does color transform on 2 numbers
+fn color_transform_delta(t: i8, c: i8) -> u32 {
+ ((i16::from(t) * i16::from(c)) as u32) >> 5
+}
+
+/// Does color transform on a pixel with a color transform element
+fn transform_color(multiplier: &ColorTransformElement, color_value: u32) -> u32 {
+ let alpha = get_byte(color_value, 3);
+ let red = get_byte(color_value, 2);
+ let green = get_byte(color_value, 1);
+ let blue = get_byte(color_value, 0);
+
+ let (new_red, new_blue) = color_transform(red, blue, green, multiplier);
+
+ (u32::from(alpha) << 24)
+ + (u32::from(new_red) << 16)
+ + (u32::from(green) << 8)
+ + u32::from(new_blue)
+}
+
+//subtract green function
+
+/// Adds green to red and blue of a pixel
+fn add_green(argb: u32) -> u32 {
+ let red = (argb >> 16) & 0xff;
+ let green = (argb >> 8) & 0xff;
+ let blue = argb & 0xff;
+
+ let new_red = (red + green) & 0xff;
+ let new_blue = (blue + green) & 0xff;
+
+ (argb & 0xff00ff00) | (new_red << 16) | (new_blue)
+}
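+
+#[cfg(test)]
+mod tests {
+ // A minimal sketch, not part of the vendored crate: spot checks for the
+ // per-channel modular arithmetic helpers above.
+ use super::{add_green, add_pixels, average2};
+
+ #[test]
+ fn add_pixels_wraps_per_channel() {
+ // 0xff + 0x01 wraps to 0x00 independently in every channel.
+ assert_eq!(add_pixels(0xffffffff, 0x01010101), 0x00000000);
+ }
+
+ #[test]
+ fn average2_averages_each_byte() {
+ assert_eq!(average2(0x00000010, 0x00000020), 0x00000018);
+ }
+
+ #[test]
+ fn add_green_adds_green_to_red_and_blue() {
+ // green = 0x10 is added (mod 256) to red = 0x20 and blue = 0x30.
+ assert_eq!(add_green(0xff201030), 0xff301040);
+ }
+}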
diff --git a/vendor/image/src/codecs/webp/mod.rs b/vendor/image/src/codecs/webp/mod.rs
new file mode 100644
index 0000000..b38faed
--- /dev/null
+++ b/vendor/image/src/codecs/webp/mod.rs
@@ -0,0 +1,28 @@
+//! Decoding and Encoding of WebP Images
+
+#[cfg(feature = "webp-encoder")]
+pub use self::encoder::{WebPEncoder, WebPQuality};
+
+#[cfg(feature = "webp-encoder")]
+mod encoder;
+
+#[cfg(feature = "webp")]
+pub use self::decoder::WebPDecoder;
+
+#[cfg(feature = "webp")]
+mod decoder;
+#[cfg(feature = "webp")]
+mod extended;
+#[cfg(feature = "webp")]
+mod huffman;
+#[cfg(feature = "webp")]
+mod loop_filter;
+#[cfg(feature = "webp")]
+mod lossless;
+#[cfg(feature = "webp")]
+mod lossless_transform;
+#[cfg(feature = "webp")]
+mod transform;
+
+#[cfg(feature = "webp")]
+pub mod vp8;
diff --git a/vendor/image/src/codecs/webp/transform.rs b/vendor/image/src/codecs/webp/transform.rs
new file mode 100644
index 0000000..3b3ef5a
--- /dev/null
+++ b/vendor/image/src/codecs/webp/transform.rs
@@ -0,0 +1,77 @@
+static CONST1: i64 = 20091;
+static CONST2: i64 = 35468;
+
+pub(crate) fn idct4x4(block: &mut [i32]) {
+ // The intermediate results may overflow i32, so we widen to i64.
+ fn fetch(block: &mut [i32], idx: usize) -> i64 {
+ i64::from(block[idx])
+ }
+
+ for i in 0usize..4 {
+ let a1 = fetch(block, i) + fetch(block, 8 + i);
+ let b1 = fetch(block, i) - fetch(block, 8 + i);
+
+ let t1 = (fetch(block, 4 + i) * CONST2) >> 16;
+ let t2 = fetch(block, 12 + i) + ((fetch(block, 12 + i) * CONST1) >> 16);
+ let c1 = t1 - t2;
+
+ let t1 = fetch(block, 4 + i) + ((fetch(block, 4 + i) * CONST1) >> 16);
+ let t2 = (fetch(block, 12 + i) * CONST2) >> 16;
+ let d1 = t1 + t2;
+
+ block[i] = (a1 + d1) as i32;
+ block[4 + i] = (b1 + c1) as i32;
+ block[4 * 3 + i] = (a1 - d1) as i32;
+ block[4 * 2 + i] = (b1 - c1) as i32;
+ }
+
+ for i in 0usize..4 {
+ let a1 = fetch(block, 4 * i) + fetch(block, 4 * i + 2);
+ let b1 = fetch(block, 4 * i) - fetch(block, 4 * i + 2);
+
+ let t1 = (fetch(block, 4 * i + 1) * CONST2) >> 16;
+ let t2 = fetch(block, 4 * i + 3) + ((fetch(block, 4 * i + 3) * CONST1) >> 16);
+ let c1 = t1 - t2;
+
+ let t1 = fetch(block, 4 * i + 1) + ((fetch(block, 4 * i + 1) * CONST1) >> 16);
+ let t2 = (fetch(block, 4 * i + 3) * CONST2) >> 16;
+ let d1 = t1 + t2;
+
+ block[4 * i] = ((a1 + d1 + 4) >> 3) as i32;
+ block[4 * i + 3] = ((a1 - d1 + 4) >> 3) as i32;
+ block[4 * i + 1] = ((b1 + c1 + 4) >> 3) as i32;
+ block[4 * i + 2] = ((b1 - c1 + 4) >> 3) as i32;
+ }
+}
+
+// 14.3
+pub(crate) fn iwht4x4(block: &mut [i32]) {
+ for i in 0usize..4 {
+ let a1 = block[i] + block[12 + i];
+ let b1 = block[4 + i] + block[8 + i];
+ let c1 = block[4 + i] - block[8 + i];
+ let d1 = block[i] - block[12 + i];
+
+ block[i] = a1 + b1;
+ block[4 + i] = c1 + d1;
+ block[8 + i] = a1 - b1;
+ block[12 + i] = d1 - c1;
+ }
+
+ for i in 0usize..4 {
+ let a1 = block[4 * i] + block[4 * i + 3];
+ let b1 = block[4 * i + 1] + block[4 * i + 2];
+ let c1 = block[4 * i + 1] - block[4 * i + 2];
+ let d1 = block[4 * i] - block[4 * i + 3];
+
+ let a2 = a1 + b1;
+ let b2 = c1 + d1;
+ let c2 = a1 - b1;
+ let d2 = d1 - c1;
+
+ block[4 * i] = (a2 + 3) >> 3;
+ block[4 * i + 1] = (b2 + 3) >> 3;
+ block[4 * i + 2] = (c2 + 3) >> 3;
+ block[4 * i + 3] = (d2 + 3) >> 3;
+ }
+}
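+
+#[cfg(test)]
+mod tests {
+ // A minimal sketch, not part of the vendored crate: a DC-only block pushed
+ // through the inverse WHT should spread (dc + 3) >> 3 into all 16 outputs.
+ use super::iwht4x4;
+
+ #[test]
+ fn iwht4x4_dc_only() {
+ let mut block = [0i32; 16];
+ block[0] = 8;
+ iwht4x4(&mut block);
+ assert_eq!(block, [1i32; 16]);
+ }
+}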
diff --git a/vendor/image/src/codecs/webp/vp8.rs b/vendor/image/src/codecs/webp/vp8.rs
new file mode 100644
index 0000000..67b8820
--- /dev/null
+++ b/vendor/image/src/codecs/webp/vp8.rs
@@ -0,0 +1,2932 @@
+//! An implementation of the VP8 Video Codec
+//!
+//! This module contains a partial implementation of the
+//! VP8 video format as defined in RFC-6386.
+//!
+//! It decodes keyframes only.
+//! VP8 is the underpinning of the WebP image format.
+//!
+//! # Related Links
+//! * [rfc-6386](http://tools.ietf.org/html/rfc6386) - The VP8 Data Format and Decoding Guide
+//! * [VP8.pdf](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37073.pdf) - An overview
+//! of the VP8 format
+//!
+
+use byteorder::{LittleEndian, ReadBytesExt};
+use std::convert::TryInto;
+use std::default::Default;
+use std::io::Read;
+use std::{cmp, error, fmt};
+
+use super::loop_filter;
+use super::transform;
+use crate::error::{
+ DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::ImageFormat;
+
+use crate::utils::clamp;
+
+const MAX_SEGMENTS: usize = 4;
+const NUM_DCT_TOKENS: usize = 12;
+
+// Prediction modes
+const DC_PRED: i8 = 0;
+const V_PRED: i8 = 1;
+const H_PRED: i8 = 2;
+const TM_PRED: i8 = 3;
+const B_PRED: i8 = 4;
+
+const B_DC_PRED: i8 = 0;
+const B_TM_PRED: i8 = 1;
+const B_VE_PRED: i8 = 2;
+const B_HE_PRED: i8 = 3;
+const B_LD_PRED: i8 = 4;
+const B_RD_PRED: i8 = 5;
+const B_VR_PRED: i8 = 6;
+const B_VL_PRED: i8 = 7;
+const B_HD_PRED: i8 = 8;
+const B_HU_PRED: i8 = 9;
+
+// Prediction mode enum
+#[repr(i8)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum LumaMode {
+ /// Predict DC using row above and column to the left.
+ DC = DC_PRED,
+
+ /// Predict rows using row above.
+ V = V_PRED,
+
+ /// Predict columns using column to the left.
+ H = H_PRED,
+
+ /// Propagate second differences.
+ TM = TM_PRED,
+
+ /// Each Y subblock is independently predicted.
+ B = B_PRED,
+}
+
+#[repr(i8)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum ChromaMode {
+ /// Predict DC using row above and column to the left.
+ DC = DC_PRED,
+
+ /// Predict rows using row above.
+ V = V_PRED,
+
+ /// Predict columns using column to the left.
+ H = H_PRED,
+
+ /// Propagate second differences.
+ TM = TM_PRED,
+}
+
+#[repr(i8)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum IntraMode {
+ DC = B_DC_PRED,
+ TM = B_TM_PRED,
+ VE = B_VE_PRED,
+ HE = B_HE_PRED,
+ LD = B_LD_PRED,
+ RD = B_RD_PRED,
+ VR = B_VR_PRED,
+ VL = B_VL_PRED,
+ HD = B_HD_PRED,
+ HU = B_HU_PRED,
+}
+
+type Prob = u8;
+
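+// VP8 trees are stored as flat arrays (see `BoolReader::read_with_tree` below):
+// a positive entry is the index of the next node pair, while a zero or negative
+// entry is a leaf holding the negated decoded value. For example, reading the
+// bits 1 then 0 against SEGMENT_ID_TREE walks 0 -> tree[1] = 4 -> tree[4] = -2,
+// giving segment id 2.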
+static SEGMENT_ID_TREE: [i8; 6] = [2, 4, -0, -1, -2, -3];
+
+// Section 11.2
+// Tree for determining the keyframe luma intra prediction modes:
+static KEYFRAME_YMODE_TREE: [i8; 8] = [-B_PRED, 2, 4, 6, -DC_PRED, -V_PRED, -H_PRED, -TM_PRED];
+
+// Default probabilities for decoding the keyframe luma modes
+static KEYFRAME_YMODE_PROBS: [Prob; 4] = [145, 156, 163, 128];
+
+// Tree for determining the keyframe B_PRED mode:
+static KEYFRAME_BPRED_MODE_TREE: [i8; 18] = [
+ -B_DC_PRED, 2, -B_TM_PRED, 4, -B_VE_PRED, 6, 8, 12, -B_HE_PRED, 10, -B_RD_PRED, -B_VR_PRED,
+ -B_LD_PRED, 14, -B_VL_PRED, 16, -B_HD_PRED, -B_HU_PRED,
+];
+
+// Probabilities for the BPRED_MODE_TREE
+static KEYFRAME_BPRED_MODE_PROBS: [[[u8; 9]; 10]; 10] = [
+ [
+ [231, 120, 48, 89, 115, 113, 120, 152, 112],
+ [152, 179, 64, 126, 170, 118, 46, 70, 95],
+ [175, 69, 143, 80, 85, 82, 72, 155, 103],
+ [56, 58, 10, 171, 218, 189, 17, 13, 152],
+ [144, 71, 10, 38, 171, 213, 144, 34, 26],
+ [114, 26, 17, 163, 44, 195, 21, 10, 173],
+ [121, 24, 80, 195, 26, 62, 44, 64, 85],
+ [170, 46, 55, 19, 136, 160, 33, 206, 71],
+ [63, 20, 8, 114, 114, 208, 12, 9, 226],
+ [81, 40, 11, 96, 182, 84, 29, 16, 36],
+ ],
+ [
+ [134, 183, 89, 137, 98, 101, 106, 165, 148],
+ [72, 187, 100, 130, 157, 111, 32, 75, 80],
+ [66, 102, 167, 99, 74, 62, 40, 234, 128],
+ [41, 53, 9, 178, 241, 141, 26, 8, 107],
+ [104, 79, 12, 27, 217, 255, 87, 17, 7],
+ [74, 43, 26, 146, 73, 166, 49, 23, 157],
+ [65, 38, 105, 160, 51, 52, 31, 115, 128],
+ [87, 68, 71, 44, 114, 51, 15, 186, 23],
+ [47, 41, 14, 110, 182, 183, 21, 17, 194],
+ [66, 45, 25, 102, 197, 189, 23, 18, 22],
+ ],
+ [
+ [88, 88, 147, 150, 42, 46, 45, 196, 205],
+ [43, 97, 183, 117, 85, 38, 35, 179, 61],
+ [39, 53, 200, 87, 26, 21, 43, 232, 171],
+ [56, 34, 51, 104, 114, 102, 29, 93, 77],
+ [107, 54, 32, 26, 51, 1, 81, 43, 31],
+ [39, 28, 85, 171, 58, 165, 90, 98, 64],
+ [34, 22, 116, 206, 23, 34, 43, 166, 73],
+ [68, 25, 106, 22, 64, 171, 36, 225, 114],
+ [34, 19, 21, 102, 132, 188, 16, 76, 124],
+ [62, 18, 78, 95, 85, 57, 50, 48, 51],
+ ],
+ [
+ [193, 101, 35, 159, 215, 111, 89, 46, 111],
+ [60, 148, 31, 172, 219, 228, 21, 18, 111],
+ [112, 113, 77, 85, 179, 255, 38, 120, 114],
+ [40, 42, 1, 196, 245, 209, 10, 25, 109],
+ [100, 80, 8, 43, 154, 1, 51, 26, 71],
+ [88, 43, 29, 140, 166, 213, 37, 43, 154],
+ [61, 63, 30, 155, 67, 45, 68, 1, 209],
+ [142, 78, 78, 16, 255, 128, 34, 197, 171],
+ [41, 40, 5, 102, 211, 183, 4, 1, 221],
+ [51, 50, 17, 168, 209, 192, 23, 25, 82],
+ ],
+ [
+ [125, 98, 42, 88, 104, 85, 117, 175, 82],
+ [95, 84, 53, 89, 128, 100, 113, 101, 45],
+ [75, 79, 123, 47, 51, 128, 81, 171, 1],
+ [57, 17, 5, 71, 102, 57, 53, 41, 49],
+ [115, 21, 2, 10, 102, 255, 166, 23, 6],
+ [38, 33, 13, 121, 57, 73, 26, 1, 85],
+ [41, 10, 67, 138, 77, 110, 90, 47, 114],
+ [101, 29, 16, 10, 85, 128, 101, 196, 26],
+ [57, 18, 10, 102, 102, 213, 34, 20, 43],
+ [117, 20, 15, 36, 163, 128, 68, 1, 26],
+ ],
+ [
+ [138, 31, 36, 171, 27, 166, 38, 44, 229],
+ [67, 87, 58, 169, 82, 115, 26, 59, 179],
+ [63, 59, 90, 180, 59, 166, 93, 73, 154],
+ [40, 40, 21, 116, 143, 209, 34, 39, 175],
+ [57, 46, 22, 24, 128, 1, 54, 17, 37],
+ [47, 15, 16, 183, 34, 223, 49, 45, 183],
+ [46, 17, 33, 183, 6, 98, 15, 32, 183],
+ [65, 32, 73, 115, 28, 128, 23, 128, 205],
+ [40, 3, 9, 115, 51, 192, 18, 6, 223],
+ [87, 37, 9, 115, 59, 77, 64, 21, 47],
+ ],
+ [
+ [104, 55, 44, 218, 9, 54, 53, 130, 226],
+ [64, 90, 70, 205, 40, 41, 23, 26, 57],
+ [54, 57, 112, 184, 5, 41, 38, 166, 213],
+ [30, 34, 26, 133, 152, 116, 10, 32, 134],
+ [75, 32, 12, 51, 192, 255, 160, 43, 51],
+ [39, 19, 53, 221, 26, 114, 32, 73, 255],
+ [31, 9, 65, 234, 2, 15, 1, 118, 73],
+ [88, 31, 35, 67, 102, 85, 55, 186, 85],
+ [56, 21, 23, 111, 59, 205, 45, 37, 192],
+ [55, 38, 70, 124, 73, 102, 1, 34, 98],
+ ],
+ [
+ [102, 61, 71, 37, 34, 53, 31, 243, 192],
+ [69, 60, 71, 38, 73, 119, 28, 222, 37],
+ [68, 45, 128, 34, 1, 47, 11, 245, 171],
+ [62, 17, 19, 70, 146, 85, 55, 62, 70],
+ [75, 15, 9, 9, 64, 255, 184, 119, 16],
+ [37, 43, 37, 154, 100, 163, 85, 160, 1],
+ [63, 9, 92, 136, 28, 64, 32, 201, 85],
+ [86, 6, 28, 5, 64, 255, 25, 248, 1],
+ [56, 8, 17, 132, 137, 255, 55, 116, 128],
+ [58, 15, 20, 82, 135, 57, 26, 121, 40],
+ ],
+ [
+ [164, 50, 31, 137, 154, 133, 25, 35, 218],
+ [51, 103, 44, 131, 131, 123, 31, 6, 158],
+ [86, 40, 64, 135, 148, 224, 45, 183, 128],
+ [22, 26, 17, 131, 240, 154, 14, 1, 209],
+ [83, 12, 13, 54, 192, 255, 68, 47, 28],
+ [45, 16, 21, 91, 64, 222, 7, 1, 197],
+ [56, 21, 39, 155, 60, 138, 23, 102, 213],
+ [85, 26, 85, 85, 128, 128, 32, 146, 171],
+ [18, 11, 7, 63, 144, 171, 4, 4, 246],
+ [35, 27, 10, 146, 174, 171, 12, 26, 128],
+ ],
+ [
+ [190, 80, 35, 99, 180, 80, 126, 54, 45],
+ [85, 126, 47, 87, 176, 51, 41, 20, 32],
+ [101, 75, 128, 139, 118, 146, 116, 128, 85],
+ [56, 41, 15, 176, 236, 85, 37, 9, 62],
+ [146, 36, 19, 30, 171, 255, 97, 27, 20],
+ [71, 30, 17, 119, 118, 255, 17, 18, 138],
+ [101, 38, 60, 138, 55, 70, 43, 26, 142],
+ [138, 45, 61, 62, 219, 1, 81, 188, 64],
+ [32, 41, 20, 117, 151, 142, 20, 21, 163],
+ [112, 19, 12, 61, 195, 128, 48, 4, 24],
+ ],
+];
+
+// Section 11.4 Tree for determining the macroblock chroma mode
+static KEYFRAME_UV_MODE_TREE: [i8; 6] = [-DC_PRED, 2, -V_PRED, 4, -H_PRED, -TM_PRED];
+
+// Probabilities for determining macroblock mode
+static KEYFRAME_UV_MODE_PROBS: [Prob; 3] = [142, 114, 183];
+
+// Section 13.4
+type TokenProbTables = [[[[Prob; NUM_DCT_TOKENS - 1]; 3]; 8]; 4];
+
+// Probabilities that a token's probability will be updated
+static COEFF_UPDATE_PROBS: TokenProbTables = [
+ [
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255],
+ [249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255],
+ [234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255],
+ [250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ ],
+ [
+ [
+ [217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255],
+ [234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255],
+ ],
+ [
+ [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ ],
+ [
+ [
+ [186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255],
+ [234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255],
+ [251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ ],
+ [
+ [
+ [248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255],
+ [248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255],
+ [248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ ],
+];
+
+// Section 13.5
+// Default Probabilities for tokens
+static COEFF_PROBS: TokenProbTables = [
+ [
+ [
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128],
+ [189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128],
+ [106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128],
+ [181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128],
+ [78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128],
+ ],
+ [
+ [1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128],
+ [184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128],
+ [77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128],
+ [170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128],
+ [37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128],
+ [207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128],
+ [102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128],
+ [177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128],
+ [80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ ],
+ [
+ [
+ [198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62],
+ [131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1],
+ [68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128],
+ ],
+ [
+ [1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128],
+ [184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128],
+ [81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128],
+ ],
+ [
+ [1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128],
+ [99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128],
+ [23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128],
+ ],
+ [
+ [1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128],
+ [109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128],
+ [44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128],
+ [94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128],
+ [22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128],
+ ],
+ [
+ [1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128],
+ [124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128],
+ [35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128],
+ ],
+ [
+ [1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128],
+ [121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128],
+ [45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128],
+ ],
+ [
+ [1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128],
+ [203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128],
+ [137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128],
+ ],
+ ],
+ [
+ [
+ [253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128],
+ [175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128],
+ [73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128],
+ ],
+ [
+ [1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128],
+ [239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128],
+ [155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128],
+ [201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128],
+ [69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128],
+ ],
+ [
+ [1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128],
+ [223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128],
+ [141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128],
+ [190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128],
+ [149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128],
+ [213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128],
+ [55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ ],
+ [
+ [
+ [202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255],
+ [126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128],
+ [61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128],
+ ],
+ [
+ [1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128],
+ [166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128],
+ [39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128],
+ ],
+ [
+ [1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128],
+ [124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128],
+ [24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128],
+ ],
+ [
+ [1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128],
+ [149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128],
+ [28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128],
+ ],
+ [
+ [1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128],
+ [123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128],
+ [20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128],
+ ],
+ [
+ [1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128],
+ [168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128],
+ [47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128],
+ [141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128],
+ [42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128],
+ ],
+ [
+ [1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ ],
+];
+
+// DCT Tokens
+const DCT_0: i8 = 0;
+const DCT_1: i8 = 1;
+const DCT_2: i8 = 2;
+const DCT_3: i8 = 3;
+const DCT_4: i8 = 4;
+const DCT_CAT1: i8 = 5;
+const DCT_CAT2: i8 = 6;
+const DCT_CAT3: i8 = 7;
+const DCT_CAT4: i8 = 8;
+const DCT_CAT5: i8 = 9;
+const DCT_CAT6: i8 = 10;
+const DCT_EOB: i8 = 11;
+
+static DCT_TOKEN_TREE: [i8; 22] = [
+ -DCT_EOB, 2, -DCT_0, 4, -DCT_1, 6, 8, 12, -DCT_2, 10, -DCT_3, -DCT_4, 14, 16, -DCT_CAT1,
+ -DCT_CAT2, 18, 20, -DCT_CAT3, -DCT_CAT4, -DCT_CAT5, -DCT_CAT6,
+];
+
+static PROB_DCT_CAT: [[Prob; 12]; 6] = [
+ [159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [165, 145, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [173, 148, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [176, 155, 140, 135, 0, 0, 0, 0, 0, 0, 0, 0],
+ [180, 157, 141, 134, 130, 0, 0, 0, 0, 0, 0, 0],
+ [254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0],
+];
+
+static DCT_CAT_BASE: [u8; 6] = [5, 7, 11, 19, 35, 67];
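+// Each DCT_CATn token covers coefficient magnitudes starting at DCT_CAT_BASE[n - 1];
+// the offset within the category is read bit by bit using the probabilities in
+// PROB_DCT_CAT[n - 1] (one per extra bit). For example, DCT_CAT2 covers the
+// values 7..=10 using 2 extra bits.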
+static COEFF_BANDS: [u8; 16] = [0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7];
+
+#[rustfmt::skip]
+static DC_QUANT: [i16; 128] = [
+ 4, 5, 6, 7, 8, 9, 10, 10,
+ 11, 12, 13, 14, 15, 16, 17, 17,
+ 18, 19, 20, 20, 21, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89,
+ 91, 93, 95, 96, 98, 100, 101, 102,
+ 104, 106, 108, 110, 112, 114, 116, 118,
+ 122, 124, 126, 128, 130, 132, 134, 136,
+ 138, 140, 143, 145, 148, 151, 154, 157,
+];
+
+#[rustfmt::skip]
+static AC_QUANT: [i16; 128] = [
+ 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 60,
+ 62, 64, 66, 68, 70, 72, 74, 76,
+ 78, 80, 82, 84, 86, 88, 90, 92,
+ 94, 96, 98, 100, 102, 104, 106, 108,
+ 110, 112, 114, 116, 119, 122, 125, 128,
+ 131, 134, 137, 140, 143, 146, 149, 152,
+ 155, 158, 161, 164, 167, 170, 173, 177,
+ 181, 185, 189, 193, 197, 201, 205, 209,
+ 213, 217, 221, 225, 229, 234, 239, 245,
+ 249, 254, 259, 264, 269, 274, 279, 284,
+];
+
+static ZIGZAG: [u8; 16] = [0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15];
+
+/// All errors that can occur when attempting to parse a VP8 codec inside WebP
+#[derive(Debug, Clone, Copy)]
+enum DecoderError {
+ /// VP8's `[0x9D, 0x01, 0x2A]` magic not found or invalid
+ Vp8MagicInvalid([u8; 3]),
+
+ /// Decoder initialisation wasn't provided with enough data
+ NotEnoughInitData,
+
+ /// At time of writing, only the YUV colour-space encoded as `0` is specified
+ ColorSpaceInvalid(u8),
+ /// LUMA prediction mode was not recognised
+ LumaPredictionModeInvalid(i8),
+ /// Intra-prediction mode was not recognised
+ IntraPredictionModeInvalid(i8),
+ /// Chroma prediction mode was not recognised
+ ChromaPredictionModeInvalid(i8),
+}
+
+impl fmt::Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DecoderError::Vp8MagicInvalid(tag) => f.write_fmt(format_args!(
+ "Invalid VP8 magic: [{:#04X?}, {:#04X?}, {:#04X?}]",
+ tag[0], tag[1], tag[2]
+ )),
+
+ DecoderError::NotEnoughInitData => {
+ f.write_str("Expected at least 2 bytes of VP8 decoder initialization data")
+ }
+
+ DecoderError::ColorSpaceInvalid(cs) => {
+ f.write_fmt(format_args!("Invalid non-YUV VP8 color space {}", cs))
+ }
+ DecoderError::LumaPredictionModeInvalid(pm) => {
+ f.write_fmt(format_args!("Invalid VP8 LUMA prediction mode {}", pm))
+ }
+ DecoderError::IntraPredictionModeInvalid(i) => {
+ f.write_fmt(format_args!("Invalid VP8 intra-prediction mode {}", i))
+ }
+ DecoderError::ChromaPredictionModeInvalid(c) => {
+ f.write_fmt(format_args!("Invalid VP8 chroma prediction mode {}", c))
+ }
+ }
+ }
+}
+
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {}
+
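+// RFC 6386's boolean entropy decoder: `range` is the size of the current coding
+// interval, `value` holds upcoming bits of the stream, and each `read_bool(p)`
+// splits the interval in proportion to the 8-bit probability `p`, renormalizing
+// so that `range` stays in 128..=255.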
+struct BoolReader {
+ buf: Vec<u8>,
+ index: usize,
+
+ range: u32,
+ value: u32,
+ bit_count: u8,
+}
+
+impl BoolReader {
+ pub(crate) fn new() -> BoolReader {
+ BoolReader {
+ buf: Vec::new(),
+ range: 0,
+ value: 0,
+ bit_count: 0,
+ index: 0,
+ }
+ }
+
+ pub(crate) fn init(&mut self, buf: Vec<u8>) -> ImageResult<()> {
+ if buf.len() < 2 {
+ return Err(DecoderError::NotEnoughInitData.into());
+ }
+
+ self.buf = buf;
+ // Direct access safe, since length has just been validated.
+ self.value = (u32::from(self.buf[0]) << 8) | u32::from(self.buf[1]);
+ self.index = 2;
+ self.range = 255;
+ self.bit_count = 0;
+
+ Ok(())
+ }
+
+ pub(crate) fn read_bool(&mut self, probability: u8) -> bool {
+ let split = 1 + (((self.range - 1) * u32::from(probability)) >> 8);
+ let bigsplit = split << 8;
+
+ let retval = if self.value >= bigsplit {
+ self.range -= split;
+ self.value -= bigsplit;
+ true
+ } else {
+ self.range = split;
+ false
+ };
+
+ while self.range < 128 {
+ self.value <<= 1;
+ self.range <<= 1;
+ self.bit_count += 1;
+
+ if self.bit_count == 8 {
+ self.bit_count = 0;
+
+ // If no more bits are available, just don't do anything.
+ // This strategy is suggested in the reference implementation of RFC6386 (p.135)
+ if self.index < self.buf.len() {
+ self.value |= u32::from(self.buf[self.index]);
+ self.index += 1;
+ }
+ }
+ }
+
+ retval
+ }
+
+ pub(crate) fn read_literal(&mut self, n: u8) -> u8 {
+ let mut v = 0u8;
+ let mut n = n;
+
+ while n != 0 {
+ v = (v << 1) + self.read_bool(128u8) as u8;
+ n -= 1;
+ }
+
+ v
+ }
+
+ pub(crate) fn read_magnitude_and_sign(&mut self, n: u8) -> i32 {
+ let magnitude = self.read_literal(n);
+ let sign = self.read_literal(1);
+
+ if sign == 1 {
+ -i32::from(magnitude)
+ } else {
+ i32::from(magnitude)
+ }
+ }
+
+ pub(crate) fn read_with_tree(&mut self, tree: &[i8], probs: &[Prob], start: isize) -> i8 {
+ let mut index = start;
+
+ loop {
+ let a = self.read_bool(probs[index as usize >> 1]);
+ let b = index + a as isize;
+ index = tree[b as usize] as isize;
+
+ if index <= 0 {
+ break;
+ }
+ }
+
+ -index as i8
+ }
+
+ pub(crate) fn read_flag(&mut self) -> bool {
+ 0 != self.read_literal(1)
+ }
+}
+
+#[derive(Default, Clone, Copy)]
+struct MacroBlock {
+ bpred: [IntraMode; 16],
+ complexity: [u8; 9],
+ luma_mode: LumaMode,
+ chroma_mode: ChromaMode,
+ segmentid: u8,
+ coeffs_skipped: bool,
+}
+
+/// A representation of the last decoded video frame
+#[derive(Default, Debug, Clone)]
+pub struct Frame {
+ /// The width of the luma plane
+ pub width: u16,
+
+ /// The height of the luma plane
+ pub height: u16,
+
+ /// The luma plane of the frame
+ pub ybuf: Vec<u8>,
+
+ /// The U (blue-difference) chroma plane of the frame
+ pub ubuf: Vec<u8>,
+
+ /// The V (red-difference) chroma plane of the frame
+ pub vbuf: Vec<u8>,
+
+ /// Indicates whether this frame is a keyframe
+ pub keyframe: bool,
+
+ version: u8,
+
+ /// Indicates whether this frame is intended for display
+ pub for_display: bool,
+
+ // Section 9.2
+ /// The pixel type of the frame as defined by Section 9.2
+ /// of the VP8 Specification
+ pub pixel_type: u8,
+
+ // Section 9.4 and 15
+ filter_type: bool, // if true, the simple loop filter is used; if false, the normal loop filter
+ filter_level: u8,
+ sharpness_level: u8,
+}
+
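+// Layout note (from the buffer allocations in `read_frame_header`): the planes use
+// 4:2:0 subsampling, i.e. `ybuf` holds `width * height` luma samples in row-major
+// order while `ubuf` and `vbuf` each hold `chroma_width() * chroma_height()`
+// samples, one per 2x2 block of luma pixels.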
+impl Frame {
+ /// The chroma planes are half the width and height of the luma plane (rounded up)
+ fn chroma_width(&self) -> u16 {
+ (self.width + 1) / 2
+ }
+
+ fn chroma_height(&self) -> u16 {
+ (self.height + 1) / 2
+ }
+
+ /// Fills an rgb buffer with the image
+ pub(crate) fn fill_rgb(&self, buf: &mut [u8]) {
+ for (index, rgb_chunk) in (0..self.ybuf.len()).zip(buf.chunks_exact_mut(3)) {
+ let y = index / self.width as usize;
+ let x = index % self.width as usize;
+ let chroma_index = self.chroma_width() as usize * (y / 2) + x / 2;
+
+ Frame::fill_single(
+ self.ybuf[index],
+ self.ubuf[chroma_index],
+ self.vbuf[chroma_index],
+ rgb_chunk,
+ );
+ }
+ }
+
+ /// Fills an RGBA buffer, writing the RGB components and leaving the existing alpha values untouched
+ pub(crate) fn fill_rgba(&self, buf: &mut [u8]) {
+ for (index, rgba_chunk) in (0..self.ybuf.len()).zip(buf.chunks_exact_mut(4)) {
+ let y = index / self.width as usize;
+ let x = index % self.width as usize;
+ let chroma_index = self.chroma_width() as usize * (y / 2) + x / 2;
+
+ Frame::fill_single(
+ self.ybuf[index],
+ self.ubuf[chroma_index],
+ self.vbuf[chroma_index],
+ rgba_chunk,
+ );
+ }
+ }
+
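+ // Worked example (illustrative): studio-range white `(y, u, v) = (235, 128, 128)`
+ // gives `c = 219`, `d = e = 0` and `r = g = b = (298 * 219 + 128) >> 8 = 255`,
+ // while `(16, 128, 128)` maps to black.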
+ /// Conversion values from https://docs.microsoft.com/en-us/windows/win32/medfound/recommended-8-bit-yuv-formats-for-video-rendering#converting-8-bit-yuv-to-rgb888
+ fn fill_single(y: u8, u: u8, v: u8, rgb: &mut [u8]) {
+ let c: i32 = i32::from(y) - 16;
+ let d: i32 = i32::from(u) - 128;
+ let e: i32 = i32::from(v) - 128;
+
+ let r: u8 = clamp((298 * c + 409 * e + 128) >> 8, 0, 255)
+ .try_into()
+ .unwrap();
+ let g: u8 = clamp((298 * c - 100 * d - 208 * e + 128) >> 8, 0, 255)
+ .try_into()
+ .unwrap();
+ let b: u8 = clamp((298 * c + 516 * d + 128) >> 8, 0, 255)
+ .try_into()
+ .unwrap();
+
+ rgb[0] = r;
+ rgb[1] = g;
+ rgb[2] = b;
+ }
+
+ /// Gets the size in bytes of an RGB buffer large enough to hold the frame
+ pub fn get_buf_size(&self) -> usize {
+ self.ybuf.len() * 3
+ }
+}
+
+#[derive(Clone, Copy, Default)]
+struct Segment {
+ ydc: i16,
+ yac: i16,
+
+ y2dc: i16,
+ y2ac: i16,
+
+ uvdc: i16,
+ uvac: i16,
+
+ delta_values: bool,
+
+ quantizer_level: i8,
+ loopfilter_level: i8,
+}
+
+/// VP8 Decoder
+///
+/// Only decodes keyframes
+pub struct Vp8Decoder<R> {
+ r: R,
+ b: BoolReader,
+
+ mbwidth: u16,
+ mbheight: u16,
+ macroblocks: Vec<MacroBlock>,
+
+ frame: Frame,
+
+ segments_enabled: bool,
+ segments_update_map: bool,
+ segment: [Segment; MAX_SEGMENTS],
+
+ ref_delta: [i32; 4],
+ mode_delta: [i32; 4],
+
+ partitions: [BoolReader; 8],
+ num_partitions: u8,
+
+ segment_tree_probs: [Prob; 3],
+ token_probs: Box<TokenProbTables>,
+
+ // Section 9.10
+ prob_intra: Prob,
+
+ // Section 9.11
+ prob_skip_false: Option<Prob>,
+
+ top: Vec<MacroBlock>,
+ left: MacroBlock,
+
+ top_border: Vec<u8>,
+ left_border: Vec<u8>,
+}
+
+impl<R: Read> Vp8Decoder<R> {
+ /// Create a new decoder.
+ /// The reader must present a raw VP8 bitstream to the decoder.
+ pub fn new(r: R) -> Vp8Decoder<R> {
+ let f = Frame::default();
+ let s = Segment::default();
+ let m = MacroBlock::default();
+
+ Vp8Decoder {
+ r,
+ b: BoolReader::new(),
+
+ mbwidth: 0,
+ mbheight: 0,
+ macroblocks: Vec::new(),
+
+ frame: f,
+ segments_enabled: false,
+ segments_update_map: false,
+ segment: [s; MAX_SEGMENTS],
+
+ ref_delta: [0; 4],
+ mode_delta: [0; 4],
+
+ partitions: [
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ ],
+
+ num_partitions: 1,
+
+ segment_tree_probs: [255u8; 3],
+ token_probs: Box::new(COEFF_PROBS),
+
+ // Section 9.10
+ prob_intra: 0u8,
+
+ // Section 9.11
+ prob_skip_false: None,
+
+ top: Vec::new(),
+ left: m,
+
+ top_border: Vec::new(),
+ left_border: Vec::new(),
+ }
+ }
+
+ fn update_token_probabilities(&mut self) {
+ for (i, is) in COEFF_UPDATE_PROBS.iter().enumerate() {
+ for (j, js) in is.iter().enumerate() {
+ for (k, ks) in js.iter().enumerate() {
+ for (t, prob) in ks.iter().enumerate().take(NUM_DCT_TOKENS - 1) {
+ if self.b.read_bool(*prob) {
+ let v = self.b.read_literal(8);
+ self.token_probs[i][j][k][t] = v;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ fn init_partitions(&mut self, n: usize) -> ImageResult<()> {
+ if n > 1 {
+ let mut sizes = vec![0; 3 * n - 3];
+ self.r.read_exact(sizes.as_mut_slice())?;
+
+ for (i, s) in sizes.chunks(3).enumerate() {
+ let size = { s }
+ .read_u24::<LittleEndian>()
+ .expect("Reading from &[u8] can't fail and the chunk is complete");
+
+ let mut buf = vec![0; size as usize];
+ self.r.read_exact(buf.as_mut_slice())?;
+
+ self.partitions[i].init(buf)?;
+ }
+ }
+
+ let mut buf = Vec::new();
+ self.r.read_to_end(&mut buf)?;
+ self.partitions[n - 1].init(buf)?;
+
+ Ok(())
+ }
+
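+ // Note: `yac_abs` below is the frame-level 7-bit quantizer index; each optional
+ // delta is a 4-bit magnitude-and-sign offset added to the per-segment base index
+ // before the `DC_QUANT` / `AC_QUANT` lookup (clamped to 0..=127). The second-order
+ // luma values are then rescaled: DC doubled, AC multiplied by 155/100 with a floor
+ // of 8, and the chroma DC value capped at 132.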
+ fn read_quantization_indices(&mut self) {
+ fn dc_quant(index: i32) -> i16 {
+ DC_QUANT[clamp(index, 0, 127) as usize]
+ }
+
+ fn ac_quant(index: i32) -> i16 {
+ AC_QUANT[clamp(index, 0, 127) as usize]
+ }
+
+ let yac_abs = self.b.read_literal(7);
+ let ydc_delta = if self.b.read_flag() {
+ self.b.read_magnitude_and_sign(4)
+ } else {
+ 0
+ };
+
+ let y2dc_delta = if self.b.read_flag() {
+ self.b.read_magnitude_and_sign(4)
+ } else {
+ 0
+ };
+
+ let y2ac_delta = if self.b.read_flag() {
+ self.b.read_magnitude_and_sign(4)
+ } else {
+ 0
+ };
+
+ let uvdc_delta = if self.b.read_flag() {
+ self.b.read_magnitude_and_sign(4)
+ } else {
+ 0
+ };
+
+ let uvac_delta = if self.b.read_flag() {
+ self.b.read_magnitude_and_sign(4)
+ } else {
+ 0
+ };
+
+ let n = if self.segments_enabled {
+ MAX_SEGMENTS
+ } else {
+ 1
+ };
+ for i in 0usize..n {
+ let base = i32::from(if !self.segment[i].delta_values {
+ i16::from(self.segment[i].quantizer_level)
+ } else {
+ i16::from(self.segment[i].quantizer_level) + i16::from(yac_abs)
+ });
+
+ self.segment[i].ydc = dc_quant(base + ydc_delta);
+ self.segment[i].yac = ac_quant(base);
+
+ self.segment[i].y2dc = dc_quant(base + y2dc_delta) * 2;
+ // The intermediate result (at most `284 * 155`) can be larger than the `i16` range.
+ self.segment[i].y2ac = (i32::from(ac_quant(base + y2ac_delta)) * 155 / 100) as i16;
+
+ self.segment[i].uvdc = dc_quant(base + uvdc_delta);
+ self.segment[i].uvac = ac_quant(base + uvac_delta);
+
+ if self.segment[i].y2ac < 8 {
+ self.segment[i].y2ac = 8;
+ }
+
+ if self.segment[i].uvdc > 132 {
+ self.segment[i].uvdc = 132;
+ }
+ }
+ }
+
+ fn read_loop_filter_adjustments(&mut self) {
+ if self.b.read_flag() {
+ for i in 0usize..4 {
+ let ref_frame_delta_update_flag = self.b.read_flag();
+
+ self.ref_delta[i] = if ref_frame_delta_update_flag {
+ self.b.read_magnitude_and_sign(6)
+ } else {
+ 0i32
+ };
+ }
+
+ for i in 0usize..4 {
+ let mb_mode_delta_update_flag = self.b.read_flag();
+
+ self.mode_delta[i] = if mb_mode_delta_update_flag {
+ self.b.read_magnitude_and_sign(6)
+ } else {
+ 0i32
+ };
+ }
+ }
+ }
+
+ fn read_segment_updates(&mut self) {
+ // Section 9.3
+ self.segments_update_map = self.b.read_flag();
+ let update_segment_feature_data = self.b.read_flag();
+
+ if update_segment_feature_data {
+ let segment_feature_mode = self.b.read_flag();
+
+ for i in 0usize..MAX_SEGMENTS {
+ self.segment[i].delta_values = !segment_feature_mode;
+ }
+
+ for i in 0usize..MAX_SEGMENTS {
+ let update = self.b.read_flag();
+
+ self.segment[i].quantizer_level = if update {
+ self.b.read_magnitude_and_sign(7)
+ } else {
+ 0i32
+ } as i8;
+ }
+
+ for i in 0usize..MAX_SEGMENTS {
+ let update = self.b.read_flag();
+
+ self.segment[i].loopfilter_level = if update {
+ self.b.read_magnitude_and_sign(6)
+ } else {
+ 0i32
+ } as i8;
+ }
+ }
+
+ if self.segments_update_map {
+ for i in 0usize..3 {
+ let update = self.b.read_flag();
+
+ self.segment_tree_probs[i] = if update { self.b.read_literal(8) } else { 255 };
+ }
+ }
+ }
+
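+ // Note on the 3-byte frame tag read below: bit 0 is the keyframe flag (0 means
+ // keyframe), bits 1..=3 the version, bit 4 the show-frame flag, and the remaining
+ // 19 bits the size of the first partition. Keyframes additionally carry the
+ // start code `0x9d 0x01 0x2a` followed by two little-endian words whose low
+ // 14 bits are the width and height.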
+ fn read_frame_header(&mut self) -> ImageResult<()> {
+ let tag = self.r.read_u24::<LittleEndian>()?;
+
+ self.frame.keyframe = tag & 1 == 0;
+ self.frame.version = ((tag >> 1) & 7) as u8;
+ self.frame.for_display = (tag >> 4) & 1 != 0;
+
+ let first_partition_size = tag >> 5;
+
+ if self.frame.keyframe {
+ let mut tag = [0u8; 3];
+ self.r.read_exact(&mut tag)?;
+
+ if tag != [0x9d, 0x01, 0x2a] {
+ return Err(DecoderError::Vp8MagicInvalid(tag).into());
+ }
+
+ let w = self.r.read_u16::<LittleEndian>()?;
+ let h = self.r.read_u16::<LittleEndian>()?;
+
+ self.frame.width = w & 0x3FFF;
+ self.frame.height = h & 0x3FFF;
+
+ self.top = init_top_macroblocks(self.frame.width as usize);
+ // Almost always the first macroblock, except when none exists (i.e. `width == 0`)
+ self.left = self.top.get(0).cloned().unwrap_or_default();
+
+ self.mbwidth = (self.frame.width + 15) / 16;
+ self.mbheight = (self.frame.height + 15) / 16;
+
+ self.frame.ybuf = vec![0u8; self.frame.width as usize * self.frame.height as usize];
+ self.frame.ubuf =
+ vec![0u8; self.frame.chroma_width() as usize * self.frame.chroma_height() as usize];
+ self.frame.vbuf =
+ vec![0u8; self.frame.chroma_width() as usize * self.frame.chroma_height() as usize];
+
+ self.top_border = vec![127u8; self.frame.width as usize + 4 + 16];
+ self.left_border = vec![129u8; 1 + 16];
+ }
+
+ let mut buf = vec![0; first_partition_size as usize];
+ self.r.read_exact(&mut buf)?;
+
+ // initialise binary decoder
+ self.b.init(buf)?;
+
+ if self.frame.keyframe {
+ let color_space = self.b.read_literal(1);
+ self.frame.pixel_type = self.b.read_literal(1);
+
+ if color_space != 0 {
+ return Err(DecoderError::ColorSpaceInvalid(color_space).into());
+ }
+ }
+
+ self.segments_enabled = self.b.read_flag();
+ if self.segments_enabled {
+ self.read_segment_updates();
+ }
+
+ self.frame.filter_type = self.b.read_flag();
+ self.frame.filter_level = self.b.read_literal(6);
+ self.frame.sharpness_level = self.b.read_literal(3);
+
+ let lf_adjust_enable = self.b.read_flag();
+ if lf_adjust_enable {
+ self.read_loop_filter_adjustments();
+ }
+
+ self.num_partitions = (1usize << self.b.read_literal(2) as usize) as u8;
+ let num_partitions = self.num_partitions as usize;
+ self.init_partitions(num_partitions)?;
+
+ self.read_quantization_indices();
+
+ if !self.frame.keyframe {
+ // 9.7 refresh golden frame and altref frame
+ // FIXME: support this?
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::WebP.into(),
+ UnsupportedErrorKind::GenericFeature("Non-keyframe frames".to_owned()),
+ ),
+ ));
+ } else {
+ // Refresh entropy probs flag; read and ignored here
+ let _ = self.b.read_literal(1);
+ }
+
+ self.update_token_probabilities();
+
+ let mb_no_skip_coeff = self.b.read_literal(1);
+ self.prob_skip_false = if mb_no_skip_coeff == 1 {
+ Some(self.b.read_literal(8))
+ } else {
+ None
+ };
+
+ if !self.frame.keyframe {
+ // 9.10 remaining frame data
+ self.prob_intra = 0;
+
+ // FIXME: support this?
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::WebP.into(),
+ UnsupportedErrorKind::GenericFeature("Non-keyframe frames".to_owned()),
+ ),
+ ));
+ } else {
+ // Reset motion vectors
+ }
+
+ Ok(())
+ }
+
+ fn read_macroblock_header(&mut self, mbx: usize) -> ImageResult<MacroBlock> {
+ let mut mb = MacroBlock::default();
+
+ if self.segments_enabled && self.segments_update_map {
+ mb.segmentid = self
+ .b
+ .read_with_tree(&SEGMENT_ID_TREE, &self.segment_tree_probs, 0)
+ as u8;
+ };
+
+ mb.coeffs_skipped = if self.prob_skip_false.is_some() {
+ self.b.read_bool(*self.prob_skip_false.as_ref().unwrap())
+ } else {
+ false
+ };
+
+ let inter_predicted = if !self.frame.keyframe {
+ self.b.read_bool(self.prob_intra)
+ } else {
+ false
+ };
+
+ if inter_predicted {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::WebP.into(),
+ UnsupportedErrorKind::GenericFeature("VP8 inter-prediction".to_owned()),
+ ),
+ ));
+ }
+
+ if self.frame.keyframe {
+ // intra prediction
+ let luma = self
+ .b
+ .read_with_tree(&KEYFRAME_YMODE_TREE, &KEYFRAME_YMODE_PROBS, 0);
+ mb.luma_mode =
+ LumaMode::from_i8(luma).ok_or(DecoderError::LumaPredictionModeInvalid(luma))?;
+
+ match mb.luma_mode.into_intra() {
+ // `LumaMode::B` - each of the sixteen 4x4 subblocks is predicted individually
+ None => {
+ for y in 0usize..4 {
+ for x in 0usize..4 {
+ let top = self.top[mbx].bpred[12 + x];
+ let left = self.left.bpred[y];
+ let intra = self.b.read_with_tree(
+ &KEYFRAME_BPRED_MODE_TREE,
+ &KEYFRAME_BPRED_MODE_PROBS[top as usize][left as usize],
+ 0,
+ );
+ let bmode = IntraMode::from_i8(intra)
+ .ok_or(DecoderError::IntraPredictionModeInvalid(intra))?;
+ mb.bpred[x + y * 4] = bmode;
+
+ self.top[mbx].bpred[12 + x] = bmode;
+ self.left.bpred[y] = bmode;
+ }
+ }
+ }
+ Some(mode) => {
+ for i in 0usize..4 {
+ mb.bpred[12 + i] = mode;
+ self.left.bpred[i] = mode;
+ }
+ }
+ }
+
+ let chroma = self
+ .b
+ .read_with_tree(&KEYFRAME_UV_MODE_TREE, &KEYFRAME_UV_MODE_PROBS, 0);
+ mb.chroma_mode = ChromaMode::from_i8(chroma)
+ .ok_or(DecoderError::ChromaPredictionModeInvalid(chroma))?;
+ }
+
+ self.top[mbx].chroma_mode = mb.chroma_mode;
+ self.top[mbx].luma_mode = mb.luma_mode;
+ self.top[mbx].bpred = mb.bpred;
+
+ Ok(mb)
+ }
+
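+ // Note: the luma prediction below runs in a 17x21 workspace (`stride` = 1 + 16 + 4):
+ // row 0 holds the above-border pixels plus four extra top-right pixels, column 0
+ // holds the left-border pixels, and the reconstructed 16x16 block lives at rows
+ // and columns 1..=16.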
+ fn intra_predict_luma(&mut self, mbx: usize, mby: usize, mb: &MacroBlock, resdata: &[i32]) {
+ let stride = 1usize + 16 + 4;
+ let w = self.frame.width as usize;
+ let mw = self.mbwidth as usize;
+ let mut ws = create_border_luma(mbx, mby, mw, &self.top_border, &self.left_border);
+
+ match mb.luma_mode {
+ LumaMode::V => predict_vpred(&mut ws, 16, 1, 1, stride),
+ LumaMode::H => predict_hpred(&mut ws, 16, 1, 1, stride),
+ LumaMode::TM => predict_tmpred(&mut ws, 16, 1, 1, stride),
+ LumaMode::DC => predict_dcpred(&mut ws, 16, stride, mby != 0, mbx != 0),
+ LumaMode::B => predict_4x4(&mut ws, stride, &mb.bpred, resdata),
+ }
+
+ if mb.luma_mode != LumaMode::B {
+ for y in 0usize..4 {
+ for x in 0usize..4 {
+ let i = x + y * 4;
+ // Create a reference to an `[i32; 16]` array for `add_residue` (it takes a fixed-size array rather than a slice).
+ let rb: &[i32; 16] = resdata[i * 16..][..16].try_into().unwrap();
+ let y0 = 1 + y * 4;
+ let x0 = 1 + x * 4;
+
+ add_residue(&mut ws, rb, y0, x0, stride);
+ }
+ }
+ }
+
+ self.left_border[0] = ws[16];
+
+ for i in 0usize..16 {
+ self.top_border[mbx * 16 + i] = ws[16 * stride + 1 + i];
+ self.left_border[i + 1] = ws[(i + 1) * stride + 16];
+ }
+
+ // Length is the remaining distance to the frame border, capped at the size of the current macroblock.
+ let ylength = cmp::min(self.frame.height as usize - mby * 16, 16);
+ let xlength = cmp::min(self.frame.width as usize - mbx * 16, 16);
+
+ for y in 0usize..ylength {
+ for x in 0usize..xlength {
+ self.frame.ybuf[(mby * 16 + y) * w + mbx * 16 + x] = ws[(1 + y) * stride + 1 + x];
+ }
+ }
+ }
+
+ fn intra_predict_chroma(&mut self, mbx: usize, mby: usize, mb: &MacroBlock, resdata: &[i32]) {
+ let stride = 1usize + 8;
+
+ let w = self.frame.chroma_width() as usize;
+
+ // 8x8 chroma block with a one-pixel top/left border
+ let mut uws = [0u8; (8 + 1) * (8 + 1)];
+ let mut vws = [0u8; (8 + 1) * (8 + 1)];
+
+ let ylength = cmp::min(self.frame.chroma_height() as usize - mby * 8, 8);
+ let xlength = cmp::min(self.frame.chroma_width() as usize - mbx * 8, 8);
+
+ //left border
+ for y in 0usize..8 {
+ let (uy, vy) = if mbx == 0 || y >= ylength {
+ (129, 129)
+ } else {
+ let index = (mby * 8 + y) * w + ((mbx - 1) * 8 + 7);
+ (self.frame.ubuf[index], self.frame.vbuf[index])
+ };
+
+ uws[(y + 1) * stride] = uy;
+ vws[(y + 1) * stride] = vy;
+ }
+ //top border
+ for x in 0usize..8 {
+ let (ux, vx) = if mby == 0 || x >= xlength {
+ (127, 127)
+ } else {
+ let index = ((mby - 1) * 8 + 7) * w + (mbx * 8 + x);
+ (self.frame.ubuf[index], self.frame.vbuf[index])
+ };
+
+ uws[x + 1] = ux;
+ vws[x + 1] = vx;
+ }
+
+ //top left point
+ let (u1, v1) = if mby == 0 {
+ (127, 127)
+ } else if mbx == 0 {
+ (129, 129)
+ } else {
+ let index = ((mby - 1) * 8 + 7) * w + (mbx - 1) * 8 + 7;
+ if index >= self.frame.ubuf.len() {
+ (127, 127)
+ } else {
+ (self.frame.ubuf[index], self.frame.vbuf[index])
+ }
+ };
+
+ uws[0] = u1;
+ vws[0] = v1;
+
+ match mb.chroma_mode {
+ ChromaMode::DC => {
+ predict_dcpred(&mut uws, 8, stride, mby != 0, mbx != 0);
+ predict_dcpred(&mut vws, 8, stride, mby != 0, mbx != 0);
+ }
+ ChromaMode::V => {
+ predict_vpred(&mut uws, 8, 1, 1, stride);
+ predict_vpred(&mut vws, 8, 1, 1, stride);
+ }
+ ChromaMode::H => {
+ predict_hpred(&mut uws, 8, 1, 1, stride);
+ predict_hpred(&mut vws, 8, 1, 1, stride);
+ }
+ ChromaMode::TM => {
+ predict_tmpred(&mut uws, 8, 1, 1, stride);
+ predict_tmpred(&mut vws, 8, 1, 1, stride);
+ }
+ }
+
+ for y in 0usize..2 {
+ for x in 0usize..2 {
+ let i = x + y * 2;
+ let urb: &[i32; 16] = resdata[16 * 16 + i * 16..][..16].try_into().unwrap();
+
+ let y0 = 1 + y * 4;
+ let x0 = 1 + x * 4;
+ add_residue(&mut uws, urb, y0, x0, stride);
+
+ let vrb: &[i32; 16] = resdata[20 * 16 + i * 16..][..16].try_into().unwrap();
+
+ add_residue(&mut vws, vrb, y0, x0, stride);
+ }
+ }
+
+ for y in 0usize..ylength {
+ for x in 0usize..xlength {
+ self.frame.ubuf[(mby * 8 + y) * w + mbx * 8 + x] = uws[(1 + y) * stride + 1 + x];
+ self.frame.vbuf[(mby * 8 + y) * w + mbx * 8 + x] = vws[(1 + y) * stride + 1 + x];
+ }
+ }
+ }
+
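+ // Note: tokens are decoded with `DCT_TOKEN_TREE` using probabilities selected by
+ // plane, coefficient band and a local complexity of 0, 1 or 2 (previous value was
+ // zero, one, or larger). `DCT_EOB` ends the block, `DCT_0` records a zero and makes
+ // the next read start at tree index 2, the category tokens read extra magnitude
+ // bits, a final bool at probability 128 gives the sign, and each value is stored in
+ // zigzag order scaled by `dcq` (the DC position) or `acq` (all other positions).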
+ fn read_coefficients(
+ &mut self,
+ block: &mut [i32],
+ p: usize,
+ plane: usize,
+ complexity: usize,
+ dcq: i16,
+ acq: i16,
+ ) -> bool {
+ let first = if plane == 0 { 1usize } else { 0usize };
+ let probs = &self.token_probs[plane];
+ let tree = &DCT_TOKEN_TREE;
+
+ let mut complexity = complexity;
+ let mut has_coefficients = false;
+ let mut skip = false;
+
+ for i in first..16usize {
+ let table = &probs[COEFF_BANDS[i] as usize][complexity];
+
+ let token = if !skip {
+ self.partitions[p].read_with_tree(tree, table, 0)
+ } else {
+ self.partitions[p].read_with_tree(tree, table, 2)
+ };
+
+ let mut abs_value = i32::from(match token {
+ DCT_EOB => break,
+
+ DCT_0 => {
+ skip = true;
+ has_coefficients = true;
+ complexity = 0;
+ continue;
+ }
+
+ literal @ DCT_1..=DCT_4 => i16::from(literal),
+
+ category @ DCT_CAT1..=DCT_CAT6 => {
+ let t = PROB_DCT_CAT[(category - DCT_CAT1) as usize];
+
+ let mut extra = 0i16;
+ let mut j = 0;
+
+ while t[j] > 0 {
+ extra = extra + extra + self.partitions[p].read_bool(t[j]) as i16;
+ j += 1;
+ }
+
+ i16::from(DCT_CAT_BASE[(category - DCT_CAT1) as usize]) + extra
+ }
+
+ c => panic!("unknown token: {}", c),
+ });
+
+ skip = false;
+
+ complexity = if abs_value == 0 {
+ 0
+ } else if abs_value == 1 {
+ 1
+ } else {
+ 2
+ };
+
+ if self.partitions[p].read_bool(128) {
+ abs_value = -abs_value;
+ }
+
+ block[ZIGZAG[i] as usize] =
+ abs_value * i32::from(if ZIGZAG[i] > 0 { acq } else { dcq });
+
+ has_coefficients = true;
+ }
+
+ has_coefficients
+ }
+
+ fn read_residual_data(&mut self, mb: &MacroBlock, mbx: usize, p: usize) -> [i32; 384] {
+ let sindex = mb.segmentid as usize;
+ let mut blocks = [0i32; 384];
+ let mut plane = if mb.luma_mode == LumaMode::B { 3 } else { 1 };
+
+ if plane == 1 {
+ let complexity = self.top[mbx].complexity[0] + self.left.complexity[0];
+ let mut block = [0i32; 16];
+ let dcq = self.segment[sindex].y2dc;
+ let acq = self.segment[sindex].y2ac;
+ let n = self.read_coefficients(&mut block, p, plane, complexity as usize, dcq, acq);
+
+ self.left.complexity[0] = if n { 1 } else { 0 };
+ self.top[mbx].complexity[0] = if n { 1 } else { 0 };
+
+ transform::iwht4x4(&mut block);
+
+ for k in 0usize..16 {
+ blocks[16 * k] = block[k];
+ }
+
+ plane = 0;
+ }
+
+ for y in 0usize..4 {
+ let mut left = self.left.complexity[y + 1];
+ for x in 0usize..4 {
+ let i = x + y * 4;
+ let block = &mut blocks[i * 16..i * 16 + 16];
+
+ let complexity = self.top[mbx].complexity[x + 1] + left;
+ let dcq = self.segment[sindex].ydc;
+ let acq = self.segment[sindex].yac;
+
+ let n = self.read_coefficients(block, p, plane, complexity as usize, dcq, acq);
+
+ if block[0] != 0 || n {
+ transform::idct4x4(block);
+ }
+
+ left = if n { 1 } else { 0 };
+ self.top[mbx].complexity[x + 1] = if n { 1 } else { 0 };
+ }
+
+ self.left.complexity[y + 1] = left;
+ }
+
+ plane = 2;
+
+ for &j in &[5usize, 7usize] {
+ for y in 0usize..2 {
+ let mut left = self.left.complexity[y + j];
+
+ for x in 0usize..2 {
+ let i = x + y * 2 + if j == 5 { 16 } else { 20 };
+ let block = &mut blocks[i * 16..i * 16 + 16];
+
+ let complexity = self.top[mbx].complexity[x + j] + left;
+ let dcq = self.segment[sindex].uvdc;
+ let acq = self.segment[sindex].uvac;
+
+ let n = self.read_coefficients(block, p, plane, complexity as usize, dcq, acq);
+ if block[0] != 0 || n {
+ transform::idct4x4(block);
+ }
+
+ left = if n { 1 } else { 0 };
+ self.top[mbx].complexity[x + j] = if n { 1 } else { 0 };
+ }
+
+ self.left.complexity[y + j] = left;
+ }
+ }
+
+ blocks
+ }
+
+ /// Does loop filtering on the macroblock
+ fn loop_filter(&mut self, mbx: usize, mby: usize, mb: &MacroBlock) {
+ let luma_w = self.frame.width as usize;
+ let luma_h = self.frame.height as usize;
+ let chroma_w = self.frame.chroma_width() as usize;
+ let chroma_h = self.frame.chroma_height() as usize;
+
+ let (filter_level, interior_limit, hev_threshold) = self.calculate_filter_parameters(mb);
+
+ if filter_level > 0 {
+ let mbedge_limit = (filter_level + 2) * 2 + interior_limit;
+ let sub_bedge_limit = (filter_level * 2) + interior_limit;
+
+ let luma_ylength = cmp::min(luma_h - 16 * mby, 16);
+ let luma_xlength = cmp::min(luma_w - 16 * mbx, 16);
+
+ let chroma_ylength = cmp::min(chroma_h - 8 * mby, 8);
+ let chroma_xlength = cmp::min(chroma_w - 8 * mbx, 8);
+
+ //filter across left of macroblock
+ if mbx > 0 {
+ //simple loop filtering
+ if self.frame.filter_type {
+ if luma_xlength >= 2 {
+ for y in 0usize..luma_ylength {
+ let y0 = mby * 16 + y;
+ let x0 = mbx * 16;
+
+ loop_filter::simple_segment(
+ mbedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ 1,
+ );
+ }
+ }
+ } else {
+ if luma_xlength >= 4 {
+ for y in 0usize..luma_ylength {
+ let y0 = mby * 16 + y;
+ let x0 = mbx * 16;
+
+ loop_filter::macroblock_filter(
+ hev_threshold,
+ interior_limit,
+ mbedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ 1,
+ );
+ }
+ }
+
+ if chroma_xlength >= 4 {
+ for y in 0usize..chroma_ylength {
+ let y0 = mby * 8 + y;
+ let x0 = mbx * 8;
+
+ loop_filter::macroblock_filter(
+ hev_threshold,
+ interior_limit,
+ mbedge_limit,
+ &mut self.frame.ubuf[..],
+ y0 * chroma_w + x0,
+ 1,
+ );
+ loop_filter::macroblock_filter(
+ hev_threshold,
+ interior_limit,
+ mbedge_limit,
+ &mut self.frame.vbuf[..],
+ y0 * chroma_w + x0,
+ 1,
+ );
+ }
+ }
+ }
+ }
+
+ //filter across vertical subblocks in macroblock
+ if mb.luma_mode == LumaMode::B || !mb.coeffs_skipped {
+ if self.frame.filter_type {
+ for x in (4usize..luma_xlength - 1).step_by(4) {
+ for y in 0..luma_ylength {
+ let y0 = mby * 16 + y;
+ let x0 = mbx * 16 + x;
+
+ loop_filter::simple_segment(
+ sub_bedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ 1,
+ );
+ }
+ }
+ } else {
+ if luma_xlength > 3 {
+ for x in (4usize..luma_xlength - 3).step_by(4) {
+ for y in 0..luma_ylength {
+ let y0 = mby * 16 + y;
+ let x0 = mbx * 16 + x;
+
+ loop_filter::subblock_filter(
+ hev_threshold,
+ interior_limit,
+ sub_bedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ 1,
+ );
+ }
+ }
+ }
+
+ if chroma_xlength == 8 {
+ for y in 0usize..chroma_ylength {
+ let y0 = mby * 8 + y;
+ let x0 = mbx * 8 + 4;
+
+ loop_filter::subblock_filter(
+ hev_threshold,
+ interior_limit,
+ sub_bedge_limit,
+ &mut self.frame.ubuf[..],
+ y0 * chroma_w + x0,
+ 1,
+ );
+
+ loop_filter::subblock_filter(
+ hev_threshold,
+ interior_limit,
+ sub_bedge_limit,
+ &mut self.frame.vbuf[..],
+ y0 * chroma_w + x0,
+ 1,
+ );
+ }
+ }
+ }
+ }
+
+ //filter across top of macroblock
+ if mby > 0 {
+ if self.frame.filter_type {
+ if luma_ylength >= 2 {
+ for x in 0usize..luma_xlength {
+ let y0 = mby * 16;
+ let x0 = mbx * 16 + x;
+
+ loop_filter::simple_segment(
+ mbedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ luma_w,
+ );
+ }
+ }
+ } else {
+ // if this is the bottom macroblock, can only filter if there are 3 pixels below
+ if luma_ylength >= 4 {
+ for x in 0usize..luma_xlength {
+ let y0 = mby * 16;
+ let x0 = mbx * 16 + x;
+
+ loop_filter::macroblock_filter(
+ hev_threshold,
+ interior_limit,
+ mbedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ luma_w,
+ );
+ }
+ }
+
+ if chroma_ylength >= 4 {
+ for x in 0usize..chroma_xlength {
+ let y0 = mby * 8;
+ let x0 = mbx * 8 + x;
+
+ loop_filter::macroblock_filter(
+ hev_threshold,
+ interior_limit,
+ mbedge_limit,
+ &mut self.frame.ubuf[..],
+ y0 * chroma_w + x0,
+ chroma_w,
+ );
+ loop_filter::macroblock_filter(
+ hev_threshold,
+ interior_limit,
+ mbedge_limit,
+ &mut self.frame.vbuf[..],
+ y0 * chroma_w + x0,
+ chroma_w,
+ );
+ }
+ }
+ }
+ }
+
+ //filter across horizontal subblock edges within the macroblock
+ if mb.luma_mode == LumaMode::B || !mb.coeffs_skipped {
+ if self.frame.filter_type {
+ for y in (4usize..luma_ylength - 1).step_by(4) {
+ for x in 0..luma_xlength {
+ let y0 = mby * 16 + y;
+ let x0 = mbx * 16 + x;
+
+ loop_filter::simple_segment(
+ sub_bedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ luma_w,
+ );
+ }
+ }
+ } else {
+ if luma_ylength > 3 {
+ for y in (4usize..luma_ylength - 3).step_by(4) {
+ for x in 0..luma_xlength {
+ let y0 = mby * 16 + y;
+ let x0 = mbx * 16 + x;
+
+ loop_filter::subblock_filter(
+ hev_threshold,
+ interior_limit,
+ sub_bedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ luma_w,
+ );
+ }
+ }
+ }
+
+ if chroma_ylength == 8 {
+ for x in 0..chroma_xlength {
+ let y0 = mby * 8 + 4;
+ let x0 = mbx * 8 + x;
+
+ loop_filter::subblock_filter(
+ hev_threshold,
+ interior_limit,
+ sub_bedge_limit,
+ &mut self.frame.ubuf[..],
+ y0 * chroma_w + x0,
+ chroma_w,
+ );
+
+ loop_filter::subblock_filter(
+ hev_threshold,
+ interior_limit,
+ sub_bedge_limit,
+ &mut self.frame.vbuf[..],
+ y0 * chroma_w + x0,
+ chroma_w,
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Returns the filter level, interior limit and high edge variance threshold
+ fn calculate_filter_parameters(&self, macroblock: &MacroBlock) -> (u8, u8, u8) {
+ let segment = self.segment[macroblock.segmentid as usize];
+ let mut filter_level = self.frame.filter_level as i32;
+
+ if self.segments_enabled {
+ if segment.delta_values {
+ filter_level += i32::from(segment.loopfilter_level);
+ } else {
+ filter_level = i32::from(segment.loopfilter_level);
+ }
+ }
+
+ filter_level = clamp(filter_level, 0, 63);
+
+ if macroblock.luma_mode == LumaMode::B {
+ filter_level += self.mode_delta[0];
+ }
+
+ let filter_level = clamp(filter_level, 0, 63) as u8;
+
+ //interior limit
+ let mut interior_limit = filter_level;
+
+ if self.frame.sharpness_level > 0 {
+ interior_limit >>= if self.frame.sharpness_level > 4 { 2 } else { 1 };
+
+ if interior_limit > 9 - self.frame.sharpness_level {
+ interior_limit = 9 - self.frame.sharpness_level;
+ }
+ }
+
+ if interior_limit == 0 {
+ interior_limit = 1;
+ }
+
+ //high edge variance threshold
+ let mut hev_threshold = 0;
+
+ #[allow(clippy::collapsible_else_if)]
+ if self.frame.keyframe {
+ if filter_level >= 40 {
+ hev_threshold = 2;
+ } else {
+ hev_threshold = 1;
+ }
+ } else {
+ if filter_level >= 40 {
+ hev_threshold = 3;
+ } else if filter_level >= 20 {
+ hev_threshold = 2;
+ } else if filter_level >= 15 {
+ hev_threshold = 1;
+ }
+ }
+
+ (filter_level, interior_limit, hev_threshold)
+ }
+
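+ // Note: decoding proceeds in two passes over the macroblocks. The first pass
+ // decodes each macroblock header, its residuals (using the partition chosen by
+ // `row % num_partitions`) and the intra prediction for luma and chroma; the
+ // second pass applies the loop filter to the reconstructed planes.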
+ /// Decodes the current frame
+ pub fn decode_frame(&mut self) -> ImageResult<&Frame> {
+ self.read_frame_header()?;
+
+ for mby in 0..self.mbheight as usize {
+ let p = mby % self.num_partitions as usize;
+ self.left = MacroBlock::default();
+
+ for mbx in 0..self.mbwidth as usize {
+ let mb = self.read_macroblock_header(mbx)?;
+ let blocks = if !mb.coeffs_skipped {
+ self.read_residual_data(&mb, mbx, p)
+ } else {
+ if mb.luma_mode != LumaMode::B {
+ self.left.complexity[0] = 0;
+ self.top[mbx].complexity[0] = 0;
+ }
+
+ for i in 1usize..9 {
+ self.left.complexity[i] = 0;
+ self.top[mbx].complexity[i] = 0;
+ }
+
+ [0i32; 384]
+ };
+
+ self.intra_predict_luma(mbx, mby, &mb, &blocks);
+ self.intra_predict_chroma(mbx, mby, &mb, &blocks);
+
+ self.macroblocks.push(mb);
+ }
+
+ self.left_border = vec![129u8; 1 + 16];
+ }
+
+ //do loop filtering
+ for mby in 0..self.mbheight as usize {
+ for mbx in 0..self.mbwidth as usize {
+ let mb = self.macroblocks[mby * self.mbwidth as usize + mbx];
+ self.loop_filter(mbx, mby, &mb);
+ }
+ }
+
+ Ok(&self.frame)
+ }
+}
+
+impl LumaMode {
+ fn from_i8(val: i8) -> Option<Self> {
+ Some(match val {
+ DC_PRED => LumaMode::DC,
+ V_PRED => LumaMode::V,
+ H_PRED => LumaMode::H,
+ TM_PRED => LumaMode::TM,
+ B_PRED => LumaMode::B,
+ _ => return None,
+ })
+ }
+
+ fn into_intra(self) -> Option<IntraMode> {
+ Some(match self {
+ LumaMode::DC => IntraMode::DC,
+ LumaMode::V => IntraMode::VE,
+ LumaMode::H => IntraMode::HE,
+ LumaMode::TM => IntraMode::TM,
+ LumaMode::B => return None,
+ })
+ }
+}
+
+impl Default for LumaMode {
+ fn default() -> Self {
+ LumaMode::DC
+ }
+}
+
+impl ChromaMode {
+ fn from_i8(val: i8) -> Option<Self> {
+ Some(match val {
+ DC_PRED => ChromaMode::DC,
+ V_PRED => ChromaMode::V,
+ H_PRED => ChromaMode::H,
+ TM_PRED => ChromaMode::TM,
+ _ => return None,
+ })
+ }
+}
+
+impl Default for ChromaMode {
+ fn default() -> Self {
+ ChromaMode::DC
+ }
+}
+
+impl IntraMode {
+ fn from_i8(val: i8) -> Option<Self> {
+ Some(match val {
+ B_DC_PRED => IntraMode::DC,
+ B_TM_PRED => IntraMode::TM,
+ B_VE_PRED => IntraMode::VE,
+ B_HE_PRED => IntraMode::HE,
+ B_LD_PRED => IntraMode::LD,
+ B_RD_PRED => IntraMode::RD,
+ B_VR_PRED => IntraMode::VR,
+ B_VL_PRED => IntraMode::VL,
+ B_HD_PRED => IntraMode::HD,
+ B_HU_PRED => IntraMode::HU,
+ _ => return None,
+ })
+ }
+}
+
+impl Default for IntraMode {
+ fn default() -> Self {
+ IntraMode::DC
+ }
+}
+
+fn init_top_macroblocks(width: usize) -> Vec<MacroBlock> {
+ let mb_width = (width + 15) / 16;
+
+ let mb = MacroBlock {
+ // Section 11.3 #3
+ bpred: [IntraMode::DC; 16],
+ luma_mode: LumaMode::DC,
+ ..MacroBlock::default()
+ };
+
+ vec![mb; mb_width]
+}
+
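+// Note: when a neighbouring macroblock is missing, the workspace borders fall back
+// to the constants used below: 127 for the row above, 129 for the column on the
+// left, and the top-left corner picks whichever of the two applies.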
+fn create_border_luma(mbx: usize, mby: usize, mbw: usize, top: &[u8], left: &[u8]) -> [u8; 357] {
+ let stride = 1usize + 16 + 4;
+ let mut ws = [0u8; (1 + 16) * (1 + 16 + 4)];
+
+ // A
+ {
+ let above = &mut ws[1..stride];
+ if mby == 0 {
+ for above in above.iter_mut() {
+ *above = 127;
+ }
+ } else {
+ for i in 0usize..16 {
+ above[i] = top[mbx * 16 + i];
+ }
+
+ if mbx == mbw - 1 {
+ for above in above.iter_mut().skip(16) {
+ *above = top[mbx * 16 + 15];
+ }
+ } else {
+ for i in 16usize..above.len() {
+ above[i] = top[mbx * 16 + i];
+ }
+ }
+ }
+ }
+
+ for i in 17usize..stride {
+ ws[4 * stride + i] = ws[i];
+ ws[8 * stride + i] = ws[i];
+ ws[12 * stride + i] = ws[i];
+ }
+
+ // L
+ if mbx == 0 {
+ for i in 0usize..16 {
+ ws[(i + 1) * stride] = 129;
+ }
+ } else {
+ for i in 0usize..16 {
+ ws[(i + 1) * stride] = left[i + 1];
+ }
+ }
+
+ // P
+ ws[0] = if mby == 0 {
+ 127
+ } else if mbx == 0 {
+ 129
+ } else {
+ left[0]
+ };
+
+ ws
+}
+
+fn avg3(left: u8, this: u8, right: u8) -> u8 {
+ let avg = (u16::from(left) + 2 * u16::from(this) + u16::from(right) + 2) >> 2;
+ avg as u8
+}
+
+fn avg2(this: u8, right: u8) -> u8 {
+ let avg = (u16::from(this) + u16::from(right) + 1) >> 1;
+ avg as u8
+}
+
+// Only 16 elements from rblock are used to add residue, so it is restricted to 16 elements
+// to enable SIMD and other optimizations.
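+// Worked example (illustrative): a predicted pixel of 200 with a residue of 100
+// saturates to 255, and one of 20 with a residue of -100 clamps to 0.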
+fn add_residue(pblock: &mut [u8], rblock: &[i32; 16], y0: usize, x0: usize, stride: usize) {
+ let mut pos = y0 * stride + x0;
+ for row in rblock.chunks(4) {
+ for (p, &a) in pblock[pos..pos + 4].iter_mut().zip(row.iter()) {
+ *p = clamp(a + i32::from(*p), 0, 255) as u8;
+ }
+ pos += stride;
+ }
+}
+
+fn predict_4x4(ws: &mut [u8], stride: usize, modes: &[IntraMode], resdata: &[i32]) {
+ for sby in 0usize..4 {
+ for sbx in 0usize..4 {
+ let i = sbx + sby * 4;
+ let y0 = sby * 4 + 1;
+ let x0 = sbx * 4 + 1;
+
+ match modes[i] {
+ IntraMode::TM => predict_tmpred(ws, 4, x0, y0, stride),
+ IntraMode::VE => predict_bvepred(ws, x0, y0, stride),
+ IntraMode::HE => predict_bhepred(ws, x0, y0, stride),
+ IntraMode::DC => predict_bdcpred(ws, x0, y0, stride),
+ IntraMode::LD => predict_bldpred(ws, x0, y0, stride),
+ IntraMode::RD => predict_brdpred(ws, x0, y0, stride),
+ IntraMode::VR => predict_bvrpred(ws, x0, y0, stride),
+ IntraMode::VL => predict_bvlpred(ws, x0, y0, stride),
+ IntraMode::HD => predict_bhdpred(ws, x0, y0, stride),
+ IntraMode::HU => predict_bhupred(ws, x0, y0, stride),
+ }
+
+ let rb: &[i32; 16] = resdata[i * 16..][..16].try_into().unwrap();
+ add_residue(ws, rb, y0, x0, stride);
+ }
+ }
+}
+
+fn predict_vpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) {
+ for y in 0usize..size {
+ for x in 0usize..size {
+ a[(x + x0) + stride * (y + y0)] = a[(x + x0) + stride * (y0 + y - 1)];
+ }
+ }
+}
+
+fn predict_hpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) {
+ for y in 0usize..size {
+ for x in 0usize..size {
+ a[(x + x0) + stride * (y + y0)] = a[(x + x0 - 1) + stride * (y0 + y)];
+ }
+ }
+}
+
+fn predict_dcpred(a: &mut [u8], size: usize, stride: usize, above: bool, left: bool) {
+ let mut sum = 0;
+ let mut shf = if size == 8 { 2 } else { 3 };
+
+ if left {
+ for y in 0usize..size {
+ sum += u32::from(a[(y + 1) * stride]);
+ }
+
+ shf += 1;
+ }
+
+ if above {
+ for x in 0usize..size {
+ sum += u32::from(a[x + 1]);
+ }
+
+ shf += 1;
+ }
+
+ let dcval = if !left && !above {
+ 128
+ } else {
+ (sum + (1 << (shf - 1))) >> shf
+ };
+
+ for y in 0usize..size {
+ for x in 0usize..size {
+ a[(x + 1) + stride * (y + 1)] = dcval as u8;
+ }
+ }
+}
+
+fn predict_tmpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) {
+ for y in 0usize..size {
+ for x in 0usize..size {
+ let pred = i32::from(a[(y0 + y) * stride + x0 - 1])
+ + i32::from(a[(y0 - 1) * stride + x0 + x])
+ - i32::from(a[(y0 - 1) * stride + x0 - 1]);
+
+ a[(x + x0) + stride * (y + y0)] = clamp(pred, 0, 255) as u8;
+ }
+ }
+}
+
+fn predict_bdcpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let mut v = 4;
+ for i in 0usize..4 {
+ v += u32::from(a[(y0 + i) * stride + x0 - 1]) + u32::from(a[(y0 - 1) * stride + x0 + i]);
+ }
+
+ v >>= 3;
+ for y in 0usize..4 {
+ for x in 0usize..4 {
+ a[x + x0 + stride * (y + y0)] = v as u8;
+ }
+ }
+}
+
+fn topleft_pixel(a: &[u8], x0: usize, y0: usize, stride: usize) -> u8 {
+ a[(y0 - 1) * stride + x0 - 1]
+}
+
+fn top_pixels(a: &[u8], x0: usize, y0: usize, stride: usize) -> (u8, u8, u8, u8, u8, u8, u8, u8) {
+ let pos = (y0 - 1) * stride + x0;
+ let a_slice = &a[pos..pos + 8];
+ let a0 = a_slice[0];
+ let a1 = a_slice[1];
+ let a2 = a_slice[2];
+ let a3 = a_slice[3];
+ let a4 = a_slice[4];
+ let a5 = a_slice[5];
+ let a6 = a_slice[6];
+ let a7 = a_slice[7];
+
+ (a0, a1, a2, a3, a4, a5, a6, a7)
+}
+
+fn left_pixels(a: &[u8], x0: usize, y0: usize, stride: usize) -> (u8, u8, u8, u8) {
+ let l0 = a[y0 * stride + x0 - 1];
+ let l1 = a[(y0 + 1) * stride + x0 - 1];
+ let l2 = a[(y0 + 2) * stride + x0 - 1];
+ let l3 = a[(y0 + 3) * stride + x0 - 1];
+
+ (l0, l1, l2, l3)
+}
+
+fn edge_pixels(
+ a: &[u8],
+ x0: usize,
+ y0: usize,
+ stride: usize,
+) -> (u8, u8, u8, u8, u8, u8, u8, u8, u8) {
+ let pos = (y0 - 1) * stride + x0 - 1;
+ let a_slice = &a[pos..=pos + 4];
+ let e0 = a[pos + 4 * stride];
+ let e1 = a[pos + 3 * stride];
+ let e2 = a[pos + 2 * stride];
+ let e3 = a[pos + stride];
+ let e4 = a_slice[0];
+ let e5 = a_slice[1];
+ let e6 = a_slice[2];
+ let e7 = a_slice[3];
+ let e8 = a_slice[4];
+
+ (e0, e1, e2, e3, e4, e5, e6, e7, e8)
+}
+
+fn predict_bvepred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let p = topleft_pixel(a, x0, y0, stride);
+ let (a0, a1, a2, a3, a4, _, _, _) = top_pixels(a, x0, y0, stride);
+ let avg_1 = avg3(p, a0, a1);
+ let avg_2 = avg3(a0, a1, a2);
+ let avg_3 = avg3(a1, a2, a3);
+ let avg_4 = avg3(a2, a3, a4);
+
+ let avg = [avg_1, avg_2, avg_3, avg_4];
+
+ let mut pos = y0 * stride + x0;
+ for _ in 0..4 {
+ a[pos..=pos + 3].copy_from_slice(&avg);
+ pos += stride;
+ }
+}
+
+fn predict_bhepred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let p = topleft_pixel(a, x0, y0, stride);
+ let (l0, l1, l2, l3) = left_pixels(a, x0, y0, stride);
+
+ let avgs = [
+ avg3(p, l0, l1),
+ avg3(l0, l1, l2),
+ avg3(l1, l2, l3),
+ avg3(l2, l3, l3),
+ ];
+
+ let mut pos = y0 * stride + x0;
+ for &avg in avgs.iter() {
+ for a_p in a[pos..=pos + 3].iter_mut() {
+ *a_p = avg;
+ }
+ pos += stride;
+ }
+}
+
+fn predict_bldpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (a0, a1, a2, a3, a4, a5, a6, a7) = top_pixels(a, x0, y0, stride);
+
+ let avgs = [
+ avg3(a0, a1, a2),
+ avg3(a1, a2, a3),
+ avg3(a2, a3, a4),
+ avg3(a3, a4, a5),
+ avg3(a4, a5, a6),
+ avg3(a5, a6, a7),
+ avg3(a6, a7, a7),
+ ];
+
+ let mut pos = y0 * stride + x0;
+
+ for i in 0..4 {
+ a[pos..=pos + 3].copy_from_slice(&avgs[i..=i + 3]);
+ pos += stride;
+ }
+}
+
+fn predict_brdpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (e0, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(a, x0, y0, stride);
+
+ let avgs = [
+ avg3(e0, e1, e2),
+ avg3(e1, e2, e3),
+ avg3(e2, e3, e4),
+ avg3(e3, e4, e5),
+ avg3(e4, e5, e6),
+ avg3(e5, e6, e7),
+ avg3(e6, e7, e8),
+ ];
+ let mut pos = y0 * stride + x0;
+
+ for i in 0..4 {
+ a[pos..=pos + 3].copy_from_slice(&avgs[3 - i..7 - i]);
+ pos += stride;
+ }
+}
+
+fn predict_bvrpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (_, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(a, x0, y0, stride);
+
+ a[(y0 + 3) * stride + x0] = avg3(e1, e2, e3);
+ a[(y0 + 2) * stride + x0] = avg3(e2, e3, e4);
+ a[(y0 + 3) * stride + x0 + 1] = avg3(e3, e4, e5);
+ a[(y0 + 1) * stride + x0] = avg3(e3, e4, e5);
+ a[(y0 + 2) * stride + x0 + 1] = avg2(e4, e5);
+ a[y0 * stride + x0] = avg2(e4, e5);
+ a[(y0 + 3) * stride + x0 + 2] = avg3(e4, e5, e6);
+ a[(y0 + 1) * stride + x0 + 1] = avg3(e4, e5, e6);
+ a[(y0 + 2) * stride + x0 + 2] = avg2(e5, e6);
+ a[y0 * stride + x0 + 1] = avg2(e5, e6);
+ a[(y0 + 3) * stride + x0 + 3] = avg3(e5, e6, e7);
+ a[(y0 + 1) * stride + x0 + 2] = avg3(e5, e6, e7);
+ a[(y0 + 2) * stride + x0 + 3] = avg2(e6, e7);
+ a[y0 * stride + x0 + 2] = avg2(e6, e7);
+ a[(y0 + 1) * stride + x0 + 3] = avg3(e6, e7, e8);
+ a[y0 * stride + x0 + 3] = avg2(e7, e8);
+}
+
+fn predict_bvlpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (a0, a1, a2, a3, a4, a5, a6, a7) = top_pixels(a, x0, y0, stride);
+
+ a[y0 * stride + x0] = avg2(a0, a1);
+ a[(y0 + 1) * stride + x0] = avg3(a0, a1, a2);
+ a[(y0 + 2) * stride + x0] = avg2(a1, a2);
+ a[y0 * stride + x0 + 1] = avg2(a1, a2);
+ a[(y0 + 1) * stride + x0 + 1] = avg3(a1, a2, a3);
+ a[(y0 + 3) * stride + x0] = avg3(a1, a2, a3);
+ a[(y0 + 2) * stride + x0 + 1] = avg2(a2, a3);
+ a[y0 * stride + x0 + 2] = avg2(a2, a3);
+ a[(y0 + 3) * stride + x0 + 1] = avg3(a2, a3, a4);
+ a[(y0 + 1) * stride + x0 + 2] = avg3(a2, a3, a4);
+ a[(y0 + 2) * stride + x0 + 2] = avg2(a3, a4);
+ a[y0 * stride + x0 + 3] = avg2(a3, a4);
+ a[(y0 + 3) * stride + x0 + 2] = avg3(a3, a4, a5);
+ a[(y0 + 1) * stride + x0 + 3] = avg3(a3, a4, a5);
+ a[(y0 + 2) * stride + x0 + 3] = avg3(a4, a5, a6);
+ a[(y0 + 3) * stride + x0 + 3] = avg3(a5, a6, a7);
+}
+
+fn predict_bhdpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (e0, e1, e2, e3, e4, e5, e6, e7, _) = edge_pixels(a, x0, y0, stride);
+
+ a[(y0 + 3) * stride + x0] = avg2(e0, e1);
+ a[(y0 + 3) * stride + x0 + 1] = avg3(e0, e1, e2);
+ a[(y0 + 2) * stride + x0] = avg2(e1, e2);
+ a[(y0 + 3) * stride + x0 + 2] = avg2(e1, e2);
+ a[(y0 + 2) * stride + x0 + 1] = avg3(e1, e2, e3);
+ a[(y0 + 3) * stride + x0 + 3] = avg3(e1, e2, e3);
+ a[(y0 + 2) * stride + x0 + 2] = avg2(e2, e3);
+ a[(y0 + 1) * stride + x0] = avg2(e2, e3);
+ a[(y0 + 2) * stride + x0 + 3] = avg3(e2, e3, e4);
+ a[(y0 + 1) * stride + x0 + 1] = avg3(e2, e3, e4);
+ a[(y0 + 1) * stride + x0 + 2] = avg2(e3, e4);
+ a[y0 * stride + x0] = avg2(e3, e4);
+ a[(y0 + 1) * stride + x0 + 3] = avg3(e3, e4, e5);
+ a[y0 * stride + x0 + 1] = avg3(e3, e4, e5);
+ a[y0 * stride + x0 + 2] = avg3(e4, e5, e6);
+ a[y0 * stride + x0 + 3] = avg3(e5, e6, e7);
+}
+
+fn predict_bhupred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (l0, l1, l2, l3) = left_pixels(a, x0, y0, stride);
+
+ a[y0 * stride + x0] = avg2(l0, l1);
+ a[y0 * stride + x0 + 1] = avg3(l0, l1, l2);
+ a[y0 * stride + x0 + 2] = avg2(l1, l2);
+ a[(y0 + 1) * stride + x0] = avg2(l1, l2);
+ a[y0 * stride + x0 + 3] = avg3(l1, l2, l3);
+ a[(y0 + 1) * stride + x0 + 1] = avg3(l1, l2, l3);
+ a[(y0 + 1) * stride + x0 + 2] = avg2(l2, l3);
+ a[(y0 + 2) * stride + x0] = avg2(l2, l3);
+ a[(y0 + 1) * stride + x0 + 3] = avg3(l2, l3, l3);
+ a[(y0 + 2) * stride + x0 + 1] = avg3(l2, l3, l3);
+ a[(y0 + 2) * stride + x0 + 2] = l3;
+ a[(y0 + 2) * stride + x0 + 3] = l3;
+ a[(y0 + 3) * stride + x0] = l3;
+ a[(y0 + 3) * stride + x0 + 1] = l3;
+ a[(y0 + 3) * stride + x0 + 2] = l3;
+ a[(y0 + 3) * stride + x0 + 3] = l3;
+}
+
+#[cfg(test)]
+mod test {
+
+ #[cfg(feature = "benchmarks")]
+ extern crate test;
+ use super::{
+ add_residue, avg2, avg3, edge_pixels, predict_bhepred, predict_bldpred, predict_brdpred,
+ predict_bvepred, top_pixels,
+ };
+ #[cfg(feature = "benchmarks")]
+ use super::{predict_4x4, IntraMode};
+ #[cfg(feature = "benchmarks")]
+ use test::{black_box, Bencher};
+
+ #[cfg(feature = "benchmarks")]
+ const W: usize = 256;
+ #[cfg(feature = "benchmarks")]
+ const H: usize = 256;
+
+ #[cfg(feature = "benchmarks")]
+ fn make_sample_image() -> Vec<u8> {
+ let mut v = Vec::with_capacity((W * H * 4) as usize);
+ for c in 0u8..=255 {
+ for k in 0u8..=255 {
+ v.push(c);
+ v.push(0);
+ v.push(0);
+ v.push(k);
+ }
+ }
+ v
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_predict_4x4(b: &mut Bencher) {
+ let mut v = black_box(make_sample_image());
+
+ let res_data = vec![1i32; W * H * 4];
+ let modes = [
+ IntraMode::TM,
+ IntraMode::VE,
+ IntraMode::HE,
+ IntraMode::DC,
+ IntraMode::LD,
+ IntraMode::RD,
+ IntraMode::VR,
+ IntraMode::VL,
+ IntraMode::HD,
+ IntraMode::HU,
+ IntraMode::TM,
+ IntraMode::VE,
+ IntraMode::HE,
+ IntraMode::DC,
+ IntraMode::LD,
+ IntraMode::RD,
+ ];
+
+ b.iter(|| {
+ black_box(predict_4x4(&mut v, W * 2, &modes, &res_data));
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_predict_bvepred(b: &mut Bencher) {
+ let mut v = make_sample_image();
+
+ b.iter(|| {
+ predict_bvepred(black_box(&mut v), 5, 5, W * 2);
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_predict_bldpred(b: &mut Bencher) {
+ let mut v = black_box(make_sample_image());
+
+ b.iter(|| {
+ black_box(predict_bldpred(black_box(&mut v), 5, 5, W * 2));
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_predict_brdpred(b: &mut Bencher) {
+ let mut v = black_box(make_sample_image());
+
+ b.iter(|| {
+ black_box(predict_brdpred(black_box(&mut v), 5, 5, W * 2));
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_predict_bhepred(b: &mut Bencher) {
+ let mut v = black_box(make_sample_image());
+
+ b.iter(|| {
+ black_box(predict_bhepred(black_box(&mut v), 5, 5, W * 2));
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_top_pixels(b: &mut Bencher) {
+ let v = black_box(make_sample_image());
+
+ b.iter(|| {
+ black_box(top_pixels(black_box(&v), 5, 5, W * 2));
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_edge_pixels(b: &mut Bencher) {
+ let v = black_box(make_sample_image());
+
+ b.iter(|| {
+ black_box(edge_pixels(black_box(&v), 5, 5, W * 2));
+ });
+ }
+
+ #[test]
+ fn test_avg2() {
+ for i in 0u8..=255 {
+ for j in 0u8..=255 {
+ let ceil_avg = ((i as f32) + (j as f32)) / 2.0;
+ let ceil_avg = ceil_avg.ceil() as u8;
+ assert_eq!(
+ ceil_avg,
+ avg2(i, j),
+ "avg2({}, {}), expected {}, got {}.",
+ i,
+ j,
+ ceil_avg,
+ avg2(i, j)
+ );
+ }
+ }
+ }
+
+ #[test]
+ fn test_avg2_specific() {
+ assert_eq!(
+ 255,
+ avg2(255, 255),
+ "avg2(255, 255), expected 255, got {}.",
+ avg2(255, 255)
+ );
+ assert_eq!(1, avg2(1, 1), "avg2(1, 1), expected 1, got {}.", avg2(1, 1));
+ assert_eq!(2, avg2(2, 1), "avg2(2, 1), expected 2, got {}.", avg2(2, 1));
+ }
+
+ #[test]
+ fn test_avg3() {
+ for i in 0u8..=255 {
+ for j in 0u8..=255 {
+ for k in 0u8..=255 {
+ let floor_avg = ((i as f32) + 2.0 * (j as f32) + { k as f32 } + 2.0) / 4.0;
+ let floor_avg = floor_avg.floor() as u8;
+ assert_eq!(
+ floor_avg,
+ avg3(i, j, k),
+ "avg3({}, {}, {}), expected {}, got {}.",
+ i,
+ j,
+ k,
+ floor_avg,
+ avg3(i, j, k)
+ );
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_edge_pixels() {
+ #[rustfmt::skip]
+ let im = vec![5, 6, 7, 8, 9,
+ 4, 0, 0, 0, 0,
+ 3, 0, 0, 0, 0,
+ 2, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0];
+ let (e0, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(&im, 1, 1, 5);
+ assert_eq!(e0, 1);
+ assert_eq!(e1, 2);
+ assert_eq!(e2, 3);
+ assert_eq!(e3, 4);
+ assert_eq!(e4, 5);
+ assert_eq!(e5, 6);
+ assert_eq!(e6, 7);
+ assert_eq!(e7, 8);
+ assert_eq!(e8, 9);
+ }
+
+ #[test]
+ fn test_top_pixels() {
+ #[rustfmt::skip]
+ let im = vec![1, 2, 3, 4, 5, 6, 7, 8,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0];
+ let (e0, e1, e2, e3, e4, e5, e6, e7) = top_pixels(&im, 0, 1, 8);
+ assert_eq!(e0, 1);
+ assert_eq!(e1, 2);
+ assert_eq!(e2, 3);
+ assert_eq!(e3, 4);
+ assert_eq!(e4, 5);
+ assert_eq!(e5, 6);
+ assert_eq!(e6, 7);
+ assert_eq!(e7, 8);
+ }
+
+ #[test]
+ fn test_add_residue() {
+ let mut pblock = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
+ let rblock = [
+ -1, -2, -3, -4, 250, 249, 248, 250, -10, -18, -192, -17, -3, 15, 18, 9,
+ ];
+ let expected: [u8; 16] = [0, 0, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 10, 29, 33, 25];
+
+ add_residue(&mut pblock, &rblock, 0, 0, 4);
+
+ for (&e, &i) in expected.iter().zip(&pblock) {
+ assert_eq!(e, i);
+ }
+ }
+
+ #[test]
+ fn test_predict_bhepred() {
+ #[rustfmt::skip]
+ let expected: Vec<u8> = vec![5, 0, 0, 0, 0,
+ 4, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1];
+
+ #[rustfmt::skip]
+ let mut im = vec![5, 0, 0, 0, 0,
+ 4, 0, 0, 0, 0,
+ 3, 0, 0, 0, 0,
+ 2, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0];
+ predict_bhepred(&mut im, 1, 1, 5);
+ for (&e, i) in expected.iter().zip(im) {
+ assert_eq!(e, i);
+ }
+ }
+
+ #[test]
+ fn test_predict_brdpred() {
+ #[rustfmt::skip]
+ let expected: Vec<u8> = vec![5, 6, 7, 8, 9,
+ 4, 5, 6, 7, 8,
+ 3, 4, 5, 6, 7,
+ 2, 3, 4, 5, 6,
+ 1, 2, 3, 4, 5];
+
+ #[rustfmt::skip]
+ let mut im = vec![5, 6, 7, 8, 9,
+ 4, 0, 0, 0, 0,
+ 3, 0, 0, 0, 0,
+ 2, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0];
+ predict_brdpred(&mut im, 1, 1, 5);
+ for (&e, i) in expected.iter().zip(im) {
+ assert_eq!(e, i);
+ }
+ }
+
+ #[test]
+ fn test_predict_bldpred() {
+ #[rustfmt::skip]
+ let mut im: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0];
+ let avg_1 = 2u8;
+ let avg_2 = 3u8;
+ let avg_3 = 4u8;
+ let avg_4 = 5u8;
+ let avg_5 = 6u8;
+ let avg_6 = 7u8;
+ let avg_7 = 8u8;
+
+ predict_bldpred(&mut im, 0, 1, 8);
+
+ assert_eq!(im[8], avg_1);
+ assert_eq!(im[9], avg_2);
+ assert_eq!(im[10], avg_3);
+ assert_eq!(im[11], avg_4);
+ assert_eq!(im[16], avg_2);
+ assert_eq!(im[17], avg_3);
+ assert_eq!(im[18], avg_4);
+ assert_eq!(im[19], avg_5);
+ assert_eq!(im[24], avg_3);
+ assert_eq!(im[25], avg_4);
+ assert_eq!(im[26], avg_5);
+ assert_eq!(im[27], avg_6);
+ assert_eq!(im[32], avg_4);
+ assert_eq!(im[33], avg_5);
+ assert_eq!(im[34], avg_6);
+ assert_eq!(im[35], avg_7);
+ }
+
+ #[test]
+ fn test_predict_bvepred() {
+ #[rustfmt::skip]
+ let mut im: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0];
+ let avg_1 = 2u8;
+ let avg_2 = 3u8;
+ let avg_3 = 4u8;
+ let avg_4 = 5u8;
+
+ predict_bvepred(&mut im, 1, 1, 9);
+
+ assert_eq!(im[10], avg_1);
+ assert_eq!(im[11], avg_2);
+ assert_eq!(im[12], avg_3);
+ assert_eq!(im[13], avg_4);
+ assert_eq!(im[19], avg_1);
+ assert_eq!(im[20], avg_2);
+ assert_eq!(im[21], avg_3);
+ assert_eq!(im[22], avg_4);
+ assert_eq!(im[28], avg_1);
+ assert_eq!(im[29], avg_2);
+ assert_eq!(im[30], avg_3);
+ assert_eq!(im[31], avg_4);
+ assert_eq!(im[37], avg_1);
+ assert_eq!(im[38], avg_2);
+ assert_eq!(im[39], avg_3);
+ assert_eq!(im[40], avg_4);
+ }
+}