//! Decoding and Encoding of PNG Images
//!
//! PNG (Portable Network Graphics) is an image format that supports lossless compression.
//!
//! # Related Links
//! * <http://www.w3.org/TR/PNG/> - The PNG Specification
//!
use std::convert::TryFrom;
use std::fmt;
use std::io::{self, Read, Write};
use num_rational::Ratio;
use png::{BlendOp, DisposeOp};
use crate::animation::{Delay, Frame, Frames};
use crate::color::{Blend, ColorType, ExtendedColorType};
use crate::error::{
DecodingError, EncodingError, ImageError, ImageResult, LimitError, LimitErrorKind,
ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind,
};
use crate::image::{AnimationDecoder, ImageDecoder, ImageEncoder, ImageFormat};
use crate::io::Limits;
use crate::{DynamicImage, GenericImage, ImageBuffer, Luma, LumaA, Rgb, Rgba, RgbaImage};
// http://www.w3.org/TR/PNG-Structure.html
// The first eight bytes of a PNG file always contain the following (decimal) values:
pub(crate) const PNG_SIGNATURE: [u8; 8] = [137, 80, 78, 71, 13, 10, 26, 10];
/// PNG reader
///
/// This reader will try to read the PNG one row at a time;
/// for interlaced PNG files this is not possible, so those
/// are read all at once instead.
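///
/// A minimal usage sketch: the reader is obtained via `ImageDecoder::into_reader`. The
/// re-export paths `image::ImageDecoder` and `image::codecs::png::PngDecoder` and the file
/// name are assumptions for illustration.
///
/// ```no_run
/// use std::fs::File;
/// use std::io::{BufReader, Read};
///
/// use image::codecs::png::PngDecoder;
/// use image::ImageDecoder;
///
/// // "image.png" is a placeholder path.
/// let decoder = PngDecoder::new(BufReader::new(File::open("image.png")?))?;
/// let mut reader = decoder.into_reader()?;
///
/// // Drain the decoded (decompressed and unfiltered) image bytes.
/// let mut bytes = Vec::new();
/// reader.read_to_end(&mut bytes)?;
/// # Ok::<(), image::ImageError>(())
/// ```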
pub struct PngReader<R: Read> {
    reader: png::Reader<R>,
    buffer: Vec<u8>,
    index: usize,
}
impl<R: Read> PngReader<R> {
    fn new(mut reader: png::Reader<R>) -> ImageResult<PngReader<R>> {
let len = reader.output_buffer_size();
// Since interlaced images do not come in
// scanline order it is almost impossible to
// read them in a streaming fashion, however
        // this shouldn't be too big of a problem
// as most interlaced images should fit in memory.
let buffer = if reader.info().interlaced {
let mut buffer = vec![0; len];
reader
.next_frame(&mut buffer)
.map_err(ImageError::from_png)?;
buffer
} else {
Vec::new()
};
Ok(PngReader {
reader,
buffer,
index: 0,
})
}
}
impl<R: Read> Read for PngReader<R> {
    fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> {
// io::Write::write for slice cannot fail
        let bytes_read = buf.write(&self.buffer[self.index..]).unwrap();
        let mut bytes = bytes_read;
        self.index += bytes_read;
while self.index >= self.buffer.len() {
match self.reader.next_row()? {
Some(row) => {
                    // Copying straight into the caller's buffer is faster than
                    // going through `self.buffer` first.
                    let bytes_read = buf.write(row.data()).unwrap();
                    bytes += bytes_read;
                    self.buffer = row.data()[bytes_read..].to_owned();
self.index = 0;
}
None => return Ok(bytes),
}
}
Ok(bytes)
}
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
let mut bytes = self.buffer.len();
if buf.is_empty() {
std::mem::swap(&mut self.buffer, buf);
} else {
buf.extend_from_slice(&self.buffer);
self.buffer.clear();
}
self.index = 0;
while let Some(row) = self.reader.next_row()? {
buf.extend_from_slice(row.data());
bytes += row.data().len();
}
Ok(bytes)
}
}
/// PNG decoder
pub struct PngDecoder<R: Read> {
    color_type: ColorType,
    reader: png::Reader<R>,
}
}
impl<R: Read> PngDecoder<R> {
    /// Creates a new decoder that decodes from the stream `r`.
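    ///
    /// A minimal usage sketch; the re-export path `image::codecs::png::PngDecoder` and the
    /// file name are assumptions for illustration.
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::BufReader;
    ///
    /// use image::codecs::png::PngDecoder;
    /// use image::DynamicImage;
    ///
    /// // "image.png" is a placeholder path.
    /// let decoder = PngDecoder::new(BufReader::new(File::open("image.png")?))?;
    /// let _image = DynamicImage::from_decoder(decoder)?;
    /// # Ok::<(), image::ImageError>(())
    /// ```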
    pub fn new(r: R) -> ImageResult<PngDecoder<R>> {
Self::with_limits(r, Limits::default())
}
    /// Creates a new decoder that decodes from the stream `r` with the given limits.
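    ///
    /// A minimal sketch of tightening the limits before decoding; the re-export paths
    /// `image::codecs::png::PngDecoder` and `image::io::Limits`, the limit values, and the
    /// file name are assumptions for illustration.
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::BufReader;
    ///
    /// use image::codecs::png::PngDecoder;
    /// use image::io::Limits;
    ///
    /// // Illustrative limits; pick values that suit the application.
    /// let mut limits = Limits::default();
    /// limits.max_image_width = Some(4096);
    /// limits.max_image_height = Some(4096);
    /// limits.max_alloc = Some(64 * 1024 * 1024); // 64 MiB
    ///
    /// // "large.png" is a placeholder path.
    /// let _decoder = PngDecoder::with_limits(BufReader::new(File::open("large.png")?), limits)?;
    /// # Ok::<(), image::ImageError>(())
    /// ```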
    pub fn with_limits(r: R, limits: Limits) -> ImageResult<PngDecoder<R>> {
limits.check_support(&crate::io::LimitSupport::default())?;
let max_bytes = usize::try_from(limits.max_alloc.unwrap_or(u64::MAX)).unwrap_or(usize::MAX);
let mut decoder = png::Decoder::new_with_limits(r, png::Limits { bytes: max_bytes });
let info = decoder.read_header_info().map_err(ImageError::from_png)?;
limits.check_dimensions(info.width, info.height)?;
// By default the PNG decoder will scale 16 bpc to 8 bpc, so custom
// transformations must be set. EXPAND preserves the default behavior
// expanding bpc < 8 to 8 bpc.
decoder.set_transformations(png::Transformations::EXPAND);
let reader = decoder.read_info().map_err(ImageError::from_png)?;
let (color_type, bits) = reader.output_color_type();
let color_type = match (color_type, bits) {
(png::ColorType::Grayscale, png::BitDepth::Eight) => ColorType::L8,
(png::ColorType::Grayscale, png::BitDepth::Sixteen) => ColorType::L16,
(png::ColorType::GrayscaleAlpha, png::BitDepth::Eight) => ColorType::La8,
(png::ColorType::GrayscaleAlpha, png::BitDepth::Sixteen) => ColorType::La16,
(png::ColorType::Rgb, png::BitDepth::Eight) => ColorType::Rgb8,
(png::ColorType::Rgb, png::BitDepth::Sixteen) => ColorType::Rgb16,
(png::ColorType::Rgba, png::BitDepth::Eight) => ColorType::Rgba8,
(png::ColorType::Rgba, png::BitDepth::Sixteen) => ColorType::Rgba16,
(png::ColorType::Grayscale, png::BitDepth::One) => {
return Err(unsupported_color(ExtendedColorType::L1))
}
(png::ColorType::GrayscaleAlpha, png::BitDepth::One) => {
return Err(unsupported_color(ExtendedColorType::La1))
}
(png::ColorType::Rgb, png::BitDepth::One) => {
return Err(unsupported_color(ExtendedColorType::Rgb1))
}
(png::ColorType::Rgba, png::BitDepth::One) => {
return Err(unsupported_color(ExtendedColorType::Rgba1))
}
(png::ColorType::Grayscale, png::BitDepth::Two) => {
return Err(unsupported_color(ExtendedColorType::L2))
}
(png::ColorType::GrayscaleAlpha, png::BitDepth::Two) => {
return Err(unsupported_color(ExtendedColorType::La2))
}
(png::ColorType::Rgb, png::BitDepth::Two) => {
return Err(unsupported_color(ExtendedColorType::Rgb2))
}
(png::ColorType::Rgba, png::BitDepth::Two) => {
return Err(unsupported_color(ExtendedColorType::Rgba2))
}
(png::ColorType::Grayscale, png::BitDepth::Four) => {
return Err(unsupported_color(ExtendedColorType::L4))
}
(png::ColorType::GrayscaleAlpha, png::BitDepth::Four) => {
return Err(unsupported_color(ExtendedColorType::La4))
}
(png::ColorType::Rgb, png::BitDepth::Four) => {
return Err(unsupported_color(ExtendedColorType::Rgb4))
}
(png::ColorType::Rgba, png::BitDepth::Four) => {
return Err(unsupported_color(ExtendedColorType::Rgba4))
}
(png::ColorType::Indexed, bits) => {
return Err(unsupported_color(ExtendedColorType::Unknown(bits as u8)))
}
};
Ok(PngDecoder { color_type, reader })
}
/// Turn this into an iterator over the animation frames.
///
    /// Reading the complete animation requires more memory than reading the data from the IDAT
    /// frame, as multiple frame buffers need to be reserved at the same time. We further do not
    /// support compositing 16-bit colors. In any case this would be lossy, as the interface of
    /// animation decoders does not support 16-bit colors.
///
/// If something is not supported or a limit is violated then the decoding step that requires
/// them will fail and an error will be returned instead of the frame. No further frames will
/// be returned.
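    ///
    /// A minimal sketch of collecting all frames eagerly; the re-export paths
    /// `image::AnimationDecoder` and `image::codecs::png::PngDecoder` and the file name are
    /// assumptions for illustration.
    ///
    /// ```no_run
    /// use std::fs::File;
    /// use std::io::BufReader;
    ///
    /// use image::codecs::png::PngDecoder;
    /// use image::AnimationDecoder;
    ///
    /// // "animation.png" is a placeholder path.
    /// let decoder = PngDecoder::new(BufReader::new(File::open("animation.png")?))?;
    /// if decoder.is_apng() {
    ///     // Fails with an error if any frame cannot be decoded or composited.
    ///     let frames = decoder.apng().into_frames().collect_frames()?;
    ///     println!("decoded {} frames", frames.len());
    /// }
    /// # Ok::<(), image::ImageError>(())
    /// ```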
    pub fn apng(self) -> ApngDecoder<R> {
ApngDecoder::new(self)
}
    /// Returns whether the image contains an animation.
///
    /// Note that the file itself decides whether the default image is considered to be part of
    /// the animation. When it is not, the common interpretation is to use it as a thumbnail.
///
/// If a non-animated image is converted into an `ApngDecoder` then its iterator is empty.
pub fn is_apng(&self) -> bool {
self.reader.info().animation_control.is_some()
}
}
fn unsupported_color(ect: ExtendedColorType) -> ImageError {
ImageError::Unsupported(UnsupportedError::from_format_and_kind(
ImageFormat::Png.into(),
UnsupportedErrorKind::Color(ect),
))
}
impl<'a, R: 'a + Read> ImageDecoder<'a> for PngDecoder<R> {
    type Reader = PngReader<R>;
fn dimensions(&self) -> (u32, u32) {
self.reader.info().size()
}
fn color_type(&self) -> ColorType {
self.color_type
}
    fn icc_profile(&mut self) -> Option<Vec<u8>> {
self.reader.info().icc_profile.as_ref().map(|x| x.to_vec())
}
    fn into_reader(self) -> ImageResult<Self::Reader> {
PngReader::new(self.reader)
}
fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
use byteorder::{BigEndian, ByteOrder, NativeEndian};
assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
self.reader.next_frame(buf).map_err(ImageError::from_png)?;
// PNG images are big endian. For 16 bit per channel and larger types,
// the buffer may need to be reordered to native endianness per the
// contract of `read_image`.
// TODO: assumes equal channel bit depth.
let bpc = self.color_type().bytes_per_pixel() / self.color_type().channel_count();
match bpc {
            1 => (), // No reordering necessary for u8
2 => buf.chunks_mut(2).for_each(|c| {
let v = BigEndian::read_u16(c);
NativeEndian::write_u16(c, v)
}),
_ => unreachable!(),
}
Ok(())
}
fn scanline_bytes(&self) -> u64 {
let width = self.reader.info().width;
self.reader.output_line_size(width) as u64
}
}
/// An [`AnimationDecoder`] adapter of [`PngDecoder`].
///
/// See [`PngDecoder::apng`] for more information.
///
/// [`AnimationDecoder`]: ../trait.AnimationDecoder.html
/// [`PngDecoder`]: struct.PngDecoder.html
/// [`PngDecoder::apng`]: struct.PngDecoder.html#method.apng
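///
/// A minimal sketch of iterating the frames lazily and inspecting each frame's delay; the
/// re-export paths `image::AnimationDecoder` and `image::codecs::png::PngDecoder` and the
/// file name are assumptions for illustration.
///
/// ```no_run
/// use std::fs::File;
/// use std::io::BufReader;
///
/// use image::codecs::png::PngDecoder;
/// use image::AnimationDecoder;
///
/// // "animation.png" is a placeholder path.
/// let decoder = PngDecoder::new(BufReader::new(File::open("animation.png")?))?;
/// for frame in decoder.apng().into_frames() {
///     let frame = frame?;
///     let (numer, denom) = frame.delay().numer_denom_ms();
///     println!("frame delay: {}/{} ms", numer, denom);
/// }
/// # Ok::<(), image::ImageError>(())
/// ```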
pub struct ApngDecoder<R: Read> {
    inner: PngDecoder<R>,
/// The current output buffer.
current: RgbaImage,
/// The previous output buffer, used for dispose op previous.
previous: RgbaImage,
/// The dispose op of the current frame.
dispose: DisposeOp,
    /// The number of frames that are still expected to be decoded.
remaining: u32,
/// The next (first) image is the thumbnail.
has_thumbnail: bool,
}
impl<R: Read> ApngDecoder<R> {
    fn new(inner: PngDecoder<R>) -> Self {
let (width, height) = inner.dimensions();
let info = inner.reader.info();
let remaining = match info.animation_control() {
// The expected number of fcTL in the remaining image.
Some(actl) => actl.num_frames,
None => 0,
};
// If the IDAT has no fcTL then it is not part of the animation counted by
// num_frames. All following fdAT chunks must be preceded by an fcTL
let has_thumbnail = info.frame_control.is_none();
ApngDecoder {
inner,
// TODO: should we delay this allocation? At least if we support limits we should.
current: RgbaImage::new(width, height),
previous: RgbaImage::new(width, height),
dispose: DisposeOp::Background,
remaining,
has_thumbnail,
}
}
    // TODO: thumbnail(&mut self) -> Option<impl ImageDecoder<'_>>
/// Decode one subframe and overlay it on the canvas.
    fn mix_next_frame(&mut self) -> Result<Option<&RgbaImage>, ImageError> {