summaryrefslogtreecommitdiff
path: root/vendor/png
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/png')
-rw-r--r--vendor/png/.cargo-checksum.json1
-rw-r--r--vendor/png/CHANGES.md171
-rw-r--r--vendor/png/Cargo.lock2145
-rw-r--r--vendor/png/Cargo.toml80
-rw-r--r--vendor/png/LICENSE-APACHE201
-rw-r--r--vendor/png/LICENSE-MIT25
-rw-r--r--vendor/png/README.md39
-rw-r--r--vendor/png/benches/README.md6
-rw-r--r--vendor/png/benches/decoder.rs38
-rw-r--r--vendor/png/examples/corpus-bench.rs198
-rw-r--r--vendor/png/examples/png-generate.rs55
-rw-r--r--vendor/png/examples/pngcheck.rs381
-rw-r--r--vendor/png/examples/show.rs198
-rw-r--r--vendor/png/src/chunk.rs98
-rw-r--r--vendor/png/src/common.rs808
-rw-r--r--vendor/png/src/decoder/mod.rs961
-rw-r--r--vendor/png/src/decoder/stream.rs1576
-rw-r--r--vendor/png/src/decoder/zlib.rs212
-rw-r--r--vendor/png/src/encoder.rs2389
-rw-r--r--vendor/png/src/filter.rs801
-rw-r--r--vendor/png/src/lib.rs81
-rw-r--r--vendor/png/src/srgb.rs30
-rw-r--r--vendor/png/src/text_metadata.rs586
-rw-r--r--vendor/png/src/traits.rs43
-rw-r--r--vendor/png/src/utils.rs463
25 files changed, 11586 insertions, 0 deletions
diff --git a/vendor/png/.cargo-checksum.json b/vendor/png/.cargo-checksum.json
new file mode 100644
index 0000000..9c7a0e4
--- /dev/null
+++ b/vendor/png/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGES.md":"40d21212c11de3f25dc0563700d455c1d347daeaac53f9d8dde7dd6450cd7a30","Cargo.lock":"2c9e68ff7a31ae0874dec0b7a72bffd1a68aa79c38c6f1fee15b5c9239cc3da5","Cargo.toml":"bee5f601128d49e9586bf77957638b180fef8808cd097ac1b845b670903a22a2","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"eaf40297c75da471f7cda1f3458e8d91b4b2ec866e609527a13acfa93b638652","README.md":"a87e2bc972068409cff0665241349c4eb72291b33d12ed97a09f97e9a2560655","benches/README.md":"0c60c3d497abdf6c032863aa47da41bc6bb4f5ff696d45dec0e6eb33459b14b0","benches/decoder.rs":"2f5258ae02fdcdd8ca917512df012a9f74893a27936629a6c0db7d555d6fadbb","examples/corpus-bench.rs":"0a9f2a95b0bd72a76dd2cc27d86f960746d5e0e707e2c3efbf281db0998e9e3a","examples/png-generate.rs":"e4cca06b9cc3291b52261d88a2e016940620b613fc1402bb54ccc68f73026a96","examples/pngcheck.rs":"7a5cb4cbb4d166f4337ff69a9e4b16783dce482a56ca56a709bf636d8f3bb981","examples/show.rs":"d5d120e50a8375361f6b265fc77ff9e823d7567e79ad5cc2f162b37f73a98f39","src/chunk.rs":"eff04345e9af621ce51a9141f35657262ee1a891e724332a80ac40eec90a2c45","src/common.rs":"3f2aa7bcebbcfb7ce08e891ae1fbdaadde440aabc178424da4e993e4b794ce18","src/decoder/mod.rs":"67a37100957659999308767f35ab6158e8b86e6df0b77567f0ff31110b39055a","src/decoder/stream.rs":"885b64a639a6101e02612f4f67b178ac2d6be1eabb29d1eedec9770b759e1e02","src/decoder/zlib.rs":"42a31c28feae91b0dd5f410bba6fcfc8eb15b50990b24b79339f13607f9c5215","src/encoder.rs":"004abc4a2cc173d3d379ac86668cc7382b704d57598159cd56af2446705ad864","src/filter.rs":"d72f43620e8ab4865702dc09123496cb08caf1bba839747987de314cdbc6e7c3","src/lib.rs":"78b272d03da14f52254dc2a9c97a138d5a9082f1df3213bf36ec5ee7ab6f1700","src/srgb.rs":"da1609902064016853410633926d316b5289d4bbe1fa469b21f116c1c1b2c18e","src/text_metadata.rs":"e531277c3f1205239d21825aa3dacb8d828e3fa4c257c94af799b0c04d38a8d5","src/traits.rs":"79d357244e493f5174ca11873b0d5c443fd4a5e6e1f7c6df400a1767c5ad05b2","src/utils.rs":"1ddd60fb80e8e301
ba1836dcb5dfd097341c638a018d646c3fc4fad06cad7bc5"},"package":"dd75bf2d8dd3702b9707cdbc56a5b9ef42cec752eb8b3bafc01234558442aa64"} \ No newline at end of file
diff --git a/vendor/png/CHANGES.md b/vendor/png/CHANGES.md
new file mode 100644
index 0000000..6baffb4
--- /dev/null
+++ b/vendor/png/CHANGES.md
@@ -0,0 +1,171 @@
+## Unreleased
+
+## 0.17.10
+
+* Added Transformations::ALPHA
+* Enable encoding pixel dimensions
+
+## 0.17.9
+
+* Fixed a bug in ICC profile decompression.
+* Improved unfilter performance.
+
+## 0.17.8
+
+* Increased MSRV to 1.57.0.
+* Substantially optimized encoding and decoding:
+ - Autovectorize filtering and unfiltering.
+ - Make the "fast" compression preset use fdeflate.
+ - Switch decompression to always use fdeflate.
+ - Updated to miniz_oxide 0.7.
+ - Added an option to ignore checksums.
+* Added corpus-bench example which measures the compression ratio and time to
+ re-encode and subsequently decode a corpus of images.
+* More fuzz testing.
+
+## 0.17.7
+
+* Fixed handling broken tRNS chunk.
+* Updated to miniz_oxide 0.6.
+
+## 0.17.6
+
+* Added `Decoder::read_header_info` to query the information contained in the
+ PNG header.
+* Switched to using the flate2 crate for encoding.
+
+## 0.17.5
+
+* Fixed a regression, introduced by chunk validation, that made the decoder
+ sensitive to the order of `gAMA`, `cHRM`, and `sRGB` chunks.
+
+## 0.17.4
+
+* Added `{Decoder,StreamDecoder}::set_ignore_text_chunk` to disable decoding of
+ ancillary text chunks during the decoding process (chunks decoded by default).
+* Added duplicate chunk checks. The decoder now enforces that standard chunks
+ such as palette, gamma, … occur at most once as specified.
+* Added `#[forbid(unsafe_code)]` again. This may come at a minor performance
+ cost when decoding ASCII text for now.
+* Fixed a bug where decoding of large chunks (>32kB) failed to produce the
+ correct result, or fail the image decoding. As new chunk types are decoded
+ this introduced regressions relative to previous versions.
+
+## 0.17.3
+
+* Fixed a bug where `Writer::finish` would not drop the underlying writer. This
+ would fail to flush and leak memory when using a buffered file writers.
+* Calling `Writer::finish` will now eagerly flush the underlying writer,
+ returning any error that this operation may result in.
+* Errors in inflate are now diagnosed with more details.
+* The color and depth combination is now checked in stream decoder.
+
+## 0.17.2
+
+* Added support for encoding and decoding tEXt/zTXt/iTXt chunks.
+* Added `Encoder::validate_sequence` to enable validation of the written frame
+ sequence, that is, if the number of written images is consistent with the
+ animation state.
+* Validation is now off by default. The basis of the new validation had been
+ introduced in 0.17 but this fixes some cases where this validation was too
+ aggressive compared to previous versions.
+* Added `Writer::finish` to fully check the write of the end of an image
+ instead of silently ignoring potential errors in `Drop`.
+* The `Writer::write_chunk` method now validates that the computed chunk length
+ does not overflow the limit set by PNG.
+* Fix an issue where the library would panic or even abort the process when
+ `flush` or `write` of an underlying writer panicked, or in some other uses of
+ `StreamWriter`.
+
+## 0.17.1
+
+* Fix panic in adaptive filter method `sum_buffer`
+
+## 0.17.0
+
+* Increased MSRV to 1.46.0
+* Rework output info usage
+* Implement APNG encoding
+* Improve ergonomics of encoder set_palette and set_trns methods
+* Make Info struct non-exhaustive
+* Make encoder a core feature
+* Default Transformations to Identity
+* Add Adaptive filtering method for encoding
+* Fix SCREAM_CASE on ColorType variants
+* Forbid unsafe code
+
+## 0.16.7
+
+* Added `Encoder::set_trns` to register a transparency table to be written.
+
+## 0.16.6
+
+* Fixed silent integer overflows in buffer size calculation, resulting in
+ panics from assertions and out-of-bounds accesses when actually decoding.
+ This improves the stability of 32-bit and 16-bit targets and make decoding
+ run as stable as on 64-bit.
+* Reject invalid color/depth combinations. Some would lead to mismatched output
+ buffer size and panics during decoding.
+* Add `Clone` impl for `Info` struct.
+
+## 0.16.5
+
+* Decoding of APNG subframes is now officially supported and specified. Note
+ that dispose ops and positioning in the image need to be done by the caller.
+* Added encoding of indexed data.
+* Switched to `miniz_oxide` for decompressing image data, with 30%-50% speedup
+ in common cases and up to 200% in special ones.
+* Fix accepting images only with consecutive IDAT chunks, rules out data loss.
+
+## 0.16.4
+
+* The fdAT frames are no longer inspected when the main image is read. This
+ would previously be the case for non-interlaced images. This would lead to
+ incorrect failure and, e.g. an error of the form `"invalid filter method"`.
+* Fix always validating the last IDAT-chunks checksum, was sometimes ignored.
+* Prevent encoding color/bit-depth combinations forbidden by the specification.
+* The fixes for APNG/fdAT enable further implementation. The _next_ release is
+ expected to officially support APNG.
+
+## 0.16.3
+
+* Fix encoding with filtering methods Up, Avg, Paeth
+* Optimize decoding throughput by up to +30%
+
+## 0.16.2
+
+* Added method constructing an owned stream encoder.
+
+## 0.16.1
+
+* Addressed files bloating the packed crate
+
+## 0.16.0
+
+* Fix a bug compressing images with deflate
+* Address use of deprecated error interfaces
+
+## 0.15.3
+
+* Fix panic while trying to encode empty images. Such images are no longer
+ accepted and error when calling `write_header` before any data has been
+ written. The specification does not permit empty images.
+
+## 0.15.2
+
+* Fix `EXPAND` transformation to leave bit depths above 8 unchanged
+
+## 0.15.1
+
+* Fix encoding writing invalid chunks. Images written can be corrected: see
+ https://github.com/image-rs/image/issues/1074 for a recovery.
+* Fix a panic in bit unpacking with checked arithmetic (e.g. in debug builds)
+* Added better fuzzer integration
+* Update `term`, `rand` dev-dependency
+* Note: The `show` example program requires a newer compiler than 1.34.2 on
+ some targets due to depending on `glium`. This is not considered a breaking
+ bug.
+
+## 0.15
+
+Begin of changelog
diff --git a/vendor/png/Cargo.lock b/vendor/png/Cargo.lock
new file mode 100644
index 0000000..3532262
--- /dev/null
+++ b/vendor/png/Cargo.lock
@@ -0,0 +1,2145 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "addr2line"
+version = "0.20.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
+[[package]]
+name = "aho-corasick"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "86b8f9420f797f2d9e935edf629310eb938a0d839f984e25327f3c7eed22300c"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "arrayref"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545"
+
+[[package]]
+name = "arrayvec"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi 0.1.19",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "backtrace"
+version = "0.3.68"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12"
+dependencies = [
+ "addr2line",
+ "cc",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+]
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "block"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"
+
+[[package]]
+name = "bumpalo"
+version = "3.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
+
+[[package]]
+name = "bytemuck"
+version = "1.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea"
+
+[[package]]
+name = "calloop"
+version = "0.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "52e0d00eb1ea24371a97d2da6201c6747a633dc6dc1988ef503403b4c59504a8"
+dependencies = [
+ "bitflags",
+ "log",
+ "nix 0.25.1",
+ "slotmap",
+ "thiserror",
+ "vec_map",
+]
+
+[[package]]
+name = "cast"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
+
+[[package]]
+name = "cc"
+version = "1.0.82"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "305fe645edc1442a0fa8b6726ba61d422798d37a52e12eaecf4b022ebbb88f01"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "cgl"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ced0551234e87afee12411d535648dd89d2e7f34c78b753395567aff3d447ff"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "clap"
+version = "2.34.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
+dependencies = [
+ "bitflags",
+ "textwrap 0.11.0",
+ "unicode-width",
+]
+
+[[package]]
+name = "clap"
+version = "3.2.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
+dependencies = [
+ "atty",
+ "bitflags",
+ "clap_derive",
+ "clap_lex",
+ "indexmap 1.9.3",
+ "once_cell",
+ "strsim",
+ "termcolor",
+ "textwrap 0.16.0",
+]
+
+[[package]]
+name = "clap_derive"
+version = "3.2.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008"
+dependencies = [
+ "heck",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
+dependencies = [
+ "os_str_bytes",
+]
+
+[[package]]
+name = "cmake"
+version = "0.1.50"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "cocoa"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f425db7937052c684daec3bd6375c8abe2d146dca4b8b143d6db777c39138f3a"
+dependencies = [
+ "bitflags",
+ "block",
+ "cocoa-foundation",
+ "core-foundation",
+ "core-graphics",
+ "foreign-types 0.3.2",
+ "libc",
+ "objc",
+]
+
+[[package]]
+name = "cocoa-foundation"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "931d3837c286f56e3c58423ce4eba12d08db2374461a785c86f672b08b5650d6"
+dependencies = [
+ "bitflags",
+ "block",
+ "core-foundation",
+ "core-graphics-types",
+ "foreign-types 0.3.2",
+ "libc",
+ "objc",
+]
+
+[[package]]
+name = "core-foundation"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "core-foundation-sys"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa"
+
+[[package]]
+name = "core-graphics"
+version = "0.22.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2581bbab3b8ffc6fcbd550bf46c355135d16e9ff2a6ea032ad6b9bf1d7efe4fb"
+dependencies = [
+ "bitflags",
+ "core-foundation",
+ "core-graphics-types",
+ "foreign-types 0.3.2",
+ "libc",
+]
+
+[[package]]
+name = "core-graphics-types"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2bb142d41022986c1d8ff29103a1411c8a3dfad3552f87a4f8dc50d61d4f4e33"
+dependencies = [
+ "bitflags",
+ "core-foundation",
+ "libc",
+]
+
+[[package]]
+name = "core-text"
+version = "19.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99d74ada66e07c1cefa18f8abfba765b486f250de2e4a999e5727fc0dd4b4a25"
+dependencies = [
+ "core-foundation",
+ "core-graphics",
+ "foreign-types 0.3.2",
+ "libc",
+]
+
+[[package]]
+name = "crc32fast"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "criterion"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f"
+dependencies = [
+ "atty",
+ "cast",
+ "clap 2.34.0",
+ "criterion-plot",
+ "csv",
+ "itertools",
+ "lazy_static",
+ "num-traits",
+ "oorandom",
+ "plotters",
+ "rayon",
+ "regex",
+ "serde",
+ "serde_cbor",
+ "serde_derive",
+ "serde_json",
+ "tinytemplate",
+ "walkdir",
+]
+
+[[package]]
+name = "criterion-plot"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876"
+dependencies = [
+ "cast",
+ "itertools",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
+dependencies = [
+ "cfg-if",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7"
+dependencies = [
+ "autocfg",
+ "cfg-if",
+ "crossbeam-utils",
+ "memoffset 0.9.0",
+ "scopeguard",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "crossfont"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21fd3add36ea31aba1520aa5288714dd63be506106753226d0eb387a93bc9c45"
+dependencies = [
+ "cocoa",
+ "core-foundation",
+ "core-foundation-sys",
+ "core-graphics",
+ "core-text",
+ "dwrote",
+ "foreign-types 0.5.0",
+ "freetype-rs",
+ "libc",
+ "log",
+ "objc",
+ "once_cell",
+ "pkg-config",
+ "servo-fontconfig",
+ "winapi",
+]
+
+[[package]]
+name = "csv"
+version = "1.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "626ae34994d3d8d668f4269922248239db4ae42d538b14c398b74a52208e8086"
+dependencies = [
+ "csv-core",
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "csv-core"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "cty"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b365fabc795046672053e29c954733ec3b05e4be654ab130fe8f1f94d7051f35"
+
+[[package]]
+name = "darling"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c"
+dependencies = [
+ "darling_core",
+ "darling_macro",
+]
+
+[[package]]
+name = "darling_core"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610"
+dependencies = [
+ "fnv",
+ "ident_case",
+ "proc-macro2",
+ "quote",
+ "strsim",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "darling_macro"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835"
+dependencies = [
+ "darling_core",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "dirs-next"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1"
+dependencies = [
+ "cfg-if",
+ "dirs-sys-next",
+]
+
+[[package]]
+name = "dirs-sys-next"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d"
+dependencies = [
+ "libc",
+ "redox_users",
+ "winapi",
+]
+
+[[package]]
+name = "dispatch"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b"
+
+[[package]]
+name = "dlib"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "330c60081dcc4c72131f8eb70510f1ac07223e5d4163db481a04a0befcffa412"
+dependencies = [
+ "libloading 0.8.0",
+]
+
+[[package]]
+name = "downcast-rs"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650"
+
+[[package]]
+name = "dwrote"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "439a1c2ba5611ad3ed731280541d36d2e9c4ac5e7fb818a27b604bdc5a6aa65b"
+dependencies = [
+ "lazy_static",
+ "libc",
+ "serde",
+ "serde_derive",
+ "winapi",
+ "wio",
+]
+
+[[package]]
+name = "either"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07"
+
+[[package]]
+name = "equivalent"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
+
+[[package]]
+name = "expat-sys"
+version = "2.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "658f19728920138342f68408b7cf7644d90d4784353d8ebc32e7e8663dbe45fa"
+dependencies = [
+ "cmake",
+ "pkg-config",
+]
+
+[[package]]
+name = "fdeflate"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d329bdeac514ee06249dabc27877490f17f5d371ec693360768b838e19f3ae10"
+dependencies = [
+ "simd-adler32",
+]
+
+[[package]]
+name = "flate2"
+version = "1.0.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743"
+dependencies = [
+ "crc32fast",
+ "miniz_oxide",
+]
+
+[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
+[[package]]
+name = "foreign-types"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
+dependencies = [
+ "foreign-types-shared 0.1.1",
+]
+
+[[package]]
+name = "foreign-types"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965"
+dependencies = [
+ "foreign-types-macros",
+ "foreign-types-shared 0.3.1",
+]
+
+[[package]]
+name = "foreign-types-macros"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.28",
+]
+
+[[package]]
+name = "foreign-types-shared"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
+
+[[package]]
+name = "foreign-types-shared"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b"
+
+[[package]]
+name = "freetype-rs"
+version = "0.26.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "74eadec9d0a5c28c54bb9882e54787275152a4e36ce206b45d7451384e5bf5fb"
+dependencies = [
+ "bitflags",
+ "freetype-sys",
+ "libc",
+]
+
+[[package]]
+name = "freetype-sys"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a37d4011c0cc628dfa766fcc195454f4b068d7afdc2adfd28861191d866e731a"
+dependencies = [
+ "cmake",
+ "libc",
+ "pkg-config",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "unicode-width",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "gimli"
+version = "0.27.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e"
+
+[[package]]
+name = "gl_generator"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a95dfc23a2b4a9a2f5ab41d194f8bfda3cabec42af4e39f08c339eb2a0c124d"
+dependencies = [
+ "khronos_api",
+ "log",
+ "xml-rs",
+]
+
+[[package]]
+name = "glium"
+version = "0.32.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2766728ecb86014b91d3d687614b32d65aacbbdc887f424a7b03cba3ab593bf"
+dependencies = [
+ "backtrace",
+ "fnv",
+ "gl_generator",
+ "glutin",
+ "lazy_static",
+ "memoffset 0.6.5",
+ "smallvec",
+ "takeable-option",
+]
+
+[[package]]
+name = "glob"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
+
+[[package]]
+name = "glutin"
+version = "0.29.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "444c9ad294fdcaf20ccf6726b78f380b5450275540c9b68ab62f49726ad1c713"
+dependencies = [
+ "cgl",
+ "cocoa",
+ "core-foundation",
+ "glutin_egl_sys",
+ "glutin_gles2_sys",
+ "glutin_glx_sys",
+ "glutin_wgl_sys",
+ "libloading 0.7.4",
+ "log",
+ "objc",
+ "once_cell",
+ "osmesa-sys",
+ "parking_lot",
+ "raw-window-handle 0.5.2",
+ "wayland-client",
+ "wayland-egl",
+ "winapi",
+ "winit",
+]
+
+[[package]]
+name = "glutin_egl_sys"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68900f84b471f31ea1d1355567eb865a2cf446294f06cef8d653ed7bcf5f013d"
+dependencies = [
+ "gl_generator",
+ "winapi",
+]
+
+[[package]]
+name = "glutin_gles2_sys"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e8094e708b730a7c8a1954f4f8a31880af00eb8a1c5b5bf85d28a0a3c6d69103"
+dependencies = [
+ "gl_generator",
+ "objc",
+]
+
+[[package]]
+name = "glutin_glx_sys"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d93d0575865098580c5b3a423188cd959419912ea60b1e48e8b3b526f6d02468"
+dependencies = [
+ "gl_generator",
+ "x11-dl",
+]
+
+[[package]]
+name = "glutin_wgl_sys"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3da5951a1569dbab865c6f2a863efafff193a93caf05538d193e9e3816d21696"
+dependencies = [
+ "gl_generator",
+]
+
+[[package]]
+name = "half"
+version = "1.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
+[[package]]
+name = "hashbrown"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
+
+[[package]]
+name = "heck"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b"
+
+[[package]]
+name = "ident_case"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
+
+[[package]]
+name = "indexmap"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
+dependencies = [
+ "autocfg",
+ "hashbrown 0.12.3",
+]
+
+[[package]]
+name = "indexmap"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
+dependencies = [
+ "equivalent",
+ "hashbrown 0.14.0",
+]
+
+[[package]]
+name = "instant"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "itertools"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
+
+[[package]]
+name = "jni-sys"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
+
+[[package]]
+name = "js-sys"
+version = "0.3.64"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "khronos_api"
+version = "3.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2db585e1d738fc771bf08a151420d3ed193d9d895a36df7f6f8a9456b911ddc"
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "libc"
+version = "0.2.147"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
+
+[[package]]
+name = "libloading"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f"
+dependencies = [
+ "cfg-if",
+ "winapi",
+]
+
+[[package]]
+name = "libloading"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d580318f95776505201b28cf98eb1fa5e4be3b689633ba6a3e6cd880ff22d8cb"
+dependencies = [
+ "cfg-if",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "lock_api"
+version = "0.4.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16"
+dependencies = [
+ "autocfg",
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
+
+[[package]]
+name = "malloc_buf"
+version = "0.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+
+[[package]]
+name = "memmap2"
+version = "0.5.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "memoffset"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "memoffset"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "minimal-lexical"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
+
+[[package]]
+name = "miniz_oxide"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
+dependencies = [
+ "adler",
+ "simd-adler32",
+]
+
+[[package]]
+name = "mio"
+version = "0.8.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2"
+dependencies = [
+ "libc",
+ "log",
+ "wasi",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "ndk"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "451422b7e4718271c8b5b3aadf5adedba43dc76312454b387e98fae0fc951aa0"
+dependencies = [
+ "bitflags",
+ "jni-sys",
+ "ndk-sys",
+ "num_enum",
+ "raw-window-handle 0.5.2",
+ "thiserror",
+]
+
+[[package]]
+name = "ndk-context"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b"
+
+[[package]]
+name = "ndk-glue"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0434fabdd2c15e0aab768ca31d5b7b333717f03cf02037d5a0a3ff3c278ed67f"
+dependencies = [
+ "libc",
+ "log",
+ "ndk",
+ "ndk-context",
+ "ndk-macro",
+ "ndk-sys",
+ "once_cell",
+ "parking_lot",
+]
+
+[[package]]
+name = "ndk-macro"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0df7ac00c4672f9d5aece54ee3347520b7e20f158656c7db2e6de01902eb7a6c"
+dependencies = [
+ "darling",
+ "proc-macro-crate",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "ndk-sys"
+version = "0.4.1+23.1.7779620"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3cf2aae958bd232cac5069850591667ad422d263686d75b52a065f9badeee5a3"
+dependencies = [
+ "jni-sys",
+]
+
+[[package]]
+name = "nix"
+version = "0.24.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069"
+dependencies = [
+ "bitflags",
+ "cfg-if",
+ "libc",
+ "memoffset 0.6.5",
+]
+
+[[package]]
+name = "nix"
+version = "0.25.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4"
+dependencies = [
+ "autocfg",
+ "bitflags",
+ "cfg-if",
+ "libc",
+ "memoffset 0.6.5",
+]
+
+[[package]]
+name = "nom"
+version = "7.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
+dependencies = [
+ "memchr",
+ "minimal-lexical",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
+dependencies = [
+ "hermit-abi 0.3.2",
+ "libc",
+]
+
+[[package]]
+name = "num_enum"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f646caf906c20226733ed5b1374287eb97e3c2a5c227ce668c1f2ce20ae57c9"
+dependencies = [
+ "num_enum_derive",
+]
+
+[[package]]
+name = "num_enum_derive"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dcbff9bc912032c62bf65ef1d5aea88983b420f4f839db1e9b0c281a25c9c799"
+dependencies = [
+ "proc-macro-crate",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "objc"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1"
+dependencies = [
+ "malloc_buf",
+]
+
+[[package]]
+name = "object"
+version = "0.31.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.18.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
+
+[[package]]
+name = "oorandom"
+version = "11.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
+
+[[package]]
+name = "os_str_bytes"
+version = "6.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac"
+
+[[package]]
+name = "osmesa-sys"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "88cfece6e95d2e717e0872a7f53a8684712ad13822a7979bc760b9c77ec0013b"
+dependencies = [
+ "shared_library",
+]
+
+[[package]]
+name = "parking_lot"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f"
+dependencies = [
+ "lock_api",
+ "parking_lot_core",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.9.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "redox_syscall 0.3.5",
+ "smallvec",
+ "windows-targets",
+]
+
+[[package]]
+name = "percent-encoding"
+version = "2.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
+
+[[package]]
+name = "pkg-config"
+version = "0.3.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
+
+[[package]]
+name = "plotters"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45"
+dependencies = [
+ "num-traits",
+ "plotters-backend",
+ "plotters-svg",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "plotters-backend"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609"
+
+[[package]]
+name = "plotters-svg"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab"
+dependencies = [
+ "plotters-backend",
+]
+
+[[package]]
+name = "png"
+version = "0.17.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59871cc5b6cce7eaccca5a802b4173377a1c2ba90654246789a8fa2334426d11"
+dependencies = [
+ "bitflags",
+ "crc32fast",
+ "fdeflate",
+ "flate2",
+ "miniz_oxide",
+]
+
+[[package]]
+name = "png"
+version = "0.17.10"
+dependencies = [
+ "bitflags",
+ "clap 3.2.25",
+ "crc32fast",
+ "criterion",
+ "fdeflate",
+ "flate2",
+ "getopts",
+ "glium",
+ "glob",
+ "miniz_oxide",
+ "rand",
+ "term",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
+
+[[package]]
+name = "proc-macro-crate"
+version = "1.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919"
+dependencies = [
+ "once_cell",
+ "toml_edit",
+]
+
+[[package]]
+name = "proc-macro-error"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
+dependencies = [
+ "proc-macro-error-attr",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.66"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50f3b39ccfb720540debaa0164757101c08ecb8d326b15358ce76a62c7e85965"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "raw-window-handle"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b800beb9b6e7d2df1fe337c9e3d04e3af22a124460fb4c30fcc22c9117cefb41"
+dependencies = [
+ "cty",
+]
+
+[[package]]
+name = "raw-window-handle"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2ff9a1f06a88b01621b7ae906ef0211290d1c8a168a15542486a8f61c0833b9"
+
+[[package]]
+name = "rayon"
+version = "1.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b"
+dependencies = [
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d"
+dependencies = [
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-utils",
+ "num_cpus",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "redox_users"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b"
+dependencies = [
+ "getrandom",
+ "redox_syscall 0.2.16",
+ "thiserror",
+]
+
+[[package]]
+name = "regex"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-automata"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2"
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
+
+[[package]]
+name = "rustversion"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
+
+[[package]]
+name = "ryu"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
+
+[[package]]
+name = "safe_arch"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c1ff3d6d9696af502cc3110dacce942840fb06ff4514cad92236ecc455f2ce05"
+dependencies = [
+ "bytemuck",
+]
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "scoped-tls"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"
+
+[[package]]
+name = "scopeguard"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
+
+[[package]]
+name = "sctk-adwaita"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61270629cc6b4d77ec1907db1033d5c2e1a404c412743621981a871dc9c12339"
+dependencies = [
+ "crossfont",
+ "log",
+ "smithay-client-toolkit",
+ "tiny-skia",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.183"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32ac8da02677876d532745a130fc9d8e6edfa81a269b107c5b00829b91d8eb3c"
+
+[[package]]
+name = "serde_cbor"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
+dependencies = [
+ "half",
+ "serde",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.183"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aafe972d60b0b9bee71a91b92fee2d4fb3c9d7e8f6b179aa99f27203d99a4816"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.28",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.104"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "076066c5f1078eac5b722a31827a8832fe108bed65dfa75e233c89f8206e976c"
+dependencies = [
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "servo-fontconfig"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7e3e22fe5fd73d04ebf0daa049d3efe3eae55369ce38ab16d07ddd9ac5c217c"
+dependencies = [
+ "libc",
+ "servo-fontconfig-sys",
+]
+
+[[package]]
+name = "servo-fontconfig-sys"
+version = "5.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e36b879db9892dfa40f95da1c38a835d41634b825fbd8c4c418093d53c24b388"
+dependencies = [
+ "expat-sys",
+ "freetype-sys",
+ "pkg-config",
+]
+
+[[package]]
+name = "shared_library"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a9e7e0f2bfae24d8a5b5a66c5b257a83c7412304311512a0c054cd5e619da11"
+dependencies = [
+ "lazy_static",
+ "libc",
+]
+
+[[package]]
+name = "simd-adler32"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
+
+[[package]]
+name = "slotmap"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1e08e261d0e8f5c43123b7adf3e4ca1690d655377ac93a03b2c9d3e98de1342"
+dependencies = [
+ "version_check",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
+
+[[package]]
+name = "smithay-client-toolkit"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f307c47d32d2715eb2e0ece5589057820e0e5e70d07c247d1063e844e107f454"
+dependencies = [
+ "bitflags",
+ "calloop",
+ "dlib",
+ "lazy_static",
+ "log",
+ "memmap2",
+ "nix 0.24.3",
+ "pkg-config",
+ "wayland-client",
+ "wayland-cursor",
+ "wayland-protocols",
+]
+
+[[package]]
+name = "strsim"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04361975b3f5e348b2189d8dc55bc942f278b2d482a6a0365de5bdd62d351567"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "takeable-option"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "36ae8932fcfea38b7d3883ae2ab357b0d57a02caaa18ebb4f5ece08beaec4aa0"
+
+[[package]]
+name = "term"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f"
+dependencies = [
+ "dirs-next",
+ "rustversion",
+ "winapi",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
+dependencies = [
+ "unicode-width",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
+
+[[package]]
+name = "thiserror"
+version = "1.0.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "611040a08a0439f8248d1990b111c95baa9c704c805fa1f62104b39655fd7f90"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "090198534930841fab3a5d1bb637cde49e339654e606195f8d9c76eeb081dc96"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.28",
+]
+
+[[package]]
+name = "tiny-skia"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "642680569bb895b16e4b9d181c60be1ed136fa0c9c7f11d004daf053ba89bf82"
+dependencies = [
+ "arrayref",
+ "arrayvec",
+ "bytemuck",
+ "cfg-if",
+ "png 0.17.9",
+ "safe_arch",
+ "tiny-skia-path",
+]
+
+[[package]]
+name = "tiny-skia-path"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c114d32f0c2ee43d585367cb013dfaba967ab9f62b90d9af0d696e955e70fa6c"
+dependencies = [
+ "arrayref",
+ "bytemuck",
+]
+
+[[package]]
+name = "tinytemplate"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "toml_datetime"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b"
+
+[[package]]
+name = "toml_edit"
+version = "0.19.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a"
+dependencies = [
+ "indexmap 2.0.0",
+ "toml_datetime",
+ "winnow",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
+
+[[package]]
+name = "unicode-width"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
+
+[[package]]
+name = "vec_map"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "walkdir"
+version = "2.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698"
+dependencies = [
+ "same-file",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342"
+dependencies = [
+ "cfg-if",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.28",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.28",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1"
+
+[[package]]
+name = "wayland-client"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f3b068c05a039c9f755f881dc50f01732214f5685e379829759088967c46715"
+dependencies = [
+ "bitflags",
+ "downcast-rs",
+ "libc",
+ "nix 0.24.3",
+ "scoped-tls",
+ "wayland-commons",
+ "wayland-scanner",
+ "wayland-sys",
+]
+
+[[package]]
+name = "wayland-commons"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8691f134d584a33a6606d9d717b95c4fa20065605f798a3f350d78dced02a902"
+dependencies = [
+ "nix 0.24.3",
+ "once_cell",
+ "smallvec",
+ "wayland-sys",
+]
+
+[[package]]
+name = "wayland-cursor"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6865c6b66f13d6257bef1cd40cbfe8ef2f150fb8ebbdb1e8e873455931377661"
+dependencies = [
+ "nix 0.24.3",
+ "wayland-client",
+ "xcursor",
+]
+
+[[package]]
+name = "wayland-egl"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "402de949f81a012926d821a2d659f930694257e76dd92b6e0042ceb27be4107d"
+dependencies = [
+ "wayland-client",
+ "wayland-sys",
+]
+
+[[package]]
+name = "wayland-protocols"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b950621f9354b322ee817a23474e479b34be96c2e909c14f7bc0100e9a970bc6"
+dependencies = [
+ "bitflags",
+ "wayland-client",
+ "wayland-commons",
+ "wayland-scanner",
+]
+
+[[package]]
+name = "wayland-scanner"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f4303d8fa22ab852f789e75a967f0a2cdc430a607751c0499bada3e451cbd53"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "xml-rs",
+]
+
+[[package]]
+name = "wayland-sys"
+version = "0.29.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be12ce1a3c39ec7dba25594b97b42cb3195d54953ddb9d3d95a7c3902bc6e9d4"
+dependencies = [
+ "dlib",
+ "lazy_static",
+ "pkg-config",
+]
+
+[[package]]
+name = "web-sys"
+version = "0.3.64"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-sys"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
+dependencies = [
+ "windows_aarch64_msvc 0.36.1",
+ "windows_i686_gnu 0.36.1",
+ "windows_i686_msvc 0.36.1",
+ "windows_x86_64_gnu 0.36.1",
+ "windows_x86_64_msvc 0.36.1",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.48.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc 0.48.0",
+ "windows_i686_gnu 0.48.0",
+ "windows_i686_msvc 0.48.0",
+ "windows_x86_64_gnu 0.48.0",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc 0.48.0",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.36.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
+
+[[package]]
+name = "winit"
+version = "0.27.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb796d6fbd86b2fd896c9471e6f04d39d750076ebe5680a3958f00f5ab97657c"
+dependencies = [
+ "bitflags",
+ "cocoa",
+ "core-foundation",
+ "core-graphics",
+ "dispatch",
+ "instant",
+ "libc",
+ "log",
+ "mio",
+ "ndk",
+ "ndk-glue",
+ "objc",
+ "once_cell",
+ "parking_lot",
+ "percent-encoding",
+ "raw-window-handle 0.4.3",
+ "raw-window-handle 0.5.2",
+ "sctk-adwaita",
+ "smithay-client-toolkit",
+ "wasm-bindgen",
+ "wayland-client",
+ "wayland-protocols",
+ "web-sys",
+ "windows-sys 0.36.1",
+ "x11-dl",
+]
+
+[[package]]
+name = "winnow"
+version = "0.5.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5504cc7644f4b593cbc05c4a55bf9bd4e94b867c3c0bd440934174d50482427d"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "wio"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d129932f4644ac2396cb456385cbf9e63b5b30c6e8dc4820bdca4eb082037a5"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "x11-dl"
+version = "2.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38735924fedd5314a6e548792904ed8c6de6636285cb9fec04d5b1db85c1516f"
+dependencies = [
+ "libc",
+ "once_cell",
+ "pkg-config",
+]
+
+[[package]]
+name = "xcursor"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "463705a63313cd4301184381c5e8042f0a7e9b4bb63653f216311d4ae74690b7"
+dependencies = [
+ "nom",
+]
+
+[[package]]
+name = "xml-rs"
+version = "0.8.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "47430998a7b5d499ccee752b41567bc3afc57e1327dc855b1a2aa44ce29b5fa1"
diff --git a/vendor/png/Cargo.toml b/vendor/png/Cargo.toml
new file mode 100644
index 0000000..00756a4
--- /dev/null
+++ b/vendor/png/Cargo.toml
@@ -0,0 +1,80 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.57"
+name = "png"
+version = "0.17.10"
+authors = ["The image-rs Developers"]
+include = [
+ "/LICENSE-MIT",
+ "/LICENSE-APACHE",
+ "/README.md",
+ "/CHANGES.md",
+ "/src/",
+ "/examples/",
+ "/benches/",
+]
+description = "PNG decoding and encoding library in pure Rust"
+readme = "README.md"
+categories = ["multimedia::images"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/image-rs/image-png.git"
+
+[[bench]]
+name = "decoder"
+path = "benches/decoder.rs"
+harness = false
+
+[dependencies.bitflags]
+version = "1.0"
+
+[dependencies.crc32fast]
+version = "1.2.0"
+
+[dependencies.fdeflate]
+version = "0.3.0"
+
+[dependencies.flate2]
+version = "1.0"
+
+[dependencies.miniz_oxide]
+version = "0.7.1"
+features = ["simd"]
+
+[dev-dependencies.clap]
+version = "3.0"
+features = ["derive"]
+
+[dev-dependencies.criterion]
+version = "0.3.1"
+
+[dev-dependencies.getopts]
+version = "0.2.14"
+
+[dev-dependencies.glium]
+version = "0.32"
+features = ["glutin"]
+default-features = false
+
+[dev-dependencies.glob]
+version = "0.3"
+
+[dev-dependencies.rand]
+version = "0.8.4"
+
+[dev-dependencies.term]
+version = "0.7"
+
+[features]
+benchmarks = []
+unstable = []
diff --git a/vendor/png/LICENSE-APACHE b/vendor/png/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/vendor/png/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/png/LICENSE-MIT b/vendor/png/LICENSE-MIT
new file mode 100644
index 0000000..ea471f0
--- /dev/null
+++ b/vendor/png/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2015 nwin
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/png/README.md b/vendor/png/README.md
new file mode 100644
index 0000000..e6719bc
--- /dev/null
+++ b/vendor/png/README.md
@@ -0,0 +1,39 @@
+# PNG Decoder/Encoder
+[![Build Status](https://github.com/image-rs/image-png/workflows/Rust%20CI/badge.svg)](https://github.com/image-rs/image-png/actions)
+[![Documentation](https://docs.rs/png/badge.svg)](https://docs.rs/png)
+[![Crates.io](https://img.shields.io/crates/v/png.svg)](https://crates.io/crates/png)
+![Lines of Code](https://tokei.rs/b1/github/image-rs/image-png)
+[![License](https://img.shields.io/crates/l/png.svg)](https://github.com/image-rs/image-png)
+[![fuzzit](https://app.fuzzit.dev/badge?org_id=image-rs)](https://app.fuzzit.dev/orgs/image-rs/dashboard)
+
+PNG decoder/encoder in pure Rust.
+
+It contains all features required to handle the entirety of [the PngSuite by
+Willem van Schaik][PngSuite].
+
+[PngSuite]: http://www.schaik.com/pngsuite2011/pngsuite.html
+
+## pngcheck
+
+The `pngcheck` utility is a small demonstration binary that checks and prints
+metadata on every `.png` image provided via parameter. You can run it (for
+example on the test directories) with
+
+```bash
+cargo run --release --example pngcheck ./tests/pngsuite/*
+```
+
+## License
+
+Licensed under either of
+
+ * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
+additional terms or conditions.
diff --git a/vendor/png/benches/README.md b/vendor/png/benches/README.md
new file mode 100644
index 0000000..bf13a55
--- /dev/null
+++ b/vendor/png/benches/README.md
@@ -0,0 +1,6 @@
+# Getting started with benchmarking
+
+To run the benchmarks you need a nightly rust toolchain.
+Then you launch it with
+
+ rustup run nightly cargo bench --features=benchmarks
diff --git a/vendor/png/benches/decoder.rs b/vendor/png/benches/decoder.rs
new file mode 100644
index 0000000..a078c9a
--- /dev/null
+++ b/vendor/png/benches/decoder.rs
@@ -0,0 +1,38 @@
+use std::fs;
+
+use criterion::{criterion_group, criterion_main, Criterion, Throughput};
+use png::Decoder;
+
+fn load_all(c: &mut Criterion) {
+ for entry in fs::read_dir("tests/benches/").unwrap().flatten() {
+ match entry.path().extension() {
+ Some(st) if st == "png" => {}
+ _ => continue,
+ }
+
+ let data = fs::read(entry.path()).unwrap();
+ bench_file(c, data, entry.file_name().into_string().unwrap());
+ }
+}
+
+criterion_group!(benches, load_all);
+criterion_main!(benches);
+
+fn bench_file(c: &mut Criterion, data: Vec<u8>, name: String) {
+ let mut group = c.benchmark_group("decode");
+ group.sample_size(20);
+
+ let decoder = Decoder::new(&*data);
+ let mut reader = decoder.read_info().unwrap();
+ let mut image = vec![0; reader.output_buffer_size()];
+ let info = reader.next_frame(&mut image).unwrap();
+
+ group.throughput(Throughput::Bytes(info.buffer_size() as u64));
+ group.bench_with_input(name, &data, |b, data| {
+ b.iter(|| {
+ let decoder = Decoder::new(data.as_slice());
+ let mut decoder = decoder.read_info().unwrap();
+ decoder.next_frame(&mut image).unwrap();
+ })
+ });
+}
diff --git a/vendor/png/examples/corpus-bench.rs b/vendor/png/examples/corpus-bench.rs
new file mode 100644
index 0000000..b030d6d
--- /dev/null
+++ b/vendor/png/examples/corpus-bench.rs
@@ -0,0 +1,198 @@
+use std::{fs, path::PathBuf};
+
+use clap::Parser;
+use png::Decoder;
+
+#[derive(clap::ValueEnum, Clone)]
+enum Speed {
+ Fast,
+ Default,
+ Best,
+}
+
+#[derive(clap::ValueEnum, Clone)]
+enum Filter {
+ None,
+ Sub,
+ Up,
+ Average,
+ Paeth,
+ Adaptive,
+}
+
+#[derive(clap::Parser)]
+struct Args {
+ directory: Option<PathBuf>,
+ #[clap(short, long, value_enum, default_value_t = Speed::Fast)]
+ speed: Speed,
+ #[clap(short, long, value_enum, default_value_t = Filter::Adaptive)]
+ filter: Filter,
+}
+
+#[inline(never)]
+fn run_encode(
+ args: &Args,
+ dimensions: (u32, u32),
+ color_type: png::ColorType,
+ bit_depth: png::BitDepth,
+ image: &[u8],
+) -> Vec<u8> {
+ let mut reencoded = Vec::new();
+ let mut encoder = png::Encoder::new(&mut reencoded, dimensions.0, dimensions.1);
+ encoder.set_color(color_type);
+ encoder.set_depth(bit_depth);
+ encoder.set_compression(match args.speed {
+ Speed::Fast => png::Compression::Fast,
+ Speed::Default => png::Compression::Default,
+ Speed::Best => png::Compression::Best,
+ });
+ encoder.set_filter(match args.filter {
+ Filter::None => png::FilterType::NoFilter,
+ Filter::Sub => png::FilterType::Sub,
+ Filter::Up => png::FilterType::Up,
+ Filter::Average => png::FilterType::Avg,
+ Filter::Paeth => png::FilterType::Paeth,
+ Filter::Adaptive => png::FilterType::Paeth,
+ });
+ encoder.set_adaptive_filter(match args.filter {
+ Filter::Adaptive => png::AdaptiveFilterType::Adaptive,
+ _ => png::AdaptiveFilterType::NonAdaptive,
+ });
+ let mut encoder = encoder.write_header().unwrap();
+ encoder.write_image_data(&image).unwrap();
+ encoder.finish().unwrap();
+ reencoded
+}
+
+#[inline(never)]
+fn run_decode(image: &[u8], output: &mut [u8]) {
+ let mut reader = Decoder::new(image).read_info().unwrap();
+ reader.next_frame(output).unwrap();
+}
+
+fn main() {
+ let mut total_uncompressed = 0;
+ let mut total_compressed = 0;
+ let mut total_pixels = 0;
+ let mut total_encode_time = 0;
+ let mut total_decode_time = 0;
+
+ let args = Args::parse();
+
+ println!(
+ "{:45} Ratio Encode Decode",
+ "Directory"
+ );
+ println!(
+ "{:45}------- -------------------- --------------------",
+ "---------"
+ );
+
+ let mut image2 = Vec::new();
+
+ let mut pending = vec![args.directory.clone().unwrap_or(PathBuf::from("."))];
+ while let Some(directory) = pending.pop() {
+ let mut dir_uncompressed = 0;
+ let mut dir_compressed = 0;
+ let mut dir_pixels = 0;
+ let mut dir_encode_time = 0;
+ let mut dir_decode_time = 0;
+
+ for entry in fs::read_dir(&directory).unwrap().flatten() {
+ if entry.file_type().unwrap().is_dir() {
+ pending.push(entry.path());
+ continue;
+ }
+
+ match entry.path().extension() {
+ Some(st) if st == "png" => {}
+ _ => continue,
+ }
+
+ // Parse
+ let data = fs::read(entry.path()).unwrap();
+ let mut decoder = Decoder::new(&*data);
+ if decoder.read_header_info().ok().map(|h| h.color_type)
+ == Some(png::ColorType::Indexed)
+ {
+ decoder.set_transformations(
+ png::Transformations::EXPAND | png::Transformations::STRIP_16,
+ );
+ }
+ let mut reader = match decoder.read_info() {
+ Ok(reader) => reader,
+ Err(_) => continue,
+ };
+ let mut image = vec![0; reader.output_buffer_size()];
+ let info = match reader.next_frame(&mut image) {
+ Ok(info) => info,
+ Err(_) => continue,
+ };
+ let (width, height) = (info.width, info.height);
+ let bit_depth = info.bit_depth;
+ let mut color_type = info.color_type;
+
+ // qoibench expands grayscale to RGB, so we do the same.
+ if bit_depth == png::BitDepth::Eight {
+ if color_type == png::ColorType::Grayscale {
+ image = image.into_iter().flat_map(|v| [v, v, v, 255]).collect();
+ color_type = png::ColorType::Rgba;
+ } else if color_type == png::ColorType::GrayscaleAlpha {
+ image = image
+ .chunks_exact(2)
+ .flat_map(|v| [v[0], v[0], v[0], v[1]])
+ .collect();
+ color_type = png::ColorType::Rgba;
+ }
+ }
+
+ // Re-encode
+ let start = std::time::Instant::now();
+ let reencoded = run_encode(&args, (width, height), color_type, bit_depth, &image);
+ let elapsed = start.elapsed().as_nanos() as u64;
+
+ // And decode again
+ image2.resize(image.len(), 0);
+ let start2 = std::time::Instant::now();
+ run_decode(&reencoded, &mut image2);
+ let elapsed2 = start2.elapsed().as_nanos() as u64;
+
+ assert_eq!(image, image2);
+
+ // Stats
+ dir_uncompressed += image.len();
+ dir_compressed += reencoded.len();
+ dir_pixels += (width * height) as u64;
+ dir_encode_time += elapsed;
+ dir_decode_time += elapsed2;
+ }
+ if dir_uncompressed > 0 {
+ println!(
+ "{:45}{:6.2}%{:8} mps {:6.2} GiB/s {:8} mps {:6.2} GiB/s",
+ directory.display(),
+ 100.0 * dir_compressed as f64 / dir_uncompressed as f64,
+ dir_pixels * 1000 / dir_encode_time,
+ dir_uncompressed as f64 / (dir_encode_time as f64 * 1e-9 * (1 << 30) as f64),
+ dir_pixels * 1000 / dir_decode_time,
+ dir_uncompressed as f64 / (dir_decode_time as f64 * 1e-9 * (1 << 30) as f64)
+ );
+ }
+
+ total_uncompressed += dir_uncompressed;
+ total_compressed += dir_compressed;
+ total_pixels += dir_pixels;
+ total_encode_time += dir_encode_time;
+ total_decode_time += dir_decode_time;
+ }
+
+ println!();
+ println!(
+ "{:44}{:7.3}%{:8} mps {:6.3} GiB/s {:8} mps {:6.3} GiB/s",
+ "Total",
+ 100.0 * total_compressed as f64 / total_uncompressed as f64,
+ total_pixels * 1000 / total_encode_time,
+ total_uncompressed as f64 / (total_encode_time as f64 * 1e-9 * (1 << 30) as f64),
+ total_pixels * 1000 / total_decode_time,
+ total_uncompressed as f64 / (total_decode_time as f64 * 1e-9 * (1 << 30) as f64)
+ );
+}
diff --git a/vendor/png/examples/png-generate.rs b/vendor/png/examples/png-generate.rs
new file mode 100644
index 0000000..9036a04
--- /dev/null
+++ b/vendor/png/examples/png-generate.rs
@@ -0,0 +1,55 @@
+// For reading and opening files
+use png::text_metadata::{ITXtChunk, ZTXtChunk};
+use std::env;
+use std::fs::File;
+use std::io::BufWriter;
+
+fn main() {
+ let path = env::args()
+ .nth(1)
+ .expect("Expected a filename to output to.");
+ let file = File::create(path).unwrap();
+ let w = &mut BufWriter::new(file);
+
+ let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1.
+ encoder.set_color(png::ColorType::Rgba);
+ encoder.set_depth(png::BitDepth::Eight);
+ // Adding text chunks to the header
+ encoder
+ .add_text_chunk(
+ "Testing tEXt".to_string(),
+ "This is a tEXt chunk that will appear before the IDAT chunks.".to_string(),
+ )
+ .unwrap();
+ encoder
+ .add_ztxt_chunk(
+ "Testing zTXt".to_string(),
+ "This is a zTXt chunk that is compressed in the png file.".to_string(),
+ )
+ .unwrap();
+ encoder
+ .add_itxt_chunk(
+ "Testing iTXt".to_string(),
+ "iTXt chunks support all of UTF8. Example: हिंदी.".to_string(),
+ )
+ .unwrap();
+
+ let mut writer = encoder.write_header().unwrap();
+
+ let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing a RGBA sequence. First pixel is red and second pixel is black.
+ writer.write_image_data(&data).unwrap(); // Save
+
+ // We can add a tEXt/zTXt/iTXt at any point before the encoder is dropped from scope. These chunks will be at the end of the png file.
+ let tail_ztxt_chunk = ZTXtChunk::new(
+ "Comment".to_string(),
+ "A zTXt chunk after the image data.".to_string(),
+ );
+ writer.write_text_chunk(&tail_ztxt_chunk).unwrap();
+
+ // The fields of the text chunk are public, so they can be mutated before being written to the file.
+ let mut tail_itxt_chunk = ITXtChunk::new("Author".to_string(), "सायंतन खान".to_string());
+ tail_itxt_chunk.compressed = true;
+ tail_itxt_chunk.language_tag = "hi".to_string();
+ tail_itxt_chunk.translated_keyword = "लेखक".to_string();
+ writer.write_text_chunk(&tail_itxt_chunk).unwrap();
+}
diff --git a/vendor/png/examples/pngcheck.rs b/vendor/png/examples/pngcheck.rs
new file mode 100644
index 0000000..69e95e3
--- /dev/null
+++ b/vendor/png/examples/pngcheck.rs
@@ -0,0 +1,381 @@
+#![allow(non_upper_case_globals)]
+
+extern crate getopts;
+extern crate glob;
+extern crate png;
+
+use std::env;
+use std::fs::File;
+use std::io;
+use std::io::prelude::*;
+use std::path::Path;
+
+use getopts::{Matches, Options, ParsingStyle};
+use term::{color, Attr};
+
+fn parse_args() -> Matches {
+ let args: Vec<String> = env::args().collect();
+ let mut opts = Options::new();
+ opts.optflag("c", "", "colorize output (for ANSI terminals)")
+ .optflag("q", "", "test quietly (output only errors)")
+ .optflag(
+ "t",
+ "",
+ "print contents of tEXt/zTXt/iTXt chunks (can be used with -q)",
+ )
+ .optflag("v", "", "test verbosely (print most chunk data)")
+ .parsing_style(ParsingStyle::StopAtFirstFree);
+ if args.len() > 1 {
+ match opts.parse(&args[1..]) {
+ Ok(matches) => return matches,
+ Err(err) => println!("{}", err),
+ }
+ }
+ println!("{}", opts.usage("Usage: pngcheck [-cpt] [file ...]"));
+ std::process::exit(0);
+}
+
+#[derive(Clone, Copy)]
+struct Config {
+ quiet: bool,
+ verbose: bool,
+ color: bool,
+ text: bool,
+}
+
+fn display_interlaced(i: bool) -> &'static str {
+ if i {
+ "interlaced"
+ } else {
+ "non-interlaced"
+ }
+}
+
+fn display_image_type(bits: u8, color: png::ColorType) -> String {
+ use png::ColorType::*;
+ format!(
+ "{}-bit {}",
+ bits,
+ match color {
+ Grayscale => "grayscale",
+ Rgb => "RGB",
+ Indexed => "palette",
+ GrayscaleAlpha => "grayscale+alpha",
+ Rgba => "RGB+alpha",
+ }
+ )
+}
+// channels after expansion of tRNS
+fn final_channels(c: png::ColorType, trns: bool) -> u8 {
+ use png::ColorType::*;
+ match c {
+ Grayscale => 1 + u8::from(trns),
+ Rgb => 3,
+ Indexed => 3 + u8::from(trns),
+ GrayscaleAlpha => 2,
+ Rgba => 4,
+ }
+}
+fn check_image<P: AsRef<Path>>(c: Config, fname: P) -> io::Result<()> {
+ // TODO improve performance by reusing allocations from decoder
+ use png::Decoded::*;
+ let mut t = term::stdout()
+ .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "could not open terminal"))?;
+ let data = &mut vec![0; 10 * 1024][..];
+ let mut reader = io::BufReader::new(File::open(&fname)?);
+ let fname = fname.as_ref().to_string_lossy();
+ let n = reader.read(data)?;
+ let mut buf = &data[..n];
+ let mut pos = 0;
+ let mut decoder = png::StreamingDecoder::new();
+ // Image data
+ let mut width = 0;
+ let mut height = 0;
+ let mut color = png::ColorType::Grayscale;
+ let mut bits = 0;
+ let mut trns = false;
+ let mut interlaced = false;
+ let mut compressed_size = 0;
+ let mut n_chunks = 0;
+ let mut have_idat = false;
+ macro_rules! c_ratio(
+ // TODO add palette entries to compressed_size
+ () => ({
+ compressed_size as f32/(
+ height as u64 *
+ (width as u64 * final_channels(color, trns) as u64 * bits as u64 + 7)>>3
+ ) as f32
+ });
+ );
+ let display_error = |err| -> Result<_, io::Error> {
+ let mut t = term::stdout()
+ .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "could not open terminal"))?;
+ if c.verbose {
+ if c.color {
+ print!(": ");
+ t.fg(color::RED)?;
+ writeln!(t, "{}", err)?;
+ t.attr(Attr::Bold)?;
+ write!(t, "ERRORS DETECTED")?;
+ t.reset()?;
+ } else {
+ println!(": {}", err);
+ print!("ERRORS DETECTED")
+ }
+ println!(" in {}", fname);
+ } else {
+ if !c.quiet {
+ if c.color {
+ t.fg(color::RED)?;
+ t.attr(Attr::Bold)?;
+ write!(t, "ERROR")?;
+ t.reset()?;
+ write!(t, ": ")?;
+ t.fg(color::YELLOW)?;
+ writeln!(t, "{}", fname)?;
+ t.reset()?;
+ } else {
+ println!("ERROR: {}", fname)
+ }
+ }
+ print!("{}: ", fname);
+ if c.color {
+ t.fg(color::RED)?;
+ writeln!(t, "{}", err)?;
+ t.reset()?;
+ } else {
+ println!("{}", err);
+ }
+ }
+ Ok(())
+ };
+
+ if c.verbose {
+ print!("File: ");
+ if c.color {
+ t.attr(Attr::Bold)?;
+ write!(t, "{}", fname)?;
+ t.reset()?;
+ } else {
+ print!("{}", fname);
+ }
+ print!(" ({}) bytes", data.len())
+ }
+ loop {
+ if buf.is_empty() {
+ // circumvent borrow checker
+ assert!(!data.is_empty());
+ let n = reader.read(data)?;
+
+ // EOF
+ if n == 0 {
+ println!("ERROR: premature end of file {}", fname);
+ break;
+ }
+ buf = &data[..n];
+ }
+ match decoder.update(buf, &mut Vec::new()) {
+ Ok((_, ImageEnd)) => {
+ if !have_idat {
+ // This isn't beautiful. But it works.
+ display_error(png::DecodingError::IoError(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "IDAT chunk missing",
+ )))?;
+ break;
+ }
+ if !c.verbose && !c.quiet {
+ if c.color {
+ t.fg(color::GREEN)?;
+ t.attr(Attr::Bold)?;
+ write!(t, "OK")?;
+ t.reset()?;
+ write!(t, ": ")?;
+ t.fg(color::YELLOW)?;
+ write!(t, "{}", fname)?;
+ t.reset()?;
+ } else {
+ print!("OK: {}", fname)
+ }
+ println!(
+ " ({}x{}, {}{}, {}, {:.1}%)",
+ width,
+ height,
+ display_image_type(bits, color),
+ (if trns { "+trns" } else { "" }),
+ display_interlaced(interlaced),
+ 100.0 * (1.0 - c_ratio!())
+ )
+ } else if !c.quiet {
+ println!();
+ if c.color {
+ t.fg(color::GREEN)?;
+ t.attr(Attr::Bold)?;
+ write!(t, "No errors detected ")?;
+ t.reset()?;
+ } else {
+ print!("No errors detected ");
+ }
+ println!(
+ "in {} ({} chunks, {:.1}% compression)",
+ fname,
+ n_chunks,
+ 100.0 * (1.0 - c_ratio!()),
+ )
+ }
+ break;
+ }
+ Ok((n, res)) => {
+ buf = &buf[n..];
+ pos += n;
+ match res {
+ Header(w, h, b, c, i) => {
+ width = w;
+ height = h;
+ bits = b as u8;
+ color = c;
+ interlaced = i;
+ }
+ ChunkBegin(len, type_str) => {
+ use png::chunk;
+ n_chunks += 1;
+ if c.verbose {
+ let chunk = type_str;
+ println!();
+ print!(" chunk ");
+ if c.color {
+ t.fg(color::YELLOW)?;
+ write!(t, "{:?}", chunk)?;
+ t.reset()?;
+ } else {
+ print!("{:?}", chunk)
+ }
+ print!(
+ " at offset {:#07x}, length {}",
+ pos - 4, // subtract chunk name length
+ len
+ )
+ }
+ match type_str {
+ chunk::IDAT => {
+ have_idat = true;
+ compressed_size += len
+ }
+ chunk::tRNS => {
+ trns = true;
+ }
+ _ => (),
+ }
+ }
+ ImageData => {
+ //println!("got {} bytes of image data", data.len())
+ }
+ ChunkComplete(_, type_str) if c.verbose => {
+ use png::chunk::*;
+ if type_str == IHDR {
+ println!();
+ print!(
+ " {} x {} image, {}{}, {}",
+ width,
+ height,
+ display_image_type(bits, color),
+ (if trns { "+trns" } else { "" }),
+ display_interlaced(interlaced),
+ );
+ }
+ }
+ AnimationControl(actl) => {
+ println!();
+ print!(" {} frames, {} plays", actl.num_frames, actl.num_plays,);
+ }
+ FrameControl(fctl) => {
+ println!();
+ println!(
+ " sequence #{}, {} x {} pixels @ ({}, {})",
+ fctl.sequence_number,
+ fctl.width,
+ fctl.height,
+ fctl.x_offset,
+ fctl.y_offset,
+ /*fctl.delay_num,
+ fctl.delay_den,
+ fctl.dispose_op,
+ fctl.blend_op,*/
+ );
+ print!(
+ " {}/{} s delay, dispose: {}, blend: {}",
+ fctl.delay_num,
+ if fctl.delay_den == 0 {
+ 100
+ } else {
+ fctl.delay_den
+ },
+ fctl.dispose_op,
+ fctl.blend_op,
+ );
+ }
+ _ => (),
+ }
+ //println!("{} {:?}", n, res)
+ }
+ Err(err) => {
+ let _ = display_error(err);
+ break;
+ }
+ }
+ }
+ if c.text {
+ println!("Parsed tEXt chunks:");
+ for text_chunk in &decoder.info().unwrap().uncompressed_latin1_text {
+ println!("{:#?}", text_chunk);
+ }
+
+ println!("Parsed zTXt chunks:");
+ for text_chunk in &decoder.info().unwrap().compressed_latin1_text {
+ let mut cloned_text_chunk = text_chunk.clone();
+ cloned_text_chunk.decompress_text()?;
+ println!("{:#?}", cloned_text_chunk);
+ }
+
+ println!("Parsed iTXt chunks:");
+ for text_chunk in &decoder.info().unwrap().utf8_text {
+ let mut cloned_text_chunk = text_chunk.clone();
+ cloned_text_chunk.decompress_text()?;
+ println!("{:#?}", cloned_text_chunk);
+ }
+ }
+
+ Ok(())
+}
+
+fn main() {
+ let m = parse_args();
+
+ let config = Config {
+ quiet: m.opt_present("q"),
+ verbose: m.opt_present("v"),
+ color: m.opt_present("c"),
+ text: m.opt_present("t"),
+ };
+
+ for file in m.free {
+ let result = if file.contains('*') {
+ glob::glob(&file)
+ .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
+ .and_then(|mut glob| {
+ glob.try_for_each(|entry| {
+ entry
+ .map_err(|err| io::Error::new(io::ErrorKind::Other, err))
+ .and_then(|file| check_image(config, file))
+ })
+ })
+ } else {
+ check_image(config, &file)
+ };
+
+ result.unwrap_or_else(|err| {
+ println!("{}: {}", file, err);
+ std::process::exit(1)
+ });
+ }
+}
diff --git a/vendor/png/examples/show.rs b/vendor/png/examples/show.rs
new file mode 100644
index 0000000..d8ddf75
--- /dev/null
+++ b/vendor/png/examples/show.rs
@@ -0,0 +1,198 @@
+use glium::{
+ backend::glutin::Display,
+ glutin::{
+ self, dpi,
+ event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent},
+ event_loop::ControlFlow,
+ },
+ texture::{ClientFormat, RawImage2d},
+ BlitTarget, Rect, Surface,
+};
+use std::{borrow::Cow, env, fs::File, io, path};
+
+/// Load the image using `png`
+fn load_image(path: &path::PathBuf) -> io::Result<RawImage2d<'static, u8>> {
+ use png::ColorType::*;
+ let mut decoder = png::Decoder::new(File::open(path)?);
+ decoder.set_transformations(png::Transformations::normalize_to_color8());
+ let mut reader = decoder.read_info()?;
+ let mut img_data = vec![0; reader.output_buffer_size()];
+ let info = reader.next_frame(&mut img_data)?;
+
+ let (data, format) = match info.color_type {
+ Rgb => (img_data, ClientFormat::U8U8U8),
+ Rgba => (img_data, ClientFormat::U8U8U8U8),
+ Grayscale => (
+ {
+ let mut vec = Vec::with_capacity(img_data.len() * 3);
+ for g in img_data {
+ vec.extend([g, g, g].iter().cloned())
+ }
+ vec
+ },
+ ClientFormat::U8U8U8,
+ ),
+ GrayscaleAlpha => (
+ {
+ let mut vec = Vec::with_capacity(img_data.len() * 3);
+ for ga in img_data.chunks(2) {
+ let g = ga[0];
+ let a = ga[1];
+ vec.extend([g, g, g, a].iter().cloned())
+ }
+ vec
+ },
+ ClientFormat::U8U8U8U8,
+ ),
+ _ => unreachable!("uncovered color type"),
+ };
+
+ Ok(RawImage2d {
+ data: Cow::Owned(data),
+ width: info.width,
+ height: info.height,
+ format,
+ })
+}
+
/// Create the window, show the first image, and run the event loop.
///
/// The remaining entries of `files` are shown one at a time when the user
/// presses the right-arrow key; Escape, exhausting the list, a failed load,
/// or closing the window exits.
///
/// Note: `event_loop.run` takes over the thread and never returns, so the
/// `io::Result` only reports errors happening before the loop starts
/// (loading the first image, creating the display).
fn main_loop(files: Vec<path::PathBuf>) -> io::Result<()> {
    let mut files = files.into_iter();
    // NOTE(review): panics if `files` is empty — `main` normally guarantees at
    // least one argument, but a glob that matches nothing still reaches here.
    let image = load_image(&files.next().unwrap())?;

    let event_loop = glutin::event_loop::EventLoop::new();
    let window_builder = glutin::window::WindowBuilder::new().with_title("Show Example");
    let context_builder = glutin::ContextBuilder::new().with_vsync(true);
    let display = glium::Display::new(window_builder, context_builder, &event_loop)
        .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;
    resize_window(&display, &image);
    let mut texture = glium::Texture2d::new(&display, image).unwrap();
    draw(&display, &texture);

    event_loop.run(move |event, _, control_flow| match event {
        Event::WindowEvent {
            event: WindowEvent::CloseRequested,
            ..
        } => exit(control_flow),
        Event::WindowEvent {
            event:
                WindowEvent::KeyboardInput {
                    input:
                        KeyboardInput {
                            state: ElementState::Pressed,
                            virtual_keycode: code,
                            ..
                        },
                    ..
                },
            ..
        } => match code {
            Some(VirtualKeyCode::Escape) => exit(control_flow),
            // Right arrow: advance to the next file, replacing the texture.
            Some(VirtualKeyCode::Right) => match &files.next() {
                Some(path) => {
                    match load_image(path) {
                        Ok(image) => {
                            resize_window(&display, &image);
                            texture = glium::Texture2d::new(&display, image).unwrap();
                            draw(&display, &texture);
                        }
                        Err(err) => {
                            println!("Error: {}", err);
                            exit(control_flow);
                        }
                    };
                }
                None => exit(control_flow),
            },
            _ => {}
        },
        Event::RedrawRequested(_) => draw(&display, &texture),
        _ => {}
    });
}
+
/// Draw `texture` onto the window's back buffer and swap buffers.
///
/// The copy goes through `fill_v_flipped`, which mirrors the image vertically
/// during the blit.
fn draw(display: &glium::Display, texture: &glium::Texture2d) {
    let frame = display.draw();
    fill_v_flipped(
        &texture.as_surface(),
        &frame,
        glium::uniforms::MagnifySamplerFilter::Linear,
    );
    frame.finish().unwrap();
}
+
/// Ask the event loop to terminate after the current iteration.
fn exit(control_flow: &mut ControlFlow) {
    *control_flow = ControlFlow::Exit;
}
+
/// Blit the whole of `src` onto the whole of `target`, mirrored vertically.
///
/// The flip is achieved by giving the blit target its bottom edge at the top
/// of the surface and a *negative* height, so rows are written in reverse
/// order. `filter` controls scaling when the two surfaces differ in size.
fn fill_v_flipped<S1, S2>(src: &S1, target: &S2, filter: glium::uniforms::MagnifySamplerFilter)
where
    S1: Surface,
    S2: Surface,
{
    let src_dim = src.get_dimensions();
    let src_rect = Rect {
        left: 0,
        bottom: 0,
        width: src_dim.0 as u32,
        height: src_dim.1 as u32,
    };
    let target_dim = target.get_dimensions();
    let target_rect = BlitTarget {
        left: 0,
        bottom: target_dim.1,
        // Negative height: copy top-to-bottom into a bottom-to-top range.
        width: target_dim.0 as i32,
        height: -(target_dim.1 as i32),
    };
    src.blit_color(&src_rect, target, &target_rect, filter);
}
+
/// Resize the window to the image dimensions.
///
/// Images smaller than 50×50 (both sides) are scaled up by a factor of 10 so
/// the window stays usable.
fn resize_window(display: &Display, image: &RawImage2d<'static, u8>) {
    let mut width = image.width;
    let mut height = image.height;
    if width < 50 && height < 50 {
        width *= 10;
        height *= 10;
    }
    display
        .gl_window()
        .window()
        .set_inner_size(dpi::LogicalSize::new(f64::from(width), f64::from(height)));
}
+
/// Entry point: collect the image paths from the command line, then run the
/// viewer.
///
/// Arguments containing `*` are expanded with `glob`; other arguments are
/// used as paths directly. Collection stops at the first glob error, but any
/// files gathered until then are still shown.
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() < 2 {
        println!("Usage: show files [...]");
    } else {
        let mut files = vec![];
        for file in args.iter().skip(1) {
            // The immediately-invoked closure lets `?` be used for both the
            // pattern error and the per-entry errors.
            match if file.contains('*') {
                (|| -> io::Result<_> {
                    for entry in glob::glob(file)
                        .map_err(|err| io::Error::new(io::ErrorKind::Other, err.msg))?
                    {
                        files.push(
                            entry
                                .map_err(|_| io::Error::new(io::ErrorKind::Other, "glob error"))?,
                        )
                    }
                    Ok(())
                })()
            } else {
                files.push(path::PathBuf::from(file));
                Ok(())
            } {
                Ok(_) => (),
                Err(err) => {
                    println!("{}: {}", file, err);
                    break;
                }
            }
        }
        // NOTE(review): if every pattern fails or matches nothing, `files` is
        // empty and `main_loop` panics on its first `unwrap` — confirm intended.
        // "tests/pngsuite/pngsuite.png"
        match main_loop(files) {
            Ok(_) => (),
            Err(err) => println!("Error: {}", err),
        }
    }
}
diff --git a/vendor/png/src/chunk.rs b/vendor/png/src/chunk.rs
new file mode 100644
index 0000000..39578a4
--- /dev/null
+++ b/vendor/png/src/chunk.rs
@@ -0,0 +1,98 @@
+//! Chunk types and functions
+#![allow(dead_code)]
+#![allow(non_upper_case_globals)]
+use core::fmt;
+
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct ChunkType(pub [u8; 4]);

// -- Critical chunks --

/// Image header
pub const IHDR: ChunkType = ChunkType(*b"IHDR");
/// Palette
pub const PLTE: ChunkType = ChunkType(*b"PLTE");
/// Image data
pub const IDAT: ChunkType = ChunkType(*b"IDAT");
/// Image trailer
pub const IEND: ChunkType = ChunkType(*b"IEND");

// -- Ancillary chunks --

/// Transparency
pub const tRNS: ChunkType = ChunkType(*b"tRNS");
/// Background colour
pub const bKGD: ChunkType = ChunkType(*b"bKGD");
/// Image last-modification time
pub const tIME: ChunkType = ChunkType(*b"tIME");
/// Physical pixel dimensions
pub const pHYs: ChunkType = ChunkType(*b"pHYs");
/// Source system's pixel chromaticities
pub const cHRM: ChunkType = ChunkType(*b"cHRM");
/// Source system's gamma value
pub const gAMA: ChunkType = ChunkType(*b"gAMA");
/// sRGB color space chunk
pub const sRGB: ChunkType = ChunkType(*b"sRGB");
/// ICC profile chunk
pub const iCCP: ChunkType = ChunkType(*b"iCCP");
/// Latin-1 uncompressed textual data
pub const tEXt: ChunkType = ChunkType(*b"tEXt");
/// Latin-1 compressed textual data
pub const zTXt: ChunkType = ChunkType(*b"zTXt");
/// UTF-8 textual data
pub const iTXt: ChunkType = ChunkType(*b"iTXt");

// -- Extension chunks --

/// Animation control
pub const acTL: ChunkType = ChunkType(*b"acTL");
/// Frame control
pub const fcTL: ChunkType = ChunkType(*b"fcTL");
/// Frame data
pub const fdAT: ChunkType = ChunkType(*b"fdAT");

// -- Chunk type determination --

/// A chunk name encodes its properties in bit 5 (the ASCII case bit) of each
/// of its four bytes; this helper extracts that bit.
fn property_bit(byte: u8) -> bool {
    byte & 0b10_0000 != 0
}

/// Returns true if the chunk is critical (first byte uppercase).
pub fn is_critical(type_: ChunkType) -> bool {
    !property_bit(type_.0[0])
}

/// Returns true if the chunk is private (second byte lowercase).
pub fn is_private(type_: ChunkType) -> bool {
    property_bit(type_.0[1])
}

/// Checks whether the reserved bit of the chunk name is set.
/// If it is set the chunk name is invalid.
pub fn reserved_set(type_: ChunkType) -> bool {
    property_bit(type_.0[2])
}

/// Returns true if the chunk is safe to copy if unknown (fourth byte
/// lowercase).
pub fn safe_to_copy(type_: ChunkType) -> bool {
    property_bit(type_.0[3])
}

impl fmt::Debug for ChunkType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        /// Prints the four name bytes as escaped characters.
        struct Raw([u8; 4]);

        impl fmt::Debug for Raw {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                self.0
                    .iter()
                    .try_for_each(|&b| write!(f, "{}", char::from(b).escape_debug()))
            }
        }

        f.debug_struct("ChunkType")
            .field("type", &Raw(self.0))
            .field("critical", &is_critical(*self))
            .field("private", &is_private(*self))
            .field("reserved", &reserved_set(*self))
            .field("safecopy", &safe_to_copy(*self))
            .finish()
    }
}
diff --git a/vendor/png/src/common.rs b/vendor/png/src/common.rs
new file mode 100644
index 0000000..6e5dbff
--- /dev/null
+++ b/vendor/png/src/common.rs
@@ -0,0 +1,808 @@
+//! Common types shared between the encoder and decoder
+use crate::text_metadata::{EncodableTextChunk, ITXtChunk, TEXtChunk, ZTXtChunk};
+use crate::{chunk, encoder};
+use io::Write;
+use std::{borrow::Cow, convert::TryFrom, fmt, io};
+
/// Describes how a pixel is encoded.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum ColorType {
    /// 1 grayscale sample.
    Grayscale = 0,
    /// 1 red sample, 1 green sample, 1 blue sample.
    Rgb = 2,
    /// 1 sample for the palette index.
    Indexed = 3,
    /// 1 grayscale sample, then 1 alpha sample.
    GrayscaleAlpha = 4,
    /// 1 red sample, 1 green sample, 1 blue sample, and finally, 1 alpha sample.
    Rgba = 6,
}

impl ColorType {
    /// Returns the number of samples used per pixel encoded in this way.
    pub fn samples(self) -> usize {
        usize::from(self.samples_u8())
    }

    pub(crate) fn samples_u8(self) -> u8 {
        match self {
            ColorType::Grayscale => 1,
            ColorType::GrayscaleAlpha => 2,
            ColorType::Indexed => 1,
            ColorType::Rgb => 3,
            ColorType::Rgba => 4,
        }
    }

    /// u8 -> Self. Temporary solution until Rust provides a canonical one.
    pub fn from_u8(n: u8) -> Option<ColorType> {
        let color = match n {
            0 => ColorType::Grayscale,
            2 => ColorType::Rgb,
            3 => ColorType::Indexed,
            4 => ColorType::GrayscaleAlpha,
            6 => ColorType::Rgba,
            _ => return None,
        };
        Some(color)
    }

    /// Like `raw_row_length_from_width`, but `None` when the row byte count
    /// does not fit in `usize`.
    pub(crate) fn checked_raw_row_length(self, depth: BitDepth, width: u32) -> Option<usize> {
        // A 32-bit width times at most 5 extra bits cannot overflow u64.
        let bits = u64::from(width) * u64::from(self.samples_u8()) * u64::from(depth.into_u8());
        // One extra byte per row for the filter-type marker.
        TryFrom::try_from(1 + (bits + 7) / 8).ok()
    }

    /// Number of bytes in one serialized scanline of width `width`:
    /// the sample bits rounded up to whole bytes, plus one filter-type byte.
    pub(crate) fn raw_row_length_from_width(self, depth: BitDepth, width: u32) -> usize {
        let samples = width as usize * self.samples();
        let data_bytes = match depth {
            BitDepth::Sixteen => samples * 2,
            BitDepth::Eight => samples,
            packed => {
                // 1, 2 or 4 bits per sample: several samples share one byte;
                // a partially used trailing byte still counts.
                let per_byte = 8 / packed as usize;
                samples / per_byte + usize::from(samples % per_byte > 0)
            }
        };
        1 + data_bytes
    }

    /// Section 11.2.2 of the PNG standard disallows several combinations of
    /// bit depth and color type.
    pub(crate) fn is_combination_invalid(self, bit_depth: BitDepth) -> bool {
        let sub_byte = matches!(bit_depth, BitDepth::One | BitDepth::Two | BitDepth::Four);
        let multi_sample = matches!(
            self,
            ColorType::Rgb | ColorType::GrayscaleAlpha | ColorType::Rgba
        );
        (sub_byte && multi_sample) || (bit_depth == BitDepth::Sixteen && self == ColorType::Indexed)
    }
}

/// Bit depth of the PNG file.
/// Specifies the number of bits per sample.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum BitDepth {
    One = 1,
    Two = 2,
    Four = 4,
    Eight = 8,
    Sixteen = 16,
}

/// Internal count of bytes per pixel.
///
/// Filtering never works in sub-byte units, so only this small set of byte
/// widths can occur; encoding that as an enum makes the invariant explicit
/// for both readers and the optimizer.
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub(crate) enum BytesPerPixel {
    One = 1,
    Two = 2,
    Three = 3,
    Four = 4,
    Six = 6,
    Eight = 8,
}

impl BitDepth {
    /// u8 -> Self. Temporary solution until Rust provides a canonical one.
    pub fn from_u8(n: u8) -> Option<BitDepth> {
        let depth = match n {
            1 => BitDepth::One,
            2 => BitDepth::Two,
            4 => BitDepth::Four,
            8 => BitDepth::Eight,
            16 => BitDepth::Sixteen,
            _ => return None,
        };
        Some(depth)
    }

    pub(crate) fn into_u8(self) -> u8 {
        self as u8
    }
}
+
/// Pixel dimensions information
#[derive(Clone, Copy, Debug)]
pub struct PixelDimensions {
    /// Pixels per unit, X axis
    pub xppu: u32,
    /// Pixels per unit, Y axis
    pub yppu: u32,
    /// Either *Meter* or *Unspecified*
    pub unit: Unit,
}

/// Physical unit of the pixel dimensions
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum Unit {
    Unspecified = 0,
    Meter = 1,
}

impl Unit {
    /// u8 -> Self. Temporary solution until Rust provides a canonical one.
    pub fn from_u8(n: u8) -> Option<Unit> {
        // The discriminants are contiguous from zero, so a table lookup
        // covers the valid values and rejects everything else.
        [Unit::Unspecified, Unit::Meter]
            .get(usize::from(n))
            .copied()
    }
}
+
/// How to reset buffer of an animated png (APNG) at the end of a frame.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum DisposeOp {
    /// Leave the buffer unchanged.
    None = 0,
    /// Clear buffer with the background color.
    Background = 1,
    /// Reset the buffer to the state before the current frame.
    Previous = 2,
}

impl DisposeOp {
    /// u8 -> Self. Using enum_primitive or transmute is probably the right thing but this will do for now.
    pub fn from_u8(n: u8) -> Option<DisposeOp> {
        // Discriminants run 0..=2, so index into a value table.
        const OPS: [DisposeOp; 3] = [DisposeOp::None, DisposeOp::Background, DisposeOp::Previous];
        OPS.get(usize::from(n)).copied()
    }
}

impl fmt::Display for DisposeOp {
    /// Prints the operation's symbolic constant name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            DisposeOp::None => "DISPOSE_OP_NONE",
            DisposeOp::Background => "DISPOSE_OP_BACKGROUND",
            DisposeOp::Previous => "DISPOSE_OP_PREVIOUS",
        })
    }
}

/// How pixels are written into the buffer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum BlendOp {
    /// Pixels overwrite the value at their position.
    Source = 0,
    /// The new pixels are blended into the current state based on alpha.
    Over = 1,
}

impl BlendOp {
    /// u8 -> Self. Using enum_primitive or transmute is probably the right thing but this will do for now.
    pub fn from_u8(n: u8) -> Option<BlendOp> {
        const OPS: [BlendOp; 2] = [BlendOp::Source, BlendOp::Over];
        OPS.get(usize::from(n)).copied()
    }
}

impl fmt::Display for BlendOp {
    /// Prints the operation's symbolic constant name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            BlendOp::Source => "BLEND_OP_SOURCE",
            BlendOp::Over => "BLEND_OP_OVER",
        })
    }
}
+
/// Frame control information
#[derive(Clone, Copy, Debug)]
pub struct FrameControl {
    /// Sequence number of the animation chunk, starting from 0
    pub sequence_number: u32,
    /// Width of the following frame
    pub width: u32,
    /// Height of the following frame
    pub height: u32,
    /// X position at which to render the following frame
    pub x_offset: u32,
    /// Y position at which to render the following frame
    pub y_offset: u32,
    /// Frame delay fraction numerator
    pub delay_num: u16,
    /// Frame delay fraction denominator
    pub delay_den: u16,
    /// Type of frame area disposal to be done after rendering this frame
    pub dispose_op: DisposeOp,
    /// Type of frame area rendering for this frame
    pub blend_op: BlendOp,
}

impl Default for FrameControl {
    /// A zero-sized frame at offset (0, 0) with a 1/30 s delay, no disposal
    /// and source blending.
    fn default() -> FrameControl {
        FrameControl {
            sequence_number: 0,
            width: 0,
            height: 0,
            x_offset: 0,
            y_offset: 0,
            delay_num: 1,
            delay_den: 30,
            dispose_op: DisposeOp::None,
            blend_op: BlendOp::Source,
        }
    }
}

impl FrameControl {
    /// Sets the sequence number.
    pub fn set_seq_num(&mut self, s: u32) {
        self.sequence_number = s;
    }

    /// Advances the sequence number by `i`.
    pub fn inc_seq_num(&mut self, i: u32) {
        self.sequence_number += i;
    }

    /// Serializes this frame control as an `fcTL` chunk: the nine fields in
    /// declaration order, integers big-endian, 26 payload bytes in total.
    pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        let mut data = [0u8; 26];
        data[..4].copy_from_slice(&self.sequence_number.to_be_bytes());
        data[4..8].copy_from_slice(&self.width.to_be_bytes());
        data[8..12].copy_from_slice(&self.height.to_be_bytes());
        data[12..16].copy_from_slice(&self.x_offset.to_be_bytes());
        data[16..20].copy_from_slice(&self.y_offset.to_be_bytes());
        data[20..22].copy_from_slice(&self.delay_num.to_be_bytes());
        data[22..24].copy_from_slice(&self.delay_den.to_be_bytes());
        data[24] = self.dispose_op as u8;
        data[25] = self.blend_op as u8;

        encoder::write_chunk(w, chunk::fcTL, &data)
    }
}
+
/// Animation control information
#[derive(Clone, Copy, Debug)]
pub struct AnimationControl {
    /// Number of frames
    pub num_frames: u32,
    /// Number of times to loop this APNG. 0 indicates infinite looping.
    pub num_plays: u32,
}

impl AnimationControl {
    /// Serializes this as an `acTL` chunk: `num_frames` then `num_plays`,
    /// both big-endian (8 payload bytes).
    pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        let mut data = [0; 8];
        data[..4].copy_from_slice(&self.num_frames.to_be_bytes());
        data[4..].copy_from_slice(&self.num_plays.to_be_bytes());
        encoder::write_chunk(w, chunk::acTL, &data)
    }
}
+
/// The type and strength of applied compression.
#[derive(Debug, Clone, Copy)]
pub enum Compression {
    /// Default level
    Default,
    /// Fast minimal compression
    Fast,
    /// Higher compression level
    ///
    /// Best in this context isn't actually the highest possible level
    /// the encoder can do, but is meant to emulate the `Best` setting in the `Flate2`
    /// library.
    Best,
    #[deprecated(
        since = "0.17.6",
        note = "use one of the other compression levels instead, such as 'fast'"
    )]
    Huffman,
    #[deprecated(
        since = "0.17.6",
        note = "use one of the other compression levels instead, such as 'fast'"
    )]
    Rle,
}

impl Default for Compression {
    // The trait default is the `Default` *variant* — not to be confused with
    // `Info::default()`, which picks `Fast` for output compatibility.
    fn default() -> Self {
        Self::Default
    }
}
+
/// An unsigned integer scaled version of a floating point value,
/// equivalent to an integer quotient with fixed denominator (100_000).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ScaledFloat(u32);

impl ScaledFloat {
    // Fixed-point denominator: stored value = float value * 100_000.
    const SCALING: f32 = 100_000.0;

    /// Gets whether the value is within the clamped range of this type.
    pub fn in_range(value: f32) -> bool {
        value >= 0.0 && (value * Self::SCALING).floor() <= std::u32::MAX as f32
    }

    /// Gets whether the value can be exactly converted in round-trip.
    #[allow(clippy::float_cmp)] // Stupid tool, the exact float compare is _the entire point_.
    pub fn exact(value: f32) -> bool {
        let there = Self::forward(value);
        let back = Self::reverse(there);
        value == back
    }

    // Quantize: clamp below at zero, scale, round down; the float-to-int
    // cast saturates for oversized values.
    fn forward(value: f32) -> u32 {
        (value.max(0.0) * Self::SCALING).floor() as u32
    }

    // Inverse of `forward` up to float rounding (see `exact`).
    fn reverse(encoded: u32) -> f32 {
        encoded as f32 / Self::SCALING
    }

    /// Slightly inaccurate scaling and quantization.
    /// Clamps the value into the representable range if it is negative or too large.
    pub fn new(value: f32) -> Self {
        Self(Self::forward(value))
    }

    /// Fully accurate construction from a value scaled as per specification.
    pub fn from_scaled(val: u32) -> Self {
        Self(val)
    }

    /// Get the accurate encoded value.
    pub fn into_scaled(self) -> u32 {
        self.0
    }

    /// Get the unscaled value as a floating point.
    pub fn into_value(self) -> f32 {
        Self::reverse(self.0)
    }

    /// Writes the scaled value as a big-endian `gAMA` chunk.
    pub(crate) fn encode_gama<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        encoder::write_chunk(w, chunk::gAMA, &self.into_scaled().to_be_bytes())
    }
}
+
/// Chromaticities of the color space primaries
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SourceChromaticities {
    pub white: (ScaledFloat, ScaledFloat),
    pub red: (ScaledFloat, ScaledFloat),
    pub green: (ScaledFloat, ScaledFloat),
    pub blue: (ScaledFloat, ScaledFloat),
}

impl SourceChromaticities {
    /// Quantizes each `(x, y)` coordinate pair through `ScaledFloat::new`.
    pub fn new(white: (f32, f32), red: (f32, f32), green: (f32, f32), blue: (f32, f32)) -> Self {
        SourceChromaticities {
            white: (ScaledFloat::new(white.0), ScaledFloat::new(white.1)),
            red: (ScaledFloat::new(red.0), ScaledFloat::new(red.1)),
            green: (ScaledFloat::new(green.0), ScaledFloat::new(green.1)),
            blue: (ScaledFloat::new(blue.0), ScaledFloat::new(blue.1)),
        }
    }

    /// Serializes the eight scaled coordinates big-endian in the order
    /// white, red, green, blue (x before y), i.e. the `cHRM` payload layout.
    #[rustfmt::skip]
    pub fn to_be_bytes(self) -> [u8; 32] {
        let white_x = self.white.0.into_scaled().to_be_bytes();
        let white_y = self.white.1.into_scaled().to_be_bytes();
        let red_x   = self.red.0.into_scaled().to_be_bytes();
        let red_y   = self.red.1.into_scaled().to_be_bytes();
        let green_x = self.green.0.into_scaled().to_be_bytes();
        let green_y = self.green.1.into_scaled().to_be_bytes();
        let blue_x  = self.blue.0.into_scaled().to_be_bytes();
        let blue_y  = self.blue.1.into_scaled().to_be_bytes();
        [
            white_x[0], white_x[1], white_x[2], white_x[3],
            white_y[0], white_y[1], white_y[2], white_y[3],
            red_x[0],   red_x[1],   red_x[2],   red_x[3],
            red_y[0],   red_y[1],   red_y[2],   red_y[3],
            green_x[0], green_x[1], green_x[2], green_x[3],
            green_y[0], green_y[1], green_y[2], green_y[3],
            blue_x[0],  blue_x[1],  blue_x[2],  blue_x[3],
            blue_y[0],  blue_y[1],  blue_y[2],  blue_y[3],
        ]
    }

    /// Writes the coordinates as a `cHRM` chunk.
    pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        encoder::write_chunk(w, chunk::cHRM, &self.to_be_bytes())
    }
}
+
/// The rendering intent for an sRGB image.
///
/// Presence of this data also indicates that the image conforms to the sRGB color space.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SrgbRenderingIntent {
    /// For images preferring good adaptation to the output device gamut at the expense of colorimetric accuracy, such as photographs.
    Perceptual = 0,
    /// For images requiring colour appearance matching (relative to the output device white point), such as logos.
    RelativeColorimetric = 1,
    /// For images preferring preservation of saturation at the expense of hue and lightness, such as charts and graphs.
    Saturation = 2,
    /// For images requiring preservation of absolute colorimetry, such as previews of images destined for a different output device (proofs).
    AbsoluteColorimetric = 3,
}

impl SrgbRenderingIntent {
    /// Returns the numeric value stored in the `sRGB` chunk.
    pub(crate) fn into_raw(self) -> u8 {
        self as u8
    }

    /// Inverse of `into_raw`; `None` for values outside 0..=3.
    pub(crate) fn from_raw(raw: u8) -> Option<Self> {
        match raw {
            0 => Some(SrgbRenderingIntent::Perceptual),
            1 => Some(SrgbRenderingIntent::RelativeColorimetric),
            2 => Some(SrgbRenderingIntent::Saturation),
            3 => Some(SrgbRenderingIntent::AbsoluteColorimetric),
            _ => None,
        }
    }

    /// Writes the intent as a one-byte `sRGB` chunk.
    pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        encoder::write_chunk(w, chunk::sRGB, &[self.into_raw()])
    }
}
+
/// PNG info struct
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct Info<'a> {
    /// Image width in pixels.
    pub width: u32,
    /// Image height in pixels.
    pub height: u32,
    /// Number of bits per sample.
    pub bit_depth: BitDepth,
    /// How colors are stored in the image.
    pub color_type: ColorType,
    /// Whether the image data is interlaced.
    pub interlaced: bool,
    /// The image's `tRNS` chunk, if present; contains the alpha channel of the image's palette, 1 byte per entry.
    pub trns: Option<Cow<'a, [u8]>>,
    /// Physical pixel density, from the `pHYs` chunk if present.
    pub pixel_dims: Option<PixelDimensions>,
    /// The image's `PLTE` chunk, if present; contains the RGB channels (in that order) of the image's palettes, 3 bytes per entry (1 per channel).
    pub palette: Option<Cow<'a, [u8]>>,
    /// The contents of the image's gAMA chunk, if present.
    /// Prefer `source_gamma` to also get the derived replacement gamma from sRGB chunks.
    pub gama_chunk: Option<ScaledFloat>,
    /// The contents of the image's `cHRM` chunk, if present.
    /// Prefer `source_chromaticities` to also get the derived replacements from sRGB chunks.
    pub chrm_chunk: Option<SourceChromaticities>,

    /// APNG `fcTL` data for the current frame, if any.
    pub frame_control: Option<FrameControl>,
    /// APNG `acTL` data, if the image is animated.
    pub animation_control: Option<AnimationControl>,
    /// Compression level used (relevant when encoding).
    pub compression: Compression,
    /// Gamma of the source system.
    /// Set by both `gAMA` as well as to a replacement by `sRGB` chunk.
    pub source_gamma: Option<ScaledFloat>,
    /// Chromaticities of the source system.
    /// Set by both `cHRM` as well as to a replacement by `sRGB` chunk.
    pub source_chromaticities: Option<SourceChromaticities>,
    /// The rendering intent of an SRGB image.
    ///
    /// Presence of this value also indicates that the image conforms to the SRGB color space.
    pub srgb: Option<SrgbRenderingIntent>,
    /// The ICC profile for the image.
    pub icc_profile: Option<Cow<'a, [u8]>>,
    /// tEXt field
    pub uncompressed_latin1_text: Vec<TEXtChunk>,
    /// zTXt field
    pub compressed_latin1_text: Vec<ZTXtChunk>,
    /// iTXt field
    pub utf8_text: Vec<ITXtChunk>,
}
+
impl Default for Info<'_> {
    /// An empty 0×0, 8-bit grayscale image with no ancillary chunk data.
    fn default() -> Info<'static> {
        Info {
            width: 0,
            height: 0,
            bit_depth: BitDepth::Eight,
            color_type: ColorType::Grayscale,
            interlaced: false,
            palette: None,
            trns: None,
            gama_chunk: None,
            chrm_chunk: None,
            pixel_dims: None,
            frame_control: None,
            animation_control: None,
            // Default to `deflate::Compression::Fast` and `filter::FilterType::Sub`
            // to maintain backward compatible output.
            compression: Compression::Fast,
            source_gamma: None,
            source_chromaticities: None,
            srgb: None,
            icc_profile: None,
            uncompressed_latin1_text: Vec::new(),
            compressed_latin1_text: Vec::new(),
            utf8_text: Vec::new(),
        }
    }
}
+
impl Info<'_> {
    /// A utility constructor for a default info with width and height.
    pub fn with_size(width: u32, height: u32) -> Self {
        Info {
            width,
            height,
            ..Default::default()
        }
    }

    /// Size of the image, width then height.
    pub fn size(&self) -> (u32, u32) {
        (self.width, self.height)
    }

    /// Returns true if the image is an APNG image.
    pub fn is_animated(&self) -> bool {
        self.frame_control.is_some() && self.animation_control.is_some()
    }

    /// Returns the animation control information of the image.
    pub fn animation_control(&self) -> Option<&AnimationControl> {
        self.animation_control.as_ref()
    }

    /// Returns the frame control information of the current frame.
    pub fn frame_control(&self) -> Option<&FrameControl> {
        self.frame_control.as_ref()
    }

    /// Returns the number of bits per pixel.
    pub fn bits_per_pixel(&self) -> usize {
        self.color_type.samples() * self.bit_depth as usize
    }

    /// Returns the number of bytes per pixel.
    pub fn bytes_per_pixel(&self) -> usize {
        // If adjusting this for expansion or other transformation passes, remember to keep the old
        // implementation for bpp_in_prediction, which is internal to the png specification.
        self.color_type.samples() * ((self.bit_depth as usize + 7) >> 3)
    }

    /// Return the number of bytes for this pixel used in prediction.
    ///
    /// Some filters use prediction, over the raw bytes of a scanline. Where a previous pixel is
    /// required for such forms the specification instead references previous bytes. That is, for
    /// a gray pixel of bit depth 2, the pixel used in prediction is actually 4 pixels prior. This
    /// has the consequence that the number of possible values is rather small. To make this fact
    /// more obvious in the type system and the optimizer we use an explicit enum here.
    pub(crate) fn bpp_in_prediction(&self) -> BytesPerPixel {
        match self.bytes_per_pixel() {
            1 => BytesPerPixel::One,
            2 => BytesPerPixel::Two,
            3 => BytesPerPixel::Three,
            4 => BytesPerPixel::Four,
            6 => BytesPerPixel::Six,   // Only rgb×16bit
            8 => BytesPerPixel::Eight, // Only rgba×16bit
            _ => unreachable!("Not a possible byte rounded pixel width"),
        }
    }

    /// Returns the number of bytes needed for one deinterlaced image.
    pub fn raw_bytes(&self) -> usize {
        self.height as usize * self.raw_row_length()
    }

    /// Returns the number of bytes needed for one deinterlaced row.
    pub fn raw_row_length(&self) -> usize {
        self.raw_row_length_from_width(self.width)
    }

    // Overflow-checked variant of `raw_row_length`; see
    // `ColorType::checked_raw_row_length`.
    pub(crate) fn checked_raw_row_length(&self) -> Option<usize> {
        self.color_type
            .checked_raw_row_length(self.bit_depth, self.width)
    }

    /// Returns the number of bytes needed for one deinterlaced row of width `width`.
    pub fn raw_row_length_from_width(&self, width: u32) -> usize {
        self.color_type
            .raw_row_length_from_width(self.bit_depth, width)
    }

    /// Encode this header to the writer.
    ///
    /// Note that this does _not_ include the PNG signature, it starts with the IHDR chunk and then
    /// includes other chunks that were added to the header.
    pub fn encode<W: Write>(&self, mut w: W) -> encoder::Result<()> {
        // Encode the IHDR chunk: width, height (big-endian), then the five
        // one-byte fields. Bytes 10 and 11 are deliberately left at 0.
        let mut data = [0; 13];
        data[..4].copy_from_slice(&self.width.to_be_bytes());
        data[4..8].copy_from_slice(&self.height.to_be_bytes());
        data[8] = self.bit_depth as u8;
        data[9] = self.color_type as u8;
        data[12] = self.interlaced as u8;
        encoder::write_chunk(&mut w, chunk::IHDR, &data)?;
        // Encode the pHYs chunk
        if let Some(pd) = self.pixel_dims {
            let mut phys_data = [0; 9];
            phys_data[0..4].copy_from_slice(&pd.xppu.to_be_bytes());
            phys_data[4..8].copy_from_slice(&pd.yppu.to_be_bytes());
            match pd.unit {
                Unit::Meter => phys_data[8] = 1,
                Unit::Unspecified => phys_data[8] = 0,
            }
            encoder::write_chunk(&mut w, chunk::pHYs, &phys_data)?;
        }

        if let Some(p) = &self.palette {
            encoder::write_chunk(&mut w, chunk::PLTE, p)?;
        };

        if let Some(t) = &self.trns {
            encoder::write_chunk(&mut w, chunk::tRNS, t)?;
        }

        // If specified, the sRGB information overrides the source gamma and chromaticities.
        if let Some(srgb) = &self.srgb {
            let gamma = crate::srgb::substitute_gamma();
            let chromaticities = crate::srgb::substitute_chromaticities();
            srgb.encode(&mut w)?;
            gamma.encode_gama(&mut w)?;
            chromaticities.encode(&mut w)?;
        } else {
            if let Some(gma) = self.source_gamma {
                gma.encode_gama(&mut w)?
            }
            if let Some(chrms) = self.source_chromaticities {
                chrms.encode(&mut w)?;
            }
        }
        if let Some(actl) = self.animation_control {
            actl.encode(&mut w)?;
        }

        // Text chunks go last: tEXt, then zTXt, then iTXt.
        for text_chunk in &self.uncompressed_latin1_text {
            text_chunk.encode(&mut w)?;
        }

        for text_chunk in &self.compressed_latin1_text {
            text_chunk.encode(&mut w)?;
        }

        for text_chunk in &self.utf8_text {
            text_chunk.encode(&mut w)?;
        }

        Ok(())
    }
}
+
impl BytesPerPixel {
    /// Widens the enum's numeric value back to a `usize`.
    pub(crate) fn into_usize(self) -> usize {
        self as usize
    }
}
+
bitflags! {
    /// Output transformations
    ///
    /// Many flags from libpng are not yet supported. A PR discussing/adding them would be nice.
    ///
    #[doc = "
    ```c
    /// Discard the alpha channel
    const STRIP_ALPHA = 0x0002; // read only
    /// Expand 1; 2 and 4-bit samples to bytes
    const PACKING = 0x0004; // read and write
    /// Change order of packed pixels to LSB first
    const PACKSWAP = 0x0008; // read and write
    /// Invert monochrome images
    const INVERT_MONO = 0x0020; // read and write
    /// Normalize pixels to the sBIT depth
    const SHIFT = 0x0040; // read and write
    /// Flip RGB to BGR; RGBA to BGRA
    const BGR = 0x0080; // read and write
    /// Flip RGBA to ARGB or GA to AG
    const SWAP_ALPHA = 0x0100; // read and write
    /// Byte-swap 16-bit samples
    const SWAP_ENDIAN = 0x0200; // read and write
    /// Change alpha from opacity to transparency
    const INVERT_ALPHA = 0x0400; // read and write
    const STRIP_FILLER = 0x0800; // write only
    const STRIP_FILLER_BEFORE = 0x0800; // write only
    const STRIP_FILLER_AFTER = 0x1000; // write only
    const GRAY_TO_RGB = 0x2000; // read only
    const EXPAND_16 = 0x4000; // read only
    /// Similar to STRIP_16 but in libpng considering gamma?
    /// Not entirely sure the documentation says it is more
    /// accurate but doesn't say precisely how.
    const SCALE_16 = 0x8000; // read only
    ```
    "]
    // Only the flags below are implemented; the libpng values in the table
    // above are kept for reference.
    pub struct Transformations: u32 {
        /// No transformation
        const IDENTITY = 0x00000; // read and write */
        /// Strip 16-bit samples to 8 bits
        const STRIP_16 = 0x00001; // read only */
        /// Expand paletted images to RGB; expand grayscale images of
        /// less than 8-bit depth to 8-bit depth; and expand tRNS chunks
        /// to alpha channels.
        const EXPAND = 0x00010; // read only */
        /// Expand paletted images to include an alpha channel. Implies `EXPAND`.
        const ALPHA = 0x10000; // read only */
    }
}
+
impl Transformations {
    /// Transform every input to 8bit grayscale or color.
    ///
    /// This sets `EXPAND` and `STRIP_16` which is similar to the default transformation used by
    /// this library prior to `0.17`.
    pub fn normalize_to_color8() -> Transformations {
        Transformations::EXPAND | Transformations::STRIP_16
    }
}

/// Instantiate the default transformations, the identity transform.
impl Default for Transformations {
    // `IDENTITY` is the empty flag set (0x0): no transformation at all.
    fn default() -> Transformations {
        Transformations::IDENTITY
    }
}
+
/// An error caused by the caller handing this library an invalid parameter.
#[derive(Debug)]
pub struct ParameterError {
    inner: ParameterErrorKind,
}

#[derive(Debug)]
pub(crate) enum ParameterErrorKind {
    /// A provided buffer must be have the exact size to hold the image data. Where the buffer can
    /// be allocated by the caller, they must ensure that it has a minimum size as hinted previously.
    /// Even though the size is calculated from image data, this does counts as a parameter error
    /// because they must react to a value produced by this library, which can have been subjected
    /// to limits.
    ImageBufferSize { expected: usize, actual: usize },
    /// A bit like return `None` from an iterator.
    /// We use it to differentiate between failing to seek to the next image in a sequence and the
    /// absence of a next image. This is an error of the caller because they should have checked
    /// the number of images by inspecting the header data returned when opening the image. This
    /// library will perform the checks necessary to ensure that data was accurate or error with a
    /// format error otherwise.
    PolledAfterEndOfImage,
}

impl From<ParameterErrorKind> for ParameterError {
    fn from(inner: ParameterErrorKind) -> Self {
        Self { inner }
    }
}

impl fmt::Display for ParameterError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        match &self.inner {
            ParameterErrorKind::ImageBufferSize { expected, actual } => {
                write!(fmt, "wrong data size, expected {} got {}", expected, actual)
            }
            ParameterErrorKind::PolledAfterEndOfImage => {
                fmt.write_str("End of image has been reached")
            }
        }
    }
}
diff --git a/vendor/png/src/decoder/mod.rs b/vendor/png/src/decoder/mod.rs
new file mode 100644
index 0000000..09772fe
--- /dev/null
+++ b/vendor/png/src/decoder/mod.rs
@@ -0,0 +1,961 @@
+mod stream;
+mod zlib;
+
+pub use self::stream::{DecodeOptions, Decoded, DecodingError, StreamingDecoder};
+use self::stream::{FormatErrorInner, CHUNCK_BUFFER_SIZE};
+
+use std::io::{BufRead, BufReader, Read};
+use std::mem;
+use std::ops::Range;
+
+use crate::chunk;
+use crate::common::{
+ BitDepth, BytesPerPixel, ColorType, Info, ParameterErrorKind, Transformations,
+};
+use crate::filter::{unfilter, FilterType};
+use crate::utils;
+
+/*
+pub enum InterlaceHandling {
+ /// Outputs the raw rows
+ RawRows,
+ /// Fill missing the pixels from the existing ones
+ Rectangle,
+ /// Only fill the needed pixels
+ Sparkle
+}
+*/
+
/// Output info.
///
/// This describes one particular frame of the image that was written into the output buffer.
/// Produced by [`Reader::next_frame`] after a successful decode.
#[derive(Debug, PartialEq, Eq)]
pub struct OutputInfo {
    /// The pixel width of this frame.
    pub width: u32,
    /// The pixel height of this frame.
    pub height: u32,
    /// The chosen output color type.
    pub color_type: ColorType,
    /// The chosen output bit depth.
    pub bit_depth: BitDepth,
    /// The byte count of each scan line in the image.
    pub line_size: usize,
}
+
impl OutputInfo {
    /// Returns the size in bytes needed to hold a decoded frame.
    ///
    /// If the output buffer was larger, then bytes after this count should be ignored. They may
    /// still have been changed.
    pub fn buffer_size(&self) -> usize {
        self.line_size * self.height as usize
    }
}
+
#[derive(Clone, Copy, Debug)]
/// Limits on the resources the `Decoder` is allowed to use.
pub struct Limits {
    /// Maximum number of bytes the decoder is allowed to allocate; the default is 64 MiB.
    pub bytes: usize,
}
+
+impl Default for Limits {
+ fn default() -> Limits {
+ Limits {
+ bytes: 1024 * 1024 * 64,
+ }
+ }
+}
+
/// PNG Decoder
///
/// Configuration stage of decoding; consumed by `read_info` to produce a [`Reader`].
pub struct Decoder<R: Read> {
    read_decoder: ReadDecoder<R>,
    /// Output transformations
    transform: Transformations,
    /// Limits on resources the Decoder is allowed to use
    limits: Limits,
}
+
/// A row of data with interlace information attached.
#[derive(Clone, Copy, Debug)]
pub struct InterlacedRow<'data> {
    /// The decoded, post-transformation pixel data of this row.
    data: &'data [u8],
    /// Which pass/line this row belongs to (or `Null` when not interlaced).
    interlace: InterlaceInfo,
}
+
impl<'data> InterlacedRow<'data> {
    /// The row's pixel data.
    pub fn data(&self) -> &'data [u8] {
        self.data
    }

    /// The interlace metadata describing where this row belongs in the image.
    pub fn interlace(&self) -> InterlaceInfo {
        self.interlace
    }
}
+
/// PNG (2003) specifies two interlace modes, but reserves future extensions.
#[derive(Clone, Copy, Debug)]
pub enum InterlaceInfo {
    /// the null method means no interlacing
    Null,
    /// Adam7 derives its name from doing 7 passes over the image, only decoding a subset of all pixels in each pass.
    /// The following table shows pictorially what parts of each 8x8 area of the image is found in each pass:
    ///
    /// 1 6 4 6 2 6 4 6
    /// 7 7 7 7 7 7 7 7
    /// 5 6 5 6 5 6 5 6
    /// 7 7 7 7 7 7 7 7
    /// 3 6 4 6 3 6 4 6
    /// 7 7 7 7 7 7 7 7
    /// 5 6 5 6 5 6 5 6
    /// 7 7 7 7 7 7 7 7
    ///
    /// The `pass`/`line`/`width` values are produced by `utils::Adam7Iterator`.
    Adam7 { pass: u8, line: u32, width: u32 },
}
+
/// A row of data without interlace information.
#[derive(Clone, Copy, Debug)]
pub struct Row<'data> {
    /// The decoded, post-transformation pixel data of this row.
    data: &'data [u8],
}
+
impl<'data> Row<'data> {
    /// The row's pixel data.
    pub fn data(&self) -> &'data [u8] {
        self.data
    }
}
+
+impl<R: Read> Decoder<R> {
+ /// Create a new decoder configuration with default limits.
+ pub fn new(r: R) -> Decoder<R> {
+ Decoder::new_with_limits(r, Limits::default())
+ }
+
+ /// Create a new decoder configuration with custom limits.
+ pub fn new_with_limits(r: R, limits: Limits) -> Decoder<R> {
+ Decoder {
+ read_decoder: ReadDecoder {
+ reader: BufReader::with_capacity(CHUNCK_BUFFER_SIZE, r),
+ decoder: StreamingDecoder::new(),
+ at_eof: false,
+ },
+ transform: Transformations::IDENTITY,
+ limits,
+ }
+ }
+
+ /// Create a new decoder configuration with custom `DecodeOptions`.
+ pub fn new_with_options(r: R, decode_options: DecodeOptions) -> Decoder<R> {
+ Decoder {
+ read_decoder: ReadDecoder {
+ reader: BufReader::with_capacity(CHUNCK_BUFFER_SIZE, r),
+ decoder: StreamingDecoder::new_with_options(decode_options),
+ at_eof: false,
+ },
+ transform: Transformations::IDENTITY,
+ limits: Limits::default(),
+ }
+ }
+
    /// Limit resource usage.
    ///
    /// Note that your allocations, e.g. when reading into a pre-allocated buffer, are __NOT__
    /// considered part of the limits. Nevertheless, required intermediate buffers such as for
    /// singular lines are checked against the limit.
    ///
    /// Note that this is a best-effort basis.
    ///
    /// ```
    /// use std::fs::File;
    /// use png::{Decoder, Limits};
    /// // This image is 32×32, 1bit per pixel. The reader buffers one row which requires 4 bytes.
    /// let mut limits = Limits::default();
    /// limits.bytes = 3;
    /// let mut decoder = Decoder::new_with_limits(File::open("tests/pngsuite/basi0g01.png").unwrap(), limits);
    /// assert!(decoder.read_info().is_err());
    ///
    /// // This image is 32x32 pixels, so the decoder will allocate less than 10Kib
    /// let mut limits = Limits::default();
    /// limits.bytes = 10*1024;
    /// let mut decoder = Decoder::new_with_limits(File::open("tests/pngsuite/basi0g01.png").unwrap(), limits);
    /// assert!(decoder.read_info().is_ok());
    /// ```
    pub fn set_limits(&mut self, limits: Limits) {
        self.limits = limits;
    }
+
+ /// Read the PNG header and return the information contained within.
+ ///
+ /// Most image metadata will not be read until `read_info` is called, so those fields will be
+ /// None or empty.
+ pub fn read_header_info(&mut self) -> Result<&Info, DecodingError> {
+ let mut buf = Vec::new();
+ while self.read_decoder.info().is_none() {
+ buf.clear();
+ if self.read_decoder.decode_next(&mut buf)?.is_none() {
+ return Err(DecodingError::Format(
+ FormatErrorInner::UnexpectedEof.into(),
+ ));
+ }
+ }
+ Ok(self.read_decoder.info().unwrap())
+ }
+
    /// Reads all meta data until the first IDAT chunk
    ///
    /// Consumes the configuration and returns a [`Reader`] positioned at the image data, after
    /// validating that the raw and output row/buffer sizes are representable and within limits.
    pub fn read_info(mut self) -> Result<Reader<R>, DecodingError> {
        self.read_header_info()?;

        let mut reader = Reader {
            decoder: self.read_decoder,
            bpp: BytesPerPixel::One,
            subframe: SubframeInfo::not_yet_init(),
            fctl_read: 0,
            next_frame: SubframeIdx::Initial,
            prev: Vec::new(),
            current: Vec::new(),
            scan_start: 0,
            transform: self.transform,
            scratch_buffer: Vec::new(),
            limits: self.limits,
        };

        // Check if the decoding buffer of a single raw line has a valid size.
        if reader.info().checked_raw_row_length().is_none() {
            return Err(DecodingError::LimitsExceeded);
        }

        // Check if the output buffer has a valid size.
        let (width, height) = reader.info().size();
        let (color, depth) = reader.output_color_type();
        // Minus one: the raw row length includes the filter byte, the output row does not.
        let rowlen = color
            .checked_raw_row_length(depth, width)
            .ok_or(DecodingError::LimitsExceeded)?
            - 1;
        let height: usize =
            std::convert::TryFrom::try_from(height).map_err(|_| DecodingError::LimitsExceeded)?;
        if rowlen.checked_mul(height).is_none() {
            return Err(DecodingError::LimitsExceeded);
        }

        reader.read_until_image_data()?;
        Ok(reader)
    }
+
    /// Set the allowed and performed transformations.
    ///
    /// A transformation is a pre-processing on the raw image data modifying content or encoding.
    /// Many options have an impact on memory or CPU usage during decoding.
    ///
    /// The value is captured when `read_info` constructs the `Reader`, so it must be set before
    /// that call to take effect.
    pub fn set_transformations(&mut self, transform: Transformations) {
        self.transform = transform;
    }
+
    /// Set the decoder to ignore all text chunks while parsing.
    ///
    /// Forwarded to the underlying streaming decoder.
    ///
    /// eg.
    /// ```
    /// use std::fs::File;
    /// use png::Decoder;
    /// let mut decoder = Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap());
    /// decoder.set_ignore_text_chunk(true);
    /// assert!(decoder.read_info().is_ok());
    /// ```
    pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) {
        self.read_decoder
            .decoder
            .set_ignore_text_chunk(ignore_text_chunk);
    }
+
+ /// Set the decoder to ignore and not verify the Adler-32 checksum
+ /// and CRC code.
+ pub fn ignore_checksums(&mut self, ignore_checksums: bool) {
+ self.read_decoder
+ .decoder
+ .set_ignore_adler32(ignore_checksums);
+ self.read_decoder.decoder.set_ignore_crc(ignore_checksums);
+ }
+}
+
/// Couples the streaming decoder with a buffered reader and tracks end-of-image.
struct ReadDecoder<R: Read> {
    reader: BufReader<R>,
    decoder: StreamingDecoder,
    /// Set once `Decoded::ImageEnd` has been observed; no further reads happen after that.
    at_eof: bool,
}
+
impl<R: Read> ReadDecoder<R> {
    /// Returns the next decoded chunk. If the chunk is an ImageData chunk, its contents are written
    /// into image_data.
    ///
    /// Returns `Ok(None)` once the image end has been reached.
    fn decode_next(&mut self, image_data: &mut Vec<u8>) -> Result<Option<Decoded>, DecodingError> {
        while !self.at_eof {
            let (consumed, result) = {
                let buf = self.reader.fill_buf()?;
                if buf.is_empty() {
                    // The underlying reader ran dry before the IEND chunk.
                    return Err(DecodingError::Format(
                        FormatErrorInner::UnexpectedEof.into(),
                    ));
                }
                self.decoder.update(buf, image_data)?
            };
            self.reader.consume(consumed);
            match result {
                Decoded::Nothing => (),
                Decoded::ImageEnd => self.at_eof = true,
                result => return Ok(Some(result)),
            }
        }
        Ok(None)
    }

    /// Consumes and discards the rest of the current frame's image data.
    ///
    /// Returns as soon as the streaming decoder reports the image-data sequence as flushed;
    /// errors if input ends before that.
    fn finish_decoding(&mut self) -> Result<(), DecodingError> {
        while !self.at_eof {
            let buf = self.reader.fill_buf()?;
            if buf.is_empty() {
                return Err(DecodingError::Format(
                    FormatErrorInner::UnexpectedEof.into(),
                ));
            }
            // Decoded bytes go into a throwaway buffer; only the completion event matters here.
            let (consumed, event) = self.decoder.update(buf, &mut vec![])?;
            self.reader.consume(consumed);
            match event {
                Decoded::Nothing => (),
                Decoded::ImageEnd => self.at_eof = true,
                // ignore more data
                Decoded::ChunkComplete(_, _) | Decoded::ChunkBegin(_, _) | Decoded::ImageData => {}
                Decoded::ImageDataFlushed => return Ok(()),
                Decoded::PartialChunk(_) => {}
                new => unreachable!("{:?}", new),
            }
        }

        // Reaching EOF without an `ImageDataFlushed` event means the stream was truncated.
        Err(DecodingError::Format(
            FormatErrorInner::UnexpectedEof.into(),
        ))
    }

    /// Image info, available once the header has been decoded.
    fn info(&self) -> Option<&Info> {
        self.decoder.info.as_ref()
    }
}
+
/// PNG reader (mostly high-level interface)
///
/// Provides a high level that iterates over lines or whole images.
pub struct Reader<R: Read> {
    decoder: ReadDecoder<R>,
    /// Bytes per pixel of the raw (pre-transformation) data, used for unfiltering.
    bpp: BytesPerPixel,
    subframe: SubframeInfo,
    /// Number of frame control chunks read.
    /// By the APNG specification the total number must equal the count specified in the animation
    /// control chunk. The IDAT image _may_ have such a chunk applying to it.
    fctl_read: u32,
    next_frame: SubframeIdx,
    /// Previous raw line
    prev: Vec<u8>,
    /// Current raw line
    current: Vec<u8>,
    /// Start index of the current scan line.
    scan_start: usize,
    /// Output transformations
    transform: Transformations,
    /// This buffer is only used so that `next_row` and `next_interlaced_row` can return reference
    /// to a byte slice. In a future version of this library, this buffer will be removed and
    /// `next_row` and `next_interlaced_row` will write directly into a user provided output buffer.
    scratch_buffer: Vec<u8>,
    /// How many resources we can spend (for example, on allocation).
    limits: Limits,
}
+
/// The subframe specific information.
///
/// In APNG the frames are constructed by combining previous frame and a new subframe (through a
/// combination of `dispose_op` and `overlay_op`). These sub frames specify individual dimension
/// information and reuse the global interlace options. This struct encapsulates the state of where
/// in a particular IDAT-frame or subframe we are.
struct SubframeInfo {
    width: u32,
    height: u32,
    /// Byte length of one raw scanline, including the leading filter-type byte.
    rowlen: usize,
    interlace: InterlaceIter,
    /// True once all image data of this subframe has been read and flushed.
    consumed_and_flushed: bool,
}
+
/// Row iteration state: a plain row counter for non-interlaced images, or the Adam7 pass
/// iterator for interlaced ones.
#[derive(Clone)]
enum InterlaceIter {
    None(Range<u32>),
    Adam7(utils::Adam7Iterator),
}
+
/// Denote a frame as given by sequence numbers.
///
/// The derived ordering follows decode order: `Initial < Some(n) < End`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum SubframeIdx {
    /// The initial frame in an IDAT chunk without fcTL chunk applying to it.
    /// Note that this variant precedes `Some` as IDAT frames precede fdAT frames and all fdAT
    /// frames must have a fcTL applying to it.
    Initial,
    /// An IDAT frame with fcTL or an fdAT frame.
    Some(u32),
    /// The past-the-end index.
    End,
}
+
+impl<R: Read> Reader<R> {
    /// Reads all meta data until the next frame data starts.
    /// Requires IHDR before the IDAT and fcTL before fdAT.
    fn read_until_image_data(&mut self) -> Result<(), DecodingError> {
        loop {
            // This is somewhat ugly. The API requires us to pass a buffer to decode_next but we
            // know that we will stop before reading any image data from the stream. Thus pass an
            // empty buffer and assert that remains empty.
            let mut buf = Vec::new();
            let state = self.decoder.decode_next(&mut buf)?;
            assert!(buf.is_empty());

            match state {
                Some(Decoded::ChunkBegin(_, chunk::IDAT))
                | Some(Decoded::ChunkBegin(_, chunk::fdAT)) => break,
                Some(Decoded::FrameControl(_)) => {
                    self.subframe = SubframeInfo::new(self.info());
                    // The next frame is the one to which this chunk applies.
                    self.next_frame = SubframeIdx::Some(self.fctl_read);
                    // TODO: what about overflow here? That would imply there are more fctl chunks
                    // than can be specified in the animation control but also that we have read
                    // several gigabytes of data.
                    self.fctl_read += 1;
                }
                None => {
                    return Err(DecodingError::Format(
                        FormatErrorInner::MissingImageData.into(),
                    ))
                }
                // Ignore all other chunk events. Any other chunk may be between IDAT chunks, fdAT
                // chunks and their control chunks.
                _ => {}
            }
        }

        let info = self
            .decoder
            .info()
            .ok_or(DecodingError::Format(FormatErrorInner::MissingIhdr.into()))?;
        self.bpp = info.bpp_in_prediction();
        self.subframe = SubframeInfo::new(info);

        // Allocate output buffer.
        // A single output line must fit within the configured byte limit.
        let buflen = self.output_line_size(self.subframe.width);
        if buflen > self.limits.bytes {
            return Err(DecodingError::LimitsExceeded);
        }

        // The "previous" scanline starts out as all zeros for filter reconstruction.
        self.prev.clear();
        self.prev.resize(self.subframe.rowlen, 0);

        Ok(())
    }
+
    /// Get information on the image.
    ///
    /// The structure will change as new frames of an animated image are decoded.
    pub fn info(&self) -> &Info {
        // The unwrap is infallible: a `Reader` is only constructed after the header was decoded,
        // so the decoder's info is always present.
        self.decoder.info().unwrap()
    }
+
+ /// Decodes the next frame into `buf`.
+ ///
+ /// Note that this decodes raw subframes that need to be mixed according to blend-op and
+ /// dispose-op by the caller.
+ ///
+ /// The caller must always provide a buffer large enough to hold a complete frame (the APNG
+ /// specification restricts subframes to the dimensions given in the image header). The region
+ /// that has been written be checked afterwards by calling `info` after a successful call and
+ /// inspecting the `frame_control` data. This requirement may be lifted in a later version of
+ /// `png`.
+ ///
+ /// Output lines will be written in row-major, packed matrix with width and height of the read
+ /// frame (or subframe), all samples are in big endian byte order where this matters.
+ pub fn next_frame(&mut self, buf: &mut [u8]) -> Result<OutputInfo, DecodingError> {
+ let subframe_idx = match self.decoder.info().unwrap().frame_control() {
+ None => SubframeIdx::Initial,
+ Some(_) => SubframeIdx::Some(self.fctl_read - 1),
+ };
+
+ if self.next_frame == SubframeIdx::End {
+ return Err(DecodingError::Parameter(
+ ParameterErrorKind::PolledAfterEndOfImage.into(),
+ ));
+ } else if self.next_frame != subframe_idx {
+ // Advance until we've read the info / fcTL for this frame.
+ self.read_until_image_data()?;
+ }
+
+ if buf.len() < self.output_buffer_size() {
+ return Err(DecodingError::Parameter(
+ ParameterErrorKind::ImageBufferSize {
+ expected: buf.len(),
+ actual: self.output_buffer_size(),
+ }
+ .into(),
+ ));
+ }
+
+ let (color_type, bit_depth) = self.output_color_type();
+ let output_info = OutputInfo {
+ width: self.subframe.width,
+ height: self.subframe.height,
+ color_type,
+ bit_depth,
+ line_size: self.output_line_size(self.subframe.width),
+ };
+
+ self.current.clear();
+ self.scan_start = 0;
+ let width = self.info().width;
+ if self.info().interlaced {
+ while let Some(InterlacedRow {
+ data: row,
+ interlace,
+ ..
+ }) = self.next_interlaced_row()?
+ {
+ let (line, pass) = match interlace {
+ InterlaceInfo::Adam7 { line, pass, .. } => (line, pass),
+ InterlaceInfo::Null => unreachable!("expected interlace information"),
+ };
+ let samples = color_type.samples() as u8;
+ utils::expand_pass(buf, width, row, pass, line, samples * (bit_depth as u8));
+ }
+ } else {
+ for row in buf
+ .chunks_exact_mut(output_info.line_size)
+ .take(self.subframe.height as usize)
+ {
+ self.next_interlaced_row_impl(self.subframe.rowlen, row)?;
+ }
+ }
+
+ // Advance over the rest of data for this (sub-)frame.
+ if !self.subframe.consumed_and_flushed {
+ self.decoder.finish_decoding()?;
+ }
+
+ // Advance our state to expect the next frame.
+ let past_end_subframe = self
+ .info()
+ .animation_control()
+ .map(|ac| ac.num_frames)
+ .unwrap_or(0);
+ self.next_frame = match self.next_frame {
+ SubframeIdx::End => unreachable!("Next frame called when already at image end"),
+ // Reached the end of non-animated image.
+ SubframeIdx::Initial if past_end_subframe == 0 => SubframeIdx::End,
+ // An animated image, expecting first subframe.
+ SubframeIdx::Initial => SubframeIdx::Some(0),
+ // This was the last subframe, slightly fuzzy condition in case of programmer error.
+ SubframeIdx::Some(idx) if past_end_subframe <= idx + 1 => SubframeIdx::End,
+ // Expecting next subframe.
+ SubframeIdx::Some(idx) => SubframeIdx::Some(idx + 1),
+ };
+
+ Ok(output_info)
+ }
+
+ /// Returns the next processed row of the image
+ pub fn next_row(&mut self) -> Result<Option<Row>, DecodingError> {
+ self.next_interlaced_row()
+ .map(|v| v.map(|v| Row { data: v.data }))
+ }
+
    /// Returns the next processed row of the image
    ///
    /// The returned slice borrows from an internal scratch buffer that is reused between calls.
    pub fn next_interlaced_row(&mut self) -> Result<Option<InterlacedRow>, DecodingError> {
        let (rowlen, interlace) = match self.next_pass() {
            Some((rowlen, interlace)) => (rowlen, interlace),
            None => return Ok(None),
        };

        // Within an Adam7 pass the row is narrower than the full subframe width.
        let width = if let InterlaceInfo::Adam7 { width, .. } = interlace {
            width
        } else {
            self.subframe.width
        };
        let output_line_size = self.output_line_size(width);

        // TODO: change the interface of `next_interlaced_row` to take an output buffer instead of
        // making us return a reference to a buffer that we own.
        let mut output_buffer = mem::take(&mut self.scratch_buffer);
        output_buffer.resize(output_line_size, 0u8);
        let ret = self.next_interlaced_row_impl(rowlen, &mut output_buffer);
        // Put the buffer back before propagating errors, so it can be reused next call.
        self.scratch_buffer = output_buffer;
        ret?;

        Ok(Some(InterlacedRow {
            data: &self.scratch_buffer[..output_line_size],
            interlace,
        }))
    }
+
    /// Fetch the next interlaced row and filter it according to our own transformations.
    ///
    /// Reads and unfilters the next raw scanline, then applies the configured output
    /// transformations (palette/grayscale expansion, tRNS alpha, 16-bit stripping) while copying
    /// the result into `output_buffer`.
    fn next_interlaced_row_impl(
        &mut self,
        rowlen: usize,
        output_buffer: &mut [u8],
    ) -> Result<(), DecodingError> {
        self.next_raw_interlaced_row(rowlen)?;
        // Skip the filter-type byte at index 0; `prev` now holds the unfiltered scanline.
        let row = &self.prev[1..rowlen];

        // Apply transformations and write resulting data to buffer.
        let (color_type, bit_depth, trns) = {
            let info = self.info();
            (
                info.color_type,
                info.bit_depth as u8,
                info.trns.is_some() || self.transform.contains(Transformations::ALPHA),
            )
        };
        let expand = self.transform.contains(Transformations::EXPAND)
            || self.transform.contains(Transformations::ALPHA);
        let strip16 = bit_depth == 16 && self.transform.contains(Transformations::STRIP_16);
        let info = self.decoder.info().unwrap();
        // Outer Option: whether alpha output is requested; inner Option: the tRNS payload itself.
        let trns = if trns {
            Some(info.trns.as_deref())
        } else {
            None
        };
        match (color_type, trns) {
            (ColorType::Indexed, _) if expand => {
                output_buffer[..row.len()].copy_from_slice(row);
                expand_paletted(output_buffer, info, trns)?;
            }
            (ColorType::Grayscale | ColorType::GrayscaleAlpha, _) if bit_depth < 8 && expand => {
                output_buffer[..row.len()].copy_from_slice(row);
                expand_gray_u8(output_buffer, info, trns)
            }
            (ColorType::Grayscale | ColorType::Rgb, Some(trns)) if expand => {
                let channels = color_type.samples();
                if bit_depth == 8 {
                    utils::expand_trns_line(row, output_buffer, trns, channels);
                } else if strip16 {
                    utils::expand_trns_and_strip_line16(row, output_buffer, trns, channels);
                } else {
                    assert_eq!(bit_depth, 16);
                    utils::expand_trns_line16(row, output_buffer, trns, channels);
                }
            }
            (
                ColorType::Grayscale | ColorType::GrayscaleAlpha | ColorType::Rgb | ColorType::Rgba,
                _,
            ) if strip16 => {
                // Keep only the high byte of each big-endian 16-bit sample.
                for i in 0..row.len() / 2 {
                    output_buffer[i] = row[2 * i];
                }
            }
            _ => output_buffer.copy_from_slice(row),
        }

        Ok(())
    }
+
    /// Returns the color type and the number of bits per sample
    /// of the data returned by `Reader::next_row` and Reader::frames`.
    pub fn output_color_type(&self) -> (ColorType, BitDepth) {
        use crate::common::ColorType::*;
        let t = self.transform;
        let info = self.info();
        if t == Transformations::IDENTITY {
            (info.color_type, info.bit_depth)
        } else {
            // Bit depth after transformation: 16-bit may be stripped to 8, sub-8-bit expanded to 8.
            let bits = match info.bit_depth as u8 {
                16 if t.intersects(Transformations::STRIP_16) => 8,
                n if n < 8
                    && (t.contains(Transformations::EXPAND)
                        || t.contains(Transformations::ALPHA)) =>
                {
                    8
                }
                n => n,
            };
            // Color type after transformation: expansion resolves palettes and may add alpha.
            let color_type =
                if t.contains(Transformations::EXPAND) || t.contains(Transformations::ALPHA) {
                    let has_trns = info.trns.is_some() || t.contains(Transformations::ALPHA);
                    match info.color_type {
                        Grayscale if has_trns => GrayscaleAlpha,
                        Rgb if has_trns => Rgba,
                        Indexed if has_trns => Rgba,
                        Indexed => Rgb,
                        ct => ct,
                    }
                } else {
                    info.color_type
                };
            (color_type, BitDepth::from_u8(bits).unwrap())
        }
    }
+
+ /// Returns the number of bytes required to hold a deinterlaced image frame
+ /// that is decoded using the given input transformations.
+ pub fn output_buffer_size(&self) -> usize {
+ let (width, height) = self.info().size();
+ let size = self.output_line_size(width);
+ size * height as usize
+ }
+
+ /// Returns the number of bytes required to hold a deinterlaced row.
+ pub fn output_line_size(&self, width: u32) -> usize {
+ let (color, depth) = self.output_color_type();
+ color.raw_row_length_from_width(depth, width) - 1
+ }
+
    /// Advance to the next scanline position, returning its raw row length and interlace info.
    ///
    /// Returns `None` once all rows (or all Adam7 passes) of the current subframe are exhausted.
    fn next_pass(&mut self) -> Option<(usize, InterlaceInfo)> {
        match self.subframe.interlace {
            InterlaceIter::Adam7(ref mut adam7) => {
                let last_pass = adam7.current_pass();
                let (pass, line, width) = adam7.next()?;
                let rowlen = self.info().raw_row_length_from_width(width);
                if last_pass != pass {
                    // Each new pass filters against a zeroed "previous" row of the new length.
                    self.prev.clear();
                    self.prev.resize(rowlen, 0u8);
                }
                Some((rowlen, InterlaceInfo::Adam7 { pass, line, width }))
            }
            InterlaceIter::None(ref mut height) => {
                let _ = height.next()?;
                Some((self.subframe.rowlen, InterlaceInfo::Null))
            }
        }
    }
+
    /// Write the next raw interlaced row into `self.prev`.
    ///
    /// The scanline is filtered against the previous scanline according to the specification.
    fn next_raw_interlaced_row(&mut self, rowlen: usize) -> Result<(), DecodingError> {
        // Read image data until we have at least one full row (but possibly more than one).
        while self.current.len() - self.scan_start < rowlen {
            if self.subframe.consumed_and_flushed {
                return Err(DecodingError::Format(
                    FormatErrorInner::NoMoreImageData.into(),
                ));
            }

            // Clear the current buffer before appending more data.
            if self.scan_start > 0 {
                self.current.drain(..self.scan_start).for_each(drop);
                self.scan_start = 0;
            }

            match self.decoder.decode_next(&mut self.current)? {
                Some(Decoded::ImageData) => {}
                Some(Decoded::ImageDataFlushed) => {
                    self.subframe.consumed_and_flushed = true;
                }
                None => {
                    return Err(DecodingError::Format(
                        if self.current.is_empty() {
                            FormatErrorInner::NoMoreImageData
                        } else {
                            FormatErrorInner::UnexpectedEndOfChunk
                        }
                        .into(),
                    ));
                }
                _ => (),
            }
        }

        // Get a reference to the current row and point scan_start to the next one.
        let row = &mut self.current[self.scan_start..];
        self.scan_start += rowlen;

        // Unfilter the row.
        // Byte 0 of a raw scanline names the filter; the remaining bytes are filtered samples.
        let filter = FilterType::from_u8(row[0]).ok_or(DecodingError::Format(
            FormatErrorInner::UnknownFilterMethod(row[0]).into(),
        ))?;
        unfilter(filter, self.bpp, &self.prev[1..rowlen], &mut row[1..rowlen]);

        // Save the current row for the next pass.
        self.prev[..rowlen].copy_from_slice(&row[..rowlen]);

        Ok(())
    }
+}
+
impl SubframeInfo {
    /// Placeholder state used before `read_until_image_data` initializes the real subframe.
    fn not_yet_init() -> Self {
        SubframeInfo {
            width: 0,
            height: 0,
            rowlen: 0,
            interlace: InterlaceIter::None(0..0),
            consumed_and_flushed: false,
        }
    }

    /// Build the subframe state from the current image info.
    fn new(info: &Info) -> Self {
        // The APNG fcTL overrides width and height.
        // All other data is set by the main info struct.
        let (width, height) = if let Some(fc) = info.frame_control {
            (fc.width, fc.height)
        } else {
            (info.width, info.height)
        };

        let interlace = if info.interlaced {
            InterlaceIter::Adam7(utils::Adam7Iterator::new(width, height))
        } else {
            InterlaceIter::None(0..height)
        };

        SubframeInfo {
            width,
            height,
            rowlen: info.raw_row_length_from_width(width),
            interlace,
            consumed_and_flushed: false,
        }
    }
}
+
/// Expand indexed pixels in `buffer` in place into RGB (or RGBA if `trns` requests alpha),
/// looking indices up in the image palette.
///
/// Out-of-range palette indices expand to opaque black rather than erroring.
fn expand_paletted(
    buffer: &mut [u8],
    info: &Info,
    trns: Option<Option<&[u8]>>,
) -> Result<(), DecodingError> {
    if let Some(palette) = info.palette.as_ref() {
        if let BitDepth::Sixteen = info.bit_depth {
            // This should have been caught earlier but let's check again. Can't hurt.
            Err(DecodingError::Format(
                FormatErrorInner::InvalidColorBitDepth {
                    color_type: ColorType::Indexed,
                    bit_depth: BitDepth::Sixteen,
                }
                .into(),
            ))
        } else {
            let black = [0, 0, 0];
            if let Some(trns) = trns {
                let trns = trns.unwrap_or(&[]);
                // > The tRNS chunk shall not contain more alpha values than there are palette
                // entries, but a tRNS chunk may contain fewer values than there are palette
                // entries. In this case, the alpha value for all remaining palette entries is
                // assumed to be 255.
                //
                // It seems, accepted reading is to fully *ignore* an invalid tRNS as if it were
                // completely empty / all pixels are non-transparent.
                let trns = if trns.len() <= palette.len() / 3 {
                    trns
                } else {
                    &[]
                };

                // 4 output bytes per pixel: RGB from the palette plus the tRNS alpha.
                utils::unpack_bits(buffer, 4, info.bit_depth as u8, |i, chunk| {
                    let (rgb, a) = (
                        palette
                            .get(3 * i as usize..3 * i as usize + 3)
                            .unwrap_or(&black),
                        *trns.get(i as usize).unwrap_or(&0xFF),
                    );
                    chunk[0] = rgb[0];
                    chunk[1] = rgb[1];
                    chunk[2] = rgb[2];
                    chunk[3] = a;
                });
            } else {
                // 3 output bytes per pixel: RGB from the palette, no alpha channel.
                utils::unpack_bits(buffer, 3, info.bit_depth as u8, |i, chunk| {
                    let rgb = palette
                        .get(3 * i as usize..3 * i as usize + 3)
                        .unwrap_or(&black);
                    chunk[0] = rgb[0];
                    chunk[1] = rgb[1];
                    chunk[2] = rgb[2];
                })
            }
            Ok(())
        }
    } else {
        Err(DecodingError::Format(
            FormatErrorInner::PaletteRequired.into(),
        ))
    }
}
+
/// Expand sub-8-bit grayscale pixels in `buffer` in place to 8-bit, optionally deriving an
/// alpha channel from the tRNS gray level.
///
/// `trns` follows the caller's convention: the outer `Option` selects whether an alpha channel
/// is produced at all, the inner `Option` carries the tRNS payload.
fn expand_gray_u8(buffer: &mut [u8], info: &Info, trns: Option<Option<&[u8]>>) {
    let rescale = true;
    let scaling_factor = if rescale {
        // Maps the maximum value representable at this bit depth onto 255.
        (255) / ((1u16 << info.bit_depth as u8) - 1) as u8
    } else {
        1
    };
    if let Some(trns) = trns {
        utils::unpack_bits(buffer, 2, info.bit_depth as u8, |pixel, chunk| {
            chunk[1] = if let Some(trns) = trns {
                // NOTE(review): `trns[0]` panics on an empty slice — presumably tRNS parsing
                // guarantees a non-empty payload for grayscale; confirm in stream.rs.
                if pixel == trns[0] {
                    0
                } else {
                    0xFF
                }
            } else {
                0xFF
            };
            chunk[0] = pixel * scaling_factor
        })
    } else {
        utils::unpack_bits(buffer, 1, info.bit_depth as u8, |val, chunk| {
            chunk[0] = val * scaling_factor
        })
    }
}
+
#[cfg(test)]
mod tests {
    use super::Decoder;
    use std::io::{BufRead, Read, Result};
    use std::mem::discriminant;

    /// A reader that reads at most `n` bytes.
    struct SmalBuf<R: BufRead> {
        inner: R,
        // Maximum number of bytes handed out per read / fill_buf call.
        cap: usize,
    }

    impl<R: BufRead> SmalBuf<R> {
        fn new(inner: R, cap: usize) -> Self {
            SmalBuf { inner, cap }
        }
    }

    impl<R: BufRead> Read for SmalBuf<R> {
        fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
            // Truncate the destination so at most `cap` bytes are read at once.
            let len = buf.len().min(self.cap);
            self.inner.read(&mut buf[..len])
        }
    }

    impl<R: BufRead> BufRead for SmalBuf<R> {
        fn fill_buf(&mut self) -> Result<&[u8]> {
            let buf = self.inner.fill_buf()?;
            let len = buf.len().min(self.cap);
            Ok(&buf[..len])
        }

        fn consume(&mut self, amt: usize) {
            assert!(amt <= self.cap);
            self.inner.consume(amt)
        }
    }

    /// Regression test: decoding must fail with the same error kind regardless of how small the
    /// reads from the underlying stream are (no duplicated data on finish).
    #[test]
    fn no_data_dup_on_finish() {
        const IMG: &[u8] = include_bytes!(concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/tests/bugfixes/x_issue#214.png"
        ));

        let mut normal = Decoder::new(IMG).read_info().unwrap();

        let mut buffer = vec![0; normal.output_buffer_size()];
        let normal = normal.next_frame(&mut buffer).unwrap_err();

        let smal = Decoder::new(SmalBuf::new(IMG, 1))
            .read_info()
            .unwrap()
            .next_frame(&mut buffer)
            .unwrap_err();

        assert_eq!(discriminant(&normal), discriminant(&smal));
    }
}
diff --git a/vendor/png/src/decoder/stream.rs b/vendor/png/src/decoder/stream.rs
new file mode 100644
index 0000000..f5df6e9
--- /dev/null
+++ b/vendor/png/src/decoder/stream.rs
@@ -0,0 +1,1576 @@
+extern crate crc32fast;
+
+use std::convert::From;
+use std::default::Default;
+use std::error;
+use std::fmt;
+use std::io;
+use std::{borrow::Cow, cmp::min};
+
+use crc32fast::Hasher as Crc32;
+
+use super::zlib::ZlibStream;
+use crate::chunk::{self, ChunkType, IDAT, IEND, IHDR};
+use crate::common::{
+ AnimationControl, BitDepth, BlendOp, ColorType, DisposeOp, FrameControl, Info, ParameterError,
+ PixelDimensions, ScaledFloat, SourceChromaticities, Unit,
+};
+use crate::text_metadata::{ITXtChunk, TEXtChunk, TextDecodingError, ZTXtChunk};
+use crate::traits::ReadBytesExt;
+
/// Size of the chunk read buffer (32 KiB).
/// NOTE(review): the name keeps the historical "CHUNCK" misspelling because the
/// constant is `pub`; renaming would break downstream users.
/// TODO check if these size are reasonable
pub const CHUNCK_BUFFER_SIZE: usize = 32 * 1024;

/// Determines if checksum checks should be disabled globally.
///
/// This is used only in fuzzing. `afl` automatically adds `--cfg fuzzing` to RUSTFLAGS which can
/// be used to detect that build.
const CHECKSUM_DISABLED: bool = cfg!(fuzzing);
+
/// Identifies which big-endian `u32` field of the chunk framing is currently
/// being assembled byte-by-byte (length, type, or trailing CRC).
#[derive(Debug)]
enum U32Value {
    // CHUNKS
    /// The 4-byte chunk data length.
    Length,
    /// The 4-byte chunk type; payload is the previously read length.
    Type(u32),
    /// The trailing CRC32; payload is the chunk type it belongs to.
    Crc(ChunkType),
}
+
/// The decoder's position within the PNG stream.
#[derive(Debug)]
enum State {
    /// Reading the 8-byte PNG signature; `(bytes read so far, bytes collected)`.
    Signature(u8, [u8; 7]),
    /// Assembling a big-endian u32, one byte per state; `U32Byte3` expects the
    /// final (least significant) byte.
    U32Byte3(U32Value, u32),
    U32Byte2(U32Value, u32),
    U32Byte1(U32Value, u32),
    U32(U32Value),
    /// Accumulating the payload of the current chunk into `raw_bytes`.
    ReadChunk(ChunkType),
    /// A chunk payload is (partially) buffered and ready for processing.
    PartialChunk(ChunkType),
    /// Inflating buffered IDAT/fdAT data; `usize` is the offset already consumed.
    DecodeData(ChunkType, usize),
}
+
#[derive(Debug)]
/// Result of the decoding process
pub enum Decoded {
    /// Nothing decoded yet
    Nothing,
    /// IHDR parsed: width, height, bit depth, color type, and a flag
    /// (presumably "interlaced" — confirm against `parse_ihdr`).
    Header(u32, u32, BitDepth, ColorType, bool),
    /// Start of a chunk: declared data length and chunk type.
    ChunkBegin(u32, ChunkType),
    /// A chunk including its CRC was fully read: CRC value and chunk type.
    ChunkComplete(u32, ChunkType),
    /// A pHYs chunk was parsed.
    PixelDimensions(PixelDimensions),
    /// An acTL (animation control) chunk was parsed.
    AnimationControl(AnimationControl),
    /// An fcTL (frame control) chunk was parsed.
    FrameControl(FrameControl),
    /// Decoded raw image data.
    ImageData,
    /// The last of a consecutive chunk of IDAT was done.
    /// This is distinct from ChunkComplete which only marks that some IDAT chunk was completed but
    /// not that no additional IDAT chunk follows.
    ImageDataFlushed,
    /// A chunk's payload is buffered but not yet fully processed.
    PartialChunk(ChunkType),
    /// The IEND chunk was reached.
    ImageEnd,
}
+
/// Any kind of error during PNG decoding.
///
/// This enumeration provides a very rough analysis on the origin of the failure. That is, each
/// variant corresponds to one kind of actor causing the error. It should not be understood as a
/// direct blame but can inform the search for a root cause or if such a search is required.
#[derive(Debug)]
pub enum DecodingError {
    /// An error in IO of the underlying reader.
    IoError(io::Error),
    /// The input image was not a valid PNG.
    ///
    /// There isn't a lot that can be done here, except if the program itself was responsible for
    /// creating this image then investigate the generator. This is internally implemented with a
    /// large Enum. If you are interested in accessing some of the more exact information on the
    /// variant then we can discuss in an issue.
    Format(FormatError),
    /// An interface was used incorrectly.
    ///
    /// This is used in cases where it's expected that the programmer might trip up and stability
    /// could be affected. For example when:
    ///
    /// * The decoder is polled for more animation frames despite being done (or not being animated
    ///   in the first place).
    /// * The output buffer does not have the required size.
    ///
    /// As a rough guideline for introducing new variants parts of the requirements are dynamically
    /// derived from the (untrusted) input data while the other half is from the caller. In the
    /// above cases the number of frames respectively the size is determined by the file while the
    /// number of calls and the buffer size are provided by the caller.
    ///
    /// If you're an application you might want to signal that a bug report is appreciated.
    Parameter(ParameterError),
    /// The image would have required exceeding the limits configured with the decoder.
    ///
    /// Note that your allocations, e.g. when reading into a pre-allocated buffer, is __NOT__
    /// considered part of the limits. Nevertheless, required intermediate buffers such as for
    /// singular lines is checked against the limit.
    ///
    /// Note that this is a best-effort basis.
    LimitsExceeded,
}
+
/// A PNG format violation; opaque wrapper around the crate-private
/// [`FormatErrorInner`] so the exact variants stay out of the public API.
#[derive(Debug)]
pub struct FormatError {
    inner: FormatErrorInner,
}
+
/// The concrete kinds of PNG format violations; section references ("4.3.")
/// point into the PNG specification's chunk-ordering rules.
#[derive(Debug)]
pub(crate) enum FormatErrorInner {
    /// Bad framing.
    CrcMismatch {
        /// Stored CRC32 value
        crc_val: u32,
        /// Calculated CRC32 sum
        crc_sum: u32,
        /// The chunk type that has the CRC mismatch.
        chunk: ChunkType,
    },
    /// Not a PNG, the magic signature is missing.
    InvalidSignature,
    /// End of file, within a chunk event.
    UnexpectedEof,
    /// End of file, while expecting more image data.
    UnexpectedEndOfChunk,
    // Errors of chunk level ordering, missing etc.
    /// Ihdr must occur.
    MissingIhdr,
    /// Fctl must occur if an animated chunk occurs.
    MissingFctl,
    /// Image data that was indicated in IHDR or acTL is missing.
    MissingImageData,
    /// 4.3., Must be first.
    ChunkBeforeIhdr {
        kind: ChunkType,
    },
    /// 4.3., some chunks must be before IDAT.
    AfterIdat {
        kind: ChunkType,
    },
    /// 4.3., some chunks must be before PLTE.
    AfterPlte {
        kind: ChunkType,
    },
    /// 4.3., some chunks must be between PLTE and IDAT.
    OutsidePlteIdat {
        kind: ChunkType,
    },
    /// 4.3., some chunks must be unique.
    DuplicateChunk {
        kind: ChunkType,
    },
    /// Specifically for fdat there is an embedded sequence number for chunks.
    ApngOrder {
        /// The sequence number in the chunk.
        present: u32,
        /// The one that should have been present.
        expected: u32,
    },
    // Errors specific to particular chunk data to be validated.
    /// The palette did not contain even a single pixel's worth of data.
    ShortPalette {
        expected: usize,
        len: usize,
    },
    /// A paletted (indexed-color) image did not have a palette.
    PaletteRequired,
    /// The color-depth combination is not valid according to Table 11.1.
    InvalidColorBitDepth {
        color_type: ColorType,
        bit_depth: BitDepth,
    },
    /// A tRNS chunk appeared for a color type that may not carry one.
    ColorWithBadTrns(ColorType),
    InvalidBitDepth(u8),
    InvalidColorType(u8),
    InvalidDisposeOp(u8),
    InvalidBlendOp(u8),
    InvalidUnit(u8),
    /// The rendering intent of the sRGB chunk is invalid.
    InvalidSrgbRenderingIntent(u8),
    UnknownCompressionMethod(u8),
    UnknownFilterMethod(u8),
    UnknownInterlaceMethod(u8),
    /// The subframe is not in bounds of the image.
    /// TODO: fields with relevant data.
    BadSubFrameBounds {},
    // Errors specific to the IDAT/fDAT chunks.
    /// The compression of the data stream was faulty.
    CorruptFlateStream {
        err: fdeflate::DecompressionError,
    },
    /// The image data chunk was too short for the expected pixel count.
    NoMoreImageData,
    /// Bad text encoding
    BadTextEncoding(TextDecodingError),
}
+
+impl error::Error for DecodingError {
+ fn cause(&self) -> Option<&(dyn error::Error + 'static)> {
+ match self {
+ DecodingError::IoError(err) => Some(err),
+ _ => None,
+ }
+ }
+}
+
+impl fmt::Display for DecodingError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ use self::DecodingError::*;
+ match self {
+ IoError(err) => write!(fmt, "{}", err),
+ Parameter(desc) => write!(fmt, "{}", &desc),
+ Format(desc) => write!(fmt, "{}", desc),
+ LimitsExceeded => write!(fmt, "limits are exceeded"),
+ }
+ }
+}
+
+impl fmt::Display for FormatError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ use FormatErrorInner::*;
+ match &self.inner {
+ CrcMismatch {
+ crc_val,
+ crc_sum,
+ chunk,
+ ..
+ } => write!(
+ fmt,
+ "CRC error: expected 0x{:x} have 0x{:x} while decoding {:?} chunk.",
+ crc_val, crc_sum, chunk
+ ),
+ MissingIhdr => write!(fmt, "IHDR chunk missing"),
+ MissingFctl => write!(fmt, "fcTL chunk missing before fdAT chunk."),
+ MissingImageData => write!(fmt, "IDAT or fDAT chunk is missing."),
+ ChunkBeforeIhdr { kind } => write!(fmt, "{:?} chunk appeared before IHDR chunk", kind),
+ AfterIdat { kind } => write!(fmt, "Chunk {:?} is invalid after IDAT chunk.", kind),
+ AfterPlte { kind } => write!(fmt, "Chunk {:?} is invalid after PLTE chunk.", kind),
+ OutsidePlteIdat { kind } => write!(
+ fmt,
+ "Chunk {:?} must appear between PLTE and IDAT chunks.",
+ kind
+ ),
+ DuplicateChunk { kind } => write!(fmt, "Chunk {:?} must appear at most once.", kind),
+ ApngOrder { present, expected } => write!(
+ fmt,
+ "Sequence is not in order, expected #{} got #{}.",
+ expected, present,
+ ),
+ ShortPalette { expected, len } => write!(
+ fmt,
+ "Not enough palette entries, expect {} got {}.",
+ expected, len
+ ),
+ PaletteRequired => write!(fmt, "Missing palette of indexed image."),
+ InvalidColorBitDepth {
+ color_type,
+ bit_depth,
+ } => write!(
+ fmt,
+ "Invalid color/depth combination in header: {:?}/{:?}",
+ color_type, bit_depth,
+ ),
+ ColorWithBadTrns(color_type) => write!(
+ fmt,
+ "Transparency chunk found for color type {:?}.",
+ color_type
+ ),
+ InvalidBitDepth(nr) => write!(fmt, "Invalid dispose operation {}.", nr),
+ InvalidColorType(nr) => write!(fmt, "Invalid color type {}.", nr),
+ InvalidDisposeOp(nr) => write!(fmt, "Invalid dispose op {}.", nr),
+ InvalidBlendOp(nr) => write!(fmt, "Invalid blend op {}.", nr),
+ InvalidUnit(nr) => write!(fmt, "Invalid physical pixel size unit {}.", nr),
+ InvalidSrgbRenderingIntent(nr) => write!(fmt, "Invalid sRGB rendering intent {}.", nr),
+ UnknownCompressionMethod(nr) => write!(fmt, "Unknown compression method {}.", nr),
+ UnknownFilterMethod(nr) => write!(fmt, "Unknown filter method {}.", nr),
+ UnknownInterlaceMethod(nr) => write!(fmt, "Unknown interlace method {}.", nr),
+ BadSubFrameBounds {} => write!(fmt, "Sub frame is out-of-bounds."),
+ InvalidSignature => write!(fmt, "Invalid PNG signature."),
+ UnexpectedEof => write!(fmt, "Unexpected end of data before image end."),
+ UnexpectedEndOfChunk => write!(fmt, "Unexpected end of data within a chunk."),
+ NoMoreImageData => write!(fmt, "IDAT or fDAT chunk is has not enough data for image."),
+ CorruptFlateStream { err } => {
+ write!(fmt, "Corrupt deflate stream. ")?;
+ write!(fmt, "{:?}", err)
+ }
+ // TODO: Wrap more info in the enum variant
+ BadTextEncoding(tde) => {
+ match tde {
+ TextDecodingError::Unrepresentable => {
+ write!(fmt, "Unrepresentable data in tEXt chunk.")
+ }
+ TextDecodingError::InvalidKeywordSize => {
+ write!(fmt, "Keyword empty or longer than 79 bytes.")
+ }
+ TextDecodingError::MissingNullSeparator => {
+ write!(fmt, "No null separator in tEXt chunk.")
+ }
+ TextDecodingError::InflationError => {
+ write!(fmt, "Invalid compressed text data.")
+ }
+ TextDecodingError::OutOfDecompressionSpace => {
+ write!(fmt, "Out of decompression space. Try with a larger limit.")
+ }
+ TextDecodingError::InvalidCompressionMethod => {
+ write!(fmt, "Using an unrecognized byte as compression method.")
+ }
+ TextDecodingError::InvalidCompressionFlag => {
+ write!(fmt, "Using a flag that is not 0 or 255 as a compression flag for iTXt chunk.")
+ }
+ TextDecodingError::MissingCompressionFlag => {
+ write!(fmt, "No compression flag in the iTXt chunk.")
+ }
+ }
+ }
+ }
+ }
+}
+
// Conversions so `?` can propagate lower-level errors as `DecodingError`
// and so `DecodingError` can cross `io::Read`-shaped APIs.

/// Wrap an I/O failure of the underlying reader.
impl From<io::Error> for DecodingError {
    fn from(err: io::Error) -> DecodingError {
        DecodingError::IoError(err)
    }
}

/// Wrap a format violation.
impl From<FormatError> for DecodingError {
    fn from(err: FormatError) -> DecodingError {
        DecodingError::Format(err)
    }
}

/// Lift the crate-private variant enum into the public wrapper.
impl From<FormatErrorInner> for FormatError {
    fn from(inner: FormatErrorInner) -> Self {
        FormatError { inner }
    }
}

/// Unwrap an I/O error losslessly; all other variants are stringified into an
/// `io::ErrorKind::Other` error (the structured information is lost).
impl From<DecodingError> for io::Error {
    fn from(err: DecodingError) -> io::Error {
        match err {
            DecodingError::IoError(err) => err,
            err => io::Error::new(io::ErrorKind::Other, err.to_string()),
        }
    }
}

/// Wrap a text-chunk decoding failure as a format error.
impl From<TextDecodingError> for DecodingError {
    fn from(tbe: TextDecodingError) -> Self {
        DecodingError::Format(FormatError {
            inner: FormatErrorInner::BadTextEncoding(tbe),
        })
    }
}
+
/// Decoder configuration options
#[derive(Clone)]
pub struct DecodeOptions {
    // Skip the zlib Adler-32 check (defaults to true; redundant with per-chunk CRC).
    ignore_adler32: bool,
    // Skip per-chunk CRC32 verification (defaults to false).
    ignore_crc: bool,
    // Skip parsing of tEXt/zTXt/iTXt chunks (defaults to false).
    ignore_text_chunk: bool,
}
+
+impl Default for DecodeOptions {
+ fn default() -> Self {
+ Self {
+ ignore_adler32: true,
+ ignore_crc: false,
+ ignore_text_chunk: false,
+ }
+ }
+}
+
impl DecodeOptions {
    /// When set, the decoder will not compute and verify the Adler-32 checksum.
    ///
    /// Defaults to `true`.
    pub fn set_ignore_adler32(&mut self, ignore_adler32: bool) {
        self.ignore_adler32 = ignore_adler32;
    }

    /// When set, the decoder will not compute and verify the CRC code.
    ///
    /// Defaults to `false`.
    pub fn set_ignore_crc(&mut self, ignore_crc: bool) {
        self.ignore_crc = ignore_crc;
    }

    /// Flag to ignore computing and verifying the Adler-32 checksum and CRC
    /// code.
    ///
    /// Convenience setter that toggles both checks at once.
    pub fn set_ignore_checksums(&mut self, ignore_checksums: bool) {
        self.ignore_adler32 = ignore_checksums;
        self.ignore_crc = ignore_checksums;
    }

    /// Ignore text chunks while decoding.
    ///
    /// Defaults to `false`.
    pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) {
        self.ignore_text_chunk = ignore_text_chunk;
    }
}
+
/// PNG StreamingDecoder (low-level interface)
///
/// By default, the decoder does not verify Adler-32 checksum computation. To
/// enable checksum verification, set it with [`StreamingDecoder::set_ignore_adler32`]
/// before starting decompression.
pub struct StreamingDecoder {
    // Current parsing state; `None` only transiently after an error in `parse_chunk`.
    state: Option<State>,
    current_chunk: ChunkState,
    /// The inflater state handling consecutive `IDAT` and `fdAT` chunks.
    inflater: ZlibStream,
    /// The complete image info read from all prior chunks.
    pub(crate) info: Option<Info<'static>>,
    /// The animation chunk sequence number.
    current_seq_no: Option<u32>,
    /// Whether the leading sequence number of the current `fdAT` chunk has
    /// already been read and validated.
    apng_seq_handled: bool,
    // True once the first IDAT chunk has been seen (used for ordering checks).
    have_idat: bool,
    decode_options: DecodeOptions,
}
+
/// Bookkeeping for the chunk currently being read.
struct ChunkState {
    /// The type of the current chunk.
    /// Relevant for `IDAT` and `fdAT` which aggregate consecutive chunks of their own type.
    type_: ChunkType,

    /// Partial crc until now.
    crc: Crc32,

    /// Remaining bytes to be read.
    remaining: u32,

    /// Non-decoded bytes in the chunk.
    raw_bytes: Vec<u8>,
}
+
+impl StreamingDecoder {
+ /// Creates a new StreamingDecoder
+ ///
+ /// Allocates the internal buffers.
+ pub fn new() -> StreamingDecoder {
+ StreamingDecoder::new_with_options(DecodeOptions::default())
+ }
+
    /// Creates a new StreamingDecoder configured by `decode_options`.
    ///
    /// The Adler-32 preference is pushed down into the zlib inflater up front;
    /// the remaining options are consulted during chunk parsing.
    pub fn new_with_options(decode_options: DecodeOptions) -> StreamingDecoder {
        let mut inflater = ZlibStream::new();
        inflater.set_ignore_adler32(decode_options.ignore_adler32);

        StreamingDecoder {
            state: Some(State::Signature(0, [0; 7])),
            current_chunk: ChunkState::default(),
            inflater,
            info: None,
            current_seq_no: None,
            apng_seq_handled: false,
            have_idat: false,
            decode_options,
        }
    }
+
    /// Resets the StreamingDecoder
    ///
    /// Restores the initial state (expecting a PNG signature) and clears all
    /// per-image state; configured `decode_options` are kept.
    pub fn reset(&mut self) {
        self.state = Some(State::Signature(0, [0; 7]));
        self.current_chunk.crc = Crc32::new();
        self.current_chunk.remaining = 0;
        self.current_chunk.raw_bytes.clear();
        self.inflater.reset();
        self.info = None;
        self.current_seq_no = None;
        self.apng_seq_handled = false;
        self.have_idat = false;
    }
+
    /// Provides access to the inner `info` field
    pub fn info(&self) -> Option<&Info<'static>> {
        self.info.as_ref()
    }

    /// Set whether text chunks (tEXt/zTXt/iTXt) should be skipped while decoding.
    pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) {
        self.decode_options.set_ignore_text_chunk(ignore_text_chunk);
    }

    /// Return whether the decoder is set to ignore the Adler-32 checksum.
    pub fn ignore_adler32(&self) -> bool {
        self.inflater.ignore_adler32()
    }

    /// Set whether to compute and verify the Adler-32 checksum during
    /// decompression. Return `true` if the flag was successfully set.
    ///
    /// The decoder defaults to `true`.
    ///
    /// This flag cannot be modified after decompression has started until the
    /// [`StreamingDecoder`] is reset.
    pub fn set_ignore_adler32(&mut self, ignore_adler32: bool) -> bool {
        self.inflater.set_ignore_adler32(ignore_adler32)
    }

    /// Set whether to compute and verify the per-chunk CRC32 code during
    /// decoding. (The original doc comment mistakenly said "Adler-32".)
    ///
    /// The decoder defaults to `false`, i.e. CRCs are verified.
    pub fn set_ignore_crc(&mut self, ignore_crc: bool) {
        self.decode_options.set_ignore_crc(ignore_crc)
    }
+
    /// Low level StreamingDecoder interface.
    ///
    /// Allows to stream partial data to the decoder. Returns a tuple containing the bytes that have
    /// been consumed from the input buffer and the current decoding result. If the decoded chunk
    /// was an image data chunk, it also appends the read data to `image_data`.
    pub fn update(
        &mut self,
        mut buf: &[u8],
        image_data: &mut Vec<u8>,
    ) -> Result<(usize, Decoded), DecodingError> {
        let len = buf.len();
        // Drive the state machine until input runs out, an event other than
        // `Nothing` is produced, or the state machine has terminated (state None).
        while !buf.is_empty() && self.state.is_some() {
            match self.next_state(buf, image_data) {
                Ok((bytes, Decoded::Nothing)) => buf = &buf[bytes..],
                Ok((bytes, result)) => {
                    buf = &buf[bytes..];
                    // Report the event together with the total bytes consumed so far.
                    return Ok((len - buf.len(), result));
                }
                Err(err) => return Err(err),
            }
        }
        Ok((len - buf.len(), Decoded::Nothing))
    }
+
    /// Advances the state machine by examining (at least) the next input byte.
    ///
    /// Returns the number of input bytes consumed (possibly 0 for pure state
    /// transitions) and the event produced. Intricate byte-level protocol
    /// handling — statement order is significant throughout.
    fn next_state<'a>(
        &'a mut self,
        buf: &[u8],
        image_data: &mut Vec<u8>,
    ) -> Result<(usize, Decoded), DecodingError> {
        use self::State::*;

        let current_byte = buf[0];

        // Driver should ensure that state is never None
        let state = self.state.take().unwrap();

        match state {
            // Still collecting the first 7 signature bytes.
            Signature(i, mut signature) if i < 7 => {
                signature[i as usize] = current_byte;
                self.state = Some(Signature(i + 1, signature));
                Ok((1, Decoded::Nothing))
            }
            // All 8 signature bytes present and correct: begin chunk framing.
            Signature(_, signature)
                if signature == [137, 80, 78, 71, 13, 10, 26] && current_byte == 10 =>
            {
                self.state = Some(U32(U32Value::Length));
                Ok((1, Decoded::Nothing))
            }
            Signature(..) => Err(DecodingError::Format(
                FormatErrorInner::InvalidSignature.into(),
            )),
            // Last byte of a big-endian u32; dispatch on what the u32 means.
            U32Byte3(type_, mut val) => {
                use self::U32Value::*;
                val |= u32::from(current_byte);
                match type_ {
                    Length => {
                        self.state = Some(U32(Type(val)));
                        Ok((1, Decoded::Nothing))
                    }
                    Type(length) => {
                        let type_str = ChunkType([
                            (val >> 24) as u8,
                            (val >> 16) as u8,
                            (val >> 8) as u8,
                            val as u8,
                        ]);
                        // A run of IDAT (or fdAT) chunks just ended: flush the
                        // inflater before handling the new chunk type. Consumes
                        // 0 bytes and re-enters this same state afterwards.
                        if type_str != self.current_chunk.type_
                            && (self.current_chunk.type_ == IDAT
                                || self.current_chunk.type_ == chunk::fdAT)
                        {
                            self.current_chunk.type_ = type_str;
                            self.inflater.finish_compressed_chunks(image_data)?;
                            self.inflater.reset();
                            self.state = Some(U32Byte3(Type(length), val & !0xff));
                            return Ok((0, Decoded::ImageDataFlushed));
                        }
                        self.current_chunk.type_ = type_str;
                        if !self.decode_options.ignore_crc {
                            self.current_chunk.crc.reset();
                            self.current_chunk.crc.update(&type_str.0);
                        }
                        self.current_chunk.remaining = length;
                        self.apng_seq_handled = false;
                        self.current_chunk.raw_bytes.clear();
                        self.state = Some(ReadChunk(type_str));
                        Ok((1, Decoded::ChunkBegin(length, type_str)))
                    }
                    Crc(type_str) => {
                        // If ignore_crc is set, do not calculate CRC. We set
                        // sum=val so that it short-circuits to true in the next
                        // if-statement block
                        let sum = if self.decode_options.ignore_crc {
                            val
                        } else {
                            self.current_chunk.crc.clone().finalize()
                        };

                        if val == sum || CHECKSUM_DISABLED {
                            self.state = Some(State::U32(U32Value::Length));
                            if type_str == IEND {
                                Ok((1, Decoded::ImageEnd))
                            } else {
                                Ok((1, Decoded::ChunkComplete(val, type_str)))
                            }
                        } else {
                            Err(DecodingError::Format(
                                FormatErrorInner::CrcMismatch {
                                    crc_val: val,
                                    crc_sum: sum,
                                    chunk: type_str,
                                }
                                .into(),
                            ))
                        }
                    }
                }
            }
            // Intermediate bytes of the big-endian u32 (most significant first).
            U32Byte2(type_, val) => {
                self.state = Some(U32Byte3(type_, val | u32::from(current_byte) << 8));
                Ok((1, Decoded::Nothing))
            }
            U32Byte1(type_, val) => {
                self.state = Some(U32Byte2(type_, val | u32::from(current_byte) << 16));
                Ok((1, Decoded::Nothing))
            }
            U32(type_) => {
                self.state = Some(U32Byte1(type_, u32::from(current_byte) << 24));
                Ok((1, Decoded::Nothing))
            }
            // Chunk payload (fully or partially) buffered; route by chunk type.
            PartialChunk(type_str) => {
                match type_str {
                    IDAT => {
                        self.have_idat = true;
                        self.state = Some(DecodeData(type_str, 0));
                        Ok((0, Decoded::PartialChunk(type_str)))
                    }
                    chunk::fdAT => {
                        // fdAT carries a 4-byte sequence number before the
                        // compressed data; validate it once per chunk.
                        let data_start;
                        if let Some(seq_no) = self.current_seq_no {
                            if !self.apng_seq_handled {
                                data_start = 4;
                                let mut buf = &self.current_chunk.raw_bytes[..];
                                let next_seq_no = buf.read_be()?;
                                if next_seq_no != seq_no + 1 {
                                    return Err(DecodingError::Format(
                                        FormatErrorInner::ApngOrder {
                                            present: next_seq_no,
                                            expected: seq_no + 1,
                                        }
                                        .into(),
                                    ));
                                }
                                self.current_seq_no = Some(next_seq_no);
                                self.apng_seq_handled = true;
                            } else {
                                data_start = 0;
                            }
                        } else {
                            // fdAT without a preceding fcTL is invalid.
                            return Err(DecodingError::Format(
                                FormatErrorInner::MissingFctl.into(),
                            ));
                        }
                        self.state = Some(DecodeData(type_str, data_start));
                        Ok((0, Decoded::PartialChunk(type_str)))
                    }
                    // Handle other chunks
                    _ => {
                        if self.current_chunk.remaining == 0 {
                            // complete chunk
                            Ok((0, self.parse_chunk(type_str)?))
                        } else {
                            // Make sure we have room to read more of the chunk.
                            // We need it fully before parsing.
                            self.reserve_current_chunk()?;

                            self.state = Some(ReadChunk(type_str));
                            Ok((0, Decoded::PartialChunk(type_str)))
                        }
                    }
                }
            }
            // Copy raw chunk bytes from the input into `raw_bytes`, updating
            // the running CRC, until the chunk or the buffer capacity runs out.
            ReadChunk(type_str) => {
                // The _previous_ event wanted to return the contents of raw_bytes, and let the
                // caller consume it,
                if self.current_chunk.remaining == 0 {
                    self.state = Some(U32(U32Value::Crc(type_str)));
                    Ok((0, Decoded::Nothing))
                } else {
                    let ChunkState {
                        crc,
                        remaining,
                        raw_bytes,
                        type_: _,
                    } = &mut self.current_chunk;

                    let buf_avail = raw_bytes.capacity() - raw_bytes.len();
                    let bytes_avail = min(buf.len(), buf_avail);
                    let n = min(*remaining, bytes_avail as u32);
                    if buf_avail == 0 {
                        // Buffer full: hand off to PartialChunk to drain/grow it.
                        self.state = Some(PartialChunk(type_str));
                        Ok((0, Decoded::Nothing))
                    } else {
                        let buf = &buf[..n as usize];
                        if !self.decode_options.ignore_crc {
                            crc.update(buf);
                        }
                        raw_bytes.extend_from_slice(buf);

                        *remaining -= n;
                        if *remaining == 0 {
                            self.state = Some(PartialChunk(type_str));
                        } else {
                            self.state = Some(ReadChunk(type_str));
                        }
                        Ok((n as usize, Decoded::Nothing))
                    }
                }
            }
            // Inflate buffered IDAT/fdAT bytes into `image_data`; `n` tracks
            // how much of `raw_bytes` has already been fed to the inflater.
            DecodeData(type_str, mut n) => {
                let chunk_len = self.current_chunk.raw_bytes.len();
                let chunk_data = &self.current_chunk.raw_bytes[n..];
                let c = self.inflater.decompress(chunk_data, image_data)?;
                n += c;
                if n == chunk_len && c == 0 {
                    // Everything consumed and no forward progress: this chunk
                    // is done, go back to reading raw chunk bytes.
                    self.current_chunk.raw_bytes.clear();
                    self.state = Some(ReadChunk(type_str));
                    Ok((0, Decoded::ImageData))
                } else {
                    self.state = Some(DecodeData(type_str, n));
                    Ok((0, Decoded::ImageData))
                }
            }
        }
    }
+
    /// Grows `raw_bytes` (roughly doubling) so more of the current chunk can be
    /// buffered, enforcing a hard 1 MiB cap.
    ///
    /// Errors with `LimitsExceeded` if the buffer is already at capacity and at
    /// the cap, i.e. no further growth is possible.
    fn reserve_current_chunk(&mut self) -> Result<(), DecodingError> {
        // FIXME: use limits, also do so in iccp/zlib decompression.
        const MAX: usize = 0x10_0000;
        let buffer = &mut self.current_chunk.raw_bytes;

        // Double if necessary, but no more than until the limit is reached.
        let reserve_size = MAX.saturating_sub(buffer.capacity()).min(buffer.len());
        buffer.reserve_exact(reserve_size);

        if buffer.capacity() == buffer.len() {
            Err(DecodingError::LimitsExceeded)
        } else {
            Ok(())
        }
    }
+
    /// Dispatches a fully buffered chunk to its type-specific parser.
    ///
    /// Rejects any chunk before IHDR, skips text parsers when text chunks are
    /// ignored, and returns `PartialChunk` for unrecognized chunk types. On a
    /// parser error the state machine is terminated (`state = None`).
    fn parse_chunk(&mut self, type_str: ChunkType) -> Result<Decoded, DecodingError> {
        // After parsing, the CRC of this chunk is expected next.
        self.state = Some(State::U32(U32Value::Crc(type_str)));
        if self.info.is_none() && type_str != IHDR {
            return Err(DecodingError::Format(
                FormatErrorInner::ChunkBeforeIhdr { kind: type_str }.into(),
            ));
        }
        match match type_str {
            IHDR => self.parse_ihdr(),
            chunk::PLTE => self.parse_plte(),
            chunk::tRNS => self.parse_trns(),
            chunk::pHYs => self.parse_phys(),
            chunk::gAMA => self.parse_gama(),
            chunk::acTL => self.parse_actl(),
            chunk::fcTL => self.parse_fctl(),
            chunk::cHRM => self.parse_chrm(),
            chunk::sRGB => self.parse_srgb(),
            chunk::iCCP => self.parse_iccp(),
            chunk::tEXt if !self.decode_options.ignore_text_chunk => self.parse_text(),
            chunk::zTXt if !self.decode_options.ignore_text_chunk => self.parse_ztxt(),
            chunk::iTXt if !self.decode_options.ignore_text_chunk => self.parse_itxt(),
            _ => Ok(Decoded::PartialChunk(type_str)),
        } {
            Err(err) => {
                // Borrow of self ends here, because Decoding error does not borrow self.
                self.state = None;
                Err(err)
            }
            ok => ok,
        }
    }
+
    /// Parses an fcTL (frame control) chunk: validates its sequence number,
    /// reads the frame geometry/timing fields, validates them against the
    /// image info, and records the frame control for the upcoming frame.
    fn parse_fctl(&mut self) -> Result<Decoded, DecodingError> {
        let mut buf = &self.current_chunk.raw_bytes[..];
        let next_seq_no = buf.read_be()?;

        // Assuming that fcTL is required before *every* fdAT-sequence
        self.current_seq_no = Some(if let Some(seq_no) = self.current_seq_no {
            if next_seq_no != seq_no + 1 {
                return Err(DecodingError::Format(
                    FormatErrorInner::ApngOrder {
                        expected: seq_no + 1,
                        present: next_seq_no,
                    }
                    .into(),
                ));
            }
            next_seq_no
        } else {
            // The very first fcTL must carry sequence number 0.
            if next_seq_no != 0 {
                return Err(DecodingError::Format(
                    FormatErrorInner::ApngOrder {
                        expected: 0,
                        present: next_seq_no,
                    }
                    .into(),
                ));
            }
            0
        });
        // Each frame gets a fresh compressed stream.
        self.inflater.reset();
        let fc = FrameControl {
            sequence_number: next_seq_no,
            width: buf.read_be()?,
            height: buf.read_be()?,
            x_offset: buf.read_be()?,
            y_offset: buf.read_be()?,
            delay_num: buf.read_be()?,
            delay_den: buf.read_be()?,
            dispose_op: {
                let dispose_op = buf.read_be()?;
                match DisposeOp::from_u8(dispose_op) {
                    Some(dispose_op) => dispose_op,
                    None => {
                        return Err(DecodingError::Format(
                            FormatErrorInner::InvalidDisposeOp(dispose_op).into(),
                        ))
                    }
                }
            },
            blend_op: {
                let blend_op = buf.read_be()?;
                match BlendOp::from_u8(blend_op) {
                    Some(blend_op) => blend_op,
                    None => {
                        return Err(DecodingError::Format(
                            FormatErrorInner::InvalidBlendOp(blend_op).into(),
                        ))
                    }
                }
            },
        };
        // Subframe bounds etc. are checked against the already-parsed IHDR.
        self.info.as_ref().unwrap().validate(&fc)?;
        self.info.as_mut().unwrap().frame_control = Some(fc);
        Ok(Decoded::FrameControl(fc))
    }
+
    /// Parses an acTL (animation control) chunk. Must appear before IDAT.
    fn parse_actl(&mut self) -> Result<Decoded, DecodingError> {
        if self.have_idat {
            Err(DecodingError::Format(
                FormatErrorInner::AfterIdat { kind: chunk::acTL }.into(),
            ))
        } else {
            let mut buf = &self.current_chunk.raw_bytes[..];
            let actl = AnimationControl {
                num_frames: buf.read_be()?,
                num_plays: buf.read_be()?,
            };
            self.info.as_mut().unwrap().animation_control = Some(actl);
            Ok(Decoded::AnimationControl(actl))
        }
    }
+
    /// Parses the PLTE chunk: stores the raw palette bytes on the image info.
    fn parse_plte(&mut self) -> Result<Decoded, DecodingError> {
        let info = self.info.as_mut().unwrap();
        if info.palette.is_some() {
            // Only one palette is allowed
            Err(DecodingError::Format(
                FormatErrorInner::DuplicateChunk { kind: chunk::PLTE }.into(),
            ))
        } else {
            info.palette = Some(Cow::Owned(self.current_chunk.raw_bytes.clone()));
            Ok(Decoded::Nothing)
        }
    }
+
+ fn parse_trns(&mut self) -> Result<Decoded, DecodingError> {
+ let info = self.info.as_mut().unwrap();
+ if info.trns.is_some() {
+ return Err(DecodingError::Format(
+ FormatErrorInner::DuplicateChunk { kind: chunk::PLTE }.into(),
+ ));
+ }
+ let (color_type, bit_depth) = { (info.color_type, info.bit_depth as u8) };
+ let mut vec = self.current_chunk.raw_bytes.clone();
+ let len = vec.len();
+ match color_type {
+ ColorType::Grayscale => {
+ if len < 2 {
+ return Err(DecodingError::Format(
+ FormatErrorInner::ShortPalette { expected: 2, len }.into(),
+ ));
+ }
+ if bit_depth < 16 {
+ vec[0] = vec[1];
+ vec.truncate(1);
+ }
+ info.trns = Some(Cow::Owned(vec));
+ Ok(Decoded::Nothing)
+ }
+ ColorType::Rgb => {
+ if len < 6 {
+ return Err(DecodingError::Format(
+ FormatErrorInner::ShortPalette { expected: 6, len }.into(),
+ ));
+ }
+ if bit_depth < 16 {
+ vec[0] = vec[1];
+ vec[1] = vec[3];
+ vec[2] = vec[5];
+ vec.truncate(3);
+ }
+ info.trns = Some(Cow::Owned(vec));
+ Ok(Decoded::Nothing)
+ }
+ ColorType::Indexed => {
+ // The transparency chunk must be after the palette chunk and
+ // before the data chunk.
+ if info.palette.is_none() {
+ return Err(DecodingError::Format(
+ FormatErrorInner::AfterPlte { kind: chunk::tRNS }.into(),
+ ));
+ } else if self.have_idat {
+ return Err(DecodingError::Format(
+ FormatErrorInner::OutsidePlteIdat { kind: chunk::tRNS }.into(),
+ ));
+ }
+
+ info.trns = Some(Cow::Owned(vec));
+ Ok(Decoded::Nothing)
+ }
+ c => Err(DecodingError::Format(
+ FormatErrorInner::ColorWithBadTrns(c).into(),
+ )),
+ }
+ }
+
    /// Parses the pHYs (physical pixel dimensions) chunk. Must be unique and
    /// appear before IDAT.
    fn parse_phys(&mut self) -> Result<Decoded, DecodingError> {
        let info = self.info.as_mut().unwrap();
        if self.have_idat {
            Err(DecodingError::Format(
                FormatErrorInner::AfterIdat { kind: chunk::pHYs }.into(),
            ))
        } else if info.pixel_dims.is_some() {
            Err(DecodingError::Format(
                FormatErrorInner::DuplicateChunk { kind: chunk::pHYs }.into(),
            ))
        } else {
            let mut buf = &self.current_chunk.raw_bytes[..];
            let xppu = buf.read_be()?;
            let yppu = buf.read_be()?;
            let unit = buf.read_be()?;
            let unit = match Unit::from_u8(unit) {
                Some(unit) => unit,
                None => {
                    return Err(DecodingError::Format(
                        FormatErrorInner::InvalidUnit(unit).into(),
                    ))
                }
            };
            let pixel_dims = PixelDimensions { xppu, yppu, unit };
            info.pixel_dims = Some(pixel_dims);
            Ok(Decoded::PixelDimensions(pixel_dims))
        }
    }
+
    /// Parses the cHRM (chromaticities) chunk: eight scaled-integer fields for
    /// the white point and RGB primaries. Must be unique and appear before IDAT.
    fn parse_chrm(&mut self) -> Result<Decoded, DecodingError> {
        let info = self.info.as_mut().unwrap();
        if self.have_idat {
            Err(DecodingError::Format(
                FormatErrorInner::AfterIdat { kind: chunk::cHRM }.into(),
            ))
        } else if info.chrm_chunk.is_some() {
            Err(DecodingError::Format(
                FormatErrorInner::DuplicateChunk { kind: chunk::cHRM }.into(),
            ))
        } else {
            let mut buf = &self.current_chunk.raw_bytes[..];
            let white_x: u32 = buf.read_be()?;
            let white_y: u32 = buf.read_be()?;
            let red_x: u32 = buf.read_be()?;
            let red_y: u32 = buf.read_be()?;
            let green_x: u32 = buf.read_be()?;
            let green_y: u32 = buf.read_be()?;
            let blue_x: u32 = buf.read_be()?;
            let blue_y: u32 = buf.read_be()?;

            let source_chromaticities = SourceChromaticities {
                white: (
                    ScaledFloat::from_scaled(white_x),
                    ScaledFloat::from_scaled(white_y),
                ),
                red: (
                    ScaledFloat::from_scaled(red_x),
                    ScaledFloat::from_scaled(red_y),
                ),
                green: (
                    ScaledFloat::from_scaled(green_x),
                    ScaledFloat::from_scaled(green_y),
                ),
                blue: (
                    ScaledFloat::from_scaled(blue_x),
                    ScaledFloat::from_scaled(blue_y),
                ),
            };

            info.chrm_chunk = Some(source_chromaticities);
            // Ignore chromaticities if sRGB profile is used.
            if info.srgb.is_none() {
                info.source_chromaticities = Some(source_chromaticities);
            }

            Ok(Decoded::Nothing)
        }
    }
+
    /// Parses the gAMA chunk: a single scaled-integer gamma value. Must be
    /// unique and appear before IDAT.
    fn parse_gama(&mut self) -> Result<Decoded, DecodingError> {
        let info = self.info.as_mut().unwrap();
        if self.have_idat {
            Err(DecodingError::Format(
                FormatErrorInner::AfterIdat { kind: chunk::gAMA }.into(),
            ))
        } else if info.gama_chunk.is_some() {
            Err(DecodingError::Format(
                FormatErrorInner::DuplicateChunk { kind: chunk::gAMA }.into(),
            ))
        } else {
            let mut buf = &self.current_chunk.raw_bytes[..];
            let source_gamma: u32 = buf.read_be()?;
            let source_gamma = ScaledFloat::from_scaled(source_gamma);

            info.gama_chunk = Some(source_gamma);
            // Ignore the gamma value if an sRGB profile is used (the original
            // comment said "chromaticities"; this branch governs gamma).
            if info.srgb.is_none() {
                info.source_gamma = Some(source_gamma);
            }

            Ok(Decoded::Nothing)
        }
    }
+
+ fn parse_srgb(&mut self) -> Result<Decoded, DecodingError> {
+ let info = self.info.as_mut().unwrap();
+ if self.have_idat {
+ Err(DecodingError::Format(
+ FormatErrorInner::AfterIdat { kind: chunk::acTL }.into(),
+ ))
+ } else if info.srgb.is_some() {
+ Err(DecodingError::Format(
+ FormatErrorInner::DuplicateChunk { kind: chunk::sRGB }.into(),
+ ))
+ } else {
+ let mut buf = &self.current_chunk.raw_bytes[..];
+ let raw: u8 = buf.read_be()?; // BE is is nonsense for single bytes, but this way the size is checked.
+ let rendering_intent = crate::SrgbRenderingIntent::from_raw(raw).ok_or_else(|| {
+ FormatError::from(FormatErrorInner::InvalidSrgbRenderingIntent(raw))
+ })?;
+
+ // Set srgb and override source gamma and chromaticities.
+ info.srgb = Some(rendering_intent);
+ info.source_gamma = Some(crate::srgb::substitute_gamma());
+ info.source_chromaticities = Some(crate::srgb::substitute_chromaticities());
+ Ok(Decoded::Nothing)
+ }
+ }
+
    /// Parses the iCCP chunk: skips the profile name, checks the compression
    /// method, then inflates the embedded ICC profile (capped at 8 MB).
    /// Must be unique and appear before IDAT.
    fn parse_iccp(&mut self) -> Result<Decoded, DecodingError> {
        let info = self.info.as_mut().unwrap();
        if self.have_idat {
            Err(DecodingError::Format(
                FormatErrorInner::AfterIdat { kind: chunk::iCCP }.into(),
            ))
        } else if info.icc_profile.is_some() {
            Err(DecodingError::Format(
                FormatErrorInner::DuplicateChunk { kind: chunk::iCCP }.into(),
            ))
        } else {
            let mut buf = &self.current_chunk.raw_bytes[..];

            // read profile name
            // Name is 1-79 bytes followed by a NUL; skip up to 80 bytes total.
            let _: u8 = buf.read_be()?;
            for _ in 1..80 {
                let raw: u8 = buf.read_be()?;
                if raw == 0 {
                    break;
                }
            }

            match buf.read_be()? {
                // compression method
                0u8 => (),
                n => {
                    return Err(DecodingError::Format(
                        FormatErrorInner::UnknownCompressionMethod(n).into(),
                    ))
                }
            }

            let mut profile = Vec::new();
            let mut inflater = ZlibStream::new();
            while !buf.is_empty() {
                let consumed_bytes = inflater.decompress(buf, &mut profile)?;
                if profile.len() > 8000000 {
                    // TODO: this should use Limits.bytes
                    return Err(DecodingError::LimitsExceeded);
                }
                buf = &buf[consumed_bytes..];
            }
            inflater.finish_compressed_chunks(&mut profile)?;

            info.icc_profile = Some(Cow::Owned(profile));
            Ok(Decoded::Nothing)
        }
    }
+
    /// Parse the IHDR chunk: dimensions, bit depth, color type, and the
    /// compression / filter / interlace method bytes.
    ///
    /// IHDR must be the first (and only) header chunk, so `self.info` must
    /// still be `None`. On success initializes `self.info` and returns
    /// `Decoded::Header`.
    fn parse_ihdr(&mut self) -> Result<Decoded, DecodingError> {
        if self.info.is_some() {
            return Err(DecodingError::Format(
                FormatErrorInner::DuplicateChunk { kind: IHDR }.into(),
            ));
        }
        let mut buf = &self.current_chunk.raw_bytes[..];
        let width = buf.read_be()?;
        let height = buf.read_be()?;
        let bit_depth = buf.read_be()?;
        let bit_depth = match BitDepth::from_u8(bit_depth) {
            Some(bits) => bits,
            None => {
                return Err(DecodingError::Format(
                    FormatErrorInner::InvalidBitDepth(bit_depth).into(),
                ))
            }
        };
        let color_type = buf.read_be()?;
        let color_type = match ColorType::from_u8(color_type) {
            Some(color_type) => {
                // Not every (color type, bit depth) pair is legal; reject
                // combinations the spec forbids.
                if color_type.is_combination_invalid(bit_depth) {
                    return Err(DecodingError::Format(
                        FormatErrorInner::InvalidColorBitDepth {
                            color_type,
                            bit_depth,
                        }
                        .into(),
                    ));
                } else {
                    color_type
                }
            }
            None => {
                return Err(DecodingError::Format(
                    FormatErrorInner::InvalidColorType(color_type).into(),
                ))
            }
        };
        match buf.read_be()? {
            // compression method: 0 is the only value defined by the spec
            0u8 => (),
            n => {
                return Err(DecodingError::Format(
                    FormatErrorInner::UnknownCompressionMethod(n).into(),
                ))
            }
        }
        match buf.read_be()? {
            // filter method: 0 is the only value defined by the spec
            0u8 => (),
            n => {
                return Err(DecodingError::Format(
                    FormatErrorInner::UnknownFilterMethod(n).into(),
                ))
            }
        }
        let interlaced = match buf.read_be()? {
            0u8 => false,
            // 1 selects Adam7 interlacing
            1 => true,
            n => {
                return Err(DecodingError::Format(
                    FormatErrorInner::UnknownInterlaceMethod(n).into(),
                ))
            }
        };

        self.info = Some(Info {
            width,
            height,
            bit_depth,
            color_type,
            interlaced,
            ..Default::default()
        });

        Ok(Decoded::Header(
            width, height, bit_depth, color_type, interlaced,
        ))
    }
+
+ fn split_keyword(buf: &[u8]) -> Result<(&[u8], &[u8]), DecodingError> {
+ let null_byte_index = buf
+ .iter()
+ .position(|&b| b == 0)
+ .ok_or_else(|| DecodingError::from(TextDecodingError::MissingNullSeparator))?;
+
+ if null_byte_index == 0 || null_byte_index > 79 {
+ return Err(DecodingError::from(TextDecodingError::InvalidKeywordSize));
+ }
+
+ Ok((&buf[..null_byte_index], &buf[null_byte_index + 1..]))
+ }
+
+ fn parse_text(&mut self) -> Result<Decoded, DecodingError> {
+ let buf = &self.current_chunk.raw_bytes[..];
+
+ let (keyword_slice, value_slice) = Self::split_keyword(buf)?;
+
+ self.info
+ .as_mut()
+ .unwrap()
+ .uncompressed_latin1_text
+ .push(TEXtChunk::decode(keyword_slice, value_slice).map_err(DecodingError::from)?);
+
+ Ok(Decoded::Nothing)
+ }
+
+ fn parse_ztxt(&mut self) -> Result<Decoded, DecodingError> {
+ let buf = &self.current_chunk.raw_bytes[..];
+
+ let (keyword_slice, value_slice) = Self::split_keyword(buf)?;
+
+ let compression_method = *value_slice
+ .first()
+ .ok_or_else(|| DecodingError::from(TextDecodingError::InvalidCompressionMethod))?;
+
+ let text_slice = &value_slice[1..];
+
+ self.info.as_mut().unwrap().compressed_latin1_text.push(
+ ZTXtChunk::decode(keyword_slice, compression_method, text_slice)
+ .map_err(DecodingError::from)?,
+ );
+
+ Ok(Decoded::Nothing)
+ }
+
    /// Parse an iTXt chunk (international, optionally compressed UTF-8 text).
    ///
    /// Payload layout: `keyword\0 compression_flag compression_method
    /// language_tag\0 translated_keyword\0 text`.
    fn parse_itxt(&mut self) -> Result<Decoded, DecodingError> {
        let buf = &self.current_chunk.raw_bytes[..];

        let (keyword_slice, value_slice) = Self::split_keyword(buf)?;

        // Single-byte flag: whether the text payload is compressed.
        let compression_flag = *value_slice
            .first()
            .ok_or_else(|| DecodingError::from(TextDecodingError::MissingCompressionFlag))?;

        let compression_method = *value_slice
            .get(1)
            .ok_or_else(|| DecodingError::from(TextDecodingError::InvalidCompressionMethod))?;

        // Language tag runs from byte 2 to the next null separator. The +2
        // converts the position within the sub-slice back to a value_slice
        // index.
        let second_null_byte_index = value_slice[2..]
            .iter()
            .position(|&b| b == 0)
            .ok_or_else(|| DecodingError::from(TextDecodingError::MissingNullSeparator))?
            + 2;

        let language_tag_slice = &value_slice[2..second_null_byte_index];

        // Translated keyword follows the second separator, again
        // null-terminated; same index-rebasing trick.
        let third_null_byte_index = value_slice[second_null_byte_index + 1..]
            .iter()
            .position(|&b| b == 0)
            .ok_or_else(|| DecodingError::from(TextDecodingError::MissingNullSeparator))?
            + (second_null_byte_index + 1);

        let translated_keyword_slice =
            &value_slice[second_null_byte_index + 1..third_null_byte_index];

        // Everything after the third separator is the text payload (possibly
        // empty; slicing with start == len yields an empty slice).
        let text_slice = &value_slice[third_null_byte_index + 1..];

        self.info.as_mut().unwrap().utf8_text.push(
            ITXtChunk::decode(
                keyword_slice,
                compression_flag,
                compression_method,
                language_tag_slice,
                translated_keyword_slice,
                text_slice,
            )
            .map_err(DecodingError::from)?,
        );

        Ok(Decoded::Nothing)
    }
+}
+
+impl Info<'_> {
+ fn validate(&self, fc: &FrameControl) -> Result<(), DecodingError> {
+ // Validate mathematically: fc.width + fc.x_offset <= self.width
+ let in_x_bounds = Some(fc.width) <= self.width.checked_sub(fc.x_offset);
+ // Validate mathematically: fc.height + fc.y_offset <= self.height
+ let in_y_bounds = Some(fc.height) <= self.height.checked_sub(fc.y_offset);
+
+ if !in_x_bounds || !in_y_bounds {
+ return Err(DecodingError::Format(
+ // TODO: do we want to display the bad bounds?
+ FormatErrorInner::BadSubFrameBounds {}.into(),
+ ));
+ }
+
+ Ok(())
+ }
+}
+
+impl Default for StreamingDecoder {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl Default for ChunkState {
+ fn default() -> Self {
+ ChunkState {
+ type_: ChunkType([0; 4]),
+ crc: Crc32::new(),
+ remaining: 0,
+ raw_bytes: Vec::with_capacity(CHUNCK_BUFFER_SIZE),
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
    use super::ScaledFloat;
    use super::SourceChromaticities;
    use std::fs::File;

    /// Decode a pngsuite fixture and check the decoded `source_gamma`.
    /// The fixture lists below are data-driven to avoid ~200 copy-pasted
    /// `trial` calls; the set of files and expectations is unchanged.
    #[test]
    fn image_gamma() -> Result<(), ()> {
        fn trial(path: &str, expected: Option<ScaledFloat>) {
            let decoder = crate::Decoder::new(File::open(path).unwrap());
            let reader = decoder.read_info().unwrap();
            let actual: Option<ScaledFloat> = reader.info().source_gamma;
            assert!(actual == expected);
        }
        // Files without a gAMA chunk must decode to no source gamma.
        for name in [
            "f00n0g08", "f00n2c08", "f01n0g08", "f01n2c08", "f02n0g08", "f02n2c08",
            "f03n0g08", "f03n2c08", "f04n0g08", "f04n2c08", "f99n0g04", "tm3n3p02",
        ] {
            trial(&format!("tests/pngsuite/{}.png", name), None);
        }
        // The gNN* series shares one known gamma per group of three files.
        for (gamma, names) in [
            (0.35, ["g03n0g16", "g03n2c08", "g03n3p04"]),
            (0.45, ["g04n0g16", "g04n2c08", "g04n3p04"]),
            (0.55, ["g05n0g16", "g05n2c08", "g05n3p04"]),
            (0.7, ["g07n0g16", "g07n2c08", "g07n3p04"]),
            (1.0, ["g10n0g16", "g10n2c08", "g10n3p04"]),
            (2.5, ["g25n0g16", "g25n2c08", "g25n3p04"]),
        ] {
            for name in names {
                trial(
                    &format!("tests/pngsuite/{}.png", name),
                    Some(ScaledFloat::new(gamma)),
                );
            }
        }
        Ok(())
    }

    /// Decode a pngsuite fixture and check the decoded chromaticities.
    /// Only the two ccwn* images carry a cHRM chunk; every other fixture in
    /// the suite must decode to `None`.
    #[test]
    fn image_source_chromaticities() -> Result<(), ()> {
        fn trial(path: &str, expected: Option<SourceChromaticities>) {
            let decoder = crate::Decoder::new(File::open(path).unwrap());
            let reader = decoder.read_info().unwrap();
            let actual: Option<SourceChromaticities> = reader.info().source_chromaticities;
            assert!(actual == expected);
        }
        // D65 white point + sRGB primaries, as stored in the ccwn* fixtures.
        let expected = || {
            SourceChromaticities::new(
                (0.3127, 0.3290),
                (0.64, 0.33),
                (0.30, 0.60),
                (0.15, 0.06),
            )
        };
        trial("tests/pngsuite/ccwn2c08.png", Some(expected()));
        trial("tests/pngsuite/ccwn3p08.png", Some(expected()));

        for name in [
            "basi0g01", "basi0g02", "basi0g04", "basi0g08", "basi0g16", "basi2c08",
            "basi2c16", "basi3p01", "basi3p02", "basi3p04", "basi3p08", "basi4a08",
            "basi4a16", "basi6a08", "basi6a16", "basn0g01", "basn0g02", "basn0g04",
            "basn0g08", "basn0g16", "basn2c08", "basn2c16", "basn3p01", "basn3p02",
            "basn3p04", "basn3p08", "basn4a08", "basn4a16", "basn6a08", "basn6a16",
            "bgai4a08", "bgai4a16", "bgan6a08", "bgan6a16", "bgbn4a08", "bggn4a16",
            "bgwn6a08", "bgyn6a16", "cdfn2c08", "cdhn2c08", "cdsn2c08", "cdun2c08",
            "ch1n3p04", "ch2n3p08", "cm0n0g04", "cm7n0g04", "cm9n0g04", "cs3n2c16",
            "cs3n3p08", "cs5n2c08", "cs5n3p08", "cs8n2c08", "cs8n3p08", "ct0n0g04",
            "ct1n0g04", "cten0g04", "ctfn0g04", "ctgn0g04", "cthn0g04", "ctjn0g04",
            "ctzn0g04", "f00n0g08", "f00n2c08", "f01n0g08", "f01n2c08", "f02n0g08",
            "f02n2c08", "f03n0g08", "f03n2c08", "f04n0g08", "f04n2c08", "f99n0g04",
            "g03n0g16", "g03n2c08", "g03n3p04", "g04n0g16", "g04n2c08", "g04n3p04",
            "g05n0g16", "g05n2c08", "g05n3p04", "g07n0g16", "g07n2c08", "g07n3p04",
            "g10n0g16", "g10n2c08", "g10n3p04", "g25n0g16", "g25n2c08", "g25n3p04",
            "oi1n0g16", "oi1n2c16", "oi2n0g16", "oi2n2c16", "oi4n0g16", "oi4n2c16",
            "oi9n0g16", "oi9n2c16", "PngSuite", "pp0n2c16", "pp0n6a08", "ps1n0g08",
            "ps1n2c16", "ps2n0g08", "ps2n2c16", "s01i3p01", "s01n3p01", "s02i3p01",
            "s02n3p01", "s03i3p01", "s03n3p01", "s04i3p01", "s04n3p01", "s05i3p02",
            "s05n3p02", "s06i3p02", "s06n3p02", "s07i3p02", "s07n3p02", "s08i3p02",
            "s08n3p02", "s09i3p02", "s09n3p02", "s32i3p04", "s32n3p04", "s33i3p04",
            "s33n3p04", "s34i3p04", "s34n3p04", "s35i3p04", "s35n3p04", "s36i3p04",
            "s36n3p04", "s37i3p04", "s37n3p04", "s38i3p04", "s38n3p04", "s39i3p04",
            "s39n3p04", "s40i3p04", "s40n3p04", "tbbn0g04", "tbbn2c16", "tbbn3p08",
            "tbgn2c16", "tbgn3p08", "tbrn2c08", "tbwn0g16", "tbwn3p08", "tbyn3p08",
            "tm3n3p02", "tp0n0g08", "tp0n2c08", "tp0n3p08", "tp1n3p08", "z00n2c08",
            "z03n2c08", "z06n2c08",
        ] {
            trial(&format!("tests/pngsuite/{}.png", name), None);
        }
        Ok(())
    }
}
diff --git a/vendor/png/src/decoder/zlib.rs b/vendor/png/src/decoder/zlib.rs
new file mode 100644
index 0000000..2953c95
--- /dev/null
+++ b/vendor/png/src/decoder/zlib.rs
@@ -0,0 +1,212 @@
+use super::{stream::FormatErrorInner, DecodingError, CHUNCK_BUFFER_SIZE};
+
+use fdeflate::Decompressor;
+
/// Ergonomics wrapper around `fdeflate::Decompressor` for zlib compressed data.
pub(super) struct ZlibStream {
    /// Current decoding state.
    state: Box<fdeflate::Decompressor>,
    /// If there has been a call to decompress already.
    started: bool,
    /// A buffer of compressed data.
    /// We use this for a progress guarantee. The data in the input stream is chunked as given by
    /// the underlying stream buffer. We will not read any more data until the current buffer has
    /// been fully consumed. The zlib decompression can not fully consume all the data when it is
    /// in the middle of the stream, it will treat full symbols and maybe the last bytes need to be
    /// treated in a special way. The exact reason isn't as important but the interface does not
    /// promise us this. Now, the complication is that the _current_ chunking information of PNG
    /// alone is not enough to determine this as indeed the compressed stream is the concatenation
    /// of all consecutive `IDAT`/`fdAT` chunks. We would need to inspect the next chunk header.
    ///
    /// Thus, there needs to be a buffer that allows fully clearing a chunk so that the next chunk
    /// type can be inspected.
    in_buffer: Vec<u8>,
    /// The logical start of the `in_buffer`.
    in_pos: usize,
    /// Remaining buffered decoded bytes.
    /// The decoder sometimes wants to inspect some already finished bytes for further decoding. So
    /// we keep a total of 32KB of decoded data available as long as more data may be appended.
    out_buffer: Vec<u8>,
    /// The cursor position in the output stream as a buffer index.
    out_pos: usize,
    /// Ignore and do not calculate the Adler-32 checksum. Defaults to `true`.
    ///
    /// This flag should not be modified after decompression has started.
    ignore_adler32: bool,
}
+
impl ZlibStream {
    /// Create a fresh stream: empty input buffer, a zero-initialized output
    /// window of two chunk sizes, and a new decompressor.
    pub(crate) fn new() -> Self {
        ZlibStream {
            state: Box::new(Decompressor::new()),
            started: false,
            in_buffer: Vec::with_capacity(CHUNCK_BUFFER_SIZE),
            in_pos: 0,
            out_buffer: vec![0; 2 * CHUNCK_BUFFER_SIZE],
            out_pos: 0,
            ignore_adler32: true,
        }
    }

    /// Discard all buffered state so the stream can decode a fresh zlib
    /// stream (e.g. for the next frame). Note: `ignore_adler32` is kept.
    pub(crate) fn reset(&mut self) {
        self.started = false;
        self.in_buffer.clear();
        self.in_pos = 0;
        self.out_buffer.clear();
        self.out_pos = 0;
        *self.state = Decompressor::new();
    }

    /// Set the `ignore_adler32` flag and return `true` if the flag was
    /// successfully set.
    ///
    /// The default is `true`.
    ///
    /// This flag cannot be modified after decompression has started until the
    /// [ZlibStream] is reset.
    pub(crate) fn set_ignore_adler32(&mut self, flag: bool) -> bool {
        if !self.started {
            self.ignore_adler32 = flag;
            true
        } else {
            false
        }
    }

    /// Return the `ignore_adler32` flag.
    pub(crate) fn ignore_adler32(&self) -> bool {
        self.ignore_adler32
    }

    /// Fill the decoded buffer as far as possible from `data`.
    /// On success returns the number of consumed input bytes.
    pub(crate) fn decompress(
        &mut self,
        data: &[u8],
        image_data: &mut Vec<u8>,
    ) -> Result<usize, DecodingError> {
        self.prepare_vec_for_appending();

        // The checksum mode must be configured before the first read.
        if !self.started && self.ignore_adler32 {
            self.state.ignore_adler32();
        }

        // Previously buffered input takes priority: it must be drained
        // before fresh bytes from `data` are fed to the decompressor.
        let in_data = if self.in_buffer.is_empty() {
            data
        } else {
            &self.in_buffer[self.in_pos..]
        };

        let (mut in_consumed, out_consumed) = self
            .state
            .read(in_data, self.out_buffer.as_mut_slice(), self.out_pos, false)
            .map_err(|err| {
                DecodingError::Format(FormatErrorInner::CorruptFlateStream { err }.into())
            })?;

        // When reading out of the internal buffer, advance its cursor
        // instead of reporting bytes as consumed from `data`.
        if !self.in_buffer.is_empty() {
            self.in_pos += in_consumed;
            in_consumed = 0;
        }

        // Reclaim the internal buffer once it is fully drained.
        if self.in_buffer.len() == self.in_pos {
            self.in_buffer.clear();
            self.in_pos = 0;
        }

        // If nothing from `data` was consumed directly, stash all of it so
        // the caller may treat this chunk as fully processed (the progress
        // guarantee described on `in_buffer`).
        if in_consumed == 0 {
            self.in_buffer.extend_from_slice(data);
            in_consumed = data.len();
        }

        self.started = true;
        self.out_pos += out_consumed;
        self.transfer_finished_data(image_data);

        Ok(in_consumed)
    }

    /// Called after all consecutive IDAT chunks were handled.
    ///
    /// The compressed stream can be split on arbitrary byte boundaries. This enables some cleanup
    /// within the decompressor and flushing additional data which may have been kept back in case
    /// more data were passed to it.
    pub(crate) fn finish_compressed_chunks(
        &mut self,
        image_data: &mut Vec<u8>,
    ) -> Result<(), DecodingError> {
        // Nothing was ever decompressed; there is nothing to flush.
        if !self.started {
            return Ok(());
        }

        // Take ownership of the remaining buffered input.
        let tail = self.in_buffer.split_off(0);
        let tail = &tail[self.in_pos..];

        let mut start = 0;
        loop {
            self.prepare_vec_for_appending();

            // `end_of_input = true` lets the decompressor finalize.
            let (in_consumed, out_consumed) = self
                .state
                .read(
                    &tail[start..],
                    self.out_buffer.as_mut_slice(),
                    self.out_pos,
                    true,
                )
                .map_err(|err| {
                    DecodingError::Format(FormatErrorInner::CorruptFlateStream { err }.into())
                })?;

            start += in_consumed;
            self.out_pos += out_consumed;

            if self.state.is_done() {
                // Hand over everything decoded so far and stop.
                self.out_buffer.truncate(self.out_pos);
                image_data.append(&mut self.out_buffer);
                return Ok(());
            } else {
                let transferred = self.transfer_finished_data(image_data);
                // Guard against an infinite loop: every iteration must make
                // some forward progress.
                assert!(
                    transferred > 0 || in_consumed > 0 || out_consumed > 0,
                    "No more forward progress made in stream decoding."
                );
            }
        }
    }

    /// Resize the vector to allow allocation of more data.
    fn prepare_vec_for_appending(&mut self) {
        // Nothing to do while at least one chunk's worth of space remains.
        if self.out_buffer.len().saturating_sub(self.out_pos) >= CHUNCK_BUFFER_SIZE {
            return;
        }

        let buffered_len = self.decoding_size(self.out_buffer.len());
        debug_assert!(self.out_buffer.len() <= buffered_len);
        self.out_buffer.resize(buffered_len, 0u8);
    }

    fn decoding_size(&self, len: usize) -> usize {
        // Allocate one more chunk size than currently or double the length while ensuring that the
        // allocation is valid and that any cursor within it will be valid.
        len
            // Grow by at least one chunk, or double the current length.
            .saturating_add(CHUNCK_BUFFER_SIZE.max(len))
            // Ensure all buffer indices are valid cursor positions.
            // Note: both cut off and zero extension give correct results.
            .min(u64::max_value() as usize)
            // Ensure the allocation request is valid.
            // TODO: maximum allocation limits?
            .min(isize::max_value() as usize)
    }

    /// Move all decoded bytes except the trailing 32KB window into
    /// `image_data`; returns how many bytes were transferred. The retained
    /// window keeps back-reference data available (see `out_buffer` docs).
    fn transfer_finished_data(&mut self, image_data: &mut Vec<u8>) -> usize {
        let safe = self.out_pos.saturating_sub(CHUNCK_BUFFER_SIZE);
        // TODO: allocation limits.
        image_data.extend(self.out_buffer.drain(..safe));
        self.out_pos -= safe;
        safe
    }
}
diff --git a/vendor/png/src/encoder.rs b/vendor/png/src/encoder.rs
new file mode 100644
index 0000000..812bcaa
--- /dev/null
+++ b/vendor/png/src/encoder.rs
@@ -0,0 +1,2389 @@
+use borrow::Cow;
+use io::{Read, Write};
+use ops::{Deref, DerefMut};
+use std::{borrow, error, fmt, io, mem, ops, result};
+
+use crc32fast::Hasher as Crc32;
+use flate2::write::ZlibEncoder;
+
+use crate::chunk::{self, ChunkType};
+use crate::common::{
+ AnimationControl, BitDepth, BlendOp, BytesPerPixel, ColorType, Compression, DisposeOp,
+ FrameControl, Info, ParameterError, ParameterErrorKind, PixelDimensions, ScaledFloat,
+};
+use crate::filter::{filter, AdaptiveFilterType, FilterType};
+use crate::text_metadata::{
+ EncodableTextChunk, ITXtChunk, TEXtChunk, TextEncodingError, ZTXtChunk,
+};
+use crate::traits::WriteBytesExt;
+
/// Result of an encoding operation: success or an [`EncodingError`].
pub type Result<T> = result::Result<T, EncodingError>;
+
/// The errors that can occur while encoding a PNG.
#[derive(Debug)]
pub enum EncodingError {
    /// An error from the underlying writer.
    IoError(io::Error),
    /// The data did not conform to the PNG format constraints.
    Format(FormatError),
    /// A parameter was set to an invalid or inconsistent value.
    Parameter(ParameterError),
    /// A configured limit was exceeded.
    LimitsExceeded,
}
+
/// A format error while encoding; see [`EncodingError::Format`].
#[derive(Debug)]
pub struct FormatError {
    // Kept private so the kind set can evolve without breaking the public API.
    inner: FormatErrorKind,
}
+
/// Internal discriminants behind [`FormatError`]; the user-facing message for
/// each lives in the `Display` impl below.
#[derive(Debug)]
enum FormatErrorKind {
    ZeroWidth,
    ZeroHeight,
    InvalidColorCombination(BitDepth, ColorType),
    NoPalette,
    // TODO: wait, what?
    /// Count of bytes written beyond the expected image data size.
    WrittenTooMuch(usize),
    NotAnimated,
    OutOfBounds,
    EndReached,
    ZeroFrames,
    MissingFrames,
    /// Count of bytes still expected but not yet written.
    MissingData(usize),
    Unrecoverable,
    BadTextEncoding(TextEncodingError),
}
+
+impl error::Error for EncodingError {
+ fn cause(&self) -> Option<&(dyn error::Error + 'static)> {
+ match self {
+ EncodingError::IoError(err) => Some(err),
+ _ => None,
+ }
+ }
+}
+
+impl fmt::Display for EncodingError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
+ use self::EncodingError::*;
+ match self {
+ IoError(err) => write!(fmt, "{}", err),
+ Format(desc) => write!(fmt, "{}", desc),
+ Parameter(desc) => write!(fmt, "{}", desc),
+ LimitsExceeded => write!(fmt, "Limits are exceeded."),
+ }
+ }
+}
+
+impl fmt::Display for FormatError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> result::Result<(), fmt::Error> {
+ use FormatErrorKind::*;
+ match self.inner {
+ ZeroWidth => write!(fmt, "Zero width not allowed"),
+ ZeroHeight => write!(fmt, "Zero height not allowed"),
+ ZeroFrames => write!(fmt, "Zero frames not allowed"),
+ InvalidColorCombination(depth, color) => write!(
+ fmt,
+ "Invalid combination of bit-depth '{:?}' and color-type '{:?}'",
+ depth, color
+ ),
+ NoPalette => write!(fmt, "can't write indexed image without palette"),
+ WrittenTooMuch(index) => write!(fmt, "wrong data size, got {} bytes too many", index),
+ NotAnimated => write!(fmt, "not an animation"),
+ OutOfBounds => write!(
+ fmt,
+ "the dimension and position go over the frame boundaries"
+ ),
+ EndReached => write!(fmt, "all the frames have been already written"),
+ MissingFrames => write!(fmt, "there are still frames to be written"),
+ MissingData(n) => write!(fmt, "there are still {} bytes to be written", n),
+ Unrecoverable => write!(
+ fmt,
+ "a previous error put the writer into an unrecoverable state"
+ ),
+ BadTextEncoding(tee) => match tee {
+ TextEncodingError::Unrepresentable => write!(
+ fmt,
+ "The text metadata cannot be encoded into valid ISO 8859-1"
+ ),
+ TextEncodingError::InvalidKeywordSize => write!(fmt, "Invalid keyword size"),
+ TextEncodingError::CompressionError => {
+ write!(fmt, "Unable to compress text metadata")
+ }
+ },
+ }
+ }
+}
+
+impl From<io::Error> for EncodingError {
+ fn from(err: io::Error) -> EncodingError {
+ EncodingError::IoError(err)
+ }
+}
+
+impl From<EncodingError> for io::Error {
+ fn from(err: EncodingError) -> io::Error {
+ io::Error::new(io::ErrorKind::Other, err.to_string())
+ }
+}
+
+// Private impl.
+impl From<FormatErrorKind> for FormatError {
+ fn from(kind: FormatErrorKind) -> Self {
+ FormatError { inner: kind }
+ }
+}
+
+impl From<TextEncodingError> for EncodingError {
+ fn from(tee: TextEncodingError) -> Self {
+ EncodingError::Format(FormatError {
+ inner: FormatErrorKind::BadTextEncoding(tee),
+ })
+ }
+}
+
/// PNG Encoder.
///
/// This configures the PNG format options such as animation chunks, palette use, color types,
/// auxiliary chunks etc.
///
/// FIXME: Configuring APNG might be easier (less individual errors) if we had an _adapter_ which
/// borrows this mutably but guarantees that `info.frame_control` is not `None`.
pub struct Encoder<'a, W: Write> {
    /// Destination writer receiving the encoded stream.
    w: W,
    /// Image metadata written into the header chunks.
    info: Info<'a>,
    /// Behavioral options forwarded to the [`Writer`].
    options: Options,
}
+
/// Encoding options, internal type, forwarded to the Writer.
/// (The original comment said "Decoding" — this struct configures encoding.)
#[derive(Default)]
struct Options {
    filter: FilterType,
    adaptive_filter: AdaptiveFilterType,
    sep_def_img: bool,
    validate_sequence: bool,
}
+
+impl<'a, W: Write> Encoder<'a, W> {
+ pub fn new(w: W, width: u32, height: u32) -> Encoder<'static, W> {
+ Encoder {
+ w,
+ info: Info::with_size(width, height),
+ options: Options::default(),
+ }
+ }
+
+ /// Specify that the image is animated.
+ ///
+ /// `num_frames` controls how many frames the animation has, while
+ /// `num_plays` controls how many times the animation should be
+ /// repeated until it stops, if it's zero then it will repeat
+ /// infinitely.
+ ///
+ /// When this method is returns successfully then the images written will be encoded as fdAT
+ /// chunks, except for the first image that is still encoded as `IDAT`. You can control if the
+ /// first frame should be treated as an animation frame with [`Encoder::set_sep_def_img()`].
+ ///
+ /// This method returns an error if `num_frames` is 0.
+ pub fn set_animated(&mut self, num_frames: u32, num_plays: u32) -> Result<()> {
+ if num_frames == 0 {
+ return Err(EncodingError::Format(FormatErrorKind::ZeroFrames.into()));
+ }
+
+ let actl = AnimationControl {
+ num_frames,
+ num_plays,
+ };
+
+ let fctl = FrameControl {
+ sequence_number: 0,
+ width: self.info.width,
+ height: self.info.height,
+ ..Default::default()
+ };
+
+ self.info.animation_control = Some(actl);
+ self.info.frame_control = Some(fctl);
+ Ok(())
+ }
+
+ /// Mark the first animated frame as a 'separate default image'.
+ ///
+ /// In APNG each animated frame is preceded by a special control chunk, `fcTL`. It's up to the
+ /// encoder to decide if the first image, the standard `IDAT` data, should be part of the
+ /// animation by emitting this chunk or by not doing so. A default image that is _not_ part of
+ /// the animation is often interpreted as a thumbnail.
+ ///
+ /// This method will return an error when animation control was not configured
+ /// (which is done by calling [`Encoder::set_animated`]).
+ pub fn set_sep_def_img(&mut self, sep_def_img: bool) -> Result<()> {
+ if self.info.animation_control.is_some() {
+ self.options.sep_def_img = sep_def_img;
+ Ok(())
+ } else {
+ Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
+ }
+ }
+
    /// Sets the raw byte contents of the PLTE chunk. This method accepts
    /// both borrowed and owned byte data.
    pub fn set_palette<T: Into<Cow<'a, [u8]>>>(&mut self, palette: T) {
        self.info.palette = Some(palette.into());
    }
+
    /// Sets the raw byte contents of the tRNS (transparency) chunk. This
    /// method accepts both borrowed and owned byte data.
    pub fn set_trns<T: Into<Cow<'a, [u8]>>>(&mut self, trns: T) {
        self.info.trns = Some(trns.into());
    }
+
    /// Set the display gamma of the source system on which the image was generated or last edited
    /// (written as the gAMA chunk).
    pub fn set_source_gamma(&mut self, source_gamma: ScaledFloat) {
        self.info.source_gamma = Some(source_gamma);
    }
+
    /// Set the chromaticities for the source system's display channels (red, green, blue) and the
    /// whitepoint of the source system on which the image was generated or last edited (written
    /// as the cHRM chunk).
    pub fn set_source_chromaticities(
        &mut self,
        source_chromaticities: super::SourceChromaticities,
    ) {
        self.info.source_chromaticities = Some(source_chromaticities);
    }
+
    /// Mark the image data as conforming to the sRGB color space with the specified rendering
    /// intent.
    ///
    /// Matching source gamma and chromaticities chunks are added automatically.
    /// Any manually specified source gamma or chromaticities will be ignored.
    pub fn set_srgb(&mut self, rendering_intent: super::SrgbRenderingIntent) {
        self.info.srgb = Some(rendering_intent);
    }
+
    /// Start encoding by writing the header data.
    ///
    /// Consumes the encoder; the remaining data can be supplied by methods on
    /// the returned [`Writer`].
    pub fn write_header(self) -> Result<Writer<W>> {
        Writer::new(self.w, PartialInfo::new(&self.info), self.options).init(&self.info)
    }
+
    /// Set the color of the encoded image.
    ///
    /// These correspond to the color types in the png IHDR data that will be written. The length
    /// of the image data that is later supplied must match the color type, otherwise an error will
    /// be emitted.
    pub fn set_color(&mut self, color: ColorType) {
        self.info.color_type = color;
    }
+
    /// Set the indicated bit depth of the image data (written into IHDR).
    pub fn set_depth(&mut self, depth: BitDepth) {
        self.info.bit_depth = depth;
    }
+
+ /// Set compression parameters.
+ ///
+ /// The chosen [`Compression`] level selects the parameters used when the
+ /// image data is deflate-compressed.
+ pub fn set_compression(&mut self, compression: Compression) {
+ self.info.compression = compression;
+ }
+
+ /// Set the used filter type.
+ ///
+ /// The default filter is [`FilterType::Sub`] which provides a basic prediction algorithm for
+ /// sample values based on the previous. For a potentially better compression ratio, at the
+ /// cost of more complex processing, try out [`FilterType::Paeth`].
+ ///
+ /// This can also be changed per-frame after the header is written, see [`Writer::set_filter`].
+ ///
+ /// [`FilterType::Sub`]: enum.FilterType.html#variant.Sub
+ /// [`FilterType::Paeth`]: enum.FilterType.html#variant.Paeth
+ pub fn set_filter(&mut self, filter: FilterType) {
+ self.options.filter = filter;
+ }
+
+ /// Set the adaptive filter type.
+ ///
+ /// Adaptive filtering attempts to select the best filter for each line
+ /// based on heuristics which minimize the file size for compression rather
+ /// than use a single filter for the entire image. The default method is
+ /// [`AdaptiveFilterType::NonAdaptive`].
+ ///
+ /// This can also be changed per-frame after the header is written, see
+ /// [`Writer::set_adaptive_filter`].
+ ///
+ /// [`AdaptiveFilterType::NonAdaptive`]: enum.AdaptiveFilterType.html
+ pub fn set_adaptive_filter(&mut self, adaptive_filter: AdaptiveFilterType) {
+ self.options.adaptive_filter = adaptive_filter;
+ }
+
+ /// Set the fraction of time every frame is going to be displayed, in seconds.
+ ///
+ /// *Note that this parameter can be set for each individual frame after
+ /// [`Encoder::write_header`] is called. (see [`Writer::set_frame_delay`])*
+ ///
+ /// If the denominator is 0, it is to be treated as if it were 100
+ /// (that is, the numerator then specifies 1/100ths of a second).
+ /// If the value of the numerator is 0 the decoder should render the next frame
+ /// as quickly as possible, though viewers may impose a reasonable lower bound.
+ ///
+ /// The default value is 0 for both the numerator and denominator.
+ ///
+ /// This method will return an error if the image is not animated.
+ /// (see [`set_animated`])
+ ///
+ /// [`write_header`]: struct.Encoder.html#method.write_header
+ /// [`set_animated`]: struct.Encoder.html#method.set_animated
+ /// [`Writer::set_frame_delay`]: struct.Writer#method.set_frame_delay
+ pub fn set_frame_delay(&mut self, numerator: u16, denominator: u16) -> Result<()> {
+ if let Some(ref mut fctl) = self.info.frame_control {
+ fctl.delay_den = denominator;
+ fctl.delay_num = numerator;
+ Ok(())
+ } else {
+ // A frame-control chunk only exists for animated images.
+ Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
+ }
+ }
+
+ /// Set the blend operation for every frame.
+ ///
+ /// The blend operation specifies whether the frame is to be alpha blended
+ /// into the current output buffer content, or whether it should completely
+ /// replace its region in the output buffer.
+ ///
+ /// *Note that this parameter can be set for each individual frame after
+ /// [`write_header`] is called. (see [`Writer::set_blend_op`])*
+ ///
+ /// See the [`BlendOp`] documentation for the possible values and their effects.
+ ///
+ /// *Note that for the first frame the two blend modes are functionally
+ /// equivalent due to the clearing of the output buffer at the beginning
+ /// of each play.*
+ ///
+ /// The default value is [`BlendOp::Source`].
+ ///
+ /// This method will return an error if the image is not animated.
+ /// (see [`set_animated`])
+ ///
+ /// [`BlendOp`]: enum.BlendOp.html
+ /// [`BlendOp::Source`]: enum.BlendOp.html#variant.Source
+ /// [`write_header`]: struct.Encoder.html#method.write_header
+ /// [`set_animated`]: struct.Encoder.html#method.set_animated
+ /// [`Writer::set_blend_op`]: struct.Writer#method.set_blend_op
+ pub fn set_blend_op(&mut self, op: BlendOp) -> Result<()> {
+ if let Some(ref mut fctl) = self.info.frame_control {
+ fctl.blend_op = op;
+ Ok(())
+ } else {
+ Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
+ }
+ }
+
+ /// Set the dispose operation for every frame.
+ ///
+ /// The dispose operation specifies how the output buffer should be changed
+ /// at the end of the delay (before rendering the next frame)
+ ///
+ /// *Note that this parameter can be set for each individual frame after
+ /// [`write_header`] is called (see [`Writer::set_dispose_op`])*
+ ///
+ /// See the [`DisposeOp`] documentation for the possible values and their effects.
+ ///
+ /// *Note that if the first frame uses [`DisposeOp::Previous`]
+ /// it will be treated as [`DisposeOp::Background`].*
+ ///
+ /// The default value is [`DisposeOp::None`].
+ ///
+ /// This method will return an error if the image is not animated.
+ /// (see [`set_animated`])
+ ///
+ /// [`DisposeOp`]: ../common/enum.DisposeOp.html
+ /// [`DisposeOp::Previous`]: ../common/enum.DisposeOp.html#variant.Previous
+ /// [`DisposeOp::Background`]: ../common/enum.DisposeOp.html#variant.Background
+ /// [`DisposeOp::None`]: ../common/enum.DisposeOp.html#variant.None
+ /// [`write_header`]: struct.Encoder.html#method.write_header
+ /// [`set_animated`]: struct.Encoder.html#method.set_animated
+ /// [`Writer::set_dispose_op`]: struct.Writer#method.set_dispose_op
+ pub fn set_dispose_op(&mut self, op: DisposeOp) -> Result<()> {
+ if let Some(ref mut fctl) = self.info.frame_control {
+ fctl.dispose_op = op;
+ Ok(())
+ } else {
+ Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
+ }
+ }
+ /// Set the physical pixel dimensions (pHYs metadata) stored for the image,
+ /// or clear them by passing `None`.
+ pub fn set_pixel_dims(&mut self, pixel_dims: Option<PixelDimensions>) {
+ self.info.pixel_dims = pixel_dims
+ }
+ /// Convenience helper that appends an uncompressed tEXt chunk to the [`Info`] struct.
+ pub fn add_text_chunk(&mut self, keyword: String, text: String) -> Result<()> {
+ self.info
+ .uncompressed_latin1_text
+ .push(TEXtChunk::new(keyword, text));
+ Ok(())
+ }
+
+ /// Convenience helper that appends a compressed zTXt chunk to the [`Info`] struct.
+ pub fn add_ztxt_chunk(&mut self, keyword: String, text: String) -> Result<()> {
+ self.info
+ .compressed_latin1_text
+ .push(ZTXtChunk::new(keyword, text));
+ Ok(())
+ }
+
+ /// Convenience function to add iTXt chunks to [`Info`] struct
+ ///
+ /// This function only sets the `keyword` and `text` field of the iTXt chunk.
+ /// To set the other fields, create a [`ITXtChunk`] directly, and then encode it to the output stream.
+ pub fn add_itxt_chunk(&mut self, keyword: String, text: String) -> Result<()> {
+ self.info.utf8_text.push(ITXtChunk::new(keyword, text));
+ Ok(())
+ }
+
+ /// Validate the written image sequence.
+ ///
+ /// Validation is off by default. When enabled, attempts to write more than
+ /// one `IDAT` image, or images beyond the number of frames declared in the
+ /// animation control chunk, fail with an error, and [finishing][finish] the
+ /// image with missing frames fails too.
+ ///
+ /// [finish]: StreamWriter::finish
+ ///
+ /// (It's possible to circumvent these checks by writing raw chunks instead.)
+ pub fn validate_sequence(&mut self, validate: bool) {
+ self.options.validate_sequence = validate;
+ }
+}
+
+ /// PNG writer
+ ///
+ /// Progresses through the image by writing images, frames, or raw individual chunks. This is
+ /// constructed through [`Encoder::write_header()`].
+ ///
+ /// FIXME: Writing of animated chunks might be clearer if we had an _adapter_ that you would call
+ /// to guarantee the next image to be prefaced with a fcTL-chunk, and all other chunks would be
+ /// guaranteed to be `IDAT`/not affected by APNG's frame control.
+ pub struct Writer<W: Write> {
+ /// The underlying writer.
+ w: W,
+ /// The local version of the `Info` struct.
+ info: PartialInfo,
+ /// Global encoding options.
+ options: Options,
+ /// The total number of image frames, counting all consecutive IDAT and fdAT chunks.
+ images_written: u64,
+ /// The total number of animation frames, that is equivalent to counting fcTL chunks.
+ animation_written: u32,
+ /// A flag to note when the IEND chunk was already added.
+ /// This is only set on code paths that drop `Self` to control the destructor.
+ /// Checked by the `Drop` impl to avoid writing IEND twice.
+ iend_written: bool,
+ }
+
+ /// Contains the subset of attributes of [Info] needed for [Writer] to function
+ struct PartialInfo {
+ width: u32,
+ height: u32,
+ bit_depth: BitDepth,
+ color_type: ColorType,
+ frame_control: Option<FrameControl>,
+ animation_control: Option<AnimationControl>,
+ compression: Compression,
+ /// Whether a palette was supplied; `write_image_data` requires one for
+ /// indexed-color images.
+ has_palette: bool,
+ }
+
+ impl PartialInfo {
+ /// Capture the writer-relevant subset of a full `Info`.
+ fn new(info: &Info) -> Self {
+ PartialInfo {
+ width: info.width,
+ height: info.height,
+ bit_depth: info.bit_depth,
+ color_type: info.color_type,
+ frame_control: info.frame_control,
+ animation_control: info.animation_control,
+ compression: info.compression,
+ has_palette: info.palette.is_some(),
+ }
+ }
+
+ fn bpp_in_prediction(&self) -> BytesPerPixel {
+ // Passthrough; note that each call builds a temporary `Info` via `to_info`.
+ self.to_info().bpp_in_prediction()
+ }
+
+ fn raw_row_length(&self) -> usize {
+ // Passthrough
+ self.to_info().raw_row_length()
+ }
+
+ fn raw_row_length_from_width(&self, width: u32) -> usize {
+ // Passthrough
+ self.to_info().raw_row_length_from_width(width)
+ }
+
+ /// Converts this partial info to an owned Info struct,
+ /// setting missing values to their defaults
+ fn to_info(&self) -> Info<'static> {
+ Info {
+ width: self.width,
+ height: self.height,
+ bit_depth: self.bit_depth,
+ color_type: self.color_type,
+ frame_control: self.frame_control,
+ animation_control: self.animation_control,
+ compression: self.compression,
+ ..Default::default()
+ }
+ }
+ }
+
+ /// Default chunk-buffer size (4 KiB) used by the stream writers.
+ const DEFAULT_BUFFER_LENGTH: usize = 4 * 1024;
+
+ /// Write a single PNG chunk: big-endian payload length, the 4-byte chunk
+ /// type, the payload, and the CRC-32 computed over type and payload.
+ pub(crate) fn write_chunk<W: Write>(mut w: W, name: chunk::ChunkType, data: &[u8]) -> Result<()> {
+ // The CRC covers the chunk type and data but not the length field.
+ let mut crc = Crc32::new();
+ crc.update(&name.0);
+ crc.update(data);
+
+ w.write_be(data.len() as u32)?;
+ w.write_all(&name.0)?;
+ w.write_all(data)?;
+ w.write_be(crc.finalize())?;
+ Ok(())
+ }
+
+ impl<W: Write> Writer<W> {
+ /// Construct a writer with zeroed counters; `init` must be called next to
+ /// emit the PNG signature and header before any image data is written.
+ fn new(w: W, info: PartialInfo, options: Options) -> Writer<W> {
+ Writer {
+ w,
+ info,
+ options,
+ images_written: 0,
+ animation_written: 0,
+ iend_written: false,
+ }
+ }
+
+ /// Validate the header parameters, then write the PNG signature and the
+ /// header chunks encoded from `info`.
+ fn init(mut self, info: &Info<'_>) -> Result<Self> {
+ if self.info.width == 0 {
+ return Err(EncodingError::Format(FormatErrorKind::ZeroWidth.into()));
+ }
+
+ if self.info.height == 0 {
+ return Err(EncodingError::Format(FormatErrorKind::ZeroHeight.into()));
+ }
+
+ // Not all (bit depth, color type) pairs are legal in PNG.
+ if self
+ .info
+ .color_type
+ .is_combination_invalid(self.info.bit_depth)
+ {
+ return Err(EncodingError::Format(
+ FormatErrorKind::InvalidColorCombination(self.info.bit_depth, self.info.color_type)
+ .into(),
+ ));
+ }
+
+ self.w.write_all(&[137, 80, 78, 71, 13, 10, 26, 10])?; // PNG signature
+ info.encode(&mut self.w)?;
+
+ Ok(self)
+ }
+
+ /// Write a raw chunk of PNG data.
+ ///
+ /// The chunk will have its length and CRC calculated and written correctly. The data is not
+ /// filtered in any way, but the chunk needs to be short enough to have its length encoded
+ /// correctly.
+ pub fn write_chunk(&mut self, name: ChunkType, data: &[u8]) -> Result<()> {
+ use std::convert::TryFrom;
+
+ // Chunk lengths are limited to 2^31 - 1 bytes by the PNG format.
+ if u32::try_from(data.len()).map_or(true, |length| length > i32::MAX as u32) {
+ let kind = FormatErrorKind::WrittenTooMuch(data.len() - i32::MAX as usize);
+ return Err(EncodingError::Format(kind.into()));
+ }
+
+ write_chunk(&mut self.w, name, data)
+ }
+
+ /// Encode the given text chunk (tEXt, zTXt, or iTXt) directly to the output stream.
+ pub fn write_text_chunk<T: EncodableTextChunk>(&mut self, text_chunk: &T) -> Result<()> {
+ text_chunk.encode(&mut self.w)
+ }
+
+ /// Check if we should allow writing another image.
+ fn validate_new_image(&self) -> Result<()> {
+ if !self.options.validate_sequence {
+ return Ok(());
+ }
+
+ let more_allowed = match self.info.animation_control {
+ // A still image may contain exactly one image.
+ None => self.images_written == 0,
+ // An animation may continue as long as frame control is active;
+ // it is cleared once all declared frames have been written.
+ Some(_) => self.info.frame_control.is_some(),
+ };
+
+ if more_allowed {
+ Ok(())
+ } else {
+ Err(EncodingError::Format(FormatErrorKind::EndReached.into()))
+ }
+ }
+
+ /// Check that the declared image sequence has been written completely.
+ fn validate_sequence_done(&self) -> Result<()> {
+ if !self.options.validate_sequence {
+ return Ok(());
+ }
+
+ // An animation is unfinished while frame control is still active; a
+ // still image is unfinished while nothing has been written at all.
+ let animation_unfinished =
+ self.info.animation_control.is_some() && self.info.frame_control.is_some();
+
+ if animation_unfinished || self.images_written == 0 {
+ Err(EncodingError::Format(FormatErrorKind::MissingFrames.into()))
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Maximum payload of a single IDAT chunk (2^31 - 1 bytes).
+ const MAX_IDAT_CHUNK_LEN: u32 = std::u32::MAX >> 1;
+ #[allow(non_upper_case_globals)]
+ /// Maximum payload of a single fdAT chunk: 4 bytes less than IDAT to leave
+ /// room for the sequence-number prefix.
+ const MAX_fdAT_CHUNK_LEN: u32 = (std::u32::MAX >> 1) - 4;
+
+ /// Writes the next image data.
+ ///
+ /// For animated images this emits the frame-control chunk and either IDAT
+ /// (for the default image / first frame) or a run of fdAT chunks. The
+ /// length of `data` must match the current (frame) dimensions exactly.
+ pub fn write_image_data(&mut self, data: &[u8]) -> Result<()> {
+ if self.info.color_type == ColorType::Indexed && !self.info.has_palette {
+ return Err(EncodingError::Format(FormatErrorKind::NoPalette.into()));
+ }
+
+ self.validate_new_image()?;
+
+ // Frame dimensions take precedence over image dimensions while a
+ // frame-control chunk is active.
+ let width: usize;
+ let height: usize;
+ if let Some(ref mut fctl) = self.info.frame_control {
+ width = fctl.width as usize;
+ height = fctl.height as usize;
+ } else {
+ width = self.info.width as usize;
+ height = self.info.height as usize;
+ }
+
+ // `raw_row_length_from_width` counts the filter-type byte; subtract it
+ // to get the pure scanline length.
+ let in_len = self.info.raw_row_length_from_width(width as u32) - 1;
+ let data_size = in_len * height;
+ if data_size != data.len() {
+ return Err(EncodingError::Parameter(
+ ParameterErrorKind::ImageBufferSize {
+ expected: data_size,
+ actual: data.len(),
+ }
+ .into(),
+ ));
+ }
+
+ // The "previous" scanline starts out all zero, per the PNG filter model.
+ let prev = vec![0; in_len];
+ let mut prev = prev.as_slice();
+
+ let bpp = self.info.bpp_in_prediction();
+ let filter_method = self.options.filter;
+ let adaptive_method = self.options.adaptive_filter;
+
+ let zlib_encoded = match self.info.compression {
+ Compression::Fast => {
+ let mut compressor = fdeflate::Compressor::new(std::io::Cursor::new(Vec::new()))?;
+
+ // `current[0]` holds the filter-type byte for each scanline.
+ let mut current = vec![0; in_len + 1];
+ for line in data.chunks(in_len) {
+ let filter_type = filter(
+ filter_method,
+ adaptive_method,
+ bpp,
+ prev,
+ line,
+ &mut current[1..],
+ );
+
+ current[0] = filter_type as u8;
+ compressor.write_data(&current)?;
+ prev = line;
+ }
+
+ let compressed = compressor.finish()?.into_inner();
+ if compressed.len()
+ > fdeflate::StoredOnlyCompressor::<()>::compressed_size((in_len + 1) * height)
+ {
+ // Write uncompressed data since the result from fast compression would take
+ // more space than that.
+ //
+ // We always use FilterType::NoFilter here regardless of the filter method
+ // requested by the user. Doing filtering again would only add performance
+ // cost for both encoding and subsequent decoding, without improving the
+ // compression ratio.
+ let mut compressor =
+ fdeflate::StoredOnlyCompressor::new(std::io::Cursor::new(Vec::new()))?;
+ for line in data.chunks(in_len) {
+ compressor.write_data(&[0])?;
+ compressor.write_data(line)?;
+ }
+ compressor.finish()?.into_inner()
+ } else {
+ compressed
+ }
+ }
+ _ => {
+ let mut current = vec![0; in_len];
+
+ let mut zlib = ZlibEncoder::new(Vec::new(), self.info.compression.to_options());
+ for line in data.chunks(in_len) {
+ let filter_type = filter(
+ filter_method,
+ adaptive_method,
+ bpp,
+ prev,
+ line,
+ &mut current,
+ );
+
+ zlib.write_all(&[filter_type as u8])?;
+ zlib.write_all(&current)?;
+ prev = line;
+ }
+ zlib.finish()?
+ }
+ };
+
+ match self.info.frame_control {
+ None => {
+ self.write_zlib_encoded_idat(&zlib_encoded)?;
+ }
+ Some(_) if self.should_skip_frame_control_on_default_image() => {
+ self.write_zlib_encoded_idat(&zlib_encoded)?;
+ }
+ Some(ref mut fctl) => {
+ fctl.encode(&mut self.w)?;
+ fctl.sequence_number = fctl.sequence_number.wrapping_add(1);
+ self.animation_written += 1;
+
+ // If the default image is the first frame of an animation, it's still an IDAT.
+ if self.images_written == 0 {
+ self.write_zlib_encoded_idat(&zlib_encoded)?;
+ } else {
+ // Each fdAT chunk carries a 4-byte sequence number before its data.
+ let buff_size = zlib_encoded.len().min(Self::MAX_fdAT_CHUNK_LEN as usize);
+ let mut alldata = vec![0u8; 4 + buff_size];
+ for chunk in zlib_encoded.chunks(Self::MAX_fdAT_CHUNK_LEN as usize) {
+ alldata[..4].copy_from_slice(&fctl.sequence_number.to_be_bytes());
+ alldata[4..][..chunk.len()].copy_from_slice(chunk);
+ write_chunk(&mut self.w, chunk::fdAT, &alldata[..4 + chunk.len()])?;
+ fctl.sequence_number = fctl.sequence_number.wrapping_add(1);
+ }
+ }
+ }
+ }
+
+ self.increment_images_written();
+
+ Ok(())
+ }
+
+ /// Bump the image counter and deactivate frame control once all declared
+ /// animation frames have been written.
+ fn increment_images_written(&mut self) {
+ self.images_written = self.images_written.saturating_add(1);
+
+ if let Some(actl) = self.info.animation_control {
+ if actl.num_frames <= self.animation_written {
+ // If we've written all animation frames, all following will be normal image chunks.
+ self.info.frame_control = None;
+ }
+ }
+ }
+
+ /// Write the terminating IEND chunk.
+ ///
+ /// The flag is set before the write so that `Drop` will not attempt a
+ /// second IEND even when the write itself fails.
+ fn write_iend(&mut self) -> Result<()> {
+ self.iend_written = true;
+ self.write_chunk(chunk::IEND, &[])
+ }
+
+ /// True when a separate default image was requested and nothing has been
+ /// written yet; that first image then gets no fcTL chunk.
+ fn should_skip_frame_control_on_default_image(&self) -> bool {
+ self.options.sep_def_img && self.images_written == 0
+ }
+
+ /// Split the zlib stream into IDAT chunks of at most `MAX_IDAT_CHUNK_LEN`
+ /// bytes each and write them out.
+ fn write_zlib_encoded_idat(&mut self, zlib_encoded: &[u8]) -> Result<()> {
+ zlib_encoded
+ .chunks(Self::MAX_IDAT_CHUNK_LEN as usize)
+ .try_for_each(|part| self.write_chunk(chunk::IDAT, part))
+ }
+
+ /// Set the used filter type for the following frames.
+ ///
+ /// The default filter is [`FilterType::Sub`] which provides a basic prediction algorithm for
+ /// sample values based on the previous. For a potentially better compression ratio, at the
+ /// cost of more complex processing, try out [`FilterType::Paeth`].
+ ///
+ /// [`FilterType::Sub`]: enum.FilterType.html#variant.Sub
+ /// [`FilterType::Paeth`]: enum.FilterType.html#variant.Paeth
+ pub fn set_filter(&mut self, filter: FilterType) {
+ self.options.filter = filter;
+ }
+
+ /// Set the adaptive filter type for the following frames.
+ ///
+ /// Adaptive filtering attempts to select the best filter for each line
+ /// based on heuristics which minimize the file size for compression rather
+ /// than use a single filter for the entire image. The default method is
+ /// [`AdaptiveFilterType::NonAdaptive`].
+ ///
+ /// [`AdaptiveFilterType::NonAdaptive`]: enum.AdaptiveFilterType.html
+ pub fn set_adaptive_filter(&mut self, adaptive_filter: AdaptiveFilterType) {
+ self.options.adaptive_filter = adaptive_filter;
+ }
+
+ /// Set the fraction of time the following frames are going to be displayed,
+ /// in seconds
+ ///
+ /// If the denominator is 0, it is to be treated as if it were 100
+ /// (that is, the numerator then specifies 1/100ths of a second).
+ /// If the value of the numerator is 0 the decoder should render the next frame
+ /// as quickly as possible, though viewers may impose a reasonable lower bound.
+ ///
+ /// This method will return an error if the image is not animated.
+ pub fn set_frame_delay(&mut self, numerator: u16, denominator: u16) -> Result<()> {
+ if let Some(ref mut fctl) = self.info.frame_control {
+ fctl.delay_den = denominator;
+ fctl.delay_num = numerator;
+ Ok(())
+ } else {
+ Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
+ }
+ }
+
+ /// Set the dimension of the following frames.
+ ///
+ /// This function will return an error when:
+ /// - The image is not an animated;
+ ///
+ /// - The selected dimension, considering also the current frame position,
+ /// goes outside the image boundaries;
+ ///
+ /// - One or both the width and height are 0;
+ ///
+ // ??? TODO ???
+ // - The next frame is the default image
+ pub fn set_frame_dimension(&mut self, width: u32, height: u32) -> Result<()> {
+ if let Some(ref mut fctl) = self.info.frame_control {
+ // `Option` ordering: `None` (offset already past the edge) compares
+ // less than any `Some`, so that case is reported as out of bounds.
+ if Some(width) > self.info.width.checked_sub(fctl.x_offset)
+ || Some(height) > self.info.height.checked_sub(fctl.y_offset)
+ {
+ return Err(EncodingError::Format(FormatErrorKind::OutOfBounds.into()));
+ } else if width == 0 {
+ return Err(EncodingError::Format(FormatErrorKind::ZeroWidth.into()));
+ } else if height == 0 {
+ return Err(EncodingError::Format(FormatErrorKind::ZeroHeight.into()));
+ }
+ fctl.width = width;
+ fctl.height = height;
+ Ok(())
+ } else {
+ Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
+ }
+ }
+
+ /// Set the position of the following frames.
+ ///
+ /// An error will be returned if:
+ /// - The image is not animated;
+ ///
+ /// - The selected position, considering also the current frame dimension,
+ /// goes outside the image boundaries;
+ ///
+ // ??? TODO ???
+ // - The next frame is the default image
+ pub fn set_frame_position(&mut self, x: u32, y: u32) -> Result<()> {
+ if let Some(ref mut fctl) = self.info.frame_control {
+ // Same `Option` ordering trick as in `set_frame_dimension`:
+ // `None` means the frame already exceeds the image size.
+ if Some(x) > self.info.width.checked_sub(fctl.width)
+ || Some(y) > self.info.height.checked_sub(fctl.height)
+ {
+ return Err(EncodingError::Format(FormatErrorKind::OutOfBounds.into()));
+ }
+ fctl.x_offset = x;
+ fctl.y_offset = y;
+ Ok(())
+ } else {
+ Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
+ }
+ }
+
+ /// Set the frame dimension to occupy all the image, starting from
+ /// the current position.
+ ///
+ /// To reset the frame to the full image size [`reset_frame_position`]
+ /// should be called first.
+ ///
+ /// This method will return an error if the image is not animated.
+ ///
+ /// [`reset_frame_position`]: struct.Writer.html#method.reset_frame_position
+ pub fn reset_frame_dimension(&mut self) -> Result<()> {
+ if let Some(ref mut fctl) = self.info.frame_control {
+ // NOTE(review): the subtraction assumes offsets never exceed the image
+ // dimensions — upheld by the bounds checks in `set_frame_position`.
+ fctl.width = self.info.width - fctl.x_offset;
+ fctl.height = self.info.height - fctl.y_offset;
+ Ok(())
+ } else {
+ Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
+ }
+ }
+
+ /// Set the frame position to (0, 0).
+ ///
+ /// Equivalent to calling [`set_frame_position(0, 0)`].
+ ///
+ /// This method will return an error if the image is not animated.
+ ///
+ /// [`set_frame_position(0, 0)`]: struct.Writer.html#method.set_frame_position
+ pub fn reset_frame_position(&mut self) -> Result<()> {
+ if let Some(ref mut fctl) = self.info.frame_control {
+ fctl.x_offset = 0;
+ fctl.y_offset = 0;
+ Ok(())
+ } else {
+ Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
+ }
+ }
+
+ /// Set the blend operation for the following frames.
+ ///
+ /// The blend operation specifies whether the frame is to be alpha blended
+ /// into the current output buffer content, or whether it should completely
+ /// replace its region in the output buffer.
+ ///
+ /// See the [`BlendOp`] documentation for the possible values and their effects.
+ ///
+ /// *Note that for the first frame the two blend modes are functionally
+ /// equivalent due to the clearing of the output buffer at the beginning
+ /// of each play.*
+ ///
+ /// This method will return an error if the image is not animated.
+ ///
+ /// [`BlendOp`]: enum.BlendOp.html
+ pub fn set_blend_op(&mut self, op: BlendOp) -> Result<()> {
+ if let Some(ref mut fctl) = self.info.frame_control {
+ fctl.blend_op = op;
+ Ok(())
+ } else {
+ Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
+ }
+ }
+
+ /// Set the dispose operation for the following frames.
+ ///
+ /// The dispose operation specifies how the output buffer should be changed
+ /// at the end of the delay (before rendering the next frame)
+ ///
+ /// See the [`DisposeOp`] documentation for the possible values and their effects.
+ ///
+ /// *Note that if the first frame uses [`DisposeOp::Previous`]
+ /// it will be treated as [`DisposeOp::Background`].*
+ ///
+ /// This method will return an error if the image is not animated.
+ ///
+ /// [`DisposeOp`]: ../common/enum.DisposeOp.html
+ /// [`DisposeOp::Previous`]: ../common/enum.DisposeOp.html#variant.Previous
+ /// [`DisposeOp::Background`]: ../common/enum.DisposeOp.html#variant.Background
+ pub fn set_dispose_op(&mut self, op: DisposeOp) -> Result<()> {
+ if let Some(ref mut fctl) = self.info.frame_control {
+ fctl.dispose_op = op;
+ Ok(())
+ } else {
+ Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
+ }
+ }
+
+ /// Create a stream writer.
+ ///
+ /// This allows you to create images that do not fit in memory. The default
+ /// chunk size is 4 KiB, use `stream_writer_with_size` to set another chunk
+ /// size.
+ ///
+ /// This borrows the writer which allows for manually appending additional
+ /// chunks after the image data has been written.
+ pub fn stream_writer(&mut self) -> Result<StreamWriter<W>> {
+ self.stream_writer_with_size(DEFAULT_BUFFER_LENGTH)
+ }
+
+ /// Create a stream writer with custom buffer size.
+ ///
+ /// See [`stream_writer`].
+ ///
+ /// [`stream_writer`]: #method.stream_writer
+ pub fn stream_writer_with_size(&mut self, size: usize) -> Result<StreamWriter<W>> {
+ StreamWriter::new(ChunkOutput::Borrowed(self), size)
+ }
+
+ /// Turn this into a stream writer for image data.
+ ///
+ /// This allows you to create images that do not fit in memory. The default
+ /// chunk size is 4 KiB, use `stream_writer_with_size` to set another chunk
+ /// size.
+ pub fn into_stream_writer(self) -> Result<StreamWriter<'static, W>> {
+ self.into_stream_writer_with_size(DEFAULT_BUFFER_LENGTH)
+ }
+
+ /// Turn this into a stream writer with custom buffer size.
+ ///
+ /// See [`into_stream_writer`].
+ ///
+ /// [`into_stream_writer`]: #method.into_stream_writer
+ pub fn into_stream_writer_with_size(self, size: usize) -> Result<StreamWriter<'static, W>> {
+ StreamWriter::new(ChunkOutput::Owned(self), size)
+ }
+
+ /// Consume the stream writer with validation.
+ ///
+ /// Unlike a simple drop this ensures that the final chunk was written correctly. When other
+ /// validation options (chunk sequencing) had been turned on in the configuration then it will
+ /// also do a check on their correctness _before_ writing the final chunk.
+ pub fn finish(mut self) -> Result<()> {
+ self.validate_sequence_done()?;
+ // Writing IEND here also marks `iend_written`, so the destructor below
+ // will not write a second one.
+ self.write_iend()?;
+ self.w.flush()?;
+
+ // Explicitly drop `self` just for clarity.
+ drop(self);
+ Ok(())
+ }
+ }
+
+ impl<W: Write> Drop for Writer<W> {
+ /// Best-effort IEND on drop; any write error is silently ignored.
+ /// Call [`Writer::finish`] instead to observe errors.
+ fn drop(&mut self) {
+ if !self.iend_written {
+ let _ = self.write_iend();
+ }
+ }
+ }
+
+ /// A `Writer` held either by borrow (from `stream_writer`) or by value
+ /// (from `into_stream_writer`), accessed uniformly through `Deref`.
+ enum ChunkOutput<'a, W: Write> {
+ Borrowed(&'a mut Writer<W>),
+ Owned(Writer<W>),
+ }
+
+ // opted for deref for practical reasons: it lets `ChunkWriter` reach the
+ // underlying `Writer` fields the same way for both variants
+ impl<'a, W: Write> Deref for ChunkOutput<'a, W> {
+ type Target = Writer<W>;
+
+ fn deref(&self) -> &Self::Target {
+ match self {
+ ChunkOutput::Borrowed(writer) => writer,
+ ChunkOutput::Owned(writer) => writer,
+ }
+ }
+ }
+
+ impl<'a, W: Write> DerefMut for ChunkOutput<'a, W> {
+ /// Mutable counterpart of the `Deref` impl above.
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ match self {
+ ChunkOutput::Borrowed(writer) => writer,
+ ChunkOutput::Owned(writer) => writer,
+ }
+ }
+ }
+
+ /// This writer is used between the actual writer and the
+ /// ZlibEncoder and has the job of packaging the compressed
+ /// data into a PNG chunk, based on the image metadata
+ ///
+ /// Currently the way it works is that the specified buffer
+ /// will hold one chunk at a time and buffer the incoming
+ /// data until `flush` is called or the maximum chunk size
+ /// is reached.
+ ///
+ /// The maximum chunk is the smallest between the selected buffer size
+ /// and `u32::MAX >> 1` (`0x7fffffff` or `2147483647` dec)
+ ///
+ /// When a chunk has to be flushed the length (that is now known)
+ /// and the CRC will be written at the correct locations in the chunk.
+ struct ChunkWriter<'a, W: Write> {
+ writer: ChunkOutput<'a, W>,
+ buffer: Vec<u8>,
+ /// keeps track of where the last byte was written
+ index: usize,
+ /// The chunk type (`IDAT` or `fdAT`) used when the buffer is flushed.
+ curr_chunk: ChunkType,
+ }
+
+ impl<'a, W: Write> ChunkWriter<'a, W> {
+ fn new(writer: ChunkOutput<'a, W>, buf_len: usize) -> ChunkWriter<'a, W> {
+ // currently buf_len will determine the size of each chunk
+ // the len is capped to the maximum size every chunk can hold
+ // (this won't ever overflow an u32)
+ //
+ // TODO (maybe): find a way to hold two chunks at a time if `usize`
+ // is 64 bits.
+ const CAP: usize = std::u32::MAX as usize >> 1;
+ // The first image is always IDAT; later frames use fdAT.
+ let curr_chunk = if writer.images_written == 0 {
+ chunk::IDAT
+ } else {
+ chunk::fdAT
+ };
+ ChunkWriter {
+ writer,
+ buffer: vec![0; CAP.min(buf_len)],
+ index: 0,
+ curr_chunk,
+ }
+ }
+
+ /// Returns the size of each scanline for the next frame
+ /// paired with the size of the whole frame
+ ///
+ /// This is used by the `StreamWriter` to know when the scanline ends
+ /// so it can filter compress it and also to know when to start
+ /// the next one
+ fn next_frame_info(&self) -> (usize, usize) {
+ let wrt = self.writer.deref();
+
+ // Frame dimensions take precedence while frame control is active.
+ let (width, height) = match wrt.info.frame_control {
+ Some(fctl) => (fctl.width as usize, fctl.height as usize),
+ None => (wrt.info.width as usize, wrt.info.height as usize),
+ };
+
+ // Subtract the filter-type byte to get the pure scanline length.
+ let in_len = wrt.info.raw_row_length_from_width(width as u32) - 1;
+ (in_len, in_len * height)
+ }
+
+ /// Prepare the next image: pick `IDAT` vs `fdAT` for the upcoming data
+ /// chunks and, for animation frames, write the fcTL chunk.
+ ///
+ /// NOTE: this bypasses the internal buffer so the flush method should be called before this;
+ /// in the case there is some data left in the buffer when this is called, it will panic
+ fn write_header(&mut self) -> Result<()> {
+ assert_eq!(self.index, 0, "Called when not flushed");
+ let wrt = self.writer.deref_mut();
+
+ self.curr_chunk = if wrt.images_written == 0 {
+ chunk::IDAT
+ } else {
+ chunk::fdAT
+ };
+
+ match wrt.info.frame_control {
+ // A separate default image carries no frame control chunk.
+ Some(_) if wrt.should_skip_frame_control_on_default_image() => {}
+ Some(ref mut fctl) => {
+ fctl.encode(&mut wrt.w)?;
+ // Wrapping add for consistency with `Writer::write_image_data`;
+ // a plain `+= 1` would panic on overflow in debug builds.
+ fctl.sequence_number = fctl.sequence_number.wrapping_add(1);
+ }
+ _ => {}
+ }
+
+ Ok(())
+ }
+
+ /// Set the `FrameControl` for the following frame
+ ///
+ /// It will ignore the `sequence_number` of the parameter
+ /// as it is updated internally.
+ ///
+ /// Panics if the image is not animated.
+ fn set_fctl(&mut self, f: FrameControl) {
+ if let Some(ref mut fctl) = self.writer.info.frame_control {
+ // Ignore the sequence number
+ *fctl = FrameControl {
+ sequence_number: fctl.sequence_number,
+ ..f
+ };
+ } else {
+ panic!("This function must be called on an animated PNG")
+ }
+ }
+
+ /// Flushes the current chunk
+ ///
+ /// Writes the buffered bytes as one chunk of type `curr_chunk` and resets
+ /// the buffer index; a no-op when the buffer is empty.
+ fn flush_inner(&mut self) -> io::Result<()> {
+ if self.index > 0 {
+ // flush the chunk and reset everything
+ write_chunk(
+ &mut self.writer.w,
+ self.curr_chunk,
+ &self.buffer[..self.index],
+ )?;
+
+ self.index = 0;
+ }
+ Ok(())
+ }
+ }
+
+ impl<'a, W: Write> Write for ChunkWriter<'a, W> {
+ /// Buffers `data` into the current chunk, flushing a full chunk to the
+ /// underlying writer when the buffer fills. Returns the number of bytes
+ /// consumed from `data`.
+ fn write(&mut self, mut data: &[u8]) -> io::Result<usize> {
+ if data.is_empty() {
+ return Ok(0);
+ }
+
+ // index == 0 means a chunk has been flushed out
+ if self.index == 0 {
+ let wrt = self.writer.deref_mut();
+
+ // Prepare the next animated frame, if any: fdAT chunks start with
+ // a 4-byte big-endian sequence number.
+ let no_fctl = wrt.should_skip_frame_control_on_default_image();
+ if wrt.info.frame_control.is_some() && !no_fctl {
+ let fctl = wrt.info.frame_control.as_mut().unwrap();
+ self.buffer[0..4].copy_from_slice(&fctl.sequence_number.to_be_bytes());
+ // Wrapping add for consistency with `Writer::write_image_data`;
+ // a plain `+= 1` would panic on overflow in debug builds.
+ fctl.sequence_number = fctl.sequence_number.wrapping_add(1);
+ self.index = 4;
+ }
+ }
+
+ // Cap the write to the number of bytes that can still
+ // be added to the current chunk
+ let written = data.len().min(self.buffer.len() - self.index);
+ data = &data[..written];
+
+ self.buffer[self.index..][..written].copy_from_slice(data);
+ self.index += written;
+
+ // if the maximum data for this chunk has been reached it needs to be flushed
+ if self.index == self.buffer.len() {
+ self.flush_inner()?;
+ }
+
+ Ok(written)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.flush_inner()
+ }
+ }
+
+ impl<W: Write> Drop for ChunkWriter<'_, W> {
+ /// Best-effort flush of any buffered chunk data; errors are ignored.
+ fn drop(&mut self) {
+ let _ = self.flush();
+ }
+ }
+
+ // TODO: find a better name
+ //
+ /// This enum is used to allow the `StreamWriter` to keep
+ /// its inner `ChunkWriter` without wrapping it inside a
+ /// `ZlibEncoder`. This is used in the case that an error occurs
+ /// between the change of state that happens when the last write
+ /// of a frame is performed, which obviously has to be returned.
+ /// This creates the problem of where to store the writer before
+ /// exiting the function, and this is where `Wrapper` comes in.
+ ///
+ /// Unfortunately the `ZlibWriter` can't be used because on the
+ /// write following the error, `finish` would be called and that
+ /// would write some data even if 0 bytes were compressed.
+ ///
+ /// If the `finish` function fails then there is nothing much to
+ /// do as the `ChunkWriter` would get lost so the `Unrecoverable`
+ /// variant is used to signal that.
+ enum Wrapper<'a, W: Write> {
+ Chunk(ChunkWriter<'a, W>),
+ Zlib(ZlibEncoder<ChunkWriter<'a, W>>),
+ Unrecoverable,
+ /// This is used in-between, should never be matched
+ None,
+ }
+
+impl<'a, W: Write> Wrapper<'a, W> {
+ /// Like `Option::take` this returns the `Wrapper` contained
+ /// in `self` and replaces it with `Wrapper::None`
+ fn take(&mut self) -> Wrapper<'a, W> {
+ let mut swap = Wrapper::None;
+ mem::swap(self, &mut swap);
+ swap
+ }
+}
+
/// Streaming PNG writer
///
/// This may silently fail in the destructor, so it is a good idea to call
/// [`finish`](#method.finish) or [`flush`] before dropping.
///
/// [`flush`]: https://doc.rust-lang.org/stable/std/io/trait.Write.html#tymethod.flush
pub struct StreamWriter<'a, W: Write> {
    /// The `Wrapper` is needed in order to access the inner `ChunkWriter` in-between
    /// each frame, which is needed for writing the fcTL chunks between each frame
    writer: Wrapper<'a, W>,
    /// Scanline buffer holding the previous (already written) row, used as
    /// the reference row when filtering the current one.
    prev_buf: Vec<u8>,
    /// Scanline buffer being filled by `write` for the current row.
    curr_buf: Vec<u8>,
    /// Amount of data already written
    index: usize,
    /// length of the current scanline
    line_len: usize,
    /// size of the frame (width * height * sample_size)
    to_write: usize,

    // Full image dimensions, used to bounds-check per-frame offsets/sizes.
    width: u32,
    height: u32,

    bpp: BytesPerPixel,
    filter: FilterType,
    adaptive_filter: AdaptiveFilterType,
    /// Pending frame-control settings for the next animated frame, `None`
    /// for non-animated images.
    fctl: Option<FrameControl>,
    compression: Compression,
}
+
impl<'a, W: Write> StreamWriter<'a, W> {
    /// Creates a new `StreamWriter`, writing the first frame's header and
    /// wrapping the chunk writer in a zlib encoder ready for pixel data.
    fn new(writer: ChunkOutput<'a, W>, buf_len: usize) -> Result<StreamWriter<'a, W>> {
        let PartialInfo {
            width,
            height,
            frame_control: fctl,
            compression,
            ..
        } = writer.info;

        let bpp = writer.info.bpp_in_prediction();
        // Raw row length minus the leading filter-type byte.
        let in_len = writer.info.raw_row_length() - 1;
        let filter = writer.options.filter;
        let adaptive_filter = writer.options.adaptive_filter;
        let prev_buf = vec![0; in_len];
        let curr_buf = vec![0; in_len];

        let mut chunk_writer = ChunkWriter::new(writer, buf_len);
        let (line_len, to_write) = chunk_writer.next_frame_info();
        chunk_writer.write_header()?;
        let zlib = ZlibEncoder::new(chunk_writer, compression.to_options());

        Ok(StreamWriter {
            writer: Wrapper::Zlib(zlib),
            index: 0,
            prev_buf,
            curr_buf,
            bpp,
            filter,
            width,
            height,
            adaptive_filter,
            line_len,
            to_write,
            fctl,
            compression,
        })
    }

    /// Set the used filter type for the next frame.
    ///
    /// The default filter is [`FilterType::Sub`] which provides a basic prediction algorithm for
    /// sample values based on the previous. For a potentially better compression ratio, at the
    /// cost of more complex processing, try out [`FilterType::Paeth`].
    ///
    /// [`FilterType::Sub`]: enum.FilterType.html#variant.Sub
    /// [`FilterType::Paeth`]: enum.FilterType.html#variant.Paeth
    pub fn set_filter(&mut self, filter: FilterType) {
        self.filter = filter;
    }

    /// Set the adaptive filter type for the next frame.
    ///
    /// Adaptive filtering attempts to select the best filter for each line
    /// based on heuristics which minimize the file size for compression rather
    /// than use a single filter for the entire image. The default method is
    /// [`AdaptiveFilterType::NonAdaptive`].
    ///
    /// [`AdaptiveFilterType::NonAdaptive`]: enum.AdaptiveFilterType.html
    pub fn set_adaptive_filter(&mut self, adaptive_filter: AdaptiveFilterType) {
        self.adaptive_filter = adaptive_filter;
    }

    /// Set the fraction of time the following frames are going to be displayed,
    /// in seconds
    ///
    /// If the denominator is 0, it is to be treated as if it were 100
    /// (that is, the numerator then specifies 1/100ths of a second).
    /// If the value of the numerator is 0 the decoder should render the next frame
    /// as quickly as possible, though viewers may impose a reasonable lower bound.
    ///
    /// This method will return an error if the image is not animated.
    pub fn set_frame_delay(&mut self, numerator: u16, denominator: u16) -> Result<()> {
        if let Some(ref mut fctl) = self.fctl {
            fctl.delay_den = denominator;
            fctl.delay_num = numerator;
            Ok(())
        } else {
            Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
        }
    }

    /// Set the dimension of the following frames.
    ///
    /// This function will return an error when:
    /// - The image is not animated;
    ///
    /// - The selected dimension, considering also the current frame position,
    ///   goes outside the image boundaries;
    ///
    /// - One or both the width and height are 0;
    ///
    pub fn set_frame_dimension(&mut self, width: u32, height: u32) -> Result<()> {
        if let Some(ref mut fctl) = self.fctl {
            // `checked_sub` yields `None` when the offset already exceeds the
            // image size; since `Some(_) > None` is true, that case is also
            // rejected as out of bounds.
            if Some(width) > self.width.checked_sub(fctl.x_offset)
                || Some(height) > self.height.checked_sub(fctl.y_offset)
            {
                return Err(EncodingError::Format(FormatErrorKind::OutOfBounds.into()));
            } else if width == 0 {
                return Err(EncodingError::Format(FormatErrorKind::ZeroWidth.into()));
            } else if height == 0 {
                return Err(EncodingError::Format(FormatErrorKind::ZeroHeight.into()));
            }
            fctl.width = width;
            fctl.height = height;
            Ok(())
        } else {
            Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
        }
    }

    /// Set the position of the following frames.
    ///
    /// An error will be returned if:
    /// - The image is not animated;
    ///
    /// - The selected position, considering also the current frame dimension,
    ///   goes outside the image boundaries;
    ///
    pub fn set_frame_position(&mut self, x: u32, y: u32) -> Result<()> {
        if let Some(ref mut fctl) = self.fctl {
            // Same `Some(_) > None` trick as in `set_frame_dimension`.
            if Some(x) > self.width.checked_sub(fctl.width)
                || Some(y) > self.height.checked_sub(fctl.height)
            {
                return Err(EncodingError::Format(FormatErrorKind::OutOfBounds.into()));
            }
            fctl.x_offset = x;
            fctl.y_offset = y;
            Ok(())
        } else {
            Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
        }
    }

    /// Set the frame dimension to occupy all the image, starting from
    /// the current position.
    ///
    /// To reset the frame to the full image size [`reset_frame_position`]
    /// should be called first.
    ///
    /// This method will return an error if the image is not animated.
    ///
    /// [`reset_frame_position`]: struct.Writer.html#method.reset_frame_position
    pub fn reset_frame_dimension(&mut self) -> Result<()> {
        if let Some(ref mut fctl) = self.fctl {
            fctl.width = self.width - fctl.x_offset;
            fctl.height = self.height - fctl.y_offset;
            Ok(())
        } else {
            Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
        }
    }

    /// Set the frame position to (0, 0).
    ///
    /// Equivalent to calling [`set_frame_position(0, 0)`].
    ///
    /// This method will return an error if the image is not animated.
    ///
    /// [`set_frame_position(0, 0)`]: struct.Writer.html#method.set_frame_position
    pub fn reset_frame_position(&mut self) -> Result<()> {
        if let Some(ref mut fctl) = self.fctl {
            fctl.x_offset = 0;
            fctl.y_offset = 0;
            Ok(())
        } else {
            Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
        }
    }

    /// Set the blend operation for the following frames.
    ///
    /// The blend operation specifies whether the frame is to be alpha blended
    /// into the current output buffer content, or whether it should completely
    /// replace its region in the output buffer.
    ///
    /// See the [`BlendOp`] documentation for the possible values and their effects.
    ///
    /// *Note that for the first frame the two blend modes are functionally
    /// equivalent due to the clearing of the output buffer at the beginning
    /// of each play.*
    ///
    /// This method will return an error if the image is not animated.
    ///
    /// [`BlendOp`]: enum.BlendOp.html
    pub fn set_blend_op(&mut self, op: BlendOp) -> Result<()> {
        if let Some(ref mut fctl) = self.fctl {
            fctl.blend_op = op;
            Ok(())
        } else {
            Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
        }
    }

    /// Set the dispose operation for the following frames.
    ///
    /// The dispose operation specifies how the output buffer should be changed
    /// at the end of the delay (before rendering the next frame)
    ///
    /// See the [`DisposeOp`] documentation for the possible values and their effects.
    ///
    /// *Note that if the first frame uses [`DisposeOp::Previous`]
    /// it will be treated as [`DisposeOp::Background`].*
    ///
    /// This method will return an error if the image is not animated.
    ///
    /// [`DisposeOp`]: ../common/enum.DisposeOp.html
    /// [`DisposeOp::Previous`]: ../common/enum.DisposeOp.html#variant.Previous
    /// [`DisposeOp::Background`]: ../common/enum.DisposeOp.html#variant.Background
    pub fn set_dispose_op(&mut self, op: DisposeOp) -> Result<()> {
        if let Some(ref mut fctl) = self.fctl {
            fctl.dispose_op = op;
            Ok(())
        } else {
            Err(EncodingError::Format(FormatErrorKind::NotAnimated.into()))
        }
    }

    /// Consumes the writer, flushing pending data and validating that all
    /// promised frames have been written.
    ///
    /// Returns an error if the current frame is still missing data.
    pub fn finish(mut self) -> Result<()> {
        if self.to_write > 0 {
            let err = FormatErrorKind::MissingData(self.to_write).into();
            return Err(EncodingError::Format(err));
        }

        // TODO: call `writer.finish` somehow?
        self.flush()?;

        // Sequence validation only happens when the zlib stream has already
        // been finished and we are back at the `Chunk` state.
        if let Wrapper::Chunk(wrt) = self.writer.take() {
            wrt.writer.validate_sequence_done()?;
        }

        Ok(())
    }

    /// Flushes the buffered chunk, checks if it was the last frame,
    /// writes the next frame header and gets the next frame scanline size
    /// and image size.
    /// NOTE: This method must only be called when the writer is the variant Chunk(_)
    fn new_frame(&mut self) -> Result<()> {
        let wrt = match &mut self.writer {
            Wrapper::Chunk(wrt) => wrt,
            Wrapper::Unrecoverable => {
                let err = FormatErrorKind::Unrecoverable.into();
                return Err(EncodingError::Format(err));
            }
            Wrapper::Zlib(_) => unreachable!("never called on a half-finished frame"),
            Wrapper::None => unreachable!(),
        };
        wrt.flush()?;
        wrt.writer.validate_new_image()?;

        // Apply any frame-control settings queued up by the setters above.
        if let Some(fctl) = self.fctl {
            wrt.set_fctl(fctl);
        }
        let (scansize, size) = wrt.next_frame_info();
        self.line_len = scansize;
        self.to_write = size;

        wrt.write_header()?;
        wrt.writer.increment_images_written();

        // now it can be taken because the next statements cannot cause any errors
        match self.writer.take() {
            Wrapper::Chunk(wrt) => {
                let encoder = ZlibEncoder::new(wrt, self.compression.to_options());
                self.writer = Wrapper::Zlib(encoder);
            }
            _ => unreachable!(),
        };

        Ok(())
    }
}
+
impl<'a, W: Write> Write for StreamWriter<'a, W> {
    /// Accepts raw image bytes, buffering them into scanlines; every
    /// completed scanline is filtered and pushed through the zlib encoder.
    fn write(&mut self, mut data: &[u8]) -> io::Result<usize> {
        if let Wrapper::Unrecoverable = self.writer {
            let err = FormatErrorKind::Unrecoverable.into();
            return Err(EncodingError::Format(err).into());
        }

        if data.is_empty() {
            return Ok(0);
        }

        // The previous frame is complete: finish its zlib stream to recover
        // the chunk writer, then start the next frame.
        if self.to_write == 0 {
            match self.writer.take() {
                Wrapper::Zlib(wrt) => match wrt.finish() {
                    Ok(chunk) => self.writer = Wrapper::Chunk(chunk),
                    Err(err) => {
                        // The inner writer is lost inside the failed encoder;
                        // mark the stream as unusable.
                        self.writer = Wrapper::Unrecoverable;
                        return Err(err);
                    }
                },
                chunk @ Wrapper::Chunk(_) => self.writer = chunk,
                Wrapper::Unrecoverable => unreachable!(),
                Wrapper::None => unreachable!(),
            };

            // Transition Wrapper::Chunk to Wrapper::Zlib.
            self.new_frame()?;
        }

        // Copy at most one scanline's worth of input into the row buffer.
        let written = data.read(&mut self.curr_buf[..self.line_len][self.index..])?;
        self.index += written;
        self.to_write -= written;

        if self.index == self.line_len {
            // TODO: reuse this buffer between rows.
            let mut filtered = vec![0; self.curr_buf.len()];
            let filter_type = filter(
                self.filter,
                self.adaptive_filter,
                self.bpp,
                &self.prev_buf,
                &self.curr_buf,
                &mut filtered,
            );
            // This can't fail as the other variant is used only to allow the zlib encoder to finish
            let wrt = match &mut self.writer {
                Wrapper::Zlib(wrt) => wrt,
                _ => unreachable!(),
            };

            // Each scanline is prefixed by its filter-type byte.
            wrt.write_all(&[filter_type as u8])?;
            wrt.write_all(&filtered)?;
            // The freshly-written row becomes the reference for the next one.
            mem::swap(&mut self.prev_buf, &mut self.curr_buf);
            self.index = 0;
        }

        Ok(written)
    }

    fn flush(&mut self) -> io::Result<()> {
        match &mut self.writer {
            Wrapper::Zlib(wrt) => wrt.flush()?,
            Wrapper::Chunk(wrt) => wrt.flush()?,
            // This handles both the case where we entered an unrecoverable state after zlib
            // decoding failure and after a panic while we had taken the chunk/zlib reader.
            Wrapper::Unrecoverable | Wrapper::None => {
                let err = FormatErrorKind::Unrecoverable.into();
                return Err(EncodingError::Format(err).into());
            }
        }

        if self.index > 0 {
            // A partially-buffered scanline cannot be flushed meaningfully.
            let err = FormatErrorKind::WrittenTooMuch(self.index).into();
            return Err(EncodingError::Format(err).into());
        }

        Ok(())
    }
}
+
+impl<W: Write> Drop for StreamWriter<'_, W> {
+ fn drop(&mut self) {
+ let _ = self.flush();
+ }
+}
+
#[cfg(test)]
// Round-trip, validation and streaming tests for the encoder, driven against
// the pngsuite reference images where pixel data is needed.
mod tests {
    use super::*;
    use crate::Decoder;

    use rand::{thread_rng, Rng};
    use std::fs::File;
    use std::io::{Cursor, Write};
    use std::{cmp, io};

    // Decode each pngsuite image, re-encode it through a writer that flushes
    // in random-sized pieces, then decode again and compare pixels.
    #[test]
    fn roundtrip() {
        // More loops = more random testing, but also more test wait time
        for _ in 0..10 {
            for path in glob::glob("tests/pngsuite/*.png")
                .unwrap()
                .map(|r| r.unwrap())
            {
                if path.file_name().unwrap().to_str().unwrap().starts_with('x') {
                    // x* files are expected to fail to decode
                    continue;
                }
                eprintln!("{}", path.display());
                // Decode image
                let decoder = Decoder::new(File::open(path).unwrap());
                let mut reader = decoder.read_info().unwrap();
                let mut buf = vec![0; reader.output_buffer_size()];
                let info = reader.next_frame(&mut buf).unwrap();
                // Encode decoded image
                let mut out = Vec::new();
                {
                    let mut wrapper = RandomChunkWriter {
                        rng: thread_rng(),
                        w: &mut out,
                    };

                    let mut encoder = Encoder::new(&mut wrapper, info.width, info.height);
                    encoder.set_color(info.color_type);
                    encoder.set_depth(info.bit_depth);
                    if let Some(palette) = &reader.info().palette {
                        encoder.set_palette(palette.clone());
                    }
                    let mut encoder = encoder.write_header().unwrap();
                    encoder.write_image_data(&buf).unwrap();
                }
                // Decode encoded decoded image
                let decoder = Decoder::new(&*out);
                let mut reader = decoder.read_info().unwrap();
                let mut buf2 = vec![0; reader.output_buffer_size()];
                reader.next_frame(&mut buf2).unwrap();
                // check if the encoded image is ok:
                assert_eq!(buf, buf2);
            }
        }
    }

    // Same as `roundtrip`, but drives the encoder through its `StreamWriter`,
    // additionally feeding it input in random-sized pieces.
    #[test]
    fn roundtrip_stream() {
        // More loops = more random testing, but also more test wait time
        for _ in 0..10 {
            for path in glob::glob("tests/pngsuite/*.png")
                .unwrap()
                .map(|r| r.unwrap())
            {
                if path.file_name().unwrap().to_str().unwrap().starts_with('x') {
                    // x* files are expected to fail to decode
                    continue;
                }
                // Decode image
                let decoder = Decoder::new(File::open(path).unwrap());
                let mut reader = decoder.read_info().unwrap();
                let mut buf = vec![0; reader.output_buffer_size()];
                let info = reader.next_frame(&mut buf).unwrap();
                // Encode decoded image
                let mut out = Vec::new();
                {
                    let mut wrapper = RandomChunkWriter {
                        rng: thread_rng(),
                        w: &mut out,
                    };

                    let mut encoder = Encoder::new(&mut wrapper, info.width, info.height);
                    encoder.set_color(info.color_type);
                    encoder.set_depth(info.bit_depth);
                    if let Some(palette) = &reader.info().palette {
                        encoder.set_palette(palette.clone());
                    }
                    let mut encoder = encoder.write_header().unwrap();
                    let mut stream_writer = encoder.stream_writer().unwrap();

                    let mut outer_wrapper = RandomChunkWriter {
                        rng: thread_rng(),
                        w: &mut stream_writer,
                    };

                    outer_wrapper.write_all(&buf).unwrap();
                }
                // Decode encoded decoded image
                let decoder = Decoder::new(&*out);
                let mut reader = decoder.read_info().unwrap();
                let mut buf2 = vec![0; reader.output_buffer_size()];
                reader.next_frame(&mut buf2).unwrap();
                // check if the encoded image is ok:
                assert_eq!(buf, buf2);
            }
        }
    }

    // Round-trips indexed (palette) images at every supported bit depth.
    #[test]
    fn image_palette() -> Result<()> {
        for &bit_depth in &[1u8, 2, 4, 8] {
            // Do a reference decoding, choose a fitting palette image from pngsuite
            let path = format!("tests/pngsuite/basn3p0{}.png", bit_depth);
            let decoder = Decoder::new(File::open(&path).unwrap());
            let mut reader = decoder.read_info().unwrap();

            let mut decoded_pixels = vec![0; reader.output_buffer_size()];
            let info = reader.info();
            assert_eq!(
                info.width as usize * info.height as usize * usize::from(bit_depth),
                decoded_pixels.len() * 8
            );
            let info = reader.next_frame(&mut decoded_pixels).unwrap();
            let indexed_data = decoded_pixels;

            let palette = reader.info().palette.as_ref().unwrap();
            let mut out = Vec::new();
            {
                let mut encoder = Encoder::new(&mut out, info.width, info.height);
                encoder.set_depth(BitDepth::from_u8(bit_depth).unwrap());
                encoder.set_color(ColorType::Indexed);
                encoder.set_palette(palette.as_ref());

                let mut writer = encoder.write_header().unwrap();
                writer.write_image_data(&indexed_data).unwrap();
            }

            // Decode re-encoded image
            let decoder = Decoder::new(&*out);
            let mut reader = decoder.read_info().unwrap();
            let mut redecoded = vec![0; reader.output_buffer_size()];
            reader.next_frame(&mut redecoded).unwrap();
            // check if the encoded image is ok:
            assert_eq!(indexed_data, redecoded);
        }
        Ok(())
    }

    #[test]
    fn expect_error_on_wrong_image_len() -> Result<()> {
        let width = 10;
        let height = 10;

        let output = vec![0u8; 1024];
        let writer = Cursor::new(output);
        let mut encoder = Encoder::new(writer, width as u32, height as u32);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Rgb);
        let mut png_writer = encoder.write_header()?;

        // One byte too many must be rejected.
        let correct_image_size = width * height * 3;
        let image = vec![0u8; correct_image_size + 1];
        let result = png_writer.write_image_data(image.as_ref());
        assert!(result.is_err());

        Ok(())
    }

    #[test]
    fn expect_error_on_empty_image() -> Result<()> {
        let output = vec![0u8; 1024];
        let mut writer = Cursor::new(output);

        let encoder = Encoder::new(&mut writer, 0, 0);
        assert!(encoder.write_header().is_err());

        let encoder = Encoder::new(&mut writer, 100, 0);
        assert!(encoder.write_header().is_err());

        let encoder = Encoder::new(&mut writer, 0, 100);
        assert!(encoder.write_header().is_err());

        Ok(())
    }

    // Every (depth, color) pair not allowed by the PNG spec must be rejected.
    #[test]
    fn expect_error_on_invalid_bit_depth_color_type_combination() -> Result<()> {
        let output = vec![0u8; 1024];
        let mut writer = Cursor::new(output);

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::One);
        encoder.set_color(ColorType::Rgb);
        assert!(encoder.write_header().is_err());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::One);
        encoder.set_color(ColorType::GrayscaleAlpha);
        assert!(encoder.write_header().is_err());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::One);
        encoder.set_color(ColorType::Rgba);
        assert!(encoder.write_header().is_err());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Two);
        encoder.set_color(ColorType::Rgb);
        assert!(encoder.write_header().is_err());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Two);
        encoder.set_color(ColorType::GrayscaleAlpha);
        assert!(encoder.write_header().is_err());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Two);
        encoder.set_color(ColorType::Rgba);
        assert!(encoder.write_header().is_err());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Four);
        encoder.set_color(ColorType::Rgb);
        assert!(encoder.write_header().is_err());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Four);
        encoder.set_color(ColorType::GrayscaleAlpha);
        assert!(encoder.write_header().is_err());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Four);
        encoder.set_color(ColorType::Rgba);
        assert!(encoder.write_header().is_err());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Sixteen);
        encoder.set_color(ColorType::Indexed);
        assert!(encoder.write_header().is_err());

        Ok(())
    }

    // Every (depth, color) pair allowed by the PNG spec must be accepted.
    #[test]
    fn can_write_header_with_valid_bit_depth_color_type_combination() -> Result<()> {
        let output = vec![0u8; 1024];
        let mut writer = Cursor::new(output);

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::One);
        encoder.set_color(ColorType::Grayscale);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::One);
        encoder.set_color(ColorType::Indexed);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Two);
        encoder.set_color(ColorType::Grayscale);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Two);
        encoder.set_color(ColorType::Indexed);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Four);
        encoder.set_color(ColorType::Grayscale);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Four);
        encoder.set_color(ColorType::Indexed);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Grayscale);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Rgb);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Indexed);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::GrayscaleAlpha);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Rgba);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Sixteen);
        encoder.set_color(ColorType::Grayscale);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Sixteen);
        encoder.set_color(ColorType::Rgb);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Sixteen);
        encoder.set_color(ColorType::GrayscaleAlpha);
        assert!(encoder.write_header().is_ok());

        let mut encoder = Encoder::new(&mut writer, 1, 1);
        encoder.set_depth(BitDepth::Sixteen);
        encoder.set_color(ColorType::Rgba);
        assert!(encoder.write_header().is_ok());

        Ok(())
    }

    // Every filter variant must decode back to the original pixels.
    #[test]
    fn all_filters_roundtrip() -> io::Result<()> {
        let pixel: Vec<_> = (0..48).collect();

        let roundtrip = |filter: FilterType| -> io::Result<()> {
            let mut buffer = vec![];
            let mut encoder = Encoder::new(&mut buffer, 4, 4);
            encoder.set_depth(BitDepth::Eight);
            encoder.set_color(ColorType::Rgb);
            encoder.set_filter(filter);
            encoder.write_header()?.write_image_data(&pixel)?;

            let decoder = crate::Decoder::new(Cursor::new(buffer));
            let mut reader = decoder.read_info()?;
            let info = reader.info();
            assert_eq!(info.width, 4);
            assert_eq!(info.height, 4);
            let mut dest = vec![0; pixel.len()];
            reader.next_frame(&mut dest)?;
            assert_eq!(dest, pixel, "Deviation with filter type {:?}", filter);

            Ok(())
        };

        roundtrip(FilterType::NoFilter)?;
        roundtrip(FilterType::Sub)?;
        roundtrip(FilterType::Up)?;
        roundtrip(FilterType::Avg)?;
        roundtrip(FilterType::Paeth)?;

        Ok(())
    }

    // A gAMA chunk written by the encoder must be read back unchanged.
    #[test]
    fn some_gamma_roundtrip() -> io::Result<()> {
        let pixel: Vec<_> = (0..48).collect();

        let roundtrip = |gamma: Option<ScaledFloat>| -> io::Result<()> {
            let mut buffer = vec![];
            let mut encoder = Encoder::new(&mut buffer, 4, 4);
            encoder.set_depth(BitDepth::Eight);
            encoder.set_color(ColorType::Rgb);
            encoder.set_filter(FilterType::Avg);
            if let Some(gamma) = gamma {
                encoder.set_source_gamma(gamma);
            }
            encoder.write_header()?.write_image_data(&pixel)?;

            let decoder = crate::Decoder::new(Cursor::new(buffer));
            let mut reader = decoder.read_info()?;
            assert_eq!(
                reader.info().source_gamma,
                gamma,
                "Deviation with gamma {:?}",
                gamma
            );
            let mut dest = vec![0; pixel.len()];
            let info = reader.next_frame(&mut dest)?;
            assert_eq!(info.width, 4);
            assert_eq!(info.height, 4);

            Ok(())
        };

        roundtrip(None)?;
        roundtrip(Some(ScaledFloat::new(0.35)))?;
        roundtrip(Some(ScaledFloat::new(0.45)))?;
        roundtrip(Some(ScaledFloat::new(0.55)))?;
        roundtrip(Some(ScaledFloat::new(0.7)))?;
        roundtrip(Some(ScaledFloat::new(1.0)))?;
        roundtrip(Some(ScaledFloat::new(2.5)))?;

        Ok(())
    }

    #[test]
    fn write_image_chunks_beyond_first() -> Result<()> {
        let width = 10;
        let height = 10;

        let output = vec![0u8; 1024];
        let writer = Cursor::new(output);

        // Not an animation but we should still be able to write multiple images
        // See issue: <https://github.com/image-rs/image-png/issues/301>
        // This is technically all valid png so there is no issue with correctness.
        let mut encoder = Encoder::new(writer, width, height);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Grayscale);
        let mut png_writer = encoder.write_header()?;

        for _ in 0..3 {
            let correct_image_size = (width * height) as usize;
            let image = vec![0u8; correct_image_size];
            png_writer.write_image_data(image.as_ref())?;
        }

        Ok(())
    }

    // With sequence validation enabled, a second image on a non-animated
    // writer must be rejected.
    #[test]
    fn image_validate_sequence_without_animation() -> Result<()> {
        let width = 10;
        let height = 10;

        let output = vec![0u8; 1024];
        let writer = Cursor::new(output);

        let mut encoder = Encoder::new(writer, width, height);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Grayscale);
        encoder.validate_sequence(true);
        let mut png_writer = encoder.write_header()?;

        let correct_image_size = (width * height) as usize;
        let image = vec![0u8; correct_image_size];
        png_writer.write_image_data(image.as_ref())?;

        assert!(png_writer.write_image_data(image.as_ref()).is_err());
        Ok(())
    }

    #[test]
    fn image_validate_animation() -> Result<()> {
        let width = 10;
        let height = 10;

        let output = vec![0u8; 1024];
        let writer = Cursor::new(output);
        let correct_image_size = (width * height) as usize;
        let image = vec![0u8; correct_image_size];

        let mut encoder = Encoder::new(writer, width, height);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Grayscale);
        encoder.set_animated(1, 0)?;
        encoder.validate_sequence(true);
        let mut png_writer = encoder.write_header()?;

        png_writer.write_image_data(image.as_ref())?;

        Ok(())
    }

    #[test]
    fn image_validate_animation2() -> Result<()> {
        let width = 10;
        let height = 10;

        let output = vec![0u8; 1024];
        let writer = Cursor::new(output);
        let correct_image_size = (width * height) as usize;
        let image = vec![0u8; correct_image_size];

        let mut encoder = Encoder::new(writer, width, height);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Grayscale);
        encoder.set_animated(2, 0)?;
        encoder.validate_sequence(true);
        let mut png_writer = encoder.write_header()?;

        png_writer.write_image_data(image.as_ref())?;
        png_writer.write_image_data(image.as_ref())?;
        png_writer.finish()?;

        Ok(())
    }

    // Separate default image: one extra image (IDAT) before the frames.
    #[test]
    fn image_validate_animation_sep_def_image() -> Result<()> {
        let width = 10;
        let height = 10;

        let output = vec![0u8; 1024];
        let writer = Cursor::new(output);
        let correct_image_size = (width * height) as usize;
        let image = vec![0u8; correct_image_size];

        let mut encoder = Encoder::new(writer, width, height);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Grayscale);
        encoder.set_animated(1, 0)?;
        encoder.set_sep_def_img(true)?;
        encoder.validate_sequence(true);
        let mut png_writer = encoder.write_header()?;

        png_writer.write_image_data(image.as_ref())?;
        png_writer.write_image_data(image.as_ref())?;
        png_writer.finish()?;

        Ok(())
    }

    #[test]
    fn image_validate_missing_image() -> Result<()> {
        let width = 10;
        let height = 10;

        let output = vec![0u8; 1024];
        let writer = Cursor::new(output);

        let mut encoder = Encoder::new(writer, width, height);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Grayscale);
        encoder.validate_sequence(true);
        let png_writer = encoder.write_header()?;

        // No image data was written at all; `finish` must flag it.
        assert!(png_writer.finish().is_err());
        Ok(())
    }

    #[test]
    fn image_validate_missing_animated_frame() -> Result<()> {
        let width = 10;
        let height = 10;

        let output = vec![0u8; 1024];
        let writer = Cursor::new(output);
        let correct_image_size = (width * height) as usize;
        let image = vec![0u8; correct_image_size];

        let mut encoder = Encoder::new(writer, width, height);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Grayscale);
        encoder.set_animated(2, 0)?;
        encoder.validate_sequence(true);
        let mut png_writer = encoder.write_header()?;

        // Only one of the two promised frames is written.
        png_writer.write_image_data(image.as_ref())?;
        assert!(png_writer.finish().is_err());

        Ok(())
    }

    // Regression test: the stream writer must cap writes at the frame size.
    #[test]
    fn issue_307_stream_validation() -> Result<()> {
        let output = vec![0u8; 1024];
        let mut cursor = Cursor::new(output);

        let encoder = Encoder::new(&mut cursor, 1, 1); // Create a 1-pixel image
        let mut writer = encoder.write_header()?;
        let mut stream = writer.stream_writer()?;

        let written = stream.write(&[1, 2, 3, 4])?;
        assert_eq!(written, 1);
        stream.finish()?;
        drop(writer);

        {
            cursor.set_position(0);
            let mut decoder = Decoder::new(cursor).read_info().expect("A valid image");
            let mut buffer = [0u8; 1];
            decoder.next_frame(&mut buffer[..]).expect("Valid read");
            assert_eq!(buffer, [1]);
        }

        Ok(())
    }

    // Scanline filtering through the stream writer must round-trip.
    #[test]
    fn stream_filtering() -> Result<()> {
        let output = vec![0u8; 1024];
        let mut cursor = Cursor::new(output);

        let mut encoder = Encoder::new(&mut cursor, 8, 8);
        encoder.set_color(ColorType::Rgba);
        encoder.set_filter(FilterType::Paeth);
        let mut writer = encoder.write_header()?;
        let mut stream = writer.stream_writer()?;

        for _ in 0..8 {
            let written = stream.write(&[1; 32])?;
            assert_eq!(written, 32);
        }
        stream.finish()?;
        drop(writer);

        {
            cursor.set_position(0);
            let mut decoder = Decoder::new(cursor).read_info().expect("A valid image");
            let mut buffer = [0u8; 256];
            decoder.next_frame(&mut buffer[..]).expect("Valid read");
            assert_eq!(buffer, [1; 256]);
        }

        Ok(())
    }

    #[test]
    #[cfg(all(unix, not(target_pointer_width = "32")))]
    fn exper_error_on_huge_chunk() -> Result<()> {
        // Okay, so we want a proper 4 GB chunk but not actually spend the memory for reserving it.
        // Let's rely on overcommit? Otherwise we got the rather dumb option of mmap-ing /dev/zero.
        let empty = vec![0; 1usize << 31];
        let writer = Cursor::new(vec![0u8; 1024]);

        let mut encoder = Encoder::new(writer, 10, 10);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Grayscale);
        let mut png_writer = encoder.write_header()?;

        assert!(png_writer.write_chunk(chunk::fdAT, &empty).is_err());
        Ok(())
    }

    #[test]
    #[cfg(all(unix, not(target_pointer_width = "32")))]
    fn exper_error_on_non_u32_chunk() -> Result<()> {
        // Okay, so we want a proper 4 GB chunk but not actually spend the memory for reserving it.
        // Let's rely on overcommit? Otherwise we got the rather dumb option of mmap-ing /dev/zero.
        let empty = vec![0; 1usize << 32];
        let writer = Cursor::new(vec![0u8; 1024]);

        let mut encoder = Encoder::new(writer, 10, 10);
        encoder.set_depth(BitDepth::Eight);
        encoder.set_color(ColorType::Grayscale);
        let mut png_writer = encoder.write_header()?;

        assert!(png_writer.write_chunk(chunk::fdAT, &empty).is_err());
        Ok(())
    }

    // `finish` must drop (and therefore release) the wrapped inner writer.
    #[test]
    fn finish_drops_inner_writer() -> Result<()> {
        struct NoWriter<'flag>(&'flag mut bool);

        impl Write for NoWriter<'_> {
            fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
                Ok(buf.len())
            }
            fn flush(&mut self) -> io::Result<()> {
                Ok(())
            }
        }
        impl Drop for NoWriter<'_> {
            fn drop(&mut self) {
                *self.0 = true;
            }
        }

        let mut flag = false;

        {
            let mut encoder = Encoder::new(NoWriter(&mut flag), 10, 10);
            encoder.set_depth(BitDepth::Eight);
            encoder.set_color(ColorType::Grayscale);

            let mut writer = encoder.write_header()?;
            writer.write_image_data(&[0; 100])?;
            writer.finish()?;
        }

        assert!(flag, "PNG finished but writer was not dropped");
        Ok(())
    }

    /// A Writer that only writes a few bytes at a time
    struct RandomChunkWriter<R: Rng, W: Write> {
        rng: R,
        w: W,
    }

    impl<R: Rng, W: Write> Write for RandomChunkWriter<R, W> {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            // choose a random length to write
            let len = cmp::min(self.rng.gen_range(1..50), buf.len());

            self.w.write(&buf[0..len])
        }

        fn flush(&mut self) -> io::Result<()> {
            self.w.flush()
        }
    }
}
+
/// Converters that depend on the `deflate` backend (`flate2`).
///
/// Since this only contains a private helper, there is no need to make it
/// public; it is simply available when this code is compiled as well.
impl Compression {
    /// Maps this crate's `Compression` setting to a `flate2::Compression`
    /// level. The deprecated `Huffman` and `Rle` strategies have no direct
    /// backend equivalent here and map to no compression.
    fn to_options(self) -> flate2::Compression {
        #[allow(deprecated)]
        match self {
            Compression::Default => flate2::Compression::default(),
            Compression::Fast => flate2::Compression::fast(),
            Compression::Best => flate2::Compression::best(),
            #[allow(deprecated)]
            Compression::Huffman => flate2::Compression::none(),
            #[allow(deprecated)]
            Compression::Rle => flate2::Compression::none(),
        }
    }
}
diff --git a/vendor/png/src/filter.rs b/vendor/png/src/filter.rs
new file mode 100644
index 0000000..b561e4e
--- /dev/null
+++ b/vendor/png/src/filter.rs
@@ -0,0 +1,801 @@
+use core::convert::TryInto;
+
+use crate::common::BytesPerPixel;
+
/// The byte level filter applied to scanlines to prepare them for compression.
///
/// Compression in general benefits from repetitive data. The filter is a content-aware method of
/// compressing the range of occurring byte values to help the compression algorithm. Note that
/// this does not operate on pixels but on raw bytes of a scanline.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum FilterType {
    /// No filtering; scanline bytes are stored as-is.
    NoFilter = 0,
    /// Each byte is replaced by its difference from the byte one pixel to the left.
    Sub = 1,
    /// Each byte is replaced by its difference from the byte directly above.
    Up = 2,
    /// Each byte is replaced by its difference from the average of the left
    /// and above bytes.
    Avg = 3,
    /// Each byte is replaced by its difference from the Paeth predictor of the
    /// left, above, and upper-left bytes.
    Paeth = 4,
}
+
impl Default for FilterType {
    /// `Sub` is used when the caller does not pick a filter explicitly.
    // NOTE(review): presumably chosen as a speed/compression tradeoff — the
    // rationale is not stated here.
    fn default() -> Self {
        FilterType::Sub
    }
}
+
+impl FilterType {
+ /// u8 -> Self. Temporary solution until Rust provides a canonical one.
+ pub fn from_u8(n: u8) -> Option<FilterType> {
+ match n {
+ 0 => Some(FilterType::NoFilter),
+ 1 => Some(FilterType::Sub),
+ 2 => Some(FilterType::Up),
+ 3 => Some(FilterType::Avg),
+ 4 => Some(FilterType::Paeth),
+ _ => None,
+ }
+ }
+}
+
/// The filtering method for preprocessing scanline data before compression.
///
/// Adaptive filtering performs additional computation in an attempt to maximize
/// the compression of the data. [`NonAdaptive`] filtering is the default.
///
/// [`NonAdaptive`]: enum.AdaptiveFilterType.html#variant.NonAdaptive
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum AdaptiveFilterType {
    /// Try several filters per scanline and keep the heuristically best one.
    Adaptive,
    /// Always apply the single filter chosen by the caller.
    NonAdaptive,
}

impl Default for AdaptiveFilterType {
    /// Non-adaptive filtering is the default (no extra per-scanline work).
    fn default() -> Self {
        AdaptiveFilterType::NonAdaptive
    }
}
+
/// Paeth predictor used on the decode path.
///
/// Computes the PNG Paeth function over the left (`a`), above (`b`) and
/// upper-left (`c`) neighbours, resolving ties in the order `a`, `b`, `c`.
fn filter_paeth_decode(a: u8, b: u8, c: u8) -> u8 {
    // Decoding seems to optimize better with this algorithm.
    let (wa, wb, wc) = (i16::from(a), i16::from(b), i16::from(c));

    // Distances of the prediction p = a + b - c from each neighbour:
    // |p - a| = |b - c|, |p - b| = |a - c|, |p - c| = |a + b - 2c|.
    let pa = (wb - wc).abs();
    let pb = (wa - wc).abs();
    let pc = (wa + wb - 2 * wc).abs();

    // Pick the neighbour with the smallest distance; `a` wins ties with `b`,
    // and both win ties with `c`.
    if pa <= pb && pa <= pc {
        a
    } else if pb <= pc {
        b
    } else {
        c
    }
}
+
/// Paeth predictor used on the encode path, restructured to use only `u8`
/// arithmetic so the compiler can vectorize it.
fn filter_paeth(a: u8, b: u8, c: u8) -> u8 {
    // This is an optimized version of the paeth filter from the PNG specification, proposed by
    // Luca Versari for [FPNGE](https://www.lucaversari.it/FJXL_and_FPNGE.pdf). It operates
    // entirely on unsigned 8-bit quantities, making it more conducive to vectorization.
    //
    // p = a + b - c
    // pa = |p - a| = |a + b - c - a| = |b - c| = max(b, c) - min(b, c)
    // pb = |p - b| = |a + b - c - b| = |a - c| = max(a, c) - min(a, c)
    // pc = |p - c| = |a + b - c - c| = |(b - c) + (a - c)| = ...
    //
    // Further optimizing the calculation of `pc` is a bit trickier. However, notice that:
    //
    //        a > c && b > c
    //    ==> (a - c) > 0 && (b - c) > 0
    //    ==> pc > (a - c) && pc > (b - c)
    //    ==> pc > |a - c| && pc > |b - c|
    //    ==> pc > pb && pc > pa
    //
    // Meaning that if `c` is smaller than `a` and `b`, the value of `pc` is irrelevant. Similar
    // reasoning applies if `c` is larger than the other two inputs. Assuming that `a <= c` and
    // `c <= b` or vice versa:
    //
    //     pc = ||b - c| - |a - c|| =  |pa - pb| = max(pa, pb) - min(pa, pb)
    //
    let pa = b.max(c) - c.min(b);
    let pb = a.max(c) - c.min(a);
    // `c` between `a` and `b`: use the simplified |pa - pb| form. Otherwise
    // `pc` cannot win (see above), so any value >= max(pa, pb) works; 255 is
    // the cheapest such constant.
    let pc = if (a < c) == (c < b) {
        pa.max(pb) - pa.min(pb)
    } else {
        255
    };

    // Tie-breaking order `a`, `b`, `c` as in the PNG specification.
    if pa <= pb && pa <= pc {
        a
    } else if pb <= pc {
        b
    } else {
        c
    }
}
+
/// Reverses `filter` in place: reconstructs the raw bytes of the `current`
/// scanline from its filtered form, given the already-unfiltered `previous`
/// scanline.
///
/// `tbpp` is the whole-byte pixel width; filters operate at byte distance
/// `tbpp`, never across partial bytes. For `tbpp` > 1 the per-pixel loops use
/// `chunks_exact`, so any trailing bytes that do not form a full pixel are
/// left untouched (presumably scanlines are always a whole number of pixels —
/// TODO confirm at the call sites).
pub(crate) fn unfilter(
    filter: FilterType,
    tbpp: BytesPerPixel,
    previous: &[u8],
    current: &mut [u8],
) {
    use self::FilterType::*;

    // [2023/01 @okaneco] - Notes on optimizing decoding filters
    //
    // Links:
    // [PR]: https://github.com/image-rs/image-png/pull/382
    // [SWAR]: http://aggregate.org/SWAR/over.html
    // [AVG]: http://aggregate.org/MAGIC/#Average%20of%20Integers
    //
    // #382 heavily refactored and optimized the following filters making the
    // implementation nonobvious. These comments function as a summary of that
    // PR with an explanation of the choices made below.
    //
    // #382 originally started with trying to optimize using a technique called
    // SWAR, SIMD Within a Register. SWAR uses regular integer types like `u32`
    // and `u64` as SIMD registers to perform vertical operations in parallel,
    // usually involving bit-twiddling. This allowed each `BytesPerPixel` (bpp)
    // pixel to be decoded in parallel: 3bpp and 4bpp in a `u32`, 6bpp and 8bpp
    // in a `u64`. The `Sub` filter looked like the following code block, `Avg`
    // was similar but used a bitwise average method from [AVG]:
    // ```
    // // See "Unpartitioned Operations With Correction Code" from [SWAR]
    // fn swar_add_u32(x: u32, y: u32) -> u32 {
    //     // 7-bit addition so there's no carry over the most significant bit
    //     let n = (x & 0x7f7f7f7f) + (y & 0x7f7f7f7f); // 0x7F = 0b_0111_1111
    //     // 1-bit parity/XOR addition to fill in the missing MSB
    //     n ^ (x ^ y) & 0x80808080 // 0x80 = 0b_1000_0000
    // }
    //
    // let mut prev =
    //     u32::from_ne_bytes([current[0], current[1], current[2], current[3]]);
    // for chunk in current[4..].chunks_exact_mut(4) {
    //     let cur = u32::from_ne_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]);
    //     let new_chunk = swar_add_u32(cur, prev);
    //     chunk.copy_from_slice(&new_chunk.to_ne_bytes());
    //     prev = new_chunk;
    // }
    // ```
    // While this provided a measurable increase, @fintelia found that this idea
    // could be taken even further by unrolling the chunks component-wise and
    // avoiding unnecessary byte-shuffling by using byte arrays instead of
    // `u32::from|to_ne_bytes`. The bitwise operations were no longer necessary
    // so they were reverted to their obvious arithmetic equivalent. Lastly,
    // `TryInto` was used instead of `copy_from_slice`. The `Sub` code now
    // looked like this (with asserts to remove `0..bpp` bounds checks):
    // ```
    // assert!(len > 3);
    // let mut prev = [current[0], current[1], current[2], current[3]];
    // for chunk in current[4..].chunks_exact_mut(4) {
    //     let new_chunk = [
    //         chunk[0].wrapping_add(prev[0]),
    //         chunk[1].wrapping_add(prev[1]),
    //         chunk[2].wrapping_add(prev[2]),
    //         chunk[3].wrapping_add(prev[3]),
    //     ];
    //     *TryInto::<&mut [u8; 4]>::try_into(chunk).unwrap() = new_chunk;
    //     prev = new_chunk;
    // }
    // ```
    // The compiler was able to optimize the code to be even faster and this
    // method even sped up Paeth filtering! Assertions were experimentally
    // added within loop bodies which produced better instructions but no
    // difference in speed. Finally, the code was refactored to remove manual
    // slicing and start the previous pixel chunks with arrays of `[0; N]`.
    // ```
    // let mut prev = [0; 4];
    // for chunk in current.chunks_exact_mut(4) {
    //     let new_chunk = [
    //         chunk[0].wrapping_add(prev[0]),
    //         chunk[1].wrapping_add(prev[1]),
    //         chunk[2].wrapping_add(prev[2]),
    //         chunk[3].wrapping_add(prev[3]),
    //     ];
    //     *TryInto::<&mut [u8; 4]>::try_into(chunk).unwrap() = new_chunk;
    //     prev = new_chunk;
    // }
    // ```
    // While we're not manually bit-twiddling anymore, a possible takeaway from
    // this is to "think in SWAR" when dealing with small byte arrays. Unrolling
    // array operations and performing them component-wise may unlock previously
    // unavailable optimizations from the compiler, even when using the
    // `chunks_exact` methods for their potential auto-vectorization benefits.
    match filter {
        // Raw bytes; nothing to undo.
        NoFilter => {}
        // X = X + A: add back the byte one pixel (tbpp bytes) to the left.
        Sub => match tbpp {
            BytesPerPixel::One => {
                current.iter_mut().reduce(|&mut prev, curr| {
                    *curr = curr.wrapping_add(prev);
                    curr
                });
            }
            BytesPerPixel::Two => {
                let mut prev = [0; 2];
                for chunk in current.chunks_exact_mut(2) {
                    let new_chunk = [
                        chunk[0].wrapping_add(prev[0]),
                        chunk[1].wrapping_add(prev[1]),
                    ];
                    *TryInto::<&mut [u8; 2]>::try_into(chunk).unwrap() = new_chunk;
                    prev = new_chunk;
                }
            }
            BytesPerPixel::Three => {
                let mut prev = [0; 3];
                for chunk in current.chunks_exact_mut(3) {
                    let new_chunk = [
                        chunk[0].wrapping_add(prev[0]),
                        chunk[1].wrapping_add(prev[1]),
                        chunk[2].wrapping_add(prev[2]),
                    ];
                    *TryInto::<&mut [u8; 3]>::try_into(chunk).unwrap() = new_chunk;
                    prev = new_chunk;
                }
            }
            BytesPerPixel::Four => {
                let mut prev = [0; 4];
                for chunk in current.chunks_exact_mut(4) {
                    let new_chunk = [
                        chunk[0].wrapping_add(prev[0]),
                        chunk[1].wrapping_add(prev[1]),
                        chunk[2].wrapping_add(prev[2]),
                        chunk[3].wrapping_add(prev[3]),
                    ];
                    *TryInto::<&mut [u8; 4]>::try_into(chunk).unwrap() = new_chunk;
                    prev = new_chunk;
                }
            }
            BytesPerPixel::Six => {
                let mut prev = [0; 6];
                for chunk in current.chunks_exact_mut(6) {
                    let new_chunk = [
                        chunk[0].wrapping_add(prev[0]),
                        chunk[1].wrapping_add(prev[1]),
                        chunk[2].wrapping_add(prev[2]),
                        chunk[3].wrapping_add(prev[3]),
                        chunk[4].wrapping_add(prev[4]),
                        chunk[5].wrapping_add(prev[5]),
                    ];
                    *TryInto::<&mut [u8; 6]>::try_into(chunk).unwrap() = new_chunk;
                    prev = new_chunk;
                }
            }
            BytesPerPixel::Eight => {
                let mut prev = [0; 8];
                for chunk in current.chunks_exact_mut(8) {
                    let new_chunk = [
                        chunk[0].wrapping_add(prev[0]),
                        chunk[1].wrapping_add(prev[1]),
                        chunk[2].wrapping_add(prev[2]),
                        chunk[3].wrapping_add(prev[3]),
                        chunk[4].wrapping_add(prev[4]),
                        chunk[5].wrapping_add(prev[5]),
                        chunk[6].wrapping_add(prev[6]),
                        chunk[7].wrapping_add(prev[7]),
                    ];
                    *TryInto::<&mut [u8; 8]>::try_into(chunk).unwrap() = new_chunk;
                    prev = new_chunk;
                }
            }
        },
        // X = X + B: add back the byte directly above; bpp-independent.
        Up => {
            for (curr, &above) in current.iter_mut().zip(previous) {
                *curr = curr.wrapping_add(above);
            }
        }
        // X = X + floor((A + B) / 2); widened to u16 so the sum cannot wrap.
        Avg => match tbpp {
            BytesPerPixel::One => {
                let mut lprev = [0; 1];
                for (chunk, above) in current.chunks_exact_mut(1).zip(previous.chunks_exact(1)) {
                    let new_chunk =
                        [chunk[0].wrapping_add(((above[0] as u16 + lprev[0] as u16) / 2) as u8)];
                    *TryInto::<&mut [u8; 1]>::try_into(chunk).unwrap() = new_chunk;
                    lprev = new_chunk;
                }
            }
            BytesPerPixel::Two => {
                let mut lprev = [0; 2];
                for (chunk, above) in current.chunks_exact_mut(2).zip(previous.chunks_exact(2)) {
                    let new_chunk = [
                        chunk[0].wrapping_add(((above[0] as u16 + lprev[0] as u16) / 2) as u8),
                        chunk[1].wrapping_add(((above[1] as u16 + lprev[1] as u16) / 2) as u8),
                    ];
                    *TryInto::<&mut [u8; 2]>::try_into(chunk).unwrap() = new_chunk;
                    lprev = new_chunk;
                }
            }
            BytesPerPixel::Three => {
                let mut lprev = [0; 3];
                for (chunk, above) in current.chunks_exact_mut(3).zip(previous.chunks_exact(3)) {
                    let new_chunk = [
                        chunk[0].wrapping_add(((above[0] as u16 + lprev[0] as u16) / 2) as u8),
                        chunk[1].wrapping_add(((above[1] as u16 + lprev[1] as u16) / 2) as u8),
                        chunk[2].wrapping_add(((above[2] as u16 + lprev[2] as u16) / 2) as u8),
                    ];
                    *TryInto::<&mut [u8; 3]>::try_into(chunk).unwrap() = new_chunk;
                    lprev = new_chunk;
                }
            }
            BytesPerPixel::Four => {
                let mut lprev = [0; 4];
                for (chunk, above) in current.chunks_exact_mut(4).zip(previous.chunks_exact(4)) {
                    let new_chunk = [
                        chunk[0].wrapping_add(((above[0] as u16 + lprev[0] as u16) / 2) as u8),
                        chunk[1].wrapping_add(((above[1] as u16 + lprev[1] as u16) / 2) as u8),
                        chunk[2].wrapping_add(((above[2] as u16 + lprev[2] as u16) / 2) as u8),
                        chunk[3].wrapping_add(((above[3] as u16 + lprev[3] as u16) / 2) as u8),
                    ];
                    *TryInto::<&mut [u8; 4]>::try_into(chunk).unwrap() = new_chunk;
                    lprev = new_chunk;
                }
            }
            BytesPerPixel::Six => {
                let mut lprev = [0; 6];
                for (chunk, above) in current.chunks_exact_mut(6).zip(previous.chunks_exact(6)) {
                    let new_chunk = [
                        chunk[0].wrapping_add(((above[0] as u16 + lprev[0] as u16) / 2) as u8),
                        chunk[1].wrapping_add(((above[1] as u16 + lprev[1] as u16) / 2) as u8),
                        chunk[2].wrapping_add(((above[2] as u16 + lprev[2] as u16) / 2) as u8),
                        chunk[3].wrapping_add(((above[3] as u16 + lprev[3] as u16) / 2) as u8),
                        chunk[4].wrapping_add(((above[4] as u16 + lprev[4] as u16) / 2) as u8),
                        chunk[5].wrapping_add(((above[5] as u16 + lprev[5] as u16) / 2) as u8),
                    ];
                    *TryInto::<&mut [u8; 6]>::try_into(chunk).unwrap() = new_chunk;
                    lprev = new_chunk;
                }
            }
            BytesPerPixel::Eight => {
                let mut lprev = [0; 8];
                for (chunk, above) in current.chunks_exact_mut(8).zip(previous.chunks_exact(8)) {
                    let new_chunk = [
                        chunk[0].wrapping_add(((above[0] as u16 + lprev[0] as u16) / 2) as u8),
                        chunk[1].wrapping_add(((above[1] as u16 + lprev[1] as u16) / 2) as u8),
                        chunk[2].wrapping_add(((above[2] as u16 + lprev[2] as u16) / 2) as u8),
                        chunk[3].wrapping_add(((above[3] as u16 + lprev[3] as u16) / 2) as u8),
                        chunk[4].wrapping_add(((above[4] as u16 + lprev[4] as u16) / 2) as u8),
                        chunk[5].wrapping_add(((above[5] as u16 + lprev[5] as u16) / 2) as u8),
                        chunk[6].wrapping_add(((above[6] as u16 + lprev[6] as u16) / 2) as u8),
                        chunk[7].wrapping_add(((above[7] as u16 + lprev[7] as u16) / 2) as u8),
                    ];
                    *TryInto::<&mut [u8; 8]>::try_into(chunk).unwrap() = new_chunk;
                    lprev = new_chunk;
                }
            }
        },
        // X = X + paeth(A, B, C); A/C roll forward as each pixel is decoded.
        Paeth => {
            // Paeth filter pixels:
            // C B D
            // A X
            match tbpp {
                BytesPerPixel::One => {
                    let mut a_bpp = [0; 1];
                    let mut c_bpp = [0; 1];
                    for (chunk, b_bpp) in current.chunks_exact_mut(1).zip(previous.chunks_exact(1))
                    {
                        let new_chunk = [chunk[0]
                            .wrapping_add(filter_paeth_decode(a_bpp[0], b_bpp[0], c_bpp[0]))];
                        *TryInto::<&mut [u8; 1]>::try_into(chunk).unwrap() = new_chunk;
                        a_bpp = new_chunk;
                        c_bpp = b_bpp.try_into().unwrap();
                    }
                }
                BytesPerPixel::Two => {
                    let mut a_bpp = [0; 2];
                    let mut c_bpp = [0; 2];
                    for (chunk, b_bpp) in current.chunks_exact_mut(2).zip(previous.chunks_exact(2))
                    {
                        let new_chunk = [
                            chunk[0]
                                .wrapping_add(filter_paeth_decode(a_bpp[0], b_bpp[0], c_bpp[0])),
                            chunk[1]
                                .wrapping_add(filter_paeth_decode(a_bpp[1], b_bpp[1], c_bpp[1])),
                        ];
                        *TryInto::<&mut [u8; 2]>::try_into(chunk).unwrap() = new_chunk;
                        a_bpp = new_chunk;
                        c_bpp = b_bpp.try_into().unwrap();
                    }
                }
                BytesPerPixel::Three => {
                    let mut a_bpp = [0; 3];
                    let mut c_bpp = [0; 3];
                    for (chunk, b_bpp) in current.chunks_exact_mut(3).zip(previous.chunks_exact(3))
                    {
                        let new_chunk = [
                            chunk[0]
                                .wrapping_add(filter_paeth_decode(a_bpp[0], b_bpp[0], c_bpp[0])),
                            chunk[1]
                                .wrapping_add(filter_paeth_decode(a_bpp[1], b_bpp[1], c_bpp[1])),
                            chunk[2]
                                .wrapping_add(filter_paeth_decode(a_bpp[2], b_bpp[2], c_bpp[2])),
                        ];
                        *TryInto::<&mut [u8; 3]>::try_into(chunk).unwrap() = new_chunk;
                        a_bpp = new_chunk;
                        c_bpp = b_bpp.try_into().unwrap();
                    }
                }
                BytesPerPixel::Four => {
                    let mut a_bpp = [0; 4];
                    let mut c_bpp = [0; 4];
                    for (chunk, b_bpp) in current.chunks_exact_mut(4).zip(previous.chunks_exact(4))
                    {
                        let new_chunk = [
                            chunk[0]
                                .wrapping_add(filter_paeth_decode(a_bpp[0], b_bpp[0], c_bpp[0])),
                            chunk[1]
                                .wrapping_add(filter_paeth_decode(a_bpp[1], b_bpp[1], c_bpp[1])),
                            chunk[2]
                                .wrapping_add(filter_paeth_decode(a_bpp[2], b_bpp[2], c_bpp[2])),
                            chunk[3]
                                .wrapping_add(filter_paeth_decode(a_bpp[3], b_bpp[3], c_bpp[3])),
                        ];
                        *TryInto::<&mut [u8; 4]>::try_into(chunk).unwrap() = new_chunk;
                        a_bpp = new_chunk;
                        c_bpp = b_bpp.try_into().unwrap();
                    }
                }
                BytesPerPixel::Six => {
                    let mut a_bpp = [0; 6];
                    let mut c_bpp = [0; 6];
                    for (chunk, b_bpp) in current.chunks_exact_mut(6).zip(previous.chunks_exact(6))
                    {
                        let new_chunk = [
                            chunk[0]
                                .wrapping_add(filter_paeth_decode(a_bpp[0], b_bpp[0], c_bpp[0])),
                            chunk[1]
                                .wrapping_add(filter_paeth_decode(a_bpp[1], b_bpp[1], c_bpp[1])),
                            chunk[2]
                                .wrapping_add(filter_paeth_decode(a_bpp[2], b_bpp[2], c_bpp[2])),
                            chunk[3]
                                .wrapping_add(filter_paeth_decode(a_bpp[3], b_bpp[3], c_bpp[3])),
                            chunk[4]
                                .wrapping_add(filter_paeth_decode(a_bpp[4], b_bpp[4], c_bpp[4])),
                            chunk[5]
                                .wrapping_add(filter_paeth_decode(a_bpp[5], b_bpp[5], c_bpp[5])),
                        ];
                        *TryInto::<&mut [u8; 6]>::try_into(chunk).unwrap() = new_chunk;
                        a_bpp = new_chunk;
                        c_bpp = b_bpp.try_into().unwrap();
                    }
                }
                BytesPerPixel::Eight => {
                    let mut a_bpp = [0; 8];
                    let mut c_bpp = [0; 8];
                    for (chunk, b_bpp) in current.chunks_exact_mut(8).zip(previous.chunks_exact(8))
                    {
                        let new_chunk = [
                            chunk[0]
                                .wrapping_add(filter_paeth_decode(a_bpp[0], b_bpp[0], c_bpp[0])),
                            chunk[1]
                                .wrapping_add(filter_paeth_decode(a_bpp[1], b_bpp[1], c_bpp[1])),
                            chunk[2]
                                .wrapping_add(filter_paeth_decode(a_bpp[2], b_bpp[2], c_bpp[2])),
                            chunk[3]
                                .wrapping_add(filter_paeth_decode(a_bpp[3], b_bpp[3], c_bpp[3])),
                            chunk[4]
                                .wrapping_add(filter_paeth_decode(a_bpp[4], b_bpp[4], c_bpp[4])),
                            chunk[5]
                                .wrapping_add(filter_paeth_decode(a_bpp[5], b_bpp[5], c_bpp[5])),
                            chunk[6]
                                .wrapping_add(filter_paeth_decode(a_bpp[6], b_bpp[6], c_bpp[6])),
                            chunk[7]
                                .wrapping_add(filter_paeth_decode(a_bpp[7], b_bpp[7], c_bpp[7])),
                        ];
                        *TryInto::<&mut [u8; 8]>::try_into(chunk).unwrap() = new_chunk;
                        a_bpp = new_chunk;
                        c_bpp = b_bpp.try_into().unwrap();
                    }
                }
            }
        }
    }
}
+
/// Applies a single filter `method` to the scanline `current` (against the
/// raw `previous` scanline), writing the filtered bytes into `output`.
///
/// `bpp` is the byte distance between corresponding bytes of adjacent pixels
/// and `len` is the scanline length in bytes. The first `bpp` bytes of a line
/// have no left neighbour and are handled separately after each main loop.
/// Returns the filter that was applied (always `method` here).
fn filter_internal(
    method: FilterType,
    bpp: usize,
    len: usize,
    previous: &[u8],
    current: &[u8],
    output: &mut [u8],
) -> FilterType {
    use self::FilterType::*;

    // This value was chosen experimentally based on what achieved the best performance. The
    // Rust compiler does auto-vectorization, and 32-bytes per loop iteration seems to enable
    // the fastest code when doing so.
    const CHUNK_SIZE: usize = 32;

    match method {
        NoFilter => {
            output.copy_from_slice(current);
            NoFilter
        }
        // out = cur - A, where A is the byte `bpp` positions to the left.
        Sub => {
            let mut out_chunks = output[bpp..].chunks_exact_mut(CHUNK_SIZE);
            let mut cur_chunks = current[bpp..].chunks_exact(CHUNK_SIZE);
            let mut prev_chunks = current[..len - bpp].chunks_exact(CHUNK_SIZE);

            for ((out, cur), prev) in (&mut out_chunks).zip(&mut cur_chunks).zip(&mut prev_chunks) {
                for i in 0..CHUNK_SIZE {
                    out[i] = cur[i].wrapping_sub(prev[i]);
                }
            }

            // Tail shorter than CHUNK_SIZE.
            for ((out, cur), &prev) in out_chunks
                .into_remainder()
                .iter_mut()
                .zip(cur_chunks.remainder())
                .zip(prev_chunks.remainder())
            {
                *out = cur.wrapping_sub(prev);
            }

            // The first pixel has no left neighbour (A = 0), so it is copied.
            output[..bpp].copy_from_slice(&current[..bpp]);
            Sub
        }
        // out = cur - B, where B is the byte directly above; bpp-independent.
        Up => {
            let mut out_chunks = output.chunks_exact_mut(CHUNK_SIZE);
            let mut cur_chunks = current.chunks_exact(CHUNK_SIZE);
            let mut prev_chunks = previous.chunks_exact(CHUNK_SIZE);

            for ((out, cur), prev) in (&mut out_chunks).zip(&mut cur_chunks).zip(&mut prev_chunks) {
                for i in 0..CHUNK_SIZE {
                    out[i] = cur[i].wrapping_sub(prev[i]);
                }
            }

            for ((out, cur), &prev) in out_chunks
                .into_remainder()
                .iter_mut()
                .zip(cur_chunks.remainder())
                .zip(prev_chunks.remainder())
            {
                *out = cur.wrapping_sub(prev);
            }
            Up
        }
        // out = cur - floor((A + B) / 2).
        Avg => {
            let mut out_chunks = output[bpp..].chunks_exact_mut(CHUNK_SIZE);
            let mut cur_chunks = current[bpp..].chunks_exact(CHUNK_SIZE);
            let mut cur_minus_bpp_chunks = current[..len - bpp].chunks_exact(CHUNK_SIZE);
            let mut prev_chunks = previous[bpp..].chunks_exact(CHUNK_SIZE);

            for (((out, cur), cur_minus_bpp), prev) in (&mut out_chunks)
                .zip(&mut cur_chunks)
                .zip(&mut cur_minus_bpp_chunks)
                .zip(&mut prev_chunks)
            {
                for i in 0..CHUNK_SIZE {
                    // Bitwise average of two integers without overflow and
                    // without converting to a wider bit-width. See:
                    // http://aggregate.org/MAGIC/#Average%20of%20Integers
                    // If this is unrolled by component, consider reverting to
                    // `((cur_minus_bpp[i] as u16 + prev[i] as u16) / 2) as u8`
                    out[i] = cur[i].wrapping_sub(
                        (cur_minus_bpp[i] & prev[i]) + ((cur_minus_bpp[i] ^ prev[i]) >> 1),
                    );
                }
            }

            for (((out, cur), &cur_minus_bpp), &prev) in out_chunks
                .into_remainder()
                .iter_mut()
                .zip(cur_chunks.remainder())
                .zip(cur_minus_bpp_chunks.remainder())
                .zip(prev_chunks.remainder())
            {
                *out = cur.wrapping_sub((cur_minus_bpp & prev) + ((cur_minus_bpp ^ prev) >> 1));
            }

            // First pixel: A = 0, so the average degenerates to B / 2.
            for i in 0..bpp {
                output[i] = current[i].wrapping_sub(previous[i] / 2);
            }
            Avg
        }
        // out = cur - paeth(A, B, C) with A = left, B = above, C = upper-left.
        Paeth => {
            let mut out_chunks = output[bpp..].chunks_exact_mut(CHUNK_SIZE);
            let mut cur_chunks = current[bpp..].chunks_exact(CHUNK_SIZE);
            let mut a_chunks = current[..len - bpp].chunks_exact(CHUNK_SIZE);
            let mut b_chunks = previous[bpp..].chunks_exact(CHUNK_SIZE);
            let mut c_chunks = previous[..len - bpp].chunks_exact(CHUNK_SIZE);

            for ((((out, cur), a), b), c) in (&mut out_chunks)
                .zip(&mut cur_chunks)
                .zip(&mut a_chunks)
                .zip(&mut b_chunks)
                .zip(&mut c_chunks)
            {
                for i in 0..CHUNK_SIZE {
                    out[i] = cur[i].wrapping_sub(filter_paeth(a[i], b[i], c[i]));
                }
            }

            for ((((out, cur), &a), &b), &c) in out_chunks
                .into_remainder()
                .iter_mut()
                .zip(cur_chunks.remainder())
                .zip(a_chunks.remainder())
                .zip(b_chunks.remainder())
                .zip(c_chunks.remainder())
            {
                *out = cur.wrapping_sub(filter_paeth(a, b, c));
            }

            // First pixel: A = C = 0, only B participates in the predictor.
            for i in 0..bpp {
                output[i] = current[i].wrapping_sub(filter_paeth(0, previous[i], 0));
            }
            Paeth
        }
    }
}
+
/// Filters the scanline `current` into `output`, either with the fixed
/// `method` or — when `adaptive` is [`AdaptiveFilterType::Adaptive`] — with
/// the heuristically best of `Sub`/`Up`/`Avg`/`Paeth`.
///
/// `previous` is the prior (raw, unfiltered) scanline. Returns the filter
/// that was actually applied to `output`.
pub(crate) fn filter(
    method: FilterType,
    adaptive: AdaptiveFilterType,
    bpp: BytesPerPixel,
    previous: &[u8],
    current: &[u8],
    output: &mut [u8],
) -> FilterType {
    use FilterType::*;
    let bpp = bpp.into_usize();
    let len = current.len();

    match adaptive {
        AdaptiveFilterType::NonAdaptive => {
            filter_internal(method, bpp, len, previous, current, output)
        }
        AdaptiveFilterType::Adaptive => {
            // Minimum-sum-of-absolute-differences heuristic: try each filter
            // (NoFilter is never tried) and keep the one whose filtered
            // output has the smallest magnitude. `<=` means later filters in
            // the list win ties.
            let mut min_sum: u64 = u64::MAX;
            let mut filter_choice = FilterType::NoFilter;
            for &filter in [Sub, Up, Avg, Paeth].iter() {
                filter_internal(filter, bpp, len, previous, current, output);
                let sum = sum_buffer(output);
                if sum <= min_sum {
                    min_sum = sum;
                    filter_choice = filter;
                }
            }

            // `output` still holds the Paeth result (the last filter run), so
            // only re-filter when a different filter won.
            if filter_choice != Paeth {
                filter_internal(filter_choice, bpp, len, previous, current, output);
            }
            filter_choice
        }
    }
}
+
+// Helper function for Adaptive filter buffer summation
+fn sum_buffer(buf: &[u8]) -> u64 {
+ const CHUNK_SIZE: usize = 32;
+
+ let mut buf_chunks = buf.chunks_exact(CHUNK_SIZE);
+ let mut sum = 0_u64;
+
+ for chunk in &mut buf_chunks {
+ // At most, `acc` can be `32 * (i8::MIN as u8) = 32 * 128 = 4096`.
+ let mut acc = 0;
+ for &b in chunk {
+ acc += u64::from((b as i8).unsigned_abs());
+ }
+ sum = sum.saturating_add(acc);
+ }
+
+ let mut acc = 0;
+ for &b in buf_chunks.remainder() {
+ acc += u64::from((b as i8).unsigned_abs());
+ }
+
+ sum.saturating_add(acc)
+}
+
#[cfg(test)]
mod test {
    use super::{filter, unfilter, AdaptiveFilterType, BytesPerPixel, FilterType};
    use core::iter;

    /// A scanline length that is a multiple of every supported
    /// bytes-per-pixel (1, 2, 3, 4, 6, 8), so `chunks_exact` covers the
    /// whole buffer in every configuration.
    const LEN: u8 = 240;

    /// Filters `current` against `previous` with every (filter, bpp)
    /// combination, unfilters the result in place, and asserts that the
    /// original bytes come back unchanged. Shared by the roundtrip tests
    /// below (previously duplicated in both of them).
    fn assert_roundtrips(previous: &[u8], current: &[u8]) {
        let adaptive = AdaptiveFilterType::NonAdaptive;
        let expected = current.to_vec();

        let filters = [
            FilterType::NoFilter,
            FilterType::Sub,
            FilterType::Up,
            FilterType::Avg,
            FilterType::Paeth,
        ];

        let bpps = [
            BytesPerPixel::One,
            BytesPerPixel::Two,
            BytesPerPixel::Three,
            BytesPerPixel::Four,
            BytesPerPixel::Six,
            BytesPerPixel::Eight,
        ];

        for &kind in filters.iter() {
            for &bpp in bpps.iter() {
                let mut output = vec![0; LEN.into()];
                filter(kind, adaptive, bpp, previous, current, &mut output);
                unfilter(kind, bpp, previous, &mut output);
                assert_eq!(
                    output, expected,
                    "Filtering {:?} with {:?} does not roundtrip",
                    bpp, kind
                );
            }
        }
    }

    #[test]
    fn roundtrip() {
        // Constant previous line, ascending current line.
        let previous: Vec<_> = iter::repeat(1).take(LEN.into()).collect();
        let current: Vec<_> = (0..LEN).collect();
        assert_roundtrips(&previous, &current);
    }

    #[test]
    fn roundtrip_ascending_previous_line() {
        // Both lines ascending, exercising non-trivial Up/Avg/Paeth inputs.
        let previous: Vec<_> = (0..LEN).collect();
        let current: Vec<_> = (0..LEN).collect();
        assert_roundtrips(&previous, &current);
    }

    #[test]
    // This tests that converting u8 to i8 doesn't overflow when taking the
    // absolute value for adaptive filtering: -128_i8.abs() will panic in debug
    // or produce garbage in release mode. The sum of 0..=255u8 should equal the
    // sum of the absolute values of -128_i8..=127, or abs(-128..=0) + 1..=127.
    fn sum_buffer_test() {
        let sum = (0..=128).sum::<u64>() + (1..=127).sum::<u64>();
        let buf: Vec<u8> = (0_u8..=255).collect();

        assert_eq!(sum, crate::filter::sum_buffer(&buf));
    }
}
diff --git a/vendor/png/src/lib.rs b/vendor/png/src/lib.rs
new file mode 100644
index 0000000..b3bb15b
--- /dev/null
+++ b/vendor/png/src/lib.rs
@@ -0,0 +1,81 @@
+//! # PNG encoder and decoder
+//!
+//! This crate contains a PNG encoder and decoder. It supports reading of single lines or whole frames.
+//!
+//! ## The decoder
+//!
+//! The most important types for decoding purposes are [`Decoder`](struct.Decoder.html) and
+//! [`Reader`](struct.Reader.html). They both wrap a `std::io::Read`.
+//! `Decoder` serves as a builder for `Reader`. Calling `Decoder::read_info` reads from the `Read` until the
+//! image data is reached.
+//!
+//! ### Using the decoder
+//! ```
+//! use std::fs::File;
+//! // The decoder is a builder for `Reader` and can be used to set various decoding options
+//! // via `Transformations`. The default output transformation is `Transformations::IDENTITY`.
+//! let decoder = png::Decoder::new(File::open("tests/pngsuite/basi0g01.png").unwrap());
+//! let mut reader = decoder.read_info().unwrap();
+//! // Allocate the output buffer.
+//! let mut buf = vec![0; reader.output_buffer_size()];
+//! // Read the next frame. An APNG might contain multiple frames.
+//! let info = reader.next_frame(&mut buf).unwrap();
+//! // Grab the bytes of the image.
+//! let bytes = &buf[..info.buffer_size()];
+//! // Inspect more details of the last read frame.
+//! let in_animation = reader.info().frame_control.is_some();
+//! ```
+//!
+//! ## Encoder
+//! ### Using the encoder
+//!
+//! ```no_run
+//! // For reading and opening files
+//! use std::path::Path;
+//! use std::fs::File;
+//! use std::io::BufWriter;
+//!
+//! let path = Path::new(r"/path/to/image.png");
+//! let file = File::create(path).unwrap();
+//! let ref mut w = BufWriter::new(file);
+//!
+//! let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1.
+//! encoder.set_color(png::ColorType::Rgba);
+//! encoder.set_depth(png::BitDepth::Eight);
+//! encoder.set_source_gamma(png::ScaledFloat::from_scaled(45455)); // 1.0 / 2.2, scaled by 100000
+//! encoder.set_source_gamma(png::ScaledFloat::new(1.0 / 2.2)); // 1.0 / 2.2, unscaled, but rounded
+//! let source_chromaticities = png::SourceChromaticities::new( // Using unscaled instantiation here
+//! (0.31270, 0.32900),
+//! (0.64000, 0.33000),
+//! (0.30000, 0.60000),
+//! (0.15000, 0.06000)
+//! );
+//! encoder.set_source_chromaticities(source_chromaticities);
+//! let mut writer = encoder.write_header().unwrap();
+//!
+//! let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing a RGBA sequence. First pixel is red and second pixel is black.
+//! writer.write_image_data(&data).unwrap(); // Save
+//! ```
+//!
+
+#![forbid(unsafe_code)]
+
+#[macro_use]
+extern crate bitflags;
+
+pub mod chunk;
+mod common;
+mod decoder;
+mod encoder;
+mod filter;
+mod srgb;
+pub mod text_metadata;
+mod traits;
+mod utils;
+
+pub use crate::common::*;
+pub use crate::decoder::{
+ DecodeOptions, Decoded, Decoder, DecodingError, Limits, OutputInfo, Reader, StreamingDecoder,
+};
+pub use crate::encoder::{Encoder, EncodingError, StreamWriter, Writer};
+pub use crate::filter::{AdaptiveFilterType, FilterType};
diff --git a/vendor/png/src/srgb.rs b/vendor/png/src/srgb.rs
new file mode 100644
index 0000000..2780e42
--- /dev/null
+++ b/vendor/png/src/srgb.rs
@@ -0,0 +1,30 @@
+use crate::{ScaledFloat, SourceChromaticities};
+
/// Get the gamma that should be substituted for images conforming to the sRGB color space.
pub fn substitute_gamma() -> ScaledFloat {
    // Value taken from https://www.w3.org/TR/2003/REC-PNG-20031110/#11sRGB
    // 45455 / 100_000 ~= 1 / 2.2, the standard sRGB gamma value.
    ScaledFloat::from_scaled(45455)
}
+
/// Get the chromaticities that should be substituted for images conforming to the sRGB color space.
///
/// Each coordinate is a `ScaledFloat` holding the chromaticity value
/// multiplied by 100_000 (e.g. 31270 is x = 0.31270).
pub fn substitute_chromaticities() -> SourceChromaticities {
    // Values taken from https://www.w3.org/TR/2003/REC-PNG-20031110/#11sRGB
    SourceChromaticities {
        // White point (0.31270, 0.32900)
        white: (
            ScaledFloat::from_scaled(31270),
            ScaledFloat::from_scaled(32900),
        ),
        // Red primary (0.64000, 0.33000)
        red: (
            ScaledFloat::from_scaled(64000),
            ScaledFloat::from_scaled(33000),
        ),
        // Green primary (0.30000, 0.60000)
        green: (
            ScaledFloat::from_scaled(30000),
            ScaledFloat::from_scaled(60000),
        ),
        // Blue primary (0.15000, 0.06000)
        blue: (
            ScaledFloat::from_scaled(15000),
            ScaledFloat::from_scaled(6000),
        ),
    }
}
diff --git a/vendor/png/src/text_metadata.rs b/vendor/png/src/text_metadata.rs
new file mode 100644
index 0000000..42f8df3
--- /dev/null
+++ b/vendor/png/src/text_metadata.rs
@@ -0,0 +1,586 @@
+//! # Text chunks (tEXt/zTXt/iTXt) structs and functions
+//!
+//! The [PNG spec](https://www.w3.org/TR/2003/REC-PNG-20031110/#11textinfo) optionally allows for
+//! embedded text chunks in the file. They may appear either before or after the image data
+//! chunks. There are three kinds of text chunks.
+//! - `tEXt`: This has a `keyword` and `text` field, and is ISO 8859-1 encoded.
+//! - `zTXt`: This is semantically the same as `tEXt`, i.e. it has the same fields and
+//! encoding, but the `text` field is compressed before being written into the PNG file.
+//! - `iTXt`: This chunk allows for its `text` field to be any valid UTF-8, and supports
+//! compression of the text field as well.
+//!
+//! The `ISO 8859-1` encoding technically doesn't allow any control characters
+//! to be used, but in practice these values are encountered anyway. This can
+//! either be the extended `ISO-8859-1` encoding with control characters or the
+//! `Windows-1252` encoding. This crate assumes the `ISO-8859-1` encoding is
+//! used.
+//!
+//! ## Reading text chunks
+//!
//! As a PNG is decoded, any text chunk encountered is appended to the
+//! [`Info`](`crate::common::Info`) struct, in the `uncompressed_latin1_text`,
+//! `compressed_latin1_text`, and the `utf8_text` fields depending on whether the encountered
+//! chunk is `tEXt`, `zTXt`, or `iTXt`.
+//!
+//! ```
+//! use std::fs::File;
+//! use std::iter::FromIterator;
+//! use std::path::PathBuf;
+//!
+//! // Opening a png file that has a zTXt chunk
+//! let decoder = png::Decoder::new(
+//! File::open(PathBuf::from_iter([
+//! "tests",
+//! "text_chunk_examples",
+//! "ztxt_example.png",
+//! ]))
+//! .unwrap(),
+//! );
+//! let mut reader = decoder.read_info().unwrap();
+//! // If the text chunk is before the image data frames, `reader.info()` already contains the text.
+//! for text_chunk in &reader.info().compressed_latin1_text {
+//! println!("{:?}", text_chunk.keyword); // Prints the keyword
+//! println!("{:#?}", text_chunk); // Prints out the text chunk.
+//! // To get the uncompressed text, use the `get_text` method.
+//! println!("{}", text_chunk.get_text().unwrap());
+//! }
+//! ```
+//!
+//! ## Writing text chunks
+//!
+//! There are two ways to write text chunks: the first is to add the appropriate text structs directly to the encoder header before the header is written to file.
+//! To add a text chunk at any point in the stream, use the `write_text_chunk` method.
+//!
+//! ```
+//! # use png::text_metadata::{ITXtChunk, ZTXtChunk};
+//! # use std::env;
+//! # use std::fs::File;
+//! # use std::io::BufWriter;
+//! # use std::iter::FromIterator;
+//! # use std::path::PathBuf;
+//! # let file = File::create(PathBuf::from_iter(["target", "text_chunk.png"])).unwrap();
+//! # let ref mut w = BufWriter::new(file);
+//! let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1.
+//! encoder.set_color(png::ColorType::Rgba);
+//! encoder.set_depth(png::BitDepth::Eight);
+//! // Adding text chunks to the header
+//! encoder
+//! .add_text_chunk(
+//! "Testing tEXt".to_string(),
+//! "This is a tEXt chunk that will appear before the IDAT chunks.".to_string(),
+//! )
+//! .unwrap();
+//! encoder
+//! .add_ztxt_chunk(
+//! "Testing zTXt".to_string(),
+//! "This is a zTXt chunk that is compressed in the png file.".to_string(),
+//! )
+//! .unwrap();
+//! encoder
+//! .add_itxt_chunk(
+//! "Testing iTXt".to_string(),
+//! "iTXt chunks support all of UTF8. Example: हिंदी.".to_string(),
+//! )
+//! .unwrap();
+//!
+//! let mut writer = encoder.write_header().unwrap();
+//!
+//! let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing a RGBA sequence. First pixel is red and second pixel is black.
+//! writer.write_image_data(&data).unwrap(); // Save
+//!
+//! // We can add a tEXt/zTXt/iTXt at any point before the encoder is dropped from scope. These chunks will be at the end of the png file.
+//! let tail_ztxt_chunk = ZTXtChunk::new("Comment".to_string(), "A zTXt chunk after the image data.".to_string());
+//! writer.write_text_chunk(&tail_ztxt_chunk).unwrap();
+//!
+//! // The fields of the text chunk are public, so they can be mutated before being written to the file.
+//! let mut tail_itxt_chunk = ITXtChunk::new("Author".to_string(), "सायंतन खान".to_string());
+//! tail_itxt_chunk.compressed = true;
+//! tail_itxt_chunk.language_tag = "hi".to_string();
+//! tail_itxt_chunk.translated_keyword = "लेखक".to_string();
+//! writer.write_text_chunk(&tail_itxt_chunk).unwrap();
+//! ```
+
+#![warn(missing_docs)]
+
+use crate::{chunk, encoder, DecodingError, EncodingError};
+use flate2::write::ZlibEncoder;
+use flate2::Compression;
+use miniz_oxide::inflate::{decompress_to_vec_zlib, decompress_to_vec_zlib_with_limit};
+use std::{convert::TryFrom, io::Write};
+
/// Default decompression limit for compressed text chunks.
///
/// Applied by `decompress_text`; use `decompress_text_with_limit` to override it.
pub const DECOMPRESSION_LIMIT: usize = 2097152; // 2 MiB
+
/// Text encoding errors that are wrapped by the standard EncodingError type
#[derive(Debug, Clone, Copy)]
pub(crate) enum TextEncodingError {
    /// Unrepresentable characters in string
    Unrepresentable,
    /// Keyword longer than 79 bytes or empty
    InvalidKeywordSize,
    /// Error encountered while compressing text
    CompressionError,
}
+
/// Text decoding errors that are wrapped by the standard DecodingError type
#[derive(Debug, Clone, Copy)]
pub(crate) enum TextDecodingError {
    /// Unrepresentable characters in string
    Unrepresentable,
    /// Keyword longer than 79 bytes or empty
    InvalidKeywordSize,
    /// Missing null separator
    MissingNullSeparator,
    /// Compressed text cannot be uncompressed
    InflationError,
    /// Needs more space to decompress
    OutOfDecompressionSpace,
    /// Using an unspecified value for the compression method
    InvalidCompressionMethod,
    /// Using a byte that is not 0 or 255 as compression flag in iTXt chunk
    InvalidCompressionFlag,
    /// Missing the compression flag
    MissingCompressionFlag,
}
+
/// A generalized text chunk trait
pub trait EncodableTextChunk {
    /// Encode the text chunk and write it to `w` (delegated to `encoder::write_chunk`)
    fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError>;
}
+
/// Struct representing a tEXt chunk
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TEXtChunk {
    /// Keyword field of the tEXt chunk. Needs to be between 1-79 bytes when encoded as Latin-1.
    pub keyword: String,
    /// Text field of tEXt chunk. Can be at most 2GB. Written as Latin-1 (ISO 8859-1).
    pub text: String,
}
+
/// Decode a byte slice as ISO 8859-1 (Latin-1): every byte maps directly to
/// the Unicode code point with the same value.
fn decode_iso_8859_1(text: &[u8]) -> String {
    let mut decoded = String::with_capacity(text.len());
    for &byte in text {
        decoded.push(char::from(byte));
    }
    decoded
}
+
+fn encode_iso_8859_1(text: &str) -> Result<Vec<u8>, TextEncodingError> {
+ encode_iso_8859_1_iter(text).collect()
+}
+
+fn encode_iso_8859_1_into(buf: &mut Vec<u8>, text: &str) -> Result<(), TextEncodingError> {
+ for b in encode_iso_8859_1_iter(text) {
+ buf.push(b?);
+ }
+ Ok(())
+}
+
+fn encode_iso_8859_1_iter(text: &str) -> impl Iterator<Item = Result<u8, TextEncodingError>> + '_ {
+ text.chars()
+ .map(|c| u8::try_from(c as u32).map_err(|_| TextEncodingError::Unrepresentable))
+}
+
+fn decode_ascii(text: &[u8]) -> Result<&str, TextDecodingError> {
+ if text.is_ascii() {
+ // `from_utf8` cannot panic because we're already checked that `text` is ASCII-7.
+ // And this is the only safe way to get ASCII-7 string from `&[u8]`.
+ Ok(std::str::from_utf8(text).expect("unreachable"))
+ } else {
+ Err(TextDecodingError::Unrepresentable)
+ }
+}
+
+impl TEXtChunk {
+ /// Constructs a new TEXtChunk.
+ /// Not sure whether it should take &str or String.
+ pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self {
+ Self {
+ keyword: keyword.into(),
+ text: text.into(),
+ }
+ }
+
+ /// Decodes a slice of bytes to a String using Latin-1 decoding.
+ /// The decoder runs in strict mode, and any decoding errors are passed along to the caller.
+ pub(crate) fn decode(
+ keyword_slice: &[u8],
+ text_slice: &[u8],
+ ) -> Result<Self, TextDecodingError> {
+ if keyword_slice.is_empty() || keyword_slice.len() > 79 {
+ return Err(TextDecodingError::InvalidKeywordSize);
+ }
+
+ Ok(Self {
+ keyword: decode_iso_8859_1(keyword_slice),
+ text: decode_iso_8859_1(text_slice),
+ })
+ }
+}
+
+impl EncodableTextChunk for TEXtChunk {
+ /// Encodes TEXtChunk to a Writer. The keyword and text are separated by a byte of zeroes.
+ fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> {
+ let mut data = encode_iso_8859_1(&self.keyword)?;
+
+ if data.is_empty() || data.len() > 79 {
+ return Err(TextEncodingError::InvalidKeywordSize.into());
+ }
+
+ data.push(0);
+
+ encode_iso_8859_1_into(&mut data, &self.text)?;
+
+ encoder::write_chunk(w, chunk::tEXt, &data)
+ }
+}
+
/// Struct representing a zTXt chunk
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ZTXtChunk {
    /// Keyword field of the zTXt chunk. Needs to be between 1-79 bytes when encoded as Latin-1.
    pub keyword: String,
    /// Text field of zTXt chunk. It is compressed by default, but can be uncompressed if necessary.
    text: OptCompressed,
}
+
/// Private enum encoding the compressed and uncompressed states of zTXt/iTXt text field.
#[derive(Clone, Debug, PartialEq, Eq)]
enum OptCompressed {
    /// Compressed version of text field (a complete zlib stream). Can be at most 2GB.
    Compressed(Vec<u8>),
    /// Uncompressed text field.
    Uncompressed(String),
}
+
impl ZTXtChunk {
    /// Creates a new ZTXt chunk.
    pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self {
        Self {
            keyword: keyword.into(),
            text: OptCompressed::Uncompressed(text.into()),
        }
    }

    /// Parses a zTXt chunk body; the text payload is kept compressed until requested.
    pub(crate) fn decode(
        keyword_slice: &[u8],
        compression_method: u8,
        text_slice: &[u8],
    ) -> Result<Self, TextDecodingError> {
        if keyword_slice.is_empty() || keyword_slice.len() > 79 {
            return Err(TextDecodingError::InvalidKeywordSize);
        }

        // 0 (zlib/deflate) is the only compression method accepted here.
        if compression_method != 0 {
            return Err(TextDecodingError::InvalidCompressionMethod);
        }

        Ok(Self {
            keyword: decode_iso_8859_1(keyword_slice),
            text: OptCompressed::Compressed(text_slice.to_vec()),
        })
    }

    /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `DECOMPRESSION_LIMIT` bytes.
    pub fn decompress_text(&mut self) -> Result<(), DecodingError> {
        self.decompress_text_with_limit(DECOMPRESSION_LIMIT)
    }

    /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `limit` bytes.
    pub fn decompress_text_with_limit(&mut self, limit: usize) -> Result<(), DecodingError> {
        match &self.text {
            OptCompressed::Compressed(v) => {
                let uncompressed_raw = match decompress_to_vec_zlib_with_limit(&v[..], limit) {
                    Ok(s) => s,
                    // `HasMoreOutput` means `limit` was hit; report it separately
                    // so callers can retry with a larger limit.
                    Err(err) if err.status == miniz_oxide::inflate::TINFLStatus::HasMoreOutput => {
                        return Err(DecodingError::from(
                            TextDecodingError::OutOfDecompressionSpace,
                        ));
                    }
                    Err(_) => {
                        return Err(DecodingError::from(TextDecodingError::InflationError));
                    }
                };
                self.text = OptCompressed::Uncompressed(decode_iso_8859_1(&uncompressed_raw));
            }
            OptCompressed::Uncompressed(_) => {}
        };
        Ok(())
    }

    /// Decompresses the inner text, and returns it as a `String`.
    /// If decompression uses more than 2 MiB, first call `decompress_text_with_limit`, and then this method.
    pub fn get_text(&self) -> Result<String, DecodingError> {
        match &self.text {
            OptCompressed::Compressed(v) => {
                // NOTE(review): unlike `decompress_text`, no size limit is applied here.
                let uncompressed_raw = decompress_to_vec_zlib(&v[..])
                    .map_err(|_| DecodingError::from(TextDecodingError::InflationError))?;
                Ok(decode_iso_8859_1(&uncompressed_raw))
            }
            OptCompressed::Uncompressed(s) => Ok(s.clone()),
        }
    }

    /// Compresses the inner text, mutating its own state.
    pub fn compress_text(&mut self) -> Result<(), EncodingError> {
        match &self.text {
            OptCompressed::Uncompressed(s) => {
                // zTXt text is Latin-1, so re-encode it before compressing.
                let uncompressed_raw = encode_iso_8859_1(s)?;
                let mut encoder = ZlibEncoder::new(Vec::new(), Compression::fast());
                encoder
                    .write_all(&uncompressed_raw)
                    .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                self.text = OptCompressed::Compressed(
                    encoder
                        .finish()
                        .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?,
                );
            }
            OptCompressed::Compressed(_) => {}
        }

        Ok(())
    }
}
+
impl EncodableTextChunk for ZTXtChunk {
    /// Serializes the chunk body: Latin-1 keyword, null separator, compression
    /// method byte, then the zlib-compressed text.
    fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> {
        let mut data = encode_iso_8859_1(&self.keyword)?;

        if data.is_empty() || data.len() > 79 {
            return Err(TextEncodingError::InvalidKeywordSize.into());
        }

        // Null separator
        data.push(0);

        // Compression method: the only valid value is 0, as of 2021.
        data.push(0);

        match &self.text {
            OptCompressed::Compressed(v) => {
                data.extend_from_slice(&v[..]);
            }
            OptCompressed::Uncompressed(s) => {
                // `ZlibEncoder` writes into the `Vec` it wraps, so the compressed
                // stream is appended after the keyword/method prefix already in `data`.
                let uncompressed_raw = encode_iso_8859_1(s)?;
                let mut encoder = ZlibEncoder::new(data, Compression::fast());
                encoder
                    .write_all(&uncompressed_raw)
                    .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                data = encoder
                    .finish()
                    .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
            }
        };

        encoder::write_chunk(w, chunk::zTXt, &data)
    }
}
+
/// Struct encoding an iTXt chunk
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ITXtChunk {
    /// The keyword field. This needs to be between 1-79 bytes when encoded as Latin-1.
    pub keyword: String,
    /// Indicates whether the text will be (or was) compressed in the PNG.
    pub compressed: bool,
    /// A hyphen separated list of languages that the keyword is translated to. This is ASCII-7 encoded.
    pub language_tag: String,
    /// Translated keyword. This is UTF-8 encoded.
    pub translated_keyword: String,
    /// Text field of iTXt chunk. It is compressed by default, but can be uncompressed if necessary.
    text: OptCompressed,
}
+
impl ITXtChunk {
    /// Constructs a new iTXt chunk. Leaves all but keyword and text to default values.
    pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self {
        Self {
            keyword: keyword.into(),
            compressed: false,
            language_tag: "".to_string(),
            translated_keyword: "".to_string(),
            text: OptCompressed::Uncompressed(text.into()),
        }
    }

    /// Parses an iTXt chunk body. Validates the keyword length, the compression
    /// flag/method bytes, and the encodings of the individual fields.
    pub(crate) fn decode(
        keyword_slice: &[u8],
        compression_flag: u8,
        compression_method: u8,
        language_tag_slice: &[u8],
        translated_keyword_slice: &[u8],
        text_slice: &[u8],
    ) -> Result<Self, TextDecodingError> {
        if keyword_slice.is_empty() || keyword_slice.len() > 79 {
            return Err(TextDecodingError::InvalidKeywordSize);
        }
        let keyword = decode_iso_8859_1(keyword_slice);

        let compressed = match compression_flag {
            0 => false,
            1 => true,
            _ => return Err(TextDecodingError::InvalidCompressionFlag),
        };

        // The method byte is only checked when the flag says "compressed";
        // 0 (zlib/deflate) is the only accepted value.
        if compressed && compression_method != 0 {
            return Err(TextDecodingError::InvalidCompressionMethod);
        }

        // Language tag is ASCII-7; translated keyword and text are UTF-8.
        let language_tag = decode_ascii(language_tag_slice)?.to_owned();

        let translated_keyword = std::str::from_utf8(translated_keyword_slice)
            .map_err(|_| TextDecodingError::Unrepresentable)?
            .to_string();
        let text = if compressed {
            // Keep the raw zlib stream; decompression happens on demand.
            OptCompressed::Compressed(text_slice.to_vec())
        } else {
            OptCompressed::Uncompressed(
                String::from_utf8(text_slice.to_vec())
                    .map_err(|_| TextDecodingError::Unrepresentable)?,
            )
        };

        Ok(Self {
            keyword,
            compressed,
            language_tag,
            translated_keyword,
            text,
        })
    }

    /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `DECOMPRESSION_LIMIT` bytes.
    pub fn decompress_text(&mut self) -> Result<(), DecodingError> {
        self.decompress_text_with_limit(DECOMPRESSION_LIMIT)
    }

    /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `limit` bytes.
    pub fn decompress_text_with_limit(&mut self, limit: usize) -> Result<(), DecodingError> {
        match &self.text {
            OptCompressed::Compressed(v) => {
                let uncompressed_raw = match decompress_to_vec_zlib_with_limit(&v[..], limit) {
                    Ok(s) => s,
                    // `HasMoreOutput` means `limit` was hit; report it separately
                    // so callers can retry with a larger limit.
                    Err(err) if err.status == miniz_oxide::inflate::TINFLStatus::HasMoreOutput => {
                        return Err(DecodingError::from(
                            TextDecodingError::OutOfDecompressionSpace,
                        ));
                    }
                    Err(_) => {
                        return Err(DecodingError::from(TextDecodingError::InflationError));
                    }
                };
                self.text = OptCompressed::Uncompressed(
                    String::from_utf8(uncompressed_raw)
                        .map_err(|_| TextDecodingError::Unrepresentable)?,
                );
            }
            OptCompressed::Uncompressed(_) => {}
        };
        Ok(())
    }

    /// Decompresses the inner text, and returns it as a `String`.
    /// If decompression takes more than 2 MiB, try `decompress_text_with_limit` followed by this method.
    pub fn get_text(&self) -> Result<String, DecodingError> {
        match &self.text {
            OptCompressed::Compressed(v) => {
                // NOTE(review): unlike `decompress_text`, no size limit is applied here.
                let uncompressed_raw = decompress_to_vec_zlib(&v[..])
                    .map_err(|_| DecodingError::from(TextDecodingError::InflationError))?;
                String::from_utf8(uncompressed_raw)
                    .map_err(|_| TextDecodingError::Unrepresentable.into())
            }
            OptCompressed::Uncompressed(s) => Ok(s.clone()),
        }
    }

    /// Compresses the inner text, mutating its own state.
    pub fn compress_text(&mut self) -> Result<(), EncodingError> {
        match &self.text {
            OptCompressed::Uncompressed(s) => {
                // iTXt text is UTF-8, so its bytes can be compressed directly.
                let uncompressed_raw = s.as_bytes();
                let mut encoder = ZlibEncoder::new(Vec::new(), Compression::fast());
                encoder
                    .write_all(uncompressed_raw)
                    .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                self.text = OptCompressed::Compressed(
                    encoder
                        .finish()
                        .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?,
                );
            }
            OptCompressed::Compressed(_) => {}
        }

        Ok(())
    }
}
+
impl EncodableTextChunk for ITXtChunk {
    /// Serializes the chunk in field order: keyword, compression flag/method,
    /// language tag, translated keyword, text — with null separators between fields.
    fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> {
        // Keyword
        let mut data = encode_iso_8859_1(&self.keyword)?;

        if data.is_empty() || data.len() > 79 {
            return Err(TextEncodingError::InvalidKeywordSize.into());
        }

        // Null separator
        data.push(0);

        // Compression flag
        if self.compressed {
            data.push(1);
        } else {
            data.push(0);
        }

        // Compression method
        data.push(0);

        // Language tag
        if !self.language_tag.is_ascii() {
            return Err(EncodingError::from(TextEncodingError::Unrepresentable));
        }
        data.extend(self.language_tag.as_bytes());

        // Null separator
        data.push(0);

        // Translated keyword
        data.extend_from_slice(self.translated_keyword.as_bytes());

        // Null separator
        data.push(0);

        // Text
        if self.compressed {
            match &self.text {
                OptCompressed::Compressed(v) => {
                    data.extend_from_slice(&v[..]);
                }
                OptCompressed::Uncompressed(s) => {
                    // Flag says compressed but the text is not: compress now.
                    // `ZlibEncoder` appends its output to the wrapped `data` Vec.
                    let uncompressed_raw = s.as_bytes();
                    let mut encoder = ZlibEncoder::new(data, Compression::fast());
                    encoder
                        .write_all(uncompressed_raw)
                        .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                    data = encoder
                        .finish()
                        .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                }
            }
        } else {
            match &self.text {
                OptCompressed::Compressed(v) => {
                    // Flag says uncompressed but the text is still compressed:
                    // inflate it so the payload matches the flag.
                    let uncompressed_raw = decompress_to_vec_zlib(&v[..])
                        .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                    data.extend_from_slice(&uncompressed_raw[..]);
                }
                OptCompressed::Uncompressed(s) => {
                    data.extend_from_slice(s.as_bytes());
                }
            }
        }

        encoder::write_chunk(w, chunk::iTXt, &data)
    }
}
diff --git a/vendor/png/src/traits.rs b/vendor/png/src/traits.rs
new file mode 100644
index 0000000..ffc10e7
--- /dev/null
+++ b/vendor/png/src/traits.rs
@@ -0,0 +1,43 @@
+use std::io;
+
// Generates an impl of `ReadBytesExt<$output_type>` for every `io::Read`,
// reading `size_of::<$output_type>()` bytes in big-endian (network) order.
macro_rules! read_bytes_ext {
    ($output_type:ty) => {
        impl<W: io::Read + ?Sized> ReadBytesExt<$output_type> for W {
            #[inline]
            fn read_be(&mut self) -> io::Result<$output_type> {
                let mut bytes = [0u8; std::mem::size_of::<$output_type>()];
                self.read_exact(&mut bytes)?;
                Ok(<$output_type>::from_be_bytes(bytes))
            }
        }
    };
}
+
// Generates an impl of `WriteBytesExt<$input_type>` for every `io::Write`,
// writing the value's bytes in big-endian (network) order.
macro_rules! write_bytes_ext {
    ($input_type:ty) => {
        impl<W: io::Write + ?Sized> WriteBytesExt<$input_type> for W {
            #[inline]
            fn write_be(&mut self, n: $input_type) -> io::Result<()> {
                self.write_all(&n.to_be_bytes())
            }
        }
    };
}
+
/// Read extension to read big endian data
pub trait ReadBytesExt<T>: io::Read {
    /// Read `T` from a bytes stream. Most significant byte first.
    fn read_be(&mut self) -> io::Result<T>;
}

/// Write extension to write big endian data
pub trait WriteBytesExt<T>: io::Write {
    /// Writes `T` to a bytes stream. Most significant byte first.
    fn write_be(&mut self, _: T) -> io::Result<()>;
}

// Instantiate the extension traits for the integer widths used by this crate.
read_bytes_ext!(u8);
read_bytes_ext!(u16);
read_bytes_ext!(u32);

write_bytes_ext!(u32);
diff --git a/vendor/png/src/utils.rs b/vendor/png/src/utils.rs
new file mode 100644
index 0000000..d43753b
--- /dev/null
+++ b/vendor/png/src/utils.rs
@@ -0,0 +1,463 @@
+//! Utility functions
+use std::iter::{repeat, StepBy};
+use std::ops::Range;
+
// Expands packed samples (sub-byte bit depths) in place: the first bytes of
// `buf` hold `buf.len() / channels` packed samples, which are unpacked
// back-to-front into `channels`-wide slots covering all of `buf`, so no
// packed byte is overwritten before it has been read. `func` receives each
// sample value and the output slot to fill.
#[inline(always)]
pub fn unpack_bits<F>(buf: &mut [u8], channels: usize, bit_depth: u8, func: F)
where
    F: Fn(u8, &mut [u8]),
{
    // Return early if empty. This enables to subtract `channels` later without overflow.
    if buf.len() < channels {
        return;
    }

    // Total number of packed bits that correspond to output pixels.
    let bits = buf.len() / channels * bit_depth as usize;
    let extra_bits = bits % 8;
    // Number of packed input bytes (the last one possibly partial).
    let entries = bits / 8
        + match extra_bits {
            0 => 0,
            _ => 1,
        };
    // Unused sample positions in the final partial byte that must be skipped.
    let skip = match extra_bits {
        0 => 0,
        n => (8 - n) / bit_depth as usize,
    };
    let mask = ((1u16 << bit_depth) - 1) as u8;
    // (shift, byte index) pairs iterated from the last sample to the first.
    let i = (0..entries)
        .rev() // reverse iterator
        .flat_map(|idx|
            // this has to be reversed too
            (0..8).step_by(bit_depth.into())
                .zip(repeat(idx)))
        .skip(skip);
    // Matching output slots, also iterated back to front.
    let j = (0..=buf.len() - channels).rev().step_by(channels);
    for ((shift, i), j) in i.zip(j) {
        let pixel = (buf[i] & (mask << shift)) >> shift;
        func(pixel, &mut buf[j..(j + channels)])
    }
}
+
/// Expand a scanline by appending an alpha byte to every pixel: 0 when the
/// pixel equals the tRNS transparency sample, 0xFF (opaque) otherwise.
pub fn expand_trns_line(input: &[u8], output: &mut [u8], trns: Option<&[u8]>, channels: usize) {
    let in_pixels = input.chunks_exact(channels);
    let out_pixels = output.chunks_exact_mut(channels + 1);
    for (src, dst) in in_pixels.zip(out_pixels) {
        dst[..channels].copy_from_slice(src);
        dst[channels] = match trns {
            Some(t) if t == src => 0,
            _ => 0xFF,
        };
    }
}
+
/// Expand a 16-bit scanline by appending a 16-bit alpha sample to every
/// pixel: 0 when the pixel equals the tRNS sample, 0xFFFF otherwise.
pub fn expand_trns_line16(input: &[u8], output: &mut [u8], trns: Option<&[u8]>, channels: usize) {
    // Two bytes per 16-bit sample.
    let bpp = channels * 2;
    for (src, dst) in input.chunks_exact(bpp).zip(output.chunks_exact_mut(bpp + 2)) {
        dst[..bpp].copy_from_slice(src);
        let alpha = if trns == Some(src) { 0 } else { 0xFF };
        dst[bpp] = alpha;
        dst[bpp + 1] = alpha;
    }
}
+
/// Strip each big-endian 16-bit sample down to its high byte while appending
/// an 8-bit alpha: 0 when the full 16-bit pixel equals the tRNS sample,
/// 0xFF otherwise.
pub fn expand_trns_and_strip_line16(
    input: &[u8],
    output: &mut [u8],
    trns: Option<&[u8]>,
    channels: usize,
) {
    for (src, dst) in input
        .chunks_exact(channels * 2)
        .zip(output.chunks_exact_mut(channels + 1))
    {
        // Keep only the most significant byte of each sample.
        for (dst_byte, pair) in dst[..channels].iter_mut().zip(src.chunks_exact(2)) {
            *dst_byte = pair[0];
        }
        dst[channels] = if trns == Some(src) { 0 } else { 0xFF };
    }
}
+
/// This iterator iterates over the different passes of an image Adam7 encoded
/// PNG image
/// The pattern is:
///     16462646
///     77777777
///     56565656
///     77777777
///     36463646
///     77777777
///     56565656
///     77777777
///
#[derive(Clone)]
pub(crate) struct Adam7Iterator {
    line: u32,        // next line to yield within the current pass
    lines: u32,       // number of lines in the current pass
    line_width: u32,  // width in pixels of lines in the current pass
    current_pass: u8, // 1..=7
    width: u32,       // full image width
    height: u32,      // full image height
}
+
impl Adam7Iterator {
    /// Creates an iterator positioned at the start of pass 1.
    pub fn new(width: u32, height: u32) -> Adam7Iterator {
        let mut this = Adam7Iterator {
            line: 0,
            lines: 0,
            line_width: 0,
            current_pass: 1,
            width,
            height,
        };
        this.init_pass();
        this
    }

    /// Calculates the bounds of the current pass
    fn init_pass(&mut self) {
        let w = f64::from(self.width);
        let h = f64::from(self.height);
        // Fractional pixel/line counts per pass; the constants subtracted from
        // w/h account for the column/row offset where the pass starts.
        let (line_width, lines) = match self.current_pass {
            1 => (w / 8.0, h / 8.0),
            2 => ((w - 4.0) / 8.0, h / 8.0),
            3 => (w / 4.0, (h - 4.0) / 8.0),
            4 => ((w - 2.0) / 4.0, h / 4.0),
            5 => (w / 2.0, (h - 2.0) / 4.0),
            6 => ((w - 1.0) / 2.0, h / 2.0),
            7 => (w, (h - 1.0) / 2.0),
            _ => unreachable!(),
        };
        // Round up; a negative intermediate (image smaller than the pass
        // offset) ceils to -0.0, and the `as u32` cast turns that into 0.
        self.line_width = line_width.ceil() as u32;
        self.lines = lines.ceil() as u32;
        self.line = 0;
    }

    /// The current pass#.
    pub fn current_pass(&self) -> u8 {
        self.current_pass
    }
}
+
+/// Iterates over the (passes, lines, widths)
+impl Iterator for Adam7Iterator {
+ type Item = (u8, u32, u32);
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.line < self.lines && self.line_width > 0 {
+ let this_line = self.line;
+ self.line += 1;
+ Some((self.current_pass, this_line, self.line_width))
+ } else if self.current_pass < 7 {
+ self.current_pass += 1;
+ self.init_pass();
+ self.next()
+ } else {
+ None
+ }
+ }
+}
+
/// Iterate over the sub-byte samples of `scanline`, most significant bits
/// first. `bits_pp` must be one of the PNG sub-byte depths (1, 2, or 4).
fn subbyte_pixels(scanline: &[u8], bits_pp: usize) -> impl Iterator<Item = u8> + '_ {
    (0..scanline.len() * 8)
        .step_by(bits_pp)
        .map(move |bit_offset| {
            let byte = scanline[bit_offset / 8];

            // Samples are packed starting at the high-order bits of each byte.
            let shift = 8 - bit_offset % 8 - bits_pp;

            let mask = match bits_pp {
                1 => 1,
                2 => 3,
                4 => 15,
                _ => unreachable!(),
            };
            (byte >> shift) & mask
        })
}
+
/// Given pass, image width, and line number, produce an iterator of bit positions of pixels to copy
/// from the input scanline to the image buffer.
fn expand_adam7_bits(
    pass: u8,
    width: usize,
    line_no: usize,
    bits_pp: usize,
) -> StepBy<Range<usize>> {
    // (row stride, row offset, column stride, column offset) of this pass
    // within the full-resolution image.
    let (row_stride, row_offset, col_stride, col_offset) = match pass {
        1 => (8, 0, 8, 0),
        2 => (8, 0, 8, 4),
        3 => (8, 4, 4, 0),
        4 => (4, 0, 4, 2),
        5 => (4, 2, 2, 0),
        6 => (2, 0, 2, 1),
        7 => (2, 1, 1, 0),
        _ => panic!("Adam7 pass out of range: {}", pass),
    };

    // The equivalent line number in the progressive (non-interlaced) image.
    let prog_line = row_stride * line_no + row_offset;
    // Scanlines are padded up to a whole number of bytes.
    let line_bits = (width * bits_pp + 7) & !7;
    let first = prog_line * line_bits + col_offset * bits_pp;
    let last = prog_line * line_bits + width * bits_pp;

    (first..last).step_by(bits_pp * col_stride)
}
+
+/// Expands an Adam 7 pass
+pub fn expand_pass(
+ img: &mut [u8],
+ width: u32,
+ scanline: &[u8],
+ pass: u8,
+ line_no: u32,
+ bits_pp: u8,
+) {
+ let width = width as usize;
+ let line_no = line_no as usize;
+ let bits_pp = bits_pp as usize;
+
+ // pass is out of range but don't blow up
+ if pass == 0 || pass > 7 {
+ return;
+ }
+
+ let bit_indices = expand_adam7_bits(pass, width, line_no, bits_pp);
+
+ if bits_pp < 8 {
+ for (pos, px) in bit_indices.zip(subbyte_pixels(scanline, bits_pp)) {
+ let rem = 8 - pos % 8 - bits_pp;
+ img[pos / 8] |= px << rem as u8;
+ }
+ } else {
+ let bytes_pp = bits_pp / 8;
+
+ for (bitpos, px) in bit_indices.zip(scanline.chunks(bytes_pp)) {
+ for (offset, val) in px.iter().enumerate() {
+ img[bitpos / 8 + offset] = *val;
+ }
+ }
+ }
+}
+
// Checks the (pass, line, width) triples produced for a 4x4 interlaced image.
#[test]
fn test_adam7() {
    /*
        1646
        7777
        5656
        7777
    */
    let it = Adam7Iterator::new(4, 4);
    let passes: Vec<_> = it.collect();
    assert_eq!(
        &*passes,
        &[
            (1, 0, 1),
            (4, 0, 1),
            (5, 0, 2),
            (6, 0, 2),
            (6, 1, 2),
            (7, 0, 4),
            (7, 1, 4)
        ]
    );
}
+
// Checks 1-bit sample extraction: each byte 0b10101010 yields alternating 1/0.
#[test]
fn test_subbyte_pixels() {
    let scanline = &[0b10101010, 0b10101010];

    let pixels = subbyte_pixels(scanline, 1).collect::<Vec<_>>();
    assert_eq!(pixels.len(), 16);
    assert_eq!(pixels, [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]);
}
+
// Checks the bit positions produced for all 7 Adam7 passes against
// closed-form (offset, step, count) expectations for a 32-pixel-wide image.
#[test]
fn test_expand_adam7_bits() {
    let width = 32;
    let bits_pp = 1;

    let expected = |offset: usize, step: usize, count: usize| {
        (0..count)
            .map(move |i| step * i + offset)
            .collect::<Vec<_>>()
    };

    for line_no in 0..8 {
        let start = 8 * line_no * width;

        assert_eq!(
            expand_adam7_bits(1, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 8, 4)
        );

        let start = start + 4;

        assert_eq!(
            expand_adam7_bits(2, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 8, 4)
        );

        let start = (8 * line_no + 4) as usize * width as usize;

        assert_eq!(
            expand_adam7_bits(3, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 4, 8)
        );
    }

    for line_no in 0..16 {
        let start = 4 * line_no * width + 2;

        assert_eq!(
            expand_adam7_bits(4, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 4, 8)
        );

        let start = (4 * line_no + 2) * width;

        assert_eq!(
            expand_adam7_bits(5, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 2, 16)
        )
    }

    for line_no in 0..32 {
        let start = 2 * line_no * width + 1;

        assert_eq!(
            expand_adam7_bits(6, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 2, 16),
            "line_no: {}",
            line_no
        );

        let start = (2 * line_no + 1) * width;

        assert_eq!(
            expand_adam7_bits(7, width, line_no, bits_pp).collect::<Vec<_>>(),
            expected(start, 1, 32)
        );
    }
}
+
// Incrementally expands every pass of an 8x8 1-bit image and checks the
// accumulated image buffer after each scanline.
#[test]
fn test_expand_pass_subbyte() {
    let mut img = [0u8; 8];
    let width = 8;
    let bits_pp = 1;

    expand_pass(&mut img, width, &[0b10000000], 1, 0, bits_pp);
    assert_eq!(img, [0b10000000u8, 0, 0, 0, 0, 0, 0, 0]);

    expand_pass(&mut img, width, &[0b10000000], 2, 0, bits_pp);
    assert_eq!(img, [0b10001000u8, 0, 0, 0, 0, 0, 0, 0]);

    expand_pass(&mut img, width, &[0b11000000], 3, 0, bits_pp);
    assert_eq!(img, [0b10001000u8, 0, 0, 0, 0b10001000, 0, 0, 0]);

    expand_pass(&mut img, width, &[0b11000000], 4, 0, bits_pp);
    assert_eq!(img, [0b10101010u8, 0, 0, 0, 0b10001000, 0, 0, 0]);

    expand_pass(&mut img, width, &[0b11000000], 4, 1, bits_pp);
    assert_eq!(img, [0b10101010u8, 0, 0, 0, 0b10101010, 0, 0, 0]);

    expand_pass(&mut img, width, &[0b11110000], 5, 0, bits_pp);
    assert_eq!(img, [0b10101010u8, 0, 0b10101010, 0, 0b10101010, 0, 0, 0]);

    expand_pass(&mut img, width, &[0b11110000], 5, 1, bits_pp);
    assert_eq!(
        img,
        [0b10101010u8, 0, 0b10101010, 0, 0b10101010, 0, 0b10101010, 0]
    );

    expand_pass(&mut img, width, &[0b11110000], 6, 0, bits_pp);
    assert_eq!(
        img,
        [0b11111111u8, 0, 0b10101010, 0, 0b10101010, 0, 0b10101010, 0]
    );

    expand_pass(&mut img, width, &[0b11110000], 6, 1, bits_pp);
    assert_eq!(
        img,
        [0b11111111u8, 0, 0b11111111, 0, 0b10101010, 0, 0b10101010, 0]
    );

    expand_pass(&mut img, width, &[0b11110000], 6, 2, bits_pp);
    assert_eq!(
        img,
        [0b11111111u8, 0, 0b11111111, 0, 0b11111111, 0, 0b10101010, 0]
    );

    expand_pass(&mut img, width, &[0b11110000], 6, 3, bits_pp);
    assert_eq!(
        [0b11111111u8, 0, 0b11111111, 0, 0b11111111, 0, 0b11111111, 0],
        img
    );

    expand_pass(&mut img, width, &[0b11111111], 7, 0, bits_pp);
    assert_eq!(
        [
            0b11111111u8,
            0b11111111,
            0b11111111,
            0,
            0b11111111,
            0,
            0b11111111,
            0
        ],
        img
    );

    expand_pass(&mut img, width, &[0b11111111], 7, 1, bits_pp);
    assert_eq!(
        [
            0b11111111u8,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0,
            0b11111111,
            0
        ],
        img
    );

    expand_pass(&mut img, width, &[0b11111111], 7, 2, bits_pp);
    assert_eq!(
        [
            0b11111111u8,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0
        ],
        img
    );

    expand_pass(&mut img, width, &[0b11111111], 7, 3, bits_pp);
    assert_eq!(
        [
            0b11111111u8,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111,
            0b11111111
        ],
        img
    );
}