author     Valentin Popov <valentin@popov.link>  2024-01-08 00:21:28 +0300
committer  Valentin Popov <valentin@popov.link>  2024-01-08 00:21:28 +0300
commit     1b6a04ca5504955c571d1c97504fb45ea0befee4 (patch)
tree       7579f518b23313e8a9748a88ab6173d5e030b227 /vendor/image
parent     5ecd8cf2cba827454317368b68571df0d13d7842 (diff)
download   fparkan-1b6a04ca5504955c571d1c97504fb45ea0befee4.tar.xz
           fparkan-1b6a04ca5504955c571d1c97504fb45ea0befee4.zip
Initial vendor packages
Signed-off-by: Valentin Popov <valentin@popov.link>
Diffstat (limited to 'vendor/image')
-rw-r--r--  vendor/image/.cargo-checksum.json | 1
-rw-r--r--  vendor/image/CHANGES.md | 582
-rw-r--r--  vendor/image/Cargo.lock.msrv | 2311
-rw-r--r--  vendor/image/Cargo.toml | 188
-rw-r--r--  vendor/image/Cargo.toml.public-private-dependencies | 98
-rw-r--r--  vendor/image/LICENSE | 21
-rw-r--r--  vendor/image/README.md | 250
-rw-r--r--  vendor/image/benches/README.md | 6
-rw-r--r--  vendor/image/benches/copy_from.rs | 14
-rw-r--r--  vendor/image/benches/decode.rs | 109
-rw-r--r--  vendor/image/benches/encode.rs | 134
-rw-r--r--  vendor/image/deny.toml | 38
-rw-r--r--  vendor/image/docs/2019-04-23-memory-unsafety.md | 54
-rwxr-xr-x  vendor/image/release.sh | 24
-rw-r--r--  vendor/image/src/animation.rs | 342
-rw-r--r--  vendor/image/src/buffer.rs | 1768
-rw-r--r--  vendor/image/src/codecs/avif/decoder.rs | 177
-rw-r--r--  vendor/image/src/codecs/avif/encoder.rs | 274
-rw-r--r--  vendor/image/src/codecs/avif/mod.rs | 14
-rw-r--r--  vendor/image/src/codecs/bmp/decoder.rs | 1483
-rw-r--r--  vendor/image/src/codecs/bmp/encoder.rs | 388
-rw-r--r--  vendor/image/src/codecs/bmp/mod.rs | 14
-rw-r--r--  vendor/image/src/codecs/dds.rs | 375
-rw-r--r--  vendor/image/src/codecs/dxt.rs | 869
-rw-r--r--  vendor/image/src/codecs/farbfeld.rs | 400
-rw-r--r--  vendor/image/src/codecs/gif.rs | 606
-rw-r--r--  vendor/image/src/codecs/hdr/decoder.rs | 1033
-rw-r--r--  vendor/image/src/codecs/hdr/encoder.rs | 433
-rw-r--r--  vendor/image/src/codecs/hdr/mod.rs | 15
-rw-r--r--  vendor/image/src/codecs/ico/decoder.rs | 470
-rw-r--r--  vendor/image/src/codecs/ico/encoder.rs | 194
-rw-r--r--  vendor/image/src/codecs/ico/mod.rs | 14
-rw-r--r--  vendor/image/src/codecs/jpeg/decoder.rs | 1289
-rw-r--r--  vendor/image/src/codecs/jpeg/encoder.rs | 1074
-rw-r--r--  vendor/image/src/codecs/jpeg/entropy.rs | 63
-rw-r--r--  vendor/image/src/codecs/jpeg/mod.rs | 16
-rw-r--r--  vendor/image/src/codecs/jpeg/transform.rs | 196
-rw-r--r--  vendor/image/src/codecs/openexr.rs | 592
-rw-r--r--  vendor/image/src/codecs/png.rs | 778
-rw-r--r--  vendor/image/src/codecs/pnm/autobreak.rs | 124
-rw-r--r--  vendor/image/src/codecs/pnm/decoder.rs | 1272
-rw-r--r--  vendor/image/src/codecs/pnm/encoder.rs | 673
-rw-r--r--  vendor/image/src/codecs/pnm/header.rs | 354
-rw-r--r--  vendor/image/src/codecs/pnm/mod.rs | 184
-rw-r--r--  vendor/image/src/codecs/qoi.rs | 104
-rw-r--r--  vendor/image/src/codecs/tga/decoder.rs | 502
-rw-r--r--  vendor/image/src/codecs/tga/encoder.rs | 215
-rw-r--r--  vendor/image/src/codecs/tga/header.rs | 150
-rw-r--r--  vendor/image/src/codecs/tga/mod.rs | 17
-rw-r--r--  vendor/image/src/codecs/tiff.rs | 353
-rw-r--r--  vendor/image/src/codecs/webp/decoder.rs | 399
-rw-r--r--  vendor/image/src/codecs/webp/encoder.rs | 242
-rw-r--r--  vendor/image/src/codecs/webp/extended.rs | 839
-rw-r--r--  vendor/image/src/codecs/webp/huffman.rs | 202
-rw-r--r--  vendor/image/src/codecs/webp/loop_filter.rs | 147
-rw-r--r--  vendor/image/src/codecs/webp/lossless.rs | 783
-rw-r--r--  vendor/image/src/codecs/webp/lossless_transform.rs | 464
-rw-r--r--  vendor/image/src/codecs/webp/mod.rs | 28
-rw-r--r--  vendor/image/src/codecs/webp/transform.rs | 77
-rw-r--r--  vendor/image/src/codecs/webp/vp8.rs | 2932
-rw-r--r--  vendor/image/src/color.rs | 985
-rw-r--r--  vendor/image/src/dynimage.rs | 1353
-rw-r--r--  vendor/image/src/error.rs | 506
-rw-r--r--  vendor/image/src/flat.rs | 1735
-rw-r--r--  vendor/image/src/image.rs | 1915
-rw-r--r--  vendor/image/src/imageops/affine.rs | 410
-rw-r--r--  vendor/image/src/imageops/colorops.rs | 646
-rw-r--r--  vendor/image/src/imageops/mod.rs | 485
-rw-r--r--  vendor/image/src/imageops/sample.rs | 1228
-rw-r--r--  vendor/image/src/io/free_functions.rs | 312
-rw-r--r--  vendor/image/src/io/mod.rs | 166
-rw-r--r--  vendor/image/src/io/reader.rs | 239
-rw-r--r--  vendor/image/src/lib.rs | 310
-rw-r--r--  vendor/image/src/math/mod.rs | 6
-rw-r--r--  vendor/image/src/math/rect.rs | 12
-rw-r--r--  vendor/image/src/math/utils.rs | 123
-rw-r--r--  vendor/image/src/traits.rs | 370
-rw-r--r--  vendor/image/src/utils/mod.rs | 128
78 files changed, 37697 insertions, 0 deletions
diff --git a/vendor/image/.cargo-checksum.json b/vendor/image/.cargo-checksum.json
new file mode 100644
index 0000000..c2da09c
--- /dev/null
+++ b/vendor/image/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGES.md":"5eb61048e077e0dd2d2213c16639cb1c78f836d6a2f1131b79e9fd6a51017949","Cargo.lock.msrv":"2f08a957a73046a0527f17e76f99625d9bd127c5b29d70274ac3b60fe10f369f","Cargo.toml":"914b3eb0ba682daeb9751c04650c206b61bf4dd428f79838b5cd8e48fa867ce4","Cargo.toml.public-private-dependencies":"7679d35d69b4c0f8adfcccbe00347a8a086250fd81c80cfe019ffec875f9ddc7","LICENSE":"c7766d2e29f88a4be81b6ac6216c62d2d0918c7d3f2fc98be6fecac8f6595e60","README.md":"33691061c7bea717f00810f0373a1355772355e00195539bc6d3a585caf83ef7","benches/README.md":"87b2ea4d1cea80c1e24fc18f1222a7ee1a38f5a92e98110bb84a4827fb997b62","benches/copy_from.rs":"62570f462abe7b9765dcfb8ba4af2aab37868058de3d5b7c29180b6ed536f6b7","benches/decode.rs":"c6679ca929913d3864c97f70047ccbad7ec1e783d5c9e6bf0643fb96f87e7baf","benches/encode.rs":"73f626189a6beb1872b8dd27c5190e40af616c14e9e407ac085fb979ce15cd1c","deny.toml":"2f4b1af2ccba0115897da29ee1ed89a2a87072248c709a6873a5c7945fbc3afc","docs/2019-04-23-memory-unsafety.md":"b59a9af84bdb5efa1bc1f33c8aa22ff42590701817fb03f55ca2dd50af91bb8d","release.sh":"70f8d6272ab65f5ca80ac95e6ceeeb5041f9c183c87a1fdac2b7f2d16e0827d4","src/animation.rs":"135fdba4a866412a90831503cb4a99babe17f820b14de7607aa8226fbe2d60d2","src/buffer.rs":"5acd34ed50c4044ace9b77fe382c00790599683567160db6fa2aeaa58b377ca6","src/codecs/avif/decoder.rs":"9b2cdd2cecbedda71780a0e496ae71ac1da6933cf8ef8ee4012800100218fef1","src/codecs/avif/encoder.rs":"d1fd9f0df8665a26a06f4ae095ce4b7c9903dc1484b9f3270f4b83176224d9da","src/codecs/avif/mod.rs":"31fcc5c582aefb2db95a8d95b6f99b4e69cbeb1cef16719390a84ee8002f432e","src/codecs/bmp/decoder.rs":"a4ac82713f781a6fb8a5ebab21d54b819ea5340f64abed9c274343af77f8505f","src/codecs/bmp/encoder.rs":"e7bb614ae92f53b0e0ab0815309b95d60c77f651896d991677b72c086b3861dc","src/codecs/bmp/mod.rs":"9c86c6ebb956bd7f5ad2c41bc1b34e59957a1dc08a6ecdb61bfbcf5599eeae59","src/codecs/dds.rs":"e7a0e41f2eb8e7bfd60a5ef679fb6fb8e4946c98b1f6f177b23d69494e4f5b8c","src/codecs/dxt.rs":"39a2185bfdb5c26901bb9e8d823759ec3bae47b64ce9fa5134e548cc736fe907","src/codecs/farbfeld.rs":"5b33a903e9bbcd6f1ac79da0014fb63368aacaad44bb8737a9b27ca1688c93ba","src/codecs/gif.rs":"2b8f593078e07dd7d7c3a0cdc5bc3d1efc05c0f8d2131c7b8b8af2c17b902003","src/codecs/hdr/decoder.rs":"8d88821ab470b809c8f244fb30656885cd52af1fb38627acf42ac77365b760cb","src/codecs/hdr/encoder.rs":"076aa4077f5e07ed8cc4c8d8ccb86351c3c9b1db2a290b3ab22fffe0e34c4933","src/codecs/hdr/mod.rs":"96eb63b7e43f117cb46b8ffe705b477426dde75881f2dea63cab29ffd820e01d","src/codecs/ico/decoder.rs":"9b5ef3a461ece96a73af4b2a4833de8a7e602cd89129f8d9ed9b6ac48324cdee","src/codecs/ico/encoder.rs":"0dfd9868a93c0187ada8bb91af4592bffbe0ba7b20e9c036ea5ae4cbff6c9b5c","src/codecs/ico/mod.rs":"cd5f9c67256b2083049bb6f8c8963aedca2be8178e198f9e0851c18e6f956845","src/codecs/jpeg/decoder.rs":"507fe1cc5fc3998393eb00578d20cff9b7d34045e46df9ed20590e4d7881f075","src/codecs/jpeg/encoder.rs":"68e93e7fa4cc9bce7506df421b65eef499f97752f8e1cd2405eeb9e70a310519","src/codecs/jpeg/entropy.rs":"074975ec5b0040d6caea59f2682e328550d367688d99ee9d0b5674bc7a68682a","src/codecs/jpeg/mod.rs":"cc9fc9c6dad3135184b2f43171f601336d0b53bc75606d5f25cfa8b2f4ba35b9","src/codecs/jpeg/transform.rs":"65490ae7ce990dda33044b4c52afc1bdcfa836b474a1b6a06c611af6e53a70db","src/codecs/openexr.rs":"bee522ec1c2f05a4dcd4f89f428e464ae483eed3b63ba0b576ff95577d71ca97","src/codecs/png.rs":"8cb86fc24ce568b02470da9b7f936ff20556bbc9e7753b1fa8e6a0c367e75e5e","src/codecs/pnm/autobreak.rs":"f52a14475c13c1a360f95cd53835d8a234c9b1fa8b675442d0485c5eaa42dfc6","src/codecs/pnm/dec
oder.rs":"9ecefc99136b2485f58f856fba978a3825aba49e9c41a3ea0b50d6421fceb36e","src/codecs/pnm/encoder.rs":"dc76491d584ed33ffbf2b54847503d0cfab96eb279a01e7f4004c83a3a8ce430","src/codecs/pnm/header.rs":"1479bceb32a4edf761b6ed75d392e915e9667ed81c157f244ba0b81436ba79f7","src/codecs/pnm/mod.rs":"9106f90a9bda10d97bf3393189a88f4f6dac27172dac21af0e8e514ac9127867","src/codecs/qoi.rs":"a1c799caae19ec68316e0cd44a948e8d19d1c4555f1035340565559b085c9c54","src/codecs/tga/decoder.rs":"83ad95260579dcfa63ac9e778f32090fd267b6b5961e768d33197800fa215ec8","src/codecs/tga/encoder.rs":"d294fd562122e6816365330074be66fb8f248b9226ae4e09f3d30b5a9c26a64d","src/codecs/tga/header.rs":"5a23bb191885b0d17b207f171d6f1804f653e528a330deed2ee449780b1da2b8","src/codecs/tga/mod.rs":"e25e77180883c56657097d851427d4f1f3c2add5a9eec24e24bc42d65953b0df","src/codecs/tiff.rs":"da6dd7cfc116099b6128b4f94acf4489eb4d7b2b91f25e8f6dee52fd38a9a511","src/codecs/webp/decoder.rs":"e1235ac4f8d566ef15e1f90b7d1a4f72525a07946fa64fff2ae59cfa376a9e4c","src/codecs/webp/encoder.rs":"94b6d6be50b5d0eb695df5ae37fae4b49b54c066e7b014c7c26d2f764595008b","src/codecs/webp/extended.rs":"24e9abc8a91df6f99a86aabcb1a31c8d1d892823a6574be9484782e77f873462","src/codecs/webp/huffman.rs":"b1b3dadab0d43147d58e03c69ac683d59da705f08354e66e723efb5a86bb18d0","src/codecs/webp/loop_filter.rs":"58b5291a1a9a574d4f43c912b8f9ce965ac246668f98ea0202aaa3b039a4c020","src/codecs/webp/lossless.rs":"e7af39797d1d3d849ec323336ab1d9997bd937a60a5f56a4a5897e2a3c758259","src/codecs/webp/lossless_transform.rs":"5a040607919951c9ab982d54f5c8cc909a2e2a575f5fb1742c2abdc2e325bb0e","src/codecs/webp/mod.rs":"d244a8e2d409e4f175183c3d35d631153abdfb3f6f2d7b2739087649745dd45c","src/codecs/webp/transform.rs":"26e747e1bff0a8b88125ffc6d2a6e27ec2fcd1246a7d0e66c49f58ee80cc4847","src/codecs/webp/vp8.rs":"781c658783d8a62c6efba519b0b8fd0dcd99f2b507c98bf671d1502bf2107640","src/color.rs":"dc23f4bc3d5b08a2b29a5df12bff4962b4337530e383eb057a48f62c16b8c308","src/dynimage.rs":"e1bf872bd9db37b2cd8e2bd341fa55b50bb84829d4d3656afd9687582827cda0","src/error.rs":"2be92e707fd535bd4aa6db4c76805c0fbd2aedaf73de8c06949d702fffc91e1e","src/flat.rs":"87ab0776d2e0396d9832b9949a97c3be64a3e1eb8f458beaf358f89237d04433","src/image.rs":"f137a8af99a393d9e82172d4d4d61d4d72f101b81a17f770804fc82d754cdc76","src/imageops/affine.rs":"f5627e435ff244a823b140fa57aa79581b1f35e5886ff7390048a70cbd2ee080","src/imageops/colorops.rs":"f4eb97022e3e98c6c876c2fe4771825f3fb6362017160d9426bc153d4ac8b35f","src/imageops/mod.rs":"75632984f1a620f9866f1707f18510b71d6a326c6f1a0c976396c42b32f16963","src/imageops/sample.rs":"a6425d3a3ab1705c268933d235a5b21f77626e28a319aadbb072e3d68e71b0cc","src/io/free_functions.rs":"17b0778ff8e1f2920f50d5bf0fea90a1e853bbb75834af5b40400f039aa29deb","src/io/mod.rs":"324143ab04327106d2bcc6cef0474370f2487b217838894333ecac9fffec4669","src/io/reader.rs":"768e219db06c940f4ff7314dc2e9e78308244a49f3643ba64ad503a6d0cbba2f","src/lib.rs":"d2d6efb5f1d5ece2ec16199391d9b560a56b5d62529b02b2f677446a6c980dfd","src/math/mod.rs":"2ee5ea1d5187cbd6e106900f67bd515d0b276e9962e9cda1a2203d536e0052e5","src/math/rect.rs":"10a6f8c76988ff7583f2824be6c3fe005a171777080e3ad0cbe169c6dc574edd","src/math/utils.rs":"3b26fa11ff20b4c45d6cb436c2a7151b2c312210f139d8d51febae37c99b9c09","src/traits.rs":"2037030ca88b4095b2b0f1b1e1dd087f527ec59d1eea11e7756937d2dd462b92","src/utils/mod.rs":"34c729e6f1a1e7b72ecda7271c2fbec084edd4ced89f1cabcab6826e8634c038"},"package":"6f3dfdbdd72063086ff443e297b61695500514b1e41095b6fb9a5ab48a70a711"} \ No newline at end of file
diff --git a/vendor/image/CHANGES.md b/vendor/image/CHANGES.md
new file mode 100644
index 0000000..e7f4cc9
--- /dev/null
+++ b/vendor/image/CHANGES.md
@@ -0,0 +1,582 @@
+# Release Notes
+
+## Known issues
+- Many decoders will panic on malicious input. In most cases, this is caused by
+ not enforcing memory limits, though other panics have been seen from fuzzing.
+- The color space information of pixels is not clearly communicated.
+
+## Changes
+
+### Unreleased
+
+- More convenient-to-use buffers will be added in the future. In particular,
+ improving initialization, passing of output buffers, and adding a more
+ complete representation for layouts. The plan is for these to interact with
+ the rest of the library through a byte-based interface similar to
+ `ImageDecoder`.
+ See ongoing work on [`image-canvas`](https://github.com/image-rs/canvas) if
+ you want to participate.
+
+### Version 0.24.7
+
+New features:
+- Added `{ImageBuffer, DynamicImage}::write_with_encoder` to simplify writing
+  images with custom settings (see the sketch after this list).
+- Expose ICC profiles stored in tiff and webp files.
+- Added option to set the background color of animated webp images.
+- New methods for sampling and interpolation of `GenericImageView`s
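+
+A minimal sketch of `write_with_encoder` with a hand-configured PNG encoder;
+the buffer contents and the output path `out.png` are placeholders:
+
+```rust
+use image::codecs::png::{CompressionType, FilterType, PngEncoder};
+use image::{ImageBuffer, Rgba};
+use std::fs::File;
+use std::io::BufWriter;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // A 64x64 solid red RGBA buffer stands in for real image data.
+    let img: ImageBuffer<Rgba<u8>, Vec<u8>> =
+        ImageBuffer::from_pixel(64, 64, Rgba([255, 0, 0, 255]));
+
+    // Configure the encoder explicitly instead of relying on `save`'s defaults.
+    let writer = BufWriter::new(File::create("out.png")?);
+    let encoder =
+        PngEncoder::new_with_quality(writer, CompressionType::Best, FilterType::Adaptive);
+
+    // Hand the buffer to the pre-configured encoder.
+    img.write_with_encoder(encoder)?;
+    Ok(())
+}
+```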
+
+Bug fixes:
+- Fix panic on empty dxt.
+- Fix several panics in webp decoder.
+- Allow unknown chunks at the end of webp files.
+
+### Version 0.24.6
+
+- Add support for QOI.
+- ImageDecoders now expose ICC profiles on supported formats (see the sketch
+  after this list).
+- Add support for BMPs without a file header.
+- Improved AVIF encoder.
+- WebP decoding fixes.
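+
+A minimal sketch of reading an embedded ICC profile through
+`ImageDecoder::icc_profile`, here with the PNG decoder (the helper name and
+path handling are illustrative; `None` is returned when no profile is stored):
+
+```rust
+use image::codecs::png::PngDecoder;
+use image::ImageDecoder;
+use std::fs::File;
+use std::io::BufReader;
+
+fn png_icc_profile(path: &str) -> image::ImageResult<Option<Vec<u8>>> {
+    // Other decoders expose the profile the same way; PNG is just one example.
+    let mut decoder = PngDecoder::new(BufReader::new(File::open(path)?))?;
+    Ok(decoder.icc_profile())
+}
+```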
+
+### Version 0.24.5
+
+Structural changes:
+- Increased the minimum supported Rust version (MSRV) to 1.61.
+- Increased the version requirement for the `tiff` crate to 0.8.0.
+- Increased the version requirement for the `jpeg` crate to 0.3.0.
+
+Bug fixes:
+- The `as_rgb32f` function of `DynamicImage` is now correctly documented.
+- Fixed a crash when decoding ICO images. Added a regression test.
+- Fixed a panic when transforming webp images. Added a regression test.
+- Added a check to prevent integer overflow when calculating file size for BMP
+ images. The missing check could panic in debug mode or else set an incorrect
+ file size in release mode.
+- Upgraded the PNG image encoder to use the newer `PngEncoder::write_image`
+ instead of the deprecated `PngEncoder::encode` which did not account for byte
+ order and could result in images with incorrect colors.
+- Fixed `InsufficientMemory` error when trying to decode a PNG image.
+- Fix warnings and CI issues.
+- Typos and links in the documentation have been corrected.
+
+Performance:
+- Added check for dynamic image dimensions before resizing. This improves
+ performance in cases where the image does not need to be resized or has
+ already been resized.
+
+### Version 0.24.4
+
+New Features:
+- Encoding for `webp` is now available with the native library. This needs to
+  be activated explicitly with the `webp-encoder` feature.
+- `exr` decoding has gained basic limit support.
+
+Bug fixes:
+- The `Iterator::size_hint` implementation of pixel iterators has been fixed to
+ return the current length indicated by its `ExactSizeIterator` hint.
+- Typos and bad references in the documentation have been removed.
+
+Performance:
+- `ImageBuffer::get_pixel{,_mut}` is now marked inline.
+- `resize` now short-circuits when image dimensions are unchanged.
+
+### Version 0.24.3
+
+New Features:
+- `TiffDecoder` now supports setting resource limits.
+
+Bug fixes:
+- Fix compile issues on little endian systems.
+- Various panics discovered by fuzzing.
+
+### Version 0.24.2
+
+Structural changes:
+- CI now runs `cargo-deny`, checking dependent crates against an OSS license
+  list and RUSTSEC advisories.
+
+New Features:
+- The WebP decoder recognizes and decodes images with `VP8X` header.
+- The DDS decoder recognizes and decodes images with `DX10` headers.
+
+Bug fixes:
+- Calling `DynamicImage`/`ImageBuffer`'s methods `write_to` and `save` will now
+ work properly even if the backing container is larger than the image layout
+ requires. Only the relevant slice of pixel data is passed to the encoder.
+- Fixed an OOM-panic caused by malformed images in the `gif` decoder.
+
+### Version 0.24.1
+
+Bug Fixes:
+- ImageBuffer::get_pixel_checked would sometimes return the incorrect pixel.
+- PNG encoding would sometimes not recognize unsupported color types.
+
+### Version 0.24.0
+
+Breaking changes
+
+Structural changes:
+- Minimum Rust version is now `1.56` and may change in minor versions until
+  further notice. It is now tracked in the library's `Cargo.toml` by the
+  standard `[package.rust-version]` field. Note: this applies _to the
+ library itself_. You may need different version resolutions for dependencies
+ when using a non-stable version of Rust.
+- The `math::utils::{nq, utils}` modules have been removed. These are better
+ served through the `color_quant` crate and the standard library respectively.
+- All codecs are now available through `image::codecs`, no longer top-level.
+- `ExtendedColorType` and `DynamicImage` have been made `#[non_exhaustive]`,
+ providing more methods instead of exhaustive matching.
+- Reading images through the generic `io::Reader`, as well as the generic
+  convenience interfaces, now requires the underlying reader to be `BufRead +
+  Seek`. This allows more efficient support for more formats (see the sketch
+  after this list). Similarly, writing now requires writers to be `Write +
+  Seek`.
+- The `Bgra*` variants of buffers, which were only half-supported, have been
+ removed. The owning buffer types `ImageBuffer` and `DynamicImage`
+ fundamentally already make a choice in supported pixel representations. This
+ allows for more consistent internal behavior. Callers are expected to convert
+ formats when using those buffers, which they are required to do in any case
+ already, and which is routinely performed by decoders.
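+
+A minimal sketch of the `BufRead + Seek` requirement, decoding from an
+in-memory slice through `Cursor` (the helper name is illustrative):
+
+```rust
+use image::io::Reader;
+use std::io::Cursor;
+
+fn decode_from_memory(bytes: &[u8]) -> image::ImageResult<image::DynamicImage> {
+    // `Cursor<&[u8]>` implements both `BufRead` and `Seek`, which the reader
+    // interface now requires of its underlying reader.
+    Reader::new(Cursor::new(bytes))
+        .with_guessed_format()?
+        .decode()
+}
+```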
+
+Trait reworks:
+- The `Pixel` trait is no longer implemented quite as liberally for structs
+  defined in the crate. Instead, it is now restricted to a set of known
+  channel types, which ensures accuracy in computations involving those
+  channels.
+- The `ImageDecoderExt` trait has been renamed to `ImageDecoderRect`, according
+ to its actual functionality.
+- The `Pixel` trait and its `Subpixel` field no longer require (or provide) a
+ `'static` lifetime bound.
+- The `Pixel` trait no longer requires specifying an associated, constant
+ `ColorType`. This was of little relevance to computation but made it much
+ harder to implement and extend correctly. Instead, the _private_
+ `PixelWithColorType` extension is added for interfaces that require a
+ properly known variant.
+- Reworked how `SubImage` interacts with the `GenericImage` trait. It is now a
+ default implementation. Note that `SubImage` now has _inherent_ methods that
+  avoid double-indirection; the trait's methods will no longer avoid this.
+- The `Primitive` trait now requires implementations to provide a minimum and
+ maximum logical bound for the purpose of converting to other primitive
+ representations.
+
+Additions
+
+Image formats:
+- Reading lossless WebP is now supported.
+- The OpenEXR format is now supported.
+- The `jpeg` decoder has been upgraded to Lossless JPEG.
+- The `AvifEncoder` now correctly handles alpha-less images. Some additional
+ color formats are converted to RGBA as well.
+- The `Bmp` codec now decodes more valid images. It can decode a raw image
+ without performing the palette mapping. It provides a method to access the
+ palette. The encoder provides the inverse capabilities.
+- `Tiff` is now an output format.
+
+Buffers and Operations:
+- The channel / primitive type `f32` is now supported. Currently only the
+ OpenEXR codec makes full use of it but this is expected to change.
+- `ImageBuffer::{get_pixel_checked, get_pixel_mut_checked}` provide panic-free
+ access to pixels and channels by returning `Option<&P>` and `Option<&mut P>`.
+- `ImageBuffer::write_to` has been added, encoding the buffer to a writer. This
+ method already existed on `DynamicImage`.
+- `DynamicImage` now implements `From<_>` for all supported buffer types.
+- `DynamicImage` now implements `Default`, an empty `Rgba8` image.
+- `imageops::overlay` now takes coordinates as `i64` (see the sketch after
+  this list).
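+
+A minimal sketch of the `i64` overlay coordinates (the helper and its buffers
+are placeholders):
+
+```rust
+use image::{imageops, RgbaImage};
+
+fn paste_with_offset(base: &mut RgbaImage, layer: &RgbaImage) {
+    // Negative coordinates are allowed: the layer is clipped against the base
+    // instead of panicking.
+    imageops::overlay(base, layer, -16, 32);
+}
+```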
+
+Limits:
+- Added `Limits` and `LimitSupport`, utilized in `io::Reader`. These can be
+ configured for rudimentary protection against resource exhaustion (images
+ pretending to require a very large buffer). These types are not yet
+ exhaustive by design, and more and stricter limits may be added in the
+ future.
+- Decoders that do provide inherent support for limits, or reserve a
+  significant amount of internal memory, are urged to implement the
+  `set_limits` extension to `ImageDecoder`. Some strict limits are opt-in,
+  which may cause decoding to fail if not supported (see the sketch after
+  this list).
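+
+A minimal sketch of configuring limits on `io::Reader`; the numeric bounds are
+arbitrary examples:
+
+```rust
+use image::io::{Limits, Reader};
+
+fn open_with_limits(path: &str) -> image::ImageResult<image::DynamicImage> {
+    let mut limits = Limits::default();
+    limits.max_image_width = Some(8_192);
+    limits.max_image_height = Some(8_192);
+    limits.max_alloc = Some(256 * 1024 * 1024); // 256 MiB of decoder allocations
+
+    let mut reader = Reader::open(path)?;
+    reader.limits(limits);
+    reader.decode()
+}
+```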
+
+Miscellaneous:
+- `PNMSubtype` has been renamed to `PnmSubtype`, in line with Rust's naming
+  conventions.
+- Several incorrectly capitalized `PNM*` aliases have been removed.
+- Several `enum` types that had previously used a hidden variant now use the
+ official `#[non_exhaustive]` attribute instead.
+
+### Version 0.23.14
+
+- Unified gif blending in different decode methods, fixing out-of-bounds checks
+ in a number of weirdly positioned frames.
+- Hardened TGA decoder against a number of malicious inputs.
+- Fix forward incompatible usage of the panic macro.
+- Fix load_rect for gif reaching `unreachable!()` code.
+
+- Added `ExtendedColorType::A8`.
+- Allow TGA to load alpha-only images.
+- Optimized load_rect to avoid unnecessary seeks.
+
+### Version 0.23.13
+
+- Fix an inconsistency in supported formats of different methods for encoding
+ an image.
+- Fix `thumbnail` choosing an empty image. It now always prefers non-empty image
+ dimensions.
+- Fix integer overflow in calculating required bytes for decoded image buffers
+ for farbfeld, hdr, and pnm decoders. These will now error early.
+- Fix a panic decoding certain `jpeg` images without frames or metadata.
+- Optimized the `jpeg` encoder.
+- Optimized `GenericImage::copy_from` default impl in various cases.
+
+- Add `avif` decoders. They must be enabled explicitly and are not covered by
+  our usual MSRV policy of Rust 1.34. Instead, only the latest stable is
+  supported.
+- Add `ImageFormat::{can_read, can_write}`
+- Add `Frame::buffer_mut`
+- Add speed and quality options on `avif` encoder.
+- Add speed parameter to `gif` encoder.
+- Expose control over sequence repeat to the `gif` encoder.
+- Add `{contrast,brighten,huerotate}_in_place` functions in imageproc.
+
+- Relax `Default` impl of `ImageBuffer`, removing the bound on the color type.
+- Derive Debug, Hash, PartialEq, Eq for DynamicImage
+
+### Version 0.23.12
+
+- Fix a soundness issue affecting the impls of `Pixel::from_slice_mut`. This
+ would previously reborrow the mutable input reference as a shared one but
+ then proceed to construct the mutable result reference from it. While UB
+ according to Rust's memory model, we're fairly certain that no miscompilation
+ can happen with the LLVM codegen in practice.
+ See 5cbe1e6767d11aff3f14c7ad69a06b04e8d583c7 for more details.
+- Fix `imageops::blur` panicking when `sigma = 0.0`. It now defaults to `1.0`,
+  as do all negative values.
+- Fix re-exporting `png::{CompressionType, FilterType}` to maintain SemVer
+ compatibility with the `0.23` releases.
+
+- Add `ImageFormat::from_extension`
+- Add copyless DynamicImage to byte slice/vec conversion.
+- Add bit-depth specific `into_` and `to_` DynamicImage conversion methods
+  (see the sketch after this list).
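+
+A minimal sketch of the copyless byte conversions (the helper name is
+illustrative):
+
+```rust
+use image::DynamicImage;
+
+fn rgba_bytes(img: DynamicImage) -> Vec<u8> {
+    // `as_bytes` borrows the raw samples; `into_rgba8` converts (or simply
+    // moves, if the image is already RGBA8) and `into_raw` yields the Vec.
+    let _borrowed: &[u8] = img.as_bytes();
+    img.into_rgba8().into_raw()
+}
+```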
+
+
+### Version 0.23.11
+
+- The `NeuQuant` implementation is now supplied by `color_quant`. Use of the
+ type defined by this library is discouraged.
+- The `jpeg` decoder can now downscale images during decoding by factors of
+  1, 2, 4, or 8.
+- Optimized jpeg encoding by ~5-15%.
+- Deprecated the `clamp` function. Use `num-traits` instead.
+- The ICO decoder now accepts an empty mask.
+- Fixed an overflow in ICO mask decoding potentially leading to panic.
+- Added `ImageOutputFormat` for `AVIF`
+- Updated `tiff` to `0.6` with lzw performance improvements.
+
+### Version 0.23.10
+
+- Added AVIF encoding capabilities using the `ravif` crate. Please note that
+ the feature targets the latest stable compiler and is not enabled by default.
+- Added `ImageBuffer::as_raw` to inspect the underlying container.
+- Updated `gif` to `0.11` with large performance improvements.
+
+### Version 0.23.9
+
+- Introduced correctly capitalized aliases for some scream case types
+- Introduced `imageops::{vertical_gradient, horizontal_gradient}` for writing
+  simple color gradients into an image (see the sketch after this list).
+- Sped up methods iterating over `Pixels`, `PixelsMut`, etc. by using exact
+ chunks internally. This should auto-vectorize `ImageBuffer::from_pixel`.
+- Adjusted `Clone` impls of iterators to not require a bound on the pixel.
+- Add `Debug` impls for iterators where the pixel's channel implements it.
+- Add comparison impls for `FilterType`
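+
+A minimal sketch of the gradient helpers (the colors and helper name are
+arbitrary):
+
+```rust
+use image::{imageops, Rgb, RgbImage};
+
+fn gradient_background(width: u32, height: u32) -> RgbImage {
+    let mut img = RgbImage::new(width, height);
+    // Fill top-to-bottom from dark to bright blue.
+    imageops::vertical_gradient(&mut img, &Rgb([0u8, 0, 64]), &Rgb([0u8, 0, 255]));
+    img
+}
+```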
+
+### Version 0.23.8
+
+- `flat::Error` now implements the standard `Error` trait
+- The type parameter of `Map` has been relaxed to `?Sized`
+- Added the `imageops::tile` function that repeats one image across another
+
+### Version 0.23.7
+
+- Iterators over immutable pixels of `ImageBuffer` can now be cloned
+- Added a `tga` encoder
+- Added `ColorMap::lookup`, an optional reversal of the map
+- The `EncodableLayout` trait is now exported
+
+### Version 0.23.6
+
+- Added `png::ApngDecoder`, an adapter decoding the animation in an APNG.
+- Fixed a bug in `jpeg` encoding that would darken output colors.
+- Added a utility constructor `FlatSamples::with_monocolor`.
+- Added `ImageBuffer::as_flat_samples_mut` which is a mutable variant of the
+ existing ffi-helper `ImageBuffer::as_flat_samples`.
+
+### Version 0.23.5
+
+- The `png` encoder now allows configuring compression and filter type. The
+ output is not part of stability guarantees, see its documentation.
+- The `jpeg` encoder now accepts any implementor of `GenericImageView`. This
+ allows images that are only partially present in memory to be encoded.
+- `ImageBuffer` now derives `Hash`, `PartialEq`, `Eq`.
+- The `Pixels`/`PixelsMut` iterator no longer yields out-of-bounds pixels when
+ the underlying buffer is larger than required.
+- The `pbm` decoder correctly decodes ascii data again, fixing a regression
+ where it would use the sample value `1` as white instead of `255`.
+- Fix encoding of RGBA data in `gif` frames.
+- Constructing a `Rows`/`RowsMut` iterator no longer panics when the image has
+ a width or height of `0`.
+
+### Version 0.23.4
+
+- Improved the performance of decoding animated gifs
+- Added `crop_imm` which functions like `crop` but on a shared reference
+- The gif `DisposalMethod::Any` is treated as `Keep`, consistent with browsers
+- Most errors no longer allocate a string; instead they implement Display.
+- Add some implementations of `Error::source`
+
+### Version 0.23.3
+
+- Added `ColorType::has_alpha` to facilitate lossless conversion
+- Recognize extended WebP formats for decoding
+- Added decoding and encoding for the `farbfeld` format
+- Export named iterator types created from various `ImageBuffer` methods
+- The jpeg encoder now errors for images larger than 65536 pixels, fixing a panic
+
+### Version 0.23.2
+
+- The dependency on `jpeg-decoder` now reflects minimum requirements.
+
+### Version 0.23.1
+
+- Fix cmyk_to_rgb (jpeg) causing off by one rounding errors.
+- A number of performance improvements for jpeg (encode and decode), bmp, vp8
+- Added more details to errors for many formats
+
+### Version 0.23.0
+
+This major release intends to improve the interface with regard to handling of
+color format data and errors, for both decoding and encoding. This necessitated
+many breaking changes anyway, so the opportunity was also used to improve
+compliance with the interface guidelines, such as outstanding renamings.
+
+It is not yet perfect with regard to color spaces, but it was designed mainly
+as an improvement over the previous interface's handling of in-memory color
+formats. We'll get to color spaces in a later major version.
+
+- Heavily reworked `ColorType`:
+ - This type is now used for denoting formats for which we support operations
+ on buffers in these memory representations. Particularly, all channels in
+ pixel types are assumed to be an integer number of bytes (In terms of the
+ Rust type system, these are `Sized` and one can crate slices of channel
+ values).
+ - An `ExtendedColorType` is used to express more generic color formats for
+ which the library has limited support but can be converted/scaled/mapped
+ into a `ColorType` buffer. This operation might be fallible but, for
+ example, includes sources with 1/2/4-bit components.
+ - Both types are non-exhaustive to add more formats in a minor release.
+ - A work-in-progress (#1085) will further separate the color model from the
+ specific channel instantiation, e.g. both `8-bit RGB` and `16-bit BGR`
+ are instantiations of `RGB` color model.
+- Heavily rework `ImageError`:
+ - The top-level enum type now serves to differentiate cause with multiple
+ opaque representations for the actual error. These are no longer simple
+ Strings but contains useful types. Third-party decoders that have no
+ variant in `ImageFormat` have also been considered.
+ - Support for `Error::source` that can be downcast to an error from a
+ matching version of the underlying decoders. Note that the version is not
+ part of the stable interface guarantees, this should not be relied upon
+ for correctness and only be used as an optimization.
+ - Added image format indications to errors.
+  - The error values produced by decoders will be upgraded incrementally. See
+ something that still produces plain old String messages? Feel free to
+ send a PR.
+- Reworked the `ImageDecoder` trait:
+ - `read_image` takes an output buffer argument instead of allocating all
+ memory on its own.
+ - The return type of `dimensions` now aligns with `GenericImage` sizes.
+ - The `colortype` method was renamed to `color_type` for conformity.
+- The enums `ColorType`, `DynamicImage`, `imageops::FilterType`, `ImageFormat`
+ no longer re-export all of their variants in the top-level of the crate. This
+ removes the growing pollution in the documentation and usage. You can still
+ insert the equivalent statement on your own:
+ `use image::ImageFormat::{self, *};`
+- The result of `encode` operations is now uniformly an `ImageResult<()>`.
+- Removed public converters from some `tiff`, `png`, `gif`, `jpeg` types,
+ mainly such as error conversion. This allows upgrading the dependency across
+ major versions without a major release in `image` itself.
+- On that note, the public interface of `gif` encoder no longer takes a
+  `gif::Frame` but rather deals with `image::Frame` only. If you need to
+  specify the disposal method, transparency, etc., then you may want to hold
+  off on upgrading (but see the next change).
+- The `gif` encoder now errors on invalid dimensions or unsupported color
+ formats. It would previously silently reinterpret bytes as RGB/RGBA.
+- The capitalization of `ImageFormat` and other enum variants has been
+ adjusted to adhere to the API guidelines. These variants are now spelled
+ `Gif`, `Png`, etc. The same change has been made to the name of types such as
+ `HDRDecoder`.
+- The `Progress` type has finally received public accessor methods. Strange that
+ no one reported them missing.
+- Introduced `PixelDensity` and `PixelDensityUnit` to store DPI information in
+  formats that support encoding this form of metadata (e.g. in `jpeg`).
+
+### Version 0.22.5
+
+- Added `GenericImage::copy_within`, specialized for `ImageBuffer`
+- Fixed decoding of interlaced `gif` files
+- Prepare for future compatibility of array `IntoIterator` in example code
+
+### Version 0.22.4
+
+- Added in-place variants for flip and rotate operations.
+- The bmp encoder now checks if dimensions are valid for the format. It would
+ previously write a subset or panic.
+- Removed deprecated implementations of `Error::description`
+- Added `DynamicImage::into_*` which convert without an additional allocation.
+- The PNG encoder errors on unsupported color types where it had previously
+ silently swapped color channels.
+- Enabled saving images as `gif` with `save_buffer`.
+
+### Version 0.22.3
+
+- Added a new module `io` containing a configurable `Reader`. It can replace
+  the assortment of free functions `image::{load_*, open, image_dimensions}`
+  while enabling new combinations such as `open` but with the format deduced
+  from content instead of the file path (see the sketch after this list).
+- Fixed `const_err` lint in the macro expanded implementations of `Pixel`. This
+ can only affect your crate if `image` is used as a path dependency.
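+
+A minimal sketch of the `io::Reader` flow with the format deduced from content
+(the helper name is illustrative):
+
+```rust
+use image::io::Reader;
+
+fn open_by_content(path: &str) -> image::ImageResult<image::DynamicImage> {
+    // `with_guessed_format` inspects the leading bytes instead of trusting
+    // the file extension.
+    Reader::open(path)?
+        .with_guessed_format()?
+        .decode()
+}
+```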
+
+### Version 0.22.2
+
+- Undeprecate `unsafe` trait accessors. Further evaluation showed that their
+ deprecation should be delayed until trait `impl` specialization is available.
+- Fixed magic bytes used to detect `tiff` images.
+- Added `DynamicImage::from_decoder`.
+- Fixed a bug in the `PNGReader` that caused an infinite loop.
+- Added `ColorType::{bits_per_pixel, num_components}`.
+- Added `ImageFormat::from_path`, same format deduction as the `open` method.
+- Fixed a panic in the gif decoder.
+- Aligned background color handling of `gif` to web browser implementations.
+- Fixed handling of partial frames in animated `gif`.
+- Removed unused direct `lzw` dependency, an indirect dependency in `tiff`.
+
+### Version 0.22.1
+
+- Fixed build with no features enabled
+
+### Version 0.22
+
+- The required Rust version is now `1.34.2`.
+- Note the website and blog: [image-rs.org][1] and [blog.image-rs.org][2]
+- `PixelMut` now only on `ImageBuffer` and removed from `GenericImage`
+ interface. Prefer iterating manually in the generic case.
+- Replaced an unsafe interface in the hdr decoder with a safe variant.
+- Support loading 2-bit BMP images
+- Add method to save an `ImageBuffer`/`DynamicImage` with specified format
+- Update tiff to `0.3` with a writer
+- Update png to `0.15`, fixes reading of interlaced sub-byte pixels
+- Always use custom struct for `ImageDecoder::Reader`
+- Added `apply_without_alpha` and `map_without_alpha` to `Pixel` trait
+- Pixel information now with associated constants instead of static methods
+- Changed color structs to tuple types with single component. Improves
+ ergonomics of destructuring assignment and construction.
+- Add lifetime parameter on `ImageDecoder` trait.
+- Remove unnecessary `'static` bounds on affine operations
+- Add function to retrieve image dimensions without loading full image
+- Allow different image types in overlay and replace
+- Iterators over rows of `ImageBuffer`, mutable variants
+
+[1]: https://www.image-rs.org
+[2]: https://blog.image-rs.org
+
+### Version 0.21.2
+
+- Fixed a variety of crashes and opaque errors in webp
+- Updated the png limits to be less restrictive
+- Reworked even more `unsafe` operations into safe alternatives
+- Derived Debug on FilterType and Deref on Pixel
+- Removed a restriction on DXT to always require power of two dimensions
+- Change the encoding of RGBA in bmp using bitfields
+- Corrected various urls
+
+### Version 0.21.1
+
+- A fairly important bugfix backport
+- Fixed a potential memory safety issue in the hdr and tiff decoders, see #885
+- See [the full advisory](docs/2019-04-23-memory-unsafety.md) for an analysis
+- Fixes `ImageBuffer` index calculation for very, very large images
+- Fix some crashes while parsing specific incomplete pnm images
+- Added comprehensive fuzzing for the pam image types
+
+### Version 0.21
+
+- Updated README to use `GenericImageView`
+- Removed outdated version number from CHANGES
+- Compiles now with wasm-unknown-emscripten target
+- Restructured `ImageDecoder` trait
+- Updated README with a more colorful example for the Julia fractal
+- Use Rust 1.24.1 as minimum supported version
+- Support for loading GIF frames one at a time with `animation::Frames`
+- The TGA decoder now recognizes 32 bpp as RGBA(8)
+- Fixed `to_bgra` document comment
+- Added release test script
+- Removed unsafe code blocks several places
+- Fixed overlay overflow bug issues with documented proofs
+
+### Version 0.20
+
+- Clippy lint pass
+- Updated num-rational dependency
+- Added BGRA and BGR color types
+- Improved performance of image resizing
+- Improved PBM decoding
+- PNM P4 decoding now returns bits instead of bytes
+- Fixed move of overlapping buffers in BMP decoder
+- Fixed some document comments
+- `GenericImage` and `GenericImageView` are now object-safe
+- Moved TIFF code to its own library
+- Fixed README examples
+- Fixed ordering of interpolated parameters in TIFF decode error string
+- Thumbnail now handles upscaling
+- GIF encoding for multiple frames
+- Improved subimages API
+- Cargo fmt fixes
+
+### Version 0.19
+
+- Fixed panic when blending with alpha zero.
+- Made `save` consistent.
+- Consistent size calculation.
+- Fixed bug in `apply_with_alpha`.
+- Implemented `TGADecoder::read_scanline`.
+- Use deprecated attribute for `pixels_mut`.
+- Fixed bug in JPEG grayscale encoding.
+- Fixed multi image TIFF.
+- PNM encoder.
+- Added `#[derive(Hash)]` for `ColorType`.
+- Use `num-derive` for `#[derive(FromPrimitive)]`.
+- Added `into_frames` implementation for GIF.
+- Made rayon an optional dependency.
+- Fixed issue where resizing image did not give exact width/height.
+- Improved downscale.
+- Added a way to expose options when saving files.
+- Fixed some compiler warnings.
+- Switched to lzw crate instead of using built-in version.
+- Added `ExactSizeIterator` implementations to buffer structs.
+- Added `resize_to_fill` method.
+- DXT encoding support.
+- Applied clippy suggestions.
+
+### Version 0.4
+ - Various improvements.
+ - Additional supported image formats (BMP and ICO).
+ - GIF and PNG codec moved into separate crates.
+
+### Version 0.3
+ - Replace `std::old_io` with `std::io`.
+
+### Version 0.2
+ - Support for interlaced PNG images.
+ - Writing support for GIF images (full color and paletted).
+ - Color quantizer that converts 32-bit images to paletted ones, including the alpha channel.
+ - Initial support for reading TGA images.
+ - Reading support for TIFF images (packbits and FAX compression not supported).
+ - Various bug fixes and improvements.
+
+### Version 0.1
+- Initial release
+- Basic reading support for png, jpeg, gif, ppm and webp.
+- Basic writing support for png and jpeg.
+- A collection of basic image processing functions like `blur` or `invert`
diff --git a/vendor/image/Cargo.lock.msrv b/vendor/image/Cargo.lock.msrv
new file mode 100644
index 0000000..1072318
--- /dev/null
+++ b/vendor/image/Cargo.lock.msrv
@@ -0,0 +1,2311 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
+[[package]]
+name = "ahash"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "version_check",
+]
+
+[[package]]
+name = "aho-corasick"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "anes"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
+
+[[package]]
+name = "ansi_term"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.71"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c7d0618f0e0b7e8ff11427422b64564d5fb0be1940354bfe2e0529b18a9d9b8"
+
+[[package]]
+name = "arbitrary"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db55d72333851e17d572bec876e390cd3b11eb1ef53ae821dd9f3b653d2b4569"
+
+[[package]]
+name = "arg_enum_proc_macro"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7c29b43ee8654590587cd033b3eca2f9c4f8cdff945ec0e6ee91ceb057d87f3"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "arrayvec"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi 0.1.19",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "av-metrics"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13638b394190295622c0d2493d0c8c39210b92c2110895bfb14c58db213c2b39"
+dependencies = [
+ "crossbeam",
+ "itertools",
+ "lab",
+ "num-traits",
+ "rayon",
+ "thiserror",
+ "v_frame",
+]
+
+[[package]]
+name = "av1-grain"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6f6ca6f0c18c02c2fbfc119df551b8aeb8a385f6d5980f1475ba0255f1e97f1e"
+dependencies = [
+ "anyhow",
+ "arrayvec",
+ "itertools",
+ "log",
+ "nom",
+ "num-rational",
+ "serde",
+ "v_frame",
+]
+
+[[package]]
+name = "avif-serialize"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "876c75a42f6364451a033496a14c44bffe41f5f4a8236f697391f11024e596d2"
+dependencies = [
+ "arrayvec",
+]
+
+[[package]]
+name = "bindgen"
+version = "0.59.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8"
+dependencies = [
+ "bitflags 1.3.2",
+ "cexpr",
+ "clang-sys",
+ "clap 2.34.0",
+ "env_logger 0.9.3",
+ "lazy_static",
+ "lazycell",
+ "log",
+ "peeking_take_while",
+ "proc-macro2",
+ "quote",
+ "regex",
+ "rustc-hash",
+ "shlex",
+ "which",
+]
+
+[[package]]
+name = "bit_field"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "bitflags"
+version = "2.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42"
+
+[[package]]
+name = "bitreader"
+version = "0.3.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f10043e4864d975e7f197f993ec4018636ad93946724b2571c4474d51845869b"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "bitstream-io"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d28070975aaf4ef1fd0bd1f29b739c06c2cdd9972e090617fb6dca3b2cb564e"
+
+[[package]]
+name = "built"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b9c056b9ed43aee5e064b683aa1ec783e19c6acec7559e3ae931b7490472fbe"
+dependencies = [
+ "cargo-lock",
+ "git2",
+]
+
+[[package]]
+name = "bumpalo"
+version = "3.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
+
+[[package]]
+name = "bytemuck"
+version = "1.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea"
+
+[[package]]
+name = "byteorder"
+version = "1.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
+
+[[package]]
+name = "cargo-lock"
+version = "8.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "031718ddb8f78aa5def78a09e90defe30151d1f6c672f937af4dd916429ed996"
+dependencies = [
+ "semver",
+ "serde",
+ "toml 0.5.11",
+ "url",
+]
+
+[[package]]
+name = "cast"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
+
+[[package]]
+name = "cc"
+version = "1.0.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
+dependencies = [
+ "jobserver",
+]
+
+[[package]]
+name = "cexpr"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"
+dependencies = [
+ "nom",
+]
+
+[[package]]
+name = "cfg-expr"
+version = "0.15.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "215c0072ecc28f92eeb0eea38ba63ddfcb65c2828c46311d646f1a3ff5f9841c"
+dependencies = [
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "ciborium"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926"
+dependencies = [
+ "ciborium-io",
+ "ciborium-ll",
+ "serde",
+]
+
+[[package]]
+name = "ciborium-io"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656"
+
+[[package]]
+name = "ciborium-ll"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b"
+dependencies = [
+ "ciborium-io",
+ "half 1.8.2",
+]
+
+[[package]]
+name = "clang-sys"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f"
+dependencies = [
+ "glob",
+ "libc",
+ "libloading",
+]
+
+[[package]]
+name = "clap"
+version = "2.34.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
+dependencies = [
+ "ansi_term",
+ "atty",
+ "bitflags 1.3.2",
+ "strsim",
+ "textwrap 0.11.0",
+ "unicode-width",
+ "vec_map",
+]
+
+[[package]]
+name = "clap"
+version = "3.2.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
+dependencies = [
+ "bitflags 1.3.2",
+ "clap_lex 0.2.4",
+ "indexmap 1.9.3",
+ "textwrap 0.16.0",
+]
+
+[[package]]
+name = "clap"
+version = "4.0.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7db700bc935f9e43e88d00b0850dae18a63773cfbec6d8e070fccf7fef89a39"
+dependencies = [
+ "bitflags 1.3.2",
+ "clap_derive",
+ "clap_lex 0.3.3",
+ "is-terminal",
+ "once_cell",
+ "termcolor",
+ "terminal_size",
+]
+
+[[package]]
+name = "clap_complete"
+version = "4.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10861370d2ba66b0f5989f83ebf35db6421713fd92351790e7fdd6c36774c56b"
+dependencies = [
+ "clap 4.0.32",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014"
+dependencies = [
+ "heck",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
+dependencies = [
+ "os_str_bytes",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "033f6b7a4acb1f358c742aaca805c939ee73b4c6209ae4318ec7aca81c42e646"
+dependencies = [
+ "os_str_bytes",
+]
+
+[[package]]
+name = "color_quant"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
+
+[[package]]
+name = "console"
+version = "0.15.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8"
+dependencies = [
+ "encode_unicode",
+ "lazy_static",
+ "libc",
+ "unicode-width",
+ "windows-sys 0.45.0",
+]
+
+[[package]]
+name = "crc32fast"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "criterion"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb"
+dependencies = [
+ "anes",
+ "atty",
+ "cast",
+ "ciborium",
+ "clap 3.2.25",
+ "criterion-plot",
+ "itertools",
+ "lazy_static",
+ "num-traits",
+ "oorandom",
+ "plotters",
+ "rayon",
+ "regex",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "tinytemplate",
+ "walkdir",
+]
+
+[[package]]
+name = "criterion-plot"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
+dependencies = [
+ "cast",
+ "itertools",
+]
+
+[[package]]
+name = "crossbeam"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c"
+dependencies = [
+ "cfg-if",
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-epoch",
+ "crossbeam-queue",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
+dependencies = [
+ "cfg-if",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7"
+dependencies = [
+ "autocfg",
+ "cfg-if",
+ "crossbeam-utils",
+ "memoffset",
+ "scopeguard",
+]
+
+[[package]]
+name = "crossbeam-queue"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "crunchy"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
+
+[[package]]
+name = "dav1d"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7284148338177cb1cd0d0cdd7bf26440f8326999063eed294aa7d77b46a7e263"
+dependencies = [
+ "dav1d-sys",
+]
+
+[[package]]
+name = "dav1d-sys"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "88e40c4c77d141a3b70113ee45a1502b9c80e24f176958d39a8361abcf30c883"
+dependencies = [
+ "bindgen",
+ "system-deps",
+]
+
+[[package]]
+name = "dcv-color-primitives"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1457f4dd8395fef9f61996b5783b82ed7b234b4b55e1843d04e07fded0538005"
+dependencies = [
+ "paste",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "either"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
+
+[[package]]
+name = "encode_unicode"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
+
+[[package]]
+name = "env_logger"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3"
+dependencies = [
+ "log",
+ "regex",
+]
+
+[[package]]
+name = "env_logger"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7"
+dependencies = [
+ "atty",
+ "humantime",
+ "log",
+ "regex",
+ "termcolor",
+]
+
+[[package]]
+name = "equivalent"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1"
+
+[[package]]
+name = "errno"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a"
+dependencies = [
+ "errno-dragonfly",
+ "libc",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "errno-dragonfly"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
+dependencies = [
+ "cc",
+ "libc",
+]
+
+[[package]]
+name = "exr"
+version = "1.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "279d3efcc55e19917fff7ab3ddd6c14afb6a90881a0078465196fe2f99d08c56"
+dependencies = [
+ "bit_field",
+ "flume",
+ "half 2.2.1",
+ "lebe",
+ "miniz_oxide",
+ "rayon-core",
+ "smallvec",
+ "zune-inflate",
+]
+
+[[package]]
+name = "fallible_collections"
+version = "0.4.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "618bf220e692a59c50e7b281149f53c3fe93e0cf0b40c050fc2af8c9ecb28505"
+dependencies = [
+ "hashbrown 0.13.2",
+]
+
+[[package]]
+name = "fdeflate"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d329bdeac514ee06249dabc27877490f17f5d371ec693360768b838e19f3ae10"
+dependencies = [
+ "simd-adler32",
+]
+
+[[package]]
+name = "fern"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9f0c14694cbd524c8720dd69b0e3179344f04ebb5f90f2e4a440c6ea3b2f1ee"
+dependencies = [
+ "log",
+]
+
+[[package]]
+name = "flate2"
+version = "1.0.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743"
+dependencies = [
+ "crc32fast",
+ "miniz_oxide",
+]
+
+[[package]]
+name = "flume"
+version = "0.10.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+ "nanorand",
+ "pin-project",
+ "spin",
+]
+
+[[package]]
+name = "form_urlencoded"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"
+dependencies = [
+ "percent-encoding",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c"
+
+[[package]]
+name = "futures-sink"
+version = "0.3.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e"
+
+[[package]]
+name = "getrandom"
+version = "0.2.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
+dependencies = [
+ "cfg-if",
+ "js-sys",
+ "libc",
+ "wasi",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "gif"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "80792593675e051cf94a4b111980da2ba60d4a83e43e0048c5693baab3977045"
+dependencies = [
+ "color_quant",
+ "weezl",
+]
+
+[[package]]
+name = "git2"
+version = "0.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2994bee4a3a6a51eb90c218523be382fd7ea09b16380b9312e9dbe955ff7c7d1"
+dependencies = [
+ "bitflags 1.3.2",
+ "libc",
+ "libgit2-sys",
+ "log",
+ "url",
+]
+
+[[package]]
+name = "glob"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
+
+[[package]]
+name = "half"
+version = "1.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
+
+[[package]]
+name = "half"
+version = "2.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02b4af3693f1b705df946e9fe5631932443781d0aabb423b62fcd4d73f6d2fd0"
+dependencies = [
+ "crunchy",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
+[[package]]
+name = "hashbrown"
+version = "0.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
+dependencies = [
+ "ahash",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
+
+[[package]]
+name = "heck"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
+
+[[package]]
+name = "humantime"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
+
+[[package]]
+name = "idna"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
+dependencies = [
+ "unicode-bidi",
+ "unicode-normalization",
+]
+
+[[package]]
+name = "image"
+version = "0.24.6"
+dependencies = [
+ "bytemuck",
+ "byteorder",
+ "color_quant",
+ "crc32fast",
+ "criterion",
+ "dav1d",
+ "dcv-color-primitives",
+ "exr",
+ "gif",
+ "glob",
+ "jpeg-decoder",
+ "mp4parse",
+ "num-complex",
+ "num-rational",
+ "num-traits",
+ "png",
+ "qoi",
+ "quickcheck",
+ "ravif",
+ "rgb",
+ "tiff",
+ "webp",
+]
+
+[[package]]
+name = "imgref"
+version = "1.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b2cf49df1085dcfb171460e4592597b84abe50d900fb83efb6e41b20fefd6c2c"
+
+[[package]]
+name = "indexmap"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
+dependencies = [
+ "autocfg",
+ "hashbrown 0.12.3",
+]
+
+[[package]]
+name = "indexmap"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
+dependencies = [
+ "equivalent",
+ "hashbrown 0.14.0",
+]
+
+[[package]]
+name = "interpolate_name"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4b35f4a811037cfdcd44c5db40678464b2d5d248fc1abeeaaa125b370d47f17"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "io-lifetimes"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
+dependencies = [
+ "hermit-abi 0.3.1",
+ "libc",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "is-terminal"
+version = "0.4.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24fddda5af7e54bf7da53067d6e802dbcc381d0a8eef629df528e3ebf68755cb"
+dependencies = [
+ "hermit-abi 0.3.1",
+ "rustix 0.38.1",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "itertools"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+dependencies = [
+ "either",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
+
+[[package]]
+name = "ivf"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fb01c64361a3a67b511439f0dcd54fa3aa5581c861a17e2ede76e46b9c5b7e2"
+dependencies = [
+ "bitstream-io",
+]
+
+[[package]]
+name = "jobserver"
+version = "0.1.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "jpeg-decoder"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bc0000e42512c92e31c2252315bda326620a4e034105e900c98ec492fa077b3e"
+dependencies = [
+ "rayon",
+]
+
+[[package]]
+name = "js-sys"
+version = "0.3.64"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
+dependencies = [
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "lab"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf36173d4167ed999940f804952e6b08197cae5ad5d572eb4db150ce8ad5d58f"
+
+[[package]]
+name = "lazy_static"
+version = "1.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
+
+[[package]]
+name = "lazycell"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
+
+[[package]]
+name = "lebe"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8"
+
+[[package]]
+name = "libc"
+version = "0.2.147"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"
+
+[[package]]
+name = "libfuzzer-sys"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcf184a4b6b274f82a5df6b357da6055d3e82272327bba281c28bbba6f1664ef"
+dependencies = [
+ "arbitrary",
+ "cc",
+]
+
+[[package]]
+name = "libgit2-sys"
+version = "0.14.2+1.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f3d95f6b51075fe9810a7ae22c7095f12b98005ab364d8544797a825ce946a4"
+dependencies = [
+ "cc",
+ "libc",
+ "libz-sys",
+ "pkg-config",
+]
+
+[[package]]
+name = "libloading"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f"
+dependencies = [
+ "cfg-if",
+ "winapi",
+]
+
+[[package]]
+name = "libwebp-sys"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "439fd1885aa28937e7edcd68d2e793cb4a22f8733460d2519fbafd2b215672bf"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "libz-sys"
+version = "1.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db"
+dependencies = [
+ "cc",
+ "libc",
+ "pkg-config",
+ "vcpkg",
+]
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0"
+
+[[package]]
+name = "lock_api"
+version = "0.4.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16"
+dependencies = [
+ "autocfg",
+ "scopeguard",
+]
+
+[[package]]
+name = "log"
+version = "0.4.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
+
+[[package]]
+name = "loop9"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a703804431e5927454bcaf2b2a162595e95db931130c2728c18d050090f69940"
+dependencies = [
+ "imgref",
+]
+
+[[package]]
+name = "maybe-rayon"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519"
+dependencies = [
+ "cfg-if",
+ "rayon",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+
+[[package]]
+name = "memoffset"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "minimal-lexical"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
+
+[[package]]
+name = "miniz_oxide"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
+dependencies = [
+ "adler",
+ "simd-adler32",
+]
+
+[[package]]
+name = "mp4parse"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "63a35203d3c6ce92d5251c77520acb2e57108c88728695aa883f70023624c570"
+dependencies = [
+ "bitreader",
+ "byteorder",
+ "fallible_collections",
+ "log",
+ "num-traits",
+ "static_assertions",
+]
+
+[[package]]
+name = "nanorand"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "nasm-rs"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fe4d98d0065f4b1daf164b3eafb11974c94662e5e2396cf03f32d0bb5c17da51"
+dependencies = [
+ "rayon",
+]
+
+[[package]]
+name = "new_debug_unreachable"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54"
+
+[[package]]
+name = "nom"
+version = "7.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
+dependencies = [
+ "memchr",
+ "minimal-lexical",
+]
+
+[[package]]
+name = "noop_proc_macro"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0676bb32a98c1a483ce53e500a81ad9c3d5b3f7c920c28c24e9cb0980d0b5bc8"
+
+[[package]]
+name = "num-bigint"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f"
+dependencies = [
+ "autocfg",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-complex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "num-derive"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "num-integer"
+version = "0.1.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
+dependencies = [
+ "autocfg",
+ "num-traits",
+]
+
+[[package]]
+name = "num-rational"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0"
+dependencies = [
+ "autocfg",
+ "num-bigint",
+ "num-integer",
+ "num-traits",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
+dependencies = [
+ "hermit-abi 0.3.1",
+ "libc",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.18.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
+
+[[package]]
+name = "oorandom"
+version = "11.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
+
+[[package]]
+name = "os_str_bytes"
+version = "6.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4d5d9eb14b174ee9aa2ef96dc2b94637a2d4b6e7cb873c7e171f0c20c6cf3eac"
+
+[[package]]
+name = "paste"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79"
+
+[[package]]
+name = "peeking_take_while"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
+
+[[package]]
+name = "percent-encoding"
+version = "2.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"
+
+[[package]]
+name = "pin-project"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e138fdd8263907a2b0e1b4e80b7e58c721126479b6e6eedfb1b402acea7b9bd"
+dependencies = [
+ "pin-project-internal",
+]
+
+[[package]]
+name = "pin-project-internal"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d1fef411b303e3e12d534fb6e7852de82da56edd937d895125821fb7c09436c7"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.22",
+]
+
+[[package]]
+name = "pkg-config"
+version = "0.3.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
+
+[[package]]
+name = "plotters"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45"
+dependencies = [
+ "num-traits",
+ "plotters-backend",
+ "plotters-svg",
+ "wasm-bindgen",
+ "web-sys",
+]
+
+[[package]]
+name = "plotters-backend"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609"
+
+[[package]]
+name = "plotters-svg"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab"
+dependencies = [
+ "plotters-backend",
+]
+
+[[package]]
+name = "png"
+version = "0.17.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59871cc5b6cce7eaccca5a802b4173377a1c2ba90654246789a8fa2334426d11"
+dependencies = [
+ "bitflags 1.3.2",
+ "crc32fast",
+ "fdeflate",
+ "flate2",
+ "miniz_oxide",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
+
+[[package]]
+name = "proc-macro-error"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
+dependencies = [
+ "proc-macro-error-attr",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.63"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b368fba921b0dce7e60f5e04ec15e565b3303972b42bcfde1d0713b881959eb"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "qoi"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f6d64c71eb498fe9eae14ce4ec935c555749aef511cca85b5568910d6e48001"
+dependencies = [
+ "bytemuck",
+]
+
+[[package]]
+name = "quick-error"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3"
+
+[[package]]
+name = "quickcheck"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6"
+dependencies = [
+ "env_logger 0.8.4",
+ "log",
+ "rand",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rand"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
+dependencies = [
+ "libc",
+ "rand_chacha",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "rav1e"
+version = "0.6.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16c383692a5e7abd9f6d1eddb1a5e0269f859392387883361bb09e5555852ec1"
+dependencies = [
+ "arbitrary",
+ "arg_enum_proc_macro",
+ "arrayvec",
+ "av-metrics",
+ "av1-grain",
+ "bitstream-io",
+ "built",
+ "cc",
+ "cfg-if",
+ "clap 4.0.32",
+ "clap_complete",
+ "console",
+ "fern",
+ "interpolate_name",
+ "itertools",
+ "ivf",
+ "libc",
+ "libfuzzer-sys",
+ "log",
+ "maybe-rayon",
+ "nasm-rs",
+ "new_debug_unreachable",
+ "nom",
+ "noop_proc_macro",
+ "num-derive",
+ "num-traits",
+ "once_cell",
+ "paste",
+ "rand",
+ "rand_chacha",
+ "rust_hawktracer",
+ "rustc_version",
+ "scan_fmt",
+ "signal-hook",
+ "simd_helpers",
+ "system-deps",
+ "thiserror",
+ "v_frame",
+ "wasm-bindgen",
+ "y4m",
+]
+
+[[package]]
+name = "ravif"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0cd36aa2bc280b60619e0c4386a73ff2ac343551dcf400168562ce08cc0c32e0"
+dependencies = [
+ "avif-serialize",
+ "imgref",
+ "loop9",
+ "quick-error",
+ "rav1e",
+ "rayon",
+ "rgb",
+]
+
+[[package]]
+name = "rayon"
+version = "1.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b"
+dependencies = [
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d"
+dependencies = [
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-utils",
+ "num_cpus",
+]
+
+[[package]]
+name = "regex"
+version = "1.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78"
+
+[[package]]
+name = "rgb"
+version = "0.8.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "20ec2d3e3fc7a92ced357df9cebd5a10b6fb2aa1ee797bf7e9ce2f17dffc8f59"
+dependencies = [
+ "bytemuck",
+]
+
+[[package]]
+name = "rust_hawktracer"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3480a29b927f66c6e06527be7f49ef4d291a01d694ec1fe85b0de71d6b02ac1"
+dependencies = [
+ "rust_hawktracer_normal_macro",
+ "rust_hawktracer_proc_macro",
+]
+
+[[package]]
+name = "rust_hawktracer_normal_macro"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a570059949e1dcdc6f35228fa389f54c2c84dfe0c94c05022baacd56eacd2e9"
+
+[[package]]
+name = "rust_hawktracer_proc_macro"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb626abdbed5e93f031baae60d72032f56bc964e11ac2ff65f2ba3ed98d6d3e1"
+
+[[package]]
+name = "rustc-hash"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
+
+[[package]]
+name = "rustc_version"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
+dependencies = [
+ "semver",
+]
+
+[[package]]
+name = "rustix"
+version = "0.37.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62f25693a73057a1b4cb56179dd3c7ea21a7c6c5ee7d85781f5749b46f34b79c"
+dependencies = [
+ "bitflags 1.3.2",
+ "errno",
+ "io-lifetimes",
+ "libc",
+ "linux-raw-sys 0.3.8",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "rustix"
+version = "0.38.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fbc6396159432b5c8490d4e301d8c705f61860b8b6c863bf79942ce5401968f3"
+dependencies = [
+ "bitflags 2.3.3",
+ "errno",
+ "libc",
+ "linux-raw-sys 0.4.3",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "ryu"
+version = "1.0.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041"
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "scan_fmt"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b53b0a5db882a8e2fdaae0a43f7b39e7e9082389e978398bdf223a55b581248"
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+
+[[package]]
+name = "semver"
+version = "1.0.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "serde"
+version = "1.0.164"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.164"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.22",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.99"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3"
+dependencies = [
+ "itoa",
+ "ryu",
+ "serde",
+]
+
+[[package]]
+name = "serde_spanned"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "shlex"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"
+
+[[package]]
+name = "signal-hook"
+version = "0.3.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9"
+dependencies = [
+ "libc",
+ "signal-hook-registry",
+]
+
+[[package]]
+name = "signal-hook-registry"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "simd-adler32"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "238abfbb77c1915110ad968465608b68e869e0772622c9656714e73e5a1a522f"
+
+[[package]]
+name = "simd_helpers"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95890f873bec569a0362c235787f3aca6e1e887302ba4840839bcc6459c42da6"
+dependencies = [
+ "quote",
+]
+
+[[package]]
+name = "smallvec"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
+
+[[package]]
+name = "spin"
+version = "0.9.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
+dependencies = [
+ "lock_api",
+]
+
+[[package]]
+name = "static_assertions"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
+
+[[package]]
+name = "strsim"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2efbeae7acf4eabd6bcdcbd11c92f45231ddda7539edc7806bd1a04a03b24616"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "system-deps"
+version = "6.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30c2de8a4d8f4b823d634affc9cd2a74ec98c53a756f317e529a48046cbf71f3"
+dependencies = [
+ "cfg-expr",
+ "heck",
+ "pkg-config",
+ "toml 0.7.5",
+ "version-compare",
+]
+
+[[package]]
+name = "target-lexicon"
+version = "0.12.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b1c7f239eb94671427157bd93b3694320f3668d4e1eff08c7285366fd777fac"
+
+[[package]]
+name = "termcolor"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "terminal_size"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e6bf6f19e9f8ed8d4048dc22981458ebcf406d67e94cd422e5ecd73d63b3237"
+dependencies = [
+ "rustix 0.37.21",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
+dependencies = [
+ "unicode-width",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d"
+
+[[package]]
+name = "thiserror"
+version = "1.0.40"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.40"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.22",
+]
+
+[[package]]
+name = "tiff"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7449334f9ff2baf290d55d73983a7d6fa15e01198faef72af07e2a8db851e471"
+dependencies = [
+ "flate2",
+ "jpeg-decoder",
+ "weezl",
+]
+
+[[package]]
+name = "tinytemplate"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
+dependencies = [
+ "serde",
+ "serde_json",
+]
+
+[[package]]
+name = "tinyvec"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50"
+dependencies = [
+ "tinyvec_macros",
+]
+
+[[package]]
+name = "tinyvec_macros"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
+
+[[package]]
+name = "toml"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "toml"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ebafdf5ad1220cb59e7d17cf4d2c72015297b75b19a10472f99b89225089240"
+dependencies = [
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "toml_edit",
+]
+
+[[package]]
+name = "toml_datetime"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "toml_edit"
+version = "0.19.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "266f016b7f039eec8a1a80dfe6156b633d208b9fccca5e4db1d6775b0c4e34a7"
+dependencies = [
+ "indexmap 2.0.0",
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "winnow",
+]
+
+[[package]]
+name = "unicode-bidi"
+version = "0.3.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460"
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0"
+
+[[package]]
+name = "unicode-normalization"
+version = "0.1.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921"
+dependencies = [
+ "tinyvec",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
+
+[[package]]
+name = "url"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb"
+dependencies = [
+ "form_urlencoded",
+ "idna",
+ "percent-encoding",
+]
+
+[[package]]
+name = "v_frame"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e3753f70d50a77f5d381103ba2693a889fed0d84273dd5cbdf4eb8bda720f0c6"
+dependencies = [
+ "cfg-if",
+ "noop_proc_macro",
+ "num-derive",
+ "num-traits",
+ "rust_hawktracer",
+]
+
+[[package]]
+name = "vcpkg"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
+
+[[package]]
+name = "vec_map"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
+
+[[package]]
+name = "version-compare"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "579a42fc0b8e0c63b76519a339be31bed574929511fa53c1a3acae26eb258f29"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "walkdir"
+version = "2.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698"
+dependencies = [
+ "same-file",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342"
+dependencies = [
+ "cfg-if",
+ "wasm-bindgen-macro",
+]
+
+[[package]]
+name = "wasm-bindgen-backend"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd"
+dependencies = [
+ "bumpalo",
+ "log",
+ "once_cell",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.22",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.22",
+ "wasm-bindgen-backend",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.87"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1"
+
+[[package]]
+name = "web-sys"
+version = "0.3.64"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b"
+dependencies = [
+ "js-sys",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "webp"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf022f821f166079a407d000ab57e84de020e66ffbbf4edde999bc7d6e371cae"
+dependencies = [
+ "libwebp-sys",
+]
+
+[[package]]
+name = "weezl"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9193164d4de03a926d909d3bc7c30543cecb35400c02114792c2cae20d5e2dbb"
+
+[[package]]
+name = "which"
+version = "4.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269"
+dependencies = [
+ "either",
+ "libc",
+ "once_cell",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-sys"
+version = "0.45.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
+dependencies = [
+ "windows-targets 0.42.2",
+]
+
+[[package]]
+name = "windows-sys"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
+dependencies = [
+ "windows-targets 0.48.1",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
+dependencies = [
+ "windows_aarch64_gnullvm 0.42.2",
+ "windows_aarch64_msvc 0.42.2",
+ "windows_i686_gnu 0.42.2",
+ "windows_i686_msvc 0.42.2",
+ "windows_x86_64_gnu 0.42.2",
+ "windows_x86_64_gnullvm 0.42.2",
+ "windows_x86_64_msvc 0.42.2",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.48.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f"
+dependencies = [
+ "windows_aarch64_gnullvm 0.48.0",
+ "windows_aarch64_msvc 0.48.0",
+ "windows_i686_gnu 0.48.0",
+ "windows_i686_msvc 0.48.0",
+ "windows_x86_64_gnu 0.48.0",
+ "windows_x86_64_gnullvm 0.48.0",
+ "windows_x86_64_msvc 0.48.0",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.48.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a"
+
+[[package]]
+name = "winnow"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "y4m"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a5a4b21e1a62b67a2970e6831bc091d7b87e119e7f9791aef9702e3bef04448"
+
+[[package]]
+name = "zune-inflate"
+version = "0.2.54"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73ab332fe2f6680068f3582b16a24f90ad7096d5d39b974d1c0aff0125116f02"
+dependencies = [
+ "simd-adler32",
+]
diff --git a/vendor/image/Cargo.toml b/vendor/image/Cargo.toml
new file mode 100644
index 0000000..6864f9b
--- /dev/null
+++ b/vendor/image/Cargo.toml
@@ -0,0 +1,188 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.61.0"
+name = "image"
+version = "0.24.7"
+authors = ["The image-rs Developers"]
+exclude = [
+ "src/png/testdata/*",
+ "examples/*",
+ "tests/*",
+]
+description = "Imaging library. Provides basic image processing and encoders/decoders for common image formats."
+homepage = "https://github.com/image-rs/image"
+documentation = "https://docs.rs/image"
+readme = "README.md"
+categories = [
+ "multimedia::images",
+ "multimedia::encoding",
+]
+license = "MIT"
+repository = "https://github.com/image-rs/image"
+resolver = "2"
+
+[lib]
+name = "image"
+path = "./src/lib.rs"
+
+[[bench]]
+name = "decode"
+path = "benches/decode.rs"
+harness = false
+
+[[bench]]
+name = "encode"
+path = "benches/encode.rs"
+harness = false
+
+[[bench]]
+name = "copy_from"
+harness = false
+
+[dependencies.bytemuck]
+version = "1.7.0"
+features = ["extern_crate_alloc"]
+
+[dependencies.byteorder]
+version = "1.3.2"
+
+[dependencies.color_quant]
+version = "1.1"
+
+[dependencies.dav1d]
+version = "0.6.0"
+optional = true
+
+[dependencies.dcv-color-primitives]
+version = "0.4.0"
+optional = true
+
+[dependencies.exr]
+version = "1.5.0"
+optional = true
+
+[dependencies.gif]
+version = "0.12"
+optional = true
+
+[dependencies.jpeg]
+version = "0.3.0"
+optional = true
+default-features = false
+package = "jpeg-decoder"
+
+[dependencies.libwebp]
+version = "0.2.2"
+optional = true
+default-features = false
+package = "webp"
+
+[dependencies.mp4parse]
+version = "0.17.0"
+optional = true
+
+[dependencies.num-rational]
+version = "0.4"
+default-features = false
+
+[dependencies.num-traits]
+version = "0.2.0"
+
+[dependencies.png]
+version = "0.17.6"
+optional = true
+
+[dependencies.qoi]
+version = "0.4"
+optional = true
+
+[dependencies.ravif]
+version = "0.11.0"
+optional = true
+
+[dependencies.rgb]
+version = "0.8.25"
+optional = true
+
+[dependencies.tiff]
+version = "0.9.0"
+optional = true
+
+[dev-dependencies.crc32fast]
+version = "1.2.0"
+
+[dev-dependencies.criterion]
+version = "0.4"
+
+[dev-dependencies.glob]
+version = "0.3"
+
+[dev-dependencies.jpeg]
+version = "0.3.0"
+features = ["platform_independent"]
+default-features = false
+package = "jpeg-decoder"
+
+[dev-dependencies.num-complex]
+version = "0.4"
+
+[dev-dependencies.quickcheck]
+version = "1"
+
+[features]
+avif = ["avif-encoder"]
+avif-decoder = [
+ "mp4parse",
+ "dcv-color-primitives",
+ "dav1d",
+]
+avif-encoder = [
+ "ravif",
+ "rgb",
+]
+benchmarks = []
+bmp = []
+dds = ["dxt"]
+default = [
+ "gif",
+ "jpeg",
+ "ico",
+ "png",
+ "pnm",
+ "tga",
+ "tiff",
+ "webp",
+ "bmp",
+ "hdr",
+ "dxt",
+ "dds",
+ "farbfeld",
+ "jpeg_rayon",
+ "openexr",
+ "qoi",
+]
+dxt = []
+farbfeld = []
+hdr = []
+ico = [
+ "bmp",
+ "png",
+]
+jpeg_rayon = ["jpeg/rayon"]
+openexr = ["exr"]
+pnm = []
+qoi = ["dep:qoi"]
+tga = []
+webp = []
+webp-encoder = ["libwebp"]
diff --git a/vendor/image/Cargo.toml.public-private-dependencies b/vendor/image/Cargo.toml.public-private-dependencies
new file mode 100644
index 0000000..671dea9
--- /dev/null
+++ b/vendor/image/Cargo.toml.public-private-dependencies
@@ -0,0 +1,98 @@
+cargo-features = ["public-dependency"]
+
+[package]
+name = "image"
+version = "0.24.0-alpha"
+edition = "2018"
+rust-version = "1.56"
+
+license = "MIT"
+description = "Imaging library written in Rust. Provides basic filters and decoders for the most common image formats."
+authors = ["The image-rs Developers"]
+readme = "README.md"
+
+# crates.io metadata
+documentation = "https://docs.rs/image"
+repository = "https://github.com/image-rs/image"
+homepage = "https://github.com/image-rs/image"
+categories = ["multimedia::images", "multimedia::encoding"]
+
+# Crate build related
+exclude = [
+ "src/png/testdata/*",
+ "examples/*",
+ "tests/*",
+]
+
+[lib]
+name = "image"
+path = "./src/lib.rs"
+
+[dependencies]
+bytemuck = { version = "1.7.0", features = ["extern_crate_alloc"] } # includes cast_vec
+byteorder = "1.3.2"
+num-iter = "0.1.32"
+num-rational = { version = "0.4", default-features = false }
+num-traits = { version = "0.2.0", public = true }
+gif = { version = "0.11.1", optional = true }
+jpeg = { package = "jpeg-decoder", version = "0.2.1", default-features = false, optional = true }
+png = { version = "0.17.0", optional = true }
+tiff = { version = "0.9.0", optional = true }
+ravif = { version = "0.8.0", optional = true }
+rgb = { version = "0.8.25", optional = true }
+mp4parse = { version = "0.12.0", optional = true }
+dav1d = { version = "0.6.0", optional = true }
+dcv-color-primitives = { version = "0.4.0", optional = true }
+exr = { version = "1.4.1", optional = true }
+color_quant = { version = "1.1", public = true }
+
+[dev-dependencies]
+crc32fast = "1.2.0"
+num-complex = "0.4"
+glob = "0.3"
+quickcheck = "1"
+criterion = "0.3"
+
+[features]
+# TODO: Add "avif" to this list while preparing for 0.24.0
+default = ["gif", "jpeg", "ico", "png", "pnm", "tga", "tiff", "webp", "bmp", "hdr", "dxt", "dds", "farbfeld", "jpeg_rayon", "openexr"]
+
+ico = ["bmp", "png"]
+pnm = []
+tga = []
+webp = []
+bmp = []
+hdr = []
+dxt = []
+dds = ["dxt"]
+farbfeld = []
+openexr = ["exr"]
+
+# Enables multi-threading.
+# Requires latest stable Rust.
+jpeg_rayon = ["jpeg/rayon"]
+# Non-default, enables avif support.
+# Requires latest stable Rust.
+avif = ["avif-encoder"]
+# Requires latest stable Rust and recent nasm (>= 2.14).
+avif-encoder = ["ravif", "rgb"]
+# Non-default, even in `avif`. Requires stable Rust and native dependency libdav1d.
+avif-decoder = ["mp4parse", "dcv-color-primitives", "dav1d"]
+
+# Build some inline benchmarks. Useful only during development.
+# Requires rustc nightly for feature test.
+benchmarks = []
+
+[[bench]]
+path = "benches/decode.rs"
+name = "decode"
+harness = false
+
+[[bench]]
+path = "benches/encode.rs"
+name = "encode"
+harness = false
+
+[[bench]]
+name = "copy_from"
+harness = false
diff --git a/vendor/image/LICENSE b/vendor/image/LICENSE
new file mode 100644
index 0000000..25bfe60
--- /dev/null
+++ b/vendor/image/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 PistonDevelopers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE. \ No newline at end of file
diff --git a/vendor/image/README.md b/vendor/image/README.md
new file mode 100644
index 0000000..e77a627
--- /dev/null
+++ b/vendor/image/README.md
@@ -0,0 +1,250 @@
+# Image
+[![crates.io](https://img.shields.io/crates/v/image.svg)](https://crates.io/crates/image)
+[![Documentation](https://docs.rs/image/badge.svg)](https://docs.rs/image)
+[![Build Status](https://github.com/image-rs/image/workflows/Rust%20CI/badge.svg)](https://github.com/image-rs/image/actions)
+[![Gitter](https://badges.gitter.im/image-rs/image.svg)](https://gitter.im/image-rs/image?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+
+Maintainers: [@HeroicKatora](https://github.com/HeroicKatora), [@fintelia](https://github.com/fintelia)
+
+[How to contribute](https://github.com/image-rs/organization/blob/master/CONTRIBUTING.md)
+
+## An Image Processing Library
+
+This crate provides basic image processing functions and methods for converting to and from various image formats.
+
+All image processing functions provided operate on types that implement the `GenericImageView` and `GenericImage` traits and return an `ImageBuffer`.
+
+## Supported Image Formats
+
+`image` provides implementations of common image format encoders and decoders.
+
+<!--- NOTE: Make sure to keep this table in sync with the one in src/lib.rs -->
+
+| Format | Decoding | Encoding |
+| ------ | -------- | -------- |
+| AVIF | Only 8-bit \*\* | Lossy |
+| BMP | Yes | Rgb8, Rgba8, Gray8, GrayA8 |
+| DDS | DXT1, DXT3, DXT5 | No |
+| Farbfeld | Yes | Yes |
+| GIF | Yes | Yes |
+| ICO | Yes | Yes |
+| JPEG | Baseline and progressive | Baseline JPEG |
+| OpenEXR | Rgb32F, Rgba32F (no dwa compression) | Rgb32F, Rgba32F (no dwa compression) |
+| PNG | All supported color types | Same as decoding |
+| PNM | PBM, PGM, PPM, standard PAM | Yes |
+| QOI | Yes | Yes |
+| TGA | Yes | Rgb8, Rgba8, Bgr8, Bgra8, Gray8, GrayA8 |
+| TIFF | Baseline(no fax support) + LZW + PackBits | Rgb8, Rgba8, Gray8 |
+| WebP | Yes | Rgb8, Rgba8 \* |
+
+- \* Requires the `webp-encoder` feature, uses the libwebp C library.
+- \*\* Requires the `avif-decoder` feature, uses the libdav1d C library.
+
+### The [`ImageDecoder`](https://docs.rs/image/*/image/trait.ImageDecoder.html) and [`ImageDecoderRect`](https://docs.rs/image/*/image/trait.ImageDecoderRect.html) Traits
+
+All image format decoders implement the `ImageDecoder` trait, which provides
+basic methods for getting image metadata and decoding images. Some formats
+additionally provide `ImageDecoderRect` implementations which allow for
+decoding only part of an image at once.
+
+The most important methods for decoders are...
++ **dimensions**: Return a tuple containing the width and height of the image.
++ **color_type**: Return the color type of the image data produced by this decoder.
++ **read_image**: Decode the entire image into a slice of bytes.
+
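+As a minimal sketch of how these methods fit together (assuming the default `png` feature; the file path below is only a placeholder), a decoder can be constructed directly and queried before any pixel data is read:
+
+```rust,no_run
+use std::fs::File;
+use std::io::BufReader;
+
+use image::codecs::png::PngDecoder;
+use image::ImageDecoder;
+
+fn main() -> Result<(), image::ImageError> {
+    // Placeholder path; any PNG file works here.
+    let reader = BufReader::new(File::open("example.png")?);
+    let decoder = PngDecoder::new(reader)?;
+
+    // Metadata is available before the pixel data is decoded.
+    let (width, height) = decoder.dimensions();
+    println!("{}x{} pixels, {:?}", width, height, decoder.color_type());
+
+    // Decode the whole image into a byte buffer of the size the decoder reports.
+    let mut bytes = vec![0u8; decoder.total_bytes() as usize];
+    decoder.read_image(&mut bytes)?;
+    Ok(())
+}
+```
+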
+## Pixels
+
+`image` provides the following pixel types:
++ **Rgb**: RGB pixel
++ **Rgba**: RGB with alpha (RGBA pixel)
++ **Luma**: Grayscale pixel
++ **LumaA**: Grayscale with alpha
+
+All pixels are parameterised by their component type.
+
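+As an illustrative sketch, individual pixel values can be constructed and manipulated directly; the `Pixel` trait gives access to the channel values:
+
+```rust
+use image::{Luma, LumaA, Pixel, Rgb, Rgba};
+
+// Each pixel type wraps an array of channel values of the chosen component type.
+let opaque_red: Rgb<u8> = Rgb([255, 0, 0]);
+let translucent_red: Rgba<u8> = Rgba([255, 0, 0, 128]);
+let gray: Luma<u16> = Luma([40_000]);
+let gray_with_alpha: LumaA<f32> = LumaA([0.5, 1.0]);
+
+// The `Pixel` trait exposes the raw channels and per-channel transformations.
+assert_eq!(opaque_red.channels(), &[255, 0, 0]);
+assert_eq!(opaque_red.map(|c| c / 2), Rgb([127, 0, 0]));
+```
+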
+## Images
+Individual pixels within images are indexed with (0,0) at the top left corner.
+### The [`GenericImageView`](https://docs.rs/image/*/image/trait.GenericImageView.html) and [`GenericImage`](https://docs.rs/image/*/image/trait.GenericImage.html) Traits
+
+Traits that provide methods for inspecting (`GenericImageView`) and manipulating (`GenericImage`) images, parameterised over the image's pixel type.
+
+Some of these methods for `GenericImageView` are...
++ **dimensions**: Return a tuple containing the width and height of the image.
++ **get_pixel**: Returns the pixel located at (x, y).
++ **pixels**: Returns an Iterator over the pixels of this image.
+
+While some of the methods for `GenericImage` are...
++ **put_pixel**: Put a pixel at location (x, y).
++ **copy_from**: Copies all of the pixels from another image into this image.
+
+### Representation of Images
+`image` provides two main ways of representing image data:
+
+#### [`ImageBuffer`](https://docs.rs/image/*/image/struct.ImageBuffer.html)
+An image parameterised by its Pixel types, represented by a width and height and a vector of pixels. It provides direct access to its pixels and implements the `GenericImageView` and `GenericImage` traits.
+
+```rust
+use image::{GenericImage, GenericImageView, ImageBuffer, RgbImage};
+
+// Construct a new RGB ImageBuffer with the specified width and height.
+let img: RgbImage = ImageBuffer::new(512, 512);
+
+// Construct a new image by repeated calls to the supplied closure.
+let mut img = ImageBuffer::from_fn(512, 512, |x, y| {
+ if x % 2 == 0 {
+ image::Luma([0u8])
+ } else {
+ image::Luma([255u8])
+ }
+});
+
+// Obtain the image's width and height.
+let (width, height) = img.dimensions();
+
+// Access the pixel at coordinate (100, 100).
+let pixel = img[(100, 100)];
+
+// Or use the `get_pixel` method from the `GenericImageView` trait.
+let pixel = *img.get_pixel(100, 100);
+
+// Put a pixel at coordinate (100, 100).
+img.put_pixel(100, 100, pixel);
+
+// Iterate over all pixels in the image.
+for pixel in img.pixels() {
+ // Do something with pixel.
+}
+```
+
+#### [`DynamicImage`](https://docs.rs/image/*/image/enum.DynamicImage.html)
+A `DynamicImage` is an enumeration over all supported `ImageBuffer<P>` types.
+Its exact image type is determined at runtime. It is the type returned when opening an image.
+For convenience `DynamicImage` reimplements all image processing functions.
+
+`DynamicImage` implements the `GenericImageView` and `GenericImage` traits for RGBA pixels.
+
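+A short sketch of working with the runtime-determined type (the file path below is only a placeholder):
+
+```rust,no_run
+use image::DynamicImage;
+
+// `open` returns a `DynamicImage`; the concrete variant depends on the file.
+let dynamic = image::open("example.png").unwrap();
+
+match &dynamic {
+    DynamicImage::ImageRgb8(_) => println!("8-bit RGB"),
+    DynamicImage::ImageRgba8(_) => println!("8-bit RGB with alpha"),
+    _ => println!("some other color type: {:?}", dynamic.color()),
+}
+
+// Conversion methods yield a concrete `ImageBuffer` regardless of the input.
+let rgba = dynamic.to_rgba8();
+println!("{} bytes of RGBA data", rgba.as_raw().len());
+```
+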
+#### [`SubImage`](https://docs.rs/image/*/image/struct.SubImage.html)
+A view into another image, delimited by the coordinates of a rectangle.
+The coordinates given set the position of the top left corner of the rectangle.
+This is used to perform image processing functions on a subregion of an image.
+
+```rust
+use image::{GenericImageView, ImageBuffer, RgbImage, imageops};
+
+let mut img: RgbImage = ImageBuffer::new(512, 512);
+let subimg = imageops::crop(&mut img, 0, 0, 100, 100);
+
+assert!(subimg.dimensions() == (100, 100));
+```
+
+## Image Processing Functions
+These are the functions defined in the `imageops` module. All functions operate on types that implement the `GenericImage` trait.
+Note that some of the functions are very slow in debug mode. Make sure to use release mode if you experience any performance issues.
+
++ **blur**: Performs a Gaussian blur on the supplied image.
++ **brighten**: Brighten the supplied image.
++ **huerotate**: Hue rotate the supplied image by degrees.
++ **contrast**: Adjust the contrast of the supplied image.
++ **crop**: Return a mutable view into an image.
++ **filter3x3**: Perform a 3x3 box filter on the supplied image.
++ **flip_horizontal**: Flip an image horizontally.
++ **flip_vertical**: Flip an image vertically.
++ **grayscale**: Convert the supplied image to grayscale.
++ **invert**: Invert each pixel within the supplied image. This function operates in place.
++ **resize**: Resize the supplied image to the specified dimensions.
++ **rotate180**: Rotate an image 180 degrees clockwise.
++ **rotate270**: Rotate an image 270 degrees clockwise.
++ **rotate90**: Rotate an image 90 degrees clockwise.
++ **unsharpen**: Performs an unsharpen mask on the supplied image.
+
+For more options, see the [`imageproc`](https://crates.io/crates/imageproc) crate.
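+
+A short sketch chaining a few of these operations (the input path matches the
+example below; the output file name is arbitrary):
+
+```rust,no_run
+use image::imageops::FilterType;
+
+fn main() {
+    let img = image::open("tests/images/jpg/progressive/cat.jpg").unwrap();
+
+    // Resize to fit within 200x200 (aspect ratio preserved), then soften slightly.
+    let thumb = img.resize(200, 200, FilterType::Lanczos3);
+    let soft = thumb.blur(1.5);
+
+    soft.save("thumbnail.png").unwrap();
+}
+```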
+
+## Examples
+### Opening and Saving Images
+
+`image` provides the `open` function for opening images from a path. The image
+format is determined from the path's file extension. The `io` module provides a
+reader which offers more control over decoding.
+
+```rust,no_run
+use image::GenericImageView;
+
+fn main() {
+ // Use the open function to load an image from a Path.
+ // `open` returns a `DynamicImage` on success.
+ let img = image::open("tests/images/jpg/progressive/cat.jpg").unwrap();
+
+    // The dimensions method returns the image's width and height.
+ println!("dimensions {:?}", img.dimensions());
+
+ // The color method returns the image's `ColorType`.
+ println!("{:?}", img.color());
+
+    // Write the contents of this image to a file in PNG format.
+ img.save("test.png").unwrap();
+}
+```
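+
+When more control is needed (for example when the file extension cannot be
+trusted), the reader mentioned above can guess the format from the file's
+content. A minimal sketch:
+
+```rust,no_run
+use image::io::Reader;
+
+fn main() {
+    let img = Reader::open("tests/images/jpg/progressive/cat.jpg")
+        .unwrap()
+        .with_guessed_format()
+        .unwrap()
+        .decode()
+        .unwrap();
+
+    println!("decoded {:?}", img.color());
+}
+```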
+
+### Generating Fractals
+
+```rust,no_run
+//! An example of generating julia fractals.
+fn main() {
+ let imgx = 800;
+ let imgy = 800;
+
+ let scalex = 3.0 / imgx as f32;
+ let scaley = 3.0 / imgy as f32;
+
+ // Create a new ImgBuf with width: imgx and height: imgy
+ let mut imgbuf = image::ImageBuffer::new(imgx, imgy);
+
+ // Iterate over the coordinates and pixels of the image
+ for (x, y, pixel) in imgbuf.enumerate_pixels_mut() {
+ let r = (0.3 * x as f32) as u8;
+ let b = (0.3 * y as f32) as u8;
+ *pixel = image::Rgb([r, 0, b]);
+ }
+
+ // A redundant loop to demonstrate reading image data
+ for x in 0..imgx {
+ for y in 0..imgy {
+ let cx = y as f32 * scalex - 1.5;
+ let cy = x as f32 * scaley - 1.5;
+
+ let c = num_complex::Complex::new(-0.4, 0.6);
+ let mut z = num_complex::Complex::new(cx, cy);
+
+ let mut i = 0;
+ while i < 255 && z.norm() <= 2.0 {
+ z = z * z + c;
+ i += 1;
+ }
+
+ let pixel = imgbuf.get_pixel_mut(x, y);
+ let image::Rgb(data) = *pixel;
+ *pixel = image::Rgb([data[0], i as u8, data[2]]);
+ }
+ }
+
+    // Save the image as “fractal.png”; the format is deduced from the path.
+ imgbuf.save("fractal.png").unwrap();
+}
+```
+
+Example output:
+
+<img src="examples/fractal.png" alt="A Julia Fractal, c: -0.4 + 0.6i" width="500" />
+
+### Writing raw buffers
+If the high level interface is not needed because the image was obtained by other means, `image` provides the function `save_buffer` to save a buffer to a file.
+
+```rust,no_run
+fn main() {
+
+ let buffer: &[u8] = unimplemented!(); // Generate the image data
+
+ // Save the buffer as "image.png"
+ image::save_buffer("image.png", buffer, 800, 600, image::ColorType::Rgb8).unwrap()
+}
+```
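+
+If the output should go to memory instead of a file, an explicit encoder can be
+used with any writer. A minimal sketch using the PNG encoder:
+
+```rust,no_run
+use image::codecs::png::PngEncoder;
+use image::{ColorType, ImageEncoder};
+
+fn main() {
+    let buffer: &[u8] = unimplemented!(); // Generate the image data
+
+    // Encode the raw RGB data into an in-memory PNG.
+    let mut png_bytes: Vec<u8> = Vec::new();
+    PngEncoder::new(&mut png_bytes)
+        .write_image(buffer, 800, 600, ColorType::Rgb8)
+        .unwrap();
+}
+```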
diff --git a/vendor/image/benches/README.md b/vendor/image/benches/README.md
new file mode 100644
index 0000000..9516f2c
--- /dev/null
+++ b/vendor/image/benches/README.md
@@ -0,0 +1,6 @@
+# Getting started with benchmarking
+
+To run the benchmarks you need a nightly Rust toolchain.
+Then launch them with
+
+ cargo +nightly bench --features=benchmarks
diff --git a/vendor/image/benches/copy_from.rs b/vendor/image/benches/copy_from.rs
new file mode 100644
index 0000000..37a4af8
--- /dev/null
+++ b/vendor/image/benches/copy_from.rs
@@ -0,0 +1,14 @@
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
+use image::{GenericImage, ImageBuffer, Rgba};
+
+pub fn bench_copy_from(c: &mut Criterion) {
+ let src = ImageBuffer::from_pixel(2048, 2048, Rgba([255u8, 0, 0, 255]));
+ let mut dst = ImageBuffer::from_pixel(2048, 2048, Rgba([0u8, 0, 0, 255]));
+
+ c.bench_function("copy_from", |b| {
+ b.iter(|| dst.copy_from(black_box(&src), 0, 0))
+ });
+}
+
+criterion_group!(benches, bench_copy_from);
+criterion_main!(benches);
diff --git a/vendor/image/benches/decode.rs b/vendor/image/benches/decode.rs
new file mode 100644
index 0000000..3702d69
--- /dev/null
+++ b/vendor/image/benches/decode.rs
@@ -0,0 +1,109 @@
+use std::{fs, iter, path};
+
+use criterion::{criterion_group, criterion_main, Criterion};
+use image::ImageFormat;
+
+#[derive(Clone, Copy)]
+struct BenchDef {
+ dir: &'static [&'static str],
+ files: &'static [&'static str],
+ format: ImageFormat,
+}
+
+fn load_all(c: &mut Criterion) {
+ const BENCH_DEFS: &'static [BenchDef] = &[
+ BenchDef {
+ dir: &["bmp", "images"],
+ files: &[
+ "Core_1_Bit.bmp",
+ "Core_4_Bit.bmp",
+ "Core_8_Bit.bmp",
+ "rgb16.bmp",
+ "rgb24.bmp",
+ "rgb32.bmp",
+ "pal4rle.bmp",
+ "pal8rle.bmp",
+ "rgb16-565.bmp",
+ "rgb32bf.bmp",
+ ],
+ format: ImageFormat::Bmp,
+ },
+ BenchDef {
+ dir: &["gif", "simple"],
+ files: &["alpha_gif_a.gif", "sample_1.gif"],
+ format: ImageFormat::Gif,
+ },
+ BenchDef {
+ dir: &["hdr", "images"],
+ files: &["image1.hdr", "rgbr4x4.hdr"],
+ format: ImageFormat::Hdr,
+ },
+ BenchDef {
+ dir: &["ico", "images"],
+ files: &[
+ "bmp-24bpp-mask.ico",
+ "bmp-32bpp-alpha.ico",
+ "png-32bpp-alpha.ico",
+ "smile.ico",
+ ],
+ format: ImageFormat::Ico,
+ },
+ BenchDef {
+ dir: &["jpg", "progressive"],
+ files: &["3.jpg", "cat.jpg", "test.jpg"],
+ format: ImageFormat::Jpeg,
+ },
+ // TODO: pnm
+ // TODO: png
+ BenchDef {
+ dir: &["tga", "testsuite"],
+ files: &["cbw8.tga", "ctc24.tga", "ubw8.tga", "utc24.tga"],
+ format: ImageFormat::Tga,
+ },
+ BenchDef {
+ dir: &["tiff", "testsuite"],
+ files: &[
+ "hpredict.tiff",
+ "hpredict_packbits.tiff",
+ "mandrill.tiff",
+ "rgb-3c-16b.tiff",
+ ],
+ format: ImageFormat::Tiff,
+ },
+ BenchDef {
+ dir: &["webp", "images"],
+ files: &[
+ "simple-gray.webp",
+ "simple-rgb.webp",
+ "vp8x-gray.webp",
+ "vp8x-rgb.webp",
+ ],
+ format: ImageFormat::WebP,
+ },
+ ];
+
+ for bench in BENCH_DEFS {
+ bench_load(c, bench);
+ }
+}
+
+criterion_group!(benches, load_all);
+criterion_main!(benches);
+
+fn bench_load(c: &mut Criterion, def: &BenchDef) {
+ let group_name = format!("load-{:?}", def.format);
+ let mut group = c.benchmark_group(&group_name);
+ let paths = IMAGE_DIR.iter().chain(def.dir);
+
+ for file_name in def.files {
+ let path: path::PathBuf = paths.clone().chain(iter::once(file_name)).collect();
+ let buf = fs::read(path).unwrap();
+ group.bench_function(file_name.to_owned(), |b| {
+ b.iter(|| {
+ image::load_from_memory_with_format(&buf, def.format).unwrap();
+ })
+ });
+ }
+}
+
+const IMAGE_DIR: [&'static str; 3] = [".", "tests", "images"];
diff --git a/vendor/image/benches/encode.rs b/vendor/image/benches/encode.rs
new file mode 100644
index 0000000..0ca4b2a
--- /dev/null
+++ b/vendor/image/benches/encode.rs
@@ -0,0 +1,134 @@
+extern crate criterion;
+
+use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
+use image::{codecs::bmp::BmpEncoder, codecs::jpeg::JpegEncoder, ColorType};
+
+use std::fs::File;
+use std::io::{BufWriter, Seek, SeekFrom, Write};
+
+trait Encoder {
+ fn encode_raw(&self, into: &mut Vec<u8>, im: &[u8], dims: u32, color: ColorType);
+ fn encode_bufvec(&self, into: &mut Vec<u8>, im: &[u8], dims: u32, color: ColorType);
+ fn encode_file(&self, file: &File, im: &[u8], dims: u32, color: ColorType);
+}
+
+#[derive(Clone, Copy)]
+struct BenchDef {
+ with: &'static dyn Encoder,
+ name: &'static str,
+ sizes: &'static [u32],
+ colors: &'static [ColorType],
+}
+
+fn encode_all(c: &mut Criterion) {
+ const BENCH_DEFS: &'static [BenchDef] = &[
+ BenchDef {
+ with: &Bmp,
+ name: "bmp",
+ sizes: &[100u32, 200, 400],
+ colors: &[ColorType::L8, ColorType::Rgb8, ColorType::Rgba8],
+ },
+ BenchDef {
+ with: &Jpeg,
+ name: "jpeg",
+ sizes: &[64u32, 128, 256],
+ colors: &[ColorType::L8, ColorType::Rgb8, ColorType::Rgba8],
+ },
+ ];
+
+ for definition in BENCH_DEFS {
+ encode_definition(c, definition)
+ }
+}
+
+criterion_group!(benches, encode_all);
+criterion_main!(benches);
+
+type BenchGroup<'a> = criterion::BenchmarkGroup<'a, criterion::measurement::WallTime>;
+
+/// Benchmarks encoding a zeroed image.
+///
+/// For compressed formats this is surely not representative of encoding a normal image but it's a
+/// start for benchmarking.
+fn encode_zeroed(group: &mut BenchGroup, with: &dyn Encoder, size: u32, color: ColorType) {
+ let bytes = size as usize * usize::from(color.bytes_per_pixel());
+ let im = vec![0; bytes * bytes];
+
+ group.bench_with_input(
+ BenchmarkId::new(format!("zero-{:?}-rawvec", color), size),
+ &im,
+ |b, image| {
+ let mut v = vec![];
+ with.encode_raw(&mut v, &im, size, color);
+ b.iter(|| with.encode_raw(&mut v, image, size, color));
+ },
+ );
+ group.bench_with_input(
+ BenchmarkId::new(format!("zero-{:?}-bufvec", color), size),
+ &im,
+ |b, image| {
+ let mut v = vec![];
+ with.encode_raw(&mut v, &im, size, color);
+ b.iter(|| with.encode_bufvec(&mut v, image, size, color));
+ },
+ );
+ group.bench_with_input(
+ BenchmarkId::new(format!("zero-{:?}-file", color), size),
+ &im,
+ |b, image| {
+ let file = File::create("temp.bmp").unwrap();
+ b.iter(|| with.encode_file(&file, image, size, color));
+ },
+ );
+}
+
+fn encode_definition(criterion: &mut Criterion, def: &BenchDef) {
+ let mut group = criterion.benchmark_group(format!("encode-{}", def.name));
+
+ for &color in def.colors {
+ for &size in def.sizes {
+ encode_zeroed(&mut group, def.with, size, color);
+ }
+ }
+}
+
+struct Bmp;
+
+struct Jpeg;
+
+trait EncoderBase {
+ fn encode(&self, into: impl Write, im: &[u8], dims: u32, color: ColorType);
+}
+
+impl<T: EncoderBase> Encoder for T {
+ fn encode_raw(&self, into: &mut Vec<u8>, im: &[u8], dims: u32, color: ColorType) {
+ into.clear();
+ self.encode(into, im, dims, color);
+ }
+
+ fn encode_bufvec(&self, into: &mut Vec<u8>, im: &[u8], dims: u32, color: ColorType) {
+ into.clear();
+ let buf = BufWriter::new(into);
+ self.encode(buf, im, dims, color);
+ }
+
+ fn encode_file(&self, mut file: &File, im: &[u8], dims: u32, color: ColorType) {
+ file.seek(SeekFrom::Start(0)).unwrap();
+ let buf = BufWriter::new(file);
+ self.encode(buf, im, dims, color);
+ }
+}
+
+impl EncoderBase for Bmp {
+ fn encode(&self, mut into: impl Write, im: &[u8], size: u32, color: ColorType) {
+ let mut x = BmpEncoder::new(&mut into);
+ x.encode(im, size, size, color).unwrap();
+ }
+}
+
+impl EncoderBase for Jpeg {
+ fn encode(&self, mut into: impl Write, im: &[u8], size: u32, color: ColorType) {
+ let mut x = JpegEncoder::new(&mut into);
+ x.encode(im, size, size, color).unwrap();
+ }
+}
diff --git a/vendor/image/deny.toml b/vendor/image/deny.toml
new file mode 100644
index 0000000..13cbe87
--- /dev/null
+++ b/vendor/image/deny.toml
@@ -0,0 +1,38 @@
+# https://embarkstudios.github.io/cargo-deny/
+
+targets = [
+ { triple = "aarch64-apple-darwin" },
+ { triple = "aarch64-linux-android" },
+ { triple = "x86_64-apple-darwin" },
+ { triple = "x86_64-pc-windows-msvc" },
+ { triple = "x86_64-unknown-linux-gnu" },
+ { triple = "x86_64-unknown-linux-musl" },
+]
+
+
+[advisories]
+vulnerability = "deny"
+unmaintained = "warn"
+yanked = "deny"
+ignore = []
+
+
+[bans]
+multiple-versions = "deny"
+wildcards = "allow" # at least until https://github.com/EmbarkStudios/cargo-deny/issues/241 is fixed
+deny = []
+skip = [
+ { name = "num-derive" } # ravif transatively depends on 0.3 and 0.4.
+]
+skip-tree = [
+ { name = "criterion" }, # dev-dependency
+ { name = "quickcheck" }, # dev-dependency
+ { name = "dav1d" }, # TODO: needs upgrade
+ { name = "clap" },
+]
+
+
+[licenses]
+unlicensed = "allow"
+allow-osi-fsf-free = "either"
+copyleft = "allow"
diff --git a/vendor/image/docs/2019-04-23-memory-unsafety.md b/vendor/image/docs/2019-04-23-memory-unsafety.md
new file mode 100644
index 0000000..3989eb2
--- /dev/null
+++ b/vendor/image/docs/2019-04-23-memory-unsafety.md
@@ -0,0 +1,54 @@
+# Advisory about potential memory unsafety issues
+
+[While reviewing][i885] some `unsafe Vec::from_raw_parts` operations within the
+library, trying to justify their existence with stronger reasoning, we noticed
+that they instead did not meet the required conditions set by the standard
+library. This unsoundness was quickly removed, but we noted that the same
+unjustified reasoning had been applied by a dependency introduced in `0.21`.
+
+For efficiency reasons, we had tried to reuse the allocations made by decoders
+for the buffer of the final image. However, that process is error prone. Most
+image decoding algorithms change the representation type of color samples to
+some degree. Notably, the output pixel type may have a different size and
+alignment than the type used in the temporary decoding buffer. In this specific
+instance, the `ImageBuffer` of the output expects a linear arrangement of `u8`
+samples while the implementation of the `hdr` decoder uses a pixel
+representation of `Rgb<u8>`, which has three times the size. One of the
+requirements of `Vec::from_raw_parts` reads:
+
+> ptr's T needs to have the same size and alignment as it was allocated with.
+
+This requirement is not present on slices `[T]`, as it is motivated by the
+allocator interface. The validity invariant of a reference and slice only
+requires the correct alignment here, which was considered in the design of
+`Rgb<_>` by giving it a well-defined representation, `#[repr(C)]`. But
+critically, this does not guarantee that we can reuse the existing allocation
+through effectively transmuting a `Vec<_>`!
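+
+To make the distinction concrete, here is a small sketch (not code from the
+library) contrasting a sound, copying conversion with the rejected reuse.
+`Rgb` stands in for the crate's `#[repr(C)]` pixel wrapper:
+
+```rust
+#[repr(C)]
+#[derive(Clone, Copy)]
+struct Rgb(pub [u8; 3]);
+
+/// Sound but copying: allocate a fresh `Vec<u8>` and append sample by sample.
+fn flatten_by_copy(pixels: Vec<Rgb>) -> Vec<u8> {
+    let mut out = Vec::with_capacity(pixels.len() * 3);
+    for Rgb(samples) in pixels {
+        out.extend_from_slice(&samples);
+    }
+    out
+}
+
+// The unsound alternative would call `Vec::from_raw_parts` on the old
+// allocation with `u8` as the element type. That violates the documented
+// requirement quoted above (same size and alignment as allocated with),
+// so it is deliberately not shown here.
+```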
+
+The actual impact of this issue is, in real world implementations, limited to
+allocators which handle allocations for types of size `1` and `3`/`4`
+differently. To the best of my knowledge, this does not apply to `jemalloc` and
+the `libc` allocator. However, we decided to proceed with caution here.
+
+## Lessons for the future
+
+New library dependencies will be held to a stricter policy. Not only must they
+be justified by functionality, they must also come with some reasoning as to how
+they solve that problem better than alternatives. Some evidence of active
+maintenance, or the existence of `#![deny(unsafe_code)]`, will help. We'll
+additionally look into existing dependencies, trying to identify similar issues
+and minimize the potential surface for implementation risks.
+
+## Sound and safe buffer reuse
+
+It seems that the `Vec` representation is entirely unfit for buffer reuse in
+the style an image library requires. In particular, using pixel types of
+different sizes is likely common to handle either whole (encoded) pixels or
+individual samples. Thus, we started a new sub-project to address this use
+case, [image-canvas][image-canvas]. Contributions and review of its safety are
+very welcome; we ask for the community's help here. The release of `v0.1` will
+not occur until at least one such review has occurred.
+
+
+[i885]: https://github.com/image-rs/image/pull/885
+[image-canvas]: https://github.com/image-rs/canvas
diff --git a/vendor/image/release.sh b/vendor/image/release.sh
new file mode 100755
index 0000000..ae164d3
--- /dev/null
+++ b/vendor/image/release.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Checks automatic preconditions for a release
+determine_new_version() {
+ grep "version = " Cargo.toml | sed -Ee 's/version = "(.*)"/\1/' | head -1
+}
+
+check_notexists_version() {
+ # Does the api information start with: '{"errors":'
+ [[ $(wget "https://crates.io/api/v1/crates/image/$1" -qO -) == "{\"errors\":"* ]]
+}
+
+check_release_description() {
+ major=${1%%.*}
+ minor_patch=${1#$major.}
+ minor=${minor_patch%%.*}
+ patch=${minor_patch#$minor.}
+ # We just need to find a fitting header line
+ grep -Eq "^### Version ${major}.${minor}$" CHANGES.md
+}
+
+version="$(determine_new_version)"
+check_release_description $version || { echo "Version does not have a release description"; exit 1; }
+check_notexists_version $version || { echo "Version $version appears already published"; exit 1; }
+
diff --git a/vendor/image/src/animation.rs b/vendor/image/src/animation.rs
new file mode 100644
index 0000000..aad57b4
--- /dev/null
+++ b/vendor/image/src/animation.rs
@@ -0,0 +1,342 @@
+use std::iter::Iterator;
+use std::time::Duration;
+
+use num_rational::Ratio;
+
+use crate::error::ImageResult;
+use crate::RgbaImage;
+
+/// An implementation dependent iterator, reading the frames as requested
+pub struct Frames<'a> {
+ iterator: Box<dyn Iterator<Item = ImageResult<Frame>> + 'a>,
+}
+
+impl<'a> Frames<'a> {
+ /// Creates a new `Frames` from an implementation specific iterator.
+ pub fn new(iterator: Box<dyn Iterator<Item = ImageResult<Frame>> + 'a>) -> Self {
+ Frames { iterator }
+ }
+
+ /// Steps through the iterator from the current frame until the end and pushes each frame into
+ /// a `Vec`.
+    /// If an error is encountered, that error is returned instead.
+ ///
+ /// Note: This is equivalent to `Frames::collect::<ImageResult<Vec<Frame>>>()`
+ pub fn collect_frames(self) -> ImageResult<Vec<Frame>> {
+ self.collect()
+ }
+}
+
+impl<'a> Iterator for Frames<'a> {
+ type Item = ImageResult<Frame>;
+ fn next(&mut self) -> Option<ImageResult<Frame>> {
+ self.iterator.next()
+ }
+}
+
+/// A single animation frame
+#[derive(Clone)]
+pub struct Frame {
+ /// Delay between the frames in milliseconds
+ delay: Delay,
+ /// x offset
+ left: u32,
+ /// y offset
+ top: u32,
+ buffer: RgbaImage,
+}
+
+/// The delay of a frame relative to the previous one.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd)]
+pub struct Delay {
+ ratio: Ratio<u32>,
+}
+
+impl Frame {
+ /// Constructs a new frame without any delay.
+ pub fn new(buffer: RgbaImage) -> Frame {
+ Frame {
+ delay: Delay::from_ratio(Ratio::from_integer(0)),
+ left: 0,
+ top: 0,
+ buffer,
+ }
+ }
+
+ /// Constructs a new frame
+ pub fn from_parts(buffer: RgbaImage, left: u32, top: u32, delay: Delay) -> Frame {
+ Frame {
+ delay,
+ left,
+ top,
+ buffer,
+ }
+ }
+
+ /// Delay of this frame
+ pub fn delay(&self) -> Delay {
+ self.delay
+ }
+
+ /// Returns the image buffer
+ pub fn buffer(&self) -> &RgbaImage {
+ &self.buffer
+ }
+
+ /// Returns a mutable image buffer
+ pub fn buffer_mut(&mut self) -> &mut RgbaImage {
+ &mut self.buffer
+ }
+
+ /// Returns the image buffer
+ pub fn into_buffer(self) -> RgbaImage {
+ self.buffer
+ }
+
+ /// Returns the x offset
+ pub fn left(&self) -> u32 {
+ self.left
+ }
+
+ /// Returns the y offset
+ pub fn top(&self) -> u32 {
+ self.top
+ }
+}
+
+impl Delay {
+ /// Create a delay from a ratio of milliseconds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use image::Delay;
+ /// let delay_10ms = Delay::from_numer_denom_ms(10, 1);
+ /// ```
+ pub fn from_numer_denom_ms(numerator: u32, denominator: u32) -> Self {
+ Delay {
+ ratio: Ratio::new_raw(numerator, denominator),
+ }
+ }
+
+    /// Convert from a duration, clamped between 0 and an implementation defined maximum.
+ ///
+ /// The maximum is *at least* `i32::MAX` milliseconds. It should be noted that the accuracy of
+ /// the result may be relative and very large delays have a coarse resolution.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::time::Duration;
+ /// use image::Delay;
+ ///
+ /// let duration = Duration::from_millis(20);
+ /// let delay = Delay::from_saturating_duration(duration);
+ /// ```
+ pub fn from_saturating_duration(duration: Duration) -> Self {
+ // A few notes: The largest number we can represent as a ratio is u32::MAX but we can
+ // sometimes represent much smaller numbers.
+ //
+ // We can represent duration as `millis+a/b` (where a < b, b > 0).
+ // We must thus bound b with `b·millis + (b-1) <= u32::MAX` or
+ // > `0 < b <= (u32::MAX + 1)/(millis + 1)`
+ // Corollary: millis <= u32::MAX
+
+ const MILLIS_BOUND: u128 = u32::max_value() as u128;
+
+ let millis = duration.as_millis().min(MILLIS_BOUND);
+ let submillis = (duration.as_nanos() % 1_000_000) as u32;
+
+ let max_b = if millis > 0 {
+ ((MILLIS_BOUND + 1) / (millis + 1)) as u32
+ } else {
+ MILLIS_BOUND as u32
+ };
+ let millis = millis as u32;
+
+ let (a, b) = Self::closest_bounded_fraction(max_b, submillis, 1_000_000);
+ Self::from_numer_denom_ms(a + b * millis, b)
+ }
+
+ /// The numerator and denominator of the delay in milliseconds.
+ ///
+ /// This is guaranteed to be an exact conversion if the `Delay` was previously created with the
+ /// `from_numer_denom_ms` constructor.
+ pub fn numer_denom_ms(self) -> (u32, u32) {
+ (*self.ratio.numer(), *self.ratio.denom())
+ }
+
+ pub(crate) fn from_ratio(ratio: Ratio<u32>) -> Self {
+ Delay { ratio }
+ }
+
+ pub(crate) fn into_ratio(self) -> Ratio<u32> {
+ self.ratio
+ }
+
+ /// Given some fraction, compute an approximation with denominator bounded.
+ ///
+    /// Note that `denom_bound` bounds the numerator and denominator of all intermediate
+    /// approximations and of the end result.
+ fn closest_bounded_fraction(denom_bound: u32, nom: u32, denom: u32) -> (u32, u32) {
+ use std::cmp::Ordering::{self, *};
+ assert!(0 < denom);
+ assert!(0 < denom_bound);
+ assert!(nom < denom);
+
+ // Avoid a few type troubles. All intermediate results are bounded by `denom_bound` which
+ // is in turn bounded by u32::MAX. Representing with u64 allows multiplication of any two
+ // values without fears of overflow.
+
+ // Compare two fractions whose parts fit into a u32.
+ fn compare_fraction((an, ad): (u64, u64), (bn, bd): (u64, u64)) -> Ordering {
+ (an * bd).cmp(&(bn * ad))
+ }
+
+        // Computes the numerator of the absolute difference between two such fractions.
+ fn abs_diff_nom((an, ad): (u64, u64), (bn, bd): (u64, u64)) -> u64 {
+ let c0 = an * bd;
+ let c1 = ad * bn;
+
+ let d0 = c0.max(c1);
+ let d1 = c0.min(c1);
+ d0 - d1
+ }
+
+ let exact = (u64::from(nom), u64::from(denom));
+ // The lower bound fraction, numerator and denominator.
+ let mut lower = (0u64, 1u64);
+ // The upper bound fraction, numerator and denominator.
+ let mut upper = (1u64, 1u64);
+ // The closest approximation for now.
+ let mut guess = (u64::from(nom * 2 > denom), 1u64);
+
+ // loop invariant: ad, bd <= denom_bound
+ // iterates the Farey sequence.
+ loop {
+ // Break if we are done.
+ if compare_fraction(guess, exact) == Equal {
+ break;
+ }
+
+ // Break if next Farey number is out-of-range.
+ if u64::from(denom_bound) - lower.1 < upper.1 {
+ break;
+ }
+
+ // Next Farey approximation n between a and b
+ let next = (lower.0 + upper.0, lower.1 + upper.1);
+ // if F < n then replace the upper bound, else replace lower.
+ if compare_fraction(exact, next) == Less {
+ upper = next;
+ } else {
+ lower = next;
+ }
+
+ // Now correct the closest guess.
+ // In other words, if |c - f| > |n - f| then replace it with the new guess.
+ // This favors the guess with smaller denominator on equality.
+
+ // |g - f| = |g_diff_nom|/(gd*fd);
+ let g_diff_nom = abs_diff_nom(guess, exact);
+ // |n - f| = |n_diff_nom|/(nd*fd);
+ let n_diff_nom = abs_diff_nom(next, exact);
+
+ // The difference |n - f| is smaller than |g - f| if either the integral part of the
+ // fraction |n_diff_nom|/nd is smaller than the one of |g_diff_nom|/gd or if they are
+ // the same but the fractional part is larger.
+ if match (n_diff_nom / next.1).cmp(&(g_diff_nom / guess.1)) {
+ Less => true,
+ Greater => false,
+                // Note that the numerator for the fractional part is smaller than its denominator,
+                // which is smaller than u32 and can't overflow the multiplication with the other
+                // denominator; that is, we can compare these fractions by multiplication with the
+                // respective other denominator.
+ Equal => {
+ compare_fraction(
+ (n_diff_nom % next.1, next.1),
+ (g_diff_nom % guess.1, guess.1),
+ ) == Less
+ }
+ } {
+ guess = next;
+ }
+ }
+
+ (guess.0 as u32, guess.1 as u32)
+ }
+}
+
+impl From<Delay> for Duration {
+ fn from(delay: Delay) -> Self {
+ let ratio = delay.into_ratio();
+ let ms = ratio.to_integer();
+ let rest = ratio.numer() % ratio.denom();
+ let nanos = (u64::from(rest) * 1_000_000) / u64::from(*ratio.denom());
+ Duration::from_millis(ms.into()) + Duration::from_nanos(nanos)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{Delay, Duration, Ratio};
+
+ #[test]
+ fn simple() {
+ let second = Delay::from_numer_denom_ms(1000, 1);
+ assert_eq!(Duration::from(second), Duration::from_secs(1));
+ }
+
+ #[test]
+ fn fps_30() {
+ let thirtieth = Delay::from_numer_denom_ms(1000, 30);
+ let duration = Duration::from(thirtieth);
+ assert_eq!(duration.as_secs(), 0);
+ assert_eq!(duration.subsec_millis(), 33);
+ assert_eq!(duration.subsec_nanos(), 33_333_333);
+ }
+
+ #[test]
+ fn duration_outlier() {
+ let oob = Duration::from_secs(0xFFFF_FFFF);
+ let delay = Delay::from_saturating_duration(oob);
+ assert_eq!(delay.numer_denom_ms(), (0xFFFF_FFFF, 1));
+ }
+
+ #[test]
+ fn duration_approx() {
+ let oob = Duration::from_millis(0xFFFF_FFFF) + Duration::from_micros(1);
+ let delay = Delay::from_saturating_duration(oob);
+ assert_eq!(delay.numer_denom_ms(), (0xFFFF_FFFF, 1));
+
+ let inbounds = Duration::from_millis(0xFFFF_FFFF) - Duration::from_micros(1);
+ let delay = Delay::from_saturating_duration(inbounds);
+ assert_eq!(delay.numer_denom_ms(), (0xFFFF_FFFF, 1));
+
+ let fine =
+ Duration::from_millis(0xFFFF_FFFF / 1000) + Duration::from_micros(0xFFFF_FFFF % 1000);
+ let delay = Delay::from_saturating_duration(fine);
+        // Funnily, 0xFFFF_FFFF is divisible by 5, thus we compare with a `Ratio`.
+ assert_eq!(delay.into_ratio(), Ratio::new(0xFFFF_FFFF, 1000));
+ }
+
+ #[test]
+ fn precise() {
+ // The ratio has only 32 bits in the numerator, too imprecise to get more than 11 digits
+ // correct. But it may be expressed as 1_000_000/3 instead.
+ let exceed = Duration::from_secs(333) + Duration::from_nanos(333_333_333);
+ let delay = Delay::from_saturating_duration(exceed);
+ assert_eq!(Duration::from(delay), exceed);
+ }
+
+ #[test]
+ fn small() {
+ // Not quite a delay of `1 ms`.
+ let delay = Delay::from_numer_denom_ms(1 << 16, (1 << 16) + 1);
+ let duration = Duration::from(delay);
+ assert_eq!(duration.as_millis(), 0);
+        // Not precisely the original but should be smaller than 1 ms.
+ let delay = Delay::from_saturating_duration(duration);
+ assert_eq!(delay.into_ratio().to_integer(), 0);
+ }
+}
diff --git a/vendor/image/src/buffer.rs b/vendor/image/src/buffer.rs
new file mode 100644
index 0000000..765a9de
--- /dev/null
+++ b/vendor/image/src/buffer.rs
@@ -0,0 +1,1768 @@
+//! Contains the generic `ImageBuffer` struct.
+use num_traits::Zero;
+use std::fmt;
+use std::marker::PhantomData;
+use std::ops::{Deref, DerefMut, Index, IndexMut, Range};
+use std::path::Path;
+use std::slice::{ChunksExact, ChunksExactMut};
+
+use crate::color::{FromColor, Luma, LumaA, Rgb, Rgba};
+use crate::dynimage::{save_buffer, save_buffer_with_format, write_buffer_with_format};
+use crate::error::ImageResult;
+use crate::flat::{FlatSamples, SampleLayout};
+use crate::image::{GenericImage, GenericImageView, ImageEncoder, ImageFormat, ImageOutputFormat};
+use crate::math::Rect;
+use crate::traits::{EncodableLayout, Pixel, PixelWithColorType};
+use crate::utils::expand_packed;
+
+/// Iterate over pixel refs.
+pub struct Pixels<'a, P: Pixel + 'a>
+where
+ P::Subpixel: 'a,
+{
+ chunks: ChunksExact<'a, P::Subpixel>,
+}
+
+impl<'a, P: Pixel + 'a> Iterator for Pixels<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ type Item = &'a P;
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<&'a P> {
+ self.chunks.next().map(|v| <P as Pixel>::from_slice(v))
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+}
+
+impl<'a, P: Pixel + 'a> ExactSizeIterator for Pixels<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ fn len(&self) -> usize {
+ self.chunks.len()
+ }
+}
+
+impl<'a, P: Pixel + 'a> DoubleEndedIterator for Pixels<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ #[inline(always)]
+ fn next_back(&mut self) -> Option<&'a P> {
+ self.chunks.next_back().map(|v| <P as Pixel>::from_slice(v))
+ }
+}
+
+impl<P: Pixel> Clone for Pixels<'_, P> {
+ fn clone(&self) -> Self {
+ Pixels {
+ chunks: self.chunks.clone(),
+ }
+ }
+}
+
+impl<P: Pixel> fmt::Debug for Pixels<'_, P>
+where
+ P::Subpixel: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Pixels")
+ .field("chunks", &self.chunks)
+ .finish()
+ }
+}
+
+/// Iterate over mutable pixel refs.
+pub struct PixelsMut<'a, P: Pixel + 'a>
+where
+ P::Subpixel: 'a,
+{
+ chunks: ChunksExactMut<'a, P::Subpixel>,
+}
+
+impl<'a, P: Pixel + 'a> Iterator for PixelsMut<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ type Item = &'a mut P;
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<&'a mut P> {
+ self.chunks.next().map(|v| <P as Pixel>::from_slice_mut(v))
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+}
+
+impl<'a, P: Pixel + 'a> ExactSizeIterator for PixelsMut<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ fn len(&self) -> usize {
+ self.chunks.len()
+ }
+}
+
+impl<'a, P: Pixel + 'a> DoubleEndedIterator for PixelsMut<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ #[inline(always)]
+ fn next_back(&mut self) -> Option<&'a mut P> {
+ self.chunks
+ .next_back()
+ .map(|v| <P as Pixel>::from_slice_mut(v))
+ }
+}
+
+impl<P: Pixel> fmt::Debug for PixelsMut<'_, P>
+where
+ P::Subpixel: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("PixelsMut")
+ .field("chunks", &self.chunks)
+ .finish()
+ }
+}
+
+/// Iterate over rows of an image
+///
+/// This iterator is created with [`ImageBuffer::rows`]. See its documentation for details.
+///
+/// [`ImageBuffer::rows`]: ../struct.ImageBuffer.html#method.rows
+pub struct Rows<'a, P: Pixel + 'a>
+where
+ <P as Pixel>::Subpixel: 'a,
+{
+ pixels: ChunksExact<'a, P::Subpixel>,
+}
+
+impl<'a, P: Pixel + 'a> Rows<'a, P> {
+ /// Construct the iterator from image pixels. This is not public since it has a (hidden) panic
+ /// condition. The `pixels` slice must be large enough so that all pixels are addressable.
+ fn with_image(pixels: &'a [P::Subpixel], width: u32, height: u32) -> Self {
+ let row_len = (width as usize) * usize::from(<P as Pixel>::CHANNEL_COUNT);
+ if row_len == 0 {
+ Rows {
+ pixels: [].chunks_exact(1),
+ }
+ } else {
+ let pixels = pixels
+ .get(..row_len * height as usize)
+ .expect("Pixel buffer has too few subpixels");
+ // Rows are physically present. In particular, height is smaller than `usize::MAX` as
+ // all subpixels can be indexed.
+ Rows {
+ pixels: pixels.chunks_exact(row_len),
+ }
+ }
+ }
+}
+
+impl<'a, P: Pixel + 'a> Iterator for Rows<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ type Item = Pixels<'a, P>;
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<Pixels<'a, P>> {
+ let row = self.pixels.next()?;
+ Some(Pixels {
+ // Note: this is not reached when CHANNEL_COUNT is 0.
+ chunks: row.chunks_exact(<P as Pixel>::CHANNEL_COUNT as usize),
+ })
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+}
+
+impl<'a, P: Pixel + 'a> ExactSizeIterator for Rows<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ fn len(&self) -> usize {
+ self.pixels.len()
+ }
+}
+
+impl<'a, P: Pixel + 'a> DoubleEndedIterator for Rows<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ #[inline(always)]
+ fn next_back(&mut self) -> Option<Pixels<'a, P>> {
+ let row = self.pixels.next_back()?;
+ Some(Pixels {
+ // Note: this is not reached when CHANNEL_COUNT is 0.
+ chunks: row.chunks_exact(<P as Pixel>::CHANNEL_COUNT as usize),
+ })
+ }
+}
+
+impl<P: Pixel> Clone for Rows<'_, P> {
+ fn clone(&self) -> Self {
+ Rows {
+ pixels: self.pixels.clone(),
+ }
+ }
+}
+
+impl<P: Pixel> fmt::Debug for Rows<'_, P>
+where
+ P::Subpixel: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("Rows")
+ .field("pixels", &self.pixels)
+ .finish()
+ }
+}
+
+/// Iterate over mutable rows of an image
+///
+/// This iterator is created with [`ImageBuffer::rows_mut`]. See its documentation for details.
+///
+/// [`ImageBuffer::rows_mut`]: ../struct.ImageBuffer.html#method.rows_mut
+pub struct RowsMut<'a, P: Pixel + 'a>
+where
+ <P as Pixel>::Subpixel: 'a,
+{
+ pixels: ChunksExactMut<'a, P::Subpixel>,
+}
+
+impl<'a, P: Pixel + 'a> RowsMut<'a, P> {
+ /// Construct the iterator from image pixels. This is not public since it has a (hidden) panic
+ /// condition. The `pixels` slice must be large enough so that all pixels are addressable.
+ fn with_image(pixels: &'a mut [P::Subpixel], width: u32, height: u32) -> Self {
+ let row_len = (width as usize) * usize::from(<P as Pixel>::CHANNEL_COUNT);
+ if row_len == 0 {
+ RowsMut {
+ pixels: [].chunks_exact_mut(1),
+ }
+ } else {
+ let pixels = pixels
+ .get_mut(..row_len * height as usize)
+ .expect("Pixel buffer has too few subpixels");
+ // Rows are physically present. In particular, height is smaller than `usize::MAX` as
+ // all subpixels can be indexed.
+ RowsMut {
+ pixels: pixels.chunks_exact_mut(row_len),
+ }
+ }
+ }
+}
+
+impl<'a, P: Pixel + 'a> Iterator for RowsMut<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ type Item = PixelsMut<'a, P>;
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<PixelsMut<'a, P>> {
+ let row = self.pixels.next()?;
+ Some(PixelsMut {
+ // Note: this is not reached when CHANNEL_COUNT is 0.
+ chunks: row.chunks_exact_mut(<P as Pixel>::CHANNEL_COUNT as usize),
+ })
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+}
+
+impl<'a, P: Pixel + 'a> ExactSizeIterator for RowsMut<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ fn len(&self) -> usize {
+ self.pixels.len()
+ }
+}
+
+impl<'a, P: Pixel + 'a> DoubleEndedIterator for RowsMut<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ #[inline(always)]
+ fn next_back(&mut self) -> Option<PixelsMut<'a, P>> {
+ let row = self.pixels.next_back()?;
+ Some(PixelsMut {
+ // Note: this is not reached when CHANNEL_COUNT is 0.
+ chunks: row.chunks_exact_mut(<P as Pixel>::CHANNEL_COUNT as usize),
+ })
+ }
+}
+
+impl<P: Pixel> fmt::Debug for RowsMut<'_, P>
+where
+ P::Subpixel: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("RowsMut")
+ .field("pixels", &self.pixels)
+ .finish()
+ }
+}
+
+/// Enumerate the pixels of an image.
+pub struct EnumeratePixels<'a, P: Pixel + 'a>
+where
+ <P as Pixel>::Subpixel: 'a,
+{
+ pixels: Pixels<'a, P>,
+ x: u32,
+ y: u32,
+ width: u32,
+}
+
+impl<'a, P: Pixel + 'a> Iterator for EnumeratePixels<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ type Item = (u32, u32, &'a P);
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<(u32, u32, &'a P)> {
+ if self.x >= self.width {
+ self.x = 0;
+ self.y += 1;
+ }
+ let (x, y) = (self.x, self.y);
+ self.x += 1;
+ self.pixels.next().map(|p| (x, y, p))
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+}
+
+impl<'a, P: Pixel + 'a> ExactSizeIterator for EnumeratePixels<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ fn len(&self) -> usize {
+ self.pixels.len()
+ }
+}
+
+impl<P: Pixel> Clone for EnumeratePixels<'_, P> {
+ fn clone(&self) -> Self {
+ EnumeratePixels {
+ pixels: self.pixels.clone(),
+ ..*self
+ }
+ }
+}
+
+impl<P: Pixel> fmt::Debug for EnumeratePixels<'_, P>
+where
+ P::Subpixel: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("EnumeratePixels")
+ .field("pixels", &self.pixels)
+ .field("x", &self.x)
+ .field("y", &self.y)
+ .field("width", &self.width)
+ .finish()
+ }
+}
+
+/// Enumerate the rows of an image.
+pub struct EnumerateRows<'a, P: Pixel + 'a>
+where
+ <P as Pixel>::Subpixel: 'a,
+{
+ rows: Rows<'a, P>,
+ y: u32,
+ width: u32,
+}
+
+impl<'a, P: Pixel + 'a> Iterator for EnumerateRows<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ type Item = (u32, EnumeratePixels<'a, P>);
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<(u32, EnumeratePixels<'a, P>)> {
+ let y = self.y;
+ self.y += 1;
+ self.rows.next().map(|r| {
+ (
+ y,
+ EnumeratePixels {
+ x: 0,
+ y,
+ width: self.width,
+ pixels: r,
+ },
+ )
+ })
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+}
+
+impl<'a, P: Pixel + 'a> ExactSizeIterator for EnumerateRows<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ fn len(&self) -> usize {
+ self.rows.len()
+ }
+}
+
+impl<P: Pixel> Clone for EnumerateRows<'_, P> {
+ fn clone(&self) -> Self {
+ EnumerateRows {
+ rows: self.rows.clone(),
+ ..*self
+ }
+ }
+}
+
+impl<P: Pixel> fmt::Debug for EnumerateRows<'_, P>
+where
+ P::Subpixel: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("EnumerateRows")
+ .field("rows", &self.rows)
+ .field("y", &self.y)
+ .field("width", &self.width)
+ .finish()
+ }
+}
+
+/// Enumerate the pixels of an image.
+pub struct EnumeratePixelsMut<'a, P: Pixel + 'a>
+where
+ <P as Pixel>::Subpixel: 'a,
+{
+ pixels: PixelsMut<'a, P>,
+ x: u32,
+ y: u32,
+ width: u32,
+}
+
+impl<'a, P: Pixel + 'a> Iterator for EnumeratePixelsMut<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ type Item = (u32, u32, &'a mut P);
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<(u32, u32, &'a mut P)> {
+ if self.x >= self.width {
+ self.x = 0;
+ self.y += 1;
+ }
+ let (x, y) = (self.x, self.y);
+ self.x += 1;
+ self.pixels.next().map(|p| (x, y, p))
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+}
+
+impl<'a, P: Pixel + 'a> ExactSizeIterator for EnumeratePixelsMut<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ fn len(&self) -> usize {
+ self.pixels.len()
+ }
+}
+
+impl<P: Pixel> fmt::Debug for EnumeratePixelsMut<'_, P>
+where
+ P::Subpixel: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("EnumeratePixelsMut")
+ .field("pixels", &self.pixels)
+ .field("x", &self.x)
+ .field("y", &self.y)
+ .field("width", &self.width)
+ .finish()
+ }
+}
+
+/// Enumerate the rows of an image.
+pub struct EnumerateRowsMut<'a, P: Pixel + 'a>
+where
+ <P as Pixel>::Subpixel: 'a,
+{
+ rows: RowsMut<'a, P>,
+ y: u32,
+ width: u32,
+}
+
+impl<'a, P: Pixel + 'a> Iterator for EnumerateRowsMut<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ type Item = (u32, EnumeratePixelsMut<'a, P>);
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<(u32, EnumeratePixelsMut<'a, P>)> {
+ let y = self.y;
+ self.y += 1;
+ self.rows.next().map(|r| {
+ (
+ y,
+ EnumeratePixelsMut {
+ x: 0,
+ y,
+ width: self.width,
+ pixels: r,
+ },
+ )
+ })
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+}
+
+impl<'a, P: Pixel + 'a> ExactSizeIterator for EnumerateRowsMut<'a, P>
+where
+ P::Subpixel: 'a,
+{
+ fn len(&self) -> usize {
+ self.rows.len()
+ }
+}
+
+impl<P: Pixel> fmt::Debug for EnumerateRowsMut<'_, P>
+where
+ P::Subpixel: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_struct("EnumerateRowsMut")
+ .field("rows", &self.rows)
+ .field("y", &self.y)
+ .field("width", &self.width)
+ .finish()
+ }
+}
+
+/// Generic image buffer
+///
+/// This is an image parameterised by its Pixel types, represented by a width and height and a
+/// container of channel data. It provides direct access to its pixels and implements the
+/// [`GenericImageView`] and [`GenericImage`] traits. In many ways, this is the standard buffer
+/// implementing those traits. Using this concrete type instead of a generic type parameter has
+/// been shown to improve performance.
+///
+/// The crate defines a few type aliases with regularly used pixel types for your convenience, such
+/// as [`RgbImage`], [`GrayImage`] etc.
+///
+/// [`GenericImage`]: trait.GenericImage.html
+/// [`GenericImageView`]: trait.GenericImageView.html
+/// [`RgbImage`]: type.RgbImage.html
+/// [`GrayImage`]: type.GrayImage.html
+///
+/// To convert between images of different Pixel types use [`DynamicImage`].
+///
+/// You can retrieve a complete description of the buffer's layout and contents through
+/// [`as_flat_samples`] and [`as_flat_samples_mut`]. This can be handy to also use the contents in
+/// a foreign language, map it as a GPU host buffer or other similar tasks.
+///
+/// [`DynamicImage`]: enum.DynamicImage.html
+/// [`as_flat_samples`]: #method.as_flat_samples
+/// [`as_flat_samples_mut`]: #method.as_flat_samples_mut
+///
+/// ## Examples
+///
+/// Create a simple canvas and paint a small cross.
+///
+/// ```
+/// use image::{RgbImage, Rgb};
+///
+/// let mut img = RgbImage::new(32, 32);
+///
+/// for x in 15..=17 {
+/// for y in 8..24 {
+/// img.put_pixel(x, y, Rgb([255, 0, 0]));
+/// img.put_pixel(y, x, Rgb([255, 0, 0]));
+/// }
+/// }
+/// ```
+///
+/// Overlays an image on top of a larger background raster.
+///
+/// ```no_run
+/// use image::{GenericImage, GenericImageView, ImageBuffer, open};
+///
+/// let on_top = open("path/to/some.png").unwrap().into_rgb8();
+/// let mut img = ImageBuffer::from_fn(512, 512, |x, y| {
+/// if (x + y) % 2 == 0 {
+/// image::Rgb([0, 0, 0])
+/// } else {
+/// image::Rgb([255, 255, 255])
+/// }
+/// });
+///
+/// image::imageops::overlay(&mut img, &on_top, 128, 128);
+/// ```
+///
+/// Convert an RgbaImage to a GrayImage.
+///
+/// ```no_run
+/// use image::{open, DynamicImage};
+///
+/// let rgba = open("path/to/some.png").unwrap().into_rgba8();
+/// let gray = DynamicImage::ImageRgba8(rgba).into_luma8();
+/// ```
+#[derive(Debug, Hash, PartialEq, Eq)]
+pub struct ImageBuffer<P: Pixel, Container> {
+ width: u32,
+ height: u32,
+ _phantom: PhantomData<P>,
+ data: Container,
+}
+
+// generic implementation, shared along all image buffers
+impl<P, Container> ImageBuffer<P, Container>
+where
+ P: Pixel,
+ Container: Deref<Target = [P::Subpixel]>,
+{
+ /// Constructs a buffer from a generic container
+ /// (for example a `Vec` or a slice)
+ ///
+ /// Returns `None` if the container is not big enough (including when the image dimensions
+ /// necessitate an allocation of more bytes than supported by the container).
+ pub fn from_raw(width: u32, height: u32, buf: Container) -> Option<ImageBuffer<P, Container>> {
+ if Self::check_image_fits(width, height, buf.len()) {
+ Some(ImageBuffer {
+ data: buf,
+ width,
+ height,
+ _phantom: PhantomData,
+ })
+ } else {
+ None
+ }
+ }
+
+ /// Returns the underlying raw buffer
+ pub fn into_raw(self) -> Container {
+ self.data
+ }
+
+ /// Returns the underlying raw buffer
+ pub fn as_raw(&self) -> &Container {
+ &self.data
+ }
+
+ /// The width and height of this image.
+ pub fn dimensions(&self) -> (u32, u32) {
+ (self.width, self.height)
+ }
+
+ /// The width of this image.
+ pub fn width(&self) -> u32 {
+ self.width
+ }
+
+ /// The height of this image.
+ pub fn height(&self) -> u32 {
+ self.height
+ }
+
+ // TODO: choose name under which to expose.
+ pub(crate) fn inner_pixels(&self) -> &[P::Subpixel] {
+ let len = Self::image_buffer_len(self.width, self.height).unwrap();
+ &self.data[..len]
+ }
+
+ /// Returns an iterator over the pixels of this image.
+ /// The iteration order is x = 0 to width then y = 0 to height
+ pub fn pixels(&self) -> Pixels<P> {
+ Pixels {
+ chunks: self
+ .inner_pixels()
+ .chunks_exact(<P as Pixel>::CHANNEL_COUNT as usize),
+ }
+ }
+
+ /// Returns an iterator over the rows of this image.
+ ///
+ /// Only non-empty rows can be iterated in this manner. In particular the iterator will not
+ /// yield any item when the width of the image is `0` or a pixel type without any channels is
+ /// used. This ensures that its length can always be represented by `usize`.
+ pub fn rows(&self) -> Rows<P> {
+ Rows::with_image(&self.data, self.width, self.height)
+ }
+
+ /// Enumerates over the pixels of the image.
+ /// The iterator yields the coordinates of each pixel
+ /// along with a reference to them.
+    /// The iteration order is x = 0 to width then y = 0 to height,
+    /// starting from the top left.
+ pub fn enumerate_pixels(&self) -> EnumeratePixels<P> {
+ EnumeratePixels {
+ pixels: self.pixels(),
+ x: 0,
+ y: 0,
+ width: self.width,
+ }
+ }
+
+ /// Enumerates over the rows of the image.
+ /// The iterator yields the y-coordinate of each row
+ /// along with a reference to them.
+ pub fn enumerate_rows(&self) -> EnumerateRows<P> {
+ EnumerateRows {
+ rows: self.rows(),
+ y: 0,
+ width: self.width,
+ }
+ }
+
+ /// Gets a reference to the pixel at location `(x, y)`
+ ///
+ /// # Panics
+ ///
+ /// Panics if `(x, y)` is out of the bounds `(width, height)`.
+ #[inline]
+ #[track_caller]
+ pub fn get_pixel(&self, x: u32, y: u32) -> &P {
+ match self.pixel_indices(x, y) {
+ None => panic!(
+ "Image index {:?} out of bounds {:?}",
+ (x, y),
+ (self.width, self.height)
+ ),
+ Some(pixel_indices) => <P as Pixel>::from_slice(&self.data[pixel_indices]),
+ }
+ }
+
+ /// Gets a reference to the pixel at location `(x, y)` or returns `None` if
+ /// the index is out of the bounds `(width, height)`.
+ pub fn get_pixel_checked(&self, x: u32, y: u32) -> Option<&P> {
+ if x >= self.width {
+ return None;
+ }
+ let num_channels = <P as Pixel>::CHANNEL_COUNT as usize;
+ let i = (y as usize)
+ .saturating_mul(self.width as usize)
+ .saturating_add(x as usize)
+ .saturating_mul(num_channels);
+
+ self.data
+ .get(i..i + num_channels)
+ .map(|pixel_indices| <P as Pixel>::from_slice(pixel_indices))
+ }
+
+ /// Test that the image fits inside the buffer.
+ ///
+    /// Verifies that the number of subpixels required for an image within the bounds does not
+    /// exceed the provided length. Note that as a corollary we also have that the index
+    /// calculation of pixels inside the bounds will not overflow.
+ fn check_image_fits(width: u32, height: u32, len: usize) -> bool {
+ let checked_len = Self::image_buffer_len(width, height);
+ checked_len.map(|min_len| min_len <= len).unwrap_or(false)
+ }
+
+ fn image_buffer_len(width: u32, height: u32) -> Option<usize> {
+ Some(<P as Pixel>::CHANNEL_COUNT as usize)
+ .and_then(|size| size.checked_mul(width as usize))
+ .and_then(|size| size.checked_mul(height as usize))
+ }
+
+ #[inline(always)]
+ fn pixel_indices(&self, x: u32, y: u32) -> Option<Range<usize>> {
+ if x >= self.width || y >= self.height {
+ return None;
+ }
+
+ Some(self.pixel_indices_unchecked(x, y))
+ }
+
+ #[inline(always)]
+ fn pixel_indices_unchecked(&self, x: u32, y: u32) -> Range<usize> {
+ let no_channels = <P as Pixel>::CHANNEL_COUNT as usize;
+ // If in bounds, this can't overflow as we have tested that at construction!
+ let min_index = (y as usize * self.width as usize + x as usize) * no_channels;
+ min_index..min_index + no_channels
+ }
+
+ /// Get the format of the buffer when viewed as a matrix of samples.
+ pub fn sample_layout(&self) -> SampleLayout {
+ // None of these can overflow, as all our memory is addressable.
+ SampleLayout::row_major_packed(<P as Pixel>::CHANNEL_COUNT, self.width, self.height)
+ }
+
+    /// Return the raw sample buffer with its stride and dimension information.
+ ///
+ /// The returned buffer is guaranteed to be well formed in all cases. It is laid out by
+ /// colors, width then height, meaning `channel_stride <= width_stride <= height_stride`. All
+ /// strides are in numbers of elements but those are mostly `u8` in which case the strides are
+ /// also byte strides.
+ pub fn into_flat_samples(self) -> FlatSamples<Container>
+ where
+ Container: AsRef<[P::Subpixel]>,
+ {
+ // None of these can overflow, as all our memory is addressable.
+ let layout = self.sample_layout();
+ FlatSamples {
+ samples: self.data,
+ layout,
+ color_hint: None, // TODO: the pixel type might contain P::COLOR_TYPE if it satisfies PixelWithColorType
+ }
+ }
+
+ /// Return a view on the raw sample buffer.
+ ///
+ /// See [`into_flat_samples`](#method.into_flat_samples) for more details.
+ pub fn as_flat_samples(&self) -> FlatSamples<&[P::Subpixel]>
+ where
+ Container: AsRef<[P::Subpixel]>,
+ {
+ let layout = self.sample_layout();
+ FlatSamples {
+ samples: self.data.as_ref(),
+ layout,
+ color_hint: None, // TODO: the pixel type might contain P::COLOR_TYPE if it satisfies PixelWithColorType
+ }
+ }
+
+ /// Return a mutable view on the raw sample buffer.
+ ///
+ /// See [`into_flat_samples`](#method.into_flat_samples) for more details.
+ pub fn as_flat_samples_mut(&mut self) -> FlatSamples<&mut [P::Subpixel]>
+ where
+ Container: AsMut<[P::Subpixel]>,
+ {
+ let layout = self.sample_layout();
+ FlatSamples {
+ samples: self.data.as_mut(),
+ layout,
+ color_hint: None, // TODO: the pixel type might contain P::COLOR_TYPE if it satisfies PixelWithColorType
+ }
+ }
+}
+
+impl<P, Container> ImageBuffer<P, Container>
+where
+ P: Pixel,
+ Container: Deref<Target = [P::Subpixel]> + DerefMut,
+{
+ // TODO: choose name under which to expose.
+ fn inner_pixels_mut(&mut self) -> &mut [P::Subpixel] {
+ let len = Self::image_buffer_len(self.width, self.height).unwrap();
+ &mut self.data[..len]
+ }
+
+ /// Returns an iterator over the mutable pixels of this image.
+ pub fn pixels_mut(&mut self) -> PixelsMut<P> {
+ PixelsMut {
+ chunks: self
+ .inner_pixels_mut()
+ .chunks_exact_mut(<P as Pixel>::CHANNEL_COUNT as usize),
+ }
+ }
+
+ /// Returns an iterator over the mutable rows of this image.
+ ///
+ /// Only non-empty rows can be iterated in this manner. In particular the iterator will not
+ /// yield any item when the width of the image is `0` or a pixel type without any channels is
+ /// used. This ensures that its length can always be represented by `usize`.
+ pub fn rows_mut(&mut self) -> RowsMut<P> {
+ RowsMut::with_image(&mut self.data, self.width, self.height)
+ }
+
+ /// Enumerates over the pixels of the image.
+ /// The iterator yields the coordinates of each pixel
+ /// along with a mutable reference to them.
+ pub fn enumerate_pixels_mut(&mut self) -> EnumeratePixelsMut<P> {
+ let width = self.width;
+ EnumeratePixelsMut {
+ pixels: self.pixels_mut(),
+ x: 0,
+ y: 0,
+ width,
+ }
+ }
+
+ /// Enumerates over the rows of the image.
+ /// The iterator yields the y-coordinate of each row
+ /// along with a mutable reference to them.
+ pub fn enumerate_rows_mut(&mut self) -> EnumerateRowsMut<P> {
+ let width = self.width;
+ EnumerateRowsMut {
+ rows: self.rows_mut(),
+ y: 0,
+ width,
+ }
+ }
+
+    /// Gets a mutable reference to the pixel at location `(x, y)`
+ ///
+ /// # Panics
+ ///
+ /// Panics if `(x, y)` is out of the bounds `(width, height)`.
+ #[inline]
+ #[track_caller]
+ pub fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut P {
+ match self.pixel_indices(x, y) {
+ None => panic!(
+ "Image index {:?} out of bounds {:?}",
+ (x, y),
+ (self.width, self.height)
+ ),
+ Some(pixel_indices) => <P as Pixel>::from_slice_mut(&mut self.data[pixel_indices]),
+ }
+ }
+
+    /// Gets a mutable reference to the pixel at location `(x, y)` or returns
+ /// `None` if the index is out of the bounds `(width, height)`.
+ pub fn get_pixel_mut_checked(&mut self, x: u32, y: u32) -> Option<&mut P> {
+ if x >= self.width {
+ return None;
+ }
+ let num_channels = <P as Pixel>::CHANNEL_COUNT as usize;
+ let i = (y as usize)
+ .saturating_mul(self.width as usize)
+ .saturating_add(x as usize)
+ .saturating_mul(num_channels);
+
+ self.data
+ .get_mut(i..i + num_channels)
+ .map(|pixel_indices| <P as Pixel>::from_slice_mut(pixel_indices))
+ }
+
+ /// Puts a pixel at location `(x, y)`
+ ///
+ /// # Panics
+ ///
+ /// Panics if `(x, y)` is out of the bounds `(width, height)`.
+ #[inline]
+ #[track_caller]
+ pub fn put_pixel(&mut self, x: u32, y: u32, pixel: P) {
+ *self.get_pixel_mut(x, y) = pixel
+ }
+}
+
+impl<P, Container> ImageBuffer<P, Container>
+where
+ P: Pixel,
+ [P::Subpixel]: EncodableLayout,
+ Container: Deref<Target = [P::Subpixel]>,
+{
+ /// Saves the buffer to a file at the path specified.
+ ///
+ /// The image format is derived from the file extension.
+ pub fn save<Q>(&self, path: Q) -> ImageResult<()>
+ where
+ Q: AsRef<Path>,
+ P: PixelWithColorType,
+ {
+ save_buffer(
+ path,
+ self.inner_pixels().as_bytes(),
+ self.width(),
+ self.height(),
+ <P as PixelWithColorType>::COLOR_TYPE,
+ )
+ }
+}
+
+impl<P, Container> ImageBuffer<P, Container>
+where
+ P: Pixel,
+ [P::Subpixel]: EncodableLayout,
+ Container: Deref<Target = [P::Subpixel]>,
+{
+ /// Saves the buffer to a file at the specified path in
+ /// the specified format.
+ ///
+ /// See [`save_buffer_with_format`](fn.save_buffer_with_format.html) for
+ /// supported types.
+ pub fn save_with_format<Q>(&self, path: Q, format: ImageFormat) -> ImageResult<()>
+ where
+ Q: AsRef<Path>,
+ P: PixelWithColorType,
+ {
+ // This is valid as the subpixel is u8.
+ save_buffer_with_format(
+ path,
+ self.inner_pixels().as_bytes(),
+ self.width(),
+ self.height(),
+ <P as PixelWithColorType>::COLOR_TYPE,
+ format,
+ )
+ }
+}
+
+impl<P, Container> ImageBuffer<P, Container>
+where
+ P: Pixel,
+ [P::Subpixel]: EncodableLayout,
+ Container: Deref<Target = [P::Subpixel]>,
+{
+ /// Writes the buffer to a writer in the specified format.
+ ///
+ /// Assumes the writer is buffered. In most cases,
+ /// you should wrap your writer in a `BufWriter` for best performance.
+ ///
+ /// See [`ImageOutputFormat`](enum.ImageOutputFormat.html) for
+ /// supported types.
+ pub fn write_to<W, F>(&self, writer: &mut W, format: F) -> ImageResult<()>
+ where
+ W: std::io::Write + std::io::Seek,
+ F: Into<ImageOutputFormat>,
+ P: PixelWithColorType,
+ {
+ // This is valid as the subpixel is u8.
+ write_buffer_with_format(
+ writer,
+ self.inner_pixels().as_bytes(),
+ self.width(),
+ self.height(),
+ <P as PixelWithColorType>::COLOR_TYPE,
+ format,
+ )
+ }
+}
+
+impl<P, Container> ImageBuffer<P, Container>
+where
+ P: Pixel,
+ [P::Subpixel]: EncodableLayout,
+ Container: Deref<Target = [P::Subpixel]>,
+{
+ /// Writes the buffer with the given encoder.
+ pub fn write_with_encoder<E>(&self, encoder: E) -> ImageResult<()>
+ where
+ E: ImageEncoder,
+ P: PixelWithColorType,
+ {
+ // This is valid as the subpixel is u8.
+ encoder.write_image(
+ self.inner_pixels().as_bytes(),
+ self.width(),
+ self.height(),
+ <P as PixelWithColorType>::COLOR_TYPE,
+ )
+ }
+}
+
+impl<P, Container> Default for ImageBuffer<P, Container>
+where
+ P: Pixel,
+ Container: Default,
+{
+ fn default() -> Self {
+ Self {
+ width: 0,
+ height: 0,
+ _phantom: PhantomData,
+ data: Default::default(),
+ }
+ }
+}
+
+impl<P, Container> Deref for ImageBuffer<P, Container>
+where
+ P: Pixel,
+ Container: Deref<Target = [P::Subpixel]>,
+{
+ type Target = [P::Subpixel];
+
+ fn deref(&self) -> &<Self as Deref>::Target {
+ &self.data
+ }
+}
+
+impl<P, Container> DerefMut for ImageBuffer<P, Container>
+where
+ P: Pixel,
+ Container: Deref<Target = [P::Subpixel]> + DerefMut,
+{
+ fn deref_mut(&mut self) -> &mut <Self as Deref>::Target {
+ &mut self.data
+ }
+}
+
+impl<P, Container> Index<(u32, u32)> for ImageBuffer<P, Container>
+where
+ P: Pixel,
+ Container: Deref<Target = [P::Subpixel]>,
+{
+ type Output = P;
+
+ fn index(&self, (x, y): (u32, u32)) -> &P {
+ self.get_pixel(x, y)
+ }
+}
+
+impl<P, Container> IndexMut<(u32, u32)> for ImageBuffer<P, Container>
+where
+ P: Pixel,
+ Container: Deref<Target = [P::Subpixel]> + DerefMut,
+{
+ fn index_mut(&mut self, (x, y): (u32, u32)) -> &mut P {
+ self.get_pixel_mut(x, y)
+ }
+}
+
+impl<P, Container> Clone for ImageBuffer<P, Container>
+where
+ P: Pixel,
+ Container: Deref<Target = [P::Subpixel]> + Clone,
+{
+ fn clone(&self) -> ImageBuffer<P, Container> {
+ ImageBuffer {
+ data: self.data.clone(),
+ width: self.width,
+ height: self.height,
+ _phantom: PhantomData,
+ }
+ }
+}
+
+impl<P, Container> GenericImageView for ImageBuffer<P, Container>
+where
+ P: Pixel,
+ Container: Deref<Target = [P::Subpixel]> + Deref,
+{
+ type Pixel = P;
+
+ fn dimensions(&self) -> (u32, u32) {
+ self.dimensions()
+ }
+
+ fn bounds(&self) -> (u32, u32, u32, u32) {
+ (0, 0, self.width, self.height)
+ }
+
+ fn get_pixel(&self, x: u32, y: u32) -> P {
+ *self.get_pixel(x, y)
+ }
+
+ /// Returns the pixel located at (x, y), ignoring bounds checking.
+ #[inline(always)]
+ unsafe fn unsafe_get_pixel(&self, x: u32, y: u32) -> P {
+ let indices = self.pixel_indices_unchecked(x, y);
+ *<P as Pixel>::from_slice(self.data.get_unchecked(indices))
+ }
+}
+
+impl<P, Container> GenericImage for ImageBuffer<P, Container>
+where
+ P: Pixel,
+ Container: Deref<Target = [P::Subpixel]> + DerefMut,
+{
+ fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut P {
+ self.get_pixel_mut(x, y)
+ }
+
+ fn put_pixel(&mut self, x: u32, y: u32, pixel: P) {
+ *self.get_pixel_mut(x, y) = pixel
+ }
+
+ /// Puts a pixel at location (x, y), ignoring bounds checking.
+ #[inline(always)]
+ unsafe fn unsafe_put_pixel(&mut self, x: u32, y: u32, pixel: P) {
+ let indices = self.pixel_indices_unchecked(x, y);
+ let p = <P as Pixel>::from_slice_mut(self.data.get_unchecked_mut(indices));
+ *p = pixel
+ }
+
+ /// Put a pixel at location (x, y), taking into account alpha channels
+ ///
+ /// DEPRECATED: This method will be removed. Blend the pixel directly instead.
+ fn blend_pixel(&mut self, x: u32, y: u32, p: P) {
+ self.get_pixel_mut(x, y).blend(&p)
+ }
+
+ fn copy_within(&mut self, source: Rect, x: u32, y: u32) -> bool {
+ let Rect {
+ x: sx,
+ y: sy,
+ width,
+ height,
+ } = source;
+ let dx = x;
+ let dy = y;
+ assert!(sx < self.width() && dx < self.width());
+ assert!(sy < self.height() && dy < self.height());
+ if self.width() - dx.max(sx) < width || self.height() - dy.max(sy) < height {
+ return false;
+ }
+
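+ // The source and destination rectangles may overlap, so choose the row copy
+ // order such that no source row is overwritten before it has been read:
+ // bottom-up when moving rows downwards (dy > sy), top-down otherwise.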
+ if sy < dy {
+ for y in (0..height).rev() {
+ let sy = sy + y;
+ let dy = dy + y;
+ let Range { start, .. } = self.pixel_indices_unchecked(sx, sy);
+ let Range { end, .. } = self.pixel_indices_unchecked(sx + width - 1, sy);
+ let dst = self.pixel_indices_unchecked(dx, dy).start;
+ self.data.copy_within(start..end, dst);
+ }
+ } else {
+ for y in 0..height {
+ let sy = sy + y;
+ let dy = dy + y;
+ let Range { start, .. } = self.pixel_indices_unchecked(sx, sy);
+ let Range { end, .. } = self.pixel_indices_unchecked(sx + width - 1, sy);
+ let dst = self.pixel_indices_unchecked(dx, dy).start;
+ self.data.copy_within(start..end, dst);
+ }
+ }
+ true
+ }
+}
+
+// concrete implementation for `Vec`-backed buffers
+// TODO: I think that rustc does not "see" this impl any more: the impl with
+// Container meets the same requirements. At least, I got compile errors that
+// there is no such function as `into_vec`, whereas `into_raw` did work, and
+// `into_vec` is redundant anyway, because `into_raw` will give you the vector,
+// and it is more generic.
+impl<P: Pixel> ImageBuffer<P, Vec<P::Subpixel>> {
+ /// Creates a new image buffer based on a `Vec<P::Subpixel>`.
+ ///
+ /// # Panics
+ ///
+ /// Panics when the resulting image is larger than the maximum size of a vector.
+ pub fn new(width: u32, height: u32) -> ImageBuffer<P, Vec<P::Subpixel>> {
+ let size = Self::image_buffer_len(width, height)
+ .expect("Buffer length in `ImageBuffer::new` overflows usize");
+ ImageBuffer {
+ data: vec![Zero::zero(); size],
+ width,
+ height,
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Constructs a new ImageBuffer by copying a pixel
+ ///
+ /// # Panics
+ ///
+ /// Panics when the resulting image is larger than the maximum size of a vector.
+ pub fn from_pixel(width: u32, height: u32, pixel: P) -> ImageBuffer<P, Vec<P::Subpixel>> {
+ let mut buf = ImageBuffer::new(width, height);
+ for p in buf.pixels_mut() {
+ *p = pixel
+ }
+ buf
+ }
+
+ /// Constructs a new ImageBuffer by repeated application of the supplied function.
+ ///
+ /// The arguments to the function are the pixel's x and y coordinates.
+ ///
+ /// # Panics
+ ///
+ /// Panics when the resulting image is larger than the maximum size of a vector.
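+ ///
+ /// A small illustrative sketch that fills a checkerboard pattern:
+ ///
+ /// ```no_run
+ /// use image::{GrayImage, Luma};
+ ///
+ /// let checker = GrayImage::from_fn(8, 8, |x, y| {
+ ///     if (x + y) % 2 == 0 { Luma([255u8]) } else { Luma([0u8]) }
+ /// });
+ /// ```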
+ pub fn from_fn<F>(width: u32, height: u32, mut f: F) -> ImageBuffer<P, Vec<P::Subpixel>>
+ where
+ F: FnMut(u32, u32) -> P,
+ {
+ let mut buf = ImageBuffer::new(width, height);
+ for (x, y, p) in buf.enumerate_pixels_mut() {
+ *p = f(x, y)
+ }
+ buf
+ }
+
+ /// Creates an image buffer out of an existing buffer.
+ /// Returns None if the buffer is not big enough.
+ pub fn from_vec(
+ width: u32,
+ height: u32,
+ buf: Vec<P::Subpixel>,
+ ) -> Option<ImageBuffer<P, Vec<P::Subpixel>>> {
+ ImageBuffer::from_raw(width, height, buf)
+ }
+
+ /// Consumes the image buffer and returns the underlying data
+ /// as an owned buffer
+ pub fn into_vec(self) -> Vec<P::Subpixel> {
+ self.into_raw()
+ }
+}
+
+/// Provides color conversions for whole image buffers.
+pub trait ConvertBuffer<T> {
+ /// Converts `self` to a buffer of type T
+ ///
+ /// A generic implementation is provided to convert any image buffer to an image buffer
+ /// based on a `Vec<T>`.
+ fn convert(&self) -> T;
+}
+
+// concrete implementation Luma -> Rgba
+impl GrayImage {
+ /// Expands a color palette by re-using the existing buffer.
+ /// Assumes 8 bits per pixel. If a transparent palette index is given, pixels with
+ /// that index get an alpha value of 0; all other pixels are fully opaque.
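+ ///
+ /// A minimal sketch with illustrative values:
+ ///
+ /// ```no_run
+ /// use image::GrayImage;
+ ///
+ /// // Two palette indices; index 1 is treated as transparent.
+ /// let indexed = GrayImage::from_raw(2, 1, vec![0, 1]).unwrap();
+ /// let palette: [(u8, u8, u8); 2] = [(255, 0, 0), (0, 255, 0)];
+ /// let rgba = indexed.expand_palette(&palette, Some(1));
+ /// ```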
+ pub fn expand_palette(
+ self,
+ palette: &[(u8, u8, u8)],
+ transparent_idx: Option<u8>,
+ ) -> RgbaImage {
+ let (width, height) = self.dimensions();
+ let mut data = self.into_raw();
+ let entries = data.len();
+ data.resize(entries.checked_mul(4).unwrap(), 0);
+ let mut buffer = ImageBuffer::from_vec(width, height, data).unwrap();
+ expand_packed(&mut buffer, 4, 8, |idx, pixel| {
+ let (r, g, b) = palette[idx as usize];
+ let a = if let Some(t_idx) = transparent_idx {
+ if t_idx == idx {
+ 0
+ } else {
+ 255
+ }
+ } else {
+ 255
+ };
+ pixel[0] = r;
+ pixel[1] = g;
+ pixel[2] = b;
+ pixel[3] = a;
+ });
+ buffer
+ }
+}
+
+// TODO: Equality constraints are not yet supported in where clauses, when they
+// are, the T parameter should be removed in favor of ToType::Subpixel, which
+// will then be FromType::Subpixel.
+impl<Container, FromType: Pixel, ToType: Pixel>
+ ConvertBuffer<ImageBuffer<ToType, Vec<ToType::Subpixel>>> for ImageBuffer<FromType, Container>
+where
+ Container: Deref<Target = [FromType::Subpixel]>,
+ ToType: FromColor<FromType>,
+{
+ /// # Examples
+ /// Convert an RGB image to a grayscale image.
+ /// ```no_run
+ /// use image::buffer::ConvertBuffer;
+ /// use image::GrayImage;
+ ///
+ /// let image_path = "examples/fractal.png";
+ /// let image = image::open(&image_path)
+ /// .expect("Open file failed")
+ /// .to_rgba8();
+ ///
+ /// let gray_image: GrayImage = image.convert();
+ /// ```
+ fn convert(&self) -> ImageBuffer<ToType, Vec<ToType::Subpixel>> {
+ let mut buffer: ImageBuffer<ToType, Vec<ToType::Subpixel>> =
+ ImageBuffer::new(self.width, self.height);
+ for (to, from) in buffer.pixels_mut().zip(self.pixels()) {
+ to.from_color(from)
+ }
+ buffer
+ }
+}
+
+/// Sendable Rgb image buffer
+pub type RgbImage = ImageBuffer<Rgb<u8>, Vec<u8>>;
+/// Sendable Rgb + alpha channel image buffer
+pub type RgbaImage = ImageBuffer<Rgba<u8>, Vec<u8>>;
+/// Sendable grayscale image buffer
+pub type GrayImage = ImageBuffer<Luma<u8>, Vec<u8>>;
+/// Sendable grayscale + alpha channel image buffer
+pub type GrayAlphaImage = ImageBuffer<LumaA<u8>, Vec<u8>>;
+/// Sendable 16-bit Rgb image buffer
+pub(crate) type Rgb16Image = ImageBuffer<Rgb<u16>, Vec<u16>>;
+/// Sendable 16-bit Rgb + alpha channel image buffer
+pub(crate) type Rgba16Image = ImageBuffer<Rgba<u16>, Vec<u16>>;
+/// Sendable 16-bit grayscale image buffer
+pub(crate) type Gray16Image = ImageBuffer<Luma<u16>, Vec<u16>>;
+/// Sendable 16-bit grayscale + alpha channel image buffer
+pub(crate) type GrayAlpha16Image = ImageBuffer<LumaA<u16>, Vec<u16>>;
+
+/// An image buffer for 32-bit float RGB pixels,
+/// where the backing container is a flattened vector of floats.
+pub type Rgb32FImage = ImageBuffer<Rgb<f32>, Vec<f32>>;
+
+/// An image buffer for 32-bit float RGBA pixels,
+/// where the backing container is a flattened vector of floats.
+pub type Rgba32FImage = ImageBuffer<Rgba<f32>, Vec<f32>>;
+
+#[cfg(test)]
+mod test {
+ use super::{GrayImage, ImageBuffer, ImageOutputFormat, RgbImage};
+ use crate::math::Rect;
+ use crate::GenericImage as _;
+ use crate::{color, Rgb};
+
+ #[test]
+ /// Tests if image buffers from slices work
+ fn slice_buffer() {
+ let data = [0; 9];
+ let buf: ImageBuffer<color::Luma<u8>, _> = ImageBuffer::from_raw(3, 3, &data[..]).unwrap();
+ assert_eq!(&*buf, &data[..])
+ }
+
+ #[test]
+ fn get_pixel() {
+ let mut a: RgbImage = ImageBuffer::new(10, 10);
+ {
+ let b = a.get_mut(3 * 10).unwrap();
+ *b = 255;
+ }
+ assert_eq!(a.get_pixel(0, 1)[0], 255)
+ }
+
+ #[test]
+ fn get_pixel_checked() {
+ let mut a: RgbImage = ImageBuffer::new(10, 10);
+ a.get_pixel_mut_checked(0, 1).map(|b| b[0] = 255);
+
+ assert_eq!(a.get_pixel_checked(0, 1), Some(&Rgb([255, 0, 0])));
+ assert_eq!(a.get_pixel_checked(0, 1).unwrap(), a.get_pixel(0, 1));
+ assert_eq!(a.get_pixel_checked(10, 0), None);
+ assert_eq!(a.get_pixel_checked(0, 10), None);
+ assert_eq!(a.get_pixel_mut_checked(10, 0), None);
+ assert_eq!(a.get_pixel_mut_checked(0, 10), None);
+
+ // From image/issues/1672
+ const WHITE: Rgb<u8> = Rgb([255_u8, 255, 255]);
+ let mut a = RgbImage::new(2, 1);
+ a.put_pixel(1, 0, WHITE);
+
+ assert_eq!(a.get_pixel_checked(1, 0), Some(&WHITE));
+ assert_eq!(a.get_pixel_checked(1, 0).unwrap(), a.get_pixel(1, 0));
+ }
+
+ #[test]
+ fn mut_iter() {
+ let mut a: RgbImage = ImageBuffer::new(10, 10);
+ {
+ let val = a.pixels_mut().next().unwrap();
+ *val = Rgb([42, 0, 0]);
+ }
+ assert_eq!(a.data[0], 42)
+ }
+
+ #[test]
+ fn zero_width_zero_height() {
+ let mut image = RgbImage::new(0, 0);
+
+ assert_eq!(image.rows_mut().count(), 0);
+ assert_eq!(image.pixels_mut().count(), 0);
+ assert_eq!(image.rows().count(), 0);
+ assert_eq!(image.pixels().count(), 0);
+ }
+
+ #[test]
+ fn zero_width_nonzero_height() {
+ let mut image = RgbImage::new(0, 2);
+
+ assert_eq!(image.rows_mut().count(), 0);
+ assert_eq!(image.pixels_mut().count(), 0);
+ assert_eq!(image.rows().count(), 0);
+ assert_eq!(image.pixels().count(), 0);
+ }
+
+ #[test]
+ fn nonzero_width_zero_height() {
+ let mut image = RgbImage::new(2, 0);
+
+ assert_eq!(image.rows_mut().count(), 0);
+ assert_eq!(image.pixels_mut().count(), 0);
+ assert_eq!(image.rows().count(), 0);
+ assert_eq!(image.pixels().count(), 0);
+ }
+
+ #[test]
+ fn pixels_on_large_buffer() {
+ let mut image = RgbImage::from_raw(1, 1, vec![0; 6]).unwrap();
+
+ assert_eq!(image.pixels().count(), 1);
+ assert_eq!(image.enumerate_pixels().count(), 1);
+ assert_eq!(image.pixels_mut().count(), 1);
+ assert_eq!(image.enumerate_pixels_mut().count(), 1);
+
+ assert_eq!(image.rows().count(), 1);
+ assert_eq!(image.rows_mut().count(), 1);
+ }
+
+ #[test]
+ fn default() {
+ let image = ImageBuffer::<Rgb<u8>, Vec<u8>>::default();
+ assert_eq!(image.dimensions(), (0, 0));
+ }
+
+ #[test]
+ #[rustfmt::skip]
+ fn test_image_buffer_copy_within_oob() {
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, vec![0u8; 16]).unwrap();
+ assert!(!image.copy_within(Rect { x: 0, y: 0, width: 5, height: 4 }, 0, 0));
+ assert!(!image.copy_within(Rect { x: 0, y: 0, width: 4, height: 5 }, 0, 0));
+ assert!(!image.copy_within(Rect { x: 1, y: 0, width: 4, height: 4 }, 0, 0));
+ assert!(!image.copy_within(Rect { x: 0, y: 0, width: 4, height: 4 }, 1, 0));
+ assert!(!image.copy_within(Rect { x: 0, y: 1, width: 4, height: 4 }, 0, 0));
+ assert!(!image.copy_within(Rect { x: 0, y: 0, width: 4, height: 4 }, 0, 1));
+ assert!(!image.copy_within(Rect { x: 1, y: 1, width: 4, height: 4 }, 0, 0));
+ }
+
+ #[test]
+ fn test_image_buffer_copy_within_tl() {
+ let data = &[
+ 00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15,
+ ];
+ let expected = [
+ 00, 01, 02, 03, 04, 00, 01, 02, 08, 04, 05, 06, 12, 08, 09, 10,
+ ];
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
+ assert!(image.copy_within(
+ Rect {
+ x: 0,
+ y: 0,
+ width: 3,
+ height: 3
+ },
+ 1,
+ 1
+ ));
+ assert_eq!(&image.into_raw(), &expected);
+ }
+
+ #[test]
+ fn test_image_buffer_copy_within_tr() {
+ let data = &[
+ 00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15,
+ ];
+ let expected = [
+ 00, 01, 02, 03, 01, 02, 03, 07, 05, 06, 07, 11, 09, 10, 11, 15,
+ ];
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
+ assert!(image.copy_within(
+ Rect {
+ x: 1,
+ y: 0,
+ width: 3,
+ height: 3
+ },
+ 0,
+ 1
+ ));
+ assert_eq!(&image.into_raw(), &expected);
+ }
+
+ #[test]
+ fn test_image_buffer_copy_within_bl() {
+ let data = &[
+ 00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15,
+ ];
+ let expected = [
+ 00, 04, 05, 06, 04, 08, 09, 10, 08, 12, 13, 14, 12, 13, 14, 15,
+ ];
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
+ assert!(image.copy_within(
+ Rect {
+ x: 0,
+ y: 1,
+ width: 3,
+ height: 3
+ },
+ 1,
+ 0
+ ));
+ assert_eq!(&image.into_raw(), &expected);
+ }
+
+ #[test]
+ fn test_image_buffer_copy_within_br() {
+ let data = &[
+ 00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15,
+ ];
+ let expected = [
+ 05, 06, 07, 03, 09, 10, 11, 07, 13, 14, 15, 11, 12, 13, 14, 15,
+ ];
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
+ assert!(image.copy_within(
+ Rect {
+ x: 1,
+ y: 1,
+ width: 3,
+ height: 3
+ },
+ 0,
+ 0
+ ));
+ assert_eq!(&image.into_raw(), &expected);
+ }
+
+ #[test]
+ #[cfg(feature = "png")]
+ fn write_to_with_large_buffer() {
+ // A buffer of 1 pixel, padded to 4 bytes as would be common in e.g. BMP.
+ let img: GrayImage = ImageBuffer::from_raw(1, 1, vec![0u8; 4]).unwrap();
+ let mut buffer = std::io::Cursor::new(vec![]);
+ assert!(img.write_to(&mut buffer, ImageOutputFormat::Png).is_ok());
+ }
+
+ #[test]
+ fn exact_size_iter_size_hint() {
+ // The docs for `std::iter::ExactSizeIterator` require that the implementation of
+ // `size_hint` on the iterator returns the same value as the `len` implementation.
+
+ // This test should work for any size image.
+ const N: u32 = 10;
+
+ let mut image = RgbImage::from_raw(N, N, vec![0; (N * N * 3) as usize]).unwrap();
+
+ let iter = image.pixels();
+ let exact_len = ExactSizeIterator::len(&iter);
+ assert_eq!(iter.size_hint(), (exact_len, Some(exact_len)));
+
+ let iter = image.pixels_mut();
+ let exact_len = ExactSizeIterator::len(&iter);
+ assert_eq!(iter.size_hint(), (exact_len, Some(exact_len)));
+
+ let iter = image.rows();
+ let exact_len = ExactSizeIterator::len(&iter);
+ assert_eq!(iter.size_hint(), (exact_len, Some(exact_len)));
+
+ let iter = image.rows_mut();
+ let exact_len = ExactSizeIterator::len(&iter);
+ assert_eq!(iter.size_hint(), (exact_len, Some(exact_len)));
+
+ let iter = image.enumerate_pixels();
+ let exact_len = ExactSizeIterator::len(&iter);
+ assert_eq!(iter.size_hint(), (exact_len, Some(exact_len)));
+
+ let iter = image.enumerate_rows();
+ let exact_len = ExactSizeIterator::len(&iter);
+ assert_eq!(iter.size_hint(), (exact_len, Some(exact_len)));
+
+ let iter = image.enumerate_pixels_mut();
+ let exact_len = ExactSizeIterator::len(&iter);
+ assert_eq!(iter.size_hint(), (exact_len, Some(exact_len)));
+
+ let iter = image.enumerate_rows_mut();
+ let exact_len = ExactSizeIterator::len(&iter);
+ assert_eq!(iter.size_hint(), (exact_len, Some(exact_len)));
+ }
+}
+
+#[cfg(test)]
+#[cfg(feature = "benchmarks")]
+mod benchmarks {
+ use super::{ConvertBuffer, GrayImage, ImageBuffer, Pixel, RgbImage};
+ use crate::GenericImage;
+ use test;
+
+ #[bench]
+ fn conversion(b: &mut test::Bencher) {
+ let mut a: RgbImage = ImageBuffer::new(1000, 1000);
+ for p in a.pixels_mut() {
+ let rgb = p.channels_mut();
+ rgb[0] = 255;
+ rgb[1] = 23;
+ rgb[2] = 42;
+ }
+ assert!(a.data[0] != 0);
+ b.iter(|| {
+ let b: GrayImage = a.convert();
+ assert!(0 != b.data[0]);
+ assert!(a.data[0] != b.data[0]);
+ test::black_box(b);
+ });
+ b.bytes = 1000 * 1000 * 3
+ }
+
+ #[bench]
+ fn image_access_row_by_row(b: &mut test::Bencher) {
+ let mut a: RgbImage = ImageBuffer::new(1000, 1000);
+ for p in a.pixels_mut() {
+ let rgb = p.channels_mut();
+ rgb[0] = 255;
+ rgb[1] = 23;
+ rgb[2] = 42;
+ }
+
+ b.iter(move || {
+ let image: &RgbImage = test::black_box(&a);
+ let mut sum: usize = 0;
+ for y in 0..1000 {
+ for x in 0..1000 {
+ let pixel = image.get_pixel(x, y);
+ sum = sum.wrapping_add(pixel[0] as usize);
+ sum = sum.wrapping_add(pixel[1] as usize);
+ sum = sum.wrapping_add(pixel[2] as usize);
+ }
+ }
+ test::black_box(sum)
+ });
+
+ b.bytes = 1000 * 1000 * 3;
+ }
+
+ #[bench]
+ fn image_access_col_by_col(b: &mut test::Bencher) {
+ let mut a: RgbImage = ImageBuffer::new(1000, 1000);
+ for p in a.pixels_mut() {
+ let rgb = p.channels_mut();
+ rgb[0] = 255;
+ rgb[1] = 23;
+ rgb[2] = 42;
+ }
+
+ b.iter(move || {
+ let image: &RgbImage = test::black_box(&a);
+ let mut sum: usize = 0;
+ for x in 0..1000 {
+ for y in 0..1000 {
+ let pixel = image.get_pixel(x, y);
+ sum = sum.wrapping_add(pixel[0] as usize);
+ sum = sum.wrapping_add(pixel[1] as usize);
+ sum = sum.wrapping_add(pixel[2] as usize);
+ }
+ }
+ test::black_box(sum)
+ });
+
+ b.bytes = 1000 * 1000 * 3;
+ }
+}
diff --git a/vendor/image/src/codecs/avif/decoder.rs b/vendor/image/src/codecs/avif/decoder.rs
new file mode 100644
index 0000000..acba4f8
--- /dev/null
+++ b/vendor/image/src/codecs/avif/decoder.rs
@@ -0,0 +1,177 @@
+//! Decoding of AVIF images.
+///
+/// The [AVIF] specification defines an image derivative of the AV1 bitstream, an open video codec.
+///
+/// [AVIF]: https://aomediacodec.github.io/av1-avif/
+use std::convert::TryFrom;
+use std::error::Error;
+use std::io::{self, Cursor, Read};
+use std::marker::PhantomData;
+use std::mem;
+
+use crate::error::DecodingError;
+use crate::{ColorType, ImageDecoder, ImageError, ImageFormat, ImageResult};
+
+use dav1d::{PixelLayout, PlanarImageComponent};
+use dcv_color_primitives as dcp;
+use mp4parse::{read_avif, ParseStrictness};
+
+fn error_map<E: Into<Box<dyn Error + Send + Sync>>>(err: E) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Avif.into(), err))
+}
+
+/// AVIF Decoder.
+///
+ /// Reads one image from the chosen input.
+pub struct AvifDecoder<R> {
+ inner: PhantomData<R>,
+ picture: dav1d::Picture,
+ alpha_picture: Option<dav1d::Picture>,
+ icc_profile: Option<Vec<u8>>,
+}
+
+impl<R: Read> AvifDecoder<R> {
+ /// Create a new decoder that reads its input from `r`.
+ pub fn new(mut r: R) -> ImageResult<Self> {
+ let ctx = read_avif(&mut r, ParseStrictness::Normal).map_err(error_map)?;
+ let coded = ctx.primary_item_coded_data().unwrap_or_default();
+
+ let mut primary_decoder = dav1d::Decoder::new();
+ primary_decoder
+ .send_data(coded, None, None, None)
+ .map_err(error_map)?;
+ let picture = primary_decoder.get_picture().map_err(error_map)?;
+ let alpha_item = ctx.alpha_item_coded_data().unwrap_or_default();
+ let alpha_picture = if !alpha_item.is_empty() {
+ let mut alpha_decoder = dav1d::Decoder::new();
+ alpha_decoder
+ .send_data(alpha_item, None, None, None)
+ .map_err(error_map)?;
+ Some(alpha_decoder.get_picture().map_err(error_map)?)
+ } else {
+ None
+ };
+ let icc_profile = ctx
+ .icc_colour_information()
+ .map(|x| x.ok().unwrap_or_default())
+ .map(|x| x.to_vec());
+
+ assert_eq!(picture.bit_depth(), 8);
+ Ok(AvifDecoder {
+ inner: PhantomData,
+ picture,
+ alpha_picture,
+ icc_profile,
+ })
+ }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct AvifReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for AvifReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ if self.0.position() == 0 && buf.is_empty() {
+ mem::swap(buf, self.0.get_mut());
+ Ok(buf.len())
+ } else {
+ self.0.read_to_end(buf)
+ }
+ }
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for AvifDecoder<R> {
+ type Reader = AvifReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.picture.width(), self.picture.height())
+ }
+
+ fn color_type(&self) -> ColorType {
+ ColorType::Rgba8
+ }
+
+ fn icc_profile(&mut self) -> Option<Vec<u8>> {
+ self.icc_profile.clone()
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ let plane = self.picture.plane(PlanarImageComponent::Y);
+ Ok(AvifReader(
+ Cursor::new(plane.as_ref().to_vec()),
+ PhantomData,
+ ))
+ }
+
+ fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+
+ dcp::initialize();
+
+ if self.picture.pixel_layout() != PixelLayout::I400 {
+ let pixel_format = match self.picture.pixel_layout() {
+ PixelLayout::I400 => todo!(),
+ PixelLayout::I420 => dcp::PixelFormat::I420,
+ PixelLayout::I422 => dcp::PixelFormat::I422,
+ PixelLayout::I444 => dcp::PixelFormat::I444,
+ PixelLayout::Unknown => panic!("Unknown pixel layout"),
+ };
+ let src_format = dcp::ImageFormat {
+ pixel_format,
+ color_space: dcp::ColorSpace::Bt601,
+ num_planes: 3,
+ };
+ let dst_format = dcp::ImageFormat {
+ pixel_format: dcp::PixelFormat::Rgba,
+ color_space: dcp::ColorSpace::Lrgb,
+ num_planes: 1,
+ };
+ let (width, height) = self.dimensions();
+ let planes = &[
+ self.picture.plane(PlanarImageComponent::Y),
+ self.picture.plane(PlanarImageComponent::U),
+ self.picture.plane(PlanarImageComponent::V),
+ ];
+ let src_buffers = planes.iter().map(AsRef::as_ref).collect::<Vec<_>>();
+ let strides = &[
+ self.picture.stride(PlanarImageComponent::Y) as usize,
+ self.picture.stride(PlanarImageComponent::U) as usize,
+ self.picture.stride(PlanarImageComponent::V) as usize,
+ ];
+ let dst_buffers = &mut [&mut buf[..]];
+ dcp::convert_image(
+ width,
+ height,
+ &src_format,
+ Some(strides),
+ &src_buffers,
+ &dst_format,
+ None,
+ dst_buffers,
+ )
+ .map_err(error_map)?;
+ } else {
+ let plane = self.picture.plane(PlanarImageComponent::Y);
+ buf.copy_from_slice(plane.as_ref());
+ }
+
+ if let Some(picture) = self.alpha_picture {
+ assert_eq!(picture.pixel_layout(), PixelLayout::I400);
+ let stride = picture.stride(PlanarImageComponent::Y) as usize;
+ let plane = picture.plane(PlanarImageComponent::Y);
+ let width = picture.width();
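+ // Copy the 8-bit alpha plane into every fourth byte (the A channel) of the
+ // RGBA output, row by row; the plane stride may be wider than the image width.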
+ for (buf, slice) in Iterator::zip(
+ buf.chunks_exact_mut(width as usize * 4),
+ plane.as_ref().chunks_exact(stride),
+ ) {
+ for i in 0..width as usize {
+ buf[3 + i * 4] = slice[i];
+ }
+ }
+ }
+
+ Ok(())
+ }
+}
diff --git a/vendor/image/src/codecs/avif/encoder.rs b/vendor/image/src/codecs/avif/encoder.rs
new file mode 100644
index 0000000..7484ff1
--- /dev/null
+++ b/vendor/image/src/codecs/avif/encoder.rs
@@ -0,0 +1,274 @@
+//! Encoding of AVIF images.
+///
+/// The [AVIF] specification defines an image derivative of the AV1 bitstream, an open video codec.
+///
+/// [AVIF]: https://aomediacodec.github.io/av1-avif/
+use std::borrow::Cow;
+use std::cmp::min;
+use std::io::Write;
+
+use crate::buffer::ConvertBuffer;
+use crate::color::{FromColor, Luma, LumaA, Rgb, Rgba};
+use crate::error::{
+ EncodingError, ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::{ColorType, ImageBuffer, ImageEncoder, ImageFormat, Pixel};
+use crate::{ImageError, ImageResult};
+
+use bytemuck::{try_cast_slice, try_cast_slice_mut, Pod, PodCastError};
+use num_traits::Zero;
+use ravif::{Encoder, Img, RGB8, RGBA8};
+use rgb::AsPixels;
+
+/// AVIF Encoder.
+///
+/// Writes one image into the chosen output.
+pub struct AvifEncoder<W> {
+ inner: W,
+ encoder: Encoder,
+}
+
+/// An enumeration over supported AVIF color spaces
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[non_exhaustive]
+pub enum ColorSpace {
+ /// sRGB colorspace
+ Srgb,
+ /// BT.709 colorspace
+ Bt709,
+}
+
+impl ColorSpace {
+ fn to_ravif(self) -> ravif::ColorSpace {
+ match self {
+ Self::Srgb => ravif::ColorSpace::RGB,
+ Self::Bt709 => ravif::ColorSpace::YCbCr,
+ }
+ }
+}
+
+enum RgbColor<'buf> {
+ Rgb8(Img<&'buf [RGB8]>),
+ Rgba8(Img<&'buf [RGBA8]>),
+}
+
+impl<W: Write> AvifEncoder<W> {
+ /// Create a new encoder that writes its output to `w`.
+ pub fn new(w: W) -> Self {
+ AvifEncoder::new_with_speed_quality(w, 4, 80) // `cavif` uses these defaults
+ }
+
+ /// Create a new encoder with the specified speed and quality that writes its output to `w`.
+ /// `speed` accepts a value in the range 0-10, where 0 is the slowest and 10 is the fastest.
+ /// `quality` accepts a value in the range 0-100, where 0 is the worst and 100 is the best.
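+ ///
+ /// A minimal sketch with illustrative values:
+ ///
+ /// ```no_run
+ /// use image::codecs::avif::AvifEncoder;
+ ///
+ /// let mut out = Vec::new();
+ /// let encoder = AvifEncoder::new_with_speed_quality(&mut out, 6, 90);
+ /// ```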
+ pub fn new_with_speed_quality(w: W, speed: u8, quality: u8) -> Self {
+ // Clamp quality and speed to range
+ let quality = min(quality, 100);
+ let speed = min(speed, 10);
+
+ let encoder = Encoder::new()
+ .with_quality(f32::from(quality))
+ .with_alpha_quality(f32::from(quality))
+ .with_speed(speed);
+
+ AvifEncoder { inner: w, encoder }
+ }
+
+ /// Encode with the specified `color_space`.
+ pub fn with_colorspace(mut self, color_space: ColorSpace) -> Self {
+ self.encoder = self
+ .encoder
+ .with_internal_color_space(color_space.to_ravif());
+ self
+ }
+
+ /// Configures `rayon` thread pool size.
+ /// The default `None` is to use all threads in the default `rayon` thread pool.
+ pub fn with_num_threads(mut self, num_threads: Option<usize>) -> Self {
+ self.encoder = self.encoder.with_num_threads(num_threads);
+ self
+ }
+}
+
+impl<W: Write> ImageEncoder for AvifEncoder<W> {
+ /// Encode image data with the indicated color type.
+ ///
+ /// The encoder currently requires all data to be RGBA8; it will be converted internally if
+ /// necessary. When the data is suitably aligned, i.e. u16 channels aligned to two bytes,
+ /// the conversion may be more efficient.
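+ ///
+ /// A minimal, illustrative sketch encoding a 4x4 RGBA8 buffer:
+ ///
+ /// ```no_run
+ /// use image::codecs::avif::AvifEncoder;
+ /// use image::{ColorType, ImageEncoder};
+ ///
+ /// let pixels = vec![0u8; 4 * 4 * 4];
+ /// let mut out = Vec::new();
+ /// AvifEncoder::new(&mut out)
+ ///     .write_image(&pixels, 4, 4, ColorType::Rgba8)
+ ///     .expect("failed to encode image");
+ /// ```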
+ fn write_image(
+ mut self,
+ data: &[u8],
+ width: u32,
+ height: u32,
+ color: ColorType,
+ ) -> ImageResult<()> {
+ self.set_color(color);
+ // `ravif` needs strongly typed data so let's convert. We can either use a temporarily
+ // owned version in our own buffer or zero-copy if possible by using the input buffer.
+ // This requires going through `rgb`.
+ let mut fallback = vec![]; // This vector is used if we need to do a color conversion.
+ let result = match Self::encode_as_img(&mut fallback, data, width, height, color)? {
+ RgbColor::Rgb8(buffer) => self.encoder.encode_rgb(buffer),
+ RgbColor::Rgba8(buffer) => self.encoder.encode_rgba(buffer),
+ };
+ let data = result.map_err(|err| {
+ ImageError::Encoding(EncodingError::new(ImageFormat::Avif.into(), err))
+ })?;
+ self.inner.write_all(&data.avif_file)?;
+ Ok(())
+ }
+}
+
+impl<W: Write> AvifEncoder<W> {
+ // Does not currently do anything. Mirrors behaviour of old config function.
+ fn set_color(&mut self, _color: ColorType) {
+ // self.config.color_space = ColorSpace::RGB;
+ }
+
+ fn encode_as_img<'buf>(
+ fallback: &'buf mut Vec<u8>,
+ data: &'buf [u8],
+ width: u32,
+ height: u32,
+ color: ColorType,
+ ) -> ImageResult<RgbColor<'buf>> {
+ // Error wrapping utility for color dependent buffer dimensions.
+ fn try_from_raw<P: Pixel + 'static>(
+ data: &[P::Subpixel],
+ width: u32,
+ height: u32,
+ ) -> ImageResult<ImageBuffer<P, &[P::Subpixel]>> {
+ ImageBuffer::from_raw(width, height, data).ok_or_else(|| {
+ ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ ))
+ })
+ }
+
+ // Convert to target color type using few buffer allocations.
+ fn convert_into<'buf, P>(
+ buf: &'buf mut Vec<u8>,
+ image: ImageBuffer<P, &[P::Subpixel]>,
+ ) -> Img<&'buf [RGBA8]>
+ where
+ P: Pixel + 'static,
+ Rgba<u8>: FromColor<P>,
+ {
+ let (width, height) = image.dimensions();
+ // TODO: conversion re-using the target buffer?
+ let image: ImageBuffer<Rgba<u8>, _> = image.convert();
+ *buf = image.into_raw();
+ Img::new(buf.as_pixels(), width as usize, height as usize)
+ }
+
+ // Cast the input slice using few buffer allocations if possible.
+ // In particular try not to allocate if the caller did the infallible reverse.
+ fn cast_buffer<Channel>(buf: &[u8]) -> ImageResult<Cow<[Channel]>>
+ where
+ Channel: Pod + Zero,
+ {
+ match try_cast_slice(buf) {
+ Ok(slice) => Ok(Cow::Borrowed(slice)),
+ Err(PodCastError::OutputSliceWouldHaveSlop) => Err(ImageError::Parameter(
+ ParameterError::from_kind(ParameterErrorKind::DimensionMismatch),
+ )),
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned) => {
+ // Sad, but let's allocate.
+ // bytemuck checks alignment _before_ slop, but size mismatch before this.
+ if buf.len() % std::mem::size_of::<Channel>() != 0 {
+ Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )))
+ } else {
+ let len = buf.len() / std::mem::size_of::<Channel>();
+ let mut data = vec![Channel::zero(); len];
+ let view = try_cast_slice_mut::<_, u8>(data.as_mut_slice()).unwrap();
+ view.copy_from_slice(buf);
+ Ok(Cow::Owned(data))
+ }
+ }
+ Err(err) => {
+ // Are you trying to encode a ZST??
+ Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(format!("{:?}", err)),
+ )))
+ }
+ }
+ }
+
+ match color {
+ ColorType::Rgb8 => {
+ // ravif doesn't do any checks but has some asserts, so we do the checks.
+ let img = try_from_raw::<Rgb<u8>>(data, width, height)?;
+ // Now, internally ravif uses u32 but it takes usize. We could do some checked
+ // conversion but instead we use that a non-empty image must be addressable.
+ if img.pixels().len() == 0 {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+
+ Ok(RgbColor::Rgb8(Img::new(
+ rgb::AsPixels::as_pixels(data),
+ width as usize,
+ height as usize,
+ )))
+ }
+ ColorType::Rgba8 => {
+ // ravif doesn't do any checks but has some asserts, so we do the checks.
+ let img = try_from_raw::<Rgba<u8>>(data, width, height)?;
+ // Now, internally ravif uses u32 but it takes usize. We could do some checked
+ // conversion but instead we use that a non-empty image must be addressable.
+ if img.pixels().len() == 0 {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+
+ Ok(RgbColor::Rgba8(Img::new(
+ rgb::AsPixels::as_pixels(data),
+ width as usize,
+ height as usize,
+ )))
+ }
+ // we need a separate buffer..
+ ColorType::L8 => {
+ let image = try_from_raw::<Luma<u8>>(data, width, height)?;
+ Ok(RgbColor::Rgba8(convert_into(fallback, image)))
+ }
+ ColorType::La8 => {
+ let image = try_from_raw::<LumaA<u8>>(data, width, height)?;
+ Ok(RgbColor::Rgba8(convert_into(fallback, image)))
+ }
+ // we need to really convert data..
+ ColorType::L16 => {
+ let buffer = cast_buffer(data)?;
+ let image = try_from_raw::<Luma<u16>>(&buffer, width, height)?;
+ Ok(RgbColor::Rgba8(convert_into(fallback, image)))
+ }
+ ColorType::La16 => {
+ let buffer = cast_buffer(data)?;
+ let image = try_from_raw::<LumaA<u16>>(&buffer, width, height)?;
+ Ok(RgbColor::Rgba8(convert_into(fallback, image)))
+ }
+ ColorType::Rgb16 => {
+ let buffer = cast_buffer(data)?;
+ let image = try_from_raw::<Rgb<u16>>(&buffer, width, height)?;
+ Ok(RgbColor::Rgba8(convert_into(fallback, image)))
+ }
+ ColorType::Rgba16 => {
+ let buffer = cast_buffer(data)?;
+ let image = try_from_raw::<Rgba<u16>>(&buffer, width, height)?;
+ Ok(RgbColor::Rgba8(convert_into(fallback, image)))
+ }
+ // for cases we do not support at all?
+ _ => Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Avif.into(),
+ UnsupportedErrorKind::Color(color.into()),
+ ),
+ )),
+ }
+ }
+}
diff --git a/vendor/image/src/codecs/avif/mod.rs b/vendor/image/src/codecs/avif/mod.rs
new file mode 100644
index 0000000..f74217c
--- /dev/null
+++ b/vendor/image/src/codecs/avif/mod.rs
@@ -0,0 +1,14 @@
+//! Encoding and decoding of AVIF images.
+///
+/// The [AVIF] specification defines an image derivative of the AV1 bitstream, an open video codec.
+///
+/// [AVIF]: https://aomediacodec.github.io/av1-avif/
+#[cfg(feature = "avif-decoder")]
+pub use self::decoder::AvifDecoder;
+#[cfg(feature = "avif-encoder")]
+pub use self::encoder::{AvifEncoder, ColorSpace};
+
+#[cfg(feature = "avif-decoder")]
+mod decoder;
+#[cfg(feature = "avif-encoder")]
+mod encoder;
diff --git a/vendor/image/src/codecs/bmp/decoder.rs b/vendor/image/src/codecs/bmp/decoder.rs
new file mode 100644
index 0000000..58c0650
--- /dev/null
+++ b/vendor/image/src/codecs/bmp/decoder.rs
@@ -0,0 +1,1483 @@
+use std::cmp::{self, Ordering};
+use std::convert::TryFrom;
+use std::io::{self, Cursor, Read, Seek, SeekFrom};
+use std::iter::{repeat, Iterator, Rev};
+use std::marker::PhantomData;
+use std::slice::ChunksMut;
+use std::{error, fmt, mem};
+
+use byteorder::{LittleEndian, ReadBytesExt};
+
+use crate::color::ColorType;
+use crate::error::{
+ DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::{self, ImageDecoder, ImageDecoderRect, ImageFormat, Progress};
+
+const BITMAPCOREHEADER_SIZE: u32 = 12;
+const BITMAPINFOHEADER_SIZE: u32 = 40;
+const BITMAPV2HEADER_SIZE: u32 = 52;
+const BITMAPV3HEADER_SIZE: u32 = 56;
+const BITMAPV4HEADER_SIZE: u32 = 108;
+const BITMAPV5HEADER_SIZE: u32 = 124;
+
+static LOOKUP_TABLE_3_BIT_TO_8_BIT: [u8; 8] = [0, 36, 73, 109, 146, 182, 219, 255];
+static LOOKUP_TABLE_4_BIT_TO_8_BIT: [u8; 16] = [
+ 0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255,
+];
+static LOOKUP_TABLE_5_BIT_TO_8_BIT: [u8; 32] = [
+ 0, 8, 16, 25, 33, 41, 49, 58, 66, 74, 82, 90, 99, 107, 115, 123, 132, 140, 148, 156, 165, 173,
+ 181, 189, 197, 206, 214, 222, 230, 239, 247, 255,
+];
+static LOOKUP_TABLE_6_BIT_TO_8_BIT: [u8; 64] = [
+ 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 45, 49, 53, 57, 61, 65, 69, 73, 77, 81, 85, 89, 93,
+ 97, 101, 105, 109, 113, 117, 121, 125, 130, 134, 138, 142, 146, 150, 154, 158, 162, 166, 170,
+ 174, 178, 182, 186, 190, 194, 198, 202, 206, 210, 215, 219, 223, 227, 231, 235, 239, 243, 247,
+ 251, 255,
+];
+
+static R5_G5_B5_COLOR_MASK: Bitfields = Bitfields {
+ r: Bitfield { len: 5, shift: 10 },
+ g: Bitfield { len: 5, shift: 5 },
+ b: Bitfield { len: 5, shift: 0 },
+ a: Bitfield { len: 0, shift: 0 },
+};
+const R8_G8_B8_COLOR_MASK: Bitfields = Bitfields {
+ r: Bitfield { len: 8, shift: 24 },
+ g: Bitfield { len: 8, shift: 16 },
+ b: Bitfield { len: 8, shift: 8 },
+ a: Bitfield { len: 0, shift: 0 },
+};
+const R8_G8_B8_A8_COLOR_MASK: Bitfields = Bitfields {
+ r: Bitfield { len: 8, shift: 16 },
+ g: Bitfield { len: 8, shift: 8 },
+ b: Bitfield { len: 8, shift: 0 },
+ a: Bitfield { len: 8, shift: 24 },
+};
+
+const RLE_ESCAPE: u8 = 0;
+const RLE_ESCAPE_EOL: u8 = 0;
+const RLE_ESCAPE_EOF: u8 = 1;
+const RLE_ESCAPE_DELTA: u8 = 2;
+
+/// The maximum width/height the decoder will process.
+const MAX_WIDTH_HEIGHT: i32 = 0xFFFF;
+
+#[derive(PartialEq, Copy, Clone)]
+enum ImageType {
+ Palette,
+ RGB16,
+ RGB24,
+ RGB32,
+ RGBA32,
+ RLE8,
+ RLE4,
+ Bitfields16,
+ Bitfields32,
+}
+
+#[derive(PartialEq)]
+enum BMPHeaderType {
+ Core,
+ Info,
+ V2,
+ V3,
+ V4,
+ V5,
+}
+
+#[derive(PartialEq)]
+enum FormatFullBytes {
+ RGB24,
+ RGB32,
+ RGBA32,
+ Format888,
+}
+
+enum Chunker<'a> {
+ FromTop(ChunksMut<'a, u8>),
+ FromBottom(Rev<ChunksMut<'a, u8>>),
+}
+
+pub(crate) struct RowIterator<'a> {
+ chunks: Chunker<'a>,
+}
+
+impl<'a> Iterator for RowIterator<'a> {
+ type Item = &'a mut [u8];
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<&'a mut [u8]> {
+ match self.chunks {
+ Chunker::FromTop(ref mut chunks) => chunks.next(),
+ Chunker::FromBottom(ref mut chunks) => chunks.next(),
+ }
+ }
+}
+
+/// All errors that can occur when attempting to parse a BMP
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+enum DecoderError {
+ /// Failed to decompress RLE data.
+ CorruptRleData,
+
+ /// The bitfield mask interleaves set and unset bits
+ BitfieldMaskNonContiguous,
+ /// Bitfield mask invalid (e.g. too long for specified type)
+ BitfieldMaskInvalid,
+ /// Bitfield (of the specified width – 16- or 32-bit) mask not present
+ BitfieldMaskMissing(u32),
+ /// Bitfield (of the specified width – 16- or 32-bit) masks not present
+ BitfieldMasksMissing(u32),
+
+ /// BMP's "BM" signature wrong or missing
+ BmpSignatureInvalid,
+ /// More than one plane was specified, although the format allows exactly one
+ MoreThanOnePlane,
+ /// Invalid amount of bits per channel for the specified image type
+ InvalidChannelWidth(ChannelWidthError, u16),
+
+ /// The width is negative
+ NegativeWidth(i32),
+ /// One of the dimensions is larger than a soft limit
+ ImageTooLarge(i32, i32),
+ /// The height is `i32::min_value()`
+ ///
+ /// General negative heights specify top-down DIBs
+ InvalidHeight,
+
+ /// Specified image type is invalid for top-down BMPs (i.e. is compressed)
+ ImageTypeInvalidForTopDown(u32),
+ /// Image type not currently recognized by the decoder
+ ImageTypeUnknown(u32),
+
+ /// Bitmap header smaller than the core header
+ HeaderTooSmall(u32),
+
+ /// The palette is bigger than allowed by the bit count of the BMP
+ PaletteSizeExceeded {
+ colors_used: u32,
+ bit_count: u16,
+ },
+}
+
+impl fmt::Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DecoderError::CorruptRleData => f.write_str("Corrupt RLE data"),
+ DecoderError::BitfieldMaskNonContiguous => f.write_str("Non-contiguous bitfield mask"),
+ DecoderError::BitfieldMaskInvalid => f.write_str("Invalid bitfield mask"),
+ DecoderError::BitfieldMaskMissing(bb) => {
+ f.write_fmt(format_args!("Missing {}-bit bitfield mask", bb))
+ }
+ DecoderError::BitfieldMasksMissing(bb) => {
+ f.write_fmt(format_args!("Missing {}-bit bitfield masks", bb))
+ }
+ DecoderError::BmpSignatureInvalid => f.write_str("BMP signature not found"),
+ DecoderError::MoreThanOnePlane => f.write_str("More than one plane"),
+ DecoderError::InvalidChannelWidth(tp, n) => {
+ f.write_fmt(format_args!("Invalid channel bit count for {}: {}", tp, n))
+ }
+ DecoderError::NegativeWidth(w) => f.write_fmt(format_args!("Negative width ({})", w)),
+ DecoderError::ImageTooLarge(w, h) => f.write_fmt(format_args!(
+ "Image too large (one of ({}, {}) > soft limit of {})",
+ w, h, MAX_WIDTH_HEIGHT
+ )),
+ DecoderError::InvalidHeight => f.write_str("Invalid height"),
+ DecoderError::ImageTypeInvalidForTopDown(tp) => f.write_fmt(format_args!(
+ "Invalid image type {} for top-down image.",
+ tp
+ )),
+ DecoderError::ImageTypeUnknown(tp) => {
+ f.write_fmt(format_args!("Unknown image compression type {}", tp))
+ }
+ DecoderError::HeaderTooSmall(s) => {
+ f.write_fmt(format_args!("Bitmap header too small ({} bytes)", s))
+ }
+ DecoderError::PaletteSizeExceeded {
+ colors_used,
+ bit_count,
+ } => f.write_fmt(format_args!(
+ "Palette size {} exceeds maximum size for BMP with bit count of {}",
+ colors_used, bit_count
+ )),
+ }
+ }
+}
+
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Bmp.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {}
+
+/// Distinct image types whose saved channel width can be invalid
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+enum ChannelWidthError {
+ /// RGB
+ Rgb,
+ /// 8-bit run length encoding
+ Rle8,
+ /// 4-bit run length encoding
+ Rle4,
+ /// Bitfields (16- or 32-bit)
+ Bitfields,
+}
+
+impl fmt::Display for ChannelWidthError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(match self {
+ ChannelWidthError::Rgb => "RGB",
+ ChannelWidthError::Rle8 => "RLE8",
+ ChannelWidthError::Rle4 => "RLE4",
+ ChannelWidthError::Bitfields => "bitfields",
+ })
+ }
+}
+
+/// Convenience function to check if the combination of width, length and number of
+/// channels would result in a buffer that would overflow.
+fn check_for_overflow(width: i32, length: i32, channels: usize) -> ImageResult<()> {
+ num_bytes(width, length, channels)
+ .map(|_| ())
+ .ok_or_else(|| {
+ ImageError::Unsupported(UnsupportedError::from_format_and_kind(
+ ImageFormat::Bmp.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "Image dimensions ({}x{} w/{} channels) are too large",
+ width, length, channels
+ )),
+ ))
+ })
+}
+
+/// Calculate how many bytes a buffer holding a decoded image with these properties would
+/// require. Returns `None` if the buffer size would overflow or if one of the sizes is negative.
+fn num_bytes(width: i32, length: i32, channels: usize) -> Option<usize> {
+ if width <= 0 || length <= 0 {
+ None
+ } else {
+ match channels.checked_mul(width as usize) {
+ Some(n) => n.checked_mul(length as usize),
+ None => None,
+ }
+ }
+}
+
+/// Call the provided function on each row of the provided buffer, returning `Err` if the
+/// function returns an error. The buffer is expected to hold exactly the full image.
+fn with_rows<F>(
+ buffer: &mut [u8],
+ width: i32,
+ height: i32,
+ channels: usize,
+ top_down: bool,
+ mut func: F,
+) -> io::Result<()>
+where
+ F: FnMut(&mut [u8]) -> io::Result<()>,
+{
+ // An overflow should already have been checked for when this is called,
+ // though we check anyhow, as it somehow seems to increase performance slightly.
+ let row_width = channels.checked_mul(width as usize).unwrap();
+ let full_image_size = row_width.checked_mul(height as usize).unwrap();
+ assert_eq!(buffer.len(), full_image_size);
+
+ if !top_down {
+ for row in buffer.chunks_mut(row_width).rev() {
+ func(row)?;
+ }
+ } else {
+ for row in buffer.chunks_mut(row_width) {
+ func(row)?;
+ }
+ }
+ Ok(())
+}
+
+fn set_8bit_pixel_run<'a, T: Iterator<Item = &'a u8>>(
+ pixel_iter: &mut ChunksMut<u8>,
+ palette: &[[u8; 3]],
+ indices: T,
+ n_pixels: usize,
+) -> bool {
+ for idx in indices.take(n_pixels) {
+ if let Some(pixel) = pixel_iter.next() {
+ let rgb = palette[*idx as usize];
+ pixel[0] = rgb[0];
+ pixel[1] = rgb[1];
+ pixel[2] = rgb[2];
+ } else {
+ return false;
+ }
+ }
+ true
+}
+
+fn set_4bit_pixel_run<'a, T: Iterator<Item = &'a u8>>(
+ pixel_iter: &mut ChunksMut<u8>,
+ palette: &[[u8; 3]],
+ indices: T,
+ mut n_pixels: usize,
+) -> bool {
+ for idx in indices {
+ macro_rules! set_pixel {
+ ($i:expr) => {
+ if n_pixels == 0 {
+ break;
+ }
+ if let Some(pixel) = pixel_iter.next() {
+ let rgb = palette[$i as usize];
+ pixel[0] = rgb[0];
+ pixel[1] = rgb[1];
+ pixel[2] = rgb[2];
+ } else {
+ return false;
+ }
+ n_pixels -= 1;
+ };
+ }
+ set_pixel!(idx >> 4);
+ set_pixel!(idx & 0xf);
+ }
+ true
+}
+
+#[rustfmt::skip]
+fn set_2bit_pixel_run<'a, T: Iterator<Item = &'a u8>>(
+ pixel_iter: &mut ChunksMut<u8>,
+ palette: &[[u8; 3]],
+ indices: T,
+ mut n_pixels: usize,
+) -> bool {
+ for idx in indices {
+ macro_rules! set_pixel {
+ ($i:expr) => {
+ if n_pixels == 0 {
+ break;
+ }
+ if let Some(pixel) = pixel_iter.next() {
+ let rgb = palette[$i as usize];
+ pixel[0] = rgb[0];
+ pixel[1] = rgb[1];
+ pixel[2] = rgb[2];
+ } else {
+ return false;
+ }
+ n_pixels -= 1;
+ };
+ }
+ set_pixel!((idx >> 6) & 0x3u8);
+ set_pixel!((idx >> 4) & 0x3u8);
+ set_pixel!((idx >> 2) & 0x3u8);
+ set_pixel!( idx & 0x3u8);
+ }
+ true
+}
+
+fn set_1bit_pixel_run<'a, T: Iterator<Item = &'a u8>>(
+ pixel_iter: &mut ChunksMut<u8>,
+ palette: &[[u8; 3]],
+ indices: T,
+) {
+ for idx in indices {
+ let mut bit = 0x80;
+ loop {
+ if let Some(pixel) = pixel_iter.next() {
+ let rgb = palette[((idx & bit) != 0) as usize];
+ pixel[0] = rgb[0];
+ pixel[1] = rgb[1];
+ pixel[2] = rgb[2];
+ } else {
+ return;
+ }
+
+ bit >>= 1;
+ if bit == 0 {
+ break;
+ }
+ }
+ }
+}
+
+#[derive(PartialEq, Eq)]
+struct Bitfield {
+ shift: u32,
+ len: u32,
+}
+
+impl Bitfield {
+ fn from_mask(mask: u32, max_len: u32) -> ImageResult<Bitfield> {
+ if mask == 0 {
+ return Ok(Bitfield { shift: 0, len: 0 });
+ }
+ let mut shift = mask.trailing_zeros();
+ let mut len = (!(mask >> shift)).trailing_zeros();
+ if len != mask.count_ones() {
+ return Err(DecoderError::BitfieldMaskNonContiguous.into());
+ }
+ if len + shift > max_len {
+ return Err(DecoderError::BitfieldMaskInvalid.into());
+ }
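+ // Masks wider than 8 bits are reduced to their 8 most significant bits,
+ // since the decoder produces 8-bit channels.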
+ if len > 8 {
+ shift += len - 8;
+ len = 8;
+ }
+ Ok(Bitfield { shift, len })
+ }
+
+ fn read(&self, data: u32) -> u8 {
+ let data = data >> self.shift;
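+ // Scale the channel value from `len` bits up to the full 8-bit range, using
+ // multiplication or a lookup table depending on the mask width.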
+ match self.len {
+ 1 => ((data & 0b1) * 0xff) as u8,
+ 2 => ((data & 0b11) * 0x55) as u8,
+ 3 => LOOKUP_TABLE_3_BIT_TO_8_BIT[(data & 0b00_0111) as usize],
+ 4 => LOOKUP_TABLE_4_BIT_TO_8_BIT[(data & 0b00_1111) as usize],
+ 5 => LOOKUP_TABLE_5_BIT_TO_8_BIT[(data & 0b01_1111) as usize],
+ 6 => LOOKUP_TABLE_6_BIT_TO_8_BIT[(data & 0b11_1111) as usize],
+ 7 => ((data & 0x7f) << 1 | (data & 0x7f) >> 6) as u8,
+ 8 => (data & 0xff) as u8,
+ _ => panic!(),
+ }
+ }
+}
+
+#[derive(PartialEq, Eq)]
+struct Bitfields {
+ r: Bitfield,
+ g: Bitfield,
+ b: Bitfield,
+ a: Bitfield,
+}
+
+impl Bitfields {
+ fn from_mask(
+ r_mask: u32,
+ g_mask: u32,
+ b_mask: u32,
+ a_mask: u32,
+ max_len: u32,
+ ) -> ImageResult<Bitfields> {
+ let bitfields = Bitfields {
+ r: Bitfield::from_mask(r_mask, max_len)?,
+ g: Bitfield::from_mask(g_mask, max_len)?,
+ b: Bitfield::from_mask(b_mask, max_len)?,
+ a: Bitfield::from_mask(a_mask, max_len)?,
+ };
+ if bitfields.r.len == 0 || bitfields.g.len == 0 || bitfields.b.len == 0 {
+ return Err(DecoderError::BitfieldMaskMissing(max_len).into());
+ }
+ Ok(bitfields)
+ }
+}
+
+/// A bmp decoder
+pub struct BmpDecoder<R> {
+ reader: R,
+
+ bmp_header_type: BMPHeaderType,
+ indexed_color: bool,
+
+ width: i32,
+ height: i32,
+ data_offset: u64,
+ top_down: bool,
+ no_file_header: bool,
+ add_alpha_channel: bool,
+ has_loaded_metadata: bool,
+ image_type: ImageType,
+
+ bit_count: u16,
+ colors_used: u32,
+ palette: Option<Vec<[u8; 3]>>,
+ bitfields: Option<Bitfields>,
+}
+
+enum RLEInsn {
+ EndOfFile,
+ EndOfRow,
+ Delta(u8, u8),
+ Absolute(u8, Vec<u8>),
+ PixelRun(u8, u8),
+}
+
+impl<R: Read + Seek> BmpDecoder<R> {
+ fn new_decoder(reader: R) -> BmpDecoder<R> {
+ BmpDecoder {
+ reader,
+
+ bmp_header_type: BMPHeaderType::Info,
+ indexed_color: false,
+
+ width: 0,
+ height: 0,
+ data_offset: 0,
+ top_down: false,
+ no_file_header: false,
+ add_alpha_channel: false,
+ has_loaded_metadata: false,
+ image_type: ImageType::Palette,
+
+ bit_count: 0,
+ colors_used: 0,
+ palette: None,
+ bitfields: None,
+ }
+ }
+
+ /// Create a new decoder that decodes from the stream `r`
+ pub fn new(reader: R) -> ImageResult<BmpDecoder<R>> {
+ let mut decoder = Self::new_decoder(reader);
+ decoder.read_metadata()?;
+ Ok(decoder)
+ }
+
+ /// Create a new decoder that decodes from the stream `r` without first
+ /// reading a BITMAPFILEHEADER. This is useful for decoding the CF_DIB format
+ /// directly from the Windows clipboard.
+ pub fn new_without_file_header(reader: R) -> ImageResult<BmpDecoder<R>> {
+ let mut decoder = Self::new_decoder(reader);
+ decoder.no_file_header = true;
+ decoder.read_metadata()?;
+ Ok(decoder)
+ }
+
+ #[cfg(feature = "ico")]
+ pub(crate) fn new_with_ico_format(reader: R) -> ImageResult<BmpDecoder<R>> {
+ let mut decoder = Self::new_decoder(reader);
+ decoder.read_metadata_in_ico_format()?;
+ Ok(decoder)
+ }
+
+ /// If true, the palette found in the BMP is not applied to the image, even if present.
+ /// In other words, the output image contains the raw palette indices.
+ pub fn set_indexed_color(&mut self, indexed_color: bool) {
+ self.indexed_color = indexed_color;
+ }
+
+ #[cfg(feature = "ico")]
+ pub(crate) fn reader(&mut self) -> &mut R {
+ &mut self.reader
+ }
+
+ fn read_file_header(&mut self) -> ImageResult<()> {
+ if self.no_file_header {
+ return Ok(());
+ }
+ let mut signature = [0; 2];
+ self.reader.read_exact(&mut signature)?;
+
+ if signature != b"BM"[..] {
+ return Err(DecoderError::BmpSignatureInvalid.into());
+ }
+
+ // The next 8 bytes are the file size (4 bytes) followed by 4 reserved bytes.
+ // We're not interested in these values.
+ self.reader.read_u32::<LittleEndian>()?;
+ self.reader.read_u32::<LittleEndian>()?;
+
+ self.data_offset = u64::from(self.reader.read_u32::<LittleEndian>()?);
+
+ Ok(())
+ }
+
+ /// Read BITMAPCOREHEADER https://msdn.microsoft.com/en-us/library/vs/alm/dd183372(v=vs.85).aspx
+ ///
+ /// returns Err if any of the values are invalid.
+ fn read_bitmap_core_header(&mut self) -> ImageResult<()> {
+ // As height/width values in BMP files with core headers are only 16 bits long,
+ // they won't be larger than `MAX_WIDTH_HEIGHT`.
+ self.width = i32::from(self.reader.read_u16::<LittleEndian>()?);
+ self.height = i32::from(self.reader.read_u16::<LittleEndian>()?);
+
+ check_for_overflow(self.width, self.height, self.num_channels())?;
+
+ // Number of planes (format specifies that this should be 1).
+ if self.reader.read_u16::<LittleEndian>()? != 1 {
+ return Err(DecoderError::MoreThanOnePlane.into());
+ }
+
+ self.bit_count = self.reader.read_u16::<LittleEndian>()?;
+ self.image_type = match self.bit_count {
+ 1 | 4 | 8 => ImageType::Palette,
+ 24 => ImageType::RGB24,
+ _ => {
+ return Err(DecoderError::InvalidChannelWidth(
+ ChannelWidthError::Rgb,
+ self.bit_count,
+ )
+ .into())
+ }
+ };
+
+ Ok(())
+ }
+
+ /// Read BITMAPINFOHEADER https://msdn.microsoft.com/en-us/library/vs/alm/dd183376(v=vs.85).aspx
+ /// or BITMAPV{2|3|4|5}HEADER.
+ ///
+ /// returns Err if any of the values are invalid.
+ fn read_bitmap_info_header(&mut self) -> ImageResult<()> {
+ self.width = self.reader.read_i32::<LittleEndian>()?;
+ self.height = self.reader.read_i32::<LittleEndian>()?;
+
+ // Width can not be negative
+ if self.width < 0 {
+ return Err(DecoderError::NegativeWidth(self.width).into());
+ } else if self.width > MAX_WIDTH_HEIGHT || self.height > MAX_WIDTH_HEIGHT {
+ // Limit very large image sizes to avoid OOM issues. Images with these sizes are
+ // unlikely to be valid anyhow.
+ return Err(DecoderError::ImageTooLarge(self.width, self.height).into());
+ }
+
+ if self.height == i32::min_value() {
+ return Err(DecoderError::InvalidHeight.into());
+ }
+
+ // A negative height indicates a top-down DIB.
+ if self.height < 0 {
+ self.height *= -1;
+ self.top_down = true;
+ }
+
+ check_for_overflow(self.width, self.height, self.num_channels())?;
+
+ // Number of planes (format specifies that this should be 1).
+ if self.reader.read_u16::<LittleEndian>()? != 1 {
+ return Err(DecoderError::MoreThanOnePlane.into());
+ }
+
+ self.bit_count = self.reader.read_u16::<LittleEndian>()?;
+ let image_type_u32 = self.reader.read_u32::<LittleEndian>()?;
+
+ // Top-down dibs can not be compressed.
+ if self.top_down && image_type_u32 != 0 && image_type_u32 != 3 {
+ return Err(DecoderError::ImageTypeInvalidForTopDown(image_type_u32).into());
+ }
+ self.image_type = match image_type_u32 {
+ 0 => match self.bit_count {
+ 1 | 2 | 4 | 8 => ImageType::Palette,
+ 16 => ImageType::RGB16,
+ 24 => ImageType::RGB24,
+ 32 if self.add_alpha_channel => ImageType::RGBA32,
+ 32 => ImageType::RGB32,
+ _ => {
+ return Err(DecoderError::InvalidChannelWidth(
+ ChannelWidthError::Rgb,
+ self.bit_count,
+ )
+ .into())
+ }
+ },
+ 1 => match self.bit_count {
+ 8 => ImageType::RLE8,
+ _ => {
+ return Err(DecoderError::InvalidChannelWidth(
+ ChannelWidthError::Rle8,
+ self.bit_count,
+ )
+ .into())
+ }
+ },
+ 2 => match self.bit_count {
+ 4 => ImageType::RLE4,
+ _ => {
+ return Err(DecoderError::InvalidChannelWidth(
+ ChannelWidthError::Rle4,
+ self.bit_count,
+ )
+ .into())
+ }
+ },
+ 3 => match self.bit_count {
+ 16 => ImageType::Bitfields16,
+ 32 => ImageType::Bitfields32,
+ _ => {
+ return Err(DecoderError::InvalidChannelWidth(
+ ChannelWidthError::Bitfields,
+ self.bit_count,
+ )
+ .into())
+ }
+ },
+ 4 => {
+ // JPEG compression is not implemented yet.
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Bmp.into(),
+ UnsupportedErrorKind::GenericFeature("JPEG compression".to_owned()),
+ ),
+ ));
+ }
+ 5 => {
+ // PNG compression is not implemented yet.
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Bmp.into(),
+ UnsupportedErrorKind::GenericFeature("PNG compression".to_owned()),
+ ),
+ ));
+ }
+ 11 | 12 | 13 => {
+ // CMYK types are not implemented yet.
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Bmp.into(),
+ UnsupportedErrorKind::GenericFeature("CMYK format".to_owned()),
+ ),
+ ));
+ }
+ _ => {
+ // Unknown compression type.
+ return Err(DecoderError::ImageTypeUnknown(image_type_u32).into());
+ }
+ };
+
+ // The next 12 bytes are the data array size in bytes, followed by the
+ // horizontal and vertical printing resolutions. We calculate the pixel array
+ // size from the image width and height, and we're not interested in the
+ // printing resolutions.
+ self.reader.read_u32::<LittleEndian>()?;
+ self.reader.read_u32::<LittleEndian>()?;
+ self.reader.read_u32::<LittleEndian>()?;
+
+ self.colors_used = self.reader.read_u32::<LittleEndian>()?;
+
+ // The next 4 bytes represent number of "important" colors
+ // We're not interested in this value, so we'll skip it
+ self.reader.read_u32::<LittleEndian>()?;
+
+ Ok(())
+ }
+
+ fn read_bitmasks(&mut self) -> ImageResult<()> {
+ let r_mask = self.reader.read_u32::<LittleEndian>()?;
+ let g_mask = self.reader.read_u32::<LittleEndian>()?;
+ let b_mask = self.reader.read_u32::<LittleEndian>()?;
+
+ let a_mask = match self.bmp_header_type {
+ BMPHeaderType::V3 | BMPHeaderType::V4 | BMPHeaderType::V5 => {
+ self.reader.read_u32::<LittleEndian>()?
+ }
+ _ => 0,
+ };
+
+ self.bitfields = match self.image_type {
+ ImageType::Bitfields16 => {
+ Some(Bitfields::from_mask(r_mask, g_mask, b_mask, a_mask, 16)?)
+ }
+ ImageType::Bitfields32 => {
+ Some(Bitfields::from_mask(r_mask, g_mask, b_mask, a_mask, 32)?)
+ }
+ _ => None,
+ };
+
+ if self.bitfields.is_some() && a_mask != 0 {
+ self.add_alpha_channel = true;
+ }
+
+ Ok(())
+ }
+
+ fn read_metadata(&mut self) -> ImageResult<()> {
+ if !self.has_loaded_metadata {
+ self.read_file_header()?;
+ let bmp_header_offset = self.reader.stream_position()?;
+ let bmp_header_size = self.reader.read_u32::<LittleEndian>()?;
+ let bmp_header_end = bmp_header_offset + u64::from(bmp_header_size);
+
+ self.bmp_header_type = match bmp_header_size {
+ BITMAPCOREHEADER_SIZE => BMPHeaderType::Core,
+ BITMAPINFOHEADER_SIZE => BMPHeaderType::Info,
+ BITMAPV2HEADER_SIZE => BMPHeaderType::V2,
+ BITMAPV3HEADER_SIZE => BMPHeaderType::V3,
+ BITMAPV4HEADER_SIZE => BMPHeaderType::V4,
+ BITMAPV5HEADER_SIZE => BMPHeaderType::V5,
+ _ if bmp_header_size < BITMAPCOREHEADER_SIZE => {
+ // Size of any valid header types won't be smaller than core header type.
+ return Err(DecoderError::HeaderTooSmall(bmp_header_size).into());
+ }
+ _ => {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Bmp.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "Unknown bitmap header type (size={})",
+ bmp_header_size
+ )),
+ ),
+ ))
+ }
+ };
+
+ match self.bmp_header_type {
+ BMPHeaderType::Core => {
+ self.read_bitmap_core_header()?;
+ }
+ BMPHeaderType::Info
+ | BMPHeaderType::V2
+ | BMPHeaderType::V3
+ | BMPHeaderType::V4
+ | BMPHeaderType::V5 => {
+ self.read_bitmap_info_header()?;
+ }
+ };
+
+ match self.image_type {
+ ImageType::Bitfields16 | ImageType::Bitfields32 => self.read_bitmasks()?,
+ _ => {}
+ };
+
+ self.reader.seek(SeekFrom::Start(bmp_header_end))?;
+
+ match self.image_type {
+ ImageType::Palette | ImageType::RLE4 | ImageType::RLE8 => self.read_palette()?,
+ _ => {}
+ };
+
+ if self.no_file_header {
+ // Use the offset of the end of metadata instead of reading a BMP file header.
+ self.data_offset = self.reader.stream_position()?;
+ }
+
+ self.has_loaded_metadata = true;
+ }
+ Ok(())
+ }
+
+ #[cfg(feature = "ico")]
+ #[doc(hidden)]
+ pub fn read_metadata_in_ico_format(&mut self) -> ImageResult<()> {
+ self.no_file_header = true;
+ self.add_alpha_channel = true;
+ self.read_metadata()?;
+
+ // The height field in an ICO file is doubled to account for the AND mask
+ // (whether or not an AND mask is actually present).
+ self.height /= 2;
+ Ok(())
+ }
+
+ fn get_palette_size(&mut self) -> ImageResult<usize> {
+ match self.colors_used {
+ 0 => Ok(1 << self.bit_count),
+ _ => {
+ if self.colors_used > 1 << self.bit_count {
+ return Err(DecoderError::PaletteSizeExceeded {
+ colors_used: self.colors_used,
+ bit_count: self.bit_count,
+ }
+ .into());
+ }
+ Ok(self.colors_used as usize)
+ }
+ }
+ }
+
+ fn bytes_per_color(&self) -> usize {
+ match self.bmp_header_type {
+ BMPHeaderType::Core => 3,
+ _ => 4,
+ }
+ }
+
+ fn read_palette(&mut self) -> ImageResult<()> {
+ const MAX_PALETTE_SIZE: usize = 256; // Palette indices are u8.
+
+ let bytes_per_color = self.bytes_per_color();
+ let palette_size = self.get_palette_size()?;
+ let max_length = MAX_PALETTE_SIZE * bytes_per_color;
+
+ let length = palette_size * bytes_per_color;
+ let mut buf = Vec::with_capacity(max_length);
+
+ // Resize and read the palette entries to the buffer.
+ // We limit the buffer to at most 256 colours to avoid any oom issues as
+ // 8-bit images can't reference more than 256 indexes anyhow.
+ buf.resize(cmp::min(length, max_length), 0);
+ self.reader.by_ref().read_exact(&mut buf)?;
+
+ // Allocate 256 entries even if palette_size is smaller, to prevent corrupt files from
+ // causing an out-of-bounds array access.
+ match length.cmp(&max_length) {
+ Ordering::Greater => {
+ self.reader
+ .seek(SeekFrom::Current((length - max_length) as i64))?;
+ }
+ Ordering::Less => buf.resize(max_length, 0),
+ Ordering::Equal => (),
+ }
+
+ let p: Vec<[u8; 3]> = (0..MAX_PALETTE_SIZE)
+ .map(|i| {
+ let b = buf[bytes_per_color * i];
+ let g = buf[bytes_per_color * i + 1];
+ let r = buf[bytes_per_color * i + 2];
+ [r, g, b]
+ })
+ .collect();
+
+ self.palette = Some(p);
+
+ Ok(())
+ }
+
+ /// Get the palette that is embedded in the BMP image, if any.
+ pub fn get_palette(&self) -> Option<&[[u8; 3]]> {
+ self.palette.as_ref().map(|vec| &vec[..])
+ }
+
+ fn num_channels(&self) -> usize {
+ if self.indexed_color {
+ 1
+ } else if self.add_alpha_channel {
+ 4
+ } else {
+ 3
+ }
+ }
+
+ fn rows<'a>(&self, pixel_data: &'a mut [u8]) -> RowIterator<'a> {
+ let stride = self.width as usize * self.num_channels();
+ if self.top_down {
+ RowIterator {
+ chunks: Chunker::FromTop(pixel_data.chunks_mut(stride)),
+ }
+ } else {
+ RowIterator {
+ chunks: Chunker::FromBottom(pixel_data.chunks_mut(stride).rev()),
+ }
+ }
+ }
+
+ fn read_palettized_pixel_data(&mut self, buf: &mut [u8]) -> ImageResult<()> {
+ let num_channels = self.num_channels();
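+        // Each row of palette indices is padded in the file to a 32-bit boundary:
+        // round the row length in bits up to whole 32-bit words, then convert to bytes.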
+ let row_byte_length = ((i32::from(self.bit_count) * self.width + 31) / 32 * 4) as usize;
+ let mut indices = vec![0; row_byte_length];
+ let palette = self.palette.as_ref().unwrap();
+ let bit_count = self.bit_count;
+ let reader = &mut self.reader;
+ let width = self.width as usize;
+ let skip_palette = self.indexed_color;
+
+ reader.seek(SeekFrom::Start(self.data_offset))?;
+
+ if num_channels == 4 {
+ buf.chunks_exact_mut(4).for_each(|c| c[3] = 0xFF);
+ }
+
+ with_rows(
+ buf,
+ self.width,
+ self.height,
+ num_channels,
+ self.top_down,
+ |row| {
+ reader.read_exact(&mut indices)?;
+ if skip_palette {
+ row.clone_from_slice(&indices[0..width]);
+ } else {
+ let mut pixel_iter = row.chunks_mut(num_channels);
+ match bit_count {
+ 1 => {
+ set_1bit_pixel_run(&mut pixel_iter, palette, indices.iter());
+ }
+ 2 => {
+ set_2bit_pixel_run(&mut pixel_iter, palette, indices.iter(), width);
+ }
+ 4 => {
+ set_4bit_pixel_run(&mut pixel_iter, palette, indices.iter(), width);
+ }
+ 8 => {
+ set_8bit_pixel_run(&mut pixel_iter, palette, indices.iter(), width);
+ }
+ _ => panic!(),
+ };
+ }
+ Ok(())
+ },
+ )?;
+
+ Ok(())
+ }
+
+ fn read_16_bit_pixel_data(
+ &mut self,
+ buf: &mut [u8],
+ bitfields: Option<&Bitfields>,
+ ) -> ImageResult<()> {
+ let num_channels = self.num_channels();
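+        // Each 16-bit pixel occupies 2 bytes, so rows with an odd width need 2 extra
+        // bytes of padding to reach the required 4-byte row alignment.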
+ let row_padding_len = self.width as usize % 2 * 2;
+ let row_padding = &mut [0; 2][..row_padding_len];
+ let bitfields = match bitfields {
+ Some(b) => b,
+ None => self.bitfields.as_ref().unwrap(),
+ };
+ let reader = &mut self.reader;
+
+ reader.seek(SeekFrom::Start(self.data_offset))?;
+
+ with_rows(
+ buf,
+ self.width,
+ self.height,
+ num_channels,
+ self.top_down,
+ |row| {
+ for pixel in row.chunks_mut(num_channels) {
+ let data = u32::from(reader.read_u16::<LittleEndian>()?);
+
+ pixel[0] = bitfields.r.read(data);
+ pixel[1] = bitfields.g.read(data);
+ pixel[2] = bitfields.b.read(data);
+ if num_channels == 4 {
+ if bitfields.a.len != 0 {
+ pixel[3] = bitfields.a.read(data);
+ } else {
+ pixel[3] = 0xFF;
+ }
+ }
+ }
+ reader.read_exact(row_padding)
+ },
+ )?;
+
+ Ok(())
+ }
+
+ /// Read image data from a reader in 32-bit formats that use bitfields.
+ fn read_32_bit_pixel_data(&mut self, buf: &mut [u8]) -> ImageResult<()> {
+ let num_channels = self.num_channels();
+
+ let bitfields = self.bitfields.as_ref().unwrap();
+
+ let reader = &mut self.reader;
+ reader.seek(SeekFrom::Start(self.data_offset))?;
+
+ with_rows(
+ buf,
+ self.width,
+ self.height,
+ num_channels,
+ self.top_down,
+ |row| {
+ for pixel in row.chunks_mut(num_channels) {
+ let data = reader.read_u32::<LittleEndian>()?;
+
+ pixel[0] = bitfields.r.read(data);
+ pixel[1] = bitfields.g.read(data);
+ pixel[2] = bitfields.b.read(data);
+ if num_channels == 4 {
+ if bitfields.a.len != 0 {
+ pixel[3] = bitfields.a.read(data);
+ } else {
+ pixel[3] = 0xff;
+ }
+ }
+ }
+ Ok(())
+ },
+ )?;
+
+ Ok(())
+ }
+
+ /// Read image data from a reader where the colours are stored as 8-bit values (24 or 32-bit).
+ fn read_full_byte_pixel_data(
+ &mut self,
+ buf: &mut [u8],
+ format: &FormatFullBytes,
+ ) -> ImageResult<()> {
+ let num_channels = self.num_channels();
+ let row_padding_len = match *format {
+ FormatFullBytes::RGB24 => (4 - (self.width as usize * 3) % 4) % 4,
+ _ => 0,
+ };
+ let row_padding = &mut [0; 4][..row_padding_len];
+
+ self.reader.seek(SeekFrom::Start(self.data_offset))?;
+
+ let reader = &mut self.reader;
+
+ with_rows(
+ buf,
+ self.width,
+ self.height,
+ num_channels,
+ self.top_down,
+ |row| {
+ for pixel in row.chunks_mut(num_channels) {
+ if *format == FormatFullBytes::Format888 {
+ reader.read_u8()?;
+ }
+
+ // Read the colour values (b, g, r).
+ // Reading 3 bytes and reversing them is significantly faster than reading one
+ // at a time.
+ reader.read_exact(&mut pixel[0..3])?;
+ pixel[0..3].reverse();
+
+ if *format == FormatFullBytes::RGB32 {
+ reader.read_u8()?;
+ }
+
+ // Read the alpha channel if present
+ if *format == FormatFullBytes::RGBA32 {
+ reader.read_exact(&mut pixel[3..4])?;
+ } else if num_channels == 4 {
+ pixel[3] = 0xFF;
+ }
+ }
+ reader.read_exact(row_padding)
+ },
+ )?;
+
+ Ok(())
+ }
+
+ fn read_rle_data(&mut self, buf: &mut [u8], image_type: ImageType) -> ImageResult<()> {
+ // Seek to the start of the actual image data.
+ self.reader.seek(SeekFrom::Start(self.data_offset))?;
+
+ let num_channels = self.num_channels();
+ let p = self.palette.as_ref().unwrap();
+
+ // Handling deltas in the RLE scheme means that we need to manually
+ // iterate through rows and pixels. Even if we didn't have to handle
+ // deltas, we have to ensure that a single runlength doesn't straddle
+ // two rows.
+ let mut row_iter = self.rows(buf);
+
+ while let Some(row) = row_iter.next() {
+ let mut pixel_iter = row.chunks_mut(num_channels);
+
+ let mut x = 0;
+ loop {
+ let instruction = {
+ let control_byte = self.reader.read_u8()?;
+ match control_byte {
+ RLE_ESCAPE => {
+ let op = self.reader.read_u8()?;
+
+ match op {
+ RLE_ESCAPE_EOL => RLEInsn::EndOfRow,
+ RLE_ESCAPE_EOF => RLEInsn::EndOfFile,
+ RLE_ESCAPE_DELTA => {
+ let xdelta = self.reader.read_u8()?;
+ let ydelta = self.reader.read_u8()?;
+ RLEInsn::Delta(xdelta, ydelta)
+ }
+ _ => {
+ let mut length = op as usize;
+ if self.image_type == ImageType::RLE4 {
+ length = (length + 1) / 2;
+ }
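+                                    // Absolute runs are stored padded to a 16-bit (2-byte) boundary.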
+ length += length & 1;
+ let mut buffer = vec![0; length];
+ self.reader.read_exact(&mut buffer)?;
+ RLEInsn::Absolute(op, buffer)
+ }
+ }
+ }
+ _ => {
+ let palette_index = self.reader.read_u8()?;
+ RLEInsn::PixelRun(control_byte, palette_index)
+ }
+ }
+ };
+
+ match instruction {
+ RLEInsn::EndOfFile => {
+ pixel_iter.for_each(|p| p.fill(0));
+ row_iter.for_each(|r| r.fill(0));
+ return Ok(());
+ }
+ RLEInsn::EndOfRow => {
+ pixel_iter.for_each(|p| p.fill(0));
+ break;
+ }
+ RLEInsn::Delta(x_delta, y_delta) => {
+                        // The MSDN documentation on bitmap compression doesn't
+                        // specify what happens to the pixels skipped when
+                        // encountering a delta code; however, IE and the Windows
+                        // image preview seem to replace them with black pixels,
+                        // so we stick to that.
+
+ if y_delta > 0 {
+ // Zero out the remainder of the current row.
+ pixel_iter.for_each(|p| p.fill(0));
+
+ // If any full rows are skipped, zero them out.
+ for _ in 1..y_delta {
+ let row = row_iter.next().ok_or(DecoderError::CorruptRleData)?;
+ row.fill(0);
+ }
+
+ // Set the pixel iterator to the start of the next row.
+ pixel_iter = row_iter
+ .next()
+ .ok_or(DecoderError::CorruptRleData)?
+ .chunks_mut(num_channels);
+
+ // Zero out the pixels up to the current point in the row.
+ for _ in 0..x {
+ pixel_iter
+ .next()
+ .ok_or(DecoderError::CorruptRleData)?
+ .fill(0);
+ }
+ }
+
+ for _ in 0..x_delta {
+ let pixel = pixel_iter.next().ok_or(DecoderError::CorruptRleData)?;
+ pixel.fill(0);
+ }
+ x += x_delta as usize;
+ }
+ RLEInsn::Absolute(length, indices) => {
+ // Absolute mode cannot span rows, so if we run
+ // out of pixels to process, we should stop
+ // processing the image.
+ match image_type {
+ ImageType::RLE8 => {
+ if !set_8bit_pixel_run(
+ &mut pixel_iter,
+ p,
+ indices.iter(),
+ length as usize,
+ ) {
+ return Err(DecoderError::CorruptRleData.into());
+ }
+ }
+ ImageType::RLE4 => {
+ if !set_4bit_pixel_run(
+ &mut pixel_iter,
+ p,
+ indices.iter(),
+ length as usize,
+ ) {
+ return Err(DecoderError::CorruptRleData.into());
+ }
+ }
+ _ => unreachable!(),
+ }
+ x += length as usize;
+ }
+ RLEInsn::PixelRun(n_pixels, palette_index) => {
+ // A pixel run isn't allowed to span rows, but we
+ // simply continue on to the next row if we run
+ // out of pixels to set.
+ match image_type {
+ ImageType::RLE8 => {
+ if !set_8bit_pixel_run(
+ &mut pixel_iter,
+ p,
+ repeat(&palette_index),
+ n_pixels as usize,
+ ) {
+ return Err(DecoderError::CorruptRleData.into());
+ }
+ }
+ ImageType::RLE4 => {
+ if !set_4bit_pixel_run(
+ &mut pixel_iter,
+ p,
+ repeat(&palette_index),
+ n_pixels as usize,
+ ) {
+ return Err(DecoderError::CorruptRleData.into());
+ }
+ }
+ _ => unreachable!(),
+ }
+ x += n_pixels as usize;
+ }
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Read the actual data of the image. This function is deliberately not public because it
+ /// cannot be called multiple times without seeking back the underlying reader in between.
+ pub(crate) fn read_image_data(&mut self, buf: &mut [u8]) -> ImageResult<()> {
+ match self.image_type {
+ ImageType::Palette => self.read_palettized_pixel_data(buf),
+ ImageType::RGB16 => self.read_16_bit_pixel_data(buf, Some(&R5_G5_B5_COLOR_MASK)),
+ ImageType::RGB24 => self.read_full_byte_pixel_data(buf, &FormatFullBytes::RGB24),
+ ImageType::RGB32 => self.read_full_byte_pixel_data(buf, &FormatFullBytes::RGB32),
+ ImageType::RGBA32 => self.read_full_byte_pixel_data(buf, &FormatFullBytes::RGBA32),
+ ImageType::RLE8 => self.read_rle_data(buf, ImageType::RLE8),
+ ImageType::RLE4 => self.read_rle_data(buf, ImageType::RLE4),
+ ImageType::Bitfields16 => match self.bitfields {
+ Some(_) => self.read_16_bit_pixel_data(buf, None),
+ None => Err(DecoderError::BitfieldMasksMissing(16).into()),
+ },
+ ImageType::Bitfields32 => match self.bitfields {
+ Some(R8_G8_B8_COLOR_MASK) => {
+ self.read_full_byte_pixel_data(buf, &FormatFullBytes::Format888)
+ }
+ Some(R8_G8_B8_A8_COLOR_MASK) => {
+ self.read_full_byte_pixel_data(buf, &FormatFullBytes::RGBA32)
+ }
+ Some(_) => self.read_32_bit_pixel_data(buf),
+ None => Err(DecoderError::BitfieldMasksMissing(32).into()),
+ },
+ }
+ }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct BmpReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for BmpReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ if self.0.position() == 0 && buf.is_empty() {
+ mem::swap(buf, self.0.get_mut());
+ Ok(buf.len())
+ } else {
+ self.0.read_to_end(buf)
+ }
+ }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for BmpDecoder<R> {
+ type Reader = BmpReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.width as u32, self.height as u32)
+ }
+
+ fn color_type(&self) -> ColorType {
+ if self.indexed_color {
+ ColorType::L8
+ } else if self.add_alpha_channel {
+ ColorType::Rgba8
+ } else {
+ ColorType::Rgb8
+ }
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ Ok(BmpReader(
+ Cursor::new(image::decoder_to_vec(self)?),
+ PhantomData,
+ ))
+ }
+
+ fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+ self.read_image_data(buf)
+ }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoderRect<'a> for BmpDecoder<R> {
+ fn read_rect_with_progress<F: Fn(Progress)>(
+ &mut self,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+ buf: &mut [u8],
+ progress_callback: F,
+ ) -> ImageResult<()> {
+ let start = self.reader.stream_position()?;
+ image::load_rect(
+ x,
+ y,
+ width,
+ height,
+ buf,
+ progress_callback,
+ self,
+ |_, _| Ok(()),
+ |s, buf| s.read_image_data(buf),
+ )?;
+ self.reader.seek(SeekFrom::Start(start))?;
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_bitfield_len() {
+ for len in 1..9 {
+ let bitfield = Bitfield { shift: 0, len };
+ for i in 0..(1 << len) {
+ let read = bitfield.read(i);
+ let calc = (i as f64 / ((1 << len) - 1) as f64 * 255f64).round() as u8;
+ if read != calc {
+ println!("len:{} i:{} read:{} calc:{}", len, i, read, calc);
+ }
+ assert_eq!(read, calc);
+ }
+ }
+ }
+
+ #[test]
+ fn read_rect() {
+ let f = std::fs::File::open("tests/images/bmp/images/Core_8_Bit.bmp").unwrap();
+ let mut decoder = super::BmpDecoder::new(f).unwrap();
+
+ let mut buf: Vec<u8> = vec![0; 8 * 8 * 3];
+ decoder.read_rect(0, 0, 8, 8, &mut *buf).unwrap();
+ }
+
+ #[test]
+ fn read_rle_too_short() {
+ let data = vec![
+ 0x42, 0x4d, 0x04, 0xee, 0xfe, 0xff, 0xff, 0x10, 0xff, 0x00, 0x04, 0x00, 0x00, 0x00,
+ 0x7c, 0x00, 0x00, 0x00, 0x0c, 0x41, 0x00, 0x00, 0x07, 0x10, 0x00, 0x00, 0x01, 0x00,
+ 0x04, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x21,
+ 0xff, 0x00, 0x66, 0x61, 0x72, 0x62, 0x66, 0x65, 0x6c, 0x64, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xd8, 0xff, 0x00, 0x00, 0x19, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0xff, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00,
+ 0x00, 0x00, 0x00, 0x2d, 0x31, 0x31, 0x35, 0x36, 0x00, 0xff, 0x00, 0x00, 0x52, 0x3a,
+ 0x37, 0x30, 0x7e, 0x71, 0x63, 0x91, 0x5a, 0x04, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x35, 0x37, 0x00, 0xff, 0x00, 0x00, 0x52,
+ 0x3a, 0x37, 0x30, 0x7e, 0x71, 0x63, 0x91, 0x5a, 0x04, 0x05, 0x3c, 0x00, 0x00, 0x11,
+ 0x00, 0x5d, 0x7a, 0x82, 0xb7, 0xca, 0x2d, 0x31, 0xff, 0xff, 0xc7, 0x95, 0x33, 0x2e,
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x66, 0x00, 0x4d,
+ 0x4d, 0x00, 0x2a, 0x00,
+ ];
+
+ let decoder = BmpDecoder::new(Cursor::new(&data)).unwrap();
+ let mut buf = vec![0; usize::try_from(decoder.total_bytes()).unwrap()];
+ assert!(decoder.read_image(&mut buf).is_ok());
+ }
+
+ #[test]
+ fn test_no_header() {
+ let tests = [
+ "Info_R8_G8_B8.bmp",
+ "Info_A8_R8_G8_B8.bmp",
+ "Info_8_Bit.bmp",
+ "Info_4_Bit.bmp",
+ "Info_1_Bit.bmp",
+ ];
+
+ for name in &tests {
+ let path = format!("tests/images/bmp/images/{name}");
+ let ref_img = crate::open(&path).unwrap();
+ let mut data = std::fs::read(&path).unwrap();
+ // skip the BITMAPFILEHEADER
+ let slice = &mut data[14..];
+ let decoder = BmpDecoder::new_without_file_header(Cursor::new(slice)).unwrap();
+ let no_hdr_img = crate::DynamicImage::from_decoder(decoder).unwrap();
+ assert_eq!(ref_img, no_hdr_img);
+ }
+ }
+}
diff --git a/vendor/image/src/codecs/bmp/encoder.rs b/vendor/image/src/codecs/bmp/encoder.rs
new file mode 100644
index 0000000..c90c063
--- /dev/null
+++ b/vendor/image/src/codecs/bmp/encoder.rs
@@ -0,0 +1,388 @@
+use byteorder::{LittleEndian, WriteBytesExt};
+use std::io::{self, Write};
+
+use crate::error::{
+ EncodingError, ImageError, ImageFormatHint, ImageResult, ParameterError, ParameterErrorKind,
+};
+use crate::image::ImageEncoder;
+use crate::{color, ImageFormat};
+
+const BITMAPFILEHEADER_SIZE: u32 = 14;
+const BITMAPINFOHEADER_SIZE: u32 = 40;
+const BITMAPV4HEADER_SIZE: u32 = 108;
+
+/// The representation of a BMP encoder.
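+///
+/// # Examples
+///
+/// A minimal sketch of encoding a single red RGB pixel into an in-memory buffer:
+///
+/// ```
+/// use image::codecs::bmp::BmpEncoder;
+/// use image::ColorType;
+///
+/// let pixel = [255u8, 0, 0];
+/// let mut bmp_data = Vec::new();
+/// let mut encoder = BmpEncoder::new(&mut bmp_data);
+/// encoder.encode(&pixel, 1, 1, ColorType::Rgb8).unwrap();
+/// ```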
+pub struct BmpEncoder<'a, W: 'a> {
+ writer: &'a mut W,
+}
+
+impl<'a, W: Write + 'a> BmpEncoder<'a, W> {
+ /// Create a new encoder that writes its output to ```w```.
+ pub fn new(w: &'a mut W) -> Self {
+ BmpEncoder { writer: w }
+ }
+
+ /// Encodes the image ```image```
+ /// that has dimensions ```width``` and ```height```
+ /// and ```ColorType``` ```c```.
+ pub fn encode(
+ &mut self,
+ image: &[u8],
+ width: u32,
+ height: u32,
+ c: color::ColorType,
+ ) -> ImageResult<()> {
+ self.encode_with_palette(image, width, height, c, None)
+ }
+
+    /// Same as ```encode```, but allows a palette to be passed in.
+ /// The ```palette``` is ignored for color types other than Luma/Luma-with-alpha.
+ pub fn encode_with_palette(
+ &mut self,
+ image: &[u8],
+ width: u32,
+ height: u32,
+ c: color::ColorType,
+ palette: Option<&[[u8; 3]]>,
+ ) -> ImageResult<()> {
+ if palette.is_some() && c != color::ColorType::L8 && c != color::ColorType::La8 {
+ return Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ format!(
+ "Unsupported color type {:?} when using a non-empty palette. Supported types: Gray(8), GrayA(8).",
+ c
+ ),
+ )));
+ }
+
+ let bmp_header_size = BITMAPFILEHEADER_SIZE;
+
+ let (dib_header_size, written_pixel_size, palette_color_count) =
+ get_pixel_info(c, palette)?;
+ let row_pad_size = (4 - (width * written_pixel_size) % 4) % 4; // each row must be padded to a multiple of 4 bytes
+ let image_size = width
+ .checked_mul(height)
+ .and_then(|v| v.checked_mul(written_pixel_size))
+ .and_then(|v| v.checked_add(height * row_pad_size))
+ .ok_or_else(|| {
+ ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ ))
+ })?;
+ let palette_size = palette_color_count * 4; // all palette colors are BGRA
+ let file_size = bmp_header_size
+ .checked_add(dib_header_size)
+ .and_then(|v| v.checked_add(palette_size))
+ .and_then(|v| v.checked_add(image_size))
+ .ok_or_else(|| {
+ ImageError::Encoding(EncodingError::new(
+ ImageFormatHint::Exact(ImageFormat::Bmp),
+ "calculated BMP header size larger than 2^32",
+ ))
+ })?;
+
+ // write BMP header
+ self.writer.write_u8(b'B')?;
+ self.writer.write_u8(b'M')?;
+ self.writer.write_u32::<LittleEndian>(file_size)?; // file size
+ self.writer.write_u16::<LittleEndian>(0)?; // reserved 1
+ self.writer.write_u16::<LittleEndian>(0)?; // reserved 2
+ self.writer
+ .write_u32::<LittleEndian>(bmp_header_size + dib_header_size + palette_size)?; // image data offset
+
+ // write DIB header
+ self.writer.write_u32::<LittleEndian>(dib_header_size)?;
+ self.writer.write_i32::<LittleEndian>(width as i32)?;
+ self.writer.write_i32::<LittleEndian>(height as i32)?;
+ self.writer.write_u16::<LittleEndian>(1)?; // color planes
+ self.writer
+ .write_u16::<LittleEndian>((written_pixel_size * 8) as u16)?; // bits per pixel
+ if dib_header_size >= BITMAPV4HEADER_SIZE {
+ // Assume BGRA32
+ self.writer.write_u32::<LittleEndian>(3)?; // compression method - bitfields
+ } else {
+ self.writer.write_u32::<LittleEndian>(0)?; // compression method - no compression
+ }
+ self.writer.write_u32::<LittleEndian>(image_size)?;
+ self.writer.write_i32::<LittleEndian>(0)?; // horizontal ppm
+ self.writer.write_i32::<LittleEndian>(0)?; // vertical ppm
+ self.writer.write_u32::<LittleEndian>(palette_color_count)?;
+ self.writer.write_u32::<LittleEndian>(0)?; // all colors are important
+ if dib_header_size >= BITMAPV4HEADER_SIZE {
+ // Assume BGRA32
+ self.writer.write_u32::<LittleEndian>(0xff << 16)?; // red mask
+ self.writer.write_u32::<LittleEndian>(0xff << 8)?; // green mask
+ self.writer.write_u32::<LittleEndian>(0xff)?; // blue mask
+ self.writer.write_u32::<LittleEndian>(0xff << 24)?; // alpha mask
+ self.writer.write_u32::<LittleEndian>(0x73524742)?; // colorspace - sRGB
+
+ // endpoints (3x3) and gamma (3)
+ for _ in 0..12 {
+ self.writer.write_u32::<LittleEndian>(0)?;
+ }
+ }
+
+ // write image data
+ match c {
+ color::ColorType::Rgb8 => self.encode_rgb(image, width, height, row_pad_size, 3)?,
+ color::ColorType::Rgba8 => self.encode_rgba(image, width, height, row_pad_size, 4)?,
+ color::ColorType::L8 => {
+ self.encode_gray(image, width, height, row_pad_size, 1, palette)?
+ }
+ color::ColorType::La8 => {
+ self.encode_gray(image, width, height, row_pad_size, 2, palette)?
+ }
+ _ => {
+ return Err(ImageError::IoError(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ &get_unsupported_error_message(c)[..],
+ )))
+ }
+ }
+
+ Ok(())
+ }
+
+ fn encode_rgb(
+ &mut self,
+ image: &[u8],
+ width: u32,
+ height: u32,
+ row_pad_size: u32,
+ bytes_per_pixel: u32,
+ ) -> io::Result<()> {
+ let width = width as usize;
+ let height = height as usize;
+ let x_stride = bytes_per_pixel as usize;
+ let y_stride = width * x_stride;
+ for row in (0..height).rev() {
+ // from the bottom up
+ let row_start = row * y_stride;
+ for px in image[row_start..][..y_stride].chunks_exact(x_stride) {
+ let r = px[0];
+ let g = px[1];
+ let b = px[2];
+ // written as BGR
+ self.writer.write_all(&[b, g, r])?;
+ }
+ self.write_row_pad(row_pad_size)?;
+ }
+
+ Ok(())
+ }
+
+ fn encode_rgba(
+ &mut self,
+ image: &[u8],
+ width: u32,
+ height: u32,
+ row_pad_size: u32,
+ bytes_per_pixel: u32,
+ ) -> io::Result<()> {
+ let width = width as usize;
+ let height = height as usize;
+ let x_stride = bytes_per_pixel as usize;
+ let y_stride = width * x_stride;
+ for row in (0..height).rev() {
+ // from the bottom up
+ let row_start = row * y_stride;
+ for px in image[row_start..][..y_stride].chunks_exact(x_stride) {
+ let r = px[0];
+ let g = px[1];
+ let b = px[2];
+ let a = px[3];
+ // written as BGRA
+ self.writer.write_all(&[b, g, r, a])?;
+ }
+ self.write_row_pad(row_pad_size)?;
+ }
+
+ Ok(())
+ }
+
+ fn encode_gray(
+ &mut self,
+ image: &[u8],
+ width: u32,
+ height: u32,
+ row_pad_size: u32,
+ bytes_per_pixel: u32,
+ palette: Option<&[[u8; 3]]>,
+ ) -> io::Result<()> {
+ // write grayscale palette
+ if let Some(palette) = palette {
+ for item in palette {
+ // each color is written as BGRA, where A is always 0
+ self.writer.write_all(&[item[2], item[1], item[0], 0])?;
+ }
+ } else {
+ for val in 0u8..=255 {
+ // each color is written as BGRA, where A is always 0 and since only grayscale is being written, B = G = R = index
+ self.writer.write_all(&[val, val, val, 0])?;
+ }
+ }
+
+ // write image data
+ let x_stride = bytes_per_pixel;
+ let y_stride = width * x_stride;
+ for row in (0..height).rev() {
+ // from the bottom up
+ let row_start = row * y_stride;
+ for col in 0..width {
+ let pixel_start = (row_start + (col * x_stride)) as usize;
+ // color value is equal to the palette index
+ self.writer.write_u8(image[pixel_start])?;
+ // alpha is never written as it's not widely supported
+ }
+
+ self.write_row_pad(row_pad_size)?;
+ }
+
+ Ok(())
+ }
+
+ fn write_row_pad(&mut self, row_pad_size: u32) -> io::Result<()> {
+ for _ in 0..row_pad_size {
+ self.writer.write_u8(0)?;
+ }
+
+ Ok(())
+ }
+}
+
+impl<'a, W: Write> ImageEncoder for BmpEncoder<'a, W> {
+ fn write_image(
+ mut self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: color::ColorType,
+ ) -> ImageResult<()> {
+ self.encode(buf, width, height, color_type)
+ }
+}
+
+fn get_unsupported_error_message(c: color::ColorType) -> String {
+ format!(
+ "Unsupported color type {:?}. Supported types: RGB(8), RGBA(8), Gray(8), GrayA(8).",
+ c
+ )
+}
+
+/// Returns a tuple representing: (dib header size, written pixel size, palette color count).
+fn get_pixel_info(c: color::ColorType, palette: Option<&[[u8; 3]]>) -> io::Result<(u32, u32, u32)> {
+ let sizes = match c {
+ color::ColorType::Rgb8 => (BITMAPINFOHEADER_SIZE, 3, 0),
+ color::ColorType::Rgba8 => (BITMAPV4HEADER_SIZE, 4, 0),
+ color::ColorType::L8 => (
+ BITMAPINFOHEADER_SIZE,
+ 1,
+ palette.map(|p| p.len()).unwrap_or(256) as u32,
+ ),
+ color::ColorType::La8 => (
+ BITMAPINFOHEADER_SIZE,
+ 1,
+ palette.map(|p| p.len()).unwrap_or(256) as u32,
+ ),
+ _ => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ &get_unsupported_error_message(c)[..],
+ ))
+ }
+ };
+
+ Ok(sizes)
+}
+
+#[cfg(test)]
+mod tests {
+ use super::super::BmpDecoder;
+ use super::BmpEncoder;
+ use crate::color::ColorType;
+ use crate::image::ImageDecoder;
+ use std::io::Cursor;
+
+ fn round_trip_image(image: &[u8], width: u32, height: u32, c: ColorType) -> Vec<u8> {
+ let mut encoded_data = Vec::new();
+ {
+ let mut encoder = BmpEncoder::new(&mut encoded_data);
+ encoder
+ .encode(&image, width, height, c)
+ .expect("could not encode image");
+ }
+
+ let decoder = BmpDecoder::new(Cursor::new(&encoded_data)).expect("failed to decode");
+
+ let mut buf = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut buf).expect("failed to decode");
+ buf
+ }
+
+ #[test]
+ fn round_trip_single_pixel_rgb() {
+ let image = [255u8, 0, 0]; // single red pixel
+ let decoded = round_trip_image(&image, 1, 1, ColorType::Rgb8);
+ assert_eq!(3, decoded.len());
+ assert_eq!(255, decoded[0]);
+ assert_eq!(0, decoded[1]);
+ assert_eq!(0, decoded[2]);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "64")]
+ fn huge_files_return_error() {
+ let mut encoded_data = Vec::new();
+ let image = vec![0u8; 3 * 40_000 * 40_000]; // 40_000x40_000 pixels, 3 bytes per pixel, allocated on the heap
+ let mut encoder = BmpEncoder::new(&mut encoded_data);
+ let result = encoder.encode(&image, 40_000, 40_000, ColorType::Rgb8);
+ assert!(result.is_err());
+ }
+
+ #[test]
+ fn round_trip_single_pixel_rgba() {
+ let image = [1, 2, 3, 4];
+ let decoded = round_trip_image(&image, 1, 1, ColorType::Rgba8);
+ assert_eq!(&decoded[..], &image[..]);
+ }
+
+ #[test]
+ fn round_trip_3px_rgb() {
+ let image = [0u8; 3 * 3 * 3]; // 3x3 pixels, 3 bytes per pixel
+ let _decoded = round_trip_image(&image, 3, 3, ColorType::Rgb8);
+ }
+
+ #[test]
+ fn round_trip_gray() {
+ let image = [0u8, 1, 2]; // 3 pixels
+ let decoded = round_trip_image(&image, 3, 1, ColorType::L8);
+ // should be read back as 3 RGB pixels
+ assert_eq!(9, decoded.len());
+ assert_eq!(0, decoded[0]);
+ assert_eq!(0, decoded[1]);
+ assert_eq!(0, decoded[2]);
+ assert_eq!(1, decoded[3]);
+ assert_eq!(1, decoded[4]);
+ assert_eq!(1, decoded[5]);
+ assert_eq!(2, decoded[6]);
+ assert_eq!(2, decoded[7]);
+ assert_eq!(2, decoded[8]);
+ }
+
+ #[test]
+ fn round_trip_graya() {
+ let image = [0u8, 0, 1, 0, 2, 0]; // 3 pixels, each with an alpha channel
+ let decoded = round_trip_image(&image, 1, 3, ColorType::La8);
+ // should be read back as 3 RGB pixels
+ assert_eq!(9, decoded.len());
+ assert_eq!(0, decoded[0]);
+ assert_eq!(0, decoded[1]);
+ assert_eq!(0, decoded[2]);
+ assert_eq!(1, decoded[3]);
+ assert_eq!(1, decoded[4]);
+ assert_eq!(1, decoded[5]);
+ assert_eq!(2, decoded[6]);
+ assert_eq!(2, decoded[7]);
+ assert_eq!(2, decoded[8]);
+ }
+}
diff --git a/vendor/image/src/codecs/bmp/mod.rs b/vendor/image/src/codecs/bmp/mod.rs
new file mode 100644
index 0000000..549b1cf
--- /dev/null
+++ b/vendor/image/src/codecs/bmp/mod.rs
@@ -0,0 +1,14 @@
+//! Decoding and Encoding of BMP Images
+//!
+//! A decoder and encoder for BMP (Windows Bitmap) images
+//!
+//! # Related Links
+//! * <https://msdn.microsoft.com/en-us/library/windows/desktop/dd183375%28v=vs.85%29.aspx>
+//! * <https://en.wikipedia.org/wiki/BMP_file_format>
+//!
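+//! # Examples
+//!
+//! A minimal sketch of decoding a BMP file into raw pixel bytes (the file path is hypothetical):
+//!
+//! ```no_run
+//! use image::codecs::bmp::BmpDecoder;
+//! use image::ImageDecoder;
+//! use std::fs::File;
+//! use std::io::BufReader;
+//!
+//! let file = BufReader::new(File::open("example.bmp").unwrap());
+//! let decoder = BmpDecoder::new(file).unwrap();
+//! let mut pixels = vec![0u8; decoder.total_bytes() as usize];
+//! decoder.read_image(&mut pixels).unwrap();
+//! ```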
+
+pub use self::decoder::BmpDecoder;
+pub use self::encoder::BmpEncoder;
+
+mod decoder;
+mod encoder;
diff --git a/vendor/image/src/codecs/dds.rs b/vendor/image/src/codecs/dds.rs
new file mode 100644
index 0000000..f0a7357
--- /dev/null
+++ b/vendor/image/src/codecs/dds.rs
@@ -0,0 +1,375 @@
+//! Decoding of DDS images
+//!
+//! DDS (DirectDraw Surface) is a container format for storing DXT (S3TC) compressed images.
+//!
+//! # Related Links
+//! * <https://docs.microsoft.com/en-us/windows/win32/direct3ddds/dx-graphics-dds-pguide> - Description of the DDS format.
+
+use std::io::Read;
+use std::{error, fmt};
+
+use byteorder::{LittleEndian, ReadBytesExt};
+
+#[allow(deprecated)]
+use crate::codecs::dxt::{DxtDecoder, DxtReader, DxtVariant};
+use crate::color::ColorType;
+use crate::error::{
+ DecodingError, ImageError, ImageFormatHint, ImageResult, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::{ImageDecoder, ImageFormat};
+
+/// Errors that can occur during decoding and parsing a DDS image
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+enum DecoderError {
+ /// Wrong DDS channel width
+ PixelFormatSizeInvalid(u32),
+ /// Wrong DDS header size
+ HeaderSizeInvalid(u32),
+ /// Wrong DDS header flags
+ HeaderFlagsInvalid(u32),
+
+ /// Invalid DXGI format in DX10 header
+ DxgiFormatInvalid(u32),
+ /// Invalid resource dimension
+ ResourceDimensionInvalid(u32),
+ /// Invalid flags in DX10 header
+ Dx10FlagsInvalid(u32),
+ /// Invalid array size in DX10 header
+ Dx10ArraySizeInvalid(u32),
+
+ /// DDS "DDS " signature invalid or missing
+ DdsSignatureInvalid,
+}
+
+impl fmt::Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DecoderError::PixelFormatSizeInvalid(s) => {
+ f.write_fmt(format_args!("Invalid DDS PixelFormat size: {}", s))
+ }
+ DecoderError::HeaderSizeInvalid(s) => {
+ f.write_fmt(format_args!("Invalid DDS header size: {}", s))
+ }
+ DecoderError::HeaderFlagsInvalid(fs) => {
+ f.write_fmt(format_args!("Invalid DDS header flags: {:#010X}", fs))
+ }
+ DecoderError::DxgiFormatInvalid(df) => {
+ f.write_fmt(format_args!("Invalid DDS DXGI format: {}", df))
+ }
+ DecoderError::ResourceDimensionInvalid(d) => {
+ f.write_fmt(format_args!("Invalid DDS resource dimension: {}", d))
+ }
+ DecoderError::Dx10FlagsInvalid(fs) => {
+ f.write_fmt(format_args!("Invalid DDS DX10 header flags: {:#010X}", fs))
+ }
+ DecoderError::Dx10ArraySizeInvalid(s) => {
+ f.write_fmt(format_args!("Invalid DDS DX10 array size: {}", s))
+ }
+ DecoderError::DdsSignatureInvalid => f.write_str("DDS signature not found"),
+ }
+ }
+}
+
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Dds.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {}
+
+/// Header used by DDS image files
+#[derive(Debug)]
+struct Header {
+ _flags: u32,
+ height: u32,
+ width: u32,
+ _pitch_or_linear_size: u32,
+ _depth: u32,
+ _mipmap_count: u32,
+ pixel_format: PixelFormat,
+ _caps: u32,
+ _caps2: u32,
+}
+
+/// Extended DX10 header used by some DDS image files
+#[derive(Debug)]
+struct DX10Header {
+ dxgi_format: u32,
+ resource_dimension: u32,
+ misc_flag: u32,
+ array_size: u32,
+ misc_flags_2: u32,
+}
+
+/// DDS pixel format
+#[derive(Debug)]
+struct PixelFormat {
+ flags: u32,
+ fourcc: [u8; 4],
+ _rgb_bit_count: u32,
+ _r_bit_mask: u32,
+ _g_bit_mask: u32,
+ _b_bit_mask: u32,
+ _a_bit_mask: u32,
+}
+
+impl PixelFormat {
+ fn from_reader(r: &mut dyn Read) -> ImageResult<Self> {
+ let size = r.read_u32::<LittleEndian>()?;
+ if size != 32 {
+ return Err(DecoderError::PixelFormatSizeInvalid(size).into());
+ }
+
+ Ok(Self {
+ flags: r.read_u32::<LittleEndian>()?,
+ fourcc: {
+ let mut v = [0; 4];
+ r.read_exact(&mut v)?;
+ v
+ },
+ _rgb_bit_count: r.read_u32::<LittleEndian>()?,
+ _r_bit_mask: r.read_u32::<LittleEndian>()?,
+ _g_bit_mask: r.read_u32::<LittleEndian>()?,
+ _b_bit_mask: r.read_u32::<LittleEndian>()?,
+ _a_bit_mask: r.read_u32::<LittleEndian>()?,
+ })
+ }
+}
+
+impl Header {
+ fn from_reader(r: &mut dyn Read) -> ImageResult<Self> {
+ let size = r.read_u32::<LittleEndian>()?;
+ if size != 124 {
+ return Err(DecoderError::HeaderSizeInvalid(size).into());
+ }
+
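+        // Flag bits (see Microsoft's DDS_HEADER documentation): DDSD_CAPS (0x1), DDSD_HEIGHT (0x2),
+        // DDSD_WIDTH (0x4) and DDSD_PIXELFORMAT (0x1000) must be set; DDSD_PITCH (0x8),
+        // DDSD_MIPMAPCOUNT (0x20000), DDSD_LINEARSIZE (0x80000) and DDSD_DEPTH (0x800000)
+        // may additionally be set.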
+ const REQUIRED_FLAGS: u32 = 0x1 | 0x2 | 0x4 | 0x1000;
+ const VALID_FLAGS: u32 = 0x1 | 0x2 | 0x4 | 0x8 | 0x1000 | 0x20000 | 0x80000 | 0x800000;
+ let flags = r.read_u32::<LittleEndian>()?;
+ if flags & (REQUIRED_FLAGS | !VALID_FLAGS) != REQUIRED_FLAGS {
+ return Err(DecoderError::HeaderFlagsInvalid(flags).into());
+ }
+
+ let height = r.read_u32::<LittleEndian>()?;
+ let width = r.read_u32::<LittleEndian>()?;
+ let pitch_or_linear_size = r.read_u32::<LittleEndian>()?;
+ let depth = r.read_u32::<LittleEndian>()?;
+ let mipmap_count = r.read_u32::<LittleEndian>()?;
+ // Skip `dwReserved1`
+ {
+ let mut skipped = [0; 4 * 11];
+ r.read_exact(&mut skipped)?;
+ }
+ let pixel_format = PixelFormat::from_reader(r)?;
+ let caps = r.read_u32::<LittleEndian>()?;
+ let caps2 = r.read_u32::<LittleEndian>()?;
+ // Skip `dwCaps3`, `dwCaps4`, `dwReserved2` (unused)
+ {
+ let mut skipped = [0; 4 + 4 + 4];
+ r.read_exact(&mut skipped)?;
+ }
+
+ Ok(Self {
+ _flags: flags,
+ height,
+ width,
+ _pitch_or_linear_size: pitch_or_linear_size,
+ _depth: depth,
+ _mipmap_count: mipmap_count,
+ pixel_format,
+ _caps: caps,
+ _caps2: caps2,
+ })
+ }
+}
+
+impl DX10Header {
+ fn from_reader(r: &mut dyn Read) -> ImageResult<Self> {
+ let dxgi_format = r.read_u32::<LittleEndian>()?;
+ let resource_dimension = r.read_u32::<LittleEndian>()?;
+ let misc_flag = r.read_u32::<LittleEndian>()?;
+ let array_size = r.read_u32::<LittleEndian>()?;
+ let misc_flags_2 = r.read_u32::<LittleEndian>()?;
+
+ let dx10_header = Self {
+ dxgi_format,
+ resource_dimension,
+ misc_flag,
+ array_size,
+ misc_flags_2,
+ };
+ dx10_header.validate()?;
+
+ Ok(dx10_header)
+ }
+
+ fn validate(&self) -> Result<(), ImageError> {
+ // Note: see https://docs.microsoft.com/en-us/windows/win32/direct3ddds/dds-header-dxt10 for info on valid values
+ if self.dxgi_format > 132 {
+ // Invalid format
+ return Err(DecoderError::DxgiFormatInvalid(self.dxgi_format).into());
+ }
+
+ if self.resource_dimension < 2 || self.resource_dimension > 4 {
+ // Invalid dimension
+ // Only 1D (2), 2D (3) and 3D (4) resource dimensions are allowed
+ return Err(DecoderError::ResourceDimensionInvalid(self.resource_dimension).into());
+ }
+
+ if self.misc_flag != 0x0 && self.misc_flag != 0x4 {
+ // Invalid flag
+ // Only no (0x0) and DDS_RESOURCE_MISC_TEXTURECUBE (0x4) flags are allowed
+ return Err(DecoderError::Dx10FlagsInvalid(self.misc_flag).into());
+ }
+
+ if self.resource_dimension == 4 && self.array_size != 1 {
+ // Invalid array size
+ // 3D textures (resource dimension == 4) must have an array size of 1
+ return Err(DecoderError::Dx10ArraySizeInvalid(self.array_size).into());
+ }
+
+ if self.misc_flags_2 > 0x4 {
+ // Invalid alpha flags
+ return Err(DecoderError::Dx10FlagsInvalid(self.misc_flags_2).into());
+ }
+
+ Ok(())
+ }
+}
+
+/// The representation of a DDS decoder
+pub struct DdsDecoder<R: Read> {
+ #[allow(deprecated)]
+ inner: DxtDecoder<R>,
+}
+
+impl<R: Read> DdsDecoder<R> {
+ /// Create a new decoder that decodes from the stream `r`
+ pub fn new(mut r: R) -> ImageResult<Self> {
+ let mut magic = [0; 4];
+ r.read_exact(&mut magic)?;
+ if magic != b"DDS "[..] {
+ return Err(DecoderError::DdsSignatureInvalid.into());
+ }
+
+ let header = Header::from_reader(&mut r)?;
+
+ if header.pixel_format.flags & 0x4 != 0 {
+ #[allow(deprecated)]
+ let variant = match &header.pixel_format.fourcc {
+ b"DXT1" => DxtVariant::DXT1,
+ b"DXT3" => DxtVariant::DXT3,
+ b"DXT5" => DxtVariant::DXT5,
+ b"DX10" => {
+ let dx10_header = DX10Header::from_reader(&mut r)?;
+ // Format equivalents were taken from https://docs.microsoft.com/en-us/windows/win32/direct3d11/texture-block-compression-in-direct3d-11
+ // The enum integer values were taken from https://docs.microsoft.com/en-us/windows/win32/api/dxgiformat/ne-dxgiformat-dxgi_format
+                    // DXT1 represents the different BC1 variants, DXT3 represents the different BC2 variants and DXT5 represents the different BC3 variants
+ match dx10_header.dxgi_format {
+ 70 | 71 | 72 => DxtVariant::DXT1, // DXGI_FORMAT_BC1_TYPELESS, DXGI_FORMAT_BC1_UNORM or DXGI_FORMAT_BC1_UNORM_SRGB
+ 73 | 74 | 75 => DxtVariant::DXT3, // DXGI_FORMAT_BC2_TYPELESS, DXGI_FORMAT_BC2_UNORM or DXGI_FORMAT_BC2_UNORM_SRGB
+ 76 | 77 | 78 => DxtVariant::DXT5, // DXGI_FORMAT_BC3_TYPELESS, DXGI_FORMAT_BC3_UNORM or DXGI_FORMAT_BC3_UNORM_SRGB
+ _ => {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Dds.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "DDS DXGI Format {}",
+ dx10_header.dxgi_format
+ )),
+ ),
+ ))
+ }
+ }
+ }
+ fourcc => {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Dds.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "DDS FourCC {:?}",
+ fourcc
+ )),
+ ),
+ ))
+ }
+ };
+
+ #[allow(deprecated)]
+ let bytes_per_pixel = variant.color_type().bytes_per_pixel();
+
+ if crate::utils::check_dimension_overflow(header.width, header.height, bytes_per_pixel)
+ {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Dds.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "Image dimensions ({}x{}) are too large",
+ header.width, header.height
+ )),
+ ),
+ ));
+ }
+
+ #[allow(deprecated)]
+ let inner = DxtDecoder::new(r, header.width, header.height, variant)?;
+ Ok(Self { inner })
+ } else {
+            // For now, only DXT variants are supported
+ Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Dds.into(),
+ UnsupportedErrorKind::Format(ImageFormatHint::Name("DDS".to_string())),
+ ),
+ ))
+ }
+ }
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for DdsDecoder<R> {
+ #[allow(deprecated)]
+ type Reader = DxtReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ self.inner.dimensions()
+ }
+
+ fn color_type(&self) -> ColorType {
+ self.inner.color_type()
+ }
+
+ fn scanline_bytes(&self) -> u64 {
+ self.inner.scanline_bytes()
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ self.inner.into_reader()
+ }
+
+ fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
+ self.inner.read_image(buf)
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn dimension_overflow() {
+ // A DXT1 header set to 0xFFFF_FFFC width and height (the highest u32%4 == 0)
+ let header = vec![
+ 0x44, 0x44, 0x53, 0x20, 0x7C, 0x0, 0x0, 0x0, 0x7, 0x10, 0x8, 0x0, 0xFC, 0xFF, 0xFF,
+ 0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0x0, 0xC0, 0x12, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0,
+ 0x0, 0x49, 0x4D, 0x41, 0x47, 0x45, 0x4D, 0x41, 0x47, 0x49, 0x43, 0x4B, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0,
+ 0x4, 0x0, 0x0, 0x0, 0x44, 0x58, 0x54, 0x31, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ ];
+
+ assert!(DdsDecoder::new(&header[..]).is_err());
+ }
+}
diff --git a/vendor/image/src/codecs/dxt.rs b/vendor/image/src/codecs/dxt.rs
new file mode 100644
index 0000000..8737fb3
--- /dev/null
+++ b/vendor/image/src/codecs/dxt.rs
@@ -0,0 +1,869 @@
+//! Decoding of DXT (S3TC) compression
+//!
+//! DXT is an image format that supports lossy compression
+//!
+//! # Related Links
+//! * <https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_compression_s3tc.txt> - Description of the DXT compression OpenGL extensions.
+//!
+//! Note: this module only implements bare DXT encoding/decoding; it does not parse container formats that can hold DXT data, such as .dds
+
+use std::convert::TryFrom;
+use std::io::{self, Read, Seek, SeekFrom, Write};
+
+use crate::color::ColorType;
+use crate::error::{ImageError, ImageResult, ParameterError, ParameterErrorKind};
+use crate::image::{self, ImageDecoder, ImageDecoderRect, ImageReadBuffer, Progress};
+
+/// What version of DXT compression are we using?
+/// Note that DXT2 and DXT4 are left out as they're
+/// just DXT3 and DXT5 with premultiplied alpha
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum DxtVariant {
+    /// The DXT1 format. 48 bytes of RGB data in a 4x4 pixel square are
+    /// compressed into an 8-byte block of DXT1 data
+    DXT1,
+    /// The DXT3 format. 64 bytes of RGBA data in a 4x4 pixel square are
+    /// compressed into a 16-byte block of DXT3 data
+    DXT3,
+    /// The DXT5 format. 64 bytes of RGBA data in a 4x4 pixel square are
+    /// compressed into a 16-byte block of DXT5 data
+ DXT5,
+}
+
+impl DxtVariant {
+    /// Returns the number of bytes of raw image data
+ /// that is encoded in a single DXTn block
+ fn decoded_bytes_per_block(self) -> usize {
+ match self {
+ DxtVariant::DXT1 => 48,
+ DxtVariant::DXT3 | DxtVariant::DXT5 => 64,
+ }
+ }
+
+    /// Returns the number of bytes per block of encoded DXTn data
+ fn encoded_bytes_per_block(self) -> usize {
+ match self {
+ DxtVariant::DXT1 => 8,
+ DxtVariant::DXT3 | DxtVariant::DXT5 => 16,
+ }
+ }
+
+ /// Returns the color type that is stored in this DXT variant
+ pub fn color_type(self) -> ColorType {
+ match self {
+ DxtVariant::DXT1 => ColorType::Rgb8,
+ DxtVariant::DXT3 | DxtVariant::DXT5 => ColorType::Rgba8,
+ }
+ }
+}
+
+/// DXT decoder
+pub struct DxtDecoder<R: Read> {
+ inner: R,
+ width_blocks: u32,
+ height_blocks: u32,
+ variant: DxtVariant,
+ row: u32,
+}
+
+impl<R: Read> DxtDecoder<R> {
+    /// Create a new DXT decoder that decodes from the stream ```r```.
+    /// As DXT is often stored as a raw buffer with the width/height
+    /// stored elsewhere, the width and height of the image need
+    /// to be passed in ```width``` and ```height```, as well as the
+    /// DXT variant in ```variant```.
+    /// ```width``` and ```height``` are required to be multiples of 4;
+    /// otherwise an error will be returned.
+ pub fn new(
+ r: R,
+ width: u32,
+ height: u32,
+ variant: DxtVariant,
+ ) -> Result<DxtDecoder<R>, ImageError> {
+ if width % 4 != 0 || height % 4 != 0 {
+ // TODO: this is actually a bit of a weird case. We could return `DecodingError` but
+            // it's not really the format that is wrong. However, the encoder should surely return
+ // `EncodingError` so it would be the logical choice for symmetry.
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+ let width_blocks = width / 4;
+ let height_blocks = height / 4;
+ Ok(DxtDecoder {
+ inner: r,
+ width_blocks,
+ height_blocks,
+ variant,
+ row: 0,
+ })
+ }
+
+ fn read_scanline(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.scanline_bytes()));
+
+ let mut src =
+ vec![0u8; self.variant.encoded_bytes_per_block() * self.width_blocks as usize];
+ self.inner.read_exact(&mut src)?;
+ match self.variant {
+ DxtVariant::DXT1 => decode_dxt1_row(&src, buf),
+ DxtVariant::DXT3 => decode_dxt3_row(&src, buf),
+ DxtVariant::DXT5 => decode_dxt5_row(&src, buf),
+ }
+ self.row += 1;
+ Ok(buf.len())
+ }
+}
+
+// Note that, due to the way that DXT compression works, a scanline is considered to consist of
+// 4 lines of pixels.
+impl<'a, R: 'a + Read> ImageDecoder<'a> for DxtDecoder<R> {
+ type Reader = DxtReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.width_blocks * 4, self.height_blocks * 4)
+ }
+
+ fn color_type(&self) -> ColorType {
+ self.variant.color_type()
+ }
+
+ fn scanline_bytes(&self) -> u64 {
+ self.variant.decoded_bytes_per_block() as u64 * u64::from(self.width_blocks)
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ Ok(DxtReader {
+ buffer: ImageReadBuffer::new(self.scanline_bytes(), self.total_bytes()),
+ decoder: self,
+ })
+ }
+
+ fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+
+ for chunk in buf.chunks_mut(self.scanline_bytes().max(1) as usize) {
+ self.read_scanline(chunk)?;
+ }
+ Ok(())
+ }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoderRect<'a> for DxtDecoder<R> {
+ fn read_rect_with_progress<F: Fn(Progress)>(
+ &mut self,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+ buf: &mut [u8],
+ progress_callback: F,
+ ) -> ImageResult<()> {
+ let encoded_scanline_bytes =
+ self.variant.encoded_bytes_per_block() as u64 * u64::from(self.width_blocks);
+
+ let start = self.inner.stream_position()?;
+ image::load_rect(
+ x,
+ y,
+ width,
+ height,
+ buf,
+ progress_callback,
+ self,
+ |s, scanline| {
+ s.inner
+ .seek(SeekFrom::Start(start + scanline * encoded_scanline_bytes))?;
+ Ok(())
+ },
+ |s, buf| s.read_scanline(buf).map(|_| ()),
+ )?;
+ self.inner.seek(SeekFrom::Start(start))?;
+ Ok(())
+ }
+}
+
+/// DXT reader
+pub struct DxtReader<R: Read> {
+ buffer: ImageReadBuffer,
+ decoder: DxtDecoder<R>,
+}
+
+impl<R: Read> Read for DxtReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let decoder = &mut self.decoder;
+ self.buffer.read(buf, |buf| decoder.read_scanline(buf))
+ }
+}
+
+/// DXT encoder
+pub struct DxtEncoder<W: Write> {
+ w: W,
+}
+
+impl<W: Write> DxtEncoder<W> {
+ /// Create a new encoder that writes its output to ```w```
+ pub fn new(w: W) -> DxtEncoder<W> {
+ DxtEncoder { w }
+ }
+
+ /// Encodes the image data ```data```
+ /// that has dimensions ```width``` and ```height```
+ /// in ```DxtVariant``` ```variant```
+ /// data is assumed to be in variant.color_type()
+ pub fn encode(
+ mut self,
+ data: &[u8],
+ width: u32,
+ height: u32,
+ variant: DxtVariant,
+ ) -> ImageResult<()> {
+ if width % 4 != 0 || height % 4 != 0 {
+ // TODO: this is not very idiomatic yet. Should return an EncodingError.
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+ let width_blocks = width / 4;
+ let height_blocks = height / 4;
+
+ let stride = variant.decoded_bytes_per_block();
+
+ assert!(data.len() >= width_blocks as usize * height_blocks as usize * stride);
+
+ for chunk in data.chunks(width_blocks as usize * stride) {
+ let data = match variant {
+ DxtVariant::DXT1 => encode_dxt1_row(chunk),
+ DxtVariant::DXT3 => encode_dxt3_row(chunk),
+ DxtVariant::DXT5 => encode_dxt5_row(chunk),
+ };
+ self.w.write_all(&data)?;
+ }
+ Ok(())
+ }
+}
+
+/**
+ * Actual encoding/decoding logic below.
+ */
+use std::mem::swap;
+
+type Rgb = [u8; 3];
+
+/// decodes a 5-bit R, 6-bit G, 5-bit B 16-bit packed color value into 8-bit RGB
+/// mapping is done so min/max range values are preserved. So for 5-bit
+/// values 0x00 -> 0x00 and 0x1F -> 0xFF
+fn enc565_decode(value: u16) -> Rgb {
+ let red = (value >> 11) & 0x1F;
+ let green = (value >> 5) & 0x3F;
+ let blue = (value) & 0x1F;
+ [
+ (red * 0xFF / 0x1F) as u8,
+ (green * 0xFF / 0x3F) as u8,
+ (blue * 0xFF / 0x1F) as u8,
+ ]
+}
+
+/// encodes an 8-bit RGB value into a 5-bit R, 6-bit G, 5-bit B 16-bit packed color value
+/// mapping preserves min/max values. It is guaranteed that i == encode(decode(i)) for all i
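+/// (e.g. `[0xFF, 0x00, 0xFF]` encodes to `0xF81F`, which decodes back to `[0xFF, 0x00, 0xFF]`)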
+fn enc565_encode(rgb: Rgb) -> u16 {
+ let red = (u16::from(rgb[0]) * 0x1F + 0x7E) / 0xFF;
+ let green = (u16::from(rgb[1]) * 0x3F + 0x7E) / 0xFF;
+ let blue = (u16::from(rgb[2]) * 0x1F + 0x7E) / 0xFF;
+ (red << 11) | (green << 5) | blue
+}
+
+/// utility function: squares a value
+fn square(a: i32) -> i32 {
+ a * a
+}
+
+/// returns the squared error between two RGB values
+fn diff(a: Rgb, b: Rgb) -> i32 {
+ square(i32::from(a[0]) - i32::from(b[0]))
+ + square(i32::from(a[1]) - i32::from(b[1]))
+ + square(i32::from(a[2]) - i32::from(b[2]))
+}
+
+/*
+ * Functions for decoding DXT compression
+ */
+
+/// Constructs the DXT5 alpha lookup table from the two alpha entries
+/// if alpha0 > alpha1, constructs a table of [a0, a1, 6 linearly interpolated values from a0 to a1]
+/// if alpha0 <= alpha1, constructs a table of [a0, a1, 4 linearly interpolated values from a0 to a1, 0, 0xFF]
+fn alpha_table_dxt5(alpha0: u8, alpha1: u8) -> [u8; 8] {
+ let mut table = [alpha0, alpha1, 0, 0, 0, 0, 0, 0xFF];
+ if alpha0 > alpha1 {
+ for i in 2..8u16 {
+ table[i as usize] =
+ (((8 - i) * u16::from(alpha0) + (i - 1) * u16::from(alpha1)) / 7) as u8;
+ }
+ } else {
+ for i in 2..6u16 {
+ table[i as usize] =
+ (((6 - i) * u16::from(alpha0) + (i - 1) * u16::from(alpha1)) / 5) as u8;
+ }
+ }
+ table
+}
+
+/// decodes an 8-byte dxt color block into the RGB channels of a 16xRGB or 16xRGBA block.
+/// source should have a length of 8, dest a length of 48 (RGB) or 64 (RGBA)
+fn decode_dxt_colors(source: &[u8], dest: &mut [u8], is_dxt1: bool) {
+ // sanity checks, also enable the compiler to elide all following bound checks
+ assert!(source.len() == 8 && (dest.len() == 48 || dest.len() == 64));
+ // calculate pitch to store RGB values in dest (3 for RGB, 4 for RGBA)
+ let pitch = dest.len() / 16;
+
+ // extract color data
+ let color0 = u16::from(source[0]) | (u16::from(source[1]) << 8);
+ let color1 = u16::from(source[2]) | (u16::from(source[3]) << 8);
+ let color_table = u32::from(source[4])
+ | (u32::from(source[5]) << 8)
+ | (u32::from(source[6]) << 16)
+ | (u32::from(source[7]) << 24);
+ // let color_table = source[4..8].iter().rev().fold(0, |t, &b| (t << 8) | b as u32);
+
+ // decode the colors to rgb format
+ let mut colors = [[0; 3]; 4];
+ colors[0] = enc565_decode(color0);
+ colors[1] = enc565_decode(color1);
+
+ // determine color interpolation method
+ if color0 > color1 || !is_dxt1 {
+ // linearly interpolate the other two color table entries
+ for i in 0..3 {
+ colors[2][i] = ((u16::from(colors[0][i]) * 2 + u16::from(colors[1][i]) + 1) / 3) as u8;
+ colors[3][i] = ((u16::from(colors[0][i]) + u16::from(colors[1][i]) * 2 + 1) / 3) as u8;
+ }
+ } else {
+ // linearly interpolate one other entry, keep the other at 0
+ for i in 0..3 {
+ colors[2][i] = ((u16::from(colors[0][i]) + u16::from(colors[1][i]) + 1) / 2) as u8;
+ }
+ }
+
+ // serialize the result. Every color is determined by looking up
+ // two bits in color_table which identify which color to actually pick from the 4 possible colors
+ for i in 0..16 {
+ dest[i * pitch..i * pitch + 3]
+ .copy_from_slice(&colors[(color_table >> (i * 2)) as usize & 3]);
+ }
+}
+
+/// Decodes a 16-byte block of DXT5 data to a 16xRGBA block
+fn decode_dxt5_block(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() == 16 && dest.len() == 64);
+
+ // extract alpha index table (stored as little endian 64-bit value)
+ let alpha_table = source[2..8]
+ .iter()
+ .rev()
+ .fold(0, |t, &b| (t << 8) | u64::from(b));
+
+    // alpha level decode
+ let alphas = alpha_table_dxt5(source[0], source[1]);
+
+ // serialize alpha
+ for i in 0..16 {
+ dest[i * 4 + 3] = alphas[(alpha_table >> (i * 3)) as usize & 7];
+ }
+
+ // handle colors
+ decode_dxt_colors(&source[8..16], dest, false);
+}
+
+/// Decodes a 16-byte block of DXT3 data to a 16xRGBA block
+fn decode_dxt3_block(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() == 16 && dest.len() == 64);
+
+ // extract alpha index table (stored as little endian 64-bit value)
+ let alpha_table = source[0..8]
+ .iter()
+ .rev()
+ .fold(0, |t, &b| (t << 8) | u64::from(b));
+
+ // serialize alpha (stored as 4-bit values)
+ for i in 0..16 {
+ dest[i * 4 + 3] = ((alpha_table >> (i * 4)) as u8 & 0xF) * 0x11;
+ }
+
+ // handle colors
+ decode_dxt_colors(&source[8..16], dest, false);
+}
+
+/// Decodes an 8-byte block of DXT1 data to a 16xRGB block
+fn decode_dxt1_block(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() == 8 && dest.len() == 48);
+ decode_dxt_colors(source, dest, true);
+}
+
+/// Decode a row of DXT1 data to four rows of RGB data.
+/// source.len() should be a multiple of 8, otherwise this panics.
+fn decode_dxt1_row(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() % 8 == 0);
+ let block_count = source.len() / 8;
+ assert!(dest.len() >= block_count * 48);
+
+ // contains the 16 decoded pixels per block
+ let mut decoded_block = [0u8; 48];
+
+ for (x, encoded_block) in source.chunks(8).enumerate() {
+ decode_dxt1_block(encoded_block, &mut decoded_block);
+
+ // copy the values from the decoded block to linewise RGB layout
+ for line in 0..4 {
+ let offset = (block_count * line + x) * 12;
+ dest[offset..offset + 12].copy_from_slice(&decoded_block[line * 12..(line + 1) * 12]);
+ }
+ }
+}
+
+/// Decode a row of DXT3 data to four rows of RGBA data.
+/// source.len() should be a multiple of 16, otherwise this panics.
+fn decode_dxt3_row(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() % 16 == 0);
+ let block_count = source.len() / 16;
+ assert!(dest.len() >= block_count * 64);
+
+ // contains the 16 decoded pixels per block
+ let mut decoded_block = [0u8; 64];
+
+ for (x, encoded_block) in source.chunks(16).enumerate() {
+ decode_dxt3_block(encoded_block, &mut decoded_block);
+
+ // copy the values from the decoded block to linewise RGB layout
+ for line in 0..4 {
+ let offset = (block_count * line + x) * 16;
+ dest[offset..offset + 16].copy_from_slice(&decoded_block[line * 16..(line + 1) * 16]);
+ }
+ }
+}
+
+/// Decode a row of DXT5 data to four rows of RGBA data.
+/// source.len() should be a multiple of 16, otherwise this panics.
+fn decode_dxt5_row(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() % 16 == 0);
+ let block_count = source.len() / 16;
+ assert!(dest.len() >= block_count * 64);
+
+ // contains the 16 decoded pixels per block
+ let mut decoded_block = [0u8; 64];
+
+ for (x, encoded_block) in source.chunks(16).enumerate() {
+ decode_dxt5_block(encoded_block, &mut decoded_block);
+
+ // copy the values from the decoded block to linewise RGB layout
+ for line in 0..4 {
+ let offset = (block_count * line + x) * 16;
+ dest[offset..offset + 16].copy_from_slice(&decoded_block[line * 16..(line + 1) * 16]);
+ }
+ }
+}
+
+/*
+ * Functions for encoding DXT compression
+ */
+
+/// Tries to perform the color encoding part of dxt compression
+/// the approach taken is simple, it picks unique combinations
+/// of the colors present in the block, and attempts to encode the
+/// block with each, picking the encoding that yields the least
+/// squared error out of all of them.
+///
+/// This could probably be faster but is already reasonably fast
+/// and a good reference impl to optimize others against.
+///
+/// Another way to perform this analysis would be to perform a
+/// singular value decomposition of the different colors, and
+/// then pick 2 points on this line as the base colors. But
+/// this is still rather unwieldy math and has issues
+/// with the 3-linear-colors-and-0 case, it's also worse
+/// at conserving the original colors.
+///
+/// source: should be RGBAx16 or RGBx16 bytes of data,
+/// dest 8 bytes of resulting encoded color data
+fn encode_dxt_colors(source: &[u8], dest: &mut [u8], is_dxt1: bool) {
+ // sanity checks and determine stride when parsing the source data
+ assert!((source.len() == 64 || source.len() == 48) && dest.len() == 8);
+ let stride = source.len() / 16;
+
+ // reference colors array
+ let mut colors = [[0u8; 3]; 4];
+
+ // Put the colors we're going to be processing in an array with pure RGB layout
+ // note: we reverse the pixel order here. The reason for this is found in the inner quantization loop.
+ let mut targets = [[0u8; 3]; 16];
+ for (s, d) in source.chunks(stride).rev().zip(&mut targets) {
+ *d = [s[0], s[1], s[2]];
+ }
+
+ // roundtrip all colors through the r5g6b5 encoding
+ for rgb in &mut targets {
+ *rgb = enc565_decode(enc565_encode(*rgb));
+ }
+
+ // and deduplicate the set of colors to choose from as the algorithm is O(N^2) in this
+ let mut colorspace_ = [[0u8; 3]; 16];
+ let mut colorspace_len = 0;
+ for color in &targets {
+ if !colorspace_[..colorspace_len].contains(color) {
+ colorspace_[colorspace_len] = *color;
+ colorspace_len += 1;
+ }
+ }
+ let mut colorspace = &colorspace_[..colorspace_len];
+
+ // in case of slight gradients it can happen that there's only one entry left in the color table.
+    // as the resulting banding can be quite bad if we just left the block at the closest
+ // encodable color, we have a special path here that tries to emulate the wanted color
+ // using the linear interpolation between gradients
+ if colorspace.len() == 1 {
+ // the base color we got from colorspace reduction
+ let ref_rgb = colorspace[0];
+ // the unreduced color in this block that's the furthest away from the actual block
+ let mut rgb = targets
+ .iter()
+ .cloned()
+ .max_by_key(|rgb| diff(*rgb, ref_rgb))
+ .unwrap();
+ // amplify differences by 2.5, which should push them to the next quantized value
+ // if possible without overshoot
+ for i in 0..3 {
+ rgb[i] =
+ ((i16::from(rgb[i]) - i16::from(ref_rgb[i])) * 5 / 2 + i16::from(ref_rgb[i])) as u8;
+ }
+
+ // roundtrip it through quantization
+ let encoded = enc565_encode(rgb);
+ let rgb = enc565_decode(encoded);
+
+ // in case this didn't land us a different color the best way to represent this field is
+ // as a single color block
+ if rgb == ref_rgb {
+ dest[0] = encoded as u8;
+ dest[1] = (encoded >> 8) as u8;
+
+ for d in dest.iter_mut().take(8).skip(2) {
+ *d = 0;
+ }
+ return;
+ }
+
+ // we did find a separate value: add it to the options so after one round of quantization
+ // we're done
+ colorspace_[1] = rgb;
+ colorspace = &colorspace_[..2];
+ }
+
+ // block quantization loop: we basically just try every possible combination, returning
+ // the combination with the least squared error
+ // stores the best candidate colors
+ let mut chosen_colors = [[0; 3]; 4];
+ // did this index table use the [0,0,0] variant
+ let mut chosen_use_0 = false;
+ // error calculated for the last entry
+ let mut chosen_error = 0xFFFF_FFFFu32;
+
+ // loop through unique permutations of the colorspace, where c1 != c2
+ 'search: for (i, &c1) in colorspace.iter().enumerate() {
+ colors[0] = c1;
+
+ for &c2 in &colorspace[0..i] {
+ colors[1] = c2;
+
+ if is_dxt1 {
+ // what's inside here is run at most 120 times.
+ for use_0 in 0..2 {
+ // and 240 times here.
+
+ if use_0 != 0 {
+ // interpolate one color, set the other to 0
+ for i in 0..3 {
+ colors[2][i] =
+ ((u16::from(colors[0][i]) + u16::from(colors[1][i]) + 1) / 2) as u8;
+ }
+ colors[3] = [0, 0, 0];
+ } else {
+ // interpolate to get 2 more colors
+ for i in 0..3 {
+ colors[2][i] =
+ ((u16::from(colors[0][i]) * 2 + u16::from(colors[1][i]) + 1) / 3)
+ as u8;
+ colors[3][i] =
+ ((u16::from(colors[0][i]) + u16::from(colors[1][i]) * 2 + 1) / 3)
+ as u8;
+ }
+ }
+
+ // calculate the total error if we were to quantize the block with these color combinations
+ // both these loops have statically known iteration counts and are well vectorizable
+ // note that the inside of this can be run about 15360 times worst case, i.e. 960 times per
+ // pixel.
+ let total_error = targets
+ .iter()
+ .map(|t| colors.iter().map(|c| diff(*c, *t) as u32).min().unwrap())
+ .sum();
+
+ // update the match if we found a better one
+ if total_error < chosen_error {
+ chosen_colors = colors;
+ chosen_use_0 = use_0 != 0;
+ chosen_error = total_error;
+
+ // if we've got a perfect or at most 1 LSB off match, we're done
+ if total_error < 4 {
+ break 'search;
+ }
+ }
+ }
+ } else {
+ // what's inside here is run at most 120 times.
+
+ // interpolate to get 2 more colors
+ for i in 0..3 {
+ colors[2][i] =
+ ((u16::from(colors[0][i]) * 2 + u16::from(colors[1][i]) + 1) / 3) as u8;
+ colors[3][i] =
+ ((u16::from(colors[0][i]) + u16::from(colors[1][i]) * 2 + 1) / 3) as u8;
+ }
+
+ // calculate the total error if we were to quantize the block with these color combinations
+ // both these loops have statically known iteration counts and are well vectorizable
+ // note that the inside of this can be run about 15360 times worst case, i.e. 960 times per
+ // pixel.
+ let total_error = targets
+ .iter()
+ .map(|t| colors.iter().map(|c| diff(*c, *t) as u32).min().unwrap())
+ .sum();
+
+ // update the match if we found a better one
+ if total_error < chosen_error {
+ chosen_colors = colors;
+ chosen_error = total_error;
+
+ // if we've got a perfect or at most 1 LSB off match, we're done
+ if total_error < 4 {
+ break 'search;
+ }
+ }
+ }
+ }
+ }
+
+ // calculate the final indices
+ // note that targets is already in reverse pixel order, to make the index computation easy.
+ let mut chosen_indices = 0u32;
+ for t in &targets {
+ let (idx, _) = chosen_colors
+ .iter()
+ .enumerate()
+ .min_by_key(|&(_, c)| diff(*c, *t))
+ .unwrap();
+ chosen_indices = (chosen_indices << 2) | idx as u32;
+ }
+
+ // encode the colors
+ let mut color0 = enc565_encode(chosen_colors[0]);
+ let mut color1 = enc565_encode(chosen_colors[1]);
+
+ // determine encoding. Note that color0 == color1 is impossible at this point
+ if is_dxt1 {
+ if color0 > color1 {
+ if chosen_use_0 {
+ swap(&mut color0, &mut color1);
+ // Indexes are packed 2 bits wide, swap index 0/1 but preserve 2/3.
+ let filter = (chosen_indices & 0xAAAA_AAAA) >> 1;
+ chosen_indices ^= filter ^ 0x5555_5555;
+ }
+ } else if !chosen_use_0 {
+ swap(&mut color0, &mut color1);
+ // Indexes are packed 2 bits wide, swap index 0/1 and 2/3.
+ chosen_indices ^= 0x5555_5555;
+ }
+ }
+
+ // encode everything.
+ dest[0] = color0 as u8;
+ dest[1] = (color0 >> 8) as u8;
+ dest[2] = color1 as u8;
+ dest[3] = (color1 >> 8) as u8;
+ for i in 0..4 {
+ dest[i + 4] = (chosen_indices >> (i * 8)) as u8;
+ }
+}
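+
+// A minimal sketch (illustrative only, not used by the encoder above) of the 2-bit
+// index packing: because `targets` is iterated in reverse pixel order, shifting the
+// accumulator left by two bits per pixel leaves pixel 0 in the least significant
+// bits, which is the order the DXT index table stores them in.
+#[test]
+fn dxt_color_index_packing_sketch() {
+    // hypothetical per-pixel indices, in pixel order 0..16
+    let indices: [u8; 16] = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3];
+    let mut packed = 0u32;
+    for &idx in indices.iter().rev() {
+        packed = (packed << 2) | u32::from(idx);
+    }
+    assert_eq!(packed & 0b11, 0); // pixel 0 ends up in bits 0..2
+    assert_eq!((packed >> 2) & 0b11, 1); // pixel 1 in bits 2..4
+    assert_eq!((packed >> 30) & 0b11, 3); // pixel 15 in bits 30..32
+}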
+
+/// Encodes a buffer of 16 alpha bytes into a DXT5 alpha index table,
+/// where the alpha table the values are indexed against is created by
+/// calling alpha_table_dxt5(alpha0, alpha1).
+/// Returns the total squared error and the packed index table.
+fn encode_dxt5_alpha(alpha0: u8, alpha1: u8, alphas: &[u8; 16]) -> (i32, u64) {
+ // create a table for the given alpha ranges
+ let table = alpha_table_dxt5(alpha0, alpha1);
+ let mut indices = 0u64;
+ let mut total_error = 0i32;
+
+ // least error brute force search
+ for (i, &a) in alphas.iter().enumerate() {
+ let (index, error) = table
+ .iter()
+ .enumerate()
+ .map(|(i, &e)| (i, square(i32::from(e) - i32::from(a))))
+ .min_by_key(|&(_, e)| e)
+ .unwrap();
+ total_error += error;
+ indices |= (index as u64) << (i * 3);
+ }
+
+ (total_error, indices)
+}
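+
+// A small sketch (illustrative only) of the 3-bit alpha index packing used above:
+// pixel i occupies bits 3*i .. 3*i+3 of the returned index word, so all 16 indices
+// fit in the low 48 bits of the u64.
+#[test]
+fn dxt5_alpha_index_packing_sketch() {
+    let indices: [u64; 16] = [7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6];
+    let mut packed = 0u64;
+    for (i, &idx) in indices.iter().enumerate() {
+        packed |= idx << (i * 3);
+    }
+    assert_eq!(packed & 0b111, 7); // pixel 0
+    assert_eq!((packed >> 45) & 0b111, 6); // pixel 15
+    assert_eq!(packed >> 48, 0); // upper 16 bits stay clear
+}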
+
+/// Encodes an RGBAx16 sequence of bytes into a 16-byte DXT5 block
+fn encode_dxt5_block(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() == 64 && dest.len() == 16);
+
+ // perform dxt color encoding
+ encode_dxt_colors(source, &mut dest[8..16], false);
+
+ // copy out the alpha bytes
+ let mut alphas = [0; 16];
+ for i in 0..16 {
+ alphas[i] = source[i * 4 + 3];
+ }
+
+ // try both alpha compression methods, see which has the least error.
+ let alpha07 = alphas.iter().cloned().min().unwrap();
+ let alpha17 = alphas.iter().cloned().max().unwrap();
+ let (error7, indices7) = encode_dxt5_alpha(alpha07, alpha17, &alphas);
+
+ // if all alphas are 0 or 255 it doesn't particularly matter what we do here.
+ let alpha05 = alphas
+ .iter()
+ .cloned()
+ .filter(|&i| i != 255)
+ .max()
+ .unwrap_or(255);
+ let alpha15 = alphas
+ .iter()
+ .cloned()
+ .filter(|&i| i != 0)
+ .min()
+ .unwrap_or(0);
+ let (error5, indices5) = encode_dxt5_alpha(alpha05, alpha15, &alphas);
+
+ // pick the best one, encode the min/max values
+ let mut alpha_table = if error5 < error7 {
+ dest[0] = alpha05;
+ dest[1] = alpha15;
+ indices5
+ } else {
+ dest[0] = alpha07;
+ dest[1] = alpha17;
+ indices7
+ };
+
+ // encode the alphas
+ for byte in dest[2..8].iter_mut() {
+ *byte = alpha_table as u8;
+ alpha_table >>= 8;
+ }
+}
+
+/// Encodes an RGBAx16 sequence of bytes into a 16-byte DXT3 block
+fn encode_dxt3_block(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() == 64 && dest.len() == 16);
+
+ // perform dxt color encoding
+ encode_dxt_colors(source, &mut dest[8..16], false);
+
+ // DXT3 alpha compression is very simple, just round towards the nearest value
+
+ // index the alpha values into the 64bit alpha table
+ let mut alpha_table = 0u64;
+ for i in 0..16 {
+ let alpha = u64::from(source[i * 4 + 3]);
+ let alpha = (alpha + 0x8) / 0x11;
+ alpha_table |= alpha << (i * 4);
+ }
+
+ // encode the alpha values
+ for byte in &mut dest[0..8] {
+ *byte = alpha_table as u8;
+ alpha_table >>= 8;
+ }
+}
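+
+// A small sketch (illustrative only) of the DXT3 alpha quantization above: adding
+// 0x8 before dividing by 0x11 (= 255 / 15) rounds an 8-bit alpha value to the
+// nearest 4-bit level.
+#[test]
+fn dxt3_alpha_quantization_sketch() {
+    let quantize = |alpha: u64| (alpha + 0x8) / 0x11;
+    assert_eq!(quantize(0), 0);
+    assert_eq!(quantize(8), 0); // 8/17 is closer to 0 than to 1
+    assert_eq!(quantize(9), 1); // 9/17 rounds up
+    assert_eq!(quantize(255), 15);
+}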
+
+/// Encodes an RGBx16 sequence of bytes into an 8-byte DXT1 block
+fn encode_dxt1_block(source: &[u8], dest: &mut [u8]) {
+ assert!(source.len() == 48 && dest.len() == 8);
+
+ // perform dxt color encoding
+ encode_dxt_colors(source, dest, true);
+}
+
+/// Encodes four rows of RGB data into a row of DXT1 blocks.
+/// source.len() should be a multiple of 48, otherwise this panics.
+fn encode_dxt1_row(source: &[u8]) -> Vec<u8> {
+ assert!(source.len() % 48 == 0);
+ let block_count = source.len() / 48;
+
+ let mut dest = vec![0u8; block_count * 8];
+ // holds the 16 uncompressed pixels of the block being encoded
+ let mut decoded_block = [0u8; 48];
+
+ for (x, encoded_block) in dest.chunks_mut(8).enumerate() {
+ // gather the block's four scanlines from the row-major RGB source
+ for line in 0..4 {
+ let offset = (block_count * line + x) * 12;
+ decoded_block[line * 12..(line + 1) * 12].copy_from_slice(&source[offset..offset + 12]);
+ }
+
+ encode_dxt1_block(&decoded_block, encoded_block);
+ }
+ dest
+}
+
+/// Encodes four rows of RGBA data into a row of DXT3 blocks.
+/// source.len() should be a multiple of 64, otherwise this panics.
+fn encode_dxt3_row(source: &[u8]) -> Vec<u8> {
+ assert!(source.len() % 64 == 0);
+ let block_count = source.len() / 64;
+
+ let mut dest = vec![0u8; block_count * 16];
+ // holds the 16 uncompressed pixels of the block being encoded
+ let mut decoded_block = [0u8; 64];
+
+ for (x, encoded_block) in dest.chunks_mut(16).enumerate() {
+ // gather the block's four scanlines from the row-major RGBA source
+ for line in 0..4 {
+ let offset = (block_count * line + x) * 16;
+ decoded_block[line * 16..(line + 1) * 16].copy_from_slice(&source[offset..offset + 16]);
+ }
+
+ encode_dxt3_block(&decoded_block, encoded_block);
+ }
+ dest
+}
+
+/// Encodes four rows of RGBA data into a row of DXT5 blocks.
+/// source.len() should be a multiple of 64, otherwise this panics.
+fn encode_dxt5_row(source: &[u8]) -> Vec<u8> {
+ assert!(source.len() % 64 == 0);
+ let block_count = source.len() / 64;
+
+ let mut dest = vec![0u8; block_count * 16];
+ // holds the 16 uncompressed pixels of the block being encoded
+ let mut decoded_block = [0u8; 64];
+
+ for (x, encoded_block) in dest.chunks_mut(16).enumerate() {
+ // gather the block's four scanlines from the row-major RGBA source
+ for line in 0..4 {
+ let offset = (block_count * line + x) * 16;
+ decoded_block[line * 16..(line + 1) * 16].copy_from_slice(&source[offset..offset + 16]);
+ }
+
+ encode_dxt5_block(&decoded_block, encoded_block);
+ }
+ dest
+}
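+
+// A minimal usage sketch (illustrative only): a solid-colour 4x4 block encodes to a
+// single 8-byte DXT1 block, and the single-colour special path above leaves the six
+// index bytes zeroed.
+#[test]
+fn encode_dxt1_row_solid_block_sketch() {
+    // four scanlines of four identical RGB pixels
+    let source = [[0xFFu8, 0x00, 0x00]; 16].concat();
+    let encoded = encode_dxt1_row(&source);
+    assert_eq!(encoded.len(), 8);
+    assert_eq!(&encoded[2..], &[0u8; 6][..]);
+}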
diff --git a/vendor/image/src/codecs/farbfeld.rs b/vendor/image/src/codecs/farbfeld.rs
new file mode 100644
index 0000000..b543ade
--- /dev/null
+++ b/vendor/image/src/codecs/farbfeld.rs
@@ -0,0 +1,400 @@
+//! Decoding of farbfeld images
+//!
+//! farbfeld is a lossless image format which is easy to parse, pipe and compress.
+//!
+//! It has the following format:
+//!
+//! | Bytes | Description |
+//! |--------|---------------------------------------------------------|
+//! | 8 | "farbfeld" magic value |
+//! | 4 | 32-Bit BE unsigned integer (width) |
+//! | 4 | 32-Bit BE unsigned integer (height) |
+//! | [2222] | 4⋅16-Bit BE unsigned integers [RGBA] / pixel, row-major |
+//!
+//! The RGB-data should be sRGB for best interoperability and not alpha-premultiplied.
+//!
+//! # Related Links
+//! * <https://tools.suckless.org/farbfeld/> - the farbfeld specification
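+//!
+//! # Examples
+//!
+//! A minimal sketch (illustrative only; `image.ff` is a placeholder path) of reading
+//! the header fields described above with this module's decoder:
+//!
+//! ```rust,no_run
+//! use image::codecs::farbfeld::FarbfeldDecoder;
+//! use image::ImageDecoder;
+//! use std::fs::File;
+//! use std::io::BufReader;
+//!
+//! # fn main() -> image::ImageResult<()> {
+//! let reader = BufReader::new(File::open("image.ff")?);
+//! let decoder = FarbfeldDecoder::new(reader)?;
+//! // width and height come from the two 32-bit big-endian fields after the magic
+//! let (width, height) = decoder.dimensions();
+//! println!("{}x{} RGBA16 image", width, height);
+//! # Ok(())
+//! # }
+//! ```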
+
+use std::convert::TryFrom;
+use std::i64;
+use std::io::{self, Read, Seek, SeekFrom, Write};
+
+use byteorder::{BigEndian, ByteOrder, NativeEndian};
+
+use crate::color::ColorType;
+use crate::error::{
+ DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::{self, ImageDecoder, ImageDecoderRect, ImageEncoder, ImageFormat, Progress};
+
+/// farbfeld Reader
+pub struct FarbfeldReader<R: Read> {
+ width: u32,
+ height: u32,
+ inner: R,
+ /// Relative to the start of the pixel data
+ current_offset: u64,
+ cached_byte: Option<u8>,
+}
+
+impl<R: Read> FarbfeldReader<R> {
+ fn new(mut buffered_read: R) -> ImageResult<FarbfeldReader<R>> {
+ fn read_dimm<R: Read>(from: &mut R) -> ImageResult<u32> {
+ let mut buf = [0u8; 4];
+ from.read_exact(&mut buf).map_err(|err| {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Farbfeld.into(), err))
+ })?;
+ Ok(BigEndian::read_u32(&buf))
+ }
+
+ let mut magic = [0u8; 8];
+ buffered_read.read_exact(&mut magic).map_err(|err| {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Farbfeld.into(), err))
+ })?;
+ if &magic != b"farbfeld" {
+ return Err(ImageError::Decoding(DecodingError::new(
+ ImageFormat::Farbfeld.into(),
+ format!("Invalid magic: {:02x?}", magic),
+ )));
+ }
+
+ let reader = FarbfeldReader {
+ width: read_dimm(&mut buffered_read)?,
+ height: read_dimm(&mut buffered_read)?,
+ inner: buffered_read,
+ current_offset: 0,
+ cached_byte: None,
+ };
+
+ if crate::utils::check_dimension_overflow(
+ reader.width,
+ reader.height,
+ // ColorType is always rgba16
+ ColorType::Rgba16.bytes_per_pixel(),
+ ) {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Farbfeld.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "Image dimensions ({}x{}) are too large",
+ reader.width, reader.height
+ )),
+ ),
+ ));
+ }
+
+ Ok(reader)
+ }
+}
+
+impl<R: Read> Read for FarbfeldReader<R> {
+ fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> {
+ let mut bytes_written = 0;
+ if let Some(byte) = self.cached_byte.take() {
+ buf[0] = byte;
+ buf = &mut buf[1..];
+ bytes_written = 1;
+ self.current_offset += 1;
+ }
+
+ if buf.len() == 1 {
+ buf[0] = cache_byte(&mut self.inner, &mut self.cached_byte)?;
+ bytes_written += 1;
+ self.current_offset += 1;
+ } else {
+ for channel_out in buf.chunks_exact_mut(2) {
+ consume_channel(&mut self.inner, channel_out)?;
+ bytes_written += 2;
+ self.current_offset += 2;
+ }
+ }
+
+ Ok(bytes_written)
+ }
+}
+
+impl<R: Read + Seek> Seek for FarbfeldReader<R> {
+ fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
+ fn parse_offset(original_offset: u64, end_offset: u64, pos: SeekFrom) -> Option<i64> {
+ match pos {
+ SeekFrom::Start(off) => i64::try_from(off)
+ .ok()?
+ .checked_sub(i64::try_from(original_offset).ok()?),
+ SeekFrom::End(off) => {
+ if off < i64::try_from(end_offset).unwrap_or(i64::MAX) {
+ None
+ } else {
+ Some(i64::try_from(end_offset.checked_sub(original_offset)?).ok()? + off)
+ }
+ }
+ SeekFrom::Current(off) => {
+ if off < i64::try_from(original_offset).unwrap_or(i64::MAX) {
+ None
+ } else {
+ Some(off)
+ }
+ }
+ }
+ }
+
+ let original_offset = self.current_offset;
+ let end_offset = self.width as u64 * self.height as u64 * 2;
+ let offset_from_current =
+ parse_offset(original_offset, end_offset, pos).ok_or_else(|| {
+ io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "invalid seek to a negative or overflowing position",
+ )
+ })?;
+
+ // TODO: convert to seek_relative() once that gets stabilised
+ self.inner.seek(SeekFrom::Current(offset_from_current))?;
+ self.current_offset = if offset_from_current < 0 {
+ original_offset.checked_sub(offset_from_current.wrapping_neg() as u64)
+ } else {
+ original_offset.checked_add(offset_from_current as u64)
+ }
+ .expect("This should've been checked above");
+
+ if self.current_offset < end_offset && self.current_offset % 2 == 1 {
+ let curr = self.inner.seek(SeekFrom::Current(-1))?;
+ cache_byte(&mut self.inner, &mut self.cached_byte)?;
+ self.inner.seek(SeekFrom::Start(curr))?;
+ } else {
+ self.cached_byte = None;
+ }
+
+ Ok(original_offset)
+ }
+}
+
+fn consume_channel<R: Read>(from: &mut R, to: &mut [u8]) -> io::Result<()> {
+ let mut ibuf = [0u8; 2];
+ from.read_exact(&mut ibuf)?;
+ NativeEndian::write_u16(to, BigEndian::read_u16(&ibuf));
+ Ok(())
+}
+
+fn cache_byte<R: Read>(from: &mut R, cached_byte: &mut Option<u8>) -> io::Result<u8> {
+ let mut obuf = [0u8; 2];
+ consume_channel(from, &mut obuf)?;
+ *cached_byte = Some(obuf[1]);
+ Ok(obuf[0])
+}
+
+/// farbfeld decoder
+pub struct FarbfeldDecoder<R: Read> {
+ reader: FarbfeldReader<R>,
+}
+
+impl<R: Read> FarbfeldDecoder<R> {
+ /// Creates a new decoder that decodes from the stream ```r```
+ pub fn new(buffered_read: R) -> ImageResult<FarbfeldDecoder<R>> {
+ Ok(FarbfeldDecoder {
+ reader: FarbfeldReader::new(buffered_read)?,
+ })
+ }
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for FarbfeldDecoder<R> {
+ type Reader = FarbfeldReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.reader.width, self.reader.height)
+ }
+
+ fn color_type(&self) -> ColorType {
+ ColorType::Rgba16
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ Ok(self.reader)
+ }
+
+ fn scanline_bytes(&self) -> u64 {
+ 2
+ }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoderRect<'a> for FarbfeldDecoder<R> {
+ fn read_rect_with_progress<F: Fn(Progress)>(
+ &mut self,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+ buf: &mut [u8],
+ progress_callback: F,
+ ) -> ImageResult<()> {
+ // A "scanline" (defined as "shortest non-caching read" in the doc) is just one channel in this case
+
+ let start = self.reader.stream_position()?;
+ image::load_rect(
+ x,
+ y,
+ width,
+ height,
+ buf,
+ progress_callback,
+ self,
+ |s, scanline| s.reader.seek(SeekFrom::Start(scanline * 2)).map(|_| ()),
+ |s, buf| s.reader.read_exact(buf),
+ )?;
+ self.reader.seek(SeekFrom::Start(start))?;
+ Ok(())
+ }
+}
+
+/// farbfeld encoder
+pub struct FarbfeldEncoder<W: Write> {
+ w: W,
+}
+
+impl<W: Write> FarbfeldEncoder<W> {
+ /// Create a new encoder that writes its output to ```w```. The writer should be buffered.
+ pub fn new(buffered_writer: W) -> FarbfeldEncoder<W> {
+ FarbfeldEncoder { w: buffered_writer }
+ }
+
+ /// Encodes the image ```data``` (native endian)
+ /// that has dimensions ```width``` and ```height```
+ pub fn encode(self, data: &[u8], width: u32, height: u32) -> ImageResult<()> {
+ self.encode_impl(data, width, height)?;
+ Ok(())
+ }
+
+ fn encode_impl(mut self, data: &[u8], width: u32, height: u32) -> io::Result<()> {
+ self.w.write_all(b"farbfeld")?;
+
+ let mut buf = [0u8; 4];
+ BigEndian::write_u32(&mut buf, width);
+ self.w.write_all(&buf)?;
+
+ BigEndian::write_u32(&mut buf, height);
+ self.w.write_all(&buf)?;
+
+ for channel in data.chunks_exact(2) {
+ BigEndian::write_u16(&mut buf, NativeEndian::read_u16(channel));
+ self.w.write_all(&buf[..2])?;
+ }
+
+ Ok(())
+ }
+}
+
+impl<W: Write> ImageEncoder for FarbfeldEncoder<W> {
+ fn write_image(
+ self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ if color_type != ColorType::Rgba16 {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Farbfeld.into(),
+ UnsupportedErrorKind::Color(color_type.into()),
+ ),
+ ));
+ }
+
+ self.encode(buf, width, height)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::codecs::farbfeld::FarbfeldDecoder;
+ use crate::ImageDecoderRect;
+ use byteorder::{ByteOrder, NativeEndian};
+ use std::io::{Cursor, Seek, SeekFrom};
+
+ static RECTANGLE_IN: &[u8] = b"farbfeld\
+ \x00\x00\x00\x02\x00\x00\x00\x03\
+ \xFF\x01\xFE\x02\xFD\x03\xFC\x04\xFB\x05\xFA\x06\xF9\x07\xF8\x08\
+ \xF7\x09\xF6\x0A\xF5\x0B\xF4\x0C\xF3\x0D\xF2\x0E\xF1\x0F\xF0\x10\
+ \xEF\x11\xEE\x12\xED\x13\xEC\x14\xEB\x15\xEA\x16\xE9\x17\xE8\x18";
+
+ #[test]
+ fn read_rect_1x2() {
+ static RECTANGLE_OUT: &[u16] = &[
+ 0xF30D, 0xF20E, 0xF10F, 0xF010, 0xEB15, 0xEA16, 0xE917, 0xE818,
+ ];
+
+ read_rect(1, 1, 1, 2, RECTANGLE_OUT);
+ }
+
+ #[test]
+ fn read_rect_2x2() {
+ static RECTANGLE_OUT: &[u16] = &[
+ 0xFF01, 0xFE02, 0xFD03, 0xFC04, 0xFB05, 0xFA06, 0xF907, 0xF808, 0xF709, 0xF60A, 0xF50B,
+ 0xF40C, 0xF30D, 0xF20E, 0xF10F, 0xF010,
+ ];
+
+ read_rect(0, 0, 2, 2, RECTANGLE_OUT);
+ }
+
+ #[test]
+ fn read_rect_2x1() {
+ static RECTANGLE_OUT: &[u16] = &[
+ 0xEF11, 0xEE12, 0xED13, 0xEC14, 0xEB15, 0xEA16, 0xE917, 0xE818,
+ ];
+
+ read_rect(0, 2, 2, 1, RECTANGLE_OUT);
+ }
+
+ #[test]
+ fn read_rect_2x3() {
+ static RECTANGLE_OUT: &[u16] = &[
+ 0xFF01, 0xFE02, 0xFD03, 0xFC04, 0xFB05, 0xFA06, 0xF907, 0xF808, 0xF709, 0xF60A, 0xF50B,
+ 0xF40C, 0xF30D, 0xF20E, 0xF10F, 0xF010, 0xEF11, 0xEE12, 0xED13, 0xEC14, 0xEB15, 0xEA16,
+ 0xE917, 0xE818,
+ ];
+
+ read_rect(0, 0, 2, 3, RECTANGLE_OUT);
+ }
+
+ #[test]
+ fn read_rect_in_stream() {
+ static RECTANGLE_OUT: &[u16] = &[0xEF11, 0xEE12, 0xED13, 0xEC14];
+
+ let mut input = vec![];
+ input.extend_from_slice(b"This is a 31-byte-long prologue");
+ input.extend_from_slice(RECTANGLE_IN);
+ let mut input_cur = Cursor::new(input);
+ input_cur.seek(SeekFrom::Start(31)).unwrap();
+
+ let mut out_buf = [0u8; 64];
+ FarbfeldDecoder::new(input_cur)
+ .unwrap()
+ .read_rect(0, 2, 1, 1, &mut out_buf)
+ .unwrap();
+ let exp = degenerate_pixels(RECTANGLE_OUT);
+ assert_eq!(&out_buf[..exp.len()], &exp[..]);
+ }
+
+ #[test]
+ fn dimension_overflow() {
+ let header = b"farbfeld\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF";
+
+ assert!(FarbfeldDecoder::new(Cursor::new(header)).is_err());
+ }
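+
+    // A minimal round-trip sketch (illustrative only): encoding a single RGBA16 pixel
+    // writes the magic, the two big-endian dimension fields and big-endian channel
+    // values, regardless of the native byte order of the input buffer.
+    #[test]
+    fn encode_single_pixel() {
+        let mut data = [0u8; 8];
+        NativeEndian::write_u16_into(&[0x0102, 0x0304, 0x0506, 0x0708], &mut data);
+
+        let mut out = Vec::new();
+        super::FarbfeldEncoder::new(&mut out)
+            .encode(&data, 1, 1)
+            .unwrap();
+
+        assert_eq!(&out[..8], b"farbfeld");
+        assert_eq!(&out[8..16], &[0u8, 0, 0, 1, 0, 0, 0, 1]);
+        assert_eq!(&out[16..], &[0x01u8, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]);
+    }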
+
+ fn read_rect(x: u32, y: u32, width: u32, height: u32, exp_wide: &[u16]) {
+ let mut out_buf = [0u8; 64];
+ FarbfeldDecoder::new(Cursor::new(RECTANGLE_IN))
+ .unwrap()
+ .read_rect(x, y, width, height, &mut out_buf)
+ .unwrap();
+ let exp = degenerate_pixels(exp_wide);
+ assert_eq!(&out_buf[..exp.len()], &exp[..]);
+ }
+
+ fn degenerate_pixels(exp_wide: &[u16]) -> Vec<u8> {
+ let mut exp = vec![0u8; exp_wide.len() * 2];
+ NativeEndian::write_u16_into(exp_wide, &mut exp);
+ exp
+ }
+}
diff --git a/vendor/image/src/codecs/gif.rs b/vendor/image/src/codecs/gif.rs
new file mode 100644
index 0000000..dcbd841
--- /dev/null
+++ b/vendor/image/src/codecs/gif.rs
@@ -0,0 +1,606 @@
+//! Decoding of GIF Images
+//!
+//! GIF (Graphics Interchange Format) is an image format that supports lossless compression.
+//!
+//! # Related Links
+//! * <http://www.w3.org/Graphics/GIF/spec-gif89a.txt> - The GIF Specification
+//!
+//! # Examples
+//! ```rust,no_run
+//! use image::codecs::gif::{GifDecoder, GifEncoder};
+//! use image::{ImageDecoder, AnimationDecoder};
+//! use std::fs::File;
+//! # fn main() -> std::io::Result<()> {
+//! // Decode a gif into frames
+//! let file_in = File::open("foo.gif")?;
+//! let mut decoder = GifDecoder::new(file_in).unwrap();
+//! let frames = decoder.into_frames();
+//! let frames = frames.collect_frames().expect("error decoding gif");
+//!
+//! // Encode frames into a gif and save to a file
+//! let mut file_out = File::create("out.gif")?;
+//! let mut encoder = GifEncoder::new(file_out);
+//! encoder.encode_frames(frames.into_iter());
+//! # Ok(())
+//! # }
+//! ```
+#![allow(clippy::while_let_loop)]
+
+use std::convert::TryFrom;
+use std::convert::TryInto;
+use std::io::{self, Cursor, Read, Write};
+use std::marker::PhantomData;
+use std::mem;
+
+use gif::ColorOutput;
+use gif::{DisposalMethod, Frame};
+use num_rational::Ratio;
+
+use crate::animation;
+use crate::color::{ColorType, Rgba};
+use crate::error::{
+ DecodingError, EncodingError, ImageError, ImageResult, ParameterError, ParameterErrorKind,
+ UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::{self, AnimationDecoder, ImageDecoder, ImageFormat};
+use crate::io::Limits;
+use crate::traits::Pixel;
+use crate::ImageBuffer;
+
+/// GIF decoder
+pub struct GifDecoder<R: Read> {
+ reader: gif::Decoder<R>,
+ limits: Limits,
+}
+
+impl<R: Read> GifDecoder<R> {
+ /// Creates a new decoder that decodes the input stream `r`
+ pub fn new(r: R) -> ImageResult<GifDecoder<R>> {
+ let mut decoder = gif::DecodeOptions::new();
+ decoder.set_color_output(ColorOutput::RGBA);
+
+ Ok(GifDecoder {
+ reader: decoder.read_info(r).map_err(ImageError::from_decoding)?,
+ limits: Limits::default(),
+ })
+ }
+
+ /// Creates a new decoder that decodes the input stream `r`, using limits `limits`
+ pub fn with_limits(r: R, limits: Limits) -> ImageResult<GifDecoder<R>> {
+ let mut decoder = gif::DecodeOptions::new();
+ decoder.set_color_output(ColorOutput::RGBA);
+
+ Ok(GifDecoder {
+ reader: decoder.read_info(r).map_err(ImageError::from_decoding)?,
+ limits,
+ })
+ }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct GifReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for GifReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ if self.0.position() == 0 && buf.is_empty() {
+ mem::swap(buf, self.0.get_mut());
+ Ok(buf.len())
+ } else {
+ self.0.read_to_end(buf)
+ }
+ }
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for GifDecoder<R> {
+ type Reader = GifReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (
+ u32::from(self.reader.width()),
+ u32::from(self.reader.height()),
+ )
+ }
+
+ fn color_type(&self) -> ColorType {
+ ColorType::Rgba8
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ Ok(GifReader(
+ Cursor::new(image::decoder_to_vec(self)?),
+ PhantomData,
+ ))
+ }
+
+ fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+
+ let frame = match self
+ .reader
+ .next_frame_info()
+ .map_err(ImageError::from_decoding)?
+ {
+ Some(frame) => FrameInfo::new_from_frame(frame),
+ None => {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::NoMoreData,
+ )))
+ }
+ };
+
+ let (width, height) = self.dimensions();
+
+ if frame.left == 0
+ && frame.width == width
+ && (frame.top as u64 + frame.height as u64 <= height as u64)
+ {
+ // If the frame matches the logical screen, or, as a more general case,
+ // fits into it and touches its left and right borders, then
+ // we can directly write it into the buffer without causing line wraparound.
+ let line_length = usize::try_from(width)
+ .unwrap()
+ .checked_mul(self.color_type().bytes_per_pixel() as usize)
+ .unwrap();
+
+ // isolate the portion of the buffer to read the frame data into.
+ // the chunks above and below it are going to be zeroed.
+ let (blank_top, rest) =
+ buf.split_at_mut(line_length.checked_mul(frame.top as usize).unwrap());
+ let (buf, blank_bottom) =
+ rest.split_at_mut(line_length.checked_mul(frame.height as usize).unwrap());
+
+ debug_assert_eq!(buf.len(), self.reader.buffer_size());
+
+ // this is only necessary in case the buffer is not zeroed
+ for b in blank_top {
+ *b = 0;
+ }
+ // fill the middle section with the frame data
+ self.reader
+ .read_into_buffer(buf)
+ .map_err(ImageError::from_decoding)?;
+ // this is only necessary in case the buffer is not zeroed
+ for b in blank_bottom {
+ *b = 0;
+ }
+ } else {
+ // If the frame does not match the logical screen, read into an extra buffer
+ // and 'insert' the frame from left/top to logical screen width/height.
+ let buffer_size = self.reader.buffer_size();
+
+ self.limits.reserve_usize(buffer_size)?;
+
+ let mut frame_buffer = vec![0; buffer_size];
+
+ self.limits.free_usize(buffer_size);
+
+ self.reader
+ .read_into_buffer(&mut frame_buffer[..])
+ .map_err(ImageError::from_decoding)?;
+
+ let frame_buffer = ImageBuffer::from_raw(frame.width, frame.height, frame_buffer);
+ let image_buffer = ImageBuffer::from_raw(width, height, buf);
+
+ // `buffer_size` uses wrapping arithmetic, thus might not report the
+ // correct storage requirement if the result does not fit in `usize`.
+ // `ImageBuffer::from_raw` detects overflow and reports by returning `None`.
+ if frame_buffer.is_none() || image_buffer.is_none() {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Gif.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "Image dimensions ({}, {}) are too large",
+ frame.width, frame.height
+ )),
+ ),
+ ));
+ }
+
+ let frame_buffer = frame_buffer.unwrap();
+ let mut image_buffer = image_buffer.unwrap();
+
+ for (x, y, pixel) in image_buffer.enumerate_pixels_mut() {
+ let frame_x = x.wrapping_sub(frame.left);
+ let frame_y = y.wrapping_sub(frame.top);
+
+ if frame_x < frame.width && frame_y < frame.height {
+ *pixel = *frame_buffer.get_pixel(frame_x, frame_y);
+ } else {
+ // this is only necessary in case the buffer is not zeroed
+ *pixel = Rgba([0, 0, 0, 0]);
+ }
+ }
+ }
+
+ Ok(())
+ }
+}
+
+struct GifFrameIterator<R: Read> {
+ reader: gif::Decoder<R>,
+
+ width: u32,
+ height: u32,
+
+ non_disposed_frame: ImageBuffer<Rgba<u8>, Vec<u8>>,
+}
+
+impl<R: Read> GifFrameIterator<R> {
+ fn new(decoder: GifDecoder<R>) -> GifFrameIterator<R> {
+ let (width, height) = decoder.dimensions();
+
+ // intentionally ignore the background color for web compatibility
+
+ // create the first non disposed frame
+ let non_disposed_frame = ImageBuffer::from_pixel(width, height, Rgba([0, 0, 0, 0]));
+
+ GifFrameIterator {
+ reader: decoder.reader,
+ width,
+ height,
+ non_disposed_frame,
+ }
+ }
+}
+
+impl<R: Read> Iterator for GifFrameIterator<R> {
+ type Item = ImageResult<animation::Frame>;
+
+ fn next(&mut self) -> Option<ImageResult<animation::Frame>> {
+ // begin looping over each frame
+
+ let frame = match self.reader.next_frame_info() {
+ Ok(frame_info) => {
+ if let Some(frame) = frame_info {
+ FrameInfo::new_from_frame(frame)
+ } else {
+ // no more frames
+ return None;
+ }
+ }
+ Err(err) => return Some(Err(ImageError::from_decoding(err))),
+ };
+
+ let mut vec = vec![0; self.reader.buffer_size()];
+ if let Err(err) = self.reader.read_into_buffer(&mut vec) {
+ return Some(Err(ImageError::from_decoding(err)));
+ }
+
+ // create the image buffer from the raw frame.
+ // `buffer_size` uses wrapping arithmetic, thus might not report the
+ // correct storage requirement if the result does not fit in `usize`.
+ // on the other hand, `ImageBuffer::from_raw` detects overflow and
+ // reports by returning `None`.
+ let mut frame_buffer = match ImageBuffer::from_raw(frame.width, frame.height, vec) {
+ Some(frame_buffer) => frame_buffer,
+ None => {
+ return Some(Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Gif.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "Image dimensions ({}, {}) are too large",
+ frame.width, frame.height
+ )),
+ ),
+ )))
+ }
+ };
+
+ // blend the current frame with the non-disposed frame, then update
+ // the non-disposed frame according to the disposal method.
+ fn blend_and_dispose_pixel(
+ dispose: DisposalMethod,
+ previous: &mut Rgba<u8>,
+ current: &mut Rgba<u8>,
+ ) {
+ let pixel_alpha = current.channels()[3];
+ if pixel_alpha == 0 {
+ *current = *previous;
+ }
+
+ match dispose {
+ DisposalMethod::Any | DisposalMethod::Keep => {
+ // do not dispose
+ // (keep pixels from this frame)
+ // note: the `Any` disposal method is underspecified in the GIF
+ // spec, but most viewers treat it identically to `Keep`
+ *previous = *current;
+ }
+ DisposalMethod::Background => {
+ // restore to background color
+ // (background shows through transparent pixels in the next frame)
+ *previous = Rgba([0, 0, 0, 0]);
+ }
+ DisposalMethod::Previous => {
+ // restore to previous
+ // (dispose of this frame, leaving the last non-disposed frame in place)
+ }
+ }
+ }
+
+ // if `frame_buffer`'s frame exactly matches the entire image, then
+ // use it directly, else create a new buffer to hold the composited
+ // image.
+ let image_buffer = if (frame.left, frame.top) == (0, 0)
+ && (self.width, self.height) == frame_buffer.dimensions()
+ {
+ for (x, y, pixel) in frame_buffer.enumerate_pixels_mut() {
+ let previous_pixel = self.non_disposed_frame.get_pixel_mut(x, y);
+ blend_and_dispose_pixel(frame.disposal_method, previous_pixel, pixel);
+ }
+ frame_buffer
+ } else {
+ ImageBuffer::from_fn(self.width, self.height, |x, y| {
+ let frame_x = x.wrapping_sub(frame.left);
+ let frame_y = y.wrapping_sub(frame.top);
+ let previous_pixel = self.non_disposed_frame.get_pixel_mut(x, y);
+
+ if frame_x < frame_buffer.width() && frame_y < frame_buffer.height() {
+ let mut pixel = *frame_buffer.get_pixel(frame_x, frame_y);
+ blend_and_dispose_pixel(frame.disposal_method, previous_pixel, &mut pixel);
+ pixel
+ } else {
+ // out of bounds, return pixel from previous frame
+ *previous_pixel
+ }
+ })
+ };
+
+ Some(Ok(animation::Frame::from_parts(
+ image_buffer,
+ 0,
+ 0,
+ frame.delay,
+ )))
+ }
+}
+
+impl<'a, R: Read + 'a> AnimationDecoder<'a> for GifDecoder<R> {
+ fn into_frames(self) -> animation::Frames<'a> {
+ animation::Frames::new(Box::new(GifFrameIterator::new(self)))
+ }
+}
+
+struct FrameInfo {
+ left: u32,
+ top: u32,
+ width: u32,
+ height: u32,
+ disposal_method: DisposalMethod,
+ delay: animation::Delay,
+}
+
+impl FrameInfo {
+ fn new_from_frame(frame: &Frame) -> FrameInfo {
+ FrameInfo {
+ left: u32::from(frame.left),
+ top: u32::from(frame.top),
+ width: u32::from(frame.width),
+ height: u32::from(frame.height),
+ disposal_method: frame.dispose,
+ // frame.delay is in units of 10ms so frame.delay*10 is in ms
+ delay: animation::Delay::from_ratio(Ratio::new(u32::from(frame.delay) * 10, 1)),
+ }
+ }
+}
+
+/// Number of repetitions for a GIF animation
+#[derive(Clone, Copy, Debug)]
+pub enum Repeat {
+ /// Finite number of repetitions
+ Finite(u16),
+ /// Looping GIF
+ Infinite,
+}
+
+impl Repeat {
+ pub(crate) fn to_gif_enum(&self) -> gif::Repeat {
+ match self {
+ Repeat::Finite(n) => gif::Repeat::Finite(*n),
+ Repeat::Infinite => gif::Repeat::Infinite,
+ }
+ }
+}
+
+/// GIF encoder.
+pub struct GifEncoder<W: Write> {
+ w: Option<W>,
+ gif_encoder: Option<gif::Encoder<W>>,
+ speed: i32,
+ repeat: Option<Repeat>,
+}
+
+impl<W: Write> GifEncoder<W> {
+ /// Creates a new GIF encoder with a speed of 1. This prioritizes quality over performance at any cost.
+ pub fn new(w: W) -> GifEncoder<W> {
+ Self::new_with_speed(w, 1)
+ }
+
+ /// Create a new GIF encoder with the given speed parameter `speed`. See
+ /// [`Frame::from_rgba_speed`](https://docs.rs/gif/latest/gif/struct.Frame.html#method.from_rgba_speed)
+ /// for more information.
+ pub fn new_with_speed(w: W, speed: i32) -> GifEncoder<W> {
+ assert!(
+ (1..=30).contains(&speed),
+ "speed needs to be in the range [1, 30]"
+ );
+ GifEncoder {
+ w: Some(w),
+ gif_encoder: None,
+ speed,
+ repeat: None,
+ }
+ }
+
+ /// Set the repeat behaviour of the encoded GIF
+ pub fn set_repeat(&mut self, repeat: Repeat) -> ImageResult<()> {
+ if let Some(ref mut encoder) = self.gif_encoder {
+ encoder
+ .set_repeat(repeat.to_gif_enum())
+ .map_err(ImageError::from_encoding)?;
+ }
+ self.repeat = Some(repeat);
+ Ok(())
+ }
+
+ /// Encode a single image.
+ pub fn encode(
+ &mut self,
+ data: &[u8],
+ width: u32,
+ height: u32,
+ color: ColorType,
+ ) -> ImageResult<()> {
+ let (width, height) = self.gif_dimensions(width, height)?;
+ match color {
+ ColorType::Rgb8 => self.encode_gif(Frame::from_rgb(width, height, data)),
+ ColorType::Rgba8 => {
+ self.encode_gif(Frame::from_rgba(width, height, &mut data.to_owned()))
+ }
+ _ => Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Gif.into(),
+ UnsupportedErrorKind::Color(color.into()),
+ ),
+ )),
+ }
+ }
+
+ /// Encode one frame of animation.
+ pub fn encode_frame(&mut self, img_frame: animation::Frame) -> ImageResult<()> {
+ let frame = self.convert_frame(img_frame)?;
+ self.encode_gif(frame)
+ }
+
+ /// Encodes Frames.
+ /// Consider using `try_encode_frames` instead to encode an `animation::Frames`-like iterator.
+ pub fn encode_frames<F>(&mut self, frames: F) -> ImageResult<()>
+ where
+ F: IntoIterator<Item = animation::Frame>,
+ {
+ for img_frame in frames {
+ self.encode_frame(img_frame)?;
+ }
+ Ok(())
+ }
+
+ /// Try to encode a collection of `ImageResult<animation::Frame>` objects.
+ /// Use this function to encode an `animation::Frames`-like iterator.
+ /// Whenever an `Err` item is encountered, that value is returned without further actions.
+ pub fn try_encode_frames<F>(&mut self, frames: F) -> ImageResult<()>
+ where
+ F: IntoIterator<Item = ImageResult<animation::Frame>>,
+ {
+ for img_frame in frames {
+ self.encode_frame(img_frame?)?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn convert_frame(
+ &mut self,
+ img_frame: animation::Frame,
+ ) -> ImageResult<Frame<'static>> {
+ // get the delay before converting img_frame
+ let frame_delay = img_frame.delay().into_ratio().to_integer();
+ // convert img_frame into RgbaImage
+ let mut rgba_frame = img_frame.into_buffer();
+ let (width, height) = self.gif_dimensions(rgba_frame.width(), rgba_frame.height())?;
+
+ // Create the gif::Frame from the animation::Frame
+ let mut frame = Frame::from_rgba_speed(width, height, &mut rgba_frame, self.speed);
+ // Saturate the conversion to u16::MAX instead of returning an error as that
+ // would require a new special cased variant in ParameterErrorKind which most
+ // likely couldn't be reused for other cases. This isn't a bad trade-off given
+ // that the current algorithm is already lossy.
+ frame.delay = (frame_delay / 10).try_into().unwrap_or(std::u16::MAX);
+
+ Ok(frame)
+ }
+
+ fn gif_dimensions(&self, width: u32, height: u32) -> ImageResult<(u16, u16)> {
+ fn inner_dimensions(width: u32, height: u32) -> Option<(u16, u16)> {
+ let width = u16::try_from(width).ok()?;
+ let height = u16::try_from(height).ok()?;
+ Some((width, height))
+ }
+
+ // TODO: this is not very idiomatic yet. Should return an EncodingError.
+ inner_dimensions(width, height).ok_or_else(|| {
+ ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ ))
+ })
+ }
+
+ pub(crate) fn encode_gif(&mut self, mut frame: Frame) -> ImageResult<()> {
+ let gif_encoder;
+ if let Some(ref mut encoder) = self.gif_encoder {
+ gif_encoder = encoder;
+ } else {
+ let writer = self.w.take().unwrap();
+ let mut encoder = gif::Encoder::new(writer, frame.width, frame.height, &[])
+ .map_err(ImageError::from_encoding)?;
+ if let Some(ref repeat) = self.repeat {
+ encoder
+ .set_repeat(repeat.to_gif_enum())
+ .map_err(ImageError::from_encoding)?;
+ }
+ self.gif_encoder = Some(encoder);
+ gif_encoder = self.gif_encoder.as_mut().unwrap()
+ }
+
+ frame.dispose = gif::DisposalMethod::Background;
+
+ gif_encoder
+ .write_frame(&frame)
+ .map_err(ImageError::from_encoding)
+ }
+}
+
+impl ImageError {
+ fn from_decoding(err: gif::DecodingError) -> ImageError {
+ use gif::DecodingError::*;
+ match err {
+ err @ Format(_) => {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Gif.into(), err))
+ }
+ Io(io_err) => ImageError::IoError(io_err),
+ }
+ }
+
+ fn from_encoding(err: gif::EncodingError) -> ImageError {
+ use gif::EncodingError::*;
+ match err {
+ err @ Format(_) => {
+ ImageError::Encoding(EncodingError::new(ImageFormat::Gif.into(), err))
+ }
+ Io(io_err) => ImageError::IoError(io_err),
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn frames_exceeding_logical_screen_size() {
+ // This is a gif with 10x10 logical screen, but a 16x16 frame + 6px offset inside.
+ let data = vec![
+ 0x47, 0x49, 0x46, 0x38, 0x39, 0x61, 0x0A, 0x00, 0x0A, 0x00, 0xF0, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x0E, 0xFF, 0x1F, 0x21, 0xF9, 0x04, 0x09, 0x64, 0x00, 0x00, 0x00, 0x2C,
+ 0x06, 0x00, 0x06, 0x00, 0x10, 0x00, 0x10, 0x00, 0x00, 0x02, 0x23, 0x84, 0x8F, 0xA9,
+ 0xBB, 0xE1, 0xE8, 0x42, 0x8A, 0x0F, 0x50, 0x79, 0xAE, 0xD1, 0xF9, 0x7A, 0xE8, 0x71,
+ 0x5B, 0x48, 0x81, 0x64, 0xD5, 0x91, 0xCA, 0x89, 0x4D, 0x21, 0x63, 0x89, 0x4C, 0x09,
+ 0x77, 0xF5, 0x6D, 0x14, 0x00, 0x3B,
+ ];
+
+ let decoder = GifDecoder::new(Cursor::new(data)).unwrap();
+ let mut buf = vec![0u8; decoder.total_bytes() as usize];
+
+ assert!(decoder.read_image(&mut buf).is_ok());
+ }
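+
+    // A minimal usage sketch (illustrative only): `set_repeat` can be called before the
+    // first frame is written, and the encoded output starts with the GIF signature.
+    #[test]
+    fn encoder_repeat_sketch() {
+        let mut out = Vec::new();
+        {
+            let mut encoder = GifEncoder::new(&mut out);
+            encoder.set_repeat(Repeat::Infinite).unwrap();
+            encoder
+                .encode(&[0xFF, 0x00, 0x00], 1, 1, ColorType::Rgb8)
+                .unwrap();
+        }
+        assert!(out.starts_with(b"GIF8"));
+    }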
+}
diff --git a/vendor/image/src/codecs/hdr/decoder.rs b/vendor/image/src/codecs/hdr/decoder.rs
new file mode 100644
index 0000000..8329d57
--- /dev/null
+++ b/vendor/image/src/codecs/hdr/decoder.rs
@@ -0,0 +1,1033 @@
+use crate::Primitive;
+use num_traits::identities::Zero;
+#[cfg(test)]
+use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::io::{self, BufRead, Cursor, Read, Seek};
+use std::iter::Iterator;
+use std::marker::PhantomData;
+use std::num::{ParseFloatError, ParseIntError};
+use std::path::Path;
+use std::{error, fmt, mem};
+
+use crate::color::{ColorType, Rgb};
+use crate::error::{
+ DecodingError, ImageError, ImageFormatHint, ImageResult, ParameterError, ParameterErrorKind,
+ UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::{self, ImageDecoder, ImageDecoderRect, ImageFormat, Progress};
+
+/// Errors that can occur during decoding and parsing of a HDR image
+#[derive(Debug, Clone, PartialEq, Eq)]
+enum DecoderError {
+ /// HDR's "#?RADIANCE" signature wrong or missing
+ RadianceHdrSignatureInvalid,
+ /// EOF before end of header
+ TruncatedHeader,
+ /// EOF instead of image dimensions
+ TruncatedDimensions,
+
+ /// A value couldn't be parsed
+ UnparsableF32(LineType, ParseFloatError),
+ /// A value couldn't be parsed
+ UnparsableU32(LineType, ParseIntError),
+ /// Not enough numbers in line
+ LineTooShort(LineType),
+
+ /// COLORCORR contains too many numbers in strict mode
+ ExtraneousColorcorrNumbers,
+
+ /// Dimensions line had too few elements
+ DimensionsLineTooShort(usize, usize),
+ /// Dimensions line had too many elements
+ DimensionsLineTooLong(usize),
+
+ /// The length of a scanline (1) wasn't a match for the specified length (2)
+ WrongScanlineLength(usize, usize),
+ /// First pixel of a scanline is a run length marker
+ FirstPixelRlMarker,
+}
+
+impl fmt::Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DecoderError::RadianceHdrSignatureInvalid => {
+ f.write_str("Radiance HDR signature not found")
+ }
+ DecoderError::TruncatedHeader => f.write_str("EOF in header"),
+ DecoderError::TruncatedDimensions => f.write_str("EOF in dimensions line"),
+ DecoderError::UnparsableF32(line, pe) => {
+ f.write_fmt(format_args!("Cannot parse {} value as f32: {}", line, pe))
+ }
+ DecoderError::UnparsableU32(line, pe) => {
+ f.write_fmt(format_args!("Cannot parse {} value as u32: {}", line, pe))
+ }
+ DecoderError::LineTooShort(line) => {
+ f.write_fmt(format_args!("Not enough numbers in {}", line))
+ }
+ DecoderError::ExtraneousColorcorrNumbers => f.write_str("Extra numbers in COLORCORR"),
+ DecoderError::DimensionsLineTooShort(elements, expected) => f.write_fmt(format_args!(
+ "Dimensions line too short: have {} elements, expected {}",
+ elements, expected
+ )),
+ DecoderError::DimensionsLineTooLong(expected) => f.write_fmt(format_args!(
+ "Dimensions line too long, expected {} elements",
+ expected
+ )),
+ DecoderError::WrongScanlineLength(len, expected) => f.write_fmt(format_args!(
+ "Wrong length of decoded scanline: got {}, expected {}",
+ len, expected
+ )),
+ DecoderError::FirstPixelRlMarker => {
+ f.write_str("First pixel of a scanline shouldn't be run length marker")
+ }
+ }
+ }
+}
+
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Hdr.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {
+ fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+ match self {
+ DecoderError::UnparsableF32(_, err) => Some(err),
+ DecoderError::UnparsableU32(_, err) => Some(err),
+ _ => None,
+ }
+ }
+}
+
+/// Lines which contain parsable data that can fail
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+enum LineType {
+ Exposure,
+ Pixaspect,
+ Colorcorr,
+ DimensionsHeight,
+ DimensionsWidth,
+}
+
+impl fmt::Display for LineType {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(match self {
+ LineType::Exposure => "EXPOSURE",
+ LineType::Pixaspect => "PIXASPECT",
+ LineType::Colorcorr => "COLORCORR",
+ LineType::DimensionsHeight => "height dimension",
+ LineType::DimensionsWidth => "width dimension",
+ })
+ }
+}
+
+/// Adapter to conform to `ImageDecoder` trait
+#[derive(Debug)]
+pub struct HdrAdapter<R: Read> {
+ inner: Option<HdrDecoder<R>>,
+ // data: Option<Vec<u8>>,
+ meta: HdrMetadata,
+}
+
+impl<R: BufRead> HdrAdapter<R> {
+ /// Creates adapter
+ pub fn new(r: R) -> ImageResult<HdrAdapter<R>> {
+ let decoder = HdrDecoder::new(r)?;
+ let meta = decoder.metadata();
+ Ok(HdrAdapter {
+ inner: Some(decoder),
+ meta,
+ })
+ }
+
+ /// Allows reading old Radiance HDR images
+ pub fn new_nonstrict(r: R) -> ImageResult<HdrAdapter<R>> {
+ let decoder = HdrDecoder::with_strictness(r, false)?;
+ let meta = decoder.metadata();
+ Ok(HdrAdapter {
+ inner: Some(decoder),
+ meta,
+ })
+ }
+
+ /// Read the actual data of the image, and store it in Self::data.
+ fn read_image_data(&mut self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+ match self.inner.take() {
+ Some(decoder) => {
+ let img: Vec<Rgb<u8>> = decoder.read_image_ldr()?;
+ for (i, Rgb(data)) in img.into_iter().enumerate() {
+ buf[(i * 3)..][..3].copy_from_slice(&data);
+ }
+
+ Ok(())
+ }
+ None => Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::NoMoreData,
+ ))),
+ }
+ }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct HdrReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for HdrReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ if self.0.position() == 0 && buf.is_empty() {
+ mem::swap(buf, self.0.get_mut());
+ Ok(buf.len())
+ } else {
+ self.0.read_to_end(buf)
+ }
+ }
+}
+
+impl<'a, R: 'a + BufRead> ImageDecoder<'a> for HdrAdapter<R> {
+ type Reader = HdrReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.meta.width, self.meta.height)
+ }
+
+ fn color_type(&self) -> ColorType {
+ ColorType::Rgb8
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ Ok(HdrReader(
+ Cursor::new(image::decoder_to_vec(self)?),
+ PhantomData,
+ ))
+ }
+
+ fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+ self.read_image_data(buf)
+ }
+}
+
+impl<'a, R: 'a + BufRead + Seek> ImageDecoderRect<'a> for HdrAdapter<R> {
+ fn read_rect_with_progress<F: Fn(Progress)>(
+ &mut self,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+ buf: &mut [u8],
+ progress_callback: F,
+ ) -> ImageResult<()> {
+ image::load_rect(
+ x,
+ y,
+ width,
+ height,
+ buf,
+ progress_callback,
+ self,
+ |_, _| unreachable!(),
+ |s, buf| s.read_image_data(buf),
+ )
+ }
+}
+
+/// Radiance HDR file signature
+pub const SIGNATURE: &[u8] = b"#?RADIANCE";
+const SIGNATURE_LENGTH: usize = 10;
+
+/// A Radiance HDR decoder
+#[derive(Debug)]
+pub struct HdrDecoder<R> {
+ r: R,
+ width: u32,
+ height: u32,
+ meta: HdrMetadata,
+}
+
+/// Refer to [wikipedia](https://en.wikipedia.org/wiki/RGBE_image_format)
+#[repr(C)]
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
+pub struct Rgbe8Pixel {
+ /// Color components
+ pub c: [u8; 3],
+ /// Exponent
+ pub e: u8,
+}
+
+/// Creates `Rgbe8Pixel` from components
+pub fn rgbe8(r: u8, g: u8, b: u8, e: u8) -> Rgbe8Pixel {
+ Rgbe8Pixel { c: [r, g, b], e }
+}
+
+impl Rgbe8Pixel {
+ /// Converts `Rgbe8Pixel` into `Rgb<f32>` linearly
+ #[inline]
+ pub fn to_hdr(self) -> Rgb<f32> {
+ if self.e == 0 {
+ Rgb([0.0, 0.0, 0.0])
+ } else {
+ // let exp = f32::ldexp(1., self.e as isize - (128 + 8)); // unstable
+ let exp = f32::exp2(<f32 as From<_>>::from(self.e) - (128.0 + 8.0));
+ Rgb([
+ exp * <f32 as From<_>>::from(self.c[0]),
+ exp * <f32 as From<_>>::from(self.c[1]),
+ exp * <f32 as From<_>>::from(self.c[2]),
+ ])
+ }
+ }
+
+ /// Converts `Rgbe8Pixel` into `Rgb<T>` with scale=1 and gamma=2.2
+ ///
+ /// color_ldr = (color_hdr*scale)<sup>gamma</sup>
+ ///
+ /// # Panic
+ ///
+ /// Panics when `T::max_value()` cannot be represented as f32.
+ #[inline]
+ pub fn to_ldr<T: Primitive + Zero>(self) -> Rgb<T> {
+ self.to_ldr_scale_gamma(1.0, 2.2)
+ }
+
+ /// Converts `Rgbe8Pixel` into `Rgb<T>` using provided scale and gamma
+ ///
+ /// color_ldr = (color_hdr*scale)<sup>gamma</sup>
+ ///
+ /// # Panic
+ ///
+ /// Panics when `T::max_value()` cannot be represented as f32.
+ /// Panics when scale or gamma is NaN
+ #[inline]
+ pub fn to_ldr_scale_gamma<T: Primitive + Zero>(self, scale: f32, gamma: f32) -> Rgb<T> {
+ let Rgb(data) = self.to_hdr();
+ let (r, g, b) = (data[0], data[1], data[2]);
+ #[inline]
+ fn sg<T: Primitive + Zero>(v: f32, scale: f32, gamma: f32) -> T {
+ let t_max = T::max_value();
+ // Disassembly shows that t_max_f32 is compiled into constant
+ let t_max_f32: f32 = num_traits::NumCast::from(t_max)
+ .expect("to_ldr_scale_gamma: maximum value of type is not representable as f32");
+ let fv = f32::powf(v * scale, gamma) * t_max_f32 + 0.5;
+ if fv < 0.0 {
+ T::zero()
+ } else if fv > t_max_f32 {
+ t_max
+ } else {
+ num_traits::NumCast::from(fv)
+ .expect("to_ldr_scale_gamma: cannot convert f32 to target type. NaN?")
+ }
+ }
+ Rgb([
+ sg(r, scale, gamma),
+ sg(g, scale, gamma),
+ sg(b, scale, gamma),
+ ])
+ }
+}
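+
+// A small sketch (illustrative only) of the RGBE decoding rule implemented by
+// `to_hdr` above: the shared exponent scales each 8-bit component by 2^(e - 136),
+// so (128, 64, 32) with e = 128 maps to (0.5, 0.25, 0.125).
+#[test]
+fn rgbe8_to_hdr_sketch() {
+    let Rgb(c) = rgbe8(128, 64, 32, 128).to_hdr();
+    assert_eq!(c, [0.5, 0.25, 0.125]);
+}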
+
+impl<R: BufRead> HdrDecoder<R> {
+ /// Reads the Radiance HDR image header from stream `reader`;
+ /// if the header is valid, creates an `HdrDecoder`.
+ /// Strict mode is enabled.
+ pub fn new(reader: R) -> ImageResult<HdrDecoder<R>> {
+ HdrDecoder::with_strictness(reader, true)
+ }
+
+ /// Reads Radiance HDR image header from stream `reader`,
+ /// if the header is valid, creates `HdrDecoder`.
+ ///
+ /// strict enables strict mode
+ ///
+ /// Warning! Reading a malformed file in non-strict mode
+ /// could consume memory on the order of the file size in the process.
+ pub fn with_strictness(mut reader: R, strict: bool) -> ImageResult<HdrDecoder<R>> {
+ let mut attributes = HdrMetadata::new();
+
+ {
+ // scope to make borrowck happy
+ let r = &mut reader;
+ if strict {
+ let mut signature = [0; SIGNATURE_LENGTH];
+ r.read_exact(&mut signature)?;
+ if signature != SIGNATURE {
+ return Err(DecoderError::RadianceHdrSignatureInvalid.into());
+ } // no else
+ // skip signature line ending
+ read_line_u8(r)?;
+ } else {
+ // Old Radiance HDR files (*.pic) don't use signature
+ // Let them be parsed in non-strict mode
+ }
+ // read header data until empty line
+ loop {
+ match read_line_u8(r)? {
+ None => {
+ // EOF before end of header
+ return Err(DecoderError::TruncatedHeader.into());
+ }
+ Some(line) => {
+ if line.is_empty() {
+ // end of header
+ break;
+ } else if line[0] == b'#' {
+ // line[0] will not panic, line.len() == 0 is false here
+ // skip comments
+ continue;
+ } // no else
+ // process attribute line
+ let line = String::from_utf8_lossy(&line[..]);
+ attributes.update_header_info(&line, strict)?;
+ } // <= Some(line)
+ } // match read_line_u8()
+ } // loop
+ } // scope to end borrow of reader
+ // parse dimensions
+ let (width, height) = match read_line_u8(&mut reader)? {
+ None => {
+ // EOF instead of image dimensions
+ return Err(DecoderError::TruncatedDimensions.into());
+ }
+ Some(dimensions) => {
+ let dimensions = String::from_utf8_lossy(&dimensions[..]);
+ parse_dimensions_line(&dimensions, strict)?
+ }
+ };
+
+ // color type is always rgb8
+ if crate::utils::check_dimension_overflow(width, height, ColorType::Rgb8.bytes_per_pixel())
+ {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Hdr.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "Image dimensions ({}x{}) are too large",
+ width, height
+ )),
+ ),
+ ));
+ }
+
+ Ok(HdrDecoder {
+ r: reader,
+
+ width,
+ height,
+ meta: HdrMetadata {
+ width,
+ height,
+ ..attributes
+ },
+ })
+ } // end with_strictness
+
+ /// Returns file metadata. Refer to `HdrMetadata` for details.
+ pub fn metadata(&self) -> HdrMetadata {
+ self.meta.clone()
+ }
+
+ /// Consumes decoder and returns a vector of RGBE8 pixels
+ pub fn read_image_native(mut self) -> ImageResult<Vec<Rgbe8Pixel>> {
+ // Don't read anything if image is empty
+ if self.width == 0 || self.height == 0 {
+ return Ok(vec![]);
+ }
+ // expression self.width > 0 && self.height > 0 is true from now to the end of this method
+ let pixel_count = self.width as usize * self.height as usize;
+ let mut ret = vec![Default::default(); pixel_count];
+ for chunk in ret.chunks_mut(self.width as usize) {
+ read_scanline(&mut self.r, chunk)?;
+ }
+ Ok(ret)
+ }
+
+ /// Consumes decoder and returns a vector of transformed pixels
+ pub fn read_image_transform<T: Send, F: Send + Sync + Fn(Rgbe8Pixel) -> T>(
+ mut self,
+ f: F,
+ output_slice: &mut [T],
+ ) -> ImageResult<()> {
+ assert_eq!(
+ output_slice.len(),
+ self.width as usize * self.height as usize
+ );
+
+ // Don't read anything if image is empty
+ if self.width == 0 || self.height == 0 {
+ return Ok(());
+ }
+
+ let chunks_iter = output_slice.chunks_mut(self.width as usize);
+
+ let mut buf = vec![Default::default(); self.width as usize];
+ for chunk in chunks_iter {
+ // read_scanline overwrites the entire buffer or returns an Err,
+ // so not resetting the buffer here is ok.
+ read_scanline(&mut self.r, &mut buf[..])?;
+ for (dst, &pix) in chunk.iter_mut().zip(buf.iter()) {
+ *dst = f(pix);
+ }
+ }
+ Ok(())
+ }
+
+ /// Consumes decoder and returns a vector of `Rgb<u8>` pixels.
+ /// scale = 1, gamma = 2.2
+ pub fn read_image_ldr(self) -> ImageResult<Vec<Rgb<u8>>> {
+ let mut ret = vec![Rgb([0, 0, 0]); self.width as usize * self.height as usize];
+ self.read_image_transform(|pix| pix.to_ldr(), &mut ret[..])?;
+ Ok(ret)
+ }
+
+ /// Consumes decoder and returns a vector of `Rgb<f32>` pixels.
+ ///
+ pub fn read_image_hdr(self) -> ImageResult<Vec<Rgb<f32>>> {
+ let mut ret = vec![Rgb([0.0, 0.0, 0.0]); self.width as usize * self.height as usize];
+ self.read_image_transform(|pix| pix.to_hdr(), &mut ret[..])?;
+ Ok(ret)
+ }
+}
+
+impl<R: Read> IntoIterator for HdrDecoder<R> {
+ type Item = ImageResult<Rgbe8Pixel>;
+ type IntoIter = HdrImageDecoderIterator<R>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ HdrImageDecoderIterator {
+ r: self.r,
+ scanline_cnt: self.height as usize,
+ buf: vec![Default::default(); self.width as usize],
+ col: 0,
+ scanline: 0,
+ trouble: true, // make first call to `next()` read scanline
+ error_encountered: false,
+ }
+ }
+}
+
+/// Scanline buffered pixel by pixel iterator
+pub struct HdrImageDecoderIterator<R: Read> {
+ r: R,
+ scanline_cnt: usize,
+ buf: Vec<Rgbe8Pixel>, // scanline buffer
+ col: usize, // current position in scanline
+ scanline: usize, // current scanline
+ trouble: bool, // optimization, true indicates that we need to check something
+ error_encountered: bool,
+}
+
+impl<R: Read> HdrImageDecoderIterator<R> {
+ // Advances counter to the next pixel
+ #[inline]
+ fn advance(&mut self) {
+ self.col += 1;
+ if self.col == self.buf.len() {
+ self.col = 0;
+ self.scanline += 1;
+ self.trouble = true;
+ }
+ }
+}
+
+impl<R: Read> Iterator for HdrImageDecoderIterator<R> {
+ type Item = ImageResult<Rgbe8Pixel>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if !self.trouble {
+ let ret = self.buf[self.col];
+ self.advance();
+ Some(Ok(ret))
+ } else {
+ // some condition is pending
+ if self.buf.is_empty() || self.scanline == self.scanline_cnt {
+ // No more pixels
+ return None;
+ } // no else
+ if self.error_encountered {
+ self.advance();
+ // Error was encountered. Keep producing errors.
+ // ImageError can't implement Clone, so just dump some error
+ return Some(Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::FailedAlready,
+ ))));
+ } // no else
+ if self.col == 0 {
+ // fill scanline buffer
+ match read_scanline(&mut self.r, &mut self.buf[..]) {
+ Ok(_) => {
+ // no action required
+ }
+ Err(err) => {
+ self.advance();
+ self.error_encountered = true;
+ self.trouble = true;
+ return Some(Err(err));
+ }
+ }
+ } // no else
+ self.trouble = false;
+ let ret = self.buf[0];
+ self.advance();
+ Some(Ok(ret))
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let total_cnt = self.buf.len() * self.scanline_cnt;
+ let cur_cnt = self.buf.len() * self.scanline + self.col;
+ let remaining = total_cnt - cur_cnt;
+ (remaining, Some(remaining))
+ }
+}
+
+impl<R: Read> ExactSizeIterator for HdrImageDecoderIterator<R> {}
+
+// Precondition: buf.len() > 0
+fn read_scanline<R: Read>(r: &mut R, buf: &mut [Rgbe8Pixel]) -> ImageResult<()> {
+ assert!(!buf.is_empty());
+ let width = buf.len();
+ // first 4 bytes in scanline allow to determine compression method
+ let fb = read_rgbe(r)?;
+ if fb.c[0] == 2 && fb.c[1] == 2 && fb.c[2] < 128 {
+ // denormalized pixel value (2,2,<128,_) indicates new per component RLE method
+ // decode_component guarantees that offset is within 0 .. width
+ // therefore we can skip bounds checking here, but we will not
+ decode_component(r, width, |offset, value| buf[offset].c[0] = value)?;
+ decode_component(r, width, |offset, value| buf[offset].c[1] = value)?;
+ decode_component(r, width, |offset, value| buf[offset].c[2] = value)?;
+ decode_component(r, width, |offset, value| buf[offset].e = value)?;
+ } else {
+ // old RLE method (it was considered old around 1991, should it be here?)
+ decode_old_rle(r, fb, buf)?;
+ }
+ Ok(())
+}
+
+#[inline(always)]
+fn read_byte<R: Read>(r: &mut R) -> io::Result<u8> {
+ let mut buf = [0u8];
+ r.read_exact(&mut buf[..])?;
+ Ok(buf[0])
+}
+
+// Guarantees that the first parameter of set_component will be within 0 .. width
+#[inline]
+fn decode_component<R: Read, S: FnMut(usize, u8)>(
+ r: &mut R,
+ width: usize,
+ mut set_component: S,
+) -> ImageResult<()> {
+ let mut buf = [0; 128];
+ let mut pos = 0;
+ while pos < width {
+ // increment position by a number of decompressed values
+ pos += {
+ let rl = read_byte(r)?;
+ if rl <= 128 {
+ // sanity check
+ if pos + rl as usize > width {
+ return Err(DecoderError::WrongScanlineLength(pos + rl as usize, width).into());
+ }
+ // read values
+ r.read_exact(&mut buf[0..rl as usize])?;
+ for (offset, &value) in buf[0..rl as usize].iter().enumerate() {
+ set_component(pos + offset, value);
+ }
+ rl as usize
+ } else {
+ // run
+ let rl = rl - 128;
+ // sanity check
+ if pos + rl as usize > width {
+ return Err(DecoderError::WrongScanlineLength(pos + rl as usize, width).into());
+ }
+ // fill with same value
+ let value = read_byte(r)?;
+ for offset in 0..rl as usize {
+ set_component(pos + offset, value);
+ }
+ rl as usize
+ }
+ };
+ }
+ if pos != width {
+ return Err(DecoderError::WrongScanlineLength(pos, width).into());
+ }
+ Ok(())
+}
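+
+// A small sketch (illustrative only) of the per-component RLE stream decoded above:
+// a count byte of 128 or less introduces that many literal bytes, while a count byte
+// above 128 repeats the following byte (count - 128) times.
+#[test]
+fn decode_component_sketch() {
+    // two literal bytes (10, 20) followed by a run of length 2 of the value 7
+    let data = [2u8, 10, 20, 130, 7];
+    let mut out = [0u8; 4];
+    decode_component(&mut Cursor::new(&data[..]), 4, |offset, value| out[offset] = value)
+        .unwrap();
+    assert_eq!(out, [10, 20, 7, 7]);
+}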
+
+// Decodes scanline, places it into buf
+// Precondition: buf.len() > 0
+// fb - first 4 bytes of scanline
+fn decode_old_rle<R: Read>(r: &mut R, fb: Rgbe8Pixel, buf: &mut [Rgbe8Pixel]) -> ImageResult<()> {
+ assert!(!buf.is_empty());
+ let width = buf.len();
+ // convenience function.
+ // returns run length if pixel is a run length marker
+ #[inline]
+ fn rl_marker(pix: Rgbe8Pixel) -> Option<usize> {
+ if pix.c == [1, 1, 1] {
+ Some(pix.e as usize)
+ } else {
+ None
+ }
+ }
+ // the first pixel in a scanline must not be a run length marker;
+ // it is an error if it is
+ if rl_marker(fb).is_some() {
+ return Err(DecoderError::FirstPixelRlMarker.into());
+ }
+ buf[0] = fb; // set first pixel of scanline
+
+ let mut x_off = 1; // current offset from beginning of a scanline
+ let mut rl_mult = 1; // current run length multiplier
+ let mut prev_pixel = fb;
+ while x_off < width {
+ let pix = read_rgbe(r)?;
+ // it's harder to forget to increase x_off if I write it this way.
+ x_off += {
+ if let Some(rl) = rl_marker(pix) {
+ // rl_mult takes care of consecutive RL markers
+ let rl = rl * rl_mult;
+ rl_mult *= 256;
+ if x_off + rl <= width {
+ // do run
+ for b in &mut buf[x_off..x_off + rl] {
+ *b = prev_pixel;
+ }
+ } else {
+ return Err(DecoderError::WrongScanlineLength(x_off + rl, width).into());
+ };
+ rl // value to increase x_off by
+ } else {
+ rl_mult = 1; // chain of consecutive RL markers is broken
+ prev_pixel = pix;
+ buf[x_off] = pix;
+ 1 // value to increase x_off by
+ }
+ };
+ }
+ if x_off != width {
+ return Err(DecoderError::WrongScanlineLength(x_off, width).into());
+ }
+ Ok(())
+}
+
+fn read_rgbe<R: Read>(r: &mut R) -> io::Result<Rgbe8Pixel> {
+ let mut buf = [0u8; 4];
+ r.read_exact(&mut buf[..])?;
+ Ok(Rgbe8Pixel {
+ c: [buf[0], buf[1], buf[2]],
+ e: buf[3],
+ })
+}
+
+/// Metadata for Radiance HDR image
+#[derive(Debug, Clone)]
+pub struct HdrMetadata {
+ /// Width of decoded image. It could be either scanline length,
+ /// or scanline count, depending on image orientation.
+ pub width: u32,
+ /// Height of decoded image. It depends on orientation too.
+ pub height: u32,
+ /// Orientation matrix. For standard orientation it is ((1,0),(0,1)) - left to right, top to bottom.
+ /// First pair tells how resulting pixel coordinates change along a scanline.
+ /// Second pair tells how they change from one scanline to the next.
+ pub orientation: ((i8, i8), (i8, i8)),
+ /// Divide color values by exposure to get physical radiance in
+ /// watts/steradian/m<sup>2</sup>
+ ///
+ /// Image may not contain physical data, even if this field is set.
+ pub exposure: Option<f32>,
+ /// Divide color values by the corresponding tuple member (r, g, b) to get physical radiance
+ /// in watts/steradian/m<sup>2</sup>
+ ///
+ /// Image may not contain physical data, even if this field is set.
+ pub color_correction: Option<(f32, f32, f32)>,
+ /// Pixel height divided by pixel width
+ pub pixel_aspect_ratio: Option<f32>,
+ /// All lines contained in image header are put here. Ordering of lines is preserved.
+ /// Lines in the form "key=value" are represented as ("key", "value").
+ /// All other lines are ("", "line")
+ pub custom_attributes: Vec<(String, String)>,
+}
+
+impl HdrMetadata {
+ fn new() -> HdrMetadata {
+ HdrMetadata {
+ width: 0,
+ height: 0,
+ orientation: ((1, 0), (0, 1)),
+ exposure: None,
+ color_correction: None,
+ pixel_aspect_ratio: None,
+ custom_attributes: vec![],
+ }
+ }
+
+ // Updates header info; in strict mode returns an error for malformed lines (no '=' separator).
+ // Unknown attributes are skipped.
+ fn update_header_info(&mut self, line: &str, strict: bool) -> ImageResult<()> {
+ // split line at first '='
+ // old Radiance HDR files (*.pic) may contain tabs in the key, hence the trim below
+ let maybe_key_value = split_at_first(line, "=").map(|(key, value)| (key.trim(), value));
+ // save all header lines in custom_attributes
+ match maybe_key_value {
+ Some((key, val)) => self
+ .custom_attributes
+ .push((key.to_owned(), val.to_owned())),
+ None => self.custom_attributes.push(("".into(), line.to_owned())),
+ }
+ // parse known attributes
+ match maybe_key_value {
+ Some(("FORMAT", val)) => {
+ if val.trim() != "32-bit_rle_rgbe" {
+ // XYZE isn't supported yet
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Hdr.into(),
+ UnsupportedErrorKind::Format(ImageFormatHint::Name(limit_string_len(
+ val, 20,
+ ))),
+ ),
+ ));
+ }
+ }
+ Some(("EXPOSURE", val)) => {
+ match val.trim().parse::<f32>() {
+ Ok(v) => {
+ self.exposure = Some(self.exposure.unwrap_or(1.0) * v); // all encountered exposure values should be multiplied
+ }
+ Err(parse_error) => {
+ if strict {
+ return Err(DecoderError::UnparsableF32(
+ LineType::Exposure,
+ parse_error,
+ )
+ .into());
+ } // no else, skip this line in non-strict mode
+ }
+ };
+ }
+ Some(("PIXASPECT", val)) => {
+ match val.trim().parse::<f32>() {
+ Ok(v) => {
+ self.pixel_aspect_ratio = Some(self.pixel_aspect_ratio.unwrap_or(1.0) * v);
+ // all encountered pixel aspect ratio values should be multiplied
+ }
+ Err(parse_error) => {
+ if strict {
+ return Err(DecoderError::UnparsableF32(
+ LineType::Pixaspect,
+ parse_error,
+ )
+ .into());
+ } // no else, skip this line in non-strict mode
+ }
+ };
+ }
+ Some(("COLORCORR", val)) => {
+ let mut rgbcorr = [1.0, 1.0, 1.0];
+ match parse_space_separated_f32(val, &mut rgbcorr, LineType::Colorcorr) {
+ Ok(extra_numbers) => {
+ if strict && extra_numbers {
+ return Err(DecoderError::ExtraneousColorcorrNumbers.into());
+ } // no else, just ignore extra numbers
+ let (rc, gc, bc) = self.color_correction.unwrap_or((1.0, 1.0, 1.0));
+ self.color_correction =
+ Some((rc * rgbcorr[0], gc * rgbcorr[1], bc * rgbcorr[2]));
+ }
+ Err(err) => {
+ if strict {
+ return Err(err);
+ } // no else, skip malformed line in non-strict mode
+ }
+ }
+ }
+ None => {
+ // old Radiance HDR files (*.pic) contain commands in a header
+ // just skip them
+ }
+ _ => {
+ // skip unknown attribute
+ }
+ } // match attributes
+ Ok(())
+ }
+}
+
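+// Parses vals.len() whitespace-separated f32 values from line into vals.
+// Returns Ok(true) if the line contained more numbers than vals can hold.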
+fn parse_space_separated_f32(line: &str, vals: &mut [f32], line_tp: LineType) -> ImageResult<bool> {
+ let mut nums = line.split_whitespace();
+ for val in vals.iter_mut() {
+ if let Some(num) = nums.next() {
+ match num.parse::<f32>() {
+ Ok(v) => *val = v,
+ Err(err) => return Err(DecoderError::UnparsableF32(line_tp, err).into()),
+ }
+ } else {
+ // not enough numbers in line
+ return Err(DecoderError::LineTooShort(line_tp).into());
+ }
+ }
+ Ok(nums.next().is_some())
+}
+
+// Parses dimension line "-Y height +X width"
+// returns (width, height) or error
+fn parse_dimensions_line(line: &str, strict: bool) -> ImageResult<(u32, u32)> {
+ const DIMENSIONS_COUNT: usize = 4;
+
+ let mut dim_parts = line.split_whitespace();
+ let c1_tag = dim_parts
+ .next()
+ .ok_or(DecoderError::DimensionsLineTooShort(0, DIMENSIONS_COUNT))?;
+ let c1_str = dim_parts
+ .next()
+ .ok_or(DecoderError::DimensionsLineTooShort(1, DIMENSIONS_COUNT))?;
+ let c2_tag = dim_parts
+ .next()
+ .ok_or(DecoderError::DimensionsLineTooShort(2, DIMENSIONS_COUNT))?;
+ let c2_str = dim_parts
+ .next()
+ .ok_or(DecoderError::DimensionsLineTooShort(3, DIMENSIONS_COUNT))?;
+ if strict && dim_parts.next().is_some() {
+ // extra data in dimensions line
+ return Err(DecoderError::DimensionsLineTooLong(DIMENSIONS_COUNT).into());
+ } // no else
+ // dimensions line is in the form "-Y 10 +X 20"
+ // There are 8 possible orientations: +Y +X, +X -Y and so on
+ match (c1_tag, c2_tag) {
+ ("-Y", "+X") => {
+ // Common orientation (left-right, top-down)
+ // c1_str is height, c2_str is width
+ let height = c1_str
+ .parse::<u32>()
+ .map_err(|pe| DecoderError::UnparsableU32(LineType::DimensionsHeight, pe))?;
+ let width = c2_str
+ .parse::<u32>()
+ .map_err(|pe| DecoderError::UnparsableU32(LineType::DimensionsWidth, pe))?;
+ Ok((width, height))
+ }
+ _ => Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Hdr.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "Orientation {} {}",
+ limit_string_len(c1_tag, 4),
+ limit_string_len(c2_tag, 4)
+ )),
+ ),
+ )),
+ } // final expression. Returns value
+}
+
+// Returns string with no more than len+3 characters
+fn limit_string_len(s: &str, len: usize) -> String {
+ let s_char_len = s.chars().count();
+ if s_char_len > len {
+ s.chars().take(len).chain("...".chars()).collect()
+ } else {
+ s.into()
+ }
+}
+
+// Splits string into (before separator, after separator) tuple
+// or None if separator isn't found
+fn split_at_first<'a>(s: &'a str, separator: &str) -> Option<(&'a str, &'a str)> {
+ match s.find(separator) {
+ None | Some(0) => None,
+ Some(p) if p >= s.len() - separator.len() => None,
+ Some(p) => Some((&s[..p], &s[(p + separator.len())..])),
+ }
+}
+
+#[test]
+fn split_at_first_test() {
+ assert_eq!(split_at_first(&Cow::Owned("".into()), "="), None);
+ assert_eq!(split_at_first(&Cow::Owned("=".into()), "="), None);
+ assert_eq!(split_at_first(&Cow::Owned("= ".into()), "="), None);
+ assert_eq!(
+ split_at_first(&Cow::Owned(" = ".into()), "="),
+ Some((" ", " "))
+ );
+ assert_eq!(
+ split_at_first(&Cow::Owned("EXPOSURE= ".into()), "="),
+ Some(("EXPOSURE", " "))
+ );
+ assert_eq!(
+ split_at_first(&Cow::Owned("EXPOSURE= =".into()), "="),
+ Some(("EXPOSURE", " ="))
+ );
+ assert_eq!(
+ split_at_first(&Cow::Owned("EXPOSURE== =".into()), "=="),
+ Some(("EXPOSURE", " ="))
+ );
+ assert_eq!(split_at_first(&Cow::Owned("EXPOSURE".into()), ""), None);
+}
+
+// Reads input until b"\n" or EOF
+// Returns vector of read bytes NOT including end of line characters
+// or return None to indicate end of file
+fn read_line_u8<R: BufRead>(r: &mut R) -> ::std::io::Result<Option<Vec<u8>>> {
+ let mut ret = Vec::with_capacity(16);
+ match r.read_until(b'\n', &mut ret) {
+ Ok(0) => Ok(None),
+ Ok(_) => {
+ if let Some(&b'\n') = ret[..].last() {
+ let _ = ret.pop();
+ }
+ Ok(Some(ret))
+ }
+ Err(err) => Err(err),
+ }
+}
+
+#[test]
+fn read_line_u8_test() {
+ let buf: Vec<_> = (&b"One\nTwo\nThree\nFour\n\n\n"[..]).into();
+ let input = &mut ::std::io::Cursor::new(buf);
+ assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"One"[..]);
+ assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"Two"[..]);
+ assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"Three"[..]);
+ assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"Four"[..]);
+ assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b""[..]);
+ assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b""[..]);
+ assert_eq!(read_line_u8(input).unwrap(), None);
+}
+
+/// Helper function for reading raw 3-channel f32 images
+pub fn read_raw_file<P: AsRef<Path>>(path: P) -> ::std::io::Result<Vec<Rgb<f32>>> {
+ use byteorder::{LittleEndian as LE, ReadBytesExt};
+ use std::fs::File;
+ use std::io::BufReader;
+
+ let mut r = BufReader::new(File::open(path)?);
+ let w = r.read_u32::<LE>()? as usize;
+ let h = r.read_u32::<LE>()? as usize;
+ let c = r.read_u32::<LE>()? as usize;
+ assert_eq!(c, 3);
+ let cnt = w * h;
+ let mut ret = Vec::with_capacity(cnt);
+ for _ in 0..cnt {
+ let cr = r.read_f32::<LE>()?;
+ let cg = r.read_f32::<LE>()?;
+ let cb = r.read_f32::<LE>()?;
+ ret.push(Rgb([cr, cg, cb]));
+ }
+ Ok(ret)
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use std::io::Cursor;
+
+ #[test]
+ fn dimension_overflow() {
+ let data = b"#?RADIANCE\nFORMAT=32-bit_rle_rgbe\n\n -Y 4294967295 +X 4294967295";
+
+ assert!(HdrAdapter::new(Cursor::new(data)).is_err());
+ assert!(HdrAdapter::new_nonstrict(Cursor::new(data)).is_err());
+ }
+}
diff --git a/vendor/image/src/codecs/hdr/encoder.rs b/vendor/image/src/codecs/hdr/encoder.rs
new file mode 100644
index 0000000..c3a176d
--- /dev/null
+++ b/vendor/image/src/codecs/hdr/encoder.rs
@@ -0,0 +1,433 @@
+use crate::codecs::hdr::{rgbe8, Rgbe8Pixel, SIGNATURE};
+use crate::color::Rgb;
+use crate::error::ImageResult;
+use std::cmp::Ordering;
+use std::io::{Result, Write};
+
+/// Radiance HDR encoder
+pub struct HdrEncoder<W: Write> {
+ w: W,
+}
+
+impl<W: Write> HdrEncoder<W> {
+ /// Creates encoder
+ pub fn new(w: W) -> HdrEncoder<W> {
+ HdrEncoder { w }
+ }
+
+ /// Encodes the image ```data```
+ /// that has dimensions ```width``` and ```height```
+ pub fn encode(mut self, data: &[Rgb<f32>], width: usize, height: usize) -> ImageResult<()> {
+ assert!(data.len() >= width * height);
+ let w = &mut self.w;
+ w.write_all(SIGNATURE)?;
+ w.write_all(b"\n")?;
+ w.write_all(b"# Rust HDR encoder\n")?;
+ w.write_all(b"FORMAT=32-bit_rle_rgbe\n\n")?;
+ w.write_all(format!("-Y {} +X {}\n", height, width).as_bytes())?;
+
+ if !(8..=32_768).contains(&width) {
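+ // widths outside 8..=32768 cannot be represented by the new RLE scanline
+ // marker, so write flat (uncompressed) RGBE pixels instead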
+ for &pix in data {
+ write_rgbe8(w, to_rgbe8(pix))?;
+ }
+ } else {
+ // new RLE marker contains scanline width
+ let marker = rgbe8(2, 2, (width / 256) as u8, (width % 256) as u8);
+ // buffers for encoded pixels
+ let mut bufr = vec![0; width];
+ let mut bufg = vec![0; width];
+ let mut bufb = vec![0; width];
+ let mut bufe = vec![0; width];
+ let mut rle_buf = vec![0; width];
+ for scanline in data.chunks(width) {
+ for ((((r, g), b), e), &pix) in bufr
+ .iter_mut()
+ .zip(bufg.iter_mut())
+ .zip(bufb.iter_mut())
+ .zip(bufe.iter_mut())
+ .zip(scanline.iter())
+ {
+ let cp = to_rgbe8(pix);
+ *r = cp.c[0];
+ *g = cp.c[1];
+ *b = cp.c[2];
+ *e = cp.e;
+ }
+ write_rgbe8(w, marker)?; // New RLE encoding marker
+ rle_buf.clear();
+ rle_compress(&bufr[..], &mut rle_buf);
+ w.write_all(&rle_buf[..])?;
+ rle_buf.clear();
+ rle_compress(&bufg[..], &mut rle_buf);
+ w.write_all(&rle_buf[..])?;
+ rle_buf.clear();
+ rle_compress(&bufb[..], &mut rle_buf);
+ w.write_all(&rle_buf[..])?;
+ rle_buf.clear();
+ rle_compress(&bufe[..], &mut rle_buf);
+ w.write_all(&rle_buf[..])?;
+ }
+ }
+ Ok(())
+ }
+}
+
+#[derive(Debug, PartialEq, Eq)]
+enum RunOrNot {
+ Run(u8, usize),
+ Norun(usize, usize),
+}
+use self::RunOrNot::{Norun, Run};
+
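+// A run is emitted as (128 + length, byte), so a single run is at most 127 long;
+// a literal ("norun") chunk is emitted as (length, bytes...) with length at most 128.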
+const RUN_MAX_LEN: usize = 127;
+const NORUN_MAX_LEN: usize = 128;
+
+struct RunIterator<'a> {
+ data: &'a [u8],
+ curidx: usize,
+}
+
+impl<'a> RunIterator<'a> {
+ fn new(data: &'a [u8]) -> RunIterator<'a> {
+ RunIterator { data, curidx: 0 }
+ }
+}
+
+impl<'a> Iterator for RunIterator<'a> {
+ type Item = RunOrNot;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.curidx == self.data.len() {
+ None
+ } else {
+ let cv = self.data[self.curidx];
+ let crun = self.data[self.curidx..]
+ .iter()
+ .take_while(|&&v| v == cv)
+ .take(RUN_MAX_LEN)
+ .count();
+ let ret = if crun > 2 {
+ Run(cv, crun)
+ } else {
+ Norun(self.curidx, crun)
+ };
+ self.curidx += crun;
+ Some(ret)
+ }
+ }
+}
+
+struct NorunCombineIterator<'a> {
+ runiter: RunIterator<'a>,
+ prev: Option<RunOrNot>,
+}
+
+impl<'a> NorunCombineIterator<'a> {
+ fn new(data: &'a [u8]) -> NorunCombineIterator<'a> {
+ NorunCombineIterator {
+ runiter: RunIterator::new(data),
+ prev: None,
+ }
+ }
+}
+
+// Combines sequential noruns produced by RunIterator
+impl<'a> Iterator for NorunCombineIterator<'a> {
+ type Item = RunOrNot;
+ fn next(&mut self) -> Option<Self::Item> {
+ loop {
+ match self.prev.take() {
+ Some(Run(c, len)) => {
+ // Just return stored run
+ return Some(Run(c, len));
+ }
+ Some(Norun(idx, len)) => {
+ // Let's see if we need to continue norun
+ match self.runiter.next() {
+ Some(Norun(_, len1)) => {
+ // norun continues
+ let clen = len + len1; // combined length
+ match clen.cmp(&NORUN_MAX_LEN) {
+ Ordering::Equal => return Some(Norun(idx, clen)),
+ Ordering::Greater => {
+ // combined norun exceeds maximum length. store extra part of norun
+ self.prev =
+ Some(Norun(idx + NORUN_MAX_LEN, clen - NORUN_MAX_LEN));
+ // then return maximal norun
+ return Some(Norun(idx, NORUN_MAX_LEN));
+ }
+ Ordering::Less => {
+ // len + len1 < NORUN_MAX_LEN
+ self.prev = Some(Norun(idx, len + len1));
+ // combine and continue loop
+ }
+ }
+ }
+ Some(Run(c, len1)) => {
+ // Run encountered. Store it
+ self.prev = Some(Run(c, len1));
+ return Some(Norun(idx, len)); // and return combined norun
+ }
+ None => {
+ // End of sequence
+ return Some(Norun(idx, len)); // return combined norun
+ }
+ }
+ } // End match self.prev.take() == Some(NoRun())
+ None => {
+ // No norun to combine
+ match self.runiter.next() {
+ Some(Norun(idx, len)) => {
+ self.prev = Some(Norun(idx, len));
+ // store for combine and continue the loop
+ }
+ Some(Run(c, len)) => {
+ // Some run. Just return it
+ return Some(Run(c, len));
+ }
+ None => {
+ // That's all, folks
+ return None;
+ }
+ }
+ } // End match self.prev.take() == None
+ } // End match
+ } // End loop
+ }
+}
+
+// RLE compresses ```data``` into ```rle```, clearing any previous contents first
+fn rle_compress(data: &[u8], rle: &mut Vec<u8>) {
+ rle.clear();
+ if data.is_empty() {
+ rle.push(0); // Technically correct. It means read next 0 bytes.
+ return;
+ }
+ // Task: split data into chunks of repeating (max 127) and non-repeating bytes (max 128)
+ // Prepend non-repeating chunk with its length
+ // Replace repeating byte with (run length + 128) and the byte
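+ // e.g. [1, 2, 3, 7, 7, 7, 7, 7] encodes to [3, 1, 2, 3, 133, 7]
+ // (three literal bytes, then a run of five 7s stored as 128 + 5)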
+ for rnr in NorunCombineIterator::new(data) {
+ match rnr {
+ Run(c, len) => {
+ assert!(len <= 127);
+ rle.push(128u8 + len as u8);
+ rle.push(c);
+ }
+ Norun(idx, len) => {
+ assert!(len <= 128);
+ rle.push(len as u8);
+ rle.extend_from_slice(&data[idx..idx + len]);
+ }
+ }
+ }
+}
+
+fn write_rgbe8<W: Write>(w: &mut W, v: Rgbe8Pixel) -> Result<()> {
+ w.write_all(&[v.c[0], v.c[1], v.c[2], v.e])
+}
+
+/// Converts ```Rgb<f32>``` into ```Rgbe8Pixel```
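+///
+/// For example, ```Rgb([1.0, 0.5, 0.25])``` maps to mantissas ```[128, 64, 32]```
+/// with exponent byte ```129``` (a shared scale of 2<sup>1</sup>).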
+pub fn to_rgbe8(pix: Rgb<f32>) -> Rgbe8Pixel {
+ let pix = pix.0;
+ let mx = f32::max(pix[0], f32::max(pix[1], pix[2]));
+ if mx <= 0.0 {
+ Rgbe8Pixel { c: [0, 0, 0], e: 0 }
+ } else {
+ // let (frac, exp) = mx.frexp(); // unstable yet
+ let exp = mx.log2().floor() as i32 + 1;
+ let mul = f32::powi(2.0, exp);
+ let mut conv = [0u8; 3];
+ for (cv, &sv) in conv.iter_mut().zip(pix.iter()) {
+ *cv = f32::trunc(sv / mul * 256.0) as u8;
+ }
+ Rgbe8Pixel {
+ c: conv,
+ e: (exp + 128) as u8,
+ }
+ }
+}
+
+#[test]
+fn to_rgbe8_test() {
+ use crate::codecs::hdr::rgbe8;
+ let test_cases = vec![rgbe8(0, 0, 0, 0), rgbe8(1, 1, 128, 128)];
+ for &pix in &test_cases {
+ assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+ }
+ for mc in 128..255 {
+ // TODO: use inclusive range when stable
+ let pix = rgbe8(mc, mc, mc, 100);
+ assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+ let pix = rgbe8(mc, 0, mc, 130);
+ assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+ let pix = rgbe8(0, 0, mc, 140);
+ assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+ let pix = rgbe8(1, 0, mc, 150);
+ assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+ let pix = rgbe8(1, mc, 10, 128);
+ assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+ for c in 0..255 {
+ // Radiance HDR seems to be pre IEEE 754.
+ // exponent can be -128 (represented as 0u8), so some colors cannot be represented in normalized f32
+ // Let's exclude exponent value of -128 (0u8) from testing
+ let pix = rgbe8(1, mc, c, if c == 0 { 1 } else { c });
+ assert_eq!(pix, to_rgbe8(pix.to_hdr()));
+ }
+ }
+ fn relative_dist(a: Rgb<f32>, b: Rgb<f32>) -> f32 {
+ // maximal difference divided by maximal value
+ let max_diff =
+ a.0.iter()
+ .zip(b.0.iter())
+ .fold(0.0, |diff, (&a, &b)| f32::max(diff, (a - b).abs()));
+ let max_val =
+ a.0.iter()
+ .chain(b.0.iter())
+ .fold(0.0, |maxv, &a| f32::max(maxv, a));
+ if max_val == 0.0 {
+ 0.0
+ } else {
+ max_diff / max_val
+ }
+ }
+ let test_values = vec![
+ 0.000_001, 0.000_02, 0.000_3, 0.004, 0.05, 0.6, 7.0, 80.0, 900.0, 1_000.0, 20_000.0,
+ 300_000.0,
+ ];
+ for &r in &test_values {
+ for &g in &test_values {
+ for &b in &test_values {
+ let c1 = Rgb([r, g, b]);
+ let c2 = to_rgbe8(c1).to_hdr();
+ let rel_dist = relative_dist(c1, c2);
+ // Maximal value is normalized to the range 128..256, thus we have 1/128 precision
+ assert!(
+ rel_dist <= 1.0 / 128.0,
+ "Relative distance ({}) exceeds 1/128 for {:?} and {:?}",
+ rel_dist,
+ c1,
+ c2
+ );
+ }
+ }
+ }
+}
+
+#[test]
+fn runiterator_test() {
+ let data = [];
+ let mut run_iter = RunIterator::new(&data[..]);
+ assert_eq!(run_iter.next(), None);
+ let data = [5];
+ let mut run_iter = RunIterator::new(&data[..]);
+ assert_eq!(run_iter.next(), Some(Norun(0, 1)));
+ assert_eq!(run_iter.next(), None);
+ let data = [1, 1];
+ let mut run_iter = RunIterator::new(&data[..]);
+ assert_eq!(run_iter.next(), Some(Norun(0, 2)));
+ assert_eq!(run_iter.next(), None);
+ let data = [0, 0, 0];
+ let mut run_iter = RunIterator::new(&data[..]);
+ assert_eq!(run_iter.next(), Some(Run(0u8, 3)));
+ assert_eq!(run_iter.next(), None);
+ let data = [0, 0, 1, 1];
+ let mut run_iter = RunIterator::new(&data[..]);
+ assert_eq!(run_iter.next(), Some(Norun(0, 2)));
+ assert_eq!(run_iter.next(), Some(Norun(2, 2)));
+ assert_eq!(run_iter.next(), None);
+ let data = [0, 0, 0, 1, 1];
+ let mut run_iter = RunIterator::new(&data[..]);
+ assert_eq!(run_iter.next(), Some(Run(0u8, 3)));
+ assert_eq!(run_iter.next(), Some(Norun(3, 2)));
+ assert_eq!(run_iter.next(), None);
+ let data = [1, 2, 2, 2];
+ let mut run_iter = RunIterator::new(&data[..]);
+ assert_eq!(run_iter.next(), Some(Norun(0, 1)));
+ assert_eq!(run_iter.next(), Some(Run(2u8, 3)));
+ assert_eq!(run_iter.next(), None);
+ let data = [1, 1, 2, 2, 2];
+ let mut run_iter = RunIterator::new(&data[..]);
+ assert_eq!(run_iter.next(), Some(Norun(0, 2)));
+ assert_eq!(run_iter.next(), Some(Run(2u8, 3)));
+ assert_eq!(run_iter.next(), None);
+ let data = [2; 128];
+ let mut run_iter = RunIterator::new(&data[..]);
+ assert_eq!(run_iter.next(), Some(Run(2u8, 127)));
+ assert_eq!(run_iter.next(), Some(Norun(127, 1)));
+ assert_eq!(run_iter.next(), None);
+ let data = [2; 129];
+ let mut run_iter = RunIterator::new(&data[..]);
+ assert_eq!(run_iter.next(), Some(Run(2u8, 127)));
+ assert_eq!(run_iter.next(), Some(Norun(127, 2)));
+ assert_eq!(run_iter.next(), None);
+ let data = [2; 130];
+ let mut run_iter = RunIterator::new(&data[..]);
+ assert_eq!(run_iter.next(), Some(Run(2u8, 127)));
+ assert_eq!(run_iter.next(), Some(Run(2u8, 3)));
+ assert_eq!(run_iter.next(), None);
+}
+
+#[test]
+fn noruncombine_test() {
+ fn a<T>(mut v: Vec<T>, mut other: Vec<T>) -> Vec<T> {
+ v.append(&mut other);
+ v
+ }
+
+ let v = vec![];
+ let mut rsi = NorunCombineIterator::new(&v[..]);
+ assert_eq!(rsi.next(), None);
+
+ let v = vec![1];
+ let mut rsi = NorunCombineIterator::new(&v[..]);
+ assert_eq!(rsi.next(), Some(Norun(0, 1)));
+ assert_eq!(rsi.next(), None);
+
+ let v = vec![2, 2];
+ let mut rsi = NorunCombineIterator::new(&v[..]);
+ assert_eq!(rsi.next(), Some(Norun(0, 2)));
+ assert_eq!(rsi.next(), None);
+
+ let v = vec![3, 3, 3];
+ let mut rsi = NorunCombineIterator::new(&v[..]);
+ assert_eq!(rsi.next(), Some(Run(3, 3)));
+ assert_eq!(rsi.next(), None);
+
+ let v = vec![4, 4, 3, 3, 3];
+ let mut rsi = NorunCombineIterator::new(&v[..]);
+ assert_eq!(rsi.next(), Some(Norun(0, 2)));
+ assert_eq!(rsi.next(), Some(Run(3, 3)));
+ assert_eq!(rsi.next(), None);
+
+ let v = vec![40; 400];
+ let mut rsi = NorunCombineIterator::new(&v[..]);
+ assert_eq!(rsi.next(), Some(Run(40, 127)));
+ assert_eq!(rsi.next(), Some(Run(40, 127)));
+ assert_eq!(rsi.next(), Some(Run(40, 127)));
+ assert_eq!(rsi.next(), Some(Run(40, 19)));
+ assert_eq!(rsi.next(), None);
+
+ let v = a(a(vec![5; 3], vec![6; 129]), vec![7, 3, 7, 10, 255]);
+ let mut rsi = NorunCombineIterator::new(&v[..]);
+ assert_eq!(rsi.next(), Some(Run(5, 3)));
+ assert_eq!(rsi.next(), Some(Run(6, 127)));
+ assert_eq!(rsi.next(), Some(Norun(130, 7)));
+ assert_eq!(rsi.next(), None);
+
+ let v = a(a(vec![5; 2], vec![6; 129]), vec![7, 3, 7, 7, 255]);
+ let mut rsi = NorunCombineIterator::new(&v[..]);
+ assert_eq!(rsi.next(), Some(Norun(0, 2)));
+ assert_eq!(rsi.next(), Some(Run(6, 127)));
+ assert_eq!(rsi.next(), Some(Norun(129, 7)));
+ assert_eq!(rsi.next(), None);
+
+ let v: Vec<_> = ::std::iter::repeat(())
+ .flat_map(|_| (0..2))
+ .take(257)
+ .collect();
+ let mut rsi = NorunCombineIterator::new(&v[..]);
+ assert_eq!(rsi.next(), Some(Norun(0, 128)));
+ assert_eq!(rsi.next(), Some(Norun(128, 128)));
+ assert_eq!(rsi.next(), Some(Norun(256, 1)));
+ assert_eq!(rsi.next(), None);
+}
diff --git a/vendor/image/src/codecs/hdr/mod.rs b/vendor/image/src/codecs/hdr/mod.rs
new file mode 100644
index 0000000..b3325bc
--- /dev/null
+++ b/vendor/image/src/codecs/hdr/mod.rs
@@ -0,0 +1,15 @@
+//! Decoding of Radiance HDR Images
+//!
+//! A decoder for Radiance HDR images
+//!
+//! # Related Links
+//!
+//! * <http://radsite.lbl.gov/radiance/refer/filefmts.pdf>
+//! * <http://www.graphics.cornell.edu/~bjw/rgbe/rgbe.c>
+//!
+
+mod decoder;
+mod encoder;
+
+pub use self::decoder::*;
+pub use self::encoder::*;
diff --git a/vendor/image/src/codecs/ico/decoder.rs b/vendor/image/src/codecs/ico/decoder.rs
new file mode 100644
index 0000000..4f02787
--- /dev/null
+++ b/vendor/image/src/codecs/ico/decoder.rs
@@ -0,0 +1,470 @@
+use byteorder::{LittleEndian, ReadBytesExt};
+use std::convert::TryFrom;
+use std::io::{self, Cursor, Read, Seek, SeekFrom};
+use std::marker::PhantomData;
+use std::{error, fmt, mem};
+
+use crate::color::ColorType;
+use crate::error::{
+ DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::{self, ImageDecoder, ImageFormat};
+
+use self::InnerDecoder::*;
+use crate::codecs::bmp::BmpDecoder;
+use crate::codecs::png::{PngDecoder, PNG_SIGNATURE};
+
+/// Errors that can occur during decoding and parsing an ICO image or one of its enclosed images.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+enum DecoderError {
+ /// The ICO directory is empty
+ NoEntries,
+ /// The number of color planes (0 or 1), or the horizontal coordinate of the hotspot for CUR files, is too big.
+ IcoEntryTooManyPlanesOrHotspot,
+ /// The bit depth (may be 0 meaning unspecified), or the vertical coordinate of the hotspot for CUR files, is too big.
+ IcoEntryTooManyBitsPerPixelOrHotspot,
+
+ /// The entry is in PNG format and specified a length that is shorter than the PNG header.
+ PngShorterThanHeader,
+ /// The enclosed PNG is not in RGBA, which is invalid: https://blogs.msdn.microsoft.com/oldnewthing/20101022-00/?p=12473/.
+ PngNotRgba,
+
+ /// The entry is in BMP format and specified a data size that is not correct for the image and optional mask data.
+ InvalidDataSize,
+
+ /// The dimensions specified by the entry do not match the dimensions in the header of the enclosed image.
+ ImageEntryDimensionMismatch {
+ /// The mismatched subimage's type
+ format: IcoEntryImageFormat,
+ /// The dimensions specified by the entry
+ entry: (u16, u16),
+ /// The dimensions of the image itself
+ image: (u32, u32),
+ },
+}
+
+impl fmt::Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DecoderError::NoEntries => f.write_str("ICO directory contains no image"),
+ DecoderError::IcoEntryTooManyPlanesOrHotspot => {
+ f.write_str("ICO image entry has too many color planes or too large hotspot value")
+ }
+ DecoderError::IcoEntryTooManyBitsPerPixelOrHotspot => f.write_str(
+ "ICO image entry has too many bits per pixel or too large hotspot value",
+ ),
+ DecoderError::PngShorterThanHeader => {
+ f.write_str("Entry specified a length that is shorter than PNG header!")
+ }
+ DecoderError::PngNotRgba => f.write_str("The PNG is not in RGBA format!"),
+ DecoderError::InvalidDataSize => {
+ f.write_str("ICO image data size did not match expected size")
+ }
+ DecoderError::ImageEntryDimensionMismatch {
+ format,
+ entry,
+ image,
+ } => f.write_fmt(format_args!(
+ "Entry{:?} and {}{:?} dimensions do not match!",
+ entry, format, image
+ )),
+ }
+ }
+}
+
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Ico.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {}
+
+/// The image formats an ICO may contain
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+enum IcoEntryImageFormat {
+ /// PNG in ARGB
+ Png,
+ /// BMP with optional alpha mask
+ Bmp,
+}
+
+impl fmt::Display for IcoEntryImageFormat {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(match self {
+ IcoEntryImageFormat::Png => "PNG",
+ IcoEntryImageFormat::Bmp => "BMP",
+ })
+ }
+}
+
+impl From<IcoEntryImageFormat> for ImageFormat {
+ fn from(val: IcoEntryImageFormat) -> Self {
+ match val {
+ IcoEntryImageFormat::Png => ImageFormat::Png,
+ IcoEntryImageFormat::Bmp => ImageFormat::Bmp,
+ }
+ }
+}
+
+/// An ico decoder
+pub struct IcoDecoder<R: Read> {
+ selected_entry: DirEntry,
+ inner_decoder: InnerDecoder<R>,
+}
+
+enum InnerDecoder<R: Read> {
+ Bmp(BmpDecoder<R>),
+ Png(Box<PngDecoder<R>>),
+}
+
+#[derive(Clone, Copy, Default)]
+struct DirEntry {
+ width: u8,
+ height: u8,
+ // We ignore some header fields as they are replicated in the enclosed PNG or BMP
+ // and are not necessary for determining the best_entry.
+ #[allow(unused)]
+ color_count: u8,
+ // Wikipedia has this to say:
+ // Although Microsoft's technical documentation states that this value must be zero, the icon
+ // encoder built into .NET (System.Drawing.Icon.Save) sets this value to 255. It appears that
+ // the operating system ignores this value altogether.
+ #[allow(unused)]
+ reserved: u8,
+
+ // We ignore some header fields as they are replicated in the enclosed PNG or BMP
+ // and are not necessary for determining the best_entry.
+ #[allow(unused)]
+ num_color_planes: u16,
+ bits_per_pixel: u16,
+
+ image_length: u32,
+ image_offset: u32,
+}
+
+impl<R: Read + Seek> IcoDecoder<R> {
+ /// Create a new decoder that decodes from the stream ```r```
+ pub fn new(mut r: R) -> ImageResult<IcoDecoder<R>> {
+ let entries = read_entries(&mut r)?;
+ let entry = best_entry(entries)?;
+ let decoder = entry.decoder(r)?;
+
+ Ok(IcoDecoder {
+ selected_entry: entry,
+ inner_decoder: decoder,
+ })
+ }
+}
+
+fn read_entries<R: Read>(r: &mut R) -> ImageResult<Vec<DirEntry>> {
+ let _reserved = r.read_u16::<LittleEndian>()?;
+ let _type = r.read_u16::<LittleEndian>()?;
+ let count = r.read_u16::<LittleEndian>()?;
+ (0..count).map(|_| read_entry(r)).collect()
+}
+
+fn read_entry<R: Read>(r: &mut R) -> ImageResult<DirEntry> {
+ Ok(DirEntry {
+ width: r.read_u8()?,
+ height: r.read_u8()?,
+ color_count: r.read_u8()?,
+ reserved: r.read_u8()?,
+ num_color_planes: {
+ // This may be either the number of color planes (0 or 1), or the horizontal coordinate
+ // of the hotspot for CUR files.
+ let num = r.read_u16::<LittleEndian>()?;
+ if num > 256 {
+ return Err(DecoderError::IcoEntryTooManyPlanesOrHotspot.into());
+ }
+ num
+ },
+ bits_per_pixel: {
+ // This may be either the bit depth (may be 0 meaning unspecified),
+ // or the vertical coordinate of the hotspot for CUR files.
+ let num = r.read_u16::<LittleEndian>()?;
+ if num > 256 {
+ return Err(DecoderError::IcoEntryTooManyBitsPerPixelOrHotspot.into());
+ }
+ num
+ },
+ image_length: r.read_u32::<LittleEndian>()?,
+ image_offset: r.read_u32::<LittleEndian>()?,
+ })
+}
+
+/// Find the entry with the highest (color depth, size).
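+/// Entries are ranked by the tuple (bits per pixel, real width * real height),
+/// i.e. color depth first, then pixel area.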
+fn best_entry(mut entries: Vec<DirEntry>) -> ImageResult<DirEntry> {
+ let mut best = entries.pop().ok_or(DecoderError::NoEntries)?;
+
+ let mut best_score = (
+ best.bits_per_pixel,
+ u32::from(best.real_width()) * u32::from(best.real_height()),
+ );
+
+ for entry in entries {
+ let score = (
+ entry.bits_per_pixel,
+ u32::from(entry.real_width()) * u32::from(entry.real_height()),
+ );
+ if score > best_score {
+ best = entry;
+ best_score = score;
+ }
+ }
+ Ok(best)
+}
+
+impl DirEntry {
+ fn real_width(&self) -> u16 {
+ match self.width {
+ 0 => 256,
+ w => u16::from(w),
+ }
+ }
+
+ fn real_height(&self) -> u16 {
+ match self.height {
+ 0 => 256,
+ h => u16::from(h),
+ }
+ }
+
+ fn matches_dimensions(&self, width: u32, height: u32) -> bool {
+ u32::from(self.real_width()) == width.min(256)
+ && u32::from(self.real_height()) == height.min(256)
+ }
+
+ fn seek_to_start<R: Read + Seek>(&self, r: &mut R) -> ImageResult<()> {
+ r.seek(SeekFrom::Start(u64::from(self.image_offset)))?;
+ Ok(())
+ }
+
+ fn is_png<R: Read + Seek>(&self, r: &mut R) -> ImageResult<bool> {
+ self.seek_to_start(r)?;
+
+ // Read the first 8 bytes to sniff the image.
+ let mut signature = [0u8; 8];
+ r.read_exact(&mut signature)?;
+
+ Ok(signature == PNG_SIGNATURE)
+ }
+
+ fn decoder<R: Read + Seek>(&self, mut r: R) -> ImageResult<InnerDecoder<R>> {
+ let is_png = self.is_png(&mut r)?;
+ self.seek_to_start(&mut r)?;
+
+ if is_png {
+ Ok(Png(Box::new(PngDecoder::new(r)?)))
+ } else {
+ Ok(Bmp(BmpDecoder::new_with_ico_format(r)?))
+ }
+ }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct IcoReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for IcoReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ if self.0.position() == 0 && buf.is_empty() {
+ mem::swap(buf, self.0.get_mut());
+ Ok(buf.len())
+ } else {
+ self.0.read_to_end(buf)
+ }
+ }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for IcoDecoder<R> {
+ type Reader = IcoReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ match self.inner_decoder {
+ Bmp(ref decoder) => decoder.dimensions(),
+ Png(ref decoder) => decoder.dimensions(),
+ }
+ }
+
+ fn color_type(&self) -> ColorType {
+ match self.inner_decoder {
+ Bmp(ref decoder) => decoder.color_type(),
+ Png(ref decoder) => decoder.color_type(),
+ }
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ Ok(IcoReader(
+ Cursor::new(image::decoder_to_vec(self)?),
+ PhantomData,
+ ))
+ }
+
+ fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+ match self.inner_decoder {
+ Png(decoder) => {
+ if self.selected_entry.image_length < PNG_SIGNATURE.len() as u32 {
+ return Err(DecoderError::PngShorterThanHeader.into());
+ }
+
+ // Check if the image dimensions match the ones in the image data.
+ let (width, height) = decoder.dimensions();
+ if !self.selected_entry.matches_dimensions(width, height) {
+ return Err(DecoderError::ImageEntryDimensionMismatch {
+ format: IcoEntryImageFormat::Png,
+ entry: (
+ self.selected_entry.real_width(),
+ self.selected_entry.real_height(),
+ ),
+ image: (width, height),
+ }
+ .into());
+ }
+
+ // Embedded PNG images can only be of the 32BPP RGBA format.
+ // https://blogs.msdn.microsoft.com/oldnewthing/20101022-00/?p=12473/
+ if decoder.color_type() != ColorType::Rgba8 {
+ return Err(DecoderError::PngNotRgba.into());
+ }
+
+ decoder.read_image(buf)
+ }
+ Bmp(mut decoder) => {
+ let (width, height) = decoder.dimensions();
+ if !self.selected_entry.matches_dimensions(width, height) {
+ return Err(DecoderError::ImageEntryDimensionMismatch {
+ format: IcoEntryImageFormat::Bmp,
+ entry: (
+ self.selected_entry.real_width(),
+ self.selected_entry.real_height(),
+ ),
+ image: (width, height),
+ }
+ .into());
+ }
+
+ // The ICO decoder needs an alpha channel to apply the AND mask.
+ if decoder.color_type() != ColorType::Rgba8 {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Bmp.into(),
+ UnsupportedErrorKind::Color(decoder.color_type().into()),
+ ),
+ ));
+ }
+
+ decoder.read_image_data(buf)?;
+
+ let r = decoder.reader();
+ let image_end = r.stream_position()?;
+ let data_end = u64::from(self.selected_entry.image_offset)
+ + u64::from(self.selected_entry.image_length);
+
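+ // the AND mask stores one bit per pixel, with each row padded to a 32-bit boundary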
+ let mask_row_bytes = ((width + 31) / 32) * 4;
+ let mask_length = u64::from(mask_row_bytes) * u64::from(height);
+
+ // data_end should be image_end + the mask length (mask_row_bytes * height).
+ // According to
+ // https://devblogs.microsoft.com/oldnewthing/20101021-00/?p=12483
+ // the mask is required, but according to Wikipedia
+ // https://en.wikipedia.org/wiki/ICO_(file_format)
+ // the mask is not required. Unfortunately, Wikipedia does not have a citation
+ // for that claim, so we can't be sure which is correct.
+ if data_end >= image_end + mask_length {
+ // If there's an AND mask following the image, read and apply it.
+ for y in 0..height {
+ let mut x = 0;
+ for _ in 0..mask_row_bytes {
+ // Apply the bits of each byte until we reach the end of the row.
+ let mask_byte = r.read_u8()?;
+ for bit in (0..8).rev() {
+ if x >= width {
+ break;
+ }
+ if mask_byte & (1 << bit) != 0 {
+ // Set alpha channel to transparent.
+ buf[((height - y - 1) * width + x) as usize * 4 + 3] = 0;
+ }
+ x += 1;
+ }
+ }
+ }
+
+ Ok(())
+ } else if data_end == image_end {
+ // accept images with no mask data
+ Ok(())
+ } else {
+ Err(DecoderError::InvalidDataSize.into())
+ }
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ // Test that BMP images without an alpha channel inside ICOs don't cause a panic.
+ // Because the test data is invalid, decoding should produce an error.
+ #[test]
+ fn bmp_16_with_missing_alpha_channel() {
+ let data = vec![
+ 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x0e, 0x04, 0xc3, 0x7e, 0x00, 0x00, 0x00, 0x00,
+ 0x7c, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0x01, 0x00,
+ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x8f, 0xf6, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x20, 0x66, 0x74, 0x83, 0x70, 0x61, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xeb, 0x00, 0x9b, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x47, 0x0d,
+ 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x62, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0c,
+ 0x00, 0x00, 0x00, 0xc3, 0x3f, 0x94, 0x61, 0xaa, 0x17, 0x4d, 0x8d, 0x79, 0x1d, 0x8b,
+ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x2e, 0x28, 0x40, 0xe5, 0x9f,
+ 0x4b, 0x4d, 0xe9, 0x87, 0xd3, 0xda, 0xd6, 0x89, 0x81, 0xc5, 0xa4, 0xa1, 0x60, 0x98,
+ 0x31, 0xc7, 0x1d, 0xb6, 0x8f, 0x20, 0xc8, 0x3e, 0xee, 0xd8, 0xe4, 0x8f, 0xee, 0x7b,
+ 0x48, 0x9b, 0x88, 0x25, 0x13, 0xda, 0xa4, 0x13, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x40,
+ 0x16, 0x01, 0xff, 0xff, 0xff, 0xff, 0xe9, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xa3, 0x66, 0x64, 0x41, 0x54, 0xa3, 0xa3, 0x00, 0x00, 0x00, 0xb8, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x66, 0x64, 0x41, 0x54, 0xa3, 0xa3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0xf6, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x83, 0x70, 0x61, 0x76,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff,
+ 0xeb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x62, 0x49,
+ 0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xc8, 0x00, 0x02, 0x0c, 0x00, 0xff, 0xff, 0xc6,
+ 0x84, 0x00, 0x2a, 0x75, 0x03, 0xa3, 0x05, 0xfb, 0xe1, 0x6e, 0xe8, 0x27, 0xd6, 0xd3,
+ 0x96, 0xc1, 0xe4, 0x30, 0x0c, 0x05, 0xb9, 0xa3, 0x8b, 0x29, 0xda, 0xa4, 0xf1, 0x4d,
+ 0xf3, 0xb2, 0x98, 0x2b, 0xe6, 0x93, 0x07, 0xf9, 0xca, 0x2b, 0xc2, 0x39, 0x20, 0xba,
+ 0x7c, 0xa0, 0xb1, 0x43, 0xe6, 0xf9, 0xdc, 0xd1, 0xc2, 0x52, 0xdc, 0x41, 0xc1, 0x2f,
+ 0x29, 0xf7, 0x46, 0x32, 0xda, 0x1b, 0x72, 0x8c, 0xe6, 0x2b, 0x01, 0xe5, 0x49, 0x21,
+ 0x89, 0x89, 0xe4, 0x3d, 0xa1, 0xdb, 0x3b, 0x4a, 0x0b, 0x52, 0x86, 0x52, 0x33, 0x9d,
+ 0xb2, 0xcf, 0x4a, 0x86, 0x53, 0xd7, 0xa9, 0x4b, 0xaf, 0x62, 0x06, 0x49, 0x53, 0x00,
+ 0xc3, 0x3f, 0x94, 0x61, 0xaa, 0x17, 0x4d, 0x8d, 0x79, 0x1d, 0x8b, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x2e, 0x28, 0x40, 0xe5, 0x9f, 0x4b, 0x4d, 0xe9,
+ 0x87, 0xd3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe7, 0xc5, 0x00,
+ 0x02, 0x00, 0x00, 0x00, 0x06, 0x00, 0x0b, 0x00, 0x50, 0x31, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x76, 0x76, 0x01, 0x00, 0x00, 0x00, 0x76, 0x00,
+ 0x00, 0x23, 0x3f, 0x52, 0x41, 0x44, 0x49, 0x41, 0x4e, 0x43, 0x45, 0x61, 0x50, 0x35,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x4d, 0x47, 0x49, 0x46, 0x38, 0x37, 0x61, 0x05,
+ 0x50, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc7, 0x37, 0x61,
+ ];
+
+ let decoder = IcoDecoder::new(Cursor::new(&data)).unwrap();
+ let mut buf = vec![0; usize::try_from(decoder.total_bytes()).unwrap()];
+ assert!(decoder.read_image(&mut buf).is_err());
+ }
+}
diff --git a/vendor/image/src/codecs/ico/encoder.rs b/vendor/image/src/codecs/ico/encoder.rs
new file mode 100644
index 0000000..dd5961b
--- /dev/null
+++ b/vendor/image/src/codecs/ico/encoder.rs
@@ -0,0 +1,194 @@
+use byteorder::{LittleEndian, WriteBytesExt};
+use std::borrow::Cow;
+use std::io::{self, Write};
+
+use crate::color::ColorType;
+use crate::error::{ImageError, ImageResult, ParameterError, ParameterErrorKind};
+use crate::image::ImageEncoder;
+
+use crate::codecs::png::PngEncoder;
+
+// Enum value indicating an ICO image (as opposed to a CUR image):
+const ICO_IMAGE_TYPE: u16 = 1;
+// The length of an ICO file ICONDIR structure, in bytes:
+const ICO_ICONDIR_SIZE: u32 = 6;
+// The length of an ICO file DIRENTRY structure, in bytes:
+const ICO_DIRENTRY_SIZE: u32 = 16;
+
+/// ICO encoder
+pub struct IcoEncoder<W: Write> {
+ w: W,
+}
+
+/// An ICO image entry
+pub struct IcoFrame<'a> {
+ // Pre-encoded PNG or BMP
+ encoded_image: Cow<'a, [u8]>,
+ // Stored as `0 => 256, n => n`
+ width: u8,
+ // Stored as `0 => 256, n => n`
+ height: u8,
+ color_type: ColorType,
+}
+
+impl<'a> IcoFrame<'a> {
+ /// Construct a new `IcoFrame` using a pre-encoded PNG or BMP
+ ///
+ /// The `width` and `height` must be between 1 and 256 (inclusive).
+ pub fn with_encoded(
+ encoded_image: impl Into<Cow<'a, [u8]>>,
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<Self> {
+ let encoded_image = encoded_image.into();
+
+ if !(1..=256).contains(&width) {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(format!(
+ "the image width must be `1..=256`, instead width {} was provided",
+ width,
+ )),
+ )));
+ }
+
+ if !(1..=256).contains(&height) {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(format!(
+ "the image height must be `1..=256`, instead height {} was provided",
+ height,
+ )),
+ )));
+ }
+
+ Ok(Self {
+ encoded_image,
+ width: width as u8,
+ height: height as u8,
+ color_type,
+ })
+ }
+
+ /// Construct a new `IcoFrame` by encoding `buf` as a PNG
+ ///
+ /// The `width` and `height` must be between 1 and 256 (inclusive)
+ pub fn as_png(buf: &[u8], width: u32, height: u32, color_type: ColorType) -> ImageResult<Self> {
+ let mut image_data: Vec<u8> = Vec::new();
+ PngEncoder::new(&mut image_data).write_image(buf, width, height, color_type)?;
+
+ let frame = Self::with_encoded(image_data, width, height, color_type)?;
+ Ok(frame)
+ }
+}
+
+impl<W: Write> IcoEncoder<W> {
+ /// Create a new encoder that writes its output to ```w```.
+ pub fn new(w: W) -> IcoEncoder<W> {
+ IcoEncoder { w }
+ }
+
+ /// Encodes the image ```data``` that has dimensions ```width``` and
+ /// ```height``` and ```ColorType``` ```color```. The dimensions of the image
+ /// must be between 1 and 256 (inclusive) or an error will be returned.
+ ///
+ /// Expects data to be big endian.
+ #[deprecated = "Use `IcoEncoder::write_image` instead. Beware that `write_image` has a different endianness convention"]
+ pub fn encode(self, data: &[u8], width: u32, height: u32, color: ColorType) -> ImageResult<()> {
+ let mut image_data: Vec<u8> = Vec::new();
+ #[allow(deprecated)]
+ PngEncoder::new(&mut image_data).encode(data, width, height, color)?;
+
+ let image = IcoFrame::with_encoded(&image_data, width, height, color)?;
+ self.encode_images(&[image])
+ }
+
+ /// Takes some [`IcoFrame`]s and encodes them into an ICO.
+ ///
+ /// `images` is a list of images, usually ordered by dimension, which
+ /// must be between 1 and 65535 (inclusive) in length.
+ pub fn encode_images(mut self, images: &[IcoFrame<'_>]) -> ImageResult<()> {
+ if !(1..=usize::from(u16::MAX)).contains(&images.len()) {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(format!(
+ "the number of images must be `1..=u16::MAX`, instead {} images were provided",
+ images.len(),
+ )),
+ )));
+ }
+ let num_images = images.len() as u16;
+
+ let mut offset = ICO_ICONDIR_SIZE + (ICO_DIRENTRY_SIZE * (images.len() as u32));
+ write_icondir(&mut self.w, num_images)?;
+ for image in images {
+ write_direntry(
+ &mut self.w,
+ image.width,
+ image.height,
+ image.color_type,
+ offset,
+ image.encoded_image.len() as u32,
+ )?;
+
+ offset += image.encoded_image.len() as u32;
+ }
+ for image in images {
+ self.w.write_all(&image.encoded_image)?;
+ }
+ Ok(())
+ }
+}
+
+impl<W: Write> ImageEncoder for IcoEncoder<W> {
+ /// Write an ICO image with the specified width, height, and color type.
+ ///
+ /// For color types with 16-bit per channel or larger, the contents of `buf` should be in
+ /// native endian.
+ ///
+ /// WARNING: In image 0.23.14 and earlier this method erroneously expected buf to be in big endian.
+ fn write_image(
+ self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ let image = IcoFrame::as_png(buf, width, height, color_type)?;
+ self.encode_images(&[image])
+ }
+}
+
+fn write_icondir<W: Write>(w: &mut W, num_images: u16) -> io::Result<()> {
+ // Reserved field (must be zero):
+ w.write_u16::<LittleEndian>(0)?;
+ // Image type (ICO or CUR):
+ w.write_u16::<LittleEndian>(ICO_IMAGE_TYPE)?;
+ // Number of images in the file:
+ w.write_u16::<LittleEndian>(num_images)?;
+ Ok(())
+}
+
+fn write_direntry<W: Write>(
+ w: &mut W,
+ width: u8,
+ height: u8,
+ color: ColorType,
+ data_start: u32,
+ data_size: u32,
+) -> io::Result<()> {
+ // Image dimensions:
+ w.write_u8(width)?;
+ w.write_u8(height)?;
+ // Number of colors in palette (or zero for no palette):
+ w.write_u8(0)?;
+ // Reserved field (must be zero):
+ w.write_u8(0)?;
+ // Color planes:
+ w.write_u16::<LittleEndian>(0)?;
+ // Bits per pixel:
+ w.write_u16::<LittleEndian>(color.bits_per_pixel())?;
+ // Image data size, in bytes:
+ w.write_u32::<LittleEndian>(data_size)?;
+ // Image data offset, in bytes:
+ w.write_u32::<LittleEndian>(data_start)?;
+ Ok(())
+}
diff --git a/vendor/image/src/codecs/ico/mod.rs b/vendor/image/src/codecs/ico/mod.rs
new file mode 100644
index 0000000..11493ac
--- /dev/null
+++ b/vendor/image/src/codecs/ico/mod.rs
@@ -0,0 +1,14 @@
+//! Decoding and Encoding of ICO files
+//!
+//! A decoder and encoder for ICO (Windows Icon) image container files.
+//!
+//! # Related Links
+//! * <https://msdn.microsoft.com/en-us/library/ms997538.aspx>
+//! * <https://en.wikipedia.org/wiki/ICO_%28file_format%29>
+
+pub use self::decoder::IcoDecoder;
+#[allow(deprecated)]
+pub use self::encoder::{IcoEncoder, IcoFrame};
+
+mod decoder;
+mod encoder;
diff --git a/vendor/image/src/codecs/jpeg/decoder.rs b/vendor/image/src/codecs/jpeg/decoder.rs
new file mode 100644
index 0000000..9625e33
--- /dev/null
+++ b/vendor/image/src/codecs/jpeg/decoder.rs
@@ -0,0 +1,1289 @@
+use std::convert::TryFrom;
+use std::io::{self, Cursor, Read};
+use std::marker::PhantomData;
+use std::mem;
+
+use crate::color::ColorType;
+use crate::error::{
+ DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::{ImageDecoder, ImageFormat};
+
+/// JPEG decoder
+pub struct JpegDecoder<R> {
+ decoder: jpeg::Decoder<R>,
+ metadata: jpeg::ImageInfo,
+}
+
+impl<R: Read> JpegDecoder<R> {
+ /// Create a new decoder that decodes from the stream ```r```
+ pub fn new(r: R) -> ImageResult<JpegDecoder<R>> {
+ let mut decoder = jpeg::Decoder::new(r);
+
+ decoder.read_info().map_err(ImageError::from_jpeg)?;
+ let mut metadata = decoder.info().ok_or_else(|| {
+ ImageError::Decoding(DecodingError::from_format_hint(ImageFormat::Jpeg.into()))
+ })?;
+
+ // We convert CMYK data to RGB before returning it to the user.
+ if metadata.pixel_format == jpeg::PixelFormat::CMYK32 {
+ metadata.pixel_format = jpeg::PixelFormat::RGB24;
+ }
+
+ Ok(JpegDecoder { decoder, metadata })
+ }
+
+ /// Configure the decoder to scale the image during decoding.
+ ///
+ /// This efficiently scales the image by the smallest supported
+ /// scale factor that produces an image larger than or equal to
+ /// the requested size in at least one axis. The currently
+ /// implemented scale factors are 1/8, 1/4, 1/2 and 1.
+ ///
+ /// To generate a thumbnail of an exact size, pass the desired
+ /// size and then scale to the final size using a traditional
+ /// resampling algorithm.
+ ///
+ /// The size of the image to be loaded, with the scale factor
+ /// applied, is returned.
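+ ///
+ /// A minimal usage sketch (the file name is only a placeholder):
+ ///
+ /// ```no_run
+ /// use image::codecs::jpeg::JpegDecoder;
+ /// use std::{fs::File, io::BufReader};
+ ///
+ /// let reader = BufReader::new(File::open("photo.jpg").unwrap());
+ /// let mut decoder = JpegDecoder::new(reader).unwrap();
+ /// // request roughly 512x512; the decoder picks 1/8, 1/4, 1/2 or 1
+ /// let (width, height) = decoder.scale(512, 512).unwrap();
+ /// ```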
+ pub fn scale(
+ &mut self,
+ requested_width: u16,
+ requested_height: u16,
+ ) -> ImageResult<(u16, u16)> {
+ let result = self
+ .decoder
+ .scale(requested_width, requested_height)
+ .map_err(ImageError::from_jpeg)?;
+
+ self.metadata.width = result.0;
+ self.metadata.height = result.1;
+
+ Ok(result)
+ }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct JpegReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for JpegReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ if self.0.position() == 0 && buf.is_empty() {
+ mem::swap(buf, self.0.get_mut());
+ Ok(buf.len())
+ } else {
+ self.0.read_to_end(buf)
+ }
+ }
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for JpegDecoder<R> {
+ type Reader = JpegReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (
+ u32::from(self.metadata.width),
+ u32::from(self.metadata.height),
+ )
+ }
+
+ fn color_type(&self) -> ColorType {
+ ColorType::from_jpeg(self.metadata.pixel_format)
+ }
+
+ fn icc_profile(&mut self) -> Option<Vec<u8>> {
+ self.decoder.icc_profile()
+ }
+
+ fn into_reader(mut self) -> ImageResult<Self::Reader> {
+ let mut data = self.decoder.decode().map_err(ImageError::from_jpeg)?;
+ data = match self.decoder.info().unwrap().pixel_format {
+ jpeg::PixelFormat::CMYK32 => cmyk_to_rgb(&data),
+ _ => data,
+ };
+
+ Ok(JpegReader(Cursor::new(data), PhantomData))
+ }
+
+ fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+
+ let mut data = self.decoder.decode().map_err(ImageError::from_jpeg)?;
+ data = match self.decoder.info().unwrap().pixel_format {
+ jpeg::PixelFormat::CMYK32 => cmyk_to_rgb(&data),
+ _ => data,
+ };
+
+ buf.copy_from_slice(&data);
+ Ok(())
+ }
+}
+
+fn cmyk_to_rgb(input: &[u8]) -> Vec<u8> {
+ let count = input.len() / 4;
+ let mut output = vec![0; 3 * count];
+
+ let in_pixels = input[..4 * count].chunks_exact(4);
+ let out_pixels = output[..3 * count].chunks_exact_mut(3);
+
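+ // standard CMYK -> RGB conversion: R = (255 - C) * (255 - K) / 255, and
+ // likewise for G and B (the same formula the tests below check against)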
+ for (pixel, outp) in in_pixels.zip(out_pixels) {
+ let c = 255 - u16::from(pixel[0]);
+ let m = 255 - u16::from(pixel[1]);
+ let y = 255 - u16::from(pixel[2]);
+ let k = 255 - u16::from(pixel[3]);
+ // CMY -> RGB
+ let r = (k * c) / 255;
+ let g = (k * m) / 255;
+ let b = (k * y) / 255;
+
+ outp[0] = r as u8;
+ outp[1] = g as u8;
+ outp[2] = b as u8;
+ }
+
+ output
+}
+
+impl ColorType {
+ fn from_jpeg(pixel_format: jpeg::PixelFormat) -> ColorType {
+ use jpeg::PixelFormat::*;
+ match pixel_format {
+ L8 => ColorType::L8,
+ L16 => ColorType::L16,
+ RGB24 => ColorType::Rgb8,
+ CMYK32 => panic!(),
+ }
+ }
+}
+
+impl ImageError {
+ fn from_jpeg(err: jpeg::Error) -> ImageError {
+ use jpeg::Error::*;
+ match err {
+ err @ Format(_) => {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Jpeg.into(), err))
+ }
+ Unsupported(desc) => ImageError::Unsupported(UnsupportedError::from_format_and_kind(
+ ImageFormat::Jpeg.into(),
+ UnsupportedErrorKind::GenericFeature(format!("{:?}", desc)),
+ )),
+ Io(err) => ImageError::IoError(err),
+ Internal(err) => {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Jpeg.into(), err))
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ #[cfg(feature = "benchmarks")]
+ extern crate test;
+
+ use super::cmyk_to_rgb;
+ #[cfg(feature = "benchmarks")]
+ use test::Bencher;
+
+ #[cfg(feature = "benchmarks")]
+ const W: usize = 256;
+ #[cfg(feature = "benchmarks")]
+ const H: usize = 256;
+
+ #[test]
+ fn cmyk_to_rgb_correct() {
+ for c in 0..=255 {
+ for k in 0..=255 {
+ // Based on R = 255 * (1-C/255) * (1-K/255)
+ let r = (255.0 - f32::from(c)) * (255.0 - f32::from(k)) / 255.0;
+ let r_u8 = r as u8;
+ let convert_r = cmyk_to_rgb(&[c, 0, 0, k])[0];
+ let convert_g = cmyk_to_rgb(&[0, c, 0, k])[1];
+ let convert_b = cmyk_to_rgb(&[0, 0, c, k])[2];
+
+ assert_eq!(
+ convert_r, r_u8,
+ "c = {}, k = {}, cymk_to_rgb[0] = {}, should be {}",
+ c, k, convert_r, r_u8
+ );
+ assert_eq!(
+ convert_g, r_u8,
+ "m = {}, k = {}, cymk_to_rgb[1] = {}, should be {}",
+ c, k, convert_g, r_u8
+ );
+ assert_eq!(
+ convert_b, r_u8,
+ "y = {}, k = {}, cymk_to_rgb[2] = {}, should be {}",
+ c, k, convert_b, r_u8
+ );
+ }
+ }
+ }
+
+ fn single_pix_correct(cmyk_pix: [u8; 4], rgb_pix_true: [u8; 3]) {
+ let rgb_pix = cmyk_to_rgb(&cmyk_pix);
+ assert_eq!(
+ rgb_pix_true[0], rgb_pix[0],
+ "With CMYK {:?} expected {:?}, got {:?}",
+ cmyk_pix, rgb_pix_true, rgb_pix
+ );
+ assert_eq!(
+ rgb_pix_true[1], rgb_pix[1],
+ "With CMYK {:?} expected {:?}, got {:?}",
+ cmyk_pix, rgb_pix_true, rgb_pix
+ );
+ assert_eq!(
+ rgb_pix_true[2], rgb_pix[2],
+ "With CMYK {:?} expected {:?}, got {:?}",
+ cmyk_pix, rgb_pix_true, rgb_pix
+ );
+ }
+
+ #[test]
+ fn test_assorted_colors() {
+ let cmyk_pixels = vec![
+ [0, 51, 102, 65],
+ [153, 204, 0, 65],
+ [0, 0, 0, 67],
+ [0, 85, 170, 69],
+ [0, 0, 0, 71],
+ [0, 0, 0, 73],
+ [0, 17, 34, 75],
+ [51, 68, 85, 75],
+ [102, 119, 136, 75],
+ [153, 170, 187, 75],
+ [204, 221, 238, 75],
+ [0, 0, 0, 77],
+ [0, 0, 0, 79],
+ [0, 85, 170, 81],
+ [0, 0, 0, 83],
+ [0, 3, 6, 85],
+ [9, 12, 15, 85],
+ [18, 21, 24, 85],
+ [27, 30, 33, 85],
+ [36, 39, 42, 85],
+ [45, 48, 51, 85],
+ [54, 57, 60, 85],
+ [63, 66, 69, 85],
+ [72, 75, 78, 85],
+ [81, 84, 87, 85],
+ [90, 93, 96, 85],
+ [99, 102, 105, 85],
+ [108, 111, 114, 85],
+ [117, 120, 123, 85],
+ [126, 129, 132, 85],
+ [135, 138, 141, 85],
+ [144, 147, 150, 85],
+ [153, 156, 159, 85],
+ [162, 165, 168, 85],
+ [171, 174, 177, 85],
+ [180, 183, 186, 85],
+ [189, 192, 195, 85],
+ [198, 201, 204, 85],
+ [207, 210, 213, 85],
+ [216, 219, 222, 85],
+ [225, 228, 231, 85],
+ [234, 237, 240, 85],
+ [243, 246, 249, 85],
+ [252, 0, 0, 85],
+ [0, 85, 170, 87],
+ [0, 0, 0, 89],
+ [0, 0, 0, 91],
+ [0, 85, 170, 93],
+ [0, 51, 102, 95],
+ [153, 204, 0, 95],
+ [0, 0, 0, 97],
+ [0, 85, 170, 99],
+ [0, 0, 0, 101],
+ [0, 0, 0, 103],
+ [0, 17, 34, 105],
+ [51, 68, 85, 105],
+ [102, 119, 136, 105],
+ [153, 170, 187, 105],
+ [204, 221, 238, 105],
+ [0, 0, 0, 107],
+ [0, 0, 0, 109],
+ [0, 85, 170, 111],
+ [0, 0, 0, 113],
+ [0, 51, 102, 115],
+ [153, 204, 0, 115],
+ [0, 85, 170, 117],
+ [0, 15, 30, 119],
+ [45, 60, 75, 119],
+ [90, 105, 120, 119],
+ [135, 150, 165, 119],
+ [180, 195, 210, 119],
+ [225, 240, 0, 119],
+ [0, 0, 0, 121],
+ [0, 85, 170, 123],
+ [0, 51, 102, 125],
+ [153, 204, 0, 125],
+ [0, 0, 0, 127],
+ [0, 0, 0, 128],
+ [0, 85, 170, 129],
+ [0, 51, 102, 130],
+ [153, 204, 0, 130],
+ [0, 0, 0, 131],
+ [0, 85, 170, 132],
+ [0, 0, 0, 133],
+ [0, 0, 0, 134],
+ [0, 17, 34, 135],
+ [51, 68, 85, 135],
+ [102, 119, 136, 135],
+ [153, 170, 187, 135],
+ [204, 221, 238, 135],
+ [0, 15, 30, 136],
+ [45, 60, 75, 136],
+ [90, 105, 120, 136],
+ [135, 150, 165, 136],
+ [180, 195, 210, 136],
+ [225, 240, 0, 136],
+ [0, 0, 0, 137],
+ [0, 85, 170, 138],
+ [0, 0, 0, 139],
+ [0, 51, 102, 140],
+ [153, 204, 0, 140],
+ [0, 85, 170, 141],
+ [0, 0, 0, 142],
+ [0, 0, 0, 143],
+ [0, 85, 170, 144],
+ [0, 51, 102, 145],
+ [153, 204, 0, 145],
+ [0, 0, 0, 146],
+ [0, 85, 170, 147],
+ [0, 0, 0, 148],
+ [0, 0, 0, 149],
+ [0, 17, 34, 150],
+ [51, 68, 85, 150],
+ [102, 119, 136, 150],
+ [153, 170, 187, 150],
+ [204, 221, 238, 150],
+ [0, 0, 0, 151],
+ [0, 0, 0, 152],
+ [0, 5, 10, 153],
+ [15, 20, 25, 153],
+ [30, 35, 40, 153],
+ [45, 50, 55, 153],
+ [60, 65, 70, 153],
+ [75, 80, 85, 153],
+ [90, 95, 100, 153],
+ [105, 110, 115, 153],
+ [120, 125, 130, 153],
+ [135, 140, 145, 153],
+ [150, 155, 160, 153],
+ [165, 170, 175, 153],
+ [180, 185, 190, 153],
+ [195, 200, 205, 153],
+ [210, 215, 220, 153],
+ [225, 230, 235, 153],
+ [240, 245, 250, 153],
+ [0, 0, 0, 154],
+ [0, 51, 102, 155],
+ [153, 204, 0, 155],
+ [0, 85, 170, 156],
+ [0, 0, 0, 157],
+ [0, 0, 0, 158],
+ [0, 85, 170, 159],
+ [0, 51, 102, 160],
+ [153, 204, 0, 160],
+ [0, 0, 0, 161],
+ [0, 85, 170, 162],
+ [0, 0, 0, 163],
+ [0, 0, 0, 164],
+ [0, 17, 34, 165],
+ [51, 68, 85, 165],
+ [102, 119, 136, 165],
+ [153, 170, 187, 165],
+ [204, 221, 238, 165],
+ [0, 0, 0, 166],
+ [0, 0, 0, 167],
+ [0, 85, 170, 168],
+ [0, 0, 0, 169],
+ [0, 3, 6, 170],
+ [9, 12, 15, 170],
+ [18, 21, 24, 170],
+ [27, 30, 33, 170],
+ [36, 39, 42, 170],
+ [45, 48, 51, 170],
+ [54, 57, 60, 170],
+ [63, 66, 69, 170],
+ [72, 75, 78, 170],
+ [81, 84, 87, 170],
+ [90, 93, 96, 170],
+ [99, 102, 105, 170],
+ [108, 111, 114, 170],
+ [117, 120, 123, 170],
+ [126, 129, 132, 170],
+ [135, 138, 141, 170],
+ [144, 147, 150, 170],
+ [153, 156, 159, 170],
+ [162, 165, 168, 170],
+ [171, 174, 177, 170],
+ [180, 183, 186, 170],
+ [189, 192, 195, 170],
+ [198, 201, 204, 170],
+ [207, 210, 213, 170],
+ [216, 219, 222, 170],
+ [225, 228, 231, 170],
+ [234, 237, 240, 170],
+ [243, 246, 249, 170],
+ [252, 0, 0, 170],
+ [0, 85, 170, 171],
+ [0, 0, 0, 172],
+ [0, 0, 0, 173],
+ [0, 85, 170, 174],
+ [0, 51, 102, 175],
+ [153, 204, 0, 175],
+ [0, 0, 0, 176],
+ [0, 85, 170, 177],
+ [0, 0, 0, 178],
+ [0, 0, 0, 179],
+ [0, 17, 34, 180],
+ [51, 68, 85, 180],
+ [102, 119, 136, 180],
+ [153, 170, 187, 180],
+ [204, 221, 238, 180],
+ [0, 0, 0, 181],
+ [0, 0, 0, 182],
+ [0, 85, 170, 183],
+ [0, 0, 0, 184],
+ [0, 51, 102, 185],
+ [153, 204, 0, 185],
+ [0, 85, 170, 186],
+ [0, 15, 30, 187],
+ [45, 60, 75, 187],
+ [90, 105, 120, 187],
+ [135, 150, 165, 187],
+ [180, 195, 210, 187],
+ [225, 240, 0, 187],
+ [0, 0, 0, 188],
+ [0, 85, 170, 189],
+ [0, 51, 102, 190],
+ [153, 204, 0, 190],
+ [0, 0, 0, 191],
+ [0, 85, 170, 192],
+ [0, 0, 0, 193],
+ [0, 0, 0, 194],
+ [0, 17, 34, 195],
+ [51, 68, 85, 195],
+ [102, 119, 136, 195],
+ [153, 170, 187, 195],
+ [204, 221, 238, 195],
+ [0, 0, 0, 196],
+ [0, 0, 0, 197],
+ [0, 85, 170, 198],
+ [0, 0, 0, 199],
+ [0, 51, 102, 200],
+ [153, 204, 0, 200],
+ [0, 85, 170, 201],
+ [0, 0, 0, 202],
+ [0, 0, 0, 203],
+ [0, 5, 10, 204],
+ [15, 20, 25, 204],
+ [30, 35, 40, 204],
+ [45, 50, 55, 204],
+ [60, 65, 70, 204],
+ [75, 80, 85, 204],
+ [90, 95, 100, 204],
+ [105, 110, 115, 204],
+ [120, 125, 130, 204],
+ [135, 140, 145, 204],
+ [150, 155, 160, 204],
+ [165, 170, 175, 204],
+ [180, 185, 190, 204],
+ [195, 200, 205, 204],
+ [210, 215, 220, 204],
+ [225, 230, 235, 204],
+ [240, 245, 250, 204],
+ [0, 51, 102, 205],
+ [153, 204, 0, 205],
+ [0, 0, 0, 206],
+ [0, 85, 170, 207],
+ [0, 0, 0, 208],
+ [0, 0, 0, 209],
+ [0, 17, 34, 210],
+ [51, 68, 85, 210],
+ [102, 119, 136, 210],
+ [153, 170, 187, 210],
+ [204, 221, 238, 210],
+ [0, 0, 0, 211],
+ [0, 0, 0, 212],
+ [0, 85, 170, 213],
+ [0, 0, 0, 214],
+ [0, 51, 102, 215],
+ [153, 204, 0, 215],
+ [0, 85, 170, 216],
+ [0, 0, 0, 217],
+ [0, 0, 0, 218],
+ [0, 85, 170, 219],
+ [0, 51, 102, 220],
+ [153, 204, 0, 220],
+ [0, 15, 30, 221],
+ [45, 60, 75, 221],
+ [90, 105, 120, 221],
+ [135, 150, 165, 221],
+ [180, 195, 210, 221],
+ [225, 240, 0, 221],
+ [0, 85, 170, 222],
+ [0, 0, 0, 223],
+ [0, 0, 0, 224],
+ [0, 17, 34, 225],
+ [51, 68, 85, 225],
+ [102, 119, 136, 225],
+ [153, 170, 187, 225],
+ [204, 221, 238, 225],
+ [0, 0, 0, 226],
+ [0, 0, 0, 227],
+ [0, 85, 170, 228],
+ [0, 0, 0, 229],
+ [0, 51, 102, 230],
+ [153, 204, 0, 230],
+ [0, 85, 170, 231],
+ [0, 0, 0, 232],
+ [0, 0, 0, 233],
+ [0, 85, 170, 234],
+ [0, 51, 102, 235],
+ [153, 204, 0, 235],
+ [0, 0, 0, 236],
+ [0, 85, 170, 237],
+ [0, 15, 30, 238],
+ [45, 60, 75, 238],
+ [90, 105, 120, 238],
+ [135, 150, 165, 238],
+ [180, 195, 210, 238],
+ [225, 240, 0, 238],
+ [0, 0, 0, 239],
+ [0, 17, 34, 240],
+ [51, 68, 85, 240],
+ [102, 119, 136, 240],
+ [153, 170, 187, 240],
+ [204, 221, 238, 240],
+ [0, 0, 0, 241],
+ [0, 0, 0, 242],
+ [0, 85, 170, 243],
+ [0, 0, 0, 244],
+ [0, 51, 102, 245],
+ [153, 204, 0, 245],
+ [0, 85, 170, 246],
+ [0, 0, 0, 247],
+ [0, 0, 0, 248],
+ [0, 85, 170, 249],
+ [0, 51, 102, 250],
+ [153, 204, 0, 250],
+ [0, 0, 0, 251],
+ [0, 85, 170, 252],
+ [0, 0, 0, 253],
+ [0, 0, 0, 254],
+ [5, 15, 25, 102],
+ [35, 40, 45, 102],
+ [50, 55, 60, 102],
+ [65, 70, 75, 102],
+ [80, 85, 90, 102],
+ [95, 100, 105, 102],
+ [110, 115, 120, 102],
+ [125, 130, 135, 102],
+ [140, 145, 150, 102],
+ [155, 160, 165, 102],
+ [170, 175, 180, 102],
+ [185, 190, 195, 102],
+ [200, 205, 210, 102],
+ [215, 220, 225, 102],
+ [230, 235, 240, 102],
+ [245, 250, 0, 102],
+ [15, 45, 60, 68],
+ [75, 90, 105, 68],
+ [120, 135, 150, 68],
+ [165, 180, 195, 68],
+ [210, 225, 240, 68],
+ [17, 34, 51, 45],
+ [68, 85, 102, 45],
+ [119, 136, 153, 45],
+ [170, 187, 204, 45],
+ [221, 238, 0, 45],
+ [17, 51, 68, 60],
+ [85, 102, 119, 60],
+ [136, 153, 170, 60],
+ [187, 204, 221, 60],
+ [238, 0, 0, 60],
+ [17, 34, 51, 90],
+ [68, 85, 102, 90],
+ [119, 136, 153, 90],
+ [170, 187, 204, 90],
+ [221, 238, 0, 90],
+ [17, 34, 51, 120],
+ [68, 85, 102, 120],
+ [119, 136, 153, 120],
+ [170, 187, 204, 120],
+ [221, 238, 0, 120],
+ [20, 25, 30, 51],
+ [35, 40, 45, 51],
+ [50, 55, 60, 51],
+ [65, 70, 75, 51],
+ [80, 85, 90, 51],
+ [95, 100, 105, 51],
+ [110, 115, 120, 51],
+ [125, 130, 135, 51],
+ [140, 145, 150, 51],
+ [155, 160, 165, 51],
+ [170, 175, 180, 51],
+ [185, 190, 195, 51],
+ [200, 205, 210, 51],
+ [215, 220, 225, 51],
+ [230, 235, 240, 51],
+ [245, 250, 0, 51],
+ [45, 60, 75, 17],
+ [90, 105, 120, 17],
+ [135, 150, 165, 17],
+ [180, 195, 210, 17],
+ [225, 240, 0, 17],
+ [45, 75, 90, 34],
+ [105, 120, 135, 34],
+ [150, 165, 180, 34],
+ [195, 210, 225, 34],
+ [240, 0, 0, 34],
+ [51, 153, 204, 20],
+ [51, 102, 153, 25],
+ [204, 0, 0, 25],
+ [51, 85, 119, 30],
+ [136, 153, 170, 30],
+ [187, 204, 221, 30],
+ [238, 0, 0, 30],
+ [51, 102, 153, 35],
+ [204, 0, 0, 35],
+ [51, 102, 153, 40],
+ [204, 0, 0, 40],
+ [51, 102, 153, 50],
+ [204, 0, 0, 50],
+ [51, 102, 153, 55],
+ [204, 0, 0, 55],
+ [51, 102, 153, 70],
+ [204, 0, 0, 70],
+ [51, 102, 153, 80],
+ [204, 0, 0, 80],
+ [51, 102, 153, 100],
+ [204, 0, 0, 100],
+ [51, 102, 153, 110],
+ [204, 0, 0, 110],
+ [65, 67, 69, 0],
+ [71, 73, 75, 0],
+ [77, 79, 81, 0],
+ [83, 85, 87, 0],
+ [89, 91, 93, 0],
+ [95, 97, 99, 0],
+ [101, 103, 105, 0],
+ [107, 109, 111, 0],
+ [113, 115, 117, 0],
+ [119, 121, 123, 0],
+ [125, 127, 128, 0],
+ [129, 130, 131, 0],
+ [132, 133, 134, 0],
+ [135, 136, 137, 0],
+ [138, 139, 140, 0],
+ [141, 142, 143, 0],
+ [144, 145, 146, 0],
+ [147, 148, 149, 0],
+ [150, 151, 152, 0],
+ [153, 154, 155, 0],
+ [156, 157, 158, 0],
+ [159, 160, 161, 0],
+ [162, 163, 164, 0],
+ [165, 166, 167, 0],
+ [168, 169, 170, 0],
+ [171, 172, 173, 0],
+ [174, 175, 176, 0],
+ [177, 178, 179, 0],
+ [180, 181, 182, 0],
+ [183, 184, 185, 0],
+ [186, 187, 188, 0],
+ [189, 190, 191, 0],
+ [192, 193, 194, 0],
+ [195, 196, 197, 0],
+ [198, 199, 200, 0],
+ [201, 202, 203, 0],
+ [204, 205, 206, 0],
+ [207, 208, 209, 0],
+ [210, 211, 212, 0],
+ [213, 214, 215, 0],
+ [216, 217, 218, 0],
+ [219, 220, 221, 0],
+ [222, 223, 224, 0],
+ [225, 226, 227, 0],
+ [228, 229, 230, 0],
+ [231, 232, 233, 0],
+ [234, 235, 236, 0],
+ [237, 238, 239, 0],
+ [240, 241, 242, 0],
+ [243, 244, 245, 0],
+ [246, 247, 248, 0],
+ [249, 250, 251, 0],
+ [252, 253, 254, 0],
+ [68, 85, 102, 15],
+ [119, 136, 153, 15],
+ [170, 187, 204, 15],
+ [221, 238, 0, 15],
+ [85, 170, 0, 3],
+ [85, 170, 0, 6],
+ [85, 170, 0, 9],
+ [85, 170, 0, 12],
+ [85, 170, 0, 18],
+ [85, 170, 0, 21],
+ [85, 170, 0, 24],
+ [85, 170, 0, 27],
+ [85, 170, 0, 33],
+ [85, 170, 0, 36],
+ [85, 170, 0, 39],
+ [85, 170, 0, 42],
+ [85, 170, 0, 48],
+ [85, 170, 0, 54],
+ [85, 170, 0, 57],
+ [85, 170, 0, 63],
+ [85, 170, 0, 66],
+ [85, 170, 0, 72],
+ [85, 170, 0, 78],
+ [85, 170, 0, 84],
+ [85, 170, 0, 96],
+ [85, 170, 0, 108],
+ [85, 170, 0, 114],
+ [85, 170, 0, 126],
+ [102, 153, 204, 5],
+ [153, 204, 0, 10],
+ ];
+ let rgb_pixels = vec![
+ [190, 152, 114],
+ [76, 38, 190],
+ [188, 188, 188],
+ [186, 124, 62],
+ [184, 184, 184],
+ [182, 182, 182],
+ [180, 168, 156],
+ [144, 132, 120],
+ [108, 96, 84],
+ [72, 60, 48],
+ [36, 24, 12],
+ [178, 178, 178],
+ [176, 176, 176],
+ [174, 116, 58],
+ [172, 172, 172],
+ [170, 168, 166],
+ [164, 162, 160],
+ [158, 156, 154],
+ [152, 150, 148],
+ [146, 144, 142],
+ [140, 138, 136],
+ [134, 132, 130],
+ [128, 126, 124],
+ [122, 120, 118],
+ [116, 114, 112],
+ [110, 108, 106],
+ [104, 102, 100],
+ [98, 96, 94],
+ [92, 90, 88],
+ [86, 84, 82],
+ [80, 78, 76],
+ [74, 72, 70],
+ [68, 66, 64],
+ [62, 60, 58],
+ [56, 54, 52],
+ [50, 48, 46],
+ [44, 42, 40],
+ [38, 36, 34],
+ [32, 30, 28],
+ [26, 24, 22],
+ [20, 18, 16],
+ [14, 12, 10],
+ [8, 6, 4],
+ [2, 170, 170],
+ [168, 112, 56],
+ [166, 166, 166],
+ [164, 164, 164],
+ [162, 108, 54],
+ [160, 128, 96],
+ [64, 32, 160],
+ [158, 158, 158],
+ [156, 104, 52],
+ [154, 154, 154],
+ [152, 152, 152],
+ [150, 140, 130],
+ [120, 110, 100],
+ [90, 80, 70],
+ [60, 50, 40],
+ [30, 20, 10],
+ [148, 148, 148],
+ [146, 146, 146],
+ [144, 96, 48],
+ [142, 142, 142],
+ [140, 112, 84],
+ [56, 28, 140],
+ [138, 92, 46],
+ [136, 128, 120],
+ [112, 104, 96],
+ [88, 80, 72],
+ [64, 56, 48],
+ [40, 32, 24],
+ [16, 8, 136],
+ [134, 134, 134],
+ [132, 88, 44],
+ [130, 104, 78],
+ [52, 26, 130],
+ [128, 128, 128],
+ [127, 127, 127],
+ [126, 84, 42],
+ [125, 100, 75],
+ [50, 25, 125],
+ [124, 124, 124],
+ [123, 82, 41],
+ [122, 122, 122],
+ [121, 121, 121],
+ [120, 112, 104],
+ [96, 88, 80],
+ [72, 64, 56],
+ [48, 40, 32],
+ [24, 16, 8],
+ [119, 112, 105],
+ [98, 91, 84],
+ [77, 70, 63],
+ [56, 49, 42],
+ [35, 28, 21],
+ [14, 7, 119],
+ [118, 118, 118],
+ [117, 78, 39],
+ [116, 116, 116],
+ [115, 92, 69],
+ [46, 23, 115],
+ [114, 76, 38],
+ [113, 113, 113],
+ [112, 112, 112],
+ [111, 74, 37],
+ [110, 88, 66],
+ [44, 22, 110],
+ [109, 109, 109],
+ [108, 72, 36],
+ [107, 107, 107],
+ [106, 106, 106],
+ [105, 98, 91],
+ [84, 77, 70],
+ [63, 56, 49],
+ [42, 35, 28],
+ [21, 14, 7],
+ [104, 104, 104],
+ [103, 103, 103],
+ [102, 100, 98],
+ [96, 94, 92],
+ [90, 88, 86],
+ [84, 82, 80],
+ [78, 76, 74],
+ [72, 70, 68],
+ [66, 64, 62],
+ [60, 58, 56],
+ [54, 52, 50],
+ [48, 46, 44],
+ [42, 40, 38],
+ [36, 34, 32],
+ [30, 28, 26],
+ [24, 22, 20],
+ [18, 16, 14],
+ [12, 10, 8],
+ [6, 4, 2],
+ [101, 101, 101],
+ [100, 80, 60],
+ [40, 20, 100],
+ [99, 66, 33],
+ [98, 98, 98],
+ [97, 97, 97],
+ [96, 64, 32],
+ [95, 76, 57],
+ [38, 19, 95],
+ [94, 94, 94],
+ [93, 62, 31],
+ [92, 92, 92],
+ [91, 91, 91],
+ [90, 84, 78],
+ [72, 66, 60],
+ [54, 48, 42],
+ [36, 30, 24],
+ [18, 12, 6],
+ [89, 89, 89],
+ [88, 88, 88],
+ [87, 58, 29],
+ [86, 86, 86],
+ [85, 84, 83],
+ [82, 81, 80],
+ [79, 78, 77],
+ [76, 75, 74],
+ [73, 72, 71],
+ [70, 69, 68],
+ [67, 66, 65],
+ [64, 63, 62],
+ [61, 60, 59],
+ [58, 57, 56],
+ [55, 54, 53],
+ [52, 51, 50],
+ [49, 48, 47],
+ [46, 45, 44],
+ [43, 42, 41],
+ [40, 39, 38],
+ [37, 36, 35],
+ [34, 33, 32],
+ [31, 30, 29],
+ [28, 27, 26],
+ [25, 24, 23],
+ [22, 21, 20],
+ [19, 18, 17],
+ [16, 15, 14],
+ [13, 12, 11],
+ [10, 9, 8],
+ [7, 6, 5],
+ [4, 3, 2],
+ [1, 85, 85],
+ [84, 56, 28],
+ [83, 83, 83],
+ [82, 82, 82],
+ [81, 54, 27],
+ [80, 64, 48],
+ [32, 16, 80],
+ [79, 79, 79],
+ [78, 52, 26],
+ [77, 77, 77],
+ [76, 76, 76],
+ [75, 70, 65],
+ [60, 55, 50],
+ [45, 40, 35],
+ [30, 25, 20],
+ [15, 10, 5],
+ [74, 74, 74],
+ [73, 73, 73],
+ [72, 48, 24],
+ [71, 71, 71],
+ [70, 56, 42],
+ [28, 14, 70],
+ [69, 46, 23],
+ [68, 64, 60],
+ [56, 52, 48],
+ [44, 40, 36],
+ [32, 28, 24],
+ [20, 16, 12],
+ [8, 4, 68],
+ [67, 67, 67],
+ [66, 44, 22],
+ [65, 52, 39],
+ [26, 13, 65],
+ [64, 64, 64],
+ [63, 42, 21],
+ [62, 62, 62],
+ [61, 61, 61],
+ [60, 56, 52],
+ [48, 44, 40],
+ [36, 32, 28],
+ [24, 20, 16],
+ [12, 8, 4],
+ [59, 59, 59],
+ [58, 58, 58],
+ [57, 38, 19],
+ [56, 56, 56],
+ [55, 44, 33],
+ [22, 11, 55],
+ [54, 36, 18],
+ [53, 53, 53],
+ [52, 52, 52],
+ [51, 50, 49],
+ [48, 47, 46],
+ [45, 44, 43],
+ [42, 41, 40],
+ [39, 38, 37],
+ [36, 35, 34],
+ [33, 32, 31],
+ [30, 29, 28],
+ [27, 26, 25],
+ [24, 23, 22],
+ [21, 20, 19],
+ [18, 17, 16],
+ [15, 14, 13],
+ [12, 11, 10],
+ [9, 8, 7],
+ [6, 5, 4],
+ [3, 2, 1],
+ [50, 40, 30],
+ [20, 10, 50],
+ [49, 49, 49],
+ [48, 32, 16],
+ [47, 47, 47],
+ [46, 46, 46],
+ [45, 42, 39],
+ [36, 33, 30],
+ [27, 24, 21],
+ [18, 15, 12],
+ [9, 6, 3],
+ [44, 44, 44],
+ [43, 43, 43],
+ [42, 28, 14],
+ [41, 41, 41],
+ [40, 32, 24],
+ [16, 8, 40],
+ [39, 26, 13],
+ [38, 38, 38],
+ [37, 37, 37],
+ [36, 24, 12],
+ [35, 28, 21],
+ [14, 7, 35],
+ [34, 32, 30],
+ [28, 26, 24],
+ [22, 20, 18],
+ [16, 14, 12],
+ [10, 8, 6],
+ [4, 2, 34],
+ [33, 22, 11],
+ [32, 32, 32],
+ [31, 31, 31],
+ [30, 28, 26],
+ [24, 22, 20],
+ [18, 16, 14],
+ [12, 10, 8],
+ [6, 4, 2],
+ [29, 29, 29],
+ [28, 28, 28],
+ [27, 18, 9],
+ [26, 26, 26],
+ [25, 20, 15],
+ [10, 5, 25],
+ [24, 16, 8],
+ [23, 23, 23],
+ [22, 22, 22],
+ [21, 14, 7],
+ [20, 16, 12],
+ [8, 4, 20],
+ [19, 19, 19],
+ [18, 12, 6],
+ [17, 16, 15],
+ [14, 13, 12],
+ [11, 10, 9],
+ [8, 7, 6],
+ [5, 4, 3],
+ [2, 1, 17],
+ [16, 16, 16],
+ [15, 14, 13],
+ [12, 11, 10],
+ [9, 8, 7],
+ [6, 5, 4],
+ [3, 2, 1],
+ [14, 14, 14],
+ [13, 13, 13],
+ [12, 8, 4],
+ [11, 11, 11],
+ [10, 8, 6],
+ [4, 2, 10],
+ [9, 6, 3],
+ [8, 8, 8],
+ [7, 7, 7],
+ [6, 4, 2],
+ [5, 4, 3],
+ [2, 1, 5],
+ [4, 4, 4],
+ [3, 2, 1],
+ [2, 2, 2],
+ [1, 1, 1],
+ [150, 144, 138],
+ [132, 129, 126],
+ [123, 120, 117],
+ [114, 111, 108],
+ [105, 102, 99],
+ [96, 93, 90],
+ [87, 84, 81],
+ [78, 75, 72],
+ [69, 66, 63],
+ [60, 57, 54],
+ [51, 48, 45],
+ [42, 39, 36],
+ [33, 30, 27],
+ [24, 21, 18],
+ [15, 12, 9],
+ [6, 3, 153],
+ [176, 154, 143],
+ [132, 121, 110],
+ [99, 88, 77],
+ [66, 55, 44],
+ [33, 22, 11],
+ [196, 182, 168],
+ [154, 140, 126],
+ [112, 98, 84],
+ [70, 56, 42],
+ [28, 14, 210],
+ [182, 156, 143],
+ [130, 117, 104],
+ [91, 78, 65],
+ [52, 39, 26],
+ [13, 195, 195],
+ [154, 143, 132],
+ [121, 110, 99],
+ [88, 77, 66],
+ [55, 44, 33],
+ [22, 11, 165],
+ [126, 117, 108],
+ [99, 90, 81],
+ [72, 63, 54],
+ [45, 36, 27],
+ [18, 9, 135],
+ [188, 184, 180],
+ [176, 172, 168],
+ [164, 160, 156],
+ [152, 148, 144],
+ [140, 136, 132],
+ [128, 124, 120],
+ [116, 112, 108],
+ [104, 100, 96],
+ [92, 88, 84],
+ [80, 76, 72],
+ [68, 64, 60],
+ [56, 52, 48],
+ [44, 40, 36],
+ [32, 28, 24],
+ [20, 16, 12],
+ [8, 4, 204],
+ [196, 182, 168],
+ [154, 140, 126],
+ [112, 98, 84],
+ [70, 56, 42],
+ [28, 14, 238],
+ [182, 156, 143],
+ [130, 117, 104],
+ [91, 78, 65],
+ [52, 39, 26],
+ [13, 221, 221],
+ [188, 94, 47],
+ [184, 138, 92],
+ [46, 230, 230],
+ [180, 150, 120],
+ [105, 90, 75],
+ [60, 45, 30],
+ [15, 225, 225],
+ [176, 132, 88],
+ [44, 220, 220],
+ [172, 129, 86],
+ [43, 215, 215],
+ [164, 123, 82],
+ [41, 205, 205],
+ [160, 120, 80],
+ [40, 200, 200],
+ [148, 111, 74],
+ [37, 185, 185],
+ [140, 105, 70],
+ [35, 175, 175],
+ [124, 93, 62],
+ [31, 155, 155],
+ [116, 87, 58],
+ [29, 145, 145],
+ [190, 188, 186],
+ [184, 182, 180],
+ [178, 176, 174],
+ [172, 170, 168],
+ [166, 164, 162],
+ [160, 158, 156],
+ [154, 152, 150],
+ [148, 146, 144],
+ [142, 140, 138],
+ [136, 134, 132],
+ [130, 128, 127],
+ [126, 125, 124],
+ [123, 122, 121],
+ [120, 119, 118],
+ [117, 116, 115],
+ [114, 113, 112],
+ [111, 110, 109],
+ [108, 107, 106],
+ [105, 104, 103],
+ [102, 101, 100],
+ [99, 98, 97],
+ [96, 95, 94],
+ [93, 92, 91],
+ [90, 89, 88],
+ [87, 86, 85],
+ [84, 83, 82],
+ [81, 80, 79],
+ [78, 77, 76],
+ [75, 74, 73],
+ [72, 71, 70],
+ [69, 68, 67],
+ [66, 65, 64],
+ [63, 62, 61],
+ [60, 59, 58],
+ [57, 56, 55],
+ [54, 53, 52],
+ [51, 50, 49],
+ [48, 47, 46],
+ [45, 44, 43],
+ [42, 41, 40],
+ [39, 38, 37],
+ [36, 35, 34],
+ [33, 32, 31],
+ [30, 29, 28],
+ [27, 26, 25],
+ [24, 23, 22],
+ [21, 20, 19],
+ [18, 17, 16],
+ [15, 14, 13],
+ [12, 11, 10],
+ [9, 8, 7],
+ [6, 5, 4],
+ [3, 2, 1],
+ [176, 160, 144],
+ [128, 112, 96],
+ [80, 64, 48],
+ [32, 16, 240],
+ [168, 84, 252],
+ [166, 83, 249],
+ [164, 82, 246],
+ [162, 81, 243],
+ [158, 79, 237],
+ [156, 78, 234],
+ [154, 77, 231],
+ [152, 76, 228],
+ [148, 74, 222],
+ [146, 73, 219],
+ [144, 72, 216],
+ [142, 71, 213],
+ [138, 69, 207],
+ [134, 67, 201],
+ [132, 66, 198],
+ [128, 64, 192],
+ [126, 63, 189],
+ [122, 61, 183],
+ [118, 59, 177],
+ [114, 57, 171],
+ [106, 53, 159],
+ [98, 49, 147],
+ [94, 47, 141],
+ [86, 43, 129],
+ [150, 100, 50],
+ [98, 49, 245],
+ ];
+ for (&cmyk_pixel, rgb_pixel) in cmyk_pixels.iter().zip(rgb_pixels) {
+ single_pix_correct(cmyk_pixel, rgb_pixel);
+ }
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_cmyk_to_rgb(b: &mut Bencher) {
+ let mut v = Vec::with_capacity((W * H * 4) as usize);
+ for c in 0..=255 {
+ for k in 0..=255 {
+ v.push(c as u8);
+ v.push(0);
+ v.push(0);
+ v.push(k as u8);
+ }
+ }
+
+ b.iter(|| {
+ cmyk_to_rgb(&v);
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_cmyk_to_rgb_single(b: &mut Bencher) {
+ b.iter(|| {
+ cmyk_to_rgb(&[128, 128, 128, 128]);
+ });
+ }
+}
diff --git a/vendor/image/src/codecs/jpeg/encoder.rs b/vendor/image/src/codecs/jpeg/encoder.rs
new file mode 100644
index 0000000..edb2a05
--- /dev/null
+++ b/vendor/image/src/codecs/jpeg/encoder.rs
@@ -0,0 +1,1074 @@
+#![allow(clippy::too_many_arguments)]
+
+use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::io::{self, Write};
+
+use crate::error::{
+ ImageError, ImageResult, ParameterError, ParameterErrorKind, UnsupportedError,
+ UnsupportedErrorKind,
+};
+use crate::image::{ImageEncoder, ImageFormat};
+use crate::utils::clamp;
+use crate::{ColorType, GenericImageView, ImageBuffer, Luma, LumaA, Pixel, Rgb, Rgba};
+
+use super::entropy::build_huff_lut_const;
+use super::transform;
+use crate::traits::PixelWithColorType;
+
+// Markers
+// Baseline DCT
+static SOF0: u8 = 0xC0;
+// Huffman Tables
+static DHT: u8 = 0xC4;
+// Start of Image (standalone)
+static SOI: u8 = 0xD8;
+// End of image (standalone)
+static EOI: u8 = 0xD9;
+// Start of Scan
+static SOS: u8 = 0xDA;
+// Quantization Tables
+static DQT: u8 = 0xDB;
+// Application segments start and end
+static APP0: u8 = 0xE0;
+
+// section K.1
+// table K.1
+#[rustfmt::skip]
+static STD_LUMA_QTABLE: [u8; 64] = [
+ 16, 11, 10, 16, 24, 40, 51, 61,
+ 12, 12, 14, 19, 26, 58, 60, 55,
+ 14, 13, 16, 24, 40, 57, 69, 56,
+ 14, 17, 22, 29, 51, 87, 80, 62,
+ 18, 22, 37, 56, 68, 109, 103, 77,
+ 24, 35, 55, 64, 81, 104, 113, 92,
+ 49, 64, 78, 87, 103, 121, 120, 101,
+ 72, 92, 95, 98, 112, 100, 103, 99,
+];
+
+// table K.2
+#[rustfmt::skip]
+static STD_CHROMA_QTABLE: [u8; 64] = [
+ 17, 18, 24, 47, 99, 99, 99, 99,
+ 18, 21, 26, 66, 99, 99, 99, 99,
+ 24, 26, 56, 99, 99, 99, 99, 99,
+ 47, 66, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+];
+
+// section K.3
+// Code lengths and values for table K.3
+static STD_LUMA_DC_CODE_LENGTHS: [u8; 16] = [
+ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+];
+
+static STD_LUMA_DC_VALUES: [u8; 12] = [
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+];
+
+static STD_LUMA_DC_HUFF_LUT: [(u8, u16); 256] =
+ build_huff_lut_const(&STD_LUMA_DC_CODE_LENGTHS, &STD_LUMA_DC_VALUES);
+
+// Code lengths and values for table K.4
+static STD_CHROMA_DC_CODE_LENGTHS: [u8; 16] = [
+ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+];
+
+static STD_CHROMA_DC_VALUES: [u8; 12] = [
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+];
+
+static STD_CHROMA_DC_HUFF_LUT: [(u8, u16); 256] =
+ build_huff_lut_const(&STD_CHROMA_DC_CODE_LENGTHS, &STD_CHROMA_DC_VALUES);
+
+// Code lengths and values for table K.5
+static STD_LUMA_AC_CODE_LENGTHS: [u8; 16] = [
+ 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D,
+];
+
+static STD_LUMA_AC_VALUES: [u8; 162] = [
+ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0,
+ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+ 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+ 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+ 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5,
+ 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2,
+ 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
+ 0xF9, 0xFA,
+];
+
+static STD_LUMA_AC_HUFF_LUT: [(u8, u16); 256] =
+ build_huff_lut_const(&STD_LUMA_AC_CODE_LENGTHS, &STD_LUMA_AC_VALUES);
+
+// Code lengths and values for table K.6
+static STD_CHROMA_AC_CODE_LENGTHS: [u8; 16] = [
+ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
+];
+static STD_CHROMA_AC_VALUES: [u8; 162] = [
+ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0,
+ 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26,
+ 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5,
+ 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3,
+ 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA,
+ 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8,
+ 0xF9, 0xFA,
+];
+
+static STD_CHROMA_AC_HUFF_LUT: [(u8, u16); 256] =
+ build_huff_lut_const(&STD_CHROMA_AC_CODE_LENGTHS, &STD_CHROMA_AC_VALUES);
+
+static DCCLASS: u8 = 0;
+static ACCLASS: u8 = 1;
+
+static LUMADESTINATION: u8 = 0;
+static CHROMADESTINATION: u8 = 1;
+
+static LUMAID: u8 = 1;
+static CHROMABLUEID: u8 = 2;
+static CHROMAREDID: u8 = 3;
+
+/// The permutation of DCT coefficients: entry `i` gives the natural (row-major) index of the `i`-th coefficient in zigzag scan order.
+#[rustfmt::skip]
+static UNZIGZAG: [u8; 64] = [
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63,
+];
+
+/// A representation of a JPEG component
+#[derive(Copy, Clone)]
+struct Component {
+ /// The Component's identifier
+ id: u8,
+
+ /// Horizontal sampling factor
+ h: u8,
+
+ /// Vertical sampling factor
+ v: u8,
+
+ /// The quantization table selector
+ tq: u8,
+
+ /// Index to the Huffman DC Table
+ dc_table: u8,
+
+ /// Index to the AC Huffman Table
+ ac_table: u8,
+
+ /// The dc prediction of the component
+ _dc_pred: i32,
+}
+
+pub(crate) struct BitWriter<W> {
+ w: W,
+ accumulator: u32,
+ nbits: u8,
+}
+
+impl<W: Write> BitWriter<W> {
+ fn new(w: W) -> Self {
+ BitWriter {
+ w,
+ accumulator: 0,
+ nbits: 0,
+ }
+ }
+
+ fn write_bits(&mut self, bits: u16, size: u8) -> io::Result<()> {
+ if size == 0 {
+ return Ok(());
+ }
+
+ self.nbits += size;
+ self.accumulator |= u32::from(bits) << (32 - self.nbits) as usize;
+
+ while self.nbits >= 8 {
+ let byte = self.accumulator >> 24;
+ self.w.write_all(&[byte as u8])?;
+
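+ // JPEG byte stuffing: a 0xFF byte in the entropy-coded data must be followed by 0x00 so it is not mistaken for a marker.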
+ if byte == 0xFF {
+ self.w.write_all(&[0x00])?;
+ }
+
+ self.nbits -= 8;
+ self.accumulator <<= 8;
+ }
+
+ Ok(())
+ }
+
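+ // Pads any partially written byte with 1-bits, which the JPEG spec requires for fill bits.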
+ fn pad_byte(&mut self) -> io::Result<()> {
+ self.write_bits(0x7F, 7)
+ }
+
+ fn huffman_encode(&mut self, val: u8, table: &[(u8, u16); 256]) -> io::Result<()> {
+ let (size, code) = table[val as usize];
+
+ if size > 16 {
+ panic!("bad huffman value");
+ }
+
+ self.write_bits(code, size)
+ }
+
+ fn write_block(
+ &mut self,
+ block: &[i32; 64],
+ prevdc: i32,
+ dctable: &[(u8, u16); 256],
+ actable: &[(u8, u16); 256],
+ ) -> io::Result<i32> {
+ // Differential DC encoding
+ let dcval = block[0];
+ let diff = dcval - prevdc;
+ let (size, value) = encode_coefficient(diff);
+
+ self.huffman_encode(size, dctable)?;
+ self.write_bits(value, size)?;
+
+ // Figure F.2
+ let mut zero_run = 0;
+
+ for &k in &UNZIGZAG[1..] {
+ if block[k as usize] == 0 {
+ zero_run += 1;
+ } else {
+ while zero_run > 15 {
+ self.huffman_encode(0xF0, actable)?;
+ zero_run -= 16;
+ }
+
+ let (size, value) = encode_coefficient(block[k as usize]);
+ let symbol = (zero_run << 4) | size;
+
+ self.huffman_encode(symbol, actable)?;
+ self.write_bits(value, size)?;
+
+ zero_run = 0;
+ }
+ }
+
+ if block[UNZIGZAG[63] as usize] == 0 {
+ self.huffman_encode(0x00, actable)?;
+ }
+
+ Ok(dcval)
+ }
+
+ fn write_marker(&mut self, marker: u8) -> io::Result<()> {
+ self.w.write_all(&[0xFF, marker])
+ }
+
+ fn write_segment(&mut self, marker: u8, data: &[u8]) -> io::Result<()> {
+ self.w.write_all(&[0xFF, marker])?;
+ self.w.write_all(&(data.len() as u16 + 2).to_be_bytes())?;
+ self.w.write_all(data)
+ }
+}
+
+/// Represents a unit in which the density of an image is measured
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum PixelDensityUnit {
+ /// Represents the absence of a unit; the values indicate only a
+ /// [pixel aspect ratio](https://en.wikipedia.org/wiki/Pixel_aspect_ratio)
+ PixelAspectRatio,
+
+ /// Pixels per inch (2.54 cm)
+ Inches,
+
+ /// Pixels per centimeter
+ Centimeters,
+}
+
+/// Represents the pixel density of an image
+///
+/// For example, a 300 DPI image is represented by:
+///
+/// ```rust
+/// use image::codecs::jpeg::*;
+/// let hdpi = PixelDensity::dpi(300);
+/// assert_eq!(hdpi, PixelDensity {density: (300,300), unit: PixelDensityUnit::Inches})
+/// ```
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub struct PixelDensity {
+ /// A couple of values for (Xdensity, Ydensity)
+ pub density: (u16, u16),
+ /// The unit in which the density is measured
+ pub unit: PixelDensityUnit,
+}
+
+impl PixelDensity {
+ /// Creates the most common pixel density type:
+ /// the horizontal and the vertical density are equal,
+ /// and measured in pixels per inch.
+ pub fn dpi(density: u16) -> Self {
+ PixelDensity {
+ density: (density, density),
+ unit: PixelDensityUnit::Inches,
+ }
+ }
+}
+
+impl Default for PixelDensity {
+ /// Returns a pixel density with a pixel aspect ratio of 1
+ fn default() -> Self {
+ PixelDensity {
+ density: (1, 1),
+ unit: PixelDensityUnit::PixelAspectRatio,
+ }
+ }
+}
+
+/// The representation of a JPEG encoder
+pub struct JpegEncoder<W> {
+ writer: BitWriter<W>,
+
+ components: Vec<Component>,
+ tables: Vec<[u8; 64]>,
+
+ luma_dctable: Cow<'static, [(u8, u16); 256]>,
+ luma_actable: Cow<'static, [(u8, u16); 256]>,
+ chroma_dctable: Cow<'static, [(u8, u16); 256]>,
+ chroma_actable: Cow<'static, [(u8, u16); 256]>,
+
+ pixel_density: PixelDensity,
+}
+
+impl<W: Write> JpegEncoder<W> {
+ /// Create a new encoder that writes its output to ```w```
+ pub fn new(w: W) -> JpegEncoder<W> {
+ JpegEncoder::new_with_quality(w, 75)
+ }
+
+ /// Create a new encoder that writes its output to ```w```, and has
+ /// the quality parameter ```quality``` with a value in the range 1-100
+ /// where 1 is the worst and 100 is the best.
+ pub fn new_with_quality(w: W, quality: u8) -> JpegEncoder<W> {
+ let components = vec![
+ Component {
+ id: LUMAID,
+ h: 1,
+ v: 1,
+ tq: LUMADESTINATION,
+ dc_table: LUMADESTINATION,
+ ac_table: LUMADESTINATION,
+ _dc_pred: 0,
+ },
+ Component {
+ id: CHROMABLUEID,
+ h: 1,
+ v: 1,
+ tq: CHROMADESTINATION,
+ dc_table: CHROMADESTINATION,
+ ac_table: CHROMADESTINATION,
+ _dc_pred: 0,
+ },
+ Component {
+ id: CHROMAREDID,
+ h: 1,
+ v: 1,
+ tq: CHROMADESTINATION,
+ dc_table: CHROMADESTINATION,
+ ac_table: CHROMADESTINATION,
+ _dc_pred: 0,
+ },
+ ];
+
+ // Derive our quantization table scaling value using the libjpeg algorithm
+ let scale = u32::from(clamp(quality, 1, 100));
+ let scale = if scale < 50 {
+ 5000 / scale
+ } else {
+ 200 - scale * 2
+ };
+
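+ // e.g. quality 50 leaves the base tables unchanged (scale = 100), quality 75 roughly halves the entries (scale = 50), and quality 25 doubles them (scale = 200)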
+ let mut tables = vec![STD_LUMA_QTABLE, STD_CHROMA_QTABLE];
+ tables.iter_mut().for_each(|t| {
+ t.iter_mut().for_each(|v| {
+ *v = clamp(
+ (u32::from(*v) * scale + 50) / 100,
+ 1,
+ u32::from(u8::max_value()),
+ ) as u8;
+ })
+ });
+
+ JpegEncoder {
+ writer: BitWriter::new(w),
+
+ components,
+ tables,
+
+ luma_dctable: Cow::Borrowed(&STD_LUMA_DC_HUFF_LUT),
+ luma_actable: Cow::Borrowed(&STD_LUMA_AC_HUFF_LUT),
+ chroma_dctable: Cow::Borrowed(&STD_CHROMA_DC_HUFF_LUT),
+ chroma_actable: Cow::Borrowed(&STD_CHROMA_AC_HUFF_LUT),
+
+ pixel_density: PixelDensity::default(),
+ }
+ }
+
+ /// Set the pixel density of the images the encoder will encode.
+ /// If this method is not called, then a default pixel aspect ratio of 1x1 will be applied,
+ /// and no DPI information will be stored in the image.
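+ ///
+ /// A minimal sketch of storing 300 DPI in the encoded file:
+ ///
+ /// ```no_run
+ /// use image::codecs::jpeg::{JpegEncoder, PixelDensity};
+ ///
+ /// let mut out = Vec::new();
+ /// let mut encoder = JpegEncoder::new(&mut out);
+ /// encoder.set_pixel_density(PixelDensity::dpi(300));
+ /// ```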
+ pub fn set_pixel_density(&mut self, pixel_density: PixelDensity) {
+ self.pixel_density = pixel_density;
+ }
+
+ /// Encodes the image stored in the raw byte buffer `image`,
+ /// which has dimensions `width` and `height`
+ /// and `ColorType` `color_type`.
+ ///
+ /// The image is encoded with subsampling ratio 4:4:4 (no chroma subsampling).
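+ ///
+ /// A minimal sketch encoding a hypothetical 2x2 RGB buffer:
+ ///
+ /// ```no_run
+ /// use image::codecs::jpeg::JpegEncoder;
+ /// use image::ColorType;
+ ///
+ /// let pixels = [0u8; 12]; // 2 x 2 pixels, 3 bytes (RGB) each
+ /// let mut out = Vec::new();
+ /// let mut encoder = JpegEncoder::new_with_quality(&mut out, 80);
+ /// encoder.encode(&pixels, 2, 2, ColorType::Rgb8).expect("encoding failed");
+ /// ```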
+ pub fn encode(
+ &mut self,
+ image: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ match color_type {
+ ColorType::L8 => {
+ let image: ImageBuffer<Luma<_>, _> =
+ ImageBuffer::from_raw(width, height, image).unwrap();
+ self.encode_image(&image)
+ }
+ ColorType::La8 => {
+ let image: ImageBuffer<LumaA<_>, _> =
+ ImageBuffer::from_raw(width, height, image).unwrap();
+ self.encode_image(&image)
+ }
+ ColorType::Rgb8 => {
+ let image: ImageBuffer<Rgb<_>, _> =
+ ImageBuffer::from_raw(width, height, image).unwrap();
+ self.encode_image(&image)
+ }
+ ColorType::Rgba8 => {
+ let image: ImageBuffer<Rgba<_>, _> =
+ ImageBuffer::from_raw(width, height, image).unwrap();
+ self.encode_image(&image)
+ }
+ _ => Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Jpeg.into(),
+ UnsupportedErrorKind::Color(color_type.into()),
+ ),
+ )),
+ }
+ }
+
+ /// Encodes the given image.
+ ///
+ /// As a special feature, this does not require the whole image to be present in memory at the
+ /// same time, so the pixel data may be computed on the fly; this is why the method exists on this
+ /// encoder but not on others. Instead, the encoder iterates over 8-by-8 blocks of pixels at
+ /// a time, inspecting each pixel exactly once. You can rely on this behaviour when calling
+ /// this method.
+ ///
+ /// The image is encoded with subsampling ratio 4:4:4 (no chroma subsampling).
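+ ///
+ /// A minimal sketch using an in-memory `RgbImage` (a hypothetical all-black 16x16 buffer):
+ ///
+ /// ```no_run
+ /// use image::codecs::jpeg::JpegEncoder;
+ /// use image::RgbImage;
+ ///
+ /// let img = RgbImage::new(16, 16);
+ /// let mut out = Vec::new();
+ /// let mut encoder = JpegEncoder::new_with_quality(&mut out, 90);
+ /// encoder.encode_image(&img).expect("encoding failed");
+ /// ```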
+ pub fn encode_image<I: GenericImageView>(&mut self, image: &I) -> ImageResult<()>
+ where
+ I::Pixel: PixelWithColorType,
+ {
+ let n = I::Pixel::CHANNEL_COUNT;
+ let color_type = I::Pixel::COLOR_TYPE;
+ let num_components = if n == 1 || n == 2 { 1 } else { 3 };
+
+ self.writer.write_marker(SOI)?;
+
+ let mut buf = Vec::new();
+
+ build_jfif_header(&mut buf, self.pixel_density);
+ self.writer.write_segment(APP0, &buf)?;
+
+ build_frame_header(
+ &mut buf,
+ 8,
+ // TODO: not idiomatic yet. Should be an EncodingError and mention jpg. Further it
+ // should check dimensions prior to writing.
+ u16::try_from(image.width()).map_err(|_| {
+ ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ ))
+ })?,
+ u16::try_from(image.height()).map_err(|_| {
+ ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ ))
+ })?,
+ &self.components[..num_components],
+ );
+ self.writer.write_segment(SOF0, &buf)?;
+
+ assert_eq!(self.tables.len(), 2);
+ let numtables = if num_components == 1 { 1 } else { 2 };
+
+ for (i, table) in self.tables[..numtables].iter().enumerate() {
+ build_quantization_segment(&mut buf, 8, i as u8, table);
+ self.writer.write_segment(DQT, &buf)?;
+ }
+
+ build_huffman_segment(
+ &mut buf,
+ DCCLASS,
+ LUMADESTINATION,
+ &STD_LUMA_DC_CODE_LENGTHS,
+ &STD_LUMA_DC_VALUES,
+ );
+ self.writer.write_segment(DHT, &buf)?;
+
+ build_huffman_segment(
+ &mut buf,
+ ACCLASS,
+ LUMADESTINATION,
+ &STD_LUMA_AC_CODE_LENGTHS,
+ &STD_LUMA_AC_VALUES,
+ );
+ self.writer.write_segment(DHT, &buf)?;
+
+ if num_components == 3 {
+ build_huffman_segment(
+ &mut buf,
+ DCCLASS,
+ CHROMADESTINATION,
+ &STD_CHROMA_DC_CODE_LENGTHS,
+ &STD_CHROMA_DC_VALUES,
+ );
+ self.writer.write_segment(DHT, &buf)?;
+
+ build_huffman_segment(
+ &mut buf,
+ ACCLASS,
+ CHROMADESTINATION,
+ &STD_CHROMA_AC_CODE_LENGTHS,
+ &STD_CHROMA_AC_VALUES,
+ );
+ self.writer.write_segment(DHT, &buf)?;
+ }
+
+ build_scan_header(&mut buf, &self.components[..num_components]);
+ self.writer.write_segment(SOS, &buf)?;
+
+ if color_type.has_color() {
+ self.encode_rgb(image)
+ } else {
+ self.encode_gray(image)
+ }?;
+
+ self.writer.pad_byte()?;
+ self.writer.write_marker(EOI)?;
+ Ok(())
+ }
+
+ fn encode_gray<I: GenericImageView>(&mut self, image: &I) -> io::Result<()> {
+ let mut yblock = [0u8; 64];
+ let mut y_dcprev = 0;
+ let mut dct_yblock = [0i32; 64];
+
+ for y in (0..image.height()).step_by(8) {
+ for x in (0..image.width()).step_by(8) {
+ copy_blocks_gray(image, x, y, &mut yblock);
+
+ // Level shift and fdct
+ // Coeffs are scaled by 8
+ transform::fdct(&yblock, &mut dct_yblock);
+
+ // Quantization
+ for (i, dct) in dct_yblock.iter_mut().enumerate() {
+ *dct = ((*dct / 8) as f32 / f32::from(self.tables[0][i])).round() as i32;
+ }
+
+ let la = &*self.luma_actable;
+ let ld = &*self.luma_dctable;
+
+ y_dcprev = self.writer.write_block(&dct_yblock, y_dcprev, ld, la)?;
+ }
+ }
+
+ Ok(())
+ }
+
+ fn encode_rgb<I: GenericImageView>(&mut self, image: &I) -> io::Result<()> {
+ let mut y_dcprev = 0;
+ let mut cb_dcprev = 0;
+ let mut cr_dcprev = 0;
+
+ let mut dct_yblock = [0i32; 64];
+ let mut dct_cb_block = [0i32; 64];
+ let mut dct_cr_block = [0i32; 64];
+
+ let mut yblock = [0u8; 64];
+ let mut cb_block = [0u8; 64];
+ let mut cr_block = [0u8; 64];
+
+ for y in (0..image.height()).step_by(8) {
+ for x in (0..image.width()).step_by(8) {
+ // RGB -> YCbCr
+ copy_blocks_ycbcr(image, x, y, &mut yblock, &mut cb_block, &mut cr_block);
+
+ // Level shift and fdct
+ // Coeffs are scaled by 8
+ transform::fdct(&yblock, &mut dct_yblock);
+ transform::fdct(&cb_block, &mut dct_cb_block);
+ transform::fdct(&cr_block, &mut dct_cr_block);
+
+ // Quantization
+ for i in 0usize..64 {
+ dct_yblock[i] =
+ ((dct_yblock[i] / 8) as f32 / f32::from(self.tables[0][i])).round() as i32;
+ dct_cb_block[i] = ((dct_cb_block[i] / 8) as f32 / f32::from(self.tables[1][i]))
+ .round() as i32;
+ dct_cr_block[i] = ((dct_cr_block[i] / 8) as f32 / f32::from(self.tables[1][i]))
+ .round() as i32;
+ }
+
+ let la = &*self.luma_actable;
+ let ld = &*self.luma_dctable;
+ let cd = &*self.chroma_dctable;
+ let ca = &*self.chroma_actable;
+
+ y_dcprev = self.writer.write_block(&dct_yblock, y_dcprev, ld, la)?;
+ cb_dcprev = self.writer.write_block(&dct_cb_block, cb_dcprev, cd, ca)?;
+ cr_dcprev = self.writer.write_block(&dct_cr_block, cr_dcprev, cd, ca)?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl<W: Write> ImageEncoder for JpegEncoder<W> {
+ fn write_image(
+ mut self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ self.encode(buf, width, height, color_type)
+ }
+}
+
+fn build_jfif_header(m: &mut Vec<u8>, density: PixelDensity) {
+ m.clear();
+ m.extend_from_slice(b"JFIF");
+ m.extend_from_slice(&[
+ 0,
+ 0x01,
+ 0x02,
+ match density.unit {
+ PixelDensityUnit::PixelAspectRatio => 0x00,
+ PixelDensityUnit::Inches => 0x01,
+ PixelDensityUnit::Centimeters => 0x02,
+ },
+ ]);
+ m.extend_from_slice(&density.density.0.to_be_bytes());
+ m.extend_from_slice(&density.density.1.to_be_bytes());
+ m.extend_from_slice(&[0, 0]);
+}
+
+fn build_frame_header(
+ m: &mut Vec<u8>,
+ precision: u8,
+ width: u16,
+ height: u16,
+ components: &[Component],
+) {
+ m.clear();
+
+ m.push(precision);
+ m.extend_from_slice(&height.to_be_bytes());
+ m.extend_from_slice(&width.to_be_bytes());
+ m.push(components.len() as u8);
+
+ for &comp in components.iter() {
+ let hv = (comp.h << 4) | comp.v;
+ m.extend_from_slice(&[comp.id, hv, comp.tq]);
+ }
+}
+
+fn build_scan_header(m: &mut Vec<u8>, components: &[Component]) {
+ m.clear();
+
+ m.push(components.len() as u8);
+
+ for &comp in components.iter() {
+ let tables = (comp.dc_table << 4) | comp.ac_table;
+ m.extend_from_slice(&[comp.id, tables]);
+ }
+
+ // spectral start and end, approx. high and low
+ m.extend_from_slice(&[0, 63, 0]);
+}
+
+fn build_huffman_segment(
+ m: &mut Vec<u8>,
+ class: u8,
+ destination: u8,
+ numcodes: &[u8; 16],
+ values: &[u8],
+) {
+ m.clear();
+
+ let tcth = (class << 4) | destination;
+ m.push(tcth);
+
+ m.extend_from_slice(numcodes);
+
+ let sum: usize = numcodes.iter().map(|&x| x as usize).sum();
+
+ assert_eq!(sum, values.len());
+
+ m.extend_from_slice(values);
+}
+
+fn build_quantization_segment(m: &mut Vec<u8>, precision: u8, identifier: u8, qtable: &[u8; 64]) {
+ m.clear();
+
+ let p = if precision == 8 { 0 } else { 1 };
+
+ let pqtq = (p << 4) | identifier;
+ m.push(pqtq);
+
+ for &i in &UNZIGZAG[..] {
+ m.push(qtable[i as usize]);
+ }
+}
+
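+// Splits a DCT coefficient into its JPEG magnitude category (number of bits) and value bits,
+// e.g. 5 -> (3, 0b101) and -3 -> (2, 0b00).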
+fn encode_coefficient(coefficient: i32) -> (u8, u16) {
+ let mut magnitude = coefficient.unsigned_abs() as u16;
+ let mut num_bits = 0u8;
+
+ while magnitude > 0 {
+ magnitude >>= 1;
+ num_bits += 1;
+ }
+
+ let mask = (1 << num_bits as usize) - 1;
+
+ let val = if coefficient < 0 {
+ (coefficient - 1) as u16 & mask
+ } else {
+ coefficient as u16 & mask
+ };
+
+ (num_bits, val)
+}
+
+#[inline]
+fn rgb_to_ycbcr<P: Pixel>(pixel: P) -> (u8, u8, u8) {
+ use crate::traits::Primitive;
+ use num_traits::cast::ToPrimitive;
+
+ let [r, g, b] = pixel.to_rgb().0;
+ let max: f32 = P::Subpixel::DEFAULT_MAX_VALUE.to_f32().unwrap();
+ let r: f32 = r.to_f32().unwrap();
+ let g: f32 = g.to_f32().unwrap();
+ let b: f32 = b.to_f32().unwrap();
+
+ // Coefficients from JPEG File Interchange Format (Version 1.02), multiplied for 255 maximum.
+ let y = 76.245 / max * r + 149.685 / max * g + 29.07 / max * b;
+ let cb = -43.0185 / max * r - 84.4815 / max * g + 127.5 / max * b + 128.;
+ let cr = 127.5 / max * r - 106.7685 / max * g - 20.7315 / max * b + 128.;
+
+ (y as u8, cb as u8, cr as u8)
+}
+
+/// Returns the pixel at (x,y) if (x,y) is in the image,
+/// otherwise the closest pixel in the image
+#[inline]
+fn pixel_at_or_near<I: GenericImageView>(source: &I, x: u32, y: u32) -> I::Pixel {
+ if source.in_bounds(x, y) {
+ source.get_pixel(x, y)
+ } else {
+ source.get_pixel(x.min(source.width() - 1), y.min(source.height() - 1))
+ }
+}
+
+fn copy_blocks_ycbcr<I: GenericImageView>(
+ source: &I,
+ x0: u32,
+ y0: u32,
+ yb: &mut [u8; 64],
+ cbb: &mut [u8; 64],
+ crb: &mut [u8; 64],
+) {
+ for y in 0..8 {
+ for x in 0..8 {
+ let pixel = pixel_at_or_near(source, x + x0, y + y0);
+ let (yc, cb, cr) = rgb_to_ycbcr(pixel);
+
+ yb[(y * 8 + x) as usize] = yc;
+ cbb[(y * 8 + x) as usize] = cb;
+ crb[(y * 8 + x) as usize] = cr;
+ }
+ }
+}
+
+fn copy_blocks_gray<I: GenericImageView>(source: &I, x0: u32, y0: u32, gb: &mut [u8; 64]) {
+ use num_traits::cast::ToPrimitive;
+ for y in 0..8 {
+ for x in 0..8 {
+ let pixel = pixel_at_or_near(source, x0 + x, y0 + y);
+ let [luma] = pixel.to_luma().0;
+ gb[(y * 8 + x) as usize] = luma.to_u8().unwrap();
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::io::Cursor;
+
+ #[cfg(feature = "benchmarks")]
+ extern crate test;
+ #[cfg(feature = "benchmarks")]
+ use test::Bencher;
+
+ use crate::color::ColorType;
+ use crate::error::ParameterErrorKind::DimensionMismatch;
+ use crate::image::ImageDecoder;
+ use crate::{ImageEncoder, ImageError};
+
+ use super::super::JpegDecoder;
+ use super::{
+ build_frame_header, build_huffman_segment, build_jfif_header, build_quantization_segment,
+ build_scan_header, Component, JpegEncoder, PixelDensity, DCCLASS, LUMADESTINATION,
+ STD_LUMA_DC_CODE_LENGTHS, STD_LUMA_DC_VALUES,
+ };
+
+ fn decode(encoded: &[u8]) -> Vec<u8> {
+ let decoder = JpegDecoder::new(Cursor::new(encoded)).expect("Could not decode image");
+
+ let mut decoded = vec![0; decoder.total_bytes() as usize];
+ decoder
+ .read_image(&mut decoded)
+ .expect("Could not decode image");
+ decoded
+ }
+
+ #[test]
+ fn roundtrip_sanity_check() {
+ // create a 1x1 8-bit image buffer containing a single red pixel
+ let img = [255u8, 0, 0];
+
+ // encode it into a memory buffer
+ let mut encoded_img = Vec::new();
+ {
+ let encoder = JpegEncoder::new_with_quality(&mut encoded_img, 100);
+ encoder
+ .write_image(&img, 1, 1, ColorType::Rgb8)
+ .expect("Could not encode image");
+ }
+
+ // decode it from the memory buffer
+ {
+ let decoded = decode(&encoded_img);
+ // note that, even with the encode quality set to 100, we do not get the same image
+ // back. Therefore, we're going to assert that it's at least red-ish:
+ assert_eq!(3, decoded.len());
+ assert!(decoded[0] > 0x80);
+ assert!(decoded[1] < 0x80);
+ assert!(decoded[2] < 0x80);
+ }
+ }
+
+ #[test]
+ fn grayscale_roundtrip_sanity_check() {
+ // create a 2x2 8-bit image buffer containing a white diagonal
+ let img = [255u8, 0, 0, 255];
+
+ // encode it into a memory buffer
+ let mut encoded_img = Vec::new();
+ {
+ let encoder = JpegEncoder::new_with_quality(&mut encoded_img, 100);
+ encoder
+ .write_image(&img[..], 2, 2, ColorType::L8)
+ .expect("Could not encode image");
+ }
+
+ // decode it from the memory buffer
+ {
+ let decoded = decode(&encoded_img);
+ // note that, even with the encode quality set to 100, we do not get the same image
+ // back. Therefore, we're going to assert that the diagonal is at least white-ish:
+ assert_eq!(4, decoded.len());
+ assert!(decoded[0] > 0x80);
+ assert!(decoded[1] < 0x80);
+ assert!(decoded[2] < 0x80);
+ assert!(decoded[3] > 0x80);
+ }
+ }
+
+ #[test]
+ fn jfif_header_density_check() {
+ let mut buffer = Vec::new();
+ build_jfif_header(&mut buffer, PixelDensity::dpi(300));
+ assert_eq!(
+ buffer,
+ vec![
+ b'J',
+ b'F',
+ b'I',
+ b'F',
+ 0,
+ 1,
+ 2, // JFIF version 1.02
+ 1, // density is in dpi
+ 300u16.to_be_bytes()[0],
+ 300u16.to_be_bytes()[1],
+ 300u16.to_be_bytes()[0],
+ 300u16.to_be_bytes()[1],
+ 0,
+ 0, // No thumbnail
+ ]
+ );
+ }
+
+ #[test]
+ fn test_image_too_large() {
+ // JPEG cannot encode images larger than 65,535×65,535
+ // create a 65,536×1 8-bit black image buffer
+ let img = [0; 65_536];
+ // Try to encode an image that is too large
+ let mut encoded = Vec::new();
+ let encoder = JpegEncoder::new_with_quality(&mut encoded, 100);
+ let result = encoder.write_image(&img, 65_536, 1, ColorType::L8);
+ match result {
+ Err(ImageError::Parameter(err)) => {
+ assert_eq!(err.kind(), DimensionMismatch)
+ }
+ other => {
+ assert!(
+ false,
+ "Encoding an image that is too large should return a DimensionError \
+ it returned {:?} instead",
+ other
+ )
+ }
+ }
+ }
+
+ #[test]
+ fn test_build_jfif_header() {
+ let mut buf = vec![];
+ let density = PixelDensity::dpi(100);
+ build_jfif_header(&mut buf, density);
+ assert_eq!(
+ buf,
+ [0x4A, 0x46, 0x49, 0x46, 0x00, 0x01, 0x02, 0x01, 0, 100, 0, 100, 0, 0]
+ );
+ }
+
+ #[test]
+ fn test_build_frame_header() {
+ let mut buf = vec![];
+ let components = vec![
+ Component {
+ id: 1,
+ h: 1,
+ v: 1,
+ tq: 5,
+ dc_table: 5,
+ ac_table: 5,
+ _dc_pred: 0,
+ },
+ Component {
+ id: 2,
+ h: 1,
+ v: 1,
+ tq: 4,
+ dc_table: 4,
+ ac_table: 4,
+ _dc_pred: 0,
+ },
+ ];
+ build_frame_header(&mut buf, 5, 100, 150, &components);
+ assert_eq!(
+ buf,
+ [5, 0, 150, 0, 100, 2, 1, 1 << 4 | 1, 5, 2, 1 << 4 | 1, 4]
+ );
+ }
+
+ #[test]
+ fn test_build_scan_header() {
+ let mut buf = vec![];
+ let components = vec![
+ Component {
+ id: 1,
+ h: 1,
+ v: 1,
+ tq: 5,
+ dc_table: 5,
+ ac_table: 5,
+ _dc_pred: 0,
+ },
+ Component {
+ id: 2,
+ h: 1,
+ v: 1,
+ tq: 4,
+ dc_table: 4,
+ ac_table: 4,
+ _dc_pred: 0,
+ },
+ ];
+ build_scan_header(&mut buf, &components);
+ assert_eq!(buf, [2, 1, 5 << 4 | 5, 2, 4 << 4 | 4, 0, 63, 0]);
+ }
+
+ #[test]
+ fn test_build_huffman_segment() {
+ let mut buf = vec![];
+ build_huffman_segment(
+ &mut buf,
+ DCCLASS,
+ LUMADESTINATION,
+ &STD_LUMA_DC_CODE_LENGTHS,
+ &STD_LUMA_DC_VALUES,
+ );
+ assert_eq!(
+ buf,
+ vec![
+ 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11
+ ]
+ );
+ }
+
+ #[test]
+ fn test_build_quantization_segment() {
+ let mut buf = vec![];
+ let qtable = [0u8; 64];
+ build_quantization_segment(&mut buf, 8, 1, &qtable);
+ let mut expected = vec![];
+ expected.push(0 << 4 | 1);
+ expected.extend_from_slice(&[0; 64]);
+ assert_eq!(buf, expected)
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_jpeg_encoder_new(b: &mut Bencher) {
+ b.iter(|| {
+ let mut y = vec![];
+ let x = JpegEncoder::new(&mut y);
+ })
+ }
+}
diff --git a/vendor/image/src/codecs/jpeg/entropy.rs b/vendor/image/src/codecs/jpeg/entropy.rs
new file mode 100644
index 0000000..5bdcef6
--- /dev/null
+++ b/vendor/image/src/codecs/jpeg/entropy.rs
@@ -0,0 +1,63 @@
+/// Given an array containing the number of codes of each code length,
+/// this function generates the huffman code sizes and their respective
+/// codes as specified by the JPEG spec (Annex C).
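+///
+/// For example, the standard luminance DC table (one code of length 2, five of length 3,
+/// then one each of lengths 4 through 9) yields the codes 00, 010, 011, 100, 101, 110,
+/// 1110, 11110, ... in that order.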
+const fn derive_codes_and_sizes(bits: &[u8; 16]) -> ([u8; 256], [u16; 256]) {
+ let mut huffsize = [0u8; 256];
+ let mut huffcode = [0u16; 256];
+
+ let mut k = 0;
+
+ // Annex C.2
+ // Figure C.1
+ // Generate table of individual code lengths
+ let mut i = 0;
+ while i < 16 {
+ let mut j = 0;
+ while j < bits[i as usize] {
+ huffsize[k] = i + 1;
+ k += 1;
+ j += 1;
+ }
+ i += 1;
+ }
+
+ huffsize[k] = 0;
+
+ // Annex C.2
+ // Figure C.2
+ // Generate table of huffman codes
+ k = 0;
+ let mut code = 0u16;
+ let mut size = huffsize[0];
+
+ while huffsize[k] != 0 {
+ huffcode[k] = code;
+ code += 1;
+ k += 1;
+
+ if huffsize[k] == size {
+ continue;
+ }
+
+ // FIXME there is something wrong with this code
+ let diff = huffsize[k].wrapping_sub(size);
+ code = if diff < 16 { code << diff as usize } else { 0 };
+
+ size = size.wrapping_add(diff);
+ }
+
+ (huffsize, huffcode)
+}
+
+pub(crate) const fn build_huff_lut_const(bits: &[u8; 16], huffval: &[u8]) -> [(u8, u16); 256] {
+ let mut lut = [(17u8, 0u16); 256];
+ let (huffsize, huffcode) = derive_codes_and_sizes(bits);
+
+ let mut i = 0;
+ while i < huffval.len() {
+ lut[huffval[i] as usize] = (huffsize[i], huffcode[i]);
+ i += 1;
+ }
+
+ lut
+}
diff --git a/vendor/image/src/codecs/jpeg/mod.rs b/vendor/image/src/codecs/jpeg/mod.rs
new file mode 100644
index 0000000..4242733
--- /dev/null
+++ b/vendor/image/src/codecs/jpeg/mod.rs
@@ -0,0 +1,16 @@
+//! Decoding and Encoding of JPEG Images
+//!
+//! JPEG (Joint Photographic Experts Group) is an image format that supports lossy compression.
+//! This module implements the Baseline JPEG standard.
+//!
+//! # Related Links
+//! * <http://www.w3.org/Graphics/JPEG/itu-t81.pdf> - The JPEG specification
+//!
+
+pub use self::decoder::JpegDecoder;
+pub use self::encoder::{JpegEncoder, PixelDensity, PixelDensityUnit};
+
+mod decoder;
+mod encoder;
+mod entropy;
+mod transform;
diff --git a/vendor/image/src/codecs/jpeg/transform.rs b/vendor/image/src/codecs/jpeg/transform.rs
new file mode 100644
index 0000000..1ca01a9
--- /dev/null
+++ b/vendor/image/src/codecs/jpeg/transform.rs
@@ -0,0 +1,196 @@
+/*
+fdct is a Rust translation of jfdctint.c from the
+Independent JPEG Group's libjpeg version 9a
+obtained from http://www.ijg.org/files/jpegsr9a.zip
+It comes with the following conditions of distribution and use:
+
+ In plain English:
+
+ 1. We don't promise that this software works. (But if you find any bugs,
+ please let us know!)
+ 2. You can use this software for whatever you want. You don't have to pay us.
+ 3. You may not pretend that you wrote this software. If you use it in a
+ program, you must acknowledge somewhere in your documentation that
+ you've used the IJG code.
+
+ In legalese:
+
+ The authors make NO WARRANTY or representation, either express or implied,
+ with respect to this software, its quality, accuracy, merchantability, or
+ fitness for a particular purpose. This software is provided "AS IS", and you,
+ its user, assume the entire risk as to its quality and accuracy.
+
+ This software is copyright (C) 1991-2014, Thomas G. Lane, Guido Vollbeding.
+ All Rights Reserved except as specified below.
+
+ Permission is hereby granted to use, copy, modify, and distribute this
+ software (or portions thereof) for any purpose, without fee, subject to these
+ conditions:
+ (1) If any part of the source code for this software is distributed, then this
+ README file must be included, with this copyright and no-warranty notice
+ unaltered; and any additions, deletions, or changes to the original files
+ must be clearly indicated in accompanying documentation.
+ (2) If only executable code is distributed, then the accompanying
+ documentation must state that "this software is based in part on the work of
+ the Independent JPEG Group".
+ (3) Permission for use of this software is granted only if the user accepts
+ full responsibility for any undesirable consequences; the authors accept
+ NO LIABILITY for damages of any kind.
+
+ These conditions apply to any software derived from or based on the IJG code,
+ not just to the unmodified library. If you use our work, you ought to
+ acknowledge us.
+
+ Permission is NOT granted for the use of any IJG author's name or company name
+ in advertising or publicity relating to this software or products derived from
+ it. This software may be referred to only as "the Independent JPEG Group's
+ software".
+
+ We specifically permit and encourage the use of this software as the basis of
+ commercial products, provided that all warranty or liability claims are
+ assumed by the product vendor.
+*/
+
+static CONST_BITS: i32 = 13;
+static PASS1_BITS: i32 = 2;
+
+static FIX_0_298631336: i32 = 2446;
+static FIX_0_390180644: i32 = 3196;
+static FIX_0_541196100: i32 = 4433;
+static FIX_0_765366865: i32 = 6270;
+static FIX_0_899976223: i32 = 7373;
+static FIX_1_175875602: i32 = 9633;
+static FIX_1_501321110: i32 = 12_299;
+static FIX_1_847759065: i32 = 15_137;
+static FIX_1_961570560: i32 = 16_069;
+static FIX_2_053119869: i32 = 16_819;
+static FIX_2_562915447: i32 = 20_995;
+static FIX_3_072711026: i32 = 25_172;
+
+pub(crate) fn fdct(samples: &[u8; 64], coeffs: &mut [i32; 64]) {
+ // Pass 1: process rows.
+ // Results are scaled by sqrt(8) compared to a true DCT;
+ // furthermore, we scale the results by 2**PASS1_BITS
+ for y in 0usize..8 {
+ let y0 = y * 8;
+
+ // Even part
+ let t0 = i32::from(samples[y0]) + i32::from(samples[y0 + 7]);
+ let t1 = i32::from(samples[y0 + 1]) + i32::from(samples[y0 + 6]);
+ let t2 = i32::from(samples[y0 + 2]) + i32::from(samples[y0 + 5]);
+ let t3 = i32::from(samples[y0 + 3]) + i32::from(samples[y0 + 4]);
+
+ let t10 = t0 + t3;
+ let t12 = t0 - t3;
+ let t11 = t1 + t2;
+ let t13 = t1 - t2;
+
+ let t0 = i32::from(samples[y0]) - i32::from(samples[y0 + 7]);
+ let t1 = i32::from(samples[y0 + 1]) - i32::from(samples[y0 + 6]);
+ let t2 = i32::from(samples[y0 + 2]) - i32::from(samples[y0 + 5]);
+ let t3 = i32::from(samples[y0 + 3]) - i32::from(samples[y0 + 4]);
+
+ // Apply unsigned -> signed conversion
+ coeffs[y0] = (t10 + t11 - 8 * 128) << PASS1_BITS as usize;
+ coeffs[y0 + 4] = (t10 - t11) << PASS1_BITS as usize;
+
+ let mut z1 = (t12 + t13) * FIX_0_541196100;
+ // Add fudge factor here for final descale
+ z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as usize;
+
+ coeffs[y0 + 2] = (z1 + t12 * FIX_0_765366865) >> (CONST_BITS - PASS1_BITS) as usize;
+ coeffs[y0 + 6] = (z1 - t13 * FIX_1_847759065) >> (CONST_BITS - PASS1_BITS) as usize;
+
+ // Odd part
+ let t12 = t0 + t2;
+ let t13 = t1 + t3;
+
+ let mut z1 = (t12 + t13) * FIX_1_175875602;
+ // Add fudge factor here for final descale
+ z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as usize;
+
+ let mut t12 = t12 * (-FIX_0_390180644);
+ let mut t13 = t13 * (-FIX_1_961570560);
+ t12 += z1;
+ t13 += z1;
+
+ let z1 = (t0 + t3) * (-FIX_0_899976223);
+ let mut t0 = t0 * FIX_1_501321110;
+ let mut t3 = t3 * FIX_0_298631336;
+ t0 += z1 + t12;
+ t3 += z1 + t13;
+
+ let z1 = (t1 + t2) * (-FIX_2_562915447);
+ let mut t1 = t1 * FIX_3_072711026;
+ let mut t2 = t2 * FIX_2_053119869;
+ t1 += z1 + t13;
+ t2 += z1 + t12;
+
+ coeffs[y0 + 1] = t0 >> (CONST_BITS - PASS1_BITS) as usize;
+ coeffs[y0 + 3] = t1 >> (CONST_BITS - PASS1_BITS) as usize;
+ coeffs[y0 + 5] = t2 >> (CONST_BITS - PASS1_BITS) as usize;
+ coeffs[y0 + 7] = t3 >> (CONST_BITS - PASS1_BITS) as usize;
+ }
+
+ // Pass 2: process columns
+ // We remove the PASS1_BITS scaling but leave the results scaled up by an
+ // overall factor of 8
+ for x in (0usize..8).rev() {
+ // Even part
+ let t0 = coeffs[x] + coeffs[x + 8 * 7];
+ let t1 = coeffs[x + 8] + coeffs[x + 8 * 6];
+ let t2 = coeffs[x + 8 * 2] + coeffs[x + 8 * 5];
+ let t3 = coeffs[x + 8 * 3] + coeffs[x + 8 * 4];
+
+ // Add fudge factor here for final descale
+ let t10 = t0 + t3 + (1 << (PASS1_BITS - 1) as usize);
+ let t12 = t0 - t3;
+ let t11 = t1 + t2;
+ let t13 = t1 - t2;
+
+ let t0 = coeffs[x] - coeffs[x + 8 * 7];
+ let t1 = coeffs[x + 8] - coeffs[x + 8 * 6];
+ let t2 = coeffs[x + 8 * 2] - coeffs[x + 8 * 5];
+ let t3 = coeffs[x + 8 * 3] - coeffs[x + 8 * 4];
+
+ coeffs[x] = (t10 + t11) >> PASS1_BITS as usize;
+ coeffs[x + 8 * 4] = (t10 - t11) >> PASS1_BITS as usize;
+
+ let mut z1 = (t12 + t13) * FIX_0_541196100;
+ // Add fudge factor here for final descale
+ z1 += 1 << (CONST_BITS + PASS1_BITS - 1) as usize;
+
+ coeffs[x + 8 * 2] = (z1 + t12 * FIX_0_765366865) >> (CONST_BITS + PASS1_BITS) as usize;
+ coeffs[x + 8 * 6] = (z1 - t13 * FIX_1_847759065) >> (CONST_BITS + PASS1_BITS) as usize;
+
+ // Odd part
+ let t12 = t0 + t2;
+ let t13 = t1 + t3;
+
+ let mut z1 = (t12 + t13) * FIX_1_175875602;
+ // Add fudge factor here for final descale
+ z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as usize;
+
+ let mut t12 = t12 * (-FIX_0_390180644);
+ let mut t13 = t13 * (-FIX_1_961570560);
+ t12 += z1;
+ t13 += z1;
+
+ let z1 = (t0 + t3) * (-FIX_0_899976223);
+ let mut t0 = t0 * FIX_1_501321110;
+ let mut t3 = t3 * FIX_0_298631336;
+ t0 += z1 + t12;
+ t3 += z1 + t13;
+
+ let z1 = (t1 + t2) * (-FIX_2_562915447);
+ let mut t1 = t1 * FIX_3_072711026;
+ let mut t2 = t2 * FIX_2_053119869;
+ t1 += z1 + t13;
+ t2 += z1 + t12;
+
+ coeffs[x + 8] = t0 >> (CONST_BITS + PASS1_BITS) as usize;
+ coeffs[x + 8 * 3] = t1 >> (CONST_BITS + PASS1_BITS) as usize;
+ coeffs[x + 8 * 5] = t2 >> (CONST_BITS + PASS1_BITS) as usize;
+ coeffs[x + 8 * 7] = t3 >> (CONST_BITS + PASS1_BITS) as usize;
+ }
+}
diff --git a/vendor/image/src/codecs/openexr.rs b/vendor/image/src/codecs/openexr.rs
new file mode 100644
index 0000000..52d6ba9
--- /dev/null
+++ b/vendor/image/src/codecs/openexr.rs
@@ -0,0 +1,592 @@
+//! Decoding of OpenEXR (.exr) Images
+//!
+//! OpenEXR is an image format that is widely used, especially in VFX,
+//! because it supports lossless and lossy compression for float data.
+//!
+//! This decoder only supports RGB and RGBA images.
+//! If an image does not contain alpha information,
+//! it is defaulted to `1.0` (no transparency).
+//!
+//! # Related Links
+//! * <https://www.openexr.com/documentation.html> - The OpenEXR reference.
+//!
+//!
+//! Current limitations (July 2021):
+//! - only the pixel types `Rgba32F` and `Rgba16F` are supported
+//! - only non-deep rgb/rgba files supported, no conversion from/to YCbCr or similar
+//! - only the first non-deep rgb layer is used
+//! - only the largest mip map level is used
+//! - pixels outside display window are lost
+//! - meta data is lost
+//! - dwaa/dwab compressed images not supported yet by the exr library
+//! - (chroma) subsampling not supported yet by the exr library
+use exr::prelude::*;
+
+use crate::error::{DecodingError, EncodingError, ImageFormatHint};
+use crate::image::decoder_to_vec;
+use crate::{
+ ColorType, ExtendedColorType, ImageDecoder, ImageEncoder, ImageError, ImageFormat, ImageResult,
+ Progress,
+};
+use std::convert::TryInto;
+use std::io::{Cursor, Read, Seek, Write};
+
+/// An OpenEXR decoder. Immediately reads the meta data from the file.
+#[derive(Debug)]
+pub struct OpenExrDecoder<R> {
+ exr_reader: exr::block::reader::Reader<R>,
+
+ // select a header that is rgb and not deep
+ header_index: usize,
+
+ // decode either rgb or rgba.
+ // can be specified to include or discard alpha channels.
+ // if none, the alpha channel will only be allocated where the file contains data for it.
+ alpha_preference: Option<bool>,
+
+ alpha_present_in_file: bool,
+}
+
+impl<R: Read + Seek> OpenExrDecoder<R> {
+ /// Create a decoder. Consumes the first few bytes of the source to extract image dimensions.
+ /// Assumes the reader is buffered. In most cases,
+ /// you should wrap your reader in a `BufReader` for best performance.
+ /// Loads an alpha channel if the file has alpha samples.
+ /// Use `with_alpha_preference` if you want to load or not load alpha unconditionally.
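+ ///
+ /// A minimal sketch (the file name is hypothetical):
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::BufReader;
+ /// use image::codecs::openexr::OpenExrDecoder;
+ ///
+ /// let file = BufReader::new(File::open("input.exr").unwrap());
+ /// let decoder = OpenExrDecoder::new(file).unwrap();
+ /// ```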
+ pub fn new(source: R) -> ImageResult<Self> {
+ Self::with_alpha_preference(source, None)
+ }
+
+ /// Create a decoder. Consumes the first few bytes of the source to extract image dimensions.
+ /// Assumes the reader is buffered. In most cases,
+ /// you should wrap your reader in a `BufReader` for best performance.
+ /// If alpha preference is specified, an alpha channel will
+ /// always be present or always be not present in the returned image.
+ /// If alpha preference is none, the alpha channel will only be returned if it is found in the file.
+ pub fn with_alpha_preference(source: R, alpha_preference: Option<bool>) -> ImageResult<Self> {
+ // read meta data, then wait for further instructions, keeping the file open and ready
+ let exr_reader = exr::block::read(source, false).map_err(to_image_err)?;
+
+ let header_index = exr_reader
+ .headers()
+ .iter()
+ .position(|header| {
+ // check if r/g/b exists in the channels
+ let has_rgb = ["R", "G", "B"]
+ .iter()
+ .all(|&required| // alpha will be optional
+ header.channels.find_index_of_channel(&Text::from(required)).is_some());
+
+ // we currently don't support deep images, or images with color spaces other than rgb
+ !header.deep && has_rgb
+ })
+ .ok_or_else(|| {
+ ImageError::Decoding(DecodingError::new(
+ ImageFormatHint::Exact(ImageFormat::OpenExr),
+ "image does not contain non-deep rgb channels",
+ ))
+ })?;
+
+ let has_alpha = exr_reader.headers()[header_index]
+ .channels
+ .find_index_of_channel(&Text::from("A"))
+ .is_some();
+
+ Ok(Self {
+ alpha_preference,
+ exr_reader,
+ header_index,
+ alpha_present_in_file: has_alpha,
+ })
+ }
+
+ // does not leak exrs-specific meta data into public api, just does it for this module
+ fn selected_exr_header(&self) -> &exr::meta::header::Header {
+ &self.exr_reader.meta_data().headers[self.header_index]
+ }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for OpenExrDecoder<R> {
+ type Reader = Cursor<Vec<u8>>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ let size = self
+ .selected_exr_header()
+ .shared_attributes
+ .display_window
+ .size;
+ (size.width() as u32, size.height() as u32)
+ }
+
+ fn color_type(&self) -> ColorType {
+ let returns_alpha = self.alpha_preference.unwrap_or(self.alpha_present_in_file);
+ if returns_alpha {
+ ColorType::Rgba32F
+ } else {
+ ColorType::Rgb32F
+ }
+ }
+
+ fn original_color_type(&self) -> ExtendedColorType {
+ if self.alpha_present_in_file {
+ ExtendedColorType::Rgba32F
+ } else {
+ ExtendedColorType::Rgb32F
+ }
+ }
+
+ /// Use `read_image` instead if possible,
+ /// as this method creates a whole new buffer just to contain the entire image.
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ Ok(Cursor::new(decoder_to_vec(self)?))
+ }
+
+ fn scanline_bytes(&self) -> u64 {
+ // we cannot always read individual scan lines for every file,
+ // as the tiles or lines in the file could be in random or reversed order.
+ // therefore we currently read all lines at once
+ // Todo: optimize for specific exr.line_order?
+ self.total_bytes()
+ }
+
+ // reads with or without alpha, depending on `self.alpha_preference` and `self.alpha_present_in_file`
+ fn read_image_with_progress<F: Fn(Progress)>(
+ self,
+ unaligned_bytes: &mut [u8],
+ progress_callback: F,
+ ) -> ImageResult<()> {
+ let blocks_in_header = self.selected_exr_header().chunk_count as u64;
+ let channel_count = self.color_type().channel_count() as usize;
+
+ let display_window = self.selected_exr_header().shared_attributes.display_window;
+ let data_window_offset =
+ self.selected_exr_header().own_attributes.layer_position - display_window.position;
+
+ {
+ // check whether the buffer is large enough for the dimensions of the file
+ let (width, height) = self.dimensions();
+ let bytes_per_pixel = self.color_type().bytes_per_pixel() as usize;
+ let expected_byte_count = (width as usize)
+ .checked_mul(height as usize)
+ .and_then(|size| size.checked_mul(bytes_per_pixel));
+
+ // if the width and height do not match the length of the bytes, the arguments are invalid
+ let has_invalid_size_or_overflowed = expected_byte_count
+ .map(|expected_byte_count| unaligned_bytes.len() != expected_byte_count)
+ // otherwise the size calculation overflowed; the expected size exceeds
+ // addressable memory, so the provided data must be too small and is invalid.
+ .unwrap_or(true);
+
+ if has_invalid_size_or_overflowed {
+ panic!("byte buffer not large enough for the specified dimensions and f32 pixels");
+ }
+ }
+
+ let result = read()
+ .no_deep_data()
+ .largest_resolution_level()
+ .rgba_channels(
+ move |_size, _channels| vec![0_f32; display_window.size.area() * channel_count],
+ move |buffer, index_in_data_window, (r, g, b, a_or_1): (f32, f32, f32, f32)| {
+ let index_in_display_window =
+ index_in_data_window.to_i32() + data_window_offset;
+
+ // only keep pixels inside the data window
+ // TODO filter chunks based on this
+ if index_in_display_window.x() >= 0
+ && index_in_display_window.y() >= 0
+ && index_in_display_window.x() < display_window.size.width() as i32
+ && index_in_display_window.y() < display_window.size.height() as i32
+ {
+ let index_in_display_window =
+ index_in_display_window.to_usize("index bug").unwrap();
+ let first_f32_index =
+ index_in_display_window.flat_index_for_size(display_window.size);
+
+ buffer[first_f32_index * channel_count
+ ..(first_f32_index + 1) * channel_count]
+ .copy_from_slice(&[r, g, b, a_or_1][0..channel_count]);
+
+ // TODO white point chromaticities + srgb/linear conversion?
+ }
+ },
+ )
+ .first_valid_layer() // TODO select exact layer by self.header_index?
+ .all_attributes()
+ .on_progress(|progress| {
+ progress_callback(
+ Progress::new(
+ (progress * blocks_in_header as f64) as u64,
+ blocks_in_header,
+ ), // TODO precision errors?
+ );
+ })
+ .from_chunks(self.exr_reader)
+ .map_err(to_image_err)?;
+
+ // TODO this copy is strictly not necessary, but the exr api is a little too simple for reading into a borrowed target slice
+
+ // this cast is safe and works with any alignment, as bytes are copied, and not f32 values.
+ // note: buffer slice length is checked in the beginning of this function and will be correct at this point
+ unaligned_bytes.copy_from_slice(bytemuck::cast_slice(
+ result.layer_data.channel_data.pixels.as_slice(),
+ ));
+ Ok(())
+ }
+}
+
+/// Write a raw byte buffer of pixels,
+/// returning an error if the buffer has an invalid length.
+///
+/// Assumes the writer is buffered. In most cases,
+/// you should wrap your writer in a `BufWriter` for best performance.
+// private. access via `OpenExrEncoder`
+fn write_buffer(
+ mut buffered_write: impl Write + Seek,
+ unaligned_bytes: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+) -> ImageResult<()> {
+ let width = width as usize;
+ let height = height as usize;
+
+ {
+ // check whether the buffer is large enough for the specified dimensions
+ let expected_byte_count = width
+ .checked_mul(height)
+ .and_then(|size| size.checked_mul(color_type.bytes_per_pixel() as usize));
+
+ // if the width and height do not match the length of the bytes, the arguments are invalid
+ let has_invalid_size_or_overflowed = expected_byte_count
+ .map(|expected_byte_count| unaligned_bytes.len() < expected_byte_count)
+ // otherwise the size calculation overflowed; the expected size exceeds
+ // addressable memory, so the provided data must be too small and is invalid.
+ .unwrap_or(true);
+
+ if has_invalid_size_or_overflowed {
+ return Err(ImageError::Encoding(EncodingError::new(
+ ImageFormatHint::Exact(ImageFormat::OpenExr),
+ "byte buffer not large enough for the specified dimensions and f32 pixels",
+ )));
+ }
+ }
+
+ // bytes might be unaligned so we cannot cast the whole thing, instead lookup each f32 individually
+ let lookup_f32 = move |f32_index: usize| {
+ let unaligned_f32_bytes_slice = &unaligned_bytes[f32_index * 4..(f32_index + 1) * 4];
+ let f32_bytes_array = unaligned_f32_bytes_slice
+ .try_into()
+ .expect("indexing error");
+ f32::from_ne_bytes(f32_bytes_array)
+ };
+
+ match color_type {
+ ColorType::Rgb32F => {
+ exr::prelude::Image // TODO compression method zip??
+ ::from_channels(
+ (width, height),
+ SpecificChannels::rgb(|pixel: Vec2<usize>| {
+ let pixel_index = 3 * pixel.flat_index_for_size(Vec2(width, height));
+ (
+ lookup_f32(pixel_index),
+ lookup_f32(pixel_index + 1),
+ lookup_f32(pixel_index + 2),
+ )
+ }),
+ )
+ .write()
+ // .on_progress(|progress| todo!())
+ .to_buffered(&mut buffered_write)
+ .map_err(to_image_err)?;
+ }
+
+ ColorType::Rgba32F => {
+ exr::prelude::Image // TODO compression method zip??
+ ::from_channels(
+ (width, height),
+ SpecificChannels::rgba(|pixel: Vec2<usize>| {
+ let pixel_index = 4 * pixel.flat_index_for_size(Vec2(width, height));
+ (
+ lookup_f32(pixel_index),
+ lookup_f32(pixel_index + 1),
+ lookup_f32(pixel_index + 2),
+ lookup_f32(pixel_index + 3),
+ )
+ }),
+ )
+ .write()
+ // .on_progress(|progress| todo!())
+ .to_buffered(&mut buffered_write)
+ .map_err(to_image_err)?;
+ }
+
+ // TODO other color types and channel types
+ unsupported_color_type => {
+ return Err(ImageError::Encoding(EncodingError::new(
+ ImageFormatHint::Exact(ImageFormat::OpenExr),
+ format!(
+ "writing color type {:?} not yet supported",
+ unsupported_color_type
+ ),
+ )))
+ }
+ }
+
+ Ok(())
+}
+
+// TODO is this struct and trait actually used anywhere?
+/// A thin wrapper that implements `ImageEncoder` for OpenEXR images. Will behave like `image::codecs::openexr::write_buffer`.
+#[derive(Debug)]
+pub struct OpenExrEncoder<W>(W);
+
+impl<W> OpenExrEncoder<W> {
+ /// Create an `ImageEncoder`. Does not write anything yet. Writing later will behave like `image::codecs::openexr::write_buffer`.
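+ ///
+ /// A minimal sketch (illustrative only, not from the upstream docs) that encodes a
+ /// single `Rgb32F` pixel into an in-memory buffer, assuming OpenEXR support is
+ /// compiled in; the pixel values are arbitrary:
+ ///
+ /// ```no_run
+ /// use std::io::Cursor;
+ ///
+ /// use image::codecs::openexr::OpenExrEncoder;
+ /// use image::{ColorType, ImageEncoder};
+ ///
+ /// // `write_image` expects the f32 samples as native-endian bytes.
+ /// let pixel: Vec<u8> = [0.25f32, 0.5, 0.75]
+ ///     .iter()
+ ///     .flat_map(|c| c.to_ne_bytes())
+ ///     .collect();
+ ///
+ /// let mut out = Cursor::new(Vec::new());
+ /// OpenExrEncoder::new(&mut out)
+ ///     .write_image(&pixel, 1, 1, ColorType::Rgb32F)
+ ///     .unwrap();
+ /// ```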
+ // use constructor, not public field, for future backwards-compatibility
+ pub fn new(write: W) -> Self {
+ Self(write)
+ }
+}
+
+impl<W> ImageEncoder for OpenExrEncoder<W>
+where
+ W: Write + Seek,
+{
+ /// Writes the complete image.
+ ///
+ /// Returns an error if `buf` has an invalid length.
+ /// Assumes the writer is buffered. In most cases,
+ /// you should wrap your writer in a `BufWriter` for best performance.
+ fn write_image(
+ self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ write_buffer(self.0, buf, width, height, color_type)
+ }
+}
+
+fn to_image_err(exr_error: Error) -> ImageError {
+ ImageError::Decoding(DecodingError::new(
+ ImageFormatHint::Exact(ImageFormat::OpenExr),
+ exr_error.to_string(),
+ ))
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ use std::io::BufReader;
+ use std::path::{Path, PathBuf};
+
+ use crate::buffer_::{Rgb32FImage, Rgba32FImage};
+ use crate::error::{LimitError, LimitErrorKind};
+ use crate::{ImageBuffer, Rgb, Rgba};
+
+ const BASE_PATH: &[&str] = &[".", "tests", "images", "exr"];
+
+ /// Write an `Rgb32FImage`.
+ /// Assumes the writer is buffered. In most cases,
+ /// you should wrap your writer in a `BufWriter` for best performance.
+ fn write_rgb_image(write: impl Write + Seek, image: &Rgb32FImage) -> ImageResult<()> {
+ write_buffer(
+ write,
+ bytemuck::cast_slice(image.as_raw().as_slice()),
+ image.width(),
+ image.height(),
+ ColorType::Rgb32F,
+ )
+ }
+
+ /// Write an `Rgba32FImage`.
+ /// Assumes the writer is buffered. In most cases,
+ /// you should wrap your writer in a `BufWriter` for best performance.
+ fn write_rgba_image(write: impl Write + Seek, image: &Rgba32FImage) -> ImageResult<()> {
+ write_buffer(
+ write,
+ bytemuck::cast_slice(image.as_raw().as_slice()),
+ image.width(),
+ image.height(),
+ ColorType::Rgba32F,
+ )
+ }
+
+ /// Read the file from the specified path into an `Rgba32FImage`.
+ fn read_as_rgba_image_from_file(path: impl AsRef<Path>) -> ImageResult<Rgba32FImage> {
+ read_as_rgba_image(BufReader::new(std::fs::File::open(path)?))
+ }
+
+ /// Read the file from the specified path into an `Rgb32FImage`.
+ fn read_as_rgb_image_from_file(path: impl AsRef<Path>) -> ImageResult<Rgb32FImage> {
+ read_as_rgb_image(BufReader::new(std::fs::File::open(path)?))
+ }
+
+ /// Read the file from the specified path into an `Rgb32FImage`.
+ fn read_as_rgb_image(read: impl Read + Seek) -> ImageResult<Rgb32FImage> {
+ let decoder = OpenExrDecoder::with_alpha_preference(read, Some(false))?;
+ let (width, height) = decoder.dimensions();
+ let buffer: Vec<f32> = decoder_to_vec(decoder)?;
+
+ ImageBuffer::from_raw(width, height, buffer)
+ // this should be the only reason for the "from raw" call to fail,
+ // even though such a large allocation would probably cause an error much earlier
+ .ok_or_else(|| {
+ ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory))
+ })
+ }
+
+ /// Read the file from the specified path into an `Rgba32FImage`.
+ fn read_as_rgba_image(read: impl Read + Seek) -> ImageResult<Rgba32FImage> {
+ let decoder = OpenExrDecoder::with_alpha_preference(read, Some(true))?;
+ let (width, height) = decoder.dimensions();
+ let buffer: Vec<f32> = decoder_to_vec(decoder)?;
+
+ ImageBuffer::from_raw(width, height, buffer)
+ // this should be the only reason for the "from raw" call to fail,
+ // even though such a large allocation would probably cause an error much earlier
+ .ok_or_else(|| {
+ ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory))
+ })
+ }
+
+ #[test]
+ fn compare_exr_hdr() {
+ if cfg!(not(feature = "hdr")) {
+ eprintln!("warning: to run all the openexr tests, activate the hdr feature flag");
+ }
+
+ #[cfg(feature = "hdr")]
+ {
+ let folder = BASE_PATH.iter().collect::<PathBuf>();
+ let reference_path = folder.clone().join("overexposed gradient.hdr");
+ let exr_path = folder
+ .clone()
+ .join("overexposed gradient - data window equals display window.exr");
+
+ let hdr: Vec<Rgb<f32>> = crate::codecs::hdr::HdrDecoder::new(std::io::BufReader::new(
+ std::fs::File::open(&reference_path).unwrap(),
+ ))
+ .unwrap()
+ .read_image_hdr()
+ .unwrap();
+
+ let exr_pixels: Rgb32FImage = read_as_rgb_image_from_file(exr_path).unwrap();
+ assert_eq!(
+ exr_pixels.dimensions().0 * exr_pixels.dimensions().1,
+ hdr.len() as u32
+ );
+
+ for (expected, found) in hdr.iter().zip(exr_pixels.pixels()) {
+ for (expected, found) in expected.0.iter().zip(found.0.iter()) {
+ // the large tolerance seems to be caused by
+ // the RGBE u8x4 pixel quantization of the hdr image format
+ assert!(
+ (expected - found).abs() < 0.1,
+ "expected {}, found {}",
+ expected,
+ found
+ );
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn roundtrip_rgba() {
+ let mut next_random = vec![1.0, 0.0, -1.0, -3.14, 27.0, 11.0, 31.0]
+ .into_iter()
+ .cycle();
+ let mut next_random = move || next_random.next().unwrap();
+
+ let generated_image: Rgba32FImage = ImageBuffer::from_fn(9, 31, |_x, _y| {
+ Rgba([next_random(), next_random(), next_random(), next_random()])
+ });
+
+ let mut bytes = vec![];
+ write_rgba_image(Cursor::new(&mut bytes), &generated_image).unwrap();
+ let decoded_image = read_as_rgba_image(Cursor::new(bytes)).unwrap();
+
+ debug_assert_eq!(generated_image, decoded_image);
+ }
+
+ #[test]
+ fn roundtrip_rgb() {
+ let mut next_random = vec![1.0, 0.0, -1.0, -3.14, 27.0, 11.0, 31.0]
+ .into_iter()
+ .cycle();
+ let mut next_random = move || next_random.next().unwrap();
+
+ let generated_image: Rgb32FImage = ImageBuffer::from_fn(9, 31, |_x, _y| {
+ Rgb([next_random(), next_random(), next_random()])
+ });
+
+ let mut bytes = vec![];
+ write_rgb_image(Cursor::new(&mut bytes), &generated_image).unwrap();
+ let decoded_image = read_as_rgb_image(Cursor::new(bytes)).unwrap();
+
+ debug_assert_eq!(generated_image, decoded_image);
+ }
+
+ #[test]
+ fn compare_rgba_rgb() {
+ let exr_path = BASE_PATH
+ .iter()
+ .collect::<PathBuf>()
+ .join("overexposed gradient - data window equals display window.exr");
+
+ let rgb: Rgb32FImage = read_as_rgb_image_from_file(&exr_path).unwrap();
+ let rgba: Rgba32FImage = read_as_rgba_image_from_file(&exr_path).unwrap();
+
+ assert_eq!(rgba.dimensions(), rgb.dimensions());
+
+ for (Rgb(rgb), Rgba(rgba)) in rgb.pixels().zip(rgba.pixels()) {
+ assert_eq!(rgb, &rgba[..3]);
+ }
+ }
+
+ #[test]
+ fn compare_cropped() {
+ // like in photoshop, exr images may have layers placed anywhere in a canvas.
+ // we don't want to load the pixels from the layer, but we want to load the pixels from the canvas.
+ // a layer might be smaller than the canvas; in that case the canvas should be transparent black
+ // where no layer was covering it. a layer might also be larger than the canvas,
+ // in which case the excess pixels should be discarded.
+ //
+ // in this test we want to make sure that an
+ // auto-cropped image will be reproduced to the original.
+
+ let exr_path = BASE_PATH.iter().collect::<PathBuf>();
+ let original = exr_path.clone().join("cropping - uncropped original.exr");
+ let cropped = exr_path
+ .clone()
+ .join("cropping - data window differs display window.exr");
+
+ // smoke-check that the exr files are actually not the same
+ {
+ let original_exr = read_first_flat_layer_from_file(&original).unwrap();
+ let cropped_exr = read_first_flat_layer_from_file(&cropped).unwrap();
+ assert_eq!(
+ original_exr.attributes.display_window,
+ cropped_exr.attributes.display_window
+ );
+ assert_ne!(
+ original_exr.layer_data.attributes.layer_position,
+ cropped_exr.layer_data.attributes.layer_position
+ );
+ assert_ne!(original_exr.layer_data.size, cropped_exr.layer_data.size);
+ }
+
+ // check that they result in the same image
+ let original: Rgba32FImage = read_as_rgba_image_from_file(&original).unwrap();
+ let cropped: Rgba32FImage = read_as_rgba_image_from_file(&cropped).unwrap();
+ assert_eq!(original.dimensions(), cropped.dimensions());
+
+ // the following is not a simple assert_eq, as in case of an error,
+ // the whole image would be printed to the console, which takes forever
+ assert!(original.pixels().zip(cropped.pixels()).all(|(a, b)| a == b));
+ }
+}
diff --git a/vendor/image/src/codecs/png.rs b/vendor/image/src/codecs/png.rs
new file mode 100644
index 0000000..b9f98ce
--- /dev/null
+++ b/vendor/image/src/codecs/png.rs
@@ -0,0 +1,778 @@
+//! Decoding and Encoding of PNG Images
+//!
+//! PNG (Portable Network Graphics) is an image format that supports lossless compression.
+//!
+//! # Related Links
+//! * <http://www.w3.org/TR/PNG/> - The PNG Specification
+//!
+
+use std::convert::TryFrom;
+use std::fmt;
+use std::io::{self, Read, Write};
+
+use num_rational::Ratio;
+use png::{BlendOp, DisposeOp};
+
+use crate::animation::{Delay, Frame, Frames};
+use crate::color::{Blend, ColorType, ExtendedColorType};
+use crate::error::{
+ DecodingError, EncodingError, ImageError, ImageResult, LimitError, LimitErrorKind,
+ ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::{AnimationDecoder, ImageDecoder, ImageEncoder, ImageFormat};
+use crate::io::Limits;
+use crate::{DynamicImage, GenericImage, ImageBuffer, Luma, LumaA, Rgb, Rgba, RgbaImage};
+
+// http://www.w3.org/TR/PNG-Structure.html
+// The first eight bytes of a PNG file always contain the following (decimal) values:
+pub(crate) const PNG_SIGNATURE: [u8; 8] = [137, 80, 78, 71, 13, 10, 26, 10];
+
+/// Png Reader
+///
+/// This reader will try to read the png one row at a time;
+/// however, for interlaced png files this is not possible, so
+/// these are read all at once.
+pub struct PngReader<R: Read> {
+ reader: png::Reader<R>,
+ buffer: Vec<u8>,
+ index: usize,
+}
+
+impl<R: Read> PngReader<R> {
+ fn new(mut reader: png::Reader<R>) -> ImageResult<PngReader<R>> {
+ let len = reader.output_buffer_size();
+ // Since interlaced images do not come in
+ // scanline order, it is almost impossible to
+ // read them in a streaming fashion. However,
+ // this shouldn't be too big of a problem,
+ // as most interlaced images should fit in memory.
+ let buffer = if reader.info().interlaced {
+ let mut buffer = vec![0; len];
+ reader
+ .next_frame(&mut buffer)
+ .map_err(ImageError::from_png)?;
+ buffer
+ } else {
+ Vec::new()
+ };
+
+ Ok(PngReader {
+ reader,
+ buffer,
+ index: 0,
+ })
+ }
+}
+
+impl<R: Read> Read for PngReader<R> {
+ fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> {
+ // io::Write::write for slice cannot fail
+ let readed = buf.write(&self.buffer[self.index..]).unwrap();
+
+ let mut bytes = readed;
+ self.index += readed;
+
+ while self.index >= self.buffer.len() {
+ match self.reader.next_row()? {
+ Some(row) => {
+ // Faster to copy directly to external buffer
+ let readed = buf.write(row.data()).unwrap();
+ bytes += readed;
+
+ self.buffer = row.data()[readed..].to_owned();
+ self.index = 0;
+ }
+ None => return Ok(bytes),
+ }
+ }
+
+ Ok(bytes)
+ }
+
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ let mut bytes = self.buffer.len();
+ if buf.is_empty() {
+ std::mem::swap(&mut self.buffer, buf);
+ } else {
+ buf.extend_from_slice(&self.buffer);
+ self.buffer.clear();
+ }
+
+ self.index = 0;
+
+ while let Some(row) = self.reader.next_row()? {
+ buf.extend_from_slice(row.data());
+ bytes += row.data().len();
+ }
+
+ Ok(bytes)
+ }
+}
+
+/// PNG decoder
+pub struct PngDecoder<R: Read> {
+ color_type: ColorType,
+ reader: png::Reader<R>,
+}
+
+impl<R: Read> PngDecoder<R> {
+ /// Creates a new decoder that decodes from the stream ```r```
+ pub fn new(r: R) -> ImageResult<PngDecoder<R>> {
+ Self::with_limits(r, Limits::default())
+ }
+
+ /// Creates a new decoder that decodes from the stream ```r``` with the given limits.
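+ ///
+ /// A minimal sketch (illustrative, not part of the upstream docs) that caps decoder
+ /// allocations; the file name and the 64 MiB figure are arbitrary:
+ ///
+ /// ```no_run
+ /// use image::codecs::png::PngDecoder;
+ /// use image::io::Limits;
+ ///
+ /// # fn main() -> image::ImageResult<()> {
+ /// let mut limits = Limits::default();
+ /// // Refuse to allocate more than 64 MiB while decoding.
+ /// limits.max_alloc = Some(64 * 1024 * 1024);
+ /// let file = std::fs::File::open("large.png")?;
+ /// let decoder = PngDecoder::with_limits(file, limits)?;
+ /// # Ok(()) }
+ /// ```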
+ pub fn with_limits(r: R, limits: Limits) -> ImageResult<PngDecoder<R>> {
+ limits.check_support(&crate::io::LimitSupport::default())?;
+
+ let max_bytes = usize::try_from(limits.max_alloc.unwrap_or(u64::MAX)).unwrap_or(usize::MAX);
+ let mut decoder = png::Decoder::new_with_limits(r, png::Limits { bytes: max_bytes });
+
+ let info = decoder.read_header_info().map_err(ImageError::from_png)?;
+ limits.check_dimensions(info.width, info.height)?;
+
+ // By default the PNG decoder will scale 16 bpc to 8 bpc, so custom
+ // transformations must be set. EXPAND preserves the default behavior
+ // expanding bpc < 8 to 8 bpc.
+ decoder.set_transformations(png::Transformations::EXPAND);
+ let reader = decoder.read_info().map_err(ImageError::from_png)?;
+ let (color_type, bits) = reader.output_color_type();
+ let color_type = match (color_type, bits) {
+ (png::ColorType::Grayscale, png::BitDepth::Eight) => ColorType::L8,
+ (png::ColorType::Grayscale, png::BitDepth::Sixteen) => ColorType::L16,
+ (png::ColorType::GrayscaleAlpha, png::BitDepth::Eight) => ColorType::La8,
+ (png::ColorType::GrayscaleAlpha, png::BitDepth::Sixteen) => ColorType::La16,
+ (png::ColorType::Rgb, png::BitDepth::Eight) => ColorType::Rgb8,
+ (png::ColorType::Rgb, png::BitDepth::Sixteen) => ColorType::Rgb16,
+ (png::ColorType::Rgba, png::BitDepth::Eight) => ColorType::Rgba8,
+ (png::ColorType::Rgba, png::BitDepth::Sixteen) => ColorType::Rgba16,
+
+ (png::ColorType::Grayscale, png::BitDepth::One) => {
+ return Err(unsupported_color(ExtendedColorType::L1))
+ }
+ (png::ColorType::GrayscaleAlpha, png::BitDepth::One) => {
+ return Err(unsupported_color(ExtendedColorType::La1))
+ }
+ (png::ColorType::Rgb, png::BitDepth::One) => {
+ return Err(unsupported_color(ExtendedColorType::Rgb1))
+ }
+ (png::ColorType::Rgba, png::BitDepth::One) => {
+ return Err(unsupported_color(ExtendedColorType::Rgba1))
+ }
+
+ (png::ColorType::Grayscale, png::BitDepth::Two) => {
+ return Err(unsupported_color(ExtendedColorType::L2))
+ }
+ (png::ColorType::GrayscaleAlpha, png::BitDepth::Two) => {
+ return Err(unsupported_color(ExtendedColorType::La2))
+ }
+ (png::ColorType::Rgb, png::BitDepth::Two) => {
+ return Err(unsupported_color(ExtendedColorType::Rgb2))
+ }
+ (png::ColorType::Rgba, png::BitDepth::Two) => {
+ return Err(unsupported_color(ExtendedColorType::Rgba2))
+ }
+
+ (png::ColorType::Grayscale, png::BitDepth::Four) => {
+ return Err(unsupported_color(ExtendedColorType::L4))
+ }
+ (png::ColorType::GrayscaleAlpha, png::BitDepth::Four) => {
+ return Err(unsupported_color(ExtendedColorType::La4))
+ }
+ (png::ColorType::Rgb, png::BitDepth::Four) => {
+ return Err(unsupported_color(ExtendedColorType::Rgb4))
+ }
+ (png::ColorType::Rgba, png::BitDepth::Four) => {
+ return Err(unsupported_color(ExtendedColorType::Rgba4))
+ }
+
+ (png::ColorType::Indexed, bits) => {
+ return Err(unsupported_color(ExtendedColorType::Unknown(bits as u8)))
+ }
+ };
+
+ Ok(PngDecoder { color_type, reader })
+ }
+
+ /// Turn this into an iterator over the animation frames.
+ ///
+ /// Reading the complete animation requires more memory than reading the data from the IDAT
+ /// frame, since multiple frame buffers need to be reserved at the same time. We further do not
+ /// support compositing 16-bit colors. In any case this would be lossy, as the interface of
+ /// animation decoders does not support 16-bit colors.
+ ///
+ /// If something is not supported or a limit is violated then the decoding step that requires
+ /// them will fail and an error will be returned instead of the frame. No further frames will
+ /// be returned.
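+ ///
+ /// A minimal sketch (illustrative, not from the upstream docs) of collecting all frames of
+ /// an animated PNG; the file name is a placeholder:
+ ///
+ /// ```no_run
+ /// use image::codecs::png::PngDecoder;
+ /// use image::AnimationDecoder;
+ ///
+ /// # fn main() -> image::ImageResult<()> {
+ /// let decoder = PngDecoder::new(std::fs::File::open("animated.png")?)?;
+ /// if decoder.is_apng() {
+ ///     // `into_frames` yields composited RGBA frames together with their delays.
+ ///     let frames = decoder.apng().into_frames().collect_frames()?;
+ ///     println!("decoded {} frames", frames.len());
+ /// }
+ /// # Ok(()) }
+ /// ```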
+ pub fn apng(self) -> ApngDecoder<R> {
+ ApngDecoder::new(self)
+ }
+
+ /// Returns if the image contains an animation.
+ ///
+ /// Note that the file itself decides if the default image is considered to be part of the
+ /// animation. When it is not, the common interpretation is to use it as a thumbnail.
+ ///
+ /// If a non-animated image is converted into an `ApngDecoder` then its iterator is empty.
+ pub fn is_apng(&self) -> bool {
+ self.reader.info().animation_control.is_some()
+ }
+}
+
+fn unsupported_color(ect: ExtendedColorType) -> ImageError {
+ ImageError::Unsupported(UnsupportedError::from_format_and_kind(
+ ImageFormat::Png.into(),
+ UnsupportedErrorKind::Color(ect),
+ ))
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for PngDecoder<R> {
+ type Reader = PngReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ self.reader.info().size()
+ }
+
+ fn color_type(&self) -> ColorType {
+ self.color_type
+ }
+
+ fn icc_profile(&mut self) -> Option<Vec<u8>> {
+ self.reader.info().icc_profile.as_ref().map(|x| x.to_vec())
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ PngReader::new(self.reader)
+ }
+
+ fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+ use byteorder::{BigEndian, ByteOrder, NativeEndian};
+
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+ self.reader.next_frame(buf).map_err(ImageError::from_png)?;
+ // PNG images are big endian. For 16 bit per channel and larger types,
+ // the buffer may need to be reordered to native endianness per the
+ // contract of `read_image`.
+ // TODO: assumes equal channel bit depth.
+ let bpc = self.color_type().bytes_per_pixel() / self.color_type().channel_count();
+
+ match bpc {
+ 1 => (), // No reordering necessary for u8
+ 2 => buf.chunks_mut(2).for_each(|c| {
+ let v = BigEndian::read_u16(c);
+ NativeEndian::write_u16(c, v)
+ }),
+ _ => unreachable!(),
+ }
+ Ok(())
+ }
+
+ fn scanline_bytes(&self) -> u64 {
+ let width = self.reader.info().width;
+ self.reader.output_line_size(width) as u64
+ }
+}
+
+/// An [`AnimationDecoder`] adapter of [`PngDecoder`].
+///
+/// See [`PngDecoder::apng`] for more information.
+///
+/// [`AnimationDecoder`]: ../trait.AnimationDecoder.html
+/// [`PngDecoder`]: struct.PngDecoder.html
+/// [`PngDecoder::apng`]: struct.PngDecoder.html#method.apng
+pub struct ApngDecoder<R: Read> {
+ inner: PngDecoder<R>,
+ /// The current output buffer.
+ current: RgbaImage,
+ /// The previous output buffer, used for dispose op previous.
+ previous: RgbaImage,
+ /// The dispose op of the current frame.
+ dispose: DisposeOp,
+ /// The number of images we still expect to be able to load.
+ remaining: u32,
+ /// The next (first) image is the thumbnail.
+ has_thumbnail: bool,
+}
+
+impl<R: Read> ApngDecoder<R> {
+ fn new(inner: PngDecoder<R>) -> Self {
+ let (width, height) = inner.dimensions();
+ let info = inner.reader.info();
+ let remaining = match info.animation_control() {
+ // The expected number of fcTL in the remaining image.
+ Some(actl) => actl.num_frames,
+ None => 0,
+ };
+ // If the IDAT has no fcTL then it is not part of the animation counted by
+ // num_frames. All following fdAT chunks must be preceded by an fcTL
+ let has_thumbnail = info.frame_control.is_none();
+ ApngDecoder {
+ inner,
+ // TODO: should we delay this allocation? At least if we support limits we should.
+ current: RgbaImage::new(width, height),
+ previous: RgbaImage::new(width, height),
+ dispose: DisposeOp::Background,
+ remaining,
+ has_thumbnail,
+ }
+ }
+
+ // TODO: thumbnail(&mut self) -> Option<impl ImageDecoder<'_>>
+
+ /// Decode one subframe and overlay it on the canvas.
+ fn mix_next_frame(&mut self) -> Result<Option<&RgbaImage>, ImageError> {
+ // Remove this image from remaining.
+ self.remaining = match self.remaining.checked_sub(1) {
+ None => return Ok(None),
+ Some(next) => next,
+ };
+
+ // Shorten ourselves to 0 in case of error.
+ let remaining = self.remaining;
+ self.remaining = 0;
+
+ // Skip the thumbnail that is not part of the animation.
+ if self.has_thumbnail {
+ self.has_thumbnail = false;
+ let mut buffer = vec![0; self.inner.reader.output_buffer_size()];
+ self.inner
+ .reader
+ .next_frame(&mut buffer)
+ .map_err(ImageError::from_png)?;
+ }
+
+ self.animatable_color_type()?;
+
+ // Dispose of the previous frame.
+ match self.dispose {
+ DisposeOp::None => {
+ self.previous.clone_from(&self.current);
+ }
+ DisposeOp::Background => {
+ self.previous.clone_from(&self.current);
+ self.current
+ .pixels_mut()
+ .for_each(|pixel| *pixel = Rgba([0, 0, 0, 0]));
+ }
+ DisposeOp::Previous => {
+ self.current.clone_from(&self.previous);
+ }
+ }
+
+ // Read next frame data.
+ let mut buffer = vec![0; self.inner.reader.output_buffer_size()];
+ self.inner
+ .reader
+ .next_frame(&mut buffer)
+ .map_err(ImageError::from_png)?;
+ let info = self.inner.reader.info();
+
+ // Find out how to interpret the decoded frame.
+ let (width, height, px, py, blend);
+ match info.frame_control() {
+ None => {
+ width = info.width;
+ height = info.height;
+ px = 0;
+ py = 0;
+ blend = BlendOp::Source;
+ }
+ Some(fc) => {
+ width = fc.width;
+ height = fc.height;
+ px = fc.x_offset;
+ py = fc.y_offset;
+ blend = fc.blend_op;
+ self.dispose = fc.dispose_op;
+ }
+ };
+
+ // Turn the data into an rgba image proper.
+ let source = match self.inner.color_type {
+ ColorType::L8 => {
+ let image = ImageBuffer::<Luma<_>, _>::from_raw(width, height, buffer).unwrap();
+ DynamicImage::ImageLuma8(image).into_rgba8()
+ }
+ ColorType::La8 => {
+ let image = ImageBuffer::<LumaA<_>, _>::from_raw(width, height, buffer).unwrap();
+ DynamicImage::ImageLumaA8(image).into_rgba8()
+ }
+ ColorType::Rgb8 => {
+ let image = ImageBuffer::<Rgb<_>, _>::from_raw(width, height, buffer).unwrap();
+ DynamicImage::ImageRgb8(image).into_rgba8()
+ }
+ ColorType::Rgba8 => ImageBuffer::<Rgba<_>, _>::from_raw(width, height, buffer).unwrap(),
+ ColorType::L16 | ColorType::Rgb16 | ColorType::La16 | ColorType::Rgba16 => {
+ // TODO: to enable remove restriction in `animatable_color_type` method.
+ unreachable!("16-bit apng not yet support")
+ }
+ _ => unreachable!("Invalid png color"),
+ };
+
+ match blend {
+ BlendOp::Source => {
+ self.current
+ .copy_from(&source, px, py)
+ .expect("Invalid png image not detected in png");
+ }
+ BlendOp::Over => {
+ // TODO: investigate speed, speed-ups, and bounds-checks.
+ for (x, y, p) in source.enumerate_pixels() {
+ self.current.get_pixel_mut(x + px, y + py).blend(p);
+ }
+ }
+ }
+
+ // Ok, we can proceed; restore the count of images that actually remain.
+ self.remaining = remaining;
+ // Return composited output buffer.
+ Ok(Some(&self.current))
+ }
+
+ fn animatable_color_type(&self) -> Result<(), ImageError> {
+ match self.inner.color_type {
+ ColorType::L8 | ColorType::Rgb8 | ColorType::La8 | ColorType::Rgba8 => Ok(()),
+ // TODO: do not handle multi-byte colors. Remember to implement it in `mix_next_frame`.
+ ColorType::L16 | ColorType::Rgb16 | ColorType::La16 | ColorType::Rgba16 => {
+ Err(unsupported_color(self.inner.color_type.into()))
+ }
+ _ => unreachable!("{:?} not a valid png color", self.inner.color_type),
+ }
+ }
+}
+
+impl<'a, R: Read + 'a> AnimationDecoder<'a> for ApngDecoder<R> {
+ fn into_frames(self) -> Frames<'a> {
+ struct FrameIterator<R: Read>(ApngDecoder<R>);
+
+ impl<R: Read> Iterator for FrameIterator<R> {
+ type Item = ImageResult<Frame>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let image = match self.0.mix_next_frame() {
+ Ok(Some(image)) => image.clone(),
+ Ok(None) => return None,
+ Err(err) => return Some(Err(err)),
+ };
+
+ let info = self.0.inner.reader.info();
+ let fc = info.frame_control().unwrap();
+ // PNG delays are rational numbers of seconds (delay_num / delay_den).
+ let num = u32::from(fc.delay_num) * 1_000u32;
+ let denom = match fc.delay_den {
+ // The standard dictates replacing the denominator with 100 when it is 0.
+ 0 => 100,
+ d => u32::from(d),
+ };
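+ // For example, delay_num = 1 and delay_den = 10 encode a delay of 1/10 s,
+ // i.e. Ratio::new(1_000, 10) milliseconds = 100 ms per frame.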
+ let delay = Delay::from_ratio(Ratio::new(num, denom));
+ Some(Ok(Frame::from_parts(image, 0, 0, delay)))
+ }
+ }
+
+ Frames::new(Box::new(FrameIterator(self)))
+ }
+}
+
+/// PNG encoder
+pub struct PngEncoder<W: Write> {
+ w: W,
+ compression: CompressionType,
+ filter: FilterType,
+}
+
+/// Compression level of a PNG encoder. The default setting is `Fast`.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[non_exhaustive]
+pub enum CompressionType {
+ /// Default compression level
+ Default,
+ /// Fast, minimal compression
+ Fast,
+ /// High compression level
+ Best,
+ /// Huffman coding compression
+ #[deprecated(note = "use one of the other compression levels instead, such as 'Fast'")]
+ Huffman,
+ /// Run-length encoding compression
+ #[deprecated(note = "use one of the other compression levels instead, such as 'Fast'")]
+ Rle,
+}
+
+/// Filter algorithms used to process image data to improve compression.
+///
+/// The default filter is `Adaptive`.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[non_exhaustive]
+pub enum FilterType {
+ /// No processing done, best used for low bit depth grayscale or data with a
+ /// low color count
+ NoFilter,
+ /// Filters based on previous pixel in the same scanline
+ Sub,
+ /// Filters based on the scanline above
+ Up,
+ /// Filters based on the average of the left and above neighbor pixels
+ Avg,
+ /// Algorithm that takes into account the left, upper left, and above pixels
+ Paeth,
+ /// Uses a heuristic to select one of the preceding filters for each
+ /// scanline rather than one filter for the entire image
+ Adaptive,
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+#[non_exhaustive]
+enum BadPngRepresentation {
+ ColorType(ColorType),
+}
+
+impl<W: Write> PngEncoder<W> {
+ /// Create a new encoder that writes its output to ```w```
+ pub fn new(w: W) -> PngEncoder<W> {
+ PngEncoder {
+ w,
+ compression: CompressionType::default(),
+ filter: FilterType::default(),
+ }
+ }
+
+ /// Create a new encoder that writes its output to `w` with `CompressionType` `compression` and
+ /// `FilterType` `filter`.
+ ///
+ /// It is best to view the options as a _hint_ to the implementation on the smallest or fastest
+ /// option for encoding a particular image. That is, using options that map directly to a PNG
+ /// image parameter will use this parameter where possible. But variants that have no direct
+ /// mapping may be interpreted differently in minor versions. The exact output is expressly
+ /// __not__ part of the SemVer stability guarantee.
+ ///
+ /// Note that it is not optimal to use a single filter type, so an adaptive
+ /// filter type is selected as the default. The filter which best minimizes
+ /// file size may change with the type of compression used.
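+ ///
+ /// A minimal sketch (illustrative, not from the upstream docs) that encodes a single
+ /// opaque pixel into an in-memory buffer:
+ ///
+ /// ```
+ /// use image::codecs::png::{CompressionType, FilterType, PngEncoder};
+ /// use image::{ColorType, ImageEncoder};
+ ///
+ /// let mut out = Vec::new();
+ /// let encoder =
+ ///     PngEncoder::new_with_quality(&mut out, CompressionType::Best, FilterType::Adaptive);
+ /// // One opaque red RGBA8 pixel (4 bytes for a 1x1 image).
+ /// encoder.write_image(&[255, 0, 0, 255], 1, 1, ColorType::Rgba8).unwrap();
+ /// assert!(!out.is_empty());
+ /// ```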
+ pub fn new_with_quality(
+ w: W,
+ compression: CompressionType,
+ filter: FilterType,
+ ) -> PngEncoder<W> {
+ PngEncoder {
+ w,
+ compression,
+ filter,
+ }
+ }
+
+ /// Encodes the image `data` that has dimensions `width` and `height` and `ColorType` `c`.
+ ///
+ /// Expects data in big endian.
+ #[deprecated = "Use `PngEncoder::write_image` instead. Beware that `write_image` has a different endianness convention"]
+ pub fn encode(self, data: &[u8], width: u32, height: u32, color: ColorType) -> ImageResult<()> {
+ self.encode_inner(data, width, height, color)
+ }
+
+ fn encode_inner(
+ self,
+ data: &[u8],
+ width: u32,
+ height: u32,
+ color: ColorType,
+ ) -> ImageResult<()> {
+ let (ct, bits) = match color {
+ ColorType::L8 => (png::ColorType::Grayscale, png::BitDepth::Eight),
+ ColorType::L16 => (png::ColorType::Grayscale, png::BitDepth::Sixteen),
+ ColorType::La8 => (png::ColorType::GrayscaleAlpha, png::BitDepth::Eight),
+ ColorType::La16 => (png::ColorType::GrayscaleAlpha, png::BitDepth::Sixteen),
+ ColorType::Rgb8 => (png::ColorType::Rgb, png::BitDepth::Eight),
+ ColorType::Rgb16 => (png::ColorType::Rgb, png::BitDepth::Sixteen),
+ ColorType::Rgba8 => (png::ColorType::Rgba, png::BitDepth::Eight),
+ ColorType::Rgba16 => (png::ColorType::Rgba, png::BitDepth::Sixteen),
+ _ => {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Png.into(),
+ UnsupportedErrorKind::Color(color.into()),
+ ),
+ ))
+ }
+ };
+ let comp = match self.compression {
+ CompressionType::Default => png::Compression::Default,
+ CompressionType::Best => png::Compression::Best,
+ _ => png::Compression::Fast,
+ };
+ let (filter, adaptive_filter) = match self.filter {
+ FilterType::NoFilter => (
+ png::FilterType::NoFilter,
+ png::AdaptiveFilterType::NonAdaptive,
+ ),
+ FilterType::Sub => (png::FilterType::Sub, png::AdaptiveFilterType::NonAdaptive),
+ FilterType::Up => (png::FilterType::Up, png::AdaptiveFilterType::NonAdaptive),
+ FilterType::Avg => (png::FilterType::Avg, png::AdaptiveFilterType::NonAdaptive),
+ FilterType::Paeth => (png::FilterType::Paeth, png::AdaptiveFilterType::NonAdaptive),
+ FilterType::Adaptive => (png::FilterType::Sub, png::AdaptiveFilterType::Adaptive),
+ };
+
+ let mut encoder = png::Encoder::new(self.w, width, height);
+ encoder.set_color(ct);
+ encoder.set_depth(bits);
+ encoder.set_compression(comp);
+ encoder.set_filter(filter);
+ encoder.set_adaptive_filter(adaptive_filter);
+ let mut writer = encoder
+ .write_header()
+ .map_err(|e| ImageError::IoError(e.into()))?;
+ writer
+ .write_image_data(data)
+ .map_err(|e| ImageError::IoError(e.into()))
+ }
+}
+
+impl<W: Write> ImageEncoder for PngEncoder<W> {
+ /// Write a PNG image with the specified width, height, and color type.
+ ///
+ /// For color types with 16 bits per channel or larger, the contents of `buf` should be in
+ /// native endian. PngEncoder will automatically convert to big endian as required by the
+ /// underlying PNG format.
+ fn write_image(
+ self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ use byteorder::{BigEndian, ByteOrder, NativeEndian};
+ use ColorType::*;
+
+ // PNG images are big endian. For 16 bit per channel and larger types,
+ // the buffer may need to be reordered to big endian per the
+ // contract of `write_image`.
+ // TODO: assumes equal channel bit depth.
+ match color_type {
+ L8 | La8 | Rgb8 | Rgba8 => {
+ // No reordering necessary for u8
+ self.encode_inner(buf, width, height, color_type)
+ }
+ L16 | La16 | Rgb16 | Rgba16 => {
+ // Because the buffer is immutable and the PNG encoder does not
+ // yet take Write/Read traits, create a temporary buffer for
+ // big endian reordering.
+ let mut reordered = vec![0; buf.len()];
+ buf.chunks(2)
+ .zip(reordered.chunks_mut(2))
+ .for_each(|(b, r)| BigEndian::write_u16(r, NativeEndian::read_u16(b)));
+ self.encode_inner(&reordered, width, height, color_type)
+ }
+ _ => Err(ImageError::Encoding(EncodingError::new(
+ ImageFormat::Png.into(),
+ BadPngRepresentation::ColorType(color_type),
+ ))),
+ }
+ }
+}
+
+impl ImageError {
+ fn from_png(err: png::DecodingError) -> ImageError {
+ use png::DecodingError::*;
+ match err {
+ IoError(err) => ImageError::IoError(err),
+ // The input image was not a valid PNG.
+ err @ Format(_) => {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Png.into(), err))
+ }
+ // Other is used when:
+ // - The decoder is polled for more animation frames despite being done (or not being animated
+ // in the first place).
+ // - The output buffer does not have the required size.
+ err @ Parameter(_) => ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(err.to_string()),
+ )),
+ LimitsExceeded => {
+ ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory))
+ }
+ }
+ }
+}
+
+impl Default for CompressionType {
+ fn default() -> Self {
+ CompressionType::Fast
+ }
+}
+
+impl Default for FilterType {
+ fn default() -> Self {
+ FilterType::Adaptive
+ }
+}
+
+impl fmt::Display for BadPngRepresentation {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Self::ColorType(color_type) => write!(
+ f,
+ "The color {:?} can not be represented in PNG.",
+ color_type
+ ),
+ }
+ }
+}
+
+impl std::error::Error for BadPngRepresentation {}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::image::ImageDecoder;
+ use crate::ImageOutputFormat;
+
+ use std::io::{Cursor, Read};
+
+ #[test]
+ fn ensure_no_decoder_off_by_one() {
+ let dec = PngDecoder::new(
+ std::fs::File::open("tests/images/png/bugfixes/debug_triangle_corners_widescreen.png")
+ .unwrap(),
+ )
+ .expect("Unable to read PNG file (does it exist?)");
+
+ assert_eq![(2000, 1000), dec.dimensions()];
+
+ assert_eq![
+ ColorType::Rgb8,
+ dec.color_type(),
+ "Image MUST have the Rgb8 format"
+ ];
+
+ let correct_bytes = dec
+ .into_reader()
+ .expect("Unable to read file")
+ .bytes()
+ .map(|x| x.expect("Unable to read byte"))
+ .collect::<Vec<u8>>();
+
+ assert_eq![6_000_000, correct_bytes.len()];
+ }
+
+ #[test]
+ fn underlying_error() {
+ use std::error::Error;
+
+ let mut not_png =
+ std::fs::read("tests/images/png/bugfixes/debug_triangle_corners_widescreen.png")
+ .unwrap();
+ not_png[0] = 0;
+
+ let error = PngDecoder::new(&not_png[..]).err().unwrap();
+ let _ = error
+ .source()
+ .unwrap()
+ .downcast_ref::<png::DecodingError>()
+ .expect("Caused by a png error");
+ }
+
+ #[test]
+ fn encode_bad_color_type() {
+ // regression test for issue #1663
+ let image = DynamicImage::new_rgb32f(1, 1);
+ let mut target = Cursor::new(vec![]);
+ let _ = image.write_to(&mut target, ImageOutputFormat::Png);
+ }
+}
diff --git a/vendor/image/src/codecs/pnm/autobreak.rs b/vendor/image/src/codecs/pnm/autobreak.rs
new file mode 100644
index 0000000..cea2cd8
--- /dev/null
+++ b/vendor/image/src/codecs/pnm/autobreak.rs
@@ -0,0 +1,124 @@
+//! Insert line breaks between written buffers when they would overflow the line length.
+use std::io;
+
+// The pnm standard says to insert line breaks after 70 characters. This assumes that the buffers
+// being written do not themselves contain line breaks. We have to be careful to fully commit
+// buffers or not commit them at all, otherwise we might insert a newline in the middle of a token.
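+//
+// For example, with `line_capacity = 70` and repeated writes of the token "65535 ",
+// a newline is emitted before the write that would push a line past 70 bytes, so
+// every emitted line stays within the limit and no token is split across lines.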
+pub(crate) struct AutoBreak<W: io::Write> {
+ wrapped: W,
+ line_capacity: usize,
+ line: Vec<u8>,
+ has_newline: bool,
+ panicked: bool, // see https://github.com/rust-lang/rust/issues/30888
+}
+
+impl<W: io::Write> AutoBreak<W> {
+ pub(crate) fn new(writer: W, line_capacity: usize) -> Self {
+ AutoBreak {
+ wrapped: writer,
+ line_capacity,
+ line: Vec::with_capacity(line_capacity + 1),
+ has_newline: false,
+ panicked: false,
+ }
+ }
+
+ fn flush_buf(&mut self) -> io::Result<()> {
+ // from BufWriter
+ let mut written = 0;
+ let len = self.line.len();
+ let mut ret = Ok(());
+ while written < len {
+ self.panicked = true;
+ let r = self.wrapped.write(&self.line[written..]);
+ self.panicked = false;
+ match r {
+ Ok(0) => {
+ ret = Err(io::Error::new(
+ io::ErrorKind::WriteZero,
+ "failed to write the buffered data",
+ ));
+ break;
+ }
+ Ok(n) => written += n,
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(e) => {
+ ret = Err(e);
+ break;
+ }
+ }
+ }
+ if written > 0 {
+ self.line.drain(..written);
+ }
+ ret
+ }
+}
+
+impl<W: io::Write> io::Write for AutoBreak<W> {
+ fn write(&mut self, buffer: &[u8]) -> io::Result<usize> {
+ if self.has_newline {
+ self.flush()?;
+ self.has_newline = false;
+ }
+
+ if !self.line.is_empty() && self.line.len() + buffer.len() > self.line_capacity {
+ self.line.push(b'\n');
+ self.has_newline = true;
+ self.flush()?;
+ self.has_newline = false;
+ }
+
+ self.line.extend_from_slice(buffer);
+ Ok(buffer.len())
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.flush_buf()?;
+ self.wrapped.flush()
+ }
+}
+
+impl<W: io::Write> Drop for AutoBreak<W> {
+ fn drop(&mut self) {
+ if !self.panicked {
+ let _r = self.flush_buf();
+ // internal writer flushed automatically by Drop
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::io::Write;
+
+ #[test]
+ fn test_aligned_writes() {
+ let mut output = Vec::new();
+
+ {
+ let mut writer = AutoBreak::new(&mut output, 10);
+ writer.write_all(b"0123456789").unwrap();
+ writer.write_all(b"0123456789").unwrap();
+ }
+
+ assert_eq!(output.as_slice(), b"0123456789\n0123456789");
+ }
+
+ #[test]
+ fn test_greater_writes() {
+ let mut output = Vec::new();
+
+ {
+ let mut writer = AutoBreak::new(&mut output, 10);
+ writer.write_all(b"012").unwrap();
+ writer.write_all(b"345").unwrap();
+ writer.write_all(b"0123456789").unwrap();
+ writer.write_all(b"012345678910").unwrap();
+ writer.write_all(b"_").unwrap();
+ }
+
+ assert_eq!(output.as_slice(), b"012345\n0123456789\n012345678910\n_");
+ }
+}
diff --git a/vendor/image/src/codecs/pnm/decoder.rs b/vendor/image/src/codecs/pnm/decoder.rs
new file mode 100644
index 0000000..a495871
--- /dev/null
+++ b/vendor/image/src/codecs/pnm/decoder.rs
@@ -0,0 +1,1272 @@
+use std::convert::TryFrom;
+use std::convert::TryInto;
+use std::error;
+use std::fmt::{self, Display};
+use std::io::{self, BufRead, Cursor, Read};
+use std::marker::PhantomData;
+use std::mem;
+use std::num::ParseIntError;
+use std::str::{self, FromStr};
+
+use super::{ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader, PixmapHeader};
+use super::{HeaderRecord, PnmHeader, PnmSubtype, SampleEncoding};
+use crate::color::{ColorType, ExtendedColorType};
+use crate::error::{
+ DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::{self, ImageDecoder, ImageFormat};
+use crate::utils;
+
+use byteorder::{BigEndian, ByteOrder, NativeEndian};
+
+/// All errors that can occur when attempting to parse a PNM
+#[derive(Debug, Clone)]
+enum DecoderError {
+ /// PNM's "P[123456]" signature wrong or missing
+ PnmMagicInvalid([u8; 2]),
+ /// Couldn't parse the specified string as an integer from the specified source
+ UnparsableValue(ErrorDataSource, String, ParseIntError),
+
+ /// The header contained a non-ASCII byte
+ NonAsciiByteInHeader(u8),
+ /// The PAM header contained a non-ASCII byte
+ NonAsciiLineInPamHeader,
+ /// A sample string contained a non-ASCII byte
+ NonAsciiSample,
+
+ /// The byte after the P7 magic was not 0x0A NEWLINE
+ NotNewlineAfterP7Magic(u8),
+ /// The PNM header had too few lines
+ UnexpectedPnmHeaderEnd,
+
+ /// The specified line was specified twice
+ HeaderLineDuplicated(PnmHeaderLine),
+ /// The line with the specified ID was not understood
+ HeaderLineUnknown(String),
+ /// At least one of the required lines was missing from the header (the missing ones are `None` here)
+ ///
+ /// Same names as [`PnmHeaderLine`](enum.PnmHeaderLine.html)
+ #[allow(missing_docs)]
+ HeaderLineMissing {
+ height: Option<u32>,
+ width: Option<u32>,
+ depth: Option<u32>,
+ maxval: Option<u32>,
+ },
+
+ /// Not enough data was provided to the Decoder to decode the image
+ InputTooShort,
+ /// Sample raster contained unexpected byte
+ UnexpectedByteInRaster(u8),
+ /// Specified sample was out of bounds (e.g. >1 in B&W)
+ SampleOutOfBounds(u8),
+ /// The image's maxval exceeds 0xFFFF
+ MaxvalTooBig(u32),
+
+ /// The specified tuple type supports only restricted depths and maxvals, and those restrictions were not met
+ InvalidDepthOrMaxval {
+ tuple_type: ArbitraryTuplType,
+ depth: u32,
+ maxval: u32,
+ },
+ /// The specified tuple type supports only restricted depths, and those restrictions were not met
+ InvalidDepth {
+ tuple_type: ArbitraryTuplType,
+ depth: u32,
+ },
+ /// The tuple type was not recognised by the parser
+ TupleTypeUnrecognised,
+
+ /// A parsed value overflowed its target type
+ Overflow,
+}
+
+impl Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DecoderError::PnmMagicInvalid(magic) => f.write_fmt(format_args!(
+ "Expected magic constant for PNM: P1..P7, got [{:#04X?}, {:#04X?}]",
+ magic[0], magic[1]
+ )),
+ DecoderError::UnparsableValue(src, data, err) => {
+ f.write_fmt(format_args!("Error parsing {:?} as {}: {}", data, src, err))
+ }
+
+ DecoderError::NonAsciiByteInHeader(c) => {
+ f.write_fmt(format_args!("Non-ASCII character {:#04X?} in header", c))
+ }
+ DecoderError::NonAsciiLineInPamHeader => f.write_str("Non-ASCII line in PAM header"),
+ DecoderError::NonAsciiSample => {
+ f.write_str("Non-ASCII character where sample value was expected")
+ }
+
+ DecoderError::NotNewlineAfterP7Magic(c) => f.write_fmt(format_args!(
+ "Expected newline after P7 magic, got {:#04X?}",
+ c
+ )),
+ DecoderError::UnexpectedPnmHeaderEnd => f.write_str("Unexpected end of PNM header"),
+
+ DecoderError::HeaderLineDuplicated(line) => {
+ f.write_fmt(format_args!("Duplicate {} line", line))
+ }
+ DecoderError::HeaderLineUnknown(identifier) => f.write_fmt(format_args!(
+ "Unknown header line with identifier {:?}",
+ identifier
+ )),
+ DecoderError::HeaderLineMissing {
+ height,
+ width,
+ depth,
+ maxval,
+ } => f.write_fmt(format_args!(
+ "Missing header line: have height={:?}, width={:?}, depth={:?}, maxval={:?}",
+ height, width, depth, maxval
+ )),
+
+ DecoderError::InputTooShort => {
+ f.write_str("Not enough data was provided to the Decoder to decode the image")
+ }
+ DecoderError::UnexpectedByteInRaster(c) => f.write_fmt(format_args!(
+ "Unexpected character {:#04X?} within sample raster",
+ c
+ )),
+ DecoderError::SampleOutOfBounds(val) => {
+ f.write_fmt(format_args!("Sample value {} outside of bounds", val))
+ }
+ DecoderError::MaxvalTooBig(maxval) => {
+ f.write_fmt(format_args!("Image MAXVAL exceeds {}: {}", 0xFFFF, maxval))
+ }
+
+ DecoderError::InvalidDepthOrMaxval {
+ tuple_type,
+ depth,
+ maxval,
+ } => f.write_fmt(format_args!(
+ "Invalid depth ({}) or maxval ({}) for tuple type {}",
+ depth,
+ maxval,
+ tuple_type.name()
+ )),
+ DecoderError::InvalidDepth { tuple_type, depth } => f.write_fmt(format_args!(
+ "Invalid depth ({}) for tuple type {}",
+ depth,
+ tuple_type.name()
+ )),
+ DecoderError::TupleTypeUnrecognised => f.write_str("Tuple type not recognized"),
+ DecoderError::Overflow => f.write_str("Overflow when parsing value"),
+ }
+ }
+}
+
+/// Note: should `pnm` be extracted into a separate crate,
+/// this will need to be hidden until that crate hits version `1.0`.
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Pnm.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {
+ fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+ match self {
+ DecoderError::UnparsableValue(_, _, err) => Some(err),
+ _ => None,
+ }
+ }
+}
+
+/// Single-value lines in a PNM header
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+enum PnmHeaderLine {
+ /// "HEIGHT"
+ Height,
+ /// "WIDTH"
+ Width,
+ /// "DEPTH"
+ Depth,
+ /// "MAXVAL", a.k.a. `maxwhite`
+ Maxval,
+}
+
+impl Display for PnmHeaderLine {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(match self {
+ PnmHeaderLine::Height => "HEIGHT",
+ PnmHeaderLine::Width => "WIDTH",
+ PnmHeaderLine::Depth => "DEPTH",
+ PnmHeaderLine::Maxval => "MAXVAL",
+ })
+ }
+}
+
+/// Source from which an unparsable value originated
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+enum ErrorDataSource {
+ /// One of the header lines
+ Line(PnmHeaderLine),
+ /// Value in the preamble
+ Preamble,
+ /// Sample/pixel data
+ Sample,
+}
+
+impl Display for ErrorDataSource {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ ErrorDataSource::Line(l) => l.fmt(f),
+ ErrorDataSource::Preamble => f.write_str("number in preamble"),
+ ErrorDataSource::Sample => f.write_str("sample"),
+ }
+ }
+}
+
+/// Dynamic representation, represents all decodable (sample, depth) combinations.
+#[derive(Clone, Copy)]
+enum TupleType {
+ PbmBit,
+ BWBit,
+ GrayU8,
+ GrayU16,
+ RGBU8,
+ RGBU16,
+}
+
+trait Sample {
+ fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize>;
+ fn from_bytes(bytes: &[u8], row_size: usize, output_buf: &mut [u8]) -> ImageResult<()>;
+ fn from_ascii(reader: &mut dyn Read, output_buf: &mut [u8]) -> ImageResult<()>;
+}
+
+struct U8;
+struct U16;
+struct PbmBit;
+struct BWBit;
+
+trait DecodableImageHeader {
+ fn tuple_type(&self) -> ImageResult<TupleType>;
+}
+
+/// PNM decoder
+pub struct PnmDecoder<R> {
+ reader: R,
+ header: PnmHeader,
+ tuple: TupleType,
+}
+
+impl<R: BufRead> PnmDecoder<R> {
+ /// Create a new decoder that decodes from the stream ```read```
+ pub fn new(mut buffered_read: R) -> ImageResult<PnmDecoder<R>> {
+ let magic = buffered_read.read_magic_constant()?;
+
+ let subtype = match magic {
+ [b'P', b'1'] => PnmSubtype::Bitmap(SampleEncoding::Ascii),
+ [b'P', b'2'] => PnmSubtype::Graymap(SampleEncoding::Ascii),
+ [b'P', b'3'] => PnmSubtype::Pixmap(SampleEncoding::Ascii),
+ [b'P', b'4'] => PnmSubtype::Bitmap(SampleEncoding::Binary),
+ [b'P', b'5'] => PnmSubtype::Graymap(SampleEncoding::Binary),
+ [b'P', b'6'] => PnmSubtype::Pixmap(SampleEncoding::Binary),
+ [b'P', b'7'] => PnmSubtype::ArbitraryMap,
+ _ => return Err(DecoderError::PnmMagicInvalid(magic).into()),
+ };
+
+ let decoder = match subtype {
+ PnmSubtype::Bitmap(enc) => PnmDecoder::read_bitmap_header(buffered_read, enc),
+ PnmSubtype::Graymap(enc) => PnmDecoder::read_graymap_header(buffered_read, enc),
+ PnmSubtype::Pixmap(enc) => PnmDecoder::read_pixmap_header(buffered_read, enc),
+ PnmSubtype::ArbitraryMap => PnmDecoder::read_arbitrary_header(buffered_read),
+ }?;
+
+ if utils::check_dimension_overflow(
+ decoder.dimensions().0,
+ decoder.dimensions().1,
+ decoder.color_type().bytes_per_pixel(),
+ ) {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Pnm.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "Image dimensions ({}x{}) are too large",
+ decoder.dimensions().0,
+ decoder.dimensions().1
+ )),
+ ),
+ ));
+ }
+
+ Ok(decoder)
+ }
+
+ /// Extract the reader and header after an image has been read.
+ pub fn into_inner(self) -> (R, PnmHeader) {
+ (self.reader, self.header)
+ }
+
+ fn read_bitmap_header(mut reader: R, encoding: SampleEncoding) -> ImageResult<PnmDecoder<R>> {
+ let header = reader.read_bitmap_header(encoding)?;
+ Ok(PnmDecoder {
+ reader,
+ tuple: TupleType::PbmBit,
+ header: PnmHeader {
+ decoded: HeaderRecord::Bitmap(header),
+ encoded: None,
+ },
+ })
+ }
+
+ fn read_graymap_header(mut reader: R, encoding: SampleEncoding) -> ImageResult<PnmDecoder<R>> {
+ let header = reader.read_graymap_header(encoding)?;
+ let tuple_type = header.tuple_type()?;
+ Ok(PnmDecoder {
+ reader,
+ tuple: tuple_type,
+ header: PnmHeader {
+ decoded: HeaderRecord::Graymap(header),
+ encoded: None,
+ },
+ })
+ }
+
+ fn read_pixmap_header(mut reader: R, encoding: SampleEncoding) -> ImageResult<PnmDecoder<R>> {
+ let header = reader.read_pixmap_header(encoding)?;
+ let tuple_type = header.tuple_type()?;
+ Ok(PnmDecoder {
+ reader,
+ tuple: tuple_type,
+ header: PnmHeader {
+ decoded: HeaderRecord::Pixmap(header),
+ encoded: None,
+ },
+ })
+ }
+
+ fn read_arbitrary_header(mut reader: R) -> ImageResult<PnmDecoder<R>> {
+ let header = reader.read_arbitrary_header()?;
+ let tuple_type = header.tuple_type()?;
+ Ok(PnmDecoder {
+ reader,
+ tuple: tuple_type,
+ header: PnmHeader {
+ decoded: HeaderRecord::Arbitrary(header),
+ encoded: None,
+ },
+ })
+ }
+}
+
+trait HeaderReader: BufRead {
+ /// Reads the two magic constant bytes
+ fn read_magic_constant(&mut self) -> ImageResult<[u8; 2]> {
+ let mut magic: [u8; 2] = [0, 0];
+ self.read_exact(&mut magic)?;
+ Ok(magic)
+ }
+
+ /// Reads a string as well as a single whitespace after it, ignoring comments
+ fn read_next_string(&mut self) -> ImageResult<String> {
+ let mut bytes = Vec::new();
+
+ // pair input bytes with a bool mask to remove comments
+ let mark_comments = self.bytes().scan(true, |partof, read| {
+ let byte = match read {
+ Err(err) => return Some((*partof, Err(err))),
+ Ok(byte) => byte,
+ };
+ let cur_enabled = *partof && byte != b'#';
+ let next_enabled = cur_enabled || (byte == b'\r' || byte == b'\n');
+ *partof = next_enabled;
+ Some((cur_enabled, Ok(byte)))
+ });
+
+ for (_, byte) in mark_comments.filter(|e| e.0) {
+ match byte {
+ Ok(b'\t') | Ok(b'\n') | Ok(b'\x0b') | Ok(b'\x0c') | Ok(b'\r') | Ok(b' ') => {
+ if !bytes.is_empty() {
+ break; // We're done as we already have some content
+ }
+ }
+ Ok(byte) if !byte.is_ascii() => {
+ return Err(DecoderError::NonAsciiByteInHeader(byte).into())
+ }
+ Ok(byte) => {
+ bytes.push(byte);
+ }
+ Err(_) => break,
+ }
+ }
+
+ if bytes.is_empty() {
+ return Err(ImageError::IoError(io::ErrorKind::UnexpectedEof.into()));
+ }
+
+ if !bytes.as_slice().is_ascii() {
+ // We have only filled the buffer with characters for which `byte.is_ascii()` holds.
+ unreachable!("Non-ASCII character should have returned sooner")
+ }
+
+ let string = String::from_utf8(bytes)
+ // We checked the precondition ourselves a few lines before, `bytes.as_slice().is_ascii()`.
+ .unwrap_or_else(|_| unreachable!("Only ASCII characters should be decoded"));
+
+ Ok(string)
+ }
+
+ /// Read the next line
+ fn read_next_line(&mut self) -> ImageResult<String> {
+ let mut buffer = String::new();
+ self.read_line(&mut buffer)?;
+ Ok(buffer)
+ }
+
+ fn read_next_u32(&mut self) -> ImageResult<u32> {
+ let s = self.read_next_string()?;
+ s.parse::<u32>()
+ .map_err(|err| DecoderError::UnparsableValue(ErrorDataSource::Preamble, s, err).into())
+ }
+
+ fn read_bitmap_header(&mut self, encoding: SampleEncoding) -> ImageResult<BitmapHeader> {
+ let width = self.read_next_u32()?;
+ let height = self.read_next_u32()?;
+ Ok(BitmapHeader {
+ encoding,
+ width,
+ height,
+ })
+ }
+
+ fn read_graymap_header(&mut self, encoding: SampleEncoding) -> ImageResult<GraymapHeader> {
+ self.read_pixmap_header(encoding).map(
+ |PixmapHeader {
+ encoding,
+ width,
+ height,
+ maxval,
+ }| GraymapHeader {
+ encoding,
+ width,
+ height,
+ maxwhite: maxval,
+ },
+ )
+ }
+
+ fn read_pixmap_header(&mut self, encoding: SampleEncoding) -> ImageResult<PixmapHeader> {
+ let width = self.read_next_u32()?;
+ let height = self.read_next_u32()?;
+ let maxval = self.read_next_u32()?;
+ Ok(PixmapHeader {
+ encoding,
+ width,
+ height,
+ maxval,
+ })
+ }
+
+ fn read_arbitrary_header(&mut self) -> ImageResult<ArbitraryHeader> {
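+        // After the `P7` magic (already consumed by the caller), the remainder of a PAM header
+        // looks like the example below (taken from the tests in this file):
+        //
+        //     WIDTH 4
+        //     HEIGHT 4
+        //     DEPTH 1
+        //     MAXVAL 1
+        //     TUPLTYPE BLACKANDWHITE
+        //     ENDHDR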
+ fn parse_single_value_line(
+ line_val: &mut Option<u32>,
+ rest: &str,
+ line: PnmHeaderLine,
+ ) -> ImageResult<()> {
+ if line_val.is_some() {
+ Err(DecoderError::HeaderLineDuplicated(line).into())
+ } else {
+ let v = rest.trim().parse().map_err(|err| {
+ DecoderError::UnparsableValue(ErrorDataSource::Line(line), rest.to_owned(), err)
+ })?;
+ *line_val = Some(v);
+ Ok(())
+ }
+ }
+
+ match self.bytes().next() {
+ None => return Err(ImageError::IoError(io::ErrorKind::UnexpectedEof.into())),
+ Some(Err(io)) => return Err(ImageError::IoError(io)),
+ Some(Ok(b'\n')) => (),
+ Some(Ok(c)) => return Err(DecoderError::NotNewlineAfterP7Magic(c).into()),
+ }
+
+ let mut line = String::new();
+ let mut height: Option<u32> = None;
+ let mut width: Option<u32> = None;
+ let mut depth: Option<u32> = None;
+ let mut maxval: Option<u32> = None;
+ let mut tupltype: Option<String> = None;
+ loop {
+ line.truncate(0);
+ let len = self.read_line(&mut line)?;
+ if len == 0 {
+ return Err(DecoderError::UnexpectedPnmHeaderEnd.into());
+ }
+ if line.as_bytes()[0] == b'#' {
+ continue;
+ }
+ if !line.is_ascii() {
+ return Err(DecoderError::NonAsciiLineInPamHeader.into());
+ }
+ #[allow(deprecated)]
+ let (identifier, rest) = line
+ .trim_left()
+ .split_at(line.find(char::is_whitespace).unwrap_or(line.len()));
+ match identifier {
+ "ENDHDR" => break,
+ "HEIGHT" => parse_single_value_line(&mut height, rest, PnmHeaderLine::Height)?,
+ "WIDTH" => parse_single_value_line(&mut width, rest, PnmHeaderLine::Width)?,
+ "DEPTH" => parse_single_value_line(&mut depth, rest, PnmHeaderLine::Depth)?,
+ "MAXVAL" => parse_single_value_line(&mut maxval, rest, PnmHeaderLine::Maxval)?,
+ "TUPLTYPE" => {
+ let identifier = rest.trim();
+ if tupltype.is_some() {
+ let appended = tupltype.take().map(|mut v| {
+ v.push(' ');
+ v.push_str(identifier);
+ v
+ });
+ tupltype = appended;
+ } else {
+ tupltype = Some(identifier.to_string());
+ }
+ }
+ _ => return Err(DecoderError::HeaderLineUnknown(identifier.to_string()).into()),
+ }
+ }
+
+ let (h, w, d, m) = match (height, width, depth, maxval) {
+ (Some(h), Some(w), Some(d), Some(m)) => (h, w, d, m),
+ _ => {
+ return Err(DecoderError::HeaderLineMissing {
+ height,
+ width,
+ depth,
+ maxval,
+ }
+ .into())
+ }
+ };
+
+ let tupltype = match tupltype {
+ None => None,
+ Some(ref t) if t == "BLACKANDWHITE" => Some(ArbitraryTuplType::BlackAndWhite),
+ Some(ref t) if t == "BLACKANDWHITE_ALPHA" => {
+ Some(ArbitraryTuplType::BlackAndWhiteAlpha)
+ }
+ Some(ref t) if t == "GRAYSCALE" => Some(ArbitraryTuplType::Grayscale),
+ Some(ref t) if t == "GRAYSCALE_ALPHA" => Some(ArbitraryTuplType::GrayscaleAlpha),
+ Some(ref t) if t == "RGB" => Some(ArbitraryTuplType::RGB),
+ Some(ref t) if t == "RGB_ALPHA" => Some(ArbitraryTuplType::RGBAlpha),
+ Some(other) => Some(ArbitraryTuplType::Custom(other)),
+ };
+
+ Ok(ArbitraryHeader {
+ height: h,
+ width: w,
+ depth: d,
+ maxval: m,
+ tupltype,
+ })
+ }
+}
+
+impl<R> HeaderReader for R where R: BufRead {}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct PnmReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for PnmReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
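+        // Fast path: if nothing has been read from the cursor yet and the caller's buffer is
+        // empty, hand the decoded bytes over without copying by swapping the vectors.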
+ if self.0.position() == 0 && buf.is_empty() {
+ mem::swap(buf, self.0.get_mut());
+ Ok(buf.len())
+ } else {
+ self.0.read_to_end(buf)
+ }
+ }
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for PnmDecoder<R> {
+ type Reader = PnmReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.header.width(), self.header.height())
+ }
+
+ fn color_type(&self) -> ColorType {
+ match self.tuple {
+ TupleType::PbmBit => ColorType::L8,
+ TupleType::BWBit => ColorType::L8,
+ TupleType::GrayU8 => ColorType::L8,
+ TupleType::GrayU16 => ColorType::L16,
+ TupleType::RGBU8 => ColorType::Rgb8,
+ TupleType::RGBU16 => ColorType::Rgb16,
+ }
+ }
+
+ fn original_color_type(&self) -> ExtendedColorType {
+ match self.tuple {
+ TupleType::PbmBit => ExtendedColorType::L1,
+ TupleType::BWBit => ExtendedColorType::L1,
+ TupleType::GrayU8 => ExtendedColorType::L8,
+ TupleType::GrayU16 => ExtendedColorType::L16,
+ TupleType::RGBU8 => ExtendedColorType::Rgb8,
+ TupleType::RGBU16 => ExtendedColorType::Rgb16,
+ }
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ Ok(PnmReader(
+ Cursor::new(image::decoder_to_vec(self)?),
+ PhantomData,
+ ))
+ }
+
+ fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+ match self.tuple {
+ TupleType::PbmBit => self.read_samples::<PbmBit>(1, buf),
+ TupleType::BWBit => self.read_samples::<BWBit>(1, buf),
+ TupleType::RGBU8 => self.read_samples::<U8>(3, buf),
+ TupleType::RGBU16 => self.read_samples::<U16>(3, buf),
+ TupleType::GrayU8 => self.read_samples::<U8>(1, buf),
+ TupleType::GrayU16 => self.read_samples::<U16>(1, buf),
+ }
+ }
+}
+
+impl<R: Read> PnmDecoder<R> {
+ fn read_samples<S: Sample>(&mut self, components: u32, buf: &mut [u8]) -> ImageResult<()> {
+ match self.subtype().sample_encoding() {
+ SampleEncoding::Binary => {
+ let width = self.header.width();
+ let height = self.header.height();
+ let bytecount = S::bytelen(width, height, components)?;
+
+ let mut bytes = vec![];
+ self.reader
+ .by_ref()
+                    // This conversion is potentially lossy, but that is unlikely, and in that
+                    // case we error later anyway.
+ .take(bytecount as u64)
+ .read_to_end(&mut bytes)?;
+ if bytes.len() != bytecount {
+ return Err(DecoderError::InputTooShort.into());
+ }
+
+ let width: usize = width.try_into().map_err(|_| DecoderError::Overflow)?;
+ let components: usize =
+ components.try_into().map_err(|_| DecoderError::Overflow)?;
+ let row_size = width
+ .checked_mul(components)
+ .ok_or(DecoderError::Overflow)?;
+
+ S::from_bytes(&bytes, row_size, buf)
+ }
+ SampleEncoding::Ascii => self.read_ascii::<S>(buf),
+ }
+ }
+
+ fn read_ascii<Basic: Sample>(&mut self, output_buf: &mut [u8]) -> ImageResult<()> {
+ Basic::from_ascii(&mut self.reader, output_buf)
+ }
+
+ /// Get the pnm subtype, depending on the magic constant contained in the header
+ pub fn subtype(&self) -> PnmSubtype {
+ self.header.subtype()
+ }
+}
+
+fn read_separated_ascii<T: FromStr<Err = ParseIntError>>(reader: &mut dyn Read) -> ImageResult<T>
+where
+ T::Err: Display,
+{
+ let is_separator = |v: &u8| matches! { *v, b'\t' | b'\n' | b'\x0b' | b'\x0c' | b'\r' | b' ' };
+
+ let token = reader
+ .bytes()
+ .skip_while(|v| v.as_ref().ok().map(is_separator).unwrap_or(false))
+ .take_while(|v| v.as_ref().ok().map(|c| !is_separator(c)).unwrap_or(false))
+ .collect::<Result<Vec<u8>, _>>()?;
+
+ if !token.is_ascii() {
+ return Err(DecoderError::NonAsciiSample.into());
+ }
+
+ let string = str::from_utf8(&token)
+ // We checked the precondition ourselves a few lines before with `token.is_ascii()`.
+ .unwrap_or_else(|_| unreachable!("Only ASCII characters should be decoded"));
+
+ string.parse().map_err(|err| {
+ DecoderError::UnparsableValue(ErrorDataSource::Sample, string.to_owned(), err).into()
+ })
+}
+
+impl Sample for U8 {
+ fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize> {
+ Ok((width * height * samples) as usize)
+ }
+
+ fn from_bytes(bytes: &[u8], _row_size: usize, output_buf: &mut [u8]) -> ImageResult<()> {
+ output_buf.copy_from_slice(bytes);
+ Ok(())
+ }
+
+ fn from_ascii(reader: &mut dyn Read, output_buf: &mut [u8]) -> ImageResult<()> {
+ for b in output_buf {
+ *b = read_separated_ascii(reader)?;
+ }
+ Ok(())
+ }
+}
+
+impl Sample for U16 {
+ fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize> {
+ Ok((width * height * samples * 2) as usize)
+ }
+
+ fn from_bytes(bytes: &[u8], _row_size: usize, output_buf: &mut [u8]) -> ImageResult<()> {
+ output_buf.copy_from_slice(bytes);
+ for chunk in output_buf.chunks_exact_mut(2) {
+ let v = BigEndian::read_u16(chunk);
+ NativeEndian::write_u16(chunk, v);
+ }
+ Ok(())
+ }
+
+ fn from_ascii(reader: &mut dyn Read, output_buf: &mut [u8]) -> ImageResult<()> {
+ for chunk in output_buf.chunks_exact_mut(2) {
+ let v = read_separated_ascii::<u16>(reader)?;
+ NativeEndian::write_u16(chunk, v);
+ }
+ Ok(())
+ }
+}
+
+// The image is encoded in rows of bits, high order bits first. Any bits beyond the row width
+// should be ignored. Also, contrary to the other tuple types, black pixels are encoded as 1
+// while white is 0. This needs to be reversed for the grayscale output.
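+// For example, the byte 0b0110_1100 in a 6-pixel-wide row decodes to the luma samples
+// [255, 0, 0, 255, 0, 0]; the two trailing padding bits are dropped.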
+impl Sample for PbmBit {
+ fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize> {
+ let count = width * samples;
+ let linelen = (count / 8) + ((count % 8) != 0) as u32;
+ Ok((linelen * height) as usize)
+ }
+
+ fn from_bytes(bytes: &[u8], row_size: usize, output_buf: &mut [u8]) -> ImageResult<()> {
+ let mut expanded = utils::expand_bits(1, row_size.try_into().unwrap(), bytes);
+ for b in expanded.iter_mut() {
+ *b = !*b;
+ }
+ output_buf.copy_from_slice(&expanded);
+ Ok(())
+ }
+
+ fn from_ascii(reader: &mut dyn Read, output_buf: &mut [u8]) -> ImageResult<()> {
+ let mut bytes = reader.bytes();
+ for b in output_buf {
+ loop {
+ let byte = bytes
+ .next()
+ .ok_or_else::<ImageError, _>(|| DecoderError::InputTooShort.into())??;
+ match byte {
+ b'\t' | b'\n' | b'\x0b' | b'\x0c' | b'\r' | b' ' => continue,
+ b'0' => *b = 255,
+ b'1' => *b = 0,
+ c => return Err(DecoderError::UnexpectedByteInRaster(c).into()),
+ }
+ break;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+// Encoded just like a normal U8 but we check the values.
+impl Sample for BWBit {
+ fn bytelen(width: u32, height: u32, samples: u32) -> ImageResult<usize> {
+ U8::bytelen(width, height, samples)
+ }
+
+ fn from_bytes(bytes: &[u8], row_size: usize, output_buf: &mut [u8]) -> ImageResult<()> {
+ U8::from_bytes(bytes, row_size, output_buf)?;
+ if let Some(val) = output_buf.iter().find(|&val| *val > 1) {
+ return Err(DecoderError::SampleOutOfBounds(*val).into());
+ }
+ Ok(())
+ }
+
+ fn from_ascii(_reader: &mut dyn Read, _output_buf: &mut [u8]) -> ImageResult<()> {
+ unreachable!("BW bits from anymaps are never encoded as ASCII")
+ }
+}
+
+impl DecodableImageHeader for BitmapHeader {
+ fn tuple_type(&self) -> ImageResult<TupleType> {
+ Ok(TupleType::PbmBit)
+ }
+}
+
+impl DecodableImageHeader for GraymapHeader {
+ fn tuple_type(&self) -> ImageResult<TupleType> {
+ match self.maxwhite {
+ v if v <= 0xFF => Ok(TupleType::GrayU8),
+ v if v <= 0xFFFF => Ok(TupleType::GrayU16),
+ _ => Err(DecoderError::MaxvalTooBig(self.maxwhite).into()),
+ }
+ }
+}
+
+impl DecodableImageHeader for PixmapHeader {
+ fn tuple_type(&self) -> ImageResult<TupleType> {
+ match self.maxval {
+ v if v <= 0xFF => Ok(TupleType::RGBU8),
+ v if v <= 0xFFFF => Ok(TupleType::RGBU16),
+ _ => Err(DecoderError::MaxvalTooBig(self.maxval).into()),
+ }
+ }
+}
+
+impl DecodableImageHeader for ArbitraryHeader {
+ fn tuple_type(&self) -> ImageResult<TupleType> {
+ match self.tupltype {
+ None if self.depth == 1 => Ok(TupleType::GrayU8),
+ None if self.depth == 2 => Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Pnm.into(),
+ UnsupportedErrorKind::Color(ExtendedColorType::La8),
+ ),
+ )),
+ None if self.depth == 3 => Ok(TupleType::RGBU8),
+ None if self.depth == 4 => Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Pnm.into(),
+ UnsupportedErrorKind::Color(ExtendedColorType::Rgba8),
+ ),
+ )),
+
+ Some(ArbitraryTuplType::BlackAndWhite) if self.maxval == 1 && self.depth == 1 => {
+ Ok(TupleType::BWBit)
+ }
+ Some(ArbitraryTuplType::BlackAndWhite) => Err(DecoderError::InvalidDepthOrMaxval {
+ tuple_type: ArbitraryTuplType::BlackAndWhite,
+ maxval: self.maxval,
+ depth: self.depth,
+ }
+ .into()),
+
+ Some(ArbitraryTuplType::Grayscale) if self.depth == 1 && self.maxval <= 0xFF => {
+ Ok(TupleType::GrayU8)
+ }
+ Some(ArbitraryTuplType::Grayscale) if self.depth <= 1 && self.maxval <= 0xFFFF => {
+ Ok(TupleType::GrayU16)
+ }
+ Some(ArbitraryTuplType::Grayscale) => Err(DecoderError::InvalidDepthOrMaxval {
+ tuple_type: ArbitraryTuplType::Grayscale,
+ maxval: self.maxval,
+ depth: self.depth,
+ }
+ .into()),
+
+ Some(ArbitraryTuplType::RGB) if self.depth == 3 && self.maxval <= 0xFF => {
+ Ok(TupleType::RGBU8)
+ }
+ Some(ArbitraryTuplType::RGB) if self.depth == 3 && self.maxval <= 0xFFFF => {
+ Ok(TupleType::RGBU16)
+ }
+ Some(ArbitraryTuplType::RGB) => Err(DecoderError::InvalidDepth {
+ tuple_type: ArbitraryTuplType::RGB,
+ depth: self.depth,
+ }
+ .into()),
+
+ Some(ArbitraryTuplType::BlackAndWhiteAlpha) => Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Pnm.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "Color type {}",
+ ArbitraryTuplType::BlackAndWhiteAlpha.name()
+ )),
+ ),
+ )),
+ Some(ArbitraryTuplType::GrayscaleAlpha) => Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Pnm.into(),
+ UnsupportedErrorKind::Color(ExtendedColorType::La8),
+ ),
+ )),
+ Some(ArbitraryTuplType::RGBAlpha) => Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Pnm.into(),
+ UnsupportedErrorKind::Color(ExtendedColorType::Rgba8),
+ ),
+ )),
+ Some(ArbitraryTuplType::Custom(ref custom)) => Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Pnm.into(),
+ UnsupportedErrorKind::GenericFeature(format!("Tuple type {:?}", custom)),
+ ),
+ )),
+ None => Err(DecoderError::TupleTypeUnrecognised.into()),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ /// Tests reading of a valid blackandwhite pam
+ #[test]
+ fn pam_blackandwhite() {
+ let pamdata = b"P7
+WIDTH 4
+HEIGHT 4
+DEPTH 1
+MAXVAL 1
+TUPLTYPE BLACKANDWHITE
+# Comment line
+ENDHDR
+\x01\x00\x00\x01\x01\x00\x00\x01\x01\x00\x00\x01\x01\x00\x00\x01";
+ let decoder = PnmDecoder::new(&pamdata[..]).unwrap();
+ assert_eq!(decoder.color_type(), ColorType::L8);
+ assert_eq!(decoder.original_color_type(), ExtendedColorType::L1);
+ assert_eq!(decoder.dimensions(), (4, 4));
+ assert_eq!(decoder.subtype(), PnmSubtype::ArbitraryMap);
+
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut image).unwrap();
+ assert_eq!(
+ image,
+ vec![
+ 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, 0x00,
+ 0x00, 0x01
+ ]
+ );
+ match PnmDecoder::new(&pamdata[..]).unwrap().into_inner() {
+ (
+ _,
+ PnmHeader {
+ decoded:
+ HeaderRecord::Arbitrary(ArbitraryHeader {
+ width: 4,
+ height: 4,
+ maxval: 1,
+ depth: 1,
+ tupltype: Some(ArbitraryTuplType::BlackAndWhite),
+ }),
+ encoded: _,
+ },
+ ) => (),
+ _ => panic!("Decoded header is incorrect"),
+ }
+ }
+
+ /// Tests reading of a valid grayscale pam
+ #[test]
+ fn pam_grayscale() {
+ let pamdata = b"P7
+WIDTH 4
+HEIGHT 4
+DEPTH 1
+MAXVAL 255
+TUPLTYPE GRAYSCALE
+# Comment line
+ENDHDR
+\xde\xad\xbe\xef\xde\xad\xbe\xef\xde\xad\xbe\xef\xde\xad\xbe\xef";
+ let decoder = PnmDecoder::new(&pamdata[..]).unwrap();
+ assert_eq!(decoder.color_type(), ColorType::L8);
+ assert_eq!(decoder.dimensions(), (4, 4));
+ assert_eq!(decoder.subtype(), PnmSubtype::ArbitraryMap);
+
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut image).unwrap();
+ assert_eq!(
+ image,
+ vec![
+ 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad,
+ 0xbe, 0xef
+ ]
+ );
+ match PnmDecoder::new(&pamdata[..]).unwrap().into_inner() {
+ (
+ _,
+ PnmHeader {
+ decoded:
+ HeaderRecord::Arbitrary(ArbitraryHeader {
+ width: 4,
+ height: 4,
+ depth: 1,
+ maxval: 255,
+ tupltype: Some(ArbitraryTuplType::Grayscale),
+ }),
+ encoded: _,
+ },
+ ) => (),
+ _ => panic!("Decoded header is incorrect"),
+ }
+ }
+
+ /// Tests reading of a valid rgb pam
+ #[test]
+ fn pam_rgb() {
+ let pamdata = b"P7
+# Comment line
+MAXVAL 255
+TUPLTYPE RGB
+DEPTH 3
+WIDTH 2
+HEIGHT 2
+ENDHDR
+\xde\xad\xbe\xef\xde\xad\xbe\xef\xde\xad\xbe\xef";
+ let decoder = PnmDecoder::new(&pamdata[..]).unwrap();
+ assert_eq!(decoder.color_type(), ColorType::Rgb8);
+ assert_eq!(decoder.dimensions(), (2, 2));
+ assert_eq!(decoder.subtype(), PnmSubtype::ArbitraryMap);
+
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut image).unwrap();
+ assert_eq!(
+ image,
+ vec![0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef]
+ );
+ match PnmDecoder::new(&pamdata[..]).unwrap().into_inner() {
+ (
+ _,
+ PnmHeader {
+ decoded:
+ HeaderRecord::Arbitrary(ArbitraryHeader {
+ maxval: 255,
+ tupltype: Some(ArbitraryTuplType::RGB),
+ depth: 3,
+ width: 2,
+ height: 2,
+ }),
+ encoded: _,
+ },
+ ) => (),
+ _ => panic!("Decoded header is incorrect"),
+ }
+ }
+
+ #[test]
+ fn pbm_binary() {
+        // The data contains two rows of the image (each row is padded to a full byte). For
+        // comments on its format, see the documentation of `impl Sample for PbmBit`.
+ let pbmbinary = [&b"P4 6 2\n"[..], &[0b01101100 as u8, 0b10110111]].concat();
+ let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap();
+ assert_eq!(decoder.color_type(), ColorType::L8);
+ assert_eq!(decoder.original_color_type(), ExtendedColorType::L1);
+ assert_eq!(decoder.dimensions(), (6, 2));
+ assert_eq!(
+ decoder.subtype(),
+ PnmSubtype::Bitmap(SampleEncoding::Binary)
+ );
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut image).unwrap();
+ assert_eq!(image, vec![255, 0, 0, 255, 0, 0, 0, 255, 0, 0, 255, 0]);
+ match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() {
+ (
+ _,
+ PnmHeader {
+ decoded:
+ HeaderRecord::Bitmap(BitmapHeader {
+ encoding: SampleEncoding::Binary,
+ width: 6,
+ height: 2,
+ }),
+ encoded: _,
+ },
+ ) => (),
+ _ => panic!("Decoded header is incorrect"),
+ }
+ }
+
+    /// Regression test for a previous infinite loop.
+ #[test]
+ fn pbm_binary_ascii_termination() {
+ use std::io::{BufReader, Cursor, Error, ErrorKind, Read, Result};
+ struct FailRead(Cursor<&'static [u8]>);
+
+ impl Read for FailRead {
+ fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
+ match self.0.read(buf) {
+ Ok(n) if n > 0 => Ok(n),
+ _ => Err(Error::new(
+ ErrorKind::BrokenPipe,
+ "Simulated broken pipe error",
+ )),
+ }
+ }
+ }
+
+ let pbmbinary = BufReader::new(FailRead(Cursor::new(b"P1 1 1\n")));
+
+ let decoder = PnmDecoder::new(pbmbinary).unwrap();
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder
+ .read_image(&mut image)
+ .expect_err("Image is malformed");
+ }
+
+ #[test]
+ fn pbm_ascii() {
+        // The data contains two rows of the image as ASCII-encoded bits. For comments on the
+        // pixel interpretation, see the documentation of `impl Sample for PbmBit`. Tests all
+        // whitespace characters that should be allowed (the 6 characters according to POSIX).
+ let pbmbinary = b"P1 6 2\n 0 1 1 0 1 1\n1 0 1 1 0\t\n\x0b\x0c\r1";
+ let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap();
+ assert_eq!(decoder.color_type(), ColorType::L8);
+ assert_eq!(decoder.original_color_type(), ExtendedColorType::L1);
+ assert_eq!(decoder.dimensions(), (6, 2));
+ assert_eq!(decoder.subtype(), PnmSubtype::Bitmap(SampleEncoding::Ascii));
+
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut image).unwrap();
+ assert_eq!(image, vec![255, 0, 0, 255, 0, 0, 0, 255, 0, 0, 255, 0]);
+ match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() {
+ (
+ _,
+ PnmHeader {
+ decoded:
+ HeaderRecord::Bitmap(BitmapHeader {
+ encoding: SampleEncoding::Ascii,
+ width: 6,
+ height: 2,
+ }),
+ encoded: _,
+ },
+ ) => (),
+ _ => panic!("Decoded header is incorrect"),
+ }
+ }
+
+ #[test]
+ fn pbm_ascii_nospace() {
+        // The data contains two rows of the image as ASCII-encoded bits. Notably, it is
+        // completely within specification for the ascii data of the pbm format not to contain
+        // any separating whitespace, or to contain any mix of separated and unseparated digits.
+ let pbmbinary = b"P1 6 2\n011011101101";
+ let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap();
+ assert_eq!(decoder.color_type(), ColorType::L8);
+ assert_eq!(decoder.original_color_type(), ExtendedColorType::L1);
+ assert_eq!(decoder.dimensions(), (6, 2));
+ assert_eq!(decoder.subtype(), PnmSubtype::Bitmap(SampleEncoding::Ascii));
+
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut image).unwrap();
+ assert_eq!(image, vec![255, 0, 0, 255, 0, 0, 0, 255, 0, 0, 255, 0]);
+ match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() {
+ (
+ _,
+ PnmHeader {
+ decoded:
+ HeaderRecord::Bitmap(BitmapHeader {
+ encoding: SampleEncoding::Ascii,
+ width: 6,
+ height: 2,
+ }),
+ encoded: _,
+ },
+ ) => (),
+ _ => panic!("Decoded header is incorrect"),
+ }
+ }
+
+ #[test]
+ fn pgm_binary() {
+        // The data contains a 4x4 grayscale image with one binary byte per sample.
+ let elements = (0..16).collect::<Vec<_>>();
+ let pbmbinary = [&b"P5 4 4 255\n"[..], &elements].concat();
+ let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap();
+ assert_eq!(decoder.color_type(), ColorType::L8);
+ assert_eq!(decoder.dimensions(), (4, 4));
+ assert_eq!(
+ decoder.subtype(),
+ PnmSubtype::Graymap(SampleEncoding::Binary)
+ );
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut image).unwrap();
+ assert_eq!(image, elements);
+ match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() {
+ (
+ _,
+ PnmHeader {
+ decoded:
+ HeaderRecord::Graymap(GraymapHeader {
+ encoding: SampleEncoding::Binary,
+ width: 4,
+ height: 4,
+ maxwhite: 255,
+ }),
+ encoded: _,
+ },
+ ) => (),
+ _ => panic!("Decoded header is incorrect"),
+ }
+ }
+
+ #[test]
+ fn pgm_ascii() {
+        // The data contains a 4x4 grayscale image with ASCII-encoded, whitespace-separated samples.
+ let pbmbinary = b"P2 4 4 255\n 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15";
+ let decoder = PnmDecoder::new(&pbmbinary[..]).unwrap();
+ assert_eq!(decoder.color_type(), ColorType::L8);
+ assert_eq!(decoder.dimensions(), (4, 4));
+ assert_eq!(
+ decoder.subtype(),
+ PnmSubtype::Graymap(SampleEncoding::Ascii)
+ );
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut image).unwrap();
+ assert_eq!(image, (0..16).collect::<Vec<_>>());
+ match PnmDecoder::new(&pbmbinary[..]).unwrap().into_inner() {
+ (
+ _,
+ PnmHeader {
+ decoded:
+ HeaderRecord::Graymap(GraymapHeader {
+ encoding: SampleEncoding::Ascii,
+ width: 4,
+ height: 4,
+ maxwhite: 255,
+ }),
+ encoded: _,
+ },
+ ) => (),
+ _ => panic!("Decoded header is incorrect"),
+ }
+ }
+
+ #[test]
+ fn dimension_overflow() {
+ let pamdata = b"P7
+# Comment line
+MAXVAL 255
+TUPLTYPE RGB
+DEPTH 3
+WIDTH 4294967295
+HEIGHT 4294967295
+ENDHDR
+\xde\xad\xbe\xef\xde\xad\xbe\xef\xde\xad\xbe\xef";
+
+ assert!(PnmDecoder::new(&pamdata[..]).is_err());
+ }
+
+ #[test]
+ fn issue_1508() {
+ let _ = crate::load_from_memory(b"P391919 16999 1 1 9 919 16999 1 9999 999* 99999 N");
+ }
+
+ #[test]
+ fn issue_1616_overflow() {
+ let data = vec![
+ 80, 54, 10, 52, 50, 57, 52, 56, 50, 57, 52, 56, 35, 56, 10, 52, 10, 48, 10, 12, 12, 56,
+ ];
+        // Validate that we can parse a header. Note: we could already tell at this point that
+        // decoding will fail, but then we could not return the header information to the caller.
+ let decoder = PnmDecoder::new(&data[..]).unwrap();
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ let _ = decoder.read_image(&mut image);
+ }
+}
diff --git a/vendor/image/src/codecs/pnm/encoder.rs b/vendor/image/src/codecs/pnm/encoder.rs
new file mode 100644
index 0000000..9f823d0
--- /dev/null
+++ b/vendor/image/src/codecs/pnm/encoder.rs
@@ -0,0 +1,673 @@
+//! Encoding of PNM Images
+use std::fmt;
+use std::io;
+
+use std::io::Write;
+
+use super::AutoBreak;
+use super::{ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader, PixmapHeader};
+use super::{HeaderRecord, PnmHeader, PnmSubtype, SampleEncoding};
+use crate::color::{ColorType, ExtendedColorType};
+use crate::error::{
+ ImageError, ImageResult, ParameterError, ParameterErrorKind, UnsupportedError,
+ UnsupportedErrorKind,
+};
+use crate::image::{ImageEncoder, ImageFormat};
+
+use byteorder::{BigEndian, WriteBytesExt};
+
+enum HeaderStrategy {
+ Dynamic,
+ Subtype(PnmSubtype),
+ Chosen(PnmHeader),
+}
+
+#[derive(Clone, Copy)]
+pub enum FlatSamples<'a> {
+ U8(&'a [u8]),
+ U16(&'a [u16]),
+}
+
+/// Encodes images to any of the `pnm` image formats.
+pub struct PnmEncoder<W: Write> {
+ writer: W,
+ header: HeaderStrategy,
+}
+
+/// Encapsulate the checking system in the type system. None of the fields are actually accessed,
+/// but requiring them forces us to validly construct the struct anyway.
+struct CheckedImageBuffer<'a> {
+ _image: FlatSamples<'a>,
+ _width: u32,
+ _height: u32,
+ _color: ExtendedColorType,
+}
+
+// Check the header against the buffer. Each struct produces the next after a check.
+struct UncheckedHeader<'a> {
+ header: &'a PnmHeader,
+}
+
+struct CheckedDimensions<'a> {
+ unchecked: UncheckedHeader<'a>,
+ width: u32,
+ height: u32,
+}
+
+struct CheckedHeaderColor<'a> {
+ dimensions: CheckedDimensions<'a>,
+ color: ExtendedColorType,
+}
+
+struct CheckedHeader<'a> {
+ color: CheckedHeaderColor<'a>,
+ encoding: TupleEncoding<'a>,
+ _image: CheckedImageBuffer<'a>,
+}
+
+enum TupleEncoding<'a> {
+ PbmBits {
+ samples: FlatSamples<'a>,
+ width: u32,
+ },
+ Ascii {
+ samples: FlatSamples<'a>,
+ },
+ Bytes {
+ samples: FlatSamples<'a>,
+ },
+}
+
+impl<W: Write> PnmEncoder<W> {
+ /// Create new PnmEncoder from the `writer`.
+ ///
+ /// The encoded images will have some `pnm` format. If more control over the image type is
+ /// required, use either one of `with_subtype` or `with_header`. For more information on the
+ /// behaviour, see `with_dynamic_header`.
+ pub fn new(writer: W) -> Self {
+ PnmEncoder {
+ writer,
+ header: HeaderStrategy::Dynamic,
+ }
+ }
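+
+    // A minimal usage sketch (the buffer and dimension names are placeholders, not part of this
+    // module), mirroring the round-trip tests in `codecs/pnm/mod.rs`:
+    //
+    //     let mut out = Vec::new();
+    //     let mut encoder = PnmEncoder::new(&mut out)
+    //         .with_subtype(PnmSubtype::Graymap(SampleEncoding::Binary));
+    //     encoder.encode(&luma[..], width, height, ColorType::L8)?;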
+
+ /// Encode a specific pnm subtype image.
+ ///
+ /// The magic number and encoding type will be chosen as provided while the rest of the header
+ /// data will be generated dynamically. Trying to encode incompatible images (e.g. encoding an
+ /// RGB image as Graymap) will result in an error.
+ ///
+ /// This will overwrite the effect of earlier calls to `with_header` and `with_dynamic_header`.
+ pub fn with_subtype(self, subtype: PnmSubtype) -> Self {
+ PnmEncoder {
+ writer: self.writer,
+ header: HeaderStrategy::Subtype(subtype),
+ }
+ }
+
+ /// Enforce the use of a chosen header.
+ ///
+ /// While this option gives the most control over the actual written data, the encoding process
+    /// will error in case the header data and image parameters do not agree. It is the user's
+    /// obligation to ensure that, for example, the width and height are set accordingly.
+ ///
+ /// Choose this option if you want a lossless decoding/encoding round trip.
+ ///
+ /// This will overwrite the effect of earlier calls to `with_subtype` and `with_dynamic_header`.
+ pub fn with_header(self, header: PnmHeader) -> Self {
+ PnmEncoder {
+ writer: self.writer,
+ header: HeaderStrategy::Chosen(header),
+ }
+ }
+
+ /// Create the header dynamically for each image.
+ ///
+ /// This is the default option upon creation of the encoder. With this, most images should be
+    /// encodable, but the specific format chosen is out of the user's control. The pnm subtype is
+ /// chosen arbitrarily by the library.
+ ///
+ /// This will overwrite the effect of earlier calls to `with_subtype` and `with_header`.
+ pub fn with_dynamic_header(self) -> Self {
+ PnmEncoder {
+ writer: self.writer,
+ header: HeaderStrategy::Dynamic,
+ }
+ }
+
+    /// Encode an image whose samples are represented as `u8` or `u16`.
+    ///
+    /// Some `pnm` subtypes are incompatible with some color options; a chosen header is almost
+    /// certainly incompatible with any deviation from the originally decoded image.
+ pub fn encode<'s, S>(
+ &mut self,
+ image: S,
+ width: u32,
+ height: u32,
+ color: ColorType,
+ ) -> ImageResult<()>
+ where
+ S: Into<FlatSamples<'s>>,
+ {
+ let image = image.into();
+ match self.header {
+ HeaderStrategy::Dynamic => {
+ self.write_dynamic_header(image, width, height, color.into())
+ }
+ HeaderStrategy::Subtype(subtype) => {
+ self.write_subtyped_header(subtype, image, width, height, color.into())
+ }
+ HeaderStrategy::Chosen(ref header) => Self::write_with_header(
+ &mut self.writer,
+ header,
+ image,
+ width,
+ height,
+ color.into(),
+ ),
+ }
+ }
+
+    /// Choose any valid pnm format that the image can be expressed in, then write its header
+    /// followed by the image data.
+ fn write_dynamic_header(
+ &mut self,
+ image: FlatSamples,
+ width: u32,
+ height: u32,
+ color: ExtendedColorType,
+ ) -> ImageResult<()> {
+ let depth = u32::from(color.channel_count());
+ let (maxval, tupltype) = match color {
+ ExtendedColorType::L1 => (1, ArbitraryTuplType::BlackAndWhite),
+ ExtendedColorType::L8 => (0xff, ArbitraryTuplType::Grayscale),
+ ExtendedColorType::L16 => (0xffff, ArbitraryTuplType::Grayscale),
+ ExtendedColorType::La1 => (1, ArbitraryTuplType::BlackAndWhiteAlpha),
+ ExtendedColorType::La8 => (0xff, ArbitraryTuplType::GrayscaleAlpha),
+ ExtendedColorType::La16 => (0xffff, ArbitraryTuplType::GrayscaleAlpha),
+ ExtendedColorType::Rgb8 => (0xff, ArbitraryTuplType::RGB),
+ ExtendedColorType::Rgb16 => (0xffff, ArbitraryTuplType::RGB),
+ ExtendedColorType::Rgba8 => (0xff, ArbitraryTuplType::RGBAlpha),
+ ExtendedColorType::Rgba16 => (0xffff, ArbitraryTuplType::RGBAlpha),
+ _ => {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Pnm.into(),
+ UnsupportedErrorKind::Color(color),
+ ),
+ ))
+ }
+ };
+
+ let header = PnmHeader {
+ decoded: HeaderRecord::Arbitrary(ArbitraryHeader {
+ width,
+ height,
+ depth,
+ maxval,
+ tupltype: Some(tupltype),
+ }),
+ encoded: None,
+ };
+
+ Self::write_with_header(&mut self.writer, &header, image, width, height, color)
+ }
+
+    /// Try to encode the image with the chosen format, given its corresponding pixel encoding type.
+ fn write_subtyped_header(
+ &mut self,
+ subtype: PnmSubtype,
+ image: FlatSamples,
+ width: u32,
+ height: u32,
+ color: ExtendedColorType,
+ ) -> ImageResult<()> {
+ let header = match (subtype, color) {
+ (PnmSubtype::ArbitraryMap, color) => {
+ return self.write_dynamic_header(image, width, height, color)
+ }
+ (PnmSubtype::Pixmap(encoding), ExtendedColorType::Rgb8) => PnmHeader {
+ decoded: HeaderRecord::Pixmap(PixmapHeader {
+ encoding,
+ width,
+ height,
+ maxval: 255,
+ }),
+ encoded: None,
+ },
+ (PnmSubtype::Graymap(encoding), ExtendedColorType::L8) => PnmHeader {
+ decoded: HeaderRecord::Graymap(GraymapHeader {
+ encoding,
+ width,
+ height,
+ maxwhite: 255,
+ }),
+ encoded: None,
+ },
+ (PnmSubtype::Bitmap(encoding), ExtendedColorType::L8)
+ | (PnmSubtype::Bitmap(encoding), ExtendedColorType::L1) => PnmHeader {
+ decoded: HeaderRecord::Bitmap(BitmapHeader {
+ encoding,
+ width,
+ height,
+ }),
+ encoded: None,
+ },
+ (_, _) => {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(
+ "Color type can not be represented in the chosen format".to_owned(),
+ ),
+ )));
+ }
+ };
+
+ Self::write_with_header(&mut self.writer, &header, image, width, height, color)
+ }
+
+    /// Try to encode the image with the chosen header, checking that dimensions, color type and
+    /// sample values agree with it before writing the header and the image data.
+ fn write_with_header(
+ writer: &mut dyn Write,
+ header: &PnmHeader,
+ image: FlatSamples,
+ width: u32,
+ height: u32,
+ color: ExtendedColorType,
+ ) -> ImageResult<()> {
+ let unchecked = UncheckedHeader { header };
+
+ unchecked
+ .check_header_dimensions(width, height)?
+ .check_header_color(color)?
+ .check_sample_values(image)?
+ .write_header(writer)?
+ .write_image(writer)
+ }
+}
+
+impl<W: Write> ImageEncoder for PnmEncoder<W> {
+ fn write_image(
+ mut self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ self.encode(buf, width, height, color_type)
+ }
+}
+
+impl<'a> CheckedImageBuffer<'a> {
+ fn check(
+ image: FlatSamples<'a>,
+ width: u32,
+ height: u32,
+ color: ExtendedColorType,
+ ) -> ImageResult<CheckedImageBuffer<'a>> {
+ let components = color.channel_count() as usize;
+ let uwidth = width as usize;
+ let uheight = height as usize;
+ let expected_len = components
+ .checked_mul(uwidth)
+ .and_then(|v| v.checked_mul(uheight));
+ if Some(image.len()) != expected_len {
+ // Image buffer does not correspond to size and colour.
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+ Ok(CheckedImageBuffer {
+ _image: image,
+ _width: width,
+ _height: height,
+ _color: color,
+ })
+ }
+}
+
+impl<'a> UncheckedHeader<'a> {
+ fn check_header_dimensions(
+ self,
+ width: u32,
+ height: u32,
+ ) -> ImageResult<CheckedDimensions<'a>> {
+ if self.header.width() != width || self.header.height() != height {
+ // Chosen header does not match Image dimensions.
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+
+ Ok(CheckedDimensions {
+ unchecked: self,
+ width,
+ height,
+ })
+ }
+}
+
+impl<'a> CheckedDimensions<'a> {
+ // Check color compatibility with the header. This will only error when we are certain that
+    // the combination is bogus (e.g. combining Pixmap and Palette) but allows uncertain
+    // combinations (basically an `ArbitraryTuplType::Custom` with any color of fitting depth).
+ fn check_header_color(self, color: ExtendedColorType) -> ImageResult<CheckedHeaderColor<'a>> {
+ let components = u32::from(color.channel_count());
+
+ match *self.unchecked.header {
+ PnmHeader {
+ decoded: HeaderRecord::Bitmap(_),
+ ..
+ } => match color {
+ ExtendedColorType::L1 | ExtendedColorType::L8 | ExtendedColorType::L16 => (),
+ _ => {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(
+ "PBM format only support luma color types".to_owned(),
+ ),
+ )))
+ }
+ },
+ PnmHeader {
+ decoded: HeaderRecord::Graymap(_),
+ ..
+ } => match color {
+ ExtendedColorType::L1 | ExtendedColorType::L8 | ExtendedColorType::L16 => (),
+ _ => {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(
+ "PGM format only support luma color types".to_owned(),
+ ),
+ )))
+ }
+ },
+ PnmHeader {
+ decoded: HeaderRecord::Pixmap(_),
+ ..
+ } => match color {
+ ExtendedColorType::Rgb8 => (),
+ _ => {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(
+ "PPM format only support ExtendedColorType::Rgb8".to_owned(),
+ ),
+ )))
+ }
+ },
+ PnmHeader {
+ decoded:
+ HeaderRecord::Arbitrary(ArbitraryHeader {
+ depth,
+ ref tupltype,
+ ..
+ }),
+ ..
+ } => match (tupltype, color) {
+ (&Some(ArbitraryTuplType::BlackAndWhite), ExtendedColorType::L1) => (),
+ (&Some(ArbitraryTuplType::BlackAndWhiteAlpha), ExtendedColorType::La8) => (),
+
+ (&Some(ArbitraryTuplType::Grayscale), ExtendedColorType::L1) => (),
+ (&Some(ArbitraryTuplType::Grayscale), ExtendedColorType::L8) => (),
+ (&Some(ArbitraryTuplType::Grayscale), ExtendedColorType::L16) => (),
+ (&Some(ArbitraryTuplType::GrayscaleAlpha), ExtendedColorType::La8) => (),
+
+ (&Some(ArbitraryTuplType::RGB), ExtendedColorType::Rgb8) => (),
+ (&Some(ArbitraryTuplType::RGBAlpha), ExtendedColorType::Rgba8) => (),
+
+ (&None, _) if depth == components => (),
+ (&Some(ArbitraryTuplType::Custom(_)), _) if depth == components => (),
+ _ if depth != components => {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(format!(
+ "Depth mismatch: header {} vs. color {}",
+ depth, components
+ )),
+ )))
+ }
+ _ => {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(
+ "Invalid color type for selected PAM color type".to_owned(),
+ ),
+ )))
+ }
+ },
+ }
+
+ Ok(CheckedHeaderColor {
+ dimensions: self,
+ color,
+ })
+ }
+}
+
+impl<'a> CheckedHeaderColor<'a> {
+ fn check_sample_values(self, image: FlatSamples<'a>) -> ImageResult<CheckedHeader<'a>> {
+ let header_maxval = match self.dimensions.unchecked.header.decoded {
+ HeaderRecord::Bitmap(_) => 1,
+ HeaderRecord::Graymap(GraymapHeader { maxwhite, .. }) => maxwhite,
+ HeaderRecord::Pixmap(PixmapHeader { maxval, .. }) => maxval,
+ HeaderRecord::Arbitrary(ArbitraryHeader { maxval, .. }) => maxval,
+ };
+
+ // We trust the image color bit count to be correct at least.
+ let max_sample = match self.color {
+ ExtendedColorType::Unknown(n) if n <= 16 => (1 << n) - 1,
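+            // e.g. `Unknown(4)` gives a maximum sample value of (1 << 4) - 1 = 15.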
+ ExtendedColorType::L1 => 1,
+ ExtendedColorType::L8
+ | ExtendedColorType::La8
+ | ExtendedColorType::Rgb8
+ | ExtendedColorType::Rgba8
+ | ExtendedColorType::Bgr8
+ | ExtendedColorType::Bgra8 => 0xff,
+ ExtendedColorType::L16
+ | ExtendedColorType::La16
+ | ExtendedColorType::Rgb16
+ | ExtendedColorType::Rgba16 => 0xffff,
+ _ => {
+ // Unsupported target color type.
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Pnm.into(),
+ UnsupportedErrorKind::Color(self.color),
+ ),
+ ));
+ }
+ };
+
+        // Avoid the performance-heavy check if possible, e.g. if the header has been chosen by us.
+ if header_maxval < max_sample && !image.all_smaller(header_maxval) {
+ // Sample value greater than allowed for chosen header.
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Pnm.into(),
+ UnsupportedErrorKind::GenericFeature(
+ "Sample value greater than allowed for chosen header".to_owned(),
+ ),
+ ),
+ ));
+ }
+
+ let encoding = image.encoding_for(&self.dimensions.unchecked.header.decoded);
+
+ let image = CheckedImageBuffer::check(
+ image,
+ self.dimensions.width,
+ self.dimensions.height,
+ self.color,
+ )?;
+
+ Ok(CheckedHeader {
+ color: self,
+ encoding,
+ _image: image,
+ })
+ }
+}
+
+impl<'a> CheckedHeader<'a> {
+ fn write_header(self, writer: &mut dyn Write) -> ImageResult<TupleEncoding<'a>> {
+ self.header().write(writer)?;
+ Ok(self.encoding)
+ }
+
+ fn header(&self) -> &PnmHeader {
+ self.color.dimensions.unchecked.header
+ }
+}
+
+struct SampleWriter<'a>(&'a mut dyn Write);
+
+impl<'a> SampleWriter<'a> {
+ fn write_samples_ascii<V>(self, samples: V) -> io::Result<()>
+ where
+ V: Iterator,
+ V::Item: fmt::Display,
+ {
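+        // The plain (ASCII) netpbm formats recommend keeping output lines at 70 characters or
+        // fewer, hence the line-wrapping writer.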
+ let mut auto_break_writer = AutoBreak::new(self.0, 70);
+ for value in samples {
+ write!(auto_break_writer, "{} ", value)?;
+ }
+ auto_break_writer.flush()
+ }
+
+ fn write_pbm_bits<V>(self, samples: &[V], width: u32) -> io::Result<()>
+ /* Default gives 0 for all primitives. TODO: replace this with `Zeroable` once it hits stable */
+ where
+ V: Default + Eq + Copy,
+ {
+ // The length of an encoded scanline
+ let line_width = (width - 1) / 8 + 1;
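+        // e.g. a width of 6 pixels needs (6 - 1) / 8 + 1 = 1 encoded byte per scanline.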
+
+        // We'll be writing single bytes, so buffer them into whole scanlines first.
+ let mut line_buffer = Vec::with_capacity(line_width as usize);
+
+ for line in samples.chunks(width as usize) {
+ for byte_bits in line.chunks(8) {
+ let mut byte = 0u8;
+ for i in 0..8 {
+ // Black pixels are encoded as 1s
+ if let Some(&v) = byte_bits.get(i) {
+ if v == V::default() {
+ byte |= 1u8 << (7 - i)
+ }
+ }
+ }
+ line_buffer.push(byte)
+ }
+ self.0.write_all(line_buffer.as_slice())?;
+ line_buffer.clear();
+ }
+
+ self.0.flush()
+ }
+}
+
+impl<'a> FlatSamples<'a> {
+ fn len(&self) -> usize {
+ match *self {
+ FlatSamples::U8(arr) => arr.len(),
+ FlatSamples::U16(arr) => arr.len(),
+ }
+ }
+
+    /// Returns `true` when every sample value is at most `max_val`.
+    fn all_smaller(&self, max_val: u32) -> bool {
+        match *self {
+            FlatSamples::U8(arr) => arr.iter().all(|&val| u32::from(val) <= max_val),
+            FlatSamples::U16(arr) => arr.iter().all(|&val| u32::from(val) <= max_val),
+        }
+    }
+
+ fn encoding_for(&self, header: &HeaderRecord) -> TupleEncoding<'a> {
+ match *header {
+ HeaderRecord::Bitmap(BitmapHeader {
+ encoding: SampleEncoding::Binary,
+ width,
+ ..
+ }) => TupleEncoding::PbmBits {
+ samples: *self,
+ width,
+ },
+
+ HeaderRecord::Bitmap(BitmapHeader {
+ encoding: SampleEncoding::Ascii,
+ ..
+ }) => TupleEncoding::Ascii { samples: *self },
+
+ HeaderRecord::Arbitrary(_) => TupleEncoding::Bytes { samples: *self },
+
+ HeaderRecord::Graymap(GraymapHeader {
+ encoding: SampleEncoding::Ascii,
+ ..
+ })
+ | HeaderRecord::Pixmap(PixmapHeader {
+ encoding: SampleEncoding::Ascii,
+ ..
+ }) => TupleEncoding::Ascii { samples: *self },
+
+ HeaderRecord::Graymap(GraymapHeader {
+ encoding: SampleEncoding::Binary,
+ ..
+ })
+ | HeaderRecord::Pixmap(PixmapHeader {
+ encoding: SampleEncoding::Binary,
+ ..
+ }) => TupleEncoding::Bytes { samples: *self },
+ }
+ }
+}
+
+impl<'a> From<&'a [u8]> for FlatSamples<'a> {
+ fn from(samples: &'a [u8]) -> Self {
+ FlatSamples::U8(samples)
+ }
+}
+
+impl<'a> From<&'a [u16]> for FlatSamples<'a> {
+ fn from(samples: &'a [u16]) -> Self {
+ FlatSamples::U16(samples)
+ }
+}
+
+impl<'a> TupleEncoding<'a> {
+ fn write_image(&self, writer: &mut dyn Write) -> ImageResult<()> {
+ match *self {
+ TupleEncoding::PbmBits {
+ samples: FlatSamples::U8(samples),
+ width,
+ } => SampleWriter(writer)
+ .write_pbm_bits(samples, width)
+ .map_err(ImageError::IoError),
+ TupleEncoding::PbmBits {
+ samples: FlatSamples::U16(samples),
+ width,
+ } => SampleWriter(writer)
+ .write_pbm_bits(samples, width)
+ .map_err(ImageError::IoError),
+
+ TupleEncoding::Bytes {
+ samples: FlatSamples::U8(samples),
+ } => writer.write_all(samples).map_err(ImageError::IoError),
+ TupleEncoding::Bytes {
+ samples: FlatSamples::U16(samples),
+ } => samples.iter().try_for_each(|&sample| {
+ writer
+ .write_u16::<BigEndian>(sample)
+ .map_err(ImageError::IoError)
+ }),
+
+ TupleEncoding::Ascii {
+ samples: FlatSamples::U8(samples),
+ } => SampleWriter(writer)
+ .write_samples_ascii(samples.iter())
+ .map_err(ImageError::IoError),
+ TupleEncoding::Ascii {
+ samples: FlatSamples::U16(samples),
+ } => SampleWriter(writer)
+ .write_samples_ascii(samples.iter())
+ .map_err(ImageError::IoError),
+ }
+ }
+}
diff --git a/vendor/image/src/codecs/pnm/header.rs b/vendor/image/src/codecs/pnm/header.rs
new file mode 100644
index 0000000..443a701
--- /dev/null
+++ b/vendor/image/src/codecs/pnm/header.rs
@@ -0,0 +1,354 @@
+use std::{fmt, io};
+
+/// The kind of encoding used to store sample values
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum SampleEncoding {
+ /// Samples are unsigned binary integers in big endian
+ Binary,
+
+ /// Samples are encoded as decimal ascii strings separated by whitespace
+ Ascii,
+}
+
+/// Denotes the category of the magic number
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum PnmSubtype {
+ /// Magic numbers P1 and P4
+ Bitmap(SampleEncoding),
+
+ /// Magic numbers P2 and P5
+ Graymap(SampleEncoding),
+
+ /// Magic numbers P3 and P6
+ Pixmap(SampleEncoding),
+
+ /// Magic number P7
+ ArbitraryMap,
+}
+
+/// Stores the complete header data of a file.
+///
+/// Internally, provides mechanisms for lossless reencoding. After reading a file with the decoder
+/// it is possible to recover the header and construct an encoder. Using the encoder on the just
+/// loaded image should result in a byte copy of the original file (for single image pnms without
+/// additional trailing data).
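+//
+// A lossless round-trip sketch of the above (buffer names and dimensions are placeholders,
+// not part of this API):
+//
+//     let (_, header) = PnmDecoder::new(&encoded[..])?.into_inner();
+//     PnmEncoder::new(&mut out).with_header(header).encode(&pixels[..], w, h, color)?;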
+pub struct PnmHeader {
+ pub(crate) decoded: HeaderRecord,
+ pub(crate) encoded: Option<Vec<u8>>,
+}
+
+pub(crate) enum HeaderRecord {
+ Bitmap(BitmapHeader),
+ Graymap(GraymapHeader),
+ Pixmap(PixmapHeader),
+ Arbitrary(ArbitraryHeader),
+}
+
+/// Header produced by a `pbm` file ("Portable Bit Map")
+#[derive(Clone, Copy, Debug)]
+pub struct BitmapHeader {
+ /// Binary or Ascii encoded file
+ pub encoding: SampleEncoding,
+
+ /// Height of the image file
+ pub height: u32,
+
+ /// Width of the image file
+ pub width: u32,
+}
+
+/// Header produced by a `pgm` file ("Portable Gray Map")
+#[derive(Clone, Copy, Debug)]
+pub struct GraymapHeader {
+ /// Binary or Ascii encoded file
+ pub encoding: SampleEncoding,
+
+ /// Height of the image file
+ pub height: u32,
+
+ /// Width of the image file
+ pub width: u32,
+
+ /// Maximum sample value within the image
+ pub maxwhite: u32,
+}
+
+/// Header produced by a `ppm` file ("Portable Pixel Map")
+#[derive(Clone, Copy, Debug)]
+pub struct PixmapHeader {
+ /// Binary or Ascii encoded file
+ pub encoding: SampleEncoding,
+
+ /// Height of the image file
+ pub height: u32,
+
+ /// Width of the image file
+ pub width: u32,
+
+ /// Maximum sample value within the image
+ pub maxval: u32,
+}
+
+/// Header produced by a `pam` file ("Portable Arbitrary Map")
+#[derive(Clone, Debug)]
+pub struct ArbitraryHeader {
+ /// Height of the image file
+ pub height: u32,
+
+ /// Width of the image file
+ pub width: u32,
+
+ /// Number of color channels
+ pub depth: u32,
+
+ /// Maximum sample value within the image
+ pub maxval: u32,
+
+ /// Color interpretation of image pixels
+ pub tupltype: Option<ArbitraryTuplType>,
+}
+
+/// Standardized tuple type specifiers in the header of a `pam`.
+#[derive(Clone, Debug)]
+pub enum ArbitraryTuplType {
+ /// Pixels are either black (0) or white (1)
+ BlackAndWhite,
+
+ /// Pixels are either black (0) or white (1) and a second alpha channel
+ BlackAndWhiteAlpha,
+
+ /// Pixels represent the amount of white
+ Grayscale,
+
+ /// Grayscale with an additional alpha channel
+ GrayscaleAlpha,
+
+ /// Three channels: Red, Green, Blue
+ RGB,
+
+ /// Four channels: Red, Green, Blue, Alpha
+ RGBAlpha,
+
+ /// An image format which is not standardized
+ Custom(String),
+}
+
+impl ArbitraryTuplType {
+ pub(crate) fn name(&self) -> &str {
+ match self {
+ ArbitraryTuplType::BlackAndWhite => "BLACKANDWHITE",
+ ArbitraryTuplType::BlackAndWhiteAlpha => "BLACKANDWHITE_ALPHA",
+ ArbitraryTuplType::Grayscale => "GRAYSCALE",
+ ArbitraryTuplType::GrayscaleAlpha => "GRAYSCALE_ALPHA",
+ ArbitraryTuplType::RGB => "RGB",
+ ArbitraryTuplType::RGBAlpha => "RGB_ALPHA",
+ ArbitraryTuplType::Custom(custom) => custom,
+ }
+ }
+}
+
+impl PnmSubtype {
+ /// Get the two magic constant bytes corresponding to this format subtype.
+ pub fn magic_constant(self) -> &'static [u8; 2] {
+ match self {
+ PnmSubtype::Bitmap(SampleEncoding::Ascii) => b"P1",
+ PnmSubtype::Graymap(SampleEncoding::Ascii) => b"P2",
+ PnmSubtype::Pixmap(SampleEncoding::Ascii) => b"P3",
+ PnmSubtype::Bitmap(SampleEncoding::Binary) => b"P4",
+ PnmSubtype::Graymap(SampleEncoding::Binary) => b"P5",
+ PnmSubtype::Pixmap(SampleEncoding::Binary) => b"P6",
+ PnmSubtype::ArbitraryMap => b"P7",
+ }
+ }
+
+ /// Whether samples are stored as binary or as decimal ascii
+ pub fn sample_encoding(self) -> SampleEncoding {
+ match self {
+ PnmSubtype::ArbitraryMap => SampleEncoding::Binary,
+ PnmSubtype::Bitmap(enc) => enc,
+ PnmSubtype::Graymap(enc) => enc,
+ PnmSubtype::Pixmap(enc) => enc,
+ }
+ }
+}
+
+impl PnmHeader {
+ /// Retrieve the format subtype from which the header was created.
+ pub fn subtype(&self) -> PnmSubtype {
+ match self.decoded {
+ HeaderRecord::Bitmap(BitmapHeader { encoding, .. }) => PnmSubtype::Bitmap(encoding),
+ HeaderRecord::Graymap(GraymapHeader { encoding, .. }) => PnmSubtype::Graymap(encoding),
+ HeaderRecord::Pixmap(PixmapHeader { encoding, .. }) => PnmSubtype::Pixmap(encoding),
+ HeaderRecord::Arbitrary(ArbitraryHeader { .. }) => PnmSubtype::ArbitraryMap,
+ }
+ }
+
+ /// The width of the image this header is for.
+ pub fn width(&self) -> u32 {
+ match self.decoded {
+ HeaderRecord::Bitmap(BitmapHeader { width, .. }) => width,
+ HeaderRecord::Graymap(GraymapHeader { width, .. }) => width,
+ HeaderRecord::Pixmap(PixmapHeader { width, .. }) => width,
+ HeaderRecord::Arbitrary(ArbitraryHeader { width, .. }) => width,
+ }
+ }
+
+ /// The height of the image this header is for.
+ pub fn height(&self) -> u32 {
+ match self.decoded {
+ HeaderRecord::Bitmap(BitmapHeader { height, .. }) => height,
+ HeaderRecord::Graymap(GraymapHeader { height, .. }) => height,
+ HeaderRecord::Pixmap(PixmapHeader { height, .. }) => height,
+ HeaderRecord::Arbitrary(ArbitraryHeader { height, .. }) => height,
+ }
+ }
+
+ /// The biggest value a sample can have. In other words, the colour resolution.
+ pub fn maximal_sample(&self) -> u32 {
+ match self.decoded {
+ HeaderRecord::Bitmap(BitmapHeader { .. }) => 1,
+ HeaderRecord::Graymap(GraymapHeader { maxwhite, .. }) => maxwhite,
+ HeaderRecord::Pixmap(PixmapHeader { maxval, .. }) => maxval,
+ HeaderRecord::Arbitrary(ArbitraryHeader { maxval, .. }) => maxval,
+ }
+ }
+
+ /// Retrieve the underlying bitmap header if any
+ pub fn as_bitmap(&self) -> Option<&BitmapHeader> {
+ match self.decoded {
+ HeaderRecord::Bitmap(ref bitmap) => Some(bitmap),
+ _ => None,
+ }
+ }
+
+ /// Retrieve the underlying graymap header if any
+ pub fn as_graymap(&self) -> Option<&GraymapHeader> {
+ match self.decoded {
+ HeaderRecord::Graymap(ref graymap) => Some(graymap),
+ _ => None,
+ }
+ }
+
+ /// Retrieve the underlying pixmap header if any
+ pub fn as_pixmap(&self) -> Option<&PixmapHeader> {
+ match self.decoded {
+ HeaderRecord::Pixmap(ref pixmap) => Some(pixmap),
+ _ => None,
+ }
+ }
+
+ /// Retrieve the underlying arbitrary header if any
+ pub fn as_arbitrary(&self) -> Option<&ArbitraryHeader> {
+ match self.decoded {
+ HeaderRecord::Arbitrary(ref arbitrary) => Some(arbitrary),
+ _ => None,
+ }
+ }
+
+ /// Write the header back into a binary stream
+ pub fn write(&self, writer: &mut dyn io::Write) -> io::Result<()> {
+ writer.write_all(self.subtype().magic_constant())?;
+ match *self {
+ PnmHeader {
+ encoded: Some(ref content),
+ ..
+ } => writer.write_all(content),
+ PnmHeader {
+ decoded:
+ HeaderRecord::Bitmap(BitmapHeader {
+ encoding: _encoding,
+ width,
+ height,
+ }),
+ ..
+ } => writeln!(writer, "\n{} {}", width, height),
+ PnmHeader {
+ decoded:
+ HeaderRecord::Graymap(GraymapHeader {
+ encoding: _encoding,
+ width,
+ height,
+ maxwhite,
+ }),
+ ..
+ } => writeln!(writer, "\n{} {} {}", width, height, maxwhite),
+ PnmHeader {
+ decoded:
+ HeaderRecord::Pixmap(PixmapHeader {
+ encoding: _encoding,
+ width,
+ height,
+ maxval,
+ }),
+ ..
+ } => writeln!(writer, "\n{} {} {}", width, height, maxval),
+ PnmHeader {
+ decoded:
+ HeaderRecord::Arbitrary(ArbitraryHeader {
+ width,
+ height,
+ depth,
+ maxval,
+ ref tupltype,
+ }),
+ ..
+ } => {
+ struct TupltypeWriter<'a>(&'a Option<ArbitraryTuplType>);
+ impl<'a> fmt::Display for TupltypeWriter<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.0 {
+ Some(tt) => writeln!(f, "TUPLTYPE {}", tt.name()),
+ None => Ok(()),
+ }
+ }
+ }
+
+ writeln!(
+ writer,
+ "\nWIDTH {}\nHEIGHT {}\nDEPTH {}\nMAXVAL {}\n{}ENDHDR",
+ width,
+ height,
+ depth,
+ maxval,
+ TupltypeWriter(tupltype)
+ )
+ }
+ }
+ }
+}
+
+impl From<BitmapHeader> for PnmHeader {
+ fn from(header: BitmapHeader) -> Self {
+ PnmHeader {
+ decoded: HeaderRecord::Bitmap(header),
+ encoded: None,
+ }
+ }
+}
+
+impl From<GraymapHeader> for PnmHeader {
+ fn from(header: GraymapHeader) -> Self {
+ PnmHeader {
+ decoded: HeaderRecord::Graymap(header),
+ encoded: None,
+ }
+ }
+}
+
+impl From<PixmapHeader> for PnmHeader {
+ fn from(header: PixmapHeader) -> Self {
+ PnmHeader {
+ decoded: HeaderRecord::Pixmap(header),
+ encoded: None,
+ }
+ }
+}
+
+impl From<ArbitraryHeader> for PnmHeader {
+ fn from(header: ArbitraryHeader) -> Self {
+ PnmHeader {
+ decoded: HeaderRecord::Arbitrary(header),
+ encoded: None,
+ }
+ }
+}
diff --git a/vendor/image/src/codecs/pnm/mod.rs b/vendor/image/src/codecs/pnm/mod.rs
new file mode 100644
index 0000000..de8612d
--- /dev/null
+++ b/vendor/image/src/codecs/pnm/mod.rs
@@ -0,0 +1,184 @@
+//! Decoding of netpbm image formats (pbm, pgm, ppm and pam).
+//!
+//! The formats pbm, pgm and ppm are fully supported. The pam decoder recognizes the tuple types
+//! `BLACKANDWHITE`, `GRAYSCALE` and `RGB` and explicitly recognizes but rejects their `_ALPHA`
+//! variants for now as alpha color types are unsupported.
+use self::autobreak::AutoBreak;
+pub use self::decoder::PnmDecoder;
+pub use self::encoder::PnmEncoder;
+use self::header::HeaderRecord;
+pub use self::header::{
+ ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader, PixmapHeader,
+};
+pub use self::header::{PnmHeader, PnmSubtype, SampleEncoding};
+
+mod autobreak;
+mod decoder;
+mod encoder;
+mod header;
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::color::ColorType;
+ use crate::image::ImageDecoder;
+ use byteorder::{ByteOrder, NativeEndian};
+
+ fn execute_roundtrip_default(buffer: &[u8], width: u32, height: u32, color: ColorType) {
+ let mut encoded_buffer = Vec::new();
+
+ {
+ let mut encoder = PnmEncoder::new(&mut encoded_buffer);
+ encoder
+ .encode(buffer, width, height, color)
+ .expect("Failed to encode the image buffer");
+ }
+
+ let (header, loaded_color, loaded_image) = {
+ let decoder = PnmDecoder::new(&encoded_buffer[..]).unwrap();
+ let color_type = decoder.color_type();
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder
+ .read_image(&mut image)
+ .expect("Failed to decode the image");
+ let (_, header) = PnmDecoder::new(&encoded_buffer[..]).unwrap().into_inner();
+ (header, color_type, image)
+ };
+
+ assert_eq!(header.width(), width);
+ assert_eq!(header.height(), height);
+ assert_eq!(loaded_color, color);
+ assert_eq!(loaded_image.as_slice(), buffer);
+ }
+
+ fn execute_roundtrip_with_subtype(
+ buffer: &[u8],
+ width: u32,
+ height: u32,
+ color: ColorType,
+ subtype: PnmSubtype,
+ ) {
+ let mut encoded_buffer = Vec::new();
+
+ {
+ let mut encoder = PnmEncoder::new(&mut encoded_buffer).with_subtype(subtype);
+ encoder
+ .encode(buffer, width, height, color)
+ .expect("Failed to encode the image buffer");
+ }
+
+ let (header, loaded_color, loaded_image) = {
+ let decoder = PnmDecoder::new(&encoded_buffer[..]).unwrap();
+ let color_type = decoder.color_type();
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder
+ .read_image(&mut image)
+ .expect("Failed to decode the image");
+ let (_, header) = PnmDecoder::new(&encoded_buffer[..]).unwrap().into_inner();
+ (header, color_type, image)
+ };
+
+ assert_eq!(header.width(), width);
+ assert_eq!(header.height(), height);
+ assert_eq!(header.subtype(), subtype);
+ assert_eq!(loaded_color, color);
+ assert_eq!(loaded_image.as_slice(), buffer);
+ }
+
+ fn execute_roundtrip_u16(buffer: &[u16], width: u32, height: u32, color: ColorType) {
+ let mut encoded_buffer = Vec::new();
+
+ {
+ let mut encoder = PnmEncoder::new(&mut encoded_buffer);
+ encoder
+ .encode(buffer, width, height, color)
+ .expect("Failed to encode the image buffer");
+ }
+
+ let (header, loaded_color, loaded_image) = {
+ let decoder = PnmDecoder::new(&encoded_buffer[..]).unwrap();
+ let color_type = decoder.color_type();
+ let mut image = vec![0; decoder.total_bytes() as usize];
+ decoder
+ .read_image(&mut image)
+ .expect("Failed to decode the image");
+ let (_, header) = PnmDecoder::new(&encoded_buffer[..]).unwrap().into_inner();
+ (header, color_type, image)
+ };
+
+ let mut buffer_u8 = vec![0; buffer.len() * 2];
+ NativeEndian::write_u16_into(buffer, &mut buffer_u8[..]);
+
+ assert_eq!(header.width(), width);
+ assert_eq!(header.height(), height);
+ assert_eq!(loaded_color, color);
+ assert_eq!(loaded_image, buffer_u8);
+ }
+
+ #[test]
+ fn roundtrip_gray() {
+ #[rustfmt::skip]
+ let buf: [u8; 16] = [
+ 0, 0, 0, 255,
+ 255, 255, 255, 255,
+ 255, 0, 255, 0,
+ 255, 0, 0, 0,
+ ];
+
+ execute_roundtrip_default(&buf, 4, 4, ColorType::L8);
+ execute_roundtrip_with_subtype(&buf, 4, 4, ColorType::L8, PnmSubtype::ArbitraryMap);
+ execute_roundtrip_with_subtype(
+ &buf,
+ 4,
+ 4,
+ ColorType::L8,
+ PnmSubtype::Graymap(SampleEncoding::Ascii),
+ );
+ execute_roundtrip_with_subtype(
+ &buf,
+ 4,
+ 4,
+ ColorType::L8,
+ PnmSubtype::Graymap(SampleEncoding::Binary),
+ );
+ }
+
+ #[test]
+ fn roundtrip_rgb() {
+ #[rustfmt::skip]
+ let buf: [u8; 27] = [
+ 0, 0, 0,
+ 0, 0, 255,
+ 0, 255, 0,
+ 0, 255, 255,
+ 255, 0, 0,
+ 255, 0, 255,
+ 255, 255, 0,
+ 255, 255, 255,
+ 255, 255, 255,
+ ];
+ execute_roundtrip_default(&buf, 3, 3, ColorType::Rgb8);
+ execute_roundtrip_with_subtype(&buf, 3, 3, ColorType::Rgb8, PnmSubtype::ArbitraryMap);
+ execute_roundtrip_with_subtype(
+ &buf,
+ 3,
+ 3,
+ ColorType::Rgb8,
+ PnmSubtype::Pixmap(SampleEncoding::Binary),
+ );
+ execute_roundtrip_with_subtype(
+ &buf,
+ 3,
+ 3,
+ ColorType::Rgb8,
+ PnmSubtype::Pixmap(SampleEncoding::Ascii),
+ );
+ }
+
+ #[test]
+ fn roundtrip_u16() {
+ let buf: [u16; 6] = [0, 1, 0xFFFF, 0x1234, 0x3412, 0xBEAF];
+
+ execute_roundtrip_u16(&buf, 6, 1, ColorType::L16);
+ }
+}
diff --git a/vendor/image/src/codecs/qoi.rs b/vendor/image/src/codecs/qoi.rs
new file mode 100644
index 0000000..214e99b
--- /dev/null
+++ b/vendor/image/src/codecs/qoi.rs
@@ -0,0 +1,104 @@
+//! Decoding and encoding of QOI images
+
+use crate::{
+ error::{DecodingError, EncodingError},
+ ColorType, ImageDecoder, ImageEncoder, ImageError, ImageFormat, ImageResult,
+};
+use std::io::{Cursor, Read, Write};
+
+/// QOI decoder
+pub struct QoiDecoder<R> {
+ decoder: qoi::Decoder<R>,
+}
+
+impl<R> QoiDecoder<R>
+where
+ R: Read,
+{
+ /// Creates a new decoder that decodes from the stream `reader`
+ pub fn new(reader: R) -> ImageResult<Self> {
+ let decoder = qoi::Decoder::from_stream(reader).map_err(decoding_error)?;
+ Ok(Self { decoder })
+ }
+}
+
+impl<'a, R: Read + 'a> ImageDecoder<'a> for QoiDecoder<R> {
+ type Reader = Cursor<Vec<u8>>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.decoder.header().width, self.decoder.header().height)
+ }
+
+ fn color_type(&self) -> ColorType {
+ match self.decoder.header().channels {
+ qoi::Channels::Rgb => ColorType::Rgb8,
+ qoi::Channels::Rgba => ColorType::Rgba8,
+ }
+ }
+
+ fn into_reader(mut self) -> ImageResult<Self::Reader> {
+ let buffer = self.decoder.decode_to_vec().map_err(decoding_error)?;
+ Ok(Cursor::new(buffer))
+ }
+}
+
+fn decoding_error(error: qoi::Error) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Qoi.into(), error))
+}
+
+fn encoding_error(error: qoi::Error) -> ImageError {
+ ImageError::Encoding(EncodingError::new(ImageFormat::Qoi.into(), error))
+}
+
+/// QOI encoder
+pub struct QoiEncoder<W> {
+ writer: W,
+}
+
+impl<W: Write> QoiEncoder<W> {
+ /// Creates a new encoder that writes its output to `writer`.
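+ ///
+ /// A minimal sketch of encoding an in-memory RGBA buffer (the pixel data and
+ /// dimensions are only illustrative):
+ ///
+ /// ```no_run
+ /// use image::codecs::qoi::QoiEncoder;
+ /// use image::{ColorType, ImageEncoder};
+ ///
+ /// let pixels = vec![0u8; 2 * 2 * 4]; // 2x2 Rgba8 image
+ /// let mut encoded = Vec::new();
+ /// QoiEncoder::new(&mut encoded)
+ ///     .write_image(&pixels, 2, 2, ColorType::Rgba8)
+ ///     .expect("encoding failed");
+ /// ```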
+ pub fn new(writer: W) -> Self {
+ Self { writer }
+ }
+}
+
+impl<W: Write> ImageEncoder for QoiEncoder<W> {
+ fn write_image(
+ mut self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ if !matches!(color_type, ColorType::Rgba8 | ColorType::Rgb8) {
+ return Err(ImageError::Encoding(EncodingError::new(
+ ImageFormat::Qoi.into(),
+ format!("unsupported color type {color_type:?}. Supported are Rgba8 and Rgb8."),
+ )));
+ }
+
+ // Encode data in QOI
+ let data = qoi::encode_to_vec(buf, width, height).map_err(encoding_error)?;
+
+ // Write data to buffer
+ self.writer.write_all(&data[..])?;
+ self.writer.flush()?;
+
+ Ok(())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::fs::File;
+
+ #[test]
+ fn decode_test_image() {
+ let decoder = QoiDecoder::new(File::open("tests/images/qoi/basic-test.qoi").unwrap())
+ .expect("Unable to read QOI file");
+
+ assert_eq!((5, 5), decoder.dimensions());
+ assert_eq!(ColorType::Rgba8, decoder.color_type());
+ }
+}
diff --git a/vendor/image/src/codecs/tga/decoder.rs b/vendor/image/src/codecs/tga/decoder.rs
new file mode 100644
index 0000000..16243ce
--- /dev/null
+++ b/vendor/image/src/codecs/tga/decoder.rs
@@ -0,0 +1,502 @@
+use super::header::{Header, ImageType, ALPHA_BIT_MASK, SCREEN_ORIGIN_BIT_MASK};
+use crate::{
+ color::{ColorType, ExtendedColorType},
+ error::{
+ ImageError, ImageResult, LimitError, LimitErrorKind, UnsupportedError, UnsupportedErrorKind,
+ },
+ image::{ImageDecoder, ImageFormat, ImageReadBuffer},
+};
+use byteorder::ReadBytesExt;
+use std::{
+ convert::TryFrom,
+ io::{self, Read, Seek},
+ mem,
+};
+
+struct ColorMap {
+ /// offsets and sizes below are in bytes
+ start_offset: usize,
+ entry_size: usize,
+ bytes: Vec<u8>,
+}
+
+impl ColorMap {
+ pub(crate) fn from_reader(
+ r: &mut dyn Read,
+ start_offset: u16,
+ num_entries: u16,
+ bits_per_entry: u8,
+ ) -> ImageResult<ColorMap> {
+ let bytes_per_entry = (bits_per_entry as usize + 7) / 8;
+
+ let mut bytes = vec![0; bytes_per_entry * num_entries as usize];
+ r.read_exact(&mut bytes)?;
+
+ Ok(ColorMap {
+ entry_size: bytes_per_entry,
+ start_offset: start_offset as usize,
+ bytes,
+ })
+ }
+
+ /// Get one entry from the color map
+ pub(crate) fn get(&self, index: usize) -> Option<&[u8]> {
+ let entry = self.start_offset + self.entry_size * index;
+ self.bytes.get(entry..entry + self.entry_size)
+ }
+}
+
+/// The representation of a TGA decoder
+pub struct TgaDecoder<R> {
+ r: R,
+
+ width: usize,
+ height: usize,
+ bytes_per_pixel: usize,
+ has_loaded_metadata: bool,
+
+ image_type: ImageType,
+ color_type: ColorType,
+ original_color_type: Option<ExtendedColorType>,
+
+ header: Header,
+ color_map: Option<ColorMap>,
+
+ // Used in read_scanline
+ line_read: Option<usize>,
+ line_remain_buff: Vec<u8>,
+}
+
+impl<R: Read + Seek> TgaDecoder<R> {
+ /// Create a new decoder that decodes from the stream `r`
+ pub fn new(r: R) -> ImageResult<TgaDecoder<R>> {
+ let mut decoder = TgaDecoder {
+ r,
+
+ width: 0,
+ height: 0,
+ bytes_per_pixel: 0,
+ has_loaded_metadata: false,
+
+ image_type: ImageType::Unknown,
+ color_type: ColorType::L8,
+ original_color_type: None,
+
+ header: Header::default(),
+ color_map: None,
+
+ line_read: None,
+ line_remain_buff: Vec::new(),
+ };
+ decoder.read_metadata()?;
+ Ok(decoder)
+ }
+
+ fn read_header(&mut self) -> ImageResult<()> {
+ self.header = Header::from_reader(&mut self.r)?;
+ self.image_type = ImageType::new(self.header.image_type);
+ self.width = self.header.image_width as usize;
+ self.height = self.header.image_height as usize;
+ self.bytes_per_pixel = (self.header.pixel_depth as usize + 7) / 8;
+ Ok(())
+ }
+
+ fn read_metadata(&mut self) -> ImageResult<()> {
+ if !self.has_loaded_metadata {
+ self.read_header()?;
+ self.read_image_id()?;
+ self.read_color_map()?;
+ self.read_color_information()?;
+ self.has_loaded_metadata = true;
+ }
+ Ok(())
+ }
+
+ /// Loads the color information for the decoder
+ ///
+ /// To keep things simple, we won't handle bit depths that aren't divisible
+ /// by 8 and are larger than 32.
+ fn read_color_information(&mut self) -> ImageResult<()> {
+ if self.header.pixel_depth % 8 != 0 || self.header.pixel_depth > 32 {
+ // Bit depth must be divisible by 8, and must be less than or equal
+ // to 32.
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Tga.into(),
+ UnsupportedErrorKind::Color(ExtendedColorType::Unknown(
+ self.header.pixel_depth,
+ )),
+ ),
+ ));
+ }
+
+ let num_alpha_bits = self.header.image_desc & ALPHA_BIT_MASK;
+
+ let other_channel_bits = if self.header.map_type != 0 {
+ self.header.map_entry_size
+ } else {
+ if num_alpha_bits > self.header.pixel_depth {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Tga.into(),
+ UnsupportedErrorKind::Color(ExtendedColorType::Unknown(
+ self.header.pixel_depth,
+ )),
+ ),
+ ));
+ }
+
+ self.header.pixel_depth - num_alpha_bits
+ };
+ let color = self.image_type.is_color();
+
+ match (num_alpha_bits, other_channel_bits, color) {
+ // really, the encoding is BGR and BGRA, this is fixed
+ // up with `TgaDecoder::reverse_encoding_in_output`.
+ (0, 32, true) => self.color_type = ColorType::Rgba8,
+ (8, 24, true) => self.color_type = ColorType::Rgba8,
+ (0, 24, true) => self.color_type = ColorType::Rgb8,
+ (8, 8, false) => self.color_type = ColorType::La8,
+ (0, 8, false) => self.color_type = ColorType::L8,
+ (8, 0, false) => {
+ // alpha-only image is treated as L8
+ self.color_type = ColorType::L8;
+ self.original_color_type = Some(ExtendedColorType::A8);
+ }
+ _ => {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Tga.into(),
+ UnsupportedErrorKind::Color(ExtendedColorType::Unknown(
+ self.header.pixel_depth,
+ )),
+ ),
+ ))
+ }
+ }
+ Ok(())
+ }
+
+ /// Read the image id field
+ ///
+ /// We're not interested in this field, so this function skips it if it
+ /// is present
+ fn read_image_id(&mut self) -> ImageResult<()> {
+ self.r
+ .seek(io::SeekFrom::Current(i64::from(self.header.id_length)))?;
+ Ok(())
+ }
+
+ fn read_color_map(&mut self) -> ImageResult<()> {
+ if self.header.map_type == 1 {
+ // FIXME: we could reverse the map entries, which avoids having to reverse all pixels
+ // in the final output individually.
+ self.color_map = Some(ColorMap::from_reader(
+ &mut self.r,
+ self.header.map_origin,
+ self.header.map_length,
+ self.header.map_entry_size,
+ )?);
+ }
+ Ok(())
+ }
+
+ /// Expands each color map index into its mapped color
+ fn expand_color_map(&self, pixel_data: &[u8]) -> io::Result<Vec<u8>> {
+ #[inline]
+ fn bytes_to_index(bytes: &[u8]) -> usize {
+ let mut result = 0usize;
+ for byte in bytes.iter() {
+ result = result << 8 | *byte as usize;
+ }
+ result
+ }
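+ // (Illustrative: for a two-byte index, bytes [0x01, 0x02] fold to 0x0102,
+ // most-significant byte first, as implemented by `bytes_to_index` above.)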
+
+ let bytes_per_entry = (self.header.map_entry_size as usize + 7) / 8;
+ let mut result = Vec::with_capacity(self.width * self.height * bytes_per_entry);
+
+ if self.bytes_per_pixel == 0 {
+ return Err(io::ErrorKind::Other.into());
+ }
+
+ let color_map = self
+ .color_map
+ .as_ref()
+ .ok_or_else(|| io::Error::from(io::ErrorKind::Other))?;
+
+ for chunk in pixel_data.chunks(self.bytes_per_pixel) {
+ let index = bytes_to_index(chunk);
+ if let Some(color) = color_map.get(index) {
+ result.extend_from_slice(color);
+ } else {
+ return Err(io::ErrorKind::Other.into());
+ }
+ }
+
+ Ok(result)
+ }
+
+ /// Reads run length encoded data for the given number of bytes
+ fn read_encoded_data(&mut self, num_bytes: usize) -> io::Result<Vec<u8>> {
+ let mut pixel_data = Vec::with_capacity(num_bytes);
+ let mut repeat_buf = Vec::with_capacity(self.bytes_per_pixel);
+
+ while pixel_data.len() < num_bytes {
+ let run_packet = self.r.read_u8()?;
+ // If the highest bit in `run_packet` is set, then we repeat pixels
+ //
+ // Note: the TGA format adds 1 to both counts because having a count
+ // of 0 would be pointless.
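+ //
+ // Illustrative example: a packet byte of 0x82 (high bit set, count bits 2)
+ // repeats the next stored pixel 3 times, while 0x02 (high bit clear) is
+ // followed by 3 raw pixels.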
+ if (run_packet & 0x80) != 0 {
+ // high bit set, so we will repeat the data
+ let repeat_count = ((run_packet & !0x80) + 1) as usize;
+ self.r
+ .by_ref()
+ .take(self.bytes_per_pixel as u64)
+ .read_to_end(&mut repeat_buf)?;
+
+ // get the repeating pixels from the bytes of the pixel stored in `repeat_buf`
+ let data = repeat_buf
+ .iter()
+ .cycle()
+ .take(repeat_count * self.bytes_per_pixel);
+ pixel_data.extend(data);
+ repeat_buf.clear();
+ } else {
+ // not set, so `run_packet+1` is the number of non-encoded pixels
+ let num_raw_bytes = (run_packet + 1) as usize * self.bytes_per_pixel;
+ self.r
+ .by_ref()
+ .take(num_raw_bytes as u64)
+ .read_to_end(&mut pixel_data)?;
+ }
+ }
+
+ if pixel_data.len() > num_bytes {
+ // FIXME: the last packet contained more data than we asked for!
+ // This is at least a warning. We truncate the data since some methods rely on the
+ // length to be accurate in the success case.
+ pixel_data.truncate(num_bytes);
+ }
+
+ Ok(pixel_data)
+ }
+
+ /// Reads all of the run length encoded data for the image
+ fn read_all_encoded_data(&mut self) -> ImageResult<Vec<u8>> {
+ let num_bytes = self.width * self.height * self.bytes_per_pixel;
+
+ Ok(self.read_encoded_data(num_bytes)?)
+ }
+
+ /// Reads a run length encoded line
+ fn read_encoded_line(&mut self) -> io::Result<Vec<u8>> {
+ let line_num_bytes = self.width * self.bytes_per_pixel;
+ let remain_len = self.line_remain_buff.len();
+
+ if remain_len >= line_num_bytes {
+ // `Vec::split_to` if std had it
+ let bytes = {
+ let bytes_after = self.line_remain_buff.split_off(line_num_bytes);
+ mem::replace(&mut self.line_remain_buff, bytes_after)
+ };
+
+ return Ok(bytes);
+ }
+
+ let num_bytes = line_num_bytes - remain_len;
+
+ let line_data = self.read_encoded_data(num_bytes)?;
+
+ let mut pixel_data = Vec::with_capacity(line_num_bytes);
+ pixel_data.append(&mut self.line_remain_buff);
+ pixel_data.extend_from_slice(&line_data[..num_bytes]);
+
+ // put the remaining data into `line_remain_buff`; it is expected to be
+ // empty at this point because of the `pixel_data.append` call above
+ debug_assert!(self.line_remain_buff.is_empty());
+ self.line_remain_buff
+ .extend_from_slice(&line_data[num_bytes..]);
+
+ Ok(pixel_data)
+ }
+
+ /// Reverse from BGR encoding to RGB encoding
+ ///
+ /// TGA files store pixels in BGR(A) order. This function swaps
+ /// the blue and red bytes in the `pixels` array.
+ fn reverse_encoding_in_output(&mut self, pixels: &mut [u8]) {
+ // We only need to reverse the encoding of color images
+ match self.color_type {
+ ColorType::Rgb8 | ColorType::Rgba8 => {
+ for chunk in pixels.chunks_mut(self.color_type.bytes_per_pixel().into()) {
+ chunk.swap(0, 2);
+ }
+ }
+ _ => {}
+ }
+ }
+
+ /// Flip the image vertically depending on the screen origin bit
+ ///
+ /// The bit in position 5 of the image descriptor byte is the screen origin bit.
+ /// If it's 1, the origin is in the top left corner.
+ /// If it's 0, the origin is in the bottom left corner.
+ /// This function checks the bit, and if it's 0, flips the image vertically.
+ fn flip_vertically(&mut self, pixels: &mut [u8]) {
+ if self.is_flipped_vertically() {
+ if self.height == 0 {
+ return;
+ }
+
+ let num_bytes = pixels.len();
+
+ let width_bytes = num_bytes / self.height;
+
+ // Flip the image vertically.
+ for vertical_index in 0..(self.height / 2) {
+ let vertical_target = (self.height - vertical_index) * width_bytes - width_bytes;
+
+ for horizontal_index in 0..width_bytes {
+ let source = vertical_index * width_bytes + horizontal_index;
+ let target = vertical_target + horizontal_index;
+
+ pixels.swap(target, source);
+ }
+ }
+ }
+ }
+
+ /// Check whether the image is vertically flipped
+ ///
+ /// The bit in position 5 of the image descriptor byte is the screen origin bit.
+ /// If it's 1, the origin is in the top left corner.
+ /// If it's 0, the origin is in the bottom left corner.
+ /// This function returns true if the bit is 0, i.e. the image needs to be flipped vertically.
+ fn is_flipped_vertically(&self) -> bool {
+ let screen_origin_bit = SCREEN_ORIGIN_BIT_MASK & self.header.image_desc != 0;
+ !screen_origin_bit
+ }
+
+ fn read_scanline(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ if let Some(line_read) = self.line_read {
+ if line_read == self.height {
+ return Ok(0);
+ }
+ }
+
+ // read the pixels from the data region
+ let mut pixel_data = if self.image_type.is_encoded() {
+ self.read_encoded_line()?
+ } else {
+ let num_raw_bytes = self.width * self.bytes_per_pixel;
+ let mut buf = vec![0; num_raw_bytes];
+ self.r.by_ref().read_exact(&mut buf)?;
+ buf
+ };
+
+ // expand the indices using the color map if necessary
+ if self.image_type.is_color_mapped() {
+ pixel_data = self.expand_color_map(&pixel_data)?;
+ }
+ self.reverse_encoding_in_output(&mut pixel_data);
+
+ // copy to the output buffer
+ buf[..pixel_data.len()].copy_from_slice(&pixel_data);
+
+ self.line_read = Some(self.line_read.unwrap_or(0) + 1);
+
+ Ok(pixel_data.len())
+ }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for TgaDecoder<R> {
+ type Reader = TGAReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.width as u32, self.height as u32)
+ }
+
+ fn color_type(&self) -> ColorType {
+ self.color_type
+ }
+
+ fn original_color_type(&self) -> ExtendedColorType {
+ self.original_color_type
+ .unwrap_or_else(|| self.color_type().into())
+ }
+
+ fn scanline_bytes(&self) -> u64 {
+ // This cannot overflow because TGA has a maximum width of u16::MAX and
+ // `bytes_per_pixel` is a u8.
+ u64::from(self.color_type.bytes_per_pixel()) * self.width as u64
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ Ok(TGAReader {
+ buffer: ImageReadBuffer::new(self.scanline_bytes(), self.total_bytes()),
+ decoder: self,
+ })
+ }
+
+ fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+
+ // In indexed images, the raw index data may need more bytes than the output buffer
+ // provides. That's nonsensical to encode, but we don't want to crash on it.
+ let mut fallback_buf = vec![];
+ // read the pixels from the data region
+ let rawbuf = if self.image_type.is_encoded() {
+ let pixel_data = self.read_all_encoded_data()?;
+ if self.bytes_per_pixel <= usize::from(self.color_type.bytes_per_pixel()) {
+ buf[..pixel_data.len()].copy_from_slice(&pixel_data);
+ &buf[..pixel_data.len()]
+ } else {
+ fallback_buf = pixel_data;
+ &fallback_buf[..]
+ }
+ } else {
+ let num_raw_bytes = self.width * self.height * self.bytes_per_pixel;
+ if self.bytes_per_pixel <= usize::from(self.color_type.bytes_per_pixel()) {
+ self.r.by_ref().read_exact(&mut buf[..num_raw_bytes])?;
+ &buf[..num_raw_bytes]
+ } else {
+ fallback_buf.resize(num_raw_bytes, 0u8);
+ self.r
+ .by_ref()
+ .read_exact(&mut fallback_buf[..num_raw_bytes])?;
+ &fallback_buf[..num_raw_bytes]
+ }
+ };
+
+ // expand the indices using the color map if necessary
+ if self.image_type.is_color_mapped() {
+ let pixel_data = self.expand_color_map(rawbuf)?;
+ // not enough data to fill the buffer, or would overflow the buffer
+ if pixel_data.len() != buf.len() {
+ return Err(ImageError::Limits(LimitError::from_kind(
+ LimitErrorKind::DimensionError,
+ )));
+ }
+ buf.copy_from_slice(&pixel_data);
+ }
+
+ self.reverse_encoding_in_output(buf);
+
+ self.flip_vertically(buf);
+
+ Ok(())
+ }
+}
+
+pub struct TGAReader<R> {
+ buffer: ImageReadBuffer,
+ decoder: TgaDecoder<R>,
+}
+impl<R: Read + Seek> Read for TGAReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ let decoder = &mut self.decoder;
+ self.buffer.read(buf, |buf| decoder.read_scanline(buf))
+ }
+}
diff --git a/vendor/image/src/codecs/tga/encoder.rs b/vendor/image/src/codecs/tga/encoder.rs
new file mode 100644
index 0000000..cf34984
--- /dev/null
+++ b/vendor/image/src/codecs/tga/encoder.rs
@@ -0,0 +1,215 @@
+use super::header::Header;
+use crate::{error::EncodingError, ColorType, ImageEncoder, ImageError, ImageFormat, ImageResult};
+use std::{convert::TryFrom, error, fmt, io::Write};
+
+/// Errors that can occur during encoding and saving of a TGA image.
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+enum EncoderError {
+ /// Invalid TGA width.
+ WidthInvalid(u32),
+
+ /// Invalid TGA height.
+ HeightInvalid(u32),
+}
+
+impl fmt::Display for EncoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ EncoderError::WidthInvalid(s) => f.write_fmt(format_args!("Invalid TGA width: {}", s)),
+ EncoderError::HeightInvalid(s) => {
+ f.write_fmt(format_args!("Invalid TGA height: {}", s))
+ }
+ }
+ }
+}
+
+impl From<EncoderError> for ImageError {
+ fn from(e: EncoderError) -> ImageError {
+ ImageError::Encoding(EncodingError::new(ImageFormat::Tga.into(), e))
+ }
+}
+
+impl error::Error for EncoderError {}
+
+/// TGA encoder.
+pub struct TgaEncoder<W: Write> {
+ writer: W,
+}
+
+impl<W: Write> TgaEncoder<W> {
+ /// Create a new encoder that writes its output to `w`.
+ pub fn new(w: W) -> TgaEncoder<W> {
+ TgaEncoder { writer: w }
+ }
+
+ /// Encodes the image `buf` that has dimensions `width`
+ /// and `height` and `ColorType` `color_type`.
+ ///
+ /// The dimensions of the image must be between 0 and 65535 (inclusive) or
+ /// an error will be returned.
+ pub fn encode(
+ mut self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ // Validate dimensions.
+ let width = u16::try_from(width)
+ .map_err(|_| ImageError::from(EncoderError::WidthInvalid(width)))?;
+
+ let height = u16::try_from(height)
+ .map_err(|_| ImageError::from(EncoderError::HeightInvalid(height)))?;
+
+ // Write out TGA header.
+ let header = Header::from_pixel_info(color_type, width, height)?;
+ header.write_to(&mut self.writer)?;
+
+ // Write out Bgr(a)8 or L(a)8 image data.
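+ // (For example, an Rgb8 pixel stored as [r, g, b] in `buf` is written to
+ // the file as [b, g, r] by the swap below.)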
+ match color_type {
+ ColorType::Rgb8 | ColorType::Rgba8 => {
+ let mut image = Vec::from(buf);
+
+ for chunk in image.chunks_mut(usize::from(color_type.bytes_per_pixel())) {
+ chunk.swap(0, 2);
+ }
+
+ self.writer.write_all(&image)?;
+ }
+ _ => {
+ self.writer.write_all(buf)?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl<W: Write> ImageEncoder for TgaEncoder<W> {
+ fn write_image(
+ self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ self.encode(buf, width, height, color_type)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{EncoderError, TgaEncoder};
+ use crate::{codecs::tga::TgaDecoder, ColorType, ImageDecoder, ImageError};
+ use std::{error::Error, io::Cursor};
+
+ fn round_trip_image(image: &[u8], width: u32, height: u32, c: ColorType) -> Vec<u8> {
+ let mut encoded_data = Vec::new();
+ {
+ let encoder = TgaEncoder::new(&mut encoded_data);
+ encoder
+ .encode(&image, width, height, c)
+ .expect("could not encode image");
+ }
+
+ let decoder = TgaDecoder::new(Cursor::new(&encoded_data)).expect("failed to decode");
+
+ let mut buf = vec![0; decoder.total_bytes() as usize];
+ decoder.read_image(&mut buf).expect("failed to decode");
+ buf
+ }
+
+ #[test]
+ fn test_image_width_too_large() {
+ // TGA cannot encode images larger than 65,535×65,535
+ // create a 65,536×1 8-bit black image buffer
+ let size = usize::from(u16::MAX) + 1;
+ let dimension = size as u32;
+ let img = vec![0u8; size];
+ // Try to encode an image that is too large
+ let mut encoded = Vec::new();
+ let encoder = TgaEncoder::new(&mut encoded);
+ let result = encoder.encode(&img, dimension, 1, ColorType::L8);
+ match result {
+ Err(ImageError::Encoding(err)) => {
+ let err = err
+ .source()
+ .unwrap()
+ .downcast_ref::<EncoderError>()
+ .unwrap();
+ assert_eq!(*err, EncoderError::WidthInvalid(dimension));
+ }
+ other => panic!(
+ "Encoding an image that is too wide should return a InvalidWidth \
+ it returned {:?} instead",
+ other
+ ),
+ }
+ }
+
+ #[test]
+ fn test_image_height_too_large() {
+ // TGA cannot encode images larger than 65,535×65,535
+ // create a 1×65,536 8-bit black image buffer
+ let size = usize::from(u16::MAX) + 1;
+ let dimension = size as u32;
+ let img = vec![0u8; size];
+ // Try to encode an image that is too large
+ let mut encoded = Vec::new();
+ let encoder = TgaEncoder::new(&mut encoded);
+ let result = encoder.encode(&img, 1, dimension, ColorType::L8);
+ match result {
+ Err(ImageError::Encoding(err)) => {
+ let err = err
+ .source()
+ .unwrap()
+ .downcast_ref::<EncoderError>()
+ .unwrap();
+ assert_eq!(*err, EncoderError::HeightInvalid(dimension));
+ }
+ other => panic!(
+ "Encoding an image that is too tall should return a InvalidHeight \
+ it returned {:?} instead",
+ other
+ ),
+ }
+ }
+
+ #[test]
+ fn round_trip_single_pixel_rgb() {
+ let image = [0, 1, 2];
+ let decoded = round_trip_image(&image, 1, 1, ColorType::Rgb8);
+ assert_eq!(decoded.len(), image.len());
+ assert_eq!(decoded.as_slice(), image);
+ }
+
+ #[test]
+ fn round_trip_single_pixel_rgba() {
+ let image = [0, 1, 2, 3];
+ let decoded = round_trip_image(&image, 1, 1, ColorType::Rgba8);
+ assert_eq!(decoded.len(), image.len());
+ assert_eq!(decoded.as_slice(), image);
+ }
+
+ #[test]
+ fn round_trip_gray() {
+ let image = [0, 1, 2];
+ let decoded = round_trip_image(&image, 3, 1, ColorType::L8);
+ assert_eq!(decoded.len(), image.len());
+ assert_eq!(decoded.as_slice(), image);
+ }
+
+ #[test]
+ fn round_trip_graya() {
+ let image = [0, 1, 2, 3, 4, 5];
+ let decoded = round_trip_image(&image, 1, 3, ColorType::La8);
+ assert_eq!(decoded.len(), image.len());
+ assert_eq!(decoded.as_slice(), image);
+ }
+
+ #[test]
+ fn round_trip_3px_rgb() {
+ let image = [0; 3 * 3 * 3]; // 3x3 pixels, 3 bytes per pixel
+ let _decoded = round_trip_image(&image, 3, 3, ColorType::Rgb8);
+ }
+}
diff --git a/vendor/image/src/codecs/tga/header.rs b/vendor/image/src/codecs/tga/header.rs
new file mode 100644
index 0000000..83ba7a3
--- /dev/null
+++ b/vendor/image/src/codecs/tga/header.rs
@@ -0,0 +1,150 @@
+use crate::{
+ error::{UnsupportedError, UnsupportedErrorKind},
+ ColorType, ImageError, ImageFormat, ImageResult,
+};
+use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
+use std::io::{Read, Write};
+
+pub(crate) const ALPHA_BIT_MASK: u8 = 0b1111;
+pub(crate) const SCREEN_ORIGIN_BIT_MASK: u8 = 0b10_0000;
+
+pub(crate) enum ImageType {
+ NoImageData = 0,
+ /// Uncompressed images.
+ RawColorMap = 1,
+ RawTrueColor = 2,
+ RawGrayScale = 3,
+ /// Run length encoded images.
+ RunColorMap = 9,
+ RunTrueColor = 10,
+ RunGrayScale = 11,
+ Unknown,
+}
+
+impl ImageType {
+ /// Create a new image type from a u8.
+ pub(crate) fn new(img_type: u8) -> ImageType {
+ match img_type {
+ 0 => ImageType::NoImageData,
+
+ 1 => ImageType::RawColorMap,
+ 2 => ImageType::RawTrueColor,
+ 3 => ImageType::RawGrayScale,
+
+ 9 => ImageType::RunColorMap,
+ 10 => ImageType::RunTrueColor,
+ 11 => ImageType::RunGrayScale,
+
+ _ => ImageType::Unknown,
+ }
+ }
+
+ /// Check if the image format uses colors as opposed to gray scale.
+ pub(crate) fn is_color(&self) -> bool {
+ matches! { *self,
+ ImageType::RawColorMap
+ | ImageType::RawTrueColor
+ | ImageType::RunTrueColor
+ | ImageType::RunColorMap
+ }
+ }
+
+ /// Does the image use a color map.
+ pub(crate) fn is_color_mapped(&self) -> bool {
+ matches! { *self, ImageType::RawColorMap | ImageType::RunColorMap }
+ }
+
+ /// Is the image run length encoded.
+ pub(crate) fn is_encoded(&self) -> bool {
+ matches! {*self, ImageType::RunColorMap | ImageType::RunTrueColor | ImageType::RunGrayScale }
+ }
+}
+
+/// Header used by TGA image files.
+#[derive(Debug, Default)]
+pub(crate) struct Header {
+ pub(crate) id_length: u8, // length of ID string
+ pub(crate) map_type: u8, // color map type
+ pub(crate) image_type: u8, // image type code
+ pub(crate) map_origin: u16, // starting index of map
+ pub(crate) map_length: u16, // length of map
+ pub(crate) map_entry_size: u8, // size of map entries in bits
+ pub(crate) x_origin: u16, // x-origin of image
+ pub(crate) y_origin: u16, // y-origin of image
+ pub(crate) image_width: u16, // width of image
+ pub(crate) image_height: u16, // height of image
+ pub(crate) pixel_depth: u8, // bits per pixel
+ pub(crate) image_desc: u8, // image descriptor
+}
+
+impl Header {
+ /// Load the header with values from pixel information.
+ pub(crate) fn from_pixel_info(
+ color_type: ColorType,
+ width: u16,
+ height: u16,
+ ) -> ImageResult<Self> {
+ let mut header = Self::default();
+
+ if width > 0 && height > 0 {
+ let (num_alpha_bits, other_channel_bits, image_type) = match color_type {
+ ColorType::Rgba8 => (8, 24, ImageType::RawTrueColor),
+ ColorType::Rgb8 => (0, 24, ImageType::RawTrueColor),
+ ColorType::La8 => (8, 8, ImageType::RawGrayScale),
+ ColorType::L8 => (0, 8, ImageType::RawGrayScale),
+ _ => {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Tga.into(),
+ UnsupportedErrorKind::Color(color_type.into()),
+ ),
+ ))
+ }
+ };
+
+ header.image_type = image_type as u8;
+ header.image_width = width;
+ header.image_height = height;
+ header.pixel_depth = num_alpha_bits + other_channel_bits;
+ header.image_desc = num_alpha_bits & ALPHA_BIT_MASK;
+ header.image_desc |= SCREEN_ORIGIN_BIT_MASK; // Upper left origin.
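+ // (Illustrative: ColorType::Rgba8 yields pixel_depth = 32 and
+ // image_desc = 0b0010_1000, i.e. 8 alpha bits with a top-left origin.)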
+ }
+
+ Ok(header)
+ }
+
+ /// Load the header with values from the reader.
+ pub(crate) fn from_reader(r: &mut dyn Read) -> ImageResult<Self> {
+ Ok(Self {
+ id_length: r.read_u8()?,
+ map_type: r.read_u8()?,
+ image_type: r.read_u8()?,
+ map_origin: r.read_u16::<LittleEndian>()?,
+ map_length: r.read_u16::<LittleEndian>()?,
+ map_entry_size: r.read_u8()?,
+ x_origin: r.read_u16::<LittleEndian>()?,
+ y_origin: r.read_u16::<LittleEndian>()?,
+ image_width: r.read_u16::<LittleEndian>()?,
+ image_height: r.read_u16::<LittleEndian>()?,
+ pixel_depth: r.read_u8()?,
+ image_desc: r.read_u8()?,
+ })
+ }
+
+ /// Write out the header values.
+ pub(crate) fn write_to(&self, w: &mut dyn Write) -> ImageResult<()> {
+ w.write_u8(self.id_length)?;
+ w.write_u8(self.map_type)?;
+ w.write_u8(self.image_type)?;
+ w.write_u16::<LittleEndian>(self.map_origin)?;
+ w.write_u16::<LittleEndian>(self.map_length)?;
+ w.write_u8(self.map_entry_size)?;
+ w.write_u16::<LittleEndian>(self.x_origin)?;
+ w.write_u16::<LittleEndian>(self.y_origin)?;
+ w.write_u16::<LittleEndian>(self.image_width)?;
+ w.write_u16::<LittleEndian>(self.image_height)?;
+ w.write_u8(self.pixel_depth)?;
+ w.write_u8(self.image_desc)?;
+ Ok(())
+ }
+}
diff --git a/vendor/image/src/codecs/tga/mod.rs b/vendor/image/src/codecs/tga/mod.rs
new file mode 100644
index 0000000..fdc2f0c
--- /dev/null
+++ b/vendor/image/src/codecs/tga/mod.rs
@@ -0,0 +1,17 @@
+//! Decoding and Encoding of TGA Images
+//!
+//! # Related Links
+//! <http://googlesites.inequation.org/tgautilities>
+
+/// A decoder for TGA images
+///
+/// Currently this decoder does not support 8, 15 and 16 bit color images.
+pub use self::decoder::TgaDecoder;
+
+//TODO add 8, 15, 16 bit color support
+
+pub use self::encoder::TgaEncoder;
+
+mod decoder;
+mod encoder;
+mod header;
diff --git a/vendor/image/src/codecs/tiff.rs b/vendor/image/src/codecs/tiff.rs
new file mode 100644
index 0000000..7c33412
--- /dev/null
+++ b/vendor/image/src/codecs/tiff.rs
@@ -0,0 +1,353 @@
+//! Decoding and Encoding of TIFF Images
+//!
+//! TIFF (Tagged Image File Format) is a versatile image format that supports
+//! lossless and lossy compression.
+//!
+//! # Related Links
+//! * <http://partners.adobe.com/public/developer/tiff/index.html> - The TIFF specification
+
+extern crate tiff;
+
+use std::convert::TryFrom;
+use std::io::{self, Cursor, Read, Seek, Write};
+use std::marker::PhantomData;
+use std::mem;
+
+use crate::color::{ColorType, ExtendedColorType};
+use crate::error::{
+ DecodingError, EncodingError, ImageError, ImageResult, LimitError, LimitErrorKind,
+ ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::{ImageDecoder, ImageEncoder, ImageFormat};
+use crate::utils;
+
+/// Decoder for TIFF images.
+pub struct TiffDecoder<R>
+where
+ R: Read + Seek,
+{
+ dimensions: (u32, u32),
+ color_type: ColorType,
+
+ // We only use an Option here so we can call with_limits on the decoder without moving.
+ inner: Option<tiff::decoder::Decoder<R>>,
+}
+
+impl<R> TiffDecoder<R>
+where
+ R: Read + Seek,
+{
+ /// Create a new TiffDecoder.
+ pub fn new(r: R) -> Result<TiffDecoder<R>, ImageError> {
+ let mut inner = tiff::decoder::Decoder::new(r).map_err(ImageError::from_tiff_decode)?;
+
+ let dimensions = inner.dimensions().map_err(ImageError::from_tiff_decode)?;
+ let color_type = inner.colortype().map_err(ImageError::from_tiff_decode)?;
+ match inner.find_tag_unsigned_vec::<u16>(tiff::tags::Tag::SampleFormat) {
+ Ok(Some(sample_formats)) => {
+ for format in sample_formats {
+ check_sample_format(format)?;
+ }
+ }
+ Ok(None) => { /* assume UInt format */ }
+ Err(other) => return Err(ImageError::from_tiff_decode(other)),
+ };
+
+ let color_type = match color_type {
+ tiff::ColorType::Gray(8) => ColorType::L8,
+ tiff::ColorType::Gray(16) => ColorType::L16,
+ tiff::ColorType::GrayA(8) => ColorType::La8,
+ tiff::ColorType::GrayA(16) => ColorType::La16,
+ tiff::ColorType::RGB(8) => ColorType::Rgb8,
+ tiff::ColorType::RGB(16) => ColorType::Rgb16,
+ tiff::ColorType::RGBA(8) => ColorType::Rgba8,
+ tiff::ColorType::RGBA(16) => ColorType::Rgba16,
+
+ tiff::ColorType::Palette(n) | tiff::ColorType::Gray(n) => {
+ return Err(err_unknown_color_type(n))
+ }
+ tiff::ColorType::GrayA(n) => return Err(err_unknown_color_type(n.saturating_mul(2))),
+ tiff::ColorType::RGB(n) => return Err(err_unknown_color_type(n.saturating_mul(3))),
+ tiff::ColorType::YCbCr(n) => return Err(err_unknown_color_type(n.saturating_mul(3))),
+ tiff::ColorType::RGBA(n) | tiff::ColorType::CMYK(n) => {
+ return Err(err_unknown_color_type(n.saturating_mul(4)))
+ }
+ };
+
+ Ok(TiffDecoder {
+ dimensions,
+ color_type,
+ inner: Some(inner),
+ })
+ }
+}
+
+fn check_sample_format(sample_format: u16) -> Result<(), ImageError> {
+ match tiff::tags::SampleFormat::from_u16(sample_format) {
+ Some(tiff::tags::SampleFormat::Uint) => Ok(()),
+ Some(other) => Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Tiff.into(),
+ UnsupportedErrorKind::GenericFeature(format!(
+ "Unhandled TIFF sample format {:?}",
+ other
+ )),
+ ),
+ )),
+ None => Err(ImageError::Decoding(DecodingError::from_format_hint(
+ ImageFormat::Tiff.into(),
+ ))),
+ }
+}
+
+fn err_unknown_color_type(value: u8) -> ImageError {
+ ImageError::Unsupported(UnsupportedError::from_format_and_kind(
+ ImageFormat::Tiff.into(),
+ UnsupportedErrorKind::Color(ExtendedColorType::Unknown(value)),
+ ))
+}
+
+impl ImageError {
+ fn from_tiff_decode(err: tiff::TiffError) -> ImageError {
+ match err {
+ tiff::TiffError::IoError(err) => ImageError::IoError(err),
+ err @ tiff::TiffError::FormatError(_)
+ | err @ tiff::TiffError::IntSizeError
+ | err @ tiff::TiffError::UsageError(_) => {
+ ImageError::Decoding(DecodingError::new(ImageFormat::Tiff.into(), err))
+ }
+ tiff::TiffError::UnsupportedError(desc) => {
+ ImageError::Unsupported(UnsupportedError::from_format_and_kind(
+ ImageFormat::Tiff.into(),
+ UnsupportedErrorKind::GenericFeature(desc.to_string()),
+ ))
+ }
+ tiff::TiffError::LimitsExceeded => {
+ ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory))
+ }
+ }
+ }
+
+ fn from_tiff_encode(err: tiff::TiffError) -> ImageError {
+ match err {
+ tiff::TiffError::IoError(err) => ImageError::IoError(err),
+ err @ tiff::TiffError::FormatError(_)
+ | err @ tiff::TiffError::IntSizeError
+ | err @ tiff::TiffError::UsageError(_) => {
+ ImageError::Encoding(EncodingError::new(ImageFormat::Tiff.into(), err))
+ }
+ tiff::TiffError::UnsupportedError(desc) => {
+ ImageError::Unsupported(UnsupportedError::from_format_and_kind(
+ ImageFormat::Tiff.into(),
+ UnsupportedErrorKind::GenericFeature(desc.to_string()),
+ ))
+ }
+ tiff::TiffError::LimitsExceeded => {
+ ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory))
+ }
+ }
+ }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct TiffReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for TiffReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ if self.0.position() == 0 && buf.is_empty() {
+ mem::swap(buf, self.0.get_mut());
+ Ok(buf.len())
+ } else {
+ self.0.read_to_end(buf)
+ }
+ }
+}
+
+impl<'a, R: 'a + Read + Seek> ImageDecoder<'a> for TiffDecoder<R> {
+ type Reader = TiffReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ self.dimensions
+ }
+
+ fn color_type(&self) -> ColorType {
+ self.color_type
+ }
+
+ fn icc_profile(&mut self) -> Option<Vec<u8>> {
+ if let Some(decoder) = &mut self.inner {
+ decoder.get_tag_u8_vec(tiff::tags::Tag::Unknown(34675)).ok()
+ } else {
+ None
+ }
+ }
+
+ fn set_limits(&mut self, limits: crate::io::Limits) -> ImageResult<()> {
+ limits.check_support(&crate::io::LimitSupport::default())?;
+
+ let (width, height) = self.dimensions();
+ limits.check_dimensions(width, height)?;
+
+ let max_alloc = limits.max_alloc.unwrap_or(u64::MAX);
+ let max_intermediate_alloc = max_alloc.saturating_sub(self.total_bytes());
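+ // Illustrative split: with max_alloc = 100 MiB and an image needing 30 MiB,
+ // the decoding buffer below is capped at 30 MiB and the remaining 70 MiB is
+ // left for intermediate allocations.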
+
+ let mut tiff_limits: tiff::decoder::Limits = Default::default();
+ tiff_limits.decoding_buffer_size =
+ usize::try_from(max_alloc - max_intermediate_alloc).unwrap_or(usize::MAX);
+ tiff_limits.intermediate_buffer_size =
+ usize::try_from(max_intermediate_alloc).unwrap_or(usize::MAX);
+ tiff_limits.ifd_value_size = tiff_limits.intermediate_buffer_size;
+ self.inner = Some(self.inner.take().unwrap().with_limits(tiff_limits));
+
+ Ok(())
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ let buf = match self
+ .inner
+ .unwrap()
+ .read_image()
+ .map_err(ImageError::from_tiff_decode)?
+ {
+ tiff::decoder::DecodingResult::U8(v) => v,
+ tiff::decoder::DecodingResult::U16(v) => utils::vec_copy_to_u8(&v),
+ tiff::decoder::DecodingResult::U32(v) => utils::vec_copy_to_u8(&v),
+ tiff::decoder::DecodingResult::U64(v) => utils::vec_copy_to_u8(&v),
+ tiff::decoder::DecodingResult::I8(v) => utils::vec_copy_to_u8(&v),
+ tiff::decoder::DecodingResult::I16(v) => utils::vec_copy_to_u8(&v),
+ tiff::decoder::DecodingResult::I32(v) => utils::vec_copy_to_u8(&v),
+ tiff::decoder::DecodingResult::I64(v) => utils::vec_copy_to_u8(&v),
+ tiff::decoder::DecodingResult::F32(v) => utils::vec_copy_to_u8(&v),
+ tiff::decoder::DecodingResult::F64(v) => utils::vec_copy_to_u8(&v),
+ };
+
+ Ok(TiffReader(Cursor::new(buf), PhantomData))
+ }
+
+ fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+ match self
+ .inner
+ .unwrap()
+ .read_image()
+ .map_err(ImageError::from_tiff_decode)?
+ {
+ tiff::decoder::DecodingResult::U8(v) => {
+ buf.copy_from_slice(&v);
+ }
+ tiff::decoder::DecodingResult::U16(v) => {
+ buf.copy_from_slice(bytemuck::cast_slice(&v));
+ }
+ tiff::decoder::DecodingResult::U32(v) => {
+ buf.copy_from_slice(bytemuck::cast_slice(&v));
+ }
+ tiff::decoder::DecodingResult::U64(v) => {
+ buf.copy_from_slice(bytemuck::cast_slice(&v));
+ }
+ tiff::decoder::DecodingResult::I8(v) => {
+ buf.copy_from_slice(bytemuck::cast_slice(&v));
+ }
+ tiff::decoder::DecodingResult::I16(v) => {
+ buf.copy_from_slice(bytemuck::cast_slice(&v));
+ }
+ tiff::decoder::DecodingResult::I32(v) => {
+ buf.copy_from_slice(bytemuck::cast_slice(&v));
+ }
+ tiff::decoder::DecodingResult::I64(v) => {
+ buf.copy_from_slice(bytemuck::cast_slice(&v));
+ }
+ tiff::decoder::DecodingResult::F32(v) => {
+ buf.copy_from_slice(bytemuck::cast_slice(&v));
+ }
+ tiff::decoder::DecodingResult::F64(v) => {
+ buf.copy_from_slice(bytemuck::cast_slice(&v));
+ }
+ }
+ Ok(())
+ }
+}
+
+/// Encoder for TIFF images
+pub struct TiffEncoder<W> {
+ w: W,
+}
+
+// Utility to simplify and deduplicate error handling during 16-bit encoding.
+fn u8_slice_as_u16(buf: &[u8]) -> ImageResult<&[u16]> {
+ bytemuck::try_cast_slice(buf).map_err(|err| {
+ // If the buffer is not aligned or not the correct length for a u16 slice, err.
+ //
+ // `bytemuck::PodCastError` of bytemuck-1.2.0 does not implement the
+ // `Error` and `Display` traits.
+ // See <https://github.com/Lokathor/bytemuck/issues/22>.
+ ImageError::Parameter(ParameterError::from_kind(ParameterErrorKind::Generic(
+ format!("{:?}", err),
+ )))
+ })
+}
+
+impl<W: Write + Seek> TiffEncoder<W> {
+ /// Create a new encoder that writes its output to `w`
+ pub fn new(w: W) -> TiffEncoder<W> {
+ TiffEncoder { w }
+ }
+
+ /// Encodes the image `image` that has dimensions `width` and `height` and `ColorType` `c`.
+ ///
+ /// 16-bit types assume the buffer is native endian.
+ pub fn encode(self, data: &[u8], width: u32, height: u32, color: ColorType) -> ImageResult<()> {
+ let mut encoder =
+ tiff::encoder::TiffEncoder::new(self.w).map_err(ImageError::from_tiff_encode)?;
+ match color {
+ ColorType::L8 => {
+ encoder.write_image::<tiff::encoder::colortype::Gray8>(width, height, data)
+ }
+ ColorType::Rgb8 => {
+ encoder.write_image::<tiff::encoder::colortype::RGB8>(width, height, data)
+ }
+ ColorType::Rgba8 => {
+ encoder.write_image::<tiff::encoder::colortype::RGBA8>(width, height, data)
+ }
+ ColorType::L16 => encoder.write_image::<tiff::encoder::colortype::Gray16>(
+ width,
+ height,
+ u8_slice_as_u16(data)?,
+ ),
+ ColorType::Rgb16 => encoder.write_image::<tiff::encoder::colortype::RGB16>(
+ width,
+ height,
+ u8_slice_as_u16(data)?,
+ ),
+ ColorType::Rgba16 => encoder.write_image::<tiff::encoder::colortype::RGBA16>(
+ width,
+ height,
+ u8_slice_as_u16(data)?,
+ ),
+ _ => {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::Tiff.into(),
+ UnsupportedErrorKind::Color(color.into()),
+ ),
+ ))
+ }
+ }
+ .map_err(ImageError::from_tiff_encode)?;
+
+ Ok(())
+ }
+}
+
+impl<W: Write + Seek> ImageEncoder for TiffEncoder<W> {
+ fn write_image(
+ self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ self.encode(buf, width, height, color_type)
+ }
+}
diff --git a/vendor/image/src/codecs/webp/decoder.rs b/vendor/image/src/codecs/webp/decoder.rs
new file mode 100644
index 0000000..9120290
--- /dev/null
+++ b/vendor/image/src/codecs/webp/decoder.rs
@@ -0,0 +1,399 @@
+use byteorder::{LittleEndian, ReadBytesExt};
+use std::convert::TryFrom;
+use std::io::{self, Cursor, Error, Read};
+use std::marker::PhantomData;
+use std::{error, fmt, mem};
+
+use crate::error::{DecodingError, ImageError, ImageResult, ParameterError, ParameterErrorKind};
+use crate::image::{ImageDecoder, ImageFormat};
+use crate::{color, AnimationDecoder, Frames, Rgba};
+
+use super::lossless::{LosslessDecoder, LosslessFrame};
+use super::vp8::{Frame as VP8Frame, Vp8Decoder};
+
+use super::extended::{read_extended_header, ExtendedImage};
+
+/// All errors that can occur when attempting to parse a WEBP container
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum DecoderError {
+ /// RIFF's "RIFF" signature not found or invalid
+ RiffSignatureInvalid([u8; 4]),
+ /// WebP's "WEBP" signature not found or invalid
+ WebpSignatureInvalid([u8; 4]),
+ /// Chunk Header was incorrect or invalid in its usage
+ ChunkHeaderInvalid([u8; 4]),
+}
+
+impl fmt::Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ struct SignatureWriter([u8; 4]);
+ impl fmt::Display for SignatureWriter {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "[{:#04X?}, {:#04X?}, {:#04X?}, {:#04X?}]",
+ self.0[0], self.0[1], self.0[2], self.0[3]
+ )
+ }
+ }
+
+ match self {
+ DecoderError::RiffSignatureInvalid(riff) => f.write_fmt(format_args!(
+ "Invalid RIFF signature: {}",
+ SignatureWriter(*riff)
+ )),
+ DecoderError::WebpSignatureInvalid(webp) => f.write_fmt(format_args!(
+ "Invalid WebP signature: {}",
+ SignatureWriter(*webp)
+ )),
+ DecoderError::ChunkHeaderInvalid(header) => f.write_fmt(format_args!(
+ "Invalid Chunk header: {}",
+ SignatureWriter(*header)
+ )),
+ }
+ }
+}
+
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {}
+
+/// All possible RIFF chunks in a WebP image file
+#[allow(clippy::upper_case_acronyms)]
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub(crate) enum WebPRiffChunk {
+ RIFF,
+ WEBP,
+ VP8,
+ VP8L,
+ VP8X,
+ ANIM,
+ ANMF,
+ ALPH,
+ ICCP,
+ EXIF,
+ XMP,
+}
+
+impl WebPRiffChunk {
+ pub(crate) fn from_fourcc(chunk_fourcc: [u8; 4]) -> ImageResult<Self> {
+ match &chunk_fourcc {
+ b"RIFF" => Ok(Self::RIFF),
+ b"WEBP" => Ok(Self::WEBP),
+ b"VP8 " => Ok(Self::VP8),
+ b"VP8L" => Ok(Self::VP8L),
+ b"VP8X" => Ok(Self::VP8X),
+ b"ANIM" => Ok(Self::ANIM),
+ b"ANMF" => Ok(Self::ANMF),
+ b"ALPH" => Ok(Self::ALPH),
+ b"ICCP" => Ok(Self::ICCP),
+ b"EXIF" => Ok(Self::EXIF),
+ b"XMP " => Ok(Self::XMP),
+ _ => Err(DecoderError::ChunkHeaderInvalid(chunk_fourcc).into()),
+ }
+ }
+
+ pub(crate) fn to_fourcc(&self) -> [u8; 4] {
+ match self {
+ Self::RIFF => *b"RIFF",
+ Self::WEBP => *b"WEBP",
+ Self::VP8 => *b"VP8 ",
+ Self::VP8L => *b"VP8L",
+ Self::VP8X => *b"VP8X",
+ Self::ANIM => *b"ANIM",
+ Self::ANMF => *b"ANMF",
+ Self::ALPH => *b"ALPH",
+ Self::ICCP => *b"ICCP",
+ Self::EXIF => *b"EXIF",
+ Self::XMP => *b"XMP ",
+ }
+ }
+}
+
+enum WebPImage {
+ Lossy(VP8Frame),
+ Lossless(LosslessFrame),
+ Extended(ExtendedImage),
+}
+
+/// WebP Image format decoder. Supports lossy RGB images, lossless RGBA images and extended (VP8X) images, including animations.
+pub struct WebPDecoder<R> {
+ r: R,
+ image: WebPImage,
+}
+
+impl<R: Read> WebPDecoder<R> {
+ /// Create a new WebPDecoder from the reader `r`.
+ /// This function takes ownership of the Reader.
+ pub fn new(r: R) -> ImageResult<WebPDecoder<R>> {
+ let image = WebPImage::Lossy(Default::default());
+
+ let mut decoder = WebPDecoder { r, image };
+ decoder.read_data()?;
+ Ok(decoder)
+ }
+
+ // Reads the 12 bytes of the WebP file header.
+ fn read_riff_header(&mut self) -> ImageResult<u32> {
+ let mut riff = [0; 4];
+ self.r.read_exact(&mut riff)?;
+ if &riff != b"RIFF" {
+ return Err(DecoderError::RiffSignatureInvalid(riff).into());
+ }
+
+ let size = self.r.read_u32::<LittleEndian>()?;
+
+ let mut webp = [0; 4];
+ self.r.read_exact(&mut webp)?;
+ if &webp != b"WEBP" {
+ return Err(DecoderError::WebpSignatureInvalid(webp).into());
+ }
+
+ Ok(size)
+ }
+
+ // Reads the chunk header and decodes the frame, returning the parsed image.
+ fn read_frame(&mut self) -> ImageResult<WebPImage> {
+ let chunk = read_chunk(&mut self.r)?;
+
+ match chunk {
+ Some((cursor, WebPRiffChunk::VP8)) => {
+ let mut vp8_decoder = Vp8Decoder::new(cursor);
+ let frame = vp8_decoder.decode_frame()?;
+
+ Ok(WebPImage::Lossy(frame.clone()))
+ }
+ Some((cursor, WebPRiffChunk::VP8L)) => {
+ let mut lossless_decoder = LosslessDecoder::new(cursor);
+ let frame = lossless_decoder.decode_frame()?;
+
+ Ok(WebPImage::Lossless(frame.clone()))
+ }
+ Some((mut cursor, WebPRiffChunk::VP8X)) => {
+ let info = read_extended_header(&mut cursor)?;
+
+ let image = ExtendedImage::read_extended_chunks(&mut self.r, info)?;
+
+ Ok(WebPImage::Extended(image))
+ }
+ None => Err(ImageError::IoError(Error::from(
+ io::ErrorKind::UnexpectedEof,
+ ))),
+ Some((_, chunk)) => Err(DecoderError::ChunkHeaderInvalid(chunk.to_fourcc()).into()),
+ }
+ }
+
+ fn read_data(&mut self) -> ImageResult<()> {
+ let _size = self.read_riff_header()?;
+
+ let image = self.read_frame()?;
+
+ self.image = image;
+
+ Ok(())
+ }
+
+ /// Returns true if the image as described by the bitstream is animated.
+ pub fn has_animation(&self) -> bool {
+ match &self.image {
+ WebPImage::Lossy(_) => false,
+ WebPImage::Lossless(_) => false,
+ WebPImage::Extended(extended) => extended.has_animation(),
+ }
+ }
+
+ /// Sets the background color if the image is an extended and animated webp.
+ pub fn set_background_color(&mut self, color: Rgba<u8>) -> ImageResult<()> {
+ match &mut self.image {
+ WebPImage::Extended(image) => image.set_background_color(color),
+ _ => Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(
+ "Background color can only be set on animated webp".to_owned(),
+ ),
+ ))),
+ }
+ }
+}
+
+pub(crate) fn read_len_cursor<R>(r: &mut R) -> ImageResult<Cursor<Vec<u8>>>
+where
+ R: Read,
+{
+ let unpadded_len = u64::from(r.read_u32::<LittleEndian>()?);
+
+ // RIFF chunks containing an odd number of bytes append
+ // an extra 0x00 at the end of the chunk
+ //
+ // The addition cannot overflow since we have a u64 that was created from a u32
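+ //
+ // (Illustrative: a chunk declaring 7 bytes occupies 8 bytes on disk; the
+ // extra padding byte is popped again below.)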
+ let len = unpadded_len + (unpadded_len % 2);
+
+ let mut framedata = Vec::new();
+ r.by_ref().take(len).read_to_end(&mut framedata)?;
+
+ //remove padding byte
+ if unpadded_len % 2 == 1 {
+ framedata.pop();
+ }
+
+ Ok(io::Cursor::new(framedata))
+}
+
+/// Reads a chunk header FourCC
+/// Returns None if and only if we hit end of file reading the four character code of the chunk
+/// The inner error is `Err` if and only if the chunk header FourCC is present but unknown
+pub(crate) fn read_fourcc<R: Read>(r: &mut R) -> ImageResult<Option<ImageResult<WebPRiffChunk>>> {
+ let mut chunk_fourcc = [0; 4];
+ let result = r.read_exact(&mut chunk_fourcc);
+
+ match result {
+ Ok(()) => {}
+ Err(err) => {
+ if err.kind() == io::ErrorKind::UnexpectedEof {
+ return Ok(None);
+ } else {
+ return Err(err.into());
+ }
+ }
+ }
+
+ let chunk = WebPRiffChunk::from_fourcc(chunk_fourcc);
+ Ok(Some(chunk))
+}
+
+/// Reads a chunk
+/// Returns an error if the chunk header is not a valid webp header or some other reading error
+/// Returns None if and only if we hit end of file reading the four character code of the chunk
+pub(crate) fn read_chunk<R>(r: &mut R) -> ImageResult<Option<(Cursor<Vec<u8>>, WebPRiffChunk)>>
+where
+ R: Read,
+{
+ if let Some(chunk) = read_fourcc(r)? {
+ let chunk = chunk?;
+ let cursor = read_len_cursor(r)?;
+ Ok(Some((cursor, chunk)))
+ } else {
+ Ok(None)
+ }
+}
+
+/// Wrapper struct around a `Cursor<Vec<u8>>`
+pub struct WebpReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
+impl<R> Read for WebpReader<R> {
+ fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+ self.0.read(buf)
+ }
+ fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
+ if self.0.position() == 0 && buf.is_empty() {
+ mem::swap(buf, self.0.get_mut());
+ Ok(buf.len())
+ } else {
+ self.0.read_to_end(buf)
+ }
+ }
+}
+
+impl<'a, R: 'a + Read> ImageDecoder<'a> for WebPDecoder<R> {
+ type Reader = WebpReader<R>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ match &self.image {
+ WebPImage::Lossy(vp8_frame) => {
+ (u32::from(vp8_frame.width), u32::from(vp8_frame.height))
+ }
+ WebPImage::Lossless(lossless_frame) => (
+ u32::from(lossless_frame.width),
+ u32::from(lossless_frame.height),
+ ),
+ WebPImage::Extended(extended) => extended.dimensions(),
+ }
+ }
+
+ fn color_type(&self) -> color::ColorType {
+ match &self.image {
+ WebPImage::Lossy(_) => color::ColorType::Rgb8,
+ WebPImage::Lossless(_) => color::ColorType::Rgba8,
+ WebPImage::Extended(extended) => extended.color_type(),
+ }
+ }
+
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ match &self.image {
+ WebPImage::Lossy(vp8_frame) => {
+ let mut data = vec![0; vp8_frame.get_buf_size()];
+ vp8_frame.fill_rgb(data.as_mut_slice());
+ Ok(WebpReader(Cursor::new(data), PhantomData))
+ }
+ WebPImage::Lossless(lossless_frame) => {
+ let mut data = vec![0; lossless_frame.get_buf_size()];
+ lossless_frame.fill_rgba(data.as_mut_slice());
+ Ok(WebpReader(Cursor::new(data), PhantomData))
+ }
+ WebPImage::Extended(extended) => {
+ let mut data = vec![0; extended.get_buf_size()];
+ extended.fill_buf(data.as_mut_slice());
+ Ok(WebpReader(Cursor::new(data), PhantomData))
+ }
+ }
+ }
+
+ fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+
+ match &self.image {
+ WebPImage::Lossy(vp8_frame) => {
+ vp8_frame.fill_rgb(buf);
+ }
+ WebPImage::Lossless(lossless_frame) => {
+ lossless_frame.fill_rgba(buf);
+ }
+ WebPImage::Extended(extended) => {
+ extended.fill_buf(buf);
+ }
+ }
+ Ok(())
+ }
+
+ fn icc_profile(&mut self) -> Option<Vec<u8>> {
+ if let WebPImage::Extended(extended) = &self.image {
+ extended.icc_profile()
+ } else {
+ None
+ }
+ }
+}
+
+impl<'a, R: 'a + Read> AnimationDecoder<'a> for WebPDecoder<R> {
+ fn into_frames(self) -> Frames<'a> {
+ match self.image {
+ WebPImage::Lossy(_) | WebPImage::Lossless(_) => {
+ Frames::new(Box::new(std::iter::empty()))
+ }
+ WebPImage::Extended(extended_image) => extended_image.into_frames(),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn add_with_overflow_size() {
+ let bytes = vec![
+ 0x52, 0x49, 0x46, 0x46, 0xaf, 0x37, 0x80, 0x47, 0x57, 0x45, 0x42, 0x50, 0x6c, 0x64,
+ 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xfb, 0x7e, 0x73, 0x00, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x40, 0xfb, 0xff, 0xff, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x62, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49,
+ 0x49, 0x54, 0x55, 0x50, 0x4c, 0x54, 0x59, 0x50, 0x45, 0x33, 0x37, 0x44, 0x4d, 0x46,
+ ];
+
+ let data = std::io::Cursor::new(bytes);
+
+ let _ = WebPDecoder::new(data);
+ }
+}
diff --git a/vendor/image/src/codecs/webp/encoder.rs b/vendor/image/src/codecs/webp/encoder.rs
new file mode 100644
index 0000000..0383046
--- /dev/null
+++ b/vendor/image/src/codecs/webp/encoder.rs
@@ -0,0 +1,242 @@
+//! Encoding of WebP images.
+//!
+//! Uses the simple encoding API from the [libwebp] library.
+//!
+//! [libwebp]: https://developers.google.com/speed/webp/docs/api#simple_encoding_api
+use std::io::Write;
+
+use libwebp::{Encoder, PixelLayout, WebPMemory};
+
+use crate::error::{
+ EncodingError, ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::flat::SampleLayout;
+use crate::{ColorType, ImageEncoder, ImageError, ImageFormat, ImageResult};
+
+/// WebP Encoder.
+pub struct WebPEncoder<W> {
+ inner: W,
+ quality: WebPQuality,
+}
+
+/// WebP encoder quality.
+#[derive(Debug, Copy, Clone)]
+pub struct WebPQuality(Quality);
+
+#[derive(Debug, Copy, Clone)]
+enum Quality {
+ Lossless,
+ Lossy(u8),
+}
+
+impl WebPQuality {
+ /// Minimum lossy quality value (0).
+ pub const MIN: u8 = 0;
+ /// Maximum lossy quality value (100).
+ pub const MAX: u8 = 100;
+ /// Default lossy quality (80), providing a balance of quality and file size.
+ pub const DEFAULT: u8 = 80;
+
+ /// Lossless encoding.
+ pub fn lossless() -> Self {
+ Self(Quality::Lossless)
+ }
+
+ /// Lossy encoding. 0 = low quality, small size; 100 = high quality, large size.
+ ///
+ /// Values are clamped from 0 to 100.
+ pub fn lossy(quality: u8) -> Self {
+ Self(Quality::Lossy(quality.clamp(Self::MIN, Self::MAX)))
+ }
+}
+
+impl Default for WebPQuality {
+ fn default() -> Self {
+ Self::lossy(WebPQuality::DEFAULT)
+ }
+}
+
+impl<W: Write> WebPEncoder<W> {
+ /// Create a new encoder that writes its output to `w`.
+ ///
+ /// Defaults to lossy encoding, see [`WebPQuality::DEFAULT`].
+ pub fn new(w: W) -> Self {
+ WebPEncoder::new_with_quality(w, WebPQuality::default())
+ }
+
+ /// Create a new encoder with the specified quality, that writes its output to `w`.
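+ ///
+ /// A minimal sketch, mirroring the deterministic test in this module (the
+ /// one-pixel buffer is only illustrative):
+ ///
+ /// ```no_run
+ /// use image::codecs::webp::{WebPEncoder, WebPQuality};
+ /// use image::{ColorType, ImageEncoder};
+ ///
+ /// let mut encoded = Vec::new();
+ /// WebPEncoder::new_with_quality(&mut encoded, WebPQuality::lossless())
+ ///     .write_image(&[255, 0, 0], 1, 1, ColorType::Rgb8)
+ ///     .expect("encoding failed");
+ /// ```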
+ pub fn new_with_quality(w: W, quality: WebPQuality) -> Self {
+ Self { inner: w, quality }
+ }
+
+ /// Encode image data with the indicated color type.
+ ///
+ /// The encoder requires image data to be Rgb8 or Rgba8.
+ pub fn encode(
+ mut self,
+ data: &[u8],
+ width: u32,
+ height: u32,
+ color: ColorType,
+ ) -> ImageResult<()> {
+ // TODO: convert color types internally?
+ let layout = match color {
+ ColorType::Rgb8 => PixelLayout::Rgb,
+ ColorType::Rgba8 => PixelLayout::Rgba,
+ _ => {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::WebP.into(),
+ UnsupportedErrorKind::Color(color.into()),
+ ),
+ ))
+ }
+ };
+
+ // Validate dimensions upfront to avoid panics.
+ if width == 0
+ || height == 0
+ || !SampleLayout::row_major_packed(color.channel_count(), width, height)
+ .fits(data.len())
+ {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+
+ // Call the native libwebp library to encode the image.
+ let encoder = Encoder::new(data, layout, width, height);
+ let encoded: WebPMemory = match self.quality.0 {
+ Quality::Lossless => encoder.encode_lossless(),
+ Quality::Lossy(quality) => encoder.encode(quality as f32),
+ };
+
+ // The simple encoding API in libwebp does not return errors.
+ if encoded.is_empty() {
+ return Err(ImageError::Encoding(EncodingError::new(
+ ImageFormat::WebP.into(),
+ "encoding failed, output empty",
+ )));
+ }
+
+ self.inner.write_all(&encoded)?;
+ Ok(())
+ }
+}
+
+impl<W: Write> ImageEncoder for WebPEncoder<W> {
+ fn write_image(
+ self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()> {
+ self.encode(buf, width, height, color_type)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::codecs::webp::{WebPEncoder, WebPQuality};
+ use crate::{ColorType, ImageEncoder};
+
+ #[test]
+ fn webp_lossless_deterministic() {
+ // 1x1 8-bit image buffer containing a single red pixel.
+ let rgb: &[u8] = &[255, 0, 0];
+ let rgba: &[u8] = &[255, 0, 0, 128];
+ for (color, img, expected) in [
+ (
+ ColorType::Rgb8,
+ rgb,
+ [
+ 82, 73, 70, 70, 28, 0, 0, 0, 87, 69, 66, 80, 86, 80, 56, 76, 15, 0, 0, 0, 47,
+ 0, 0, 0, 0, 7, 16, 253, 143, 254, 7, 34, 162, 255, 1, 0,
+ ],
+ ),
+ (
+ ColorType::Rgba8,
+ rgba,
+ [
+ 82, 73, 70, 70, 28, 0, 0, 0, 87, 69, 66, 80, 86, 80, 56, 76, 15, 0, 0, 0, 47,
+ 0, 0, 0, 16, 7, 16, 253, 143, 2, 6, 34, 162, 255, 1, 0,
+ ],
+ ),
+ ] {
+ // Encode it into a memory buffer.
+ let mut encoded_img = Vec::new();
+ {
+ let encoder =
+ WebPEncoder::new_with_quality(&mut encoded_img, WebPQuality::lossless());
+ encoder
+ .write_image(&img, 1, 1, color)
+ .expect("image encoding failed");
+ }
+
+ // WebP encoding should be deterministic.
+ assert_eq!(encoded_img, expected);
+ }
+ }
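+
+ // A minimal lossy-encoding sketch added for illustration; it is not part of the
+ // upstream test suite, and the quality value 75 is an arbitrary choice.
+ #[test]
+ fn webp_lossy_smoke() {
+ let pixels = [0u8; 2 * 2 * 3]; // 2x2 RGB image, all black
+ let mut out = Vec::new();
+ let encoder = WebPEncoder::new_with_quality(&mut out, WebPQuality::lossy(75));
+ encoder
+ .write_image(&pixels, 2, 2, ColorType::Rgb8)
+ .expect("lossy encoding failed");
+ assert!(!out.is_empty());
+ }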
+
+ #[derive(Debug, Clone)]
+ struct MockImage {
+ width: u32,
+ height: u32,
+ color: ColorType,
+ data: Vec<u8>,
+ }
+
+ impl quickcheck::Arbitrary for MockImage {
+ fn arbitrary(g: &mut quickcheck::Gen) -> Self {
+ // Limit to small, non-empty images <= 512x512.
+ let width = u32::arbitrary(g) % 512 + 1;
+ let height = u32::arbitrary(g) % 512 + 1;
+ let (color, stride) = if bool::arbitrary(g) {
+ (ColorType::Rgb8, 3)
+ } else {
+ (ColorType::Rgba8, 4)
+ };
+ let size = width * height * stride;
+ let data: Vec<u8> = (0..size).map(|_| u8::arbitrary(g)).collect();
+ MockImage {
+ width,
+ height,
+ color,
+ data,
+ }
+ }
+ }
+
+ quickcheck! {
+ fn fuzz_webp_valid_image(image: MockImage, quality: u8) -> bool {
+ // Check valid images do not panic.
+ let mut buffer = Vec::<u8>::new();
+ for webp_quality in [WebPQuality::lossless(), WebPQuality::lossy(quality)] {
+ buffer.clear();
+ let encoder = WebPEncoder::new_with_quality(&mut buffer, webp_quality);
+ if encoder
+ .write_image(&image.data, image.width, image.height, image.color)
+ .is_err() {
+ return false;
+ }
+ }
+ true
+ }
+
+ fn fuzz_webp_no_panic(data: Vec<u8>, width: u8, height: u8, quality: u8) -> bool {
+ // Check random (usually invalid) parameters do not panic.
+ let mut buffer = Vec::<u8>::new();
+ for color in [ColorType::Rgb8, ColorType::Rgba8] {
+ for webp_quality in [WebPQuality::lossless(), WebPQuality::lossy(quality)] {
+ buffer.clear();
+ let encoder = WebPEncoder::new_with_quality(&mut buffer, webp_quality);
+ // Ignore errors.
+ let _ = encoder
+ .write_image(&data, width as u32, height as u32, color);
+ }
+ }
+ true
+ }
+ }
+}
diff --git a/vendor/image/src/codecs/webp/extended.rs b/vendor/image/src/codecs/webp/extended.rs
new file mode 100644
index 0000000..3dc6b34
--- /dev/null
+++ b/vendor/image/src/codecs/webp/extended.rs
@@ -0,0 +1,839 @@
+use std::convert::TryInto;
+use std::io::{self, Cursor, Error, Read};
+use std::{error, fmt};
+
+use super::decoder::{
+ read_chunk, read_fourcc, read_len_cursor, DecoderError::ChunkHeaderInvalid, WebPRiffChunk,
+};
+use super::lossless::{LosslessDecoder, LosslessFrame};
+use super::vp8::{Frame as VP8Frame, Vp8Decoder};
+use crate::error::{DecodingError, ParameterError, ParameterErrorKind};
+use crate::image::ImageFormat;
+use crate::{
+ ColorType, Delay, Frame, Frames, ImageError, ImageResult, Rgb, RgbImage, Rgba, RgbaImage,
+};
+use byteorder::{LittleEndian, ReadBytesExt};
+
+//all errors that can occur while parsing extended chunks in a WebP file
+#[derive(Debug, Clone, Copy)]
+enum DecoderError {
+ // Some bits were invalid
+ InfoBitsInvalid { name: &'static str, value: u32 },
+ // Alpha chunk doesn't match the frame's size
+ AlphaChunkSizeMismatch,
+ // Image is too large, either for the platform's pointer size or generally
+ ImageTooLarge,
+ // Frame would go out of the canvas
+ FrameOutsideImage,
+}
+
+impl fmt::Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DecoderError::InfoBitsInvalid { name, value } => f.write_fmt(format_args!(
+ "Info bits `{}` invalid, received value: {}",
+ name, value
+ )),
+ DecoderError::AlphaChunkSizeMismatch => {
+ f.write_str("Alpha chunk doesn't match the size of the frame")
+ }
+ DecoderError::ImageTooLarge => f.write_str("Image is too large to be decoded"),
+ DecoderError::FrameOutsideImage => {
+ f.write_str("Frame is too large and would go outside the image")
+ }
+ }
+ }
+}
+
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {}
+
+#[derive(Debug, Clone)]
+pub(crate) struct WebPExtendedInfo {
+ _icc_profile: bool,
+ _alpha: bool,
+ _exif_metadata: bool,
+ _xmp_metadata: bool,
+ _animation: bool,
+ canvas_width: u32,
+ canvas_height: u32,
+ icc_profile: Option<Vec<u8>>,
+}
+
+#[derive(Debug)]
+enum ExtendedImageData {
+ Animation {
+ frames: Vec<AnimatedFrame>,
+ anim_info: WebPAnimatedInfo,
+ },
+ Static(WebPStatic),
+}
+
+#[derive(Debug)]
+pub(crate) struct ExtendedImage {
+ info: WebPExtendedInfo,
+ image: ExtendedImageData,
+}
+
+impl ExtendedImage {
+ pub(crate) fn dimensions(&self) -> (u32, u32) {
+ (self.info.canvas_width, self.info.canvas_height)
+ }
+
+ pub(crate) fn has_animation(&self) -> bool {
+ self.info._animation
+ }
+
+ pub(crate) fn icc_profile(&self) -> Option<Vec<u8>> {
+ self.info.icc_profile.clone()
+ }
+
+ pub(crate) fn color_type(&self) -> ColorType {
+ match &self.image {
+ ExtendedImageData::Animation { frames, .. } => &frames[0].image,
+ ExtendedImageData::Static(image) => image,
+ }
+ .color_type()
+ }
+
+ pub(crate) fn into_frames<'a>(self) -> Frames<'a> {
+ struct FrameIterator {
+ image: ExtendedImage,
+ index: usize,
+ canvas: RgbaImage,
+ }
+
+ impl Iterator for FrameIterator {
+ type Item = ImageResult<Frame>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if let ExtendedImageData::Animation { frames, anim_info } = &self.image.image {
+ let frame = frames.get(self.index);
+ match frame {
+ Some(anim_image) => {
+ self.index += 1;
+ ExtendedImage::draw_subimage(
+ &mut self.canvas,
+ anim_image,
+ anim_info.background_color,
+ )
+ }
+ None => None,
+ }
+ } else {
+ None
+ }
+ }
+ }
+
+ let width = self.info.canvas_width;
+ let height = self.info.canvas_height;
+ let background_color =
+ if let ExtendedImageData::Animation { ref anim_info, .. } = self.image {
+ anim_info.background_color
+ } else {
+ Rgba([0, 0, 0, 0])
+ };
+
+ let frame_iter = FrameIterator {
+ image: self,
+ index: 0,
+ canvas: RgbaImage::from_pixel(width, height, background_color),
+ };
+
+ Frames::new(Box::new(frame_iter))
+ }
+
+ pub(crate) fn read_extended_chunks<R: Read>(
+ reader: &mut R,
+ mut info: WebPExtendedInfo,
+ ) -> ImageResult<ExtendedImage> {
+ let mut anim_info: Option<WebPAnimatedInfo> = None;
+ let mut anim_frames: Vec<AnimatedFrame> = Vec::new();
+ let mut static_frame: Option<WebPStatic> = None;
+ //go until end of file and while chunk headers are valid
+ while let Some((mut cursor, chunk)) = read_extended_chunk(reader)? {
+ match chunk {
+ WebPRiffChunk::EXIF | WebPRiffChunk::XMP => {
+ //ignore these chunks
+ }
+ WebPRiffChunk::ANIM => {
+ if anim_info.is_none() {
+ anim_info = Some(Self::read_anim_info(&mut cursor)?);
+ }
+ }
+ WebPRiffChunk::ANMF => {
+ let frame = read_anim_frame(cursor, info.canvas_width, info.canvas_height)?;
+ anim_frames.push(frame);
+ }
+ WebPRiffChunk::ALPH => {
+ if static_frame.is_none() {
+ let alpha_chunk =
+ read_alpha_chunk(&mut cursor, info.canvas_width, info.canvas_height)?;
+
+ let vp8_frame = read_lossy_with_chunk(reader)?;
+
+ let img = WebPStatic::from_alpha_lossy(alpha_chunk, vp8_frame)?;
+
+ static_frame = Some(img);
+ }
+ }
+ WebPRiffChunk::ICCP => {
+ let mut icc_profile = Vec::new();
+ cursor.read_to_end(&mut icc_profile)?;
+ info.icc_profile = Some(icc_profile);
+ }
+ WebPRiffChunk::VP8 => {
+ if static_frame.is_none() {
+ let vp8_frame = read_lossy(cursor)?;
+
+ let img = WebPStatic::from_lossy(vp8_frame)?;
+
+ static_frame = Some(img);
+ }
+ }
+ WebPRiffChunk::VP8L => {
+ if static_frame.is_none() {
+ let mut lossless_decoder = LosslessDecoder::new(cursor);
+ let frame = lossless_decoder.decode_frame()?;
+ let image = WebPStatic::Lossless(frame.clone());
+
+ static_frame = Some(image);
+ }
+ }
+ _ => return Err(ChunkHeaderInvalid(chunk.to_fourcc()).into()),
+ }
+ }
+
+ let image = if let Some(info) = anim_info {
+ if anim_frames.is_empty() {
+ return Err(ImageError::IoError(Error::from(
+ io::ErrorKind::UnexpectedEof,
+ )));
+ }
+ ExtendedImageData::Animation {
+ frames: anim_frames,
+ anim_info: info,
+ }
+ } else if let Some(frame) = static_frame {
+ ExtendedImageData::Static(frame)
+ } else {
+ //reached end of file before any image data was found
+ return Err(ImageError::IoError(Error::from(
+ io::ErrorKind::UnexpectedEof,
+ )));
+ };
+
+ let image = ExtendedImage { image, info };
+
+ Ok(image)
+ }
+
+ fn read_anim_info<R: Read>(reader: &mut R) -> ImageResult<WebPAnimatedInfo> {
+ let mut colors: [u8; 4] = [0; 4];
+ reader.read_exact(&mut colors)?;
+
+ //background color is [blue, green, red, alpha]
+ let background_color = Rgba([colors[2], colors[1], colors[0], colors[3]]);
+
+ let loop_count = reader.read_u16::<LittleEndian>()?;
+
+ let info = WebPAnimatedInfo {
+ background_color,
+ _loop_count: loop_count,
+ };
+
+ Ok(info)
+ }
+
+ fn draw_subimage(
+ canvas: &mut RgbaImage,
+ anim_image: &AnimatedFrame,
+ background_color: Rgba<u8>,
+ ) -> Option<ImageResult<Frame>> {
+ let mut buffer = vec![0; anim_image.image.get_buf_size()];
+ anim_image.image.fill_buf(&mut buffer);
+ let has_alpha = anim_image.image.has_alpha();
+ let pixel_len: u32 = anim_image.image.color_type().bytes_per_pixel().into();
+
+ 'x: for x in 0..anim_image.width {
+ for y in 0..anim_image.height {
+ let canvas_index: (u32, u32) = (x + anim_image.offset_x, y + anim_image.offset_y);
+ // Negative offsets are not possible due to unsigned ints
+ // If we go out of bounds by height, still continue by x
+ if canvas_index.1 >= canvas.height() {
+ continue 'x;
+ }
+ // If we go out of bounds by width, it doesn't make sense to continue at all
+ if canvas_index.0 >= canvas.width() {
+ break 'x;
+ }
+ let index: usize = ((y * anim_image.width + x) * pixel_len).try_into().unwrap();
+ canvas[canvas_index] = if anim_image.use_alpha_blending && has_alpha {
+ let buffer: [u8; 4] = buffer[index..][..4].try_into().unwrap();
+ ExtendedImage::do_alpha_blending(buffer, canvas[canvas_index])
+ } else {
+ Rgba([
+ buffer[index],
+ buffer[index + 1],
+ buffer[index + 2],
+ if has_alpha { buffer[index + 3] } else { 255 },
+ ])
+ };
+ }
+ }
+
+ let delay = Delay::from_numer_denom_ms(anim_image.duration, 1);
+ let img = canvas.clone();
+ let frame = Frame::from_parts(img, 0, 0, delay);
+
+ if anim_image.dispose {
+ for x in 0..anim_image.width {
+ for y in 0..anim_image.height {
+ let canvas_index = (x + anim_image.offset_x, y + anim_image.offset_y);
+ canvas[canvas_index] = background_color;
+ }
+ }
+ }
+
+ Some(Ok(frame))
+ }
+
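+ // Non-premultiplied "source over" compositing, matching the arithmetic below:
+ //   a_out = a_src + a_dst * (1 - a_src / 255)
+ //   c_out = (c_src * a_src + c_dst * a_dst * (1 - a_src / 255)) / a_out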
+ fn do_alpha_blending(buffer: [u8; 4], canvas: Rgba<u8>) -> Rgba<u8> {
+ let canvas_alpha = f64::from(canvas[3]);
+ let buffer_alpha = f64::from(buffer[3]);
+ let blend_alpha_f64 = buffer_alpha + canvas_alpha * (1.0 - buffer_alpha / 255.0);
+ //value should be between 0 and 255; this truncates the fractional part
+ let blend_alpha: u8 = blend_alpha_f64 as u8;
+
+ let blend_rgb: [u8; 3] = if blend_alpha == 0 {
+ [0, 0, 0]
+ } else {
+ let mut rgb = [0u8; 3];
+ for i in 0..3 {
+ let canvas_f64 = f64::from(canvas[i]);
+ let buffer_f64 = f64::from(buffer[i]);
+
+ let val = (buffer_f64 * buffer_alpha
+ + canvas_f64 * canvas_alpha * (1.0 - buffer_alpha / 255.0))
+ / blend_alpha_f64;
+ //value should be between 0 and 255; this truncates the fractional part
+ rgb[i] = val as u8;
+ }
+
+ rgb
+ };
+
+ Rgba([blend_rgb[0], blend_rgb[1], blend_rgb[2], blend_alpha])
+ }
+
+ pub(crate) fn fill_buf(&self, buf: &mut [u8]) {
+ match &self.image {
+ // will always have at least one frame
+ ExtendedImageData::Animation { frames, anim_info } => {
+ let first_frame = &frames[0];
+ let (canvas_width, canvas_height) = self.dimensions();
+ if canvas_width == first_frame.width && canvas_height == first_frame.height {
+ first_frame.image.fill_buf(buf);
+ } else {
+ let bg_color = match &self.info._alpha {
+ true => Rgba::from([0, 0, 0, 0]),
+ false => anim_info.background_color,
+ };
+ let mut canvas = RgbaImage::from_pixel(canvas_width, canvas_height, bg_color);
+ let _ = ExtendedImage::draw_subimage(&mut canvas, first_frame, bg_color)
+ .unwrap()
+ .unwrap();
+ buf.copy_from_slice(canvas.into_raw().as_slice());
+ }
+ }
+ ExtendedImageData::Static(image) => {
+ image.fill_buf(buf);
+ }
+ }
+ }
+
+ pub(crate) fn get_buf_size(&self) -> usize {
+ match &self.image {
+ // will always have at least one frame
+ ExtendedImageData::Animation { frames, .. } => &frames[0].image,
+ ExtendedImageData::Static(image) => image,
+ }
+ .get_buf_size()
+ }
+
+ pub(crate) fn set_background_color(&mut self, color: Rgba<u8>) -> ImageResult<()> {
+ match &mut self.image {
+ ExtendedImageData::Animation { anim_info, .. } => {
+ anim_info.background_color = color;
+ Ok(())
+ }
+ _ => Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::Generic(
+ "Background color can only be set on animated webp".to_owned(),
+ ),
+ ))),
+ }
+ }
+}
+
+#[derive(Debug)]
+enum WebPStatic {
+ LossyWithAlpha(RgbaImage),
+ LossyWithoutAlpha(RgbImage),
+ Lossless(LosslessFrame),
+}
+
+impl WebPStatic {
+ pub(crate) fn from_alpha_lossy(
+ alpha: AlphaChunk,
+ vp8_frame: VP8Frame,
+ ) -> ImageResult<WebPStatic> {
+ if alpha.data.len() != usize::from(vp8_frame.width) * usize::from(vp8_frame.height) {
+ return Err(DecoderError::AlphaChunkSizeMismatch.into());
+ }
+
+ let size = usize::from(vp8_frame.width).checked_mul(usize::from(vp8_frame.height) * 4);
+ let mut image_vec = match size {
+ Some(size) => vec![0u8; size],
+ None => return Err(DecoderError::ImageTooLarge.into()),
+ };
+
+ vp8_frame.fill_rgba(&mut image_vec);
+
+ for y in 0..vp8_frame.height {
+ for x in 0..vp8_frame.width {
+ let predictor: u8 = WebPStatic::get_predictor(
+ x.into(),
+ y.into(),
+ vp8_frame.width.into(),
+ alpha.filtering_method,
+ &image_vec,
+ );
+ let predictor = u16::from(predictor);
+
+ let alpha_index = usize::from(y) * usize::from(vp8_frame.width) + usize::from(x);
+ let alpha_val = alpha.data[alpha_index];
+ let alpha: u8 = ((predictor + u16::from(alpha_val)) % 256)
+ .try_into()
+ .unwrap();
+
+ let alpha_index = alpha_index * 4 + 3;
+ image_vec[alpha_index] = alpha;
+ }
+ }
+
+ let image = RgbaImage::from_vec(vp8_frame.width.into(), vp8_frame.height.into(), image_vec)
+ .unwrap();
+
+ Ok(WebPStatic::LossyWithAlpha(image))
+ }
+
+ fn get_predictor(
+ x: usize,
+ y: usize,
+ width: usize,
+ filtering_method: FilteringMethod,
+ image_slice: &[u8],
+ ) -> u8 {
+ match filtering_method {
+ FilteringMethod::None => 0,
+ FilteringMethod::Horizontal => {
+ if x == 0 && y == 0 {
+ 0
+ } else if x == 0 {
+ let index = (y - 1) * width + x;
+ image_slice[index * 4 + 3]
+ } else {
+ let index = y * width + x - 1;
+ image_slice[index * 4 + 3]
+ }
+ }
+ FilteringMethod::Vertical => {
+ if x == 0 && y == 0 {
+ 0
+ } else if y == 0 {
+ let index = y * width + x - 1;
+ image_slice[index * 4 + 3]
+ } else {
+ let index = (y - 1) * width + x;
+ image_slice[index * 4 + 3]
+ }
+ }
+ FilteringMethod::Gradient => {
+ let (left, top, top_left) = match (x, y) {
+ (0, 0) => (0, 0, 0),
+ (0, y) => {
+ let above_index = (y - 1) * width + x;
+ let val = image_slice[above_index * 4 + 3];
+ (val, val, val)
+ }
+ (x, 0) => {
+ let before_index = y * width + x - 1;
+ let val = image_slice[before_index * 4 + 3];
+ (val, val, val)
+ }
+ (x, y) => {
+ let left_index = y * width + x - 1;
+ let left = image_slice[left_index * 4 + 3];
+ let top_index = (y - 1) * width + x;
+ let top = image_slice[top_index * 4 + 3];
+ let top_left_index = (y - 1) * width + x - 1;
+ let top_left = image_slice[top_left_index * 4 + 3];
+
+ (left, top, top_left)
+ }
+ };
+
+ let combination = i16::from(left) + i16::from(top) - i16::from(top_left);
+ i16::clamp(combination, 0, 255).try_into().unwrap()
+ }
+ }
+ }
+
+ pub(crate) fn from_lossy(vp8_frame: VP8Frame) -> ImageResult<WebPStatic> {
+ let mut image = RgbImage::from_pixel(
+ vp8_frame.width.into(),
+ vp8_frame.height.into(),
+ Rgb([0, 0, 0]),
+ );
+
+ vp8_frame.fill_rgb(&mut image);
+
+ Ok(WebPStatic::LossyWithoutAlpha(image))
+ }
+
+ pub(crate) fn fill_buf(&self, buf: &mut [u8]) {
+ match self {
+ WebPStatic::LossyWithAlpha(image) => {
+ buf.copy_from_slice(image);
+ }
+ WebPStatic::LossyWithoutAlpha(image) => {
+ buf.copy_from_slice(image);
+ }
+ WebPStatic::Lossless(lossless) => {
+ lossless.fill_rgba(buf);
+ }
+ }
+ }
+
+ pub(crate) fn get_buf_size(&self) -> usize {
+ match self {
+ WebPStatic::LossyWithAlpha(rgba_image) => rgba_image.len(),
+ WebPStatic::LossyWithoutAlpha(rgb_image) => rgb_image.len(),
+ WebPStatic::Lossless(lossless) => lossless.get_buf_size(),
+ }
+ }
+
+ pub(crate) fn color_type(&self) -> ColorType {
+ if self.has_alpha() {
+ ColorType::Rgba8
+ } else {
+ ColorType::Rgb8
+ }
+ }
+
+ pub(crate) fn has_alpha(&self) -> bool {
+ match self {
+ Self::LossyWithAlpha(..) | Self::Lossless(..) => true,
+ Self::LossyWithoutAlpha(..) => false,
+ }
+ }
+}
+
+#[derive(Debug)]
+struct WebPAnimatedInfo {
+ background_color: Rgba<u8>,
+ _loop_count: u16,
+}
+
+#[derive(Debug)]
+struct AnimatedFrame {
+ offset_x: u32,
+ offset_y: u32,
+ width: u32,
+ height: u32,
+ duration: u32,
+ use_alpha_blending: bool,
+ dispose: bool,
+ image: WebPStatic,
+}
+
+/// Reads a chunk, but silently ignores unknown chunks at the end of a file
+fn read_extended_chunk<R>(r: &mut R) -> ImageResult<Option<(Cursor<Vec<u8>>, WebPRiffChunk)>>
+where
+ R: Read,
+{
+ let mut unknown_chunk = Ok(());
+
+ while let Some(chunk) = read_fourcc(r)? {
+ let cursor = read_len_cursor(r)?;
+ match chunk {
+ Ok(chunk) => return unknown_chunk.and(Ok(Some((cursor, chunk)))),
+ Err(err) => unknown_chunk = unknown_chunk.and(Err(err)),
+ }
+ }
+
+ Ok(None)
+}
+
+pub(crate) fn read_extended_header<R: Read>(reader: &mut R) -> ImageResult<WebPExtendedInfo> {
+ let chunk_flags = reader.read_u8()?;
+
+ let reserved_first = chunk_flags & 0b11000000;
+ let icc_profile = chunk_flags & 0b00100000 != 0;
+ let alpha = chunk_flags & 0b00010000 != 0;
+ let exif_metadata = chunk_flags & 0b00001000 != 0;
+ let xmp_metadata = chunk_flags & 0b00000100 != 0;
+ let animation = chunk_flags & 0b00000010 != 0;
+ let reserved_second = chunk_flags & 0b00000001;
+
+ let reserved_third = read_3_bytes(reader)?;
+
+ if reserved_first != 0 || reserved_second != 0 || reserved_third != 0 {
+ let value: u32 = if reserved_first != 0 {
+ reserved_first.into()
+ } else if reserved_second != 0 {
+ reserved_second.into()
+ } else {
+ reserved_third
+ };
+ return Err(DecoderError::InfoBitsInvalid {
+ name: "reserved",
+ value,
+ }
+ .into());
+ }
+
+ let canvas_width = read_3_bytes(reader)? + 1;
+ let canvas_height = read_3_bytes(reader)? + 1;
+
+ //product of canvas dimensions cannot be larger than u32 max
+ if u32::checked_mul(canvas_width, canvas_height).is_none() {
+ return Err(DecoderError::ImageTooLarge.into());
+ }
+
+ let info = WebPExtendedInfo {
+ _icc_profile: icc_profile,
+ _alpha: alpha,
+ _exif_metadata: exif_metadata,
+ _xmp_metadata: xmp_metadata,
+ _animation: animation,
+ canvas_width,
+ canvas_height,
+ icc_profile: None,
+ };
+
+ Ok(info)
+}
+
+fn read_anim_frame<R: Read>(
+ mut reader: R,
+ canvas_width: u32,
+ canvas_height: u32,
+) -> ImageResult<AnimatedFrame> {
+ //the stored frame offsets are half the actual values, so multiply by 2
+ let frame_x = read_3_bytes(&mut reader)? * 2;
+ let frame_y = read_3_bytes(&mut reader)? * 2;
+
+ let frame_width = read_3_bytes(&mut reader)? + 1;
+ let frame_height = read_3_bytes(&mut reader)? + 1;
+
+ if frame_x + frame_width > canvas_width || frame_y + frame_height > canvas_height {
+ return Err(DecoderError::FrameOutsideImage.into());
+ }
+
+ let duration = read_3_bytes(&mut reader)?;
+
+ let frame_info = reader.read_u8()?;
+ let reserved = frame_info & 0b11111100;
+ if reserved != 0 {
+ return Err(DecoderError::InfoBitsInvalid {
+ name: "reserved",
+ value: reserved.into(),
+ }
+ .into());
+ }
+ let use_alpha_blending = frame_info & 0b00000010 == 0;
+ let dispose = frame_info & 0b00000001 != 0;
+
+ //read normal bitstream now
+ let static_image = read_image(&mut reader, frame_width, frame_height)?;
+
+ let frame = AnimatedFrame {
+ offset_x: frame_x,
+ offset_y: frame_y,
+ width: frame_width,
+ height: frame_height,
+ duration,
+ use_alpha_blending,
+ dispose,
+ image: static_image,
+ };
+
+ Ok(frame)
+}
+
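+/// Reads a 24-bit little-endian value, as used for the extended-format size, offset
+/// and duration fields; for example, the bytes [0x01, 0x02, 0x03] decode to 0x030201.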
+fn read_3_bytes<R: Read>(reader: &mut R) -> ImageResult<u32> {
+ let mut buffer: [u8; 3] = [0; 3];
+ reader.read_exact(&mut buffer)?;
+ let value: u32 =
+ (u32::from(buffer[2]) << 16) | (u32::from(buffer[1]) << 8) | u32::from(buffer[0]);
+ Ok(value)
+}
+
+fn read_lossy_with_chunk<R: Read>(reader: &mut R) -> ImageResult<VP8Frame> {
+ let (cursor, chunk) =
+ read_chunk(reader)?.ok_or_else(|| Error::from(io::ErrorKind::UnexpectedEof))?;
+
+ if chunk != WebPRiffChunk::VP8 {
+ return Err(ChunkHeaderInvalid(chunk.to_fourcc()).into());
+ }
+
+ read_lossy(cursor)
+}
+
+fn read_lossy(cursor: Cursor<Vec<u8>>) -> ImageResult<VP8Frame> {
+ let mut vp8_decoder = Vp8Decoder::new(cursor);
+ let frame = vp8_decoder.decode_frame()?;
+
+ Ok(frame.clone())
+}
+
+fn read_image<R: Read>(reader: &mut R, width: u32, height: u32) -> ImageResult<WebPStatic> {
+ let chunk = read_chunk(reader)?;
+
+ match chunk {
+ Some((cursor, WebPRiffChunk::VP8)) => {
+ let mut vp8_decoder = Vp8Decoder::new(cursor);
+ let frame = vp8_decoder.decode_frame()?;
+
+ let img = WebPStatic::from_lossy(frame.clone())?;
+
+ Ok(img)
+ }
+ Some((cursor, WebPRiffChunk::VP8L)) => {
+ let mut lossless_decoder = LosslessDecoder::new(cursor);
+ let frame = lossless_decoder.decode_frame()?;
+
+ let img = WebPStatic::Lossless(frame.clone());
+
+ Ok(img)
+ }
+ Some((mut cursor, WebPRiffChunk::ALPH)) => {
+ let alpha_chunk = read_alpha_chunk(&mut cursor, width, height)?;
+
+ let vp8_frame = read_lossy_with_chunk(reader)?;
+
+ let img = WebPStatic::from_alpha_lossy(alpha_chunk, vp8_frame)?;
+
+ Ok(img)
+ }
+ None => Err(ImageError::IoError(Error::from(
+ io::ErrorKind::UnexpectedEof,
+ ))),
+ Some((_, chunk)) => Err(ChunkHeaderInvalid(chunk.to_fourcc()).into()),
+ }
+}
+
+#[derive(Debug)]
+struct AlphaChunk {
+ _preprocessing: bool,
+ filtering_method: FilteringMethod,
+ data: Vec<u8>,
+}
+
+#[derive(Debug, Copy, Clone)]
+enum FilteringMethod {
+ None,
+ Horizontal,
+ Vertical,
+ Gradient,
+}
+
+fn read_alpha_chunk<R: Read>(reader: &mut R, width: u32, height: u32) -> ImageResult<AlphaChunk> {
+ let info_byte = reader.read_u8()?;
+
+ let reserved = info_byte & 0b11000000;
+ let preprocessing = (info_byte & 0b00110000) >> 4;
+ let filtering = (info_byte & 0b00001100) >> 2;
+ let compression = info_byte & 0b00000011;
+
+ if reserved != 0 {
+ return Err(DecoderError::InfoBitsInvalid {
+ name: "reserved",
+ value: reserved.into(),
+ }
+ .into());
+ }
+
+ let preprocessing = match preprocessing {
+ 0 => false,
+ 1 => true,
+ _ => {
+ return Err(DecoderError::InfoBitsInvalid {
+ name: "reserved",
+ value: preprocessing.into(),
+ }
+ .into())
+ }
+ };
+
+ let filtering_method = match filtering {
+ 0 => FilteringMethod::None,
+ 1 => FilteringMethod::Horizontal,
+ 2 => FilteringMethod::Vertical,
+ 3 => FilteringMethod::Gradient,
+ _ => unreachable!(),
+ };
+
+ let lossless_compression = match compression {
+ 0 => false,
+ 1 => true,
+ _ => {
+ return Err(DecoderError::InfoBitsInvalid {
+ name: "lossless compression",
+ value: compression.into(),
+ }
+ .into())
+ }
+ };
+
+ let mut framedata = Vec::new();
+ reader.read_to_end(&mut framedata)?;
+
+ let data = if lossless_compression {
+ let cursor = io::Cursor::new(framedata);
+
+ let mut decoder = LosslessDecoder::new(cursor);
+ //this is a potential problem for large images; would require rewriting lossless decoder to use u32 for width and height
+ let width: u16 = width
+ .try_into()
+ .map_err(|_| ImageError::from(DecoderError::ImageTooLarge))?;
+ let height: u16 = height
+ .try_into()
+ .map_err(|_| ImageError::from(DecoderError::ImageTooLarge))?;
+ let frame = decoder.decode_frame_implicit_dims(width, height)?;
+
+ let mut data = vec![0u8; usize::from(width) * usize::from(height)];
+
+ frame.fill_green(&mut data);
+
+ data
+ } else {
+ framedata
+ };
+
+ let chunk = AlphaChunk {
+ _preprocessing: preprocessing,
+ filtering_method,
+ data,
+ };
+
+ Ok(chunk)
+}
diff --git a/vendor/image/src/codecs/webp/huffman.rs b/vendor/image/src/codecs/webp/huffman.rs
new file mode 100644
index 0000000..986eee6
--- /dev/null
+++ b/vendor/image/src/codecs/webp/huffman.rs
@@ -0,0 +1,202 @@
+use std::convert::TryInto;
+
+use super::lossless::BitReader;
+use super::lossless::DecoderError;
+use crate::ImageResult;
+
+/// Rudimentary utility for reading Canonical Huffman Codes.
+/// Based on https://github.com/webmproject/libwebp/blob/7f8472a610b61ec780ef0a8873cd954ac512a505/src/utils/huffman.c
+
+const MAX_ALLOWED_CODE_LENGTH: usize = 15;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum HuffmanTreeNode {
+ Branch(usize), //offset in vector to children
+ Leaf(u16), //symbol stored in leaf
+ Empty,
+}
+
+/// Huffman tree
+#[derive(Clone, Debug, Default)]
+pub(crate) struct HuffmanTree {
+ tree: Vec<HuffmanTreeNode>,
+ max_nodes: usize,
+ num_nodes: usize,
+}
+
+impl HuffmanTree {
+ fn is_full(&self) -> bool {
+ self.num_nodes == self.max_nodes
+ }
+
+ /// Turns a node from empty into a branch and assigns its children
+ fn assign_children(&mut self, node_index: usize) -> usize {
+ let offset_index = self.num_nodes - node_index;
+ self.tree[node_index] = HuffmanTreeNode::Branch(offset_index);
+ self.num_nodes += 2;
+
+ offset_index
+ }
+
+ /// Init a huffman tree
+ fn init(num_leaves: usize) -> ImageResult<HuffmanTree> {
+ if num_leaves == 0 {
+ return Err(DecoderError::HuffmanError.into());
+ }
+
+ let max_nodes = 2 * num_leaves - 1;
+ let tree = vec![HuffmanTreeNode::Empty; max_nodes];
+ let num_nodes = 1;
+
+ let tree = HuffmanTree {
+ tree,
+ max_nodes,
+ num_nodes,
+ };
+
+ Ok(tree)
+ }
+
+ /// Converts code lengths to codes
+ fn code_lengths_to_codes(code_lengths: &[u16]) -> ImageResult<Vec<Option<u16>>> {
+ let max_code_length = *code_lengths
+ .iter()
+ .reduce(|a, b| if a >= b { a } else { b })
+ .unwrap();
+
+ if max_code_length > MAX_ALLOWED_CODE_LENGTH.try_into().unwrap() {
+ return Err(DecoderError::HuffmanError.into());
+ }
+
+ let mut code_length_hist = vec![0; MAX_ALLOWED_CODE_LENGTH + 1];
+
+ for &length in code_lengths.iter() {
+ code_length_hist[usize::from(length)] += 1;
+ }
+
+ code_length_hist[0] = 0;
+
+ let mut curr_code = 0;
+ let mut next_codes = vec![None; MAX_ALLOWED_CODE_LENGTH + 1];
+
+ for code_len in 1..=usize::from(max_code_length) {
+ curr_code = (curr_code + code_length_hist[code_len - 1]) << 1;
+ next_codes[code_len] = Some(curr_code);
+ }
+
+ let mut huff_codes = vec![None; code_lengths.len()];
+
+ for (symbol, &length) in code_lengths.iter().enumerate() {
+ let length = usize::from(length);
+ if length > 0 {
+ huff_codes[symbol] = next_codes[length];
+ if let Some(value) = next_codes[length].as_mut() {
+ *value += 1;
+ }
+ } else {
+ huff_codes[symbol] = None;
+ }
+ }
+
+ Ok(huff_codes)
+ }
+
+ /// Adds a symbol to a huffman tree
+ fn add_symbol(&mut self, symbol: u16, code: u16, code_length: u16) -> ImageResult<()> {
+ let mut node_index = 0;
+ let code = usize::from(code);
+
+ for length in (0..code_length).rev() {
+ if node_index >= self.max_nodes {
+ return Err(DecoderError::HuffmanError.into());
+ }
+
+ let node = self.tree[node_index];
+
+ let offset = match node {
+ HuffmanTreeNode::Empty => {
+ if self.is_full() {
+ return Err(DecoderError::HuffmanError.into());
+ }
+ self.assign_children(node_index)
+ }
+ HuffmanTreeNode::Leaf(_) => return Err(DecoderError::HuffmanError.into()),
+ HuffmanTreeNode::Branch(offset) => offset,
+ };
+
+ node_index += offset + ((code >> length) & 1);
+ }
+
+ match self.tree[node_index] {
+ HuffmanTreeNode::Empty => self.tree[node_index] = HuffmanTreeNode::Leaf(symbol),
+ HuffmanTreeNode::Leaf(_) => return Err(DecoderError::HuffmanError.into()),
+ HuffmanTreeNode::Branch(_offset) => return Err(DecoderError::HuffmanError.into()),
+ }
+
+ Ok(())
+ }
+
+ /// Builds a tree implicitly, just from code lengths
+ pub(crate) fn build_implicit(code_lengths: Vec<u16>) -> ImageResult<HuffmanTree> {
+ let mut num_symbols = 0;
+ let mut root_symbol = 0;
+
+ for (symbol, length) in code_lengths.iter().enumerate() {
+ if *length > 0 {
+ num_symbols += 1;
+ root_symbol = symbol.try_into().unwrap();
+ }
+ }
+
+ let mut tree = HuffmanTree::init(num_symbols)?;
+
+ if num_symbols == 1 {
+ tree.add_symbol(root_symbol, 0, 0)?;
+ } else {
+ let codes = HuffmanTree::code_lengths_to_codes(&code_lengths)?;
+
+ for (symbol, &length) in code_lengths.iter().enumerate() {
+ if length > 0 && codes[symbol].is_some() {
+ tree.add_symbol(symbol.try_into().unwrap(), codes[symbol].unwrap(), length)?;
+ }
+ }
+ }
+
+ Ok(tree)
+ }
+
+ /// Builds a tree explicitly from lengths, codes and symbols
+ pub(crate) fn build_explicit(
+ code_lengths: Vec<u16>,
+ codes: Vec<u16>,
+ symbols: Vec<u16>,
+ ) -> ImageResult<HuffmanTree> {
+ let mut tree = HuffmanTree::init(symbols.len())?;
+
+ for i in 0..symbols.len() {
+ tree.add_symbol(symbols[i], codes[i], code_lengths[i])?;
+ }
+
+ Ok(tree)
+ }
+
+ /// Reads a symbol using the bitstream
+ pub(crate) fn read_symbol(&self, bit_reader: &mut BitReader) -> ImageResult<u16> {
+ let mut index = 0;
+ let mut node = self.tree[index];
+
+ while let HuffmanTreeNode::Branch(children_offset) = node {
+ index += children_offset + bit_reader.read_bits::<usize>(1)?;
+ node = self.tree[index];
+ }
+
+ let symbol = match node {
+ HuffmanTreeNode::Branch(_) => unreachable!(),
+ HuffmanTreeNode::Empty => return Err(DecoderError::HuffmanError.into()),
+ HuffmanTreeNode::Leaf(symbol) => symbol,
+ };
+
+ Ok(symbol)
+ }
+}
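+
+#[cfg(test)]
+mod tests {
+ use super::HuffmanTree;
+
+ // Worked example added for illustration (not from upstream): the canonical codes
+ // assigned to the code lengths [2, 2, 2, 3, 3] should be 00, 01, 10, 110 and 111.
+ #[test]
+ fn canonical_code_assignment() {
+ let codes = HuffmanTree::code_lengths_to_codes(&[2, 2, 2, 3, 3]).unwrap();
+ assert_eq!(codes, vec![Some(0), Some(1), Some(2), Some(6), Some(7)]);
+ }
+}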
diff --git a/vendor/image/src/codecs/webp/loop_filter.rs b/vendor/image/src/codecs/webp/loop_filter.rs
new file mode 100644
index 0000000..312059f
--- /dev/null
+++ b/vendor/image/src/codecs/webp/loop_filter.rs
@@ -0,0 +1,147 @@
+//! Does loop filtering on lossy WebP images
+
+use crate::utils::clamp;
+
+#[inline]
+fn c(val: i32) -> i32 {
+ clamp(val, -128, 127)
+}
+
+//unsigned to signed
+#[inline]
+fn u2s(val: u8) -> i32 {
+ i32::from(val) - 128
+}
+
+//signed to unsigned
+#[inline]
+fn s2u(val: i32) -> u8 {
+ (c(val) + 128) as u8
+}
+
+#[inline]
+fn diff(val1: u8, val2: u8) -> u8 {
+ if val1 > val2 {
+ val1 - val2
+ } else {
+ val2 - val1
+ }
+}
+
+//simple filter adjustment; see section 15.2 of the VP8 bitstream guide (RFC 6386)
+fn common_adjust(use_outer_taps: bool, pixels: &mut [u8], point: usize, stride: usize) -> i32 {
+ let p1 = u2s(pixels[point - 2 * stride]);
+ let p0 = u2s(pixels[point - stride]);
+ let q0 = u2s(pixels[point]);
+ let q1 = u2s(pixels[point + stride]);
+
+ //value for the outer 2 pixels
+ let outer = if use_outer_taps { c(p1 - q1) } else { 0 };
+
+ let mut a = c(outer + 3 * (q0 - p0));
+
+ let b = (c(a + 3)) >> 3;
+
+ a = (c(a + 4)) >> 3;
+
+ pixels[point] = s2u(q0 - a);
+ pixels[point - stride] = s2u(p0 + b);
+
+ a
+}
+
+fn simple_threshold(filter_limit: i32, pixels: &[u8], point: usize, stride: usize) -> bool {
+ i32::from(diff(pixels[point - stride], pixels[point])) * 2
+ + i32::from(diff(pixels[point - 2 * stride], pixels[point + stride])) / 2
+ <= filter_limit
+}
+
+fn should_filter(
+ interior_limit: u8,
+ edge_limit: u8,
+ pixels: &[u8],
+ point: usize,
+ stride: usize,
+) -> bool {
+ simple_threshold(i32::from(edge_limit), pixels, point, stride)
+ && diff(pixels[point - 4 * stride], pixels[point - 3 * stride]) <= interior_limit
+ && diff(pixels[point - 3 * stride], pixels[point - 2 * stride]) <= interior_limit
+ && diff(pixels[point - 2 * stride], pixels[point - stride]) <= interior_limit
+ && diff(pixels[point + 3 * stride], pixels[point + 2 * stride]) <= interior_limit
+ && diff(pixels[point + 2 * stride], pixels[point + stride]) <= interior_limit
+ && diff(pixels[point + stride], pixels[point]) <= interior_limit
+}
+
+fn high_edge_variance(threshold: u8, pixels: &[u8], point: usize, stride: usize) -> bool {
+ diff(pixels[point - 2 * stride], pixels[point - stride]) > threshold
+ || diff(pixels[point + stride], pixels[point]) > threshold
+}
+
+//simple filter
+//affects 4 pixels on an edge (2 on each side)
+pub(crate) fn simple_segment(edge_limit: u8, pixels: &mut [u8], point: usize, stride: usize) {
+ if simple_threshold(i32::from(edge_limit), pixels, point, stride) {
+ common_adjust(true, pixels, point, stride);
+ }
+}
+
+//normal filter
+//works on the 8 pixels on the edges between subblocks inside a macroblock
+pub(crate) fn subblock_filter(
+ hev_threshold: u8,
+ interior_limit: u8,
+ edge_limit: u8,
+ pixels: &mut [u8],
+ point: usize,
+ stride: usize,
+) {
+ if should_filter(interior_limit, edge_limit, pixels, point, stride) {
+ let hv = high_edge_variance(hev_threshold, pixels, point, stride);
+
+ let a = (common_adjust(hv, pixels, point, stride) + 1) >> 1;
+
+ if !hv {
+ pixels[point + stride] = s2u(u2s(pixels[point + stride]) - a);
+ pixels[point - 2 * stride] = s2u(u2s(pixels[point - 2 * stride]) - a);
+ }
+ }
+}
+
+//normal filter
+//works on the 8 pixels on the edges between macroblocks
+pub(crate) fn macroblock_filter(
+ hev_threshold: u8,
+ interior_limit: u8,
+ edge_limit: u8,
+ pixels: &mut [u8],
+ point: usize,
+ stride: usize,
+) {
+ let mut spixels = [0i32; 8];
+ for i in 0..8 {
+ spixels[i] = u2s(pixels[point + i * stride - 4 * stride]);
+ }
+
+ if should_filter(interior_limit, edge_limit, pixels, point, stride) {
+ if !high_edge_variance(hev_threshold, pixels, point, stride) {
+ let w = c(c(spixels[2] - spixels[5]) + 3 * (spixels[4] - spixels[3]));
+
+ let mut a = c((27 * w + 63) >> 7);
+
+ pixels[point] = s2u(spixels[4] - a);
+ pixels[point - stride] = s2u(spixels[3] + a);
+
+ a = c((18 * w + 63) >> 7);
+
+ pixels[point + stride] = s2u(spixels[5] - a);
+ pixels[point - 2 * stride] = s2u(spixels[2] + a);
+
+ a = c((9 * w + 63) >> 7);
+
+ pixels[point + 2 * stride] = s2u(spixels[6] - a);
+ pixels[point - 3 * stride] = s2u(spixels[1] + a);
+ } else {
+ common_adjust(true, pixels, point, stride);
+ }
+ }
+}
diff --git a/vendor/image/src/codecs/webp/lossless.rs b/vendor/image/src/codecs/webp/lossless.rs
new file mode 100644
index 0000000..7271eda
--- /dev/null
+++ b/vendor/image/src/codecs/webp/lossless.rs
@@ -0,0 +1,783 @@
+//! Decoding of lossless WebP images
+//!
+//! [Lossless spec](https://developers.google.com/speed/webp/docs/webp_lossless_bitstream_specification)
+//!
+
+use std::{
+ convert::TryFrom,
+ convert::TryInto,
+ error, fmt,
+ io::Read,
+ ops::{AddAssign, Shl},
+};
+
+use byteorder::ReadBytesExt;
+
+use crate::{error::DecodingError, ImageError, ImageFormat, ImageResult};
+
+use super::huffman::HuffmanTree;
+use super::lossless_transform::{add_pixels, TransformType};
+
+const CODE_LENGTH_CODES: usize = 19;
+const CODE_LENGTH_CODE_ORDER: [usize; CODE_LENGTH_CODES] = [
+ 17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+];
+
+#[rustfmt::skip]
+const DISTANCE_MAP: [(i8, i8); 120] = [
+ (0, 1), (1, 0), (1, 1), (-1, 1), (0, 2), (2, 0), (1, 2), (-1, 2),
+ (2, 1), (-2, 1), (2, 2), (-2, 2), (0, 3), (3, 0), (1, 3), (-1, 3),
+ (3, 1), (-3, 1), (2, 3), (-2, 3), (3, 2), (-3, 2), (0, 4), (4, 0),
+ (1, 4), (-1, 4), (4, 1), (-4, 1), (3, 3), (-3, 3), (2, 4), (-2, 4),
+ (4, 2), (-4, 2), (0, 5), (3, 4), (-3, 4), (4, 3), (-4, 3), (5, 0),
+ (1, 5), (-1, 5), (5, 1), (-5, 1), (2, 5), (-2, 5), (5, 2), (-5, 2),
+ (4, 4), (-4, 4), (3, 5), (-3, 5), (5, 3), (-5, 3), (0, 6), (6, 0),
+ (1, 6), (-1, 6), (6, 1), (-6, 1), (2, 6), (-2, 6), (6, 2), (-6, 2),
+ (4, 5), (-4, 5), (5, 4), (-5, 4), (3, 6), (-3, 6), (6, 3), (-6, 3),
+ (0, 7), (7, 0), (1, 7), (-1, 7), (5, 5), (-5, 5), (7, 1), (-7, 1),
+ (4, 6), (-4, 6), (6, 4), (-6, 4), (2, 7), (-2, 7), (7, 2), (-7, 2),
+ (3, 7), (-3, 7), (7, 3), (-7, 3), (5, 6), (-5, 6), (6, 5), (-6, 5),
+ (8, 0), (4, 7), (-4, 7), (7, 4), (-7, 4), (8, 1), (8, 2), (6, 6),
+ (-6, 6), (8, 3), (5, 7), (-5, 7), (7, 5), (-7, 5), (8, 4), (6, 7),
+ (-6, 7), (7, 6), (-7, 6), (8, 5), (7, 7), (-7, 7), (8, 6), (8, 7)
+];
+
+const GREEN: usize = 0;
+const RED: usize = 1;
+const BLUE: usize = 2;
+const ALPHA: usize = 3;
+const DIST: usize = 4;
+
+const HUFFMAN_CODES_PER_META_CODE: usize = 5;
+
+type HuffmanCodeGroup = [HuffmanTree; HUFFMAN_CODES_PER_META_CODE];
+
+const ALPHABET_SIZE: [u16; HUFFMAN_CODES_PER_META_CODE] = [256 + 24, 256, 256, 256, 40];
+
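+/// Ceiling division of `size` by `2^bits`; for example, `subsample_size(10, 2)` is 3.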
+#[inline]
+pub(crate) fn subsample_size(size: u16, bits: u8) -> u16 {
+ ((u32::from(size) + (1u32 << bits) - 1) >> bits)
+ .try_into()
+ .unwrap()
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum DecoderError {
+ /// Signature of 0x2f not found
+ LosslessSignatureInvalid(u8),
+ /// Version Number must be 0
+ VersionNumberInvalid(u8),
+
+ /// Color cache bits were outside the valid range of 1 to 11
+ InvalidColorCacheBits(u8),
+
+ HuffmanError,
+
+ BitStreamError,
+
+ TransformError,
+}
+
+impl fmt::Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DecoderError::LosslessSignatureInvalid(sig) => {
+ f.write_fmt(format_args!("Invalid lossless signature: {}", sig))
+ }
+ DecoderError::VersionNumberInvalid(num) => {
+ f.write_fmt(format_args!("Invalid version number: {}", num))
+ }
+ DecoderError::InvalidColorCacheBits(num) => f.write_fmt(format_args!(
+ "Invalid color cache(must be between 1-11): {}",
+ num
+ )),
+ DecoderError::HuffmanError => f.write_fmt(format_args!("Error building Huffman Tree")),
+ DecoderError::BitStreamError => {
+ f.write_fmt(format_args!("Error while reading bitstream"))
+ }
+ DecoderError::TransformError => {
+ f.write_fmt(format_args!("Error while reading or writing transforms"))
+ }
+ }
+ }
+}
+
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {}
+
+const NUM_TRANSFORM_TYPES: usize = 4;
+
+//Decodes lossless WebP images
+#[derive(Debug)]
+pub(crate) struct LosslessDecoder<R> {
+ r: R,
+ bit_reader: BitReader,
+ frame: LosslessFrame,
+ transforms: [Option<TransformType>; NUM_TRANSFORM_TYPES],
+ transform_order: Vec<u8>,
+}
+
+impl<R: Read> LosslessDecoder<R> {
+ /// Create a new decoder
+ pub(crate) fn new(r: R) -> LosslessDecoder<R> {
+ LosslessDecoder {
+ r,
+ bit_reader: BitReader::new(),
+ frame: Default::default(),
+ transforms: [None, None, None, None],
+ transform_order: Vec::new(),
+ }
+ }
+
+ /// Reads the frame
+ pub(crate) fn decode_frame(&mut self) -> ImageResult<&LosslessFrame> {
+ let signature = self.r.read_u8()?;
+
+ if signature != 0x2f {
+ return Err(DecoderError::LosslessSignatureInvalid(signature).into());
+ }
+
+ let mut buf = Vec::new();
+ self.r.read_to_end(&mut buf)?;
+ self.bit_reader.init(buf);
+
+ self.frame.width = self.bit_reader.read_bits::<u16>(14)? + 1;
+ self.frame.height = self.bit_reader.read_bits::<u16>(14)? + 1;
+
+ let _alpha_used = self.bit_reader.read_bits::<u8>(1)?;
+
+ let version_num = self.bit_reader.read_bits::<u8>(3)?;
+
+ if version_num != 0 {
+ return Err(DecoderError::VersionNumberInvalid(version_num).into());
+ }
+
+ let mut data = self.decode_image_stream(self.frame.width, self.frame.height, true)?;
+
+ for &trans_index in self.transform_order.iter().rev() {
+ let trans = self.transforms[usize::from(trans_index)].as_ref().unwrap();
+ trans.apply_transform(&mut data, self.frame.width, self.frame.height)?;
+ }
+
+ self.frame.buf = data;
+ Ok(&self.frame)
+ }
+
+ //used for alpha data in extended decoding
+ pub(crate) fn decode_frame_implicit_dims(
+ &mut self,
+ width: u16,
+ height: u16,
+ ) -> ImageResult<&LosslessFrame> {
+ let mut buf = Vec::new();
+ self.r.read_to_end(&mut buf)?;
+ self.bit_reader.init(buf);
+
+ self.frame.width = width;
+ self.frame.height = height;
+
+ let mut data = self.decode_image_stream(self.frame.width, self.frame.height, true)?;
+
+ //transform_order is a vector of indices (0-3) into transforms, in the order they were decoded
+ for &trans_index in self.transform_order.iter().rev() {
+ let trans = self.transforms[usize::from(trans_index)].as_ref().unwrap();
+ trans.apply_transform(&mut data, self.frame.width, self.frame.height)?;
+ }
+
+ self.frame.buf = data;
+ Ok(&self.frame)
+ }
+
+ /// Reads image data from the bitstream
+ /// The data can be in any of the 5 roles described in the specification;
+ /// the ARGB image role behaves differently from the other 4
+ /// xsize and ysize describe the size of the blocks, where each block has its own entropy code
+ fn decode_image_stream(
+ &mut self,
+ xsize: u16,
+ ysize: u16,
+ is_argb_img: bool,
+ ) -> ImageResult<Vec<u32>> {
+ let trans_xsize = if is_argb_img {
+ self.read_transforms()?
+ } else {
+ xsize
+ };
+
+ let color_cache_bits = self.read_color_cache()?;
+
+ let color_cache = color_cache_bits.map(|bits| {
+ let size = 1 << bits;
+ let cache = vec![0u32; size];
+ ColorCache {
+ color_cache_bits: bits,
+ color_cache: cache,
+ }
+ });
+
+ let huffman_info = self.read_huffman_codes(is_argb_img, trans_xsize, ysize, color_cache)?;
+
+ //decode data
+ let data = self.decode_image_data(trans_xsize, ysize, huffman_info)?;
+
+ Ok(data)
+ }
+
+ /// Reads transforms and their data from the bitstream
+ fn read_transforms(&mut self) -> ImageResult<u16> {
+ let mut xsize = self.frame.width;
+
+ while self.bit_reader.read_bits::<u8>(1)? == 1 {
+ let transform_type_val = self.bit_reader.read_bits::<u8>(2)?;
+
+ if self.transforms[usize::from(transform_type_val)].is_some() {
+ //can only have one of each transform, error
+ return Err(DecoderError::TransformError.into());
+ }
+
+ self.transform_order.push(transform_type_val);
+
+ let transform_type = match transform_type_val {
+ 0 => {
+ //predictor
+
+ let size_bits = self.bit_reader.read_bits::<u8>(3)? + 2;
+
+ let block_xsize = subsample_size(xsize, size_bits);
+ let block_ysize = subsample_size(self.frame.height, size_bits);
+
+ let data = self.decode_image_stream(block_xsize, block_ysize, false)?;
+
+ TransformType::PredictorTransform {
+ size_bits,
+ predictor_data: data,
+ }
+ }
+ 1 => {
+ //color transform
+
+ let size_bits = self.bit_reader.read_bits::<u8>(3)? + 2;
+
+ let block_xsize = subsample_size(xsize, size_bits);
+ let block_ysize = subsample_size(self.frame.height, size_bits);
+
+ let data = self.decode_image_stream(block_xsize, block_ysize, false)?;
+
+ TransformType::ColorTransform {
+ size_bits,
+ transform_data: data,
+ }
+ }
+ 2 => {
+ //subtract green
+
+ TransformType::SubtractGreen
+ }
+ 3 => {
+ let color_table_size = self.bit_reader.read_bits::<u16>(8)? + 1;
+
+ let mut color_map = self.decode_image_stream(color_table_size, 1, false)?;
+
+ let bits = if color_table_size <= 2 {
+ 3
+ } else if color_table_size <= 4 {
+ 2
+ } else if color_table_size <= 16 {
+ 1
+ } else {
+ 0
+ };
+ xsize = subsample_size(xsize, bits);
+
+ Self::adjust_color_map(&mut color_map);
+
+ TransformType::ColorIndexingTransform {
+ table_size: color_table_size,
+ table_data: color_map,
+ }
+ }
+ _ => unreachable!(),
+ };
+
+ self.transforms[usize::from(transform_type_val)] = Some(transform_type);
+ }
+
+ Ok(xsize)
+ }
+
+ /// Adjusts the color map since it's subtraction coded
+ fn adjust_color_map(color_map: &mut Vec<u32>) {
+ for i in 1..color_map.len() {
+ color_map[i] = add_pixels(color_map[i], color_map[i - 1]);
+ }
+ }
+
+ /// Reads huffman codes associated with an image
+ fn read_huffman_codes(
+ &mut self,
+ read_meta: bool,
+ xsize: u16,
+ ysize: u16,
+ color_cache: Option<ColorCache>,
+ ) -> ImageResult<HuffmanInfo> {
+ let mut num_huff_groups = 1;
+
+ let mut huffman_bits = 0;
+ let mut huffman_xsize = 1;
+ let mut huffman_ysize = 1;
+ let mut entropy_image = Vec::new();
+
+ if read_meta && self.bit_reader.read_bits::<u8>(1)? == 1 {
+ //meta huffman codes
+ huffman_bits = self.bit_reader.read_bits::<u8>(3)? + 2;
+ huffman_xsize = subsample_size(xsize, huffman_bits);
+ huffman_ysize = subsample_size(ysize, huffman_bits);
+
+ entropy_image = self.decode_image_stream(huffman_xsize, huffman_ysize, false)?;
+
+ for pixel in entropy_image.iter_mut() {
+ let meta_huff_code = (*pixel >> 8) & 0xffff;
+
+ *pixel = meta_huff_code;
+
+ if meta_huff_code >= num_huff_groups {
+ num_huff_groups = meta_huff_code + 1;
+ }
+ }
+ }
+
+ let mut hufftree_groups = Vec::new();
+
+ for _i in 0..num_huff_groups {
+ let mut group: HuffmanCodeGroup = Default::default();
+ for j in 0..HUFFMAN_CODES_PER_META_CODE {
+ let mut alphabet_size = ALPHABET_SIZE[j];
+ if j == 0 {
+ if let Some(color_cache) = color_cache.as_ref() {
+ alphabet_size += 1 << color_cache.color_cache_bits;
+ }
+ }
+
+ let tree = self.read_huffman_code(alphabet_size)?;
+ group[j] = tree;
+ }
+ hufftree_groups.push(group);
+ }
+
+ let huffman_mask = if huffman_bits == 0 {
+ !0
+ } else {
+ (1 << huffman_bits) - 1
+ };
+
+ let info = HuffmanInfo {
+ xsize: huffman_xsize,
+ _ysize: huffman_ysize,
+ color_cache,
+ image: entropy_image,
+ bits: huffman_bits,
+ mask: huffman_mask,
+ huffman_code_groups: hufftree_groups,
+ };
+
+ Ok(info)
+ }
+
+ /// Decodes and returns a single huffman tree
+ fn read_huffman_code(&mut self, alphabet_size: u16) -> ImageResult<HuffmanTree> {
+ let simple = self.bit_reader.read_bits::<u8>(1)? == 1;
+
+ if simple {
+ let num_symbols = self.bit_reader.read_bits::<u8>(1)? + 1;
+
+ let mut code_lengths = vec![u16::from(num_symbols - 1)];
+ let mut codes = vec![0];
+ let mut symbols = Vec::new();
+
+ let is_first_8bits = self.bit_reader.read_bits::<u8>(1)?;
+ symbols.push(self.bit_reader.read_bits::<u16>(1 + 7 * is_first_8bits)?);
+
+ if num_symbols == 2 {
+ symbols.push(self.bit_reader.read_bits::<u16>(8)?);
+ code_lengths.push(1);
+ codes.push(1);
+ }
+
+ HuffmanTree::build_explicit(code_lengths, codes, symbols)
+ } else {
+ let mut code_length_code_lengths = vec![0; CODE_LENGTH_CODES];
+
+ let num_code_lengths = 4 + self.bit_reader.read_bits::<usize>(4)?;
+ for i in 0..num_code_lengths {
+ code_length_code_lengths[CODE_LENGTH_CODE_ORDER[i]] =
+ self.bit_reader.read_bits(3)?;
+ }
+
+ let new_code_lengths =
+ self.read_huffman_code_lengths(code_length_code_lengths, alphabet_size)?;
+
+ HuffmanTree::build_implicit(new_code_lengths)
+ }
+ }
+
+ /// Reads huffman code lengths
+ fn read_huffman_code_lengths(
+ &mut self,
+ code_length_code_lengths: Vec<u16>,
+ num_symbols: u16,
+ ) -> ImageResult<Vec<u16>> {
+ let table = HuffmanTree::build_implicit(code_length_code_lengths)?;
+
+ let mut max_symbol = if self.bit_reader.read_bits::<u8>(1)? == 1 {
+ let length_nbits = 2 + 2 * self.bit_reader.read_bits::<u8>(3)?;
+ 2 + self.bit_reader.read_bits::<u16>(length_nbits)?
+ } else {
+ num_symbols
+ };
+
+ let mut code_lengths = vec![0; usize::from(num_symbols)];
+ let mut prev_code_len = 8; //default code length
+
+ let mut symbol = 0;
+ while symbol < num_symbols {
+ if max_symbol == 0 {
+ break;
+ }
+ max_symbol -= 1;
+
+ let code_len = table.read_symbol(&mut self.bit_reader)?;
+
+ if code_len < 16 {
+ code_lengths[usize::from(symbol)] = code_len;
+ symbol += 1;
+ if code_len != 0 {
+ prev_code_len = code_len;
+ }
+ } else {
+ let use_prev = code_len == 16;
+ let slot = code_len - 16;
+ let extra_bits = match slot {
+ 0 => 2,
+ 1 => 3,
+ 2 => 7,
+ _ => return Err(DecoderError::BitStreamError.into()),
+ };
+ let repeat_offset = match slot {
+ 0 | 1 => 3,
+ 2 => 11,
+ _ => return Err(DecoderError::BitStreamError.into()),
+ };
+
+ let mut repeat = self.bit_reader.read_bits::<u16>(extra_bits)? + repeat_offset;
+
+ if symbol + repeat > num_symbols {
+ return Err(DecoderError::BitStreamError.into());
+ } else {
+ let length = if use_prev { prev_code_len } else { 0 };
+ while repeat > 0 {
+ repeat -= 1;
+ code_lengths[usize::from(symbol)] = length;
+ symbol += 1;
+ }
+ }
+ }
+ }
+
+ Ok(code_lengths)
+ }
+
+ /// Decodes the image data using the huffman trees and any of the 3 methods of decoding
+ fn decode_image_data(
+ &mut self,
+ width: u16,
+ height: u16,
+ mut huffman_info: HuffmanInfo,
+ ) -> ImageResult<Vec<u32>> {
+ let num_values = usize::from(width) * usize::from(height);
+ let mut data = vec![0; num_values];
+
+ let huff_index = huffman_info.get_huff_index(0, 0);
+ let mut tree = &huffman_info.huffman_code_groups[huff_index];
+ let mut last_cached = 0;
+ let mut index = 0;
+ let mut x = 0;
+ let mut y = 0;
+ while index < num_values {
+ if (x & huffman_info.mask) == 0 {
+ let index = huffman_info.get_huff_index(x, y);
+ tree = &huffman_info.huffman_code_groups[index];
+ }
+
+ let code = tree[GREEN].read_symbol(&mut self.bit_reader)?;
+
+ //check code
+ if code < 256 {
+ //literal, so just use huffman codes and read as argb
+ let red = tree[RED].read_symbol(&mut self.bit_reader)?;
+ let blue = tree[BLUE].read_symbol(&mut self.bit_reader)?;
+ let alpha = tree[ALPHA].read_symbol(&mut self.bit_reader)?;
+
+ data[index] = (u32::from(alpha) << 24)
+ + (u32::from(red) << 16)
+ + (u32::from(code) << 8)
+ + u32::from(blue);
+
+ index += 1;
+ x += 1;
+ if x >= width {
+ x = 0;
+ y += 1;
+ }
+ } else if code < 256 + 24 {
+ //backward reference, so go back and use that to add image data
+ let length_symbol = code - 256;
+ let length = Self::get_copy_distance(&mut self.bit_reader, length_symbol)?;
+
+ let dist_symbol = tree[DIST].read_symbol(&mut self.bit_reader)?;
+ let dist_code = Self::get_copy_distance(&mut self.bit_reader, dist_symbol)?;
+ let dist = Self::plane_code_to_distance(width, dist_code);
+
+ if index < dist || num_values - index < length {
+ return Err(DecoderError::BitStreamError.into());
+ }
+
+ for i in 0..length {
+ data[index + i] = data[index + i - dist];
+ }
+ index += length;
+ x += u16::try_from(length).unwrap();
+ while x >= width {
+ x -= width;
+ y += 1;
+ }
+ if index < num_values {
+ let index = huffman_info.get_huff_index(x, y);
+ tree = &huffman_info.huffman_code_groups[index];
+ }
+ } else {
+ //color cache, so use previously stored pixels to get this pixel
+ let key = code - 256 - 24;
+
+ if let Some(color_cache) = huffman_info.color_cache.as_mut() {
+ //cache old colors
+ while last_cached < index {
+ color_cache.insert(data[last_cached]);
+ last_cached += 1;
+ }
+ data[index] = color_cache.lookup(key.into())?;
+ } else {
+ return Err(DecoderError::BitStreamError.into());
+ }
+ index += 1;
+ x += 1;
+ if x >= width {
+ x = 0;
+ y += 1;
+ }
+ }
+ }
+
+ Ok(data)
+ }
+
+ /// Reads color cache data from the bitstream
+ fn read_color_cache(&mut self) -> ImageResult<Option<u8>> {
+ if self.bit_reader.read_bits::<u8>(1)? == 1 {
+ let code_bits = self.bit_reader.read_bits::<u8>(4)?;
+
+ if !(1..=11).contains(&code_bits) {
+ return Err(DecoderError::InvalidColorCacheBits(code_bits).into());
+ }
+
+ Ok(Some(code_bits))
+ } else {
+ Ok(None)
+ }
+ }
+
+ /// Gets the copy distance from the prefix code and bitstream
+ fn get_copy_distance(bit_reader: &mut BitReader, prefix_code: u16) -> ImageResult<usize> {
+ if prefix_code < 4 {
+ return Ok(usize::from(prefix_code + 1));
+ }
+ let extra_bits: u8 = ((prefix_code - 2) >> 1).try_into().unwrap();
+ let offset = (2 + (usize::from(prefix_code) & 1)) << extra_bits;
+
+ Ok(offset + bit_reader.read_bits::<usize>(extra_bits)? + 1)
+ }
+
+ /// Gets distance to pixel
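+ /// Plane codes above 120 encode the distance directly (offset by 120); smaller codes
+ /// index DISTANCE_MAP, e.g. with xsize = 100, plane code 1 maps to (0, 1) and
+ /// therefore to a distance of 100.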
+ fn plane_code_to_distance(xsize: u16, plane_code: usize) -> usize {
+ if plane_code > 120 {
+ plane_code - 120
+ } else {
+ let (xoffset, yoffset) = DISTANCE_MAP[plane_code - 1];
+
+ let dist = i32::from(xoffset) + i32::from(yoffset) * i32::from(xsize);
+ if dist < 1 {
+ return 1;
+ }
+ dist.try_into().unwrap()
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+struct HuffmanInfo {
+ xsize: u16,
+ _ysize: u16,
+ color_cache: Option<ColorCache>,
+ image: Vec<u32>,
+ bits: u8,
+ mask: u16,
+ huffman_code_groups: Vec<HuffmanCodeGroup>,
+}
+
+impl HuffmanInfo {
+ fn get_huff_index(&self, x: u16, y: u16) -> usize {
+ if self.bits == 0 {
+ return 0;
+ }
+ let position = usize::from((y >> self.bits) * self.xsize + (x >> self.bits));
+ let meta_huff_code: usize = self.image[position].try_into().unwrap();
+ meta_huff_code
+ }
+}
+
+#[derive(Debug, Clone)]
+struct ColorCache {
+ color_cache_bits: u8,
+ color_cache: Vec<u32>,
+}
+
+impl ColorCache {
+ fn insert(&mut self, color: u32) {
+ let index = (0x1e35a7bdu32.overflowing_mul(color).0) >> (32 - self.color_cache_bits);
+ self.color_cache[index as usize] = color;
+ }
+
+ fn lookup(&self, index: usize) -> ImageResult<u32> {
+ match self.color_cache.get(index) {
+ Some(&value) => Ok(value),
+ None => Err(DecoderError::BitStreamError.into()),
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+pub(crate) struct BitReader {
+ buf: Vec<u8>,
+ index: usize,
+ bit_count: u8,
+}
+
+impl BitReader {
+ fn new() -> BitReader {
+ BitReader {
+ buf: Vec::new(),
+ index: 0,
+ bit_count: 0,
+ }
+ }
+
+ fn init(&mut self, buf: Vec<u8>) {
+ self.buf = buf;
+ }
+
+ pub(crate) fn read_bits<T>(&mut self, num: u8) -> ImageResult<T>
+ where
+ T: num_traits::Unsigned + Shl<u8, Output = T> + AddAssign<T> + From<bool>,
+ {
+ let mut value: T = T::zero();
+
+ for i in 0..num {
+ if self.buf.len() <= self.index {
+ return Err(DecoderError::BitStreamError.into());
+ }
+ let bit_true = self.buf[self.index] & (1 << self.bit_count) != 0;
+ value += T::from(bit_true) << i;
+ self.bit_count = if self.bit_count == 7 {
+ self.index += 1;
+ 0
+ } else {
+ self.bit_count + 1
+ };
+ }
+
+ Ok(value)
+ }
+}
+
+#[derive(Debug, Clone, Default)]
+pub(crate) struct LosslessFrame {
+ pub(crate) width: u16,
+ pub(crate) height: u16,
+
+ pub(crate) buf: Vec<u32>,
+}
+
+impl LosslessFrame {
+ /// Fills a buffer by converting from argb to rgba
+ pub(crate) fn fill_rgba(&self, buf: &mut [u8]) {
+ for (&argb_val, chunk) in self.buf.iter().zip(buf.chunks_exact_mut(4)) {
+ chunk[0] = ((argb_val >> 16) & 0xff).try_into().unwrap();
+ chunk[1] = ((argb_val >> 8) & 0xff).try_into().unwrap();
+ chunk[2] = (argb_val & 0xff).try_into().unwrap();
+ chunk[3] = ((argb_val >> 24) & 0xff).try_into().unwrap();
+ }
+ }
+
+ /// Get buffer size from the image
+ pub(crate) fn get_buf_size(&self) -> usize {
+ usize::from(self.width) * usize::from(self.height) * 4
+ }
+
+ /// Fills a buffer with just the green values from the lossless decoding
+ /// Used in extended alpha decoding
+ pub(crate) fn fill_green(&self, buf: &mut [u8]) {
+ for (&argb_val, buf_value) in self.buf.iter().zip(buf.iter_mut()) {
+ *buf_value = ((argb_val >> 8) & 0xff).try_into().unwrap();
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+
+ use super::BitReader;
+
+ #[test]
+ fn bit_read_test() {
+ let mut bit_reader = BitReader::new();
+
+ //10011100 01000001 11100001
+ let buf = vec![0x9C, 0x41, 0xE1];
+
+ bit_reader.init(buf);
+
+ assert_eq!(bit_reader.read_bits::<u8>(3).unwrap(), 4); //100
+ assert_eq!(bit_reader.read_bits::<u8>(2).unwrap(), 3); //11
+ assert_eq!(bit_reader.read_bits::<u8>(6).unwrap(), 12); //001100
+ assert_eq!(bit_reader.read_bits::<u16>(10).unwrap(), 40); //0000101000
+ assert_eq!(bit_reader.read_bits::<u8>(3).unwrap(), 7); //111
+ }
+
+ #[test]
+ fn bit_read_error_test() {
+ let mut bit_reader = BitReader::new();
+
+ //01101010
+ let buf = vec![0x6A];
+
+ bit_reader.init(buf);
+
+ assert_eq!(bit_reader.read_bits::<u8>(3).unwrap(), 2); //010
+ assert_eq!(bit_reader.read_bits::<u8>(5).unwrap(), 13); //01101
+ assert!(bit_reader.read_bits::<u8>(4).is_err()); //error
+ }
+}
diff --git a/vendor/image/src/codecs/webp/lossless_transform.rs b/vendor/image/src/codecs/webp/lossless_transform.rs
new file mode 100644
index 0000000..f9a82c1
--- /dev/null
+++ b/vendor/image/src/codecs/webp/lossless_transform.rs
@@ -0,0 +1,464 @@
+use std::convert::TryFrom;
+use std::convert::TryInto;
+
+use super::lossless::subsample_size;
+use super::lossless::DecoderError;
+
+#[derive(Debug, Clone)]
+pub(crate) enum TransformType {
+ PredictorTransform {
+ size_bits: u8,
+ predictor_data: Vec<u32>,
+ },
+ ColorTransform {
+ size_bits: u8,
+ transform_data: Vec<u32>,
+ },
+ SubtractGreen,
+ ColorIndexingTransform {
+ table_size: u16,
+ table_data: Vec<u32>,
+ },
+}
+
+impl TransformType {
+ /// Applies a transform to the image data
+ pub(crate) fn apply_transform(
+ &self,
+ image_data: &mut Vec<u32>,
+ width: u16,
+ height: u16,
+ ) -> Result<(), DecoderError> {
+ match self {
+ TransformType::PredictorTransform {
+ size_bits,
+ predictor_data,
+ } => {
+ let block_xsize = usize::from(subsample_size(width, *size_bits));
+ let width = usize::from(width);
+ let height = usize::from(height);
+
+ if image_data.len() < width * height {
+ return Err(DecoderError::TransformError);
+ }
+
+ //handle top and left borders specially
+ //this involves ignoring the mode and just setting the prediction values as follows
+ image_data[0] = add_pixels(image_data[0], 0xff000000);
+
+ for x in 1..width {
+ image_data[x] = add_pixels(image_data[x], get_left(image_data, x, 0, width));
+ }
+
+ for y in 1..height {
+ image_data[y * width] =
+ add_pixels(image_data[y * width], get_top(image_data, 0, y, width));
+ }
+
+ for y in 1..height {
+ for x in 1..width {
+ let block_index = (y >> size_bits) * block_xsize + (x >> size_bits);
+
+ let index = y * width + x;
+
+ let green = (predictor_data[block_index] >> 8) & 0xff;
+
+ match green {
+ 0 => image_data[index] = add_pixels(image_data[index], 0xff000000),
+ 1 => {
+ image_data[index] =
+ add_pixels(image_data[index], get_left(image_data, x, y, width))
+ }
+ 2 => {
+ image_data[index] =
+ add_pixels(image_data[index], get_top(image_data, x, y, width))
+ }
+ 3 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ get_top_right(image_data, x, y, width),
+ )
+ }
+ 4 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ get_top_left(image_data, x, y, width),
+ )
+ }
+ 5 => {
+ image_data[index] = add_pixels(image_data[index], {
+ let first = average2(
+ get_left(image_data, x, y, width),
+ get_top_right(image_data, x, y, width),
+ );
+ average2(first, get_top(image_data, x, y, width))
+ })
+ }
+ 6 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ average2(
+ get_left(image_data, x, y, width),
+ get_top_left(image_data, x, y, width),
+ ),
+ )
+ }
+ 7 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ average2(
+ get_left(image_data, x, y, width),
+ get_top(image_data, x, y, width),
+ ),
+ )
+ }
+ 8 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ average2(
+ get_top_left(image_data, x, y, width),
+ get_top(image_data, x, y, width),
+ ),
+ )
+ }
+ 9 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ average2(
+ get_top(image_data, x, y, width),
+ get_top_right(image_data, x, y, width),
+ ),
+ )
+ }
+ 10 => {
+ image_data[index] = add_pixels(image_data[index], {
+ let first = average2(
+ get_left(image_data, x, y, width),
+ get_top_left(image_data, x, y, width),
+ );
+ let second = average2(
+ get_top(image_data, x, y, width),
+ get_top_right(image_data, x, y, width),
+ );
+ average2(first, second)
+ })
+ }
+ 11 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ select(
+ get_left(image_data, x, y, width),
+ get_top(image_data, x, y, width),
+ get_top_left(image_data, x, y, width),
+ ),
+ )
+ }
+ 12 => {
+ image_data[index] = add_pixels(
+ image_data[index],
+ clamp_add_subtract_full(
+ get_left(image_data, x, y, width),
+ get_top(image_data, x, y, width),
+ get_top_left(image_data, x, y, width),
+ ),
+ )
+ }
+ 13 => {
+ image_data[index] = add_pixels(image_data[index], {
+ let first = average2(
+ get_left(image_data, x, y, width),
+ get_top(image_data, x, y, width),
+ );
+ clamp_add_subtract_half(
+ first,
+ get_top_left(image_data, x, y, width),
+ )
+ })
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+ TransformType::ColorTransform {
+ size_bits,
+ transform_data,
+ } => {
+ let block_xsize = usize::from(subsample_size(width, *size_bits));
+ let width = usize::from(width);
+ let height = usize::from(height);
+
+ for y in 0..height {
+ for x in 0..width {
+ let block_index = (y >> size_bits) * block_xsize + (x >> size_bits);
+
+ let index = y * width + x;
+
+ let multiplier =
+ ColorTransformElement::from_color_code(transform_data[block_index]);
+
+ image_data[index] = transform_color(&multiplier, image_data[index]);
+ }
+ }
+ }
+ TransformType::SubtractGreen => {
+ let width = usize::from(width);
+ for y in 0..usize::from(height) {
+ for x in 0..width {
+ image_data[y * width + x] = add_green(image_data[y * width + x]);
+ }
+ }
+ }
+ TransformType::ColorIndexingTransform {
+ table_size,
+ table_data,
+ } => {
+ let mut new_image_data =
+ Vec::with_capacity(usize::from(width) * usize::from(height));
+
+ let table_size = *table_size;
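+ // Pixel bundling: for palettes of at most 16 colors, several pixel
+ // indices are packed into the green channel of one pixel; width_bits
+ // is the log2 of how many indices share a single byte.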
+ let width_bits: u8 = if table_size <= 2 {
+ 3
+ } else if table_size <= 4 {
+ 2
+ } else if table_size <= 16 {
+ 1
+ } else {
+ 0
+ };
+
+ let bits_per_pixel = 8 >> width_bits;
+ let mask = (1 << bits_per_pixel) - 1;
+
+ let mut src = 0;
+ let width = usize::from(width);
+
+ let pixels_per_byte = 1 << width_bits;
+ let count_mask = pixels_per_byte - 1;
+ let mut packed_pixels = 0;
+
+ for _y in 0..usize::from(height) {
+ for x in 0..width {
+ if (x & count_mask) == 0 {
+ packed_pixels = (image_data[src] >> 8) & 0xff;
+ src += 1;
+ }
+
+ let pixels: usize = (packed_pixels & mask).try_into().unwrap();
+ let new_val = if pixels >= table_size.into() {
+ 0x00000000
+ } else {
+ table_data[pixels]
+ };
+
+ new_image_data.push(new_val);
+
+ packed_pixels >>= bits_per_pixel;
+ }
+ }
+
+ *image_data = new_image_data;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+//predictor functions
+
+/// Adds 2 pixels mod 256 for each pixel
+pub(crate) fn add_pixels(a: u32, b: u32) -> u32 {
+ let new_alpha = ((a >> 24) + (b >> 24)) & 0xff;
+ let new_red = (((a >> 16) & 0xff) + ((b >> 16) & 0xff)) & 0xff;
+ let new_green = (((a >> 8) & 0xff) + ((b >> 8) & 0xff)) & 0xff;
+ let new_blue = ((a & 0xff) + (b & 0xff)) & 0xff;
+
+ (new_alpha << 24) + (new_red << 16) + (new_green << 8) + new_blue
+}
+
+/// Get left pixel
+fn get_left(data: &[u32], x: usize, y: usize, width: usize) -> u32 {
+ data[y * width + x - 1]
+}
+
+/// Get top pixel
+fn get_top(data: &[u32], x: usize, y: usize, width: usize) -> u32 {
+ data[(y - 1) * width + x]
+}
+
+/// Get pixel to top right
+fn get_top_right(data: &[u32], x: usize, y: usize, width: usize) -> u32 {
+ // if x == width - 1 this gets the leftmost pixel of the current row
+ // as described in the specification
+ data[(y - 1) * width + x + 1]
+}
+
+/// Get pixel to top left
+fn get_top_left(data: &[u32], x: usize, y: usize, width: usize) -> u32 {
+ data[(y - 1) * width + x - 1]
+}
+
+/// Get average of 2 pixels
+fn average2(a: u32, b: u32) -> u32 {
+ let mut avg = 0u32;
+ for i in 0..4 {
+ let sub_a: u8 = ((a >> (i * 8)) & 0xff).try_into().unwrap();
+ let sub_b: u8 = ((b >> (i * 8)) & 0xff).try_into().unwrap();
+ avg |= u32::from(sub_average2(sub_a, sub_b)) << (i * 8);
+ }
+ avg
+}
+
+/// Get average of 2 bytes
+fn sub_average2(a: u8, b: u8) -> u8 {
+ ((u16::from(a) + u16::from(b)) / 2).try_into().unwrap()
+}
+
+/// Get a specific byte from argb pixel
+fn get_byte(val: u32, byte: u8) -> u8 {
+ ((val >> (byte * 8)) & 0xff).try_into().unwrap()
+}
+
+/// Get byte as i32 for convenience
+fn get_byte_i32(val: u32, byte: u8) -> i32 {
+ i32::from(get_byte(val, byte))
+}
+
+/// Selects either the left or top pixel, whichever is closer to the gradient prediction (left + top - top_left)
+fn select(left: u32, top: u32, top_left: u32) -> u32 {
+ let predict_alpha = get_byte_i32(left, 3) + get_byte_i32(top, 3) - get_byte_i32(top_left, 3);
+ let predict_red = get_byte_i32(left, 2) + get_byte_i32(top, 2) - get_byte_i32(top_left, 2);
+ let predict_green = get_byte_i32(left, 1) + get_byte_i32(top, 1) - get_byte_i32(top_left, 1);
+ let predict_blue = get_byte_i32(left, 0) + get_byte_i32(top, 0) - get_byte_i32(top_left, 0);
+
+ let predict_left = i32::abs(predict_alpha - get_byte_i32(left, 3))
+ + i32::abs(predict_red - get_byte_i32(left, 2))
+ + i32::abs(predict_green - get_byte_i32(left, 1))
+ + i32::abs(predict_blue - get_byte_i32(left, 0));
+ let predict_top = i32::abs(predict_alpha - get_byte_i32(top, 3))
+ + i32::abs(predict_red - get_byte_i32(top, 2))
+ + i32::abs(predict_green - get_byte_i32(top, 1))
+ + i32::abs(predict_blue - get_byte_i32(top, 0));
+
+ if predict_left < predict_top {
+ left
+ } else {
+ top
+ }
+}
+
+/// Clamp a to [0, 255]
+fn clamp(a: i32) -> i32 {
+ if a < 0 {
+ 0
+ } else if a > 255 {
+ 255
+ } else {
+ a
+ }
+}
+
+/// Clamp add subtract full on one part
+fn clamp_add_subtract_full_sub(a: i32, b: i32, c: i32) -> i32 {
+ clamp(a + b - c)
+}
+
+/// Clamp add subtract half on one part
+fn clamp_add_subtract_half_sub(a: i32, b: i32) -> i32 {
+ clamp(a + (a - b) / 2)
+}
+
+/// Clamp add subtract full on 3 pixels
+fn clamp_add_subtract_full(a: u32, b: u32, c: u32) -> u32 {
+ let mut value: u32 = 0;
+ for i in 0..4u8 {
+ let sub_a: i32 = ((a >> (i * 8)) & 0xff).try_into().unwrap();
+ let sub_b: i32 = ((b >> (i * 8)) & 0xff).try_into().unwrap();
+ let sub_c: i32 = ((c >> (i * 8)) & 0xff).try_into().unwrap();
+ value |=
+ u32::try_from(clamp_add_subtract_full_sub(sub_a, sub_b, sub_c)).unwrap() << (i * 8);
+ }
+ value
+}
+
+/// Clamp add subtract half on 2 pixels
+fn clamp_add_subtract_half(a: u32, b: u32) -> u32 {
+ let mut value = 0;
+ for i in 0..4u8 {
+ let sub_a: i32 = ((a >> (i * 8)) & 0xff).try_into().unwrap();
+ let sub_b: i32 = ((b >> (i * 8)) & 0xff).try_into().unwrap();
+ value |= u32::try_from(clamp_add_subtract_half_sub(sub_a, sub_b)).unwrap() << (i * 8);
+ }
+
+ value
+}
+
+//color transform
+
+#[derive(Debug, Clone, Copy)]
+struct ColorTransformElement {
+ green_to_red: u8,
+ green_to_blue: u8,
+ red_to_blue: u8,
+}
+
+impl ColorTransformElement {
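+ /// Unpacks a transform element stored as an ARGB pixel: green_to_red from the blue
+ /// channel, green_to_blue from the green channel and red_to_blue from the red channel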
+ fn from_color_code(color_code: u32) -> ColorTransformElement {
+ ColorTransformElement {
+ green_to_red: (color_code & 0xff).try_into().unwrap(),
+ green_to_blue: ((color_code >> 8) & 0xff).try_into().unwrap(),
+ red_to_blue: ((color_code >> 16) & 0xff).try_into().unwrap(),
+ }
+ }
+}
+
+/// Does color transform on red and blue transformed by green
+fn color_transform(red: u8, blue: u8, green: u8, trans: &ColorTransformElement) -> (u8, u8) {
+ let mut temp_red = u32::from(red);
+ let mut temp_blue = u32::from(blue);
+
+ //the results are effectively taken modulo 256, as is the conversion
+ //from u8 to the signed two's complement i8 required by color_transform_delta
+ temp_red += color_transform_delta(trans.green_to_red as i8, green as i8);
+ temp_blue += color_transform_delta(trans.green_to_blue as i8, green as i8);
+ temp_blue += color_transform_delta(trans.red_to_blue as i8, temp_red as i8);
+
+ (
+ (temp_red & 0xff).try_into().unwrap(),
+ (temp_blue & 0xff).try_into().unwrap(),
+ )
+}
+
+/// Computes a color transform delta: (t * c) >> 5, with both arguments interpreted as signed 8-bit values
+fn color_transform_delta(t: i8, c: i8) -> u32 {
+ ((i16::from(t) * i16::from(c)) as u32) >> 5
+}
+
+/// Applies a color transform element to a single ARGB pixel
+fn transform_color(multiplier: &ColorTransformElement, color_value: u32) -> u32 {
+ let alpha = get_byte(color_value, 3);
+ let red = get_byte(color_value, 2);
+ let green = get_byte(color_value, 1);
+ let blue = get_byte(color_value, 0);
+
+ let (new_red, new_blue) = color_transform(red, blue, green, multiplier);
+
+ (u32::from(alpha) << 24)
+ + (u32::from(new_red) << 16)
+ + (u32::from(green) << 8)
+ + u32::from(new_blue)
+}
+
+//subtract green function
+
+/// Adds green to red and blue of a pixel
+fn add_green(argb: u32) -> u32 {
+ let red = (argb >> 16) & 0xff;
+ let green = (argb >> 8) & 0xff;
+ let blue = argb & 0xff;
+
+ let new_red = (red + green) & 0xff;
+ let new_blue = (blue + green) & 0xff;
+
+ (argb & 0xff00ff00) | (new_red << 16) | (new_blue)
+}
diff --git a/vendor/image/src/codecs/webp/mod.rs b/vendor/image/src/codecs/webp/mod.rs
new file mode 100644
index 0000000..b38faed
--- /dev/null
+++ b/vendor/image/src/codecs/webp/mod.rs
@@ -0,0 +1,28 @@
+//! Decoding and Encoding of WebP Images
+
+#[cfg(feature = "webp-encoder")]
+pub use self::encoder::{WebPEncoder, WebPQuality};
+
+#[cfg(feature = "webp-encoder")]
+mod encoder;
+
+#[cfg(feature = "webp")]
+pub use self::decoder::WebPDecoder;
+
+#[cfg(feature = "webp")]
+mod decoder;
+#[cfg(feature = "webp")]
+mod extended;
+#[cfg(feature = "webp")]
+mod huffman;
+#[cfg(feature = "webp")]
+mod loop_filter;
+#[cfg(feature = "webp")]
+mod lossless;
+#[cfg(feature = "webp")]
+mod lossless_transform;
+#[cfg(feature = "webp")]
+mod transform;
+
+#[cfg(feature = "webp")]
+pub mod vp8;
diff --git a/vendor/image/src/codecs/webp/transform.rs b/vendor/image/src/codecs/webp/transform.rs
new file mode 100644
index 0000000..3b3ef5a
--- /dev/null
+++ b/vendor/image/src/codecs/webp/transform.rs
@@ -0,0 +1,77 @@
+static CONST1: i64 = 20091;
+static CONST2: i64 = 35468;
+
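+// Section 14.4
+// CONST1 and CONST2 are 16-bit fixed-point approximations of
+// sqrt(2)*cos(pi/8) - 1 and sqrt(2)*sin(pi/8) used by the inverse DCT.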
+pub(crate) fn idct4x4(block: &mut [i32]) {
+ // The intermediate results may overflow the types, so we stretch the type.
+ fn fetch(block: &mut [i32], idx: usize) -> i64 {
+ i64::from(block[idx])
+ }
+
+ for i in 0usize..4 {
+ let a1 = fetch(block, i) + fetch(block, 8 + i);
+ let b1 = fetch(block, i) - fetch(block, 8 + i);
+
+ let t1 = (fetch(block, 4 + i) * CONST2) >> 16;
+ let t2 = fetch(block, 12 + i) + ((fetch(block, 12 + i) * CONST1) >> 16);
+ let c1 = t1 - t2;
+
+ let t1 = fetch(block, 4 + i) + ((fetch(block, 4 + i) * CONST1) >> 16);
+ let t2 = (fetch(block, 12 + i) * CONST2) >> 16;
+ let d1 = t1 + t2;
+
+ block[i] = (a1 + d1) as i32;
+ block[4 + i] = (b1 + c1) as i32;
+ block[4 * 3 + i] = (a1 - d1) as i32;
+ block[4 * 2 + i] = (b1 - c1) as i32;
+ }
+
+ for i in 0usize..4 {
+ let a1 = fetch(block, 4 * i) + fetch(block, 4 * i + 2);
+ let b1 = fetch(block, 4 * i) - fetch(block, 4 * i + 2);
+
+ let t1 = (fetch(block, 4 * i + 1) * CONST2) >> 16;
+ let t2 = fetch(block, 4 * i + 3) + ((fetch(block, 4 * i + 3) * CONST1) >> 16);
+ let c1 = t1 - t2;
+
+ let t1 = fetch(block, 4 * i + 1) + ((fetch(block, 4 * i + 1) * CONST1) >> 16);
+ let t2 = (fetch(block, 4 * i + 3) * CONST2) >> 16;
+ let d1 = t1 + t2;
+
+ block[4 * i] = ((a1 + d1 + 4) >> 3) as i32;
+ block[4 * i + 3] = ((a1 - d1 + 4) >> 3) as i32;
+ block[4 * i + 1] = ((b1 + c1 + 4) >> 3) as i32;
+ block[4 * i + 2] = ((b1 - c1 + 4) >> 3) as i32;
+ }
+}
+
+// 14.3
+pub(crate) fn iwht4x4(block: &mut [i32]) {
+ for i in 0usize..4 {
+ let a1 = block[i] + block[12 + i];
+ let b1 = block[4 + i] + block[8 + i];
+ let c1 = block[4 + i] - block[8 + i];
+ let d1 = block[i] - block[12 + i];
+
+ block[i] = a1 + b1;
+ block[4 + i] = c1 + d1;
+ block[8 + i] = a1 - b1;
+ block[12 + i] = d1 - c1;
+ }
+
+ for i in 0usize..4 {
+ let a1 = block[4 * i] + block[4 * i + 3];
+ let b1 = block[4 * i + 1] + block[4 * i + 2];
+ let c1 = block[4 * i + 1] - block[4 * i + 2];
+ let d1 = block[4 * i] - block[4 * i + 3];
+
+ let a2 = a1 + b1;
+ let b2 = c1 + d1;
+ let c2 = a1 - b1;
+ let d2 = d1 - c1;
+
+ block[4 * i] = (a2 + 3) >> 3;
+ block[4 * i + 1] = (b2 + 3) >> 3;
+ block[4 * i + 2] = (c2 + 3) >> 3;
+ block[4 * i + 3] = (d2 + 3) >> 3;
+ }
+}
diff --git a/vendor/image/src/codecs/webp/vp8.rs b/vendor/image/src/codecs/webp/vp8.rs
new file mode 100644
index 0000000..67b8820
--- /dev/null
+++ b/vendor/image/src/codecs/webp/vp8.rs
@@ -0,0 +1,2932 @@
+//! An implementation of the VP8 Video Codec
+//!
+//! This module contains a partial implementation of the
+//! VP8 video format as defined in RFC-6386.
+//!
+//! It decodes keyframes only.
+//! VP8 is the underpinning of the WebP image format.
+//!
+//! # Related Links
+//! * [rfc-6386](http://tools.ietf.org/html/rfc6386) - The VP8 Data Format and Decoding Guide
+//! * [VP8.pdf](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37073.pdf) - An overview
+//! of the VP8 format
+//!
+
+use byteorder::{LittleEndian, ReadBytesExt};
+use std::convert::TryInto;
+use std::default::Default;
+use std::io::Read;
+use std::{cmp, error, fmt};
+
+use super::loop_filter;
+use super::transform;
+use crate::error::{
+ DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::ImageFormat;
+
+use crate::utils::clamp;
+
+const MAX_SEGMENTS: usize = 4;
+const NUM_DCT_TOKENS: usize = 12;
+
+// Prediction modes
+const DC_PRED: i8 = 0;
+const V_PRED: i8 = 1;
+const H_PRED: i8 = 2;
+const TM_PRED: i8 = 3;
+const B_PRED: i8 = 4;
+
+const B_DC_PRED: i8 = 0;
+const B_TM_PRED: i8 = 1;
+const B_VE_PRED: i8 = 2;
+const B_HE_PRED: i8 = 3;
+const B_LD_PRED: i8 = 4;
+const B_RD_PRED: i8 = 5;
+const B_VR_PRED: i8 = 6;
+const B_VL_PRED: i8 = 7;
+const B_HD_PRED: i8 = 8;
+const B_HU_PRED: i8 = 9;
+
+// Prediction mode enum
+#[repr(i8)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum LumaMode {
+ /// Predict DC using row above and column to the left.
+ DC = DC_PRED,
+
+ /// Predict rows using row above.
+ V = V_PRED,
+
+ /// Predict columns using column to the left.
+ H = H_PRED,
+
+ /// Propagate second differences.
+ TM = TM_PRED,
+
+ /// Each Y subblock is independently predicted.
+ B = B_PRED,
+}
+
+#[repr(i8)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum ChromaMode {
+ /// Predict DC using row above and column to the left.
+ DC = DC_PRED,
+
+ /// Predict rows using row above.
+ V = V_PRED,
+
+ /// Predict columns using column to the left.
+ H = H_PRED,
+
+ /// Propagate second differences.
+ TM = TM_PRED,
+}
+
+#[repr(i8)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum IntraMode {
+ DC = B_DC_PRED,
+ TM = B_TM_PRED,
+ VE = B_VE_PRED,
+ HE = B_HE_PRED,
+ LD = B_LD_PRED,
+ RD = B_RD_PRED,
+ VR = B_VR_PRED,
+ VL = B_VL_PRED,
+ HD = B_HD_PRED,
+ HU = B_HU_PRED,
+}
+
+type Prob = u8;
+
+static SEGMENT_ID_TREE: [i8; 6] = [2, 4, -0, -1, -2, -3];
+
+// Section 11.2
+// Tree for determining the keyframe luma intra prediction modes:
+static KEYFRAME_YMODE_TREE: [i8; 8] = [-B_PRED, 2, 4, 6, -DC_PRED, -V_PRED, -H_PRED, -TM_PRED];
+
+// Default probabilities for decoding the keyframe luma modes
+static KEYFRAME_YMODE_PROBS: [Prob; 4] = [145, 156, 163, 128];
+
+// Tree for determining the keyframe B_PRED mode:
+static KEYFRAME_BPRED_MODE_TREE: [i8; 18] = [
+ -B_DC_PRED, 2, -B_TM_PRED, 4, -B_VE_PRED, 6, 8, 12, -B_HE_PRED, 10, -B_RD_PRED, -B_VR_PRED,
+ -B_LD_PRED, 14, -B_VL_PRED, 16, -B_HD_PRED, -B_HU_PRED,
+];
+
+// Probabilities for the BPRED_MODE_TREE, indexed by the modes of the above and left subblocks
+static KEYFRAME_BPRED_MODE_PROBS: [[[u8; 9]; 10]; 10] = [
+ [
+ [231, 120, 48, 89, 115, 113, 120, 152, 112],
+ [152, 179, 64, 126, 170, 118, 46, 70, 95],
+ [175, 69, 143, 80, 85, 82, 72, 155, 103],
+ [56, 58, 10, 171, 218, 189, 17, 13, 152],
+ [144, 71, 10, 38, 171, 213, 144, 34, 26],
+ [114, 26, 17, 163, 44, 195, 21, 10, 173],
+ [121, 24, 80, 195, 26, 62, 44, 64, 85],
+ [170, 46, 55, 19, 136, 160, 33, 206, 71],
+ [63, 20, 8, 114, 114, 208, 12, 9, 226],
+ [81, 40, 11, 96, 182, 84, 29, 16, 36],
+ ],
+ [
+ [134, 183, 89, 137, 98, 101, 106, 165, 148],
+ [72, 187, 100, 130, 157, 111, 32, 75, 80],
+ [66, 102, 167, 99, 74, 62, 40, 234, 128],
+ [41, 53, 9, 178, 241, 141, 26, 8, 107],
+ [104, 79, 12, 27, 217, 255, 87, 17, 7],
+ [74, 43, 26, 146, 73, 166, 49, 23, 157],
+ [65, 38, 105, 160, 51, 52, 31, 115, 128],
+ [87, 68, 71, 44, 114, 51, 15, 186, 23],
+ [47, 41, 14, 110, 182, 183, 21, 17, 194],
+ [66, 45, 25, 102, 197, 189, 23, 18, 22],
+ ],
+ [
+ [88, 88, 147, 150, 42, 46, 45, 196, 205],
+ [43, 97, 183, 117, 85, 38, 35, 179, 61],
+ [39, 53, 200, 87, 26, 21, 43, 232, 171],
+ [56, 34, 51, 104, 114, 102, 29, 93, 77],
+ [107, 54, 32, 26, 51, 1, 81, 43, 31],
+ [39, 28, 85, 171, 58, 165, 90, 98, 64],
+ [34, 22, 116, 206, 23, 34, 43, 166, 73],
+ [68, 25, 106, 22, 64, 171, 36, 225, 114],
+ [34, 19, 21, 102, 132, 188, 16, 76, 124],
+ [62, 18, 78, 95, 85, 57, 50, 48, 51],
+ ],
+ [
+ [193, 101, 35, 159, 215, 111, 89, 46, 111],
+ [60, 148, 31, 172, 219, 228, 21, 18, 111],
+ [112, 113, 77, 85, 179, 255, 38, 120, 114],
+ [40, 42, 1, 196, 245, 209, 10, 25, 109],
+ [100, 80, 8, 43, 154, 1, 51, 26, 71],
+ [88, 43, 29, 140, 166, 213, 37, 43, 154],
+ [61, 63, 30, 155, 67, 45, 68, 1, 209],
+ [142, 78, 78, 16, 255, 128, 34, 197, 171],
+ [41, 40, 5, 102, 211, 183, 4, 1, 221],
+ [51, 50, 17, 168, 209, 192, 23, 25, 82],
+ ],
+ [
+ [125, 98, 42, 88, 104, 85, 117, 175, 82],
+ [95, 84, 53, 89, 128, 100, 113, 101, 45],
+ [75, 79, 123, 47, 51, 128, 81, 171, 1],
+ [57, 17, 5, 71, 102, 57, 53, 41, 49],
+ [115, 21, 2, 10, 102, 255, 166, 23, 6],
+ [38, 33, 13, 121, 57, 73, 26, 1, 85],
+ [41, 10, 67, 138, 77, 110, 90, 47, 114],
+ [101, 29, 16, 10, 85, 128, 101, 196, 26],
+ [57, 18, 10, 102, 102, 213, 34, 20, 43],
+ [117, 20, 15, 36, 163, 128, 68, 1, 26],
+ ],
+ [
+ [138, 31, 36, 171, 27, 166, 38, 44, 229],
+ [67, 87, 58, 169, 82, 115, 26, 59, 179],
+ [63, 59, 90, 180, 59, 166, 93, 73, 154],
+ [40, 40, 21, 116, 143, 209, 34, 39, 175],
+ [57, 46, 22, 24, 128, 1, 54, 17, 37],
+ [47, 15, 16, 183, 34, 223, 49, 45, 183],
+ [46, 17, 33, 183, 6, 98, 15, 32, 183],
+ [65, 32, 73, 115, 28, 128, 23, 128, 205],
+ [40, 3, 9, 115, 51, 192, 18, 6, 223],
+ [87, 37, 9, 115, 59, 77, 64, 21, 47],
+ ],
+ [
+ [104, 55, 44, 218, 9, 54, 53, 130, 226],
+ [64, 90, 70, 205, 40, 41, 23, 26, 57],
+ [54, 57, 112, 184, 5, 41, 38, 166, 213],
+ [30, 34, 26, 133, 152, 116, 10, 32, 134],
+ [75, 32, 12, 51, 192, 255, 160, 43, 51],
+ [39, 19, 53, 221, 26, 114, 32, 73, 255],
+ [31, 9, 65, 234, 2, 15, 1, 118, 73],
+ [88, 31, 35, 67, 102, 85, 55, 186, 85],
+ [56, 21, 23, 111, 59, 205, 45, 37, 192],
+ [55, 38, 70, 124, 73, 102, 1, 34, 98],
+ ],
+ [
+ [102, 61, 71, 37, 34, 53, 31, 243, 192],
+ [69, 60, 71, 38, 73, 119, 28, 222, 37],
+ [68, 45, 128, 34, 1, 47, 11, 245, 171],
+ [62, 17, 19, 70, 146, 85, 55, 62, 70],
+ [75, 15, 9, 9, 64, 255, 184, 119, 16],
+ [37, 43, 37, 154, 100, 163, 85, 160, 1],
+ [63, 9, 92, 136, 28, 64, 32, 201, 85],
+ [86, 6, 28, 5, 64, 255, 25, 248, 1],
+ [56, 8, 17, 132, 137, 255, 55, 116, 128],
+ [58, 15, 20, 82, 135, 57, 26, 121, 40],
+ ],
+ [
+ [164, 50, 31, 137, 154, 133, 25, 35, 218],
+ [51, 103, 44, 131, 131, 123, 31, 6, 158],
+ [86, 40, 64, 135, 148, 224, 45, 183, 128],
+ [22, 26, 17, 131, 240, 154, 14, 1, 209],
+ [83, 12, 13, 54, 192, 255, 68, 47, 28],
+ [45, 16, 21, 91, 64, 222, 7, 1, 197],
+ [56, 21, 39, 155, 60, 138, 23, 102, 213],
+ [85, 26, 85, 85, 128, 128, 32, 146, 171],
+ [18, 11, 7, 63, 144, 171, 4, 4, 246],
+ [35, 27, 10, 146, 174, 171, 12, 26, 128],
+ ],
+ [
+ [190, 80, 35, 99, 180, 80, 126, 54, 45],
+ [85, 126, 47, 87, 176, 51, 41, 20, 32],
+ [101, 75, 128, 139, 118, 146, 116, 128, 85],
+ [56, 41, 15, 176, 236, 85, 37, 9, 62],
+ [146, 36, 19, 30, 171, 255, 97, 27, 20],
+ [71, 30, 17, 119, 118, 255, 17, 18, 138],
+ [101, 38, 60, 138, 55, 70, 43, 26, 142],
+ [138, 45, 61, 62, 219, 1, 81, 188, 64],
+ [32, 41, 20, 117, 151, 142, 20, 21, 163],
+ [112, 19, 12, 61, 195, 128, 48, 4, 24],
+ ],
+];
+
+// Section 11.4 Tree for determining the macroblock chroma mode
+static KEYFRAME_UV_MODE_TREE: [i8; 6] = [-DC_PRED, 2, -V_PRED, 4, -H_PRED, -TM_PRED];
+
+// Probabilities for determining the macroblock chroma mode
+static KEYFRAME_UV_MODE_PROBS: [Prob; 3] = [142, 114, 183];
+
+// Section 13.4
+type TokenProbTables = [[[[Prob; NUM_DCT_TOKENS - 1]; 3]; 8]; 4];
+
+// Probabilities that a token's probability will be updated
+static COEFF_UPDATE_PROBS: TokenProbTables = [
+ [
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255],
+ [249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255],
+ [234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255],
+ [250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ ],
+ [
+ [
+ [217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255],
+ [234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255],
+ ],
+ [
+ [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ ],
+ [
+ [
+ [186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255],
+ [234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255],
+ [251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ ],
+ [
+ [
+ [248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255],
+ [248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255],
+ [248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255],
+ [250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ [
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255],
+ ],
+ ],
+];
+
+// Section 13.5
+// Default Probabilities for tokens
+static COEFF_PROBS: TokenProbTables = [
+ [
+ [
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128],
+ [189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128],
+ [106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128],
+ [181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128],
+ [78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128],
+ ],
+ [
+ [1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128],
+ [184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128],
+ [77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128],
+ [170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128],
+ [37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128],
+ [207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128],
+ [102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128],
+ [177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128],
+ [80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ ],
+ [
+ [
+ [198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62],
+ [131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1],
+ [68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128],
+ ],
+ [
+ [1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128],
+ [184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128],
+ [81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128],
+ ],
+ [
+ [1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128],
+ [99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128],
+ [23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128],
+ ],
+ [
+ [1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128],
+ [109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128],
+ [44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128],
+ [94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128],
+ [22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128],
+ ],
+ [
+ [1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128],
+ [124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128],
+ [35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128],
+ ],
+ [
+ [1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128],
+ [121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128],
+ [45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128],
+ ],
+ [
+ [1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128],
+ [203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128],
+ [137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128],
+ ],
+ ],
+ [
+ [
+ [253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128],
+ [175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128],
+ [73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128],
+ ],
+ [
+ [1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128],
+ [239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128],
+ [155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128],
+ [201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128],
+ [69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128],
+ ],
+ [
+ [1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128],
+ [223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128],
+ [141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128],
+ [190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128],
+ [149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128],
+ [213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128],
+ [55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ [
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ ],
+ [
+ [
+ [202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255],
+ [126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128],
+ [61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128],
+ ],
+ [
+ [1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128],
+ [166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128],
+ [39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128],
+ ],
+ [
+ [1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128],
+ [124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128],
+ [24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128],
+ ],
+ [
+ [1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128],
+ [149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128],
+ [28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128],
+ ],
+ [
+ [1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128],
+ [123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128],
+ [20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128],
+ ],
+ [
+ [1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128],
+ [168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128],
+ [47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128],
+ ],
+ [
+ [1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128],
+ [141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128],
+ [42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128],
+ ],
+ [
+ [1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ [238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128],
+ ],
+ ],
+];
+
+// DCT Tokens
+const DCT_0: i8 = 0;
+const DCT_1: i8 = 1;
+const DCT_2: i8 = 2;
+const DCT_3: i8 = 3;
+const DCT_4: i8 = 4;
+const DCT_CAT1: i8 = 5;
+const DCT_CAT2: i8 = 6;
+const DCT_CAT3: i8 = 7;
+const DCT_CAT4: i8 = 8;
+const DCT_CAT5: i8 = 9;
+const DCT_CAT6: i8 = 10;
+const DCT_EOB: i8 = 11;
+
+static DCT_TOKEN_TREE: [i8; 22] = [
+ -DCT_EOB, 2, -DCT_0, 4, -DCT_1, 6, 8, 12, -DCT_2, 10, -DCT_3, -DCT_4, 14, 16, -DCT_CAT1,
+ -DCT_CAT2, 18, 20, -DCT_CAT3, -DCT_CAT4, -DCT_CAT5, -DCT_CAT6,
+];
+
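+// Probabilities for reading the extra magnitude bits of the DCT_CAT1..DCT_CAT6 tokens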
+static PROB_DCT_CAT: [[Prob; 12]; 6] = [
+ [159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [165, 145, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [173, 148, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [176, 155, 140, 135, 0, 0, 0, 0, 0, 0, 0, 0],
+ [180, 157, 141, 134, 130, 0, 0, 0, 0, 0, 0, 0],
+ [254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0],
+];
+
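+// Base magnitude of each DCT_CATn token; the extra bits are added on top of these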
+static DCT_CAT_BASE: [u8; 6] = [5, 7, 11, 19, 35, 67];
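+// Coefficient band of each coefficient position, used to index the token probability tables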
+static COEFF_BANDS: [u8; 16] = [0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7];
+
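+// Section 14.1
+// Lookup tables mapping the clamped quantizer index (0..=127) to DC and AC
+// dequantization factors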
+#[rustfmt::skip]
+static DC_QUANT: [i16; 128] = [
+ 4, 5, 6, 7, 8, 9, 10, 10,
+ 11, 12, 13, 14, 15, 16, 17, 17,
+ 18, 19, 20, 20, 21, 21, 22, 22,
+ 23, 23, 24, 25, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 76, 77, 78, 79, 80, 81,
+ 82, 83, 84, 85, 86, 87, 88, 89,
+ 91, 93, 95, 96, 98, 100, 101, 102,
+ 104, 106, 108, 110, 112, 114, 116, 118,
+ 122, 124, 126, 128, 130, 132, 134, 136,
+ 138, 140, 143, 145, 148, 151, 154, 157,
+];
+
+#[rustfmt::skip]
+static AC_QUANT: [i16; 128] = [
+ 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 60,
+ 62, 64, 66, 68, 70, 72, 74, 76,
+ 78, 80, 82, 84, 86, 88, 90, 92,
+ 94, 96, 98, 100, 102, 104, 106, 108,
+ 110, 112, 114, 116, 119, 122, 125, 128,
+ 131, 134, 137, 140, 143, 146, 149, 152,
+ 155, 158, 161, 164, 167, 170, 173, 177,
+ 181, 185, 189, 193, 197, 201, 205, 209,
+ 213, 217, 221, 225, 229, 234, 239, 245,
+ 249, 254, 259, 264, 269, 274, 279, 284,
+];
+
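+// Maps a coefficient's position in zigzag scan order to its raster-order position within the 4x4 block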
+static ZIGZAG: [u8; 16] = [0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15];
+
+/// All errors that can occur when attempting to parse VP8 data inside a WebP
+#[derive(Debug, Clone, Copy)]
+enum DecoderError {
+ /// VP8's `[0x9D, 0x01, 0x2A]` magic not found or invalid
+ Vp8MagicInvalid([u8; 3]),
+
+ /// Decoder initialisation wasn't provided with enough data
+ NotEnoughInitData,
+
+ /// At time of writing, only the YUV colour-space encoded as `0` is specified
+ ColorSpaceInvalid(u8),
+ /// LUMA prediction mode was not recognised
+ LumaPredictionModeInvalid(i8),
+ /// Intra-prediction mode was not recognised
+ IntraPredictionModeInvalid(i8),
+ /// Chroma prediction mode was not recognised
+ ChromaPredictionModeInvalid(i8),
+}
+
+impl fmt::Display for DecoderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DecoderError::Vp8MagicInvalid(tag) => f.write_fmt(format_args!(
+ "Invalid VP8 magic: [{:#04X?}, {:#04X?}, {:#04X?}]",
+ tag[0], tag[1], tag[2]
+ )),
+
+ DecoderError::NotEnoughInitData => {
+ f.write_str("Expected at least 2 bytes of VP8 decoder initialization data")
+ }
+
+ DecoderError::ColorSpaceInvalid(cs) => {
+ f.write_fmt(format_args!("Invalid non-YUV VP8 color space {}", cs))
+ }
+ DecoderError::LumaPredictionModeInvalid(pm) => {
+ f.write_fmt(format_args!("Invalid VP8 LUMA prediction mode {}", pm))
+ }
+ DecoderError::IntraPredictionModeInvalid(i) => {
+ f.write_fmt(format_args!("Invalid VP8 intra-prediction mode {}", i))
+ }
+ DecoderError::ChromaPredictionModeInvalid(c) => {
+ f.write_fmt(format_args!("Invalid VP8 chroma prediction mode {}", c))
+ }
+ }
+ }
+}
+
+impl From<DecoderError> for ImageError {
+ fn from(e: DecoderError) -> ImageError {
+ ImageError::Decoding(DecodingError::new(ImageFormat::WebP.into(), e))
+ }
+}
+
+impl error::Error for DecoderError {}
+
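+// Boolean entropy decoder as described in RFC 6386, Section 7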
+struct BoolReader {
+ buf: Vec<u8>,
+ index: usize,
+
+ range: u32,
+ value: u32,
+ bit_count: u8,
+}
+
+impl BoolReader {
+ pub(crate) fn new() -> BoolReader {
+ BoolReader {
+ buf: Vec::new(),
+ range: 0,
+ value: 0,
+ bit_count: 0,
+ index: 0,
+ }
+ }
+
+ pub(crate) fn init(&mut self, buf: Vec<u8>) -> ImageResult<()> {
+ if buf.len() < 2 {
+ return Err(DecoderError::NotEnoughInitData.into());
+ }
+
+ self.buf = buf;
+ // Direct access safe, since length has just been validated.
+ self.value = (u32::from(self.buf[0]) << 8) | u32::from(self.buf[1]);
+ self.index = 2;
+ self.range = 255;
+ self.bit_count = 0;
+
+ Ok(())
+ }
+
+ pub(crate) fn read_bool(&mut self, probability: u8) -> bool {
+ let split = 1 + (((self.range - 1) * u32::from(probability)) >> 8);
+ let bigsplit = split << 8;
+
+ let retval = if self.value >= bigsplit {
+ self.range -= split;
+ self.value -= bigsplit;
+ true
+ } else {
+ self.range = split;
+ false
+ };
+
+ while self.range < 128 {
+ self.value <<= 1;
+ self.range <<= 1;
+ self.bit_count += 1;
+
+ if self.bit_count == 8 {
+ self.bit_count = 0;
+
+ // If no more bits are available, just don't do anything.
+ // This strategy is suggested in the reference implementation of RFC6386 (p.135)
+ if self.index < self.buf.len() {
+ self.value |= u32::from(self.buf[self.index]);
+ self.index += 1;
+ }
+ }
+ }
+
+ retval
+ }
+
+ pub(crate) fn read_literal(&mut self, n: u8) -> u8 {
+ let mut v = 0u8;
+ let mut n = n;
+
+ while n != 0 {
+ v = (v << 1) + self.read_bool(128u8) as u8;
+ n -= 1;
+ }
+
+ v
+ }
+
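+ // Reads an n-bit magnitude followed by a sign bit (1 means negative)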
+ pub(crate) fn read_magnitude_and_sign(&mut self, n: u8) -> i32 {
+ let magnitude = self.read_literal(n);
+ let sign = self.read_literal(1);
+
+ if sign == 1 {
+ -i32::from(magnitude)
+ } else {
+ i32::from(magnitude)
+ }
+ }
+
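+ // Walks the given tree: non-positive entries are negated leaf values,
+ // positive entries index the next node pair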
+ pub(crate) fn read_with_tree(&mut self, tree: &[i8], probs: &[Prob], start: isize) -> i8 {
+ let mut index = start;
+
+ loop {
+ let a = self.read_bool(probs[index as usize >> 1]);
+ let b = index + a as isize;
+ index = tree[b as usize] as isize;
+
+ if index <= 0 {
+ break;
+ }
+ }
+
+ -index as i8
+ }
+
+ pub(crate) fn read_flag(&mut self) -> bool {
+ 0 != self.read_literal(1)
+ }
+}
+
+#[derive(Default, Clone, Copy)]
+struct MacroBlock {
+ bpred: [IntraMode; 16],
+ complexity: [u8; 9],
+ luma_mode: LumaMode,
+ chroma_mode: ChromaMode,
+ segmentid: u8,
+ coeffs_skipped: bool,
+}
+
+/// A representation of the last decoded video frame
+#[derive(Default, Debug, Clone)]
+pub struct Frame {
+ /// The width of the luma plane
+ pub width: u16,
+
+ /// The height of the luma plane
+ pub height: u16,
+
+ /// The luma plane of the frame
+ pub ybuf: Vec<u8>,
+
+ /// The U (chroma) plane of the frame
+ pub ubuf: Vec<u8>,
+
+ /// The V (chroma) plane of the frame
+ pub vbuf: Vec<u8>,
+
+ /// Indicates whether this frame is a keyframe
+ pub keyframe: bool,
+
+ version: u8,
+
+ /// Indicates whether this frame is intended for display
+ pub for_display: bool,
+
+ // Section 9.2
+ /// The pixel type of the frame as defined by Section 9.2
+ /// of the VP8 Specification
+ pub pixel_type: u8,
+
+ // Section 9.4 and 15
+ filter_type: bool, // true: simple filter, false: normal filter
+ filter_level: u8,
+ sharpness_level: u8,
+}
+
+impl Frame {
+ /// The chroma planes are half the width and half the height of the luma plane (rounded up)
+ fn chroma_width(&self) -> u16 {
+ (self.width + 1) / 2
+ }
+
+ fn chroma_height(&self) -> u16 {
+ (self.height + 1) / 2
+ }
+
+ /// Fills an rgb buffer with the image
+ pub(crate) fn fill_rgb(&self, buf: &mut [u8]) {
+ for (index, rgb_chunk) in (0..self.ybuf.len()).zip(buf.chunks_exact_mut(3)) {
+ let y = index / self.width as usize;
+ let x = index % self.width as usize;
+ let chroma_index = self.chroma_width() as usize * (y / 2) + x / 2;
+
+ Frame::fill_single(
+ self.ybuf[index],
+ self.ubuf[chroma_index],
+ self.vbuf[chroma_index],
+ rgb_chunk,
+ );
+ }
+ }
+
+ /// Fills the RGB channels of an RGBA buffer, leaving the alpha values untouched
+ pub(crate) fn fill_rgba(&self, buf: &mut [u8]) {
+ for (index, rgba_chunk) in (0..self.ybuf.len()).zip(buf.chunks_exact_mut(4)) {
+ let y = index / self.width as usize;
+ let x = index % self.width as usize;
+ let chroma_index = self.chroma_width() as usize * (y / 2) + x / 2;
+
+ Frame::fill_single(
+ self.ybuf[index],
+ self.ubuf[chroma_index],
+ self.vbuf[chroma_index],
+ rgba_chunk,
+ );
+ }
+ }
+
+ /// Conversion values from https://docs.microsoft.com/en-us/windows/win32/medfound/recommended-8-bit-yuv-formats-for-video-rendering#converting-8-bit-yuv-to-rgb888
+ fn fill_single(y: u8, u: u8, v: u8, rgb: &mut [u8]) {
+ let c: i32 = i32::from(y) - 16;
+ let d: i32 = i32::from(u) - 128;
+ let e: i32 = i32::from(v) - 128;
+
+ let r: u8 = clamp((298 * c + 409 * e + 128) >> 8, 0, 255)
+ .try_into()
+ .unwrap();
+ let g: u8 = clamp((298 * c - 100 * d - 208 * e + 128) >> 8, 0, 255)
+ .try_into()
+ .unwrap();
+ let b: u8 = clamp((298 * c + 516 * d + 128) >> 8, 0, 255)
+ .try_into()
+ .unwrap();
+
+ rgb[0] = r;
+ rgb[1] = g;
+ rgb[2] = b;
+ }
+
+ /// Gets the size of the RGB buffer needed to hold the frame (3 bytes per luma pixel)
+ pub fn get_buf_size(&self) -> usize {
+ self.ybuf.len() * 3
+ }
+}
+
+#[derive(Clone, Copy, Default)]
+struct Segment {
+ ydc: i16,
+ yac: i16,
+
+ y2dc: i16,
+ y2ac: i16,
+
+ uvdc: i16,
+ uvac: i16,
+
+ delta_values: bool,
+
+ quantizer_level: i8,
+ loopfilter_level: i8,
+}
+
+/// VP8 Decoder
+///
+/// Only decodes keyframes
+pub struct Vp8Decoder<R> {
+ r: R,
+ b: BoolReader,
+
+ mbwidth: u16,
+ mbheight: u16,
+ macroblocks: Vec<MacroBlock>,
+
+ frame: Frame,
+
+ segments_enabled: bool,
+ segments_update_map: bool,
+ segment: [Segment; MAX_SEGMENTS],
+
+ ref_delta: [i32; 4],
+ mode_delta: [i32; 4],
+
+ partitions: [BoolReader; 8],
+ num_partitions: u8,
+
+ segment_tree_probs: [Prob; 3],
+ token_probs: Box<TokenProbTables>,
+
+ // Section 9.10
+ prob_intra: Prob,
+
+ // Section 9.11
+ prob_skip_false: Option<Prob>,
+
+ top: Vec<MacroBlock>,
+ left: MacroBlock,
+
+ top_border: Vec<u8>,
+ left_border: Vec<u8>,
+}
+
+impl<R: Read> Vp8Decoder<R> {
+ /// Create a new decoder.
+ /// The reader must present a raw VP8 bitstream to the decoder.
+ pub fn new(r: R) -> Vp8Decoder<R> {
+ let f = Frame::default();
+ let s = Segment::default();
+ let m = MacroBlock::default();
+
+ Vp8Decoder {
+ r,
+ b: BoolReader::new(),
+
+ mbwidth: 0,
+ mbheight: 0,
+ macroblocks: Vec::new(),
+
+ frame: f,
+ segments_enabled: false,
+ segments_update_map: false,
+ segment: [s; MAX_SEGMENTS],
+
+ ref_delta: [0; 4],
+ mode_delta: [0; 4],
+
+ partitions: [
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ BoolReader::new(),
+ ],
+
+ num_partitions: 1,
+
+ segment_tree_probs: [255u8; 3],
+ token_probs: Box::new(COEFF_PROBS),
+
+ // Section 9.10
+ prob_intra: 0u8,
+
+ // Section 9.11
+ prob_skip_false: None,
+
+ top: Vec::new(),
+ left: m,
+
+ top_border: Vec::new(),
+ left_border: Vec::new(),
+ }
+ }
+
+ fn update_token_probabilities(&mut self) {
+ for (i, is) in COEFF_UPDATE_PROBS.iter().enumerate() {
+ for (j, js) in is.iter().enumerate() {
+ for (k, ks) in js.iter().enumerate() {
+ for (t, prob) in ks.iter().enumerate().take(NUM_DCT_TOKENS - 1) {
+ if self.b.read_bool(*prob) {
+ let v = self.b.read_literal(8);
+ self.token_probs[i][j][k][t] = v;
+ }
+ }
+ }
+ }
+ }
+ }
+
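+ // The sizes of the first n - 1 token partitions are stored as 3-byte
+ // little-endian values; the last partition extends to the end of the data.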
+ fn init_partitions(&mut self, n: usize) -> ImageResult<()> {
+ if n > 1 {
+ let mut sizes = vec![0; 3 * n - 3];
+ self.r.read_exact(sizes.as_mut_slice())?;
+
+ for (i, s) in sizes.chunks(3).enumerate() {
+ let size = { s }
+ .read_u24::<LittleEndian>()
+ .expect("Reading from &[u8] can't fail and the chunk is complete");
+
+ let mut buf = vec![0; size as usize];
+ self.r.read_exact(buf.as_mut_slice())?;
+
+ self.partitions[i].init(buf)?;
+ }
+ }
+
+ let mut buf = Vec::new();
+ self.r.read_to_end(&mut buf)?;
+ self.partitions[n - 1].init(buf)?;
+
+ Ok(())
+ }
+
+ fn read_quantization_indices(&mut self) {
+ fn dc_quant(index: i32) -> i16 {
+ DC_QUANT[clamp(index, 0, 127) as usize]
+ }
+
+ fn ac_quant(index: i32) -> i16 {
+ AC_QUANT[clamp(index, 0, 127) as usize]
+ }
+
+ let yac_abs = self.b.read_literal(7);
+ let ydc_delta = if self.b.read_flag() {
+ self.b.read_magnitude_and_sign(4)
+ } else {
+ 0
+ };
+
+ let y2dc_delta = if self.b.read_flag() {
+ self.b.read_magnitude_and_sign(4)
+ } else {
+ 0
+ };
+
+ let y2ac_delta = if self.b.read_flag() {
+ self.b.read_magnitude_and_sign(4)
+ } else {
+ 0
+ };
+
+ let uvdc_delta = if self.b.read_flag() {
+ self.b.read_magnitude_and_sign(4)
+ } else {
+ 0
+ };
+
+ let uvac_delta = if self.b.read_flag() {
+ self.b.read_magnitude_and_sign(4)
+ } else {
+ 0
+ };
+
+ let n = if self.segments_enabled {
+ MAX_SEGMENTS
+ } else {
+ 1
+ };
+ for i in 0usize..n {
+ let base = i32::from(if !self.segment[i].delta_values {
+ i16::from(self.segment[i].quantizer_level)
+ } else {
+ i16::from(self.segment[i].quantizer_level) + i16::from(yac_abs)
+ });
+
+ self.segment[i].ydc = dc_quant(base + ydc_delta);
+ self.segment[i].yac = ac_quant(base);
+
+ self.segment[i].y2dc = dc_quant(base + y2dc_delta) * 2;
+ // The intermediate result (at most `284 * 155`) can be larger than the `i16` range.
+ self.segment[i].y2ac = (i32::from(ac_quant(base + y2ac_delta)) * 155 / 100) as i16;
+
+ self.segment[i].uvdc = dc_quant(base + uvdc_delta);
+ self.segment[i].uvac = ac_quant(base + uvac_delta);
+
+ if self.segment[i].y2ac < 8 {
+ self.segment[i].y2ac = 8;
+ }
+
+ if self.segment[i].uvdc > 132 {
+ self.segment[i].uvdc = 132;
+ }
+ }
+ }
+
+ fn read_loop_filter_adjustments(&mut self) {
+ if self.b.read_flag() {
+ for i in 0usize..4 {
+ let ref_frame_delta_update_flag = self.b.read_flag();
+
+ self.ref_delta[i] = if ref_frame_delta_update_flag {
+ self.b.read_magnitude_and_sign(6)
+ } else {
+ 0i32
+ };
+ }
+
+ for i in 0usize..4 {
+ let mb_mode_delta_update_flag = self.b.read_flag();
+
+ self.mode_delta[i] = if mb_mode_delta_update_flag {
+ self.b.read_magnitude_and_sign(6)
+ } else {
+ 0i32
+ };
+ }
+ }
+ }
+
+ fn read_segment_updates(&mut self) {
+ // Section 9.3
+ self.segments_update_map = self.b.read_flag();
+ let update_segment_feature_data = self.b.read_flag();
+
+ if update_segment_feature_data {
+ let segment_feature_mode = self.b.read_flag();
+
+ for i in 0usize..MAX_SEGMENTS {
+ self.segment[i].delta_values = !segment_feature_mode;
+ }
+
+ for i in 0usize..MAX_SEGMENTS {
+ let update = self.b.read_flag();
+
+ self.segment[i].quantizer_level = if update {
+ self.b.read_magnitude_and_sign(7)
+ } else {
+ 0i32
+ } as i8;
+ }
+
+ for i in 0usize..MAX_SEGMENTS {
+ let update = self.b.read_flag();
+
+ self.segment[i].loopfilter_level = if update {
+ self.b.read_magnitude_and_sign(6)
+ } else {
+ 0i32
+ } as i8;
+ }
+ }
+
+ if self.segments_update_map {
+ for i in 0usize..3 {
+ let update = self.b.read_flag();
+
+ self.segment_tree_probs[i] = if update { self.b.read_literal(8) } else { 255 };
+ }
+ }
+ }
+
+ fn read_frame_header(&mut self) -> ImageResult<()> {
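+ // Section 9.1: the 3-byte frame tag packs the keyframe flag, version,
+ // show-frame flag and the size of the first partition.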
+ let tag = self.r.read_u24::<LittleEndian>()?;
+
+ self.frame.keyframe = tag & 1 == 0;
+ self.frame.version = ((tag >> 1) & 7) as u8;
+ self.frame.for_display = (tag >> 4) & 1 != 0;
+
+ let first_partition_size = tag >> 5;
+
+ if self.frame.keyframe {
+ let mut tag = [0u8; 3];
+ self.r.read_exact(&mut tag)?;
+
+ if tag != [0x9d, 0x01, 0x2a] {
+ return Err(DecoderError::Vp8MagicInvalid(tag).into());
+ }
+
+ let w = self.r.read_u16::<LittleEndian>()?;
+ let h = self.r.read_u16::<LittleEndian>()?;
+
+ self.frame.width = w & 0x3FFF;
+ self.frame.height = h & 0x3FFF;
+
+ self.top = init_top_macroblocks(self.frame.width as usize);
+ // Almost always the first macroblock, except when none exists (i.e. `width == 0`)
+ self.left = self.top.get(0).cloned().unwrap_or_default();
+
+ self.mbwidth = (self.frame.width + 15) / 16;
+ self.mbheight = (self.frame.height + 15) / 16;
+
+ self.frame.ybuf = vec![0u8; self.frame.width as usize * self.frame.height as usize];
+ self.frame.ubuf =
+ vec![0u8; self.frame.chroma_width() as usize * self.frame.chroma_height() as usize];
+ self.frame.vbuf =
+ vec![0u8; self.frame.chroma_width() as usize * self.frame.chroma_height() as usize];
+
+ self.top_border = vec![127u8; self.frame.width as usize + 4 + 16];
+ self.left_border = vec![129u8; 1 + 16];
+ }
+
+ let mut buf = vec![0; first_partition_size as usize];
+ self.r.read_exact(&mut buf)?;
+
+ // initialise binary decoder
+ self.b.init(buf)?;
+
+ if self.frame.keyframe {
+ let color_space = self.b.read_literal(1);
+ self.frame.pixel_type = self.b.read_literal(1);
+
+ if color_space != 0 {
+ return Err(DecoderError::ColorSpaceInvalid(color_space).into());
+ }
+ }
+
+ self.segments_enabled = self.b.read_flag();
+ if self.segments_enabled {
+ self.read_segment_updates();
+ }
+
+ self.frame.filter_type = self.b.read_flag();
+ self.frame.filter_level = self.b.read_literal(6);
+ self.frame.sharpness_level = self.b.read_literal(3);
+
+ let lf_adjust_enable = self.b.read_flag();
+ if lf_adjust_enable {
+ self.read_loop_filter_adjustments();
+ }
+
+ self.num_partitions = (1usize << self.b.read_literal(2) as usize) as u8;
+ let num_partitions = self.num_partitions as usize;
+ self.init_partitions(num_partitions)?;
+
+ self.read_quantization_indices();
+
+ if !self.frame.keyframe {
+ // 9.7 refresh golden frame and altref frame
+ // FIXME: support this?
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::WebP.into(),
+ UnsupportedErrorKind::GenericFeature("Non-keyframe frames".to_owned()),
+ ),
+ ));
+ } else {
+ // refresh_entropy_probs flag; its value is irrelevant when only keyframes are decoded
+ let _ = self.b.read_literal(1);
+ }
+
+ self.update_token_probabilities();
+
+ let mb_no_skip_coeff = self.b.read_literal(1);
+ self.prob_skip_false = if mb_no_skip_coeff == 1 {
+ Some(self.b.read_literal(8))
+ } else {
+ None
+ };
+
+ if !self.frame.keyframe {
+ // 9.10 remaining frame data
+ self.prob_intra = 0;
+
+ // FIXME: support this?
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::WebP.into(),
+ UnsupportedErrorKind::GenericFeature("Non-keyframe frames".to_owned()),
+ ),
+ ));
+ } else {
+ // Reset motion vectors
+ }
+
+ Ok(())
+ }
+
+ fn read_macroblock_header(&mut self, mbx: usize) -> ImageResult<MacroBlock> {
+ let mut mb = MacroBlock::default();
+
+ if self.segments_enabled && self.segments_update_map {
+ mb.segmentid = self
+ .b
+ .read_with_tree(&SEGMENT_ID_TREE, &self.segment_tree_probs, 0)
+ as u8;
+ };
+
+ mb.coeffs_skipped = if self.prob_skip_false.is_some() {
+ self.b.read_bool(*self.prob_skip_false.as_ref().unwrap())
+ } else {
+ false
+ };
+
+ let inter_predicted = if !self.frame.keyframe {
+ self.b.read_bool(self.prob_intra)
+ } else {
+ false
+ };
+
+ if inter_predicted {
+ return Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormat::WebP.into(),
+ UnsupportedErrorKind::GenericFeature("VP8 inter-prediction".to_owned()),
+ ),
+ ));
+ }
+
+ if self.frame.keyframe {
+ // intra prediction
+ let luma = self
+ .b
+ .read_with_tree(&KEYFRAME_YMODE_TREE, &KEYFRAME_YMODE_PROBS, 0);
+ mb.luma_mode =
+ LumaMode::from_i8(luma).ok_or(DecoderError::LumaPredictionModeInvalid(luma))?;
+
+ match mb.luma_mode.into_intra() {
+ // `LumaMode::B` - This is predicted individually
+ None => {
+ for y in 0usize..4 {
+ for x in 0usize..4 {
+ let top = self.top[mbx].bpred[12 + x];
+ let left = self.left.bpred[y];
+ let intra = self.b.read_with_tree(
+ &KEYFRAME_BPRED_MODE_TREE,
+ &KEYFRAME_BPRED_MODE_PROBS[top as usize][left as usize],
+ 0,
+ );
+ let bmode = IntraMode::from_i8(intra)
+ .ok_or(DecoderError::IntraPredictionModeInvalid(intra))?;
+ mb.bpred[x + y * 4] = bmode;
+
+ self.top[mbx].bpred[12 + x] = bmode;
+ self.left.bpred[y] = bmode;
+ }
+ }
+ }
+ Some(mode) => {
+ for i in 0usize..4 {
+ mb.bpred[12 + i] = mode;
+ self.left.bpred[i] = mode;
+ }
+ }
+ }
+
+ let chroma = self
+ .b
+ .read_with_tree(&KEYFRAME_UV_MODE_TREE, &KEYFRAME_UV_MODE_PROBS, 0);
+ mb.chroma_mode = ChromaMode::from_i8(chroma)
+ .ok_or(DecoderError::ChromaPredictionModeInvalid(chroma))?;
+ }
+
+ self.top[mbx].chroma_mode = mb.chroma_mode;
+ self.top[mbx].luma_mode = mb.luma_mode;
+ self.top[mbx].bpred = mb.bpred;
+
+ Ok(mb)
+ }
+
+ fn intra_predict_luma(&mut self, mbx: usize, mby: usize, mb: &MacroBlock, resdata: &[i32]) {
+ let stride = 1usize + 16 + 4;
+ let w = self.frame.width as usize;
+ let mw = self.mbwidth as usize;
+ let mut ws = create_border_luma(mbx, mby, mw, &self.top_border, &self.left_border);
+
+ match mb.luma_mode {
+ LumaMode::V => predict_vpred(&mut ws, 16, 1, 1, stride),
+ LumaMode::H => predict_hpred(&mut ws, 16, 1, 1, stride),
+ LumaMode::TM => predict_tmpred(&mut ws, 16, 1, 1, stride),
+ LumaMode::DC => predict_dcpred(&mut ws, 16, stride, mby != 0, mbx != 0),
+ LumaMode::B => predict_4x4(&mut ws, stride, &mb.bpred, resdata),
+ }
+
+ if mb.luma_mode != LumaMode::B {
+ for y in 0usize..4 {
+ for x in 0usize..4 {
+ let i = x + y * 4;
+ // Create a reference to a [i32; 16] array for add_residue (slices of size 16 do not work).
+ let rb: &[i32; 16] = resdata[i * 16..][..16].try_into().unwrap();
+ let y0 = 1 + y * 4;
+ let x0 = 1 + x * 4;
+
+ add_residue(&mut ws, rb, y0, x0, stride);
+ }
+ }
+ }
+
+ self.left_border[0] = ws[16];
+
+ for i in 0usize..16 {
+ self.top_border[mbx * 16 + i] = ws[16 * stride + 1 + i];
+ self.left_border[i + 1] = ws[(i + 1) * stride + 16];
+ }
+
+ // Length is the remainder to the border, but maximally the current chunk.
+ let ylength = cmp::min(self.frame.height as usize - mby * 16, 16);
+ let xlength = cmp::min(self.frame.width as usize - mbx * 16, 16);
+
+ for y in 0usize..ylength {
+ for x in 0usize..xlength {
+ self.frame.ybuf[(mby * 16 + y) * w + mbx * 16 + x] = ws[(1 + y) * stride + 1 + x];
+ }
+ }
+ }
+
+ fn intra_predict_chroma(&mut self, mbx: usize, mby: usize, mb: &MacroBlock, resdata: &[i32]) {
+ let stride = 1usize + 8;
+
+ let w = self.frame.chroma_width() as usize;
+
+ //8x8 with left top border of 1
+ let mut uws = [0u8; (8 + 1) * (8 + 1)];
+ let mut vws = [0u8; (8 + 1) * (8 + 1)];
+
+ let ylength = cmp::min(self.frame.chroma_height() as usize - mby * 8, 8);
+ let xlength = cmp::min(self.frame.chroma_width() as usize - mbx * 8, 8);
+
+ //left border
+ for y in 0usize..8 {
+ let (uy, vy) = if mbx == 0 || y >= ylength {
+ (129, 129)
+ } else {
+ let index = (mby * 8 + y) * w + ((mbx - 1) * 8 + 7);
+ (self.frame.ubuf[index], self.frame.vbuf[index])
+ };
+
+ uws[(y + 1) * stride] = uy;
+ vws[(y + 1) * stride] = vy;
+ }
+ //top border
+ for x in 0usize..8 {
+ let (ux, vx) = if mby == 0 || x >= xlength {
+ (127, 127)
+ } else {
+ let index = ((mby - 1) * 8 + 7) * w + (mbx * 8 + x);
+ (self.frame.ubuf[index], self.frame.vbuf[index])
+ };
+
+ uws[x + 1] = ux;
+ vws[x + 1] = vx;
+ }
+
+ //top left point
+ let (u1, v1) = if mby == 0 {
+ (127, 127)
+ } else if mbx == 0 {
+ (129, 129)
+ } else {
+ let index = ((mby - 1) * 8 + 7) * w + (mbx - 1) * 8 + 7;
+ if index >= self.frame.ubuf.len() {
+ (127, 127)
+ } else {
+ (self.frame.ubuf[index], self.frame.vbuf[index])
+ }
+ };
+
+ uws[0] = u1;
+ vws[0] = v1;
+
+ match mb.chroma_mode {
+ ChromaMode::DC => {
+ predict_dcpred(&mut uws, 8, stride, mby != 0, mbx != 0);
+ predict_dcpred(&mut vws, 8, stride, mby != 0, mbx != 0);
+ }
+ ChromaMode::V => {
+ predict_vpred(&mut uws, 8, 1, 1, stride);
+ predict_vpred(&mut vws, 8, 1, 1, stride);
+ }
+ ChromaMode::H => {
+ predict_hpred(&mut uws, 8, 1, 1, stride);
+ predict_hpred(&mut vws, 8, 1, 1, stride);
+ }
+ ChromaMode::TM => {
+ predict_tmpred(&mut uws, 8, 1, 1, stride);
+ predict_tmpred(&mut vws, 8, 1, 1, stride);
+ }
+ }
+
+ for y in 0usize..2 {
+ for x in 0usize..2 {
+ let i = x + y * 2;
+ let urb: &[i32; 16] = resdata[16 * 16 + i * 16..][..16].try_into().unwrap();
+
+ let y0 = 1 + y * 4;
+ let x0 = 1 + x * 4;
+ add_residue(&mut uws, urb, y0, x0, stride);
+
+ let vrb: &[i32; 16] = resdata[20 * 16 + i * 16..][..16].try_into().unwrap();
+
+ add_residue(&mut vws, vrb, y0, x0, stride);
+ }
+ }
+
+ for y in 0usize..ylength {
+ for x in 0usize..xlength {
+ self.frame.ubuf[(mby * 8 + y) * w + mbx * 8 + x] = uws[(1 + y) * stride + 1 + x];
+ self.frame.vbuf[(mby * 8 + y) * w + mbx * 8 + x] = vws[(1 + y) * stride + 1 + x];
+ }
+ }
+ }
+
+ fn read_coefficients(
+ &mut self,
+ block: &mut [i32],
+ p: usize,
+ plane: usize,
+ complexity: usize,
+ dcq: i16,
+ acq: i16,
+ ) -> bool {
+ let first = if plane == 0 { 1usize } else { 0usize };
+ let probs = &self.token_probs[plane];
+ let tree = &DCT_TOKEN_TREE;
+
+ let mut complexity = complexity;
+ let mut has_coefficients = false;
+ let mut skip = false;
+
+ for i in first..16usize {
+ let table = &probs[COEFF_BANDS[i] as usize][complexity];
+
+ let token = if !skip {
+ self.partitions[p].read_with_tree(tree, table, 0)
+ } else {
+ self.partitions[p].read_with_tree(tree, table, 2)
+ };
+
+ let mut abs_value = i32::from(match token {
+ DCT_EOB => break,
+
+ DCT_0 => {
+ skip = true;
+ has_coefficients = true;
+ complexity = 0;
+ continue;
+ }
+
+ literal @ DCT_1..=DCT_4 => i16::from(literal),
+
+ category @ DCT_CAT1..=DCT_CAT6 => {
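+                    // Category tokens encode a base value plus extra bits; each extra bit is
+                    // read with its own probability from PROB_DCT_CAT.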
+ let t = PROB_DCT_CAT[(category - DCT_CAT1) as usize];
+
+ let mut extra = 0i16;
+ let mut j = 0;
+
+ while t[j] > 0 {
+ extra = extra + extra + self.partitions[p].read_bool(t[j]) as i16;
+ j += 1;
+ }
+
+ i16::from(DCT_CAT_BASE[(category - DCT_CAT1) as usize]) + extra
+ }
+
+ c => panic!("unknown token: {}", c),
+ });
+
+ skip = false;
+
+ complexity = if abs_value == 0 {
+ 0
+ } else if abs_value == 1 {
+ 1
+ } else {
+ 2
+ };
+
+ if self.partitions[p].read_bool(128) {
+ abs_value = -abs_value;
+ }
+
+ block[ZIGZAG[i] as usize] =
+ abs_value * i32::from(if ZIGZAG[i] > 0 { acq } else { dcq });
+
+ has_coefficients = true;
+ }
+
+ has_coefficients
+ }
+
+ fn read_residual_data(&mut self, mb: &MacroBlock, mbx: usize, p: usize) -> [i32; 384] {
+ let sindex = mb.segmentid as usize;
+ let mut blocks = [0i32; 384];
+ let mut plane = if mb.luma_mode == LumaMode::B { 3 } else { 1 };
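+        // Token "plane" contexts: 0 = Y (starting at the first AC coefficient, Y2 present),
+        // 1 = Y2 (luma DC), 2 = chroma, 3 = Y with no Y2 block (B_PRED).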
+
+ if plane == 1 {
+ let complexity = self.top[mbx].complexity[0] + self.left.complexity[0];
+ let mut block = [0i32; 16];
+ let dcq = self.segment[sindex].y2dc;
+ let acq = self.segment[sindex].y2ac;
+ let n = self.read_coefficients(&mut block, p, plane, complexity as usize, dcq, acq);
+
+ self.left.complexity[0] = if n { 1 } else { 0 };
+ self.top[mbx].complexity[0] = if n { 1 } else { 0 };
+
+ transform::iwht4x4(&mut block);
+
+ for k in 0usize..16 {
+ blocks[16 * k] = block[k];
+ }
+
+ plane = 0;
+ }
+
+ for y in 0usize..4 {
+ let mut left = self.left.complexity[y + 1];
+ for x in 0usize..4 {
+ let i = x + y * 4;
+ let block = &mut blocks[i * 16..i * 16 + 16];
+
+ let complexity = self.top[mbx].complexity[x + 1] + left;
+ let dcq = self.segment[sindex].ydc;
+ let acq = self.segment[sindex].yac;
+
+ let n = self.read_coefficients(block, p, plane, complexity as usize, dcq, acq);
+
+ if block[0] != 0 || n {
+ transform::idct4x4(block);
+ }
+
+ left = if n { 1 } else { 0 };
+ self.top[mbx].complexity[x + 1] = if n { 1 } else { 0 };
+ }
+
+ self.left.complexity[y + 1] = left;
+ }
+
+ plane = 2;
+
+ for &j in &[5usize, 7usize] {
+ for y in 0usize..2 {
+ let mut left = self.left.complexity[y + j];
+
+ for x in 0usize..2 {
+ let i = x + y * 2 + if j == 5 { 16 } else { 20 };
+ let block = &mut blocks[i * 16..i * 16 + 16];
+
+ let complexity = self.top[mbx].complexity[x + j] + left;
+ let dcq = self.segment[sindex].uvdc;
+ let acq = self.segment[sindex].uvac;
+
+ let n = self.read_coefficients(block, p, plane, complexity as usize, dcq, acq);
+ if block[0] != 0 || n {
+ transform::idct4x4(block);
+ }
+
+ left = if n { 1 } else { 0 };
+ self.top[mbx].complexity[x + j] = if n { 1 } else { 0 };
+ }
+
+ self.left.complexity[y + j] = left;
+ }
+ }
+
+ blocks
+ }
+
+ /// Does loop filtering on the macroblock
+ fn loop_filter(&mut self, mbx: usize, mby: usize, mb: &MacroBlock) {
+ let luma_w = self.frame.width as usize;
+ let luma_h = self.frame.height as usize;
+ let chroma_w = self.frame.chroma_width() as usize;
+ let chroma_h = self.frame.chroma_height() as usize;
+
+ let (filter_level, interior_limit, hev_threshold) = self.calculate_filter_parameters(mb);
+
+ if filter_level > 0 {
+ let mbedge_limit = (filter_level + 2) * 2 + interior_limit;
+ let sub_bedge_limit = (filter_level * 2) + interior_limit;
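+            // Macroblock edges use a larger limit than interior subblock edges, so block
+            // boundaries are filtered more strongly.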
+
+ let luma_ylength = cmp::min(luma_h - 16 * mby, 16);
+ let luma_xlength = cmp::min(luma_w - 16 * mbx, 16);
+
+ let chroma_ylength = cmp::min(chroma_h - 8 * mby, 8);
+ let chroma_xlength = cmp::min(chroma_w - 8 * mbx, 8);
+
+ //filter across left of macroblock
+ if mbx > 0 {
+ //simple loop filtering
+ if self.frame.filter_type {
+ if luma_xlength >= 2 {
+ for y in 0usize..luma_ylength {
+ let y0 = mby * 16 + y;
+ let x0 = mbx * 16;
+
+ loop_filter::simple_segment(
+ mbedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ 1,
+ );
+ }
+ }
+ } else {
+ if luma_xlength >= 4 {
+ for y in 0usize..luma_ylength {
+ let y0 = mby * 16 + y;
+ let x0 = mbx * 16;
+
+ loop_filter::macroblock_filter(
+ hev_threshold,
+ interior_limit,
+ mbedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ 1,
+ );
+ }
+ }
+
+ if chroma_xlength >= 4 {
+ for y in 0usize..chroma_ylength {
+ let y0 = mby * 8 + y;
+ let x0 = mbx * 8;
+
+ loop_filter::macroblock_filter(
+ hev_threshold,
+ interior_limit,
+ mbedge_limit,
+ &mut self.frame.ubuf[..],
+ y0 * chroma_w + x0,
+ 1,
+ );
+ loop_filter::macroblock_filter(
+ hev_threshold,
+ interior_limit,
+ mbedge_limit,
+ &mut self.frame.vbuf[..],
+ y0 * chroma_w + x0,
+ 1,
+ );
+ }
+ }
+ }
+ }
+
+ //filter across vertical subblocks in macroblock
+ if mb.luma_mode == LumaMode::B || !mb.coeffs_skipped {
+ if self.frame.filter_type {
+ for x in (4usize..luma_xlength - 1).step_by(4) {
+ for y in 0..luma_ylength {
+ let y0 = mby * 16 + y;
+ let x0 = mbx * 16 + x;
+
+ loop_filter::simple_segment(
+ sub_bedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ 1,
+ );
+ }
+ }
+ } else {
+ if luma_xlength > 3 {
+ for x in (4usize..luma_xlength - 3).step_by(4) {
+ for y in 0..luma_ylength {
+ let y0 = mby * 16 + y;
+ let x0 = mbx * 16 + x;
+
+ loop_filter::subblock_filter(
+ hev_threshold,
+ interior_limit,
+ sub_bedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ 1,
+ );
+ }
+ }
+ }
+
+ if chroma_xlength == 8 {
+ for y in 0usize..chroma_ylength {
+ let y0 = mby * 8 + y;
+ let x0 = mbx * 8 + 4;
+
+ loop_filter::subblock_filter(
+ hev_threshold,
+ interior_limit,
+ sub_bedge_limit,
+ &mut self.frame.ubuf[..],
+ y0 * chroma_w + x0,
+ 1,
+ );
+
+ loop_filter::subblock_filter(
+ hev_threshold,
+ interior_limit,
+ sub_bedge_limit,
+ &mut self.frame.vbuf[..],
+ y0 * chroma_w + x0,
+ 1,
+ );
+ }
+ }
+ }
+ }
+
+ //filter across top of macroblock
+ if mby > 0 {
+ if self.frame.filter_type {
+ if luma_ylength >= 2 {
+ for x in 0usize..luma_xlength {
+ let y0 = mby * 16;
+ let x0 = mbx * 16 + x;
+
+ loop_filter::simple_segment(
+ mbedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ luma_w,
+ );
+ }
+ }
+ } else {
+                //if bottom macroblock, can only filter if there are at least 3 pixels below the edge
+ if luma_ylength >= 4 {
+ for x in 0usize..luma_xlength {
+ let y0 = mby * 16;
+ let x0 = mbx * 16 + x;
+
+ loop_filter::macroblock_filter(
+ hev_threshold,
+ interior_limit,
+ mbedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ luma_w,
+ );
+ }
+ }
+
+ if chroma_ylength >= 4 {
+ for x in 0usize..chroma_xlength {
+ let y0 = mby * 8;
+ let x0 = mbx * 8 + x;
+
+ loop_filter::macroblock_filter(
+ hev_threshold,
+ interior_limit,
+ mbedge_limit,
+ &mut self.frame.ubuf[..],
+ y0 * chroma_w + x0,
+ chroma_w,
+ );
+ loop_filter::macroblock_filter(
+ hev_threshold,
+ interior_limit,
+ mbedge_limit,
+ &mut self.frame.vbuf[..],
+ y0 * chroma_w + x0,
+ chroma_w,
+ );
+ }
+ }
+ }
+ }
+
+ //filter across horizontal subblock edges within the macroblock
+ if mb.luma_mode == LumaMode::B || !mb.coeffs_skipped {
+ if self.frame.filter_type {
+ for y in (4usize..luma_ylength - 1).step_by(4) {
+ for x in 0..luma_xlength {
+ let y0 = mby * 16 + y;
+ let x0 = mbx * 16 + x;
+
+ loop_filter::simple_segment(
+ sub_bedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ luma_w,
+ );
+ }
+ }
+ } else {
+ if luma_ylength > 3 {
+ for y in (4usize..luma_ylength - 3).step_by(4) {
+ for x in 0..luma_xlength {
+ let y0 = mby * 16 + y;
+ let x0 = mbx * 16 + x;
+
+ loop_filter::subblock_filter(
+ hev_threshold,
+ interior_limit,
+ sub_bedge_limit,
+ &mut self.frame.ybuf[..],
+ y0 * luma_w + x0,
+ luma_w,
+ );
+ }
+ }
+ }
+
+ if chroma_ylength == 8 {
+ for x in 0..chroma_xlength {
+ let y0 = mby * 8 + 4;
+ let x0 = mbx * 8 + x;
+
+ loop_filter::subblock_filter(
+ hev_threshold,
+ interior_limit,
+ sub_bedge_limit,
+ &mut self.frame.ubuf[..],
+ y0 * chroma_w + x0,
+ chroma_w,
+ );
+
+ loop_filter::subblock_filter(
+ hev_threshold,
+ interior_limit,
+ sub_bedge_limit,
+ &mut self.frame.vbuf[..],
+ y0 * chroma_w + x0,
+ chroma_w,
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+
+    // Returns the filter level, interior limit and high edge variance (HEV) threshold.
+ fn calculate_filter_parameters(&self, macroblock: &MacroBlock) -> (u8, u8, u8) {
+ let segment = self.segment[macroblock.segmentid as usize];
+ let mut filter_level = self.frame.filter_level as i32;
+
+ if self.segments_enabled {
+ if segment.delta_values {
+ filter_level += i32::from(segment.loopfilter_level);
+ } else {
+ filter_level = i32::from(segment.loopfilter_level);
+ }
+ }
+
+ filter_level = clamp(filter_level, 0, 63);
+
+ if macroblock.luma_mode == LumaMode::B {
+ filter_level += self.mode_delta[0];
+ }
+
+ let filter_level = clamp(filter_level, 0, 63) as u8;
+
+ //interior limit
+ let mut interior_limit = filter_level;
+
+ if self.frame.sharpness_level > 0 {
+ interior_limit >>= if self.frame.sharpness_level > 4 { 2 } else { 1 };
+
+ if interior_limit > 9 - self.frame.sharpness_level {
+ interior_limit = 9 - self.frame.sharpness_level;
+ }
+ }
+
+ if interior_limit == 0 {
+ interior_limit = 1;
+ }
+
+ //high edge variance threshold
+ let mut hev_threshold = 0;
+
+ #[allow(clippy::collapsible_else_if)]
+ if self.frame.keyframe {
+ if filter_level >= 40 {
+ hev_threshold = 2;
+ } else {
+ hev_threshold = 1;
+ }
+ } else {
+ if filter_level >= 40 {
+ hev_threshold = 3;
+ } else if filter_level >= 20 {
+ hev_threshold = 2;
+ } else if filter_level >= 15 {
+ hev_threshold = 1;
+ }
+ }
+
+ (filter_level, interior_limit, hev_threshold)
+ }
+
+ /// Decodes the current frame
+ pub fn decode_frame(&mut self) -> ImageResult<&Frame> {
+ self.read_frame_header()?;
+
+ for mby in 0..self.mbheight as usize {
+ let p = mby % self.num_partitions as usize;
+ self.left = MacroBlock::default();
+
+ for mbx in 0..self.mbwidth as usize {
+ let mb = self.read_macroblock_header(mbx)?;
+ let blocks = if !mb.coeffs_skipped {
+ self.read_residual_data(&mb, mbx, p)
+ } else {
+ if mb.luma_mode != LumaMode::B {
+ self.left.complexity[0] = 0;
+ self.top[mbx].complexity[0] = 0;
+ }
+
+ for i in 1usize..9 {
+ self.left.complexity[i] = 0;
+ self.top[mbx].complexity[i] = 0;
+ }
+
+ [0i32; 384]
+ };
+
+ self.intra_predict_luma(mbx, mby, &mb, &blocks);
+ self.intra_predict_chroma(mbx, mby, &mb, &blocks);
+
+ self.macroblocks.push(mb);
+ }
+
+ self.left_border = vec![129u8; 1 + 16];
+ }
+
+ //do loop filtering
+ for mby in 0..self.mbheight as usize {
+ for mbx in 0..self.mbwidth as usize {
+ let mb = self.macroblocks[mby * self.mbwidth as usize + mbx];
+ self.loop_filter(mbx, mby, &mb);
+ }
+ }
+
+ Ok(&self.frame)
+ }
+}
+
+impl LumaMode {
+ fn from_i8(val: i8) -> Option<Self> {
+ Some(match val {
+ DC_PRED => LumaMode::DC,
+ V_PRED => LumaMode::V,
+ H_PRED => LumaMode::H,
+ TM_PRED => LumaMode::TM,
+ B_PRED => LumaMode::B,
+ _ => return None,
+ })
+ }
+
+ fn into_intra(self) -> Option<IntraMode> {
+ Some(match self {
+ LumaMode::DC => IntraMode::DC,
+ LumaMode::V => IntraMode::VE,
+ LumaMode::H => IntraMode::HE,
+ LumaMode::TM => IntraMode::TM,
+ LumaMode::B => return None,
+ })
+ }
+}
+
+impl Default for LumaMode {
+ fn default() -> Self {
+ LumaMode::DC
+ }
+}
+
+impl ChromaMode {
+ fn from_i8(val: i8) -> Option<Self> {
+ Some(match val {
+ DC_PRED => ChromaMode::DC,
+ V_PRED => ChromaMode::V,
+ H_PRED => ChromaMode::H,
+ TM_PRED => ChromaMode::TM,
+ _ => return None,
+ })
+ }
+}
+
+impl Default for ChromaMode {
+ fn default() -> Self {
+ ChromaMode::DC
+ }
+}
+
+impl IntraMode {
+ fn from_i8(val: i8) -> Option<Self> {
+ Some(match val {
+ B_DC_PRED => IntraMode::DC,
+ B_TM_PRED => IntraMode::TM,
+ B_VE_PRED => IntraMode::VE,
+ B_HE_PRED => IntraMode::HE,
+ B_LD_PRED => IntraMode::LD,
+ B_RD_PRED => IntraMode::RD,
+ B_VR_PRED => IntraMode::VR,
+ B_VL_PRED => IntraMode::VL,
+ B_HD_PRED => IntraMode::HD,
+ B_HU_PRED => IntraMode::HU,
+ _ => return None,
+ })
+ }
+}
+
+impl Default for IntraMode {
+ fn default() -> Self {
+ IntraMode::DC
+ }
+}
+
+fn init_top_macroblocks(width: usize) -> Vec<MacroBlock> {
+ let mb_width = (width + 15) / 16;
+
+ let mb = MacroBlock {
+ // Section 11.3 #3
+ bpred: [IntraMode::DC; 16],
+ luma_mode: LumaMode::DC,
+ ..MacroBlock::default()
+ };
+
+ vec![mb; mb_width]
+}
+
+fn create_border_luma(mbx: usize, mby: usize, mbw: usize, top: &[u8], left: &[u8]) -> [u8; 357] {
+ let stride = 1usize + 16 + 4;
+ let mut ws = [0u8; (1 + 16) * (1 + 16 + 4)];
+
+    // A: the row of pixels above the macroblock (16 columns plus 4 above-right)
+ {
+ let above = &mut ws[1..stride];
+ if mby == 0 {
+ for above in above.iter_mut() {
+ *above = 127;
+ }
+ } else {
+ for i in 0usize..16 {
+ above[i] = top[mbx * 16 + i];
+ }
+
+ if mbx == mbw - 1 {
+ for above in above.iter_mut().skip(16) {
+ *above = top[mbx * 16 + 15];
+ }
+ } else {
+ for i in 16usize..above.len() {
+ above[i] = top[mbx * 16 + i];
+ }
+ }
+ }
+ }
+
+ for i in 17usize..stride {
+ ws[4 * stride + i] = ws[i];
+ ws[8 * stride + i] = ws[i];
+ ws[12 * stride + i] = ws[i];
+ }
+
+    // L: the column of pixels to the left of the macroblock
+ if mbx == 0 {
+ for i in 0usize..16 {
+ ws[(i + 1) * stride] = 129;
+ }
+ } else {
+ for i in 0usize..16 {
+ ws[(i + 1) * stride] = left[i + 1];
+ }
+ }
+
+    // P: the pixel above and to the left of the macroblock
+ ws[0] = if mby == 0 {
+ 127
+ } else if mbx == 0 {
+ 129
+ } else {
+ left[0]
+ };
+
+ ws
+}
+
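+// Rounding averages used by the subblock predictors: avg3 = (a + 2*b + c + 2) >> 2,
+// avg2 = (a + b + 1) >> 1.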
+fn avg3(left: u8, this: u8, right: u8) -> u8 {
+ let avg = (u16::from(left) + 2 * u16::from(this) + u16::from(right) + 2) >> 2;
+ avg as u8
+}
+
+fn avg2(this: u8, right: u8) -> u8 {
+ let avg = (u16::from(this) + u16::from(right) + 1) >> 1;
+ avg as u8
+}
+
+// Only 16 elements from rblock are used to add residue, so it is restricted to 16 elements
+// to enable SIMD and other optimizations.
+fn add_residue(pblock: &mut [u8], rblock: &[i32; 16], y0: usize, x0: usize, stride: usize) {
+ let mut pos = y0 * stride + x0;
+ for row in rblock.chunks(4) {
+ for (p, &a) in pblock[pos..pos + 4].iter_mut().zip(row.iter()) {
+ *p = clamp(a + i32::from(*p), 0, 255) as u8;
+ }
+ pos += stride;
+ }
+}
+
+fn predict_4x4(ws: &mut [u8], stride: usize, modes: &[IntraMode], resdata: &[i32]) {
+ for sby in 0usize..4 {
+ for sbx in 0usize..4 {
+ let i = sbx + sby * 4;
+ let y0 = sby * 4 + 1;
+ let x0 = sbx * 4 + 1;
+
+ match modes[i] {
+ IntraMode::TM => predict_tmpred(ws, 4, x0, y0, stride),
+ IntraMode::VE => predict_bvepred(ws, x0, y0, stride),
+ IntraMode::HE => predict_bhepred(ws, x0, y0, stride),
+ IntraMode::DC => predict_bdcpred(ws, x0, y0, stride),
+ IntraMode::LD => predict_bldpred(ws, x0, y0, stride),
+ IntraMode::RD => predict_brdpred(ws, x0, y0, stride),
+ IntraMode::VR => predict_bvrpred(ws, x0, y0, stride),
+ IntraMode::VL => predict_bvlpred(ws, x0, y0, stride),
+ IntraMode::HD => predict_bhdpred(ws, x0, y0, stride),
+ IntraMode::HU => predict_bhupred(ws, x0, y0, stride),
+ }
+
+ let rb: &[i32; 16] = resdata[i * 16..][..16].try_into().unwrap();
+ add_residue(ws, rb, y0, x0, stride);
+ }
+ }
+}
+
+fn predict_vpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) {
+ for y in 0usize..size {
+ for x in 0usize..size {
+ a[(x + x0) + stride * (y + y0)] = a[(x + x0) + stride * (y0 + y - 1)];
+ }
+ }
+}
+
+fn predict_hpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) {
+ for y in 0usize..size {
+ for x in 0usize..size {
+ a[(x + x0) + stride * (y + y0)] = a[(x + x0 - 1) + stride * (y0 + y)];
+ }
+ }
+}
+
+fn predict_dcpred(a: &mut [u8], size: usize, stride: usize, above: bool, left: bool) {
+ let mut sum = 0;
+ let mut shf = if size == 8 { 2 } else { 3 };
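+    // shf starts at log2(size) - 1 and each available edge adds one, so when at least one edge is
+    // present, 1 << shf equals the number of border pixels summed and the final shift is a rounded average.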
+
+ if left {
+ for y in 0usize..size {
+ sum += u32::from(a[(y + 1) * stride]);
+ }
+
+ shf += 1;
+ }
+
+ if above {
+ for x in 0usize..size {
+ sum += u32::from(a[x + 1]);
+ }
+
+ shf += 1;
+ }
+
+ let dcval = if !left && !above {
+ 128
+ } else {
+ (sum + (1 << (shf - 1))) >> shf
+ };
+
+ for y in 0usize..size {
+ for x in 0usize..size {
+ a[(x + 1) + stride * (y + 1)] = dcval as u8;
+ }
+ }
+}
+
+fn predict_tmpred(a: &mut [u8], size: usize, x0: usize, y0: usize, stride: usize) {
+ for y in 0usize..size {
+ for x in 0usize..size {
+ let pred = i32::from(a[(y0 + y) * stride + x0 - 1])
+ + i32::from(a[(y0 - 1) * stride + x0 + x])
+ - i32::from(a[(y0 - 1) * stride + x0 - 1]);
+
+ a[(x + x0) + stride * (y + y0)] = clamp(pred, 0, 255) as u8;
+ }
+ }
+}
+
+fn predict_bdcpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let mut v = 4;
+ for i in 0usize..4 {
+ v += u32::from(a[(y0 + i) * stride + x0 - 1]) + u32::from(a[(y0 - 1) * stride + x0 + i]);
+ }
+
+ v >>= 3;
+ for y in 0usize..4 {
+ for x in 0usize..4 {
+ a[x + x0 + stride * (y + y0)] = v as u8;
+ }
+ }
+}
+
+fn topleft_pixel(a: &[u8], x0: usize, y0: usize, stride: usize) -> u8 {
+ a[(y0 - 1) * stride + x0 - 1]
+}
+
+fn top_pixels(a: &[u8], x0: usize, y0: usize, stride: usize) -> (u8, u8, u8, u8, u8, u8, u8, u8) {
+ let pos = (y0 - 1) * stride + x0;
+ let a_slice = &a[pos..pos + 8];
+ let a0 = a_slice[0];
+ let a1 = a_slice[1];
+ let a2 = a_slice[2];
+ let a3 = a_slice[3];
+ let a4 = a_slice[4];
+ let a5 = a_slice[5];
+ let a6 = a_slice[6];
+ let a7 = a_slice[7];
+
+ (a0, a1, a2, a3, a4, a5, a6, a7)
+}
+
+fn left_pixels(a: &[u8], x0: usize, y0: usize, stride: usize) -> (u8, u8, u8, u8) {
+ let l0 = a[y0 * stride + x0 - 1];
+ let l1 = a[(y0 + 1) * stride + x0 - 1];
+ let l2 = a[(y0 + 2) * stride + x0 - 1];
+ let l3 = a[(y0 + 3) * stride + x0 - 1];
+
+ (l0, l1, l2, l3)
+}
+
+fn edge_pixels(
+ a: &[u8],
+ x0: usize,
+ y0: usize,
+ stride: usize,
+) -> (u8, u8, u8, u8, u8, u8, u8, u8, u8) {
+ let pos = (y0 - 1) * stride + x0 - 1;
+ let a_slice = &a[pos..=pos + 4];
+ let e0 = a[pos + 4 * stride];
+ let e1 = a[pos + 3 * stride];
+ let e2 = a[pos + 2 * stride];
+ let e3 = a[pos + stride];
+ let e4 = a_slice[0];
+ let e5 = a_slice[1];
+ let e6 = a_slice[2];
+ let e7 = a_slice[3];
+ let e8 = a_slice[4];
+
+ (e0, e1, e2, e3, e4, e5, e6, e7, e8)
+}
+
+fn predict_bvepred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let p = topleft_pixel(a, x0, y0, stride);
+ let (a0, a1, a2, a3, a4, _, _, _) = top_pixels(a, x0, y0, stride);
+ let avg_1 = avg3(p, a0, a1);
+ let avg_2 = avg3(a0, a1, a2);
+ let avg_3 = avg3(a1, a2, a3);
+ let avg_4 = avg3(a2, a3, a4);
+
+ let avg = [avg_1, avg_2, avg_3, avg_4];
+
+ let mut pos = y0 * stride + x0;
+ for _ in 0..4 {
+ a[pos..=pos + 3].copy_from_slice(&avg);
+ pos += stride;
+ }
+}
+
+fn predict_bhepred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let p = topleft_pixel(a, x0, y0, stride);
+ let (l0, l1, l2, l3) = left_pixels(a, x0, y0, stride);
+
+ let avgs = [
+ avg3(p, l0, l1),
+ avg3(l0, l1, l2),
+ avg3(l1, l2, l3),
+ avg3(l2, l3, l3),
+ ];
+
+ let mut pos = y0 * stride + x0;
+ for &avg in avgs.iter() {
+ for a_p in a[pos..=pos + 3].iter_mut() {
+ *a_p = avg;
+ }
+ pos += stride;
+ }
+}
+
+fn predict_bldpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (a0, a1, a2, a3, a4, a5, a6, a7) = top_pixels(a, x0, y0, stride);
+
+ let avgs = [
+ avg3(a0, a1, a2),
+ avg3(a1, a2, a3),
+ avg3(a2, a3, a4),
+ avg3(a3, a4, a5),
+ avg3(a4, a5, a6),
+ avg3(a5, a6, a7),
+ avg3(a6, a7, a7),
+ ];
+
+ let mut pos = y0 * stride + x0;
+
+ for i in 0..4 {
+ a[pos..=pos + 3].copy_from_slice(&avgs[i..=i + 3]);
+ pos += stride;
+ }
+}
+
+fn predict_brdpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (e0, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(a, x0, y0, stride);
+
+ let avgs = [
+ avg3(e0, e1, e2),
+ avg3(e1, e2, e3),
+ avg3(e2, e3, e4),
+ avg3(e3, e4, e5),
+ avg3(e4, e5, e6),
+ avg3(e5, e6, e7),
+ avg3(e6, e7, e8),
+ ];
+ let mut pos = y0 * stride + x0;
+
+ for i in 0..4 {
+ a[pos..=pos + 3].copy_from_slice(&avgs[3 - i..7 - i]);
+ pos += stride;
+ }
+}
+
+fn predict_bvrpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (_, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(a, x0, y0, stride);
+
+ a[(y0 + 3) * stride + x0] = avg3(e1, e2, e3);
+ a[(y0 + 2) * stride + x0] = avg3(e2, e3, e4);
+ a[(y0 + 3) * stride + x0 + 1] = avg3(e3, e4, e5);
+ a[(y0 + 1) * stride + x0] = avg3(e3, e4, e5);
+ a[(y0 + 2) * stride + x0 + 1] = avg2(e4, e5);
+ a[y0 * stride + x0] = avg2(e4, e5);
+ a[(y0 + 3) * stride + x0 + 2] = avg3(e4, e5, e6);
+ a[(y0 + 1) * stride + x0 + 1] = avg3(e4, e5, e6);
+ a[(y0 + 2) * stride + x0 + 2] = avg2(e5, e6);
+ a[y0 * stride + x0 + 1] = avg2(e5, e6);
+ a[(y0 + 3) * stride + x0 + 3] = avg3(e5, e6, e7);
+ a[(y0 + 1) * stride + x0 + 2] = avg3(e5, e6, e7);
+ a[(y0 + 2) * stride + x0 + 3] = avg2(e6, e7);
+ a[y0 * stride + x0 + 2] = avg2(e6, e7);
+ a[(y0 + 1) * stride + x0 + 3] = avg3(e6, e7, e8);
+ a[y0 * stride + x0 + 3] = avg2(e7, e8);
+}
+
+fn predict_bvlpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (a0, a1, a2, a3, a4, a5, a6, a7) = top_pixels(a, x0, y0, stride);
+
+ a[y0 * stride + x0] = avg2(a0, a1);
+ a[(y0 + 1) * stride + x0] = avg3(a0, a1, a2);
+ a[(y0 + 2) * stride + x0] = avg2(a1, a2);
+ a[y0 * stride + x0 + 1] = avg2(a1, a2);
+ a[(y0 + 1) * stride + x0 + 1] = avg3(a1, a2, a3);
+ a[(y0 + 3) * stride + x0] = avg3(a1, a2, a3);
+ a[(y0 + 2) * stride + x0 + 1] = avg2(a2, a3);
+ a[y0 * stride + x0 + 2] = avg2(a2, a3);
+ a[(y0 + 3) * stride + x0 + 1] = avg3(a2, a3, a4);
+ a[(y0 + 1) * stride + x0 + 2] = avg3(a2, a3, a4);
+ a[(y0 + 2) * stride + x0 + 2] = avg2(a3, a4);
+ a[y0 * stride + x0 + 3] = avg2(a3, a4);
+ a[(y0 + 3) * stride + x0 + 2] = avg3(a3, a4, a5);
+ a[(y0 + 1) * stride + x0 + 3] = avg3(a3, a4, a5);
+ a[(y0 + 2) * stride + x0 + 3] = avg3(a4, a5, a6);
+ a[(y0 + 3) * stride + x0 + 3] = avg3(a5, a6, a7);
+}
+
+fn predict_bhdpred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (e0, e1, e2, e3, e4, e5, e6, e7, _) = edge_pixels(a, x0, y0, stride);
+
+ a[(y0 + 3) * stride + x0] = avg2(e0, e1);
+ a[(y0 + 3) * stride + x0 + 1] = avg3(e0, e1, e2);
+ a[(y0 + 2) * stride + x0] = avg2(e1, e2);
+ a[(y0 + 3) * stride + x0 + 2] = avg2(e1, e2);
+ a[(y0 + 2) * stride + x0 + 1] = avg3(e1, e2, e3);
+ a[(y0 + 3) * stride + x0 + 3] = avg3(e1, e2, e3);
+ a[(y0 + 2) * stride + x0 + 2] = avg2(e2, e3);
+ a[(y0 + 1) * stride + x0] = avg2(e2, e3);
+ a[(y0 + 2) * stride + x0 + 3] = avg3(e2, e3, e4);
+ a[(y0 + 1) * stride + x0 + 1] = avg3(e2, e3, e4);
+ a[(y0 + 1) * stride + x0 + 2] = avg2(e3, e4);
+ a[y0 * stride + x0] = avg2(e3, e4);
+ a[(y0 + 1) * stride + x0 + 3] = avg3(e3, e4, e5);
+ a[y0 * stride + x0 + 1] = avg3(e3, e4, e5);
+ a[y0 * stride + x0 + 2] = avg3(e4, e5, e6);
+ a[y0 * stride + x0 + 3] = avg3(e5, e6, e7);
+}
+
+fn predict_bhupred(a: &mut [u8], x0: usize, y0: usize, stride: usize) {
+ let (l0, l1, l2, l3) = left_pixels(a, x0, y0, stride);
+
+ a[y0 * stride + x0] = avg2(l0, l1);
+ a[y0 * stride + x0 + 1] = avg3(l0, l1, l2);
+ a[y0 * stride + x0 + 2] = avg2(l1, l2);
+ a[(y0 + 1) * stride + x0] = avg2(l1, l2);
+ a[y0 * stride + x0 + 3] = avg3(l1, l2, l3);
+ a[(y0 + 1) * stride + x0 + 1] = avg3(l1, l2, l3);
+ a[(y0 + 1) * stride + x0 + 2] = avg2(l2, l3);
+ a[(y0 + 2) * stride + x0] = avg2(l2, l3);
+ a[(y0 + 1) * stride + x0 + 3] = avg3(l2, l3, l3);
+ a[(y0 + 2) * stride + x0 + 1] = avg3(l2, l3, l3);
+ a[(y0 + 2) * stride + x0 + 2] = l3;
+ a[(y0 + 2) * stride + x0 + 3] = l3;
+ a[(y0 + 3) * stride + x0] = l3;
+ a[(y0 + 3) * stride + x0 + 1] = l3;
+ a[(y0 + 3) * stride + x0 + 2] = l3;
+ a[(y0 + 3) * stride + x0 + 3] = l3;
+}
+
+#[cfg(test)]
+mod test {
+
+ #[cfg(feature = "benchmarks")]
+ extern crate test;
+ use super::{
+ add_residue, avg2, avg3, edge_pixels, predict_bhepred, predict_bldpred, predict_brdpred,
+ predict_bvepred, top_pixels,
+ };
+ #[cfg(feature = "benchmarks")]
+ use super::{predict_4x4, IntraMode};
+ #[cfg(feature = "benchmarks")]
+ use test::{black_box, Bencher};
+
+ #[cfg(feature = "benchmarks")]
+ const W: usize = 256;
+ #[cfg(feature = "benchmarks")]
+ const H: usize = 256;
+
+ #[cfg(feature = "benchmarks")]
+ fn make_sample_image() -> Vec<u8> {
+ let mut v = Vec::with_capacity((W * H * 4) as usize);
+ for c in 0u8..=255 {
+ for k in 0u8..=255 {
+ v.push(c);
+ v.push(0);
+ v.push(0);
+ v.push(k);
+ }
+ }
+ v
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_predict_4x4(b: &mut Bencher) {
+ let mut v = black_box(make_sample_image());
+
+ let res_data = vec![1i32; W * H * 4];
+ let modes = [
+ IntraMode::TM,
+ IntraMode::VE,
+ IntraMode::HE,
+ IntraMode::DC,
+ IntraMode::LD,
+ IntraMode::RD,
+ IntraMode::VR,
+ IntraMode::VL,
+ IntraMode::HD,
+ IntraMode::HU,
+ IntraMode::TM,
+ IntraMode::VE,
+ IntraMode::HE,
+ IntraMode::DC,
+ IntraMode::LD,
+ IntraMode::RD,
+ ];
+
+ b.iter(|| {
+ black_box(predict_4x4(&mut v, W * 2, &modes, &res_data));
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_predict_bvepred(b: &mut Bencher) {
+ let mut v = make_sample_image();
+
+ b.iter(|| {
+ predict_bvepred(black_box(&mut v), 5, 5, W * 2);
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_predict_bldpred(b: &mut Bencher) {
+ let mut v = black_box(make_sample_image());
+
+ b.iter(|| {
+ black_box(predict_bldpred(black_box(&mut v), 5, 5, W * 2));
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_predict_brdpred(b: &mut Bencher) {
+ let mut v = black_box(make_sample_image());
+
+ b.iter(|| {
+ black_box(predict_brdpred(black_box(&mut v), 5, 5, W * 2));
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_predict_bhepred(b: &mut Bencher) {
+ let mut v = black_box(make_sample_image());
+
+ b.iter(|| {
+ black_box(predict_bhepred(black_box(&mut v), 5, 5, W * 2));
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_top_pixels(b: &mut Bencher) {
+ let v = black_box(make_sample_image());
+
+ b.iter(|| {
+ black_box(top_pixels(black_box(&v), 5, 5, W * 2));
+ });
+ }
+
+ #[cfg(feature = "benchmarks")]
+ #[bench]
+ fn bench_edge_pixels(b: &mut Bencher) {
+ let v = black_box(make_sample_image());
+
+ b.iter(|| {
+ black_box(edge_pixels(black_box(&v), 5, 5, W * 2));
+ });
+ }
+
+ #[test]
+ fn test_avg2() {
+ for i in 0u8..=255 {
+ for j in 0u8..=255 {
+ let ceil_avg = ((i as f32) + (j as f32)) / 2.0;
+ let ceil_avg = ceil_avg.ceil() as u8;
+ assert_eq!(
+ ceil_avg,
+ avg2(i, j),
+ "avg2({}, {}), expected {}, got {}.",
+ i,
+ j,
+ ceil_avg,
+ avg2(i, j)
+ );
+ }
+ }
+ }
+
+ #[test]
+ fn test_avg2_specific() {
+ assert_eq!(
+ 255,
+ avg2(255, 255),
+ "avg2(255, 255), expected 255, got {}.",
+ avg2(255, 255)
+ );
+ assert_eq!(1, avg2(1, 1), "avg2(1, 1), expected 1, got {}.", avg2(1, 1));
+ assert_eq!(2, avg2(2, 1), "avg2(2, 1), expected 2, got {}.", avg2(2, 1));
+ }
+
+ #[test]
+ fn test_avg3() {
+ for i in 0u8..=255 {
+ for j in 0u8..=255 {
+ for k in 0u8..=255 {
+                    let floor_avg = ((i as f32) + 2.0 * (j as f32) + (k as f32) + 2.0) / 4.0;
+ let floor_avg = floor_avg.floor() as u8;
+ assert_eq!(
+ floor_avg,
+ avg3(i, j, k),
+ "avg3({}, {}, {}), expected {}, got {}.",
+ i,
+ j,
+ k,
+ floor_avg,
+ avg3(i, j, k)
+ );
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_edge_pixels() {
+ #[rustfmt::skip]
+ let im = vec![5, 6, 7, 8, 9,
+ 4, 0, 0, 0, 0,
+ 3, 0, 0, 0, 0,
+ 2, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0];
+ let (e0, e1, e2, e3, e4, e5, e6, e7, e8) = edge_pixels(&im, 1, 1, 5);
+ assert_eq!(e0, 1);
+ assert_eq!(e1, 2);
+ assert_eq!(e2, 3);
+ assert_eq!(e3, 4);
+ assert_eq!(e4, 5);
+ assert_eq!(e5, 6);
+ assert_eq!(e6, 7);
+ assert_eq!(e7, 8);
+ assert_eq!(e8, 9);
+ }
+
+ #[test]
+ fn test_top_pixels() {
+ #[rustfmt::skip]
+ let im = vec![1, 2, 3, 4, 5, 6, 7, 8,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0];
+ let (e0, e1, e2, e3, e4, e5, e6, e7) = top_pixels(&im, 0, 1, 8);
+ assert_eq!(e0, 1);
+ assert_eq!(e1, 2);
+ assert_eq!(e2, 3);
+ assert_eq!(e3, 4);
+ assert_eq!(e4, 5);
+ assert_eq!(e5, 6);
+ assert_eq!(e6, 7);
+ assert_eq!(e7, 8);
+ }
+
+ #[test]
+ fn test_add_residue() {
+ let mut pblock = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
+ let rblock = [
+ -1, -2, -3, -4, 250, 249, 248, 250, -10, -18, -192, -17, -3, 15, 18, 9,
+ ];
+ let expected: [u8; 16] = [0, 0, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 10, 29, 33, 25];
+
+ add_residue(&mut pblock, &rblock, 0, 0, 4);
+
+ for (&e, &i) in expected.iter().zip(&pblock) {
+ assert_eq!(e, i);
+ }
+ }
+
+ #[test]
+ fn test_predict_bhepred() {
+ #[rustfmt::skip]
+ let expected: Vec<u8> = vec![5, 0, 0, 0, 0,
+ 4, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1];
+
+ #[rustfmt::skip]
+ let mut im = vec![5, 0, 0, 0, 0,
+ 4, 0, 0, 0, 0,
+ 3, 0, 0, 0, 0,
+ 2, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0];
+ predict_bhepred(&mut im, 1, 1, 5);
+ for (&e, i) in expected.iter().zip(im) {
+ assert_eq!(e, i);
+ }
+ }
+
+ #[test]
+ fn test_predict_brdpred() {
+ #[rustfmt::skip]
+ let expected: Vec<u8> = vec![5, 6, 7, 8, 9,
+ 4, 5, 6, 7, 8,
+ 3, 4, 5, 6, 7,
+ 2, 3, 4, 5, 6,
+ 1, 2, 3, 4, 5];
+
+ #[rustfmt::skip]
+ let mut im = vec![5, 6, 7, 8, 9,
+ 4, 0, 0, 0, 0,
+ 3, 0, 0, 0, 0,
+ 2, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0];
+ predict_brdpred(&mut im, 1, 1, 5);
+ for (&e, i) in expected.iter().zip(im) {
+ assert_eq!(e, i);
+ }
+ }
+
+ #[test]
+ fn test_predict_bldpred() {
+ #[rustfmt::skip]
+ let mut im: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0];
+ let avg_1 = 2u8;
+ let avg_2 = 3u8;
+ let avg_3 = 4u8;
+ let avg_4 = 5u8;
+ let avg_5 = 6u8;
+ let avg_6 = 7u8;
+ let avg_7 = 8u8;
+
+ predict_bldpred(&mut im, 0, 1, 8);
+
+ assert_eq!(im[8], avg_1);
+ assert_eq!(im[9], avg_2);
+ assert_eq!(im[10], avg_3);
+ assert_eq!(im[11], avg_4);
+ assert_eq!(im[16], avg_2);
+ assert_eq!(im[17], avg_3);
+ assert_eq!(im[18], avg_4);
+ assert_eq!(im[19], avg_5);
+ assert_eq!(im[24], avg_3);
+ assert_eq!(im[25], avg_4);
+ assert_eq!(im[26], avg_5);
+ assert_eq!(im[27], avg_6);
+ assert_eq!(im[32], avg_4);
+ assert_eq!(im[33], avg_5);
+ assert_eq!(im[34], avg_6);
+ assert_eq!(im[35], avg_7);
+ }
+
+ #[test]
+ fn test_predict_bvepred() {
+ #[rustfmt::skip]
+ let mut im: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0];
+ let avg_1 = 2u8;
+ let avg_2 = 3u8;
+ let avg_3 = 4u8;
+ let avg_4 = 5u8;
+
+ predict_bvepred(&mut im, 1, 1, 9);
+
+ assert_eq!(im[10], avg_1);
+ assert_eq!(im[11], avg_2);
+ assert_eq!(im[12], avg_3);
+ assert_eq!(im[13], avg_4);
+ assert_eq!(im[19], avg_1);
+ assert_eq!(im[20], avg_2);
+ assert_eq!(im[21], avg_3);
+ assert_eq!(im[22], avg_4);
+ assert_eq!(im[28], avg_1);
+ assert_eq!(im[29], avg_2);
+ assert_eq!(im[30], avg_3);
+ assert_eq!(im[31], avg_4);
+ assert_eq!(im[37], avg_1);
+ assert_eq!(im[38], avg_2);
+ assert_eq!(im[39], avg_3);
+ assert_eq!(im[40], avg_4);
+ }
+}
diff --git a/vendor/image/src/color.rs b/vendor/image/src/color.rs
new file mode 100644
index 0000000..57a8511
--- /dev/null
+++ b/vendor/image/src/color.rs
@@ -0,0 +1,985 @@
+use std::ops::{Index, IndexMut};
+
+use num_traits::{NumCast, ToPrimitive, Zero};
+
+use crate::traits::{Enlargeable, Pixel, Primitive};
+
+/// An enumeration over supported color types and bit depths
+#[derive(Copy, PartialEq, Eq, Debug, Clone, Hash)]
+#[non_exhaustive]
+pub enum ColorType {
+ /// Pixel is 8-bit luminance
+ L8,
+ /// Pixel is 8-bit luminance with an alpha channel
+ La8,
+ /// Pixel contains 8-bit R, G and B channels
+ Rgb8,
+ /// Pixel is 8-bit RGB with an alpha channel
+ Rgba8,
+
+ /// Pixel is 16-bit luminance
+ L16,
+ /// Pixel is 16-bit luminance with an alpha channel
+ La16,
+ /// Pixel is 16-bit RGB
+ Rgb16,
+ /// Pixel is 16-bit RGBA
+ Rgba16,
+
+ /// Pixel is 32-bit float RGB
+ Rgb32F,
+ /// Pixel is 32-bit float RGBA
+ Rgba32F,
+}
+
+impl ColorType {
+    /// Returns the number of bytes contained in a pixel of this `ColorType`.
+ pub fn bytes_per_pixel(self) -> u8 {
+ match self {
+ ColorType::L8 => 1,
+ ColorType::L16 | ColorType::La8 => 2,
+ ColorType::Rgb8 => 3,
+ ColorType::Rgba8 | ColorType::La16 => 4,
+ ColorType::Rgb16 => 6,
+ ColorType::Rgba16 => 8,
+ ColorType::Rgb32F => 3 * 4,
+ ColorType::Rgba32F => 4 * 4,
+ }
+ }
+
+ /// Returns if there is an alpha channel.
+ pub fn has_alpha(self) -> bool {
+ use ColorType::*;
+ match self {
+ L8 | L16 | Rgb8 | Rgb16 | Rgb32F => false,
+ La8 | Rgba8 | La16 | Rgba16 | Rgba32F => true,
+ }
+ }
+
+ /// Returns false if the color scheme is grayscale, true otherwise.
+ pub fn has_color(self) -> bool {
+ use ColorType::*;
+ match self {
+ L8 | L16 | La8 | La16 => false,
+ Rgb8 | Rgb16 | Rgba8 | Rgba16 | Rgb32F | Rgba32F => true,
+ }
+ }
+
+    /// Returns the number of bits contained in a pixel of this `ColorType` (which will always be
+ /// a multiple of 8).
+ pub fn bits_per_pixel(self) -> u16 {
+ <u16 as From<u8>>::from(self.bytes_per_pixel()) * 8
+ }
+
+ /// Returns the number of color channels that make up this pixel
+ pub fn channel_count(self) -> u8 {
+ let e: ExtendedColorType = self.into();
+ e.channel_count()
+ }
+}
+
+/// An enumeration of color types encountered in image formats.
+///
+/// This is not exhaustive over all existing image formats but should be granular enough to allow
+/// round tripping of decoding and encoding as much as possible. The variants will be extended as
+/// necessary to enable this.
+///
+/// Another purpose is to advise users of a rough estimate of the accuracy and effort of the
+/// decoding from and encoding to such an image format.
+#[derive(Copy, PartialEq, Eq, Debug, Clone, Hash)]
+#[non_exhaustive]
+pub enum ExtendedColorType {
+ /// Pixel is 8-bit alpha
+ A8,
+ /// Pixel is 1-bit luminance
+ L1,
+ /// Pixel is 1-bit luminance with an alpha channel
+ La1,
+ /// Pixel contains 1-bit R, G and B channels
+ Rgb1,
+ /// Pixel is 1-bit RGB with an alpha channel
+ Rgba1,
+ /// Pixel is 2-bit luminance
+ L2,
+ /// Pixel is 2-bit luminance with an alpha channel
+ La2,
+ /// Pixel contains 2-bit R, G and B channels
+ Rgb2,
+ /// Pixel is 2-bit RGB with an alpha channel
+ Rgba2,
+ /// Pixel is 4-bit luminance
+ L4,
+ /// Pixel is 4-bit luminance with an alpha channel
+ La4,
+ /// Pixel contains 4-bit R, G and B channels
+ Rgb4,
+ /// Pixel is 4-bit RGB with an alpha channel
+ Rgba4,
+ /// Pixel is 8-bit luminance
+ L8,
+ /// Pixel is 8-bit luminance with an alpha channel
+ La8,
+ /// Pixel contains 8-bit R, G and B channels
+ Rgb8,
+ /// Pixel is 8-bit RGB with an alpha channel
+ Rgba8,
+ /// Pixel is 16-bit luminance
+ L16,
+ /// Pixel is 16-bit luminance with an alpha channel
+ La16,
+ /// Pixel contains 16-bit R, G and B channels
+ Rgb16,
+ /// Pixel is 16-bit RGB with an alpha channel
+ Rgba16,
+ /// Pixel contains 8-bit B, G and R channels
+ Bgr8,
+ /// Pixel is 8-bit BGR with an alpha channel
+ Bgra8,
+
+ // TODO f16 types?
+ /// Pixel is 32-bit float RGB
+ Rgb32F,
+ /// Pixel is 32-bit float RGBA
+ Rgba32F,
+
+ /// Pixel is of unknown color type with the specified bits per pixel. This can apply to pixels
+ /// which are associated with an external palette. In that case, the pixel value is an index
+ /// into the palette.
+ Unknown(u8),
+}
+
+impl ExtendedColorType {
+ /// Get the number of channels for colors of this type.
+ ///
+ /// Note that the `Unknown` variant returns a value of `1` since pixels can only be treated as
+ /// an opaque datum by the library.
+ pub fn channel_count(self) -> u8 {
+ match self {
+ ExtendedColorType::A8
+ | ExtendedColorType::L1
+ | ExtendedColorType::L2
+ | ExtendedColorType::L4
+ | ExtendedColorType::L8
+ | ExtendedColorType::L16
+ | ExtendedColorType::Unknown(_) => 1,
+ ExtendedColorType::La1
+ | ExtendedColorType::La2
+ | ExtendedColorType::La4
+ | ExtendedColorType::La8
+ | ExtendedColorType::La16 => 2,
+ ExtendedColorType::Rgb1
+ | ExtendedColorType::Rgb2
+ | ExtendedColorType::Rgb4
+ | ExtendedColorType::Rgb8
+ | ExtendedColorType::Rgb16
+ | ExtendedColorType::Rgb32F
+ | ExtendedColorType::Bgr8 => 3,
+ ExtendedColorType::Rgba1
+ | ExtendedColorType::Rgba2
+ | ExtendedColorType::Rgba4
+ | ExtendedColorType::Rgba8
+ | ExtendedColorType::Rgba16
+ | ExtendedColorType::Rgba32F
+ | ExtendedColorType::Bgra8 => 4,
+ }
+ }
+}
+impl From<ColorType> for ExtendedColorType {
+ fn from(c: ColorType) -> Self {
+ match c {
+ ColorType::L8 => ExtendedColorType::L8,
+ ColorType::La8 => ExtendedColorType::La8,
+ ColorType::Rgb8 => ExtendedColorType::Rgb8,
+ ColorType::Rgba8 => ExtendedColorType::Rgba8,
+ ColorType::L16 => ExtendedColorType::L16,
+ ColorType::La16 => ExtendedColorType::La16,
+ ColorType::Rgb16 => ExtendedColorType::Rgb16,
+ ColorType::Rgba16 => ExtendedColorType::Rgba16,
+ ColorType::Rgb32F => ExtendedColorType::Rgb32F,
+ ColorType::Rgba32F => ExtendedColorType::Rgba32F,
+ }
+ }
+}
+
+macro_rules! define_colors {
+ {$(
+ $(#[$doc:meta])*
+ pub struct $ident:ident<T: $($bound:ident)*>([T; $channels:expr, $alphas:expr])
+ = $interpretation:literal;
+ )*} => {
+
+$( // START Structure definitions
+
+$(#[$doc])*
+#[derive(PartialEq, Eq, Clone, Debug, Copy, Hash)]
+#[repr(C)]
+#[allow(missing_docs)]
+pub struct $ident<T> (pub [T; $channels]);
+
+impl<T: $($bound+)*> Pixel for $ident<T> {
+ type Subpixel = T;
+
+ const CHANNEL_COUNT: u8 = $channels;
+
+ #[inline(always)]
+ fn channels(&self) -> &[T] {
+ &self.0
+ }
+
+ #[inline(always)]
+ fn channels_mut(&mut self) -> &mut [T] {
+ &mut self.0
+ }
+
+ const COLOR_MODEL: &'static str = $interpretation;
+
+ fn channels4(&self) -> (T, T, T, T) {
+ const CHANNELS: usize = $channels;
+ let mut channels = [T::DEFAULT_MAX_VALUE; 4];
+ channels[0..CHANNELS].copy_from_slice(&self.0);
+ (channels[0], channels[1], channels[2], channels[3])
+ }
+
+ fn from_channels(a: T, b: T, c: T, d: T,) -> $ident<T> {
+ const CHANNELS: usize = $channels;
+ *<$ident<T> as Pixel>::from_slice(&[a, b, c, d][..CHANNELS])
+ }
+
+ fn from_slice(slice: &[T]) -> &$ident<T> {
+ assert_eq!(slice.len(), $channels);
+ unsafe { &*(slice.as_ptr() as *const $ident<T>) }
+ }
+ fn from_slice_mut(slice: &mut [T]) -> &mut $ident<T> {
+ assert_eq!(slice.len(), $channels);
+ unsafe { &mut *(slice.as_mut_ptr() as *mut $ident<T>) }
+ }
+
+ fn to_rgb(&self) -> Rgb<T> {
+ let mut pix = Rgb([Zero::zero(), Zero::zero(), Zero::zero()]);
+ pix.from_color(self);
+ pix
+ }
+
+ fn to_rgba(&self) -> Rgba<T> {
+ let mut pix = Rgba([Zero::zero(), Zero::zero(), Zero::zero(), Zero::zero()]);
+ pix.from_color(self);
+ pix
+ }
+
+ fn to_luma(&self) -> Luma<T> {
+ let mut pix = Luma([Zero::zero()]);
+ pix.from_color(self);
+ pix
+ }
+
+ fn to_luma_alpha(&self) -> LumaA<T> {
+ let mut pix = LumaA([Zero::zero(), Zero::zero()]);
+ pix.from_color(self);
+ pix
+ }
+
+ fn map<F>(& self, f: F) -> $ident<T> where F: FnMut(T) -> T {
+ let mut this = (*self).clone();
+ this.apply(f);
+ this
+ }
+
+ fn apply<F>(&mut self, mut f: F) where F: FnMut(T) -> T {
+ for v in &mut self.0 {
+ *v = f(*v)
+ }
+ }
+
+ fn map_with_alpha<F, G>(&self, f: F, g: G) -> $ident<T> where F: FnMut(T) -> T, G: FnMut(T) -> T {
+ let mut this = (*self).clone();
+ this.apply_with_alpha(f, g);
+ this
+ }
+
+ fn apply_with_alpha<F, G>(&mut self, mut f: F, mut g: G) where F: FnMut(T) -> T, G: FnMut(T) -> T {
+ const ALPHA: usize = $channels - $alphas;
+ for v in self.0[..ALPHA].iter_mut() {
+ *v = f(*v)
+ }
+ // The branch of this match is `const`. This way ensures that no subexpression fails the
+ // `const_err` lint (the expression `self.0[ALPHA]` would).
+ if let Some(v) = self.0.get_mut(ALPHA) {
+ *v = g(*v)
+ }
+ }
+
+ fn map2<F>(&self, other: &Self, f: F) -> $ident<T> where F: FnMut(T, T) -> T {
+ let mut this = (*self).clone();
+ this.apply2(other, f);
+ this
+ }
+
+ fn apply2<F>(&mut self, other: &$ident<T>, mut f: F) where F: FnMut(T, T) -> T {
+ for (a, &b) in self.0.iter_mut().zip(other.0.iter()) {
+ *a = f(*a, b)
+ }
+ }
+
+ fn invert(&mut self) {
+ Invert::invert(self)
+ }
+
+ fn blend(&mut self, other: &$ident<T>) {
+ Blend::blend(self, other)
+ }
+}
+
+impl<T> Index<usize> for $ident<T> {
+ type Output = T;
+ #[inline(always)]
+ fn index(&self, _index: usize) -> &T {
+ &self.0[_index]
+ }
+}
+
+impl<T> IndexMut<usize> for $ident<T> {
+ #[inline(always)]
+ fn index_mut(&mut self, _index: usize) -> &mut T {
+ &mut self.0[_index]
+ }
+}
+
+impl<T> From<[T; $channels]> for $ident<T> {
+ fn from(c: [T; $channels]) -> Self {
+ Self(c)
+ }
+}
+
+)* // END Structure definitions
+
+ }
+}
+
+define_colors! {
+ /// RGB colors.
+ ///
+ /// For the purpose of color conversion, as well as blending, the implementation of `Pixel`
+ /// assumes an `sRGB` color space of its data.
+ pub struct Rgb<T: Primitive Enlargeable>([T; 3, 0]) = "RGB";
+ /// Grayscale colors.
+ pub struct Luma<T: Primitive>([T; 1, 0]) = "Y";
+ /// RGB colors + alpha channel
+ pub struct Rgba<T: Primitive Enlargeable>([T; 4, 1]) = "RGBA";
+ /// Grayscale colors + alpha channel
+ pub struct LumaA<T: Primitive>([T; 2, 1]) = "YA";
+}
+
+/// Convert from one pixel component type to another. For example, convert from `u8` to `f32` pixel values.
+pub trait FromPrimitive<Component> {
+ /// Converts from any pixel component type to this type.
+ fn from_primitive(component: Component) -> Self;
+}
+
+impl<T: Primitive> FromPrimitive<T> for T {
+ fn from_primitive(sample: T) -> Self {
+ sample
+ }
+}
+
+// from f32:
+// Note that NumCast::from truncates towards zero, so the to-integer conversions round explicitly
+// (with `round()`, or by adding a bias) before casting.
+
+impl FromPrimitive<f32> for u8 {
+ fn from_primitive(float: f32) -> Self {
+ let inner = (float.clamp(0.0, 1.0) * u8::MAX as f32).round();
+ NumCast::from(inner).unwrap()
+ }
+}
+
+impl FromPrimitive<f32> for u16 {
+ fn from_primitive(float: f32) -> Self {
+ let inner = (float.clamp(0.0, 1.0) * u16::MAX as f32).round();
+ NumCast::from(inner).unwrap()
+ }
+}
+
+// from u16:
+
+impl FromPrimitive<u16> for u8 {
+ fn from_primitive(c16: u16) -> Self {
+ fn from(c: impl Into<u32>) -> u32 {
+ c.into()
+ }
+ // The input c is the numerator of `c / u16::MAX`.
+ // Derive numerator of `num / u8::MAX`, with rounding.
+ //
+ // This method is based on the inverse (see FromPrimitive<u8> for u16) and was tested
+ // exhaustively in Python. It's the same as the reference function:
+ // round(c * (2**8 - 1) / (2**16 - 1))
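+        // For example, 0xFFFF -> (65535 + 128) / 257 = 255, and 0x8080 = 128 * 257 -> 128.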
+ NumCast::from((from(c16) + 128) / 257).unwrap()
+ }
+}
+
+impl FromPrimitive<u16> for f32 {
+ fn from_primitive(int: u16) -> Self {
+ (int as f32 / u16::MAX as f32).clamp(0.0, 1.0)
+ }
+}
+
+// from u8:
+
+impl FromPrimitive<u8> for f32 {
+ fn from_primitive(int: u8) -> Self {
+ (int as f32 / u8::MAX as f32).clamp(0.0, 1.0)
+ }
+}
+
+impl FromPrimitive<u8> for u16 {
+ fn from_primitive(c8: u8) -> Self {
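+        // Replicating the byte (0xAB -> 0xABAB) multiplies by 257, which is exactly
+        // round(c * 65535 / 255) and inverts the u16 -> u8 mapping above.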
+ let x = c8.to_u64().unwrap();
+ NumCast::from((x << 8) | x).unwrap()
+ }
+}
+
+/// Provides color conversions for the different pixel types.
+pub trait FromColor<Other> {
+ /// Changes `self` to represent `Other` in the color space of `Self`
+ fn from_color(&mut self, _: &Other);
+}
+
+/// Copy-based conversions to target pixel types using `FromColor`.
+// FIXME: this trait should be removed and replaced with real color space models
+// rather than assuming sRGB.
+pub(crate) trait IntoColor<Other> {
+ /// Constructs a pixel of the target type and converts this pixel into it.
+ fn into_color(&self) -> Other;
+}
+
+impl<O, S> IntoColor<O> for S
+where
+ O: Pixel + FromColor<S>,
+{
+ fn into_color(&self) -> O {
+ // Note we cannot use Pixel::CHANNELS_COUNT here to directly construct
+ // the pixel due to a current bug/limitation of consts.
+ #[allow(deprecated)]
+ let mut pix = O::from_channels(Zero::zero(), Zero::zero(), Zero::zero(), Zero::zero());
+ pix.from_color(self);
+ pix
+ }
+}
+
+/// Coefficients to transform from sRGB to a CIE Y (luminance) value.
+const SRGB_LUMA: [u32; 3] = [2126, 7152, 722];
+const SRGB_LUMA_DIV: u32 = 10000;
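+// Rec. 709 luma weights (Y = 0.2126 R + 0.7152 G + 0.0722 B), scaled by 10000 so they can be
+// applied without floating point.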
+
+#[inline]
+fn rgb_to_luma<T: Primitive + Enlargeable>(rgb: &[T]) -> T {
+ let l = <T::Larger as NumCast>::from(SRGB_LUMA[0]).unwrap() * rgb[0].to_larger()
+ + <T::Larger as NumCast>::from(SRGB_LUMA[1]).unwrap() * rgb[1].to_larger()
+ + <T::Larger as NumCast>::from(SRGB_LUMA[2]).unwrap() * rgb[2].to_larger();
+ T::clamp_from(l / <T::Larger as NumCast>::from(SRGB_LUMA_DIV).unwrap())
+}
+
+// `FromColor` for Luma
+impl<S: Primitive, T: Primitive> FromColor<Luma<S>> for Luma<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &Luma<S>) {
+ let own = self.channels_mut();
+ let other = other.channels();
+ own[0] = T::from_primitive(other[0]);
+ }
+}
+
+impl<S: Primitive, T: Primitive> FromColor<LumaA<S>> for Luma<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &LumaA<S>) {
+ self.channels_mut()[0] = T::from_primitive(other.channels()[0])
+ }
+}
+
+impl<S: Primitive + Enlargeable, T: Primitive> FromColor<Rgb<S>> for Luma<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &Rgb<S>) {
+ let gray = self.channels_mut();
+ let rgb = other.channels();
+ gray[0] = T::from_primitive(rgb_to_luma(rgb));
+ }
+}
+
+impl<S: Primitive + Enlargeable, T: Primitive> FromColor<Rgba<S>> for Luma<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &Rgba<S>) {
+ let gray = self.channels_mut();
+ let rgb = other.channels();
+ let l = rgb_to_luma(rgb);
+ gray[0] = T::from_primitive(l);
+ }
+}
+
+// `FromColor` for LumaA
+
+impl<S: Primitive, T: Primitive> FromColor<LumaA<S>> for LumaA<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &LumaA<S>) {
+ let own = self.channels_mut();
+ let other = other.channels();
+ own[0] = T::from_primitive(other[0]);
+ own[1] = T::from_primitive(other[1]);
+ }
+}
+
+impl<S: Primitive + Enlargeable, T: Primitive> FromColor<Rgb<S>> for LumaA<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &Rgb<S>) {
+ let gray_a = self.channels_mut();
+ let rgb = other.channels();
+ gray_a[0] = T::from_primitive(rgb_to_luma(rgb));
+ gray_a[1] = T::DEFAULT_MAX_VALUE;
+ }
+}
+
+impl<S: Primitive + Enlargeable, T: Primitive> FromColor<Rgba<S>> for LumaA<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &Rgba<S>) {
+ let gray_a = self.channels_mut();
+ let rgba = other.channels();
+ gray_a[0] = T::from_primitive(rgb_to_luma(rgba));
+ gray_a[1] = T::from_primitive(rgba[3]);
+ }
+}
+
+impl<S: Primitive, T: Primitive> FromColor<Luma<S>> for LumaA<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &Luma<S>) {
+ let gray_a = self.channels_mut();
+ gray_a[0] = T::from_primitive(other.channels()[0]);
+ gray_a[1] = T::DEFAULT_MAX_VALUE;
+ }
+}
+
+// `FromColor` for RGBA
+
+impl<S: Primitive, T: Primitive> FromColor<Rgba<S>> for Rgba<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &Rgba<S>) {
+ let own = &mut self.0;
+ let other = &other.0;
+ own[0] = T::from_primitive(other[0]);
+ own[1] = T::from_primitive(other[1]);
+ own[2] = T::from_primitive(other[2]);
+ own[3] = T::from_primitive(other[3]);
+ }
+}
+
+impl<S: Primitive, T: Primitive> FromColor<Rgb<S>> for Rgba<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &Rgb<S>) {
+ let rgba = &mut self.0;
+ let rgb = &other.0;
+ rgba[0] = T::from_primitive(rgb[0]);
+ rgba[1] = T::from_primitive(rgb[1]);
+ rgba[2] = T::from_primitive(rgb[2]);
+ rgba[3] = T::DEFAULT_MAX_VALUE;
+ }
+}
+
+impl<S: Primitive, T: Primitive> FromColor<LumaA<S>> for Rgba<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, gray: &LumaA<S>) {
+ let rgba = &mut self.0;
+ let gray = &gray.0;
+ rgba[0] = T::from_primitive(gray[0]);
+ rgba[1] = T::from_primitive(gray[0]);
+ rgba[2] = T::from_primitive(gray[0]);
+ rgba[3] = T::from_primitive(gray[1]);
+ }
+}
+
+impl<S: Primitive, T: Primitive> FromColor<Luma<S>> for Rgba<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, gray: &Luma<S>) {
+ let rgba = &mut self.0;
+ let gray = gray.0[0];
+ rgba[0] = T::from_primitive(gray);
+ rgba[1] = T::from_primitive(gray);
+ rgba[2] = T::from_primitive(gray);
+ rgba[3] = T::DEFAULT_MAX_VALUE;
+ }
+}
+
+// `FromColor` for RGB
+
+impl<S: Primitive, T: Primitive> FromColor<Rgb<S>> for Rgb<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &Rgb<S>) {
+ let own = &mut self.0;
+ let other = &other.0;
+ own[0] = T::from_primitive(other[0]);
+ own[1] = T::from_primitive(other[1]);
+ own[2] = T::from_primitive(other[2]);
+ }
+}
+
+impl<S: Primitive, T: Primitive> FromColor<Rgba<S>> for Rgb<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &Rgba<S>) {
+ let rgb = &mut self.0;
+ let rgba = &other.0;
+ rgb[0] = T::from_primitive(rgba[0]);
+ rgb[1] = T::from_primitive(rgba[1]);
+ rgb[2] = T::from_primitive(rgba[2]);
+ }
+}
+
+impl<S: Primitive, T: Primitive> FromColor<LumaA<S>> for Rgb<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &LumaA<S>) {
+ let rgb = &mut self.0;
+ let gray = other.0[0];
+ rgb[0] = T::from_primitive(gray);
+ rgb[1] = T::from_primitive(gray);
+ rgb[2] = T::from_primitive(gray);
+ }
+}
+
+impl<S: Primitive, T: Primitive> FromColor<Luma<S>> for Rgb<T>
+where
+ T: FromPrimitive<S>,
+{
+ fn from_color(&mut self, other: &Luma<S>) {
+ let rgb = &mut self.0;
+ let gray = other.0[0];
+ rgb[0] = T::from_primitive(gray);
+ rgb[1] = T::from_primitive(gray);
+ rgb[2] = T::from_primitive(gray);
+ }
+}
+
+/// Blends a color into another one
+pub(crate) trait Blend {
+ /// Blends a color in-place.
+ fn blend(&mut self, other: &Self);
+}
+
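+// LumaA uses the same src-over alpha compositing as the Rgba implementation below, applied to the
+// single luma channel.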
+impl<T: Primitive> Blend for LumaA<T> {
+ fn blend(&mut self, other: &LumaA<T>) {
+ let max_t = T::DEFAULT_MAX_VALUE;
+ let max_t = max_t.to_f32().unwrap();
+ let (bg_luma, bg_a) = (self.0[0], self.0[1]);
+ let (fg_luma, fg_a) = (other.0[0], other.0[1]);
+
+ let (bg_luma, bg_a) = (
+ bg_luma.to_f32().unwrap() / max_t,
+ bg_a.to_f32().unwrap() / max_t,
+ );
+ let (fg_luma, fg_a) = (
+ fg_luma.to_f32().unwrap() / max_t,
+ fg_a.to_f32().unwrap() / max_t,
+ );
+
+ let alpha_final = bg_a + fg_a - bg_a * fg_a;
+ if alpha_final == 0.0 {
+ return;
+ };
+ let bg_luma_a = bg_luma * bg_a;
+ let fg_luma_a = fg_luma * fg_a;
+
+ let out_luma_a = fg_luma_a + bg_luma_a * (1.0 - fg_a);
+ let out_luma = out_luma_a / alpha_final;
+
+ *self = LumaA([
+ NumCast::from(max_t * out_luma).unwrap(),
+ NumCast::from(max_t * alpha_final).unwrap(),
+ ])
+ }
+}
+
+impl<T: Primitive> Blend for Luma<T> {
+ fn blend(&mut self, other: &Luma<T>) {
+ *self = *other
+ }
+}
+
+impl<T: Primitive> Blend for Rgba<T> {
+ fn blend(&mut self, other: &Rgba<T>) {
+ // http://stackoverflow.com/questions/7438263/alpha-compositing-algorithm-blend-modes#answer-11163848
+
+ if other.0[3].is_zero() {
+ return;
+ }
+ if other.0[3] == T::DEFAULT_MAX_VALUE {
+ *self = *other;
+ return;
+ }
+
+ // First, as we don't know what type our pixel is, we have to convert to floats between 0.0 and 1.0
+ let max_t = T::DEFAULT_MAX_VALUE;
+ let max_t = max_t.to_f32().unwrap();
+ let (bg_r, bg_g, bg_b, bg_a) = (self.0[0], self.0[1], self.0[2], self.0[3]);
+ let (fg_r, fg_g, fg_b, fg_a) = (other.0[0], other.0[1], other.0[2], other.0[3]);
+ let (bg_r, bg_g, bg_b, bg_a) = (
+ bg_r.to_f32().unwrap() / max_t,
+ bg_g.to_f32().unwrap() / max_t,
+ bg_b.to_f32().unwrap() / max_t,
+ bg_a.to_f32().unwrap() / max_t,
+ );
+ let (fg_r, fg_g, fg_b, fg_a) = (
+ fg_r.to_f32().unwrap() / max_t,
+ fg_g.to_f32().unwrap() / max_t,
+ fg_b.to_f32().unwrap() / max_t,
+ fg_a.to_f32().unwrap() / max_t,
+ );
+
+ // Work out what the final alpha level will be
+ let alpha_final = bg_a + fg_a - bg_a * fg_a;
+ if alpha_final == 0.0 {
+ return;
+ };
+
+ // We premultiply our channels by their alpha, as this makes it easier to calculate
+ let (bg_r_a, bg_g_a, bg_b_a) = (bg_r * bg_a, bg_g * bg_a, bg_b * bg_a);
+ let (fg_r_a, fg_g_a, fg_b_a) = (fg_r * fg_a, fg_g * fg_a, fg_b * fg_a);
+
+ // Standard formula for src-over alpha compositing
+ let (out_r_a, out_g_a, out_b_a) = (
+ fg_r_a + bg_r_a * (1.0 - fg_a),
+ fg_g_a + bg_g_a * (1.0 - fg_a),
+ fg_b_a + bg_b_a * (1.0 - fg_a),
+ );
+
+ // Unmultiply the channels by our resultant alpha channel
+ let (out_r, out_g, out_b) = (
+ out_r_a / alpha_final,
+ out_g_a / alpha_final,
+ out_b_a / alpha_final,
+ );
+
+ // Cast back to our initial type on return
+ *self = Rgba([
+ NumCast::from(max_t * out_r).unwrap(),
+ NumCast::from(max_t * out_g).unwrap(),
+ NumCast::from(max_t * out_b).unwrap(),
+ NumCast::from(max_t * alpha_final).unwrap(),
+ ])
+ }
+}
+
+impl<T: Primitive> Blend for Rgb<T> {
+ fn blend(&mut self, other: &Rgb<T>) {
+ *self = *other
+ }
+}
+
+/// Invert a color
+pub(crate) trait Invert {
+ /// Inverts a color in-place.
+ fn invert(&mut self);
+}
+
+impl<T: Primitive> Invert for LumaA<T> {
+ fn invert(&mut self) {
+ let l = self.0;
+ let max = T::DEFAULT_MAX_VALUE;
+
+ *self = LumaA([max - l[0], l[1]])
+ }
+}
+
+impl<T: Primitive> Invert for Luma<T> {
+ fn invert(&mut self) {
+ let l = self.0;
+
+ let max = T::DEFAULT_MAX_VALUE;
+ let l1 = max - l[0];
+
+ *self = Luma([l1])
+ }
+}
+
+impl<T: Primitive> Invert for Rgba<T> {
+ fn invert(&mut self) {
+ let rgba = self.0;
+
+ let max = T::DEFAULT_MAX_VALUE;
+
+ *self = Rgba([max - rgba[0], max - rgba[1], max - rgba[2], rgba[3]])
+ }
+}
+
+impl<T: Primitive> Invert for Rgb<T> {
+ fn invert(&mut self) {
+ let rgb = self.0;
+
+ let max = T::DEFAULT_MAX_VALUE;
+
+ let r1 = max - rgb[0];
+ let g1 = max - rgb[1];
+ let b1 = max - rgb[2];
+
+ *self = Rgb([r1, g1, b1])
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{Luma, LumaA, Pixel, Rgb, Rgba};
+
+ #[test]
+ fn test_apply_with_alpha_rgba() {
+ let mut rgba = Rgba([0, 0, 0, 0]);
+ rgba.apply_with_alpha(|s| s, |_| 0xFF);
+ assert_eq!(rgba, Rgba([0, 0, 0, 0xFF]));
+ }
+
+ #[test]
+ fn test_apply_with_alpha_rgb() {
+ let mut rgb = Rgb([0, 0, 0]);
+ rgb.apply_with_alpha(|s| s, |_| panic!("bug"));
+ assert_eq!(rgb, Rgb([0, 0, 0]));
+ }
+
+ #[test]
+ fn test_map_with_alpha_rgba() {
+ let rgba = Rgba([0, 0, 0, 0]).map_with_alpha(|s| s, |_| 0xFF);
+ assert_eq!(rgba, Rgba([0, 0, 0, 0xFF]));
+ }
+
+ #[test]
+ fn test_map_with_alpha_rgb() {
+ let rgb = Rgb([0, 0, 0]).map_with_alpha(|s| s, |_| panic!("bug"));
+ assert_eq!(rgb, Rgb([0, 0, 0]));
+ }
+
+ #[test]
+ fn test_blend_luma_alpha() {
+ let ref mut a = LumaA([255 as u8, 255]);
+ let b = LumaA([255 as u8, 255]);
+ a.blend(&b);
+ assert_eq!(a.0[0], 255);
+ assert_eq!(a.0[1], 255);
+
+ let ref mut a = LumaA([255 as u8, 0]);
+ let b = LumaA([255 as u8, 255]);
+ a.blend(&b);
+ assert_eq!(a.0[0], 255);
+ assert_eq!(a.0[1], 255);
+
+ let ref mut a = LumaA([255 as u8, 255]);
+ let b = LumaA([255 as u8, 0]);
+ a.blend(&b);
+ assert_eq!(a.0[0], 255);
+ assert_eq!(a.0[1], 255);
+
+ let ref mut a = LumaA([255 as u8, 0]);
+ let b = LumaA([255 as u8, 0]);
+ a.blend(&b);
+ assert_eq!(a.0[0], 255);
+ assert_eq!(a.0[1], 0);
+ }
+
+ #[test]
+ fn test_blend_rgba() {
+ let ref mut a = Rgba([255 as u8, 255, 255, 255]);
+ let b = Rgba([255 as u8, 255, 255, 255]);
+ a.blend(&b);
+ assert_eq!(a.0, [255, 255, 255, 255]);
+
+ let ref mut a = Rgba([255 as u8, 255, 255, 0]);
+ let b = Rgba([255 as u8, 255, 255, 255]);
+ a.blend(&b);
+ assert_eq!(a.0, [255, 255, 255, 255]);
+
+ let ref mut a = Rgba([255 as u8, 255, 255, 255]);
+ let b = Rgba([255 as u8, 255, 255, 0]);
+ a.blend(&b);
+ assert_eq!(a.0, [255, 255, 255, 255]);
+
+ let ref mut a = Rgba([255 as u8, 255, 255, 0]);
+ let b = Rgba([255 as u8, 255, 255, 0]);
+ a.blend(&b);
+ assert_eq!(a.0, [255, 255, 255, 0]);
+ }
+
+ #[test]
+ fn test_apply_without_alpha_rgba() {
+ let mut rgba = Rgba([0, 0, 0, 0]);
+ rgba.apply_without_alpha(|s| s + 1);
+ assert_eq!(rgba, Rgba([1, 1, 1, 0]));
+ }
+
+ #[test]
+ fn test_apply_without_alpha_rgb() {
+ let mut rgb = Rgb([0, 0, 0]);
+ rgb.apply_without_alpha(|s| s + 1);
+ assert_eq!(rgb, Rgb([1, 1, 1]));
+ }
+
+ #[test]
+ fn test_map_without_alpha_rgba() {
+ let rgba = Rgba([0, 0, 0, 0]).map_without_alpha(|s| s + 1);
+ assert_eq!(rgba, Rgba([1, 1, 1, 0]));
+ }
+
+ #[test]
+ fn test_map_without_alpha_rgb() {
+ let rgb = Rgb([0, 0, 0]).map_without_alpha(|s| s + 1);
+ assert_eq!(rgb, Rgb([1, 1, 1]));
+ }
+
+ macro_rules! test_lossless_conversion {
+ ($a:ty, $b:ty, $c:ty) => {
+ let a: $a = [<$a as Pixel>::Subpixel::DEFAULT_MAX_VALUE >> 2;
+ <$a as Pixel>::CHANNEL_COUNT as usize]
+ .into();
+ let b: $b = a.into_color();
+ let c: $c = b.into_color();
+ assert_eq!(a.channels(), c.channels());
+ };
+ }
+
+ #[test]
+ fn test_lossless_conversions() {
+ use super::IntoColor;
+ use crate::traits::Primitive;
+
+ test_lossless_conversion!(Luma<u8>, Luma<u16>, Luma<u8>);
+ test_lossless_conversion!(LumaA<u8>, LumaA<u16>, LumaA<u8>);
+ test_lossless_conversion!(Rgb<u8>, Rgb<u16>, Rgb<u8>);
+ test_lossless_conversion!(Rgba<u8>, Rgba<u16>, Rgba<u8>);
+ }
+
+ #[test]
+ fn accuracy_conversion() {
+ use super::{Luma, Pixel, Rgb};
+ let pixel = Rgb::from([13, 13, 13]);
+ let Luma([luma]) = pixel.to_luma();
+ assert_eq!(luma, 13);
+ }
+}
diff --git a/vendor/image/src/dynimage.rs b/vendor/image/src/dynimage.rs
new file mode 100644
index 0000000..c3071e0
--- /dev/null
+++ b/vendor/image/src/dynimage.rs
@@ -0,0 +1,1353 @@
+use std::io;
+use std::io::{Seek, Write};
+use std::path::Path;
+use std::u32;
+
+#[cfg(feature = "gif")]
+use crate::codecs::gif;
+#[cfg(feature = "png")]
+use crate::codecs::png;
+#[cfg(feature = "pnm")]
+use crate::codecs::pnm;
+
+use crate::buffer_::{
+ ConvertBuffer, Gray16Image, GrayAlpha16Image, GrayAlphaImage, GrayImage, ImageBuffer,
+ Rgb16Image, RgbImage, Rgba16Image, RgbaImage,
+};
+use crate::color::{self, IntoColor};
+use crate::error::{ImageError, ImageResult, ParameterError, ParameterErrorKind};
+use crate::flat::FlatSamples;
+use crate::image::{
+ GenericImage, GenericImageView, ImageDecoder, ImageEncoder, ImageFormat, ImageOutputFormat,
+};
+use crate::imageops;
+use crate::io::free_functions;
+use crate::math::resize_dimensions;
+use crate::traits::Pixel;
+use crate::{image, Luma, LumaA};
+use crate::{Rgb32FImage, Rgba32FImage};
+
+/// A Dynamic Image
+///
+/// This represents a _matrix_ of _pixels_ which are _convertible_ from and to an _RGBA_
+/// representation. More variants that adhere to these principles may get added in the future, in
+/// particular to cover other combinations typically used.
+///
+/// # Usage
+///
+/// This type can act as a converter between specific `ImageBuffer` instances.
+///
+/// ```
+/// use image::{DynamicImage, GrayImage, RgbImage};
+///
+/// let rgb: RgbImage = RgbImage::new(10, 10);
+/// let luma: GrayImage = DynamicImage::ImageRgb8(rgb).into_luma8();
+/// ```
+///
+/// # Design
+///
+/// There is no goal to provide an all-encompassing type with all possible memory layouts. This
+/// would hardly be feasible as a simple enum, due to the sheer number of combinations of channel
+/// kinds, channel order, and bit depth. Rather, this type provides an opinionated selection with
+/// normalized channel order which can store common pixel values without loss.
+#[derive(Clone, Debug, PartialEq)]
+#[non_exhaustive]
+pub enum DynamicImage {
+ /// Each pixel in this image is 8-bit Luma
+ ImageLuma8(GrayImage),
+
+ /// Each pixel in this image is 8-bit Luma with alpha
+ ImageLumaA8(GrayAlphaImage),
+
+ /// Each pixel in this image is 8-bit Rgb
+ ImageRgb8(RgbImage),
+
+ /// Each pixel in this image is 8-bit Rgb with alpha
+ ImageRgba8(RgbaImage),
+
+ /// Each pixel in this image is 16-bit Luma
+ ImageLuma16(Gray16Image),
+
+ /// Each pixel in this image is 16-bit Luma with alpha
+ ImageLumaA16(GrayAlpha16Image),
+
+ /// Each pixel in this image is 16-bit Rgb
+ ImageRgb16(Rgb16Image),
+
+ /// Each pixel in this image is 16-bit Rgb with alpha
+ ImageRgba16(Rgba16Image),
+
+ /// Each pixel in this image is 32-bit float Rgb
+ ImageRgb32F(Rgb32FImage),
+
+ /// Each pixel in this image is 32-bit float Rgb with alpha
+ ImageRgba32F(Rgba32FImage),
+}
+
+macro_rules! dynamic_map(
+ ($dynimage: expr, $image: pat => $action: expr) => ({
+ use DynamicImage::*;
+ match $dynimage {
+ ImageLuma8($image) => ImageLuma8($action),
+ ImageLumaA8($image) => ImageLumaA8($action),
+ ImageRgb8($image) => ImageRgb8($action),
+ ImageRgba8($image) => ImageRgba8($action),
+ ImageLuma16($image) => ImageLuma16($action),
+ ImageLumaA16($image) => ImageLumaA16($action),
+ ImageRgb16($image) => ImageRgb16($action),
+ ImageRgba16($image) => ImageRgba16($action),
+ ImageRgb32F($image) => ImageRgb32F($action),
+ ImageRgba32F($image) => ImageRgba32F($action),
+ }
+ });
+
+ ($dynimage: expr, |$image: pat| $action: expr) => (
+ match $dynimage {
+ DynamicImage::ImageLuma8($image) => $action,
+ DynamicImage::ImageLumaA8($image) => $action,
+ DynamicImage::ImageRgb8($image) => $action,
+ DynamicImage::ImageRgba8($image) => $action,
+ DynamicImage::ImageLuma16($image) => $action,
+ DynamicImage::ImageLumaA16($image) => $action,
+ DynamicImage::ImageRgb16($image) => $action,
+ DynamicImage::ImageRgba16($image) => $action,
+ DynamicImage::ImageRgb32F($image) => $action,
+ DynamicImage::ImageRgba32F($image) => $action,
+ }
+ );
+);
+
+impl DynamicImage {
+ /// Creates a dynamic image backed by a buffer of gray pixels.
+ pub fn new_luma8(w: u32, h: u32) -> DynamicImage {
+ DynamicImage::ImageLuma8(ImageBuffer::new(w, h))
+ }
+
+ /// Creates a dynamic image backed by a buffer of gray
+ /// pixels with transparency.
+ pub fn new_luma_a8(w: u32, h: u32) -> DynamicImage {
+ DynamicImage::ImageLumaA8(ImageBuffer::new(w, h))
+ }
+
+ /// Creates a dynamic image backed by a buffer of RGB pixels.
+ pub fn new_rgb8(w: u32, h: u32) -> DynamicImage {
+ DynamicImage::ImageRgb8(ImageBuffer::new(w, h))
+ }
+
+ /// Creates a dynamic image backed by a buffer of RGBA pixels.
+ pub fn new_rgba8(w: u32, h: u32) -> DynamicImage {
+ DynamicImage::ImageRgba8(ImageBuffer::new(w, h))
+ }
+
+ /// Creates a dynamic image backed by a buffer of gray pixels.
+ pub fn new_luma16(w: u32, h: u32) -> DynamicImage {
+ DynamicImage::ImageLuma16(ImageBuffer::new(w, h))
+ }
+
+ /// Creates a dynamic image backed by a buffer of gray
+ /// pixels with transparency.
+ pub fn new_luma_a16(w: u32, h: u32) -> DynamicImage {
+ DynamicImage::ImageLumaA16(ImageBuffer::new(w, h))
+ }
+
+ /// Creates a dynamic image backed by a buffer of RGB pixels.
+ pub fn new_rgb16(w: u32, h: u32) -> DynamicImage {
+ DynamicImage::ImageRgb16(ImageBuffer::new(w, h))
+ }
+
+ /// Creates a dynamic image backed by a buffer of RGBA pixels.
+ pub fn new_rgba16(w: u32, h: u32) -> DynamicImage {
+ DynamicImage::ImageRgba16(ImageBuffer::new(w, h))
+ }
+
+ /// Creates a dynamic image backed by a buffer of RGB pixels.
+ pub fn new_rgb32f(w: u32, h: u32) -> DynamicImage {
+ DynamicImage::ImageRgb32F(ImageBuffer::new(w, h))
+ }
+
+ /// Creates a dynamic image backed by a buffer of RGBA pixels.
+ pub fn new_rgba32f(w: u32, h: u32) -> DynamicImage {
+ DynamicImage::ImageRgba32F(ImageBuffer::new(w, h))
+ }
+
+ /// Decodes an encoded image into a dynamic image.
+ pub fn from_decoder<'a>(decoder: impl ImageDecoder<'a>) -> ImageResult<Self> {
+ decoder_to_image(decoder)
+ }
+
+ /// Returns a copy of this image as an RGB image.
+ pub fn to_rgb8(&self) -> RgbImage {
+ dynamic_map!(*self, |ref p| p.convert())
+ }
+
+ /// Returns a copy of this image as an RGB image.
+ pub fn to_rgb16(&self) -> Rgb16Image {
+ dynamic_map!(*self, |ref p| p.convert())
+ }
+
+ /// Returns a copy of this image as an RGB image.
+ pub fn to_rgb32f(&self) -> Rgb32FImage {
+ dynamic_map!(*self, |ref p| p.convert())
+ }
+
+ /// Returns a copy of this image as an RGBA image.
+ pub fn to_rgba8(&self) -> RgbaImage {
+ dynamic_map!(*self, |ref p| p.convert())
+ }
+
+ /// Returns a copy of this image as an RGBA image.
+ pub fn to_rgba16(&self) -> Rgba16Image {
+ dynamic_map!(*self, |ref p| p.convert())
+ }
+
+ /// Returns a copy of this image as an RGBA image.
+ pub fn to_rgba32f(&self) -> Rgba32FImage {
+ dynamic_map!(*self, |ref p| p.convert())
+ }
+
+ /// Returns a copy of this image as a Luma image.
+ pub fn to_luma8(&self) -> GrayImage {
+ dynamic_map!(*self, |ref p| p.convert())
+ }
+
+ /// Returns a copy of this image as a Luma image.
+ pub fn to_luma16(&self) -> Gray16Image {
+ dynamic_map!(*self, |ref p| p.convert())
+ }
+
+ /// Returns a copy of this image as a Luma image.
+ pub fn to_luma32f(&self) -> ImageBuffer<Luma<f32>, Vec<f32>> {
+ dynamic_map!(*self, |ref p| p.convert())
+ }
+
+ /// Returns a copy of this image as a LumaA image.
+ pub fn to_luma_alpha8(&self) -> GrayAlphaImage {
+ dynamic_map!(*self, |ref p| p.convert())
+ }
+
+ /// Returns a copy of this image as a LumaA image.
+ pub fn to_luma_alpha16(&self) -> GrayAlpha16Image {
+ dynamic_map!(*self, |ref p| p.convert())
+ }
+
+ /// Returns a copy of this image as a LumaA image.
+ pub fn to_luma_alpha32f(&self) -> ImageBuffer<LumaA<f32>, Vec<f32>> {
+ dynamic_map!(*self, |ref p| p.convert())
+ }
+
+ /// Consumes the image and returns an RGB image.
+ ///
+ /// If the image was already the correct format, it is returned as is.
+ /// Otherwise, a copy is created.
+ pub fn into_rgb8(self) -> RgbImage {
+ match self {
+ DynamicImage::ImageRgb8(x) => x,
+ x => x.to_rgb8(),
+ }
+ }
+
+ /// Consumes the image and returns an RGB image.
+ ///
+ /// If the image was already the correct format, it is returned as is.
+ /// Otherwise, a copy is created.
+ pub fn into_rgb16(self) -> Rgb16Image {
+ match self {
+ DynamicImage::ImageRgb16(x) => x,
+ x => x.to_rgb16(),
+ }
+ }
+
+ /// Consumes the image and returns an RGB image.
+ ///
+ /// If the image was already the correct format, it is returned as is.
+ /// Otherwise, a copy is created.
+ pub fn into_rgb32f(self) -> Rgb32FImage {
+ match self {
+ DynamicImage::ImageRgb32F(x) => x,
+ x => x.to_rgb32f(),
+ }
+ }
+
+ /// Consumes the image and returns an RGBA image.
+ ///
+ /// If the image was already the correct format, it is returned as is.
+ /// Otherwise, a copy is created.
+ pub fn into_rgba8(self) -> RgbaImage {
+ match self {
+ DynamicImage::ImageRgba8(x) => x,
+ x => x.to_rgba8(),
+ }
+ }
+
+ /// Consumes the image and returns an RGBA image.
+ ///
+ /// If the image was already the correct format, it is returned as is.
+ /// Otherwise, a copy is created.
+ pub fn into_rgba16(self) -> Rgba16Image {
+ match self {
+ DynamicImage::ImageRgba16(x) => x,
+ x => x.to_rgba16(),
+ }
+ }
+
+ /// Consumes the image and returns an RGBA image.
+ ///
+ /// If the image was already the correct format, it is returned as is.
+ /// Otherwise, a copy is created.
+ pub fn into_rgba32f(self) -> Rgba32FImage {
+ match self {
+ DynamicImage::ImageRgba32F(x) => x,
+ x => x.to_rgba32f(),
+ }
+ }
+
+ /// Consumes the image and returns a Luma image.
+ ///
+ /// If the image was already the correct format, it is returned as is.
+ /// Otherwise, a copy is created.
+ pub fn into_luma8(self) -> GrayImage {
+ match self {
+ DynamicImage::ImageLuma8(x) => x,
+ x => x.to_luma8(),
+ }
+ }
+
+ /// Consumes the image and returns a Luma image.
+ ///
+ /// If the image was already the correct format, it is returned as is.
+ /// Otherwise, a copy is created.
+ pub fn into_luma16(self) -> Gray16Image {
+ match self {
+ DynamicImage::ImageLuma16(x) => x,
+ x => x.to_luma16(),
+ }
+ }
+
+ /// Consumes the image and returns a LumaA image.
+ ///
+ /// If the image was already the correct format, it is returned as is.
+ /// Otherwise, a copy is created.
+ pub fn into_luma_alpha8(self) -> GrayAlphaImage {
+ match self {
+ DynamicImage::ImageLumaA8(x) => x,
+ x => x.to_luma_alpha8(),
+ }
+ }
+
+ /// Consumes the image and returns a LumaA image.
+ ///
+ /// If the image was already the correct format, it is returned as is.
+ /// Otherwise, a copy is created.
+ pub fn into_luma_alpha16(self) -> GrayAlpha16Image {
+ match self {
+ DynamicImage::ImageLumaA16(x) => x,
+ x => x.to_luma_alpha16(),
+ }
+ }
+
+ /// Return a cut-out of this image delimited by the bounding rectangle.
+ ///
+ /// Note: this method does *not* modify the object,
+ /// and its signature will be replaced with `crop_imm()`'s in the 0.24 release
+ pub fn crop(&mut self, x: u32, y: u32, width: u32, height: u32) -> DynamicImage {
+ dynamic_map!(*self, ref mut p => imageops::crop(p, x, y, width, height).to_image())
+ }
+
+ /// Return a cut-out of this image delimited by the bounding rectangle.
+ pub fn crop_imm(&self, x: u32, y: u32, width: u32, height: u32) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::crop_imm(p, x, y, width, height).to_image())
+ }
+
+ /// Return a reference to an 8bit RGB image
+ pub fn as_rgb8(&self) -> Option<&RgbImage> {
+ match *self {
+ DynamicImage::ImageRgb8(ref p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a mutable reference to an 8bit RGB image
+ pub fn as_mut_rgb8(&mut self) -> Option<&mut RgbImage> {
+ match *self {
+ DynamicImage::ImageRgb8(ref mut p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a reference to an 8bit RGBA image
+ pub fn as_rgba8(&self) -> Option<&RgbaImage> {
+ match *self {
+ DynamicImage::ImageRgba8(ref p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a mutable reference to an 8bit RGBA image
+ pub fn as_mut_rgba8(&mut self) -> Option<&mut RgbaImage> {
+ match *self {
+ DynamicImage::ImageRgba8(ref mut p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a reference to an 8bit Grayscale image
+ pub fn as_luma8(&self) -> Option<&GrayImage> {
+ match *self {
+ DynamicImage::ImageLuma8(ref p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a mutable reference to an 8bit Grayscale image
+ pub fn as_mut_luma8(&mut self) -> Option<&mut GrayImage> {
+ match *self {
+ DynamicImage::ImageLuma8(ref mut p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a reference to an 8bit Grayscale image with an alpha channel
+ pub fn as_luma_alpha8(&self) -> Option<&GrayAlphaImage> {
+ match *self {
+ DynamicImage::ImageLumaA8(ref p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a mutable reference to an 8bit Grayscale image with an alpha channel
+ pub fn as_mut_luma_alpha8(&mut self) -> Option<&mut GrayAlphaImage> {
+ match *self {
+ DynamicImage::ImageLumaA8(ref mut p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a reference to a 16bit RGB image
+ pub fn as_rgb16(&self) -> Option<&Rgb16Image> {
+ match *self {
+ DynamicImage::ImageRgb16(ref p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a mutable reference to a 16bit RGB image
+ pub fn as_mut_rgb16(&mut self) -> Option<&mut Rgb16Image> {
+ match *self {
+ DynamicImage::ImageRgb16(ref mut p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a reference to a 16bit RGBA image
+ pub fn as_rgba16(&self) -> Option<&Rgba16Image> {
+ match *self {
+ DynamicImage::ImageRgba16(ref p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a mutable reference to a 16bit RGBA image
+ pub fn as_mut_rgba16(&mut self) -> Option<&mut Rgba16Image> {
+ match *self {
+ DynamicImage::ImageRgba16(ref mut p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a reference to a 32bit RGB image
+ pub fn as_rgb32f(&self) -> Option<&Rgb32FImage> {
+ match *self {
+ DynamicImage::ImageRgb32F(ref p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a mutable reference to a 32bit RGB image
+ pub fn as_mut_rgb32f(&mut self) -> Option<&mut Rgb32FImage> {
+ match *self {
+ DynamicImage::ImageRgb32F(ref mut p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a reference to a 32bit RGBA image
+ pub fn as_rgba32f(&self) -> Option<&Rgba32FImage> {
+ match *self {
+ DynamicImage::ImageRgba32F(ref p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a mutable reference to a 32bit RGBA image
+ pub fn as_mut_rgba32f(&mut self) -> Option<&mut Rgba32FImage> {
+ match *self {
+ DynamicImage::ImageRgba32F(ref mut p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a reference to a 16bit Grayscale image
+ pub fn as_luma16(&self) -> Option<&Gray16Image> {
+ match *self {
+ DynamicImage::ImageLuma16(ref p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a mutable reference to a 16bit Grayscale image
+ pub fn as_mut_luma16(&mut self) -> Option<&mut Gray16Image> {
+ match *self {
+ DynamicImage::ImageLuma16(ref mut p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a reference to a 16bit Grayscale image with an alpha channel
+ pub fn as_luma_alpha16(&self) -> Option<&GrayAlpha16Image> {
+ match *self {
+ DynamicImage::ImageLumaA16(ref p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a mutable reference to a 16bit Grayscale image with an alpha channel
+ pub fn as_mut_luma_alpha16(&mut self) -> Option<&mut GrayAlpha16Image> {
+ match *self {
+ DynamicImage::ImageLumaA16(ref mut p) => Some(p),
+ _ => None,
+ }
+ }
+
+ /// Return a view on the raw sample buffer for 8 bit per channel images.
+ pub fn as_flat_samples_u8(&self) -> Option<FlatSamples<&[u8]>> {
+ match *self {
+ DynamicImage::ImageLuma8(ref p) => Some(p.as_flat_samples()),
+ DynamicImage::ImageLumaA8(ref p) => Some(p.as_flat_samples()),
+ DynamicImage::ImageRgb8(ref p) => Some(p.as_flat_samples()),
+ DynamicImage::ImageRgba8(ref p) => Some(p.as_flat_samples()),
+ _ => None,
+ }
+ }
+
+ /// Return a view on the raw sample buffer for 16 bit per channel images.
+ pub fn as_flat_samples_u16(&self) -> Option<FlatSamples<&[u16]>> {
+ match *self {
+ DynamicImage::ImageLuma16(ref p) => Some(p.as_flat_samples()),
+ DynamicImage::ImageLumaA16(ref p) => Some(p.as_flat_samples()),
+ DynamicImage::ImageRgb16(ref p) => Some(p.as_flat_samples()),
+ DynamicImage::ImageRgba16(ref p) => Some(p.as_flat_samples()),
+ _ => None,
+ }
+ }
+
+ /// Return a view on the raw sample buffer for 32bit per channel images.
+ pub fn as_flat_samples_f32(&self) -> Option<FlatSamples<&[f32]>> {
+ match *self {
+ DynamicImage::ImageRgb32F(ref p) => Some(p.as_flat_samples()),
+ DynamicImage::ImageRgba32F(ref p) => Some(p.as_flat_samples()),
+ _ => None,
+ }
+ }
+
+ /// Return this image's pixels as a native endian byte slice.
+ pub fn as_bytes(&self) -> &[u8] {
+ // we can do this because every variant contains an `ImageBuffer<_, Vec<_>>`
+ dynamic_map!(*self, |ref image_buffer| bytemuck::cast_slice(
+ image_buffer.as_raw().as_ref()
+ ))
+ }
+
+ // TODO: choose a name under which to expose?
+ fn inner_bytes(&self) -> &[u8] {
+ // we can do this because every variant contains an `ImageBuffer<_, Vec<_>>`
+ dynamic_map!(*self, |ref image_buffer| bytemuck::cast_slice(
+ image_buffer.inner_pixels()
+ ))
+ }
+
+ /// Return this image's pixels as a byte vector. If the `ImageBuffer`
+ /// container is `Vec<u8>`, this operation is free. Otherwise, a copy
+ /// is returned.
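+ ///
+ /// A minimal sketch of the free case (an 8-bit buffer is already backed by `Vec<u8>`):
+ ///
+ /// ```
+ /// // A 2x2 RGBA8 image yields 2 * 2 * 4 bytes without copying.
+ /// let bytes = image::DynamicImage::new_rgba8(2, 2).into_bytes();
+ /// assert_eq!(bytes.len(), 2 * 2 * 4);
+ /// ```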
+ pub fn into_bytes(self) -> Vec<u8> {
+ // we can do this because every variant contains an `ImageBuffer<_, Vec<_>>`
+ dynamic_map!(self, |image_buffer| {
+ match bytemuck::allocation::try_cast_vec(image_buffer.into_raw()) {
+ Ok(vec) => vec,
+ Err((_, vec)) => {
+ // Fallback: vector requires an exact alignment and size match
+ // Reuse of the allocation as done in the Ok branch only works if the
+ // underlying container is exactly Vec<u8> (or compatible but that's the only
+ // alternative at the time of writing).
+ // In all other cases we must allocate a new vector with the 'same' contents.
+ bytemuck::cast_slice(&vec).to_owned()
+ }
+ }
+ })
+ }
+
+ /// Return a copy of this image's pixels as a byte vector.
+ /// Deprecated, because it does nothing but hide an expensive clone operation.
+ #[deprecated(
+ since = "0.24.0",
+ note = "use `image.into_bytes()` or `image.as_bytes().to_vec()` instead"
+ )]
+ pub fn to_bytes(&self) -> Vec<u8> {
+ self.as_bytes().to_vec()
+ }
+
+ /// Return this image's color type.
+ pub fn color(&self) -> color::ColorType {
+ match *self {
+ DynamicImage::ImageLuma8(_) => color::ColorType::L8,
+ DynamicImage::ImageLumaA8(_) => color::ColorType::La8,
+ DynamicImage::ImageRgb8(_) => color::ColorType::Rgb8,
+ DynamicImage::ImageRgba8(_) => color::ColorType::Rgba8,
+ DynamicImage::ImageLuma16(_) => color::ColorType::L16,
+ DynamicImage::ImageLumaA16(_) => color::ColorType::La16,
+ DynamicImage::ImageRgb16(_) => color::ColorType::Rgb16,
+ DynamicImage::ImageRgba16(_) => color::ColorType::Rgba16,
+ DynamicImage::ImageRgb32F(_) => color::ColorType::Rgb32F,
+ DynamicImage::ImageRgba32F(_) => color::ColorType::Rgba32F,
+ }
+ }
+
+ /// Returns the width of the underlying image
+ pub fn width(&self) -> u32 {
+ dynamic_map!(*self, |ref p| { p.width() })
+ }
+
+ /// Returns the height of the underlying image
+ pub fn height(&self) -> u32 {
+ dynamic_map!(*self, |ref p| { p.height() })
+ }
+
+ /// Return a grayscale version of this image.
+ /// Returns `Luma` images in most cases. However, for `f32` images,
+ /// this will return a grayscale `Rgb/Rgba` image instead.
+ pub fn grayscale(&self) -> DynamicImage {
+ match *self {
+ DynamicImage::ImageLuma8(ref p) => DynamicImage::ImageLuma8(p.clone()),
+ DynamicImage::ImageLumaA8(ref p) => {
+ DynamicImage::ImageLumaA8(imageops::grayscale_alpha(p))
+ }
+ DynamicImage::ImageRgb8(ref p) => DynamicImage::ImageLuma8(imageops::grayscale(p)),
+ DynamicImage::ImageRgba8(ref p) => {
+ DynamicImage::ImageLumaA8(imageops::grayscale_alpha(p))
+ }
+ DynamicImage::ImageLuma16(ref p) => DynamicImage::ImageLuma16(p.clone()),
+ DynamicImage::ImageLumaA16(ref p) => {
+ DynamicImage::ImageLumaA16(imageops::grayscale_alpha(p))
+ }
+ DynamicImage::ImageRgb16(ref p) => DynamicImage::ImageLuma16(imageops::grayscale(p)),
+ DynamicImage::ImageRgba16(ref p) => {
+ DynamicImage::ImageLumaA16(imageops::grayscale_alpha(p))
+ }
+ DynamicImage::ImageRgb32F(ref p) => {
+ DynamicImage::ImageRgb32F(imageops::grayscale_with_type(p))
+ }
+ DynamicImage::ImageRgba32F(ref p) => {
+ DynamicImage::ImageRgba32F(imageops::grayscale_with_type_alpha(p))
+ }
+ }
+ }
+
+ /// Invert the colors of this image.
+ /// This method operates in place.
+ pub fn invert(&mut self) {
+ dynamic_map!(*self, |ref mut p| imageops::invert(p))
+ }
+
+ /// Resize this image using the specified filter algorithm.
+ /// Returns a new image. The image's aspect ratio is preserved.
+ /// The image is scaled to the maximum possible size that fits
+ /// within the bounds specified by `nwidth` and `nheight`.
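+ ///
+ /// For example, fitting a wide 200x100 image into a 100x100 box keeps the aspect
+ /// ratio and yields 100x50 (a small sketch; `Nearest` is only chosen here for speed):
+ ///
+ /// ```
+ /// use image::imageops::FilterType;
+ ///
+ /// let img = image::DynamicImage::new_rgb8(200, 100);
+ /// let small = img.resize(100, 100, FilterType::Nearest);
+ /// assert_eq!((small.width(), small.height()), (100, 50));
+ /// ```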
+ pub fn resize(&self, nwidth: u32, nheight: u32, filter: imageops::FilterType) -> DynamicImage {
+ if (nwidth, nheight) == self.dimensions() {
+ return self.clone();
+ }
+ let (width2, height2) =
+ resize_dimensions(self.width(), self.height(), nwidth, nheight, false);
+
+ self.resize_exact(width2, height2, filter)
+ }
+
+ /// Resize this image using the specified filter algorithm.
+ /// Returns a new image. Does not preserve aspect ratio.
+ /// `nwidth` and `nheight` are the new image's dimensions
+ pub fn resize_exact(
+ &self,
+ nwidth: u32,
+ nheight: u32,
+ filter: imageops::FilterType,
+ ) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::resize(p, nwidth, nheight, filter))
+ }
+
+ /// Scale this image down to fit within a specific size.
+ /// Returns a new image. The image's aspect ratio is preserved.
+ /// The image is scaled to the maximum possible size that fits
+ /// within the bounds specified by `nwidth` and `nheight`.
+ ///
+ /// This method uses a fast integer algorithm where each source
+ /// pixel contributes to exactly one target pixel.
+ /// May give aliasing artifacts if new size is close to old size.
+ pub fn thumbnail(&self, nwidth: u32, nheight: u32) -> DynamicImage {
+ let (width2, height2) =
+ resize_dimensions(self.width(), self.height(), nwidth, nheight, false);
+ self.thumbnail_exact(width2, height2)
+ }
+
+ /// Scale this image down to a specific size.
+ /// Returns a new image. Does not preserve aspect ratio.
+ /// `nwidth` and `nheight` are the new image's dimensions.
+ /// This method uses a fast integer algorithm where each source
+ /// pixel contributes to exactly one target pixel.
+ /// May give aliasing artifacts if new size is close to old size.
+ pub fn thumbnail_exact(&self, nwidth: u32, nheight: u32) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::thumbnail(p, nwidth, nheight))
+ }
+
+ /// Resize this image using the specified filter algorithm.
+ /// Returns a new image. The image's aspect ratio is preserved.
+ /// The image is scaled to the maximum possible size that fits
+ /// within the larger (relative to aspect ratio) of the bounds
+ /// specified by `nwidth` and `nheight`, then cropped to
+ /// fit within the other bound.
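+ ///
+ /// For example, filling a 100x100 box from a wide 200x100 image scales it so the
+ /// height fits and then center-crops the excess width (a small sketch):
+ ///
+ /// ```
+ /// use image::imageops::FilterType;
+ ///
+ /// let img = image::DynamicImage::new_rgb8(200, 100);
+ /// let square = img.resize_to_fill(100, 100, FilterType::Nearest);
+ /// assert_eq!((square.width(), square.height()), (100, 100));
+ /// ```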
+ pub fn resize_to_fill(
+ &self,
+ nwidth: u32,
+ nheight: u32,
+ filter: imageops::FilterType,
+ ) -> DynamicImage {
+ let (width2, height2) =
+ resize_dimensions(self.width(), self.height(), nwidth, nheight, true);
+
+ let mut intermediate = self.resize_exact(width2, height2, filter);
+ let (iwidth, iheight) = intermediate.dimensions();
+ let ratio = u64::from(iwidth) * u64::from(nheight);
+ let nratio = u64::from(nwidth) * u64::from(iheight);
+
+ if nratio > ratio {
+ intermediate.crop(0, (iheight - nheight) / 2, nwidth, nheight)
+ } else {
+ intermediate.crop((iwidth - nwidth) / 2, 0, nwidth, nheight)
+ }
+ }
+
+ /// Performs a Gaussian blur on this image.
+ /// `sigma` is a measure of how much to blur by.
+ pub fn blur(&self, sigma: f32) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::blur(p, sigma))
+ }
+
+ /// Performs an unsharpen mask on this image.
+ /// `sigma` is the amount to blur the image by.
+ /// `threshold` is a control of how much to sharpen.
+ ///
+ /// See <https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking>
+ pub fn unsharpen(&self, sigma: f32, threshold: i32) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::unsharpen(p, sigma, threshold))
+ }
+
+ /// Filters this image with the specified 3x3 kernel.
+ pub fn filter3x3(&self, kernel: &[f32]) -> DynamicImage {
+ if kernel.len() != 9 {
+ panic!("filter must be 3 x 3")
+ }
+
+ dynamic_map!(*self, ref p => imageops::filter3x3(p, kernel))
+ }
+
+ /// Adjust the contrast of this image.
+ /// `contrast` is the amount to adjust the contrast by.
+ /// Negative values decrease the contrast and positive values increase the contrast.
+ pub fn adjust_contrast(&self, c: f32) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::contrast(p, c))
+ }
+
+ /// Brighten the pixels of this image.
+ /// `value` is the amount to brighten each pixel by.
+ /// Negative values decrease the brightness and positive values increase it.
+ pub fn brighten(&self, value: i32) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::brighten(p, value))
+ }
+
+ /// Hue rotate the supplied image.
+ /// `value` is the degrees to rotate each pixel by.
+ /// 0 and 360 do nothing; any other value rotates the hue by that many degrees,
+ /// just like the CSS webkit filter `hue-rotate(180)`.
+ pub fn huerotate(&self, value: i32) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::huerotate(p, value))
+ }
+
+ /// Flip this image vertically
+ pub fn flipv(&self) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::flip_vertical(p))
+ }
+
+ /// Flip this image horizontally
+ pub fn fliph(&self) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::flip_horizontal(p))
+ }
+
+ /// Rotate this image 90 degrees clockwise.
+ pub fn rotate90(&self) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::rotate90(p))
+ }
+
+ /// Rotate this image 180 degrees clockwise.
+ pub fn rotate180(&self) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::rotate180(p))
+ }
+
+ /// Rotate this image 270 degrees clockwise.
+ pub fn rotate270(&self) -> DynamicImage {
+ dynamic_map!(*self, ref p => imageops::rotate270(p))
+ }
+
+ /// Encode this image and write it to `w`.
+ ///
+ /// Assumes the writer is buffered. In most cases,
+ /// you should wrap your writer in a `BufWriter` for best performance.
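+ ///
+ /// A minimal sketch (assumes the `png` feature is enabled and `"out.png"` is a writable path):
+ ///
+ /// ```no_run
+ /// use std::fs::File;
+ /// use std::io::BufWriter;
+ /// use image::ImageOutputFormat;
+ ///
+ /// let img = image::DynamicImage::new_rgb8(64, 64);
+ /// let mut writer = BufWriter::new(File::create("out.png").unwrap());
+ /// img.write_to(&mut writer, ImageOutputFormat::Png).unwrap();
+ /// ```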
+ pub fn write_to<W: Write + Seek, F: Into<ImageOutputFormat>>(
+ &self,
+ w: &mut W,
+ format: F,
+ ) -> ImageResult<()> {
+ #[allow(unused_variables)]
+ // When no features are supported
+ let w = w;
+ #[allow(unused_variables, unused_mut)]
+ let mut bytes = self.inner_bytes();
+ #[allow(unused_variables)]
+ let (width, height) = self.dimensions();
+ #[allow(unused_variables, unused_mut)]
+ let mut color = self.color();
+ let format = format.into();
+
+ // TODO do not repeat this match statement across the crate
+
+ #[allow(deprecated)]
+ match format {
+ #[cfg(feature = "png")]
+ image::ImageOutputFormat::Png => {
+ let p = png::PngEncoder::new(w);
+ p.write_image(bytes, width, height, color)?;
+ Ok(())
+ }
+
+ #[cfg(feature = "pnm")]
+ image::ImageOutputFormat::Pnm(subtype) => {
+ let p = pnm::PnmEncoder::new(w).with_subtype(subtype);
+ p.write_image(bytes, width, height, color)?;
+ Ok(())
+ }
+
+ #[cfg(feature = "gif")]
+ image::ImageOutputFormat::Gif => {
+ let mut g = gif::GifEncoder::new(w);
+ g.encode_frame(crate::animation::Frame::new(self.to_rgba8()))?;
+ Ok(())
+ }
+
+ format => write_buffer_with_format(w, bytes, width, height, color, format),
+ }
+ }
+
+ /// Encode this image with the provided encoder.
+ pub fn write_with_encoder(&self, encoder: impl ImageEncoder) -> ImageResult<()> {
+ dynamic_map!(self, |ref p| p.write_with_encoder(encoder))
+ }
+
+ /// Saves the buffer to a file at the path specified.
+ ///
+ /// The image format is derived from the file extension.
+ pub fn save<Q>(&self, path: Q) -> ImageResult<()>
+ where
+ Q: AsRef<Path>,
+ {
+ dynamic_map!(*self, |ref p| p.save(path))
+ }
+
+ /// Saves the buffer to a file at the specified path in
+ /// the specified format.
+ ///
+ /// See [`save_buffer_with_format`](fn.save_buffer_with_format.html) for
+ /// supported types.
+ pub fn save_with_format<Q>(&self, path: Q, format: ImageFormat) -> ImageResult<()>
+ where
+ Q: AsRef<Path>,
+ {
+ dynamic_map!(*self, |ref p| p.save_with_format(path, format))
+ }
+}
+
+impl From<GrayImage> for DynamicImage {
+ fn from(image: GrayImage) -> Self {
+ DynamicImage::ImageLuma8(image)
+ }
+}
+
+impl From<GrayAlphaImage> for DynamicImage {
+ fn from(image: GrayAlphaImage) -> Self {
+ DynamicImage::ImageLumaA8(image)
+ }
+}
+
+impl From<RgbImage> for DynamicImage {
+ fn from(image: RgbImage) -> Self {
+ DynamicImage::ImageRgb8(image)
+ }
+}
+
+impl From<RgbaImage> for DynamicImage {
+ fn from(image: RgbaImage) -> Self {
+ DynamicImage::ImageRgba8(image)
+ }
+}
+
+impl From<Gray16Image> for DynamicImage {
+ fn from(image: Gray16Image) -> Self {
+ DynamicImage::ImageLuma16(image)
+ }
+}
+
+impl From<GrayAlpha16Image> for DynamicImage {
+ fn from(image: GrayAlpha16Image) -> Self {
+ DynamicImage::ImageLumaA16(image)
+ }
+}
+
+impl From<Rgb16Image> for DynamicImage {
+ fn from(image: Rgb16Image) -> Self {
+ DynamicImage::ImageRgb16(image)
+ }
+}
+
+impl From<Rgba16Image> for DynamicImage {
+ fn from(image: Rgba16Image) -> Self {
+ DynamicImage::ImageRgba16(image)
+ }
+}
+
+impl From<Rgb32FImage> for DynamicImage {
+ fn from(image: Rgb32FImage) -> Self {
+ DynamicImage::ImageRgb32F(image)
+ }
+}
+
+impl From<Rgba32FImage> for DynamicImage {
+ fn from(image: Rgba32FImage) -> Self {
+ DynamicImage::ImageRgba32F(image)
+ }
+}
+
+impl From<ImageBuffer<Luma<f32>, Vec<f32>>> for DynamicImage {
+ fn from(image: ImageBuffer<Luma<f32>, Vec<f32>>) -> Self {
+ DynamicImage::ImageRgb32F(image.convert())
+ }
+}
+
+impl From<ImageBuffer<LumaA<f32>, Vec<f32>>> for DynamicImage {
+ fn from(image: ImageBuffer<LumaA<f32>, Vec<f32>>) -> Self {
+ DynamicImage::ImageRgba32F(image.convert())
+ }
+}
+
+#[allow(deprecated)]
+impl GenericImageView for DynamicImage {
+ type Pixel = color::Rgba<u8>; // TODO use f32 as default for best precision and unbounded color?
+
+ fn dimensions(&self) -> (u32, u32) {
+ dynamic_map!(*self, |ref p| p.dimensions())
+ }
+
+ fn bounds(&self) -> (u32, u32, u32, u32) {
+ dynamic_map!(*self, |ref p| p.bounds())
+ }
+
+ fn get_pixel(&self, x: u32, y: u32) -> color::Rgba<u8> {
+ dynamic_map!(*self, |ref p| p.get_pixel(x, y).to_rgba().into_color())
+ }
+}
+
+#[allow(deprecated)]
+impl GenericImage for DynamicImage {
+ fn put_pixel(&mut self, x: u32, y: u32, pixel: color::Rgba<u8>) {
+ match *self {
+ DynamicImage::ImageLuma8(ref mut p) => p.put_pixel(x, y, pixel.to_luma()),
+ DynamicImage::ImageLumaA8(ref mut p) => p.put_pixel(x, y, pixel.to_luma_alpha()),
+ DynamicImage::ImageRgb8(ref mut p) => p.put_pixel(x, y, pixel.to_rgb()),
+ DynamicImage::ImageRgba8(ref mut p) => p.put_pixel(x, y, pixel),
+ DynamicImage::ImageLuma16(ref mut p) => p.put_pixel(x, y, pixel.to_luma().into_color()),
+ DynamicImage::ImageLumaA16(ref mut p) => {
+ p.put_pixel(x, y, pixel.to_luma_alpha().into_color())
+ }
+ DynamicImage::ImageRgb16(ref mut p) => p.put_pixel(x, y, pixel.to_rgb().into_color()),
+ DynamicImage::ImageRgba16(ref mut p) => p.put_pixel(x, y, pixel.into_color()),
+ DynamicImage::ImageRgb32F(ref mut p) => p.put_pixel(x, y, pixel.to_rgb().into_color()),
+ DynamicImage::ImageRgba32F(ref mut p) => p.put_pixel(x, y, pixel.into_color()),
+ }
+ }
+
+ fn blend_pixel(&mut self, x: u32, y: u32, pixel: color::Rgba<u8>) {
+ match *self {
+ DynamicImage::ImageLuma8(ref mut p) => p.blend_pixel(x, y, pixel.to_luma()),
+ DynamicImage::ImageLumaA8(ref mut p) => p.blend_pixel(x, y, pixel.to_luma_alpha()),
+ DynamicImage::ImageRgb8(ref mut p) => p.blend_pixel(x, y, pixel.to_rgb()),
+ DynamicImage::ImageRgba8(ref mut p) => p.blend_pixel(x, y, pixel),
+ DynamicImage::ImageLuma16(ref mut p) => {
+ p.blend_pixel(x, y, pixel.to_luma().into_color())
+ }
+ DynamicImage::ImageLumaA16(ref mut p) => {
+ p.blend_pixel(x, y, pixel.to_luma_alpha().into_color())
+ }
+ DynamicImage::ImageRgb16(ref mut p) => p.blend_pixel(x, y, pixel.to_rgb().into_color()),
+ DynamicImage::ImageRgba16(ref mut p) => p.blend_pixel(x, y, pixel.into_color()),
+ DynamicImage::ImageRgb32F(ref mut p) => {
+ p.blend_pixel(x, y, pixel.to_rgb().into_color())
+ }
+ DynamicImage::ImageRgba32F(ref mut p) => p.blend_pixel(x, y, pixel.into_color()),
+ }
+ }
+
+ /// Do not use this function: it is unimplemented!
+ fn get_pixel_mut(&mut self, _: u32, _: u32) -> &mut color::Rgba<u8> {
+ unimplemented!()
+ }
+}
+
+impl Default for DynamicImage {
+ fn default() -> Self {
+ Self::ImageRgba8(Default::default())
+ }
+}
+
+/// Decodes an image and stores it into a dynamic image
+fn decoder_to_image<'a, I: ImageDecoder<'a>>(decoder: I) -> ImageResult<DynamicImage> {
+ let (w, h) = decoder.dimensions();
+ let color_type = decoder.color_type();
+
+ let image = match color_type {
+ color::ColorType::Rgb8 => {
+ let buf = image::decoder_to_vec(decoder)?;
+ ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageRgb8)
+ }
+
+ color::ColorType::Rgba8 => {
+ let buf = image::decoder_to_vec(decoder)?;
+ ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageRgba8)
+ }
+
+ color::ColorType::L8 => {
+ let buf = image::decoder_to_vec(decoder)?;
+ ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageLuma8)
+ }
+
+ color::ColorType::La8 => {
+ let buf = image::decoder_to_vec(decoder)?;
+ ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageLumaA8)
+ }
+
+ color::ColorType::Rgb16 => {
+ let buf = image::decoder_to_vec(decoder)?;
+ ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageRgb16)
+ }
+
+ color::ColorType::Rgba16 => {
+ let buf = image::decoder_to_vec(decoder)?;
+ ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageRgba16)
+ }
+
+ color::ColorType::Rgb32F => {
+ let buf = image::decoder_to_vec(decoder)?;
+ ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageRgb32F)
+ }
+
+ color::ColorType::Rgba32F => {
+ let buf = image::decoder_to_vec(decoder)?;
+ ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageRgba32F)
+ }
+
+ color::ColorType::L16 => {
+ let buf = image::decoder_to_vec(decoder)?;
+ ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageLuma16)
+ }
+
+ color::ColorType::La16 => {
+ let buf = image::decoder_to_vec(decoder)?;
+ ImageBuffer::from_raw(w, h, buf).map(DynamicImage::ImageLumaA16)
+ }
+ };
+
+ match image {
+ Some(image) => Ok(image),
+ None => Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ ))),
+ }
+}
+
+/// Open the image located at the path specified.
+/// The image's format is determined from the path's file extension.
+///
+ /// Try [`io::Reader`] for more advanced uses, including guessing the format from the file's
+ /// content before falling back to its path.
+///
+/// [`io::Reader`]: io/struct.Reader.html
+pub fn open<P>(path: P) -> ImageResult<DynamicImage>
+where
+ P: AsRef<Path>,
+{
+ // thin wrapper function to strip generics before calling open_impl
+ free_functions::open_impl(path.as_ref())
+}
+
+/// Read a tuple containing the (width, height) of the image located at the specified path.
+/// This is faster than fully loading the image and then getting its dimensions.
+///
+ /// Try [`io::Reader`] for more advanced uses, including guessing the format from the file's
+ /// content before falling back to its path, or manually supplying the format.
+///
+/// [`io::Reader`]: io/struct.Reader.html
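+ ///
+ /// A minimal sketch (the path is only an illustration):
+ ///
+ /// ```no_run
+ /// let (width, height) = image::image_dimensions("photo.jpg").unwrap();
+ /// println!("{}x{}", width, height);
+ /// ```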
+pub fn image_dimensions<P>(path: P) -> ImageResult<(u32, u32)>
+where
+ P: AsRef<Path>,
+{
+ // thin wrapper function to strip generics before calling open_impl
+ free_functions::image_dimensions_impl(path.as_ref())
+}
+
+/// Saves the supplied buffer to a file at the path specified.
+///
+/// The image format is derived from the file extension. The buffer is assumed to have
+/// the correct format according to the specified color type.
+///
+/// This will lead to corrupted files if the buffer contains malformed data. Currently only
+/// jpeg, png, ico, pnm, bmp, exr and tiff files are supported.
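+ ///
+ /// A minimal sketch (assumes the `png` feature and a writable path; the buffer holds a
+ /// single RGB8 pixel):
+ ///
+ /// ```no_run
+ /// use image::ColorType;
+ ///
+ /// let buf = [255u8, 0, 0]; // one red pixel
+ /// image::save_buffer("red.png", &buf, 1, 1, ColorType::Rgb8).unwrap();
+ /// ```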
+pub fn save_buffer<P>(
+ path: P,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color: color::ColorType,
+) -> ImageResult<()>
+where
+ P: AsRef<Path>,
+{
+ // thin wrapper function to strip generics before calling save_buffer_impl
+ free_functions::save_buffer_impl(path.as_ref(), buf, width, height, color)
+}
+
+/// Saves the supplied buffer to a file at the path specified
+/// in the specified format.
+///
+/// The buffer is assumed to have the correct format according
+/// to the specified color type.
+/// This will lead to corrupted files if the buffer contains
+/// malformed data. Currently only jpeg, png, ico, bmp, exr and
+/// tiff files are supported.
+pub fn save_buffer_with_format<P>(
+ path: P,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color: color::ColorType,
+ format: ImageFormat,
+) -> ImageResult<()>
+where
+ P: AsRef<Path>,
+{
+ // thin wrapper function to strip generics
+ free_functions::save_buffer_with_format_impl(path.as_ref(), buf, width, height, color, format)
+}
+
+/// Writes the supplied buffer to a writer in the specified format.
+///
+/// The buffer is assumed to have the correct format according
+/// to the specified color type.
+ /// This will lead to corrupted output if the buffer contains
+/// malformed data.
+///
+/// See [`ImageOutputFormat`](enum.ImageOutputFormat.html) for
+/// supported types.
+///
+/// Assumes the writer is buffered. In most cases,
+/// you should wrap your writer in a `BufWriter` for best performance.
+pub fn write_buffer_with_format<W, F>(
+ buffered_writer: &mut W,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color: color::ColorType,
+ format: F,
+) -> ImageResult<()>
+where
+ W: Write + Seek,
+ F: Into<ImageOutputFormat>,
+{
+ // thin wrapper function to strip generics
+ free_functions::write_buffer_impl(buffered_writer, buf, width, height, color, format.into())
+}
+
+/// Create a new image from a byte slice
+///
+/// Makes an educated guess about the image format.
+/// TGA is not supported by this function.
+///
+/// Try [`io::Reader`] for more advanced uses.
+///
+/// [`io::Reader`]: io/struct.Reader.html
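+ ///
+ /// A minimal sketch (read the bytes yourself, then let the format be guessed):
+ ///
+ /// ```no_run
+ /// let bytes = std::fs::read("photo.jpg").unwrap();
+ /// let img = image::load_from_memory(&bytes).unwrap();
+ /// println!("{}x{}", img.width(), img.height());
+ /// ```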
+pub fn load_from_memory(buffer: &[u8]) -> ImageResult<DynamicImage> {
+ let format = free_functions::guess_format(buffer)?;
+ load_from_memory_with_format(buffer, format)
+}
+
+/// Create a new image from a byte slice
+///
+/// This is just a simple wrapper that constructs an `std::io::Cursor` around the buffer and then
+/// calls `load` with that reader.
+///
+/// Try [`io::Reader`] for more advanced uses.
+///
+/// [`load`]: fn.load.html
+/// [`io::Reader`]: io/struct.Reader.html
+#[inline(always)]
+pub fn load_from_memory_with_format(buf: &[u8], format: ImageFormat) -> ImageResult<DynamicImage> {
+ let b = io::Cursor::new(buf);
+ free_functions::load(b, format)
+}
+
+#[cfg(test)]
+mod bench {
+ #[cfg(feature = "benchmarks")]
+ use test;
+
+ #[bench]
+ #[cfg(feature = "benchmarks")]
+ fn bench_conversion(b: &mut test::Bencher) {
+ let a = super::DynamicImage::ImageRgb8(crate::ImageBuffer::new(1000, 1000));
+ b.iter(|| a.to_luma8());
+ b.bytes = 1000 * 1000 * 3
+ }
+}
+
+#[cfg(test)]
+mod test {
+ #[test]
+ fn test_empty_file() {
+ assert!(super::load_from_memory(b"").is_err());
+ }
+
+ #[cfg(feature = "jpeg")]
+ #[test]
+ fn image_dimensions() {
+ let im_path = "./tests/images/jpg/progressive/cat.jpg";
+ let dims = super::image_dimensions(im_path).unwrap();
+ assert_eq!(dims, (320, 240));
+ }
+
+ #[cfg(feature = "png")]
+ #[test]
+ fn open_16bpc_png() {
+ let im_path = "./tests/images/png/16bpc/basn6a16.png";
+ let image = super::open(im_path).unwrap();
+ assert_eq!(image.color(), super::color::ColorType::Rgba16);
+ }
+
+ fn test_grayscale(mut img: super::DynamicImage, alpha_discarded: bool) {
+ use crate::image::{GenericImage, GenericImageView};
+ img.put_pixel(0, 0, crate::color::Rgba([255, 0, 0, 100]));
+ let expected_alpha = if alpha_discarded { 255 } else { 100 };
+ assert_eq!(
+ img.grayscale().get_pixel(0, 0),
+ crate::color::Rgba([54, 54, 54, expected_alpha])
+ );
+ }
+
+ fn test_grayscale_alpha_discarded(img: super::DynamicImage) {
+ test_grayscale(img, true);
+ }
+
+ fn test_grayscale_alpha_preserved(img: super::DynamicImage) {
+ test_grayscale(img, false);
+ }
+
+ #[test]
+ fn test_grayscale_luma8() {
+ test_grayscale_alpha_discarded(super::DynamicImage::new_luma8(1, 1));
+ }
+
+ #[test]
+ fn test_grayscale_luma_a8() {
+ test_grayscale_alpha_preserved(super::DynamicImage::new_luma_a8(1, 1));
+ }
+
+ #[test]
+ fn test_grayscale_rgb8() {
+ test_grayscale_alpha_discarded(super::DynamicImage::new_rgb8(1, 1));
+ }
+
+ #[test]
+ fn test_grayscale_rgba8() {
+ test_grayscale_alpha_preserved(super::DynamicImage::new_rgba8(1, 1));
+ }
+
+ #[test]
+ fn test_grayscale_luma16() {
+ test_grayscale_alpha_discarded(super::DynamicImage::new_luma16(1, 1));
+ }
+
+ #[test]
+ fn test_grayscale_luma_a16() {
+ test_grayscale_alpha_preserved(super::DynamicImage::new_luma_a16(1, 1));
+ }
+
+ #[test]
+ fn test_grayscale_rgb16() {
+ test_grayscale_alpha_discarded(super::DynamicImage::new_rgb16(1, 1));
+ }
+
+ #[test]
+ fn test_grayscale_rgba16() {
+ test_grayscale_alpha_preserved(super::DynamicImage::new_rgba16(1, 1));
+ }
+
+ #[test]
+ fn test_grayscale_rgb32f() {
+ test_grayscale_alpha_discarded(super::DynamicImage::new_rgb32f(1, 1));
+ }
+
+ #[test]
+ fn test_grayscale_rgba32f() {
+ test_grayscale_alpha_preserved(super::DynamicImage::new_rgba32f(1, 1));
+ }
+
+ #[test]
+ fn test_dynamic_image_default_implementation() {
+ // Test that structs wrapping a DynamicImage are able to auto-derive the Default trait
+ // ensures that DynamicImage implements Default (if it didn't, this would cause a compile error).
+ #[derive(Default)]
+ struct Foo {
+ _image: super::DynamicImage,
+ }
+ }
+
+ #[test]
+ fn test_to_vecu8() {
+ let _ = super::DynamicImage::new_luma8(1, 1).into_bytes();
+ let _ = super::DynamicImage::new_luma16(1, 1).into_bytes();
+ }
+
+ #[test]
+ fn issue_1705_can_turn_16bit_image_into_bytes() {
+ let pixels = vec![65535u16; 64 * 64];
+ let img = super::ImageBuffer::from_vec(64, 64, pixels).unwrap();
+
+ let img = super::DynamicImage::ImageLuma16(img.into());
+ assert!(img.as_luma16().is_some());
+
+ let bytes: Vec<u8> = img.into_bytes();
+ assert_eq!(bytes, vec![0xFF; 64 * 64 * 2]);
+ }
+}
diff --git a/vendor/image/src/error.rs b/vendor/image/src/error.rs
new file mode 100644
index 0000000..07ee275
--- /dev/null
+++ b/vendor/image/src/error.rs
@@ -0,0 +1,506 @@
+//! Contains detailed error representation.
+//!
+//! See the main [`ImageError`] which contains a variant for each specialized error type. The
+//! subtypes used in each variant are opaque by design. They can be roughly inspected through their
+ //! respective `kind` methods, which work similarly to `std::io::Error::kind`.
+//!
+//! The error interface makes it possible to inspect the error of an underlying decoder or encoder,
+//! through the `Error::source` method. Note that this is not part of the stable interface and you
+ //! may not rely on a particular error value for a particular operation. This mainly means that
+ //! `image` does not promise to stay on a particular version of its underlying decoders. However,
+ //! if you pin the same version of the dependency (or at least of the error type) through external
+ //! means, you can inspect the error type in slightly more detail.
+//!
+//! [`ImageError`]: enum.ImageError.html
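+ //!
+ //! A minimal sketch of such an inspection (assumes a decode that fails):
+ //!
+ //! ```no_run
+ //! use std::error::Error;
+ //!
+ //! if let Err(err) = image::open("not-an-image.bin") {
+ //!     // The concrete source type is not part of the stable interface.
+ //!     if let Some(source) = err.source() {
+ //!         println!("underlying error: {}", source);
+ //!     }
+ //! }
+ //! ```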
+
+use std::error::Error;
+use std::{fmt, io};
+
+use crate::color::ExtendedColorType;
+use crate::image::ImageFormat;
+
+/// The generic error type for image operations.
+///
+/// This high level enum allows, by variant matching, a rough separation of concerns between
+/// underlying IO, the caller, format specifications, and the `image` implementation.
+#[derive(Debug)]
+pub enum ImageError {
+ /// An error was encountered while decoding.
+ ///
+ /// This means that the input data did not conform to the specification of some image format,
+ /// or that no format could be determined, or that it did not match format specific
+ /// requirements set by the caller.
+ Decoding(DecodingError),
+
+ /// An error was encountered while encoding.
+ ///
+ /// The input image can not be encoded with the chosen format, for example because the
+ /// specification has no representation for its color space or because a necessary conversion
+ /// is ambiguous. In some cases it might also happen that the dimensions can not be used with
+ /// the format.
+ Encoding(EncodingError),
+
+ /// An error was encountered in input arguments.
+ ///
+ /// This is a catch-all case for strictly internal operations such as scaling, conversions,
+ /// etc. that involve no external format specifications.
+ Parameter(ParameterError),
+
+ /// Completing the operation would have required more resources than allowed.
+ ///
+ /// Errors of this type are limits set by the user or environment, *not* inherent in a specific
+ /// format or operation that was executed.
+ Limits(LimitError),
+
+ /// An operation can not be completed by the chosen abstraction.
+ ///
+ /// This means that it might be possible for the operation to succeed in general but
+ /// * it requires a disabled feature,
+ /// * the implementation does not yet exist, or
+ /// * no abstraction for a lower level could be found.
+ Unsupported(UnsupportedError),
+
+ /// An error occurred while interacting with the environment.
+ IoError(io::Error),
+}
+
+/// The implementation for an operation was not provided.
+///
+/// See the variant [`Unsupported`] for more documentation.
+///
+/// [`Unsupported`]: enum.ImageError.html#variant.Unsupported
+#[derive(Debug)]
+pub struct UnsupportedError {
+ format: ImageFormatHint,
+ kind: UnsupportedErrorKind,
+}
+
+/// Details what feature is not supported.
+#[derive(Clone, Debug, Hash, PartialEq)]
+#[non_exhaustive]
+pub enum UnsupportedErrorKind {
+ /// The required color type can not be handled.
+ Color(ExtendedColorType),
+ /// An image format is not supported.
+ Format(ImageFormatHint),
+ /// Some feature specified by string.
+ /// This is discouraged and is likely to get deprecated (but not removed).
+ GenericFeature(String),
+}
+
+/// An error was encountered while encoding an image.
+///
+/// This is used as an opaque representation for the [`ImageError::Encoding`] variant. See its
+/// documentation for more information.
+///
+/// [`ImageError::Encoding`]: enum.ImageError.html#variant.Encoding
+#[derive(Debug)]
+pub struct EncodingError {
+ format: ImageFormatHint,
+ underlying: Option<Box<dyn Error + Send + Sync>>,
+}
+
+ /// An error was encountered in input arguments.
+///
+/// This is used as an opaque representation for the [`ImageError::Parameter`] variant. See its
+/// documentation for more information.
+///
+/// [`ImageError::Parameter`]: enum.ImageError.html#variant.Parameter
+#[derive(Debug)]
+pub struct ParameterError {
+ kind: ParameterErrorKind,
+ underlying: Option<Box<dyn Error + Send + Sync>>,
+}
+
+/// Details how a parameter is malformed.
+#[derive(Clone, Debug, Hash, PartialEq)]
+#[non_exhaustive]
+pub enum ParameterErrorKind {
+ /// The dimensions passed are wrong.
+ DimensionMismatch,
+ /// Repeated an operation for which an error that could not be cloned was already emitted.
+ FailedAlready,
+ /// A string describing the parameter.
+ /// This is discouraged and is likely to get deprecated (but not removed).
+ Generic(String),
+ /// The end of the image has been reached.
+ NoMoreData,
+}
+
+/// An error was encountered while decoding an image.
+///
+/// This is used as an opaque representation for the [`ImageError::Decoding`] variant. See its
+/// documentation for more information.
+///
+/// [`ImageError::Decoding`]: enum.ImageError.html#variant.Decoding
+#[derive(Debug)]
+pub struct DecodingError {
+ format: ImageFormatHint,
+ underlying: Option<Box<dyn Error + Send + Sync>>,
+}
+
+/// Completing the operation would have required more resources than allowed.
+///
+/// This is used as an opaque representation for the [`ImageError::Limits`] variant. See its
+/// documentation for more information.
+///
+/// [`ImageError::Limits`]: enum.ImageError.html#variant.Limits
+#[derive(Debug)]
+pub struct LimitError {
+ kind: LimitErrorKind,
+ // do we need an underlying error?
+}
+
+/// Indicates the limit that prevented an operation from completing.
+///
+/// Note that this enumeration is not exhaustive and may in the future be extended to provide more
+ /// detailed information or to incorporate other resource types.
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+#[non_exhaustive]
+#[allow(missing_copy_implementations)] // Might be non-Copy in the future.
+pub enum LimitErrorKind {
+ /// The resulting image exceeds dimension limits in either direction.
+ DimensionError,
+ /// The operation would have performed an allocation larger than allowed.
+ InsufficientMemory,
+ /// The specified strict limits are not supported for this operation.
+ Unsupported {
+ /// The given limits
+ limits: crate::io::Limits,
+ /// The supported strict limits
+ supported: crate::io::LimitSupport,
+ },
+}
+
+/// A best effort representation for image formats.
+#[derive(Clone, Debug, Hash, PartialEq)]
+#[non_exhaustive]
+pub enum ImageFormatHint {
+ /// The format is known exactly.
+ Exact(ImageFormat),
+
+ /// The format can be identified by a name.
+ Name(String),
+
+ /// A common path extension for the format is known.
+ PathExtension(std::path::PathBuf),
+
+ /// The format is not known or could not be determined.
+ Unknown,
+}
+
+impl UnsupportedError {
+ /// Create an `UnsupportedError` for an image with details on the unsupported feature.
+ ///
+ /// If the operation was not connected to a particular image format then the hint may be
+ /// `Unknown`.
+ pub fn from_format_and_kind(format: ImageFormatHint, kind: UnsupportedErrorKind) -> Self {
+ UnsupportedError { format, kind }
+ }
+
+ /// Returns the corresponding `UnsupportedErrorKind` of the error.
+ pub fn kind(&self) -> UnsupportedErrorKind {
+ self.kind.clone()
+ }
+
+ /// Returns the image format associated with this error.
+ pub fn format_hint(&self) -> ImageFormatHint {
+ self.format.clone()
+ }
+}
+
+impl DecodingError {
+ /// Create a `DecodingError` that stems from an arbitrary error of an underlying decoder.
+ pub fn new(format: ImageFormatHint, err: impl Into<Box<dyn Error + Send + Sync>>) -> Self {
+ DecodingError {
+ format,
+ underlying: Some(err.into()),
+ }
+ }
+
+ /// Create a `DecodingError` for an image format.
+ ///
+ /// The error will not contain any further information but is very easy to create.
+ pub fn from_format_hint(format: ImageFormatHint) -> Self {
+ DecodingError {
+ format,
+ underlying: None,
+ }
+ }
+
+ /// Returns the image format associated with this error.
+ pub fn format_hint(&self) -> ImageFormatHint {
+ self.format.clone()
+ }
+}
+
+impl EncodingError {
+ /// Create an `EncodingError` that stems from an arbitrary error of an underlying encoder.
+ pub fn new(format: ImageFormatHint, err: impl Into<Box<dyn Error + Send + Sync>>) -> Self {
+ EncodingError {
+ format,
+ underlying: Some(err.into()),
+ }
+ }
+
+ /// Create an `EncodingError` for an image format.
+ ///
+ /// The error will not contain any further information but is very easy to create.
+ pub fn from_format_hint(format: ImageFormatHint) -> Self {
+ EncodingError {
+ format,
+ underlying: None,
+ }
+ }
+
+ /// Return the image format associated with this error.
+ pub fn format_hint(&self) -> ImageFormatHint {
+ self.format.clone()
+ }
+}
+
+impl ParameterError {
+ /// Construct a `ParameterError` directly from a corresponding kind.
+ pub fn from_kind(kind: ParameterErrorKind) -> Self {
+ ParameterError {
+ kind,
+ underlying: None,
+ }
+ }
+
+ /// Returns the corresponding `ParameterErrorKind` of the error.
+ pub fn kind(&self) -> ParameterErrorKind {
+ self.kind.clone()
+ }
+}
+
+impl LimitError {
+ /// Construct a generic `LimitError` directly from a corresponding kind.
+ pub fn from_kind(kind: LimitErrorKind) -> Self {
+ LimitError { kind }
+ }
+
+ /// Returns the corresponding `LimitErrorKind` of the error.
+ pub fn kind(&self) -> LimitErrorKind {
+ self.kind.clone()
+ }
+}
+
+impl From<io::Error> for ImageError {
+ fn from(err: io::Error) -> ImageError {
+ ImageError::IoError(err)
+ }
+}
+
+impl From<ImageFormat> for ImageFormatHint {
+ fn from(format: ImageFormat) -> Self {
+ ImageFormatHint::Exact(format)
+ }
+}
+
+impl From<&'_ std::path::Path> for ImageFormatHint {
+ fn from(path: &'_ std::path::Path) -> Self {
+ match path.extension() {
+ Some(ext) => ImageFormatHint::PathExtension(ext.into()),
+ None => ImageFormatHint::Unknown,
+ }
+ }
+}
+
+impl From<ImageFormatHint> for UnsupportedError {
+ fn from(hint: ImageFormatHint) -> Self {
+ UnsupportedError {
+ format: hint.clone(),
+ kind: UnsupportedErrorKind::Format(hint),
+ }
+ }
+}
+
+/// Result of an image decoding/encoding process
+pub type ImageResult<T> = Result<T, ImageError>;
+
+impl fmt::Display for ImageError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ match self {
+ ImageError::IoError(err) => err.fmt(fmt),
+ ImageError::Decoding(err) => err.fmt(fmt),
+ ImageError::Encoding(err) => err.fmt(fmt),
+ ImageError::Parameter(err) => err.fmt(fmt),
+ ImageError::Limits(err) => err.fmt(fmt),
+ ImageError::Unsupported(err) => err.fmt(fmt),
+ }
+ }
+}
+
+impl Error for ImageError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ match self {
+ ImageError::IoError(err) => err.source(),
+ ImageError::Decoding(err) => err.source(),
+ ImageError::Encoding(err) => err.source(),
+ ImageError::Parameter(err) => err.source(),
+ ImageError::Limits(err) => err.source(),
+ ImageError::Unsupported(err) => err.source(),
+ }
+ }
+}
+
+impl fmt::Display for UnsupportedError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ match &self.kind {
+ UnsupportedErrorKind::Format(ImageFormatHint::Unknown) => {
+ write!(fmt, "The image format could not be determined",)
+ }
+ UnsupportedErrorKind::Format(format @ ImageFormatHint::PathExtension(_)) => write!(
+ fmt,
+ "The file extension {} was not recognized as an image format",
+ format,
+ ),
+ UnsupportedErrorKind::Format(format) => {
+ write!(fmt, "The image format {} is not supported", format,)
+ }
+ UnsupportedErrorKind::Color(color) => write!(
+ fmt,
+ "The decoder for {} does not support the color type `{:?}`",
+ self.format, color,
+ ),
+ UnsupportedErrorKind::GenericFeature(message) => match &self.format {
+ ImageFormatHint::Unknown => write!(
+ fmt,
+ "The decoder does not support the format feature {}",
+ message,
+ ),
+ other => write!(
+ fmt,
+ "The decoder for {} does not support the format features {}",
+ other, message,
+ ),
+ },
+ }
+ }
+}
+
+impl Error for UnsupportedError {}
+
+impl fmt::Display for ParameterError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ match &self.kind {
+ ParameterErrorKind::DimensionMismatch => write!(
+ fmt,
+ "The Image's dimensions are either too \
+ small or too large"
+ ),
+ ParameterErrorKind::FailedAlready => write!(
+ fmt,
+ "The end the image stream has been reached due to a previous error"
+ ),
+ ParameterErrorKind::Generic(message) => {
+ write!(fmt, "The parameter is malformed: {}", message,)
+ }
+ ParameterErrorKind::NoMoreData => write!(fmt, "The end of the image has been reached",),
+ }?;
+
+ if let Some(underlying) = &self.underlying {
+ write!(fmt, "\n{}", underlying)?;
+ }
+
+ Ok(())
+ }
+}
+
+impl Error for ParameterError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ match &self.underlying {
+ None => None,
+ Some(source) => Some(&**source),
+ }
+ }
+}
+
+impl fmt::Display for EncodingError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ match &self.underlying {
+ Some(underlying) => write!(
+ fmt,
+ "Format error encoding {}:\n{}",
+ self.format, underlying,
+ ),
+ None => write!(fmt, "Format error encoding {}", self.format,),
+ }
+ }
+}
+
+impl Error for EncodingError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ match &self.underlying {
+ None => None,
+ Some(source) => Some(&**source),
+ }
+ }
+}
+
+impl fmt::Display for DecodingError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ match &self.underlying {
+ None => match self.format {
+ ImageFormatHint::Unknown => write!(fmt, "Format error"),
+ _ => write!(fmt, "Format error decoding {}", self.format),
+ },
+ Some(underlying) => {
+ write!(fmt, "Format error decoding {}: {}", self.format, underlying)
+ }
+ }
+ }
+}
+
+impl Error for DecodingError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ match &self.underlying {
+ None => None,
+ Some(source) => Some(&**source),
+ }
+ }
+}
+
+impl fmt::Display for LimitError {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ match self.kind {
+ LimitErrorKind::InsufficientMemory => write!(fmt, "Insufficient memory"),
+ LimitErrorKind::DimensionError => write!(fmt, "Image is too large"),
+ LimitErrorKind::Unsupported { .. } => {
+ write!(fmt, "The following strict limits are specified but not supported by the opertation: ")?;
+ Ok(())
+ }
+ }
+ }
+}
+
+impl Error for LimitError {}
+
+impl fmt::Display for ImageFormatHint {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ match self {
+ ImageFormatHint::Exact(format) => write!(fmt, "{:?}", format),
+ ImageFormatHint::Name(name) => write!(fmt, "`{}`", name),
+ ImageFormatHint::PathExtension(ext) => write!(fmt, "`.{:?}`", ext),
+ ImageFormatHint::Unknown => write!(fmt, "`Unknown`"),
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::mem;
+
+ #[allow(dead_code)]
+ // This will fail to compile if the size of this type is large.
+ const ASSERT_SMALLISH: usize = [0][(mem::size_of::<ImageError>() >= 200) as usize];
+
+ #[test]
+ fn test_send_sync_stability() {
+ fn assert_send_sync<T: Send + Sync>() {}
+
+ assert_send_sync::<ImageError>();
+ }
+}
diff --git a/vendor/image/src/flat.rs b/vendor/image/src/flat.rs
new file mode 100644
index 0000000..24a14d1
--- /dev/null
+++ b/vendor/image/src/flat.rs
@@ -0,0 +1,1735 @@
+//! Image representations for ffi.
+//!
+//! # Usage
+//!
+//! Imagine you want to offer a very simple ffi interface: The caller provides an image buffer and
+//! your program creates a thumbnail from it and dumps that image as `png`. This module is designed
+//! to help you transition from raw memory data to Rust representation.
+//!
+//! ```no_run
+//! use std::ptr;
+//! use std::slice;
+//! use image::Rgb;
+//! use image::flat::{FlatSamples, SampleLayout};
+//! use image::imageops::thumbnail;
+//!
+//! #[no_mangle]
+//! pub extern "C" fn store_rgb8_compressed(
+//! data: *const u8, len: usize,
+//! layout: *const SampleLayout
+//! )
+//! -> bool
+//! {
+//! let samples = unsafe { slice::from_raw_parts(data, len) };
+//! let layout = unsafe { ptr::read(layout) };
+//!
+//! let buffer = FlatSamples {
+//! samples,
+//! layout,
+//! color_hint: None,
+//! };
+//!
+//! let view = match buffer.as_view::<Rgb<u8>>() {
+//! Err(_) => return false, // Invalid layout.
+//! Ok(view) => view,
+//! };
+//!
+//! thumbnail(&view, 64, 64)
+//! .save("output.png")
+//! .map(|_| true)
+//! .unwrap_or_else(|_| false)
+//! }
+//! ```
+//!
+use std::marker::PhantomData;
+use std::ops::{Deref, Index, IndexMut};
+use std::{cmp, error, fmt};
+
+use num_traits::Zero;
+
+use crate::color::ColorType;
+use crate::error::{
+ DecodingError, ImageError, ImageFormatHint, ParameterError, ParameterErrorKind,
+ UnsupportedError, UnsupportedErrorKind,
+};
+use crate::image::{GenericImage, GenericImageView};
+use crate::traits::Pixel;
+use crate::ImageBuffer;
+
+/// A flat buffer over a (multi channel) image.
+///
+/// In contrast to `ImageBuffer`, this representation of a sample collection is much more lenient
+/// in the layout thereof. It also allows grouping by color planes instead of by pixel as long as
+/// the strides of each extent are constant. This struct itself has no invariants on the strides
+/// but not every possible configuration can be interpreted as a [`GenericImageView`] or
+/// [`GenericImage`]. The methods [`as_view`] and [`as_view_mut`] construct the actual implementors
+/// of these traits and perform necessary checks. To manually perform this and other layout checks
+/// use [`is_normal`] or [`has_aliased_samples`].
+///
+/// Instances can be constructed not only by hand. The buffer instances returned by library
+/// functions such as [`ImageBuffer::as_flat_samples`] guarantee that the conversion to a generic
+ /// image or generic view succeeds. A very different constructor is [`with_monocolor`]. It uses a
+ /// single pixel as the backing storage for an arbitrarily sized read-only raster, mapping each
+ /// pixel to the same samples by setting some strides to `0`.
+///
+/// [`GenericImage`]: ../trait.GenericImage.html
+/// [`GenericImageView`]: ../trait.GenericImageView.html
+/// [`ImageBuffer::as_flat_samples`]: ../struct.ImageBuffer.html#method.as_flat_samples
+/// [`is_normal`]: #method.is_normal
+/// [`has_aliased_samples`]: #method.has_aliased_samples
+/// [`as_view`]: #method.as_view
+/// [`as_view_mut`]: #method.as_view_mut
+/// [`with_monocolor`]: #method.with_monocolor
+#[derive(Clone, Debug)]
+pub struct FlatSamples<Buffer> {
+ /// Underlying linear container holding sample values.
+ pub samples: Buffer,
+
+ /// A `repr(C)` description of the layout of buffer samples.
+ pub layout: SampleLayout,
+
+ /// Supplementary color information.
+ ///
+ /// You may keep this as `None` in most cases. This is NOT checked in `View` or other
+ /// converters. It is intended mainly as a way for types that convert to this buffer type to
+ /// attach their otherwise static color information. A dynamic image representation could
+ /// however use this to resolve representational ambiguities such as the order of RGB channels.
+ pub color_hint: Option<ColorType>,
+}
+
+/// A ffi compatible description of a sample buffer.
+#[repr(C)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct SampleLayout {
+ /// The number of channels in the color representation of the image.
+ pub channels: u8,
+
+ /// Add this to an index to get to the sample in the next channel.
+ pub channel_stride: usize,
+
+ /// The width of the represented image.
+ pub width: u32,
+
+ /// Add this to an index to get to the next sample in x-direction.
+ pub width_stride: usize,
+
+ /// The height of the represented image.
+ pub height: u32,
+
+ /// Add this to an index to get to the next sample in y-direction.
+ pub height_stride: usize,
+}
+
+/// Helper struct for an unnamed (stride, length) pair.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+struct Dim(usize, usize);
+
+impl SampleLayout {
+ /// Describe a row-major image packed in all directions.
+ ///
+ /// The resulting layout will surely be `NormalForm::RowMajorPacked`. It can therefore be
+ /// converted safely to an `ImageBuffer` with a large enough underlying buffer.
+ ///
+ /// ```
+ /// # use image::flat::{NormalForm, SampleLayout};
+ /// let layout = SampleLayout::row_major_packed(3, 640, 480);
+ /// assert!(layout.is_normal(NormalForm::RowMajorPacked));
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// On platforms where `usize` has the same size as `u32`, this panics when the resulting stride
+ /// in the `height` direction would be larger than `usize::max_value()`. On other platforms,
+ /// where it can surely accommodate `u8::max_value() * u32::max_value()`, this can never happen.
+ pub fn row_major_packed(channels: u8, width: u32, height: u32) -> Self {
+ let height_stride = (channels as usize).checked_mul(width as usize).expect(
+ "Row major packed image can not be described because it does not fit into memory",
+ );
+ SampleLayout {
+ channels,
+ channel_stride: 1,
+ width,
+ width_stride: channels as usize,
+ height,
+ height_stride,
+ }
+ }
+
+ /// Describe a column-major image packed in all directions.
+ ///
+ /// The resulting layout will surely be `NormalForm::ColumnMajorPacked`. This is not particularly
+ /// useful for conversion but can be used to describe such a buffer without pitfalls.
+ ///
+ /// ```
+ /// # use image::flat::{NormalForm, SampleLayout};
+ /// let layout = SampleLayout::column_major_packed(3, 640, 480);
+ /// assert!(layout.is_normal(NormalForm::ColumnMajorPacked));
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// On platforms where `usize` has the same size as `u32`, this panics when the resulting stride
+ /// in the `width` direction would be larger than `usize::max_value()`. On other platforms,
+ /// where it can surely accommodate `u8::max_value() * u32::max_value()`, this can never happen.
+ pub fn column_major_packed(channels: u8, width: u32, height: u32) -> Self {
+ let width_stride = (channels as usize).checked_mul(height as usize).expect(
+ "Column major packed image can not be described because it does not fit into memory",
+ );
+ SampleLayout {
+ channels,
+ channel_stride: 1,
+ height,
+ height_stride: channels as usize,
+ width,
+ width_stride,
+ }
+ }
+
+ /// Get the strides for indexing matrix-like `[(c, w, h)]`.
+ ///
+ /// For a row-major layout with grouped samples, this tuple is strictly
+ /// increasing.
+ pub fn strides_cwh(&self) -> (usize, usize, usize) {
+ (self.channel_stride, self.width_stride, self.height_stride)
+ }
+
+ /// Get the dimensions `(channels, width, height)`.
+ ///
+ /// The interface is optimized for use with `strides_cwh` instead. The channel extent will be
+ /// before width and height.
+ pub fn extents(&self) -> (usize, usize, usize) {
+ (
+ self.channels as usize,
+ self.width as usize,
+ self.height as usize,
+ )
+ }
+
+ /// Tuple of bounds in the order of coordinate inputs.
+ ///
+ /// This function should be used whenever working with image coordinates as opposed to buffer
+ /// coordinates. The only difference compared to `extents` is the output type.
+ pub fn bounds(&self) -> (u8, u32, u32) {
+ (self.channels, self.width, self.height)
+ }
+
+ /// Get the minimum length of a buffer such that all in-bounds samples have valid indices.
+ ///
+ /// This method allows zero strides, making compact representations of monochrome images possible.
+ /// To check that no aliasing occurs, try `has_aliased_samples`. For compact images (no
+ /// aliasing and no unindexed samples) this is `width*height*channels`. But for both of the
+ /// other cases, the reasoning is slightly more involved.
+ ///
+ /// # Explanation
+ ///
+ /// Note that there is a difference between `min_length` and the index of the sample
+ /// `one-past-the-end`. This is due to strides that may be larger than the dimension below.
+ ///
+ /// ## Example with holes
+ ///
+ /// Let's look at an example of a grayscale image with
+ /// * `width_stride = 1`
+ /// * `width = 2`
+ /// * `height_stride = 3`
+ /// * `height = 2`
+ ///
+ /// ```text
+ /// | x x | x x m | $
+ /// min_length m ^
+ /// ^ one-past-the-end $
+ /// ```
+ ///
+ /// The difference is also extreme for empty images with large strides. The one-past-the-end
+ /// sample index is still as large as the largest of these strides while `min_length = 0`.
+ ///
+ /// ## Example with aliasing
+ ///
+ /// The concept gets even more important when you allow samples to alias each other. Here we
+ /// have the buffer of a small grayscale image where this is the case, this time we will first
+ /// show the buffer and then the individual rows below.
+ ///
+ /// * `width_stride = 1`
+ /// * `width = 3`
+ /// * `height_stride = 2`
+ /// * `height = 2`
+ ///
+ /// ```text
+ /// 1 2 3 4 5 m
+ /// |1 2 3| row one
+ /// |3 4 5| row two
+ /// ^ m min_length
+ /// ^ ??? one-past-the-end
+ /// ```
+ ///
+ /// This time 'one-past-the-end' is not even simply the largest stride times the extent of its
+ /// dimension. That still points inside the image because `height*height_stride = 4` but also
+ /// `index_of(1, 2) = 4`.
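+ ///
+ /// ## Sketch in code
+ ///
+ /// A small sketch of the aliasing example above, assuming a single channel with
+ /// `channel_stride = 1`:
+ ///
+ /// ```
+ /// # use image::flat::SampleLayout;
+ /// let layout = SampleLayout {
+ /// channels: 1,
+ /// channel_stride: 1,
+ /// width: 3,
+ /// width_stride: 1,
+ /// height: 2,
+ /// height_stride: 2,
+ /// };
+ /// // The last in-bounds sample sits at index 4, hence 5 samples are required.
+ /// assert_eq!(layout.min_length(), Some(5));
+ /// ```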
+ pub fn min_length(&self) -> Option<usize> {
+ if self.width == 0 || self.height == 0 || self.channels == 0 {
+ return Some(0);
+ }
+
+ self.index(self.channels - 1, self.width - 1, self.height - 1)
+ .and_then(|idx| idx.checked_add(1))
+ }
+
+ /// Check if a buffer of length `len` is large enough.
+ pub fn fits(&self, len: usize) -> bool {
+ self.min_length().map(|min| len >= min).unwrap_or(false)
+ }
+
+ /// The extents of this array, in order of increasing strides.
+ fn increasing_stride_dims(&self) -> [Dim; 3] {
+ // Order extents by strides, then check that each is less equal than the next stride.
+ let mut grouped: [Dim; 3] = [
+ Dim(self.channel_stride, self.channels as usize),
+ Dim(self.width_stride, self.width as usize),
+ Dim(self.height_stride, self.height as usize),
+ ];
+
+ grouped.sort();
+
+ let (min_dim, mid_dim, max_dim) = (grouped[0], grouped[1], grouped[2]);
+ assert!(min_dim.stride() <= mid_dim.stride() && mid_dim.stride() <= max_dim.stride());
+
+ grouped
+ }
+
+ /// If there are any samples aliasing each other.
+ ///
+ /// If this is not the case, it would always be safe to allow mutable access to two different
+ /// samples at the same time. Otherwise, this operation would need additional checks. When one
+ /// dimension overflows `usize` with its stride we also consider this aliasing.
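+ ///
+ /// As a small sketch, the aliasing example from the `min_length` documentation is detected,
+ /// while a packed row-major layout is not (the former again assumes a single channel with
+ /// `channel_stride = 1`):
+ ///
+ /// ```
+ /// # use image::flat::SampleLayout;
+ /// let aliasing = SampleLayout {
+ /// channels: 1,
+ /// channel_stride: 1,
+ /// width: 3,
+ /// width_stride: 1,
+ /// height: 2,
+ /// height_stride: 2,
+ /// };
+ /// assert!(aliasing.has_aliased_samples());
+ /// assert!(!SampleLayout::row_major_packed(3, 640, 480).has_aliased_samples());
+ /// ```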
+ pub fn has_aliased_samples(&self) -> bool {
+ let grouped = self.increasing_stride_dims();
+ let (min_dim, mid_dim, max_dim) = (grouped[0], grouped[1], grouped[2]);
+
+ let min_size = match min_dim.checked_len() {
+ None => return true,
+ Some(size) => size,
+ };
+
+ let mid_size = match mid_dim.checked_len() {
+ None => return true,
+ Some(size) => size,
+ };
+
+ match max_dim.checked_len() {
+ None => return true,
+ Some(_) => (), // Only want to know this didn't overflow.
+ };
+
+ // Each higher dimension must walk over all of one lower dimension.
+ min_size > mid_dim.stride() || mid_size > max_dim.stride()
+ }
+
+ /// Check if a buffer fulfills the requirements of a normal form.
+ ///
+ /// Certain conversions have preconditions on the structure of the sample buffer that are not
+ /// captured (by design) by the type system. These are then checked before the conversion. Such
+ /// checks can all be done in constant time and will not inspect the buffer content. You can
+ /// perform these checks yourself when the conversion is not required at this moment but maybe
+ /// still performed later.
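+ ///
+ /// For instance, a planar layout (one plane per channel) is packed as an image but its pixels
+ /// are not packed; the numbers below are only illustrative:
+ ///
+ /// ```
+ /// # use image::flat::{NormalForm, SampleLayout};
+ /// let planar = SampleLayout {
+ /// channels: 2,
+ /// channel_stride: 8,
+ /// width: 4,
+ /// width_stride: 1,
+ /// height: 2,
+ /// height_stride: 4,
+ /// };
+ /// assert!(planar.is_normal(NormalForm::ImagePacked));
+ /// assert!(!planar.is_normal(NormalForm::PixelPacked));
+ /// ```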
+ pub fn is_normal(&self, form: NormalForm) -> bool {
+ if self.has_aliased_samples() {
+ return false;
+ }
+
+ if form >= NormalForm::PixelPacked && self.channel_stride != 1 {
+ return false;
+ }
+
+ if form >= NormalForm::ImagePacked {
+ // `has_aliased_samples` already checked for overflows.
+ let grouped = self.increasing_stride_dims();
+ let (min_dim, mid_dim, max_dim) = (grouped[0], grouped[1], grouped[2]);
+
+ if 1 != min_dim.stride() {
+ return false;
+ }
+
+ if min_dim.len() != mid_dim.stride() {
+ return false;
+ }
+
+ if mid_dim.len() != max_dim.stride() {
+ return false;
+ }
+ }
+
+ if form >= NormalForm::RowMajorPacked {
+ if self.width_stride != self.channels as usize {
+ return false;
+ }
+
+ if self.width as usize * self.width_stride != self.height_stride {
+ return false;
+ }
+ }
+
+ if form >= NormalForm::ColumnMajorPacked {
+ if self.height_stride != self.channels as usize {
+ return false;
+ }
+
+ if self.height as usize * self.height_stride != self.width_stride {
+ return false;
+ }
+ }
+
+ true
+ }
+
+ /// Check that the pixel and the channel index are in bounds.
+ ///
+ /// An in-bound coordinate does not yet guarantee that the corresponding calculation of a
+ /// buffer index does not overflow. However, if a buffer large enough to hold all samples
+ /// actually exists in memory, this property of course follows.
+ pub fn in_bounds(&self, channel: u8, x: u32, y: u32) -> bool {
+ channel < self.channels && x < self.width && y < self.height
+ }
+
+ /// Resolve the index of a particular sample.
+ ///
+ /// `None` if the index is outside the bounds or does not fit into a `usize`.
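+ ///
+ /// For example, with the packed row-major layout from above (strides `1`, `3` and `3 * 640`):
+ ///
+ /// ```
+ /// # use image::flat::SampleLayout;
+ /// let layout = SampleLayout::row_major_packed(3, 640, 480);
+ /// assert_eq!(layout.index(0, 0, 0), Some(0));
+ /// assert_eq!(layout.index(2, 10, 10), Some(10 * 1920 + 10 * 3 + 2));
+ /// // The channel index is out of bounds.
+ /// assert_eq!(layout.index(3, 0, 0), None);
+ /// ```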
+ pub fn index(&self, channel: u8, x: u32, y: u32) -> Option<usize> {
+ if !self.in_bounds(channel, x, y) {
+ return None;
+ }
+
+ self.index_ignoring_bounds(channel as usize, x as usize, y as usize)
+ }
+
+ /// Get the theoretical position of sample (channel, x, y).
+ ///
+ /// The 'check' is for overflow during index calculation, not that it is contained in the
+ /// image. Two samples may return the same index, even when one of them is out of bounds. This
+ /// happens when all strides are `0`, i.e. the image is an arbitrarily large monochrome image.
+ pub fn index_ignoring_bounds(&self, channel: usize, x: usize, y: usize) -> Option<usize> {
+ let idx_c = channel.checked_mul(self.channel_stride);
+ let idx_x = x.checked_mul(self.width_stride);
+ let idx_y = y.checked_mul(self.height_stride);
+
+ let (idx_c, idx_x, idx_y) = match (idx_c, idx_x, idx_y) {
+ (Some(idx_c), Some(idx_x), Some(idx_y)) => (idx_c, idx_x, idx_y),
+ _ => return None,
+ };
+
+ Some(0usize)
+ .and_then(|b| b.checked_add(idx_c))
+ .and_then(|b| b.checked_add(idx_x))
+ .and_then(|b| b.checked_add(idx_y))
+ }
+
+ /// Get an index provided it is in bounds.
+ ///
+ /// Assumes that the image is backed by some sufficiently large buffer. Then computation can
+ /// not overflow as we could represent the maximum coordinate. Since overflow is defined either
+ /// way, this method can not be unsafe.
+ pub fn in_bounds_index(&self, c: u8, x: u32, y: u32) -> usize {
+ let (c_stride, x_stride, y_stride) = self.strides_cwh();
+ (y as usize * y_stride) + (x as usize * x_stride) + (c as usize * c_stride)
+ }
+
+ /// Shrink the image to the minimum of current and given extents.
+ ///
+ /// This does not modify the strides, so that the resulting sample buffer may have holes
+ /// created by the shrinking operation. Shrinking could also lead to a non-aliasing image when
+ /// samples had aliased each other before.
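+ ///
+ /// For example, shrinking a packed row-major layout keeps the original row pitch:
+ ///
+ /// ```
+ /// # use image::flat::SampleLayout;
+ /// let mut layout = SampleLayout::row_major_packed(3, 640, 480);
+ /// layout.shrink_to(3, 320, 480);
+ /// assert_eq!((layout.width, layout.height), (320, 480));
+ /// // Strides are untouched, rows are still 3 * 640 samples apart.
+ /// assert_eq!(layout.height_stride, 3 * 640);
+ /// ```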
+ pub fn shrink_to(&mut self, channels: u8, width: u32, height: u32) {
+ self.channels = self.channels.min(channels);
+ self.width = self.width.min(width);
+ self.height = self.height.min(height);
+ }
+}
+
+impl Dim {
+ fn stride(self) -> usize {
+ self.0
+ }
+
+ /// Length of this dimension in memory.
+ fn checked_len(self) -> Option<usize> {
+ self.0.checked_mul(self.1)
+ }
+
+ fn len(self) -> usize {
+ self.0 * self.1
+ }
+}
+
+impl<Buffer> FlatSamples<Buffer> {
+ /// Get the strides for indexing matrix-like `[(c, w, h)]`.
+ ///
+ /// For a row-major layout with grouped samples, this tuple is strictly
+ /// increasing.
+ pub fn strides_cwh(&self) -> (usize, usize, usize) {
+ self.layout.strides_cwh()
+ }
+
+ /// Get the dimensions `(channels, width, height)`.
+ ///
+ /// The interface is optimized for use with `strides_cwh` instead. The channel extent will be
+ /// before width and height.
+ pub fn extents(&self) -> (usize, usize, usize) {
+ self.layout.extents()
+ }
+
+ /// Tuple of bounds in the order of coordinate inputs.
+ ///
+ /// This function should be used whenever working with image coordinates as opposed to buffer
+ /// coordinates. The only difference compared to `extents` is the output type.
+ pub fn bounds(&self) -> (u8, u32, u32) {
+ self.layout.bounds()
+ }
+
+ /// Get a reference based version.
+ pub fn as_ref<T>(&self) -> FlatSamples<&[T]>
+ where
+ Buffer: AsRef<[T]>,
+ {
+ FlatSamples {
+ samples: self.samples.as_ref(),
+ layout: self.layout,
+ color_hint: self.color_hint,
+ }
+ }
+
+ /// Get a mutable reference based version.
+ pub fn as_mut<T>(&mut self) -> FlatSamples<&mut [T]>
+ where
+ Buffer: AsMut<[T]>,
+ {
+ FlatSamples {
+ samples: self.samples.as_mut(),
+ layout: self.layout,
+ color_hint: self.color_hint,
+ }
+ }
+
+ /// Copy the data into an owned vector.
+ pub fn to_vec<T>(&self) -> FlatSamples<Vec<T>>
+ where
+ T: Clone,
+ Buffer: AsRef<[T]>,
+ {
+ FlatSamples {
+ samples: self.samples.as_ref().to_vec(),
+ layout: self.layout,
+ color_hint: self.color_hint,
+ }
+ }
+
+ /// Get a reference to a single sample.
+ ///
+ /// This is more restrictive than the method based on `std::ops::Index` but guarantees to properly
+ /// check all bounds and not panic as long as `Buffer::as_ref` does not do so.
+ ///
+ /// ```
+ /// # use image::{RgbImage};
+ /// let flat = RgbImage::new(480, 640).into_flat_samples();
+ ///
+ /// // Get the blue channel at (10, 10).
+ /// assert!(flat.get_sample(1, 10, 10).is_some());
+ ///
+ /// // There is no alpha channel.
+ /// assert!(flat.get_sample(3, 10, 10).is_none());
+ /// ```
+ ///
+ /// For cases where a special buffer does not provide `AsRef<[T]>`, consider encapsulating
+ /// bounds checks with `min_length` in a type similar to `View`. Then you may use
+ /// `in_bounds_index` as a small speedup over the index calculation of this method which relies
+ /// on `index_ignoring_bounds` since it can not have a-priori knowledge that the sample
+ /// coordinate is in fact backed by any memory buffer.
+ pub fn get_sample<T>(&self, channel: u8, x: u32, y: u32) -> Option<&T>
+ where
+ Buffer: AsRef<[T]>,
+ {
+ self.index(channel, x, y)
+ .and_then(|idx| self.samples.as_ref().get(idx))
+ }
+
+ /// Get a mutable reference to a single sample.
+ ///
+ /// This is more restrictive than the method based on `std::ops::IndexMut` but guarantees to
+ /// properly check all bounds and not panic as long as `Buffer::as_mut` does not do so.
+ /// Contrary to conversion to `ViewMut`, this does not require that samples are packed since it
+ /// does not need to convert samples to a color representation.
+ ///
+ /// **WARNING**: Note that of course samples may alias, so that the mutable reference returned
+ /// here can in fact modify more than the coordinate in the argument.
+ ///
+ /// ```
+ /// # use image::{RgbImage};
+ /// let mut flat = RgbImage::new(480, 640).into_flat_samples();
+ ///
+ /// // Assign some new color to the blue channel at (10, 10).
+ /// *flat.get_mut_sample(1, 10, 10).unwrap() = 255;
+ ///
+ /// // There is no alpha channel.
+ /// assert!(flat.get_mut_sample(3, 10, 10).is_none());
+ /// ```
+ ///
+ /// For cases where a special buffer does not provide `AsMut<[T]>`, consider encapsulating
+ /// bounds checks with `min_length` in a type similar to `View`. Then you may use
+ /// `in_bounds_index` as a small speedup over the index calculation of this method which relies
+ /// on `index_ignoring_bounds` since it can not have a-priori knowledge that the sample
+ /// coordinate is in fact backed by any memory buffer.
+ pub fn get_mut_sample<T>(&mut self, channel: u8, x: u32, y: u32) -> Option<&mut T>
+ where
+ Buffer: AsMut<[T]>,
+ {
+ match self.index(channel, x, y) {
+ None => None,
+ Some(idx) => self.samples.as_mut().get_mut(idx),
+ }
+ }
+
+ /// View this buffer as an image over some type of pixel.
+ ///
+ /// This first ensures that all in-bounds coordinates refer to valid indices in the sample
+ /// buffer. It also checks that the specified pixel format expects the same number of channels
+ /// as are present in this buffer. Neither a larger nor a smaller number will be accepted.
+ /// There is no automatic conversion.
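+ ///
+ /// For example, a packed `RgbImage` converts to a view over `Rgb<u8>` but not over a pixel
+ /// type with a different channel count:
+ ///
+ /// ```
+ /// # use image::{LumaA, Rgb, RgbImage};
+ /// let flat = RgbImage::new(10, 10).into_flat_samples();
+ /// assert!(flat.as_view::<Rgb<u8>>().is_ok());
+ /// assert!(flat.as_view::<LumaA<u8>>().is_err());
+ /// ```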
+ pub fn as_view<P>(&self) -> Result<View<&[P::Subpixel], P>, Error>
+ where
+ P: Pixel,
+ Buffer: AsRef<[P::Subpixel]>,
+ {
+ if self.layout.channels != P::CHANNEL_COUNT {
+ return Err(Error::ChannelCountMismatch(
+ self.layout.channels,
+ P::CHANNEL_COUNT,
+ ));
+ }
+
+ let as_ref = self.samples.as_ref();
+ if !self.layout.fits(as_ref.len()) {
+ return Err(Error::TooLarge);
+ }
+
+ Ok(View {
+ inner: FlatSamples {
+ samples: as_ref,
+ layout: self.layout,
+ color_hint: self.color_hint,
+ },
+ phantom: PhantomData,
+ })
+ }
+
+ /// View this buffer but keep mutability at a sample level.
+ ///
+ /// This is similar to `as_view` but subtly different from `as_view_mut`. The resulting type
+ /// can be used as a `GenericImage` with the same prior invariants needed as for `as_view`.
+ /// It can not be used as a mutable `GenericImage` but does not need channels to be packed in
+ /// their pixel representation.
+ ///
+ /// This first ensures that all in-bounds coordinates refer to valid indices in the sample
+ /// buffer. It also checks that the specified pixel format expects the same number of channels
+ /// as are present in this buffer. Neither a larger nor a smaller number will be accepted.
+ /// There is no automatic conversion.
+ ///
+ /// **WARNING**: Note that of course samples may alias, so that the mutable reference returned
+ /// for one sample can in fact modify other samples as well. Sometimes exactly this is
+ /// intended.
+ pub fn as_view_with_mut_samples<P>(&mut self) -> Result<View<&mut [P::Subpixel], P>, Error>
+ where
+ P: Pixel,
+ Buffer: AsMut<[P::Subpixel]>,
+ {
+ if self.layout.channels != P::CHANNEL_COUNT {
+ return Err(Error::ChannelCountMismatch(
+ self.layout.channels,
+ P::CHANNEL_COUNT,
+ ));
+ }
+
+ let as_mut = self.samples.as_mut();
+ if !self.layout.fits(as_mut.len()) {
+ return Err(Error::TooLarge);
+ }
+
+ Ok(View {
+ inner: FlatSamples {
+ samples: as_mut,
+ layout: self.layout,
+ color_hint: self.color_hint,
+ },
+ phantom: PhantomData,
+ })
+ }
+
+ /// Interpret this buffer as a mutable image.
+ ///
+ /// To succeed, the pixels in this buffer may not alias each other and the samples of each
+ /// pixel must be packed (i.e. `channel_stride` is `1`). The number of channels must be
+ /// consistent with the channel count expected by the pixel format.
+ ///
+ /// This is similar to an `ImageBuffer` except it is a temporary view that is not normalized as
+ /// strongly. To get an owning version, consider copying the data into an `ImageBuffer`. This
+ /// provides many more operations, is possibly faster (if not, you may want to open an issue) and
+ /// is generally more polished. You can also try to convert this buffer inline, see
+ /// `ImageBuffer::from_raw`.
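+ ///
+ /// A short sketch, writing a pixel through the mutable view of a packed `RgbImage` buffer:
+ ///
+ /// ```
+ /// # use image::{GenericImage, Rgb, RgbImage};
+ /// let mut flat = RgbImage::new(8, 8).into_flat_samples();
+ /// let mut view = flat.as_view_mut::<Rgb<u8>>().unwrap();
+ /// view.put_pixel(0, 0, Rgb([255, 0, 0]));
+ /// assert_eq!(flat.samples[..3], [255, 0, 0]);
+ /// ```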
+ pub fn as_view_mut<P>(&mut self) -> Result<ViewMut<&mut [P::Subpixel], P>, Error>
+ where
+ P: Pixel,
+ Buffer: AsMut<[P::Subpixel]>,
+ {
+ if !self.layout.is_normal(NormalForm::PixelPacked) {
+ return Err(Error::NormalFormRequired(NormalForm::PixelPacked));
+ }
+
+ if self.layout.channels != P::CHANNEL_COUNT {
+ return Err(Error::ChannelCountMismatch(
+ self.layout.channels,
+ P::CHANNEL_COUNT,
+ ));
+ }
+
+ let as_mut = self.samples.as_mut();
+ if !self.layout.fits(as_mut.len()) {
+ return Err(Error::TooLarge);
+ }
+
+ Ok(ViewMut {
+ inner: FlatSamples {
+ samples: as_mut,
+ layout: self.layout,
+ color_hint: self.color_hint,
+ },
+ phantom: PhantomData,
+ })
+ }
+
+ /// View the samples as a slice.
+ ///
+ /// The slice is not limited to the region of the image and not all sample indices are valid
+ /// indices into this buffer. See `image_slice` as an alternative.
+ pub fn as_slice<T>(&self) -> &[T]
+ where
+ Buffer: AsRef<[T]>,
+ {
+ self.samples.as_ref()
+ }
+
+ /// View the samples as a slice.
+ ///
+ /// The slice is not limited to the region of the image and not all sample indices are valid
+ /// indices into this buffer. See `image_mut_slice` as an alternative.
+ pub fn as_mut_slice<T>(&mut self) -> &mut [T]
+ where
+ Buffer: AsMut<[T]>,
+ {
+ self.samples.as_mut()
+ }
+
+ /// Return the portion of the buffer that holds sample values.
+ ///
+ /// This may fail when the coordinates in this image are either out-of-bounds of the underlying
+ /// buffer or can not be represented. Note that the slice may have holes that do not correspond
+ /// to any sample in the image represented by it.
+ pub fn image_slice<T>(&self) -> Option<&[T]>
+ where
+ Buffer: AsRef<[T]>,
+ {
+ let min_length = match self.min_length() {
+ None => return None,
+ Some(index) => index,
+ };
+
+ let slice = self.samples.as_ref();
+ if slice.len() < min_length {
+ return None;
+ }
+
+ Some(&slice[..min_length])
+ }
+
+ /// Mutable portion of the buffer that holds sample values.
+ pub fn image_mut_slice<T>(&mut self) -> Option<&mut [T]>
+ where
+ Buffer: AsMut<[T]>,
+ {
+ let min_length = match self.min_length() {
+ None => return None,
+ Some(index) => index,
+ };
+
+ let slice = self.samples.as_mut();
+ if slice.len() < min_length {
+ return None;
+ }
+
+ Some(&mut slice[..min_length])
+ }
+
+ /// Move the data into an image buffer.
+ ///
+ /// This does **not** convert the sample layout. The buffer needs to be in packed row-major form
+ /// before calling this function. In case of an error, returns the buffer again so that it does
+ /// not release any allocation.
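+ ///
+ /// For example, the buffer of a `GrayImage` is already packed and row-major, so the conversion
+ /// round-trips:
+ ///
+ /// ```
+ /// # use image::GrayImage;
+ /// let buffer = GrayImage::new(16, 16).into_flat_samples();
+ /// let image: GrayImage = buffer.try_into_buffer().unwrap();
+ /// assert_eq!(image.dimensions(), (16, 16));
+ /// ```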
+ pub fn try_into_buffer<P>(self) -> Result<ImageBuffer<P, Buffer>, (Error, Self)>
+ where
+ P: Pixel + 'static,
+ P::Subpixel: 'static,
+ Buffer: Deref<Target = [P::Subpixel]>,
+ {
+ if !self.is_normal(NormalForm::RowMajorPacked) {
+ return Err((Error::NormalFormRequired(NormalForm::RowMajorPacked), self));
+ }
+
+ if self.layout.channels != P::CHANNEL_COUNT {
+ return Err((
+ Error::ChannelCountMismatch(self.layout.channels, P::CHANNEL_COUNT),
+ self,
+ ));
+ }
+
+ if !self.fits(self.samples.deref().len()) {
+ return Err((Error::TooLarge, self));
+ }
+
+ Ok(
+ ImageBuffer::from_raw(self.layout.width, self.layout.height, self.samples)
+ .unwrap_or_else(|| {
+ panic!("Preconditions should have been ensured before conversion")
+ }),
+ )
+ }
+
+ /// Get the minimum length of a buffer such that all in-bounds samples have valid indices.
+ ///
+ /// This method allows zero strides, making compact representations of monochrome images possible.
+ /// To check that no aliasing occurs, try `has_aliased_samples`. For compact images (no
+ /// aliasing and no unindexed samples) this is `width*height*channels`. But for both of the
+ /// other cases, the reasoning is slightly more involved.
+ ///
+ /// # Explanation
+ ///
+ /// Note that there is a difference between `min_length` and the index of the sample
+ /// `one-past-the-end`. This is due to strides that may be larger than the dimension below.
+ ///
+ /// ## Example with holes
+ ///
+ /// Let's look at an example of a grayscale image with
+ /// * `width_stride = 1`
+ /// * `width = 2`
+ /// * `height_stride = 3`
+ /// * `height = 2`
+ ///
+ /// ```text
+ /// | x x | x x m | $
+ /// min_length m ^
+ /// ^ one-past-the-end $
+ /// ```
+ ///
+ /// The difference is also extreme for empty images with large strides. The one-past-the-end
+ /// sample index is still as large as the largest of these strides while `min_length = 0`.
+ ///
+ /// ## Example with aliasing
+ ///
+ /// The concept gets even more important when you allow samples to alias each other. Here we
+ /// have the buffer of a small grayscale image where this is the case, this time we will first
+ /// show the buffer and then the individual rows below.
+ ///
+ /// * `width_stride = 1`
+ /// * `width = 3`
+ /// * `height_stride = 2`
+ /// * `height = 2`
+ ///
+ /// ```text
+ /// 1 2 3 4 5 m
+ /// |1 2 3| row one
+ /// |3 4 5| row two
+ /// ^ m min_length
+ /// ^ ??? one-past-the-end
+ /// ```
+ ///
+ /// This time 'one-past-the-end' is not even simply the largest stride times the extent of its
+ /// dimension. That still points inside the image because `height*height_stride = 4` but also
+ /// `index_of(1, 2) = 4`.
+ pub fn min_length(&self) -> Option<usize> {
+ self.layout.min_length()
+ }
+
+ /// Check if a buffer of length `len` is large enough.
+ pub fn fits(&self, len: usize) -> bool {
+ self.layout.fits(len)
+ }
+
+ /// If there are any samples aliasing each other.
+ ///
+ /// If this is not the case, it would always be safe to allow mutable access to two different
+ /// samples at the same time. Otherwise, this operation would need additional checks. When one
+ /// dimension overflows `usize` with its stride we also consider this aliasing.
+ pub fn has_aliased_samples(&self) -> bool {
+ self.layout.has_aliased_samples()
+ }
+
+ /// Check if a buffer fulfills the requirements of a normal form.
+ ///
+ /// Certain conversions have preconditions on the structure of the sample buffer that are not
+ /// captured (by design) by the type system. These are then checked before the conversion. Such
+ /// checks can all be done in constant time and will not inspect the buffer content. You can
+ /// perform these checks yourself when the conversion is not required at this moment but maybe
+ /// still performed later.
+ pub fn is_normal(&self, form: NormalForm) -> bool {
+ self.layout.is_normal(form)
+ }
+
+ /// Check that the pixel and the channel index are in bounds.
+ ///
+ /// An in-bound coordinate does not yet guarantee that the corresponding calculation of a
+ /// buffer index does not overflow. However, if such a buffer large enough to hold all samples
+ /// actually exists in memory, this property of course follows.
+ pub fn in_bounds(&self, channel: u8, x: u32, y: u32) -> bool {
+ self.layout.in_bounds(channel, x, y)
+ }
+
+ /// Resolve the index of a particular sample.
+ ///
+ /// `None` if the index is outside the bounds or does not fit into a `usize`.
+ pub fn index(&self, channel: u8, x: u32, y: u32) -> Option<usize> {
+ self.layout.index(channel, x, y)
+ }
+
+ /// Get the theoretical position of sample (channel, x, y).
+ ///
+ /// The 'check' is for overflow during index calculation, not that it is contained in the
+ /// image. Two samples may return the same index, even when one of them is out of bounds. This
+ /// happens when all strides are `0`, i.e. the image is an arbitrarily large monochrome image.
+ pub fn index_ignoring_bounds(&self, channel: usize, x: usize, y: usize) -> Option<usize> {
+ self.layout.index_ignoring_bounds(channel, x, y)
+ }
+
+ /// Get an index provided it is in bounds.
+ ///
+ /// Assumes that the image is backed by some sufficiently large buffer. Then computation can
+ /// not overflow as we could represent the maximum coordinate. Since overflow is defined either
+ /// way, this method can not be unsafe.
+ pub fn in_bounds_index(&self, channel: u8, x: u32, y: u32) -> usize {
+ self.layout.in_bounds_index(channel, x, y)
+ }
+
+ /// Shrink the image to the minimum of current and given extents.
+ ///
+ /// This does not modify the strides, so that the resulting sample buffer may have holes
+ /// created by the shrinking operation. Shrinking could also lead to a non-aliasing image when
+ /// samples had aliased each other before.
+ pub fn shrink_to(&mut self, channels: u8, width: u32, height: u32) {
+ self.layout.shrink_to(channels, width, height)
+ }
+}
+
+impl<'buf, Subpixel> FlatSamples<&'buf [Subpixel]> {
+ /// Create a monocolor image from a single pixel.
+ ///
+ /// This can be used as a very cheap source of a `GenericImageView` with an arbitrary number of
+ /// pixels of a single color, without any dynamic allocation.
+ ///
+ /// ## Examples
+ ///
+ /// ```
+ /// # fn paint_something<T>(_: T) {}
+ /// use image::{flat::FlatSamples, GenericImage, RgbImage, Rgb};
+ ///
+ /// let background = Rgb([20, 20, 20]);
+ /// let bg = FlatSamples::with_monocolor(&background, 200, 200);
+ ///
+ /// let mut image = RgbImage::new(200, 200);
+ /// paint_something(&mut image);
+ ///
+ /// // Reset the canvas
+ /// image.copy_from(&bg.as_view().unwrap(), 0, 0);
+ /// ```
+ pub fn with_monocolor<P>(pixel: &'buf P, width: u32, height: u32) -> Self
+ where
+ P: Pixel<Subpixel = Subpixel>,
+ Subpixel: crate::Primitive,
+ {
+ FlatSamples {
+ samples: pixel.channels(),
+ layout: SampleLayout {
+ channels: P::CHANNEL_COUNT,
+ channel_stride: 1,
+ width,
+ width_stride: 0,
+ height,
+ height_stride: 0,
+ },
+
+ // TODO this value is never set. It should be set in all places where the Pixel type implements PixelWithColorType
+ color_hint: None,
+ }
+ }
+}
+
+/// A flat buffer that can be used as an image view.
+///
+/// This is a nearly trivial wrapper around a buffer but at least sanitizes by checking the buffer
+/// length first and constraining the pixel type.
+///
+/// Note that this does not eliminate panics as the `AsRef<[T]>` implementation of `Buffer` may be
+/// unreliable, i.e. return different buffers at different times. This of course is a non-issue for
+ /// all common collections, where checking the bounds once is enough.
+///
+/// # Inner invariants
+///
+/// * For all indices inside bounds, the corresponding index is valid in the buffer
+/// * `P::channel_count()` agrees with `self.inner.layout.channels`
+///
+#[derive(Clone, Debug)]
+pub struct View<Buffer, P: Pixel>
+where
+ Buffer: AsRef<[P::Subpixel]>,
+{
+ inner: FlatSamples<Buffer>,
+ phantom: PhantomData<P>,
+}
+
+/// A mutable owning version of a flat buffer.
+///
+/// While this wraps a buffer similar to `ImageBuffer`, this is mostly intended as a utility. The
+ /// library-endorsed normalized representation is still `ImageBuffer`. Also, the implementation of
+/// `AsMut<[P::Subpixel]>` must always yield the same buffer. Therefore there is no public way to
+/// construct this with an owning buffer.
+///
+/// # Inner invariants
+///
+/// * For all indices inside bounds, the corresponding index is valid in the buffer
+/// * There is no aliasing of samples
+ /// * The samples are packed, i.e. `self.inner.layout.channel_stride == 1`
+/// * `P::channel_count()` agrees with `self.inner.layout.channels`
+///
+#[derive(Clone, Debug)]
+pub struct ViewMut<Buffer, P: Pixel>
+where
+ Buffer: AsMut<[P::Subpixel]>,
+{
+ inner: FlatSamples<Buffer>,
+ phantom: PhantomData<P>,
+}
+
+/// Denotes invalid flat sample buffers when trying to convert to stricter types.
+///
+ /// The biggest use case is `ImageBuffer`, which expects closely packed
+ /// samples in a row major matrix representation. But this error type may be
+ /// reused for other import functions. A more versatile user may also try to
+/// correct the underlying representation depending on the error variant.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub enum Error {
+ /// The represented image was too large.
+ ///
+ /// The optional value denotes a possibly accepted maximal bound.
+ TooLarge,
+
+ /// The represented image can not use this representation.
+ ///
+ /// Has an additional value of the normalized form that would be accepted.
+ NormalFormRequired(NormalForm),
+
+ /// The color format did not match the channel count.
+ ///
+ /// In some cases you might be able to fix this by lowering the reported pixel count of the
+ /// buffer without touching the strides.
+ ///
+ /// In very special circumstances you *may* do the opposite. This is **VERY** dangerous but not
+ /// directly memory unsafe although that will likely alias pixels. One scenario is when you
+ /// want to construct an `Rgba` image but have only 3 bytes per pixel and for some reason don't
+ /// care about the value of the alpha channel even though you need `Rgba`.
+ ChannelCountMismatch(u8, u8),
+
+ /// Deprecated - ChannelCountMismatch is used instead
+ WrongColor(ColorType),
+}
+
+/// Different normal forms of buffers.
+///
+ /// A normal form is an unaliased buffer with some additional constraints. The `ImageBuffer` uses
+/// row major form with packed samples.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub enum NormalForm {
+ /// No pixel aliases another.
+ ///
+ /// Unaliased also guarantees that all index calculations in the image bounds using
+ /// `dim_index*dim_stride` (such as `x*width_stride + y*height_stride`) do not overflow.
+ Unaliased,
+
+ /// At least the pixels are packed.
+ ///
+ /// Images of these types can wrap `[T]`-slices into the standard color types. This is a
+ /// precondition for `GenericImage` which requires by-reference access to pixels.
+ PixelPacked,
+
+ /// All samples are packed.
+ ///
+ /// This is orthogonal to `PixelPacked`. It requires that there are no holes in the image but
+ /// it is not necessary that the pixel samples themselves are adjacent. An example of this
+ /// behaviour is a planar image layout.
+ ImagePacked,
+
+ /// The samples are in row-major form and all samples are packed.
+ ///
+ /// In addition to `PixelPacked` and `ImagePacked` this also asserts that the pixel matrix is
+ /// in row-major form.
+ RowMajorPacked,
+
+ /// The samples are in column-major form and all samples are packed.
+ ///
+ /// In addition to `PixelPacked` and `ImagePacked` this also asserts that the pixel matrix is
+ /// in column-major form.
+ ColumnMajorPacked,
+}
+
+impl<Buffer, P: Pixel> View<Buffer, P>
+where
+ Buffer: AsRef<[P::Subpixel]>,
+{
+ /// Take out the sample buffer.
+ ///
+ /// Gives up the normalization invariants on the buffer format.
+ pub fn into_inner(self) -> FlatSamples<Buffer> {
+ self.inner
+ }
+
+ /// Get a reference on the inner sample descriptor.
+ ///
+ /// There is no mutable counterpart as modifying the buffer format, including strides and
+ /// lengths, could invalidate the accessibility invariants of the `View`. It is not specified
+ /// if the inner buffer is the same as the buffer of the image from which this view was
+ /// created. It might have been truncated as an optimization.
+ pub fn flat(&self) -> &FlatSamples<Buffer> {
+ &self.inner
+ }
+
+ /// Get a reference on the inner buffer.
+ ///
+ /// There is no mutable counterpart since it is not intended to allow you to reassign the
+ /// buffer or otherwise change its size or properties.
+ pub fn samples(&self) -> &Buffer {
+ &self.inner.samples
+ }
+
+ /// Get a reference to a selected subpixel if it is in-bounds.
+ ///
+ /// This method will return `None` when the sample is out-of-bounds. All errors that could
+ /// occur due to overflow have been eliminated while constructing the `View`.
+ pub fn get_sample(&self, channel: u8, x: u32, y: u32) -> Option<&P::Subpixel> {
+ if !self.inner.in_bounds(channel, x, y) {
+ return None;
+ }
+
+ let index = self.inner.in_bounds_index(channel, x, y);
+ // Should always be `Some(_)` but checking is more costly.
+ self.samples().as_ref().get(index)
+ }
+
+ /// Get a mutable reference to a selected subpixel if it is in-bounds.
+ ///
+ /// This is relevant only when constructed with `FlatSamples::as_view_with_mut_samples`. This
+ /// method will return `None` when the sample is out-of-bounds. All errors that could occur due
+ /// to overflow have been eliminated while constructing the `View`.
+ ///
+ /// **WARNING**: Note that of course samples may alias, so that the mutable reference returned
+ /// here can in fact modify more than the coordinate in the argument.
+ pub fn get_mut_sample(&mut self, channel: u8, x: u32, y: u32) -> Option<&mut P::Subpixel>
+ where
+ Buffer: AsMut<[P::Subpixel]>,
+ {
+ if !self.inner.in_bounds(channel, x, y) {
+ return None;
+ }
+
+ let index = self.inner.in_bounds_index(channel, x, y);
+ // Should always be `Some(_)` but checking is more costly.
+ self.inner.samples.as_mut().get_mut(index)
+ }
+
+ /// Get the minimum length of a buffer such that all in-bounds samples have valid indices.
+ ///
+ /// See `FlatSamples::min_length`. This method will always succeed.
+ pub fn min_length(&self) -> usize {
+ self.inner.min_length().unwrap()
+ }
+
+ /// Return the portion of the buffer that holds sample values.
+ ///
+ /// While this can not fail (the validity of all coordinates has been validated during the
+ /// conversion from `FlatSamples`), the resulting slice may still contain holes.
+ pub fn image_slice(&self) -> &[P::Subpixel] {
+ &self.samples().as_ref()[..self.min_length()]
+ }
+
+ /// Return the mutable portion of the buffer that holds sample values.
+ ///
+ /// This is relevant only when constructed with `FlatSamples::as_view_with_mut_samples`. While
+ /// this can not fail (the validity of all coordinates has been validated during the conversion
+ /// from `FlatSamples`), the resulting slice may still contain holes.
+ pub fn image_mut_slice(&mut self) -> &mut [P::Subpixel]
+ where
+ Buffer: AsMut<[P::Subpixel]>,
+ {
+ let min_length = self.min_length();
+ &mut self.inner.samples.as_mut()[..min_length]
+ }
+
+ /// Shrink the inner image.
+ ///
+ /// The new dimensions will be the minimum of the previous and the given dimensions. Since the set of
+ /// in-bounds pixels afterwards is a subset of the current ones, this is allowed on a `View`.
+ /// Note that you can not change the number of channels as an intrinsic property of `P`.
+ pub fn shrink_to(&mut self, width: u32, height: u32) {
+ let channels = self.inner.layout.channels;
+ self.inner.shrink_to(channels, width, height)
+ }
+
+ /// Try to convert this into an image with mutable pixels.
+ ///
+ /// The resulting image implements `GenericImage` in addition to `GenericImageView`. While this
+ /// has mutable samples, it does not enforce that pixels can not alias and that samples are
+ /// packed enough for a mutable pixel reference. This is slightly cheaper than the chain
+ /// `self.into_inner().as_view_mut()` and keeps the `View` alive on failure.
+ ///
+ /// ```
+ /// # use image::RgbImage;
+ /// # use image::Rgb;
+ /// let mut buffer = RgbImage::new(480, 640).into_flat_samples();
+ /// let view = buffer.as_view_with_mut_samples::<Rgb<u8>>().unwrap();
+ ///
+ /// // Inspect some pixels, …
+ ///
+ /// // Doesn't fail because it was originally an `RgbImage`.
+ /// let view_mut = view.try_upgrade().unwrap();
+ /// ```
+ pub fn try_upgrade(self) -> Result<ViewMut<Buffer, P>, (Error, Self)>
+ where
+ Buffer: AsMut<[P::Subpixel]>,
+ {
+ if !self.inner.is_normal(NormalForm::PixelPacked) {
+ return Err((Error::NormalFormRequired(NormalForm::PixelPacked), self));
+ }
+
+ // No length check or channel count check required, all the same.
+ Ok(ViewMut {
+ inner: self.inner,
+ phantom: PhantomData,
+ })
+ }
+}
+
+impl<Buffer, P: Pixel> ViewMut<Buffer, P>
+where
+ Buffer: AsMut<[P::Subpixel]>,
+{
+ /// Take out the sample buffer.
+ ///
+ /// Gives up the normalization invariants on the buffer format.
+ pub fn into_inner(self) -> FlatSamples<Buffer> {
+ self.inner
+ }
+
+ /// Get a reference on the sample buffer descriptor.
+ ///
+ /// There is no mutable counterpart as modifying the buffer format, including strides and
+ /// lengths, could invalidate the accessibility invariants of the `View`. It is not specified
+ /// if the inner buffer is the same as the buffer of the image from which this view was
+ /// created. It might have been truncated as an optimization.
+ pub fn flat(&self) -> &FlatSamples<Buffer> {
+ &self.inner
+ }
+
+ /// Get a reference on the inner buffer.
+ ///
+ /// There is no mutable counterpart since it is not intended to allow you to reassign the
+ /// buffer or otherwise change its size or properties. However, its contents can be accessed
+ /// mutably through a slice with `image_mut_slice`.
+ pub fn samples(&self) -> &Buffer {
+ &self.inner.samples
+ }
+
+ /// Get the minimum length of a buffer such that all in-bounds samples have valid indices.
+ ///
+ /// See `FlatSamples::min_length`. This method will always succeed.
+ pub fn min_length(&self) -> usize {
+ self.inner.min_length().unwrap()
+ }
+
+ /// Get a reference to a selected subpixel.
+ ///
+ /// This method will return `None` when the sample is out-of-bounds. All errors that could
+ /// occur due to overflow have been eliminated while constructing the `View`.
+ pub fn get_sample(&self, channel: u8, x: u32, y: u32) -> Option<&P::Subpixel>
+ where
+ Buffer: AsRef<[P::Subpixel]>,
+ {
+ if !self.inner.in_bounds(channel, x, y) {
+ return None;
+ }
+
+ let index = self.inner.in_bounds_index(channel, x, y);
+ // Should always be `Some(_)` but checking is more costly.
+ self.samples().as_ref().get(index)
+ }
+
+ /// Get a mutable reference to a selected sample.
+ ///
+ /// This method will return `None` when the sample is out-of-bounds. All errors that could
+ /// occur due to overflow have been eliminated while constructing the `View`.
+ pub fn get_mut_sample(&mut self, channel: u8, x: u32, y: u32) -> Option<&mut P::Subpixel> {
+ if !self.inner.in_bounds(channel, x, y) {
+ return None;
+ }
+
+ let index = self.inner.in_bounds_index(channel, x, y);
+ // Should always be `Some(_)` but checking is more costly.
+ self.inner.samples.as_mut().get_mut(index)
+ }
+
+ /// Return the portion of the buffer that holds sample values.
+ ///
+ /// While this can not fail (the validity of all coordinates has been validated during the
+ /// conversion from `FlatSamples`), the resulting slice may still contain holes.
+ pub fn image_slice(&self) -> &[P::Subpixel]
+ where
+ Buffer: AsRef<[P::Subpixel]>,
+ {
+ &self.inner.samples.as_ref()[..self.min_length()]
+ }
+
+ /// Return the mutable buffer that holds sample values.
+ pub fn image_mut_slice(&mut self) -> &mut [P::Subpixel] {
+ let length = self.min_length();
+ &mut self.inner.samples.as_mut()[..length]
+ }
+
+ /// Shrink the inner image.
+ ///
+ /// The new dimensions will be the minimum of the previous and the given dimensions. Since the set of
+ /// in-bounds pixels afterwards is a subset of the current ones, this is allowed on a `View`.
+ /// Note that you can not change the number of channels as an intrinsic property of `P`.
+ pub fn shrink_to(&mut self, width: u32, height: u32) {
+ let channels = self.inner.layout.channels;
+ self.inner.shrink_to(channels, width, height)
+ }
+}
+
+// The out-of-bounds panic for single sample access similar to `slice::index`.
+#[inline(never)]
+#[cold]
+fn panic_cwh_out_of_bounds(
+ (c, x, y): (u8, u32, u32),
+ bounds: (u8, u32, u32),
+ strides: (usize, usize, usize),
+) -> ! {
+ panic!(
+ "Sample coordinates {:?} out of sample matrix bounds {:?} with strides {:?}",
+ (c, x, y),
+ bounds,
+ strides
+ )
+}
+
+// The out-of-bounds panic for pixel access similar to `slice::index`.
+#[inline(never)]
+#[cold]
+fn panic_pixel_out_of_bounds((x, y): (u32, u32), bounds: (u32, u32)) -> ! {
+ panic!("Image index {:?} out of bounds {:?}", (x, y), bounds)
+}
+
+impl<Buffer> Index<(u8, u32, u32)> for FlatSamples<Buffer>
+where
+ Buffer: Index<usize>,
+{
+ type Output = Buffer::Output;
+
+ /// Return a reference to a single sample at specified coordinates.
+ ///
+ /// # Panics
+ ///
+ /// When the coordinates are out of bounds or the index calculation fails.
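+ ///
+ /// A short sketch (indexing is by `(channel, x, y)`):
+ ///
+ /// ```
+ /// # use image::RgbImage;
+ /// let flat = RgbImage::new(8, 8).into_flat_samples();
+ /// // Freshly allocated images are zeroed.
+ /// assert_eq!(flat[(0, 3, 3)], 0u8);
+ /// ```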
+ fn index(&self, (c, x, y): (u8, u32, u32)) -> &Self::Output {
+ let bounds = self.bounds();
+ let strides = self.strides_cwh();
+ let index = self
+ .index(c, x, y)
+ .unwrap_or_else(|| panic_cwh_out_of_bounds((c, x, y), bounds, strides));
+ &self.samples[index]
+ }
+}
+
+impl<Buffer> IndexMut<(u8, u32, u32)> for FlatSamples<Buffer>
+where
+ Buffer: IndexMut<usize>,
+{
+ /// Return a mutable reference to a single sample at specified coordinates.
+ ///
+ /// # Panics
+ ///
+ /// When the coordinates are out of bounds or the index calculation fails.
+ fn index_mut(&mut self, (c, x, y): (u8, u32, u32)) -> &mut Self::Output {
+ let bounds = self.bounds();
+ let strides = self.strides_cwh();
+ let index = self
+ .index(c, x, y)
+ .unwrap_or_else(|| panic_cwh_out_of_bounds((c, x, y), bounds, strides));
+ &mut self.samples[index]
+ }
+}
+
+impl<Buffer, P: Pixel> GenericImageView for View<Buffer, P>
+where
+ Buffer: AsRef<[P::Subpixel]>,
+{
+ type Pixel = P;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.inner.layout.width, self.inner.layout.height)
+ }
+
+ fn bounds(&self) -> (u32, u32, u32, u32) {
+ let (w, h) = self.dimensions();
+ (0, w, 0, h)
+ }
+
+ fn in_bounds(&self, x: u32, y: u32) -> bool {
+ let (w, h) = self.dimensions();
+ x < w && y < h
+ }
+
+ fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
+ if !self.inner.in_bounds(0, x, y) {
+ panic_pixel_out_of_bounds((x, y), self.dimensions())
+ }
+
+ let image = self.inner.samples.as_ref();
+ let base_index = self.inner.in_bounds_index(0, x, y);
+ let channels = P::CHANNEL_COUNT as usize;
+
+ let mut buffer = [Zero::zero(); 256];
+ buffer
+ .iter_mut()
+ .enumerate()
+ .take(channels)
+ .for_each(|(c, to)| {
+ let index = base_index + c * self.inner.layout.channel_stride;
+ *to = image[index];
+ });
+
+ *P::from_slice(&buffer[..channels])
+ }
+}
+
+impl<Buffer, P: Pixel> GenericImageView for ViewMut<Buffer, P>
+where
+ Buffer: AsMut<[P::Subpixel]> + AsRef<[P::Subpixel]>,
+{
+ type Pixel = P;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.inner.layout.width, self.inner.layout.height)
+ }
+
+ fn bounds(&self) -> (u32, u32, u32, u32) {
+ let (w, h) = self.dimensions();
+ (0, w, 0, h)
+ }
+
+ fn in_bounds(&self, x: u32, y: u32) -> bool {
+ let (w, h) = self.dimensions();
+ x < w && y < h
+ }
+
+ fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
+ if !self.inner.in_bounds(0, x, y) {
+ panic_pixel_out_of_bounds((x, y), self.dimensions())
+ }
+
+ let image = self.inner.samples.as_ref();
+ let base_index = self.inner.in_bounds_index(0, x, y);
+ let channels = P::CHANNEL_COUNT as usize;
+
+ let mut buffer = [Zero::zero(); 256];
+ buffer
+ .iter_mut()
+ .enumerate()
+ .take(channels)
+ .for_each(|(c, to)| {
+ let index = base_index + c * self.inner.layout.channel_stride;
+ *to = image[index];
+ });
+
+ *P::from_slice(&buffer[..channels])
+ }
+}
+
+impl<Buffer, P: Pixel> GenericImage for ViewMut<Buffer, P>
+where
+ Buffer: AsMut<[P::Subpixel]> + AsRef<[P::Subpixel]>,
+{
+ fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut Self::Pixel {
+ if !self.inner.in_bounds(0, x, y) {
+ panic_pixel_out_of_bounds((x, y), self.dimensions())
+ }
+
+ let base_index = self.inner.in_bounds_index(0, x, y);
+ let channel_count = <P as Pixel>::CHANNEL_COUNT as usize;
+ let pixel_range = base_index..base_index + channel_count;
+ P::from_slice_mut(&mut self.inner.samples.as_mut()[pixel_range])
+ }
+
+ #[allow(deprecated)]
+ fn put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
+ *self.get_pixel_mut(x, y) = pixel;
+ }
+
+ #[allow(deprecated)]
+ fn blend_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
+ self.get_pixel_mut(x, y).blend(&pixel);
+ }
+}
+
+impl From<Error> for ImageError {
+ fn from(error: Error) -> ImageError {
+ #[derive(Debug)]
+ struct NormalFormRequiredError(NormalForm);
+ impl fmt::Display for NormalFormRequiredError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Required sample buffer in normal form {:?}", self.0)
+ }
+ }
+ impl error::Error for NormalFormRequiredError {}
+
+ match error {
+ Error::TooLarge => ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )),
+ Error::NormalFormRequired(form) => ImageError::Decoding(DecodingError::new(
+ ImageFormatHint::Unknown,
+ NormalFormRequiredError(form),
+ )),
+ Error::ChannelCountMismatch(_lc, _pc) => ImageError::Parameter(
+ ParameterError::from_kind(ParameterErrorKind::DimensionMismatch),
+ ),
+ Error::WrongColor(color) => {
+ ImageError::Unsupported(UnsupportedError::from_format_and_kind(
+ ImageFormatHint::Unknown,
+ UnsupportedErrorKind::Color(color.into()),
+ ))
+ }
+ }
+ }
+}
+
+impl fmt::Display for Error {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ Error::TooLarge => write!(f, "The layout is too large"),
+ Error::NormalFormRequired(form) => write!(
+ f,
+ "The layout needs to {}",
+ match form {
+ NormalForm::ColumnMajorPacked => "be packed and in column major form",
+ NormalForm::ImagePacked => "be fully packed",
+ NormalForm::PixelPacked => "have packed pixels",
+ NormalForm::RowMajorPacked => "be packed and in row major form",
+ NormalForm::Unaliased => "not have any aliasing channels",
+ }
+ ),
+ Error::ChannelCountMismatch(layout_channels, pixel_channels) => write!(
+ f,
+ "The channel count of the chosen pixel (={}) does agree with the layout (={})",
+ pixel_channels, layout_channels
+ ),
+ Error::WrongColor(color) => write!(
+ f,
+ "The chosen color type does not match the hint {:?}",
+ color
+ ),
+ }
+ }
+}
+
+impl error::Error for Error {}
+
+impl PartialOrd for NormalForm {
+ /// Compares the logical preconditions.
+ ///
+ /// `a < b` if the normal form `a` has fewer preconditions than `b`.
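+ ///
+ /// For instance:
+ ///
+ /// ```
+ /// # use image::flat::NormalForm;
+ /// assert!(NormalForm::Unaliased < NormalForm::PixelPacked);
+ /// assert!(NormalForm::PixelPacked < NormalForm::RowMajorPacked);
+ /// // Row-major and column-major packing are incomparable.
+ /// assert_eq!(NormalForm::RowMajorPacked.partial_cmp(&NormalForm::ColumnMajorPacked), None);
+ /// ```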
+ fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+ match (*self, *other) {
+ (NormalForm::Unaliased, NormalForm::Unaliased) => Some(cmp::Ordering::Equal),
+ (NormalForm::PixelPacked, NormalForm::PixelPacked) => Some(cmp::Ordering::Equal),
+ (NormalForm::ImagePacked, NormalForm::ImagePacked) => Some(cmp::Ordering::Equal),
+ (NormalForm::RowMajorPacked, NormalForm::RowMajorPacked) => Some(cmp::Ordering::Equal),
+ (NormalForm::ColumnMajorPacked, NormalForm::ColumnMajorPacked) => {
+ Some(cmp::Ordering::Equal)
+ }
+
+ (NormalForm::Unaliased, _) => Some(cmp::Ordering::Less),
+ (_, NormalForm::Unaliased) => Some(cmp::Ordering::Greater),
+
+ (NormalForm::PixelPacked, NormalForm::ColumnMajorPacked) => Some(cmp::Ordering::Less),
+ (NormalForm::PixelPacked, NormalForm::RowMajorPacked) => Some(cmp::Ordering::Less),
+ (NormalForm::RowMajorPacked, NormalForm::PixelPacked) => Some(cmp::Ordering::Greater),
+ (NormalForm::ColumnMajorPacked, NormalForm::PixelPacked) => {
+ Some(cmp::Ordering::Greater)
+ }
+
+ (NormalForm::ImagePacked, NormalForm::ColumnMajorPacked) => Some(cmp::Ordering::Less),
+ (NormalForm::ImagePacked, NormalForm::RowMajorPacked) => Some(cmp::Ordering::Less),
+ (NormalForm::RowMajorPacked, NormalForm::ImagePacked) => Some(cmp::Ordering::Greater),
+ (NormalForm::ColumnMajorPacked, NormalForm::ImagePacked) => {
+ Some(cmp::Ordering::Greater)
+ }
+
+ (NormalForm::ImagePacked, NormalForm::PixelPacked) => None,
+ (NormalForm::PixelPacked, NormalForm::ImagePacked) => None,
+ (NormalForm::RowMajorPacked, NormalForm::ColumnMajorPacked) => None,
+ (NormalForm::ColumnMajorPacked, NormalForm::RowMajorPacked) => None,
+ }
+ }
+}
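+
+// Editorial sketch (not part of the upstream crate): a few concrete consequences of the
+// partial order above. The packed row-/column-major forms imply the weaker pixel- and
+// image-packed forms, while the two major orders themselves are incomparable.
+#[allow(dead_code)]
+fn _normal_form_order_sketch() {
+    assert!(NormalForm::Unaliased < NormalForm::PixelPacked);
+    assert!(NormalForm::RowMajorPacked > NormalForm::ImagePacked);
+    assert_eq!(
+        NormalForm::RowMajorPacked.partial_cmp(&NormalForm::ColumnMajorPacked),
+        None
+    );
+}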
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::buffer_::GrayAlphaImage;
+ use crate::color::{LumaA, Rgb};
+
+ #[test]
+ fn aliasing_view() {
+ let buffer = FlatSamples {
+ samples: &[42],
+ layout: SampleLayout {
+ channels: 3,
+ channel_stride: 0,
+ width: 100,
+ width_stride: 0,
+ height: 100,
+ height_stride: 0,
+ },
+ color_hint: None,
+ };
+
+ let view = buffer.as_view::<Rgb<u8>>().expect("This is a valid view");
+ let pixel_count = view
+ .pixels()
+ .inspect(|pixel| assert!(pixel.2 == Rgb([42, 42, 42])))
+ .count();
+ assert_eq!(pixel_count, 100 * 100);
+ }
+
+ #[test]
+ fn mutable_view() {
+ let mut buffer = FlatSamples {
+ samples: [0; 18],
+ layout: SampleLayout {
+ channels: 2,
+ channel_stride: 1,
+ width: 3,
+ width_stride: 2,
+ height: 3,
+ height_stride: 6,
+ },
+ color_hint: None,
+ };
+
+ {
+ let mut view = buffer
+ .as_view_mut::<LumaA<u16>>()
+ .expect("This should be a valid mutable buffer");
+ assert_eq!(view.dimensions(), (3, 3));
+ #[allow(deprecated)]
+ for i in 0..9 {
+ *view.get_pixel_mut(i % 3, i / 3) = LumaA([2 * i as u16, 2 * i as u16 + 1]);
+ }
+ }
+
+ buffer
+ .samples
+ .iter()
+ .enumerate()
+ .for_each(|(idx, sample)| assert_eq!(idx, *sample as usize));
+ }
+
+ #[test]
+ fn normal_forms() {
+ assert!(FlatSamples {
+ samples: [0u8; 0],
+ layout: SampleLayout {
+ channels: 2,
+ channel_stride: 1,
+ width: 3,
+ width_stride: 9,
+ height: 3,
+ height_stride: 28,
+ },
+ color_hint: None,
+ }
+ .is_normal(NormalForm::PixelPacked));
+
+ assert!(FlatSamples {
+ samples: [0u8; 0],
+ layout: SampleLayout {
+ channels: 2,
+ channel_stride: 8,
+ width: 4,
+ width_stride: 1,
+ height: 2,
+ height_stride: 4,
+ },
+ color_hint: None,
+ }
+ .is_normal(NormalForm::ImagePacked));
+
+ assert!(FlatSamples {
+ samples: [0u8; 0],
+ layout: SampleLayout {
+ channels: 2,
+ channel_stride: 1,
+ width: 4,
+ width_stride: 2,
+ height: 2,
+ height_stride: 8,
+ },
+ color_hint: None,
+ }
+ .is_normal(NormalForm::RowMajorPacked));
+
+ assert!(FlatSamples {
+ samples: [0u8; 0],
+ layout: SampleLayout {
+ channels: 2,
+ channel_stride: 1,
+ width: 4,
+ width_stride: 4,
+ height: 2,
+ height_stride: 2,
+ },
+ color_hint: None,
+ }
+ .is_normal(NormalForm::ColumnMajorPacked));
+ }
+
+ #[test]
+ fn image_buffer_conversion() {
+ let expected_layout = SampleLayout {
+ channels: 2,
+ channel_stride: 1,
+ width: 4,
+ width_stride: 2,
+ height: 2,
+ height_stride: 8,
+ };
+
+ let initial = GrayAlphaImage::new(expected_layout.width, expected_layout.height);
+ let buffer = initial.into_flat_samples();
+
+ assert_eq!(buffer.layout, expected_layout);
+
+ let _: GrayAlphaImage = buffer.try_into_buffer().unwrap_or_else(|(error, _)| {
+ panic!("Expected buffer to be convertible but {:?}", error)
+ });
+ }
+}
diff --git a/vendor/image/src/image.rs b/vendor/image/src/image.rs
new file mode 100644
index 0000000..d131b98
--- /dev/null
+++ b/vendor/image/src/image.rs
@@ -0,0 +1,1915 @@
+#![allow(clippy::too_many_arguments)]
+use std::convert::TryFrom;
+use std::ffi::OsStr;
+use std::io;
+use std::io::Read;
+use std::ops::{Deref, DerefMut};
+use std::path::Path;
+use std::usize;
+
+use crate::color::{ColorType, ExtendedColorType};
+use crate::error::{
+ ImageError, ImageFormatHint, ImageResult, LimitError, LimitErrorKind, ParameterError,
+ ParameterErrorKind,
+};
+use crate::math::Rect;
+use crate::traits::Pixel;
+use crate::ImageBuffer;
+
+use crate::animation::Frames;
+
+#[cfg(feature = "pnm")]
+use crate::codecs::pnm::PnmSubtype;
+
+/// An enumeration of supported image formats.
+/// Not all formats support both encoding and decoding.
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
+#[non_exhaustive]
+pub enum ImageFormat {
+ /// An Image in PNG Format
+ Png,
+
+ /// An Image in JPEG Format
+ Jpeg,
+
+ /// An Image in GIF Format
+ Gif,
+
+ /// An Image in WEBP Format
+ WebP,
+
+ /// An Image in general PNM Format
+ Pnm,
+
+ /// An Image in TIFF Format
+ Tiff,
+
+ /// An Image in TGA Format
+ Tga,
+
+ /// An Image in DDS Format
+ Dds,
+
+ /// An Image in BMP Format
+ Bmp,
+
+ /// An Image in ICO Format
+ Ico,
+
+ /// An Image in Radiance HDR Format
+ Hdr,
+
+ /// An Image in OpenEXR Format
+ OpenExr,
+
+ /// An Image in farbfeld Format
+ Farbfeld,
+
+ /// An Image in AVIF format.
+ Avif,
+
+ /// An Image in QOI format.
+ Qoi,
+}
+
+impl ImageFormat {
+ /// Return the image format specified by a path's file extension.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use image::ImageFormat;
+ ///
+ /// let format = ImageFormat::from_extension("jpg");
+ /// assert_eq!(format, Some(ImageFormat::Jpeg));
+ /// ```
+ #[inline]
+ pub fn from_extension<S>(ext: S) -> Option<Self>
+ where
+ S: AsRef<OsStr>,
+ {
+ // thin wrapper function to strip generics
+ fn inner(ext: &OsStr) -> Option<ImageFormat> {
+ let ext = ext.to_str()?.to_ascii_lowercase();
+
+ Some(match ext.as_str() {
+ "avif" => ImageFormat::Avif,
+ "jpg" | "jpeg" => ImageFormat::Jpeg,
+ "png" => ImageFormat::Png,
+ "gif" => ImageFormat::Gif,
+ "webp" => ImageFormat::WebP,
+ "tif" | "tiff" => ImageFormat::Tiff,
+ "tga" => ImageFormat::Tga,
+ "dds" => ImageFormat::Dds,
+ "bmp" => ImageFormat::Bmp,
+ "ico" => ImageFormat::Ico,
+ "hdr" => ImageFormat::Hdr,
+ "exr" => ImageFormat::OpenExr,
+ "pbm" | "pam" | "ppm" | "pgm" => ImageFormat::Pnm,
+ "ff" | "farbfeld" => ImageFormat::Farbfeld,
+ "qoi" => ImageFormat::Qoi,
+ _ => return None,
+ })
+ }
+
+ inner(ext.as_ref())
+ }
+
+ /// Return the image format specified by the path's file extension.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use image::ImageFormat;
+ ///
+ /// let format = ImageFormat::from_path("images/ferris.png")?;
+ /// assert_eq!(format, ImageFormat::Png);
+ ///
+ /// # Ok::<(), image::error::ImageError>(())
+ /// ```
+ #[inline]
+ pub fn from_path<P>(path: P) -> ImageResult<Self>
+ where
+ P: AsRef<Path>,
+ {
+ // thin wrapper function to strip generics
+ fn inner(path: &Path) -> ImageResult<ImageFormat> {
+ let exact_ext = path.extension();
+ exact_ext
+ .and_then(ImageFormat::from_extension)
+ .ok_or_else(|| {
+ let format_hint = match exact_ext {
+ None => ImageFormatHint::Unknown,
+ Some(os) => ImageFormatHint::PathExtension(os.into()),
+ };
+ ImageError::Unsupported(format_hint.into())
+ })
+ }
+
+ inner(path.as_ref())
+ }
+
+ /// Return the image format specified by a MIME type.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use image::ImageFormat;
+ ///
+ /// let format = ImageFormat::from_mime_type("image/png").unwrap();
+ /// assert_eq!(format, ImageFormat::Png);
+ /// ```
+ pub fn from_mime_type<M>(mime_type: M) -> Option<Self>
+ where
+ M: AsRef<str>,
+ {
+ match mime_type.as_ref() {
+ "image/avif" => Some(ImageFormat::Avif),
+ "image/jpeg" => Some(ImageFormat::Jpeg),
+ "image/png" => Some(ImageFormat::Png),
+ "image/gif" => Some(ImageFormat::Gif),
+ "image/webp" => Some(ImageFormat::WebP),
+ "image/tiff" => Some(ImageFormat::Tiff),
+ "image/x-targa" | "image/x-tga" => Some(ImageFormat::Tga),
+ "image/vnd-ms.dds" => Some(ImageFormat::Dds),
+ "image/bmp" => Some(ImageFormat::Bmp),
+ "image/x-icon" => Some(ImageFormat::Ico),
+ "image/vnd.radiance" => Some(ImageFormat::Hdr),
+ "image/x-exr" => Some(ImageFormat::OpenExr),
+ "image/x-portable-bitmap"
+ | "image/x-portable-graymap"
+ | "image/x-portable-pixmap"
+ | "image/x-portable-anymap" => Some(ImageFormat::Pnm),
+ // Qoi's MIME type is being worked on.
+ // See: https://github.com/phoboslab/qoi/issues/167
+ "image/x-qoi" => Some(ImageFormat::Qoi),
+ _ => None,
+ }
+ }
+
+ /// Return the MIME type for this image format or "application/octet-stream" if no MIME type
+ /// exists for the format.
+ ///
+ /// Some notes on a few of the MIME types:
+ ///
+ /// - The portable anymap format has a separate MIME type for the pixmap, graymap and bitmap
+ /// formats, but this method returns the general "image/x-portable-anymap" MIME type.
+ /// - The Targa format has two common MIME types, "image/x-targa" and "image/x-tga"; this
+ /// method returns "image/x-targa" for that format.
+ /// - The QOI MIME type is still a work in progress. This method returns "image/x-qoi" for
+ /// that format.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use image::ImageFormat;
+ ///
+ /// let mime_type = ImageFormat::Png.to_mime_type();
+ /// assert_eq!(mime_type, "image/png");
+ /// ```
+ pub fn to_mime_type(&self) -> &'static str {
+ match self {
+ ImageFormat::Avif => "image/avif",
+ ImageFormat::Jpeg => "image/jpeg",
+ ImageFormat::Png => "image/png",
+ ImageFormat::Gif => "image/gif",
+ ImageFormat::WebP => "image/webp",
+ ImageFormat::Tiff => "image/tiff",
+ // the targa MIME type has two options, but this one seems to be used more
+ ImageFormat::Tga => "image/x-targa",
+ ImageFormat::Dds => "image/vnd-ms.dds",
+ ImageFormat::Bmp => "image/bmp",
+ ImageFormat::Ico => "image/x-icon",
+ ImageFormat::Hdr => "image/vnd.radiance",
+ ImageFormat::OpenExr => "image/x-exr",
+ // return the most general MIME type
+ ImageFormat::Pnm => "image/x-portable-anymap",
+ // Qoi's MIME type is being worked on.
+ // See: https://github.com/phoboslab/qoi/issues/167
+ ImageFormat::Qoi => "image/x-qoi",
+ // farbfeld's MIME type taken from https://www.wikidata.org/wiki/Q28206109
+ ImageFormat::Farbfeld => "application/octet-stream",
+ }
+ }
+
+ /// Returns true if images in this `ImageFormat` can be decoded by this library.
+ #[inline]
+ pub fn can_read(&self) -> bool {
+ // Needs to be updated once a new variant's decoder is added to free_functions.rs::load
+ match self {
+ ImageFormat::Png => true,
+ ImageFormat::Gif => true,
+ ImageFormat::Jpeg => true,
+ ImageFormat::WebP => true,
+ ImageFormat::Tiff => true,
+ ImageFormat::Tga => true,
+ ImageFormat::Dds => false,
+ ImageFormat::Bmp => true,
+ ImageFormat::Ico => true,
+ ImageFormat::Hdr => true,
+ ImageFormat::OpenExr => true,
+ ImageFormat::Pnm => true,
+ ImageFormat::Farbfeld => true,
+ ImageFormat::Avif => true,
+ ImageFormat::Qoi => true,
+ }
+ }
+
+ /// Returns true if images in this `ImageFormat` can be encoded by this library.
+ #[inline]
+ pub fn can_write(&self) -> bool {
+ // Needs to be updated once a new variant's encoder is added to free_functions.rs::save_buffer_with_format_impl
+ match self {
+ ImageFormat::Gif => true,
+ ImageFormat::Ico => true,
+ ImageFormat::Jpeg => true,
+ ImageFormat::Png => true,
+ ImageFormat::Bmp => true,
+ ImageFormat::Tiff => true,
+ ImageFormat::Tga => true,
+ ImageFormat::Pnm => true,
+ ImageFormat::Farbfeld => true,
+ ImageFormat::Avif => true,
+ ImageFormat::WebP => true,
+ ImageFormat::Hdr => false,
+ ImageFormat::OpenExr => true,
+ ImageFormat::Dds => false,
+ ImageFormat::Qoi => true,
+ }
+ }
+
+ /// Return a list of applicable extensions for this format.
+ ///
+ /// All currently recognized image formats specify at least one extension, but for future
+ /// compatibility you should not rely on this fact. The list may be empty if the format has no
+ /// recognized file representation, for example in case it is used as a purely transient memory
+ /// format.
+ ///
+ /// The method name `extensions` remains reserved for introducing another method in the future
+ /// that yields a slice of `OsStr`, which is currently blocked on several const evaluation features.
+ pub fn extensions_str(self) -> &'static [&'static str] {
+ match self {
+ ImageFormat::Png => &["png"],
+ ImageFormat::Jpeg => &["jpg", "jpeg"],
+ ImageFormat::Gif => &["gif"],
+ ImageFormat::WebP => &["webp"],
+ ImageFormat::Pnm => &["pbm", "pam", "ppm", "pgm"],
+ ImageFormat::Tiff => &["tiff", "tif"],
+ ImageFormat::Tga => &["tga"],
+ ImageFormat::Dds => &["dds"],
+ ImageFormat::Bmp => &["bmp"],
+ ImageFormat::Ico => &["ico"],
+ ImageFormat::Hdr => &["hdr"],
+ ImageFormat::OpenExr => &["exr"],
+ ImageFormat::Farbfeld => &["ff"],
+ // According to: https://aomediacodec.github.io/av1-avif/#mime-registration
+ ImageFormat::Avif => &["avif"],
+ ImageFormat::Qoi => &["qoi"],
+ }
+ }
+}
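+
+// Editorial sketch (not part of the upstream crate): the helpers above are meant to be
+// consistent with each other for formats that have a file representation, e.g.:
+#[allow(dead_code)]
+fn _image_format_helpers_sketch() {
+    let format = ImageFormat::from_extension("png").expect("png is always recognized");
+    assert_eq!(format, ImageFormat::Png);
+    assert_eq!(format.to_mime_type(), "image/png");
+    assert!(format.extensions_str().contains(&"png"));
+    assert_eq!(ImageFormat::from_mime_type("image/png"), Some(format));
+}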
+
+/// An enumeration of supported image formats for encoding.
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[non_exhaustive]
+pub enum ImageOutputFormat {
+ #[cfg(feature = "png")]
+ /// An Image in PNG Format
+ Png,
+
+ #[cfg(feature = "jpeg")]
+ /// An Image in JPEG Format with specified quality, up to 100
+ Jpeg(u8),
+
+ #[cfg(feature = "pnm")]
+ /// An Image in one of the PNM Formats
+ Pnm(PnmSubtype),
+
+ #[cfg(feature = "gif")]
+ /// An Image in GIF Format
+ Gif,
+
+ #[cfg(feature = "ico")]
+ /// An Image in ICO Format
+ Ico,
+
+ #[cfg(feature = "bmp")]
+ /// An Image in BMP Format
+ Bmp,
+
+ #[cfg(feature = "farbfeld")]
+ /// An Image in farbfeld Format
+ Farbfeld,
+
+ #[cfg(feature = "tga")]
+ /// An Image in TGA Format
+ Tga,
+
+ #[cfg(feature = "exr")]
+ /// An Image in OpenEXR Format
+ OpenExr,
+
+ #[cfg(feature = "tiff")]
+ /// An Image in TIFF Format
+ Tiff,
+
+ #[cfg(feature = "avif-encoder")]
+ /// An image in AVIF Format
+ Avif,
+
+ #[cfg(feature = "qoi")]
+ /// An image in QOI Format
+ Qoi,
+
+ #[cfg(feature = "webp-encoder")]
+ /// An image in WebP Format.
+ WebP,
+
+ /// A value for signalling an error: An unsupported format was requested
+ // Note: When TryFrom is stabilized, this value should not be needed, and
+ // a TryInto<ImageOutputFormat> should be used instead of an Into<ImageOutputFormat>.
+ Unsupported(String),
+}
+
+impl From<ImageFormat> for ImageOutputFormat {
+ fn from(fmt: ImageFormat) -> Self {
+ match fmt {
+ #[cfg(feature = "png")]
+ ImageFormat::Png => ImageOutputFormat::Png,
+ #[cfg(feature = "jpeg")]
+ ImageFormat::Jpeg => ImageOutputFormat::Jpeg(75),
+ #[cfg(feature = "pnm")]
+ ImageFormat::Pnm => ImageOutputFormat::Pnm(PnmSubtype::ArbitraryMap),
+ #[cfg(feature = "gif")]
+ ImageFormat::Gif => ImageOutputFormat::Gif,
+ #[cfg(feature = "ico")]
+ ImageFormat::Ico => ImageOutputFormat::Ico,
+ #[cfg(feature = "bmp")]
+ ImageFormat::Bmp => ImageOutputFormat::Bmp,
+ #[cfg(feature = "farbfeld")]
+ ImageFormat::Farbfeld => ImageOutputFormat::Farbfeld,
+ #[cfg(feature = "tga")]
+ ImageFormat::Tga => ImageOutputFormat::Tga,
+ #[cfg(feature = "exr")]
+ ImageFormat::OpenExr => ImageOutputFormat::OpenExr,
+ #[cfg(feature = "tiff")]
+ ImageFormat::Tiff => ImageOutputFormat::Tiff,
+
+ #[cfg(feature = "avif-encoder")]
+ ImageFormat::Avif => ImageOutputFormat::Avif,
+ #[cfg(feature = "webp-encoder")]
+ ImageFormat::WebP => ImageOutputFormat::WebP,
+
+ #[cfg(feature = "qoi")]
+ ImageFormat::Qoi => ImageOutputFormat::Qoi,
+
+ f => ImageOutputFormat::Unsupported(format!("{:?}", f)),
+ }
+ }
+}
+
+// This struct manages the buffering needed to implement `Read` and `Seek` on decoders that
+// must decode whole ranges of bytes (scanlines) at a time.
+#[allow(dead_code)]
+// When no image formats that use it are enabled
+pub(crate) struct ImageReadBuffer {
+ scanline_bytes: usize,
+ buffer: Vec<u8>,
+ consumed: usize,
+
+ total_bytes: u64,
+ offset: u64,
+}
+impl ImageReadBuffer {
+ /// Create a new ImageReadBuffer.
+ ///
+ /// Panics if scanline_bytes doesn't fit into a usize, because that would mean reading anything
+ /// from the image would take more RAM than the entire virtual address space. In other words,
+ /// actually using this struct would instantly OOM so just get it out of the way now.
+ #[allow(dead_code)]
+ // When no image formats that use it are enabled
+ pub(crate) fn new(scanline_bytes: u64, total_bytes: u64) -> Self {
+ Self {
+ scanline_bytes: usize::try_from(scanline_bytes).unwrap(),
+ buffer: Vec::new(),
+ consumed: 0,
+ total_bytes,
+ offset: 0,
+ }
+ }
+
+ #[allow(dead_code)]
+ // When no image formats that use it are enabled
+ pub(crate) fn read<F>(&mut self, buf: &mut [u8], mut read_scanline: F) -> io::Result<usize>
+ where
+ F: FnMut(&mut [u8]) -> io::Result<usize>,
+ {
+ if self.buffer.len() == self.consumed {
+ if self.offset == self.total_bytes {
+ return Ok(0);
+ } else if buf.len() >= self.scanline_bytes {
+ // If there is nothing buffered and the user requested a full scanline worth of
+ // data, skip buffering.
+ let bytes_read = read_scanline(&mut buf[..self.scanline_bytes])?;
+ self.offset += u64::try_from(bytes_read).unwrap();
+ return Ok(bytes_read);
+ } else {
+ // Lazily allocate buffer the first time that read is called with a buffer smaller
+ // than the scanline size.
+ if self.buffer.is_empty() {
+ self.buffer.resize(self.scanline_bytes, 0);
+ }
+
+ self.consumed = 0;
+ let bytes_read = read_scanline(&mut self.buffer[..])?;
+ self.buffer.resize(bytes_read, 0);
+ self.offset += u64::try_from(bytes_read).unwrap();
+
+ assert!(bytes_read == self.scanline_bytes || self.offset == self.total_bytes);
+ }
+ }
+
+ // Finally, copy bytes into output buffer.
+ let bytes_buffered = self.buffer.len() - self.consumed;
+ if bytes_buffered > buf.len() {
+ buf.copy_from_slice(&self.buffer[self.consumed..][..buf.len()]);
+ self.consumed += buf.len();
+ Ok(buf.len())
+ } else {
+ buf[..bytes_buffered].copy_from_slice(&self.buffer[self.consumed..][..bytes_buffered]);
+ self.consumed = self.buffer.len();
+ Ok(bytes_buffered)
+ }
+ }
+}
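+
+// Editorial sketch (not part of the upstream crate): the intended calling pattern for
+// `ImageReadBuffer` from a decoder's `Read` implementation. The closure stands in for a
+// format-specific scanline reader; here it serves two 3-byte scanlines.
+#[allow(dead_code)]
+fn _image_read_buffer_sketch() -> io::Result<usize> {
+    let mut scanlines = vec![vec![0u8, 1, 2], vec![3, 4, 5]].into_iter();
+    let mut buffer = ImageReadBuffer::new(3, 6);
+
+    // The output slice is smaller than one scanline, so `read` buffers a full scanline
+    // internally and hands back as many bytes as fit into `out`.
+    let mut out = [0u8; 2];
+    buffer.read(&mut out, |scanline| {
+        let line = scanlines.next().unwrap_or_default();
+        scanline[..line.len()].copy_from_slice(&line);
+        Ok(line.len())
+    })
+}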
+
+/// Decodes a specific region of the image, represented by the rectangle
+/// starting from ```x``` and ```y``` and having ```width``` and ```height```
+#[allow(dead_code)]
+// When no image formats that use it are enabled
+pub(crate) fn load_rect<'a, D, F, F1, F2, E>(
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+ buf: &mut [u8],
+ progress_callback: F,
+ decoder: &mut D,
+ mut seek_scanline: F1,
+ mut read_scanline: F2,
+) -> ImageResult<()>
+where
+ D: ImageDecoder<'a>,
+ F: Fn(Progress),
+ F1: FnMut(&mut D, u64) -> io::Result<()>,
+ F2: FnMut(&mut D, &mut [u8]) -> Result<(), E>,
+ ImageError: From<E>,
+{
+ let (x, y, width, height) = (
+ u64::from(x),
+ u64::from(y),
+ u64::from(width),
+ u64::from(height),
+ );
+ let dimensions = decoder.dimensions();
+ let bytes_per_pixel = u64::from(decoder.color_type().bytes_per_pixel());
+ let row_bytes = bytes_per_pixel * u64::from(dimensions.0);
+ let scanline_bytes = decoder.scanline_bytes();
+ let total_bytes = width * height * bytes_per_pixel;
+
+ if buf.len() < usize::try_from(total_bytes).unwrap_or(usize::max_value()) {
+ panic!(
+ "output buffer too short\n expected `{}`, provided `{}`",
+ total_bytes,
+ buf.len()
+ );
+ }
+
+ let mut bytes_read = 0u64;
+ let mut current_scanline = 0;
+ let mut tmp = Vec::new();
+ let mut tmp_scanline = None;
+
+ {
+ // Read a range of the image starting from byte number `start` and continuing until byte
+ // number `end`. Updates `current_scanline` and `bytes_read` appropriately.
+ let mut read_image_range = |mut start: u64, end: u64| -> ImageResult<()> {
+ // If the first scanline we need is already stored in the temporary buffer, then handle
+ // it first.
+ let target_scanline = start / scanline_bytes;
+ if tmp_scanline == Some(target_scanline) {
+ let position = target_scanline * scanline_bytes;
+ let offset = start.saturating_sub(position);
+ let len = (end - start)
+ .min(scanline_bytes - offset)
+ .min(end - position);
+
+ buf[(bytes_read as usize)..][..len as usize]
+ .copy_from_slice(&tmp[offset as usize..][..len as usize]);
+ bytes_read += len;
+ start += len;
+
+ progress_callback(Progress {
+ current: bytes_read,
+ total: total_bytes,
+ });
+
+ if start == end {
+ return Ok(());
+ }
+ }
+
+ let target_scanline = start / scanline_bytes;
+ if target_scanline != current_scanline {
+ seek_scanline(decoder, target_scanline)?;
+ current_scanline = target_scanline;
+ }
+
+ let mut position = current_scanline * scanline_bytes;
+ while position < end {
+ if position >= start && end - position >= scanline_bytes {
+ read_scanline(
+ decoder,
+ &mut buf[(bytes_read as usize)..][..(scanline_bytes as usize)],
+ )?;
+ bytes_read += scanline_bytes;
+ } else {
+ tmp.resize(scanline_bytes as usize, 0u8);
+ read_scanline(decoder, &mut tmp)?;
+ tmp_scanline = Some(current_scanline);
+
+ let offset = start.saturating_sub(position);
+ let len = (end - start)
+ .min(scanline_bytes - offset)
+ .min(end - position);
+
+ buf[(bytes_read as usize)..][..len as usize]
+ .copy_from_slice(&tmp[offset as usize..][..len as usize]);
+ bytes_read += len;
+ }
+
+ current_scanline += 1;
+ position += scanline_bytes;
+ progress_callback(Progress {
+ current: bytes_read,
+ total: total_bytes,
+ });
+ }
+ Ok(())
+ };
+
+ if x + width > u64::from(dimensions.0)
+ || y + height > u64::from(dimensions.1)
+ || width == 0
+ || height == 0
+ {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+ if scanline_bytes > usize::max_value() as u64 {
+ return Err(ImageError::Limits(LimitError::from_kind(
+ LimitErrorKind::InsufficientMemory,
+ )));
+ }
+
+ progress_callback(Progress {
+ current: 0,
+ total: total_bytes,
+ });
+ if x == 0 && width == u64::from(dimensions.0) {
+ let start = x * bytes_per_pixel + y * row_bytes;
+ let end = (x + width) * bytes_per_pixel + (y + height - 1) * row_bytes;
+ read_image_range(start, end)?;
+ } else {
+ for row in y..(y + height) {
+ let start = x * bytes_per_pixel + row * row_bytes;
+ let end = (x + width) * bytes_per_pixel + row * row_bytes;
+ read_image_range(start, end)?;
+ }
+ }
+ }
+
+ // Seek back to the start
+ Ok(seek_scanline(decoder, 0)?)
+}
+
+/// Reads all of the bytes of a decoder into a Vec<T>. No particular alignment
+/// of the output buffer is guaranteed.
+///
+/// Panics if there isn't enough memory to decode the image.
+pub(crate) fn decoder_to_vec<'a, T>(decoder: impl ImageDecoder<'a>) -> ImageResult<Vec<T>>
+where
+ T: crate::traits::Primitive + bytemuck::Pod,
+{
+ let total_bytes = usize::try_from(decoder.total_bytes());
+ if total_bytes.is_err() || total_bytes.unwrap() > isize::max_value() as usize {
+ return Err(ImageError::Limits(LimitError::from_kind(
+ LimitErrorKind::InsufficientMemory,
+ )));
+ }
+
+ let mut buf = vec![num_traits::Zero::zero(); total_bytes.unwrap() / std::mem::size_of::<T>()];
+ decoder.read_image(bytemuck::cast_slice_mut(buf.as_mut_slice()))?;
+ Ok(buf)
+}
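+
+// Editorial sketch (not part of the upstream crate): `decoder_to_vec` is generic over the
+// sample type, so the same helper yields `Vec<u8>` for 8-bit decoders and `Vec<u16>` for
+// 16-bit decoders (always in native endianness).
+#[allow(dead_code)]
+fn _decoder_to_vec_sketch<'a>(decoder: impl ImageDecoder<'a>) -> ImageResult<Vec<u8>> {
+    // For a 16-bit decoder one would annotate the result as `Vec<u16>` instead.
+    decoder_to_vec(decoder)
+}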
+
+/// Represents the progress of an image operation.
+///
+/// Note that this is not necessarily accurate and no change to the values passed to the progress
+/// function during decoding will be considered breaking. A decoder could in theory report the
+/// progress `(0, 0)` if progress is unknown, without violating the interface contract of the type.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct Progress {
+ current: u64,
+ total: u64,
+}
+
+impl Progress {
+ /// Creates a `Progress`. The result is an invalid progress value if `current` is greater than `total`.
+ pub(crate) fn new(current: u64, total: u64) -> Self {
+ Self { current, total }
+ }
+
+ /// A measure of completed decoding.
+ pub fn current(self) -> u64 {
+ self.current
+ }
+
+ /// A measure of all necessary decoding work.
+ ///
+ /// This is in general greater than or equal to `current`.
+ pub fn total(self) -> u64 {
+ self.total
+ }
+
+ /// Calculate a measure for remaining decoding work.
+ pub fn remaining(self) -> u64 {
+ self.total.max(self.current) - self.current
+ }
+}
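+
+// Editorial sketch (not part of the upstream crate): because `Progress` is a small `Copy`
+// value, a progress callback can derive a completion ratio without extra bookkeeping.
+#[allow(dead_code)]
+fn _progress_ratio_sketch(progress: Progress) -> f64 {
+    if progress.total() == 0 {
+        // Decoders are allowed to report unknown progress, e.g. as (0, 0).
+        1.0
+    } else {
+        progress.current() as f64 / progress.total() as f64
+    }
+}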
+
+/// The trait that all decoders implement
+pub trait ImageDecoder<'a>: Sized {
+ /// The type of reader produced by `into_reader`.
+ type Reader: Read + 'a;
+
+ /// Returns a tuple containing the width and height of the image
+ fn dimensions(&self) -> (u32, u32);
+
+ /// Returns the color type of the image data produced by this decoder
+ fn color_type(&self) -> ColorType;
+
+ /// Returns the color type of the image file before decoding
+ fn original_color_type(&self) -> ExtendedColorType {
+ self.color_type().into()
+ }
+
+ /// Returns the ICC color profile embedded in the image
+ ///
+ /// For formats that don't support embedded profiles this function will always return `None`.
+ /// This feature is currently only supported for the JPEG, PNG, and AVIF formats.
+ fn icc_profile(&mut self) -> Option<Vec<u8>> {
+ None
+ }
+
+ /// Returns a reader that can be used to obtain the bytes of the image. For the best
+ /// performance, always try to read at least `scanline_bytes` from the reader at a time. Reading
+ /// fewer bytes will cause the reader to perform internal buffering.
+ fn into_reader(self) -> ImageResult<Self::Reader>;
+
+ /// Returns the total number of bytes in the decoded image.
+ ///
+ /// This is the size of the buffer that must be passed to `read_image` or
+ /// `read_image_with_progress`. The returned value may exceed usize::MAX, in
+ /// which case it isn't actually possible to construct a buffer to decode all the image data
+ /// into. If, however, the size does not fit in a u64 then u64::MAX is returned.
+ fn total_bytes(&self) -> u64 {
+ let dimensions = self.dimensions();
+ let total_pixels = u64::from(dimensions.0) * u64::from(dimensions.1);
+ let bytes_per_pixel = u64::from(self.color_type().bytes_per_pixel());
+ total_pixels.saturating_mul(bytes_per_pixel)
+ }
+
+ /// Returns the minimum number of bytes that can be efficiently read from this decoder. This may
+ /// be as few as 1 or as many as `total_bytes()`.
+ fn scanline_bytes(&self) -> u64 {
+ self.total_bytes()
+ }
+
+ /// Returns all the bytes in the image.
+ ///
+ /// This function takes a slice of bytes and writes the pixel data of the image into it.
+ /// Although not required, for certain color types callers may want to pass buffers which are
+ /// aligned to 2 or 4 byte boundaries so that the slice can be cast to a [u16] or [u32]. To accommodate
+ /// such casts, the returned contents will always be in native endian.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if buf.len() != self.total_bytes().
+ ///
+ /// # Examples
+ ///
+ /// ```no_build
+ /// use zerocopy::{AsBytes, FromBytes};
+ /// fn read_16bit_image(decoder: impl ImageDecoder) -> Vec<u16> {
+ /// let mut buf: Vec<u16> = vec![0; decoder.total_bytes() as usize / 2];
+ /// decoder.read_image(buf.as_bytes_mut());
+ /// buf
+ /// }
+ /// ```
+ fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
+ self.read_image_with_progress(buf, |_| {})
+ }
+
+ /// Same as `read_image` but periodically calls the provided callback to give updates on loading
+ /// progress.
+ fn read_image_with_progress<F: Fn(Progress)>(
+ self,
+ buf: &mut [u8],
+ progress_callback: F,
+ ) -> ImageResult<()> {
+ assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
+
+ let total_bytes = self.total_bytes() as usize;
+ let scanline_bytes = self.scanline_bytes() as usize;
+ let target_read_size = if scanline_bytes < 4096 {
+ (4096 / scanline_bytes) * scanline_bytes
+ } else {
+ scanline_bytes
+ };
+
+ let mut reader = self.into_reader()?;
+
+ let mut bytes_read = 0;
+ while bytes_read < total_bytes {
+ let read_size = target_read_size.min(total_bytes - bytes_read);
+ reader.read_exact(&mut buf[bytes_read..][..read_size])?;
+ bytes_read += read_size;
+
+ progress_callback(Progress {
+ current: bytes_read as u64,
+ total: total_bytes as u64,
+ });
+ }
+
+ Ok(())
+ }
+
+ /// Set decoding limits for this decoder. See [`Limits`] for the different kinds of
+ /// limits that can be set.
+ ///
+ /// Note to implementors: make sure you call [`Limits::check_support`] so that
+ /// decoding fails if any unsupported strict limits are set. Also make sure
+ /// you call [`Limits::check_dimensions`] to check the `max_image_width` and
+ /// `max_image_height` limits.
+ ///
+ /// [`Limits`]: ./io/struct.Limits.html
+ /// [`Limits::check_support`]: ./io/struct.Limits.html#method.check_support
+ /// [`Limits::check_dimensions`]: ./io/struct.Limits.html#method.check_dimensions
+ fn set_limits(&mut self, limits: crate::io::Limits) -> ImageResult<()> {
+ limits.check_support(&crate::io::LimitSupport::default())?;
+
+ let (width, height) = self.dimensions();
+ limits.check_dimensions(width, height)?;
+
+ Ok(())
+ }
+}
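+
+// Editorial sketch (not part of the upstream crate): the smallest useful `ImageDecoder`
+// only needs dimensions, a color type and a reader over exactly `total_bytes()` bytes;
+// every other method has a default implementation.
+#[allow(dead_code)]
+struct _SingleGrayPixel;
+
+impl<'a> ImageDecoder<'a> for _SingleGrayPixel {
+    type Reader = std::io::Cursor<Vec<u8>>;
+
+    fn dimensions(&self) -> (u32, u32) {
+        (1, 1)
+    }
+
+    fn color_type(&self) -> ColorType {
+        ColorType::L8
+    }
+
+    fn into_reader(self) -> ImageResult<Self::Reader> {
+        // One `L8` sample, i.e. exactly `total_bytes()` bytes.
+        Ok(std::io::Cursor::new(vec![0x7f]))
+    }
+}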
+
+/// Specialized image decoding that is not necessarily supported by all formats
+pub trait ImageDecoderRect<'a>: ImageDecoder<'a> + Sized {
+ /// Decode a rectangular section of the image; see [`read_rect_with_progress()`](#fn.read_rect_with_progress).
+ fn read_rect(
+ &mut self,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+ buf: &mut [u8],
+ ) -> ImageResult<()> {
+ self.read_rect_with_progress(x, y, width, height, buf, |_| {})
+ }
+
+ /// Decode a rectangular section of the image, periodically reporting progress.
+ ///
+ /// The output buffer will be filled with fields specified by
+ /// [`ImageDecoder::color_type()`](trait.ImageDecoder.html#fn.color_type),
+ /// in that order, each field represented in native-endian.
+ ///
+ /// The progress callback will be called at least once at the start and the end of decoding,
+ /// implementations are encouraged to call this more often,
+ /// with a frequency meaningful for display to the end-user.
+ ///
+ /// This function will panic if the output buffer isn't at least
+ /// `color_type().bytes_per_pixel() * width * height` bytes long.
+ fn read_rect_with_progress<F: Fn(Progress)>(
+ &mut self,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+ buf: &mut [u8],
+ progress_callback: F,
+ ) -> ImageResult<()>;
+}
+
+/// AnimationDecoder trait
+pub trait AnimationDecoder<'a> {
+ /// Consume the decoder producing a series of frames.
+ fn into_frames(self) -> Frames<'a>;
+}
+
+/// The trait all encoders implement
+pub trait ImageEncoder {
+ /// Writes all the bytes in an image to the encoder.
+ ///
+ /// This function takes a slice of bytes of the pixel data of the image
+ /// and encodes them. Unlike the inherent `encode` methods of the individual format
+ /// encoders, where endianness is not specified, the image data bytes passed here should
+ /// always be in native endian. The implementor will reorder the endianness
+ /// as necessary for the target encoding format.
+ ///
+ /// See also `ImageDecoder::read_image` which reads byte buffers into
+ /// native endian.
+ fn write_image(
+ self,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color_type: ColorType,
+ ) -> ImageResult<()>;
+}
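+
+// Editorial sketch (not part of the upstream crate): the counterpart to `ImageDecoder::read_image`;
+// any encoder is driven by handing it raw samples plus the matching dimensions and color type.
+#[allow(dead_code)]
+fn _encode_gray_sketch(encoder: impl ImageEncoder, image: &crate::GrayImage) -> ImageResult<()> {
+    encoder.write_image(image.as_raw(), image.width(), image.height(), ColorType::L8)
+}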
+
+/// Immutable pixel iterator
+#[derive(Debug)]
+pub struct Pixels<'a, I: ?Sized + 'a> {
+ image: &'a I,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+}
+
+impl<'a, I: GenericImageView> Iterator for Pixels<'a, I> {
+ type Item = (u32, u32, I::Pixel);
+
+ fn next(&mut self) -> Option<(u32, u32, I::Pixel)> {
+ if self.x >= self.width {
+ self.x = 0;
+ self.y += 1;
+ }
+
+ if self.y >= self.height {
+ None
+ } else {
+ let pixel = self.image.get_pixel(self.x, self.y);
+ let p = (self.x, self.y, pixel);
+
+ self.x += 1;
+
+ Some(p)
+ }
+ }
+}
+
+impl<I: ?Sized> Clone for Pixels<'_, I> {
+ fn clone(&self) -> Self {
+ Pixels { ..*self }
+ }
+}
+
+/// Trait to inspect an image.
+///
+/// ```
+/// use image::{GenericImageView, Rgb, RgbImage};
+///
+/// let buffer = RgbImage::new(10, 10);
+/// let image: &dyn GenericImageView<Pixel=Rgb<u8>> = &buffer;
+/// ```
+pub trait GenericImageView {
+ /// The type of pixel.
+ type Pixel: Pixel;
+
+ /// The width and height of this image.
+ fn dimensions(&self) -> (u32, u32);
+
+ /// The width of this image.
+ fn width(&self) -> u32 {
+ let (w, _) = self.dimensions();
+ w
+ }
+
+ /// The height of this image.
+ fn height(&self) -> u32 {
+ let (_, h) = self.dimensions();
+ h
+ }
+
+ /// The bounding rectangle of this image.
+ fn bounds(&self) -> (u32, u32, u32, u32);
+
+ /// Returns true if this x, y coordinate is contained inside the image.
+ fn in_bounds(&self, x: u32, y: u32) -> bool {
+ let (ix, iy, iw, ih) = self.bounds();
+ x >= ix && x < ix + iw && y >= iy && y < iy + ih
+ }
+
+ /// Returns the pixel located at (x, y). Indexed from top left.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `(x, y)` is out of bounds.
+ fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel;
+
+ /// Returns the pixel located at (x, y). Indexed from top left.
+ ///
+ /// This function can be implemented in a way that ignores bounds checking.
+ /// # Safety
+ ///
+ /// The coordinates must be [`in_bounds`] of the image.
+ ///
+ /// [`in_bounds`]: #method.in_bounds
+ unsafe fn unsafe_get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
+ self.get_pixel(x, y)
+ }
+
+ /// Returns an Iterator over the pixels of this image.
+ /// The iterator yields the coordinates of each pixel
+ /// along with their value
+ fn pixels(&self) -> Pixels<Self>
+ where
+ Self: Sized,
+ {
+ let (width, height) = self.dimensions();
+
+ Pixels {
+ image: self,
+ x: 0,
+ y: 0,
+ width,
+ height,
+ }
+ }
+
+ /// Returns a subimage that is an immutable view into this image.
+ /// You can use [`GenericImage::sub_image`] if you need a mutable view instead.
+ /// The coordinates set the position of the top left corner of the view.
+ fn view(&self, x: u32, y: u32, width: u32, height: u32) -> SubImage<&Self>
+ where
+ Self: Sized,
+ {
+ assert!(x as u64 + width as u64 <= self.width() as u64);
+ assert!(y as u64 + height as u64 <= self.height() as u64);
+ SubImage::new(self, x, y, width, height)
+ }
+}
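+
+// Editorial sketch (not part of the upstream crate): `GenericImageView` is the common surface
+// of owned buffers and borrowed views, so inspection code written against it works for both.
+#[allow(dead_code)]
+fn _count_matching_pixels_sketch<I>(image: &I, needle: I::Pixel) -> usize
+where
+    I: GenericImageView,
+    I::Pixel: PartialEq,
+{
+    image.pixels().filter(|&(_, _, p)| p == needle).count()
+}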
+
+/// A trait for manipulating images.
+pub trait GenericImage: GenericImageView {
+ /// Gets a reference to the mutable pixel at location `(x, y)`. Indexed from top left.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `(x, y)` is out of bounds.
+ ///
+ /// Panics for dynamic images (this method is deprecated and will be removed).
+ ///
+ /// ## Known issues
+ ///
+ /// This requires the buffer to contain a unique set of contiguous channels in the exact order
+ /// and byte representation that the pixel type requires. This is somewhat restrictive.
+ ///
+ /// TODO: Maybe use some kind of entry API? this would allow pixel type conversion on the fly
+ /// while still doing only one array lookup:
+ ///
+ /// ```ignore
+ /// let px = image.pixel_entry_at(x,y);
+ /// px.set_from_rgba(rgba)
+ /// ```
+ #[deprecated(since = "0.24.0", note = "Use `get_pixel` and `put_pixel` instead.")]
+ fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut Self::Pixel;
+
+ /// Put a pixel at location (x, y). Indexed from top left.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `(x, y)` is out of bounds.
+ fn put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel);
+
+ /// Puts a pixel at location (x, y). Indexed from top left.
+ ///
+ /// This function can be implemented in a way that ignores bounds checking.
+ /// # Safety
+ ///
+ /// The coordinates must be [`in_bounds`] of the image.
+ ///
+ /// [`in_bounds`]: traits.GenericImageView.html#method.in_bounds
+ unsafe fn unsafe_put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
+ self.put_pixel(x, y, pixel);
+ }
+
+ /// Put a pixel at location (x, y), taking into account alpha channels
+ #[deprecated(
+ since = "0.24.0",
+ note = "Use iterator `pixels_mut` to blend the pixels directly"
+ )]
+ fn blend_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel);
+
+ /// Copies all of the pixels from another image into this image.
+ ///
+ /// The other image is copied with the top-left corner of the
+ /// other image placed at (x, y).
+ ///
+ /// In order to copy only a piece of the other image, use [`GenericImageView::view`].
+ ///
+ /// You can use [`FlatSamples`] to source pixels from an arbitrary regular raster of channel
+ /// values, for example from a foreign interface or a fixed image.
+ ///
+ /// # Returns
+ /// Returns an error if the image is too large to be copied at the given position
+ ///
+ /// [`GenericImageView::view`]: trait.GenericImageView.html#method.view
+ /// [`FlatSamples`]: flat/struct.FlatSamples.html
+ fn copy_from<O>(&mut self, other: &O, x: u32, y: u32) -> ImageResult<()>
+ where
+ O: GenericImageView<Pixel = Self::Pixel>,
+ {
+ // Do bounds checking here so we can use the non-bounds-checking
+ // functions to copy pixels.
+ if self.width() < other.width() + x || self.height() < other.height() + y {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+
+ for k in 0..other.height() {
+ for i in 0..other.width() {
+ let p = other.get_pixel(i, k);
+ self.put_pixel(i + x, k + y, p);
+ }
+ }
+ Ok(())
+ }
+
+ /// Copies all of the pixels from one part of this image to another part of this image.
+ ///
+ /// The destination rectangle of the copy is specified with the top-left corner placed at (x, y).
+ ///
+ /// # Returns
+ /// `true` if the copy was successful, `false` if the image could not
+ /// be copied due to size constraints.
+ fn copy_within(&mut self, source: Rect, x: u32, y: u32) -> bool {
+ let Rect {
+ x: sx,
+ y: sy,
+ width,
+ height,
+ } = source;
+ let dx = x;
+ let dy = y;
+ assert!(sx < self.width() && dx < self.width());
+ assert!(sy < self.height() && dy < self.height());
+ if self.width() - dx.max(sx) < width || self.height() - dy.max(sy) < height {
+ return false;
+ }
+ // since `.rev()` creates a new type we would either have to go with dynamic dispatch for the ranges
+ // or have quite a lot of code bloat. A macro gives us static dispatch with less visible bloat.
+ macro_rules! copy_within_impl_ {
+ ($xiter:expr, $yiter:expr) => {
+ for y in $yiter {
+ let sy = sy + y;
+ let dy = dy + y;
+ for x in $xiter {
+ let sx = sx + x;
+ let dx = dx + x;
+ let pixel = self.get_pixel(sx, sy);
+ self.put_pixel(dx, dy, pixel);
+ }
+ }
+ };
+ }
+ // check how target and source rectangles relate to each other so we don't overwrite data before we have copied it.
+ match (sx < dx, sy < dy) {
+ (true, true) => copy_within_impl_!((0..width).rev(), (0..height).rev()),
+ (true, false) => copy_within_impl_!((0..width).rev(), 0..height),
+ (false, true) => copy_within_impl_!(0..width, (0..height).rev()),
+ (false, false) => copy_within_impl_!(0..width, 0..height),
+ }
+ true
+ }
+
+ /// Returns a mutable subimage that is a view into this image.
+ /// If you want an immutable subimage instead, use [`GenericImageView::view`].
+ /// The coordinates set the position of the top left corner of the SubImage.
+ fn sub_image(&mut self, x: u32, y: u32, width: u32, height: u32) -> SubImage<&mut Self>
+ where
+ Self: Sized,
+ {
+ assert!(x as u64 + width as u64 <= self.width() as u64);
+ assert!(y as u64 + height as u64 <= self.height() as u64);
+ SubImage::new(self, x, y, width, height)
+ }
+}
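+
+// Editorial sketch (not part of the upstream crate): `copy_from` stitches another image into
+// this one, while `copy_within` moves a rectangle inside the same buffer.
+#[allow(dead_code)]
+fn _copy_helpers_sketch() -> ImageResult<()> {
+    let tile = crate::GrayImage::from_pixel(2, 2, crate::color::Luma([255u8]));
+    let mut canvas = crate::GrayImage::new(4, 4);
+
+    // Place the 2x2 tile with its top-left corner at (1, 1).
+    canvas.copy_from(&tile, 1, 1)?;
+
+    // Shift that rectangle one pixel to the right; `copy_within` returns false instead of
+    // panicking if the destination would not fit.
+    assert!(canvas.copy_within(Rect { x: 1, y: 1, width: 2, height: 2 }, 2, 1));
+    Ok(())
+}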
+
+/// A View into another image
+///
+/// Instances of this struct can be created using:
+/// - [`GenericImage::sub_image`] to create a mutable view,
+/// - [`GenericImageView::view`] to create an immutable view,
+/// - [`SubImage::new`] to instantiate the struct directly.
+///
+/// Note that this does _not_ implement `GenericImage`, but it dereferences to one which allows you
+/// to use it as if it did. See [Design Considerations](#Design-Considerations) below for details.
+///
+/// # Design Considerations
+///
+/// For reasons relating to coherence, this is not itself a `GenericImage` or a `GenericImageView`.
+/// In short, we want to reserve the ability to add traits implemented for _all_ generic images
+/// but in a different manner for `SubImage`. This may be required to ensure that stacking
+/// sub-images does not incur a double indirection cost.
+///
+/// If, ultimately, this is not needed then a direct implementation of `GenericImage` can and
+/// will get added. This inconvenience may alternatively get resolved if Rust gains some form of
+/// specialization, which might make this trick unnecessary and thus also allow for a direct
+/// implementation.
+#[derive(Copy, Clone)]
+pub struct SubImage<I> {
+ inner: SubImageInner<I>,
+}
+
+/// The inner type of `SubImage` that implements `GenericImage{,View}`.
+///
+/// This type is _nominally_ `pub` but it is not exported from the crate. It should be regarded as
+/// an existential type in any case.
+#[derive(Copy, Clone)]
+pub struct SubImageInner<I> {
+ image: I,
+ xoffset: u32,
+ yoffset: u32,
+ xstride: u32,
+ ystride: u32,
+}
+
+/// Alias to access Pixel behind a reference
+type DerefPixel<I> = <<I as Deref>::Target as GenericImageView>::Pixel;
+
+/// Alias to access Subpixel behind a reference
+type DerefSubpixel<I> = <DerefPixel<I> as Pixel>::Subpixel;
+
+impl<I> SubImage<I> {
+ /// Construct a new subimage
+ /// The coordinates set the position of the top left corner of the SubImage.
+ pub fn new(image: I, x: u32, y: u32, width: u32, height: u32) -> SubImage<I> {
+ SubImage {
+ inner: SubImageInner {
+ image,
+ xoffset: x,
+ yoffset: y,
+ xstride: width,
+ ystride: height,
+ },
+ }
+ }
+
+ /// Change the coordinates of this subimage.
+ pub fn change_bounds(&mut self, x: u32, y: u32, width: u32, height: u32) {
+ self.inner.xoffset = x;
+ self.inner.yoffset = y;
+ self.inner.xstride = width;
+ self.inner.ystride = height;
+ }
+
+ /// Convert this subimage to an ImageBuffer
+ pub fn to_image(&self) -> ImageBuffer<DerefPixel<I>, Vec<DerefSubpixel<I>>>
+ where
+ I: Deref,
+ I::Target: GenericImageView + 'static,
+ {
+ let mut out = ImageBuffer::new(self.inner.xstride, self.inner.ystride);
+ let borrowed = self.inner.image.deref();
+
+ for y in 0..self.inner.ystride {
+ for x in 0..self.inner.xstride {
+ let p = borrowed.get_pixel(x + self.inner.xoffset, y + self.inner.yoffset);
+ out.put_pixel(x, y, p);
+ }
+ }
+
+ out
+ }
+}
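+
+// Editorial sketch (not part of the upstream crate): a `SubImage` only borrows its parent;
+// `to_image` is the point where the viewed rectangle gets copied into an owned buffer.
+#[allow(dead_code)]
+fn _crop_to_owned_sketch(source: &crate::RgbImage) -> crate::RgbImage {
+    let (width, height) = source.dimensions();
+    // View the top-left quadrant and copy it out.
+    let view = SubImage::new(source, 0, 0, width / 2, height / 2);
+    view.to_image()
+}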
+
+/// Methods for readable images.
+impl<I> SubImage<I>
+where
+ I: Deref,
+ I::Target: GenericImageView,
+{
+ /// Create a sub-view of the image.
+ ///
+ /// The coordinates given are relative to the current view on the underlying image.
+ ///
+ /// Note that this method is preferred to the one from `GenericImageView`. The trait method
+ /// remains accessible with explicit method call syntax, but it should rarely be needed since
+ /// it causes an extra level of indirection.
+ ///
+ /// ```
+ /// use image::{GenericImageView, RgbImage, SubImage};
+ /// let buffer = RgbImage::new(10, 10);
+ ///
+ /// let subimage: SubImage<&RgbImage> = buffer.view(0, 0, 10, 10);
+ /// let subview: SubImage<&RgbImage> = subimage.view(0, 0, 10, 10);
+ ///
+ /// // Less efficient and NOT &RgbImage
+ /// let _: SubImage<&_> = GenericImageView::view(&*subimage, 0, 0, 10, 10);
+ /// ```
+ pub fn view(&self, x: u32, y: u32, width: u32, height: u32) -> SubImage<&I::Target> {
+ use crate::GenericImageView as _;
+ assert!(x as u64 + width as u64 <= self.inner.width() as u64);
+ assert!(y as u64 + height as u64 <= self.inner.height() as u64);
+ let x = self.inner.xoffset + x;
+ let y = self.inner.yoffset + y;
+ SubImage::new(&*self.inner.image, x, y, width, height)
+ }
+
+ /// Get a reference to the underlying image.
+ pub fn inner(&self) -> &I::Target {
+ &self.inner.image
+ }
+}
+
+impl<I> SubImage<I>
+where
+ I: DerefMut,
+ I::Target: GenericImage,
+{
+ /// Create a mutable sub-view of the image.
+ ///
+ /// The coordinates given are relative to the current view on the underlying image.
+ pub fn sub_image(
+ &mut self,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+ ) -> SubImage<&mut I::Target> {
+ assert!(x as u64 + width as u64 <= self.inner.width() as u64);
+ assert!(y as u64 + height as u64 <= self.inner.height() as u64);
+ let x = self.inner.xoffset + x;
+ let y = self.inner.yoffset + y;
+ SubImage::new(&mut *self.inner.image, x, y, width, height)
+ }
+
+ /// Get a mutable reference to the underlying image.
+ pub fn inner_mut(&mut self) -> &mut I::Target {
+ &mut self.inner.image
+ }
+}
+
+impl<I> Deref for SubImage<I>
+where
+ I: Deref,
+{
+ type Target = SubImageInner<I>;
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl<I> DerefMut for SubImage<I>
+where
+ I: DerefMut,
+{
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+}
+
+#[allow(deprecated)]
+impl<I> GenericImageView for SubImageInner<I>
+where
+ I: Deref,
+ I::Target: GenericImageView,
+{
+ type Pixel = DerefPixel<I>;
+
+ fn dimensions(&self) -> (u32, u32) {
+ (self.xstride, self.ystride)
+ }
+
+ fn bounds(&self) -> (u32, u32, u32, u32) {
+ (self.xoffset, self.yoffset, self.xstride, self.ystride)
+ }
+
+ fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
+ self.image.get_pixel(x + self.xoffset, y + self.yoffset)
+ }
+}
+
+#[allow(deprecated)]
+impl<I> GenericImage for SubImageInner<I>
+where
+ I: DerefMut,
+ I::Target: GenericImage + Sized,
+{
+ fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut Self::Pixel {
+ self.image.get_pixel_mut(x + self.xoffset, y + self.yoffset)
+ }
+
+ fn put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
+ self.image
+ .put_pixel(x + self.xoffset, y + self.yoffset, pixel)
+ }
+
+ /// DEPRECATED: This method will be removed. Blend the pixel directly instead.
+ fn blend_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
+ self.image
+ .blend_pixel(x + self.xoffset, y + self.yoffset, pixel)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::io;
+ use std::path::Path;
+
+ use super::{
+ load_rect, ColorType, GenericImage, GenericImageView, ImageDecoder, ImageFormat,
+ ImageResult,
+ };
+ use crate::color::Rgba;
+ use crate::math::Rect;
+ use crate::{GrayImage, ImageBuffer};
+
+ #[test]
+ #[allow(deprecated)]
+ /// Test that alpha blending works as expected
+ fn test_image_alpha_blending() {
+ let mut target = ImageBuffer::new(1, 1);
+ target.put_pixel(0, 0, Rgba([255u8, 0, 0, 255]));
+ assert!(*target.get_pixel(0, 0) == Rgba([255, 0, 0, 255]));
+ target.blend_pixel(0, 0, Rgba([0, 255, 0, 255]));
+ assert!(*target.get_pixel(0, 0) == Rgba([0, 255, 0, 255]));
+
+ // Blending an alpha channel onto a solid background
+ target.blend_pixel(0, 0, Rgba([255, 0, 0, 127]));
+ assert!(*target.get_pixel(0, 0) == Rgba([127, 127, 0, 255]));
+
+ // Blending two alpha channels
+ target.put_pixel(0, 0, Rgba([0, 255, 0, 127]));
+ target.blend_pixel(0, 0, Rgba([255, 0, 0, 127]));
+ assert!(*target.get_pixel(0, 0) == Rgba([169, 85, 0, 190]));
+ }
+
+ #[test]
+ fn test_in_bounds() {
+ let mut target = ImageBuffer::new(2, 2);
+ target.put_pixel(0, 0, Rgba([255u8, 0, 0, 255]));
+
+ assert!(target.in_bounds(0, 0));
+ assert!(target.in_bounds(1, 0));
+ assert!(target.in_bounds(0, 1));
+ assert!(target.in_bounds(1, 1));
+
+ assert!(!target.in_bounds(2, 0));
+ assert!(!target.in_bounds(0, 2));
+ assert!(!target.in_bounds(2, 2));
+ }
+
+ #[test]
+ fn test_can_subimage_clone_nonmut() {
+ let mut source = ImageBuffer::new(3, 3);
+ source.put_pixel(1, 1, Rgba([255u8, 0, 0, 255]));
+
+ // A non-mutable copy of the source image
+ let source = source.clone();
+
+ // Clone a view into non-mutable to a separate buffer
+ let cloned = source.view(1, 1, 1, 1).to_image();
+
+ assert!(cloned.get_pixel(0, 0) == source.get_pixel(1, 1));
+ }
+
+ #[test]
+ fn test_can_nest_views() {
+ let mut source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
+
+ {
+ let mut sub1 = source.sub_image(0, 0, 2, 2);
+ let mut sub2 = sub1.sub_image(1, 1, 1, 1);
+ sub2.put_pixel(0, 0, Rgba([0, 0, 0, 0]));
+ }
+
+ assert_eq!(*source.get_pixel(1, 1), Rgba([0, 0, 0, 0]));
+
+ let view1 = source.view(0, 0, 2, 2);
+ assert_eq!(*source.get_pixel(1, 1), view1.get_pixel(1, 1));
+
+ let view2 = view1.view(1, 1, 1, 1);
+ assert_eq!(*source.get_pixel(1, 1), view2.get_pixel(0, 0));
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_view_out_of_bounds() {
+ let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
+ source.view(1, 1, 3, 3);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_view_coordinates_out_of_bounds() {
+ let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
+ source.view(3, 3, 3, 3);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_view_width_out_of_bounds() {
+ let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
+ source.view(1, 1, 3, 2);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_view_height_out_of_bounds() {
+ let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
+ source.view(1, 1, 2, 3);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_view_x_out_of_bounds() {
+ let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
+ source.view(3, 1, 3, 3);
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_view_y_out_of_bounds() {
+ let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
+ source.view(1, 3, 3, 3);
+ }
+
+ #[test]
+ fn test_view_in_bounds() {
+ let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
+ source.view(0, 0, 3, 3);
+ source.view(1, 1, 2, 2);
+ source.view(2, 2, 0, 0);
+ }
+
+ #[test]
+ fn test_copy_sub_image() {
+ let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
+ let view = source.view(0, 0, 3, 3);
+ let mut views = Vec::new();
+ views.push(view);
+ view.to_image();
+ }
+
+ #[test]
+ fn test_load_rect() {
+ struct MockDecoder {
+ scanline_number: u64,
+ scanline_bytes: u64,
+ }
+ impl<'a> ImageDecoder<'a> for MockDecoder {
+ type Reader = Box<dyn io::Read>;
+ fn dimensions(&self) -> (u32, u32) {
+ (5, 5)
+ }
+ fn color_type(&self) -> ColorType {
+ ColorType::L8
+ }
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ unimplemented!()
+ }
+ fn scanline_bytes(&self) -> u64 {
+ self.scanline_bytes
+ }
+ }
+
+ const DATA: [u8; 25] = [
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24,
+ ];
+
+ fn seek_scanline(m: &mut MockDecoder, n: u64) -> io::Result<()> {
+ m.scanline_number = n;
+ Ok(())
+ }
+ fn read_scanline(m: &mut MockDecoder, buf: &mut [u8]) -> io::Result<()> {
+ let bytes_read = m.scanline_number * m.scanline_bytes;
+ if bytes_read >= 25 {
+ return Ok(());
+ }
+
+ let len = m.scanline_bytes.min(25 - bytes_read);
+ buf[..(len as usize)].copy_from_slice(&DATA[(bytes_read as usize)..][..(len as usize)]);
+ m.scanline_number += 1;
+ Ok(())
+ }
+
+ for scanline_bytes in 1..30 {
+ let mut output = [0u8; 26];
+
+ load_rect(
+ 0,
+ 0,
+ 5,
+ 5,
+ &mut output,
+ |_| {},
+ &mut MockDecoder {
+ scanline_number: 0,
+ scanline_bytes,
+ },
+ seek_scanline,
+ read_scanline,
+ )
+ .unwrap();
+ assert_eq!(output[0..25], DATA);
+ assert_eq!(output[25], 0);
+
+ output = [0u8; 26];
+ load_rect(
+ 3,
+ 2,
+ 1,
+ 1,
+ &mut output,
+ |_| {},
+ &mut MockDecoder {
+ scanline_number: 0,
+ scanline_bytes,
+ },
+ seek_scanline,
+ read_scanline,
+ )
+ .unwrap();
+ assert_eq!(output[0..2], [13, 0]);
+
+ output = [0u8; 26];
+ load_rect(
+ 3,
+ 2,
+ 2,
+ 2,
+ &mut output,
+ |_| {},
+ &mut MockDecoder {
+ scanline_number: 0,
+ scanline_bytes,
+ },
+ seek_scanline,
+ read_scanline,
+ )
+ .unwrap();
+ assert_eq!(output[0..5], [13, 14, 18, 19, 0]);
+
+ output = [0u8; 26];
+ load_rect(
+ 1,
+ 1,
+ 2,
+ 4,
+ &mut output,
+ |_| {},
+ &mut MockDecoder {
+ scanline_number: 0,
+ scanline_bytes,
+ },
+ seek_scanline,
+ read_scanline,
+ )
+ .unwrap();
+ assert_eq!(output[0..9], [6, 7, 11, 12, 16, 17, 21, 22, 0]);
+ }
+ }
+
+ #[test]
+ fn test_load_rect_single_scanline() {
+ const DATA: [u8; 25] = [
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24,
+ ];
+
+ struct MockDecoder;
+ impl<'a> ImageDecoder<'a> for MockDecoder {
+ type Reader = Box<dyn io::Read>;
+ fn dimensions(&self) -> (u32, u32) {
+ (5, 5)
+ }
+ fn color_type(&self) -> ColorType {
+ ColorType::L8
+ }
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ unimplemented!()
+ }
+ fn scanline_bytes(&self) -> u64 {
+ 25
+ }
+ }
+
+ // Ensure that seek scanline is called only once.
+ let mut seeks = 0;
+ let seek_scanline = |_d: &mut MockDecoder, n: u64| -> io::Result<()> {
+ seeks += 1;
+ assert_eq!(n, 0);
+ assert_eq!(seeks, 1);
+ Ok(())
+ };
+
+ fn read_scanline(_m: &mut MockDecoder, buf: &mut [u8]) -> io::Result<()> {
+ buf.copy_from_slice(&DATA);
+ Ok(())
+ }
+
+ let mut output = [0; 26];
+ load_rect(
+ 1,
+ 1,
+ 2,
+ 4,
+ &mut output,
+ |_| {},
+ &mut MockDecoder,
+ seek_scanline,
+ read_scanline,
+ )
+ .unwrap();
+ assert_eq!(output[0..9], [6, 7, 11, 12, 16, 17, 21, 22, 0]);
+ }
+
+ #[test]
+ fn test_image_format_from_path() {
+ fn from_path(s: &str) -> ImageResult<ImageFormat> {
+ ImageFormat::from_path(Path::new(s))
+ }
+ assert_eq!(from_path("./a.jpg").unwrap(), ImageFormat::Jpeg);
+ assert_eq!(from_path("./a.jpeg").unwrap(), ImageFormat::Jpeg);
+ assert_eq!(from_path("./a.JPEG").unwrap(), ImageFormat::Jpeg);
+ assert_eq!(from_path("./a.pNg").unwrap(), ImageFormat::Png);
+ assert_eq!(from_path("./a.gif").unwrap(), ImageFormat::Gif);
+ assert_eq!(from_path("./a.webp").unwrap(), ImageFormat::WebP);
+ assert_eq!(from_path("./a.tiFF").unwrap(), ImageFormat::Tiff);
+ assert_eq!(from_path("./a.tif").unwrap(), ImageFormat::Tiff);
+ assert_eq!(from_path("./a.tga").unwrap(), ImageFormat::Tga);
+ assert_eq!(from_path("./a.dds").unwrap(), ImageFormat::Dds);
+ assert_eq!(from_path("./a.bmp").unwrap(), ImageFormat::Bmp);
+ assert_eq!(from_path("./a.Ico").unwrap(), ImageFormat::Ico);
+ assert_eq!(from_path("./a.hdr").unwrap(), ImageFormat::Hdr);
+ assert_eq!(from_path("./a.exr").unwrap(), ImageFormat::OpenExr);
+ assert_eq!(from_path("./a.pbm").unwrap(), ImageFormat::Pnm);
+ assert_eq!(from_path("./a.pAM").unwrap(), ImageFormat::Pnm);
+ assert_eq!(from_path("./a.Ppm").unwrap(), ImageFormat::Pnm);
+ assert_eq!(from_path("./a.pgm").unwrap(), ImageFormat::Pnm);
+ assert_eq!(from_path("./a.AViF").unwrap(), ImageFormat::Avif);
+ assert!(from_path("./a.txt").is_err());
+ assert!(from_path("./a").is_err());
+ }
+
+ #[test]
+ fn test_generic_image_copy_within_oob() {
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, vec![0u8; 16]).unwrap();
+ assert!(!image.sub_image(0, 0, 4, 4).copy_within(
+ Rect {
+ x: 0,
+ y: 0,
+ width: 5,
+ height: 4
+ },
+ 0,
+ 0
+ ));
+ assert!(!image.sub_image(0, 0, 4, 4).copy_within(
+ Rect {
+ x: 0,
+ y: 0,
+ width: 4,
+ height: 5
+ },
+ 0,
+ 0
+ ));
+ assert!(!image.sub_image(0, 0, 4, 4).copy_within(
+ Rect {
+ x: 1,
+ y: 0,
+ width: 4,
+ height: 4
+ },
+ 0,
+ 0
+ ));
+ assert!(!image.sub_image(0, 0, 4, 4).copy_within(
+ Rect {
+ x: 0,
+ y: 0,
+ width: 4,
+ height: 4
+ },
+ 1,
+ 0
+ ));
+ assert!(!image.sub_image(0, 0, 4, 4).copy_within(
+ Rect {
+ x: 0,
+ y: 1,
+ width: 4,
+ height: 4
+ },
+ 0,
+ 0
+ ));
+ assert!(!image.sub_image(0, 0, 4, 4).copy_within(
+ Rect {
+ x: 0,
+ y: 0,
+ width: 4,
+ height: 4
+ },
+ 0,
+ 1
+ ));
+ assert!(!image.sub_image(0, 0, 4, 4).copy_within(
+ Rect {
+ x: 1,
+ y: 1,
+ width: 4,
+ height: 4
+ },
+ 0,
+ 0
+ ));
+ }
+
+ #[test]
+ fn test_generic_image_copy_within_tl() {
+ let data = &[
+ 00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15,
+ ];
+ let expected = [
+ 00, 01, 02, 03, 04, 00, 01, 02, 08, 04, 05, 06, 12, 08, 09, 10,
+ ];
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
+ assert!(image.sub_image(0, 0, 4, 4).copy_within(
+ Rect {
+ x: 0,
+ y: 0,
+ width: 3,
+ height: 3
+ },
+ 1,
+ 1
+ ));
+ assert_eq!(&image.into_raw(), &expected);
+ }
+
+ #[test]
+ fn test_generic_image_copy_within_tr() {
+ let data = &[
+ 00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15,
+ ];
+ let expected = [
+ 00, 01, 02, 03, 01, 02, 03, 07, 05, 06, 07, 11, 09, 10, 11, 15,
+ ];
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
+ assert!(image.sub_image(0, 0, 4, 4).copy_within(
+ Rect {
+ x: 1,
+ y: 0,
+ width: 3,
+ height: 3
+ },
+ 0,
+ 1
+ ));
+ assert_eq!(&image.into_raw(), &expected);
+ }
+
+ #[test]
+ fn test_generic_image_copy_within_bl() {
+ let data = &[
+ 00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15,
+ ];
+ let expected = [
+ 00, 04, 05, 06, 04, 08, 09, 10, 08, 12, 13, 14, 12, 13, 14, 15,
+ ];
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
+ assert!(image.sub_image(0, 0, 4, 4).copy_within(
+ Rect {
+ x: 0,
+ y: 1,
+ width: 3,
+ height: 3
+ },
+ 1,
+ 0
+ ));
+ assert_eq!(&image.into_raw(), &expected);
+ }
+
+ #[test]
+ fn test_generic_image_copy_within_br() {
+ let data = &[
+ 00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15,
+ ];
+ let expected = [
+ 05, 06, 07, 03, 09, 10, 11, 07, 13, 14, 15, 11, 12, 13, 14, 15,
+ ];
+ let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
+ assert!(image.sub_image(0, 0, 4, 4).copy_within(
+ Rect {
+ x: 1,
+ y: 1,
+ width: 3,
+ height: 3
+ },
+ 0,
+ 0
+ ));
+ assert_eq!(&image.into_raw(), &expected);
+ }
+
+ #[test]
+ fn image_formats_are_recognized() {
+ use ImageFormat::*;
+ const ALL_FORMATS: &'static [ImageFormat] = &[
+ Avif, Png, Jpeg, Gif, WebP, Pnm, Tiff, Tga, Dds, Bmp, Ico, Hdr, Farbfeld, OpenExr,
+ ];
+ for &format in ALL_FORMATS {
+ let mut file = Path::new("file.nothing").to_owned();
+ for ext in format.extensions_str() {
+ assert!(file.set_extension(ext));
+ match ImageFormat::from_path(&file) {
+ Err(_) => panic!("Path {} not recognized as {:?}", file.display(), format),
+ Ok(result) => assert_eq!(format, result),
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn total_bytes_overflow() {
+ struct D;
+ impl<'a> ImageDecoder<'a> for D {
+ type Reader = std::io::Cursor<Vec<u8>>;
+ fn color_type(&self) -> ColorType {
+ ColorType::Rgb8
+ }
+ fn dimensions(&self) -> (u32, u32) {
+ (0xffffffff, 0xffffffff)
+ }
+ fn into_reader(self) -> ImageResult<Self::Reader> {
+ unreachable!()
+ }
+ }
+ assert_eq!(D.total_bytes(), u64::max_value());
+
+ let v: ImageResult<Vec<u8>> = super::decoder_to_vec(D);
+ assert!(v.is_err());
+ }
+}
diff --git a/vendor/image/src/imageops/affine.rs b/vendor/image/src/imageops/affine.rs
new file mode 100644
index 0000000..548381c
--- /dev/null
+++ b/vendor/image/src/imageops/affine.rs
@@ -0,0 +1,410 @@
+//! Functions for performing affine transformations.
+
+use crate::error::{ImageError, ParameterError, ParameterErrorKind};
+use crate::image::{GenericImage, GenericImageView};
+use crate::traits::Pixel;
+use crate::ImageBuffer;
+
+/// Rotate an image 90 degrees clockwise.
+pub fn rotate90<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+where
+ I::Pixel: 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(height, width);
+ let _ = rotate90_in(image, &mut out);
+ out
+}
+
+/// Rotate an image 180 degrees clockwise.
+pub fn rotate180<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+where
+ I::Pixel: 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+ let _ = rotate180_in(image, &mut out);
+ out
+}
+
+/// Rotate an image 270 degrees clockwise.
+pub fn rotate270<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+where
+ I::Pixel: 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(height, width);
+ let _ = rotate270_in(image, &mut out);
+ out
+}
+
+/// Rotate an image 90 degrees clockwise and put the result into the destination [`ImageBuffer`].
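+///
+/// A minimal sketch: the destination must be pre-allocated with the width and height
+/// swapped, otherwise a `DimensionMismatch` parameter error is returned.
+///
+/// ```
+/// use image::imageops::rotate90_in;
+/// use image::GrayImage;
+///
+/// let src = GrayImage::new(4, 2);
+/// let mut dst = GrayImage::new(2, 4); // dimensions swapped relative to `src`
+/// assert!(rotate90_in(&src, &mut dst).is_ok());
+///
+/// let mut wrong = GrayImage::new(4, 2); // same dimensions as `src`: rejected
+/// assert!(rotate90_in(&src, &mut wrong).is_err());
+/// ```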
+pub fn rotate90_in<I, Container>(
+ image: &I,
+ destination: &mut ImageBuffer<I::Pixel, Container>,
+) -> crate::ImageResult<()>
+where
+ I: GenericImageView,
+ I::Pixel: 'static,
+ Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>,
+{
+ let ((w0, h0), (w1, h1)) = (image.dimensions(), destination.dimensions());
+ if w0 != h1 || h0 != w1 {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+
+ for y in 0..h0 {
+ for x in 0..w0 {
+ let p = image.get_pixel(x, y);
+ destination.put_pixel(h0 - y - 1, x, p);
+ }
+ }
+ Ok(())
+}
+
+/// Rotate an image 180 degrees clockwise and put the result into the destination [`ImageBuffer`].
+pub fn rotate180_in<I, Container>(
+ image: &I,
+ destination: &mut ImageBuffer<I::Pixel, Container>,
+) -> crate::ImageResult<()>
+where
+ I: GenericImageView,
+ I::Pixel: 'static,
+ Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>,
+{
+ let ((w0, h0), (w1, h1)) = (image.dimensions(), destination.dimensions());
+ if w0 != w1 || h0 != h1 {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+
+ for y in 0..h0 {
+ for x in 0..w0 {
+ let p = image.get_pixel(x, y);
+ destination.put_pixel(w0 - x - 1, h0 - y - 1, p);
+ }
+ }
+ Ok(())
+}
+
+/// Rotate an image 270 degrees clockwise and put the result into the destination [`ImageBuffer`].
+pub fn rotate270_in<I, Container>(
+ image: &I,
+ destination: &mut ImageBuffer<I::Pixel, Container>,
+) -> crate::ImageResult<()>
+where
+ I: GenericImageView,
+ I::Pixel: 'static,
+ Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>,
+{
+ let ((w0, h0), (w1, h1)) = (image.dimensions(), destination.dimensions());
+ if w0 != h1 || h0 != w1 {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+
+ for y in 0..h0 {
+ for x in 0..w0 {
+ let p = image.get_pixel(x, y);
+ destination.put_pixel(y, w0 - x - 1, p);
+ }
+ }
+ Ok(())
+}
+
+/// Flip an image horizontally
+pub fn flip_horizontal<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+where
+ I::Pixel: 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+ let _ = flip_horizontal_in(image, &mut out);
+ out
+}
+
+/// Flip an image vertically
+pub fn flip_vertical<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+where
+ I::Pixel: 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+ let _ = flip_vertical_in(image, &mut out);
+ out
+}
+
+/// Flip an image horizontally and put the result into the destination [`ImageBuffer`].
+pub fn flip_horizontal_in<I, Container>(
+ image: &I,
+ destination: &mut ImageBuffer<I::Pixel, Container>,
+) -> crate::ImageResult<()>
+where
+ I: GenericImageView,
+ I::Pixel: 'static,
+ Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>,
+{
+ let ((w0, h0), (w1, h1)) = (image.dimensions(), destination.dimensions());
+ if w0 != w1 || h0 != h1 {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+
+ for y in 0..h0 {
+ for x in 0..w0 {
+ let p = image.get_pixel(x, y);
+ destination.put_pixel(w0 - x - 1, y, p);
+ }
+ }
+ Ok(())
+}
+
+/// Flip an image vertically and put the result into the destination [`ImageBuffer`].
+pub fn flip_vertical_in<I, Container>(
+ image: &I,
+ destination: &mut ImageBuffer<I::Pixel, Container>,
+) -> crate::ImageResult<()>
+where
+ I: GenericImageView,
+ I::Pixel: 'static,
+ Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>,
+{
+ let ((w0, h0), (w1, h1)) = (image.dimensions(), destination.dimensions());
+ if w0 != w1 || h0 != h1 {
+ return Err(ImageError::Parameter(ParameterError::from_kind(
+ ParameterErrorKind::DimensionMismatch,
+ )));
+ }
+
+ for y in 0..h0 {
+ for x in 0..w0 {
+ let p = image.get_pixel(x, y);
+ destination.put_pixel(x, h0 - 1 - y, p);
+ }
+ }
+ Ok(())
+}
+
+/// Rotate an image 180 degrees clockwise in place.
+pub fn rotate180_in_place<I: GenericImage>(image: &mut I) {
+ let (width, height) = image.dimensions();
+
+ for y in 0..height / 2 {
+ for x in 0..width {
+ let p = image.get_pixel(x, y);
+
+ let x2 = width - x - 1;
+ let y2 = height - y - 1;
+
+ let p2 = image.get_pixel(x2, y2);
+ image.put_pixel(x, y, p2);
+ image.put_pixel(x2, y2, p);
+ }
+ }
+
+ if height % 2 != 0 {
+ let middle = height / 2;
+
+ for x in 0..width / 2 {
+ let p = image.get_pixel(x, middle);
+ let x2 = width - x - 1;
+
+ let p2 = image.get_pixel(x2, middle);
+ image.put_pixel(x, middle, p2);
+ image.put_pixel(x2, middle, p);
+ }
+ }
+}
+
+/// Flip an image horizontally in place.
+pub fn flip_horizontal_in_place<I: GenericImage>(image: &mut I) {
+ let (width, height) = image.dimensions();
+
+ for y in 0..height {
+ for x in 0..width / 2 {
+ let x2 = width - x - 1;
+ let p2 = image.get_pixel(x2, y);
+ let p = image.get_pixel(x, y);
+ image.put_pixel(x2, y, p);
+ image.put_pixel(x, y, p2);
+ }
+ }
+}
+
+/// Flip an image vertically in place.
+pub fn flip_vertical_in_place<I: GenericImage>(image: &mut I) {
+ let (width, height) = image.dimensions();
+
+ for y in 0..height / 2 {
+ for x in 0..width {
+ let y2 = height - y - 1;
+ let p2 = image.get_pixel(x, y2);
+ let p = image.get_pixel(x, y);
+ image.put_pixel(x, y2, p);
+ image.put_pixel(x, y, p2);
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::{
+ flip_horizontal, flip_horizontal_in_place, flip_vertical, flip_vertical_in_place,
+ rotate180, rotate180_in_place, rotate270, rotate90,
+ };
+ use crate::image::GenericImage;
+ use crate::traits::Pixel;
+ use crate::{GrayImage, ImageBuffer};
+
+ macro_rules! assert_pixels_eq {
+ ($actual:expr, $expected:expr) => {{
+ let actual_dim = $actual.dimensions();
+ let expected_dim = $expected.dimensions();
+
+ if actual_dim != expected_dim {
+ panic!(
+ "dimensions do not match. \
+ actual: {:?}, expected: {:?}",
+ actual_dim, expected_dim
+ )
+ }
+
+ let diffs = pixel_diffs($actual, $expected);
+
+ if !diffs.is_empty() {
+ let mut err = "".to_string();
+
+ let diff_messages = diffs
+ .iter()
+ .take(5)
+ .map(|d| format!("\nactual: {:?}, expected {:?} ", d.0, d.1))
+ .collect::<Vec<_>>()
+ .join("");
+
+ err.push_str(&diff_messages);
+ panic!("pixels do not match. {:?}", err)
+ }
+ }};
+ }
+
+ #[test]
+ fn test_rotate90() {
+ let image: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ let expected: GrayImage =
+ ImageBuffer::from_raw(2, 3, vec![10u8, 00u8, 11u8, 01u8, 12u8, 02u8]).unwrap();
+
+ assert_pixels_eq!(&rotate90(&image), &expected);
+ }
+
+ #[test]
+ fn test_rotate180() {
+ let image: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ let expected: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![12u8, 11u8, 10u8, 02u8, 01u8, 00u8]).unwrap();
+
+ assert_pixels_eq!(&rotate180(&image), &expected);
+ }
+
+ #[test]
+ fn test_rotate270() {
+ let image: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ let expected: GrayImage =
+ ImageBuffer::from_raw(2, 3, vec![02u8, 12u8, 01u8, 11u8, 00u8, 10u8]).unwrap();
+
+ assert_pixels_eq!(&rotate270(&image), &expected);
+ }
+
+ #[test]
+ fn test_rotate180_in_place() {
+ let mut image: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ let expected: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![12u8, 11u8, 10u8, 02u8, 01u8, 00u8]).unwrap();
+
+ rotate180_in_place(&mut image);
+
+ assert_pixels_eq!(&image, &expected);
+ }
+
+ #[test]
+ fn test_flip_horizontal() {
+ let image: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ let expected: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![02u8, 01u8, 00u8, 12u8, 11u8, 10u8]).unwrap();
+
+ assert_pixels_eq!(&flip_horizontal(&image), &expected);
+ }
+
+ #[test]
+ fn test_flip_vertical() {
+ let image: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ let expected: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![10u8, 11u8, 12u8, 00u8, 01u8, 02u8]).unwrap();
+
+ assert_pixels_eq!(&flip_vertical(&image), &expected);
+ }
+
+ #[test]
+ fn test_flip_horizontal_in_place() {
+ let mut image: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ let expected: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![02u8, 01u8, 00u8, 12u8, 11u8, 10u8]).unwrap();
+
+ flip_horizontal_in_place(&mut image);
+
+ assert_pixels_eq!(&image, &expected);
+ }
+
+ #[test]
+ fn test_flip_vertical_in_place() {
+ let mut image: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ let expected: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![10u8, 11u8, 12u8, 00u8, 01u8, 02u8]).unwrap();
+
+ flip_vertical_in_place(&mut image);
+
+ assert_pixels_eq!(&image, &expected);
+ }
+
+ fn pixel_diffs<I, J, P>(left: &I, right: &J) -> Vec<((u32, u32, P), (u32, u32, P))>
+ where
+ I: GenericImage<Pixel = P>,
+ J: GenericImage<Pixel = P>,
+ P: Pixel + Eq,
+ {
+ left.pixels()
+ .zip(right.pixels())
+ .filter(|&(p, q)| p != q)
+ .collect::<Vec<_>>()
+ }
+}
diff --git a/vendor/image/src/imageops/colorops.rs b/vendor/image/src/imageops/colorops.rs
new file mode 100644
index 0000000..085e5f4
--- /dev/null
+++ b/vendor/image/src/imageops/colorops.rs
@@ -0,0 +1,646 @@
+//! Functions for altering and converting the color of pixelbufs
+
+use num_traits::NumCast;
+use std::f64::consts::PI;
+
+use crate::color::{FromColor, IntoColor, Luma, LumaA, Rgba};
+use crate::image::{GenericImage, GenericImageView};
+use crate::traits::{Pixel, Primitive};
+use crate::utils::clamp;
+use crate::ImageBuffer;
+
+type Subpixel<I> = <<I as GenericImageView>::Pixel as Pixel>::Subpixel;
+
+/// Convert the supplied image to grayscale. Alpha channel is discarded.
+pub fn grayscale<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<Luma<Subpixel<I>>, Vec<Subpixel<I>>> {
+ grayscale_with_type(image)
+}
+
+/// Convert the supplied image to grayscale. Alpha channel is preserved.
+pub fn grayscale_alpha<I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<LumaA<Subpixel<I>>, Vec<Subpixel<I>>> {
+ grayscale_with_type_alpha(image)
+}
+
+/// Convert the supplied image to a grayscale image with the specified pixel type. Alpha channel is discarded.
+pub fn grayscale_with_type<NewPixel, I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<NewPixel, Vec<NewPixel::Subpixel>>
+where
+ NewPixel: Pixel + FromColor<Luma<Subpixel<I>>>,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+
+ for (x, y, pixel) in image.pixels() {
+ let grayscale = pixel.to_luma();
+ let new_pixel = grayscale.into_color(); // no-op for luma->luma
+
+ out.put_pixel(x, y, new_pixel);
+ }
+
+ out
+}
+
+/// Convert the supplied image to a grayscale image with the specified pixel type. Alpha channel is preserved.
+pub fn grayscale_with_type_alpha<NewPixel, I: GenericImageView>(
+ image: &I,
+) -> ImageBuffer<NewPixel, Vec<NewPixel::Subpixel>>
+where
+ NewPixel: Pixel + FromColor<LumaA<Subpixel<I>>>,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+
+ for (x, y, pixel) in image.pixels() {
+ let grayscale = pixel.to_luma_alpha();
+ let new_pixel = grayscale.into_color(); // no-op for luma->luma
+
+ out.put_pixel(x, y, new_pixel);
+ }
+
+ out
+}
+
+/// Invert each pixel within the supplied image.
+/// This function operates in place.
+pub fn invert<I: GenericImage>(image: &mut I) {
+ // TODO find a way to use pixels?
+ let (width, height) = image.dimensions();
+
+ for y in 0..height {
+ for x in 0..width {
+ let mut p = image.get_pixel(x, y);
+ p.invert();
+
+ image.put_pixel(x, y, p);
+ }
+ }
+}
+
+/// Adjust the contrast of the supplied image.
+/// `contrast` is the amount to adjust the contrast by.
+/// Negative values decrease the contrast and positive values increase the contrast.
+///
+/// *[See also `contrast_in_place`.][contrast_in_place]*
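+///
+/// A minimal sketch on an 8-bit grayscale buffer; the values 64 and 192 are arbitrary
+/// sample inputs chosen to show the clamping at both ends of the range:
+///
+/// ```
+/// use image::imageops::contrast;
+/// use image::{GrayImage, Luma};
+///
+/// let img = GrayImage::from_fn(2, 1, |x, _| Luma([if x == 0 { 64u8 } else { 192 }]));
+/// let stretched = contrast(&img, 100.0);
+/// // A +100 adjustment scales distances from the midpoint by four, so both
+/// // values are pushed past the ends of the range and clamped.
+/// assert_eq!(stretched.get_pixel(0, 0), &Luma([0u8]));
+/// assert_eq!(stretched.get_pixel(1, 0), &Luma([255u8]));
+/// ```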
+pub fn contrast<I, P, S>(image: &I, contrast: f32) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+
+ let max = S::DEFAULT_MAX_VALUE;
+ let max: f32 = NumCast::from(max).unwrap();
+
+ let percent = ((100.0 + contrast) / 100.0).powi(2);
+
+ for (x, y, pixel) in image.pixels() {
+ let f = pixel.map(|b| {
+ let c: f32 = NumCast::from(b).unwrap();
+
+ let d = ((c / max - 0.5) * percent + 0.5) * max;
+ let e = clamp(d, 0.0, max);
+
+ NumCast::from(e).unwrap()
+ });
+ out.put_pixel(x, y, f);
+ }
+
+ out
+}
+
+/// Adjust the contrast of the supplied image in place.
+/// `contrast` is the amount to adjust the contrast by.
+/// Negative values decrease the contrast and positive values increase the contrast.
+///
+/// *[See also `contrast`.][contrast]*
+pub fn contrast_in_place<I>(image: &mut I, contrast: f32)
+where
+ I: GenericImage,
+{
+ let (width, height) = image.dimensions();
+
+ let max = <I::Pixel as Pixel>::Subpixel::DEFAULT_MAX_VALUE;
+ let max: f32 = NumCast::from(max).unwrap();
+
+ let percent = ((100.0 + contrast) / 100.0).powi(2);
+
+ // TODO find a way to use pixels?
+ for y in 0..height {
+ for x in 0..width {
+ let f = image.get_pixel(x, y).map(|b| {
+ let c: f32 = NumCast::from(b).unwrap();
+
+ let d = ((c / max - 0.5) * percent + 0.5) * max;
+ let e = clamp(d, 0.0, max);
+
+ NumCast::from(e).unwrap()
+ });
+
+ image.put_pixel(x, y, f);
+ }
+ }
+}
+
+/// Brighten the supplied image.
+/// `value` is the amount to brighten each pixel by.
+/// Negative values decrease the brightness and positive values increase it.
+///
+/// *[See also `brighten_in_place`.][brighten_in_place]*
+pub fn brighten<I, P, S>(image: &I, value: i32) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+
+ let max = S::DEFAULT_MAX_VALUE;
+ let max: i32 = NumCast::from(max).unwrap();
+
+ for (x, y, pixel) in image.pixels() {
+ let e = pixel.map_with_alpha(
+ |b| {
+ let c: i32 = NumCast::from(b).unwrap();
+ let d = clamp(c + value, 0, max);
+
+ NumCast::from(d).unwrap()
+ },
+ |alpha| alpha,
+ );
+ out.put_pixel(x, y, e);
+ }
+
+ out
+}
+
+/// Brighten the supplied image in place.
+/// `value` is the amount to brighten each pixel by.
+/// Negative values decrease the brightness and positive values increase it.
+///
+/// *[See also `brighten`.][brighten]*
+pub fn brighten_in_place<I>(image: &mut I, value: i32)
+where
+ I: GenericImage,
+{
+ let (width, height) = image.dimensions();
+
+ let max = <I::Pixel as Pixel>::Subpixel::DEFAULT_MAX_VALUE;
+ let max: i32 = NumCast::from(max).unwrap(); // TODO what does this do for f32? clamp at 1??
+
+ // TODO find a way to use pixels?
+ for y in 0..height {
+ for x in 0..width {
+ let e = image.get_pixel(x, y).map_with_alpha(
+ |b| {
+ let c: i32 = NumCast::from(b).unwrap();
+ let d = clamp(c + value, 0, max);
+
+ NumCast::from(d).unwrap()
+ },
+ |alpha| alpha,
+ );
+
+ image.put_pixel(x, y, e);
+ }
+ }
+}
+
+/// Hue rotate the supplied image.
+/// `value` is the degrees to rotate each pixel by.
+/// Rotating by 0 or 360 degrees leaves the image unchanged; any other value rotates the hue
+/// by that many degrees, just like the CSS filter `hue-rotate(180deg)`.
+///
+/// *[See also `huerotate_in_place`.][huerotate_in_place]*
+pub fn huerotate<I, P, S>(image: &I, value: i32) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, height);
+
+ let angle: f64 = NumCast::from(value).unwrap();
+
+ let cosv = (angle * PI / 180.0).cos();
+ let sinv = (angle * PI / 180.0).sin();
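+ // This is the hue-rotation matrix used by the SVG/CSS `feColorMatrix`
+ // "hueRotate" filter, built around the Rec. 709 luma weights
+ // (0.213, 0.715, 0.072).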
+ let matrix: [f64; 9] = [
+ // Reds
+ 0.213 + cosv * 0.787 - sinv * 0.213,
+ 0.715 - cosv * 0.715 - sinv * 0.715,
+ 0.072 - cosv * 0.072 + sinv * 0.928,
+ // Greens
+ 0.213 - cosv * 0.213 + sinv * 0.143,
+ 0.715 + cosv * 0.285 + sinv * 0.140,
+ 0.072 - cosv * 0.072 - sinv * 0.283,
+ // Blues
+ 0.213 - cosv * 0.213 - sinv * 0.787,
+ 0.715 - cosv * 0.715 + sinv * 0.715,
+ 0.072 + cosv * 0.928 + sinv * 0.072,
+ ];
+ for (x, y, pixel) in out.enumerate_pixels_mut() {
+ let p = image.get_pixel(x, y);
+
+ #[allow(deprecated)]
+ let (k1, k2, k3, k4) = p.channels4();
+ let vec: (f64, f64, f64, f64) = (
+ NumCast::from(k1).unwrap(),
+ NumCast::from(k2).unwrap(),
+ NumCast::from(k3).unwrap(),
+ NumCast::from(k4).unwrap(),
+ );
+
+ let r = vec.0;
+ let g = vec.1;
+ let b = vec.2;
+
+ let new_r = matrix[0] * r + matrix[1] * g + matrix[2] * b;
+ let new_g = matrix[3] * r + matrix[4] * g + matrix[5] * b;
+ let new_b = matrix[6] * r + matrix[7] * g + matrix[8] * b;
+ let max = 255f64;
+
+ #[allow(deprecated)]
+ let outpixel = Pixel::from_channels(
+ NumCast::from(clamp(new_r, 0.0, max)).unwrap(),
+ NumCast::from(clamp(new_g, 0.0, max)).unwrap(),
+ NumCast::from(clamp(new_b, 0.0, max)).unwrap(),
+ NumCast::from(clamp(vec.3, 0.0, max)).unwrap(),
+ );
+ *pixel = outpixel;
+ }
+ out
+}
+
+/// Hue rotate the supplied image in place.
+/// `value` is the degrees to rotate each pixel by.
+/// Rotating by 0 or 360 degrees leaves the image unchanged; any other value rotates the hue
+/// by that many degrees, just like the CSS filter `hue-rotate(180deg)`.
+///
+/// *[See also `huerotate`.][huerotate]*
+pub fn huerotate_in_place<I>(image: &mut I, value: i32)
+where
+ I: GenericImage,
+{
+ let (width, height) = image.dimensions();
+
+ let angle: f64 = NumCast::from(value).unwrap();
+
+ let cosv = (angle * PI / 180.0).cos();
+ let sinv = (angle * PI / 180.0).sin();
+ let matrix: [f64; 9] = [
+ // Reds
+ 0.213 + cosv * 0.787 - sinv * 0.213,
+ 0.715 - cosv * 0.715 - sinv * 0.715,
+ 0.072 - cosv * 0.072 + sinv * 0.928,
+ // Greens
+ 0.213 - cosv * 0.213 + sinv * 0.143,
+ 0.715 + cosv * 0.285 + sinv * 0.140,
+ 0.072 - cosv * 0.072 - sinv * 0.283,
+ // Blues
+ 0.213 - cosv * 0.213 - sinv * 0.787,
+ 0.715 - cosv * 0.715 + sinv * 0.715,
+ 0.072 + cosv * 0.928 + sinv * 0.072,
+ ];
+
+ // TODO find a way to use pixels?
+ for y in 0..height {
+ for x in 0..width {
+ let pixel = image.get_pixel(x, y);
+
+ #[allow(deprecated)]
+ let (k1, k2, k3, k4) = pixel.channels4();
+
+ let vec: (f64, f64, f64, f64) = (
+ NumCast::from(k1).unwrap(),
+ NumCast::from(k2).unwrap(),
+ NumCast::from(k3).unwrap(),
+ NumCast::from(k4).unwrap(),
+ );
+
+ let r = vec.0;
+ let g = vec.1;
+ let b = vec.2;
+
+ let new_r = matrix[0] * r + matrix[1] * g + matrix[2] * b;
+ let new_g = matrix[3] * r + matrix[4] * g + matrix[5] * b;
+ let new_b = matrix[6] * r + matrix[7] * g + matrix[8] * b;
+ let max = 255f64;
+
+ #[allow(deprecated)]
+ let outpixel = Pixel::from_channels(
+ NumCast::from(clamp(new_r, 0.0, max)).unwrap(),
+ NumCast::from(clamp(new_g, 0.0, max)).unwrap(),
+ NumCast::from(clamp(new_b, 0.0, max)).unwrap(),
+ NumCast::from(clamp(vec.3, 0.0, max)).unwrap(),
+ );
+
+ image.put_pixel(x, y, outpixel);
+ }
+ }
+}
+
+/// A color map
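+///
+/// A sketch of a custom implementation; `Shadows` is a hypothetical two-entry map
+/// that snaps grayscale pixels to either 0 or 64:
+///
+/// ```
+/// use image::imageops::ColorMap;
+/// use image::Luma;
+///
+/// struct Shadows;
+///
+/// impl ColorMap for Shadows {
+///     type Color = Luma<u8>;
+///     fn index_of(&self, color: &Luma<u8>) -> usize {
+///         if color.0[0] > 32 { 1 } else { 0 }
+///     }
+///     fn map_color(&self, color: &mut Luma<u8>) {
+///         color.0[0] = if self.index_of(color) == 1 { 64 } else { 0 };
+///     }
+/// }
+///
+/// let mut pixel = Luma([40u8]);
+/// Shadows.map_color(&mut pixel);
+/// assert_eq!(pixel.0[0], 64);
+/// ```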
+pub trait ColorMap {
+ /// The color type on which the map operates
+ type Color;
+ /// Returns the index of the closest match of `color`
+ /// in the color map.
+ fn index_of(&self, color: &Self::Color) -> usize;
+ /// Looks up a color by index in the color map. If `idx` is out of range for the color map, or
+ /// if the `ColorMap` does not implement `lookup`, `None` is returned.
+ fn lookup(&self, index: usize) -> Option<Self::Color> {
+ let _ = index;
+ None
+ }
+ /// Determine if this implementation of ColorMap overrides the default `lookup`.
+ fn has_lookup(&self) -> bool {
+ false
+ }
+ /// Maps `color` to the closest color in the color map.
+ fn map_color(&self, color: &mut Self::Color);
+}
+
+/// A bi-level color map
+///
+/// # Examples
+/// ```
+/// use image::imageops::colorops::{index_colors, BiLevel, ColorMap};
+/// use image::{ImageBuffer, Luma};
+///
+/// let (w, h) = (16, 16);
+/// // Create an image with a smooth horizontal gradient from black (0) to white (255).
+/// let gray = ImageBuffer::from_fn(w, h, |x, y| -> Luma<u8> { [(255 * x / w) as u8].into() });
+/// // Mapping the gray image through the `BiLevel` filter should map gray pixels less than half
+/// // intensity (127) to black (0), and anything greater to white (255).
+/// let cmap = BiLevel;
+/// let palletized = index_colors(&gray, &cmap);
+/// let mapped = ImageBuffer::from_fn(w, h, |x, y| {
+/// let p = palletized.get_pixel(x, y);
+/// cmap.lookup(p.0[0] as usize)
+/// .expect("indexed color out-of-range")
+/// });
+/// // Create a black-and-white image of the expected output.
+/// let bw = ImageBuffer::from_fn(w, h, |x, y| -> Luma<u8> {
+/// if x <= (w / 2) {
+/// [0].into()
+/// } else {
+/// [255].into()
+/// }
+/// });
+/// assert_eq!(mapped, bw);
+/// ```
+#[derive(Clone, Copy)]
+pub struct BiLevel;
+
+impl ColorMap for BiLevel {
+ type Color = Luma<u8>;
+
+ #[inline(always)]
+ fn index_of(&self, color: &Luma<u8>) -> usize {
+ let luma = color.0;
+ if luma[0] > 127 {
+ 1
+ } else {
+ 0
+ }
+ }
+
+ #[inline(always)]
+ fn lookup(&self, idx: usize) -> Option<Self::Color> {
+ match idx {
+ 0 => Some([0].into()),
+ 1 => Some([255].into()),
+ _ => None,
+ }
+ }
+
+ /// Indicate that `BiLevel` implements `lookup`.
+ fn has_lookup(&self) -> bool {
+ true
+ }
+
+ #[inline(always)]
+ fn map_color(&self, color: &mut Luma<u8>) {
+ let new_color = 0xFF * self.index_of(color) as u8;
+ let luma = &mut color.0;
+ luma[0] = new_color;
+ }
+}
+
+impl ColorMap for color_quant::NeuQuant {
+ type Color = Rgba<u8>;
+
+ #[inline(always)]
+ fn index_of(&self, color: &Rgba<u8>) -> usize {
+ self.index_of(color.channels())
+ }
+
+ #[inline(always)]
+ fn lookup(&self, idx: usize) -> Option<Self::Color> {
+ self.lookup(idx).map(|p| p.into())
+ }
+
+ /// Indicate NeuQuant implements `lookup`.
+ fn has_lookup(&self) -> bool {
+ true
+ }
+
+ #[inline(always)]
+ fn map_color(&self, color: &mut Rgba<u8>) {
+ self.map_pixel(color.channels_mut())
+ }
+}
+
+/// Floyd-Steinberg error diffusion
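+///
+/// The quantization error of each pixel is pushed onto its not-yet-visited
+/// neighbours using the classic Floyd-Steinberg weights (all divided by 16):
+///
+/// ```text
+///         *   7
+///     3   5   1
+/// ```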
+fn diffuse_err<P: Pixel<Subpixel = u8>>(pixel: &mut P, error: [i16; 3], factor: i16) {
+ for (e, c) in error.iter().zip(pixel.channels_mut().iter_mut()) {
+ *c = match <i16 as From<_>>::from(*c) + e * factor / 16 {
+ val if val < 0 => 0,
+ val if val > 0xFF => 0xFF,
+ val => val as u8,
+ }
+ }
+}
+
+macro_rules! do_dithering(
+ ($map:expr, $image:expr, $err:expr, $x:expr, $y:expr) => (
+ {
+ let old_pixel = $image[($x, $y)];
+ let new_pixel = $image.get_pixel_mut($x, $y);
+ $map.map_color(new_pixel);
+ for ((e, &old), &new) in $err.iter_mut()
+ .zip(old_pixel.channels().iter())
+ .zip(new_pixel.channels().iter())
+ {
+ *e = <i16 as From<_>>::from(old) - <i16 as From<_>>::from(new)
+ }
+ }
+ )
+);
+
+/// Reduces the colors of the image using the supplied `color_map` while applying
+/// Floyd-Steinberg dithering to improve the perceived quality of the result.
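+///
+/// A minimal sketch, reducing a uniform mid-gray buffer to pure black and white:
+///
+/// ```
+/// use image::imageops::{dither, BiLevel};
+/// use image::{GrayImage, Luma};
+///
+/// let mut img = GrayImage::from_pixel(4, 4, Luma([100u8]));
+/// dither(&mut img, &BiLevel);
+/// // Every pixel is now either 0 or 255; the diffused error decides which.
+/// assert!(img.pixels().all(|p| p.0[0] == 0 || p.0[0] == 255));
+/// ```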
+pub fn dither<Pix, Map>(image: &mut ImageBuffer<Pix, Vec<u8>>, color_map: &Map)
+where
+ Map: ColorMap<Color = Pix> + ?Sized,
+ Pix: Pixel<Subpixel = u8> + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut err: [i16; 3] = [0; 3];
+ for y in 0..height - 1 {
+ let x = 0;
+ do_dithering!(color_map, image, err, x, y);
+ diffuse_err(image.get_pixel_mut(x + 1, y), err, 7);
+ diffuse_err(image.get_pixel_mut(x, y + 1), err, 5);
+ diffuse_err(image.get_pixel_mut(x + 1, y + 1), err, 1);
+ for x in 1..width - 1 {
+ do_dithering!(color_map, image, err, x, y);
+ diffuse_err(image.get_pixel_mut(x + 1, y), err, 7);
+ diffuse_err(image.get_pixel_mut(x - 1, y + 1), err, 3);
+ diffuse_err(image.get_pixel_mut(x, y + 1), err, 5);
+ diffuse_err(image.get_pixel_mut(x + 1, y + 1), err, 1);
+ }
+ let x = width - 1;
+ do_dithering!(color_map, image, err, x, y);
+ diffuse_err(image.get_pixel_mut(x - 1, y + 1), err, 3);
+ diffuse_err(image.get_pixel_mut(x, y + 1), err, 5);
+ }
+ let y = height - 1;
+ let x = 0;
+ do_dithering!(color_map, image, err, x, y);
+ diffuse_err(image.get_pixel_mut(x + 1, y), err, 7);
+ for x in 1..width - 1 {
+ do_dithering!(color_map, image, err, x, y);
+ diffuse_err(image.get_pixel_mut(x + 1, y), err, 7);
+ }
+ let x = width - 1;
+ do_dithering!(color_map, image, err, x, y);
+}
+
+/// Reduces the colors using the supplied `color_map` and returns an image of the indices
+pub fn index_colors<Pix, Map>(
+ image: &ImageBuffer<Pix, Vec<u8>>,
+ color_map: &Map,
+) -> ImageBuffer<Luma<u8>, Vec<u8>>
+where
+ Map: ColorMap<Color = Pix> + ?Sized,
+ Pix: Pixel<Subpixel = u8> + 'static,
+{
+ let mut indices = ImageBuffer::new(image.width(), image.height());
+ for (pixel, idx) in image.pixels().zip(indices.pixels_mut()) {
+ *idx = Luma([color_map.index_of(pixel) as u8])
+ }
+ indices
+}
+
+#[cfg(test)]
+mod test {
+
+ use super::*;
+ use crate::{GrayImage, ImageBuffer};
+
+ macro_rules! assert_pixels_eq {
+ ($actual:expr, $expected:expr) => {{
+ let actual_dim = $actual.dimensions();
+ let expected_dim = $expected.dimensions();
+
+ if actual_dim != expected_dim {
+ panic!(
+ "dimensions do not match. \
+ actual: {:?}, expected: {:?}",
+ actual_dim, expected_dim
+ )
+ }
+
+ let diffs = pixel_diffs($actual, $expected);
+
+ if !diffs.is_empty() {
+ let mut err = "".to_string();
+
+ let diff_messages = diffs
+ .iter()
+ .take(5)
+ .map(|d| format!("\nactual: {:?}, expected {:?} ", d.0, d.1))
+ .collect::<Vec<_>>()
+ .join("");
+
+ err.push_str(&diff_messages);
+ panic!("pixels do not match. {:?}", err)
+ }
+ }};
+ }
+
+ #[test]
+ fn test_dither() {
+ let mut image = ImageBuffer::from_raw(2, 2, vec![127, 127, 127, 127]).unwrap();
+ let cmap = BiLevel;
+ dither(&mut image, &cmap);
+ assert_eq!(&*image, &[0, 0xFF, 0xFF, 0]);
+ assert_eq!(index_colors(&image, &cmap).into_raw(), vec![0, 1, 1, 0])
+ }
+
+ #[test]
+ fn test_grayscale() {
+ let mut image: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ let expected: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ assert_pixels_eq!(&grayscale(&mut image), &expected);
+ }
+
+ #[test]
+ fn test_invert() {
+ let mut image: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ let expected: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![255u8, 254u8, 253u8, 245u8, 244u8, 243u8]).unwrap();
+
+ invert(&mut image);
+ assert_pixels_eq!(&image, &expected);
+ }
+ #[test]
+ fn test_brighten() {
+ let image: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ let expected: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![10u8, 11u8, 12u8, 20u8, 21u8, 22u8]).unwrap();
+
+ assert_pixels_eq!(&brighten(&image, 10), &expected);
+ }
+
+ #[test]
+ fn test_brighten_place() {
+ let mut image: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![00u8, 01u8, 02u8, 10u8, 11u8, 12u8]).unwrap();
+
+ let expected: GrayImage =
+ ImageBuffer::from_raw(3, 2, vec![10u8, 11u8, 12u8, 20u8, 21u8, 22u8]).unwrap();
+
+ brighten_in_place(&mut image, 10);
+ assert_pixels_eq!(&image, &expected);
+ }
+
+ fn pixel_diffs<I, J, P>(left: &I, right: &J) -> Vec<((u32, u32, P), (u32, u32, P))>
+ where
+ I: GenericImage<Pixel = P>,
+ J: GenericImage<Pixel = P>,
+ P: Pixel + Eq,
+ {
+ left.pixels()
+ .zip(right.pixels())
+ .filter(|&(p, q)| p != q)
+ .collect::<Vec<_>>()
+ }
+}
diff --git a/vendor/image/src/imageops/mod.rs b/vendor/image/src/imageops/mod.rs
new file mode 100644
index 0000000..fdd2bf3
--- /dev/null
+++ b/vendor/image/src/imageops/mod.rs
@@ -0,0 +1,485 @@
+//! Image Processing Functions
+use std::cmp;
+
+use crate::image::{GenericImage, GenericImageView, SubImage};
+use crate::traits::{Lerp, Pixel, Primitive};
+
+pub use self::sample::FilterType;
+
+pub use self::sample::FilterType::{CatmullRom, Gaussian, Lanczos3, Nearest, Triangle};
+
+/// Affine transformations
+pub use self::affine::{
+ flip_horizontal, flip_horizontal_in, flip_horizontal_in_place, flip_vertical, flip_vertical_in,
+ flip_vertical_in_place, rotate180, rotate180_in, rotate180_in_place, rotate270, rotate270_in,
+ rotate90, rotate90_in,
+};
+
+/// Image sampling
+pub use self::sample::{
+ blur, filter3x3, interpolate_bilinear, interpolate_nearest, resize, sample_bilinear,
+ sample_nearest, thumbnail, unsharpen,
+};
+
+/// Color operations
+pub use self::colorops::{
+ brighten, contrast, dither, grayscale, grayscale_alpha, grayscale_with_type,
+ grayscale_with_type_alpha, huerotate, index_colors, invert, BiLevel, ColorMap,
+};
+
+mod affine;
+// Public only because of Rust bug:
+// https://github.com/rust-lang/rust/issues/18241
+pub mod colorops;
+mod sample;
+
+/// Return a mutable view into an image
+/// The coordinates set the position of the top left corner of the crop.
+pub fn crop<I: GenericImageView>(
+ image: &mut I,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+) -> SubImage<&mut I> {
+ let (x, y, width, height) = crop_dimms(image, x, y, width, height);
+ SubImage::new(image, x, y, width, height)
+}
+
+/// Return an immutable view into an image
+/// The coordinates set the position of the top left corner of the crop.
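+///
+/// A sketch of the clamping behaviour when the requested rectangle overruns the image:
+///
+/// ```
+/// use image::imageops::crop_imm;
+/// use image::{GenericImageView, GrayImage};
+///
+/// let img = GrayImage::new(8, 8);
+/// // A 10x10 request anchored at (5, 5) is clipped to the 3x3 region that actually exists.
+/// let view = crop_imm(&img, 5, 5, 10, 10);
+/// assert_eq!(view.dimensions(), (3, 3));
+/// ```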
+pub fn crop_imm<I: GenericImageView>(
+ image: &I,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+) -> SubImage<&I> {
+ let (x, y, width, height) = crop_dimms(image, x, y, width, height);
+ SubImage::new(image, x, y, width, height)
+}
+
+fn crop_dimms<I: GenericImageView>(
+ image: &I,
+ x: u32,
+ y: u32,
+ width: u32,
+ height: u32,
+) -> (u32, u32, u32, u32) {
+ let (iwidth, iheight) = image.dimensions();
+
+ let x = cmp::min(x, iwidth);
+ let y = cmp::min(y, iheight);
+
+ let height = cmp::min(height, iheight - y);
+ let width = cmp::min(width, iwidth - x);
+
+ (x, y, width, height)
+}
+
+/// Calculate the region that can be copied from top to bottom.
+///
+/// Given the sizes of the bottom and top images, and a point at which we want to place the top image
+/// onto the bottom image, how large can the copied region be? We have to be wary of the following issues:
+/// * Top might be larger than bottom
+/// * Overflows in the computation
+/// * Coordinates could be completely out of bounds
+///
+/// The main idea is to make use of inequalities provided by the nature of `saturating_add` and
+/// `saturating_sub`. These intrinsically validate that all resulting coordinates will be in bounds
+/// for both images.
+///
+/// We want that all these coordinate accesses are safe:
+/// 1. `bottom.get_pixel(x + [0..x_range), y + [0..y_range))`
+/// 2. `top.get_pixel([0..x_range), [0..y_range))`
+///
+/// Proof that the function provides the necessary bounds for width. Note that all unaugmented math
+/// operations are to be read in standard arithmetic, not integer arithmetic. Since no direct
+/// integer arithmetic occurs in the implementation, this is unambiguous.
+///
+/// ```text
+/// Three short notes/lemmata:
+/// - Iff `(a - b) <= 0` then `a.saturating_sub(b) = 0`
+/// - Iff `(a - b) >= 0` then `a.saturating_sub(b) = a - b`
+/// - If `a <= c` then `a.saturating_sub(b) <= c.saturating_sub(b)`
+///
+/// 1.1 We show that if `bottom_width <= x`, then `x_range = 0` therefore `x + [0..x_range)` is empty.
+///
+/// x_range
+/// = (top_width.saturating_add(x).min(bottom_width)).saturating_sub(x)
+/// <= bottom_width.saturating_sub(x)
+///
+/// bottom_width <= x
+/// <==> bottom_width - x <= 0
+/// <==> bottom_width.saturating_sub(x) = 0
+/// ==> x_range <= 0
+/// ==> x_range = 0
+///
+/// 1.2 If `x < bottom_width` then `x + x_range < bottom_width`
+///
+/// x + x_range
+/// <= x + bottom_width.saturating_sub(x)
+/// = x + (bottom_width - x)
+/// = bottom_width
+///
+/// 2. We show that `x_range <= top_width`
+///
+/// x_range
+/// = (top_width.saturating_add(x).min(bottom_width)).saturating_sub(x)
+/// <= top_width.saturating_add(x).saturating_sub(x)
+/// <= (top_width + x).saturating_sub(x)
+/// = top_width (due to `top_width >= 0` and `x >= 0`)
+/// ```
+///
+/// Proof is the same for height.
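+///
+/// A small numeric sketch (both images 10x10, top image placed at x = 6):
+///
+/// ```
+/// use image::imageops::overlay_bounds;
+///
+/// // Only a 4x10 region of the top image still fits onto the bottom image.
+/// assert_eq!(overlay_bounds((10, 10), (10, 10), 6, 0), (4, 10));
+/// ```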
+pub fn overlay_bounds(
+ (bottom_width, bottom_height): (u32, u32),
+ (top_width, top_height): (u32, u32),
+ x: u32,
+ y: u32,
+) -> (u32, u32) {
+ let x_range = top_width
+ .saturating_add(x) // Calculate max coordinate
+ .min(bottom_width) // Restrict to lower width
+ .saturating_sub(x); // Determinate length from start `x`
+ let y_range = top_height
+ .saturating_add(y)
+ .min(bottom_height)
+ .saturating_sub(y);
+ (x_range, y_range)
+}
+
+/// Calculate the region that can be copied from top to bottom.
+///
+/// Given the sizes of the bottom and top images, and a point at which we want to place the top image
+/// onto the bottom image, how large can the copied region be? We have to be wary of the following issues:
+/// * Top might be larger than bottom
+/// * Overflows in the computation
+/// * Coordinates could be completely out of bounds
+///
+/// The returned value is of the form:
+///
+/// `(origin_bottom_x, origin_bottom_y, origin_top_x, origin_top_y, x_range, y_range)`
+///
+/// The main idea is to do computations on i64's and then clamp to image dimensions.
+/// In particular, we want to ensure that all these coordinate accesses are safe:
+/// 1. `bottom.get_pixel(origin_bottom_x + [0..x_range), origin_bottom_y + [0..y_range))`
+/// 2. `top.get_pixel(origin_top_x + [0..x_range), origin_top_y + [0..y_range))`
+///
+fn overlay_bounds_ext(
+ (bottom_width, bottom_height): (u32, u32),
+ (top_width, top_height): (u32, u32),
+ x: i64,
+ y: i64,
+) -> (u32, u32, u32, u32, u32, u32) {
+ // Return a predictable value if the two images don't overlap at all.
+ if x > i64::from(bottom_width)
+ || y > i64::from(bottom_height)
+ || x.saturating_add(i64::from(top_width)) <= 0
+ || y.saturating_add(i64::from(top_height)) <= 0
+ {
+ return (0, 0, 0, 0, 0, 0);
+ }
+
+ // Find the maximum x and y coordinates in terms of the bottom image.
+ let max_x = x.saturating_add(i64::from(top_width));
+ let max_y = y.saturating_add(i64::from(top_height));
+
+ // Clip the origin and maximum coordinates to the bounds of the bottom image.
+ // Casting to a u32 is safe because both 0 and `bottom_{width,height}` fit
+ // into 32-bits.
+ let max_inbounds_x = max_x.clamp(0, i64::from(bottom_width)) as u32;
+ let max_inbounds_y = max_y.clamp(0, i64::from(bottom_height)) as u32;
+ let origin_bottom_x = x.clamp(0, i64::from(bottom_width)) as u32;
+ let origin_bottom_y = y.clamp(0, i64::from(bottom_height)) as u32;
+
+ // The range is the difference between the maximum inbounds coordinates and
+ // the clipped origin. Unchecked subtraction is safe here because both are
+ // always positive and `max_inbounds_{x,y}` >= `origin_{x,y}` due to
+ // `top_{width,height}` being >= 0.
+ let x_range = max_inbounds_x - origin_bottom_x;
+ let y_range = max_inbounds_y - origin_bottom_y;
+
+ // If x (or y) is negative, then the origin of the top image is shifted by -x (or -y).
+ let origin_top_x = x.saturating_mul(-1).clamp(0, i64::from(top_width)) as u32;
+ let origin_top_y = y.saturating_mul(-1).clamp(0, i64::from(top_height)) as u32;
+
+ (
+ origin_bottom_x,
+ origin_bottom_y,
+ origin_top_x,
+ origin_top_y,
+ x_range,
+ y_range,
+ )
+}
+
+/// Overlay an image at a given coordinate (x, y)
+pub fn overlay<I, J>(bottom: &mut I, top: &J, x: i64, y: i64)
+where
+ I: GenericImage,
+ J: GenericImageView<Pixel = I::Pixel>,
+{
+ let bottom_dims = bottom.dimensions();
+ let top_dims = top.dimensions();
+
+ // Crop our top image if we're going out of bounds
+ let (origin_bottom_x, origin_bottom_y, origin_top_x, origin_top_y, range_width, range_height) =
+ overlay_bounds_ext(bottom_dims, top_dims, x, y);
+
+ for y in 0..range_height {
+ for x in 0..range_width {
+ let p = top.get_pixel(origin_top_x + x, origin_top_y + y);
+ let mut bottom_pixel = bottom.get_pixel(origin_bottom_x + x, origin_bottom_y + y);
+ bottom_pixel.blend(&p);
+
+ bottom.put_pixel(origin_bottom_x + x, origin_bottom_y + y, bottom_pixel);
+ }
+ }
+}
+
+/// Tile an image by repeating it multiple times
+///
+/// # Examples
+/// ```no_run
+/// use image::{RgbaImage};
+///
+/// let mut img = RgbaImage::new(1920, 1080);
+/// let tile = image::open("tile.png").unwrap();
+///
+/// image::imageops::tile(&mut img, &tile);
+/// img.save("tiled_wallpaper.png").unwrap();
+/// ```
+pub fn tile<I, J>(bottom: &mut I, top: &J)
+where
+ I: GenericImage,
+ J: GenericImageView<Pixel = I::Pixel>,
+{
+ for x in (0..bottom.width()).step_by(top.width() as usize) {
+ for y in (0..bottom.height()).step_by(top.height() as usize) {
+ overlay(bottom, top, i64::from(x), i64::from(y));
+ }
+ }
+}
+
+/// Fill the image with a linear vertical gradient
+///
+/// This function assumes a linear color space.
+///
+/// # Examples
+/// ```no_run
+/// use image::{Rgba, RgbaImage, Pixel};
+///
+/// let mut img = RgbaImage::new(100, 100);
+/// let start = Rgba::from_slice(&[0, 128, 0, 0]);
+/// let end = Rgba::from_slice(&[255, 255, 255, 255]);
+///
+/// image::imageops::vertical_gradient(&mut img, start, end);
+/// img.save("vertical_gradient.png").unwrap();
+/// ```
+pub fn vertical_gradient<S, P, I>(img: &mut I, start: &P, stop: &P)
+where
+ I: GenericImage<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + Lerp + 'static,
+{
+ for y in 0..img.height() {
+ let pixel = start.map2(stop, |a, b| {
+ let y = <S::Ratio as num_traits::NumCast>::from(y).unwrap();
+ let height = <S::Ratio as num_traits::NumCast>::from(img.height() - 1).unwrap();
+ S::lerp(a, b, y / height)
+ });
+
+ for x in 0..img.width() {
+ img.put_pixel(x, y, pixel);
+ }
+ }
+}
+
+/// Fill the image with a linear horizontal gradient
+///
+/// This function assumes a linear color space.
+///
+/// # Examples
+/// ```no_run
+/// use image::{Rgba, RgbaImage, Pixel};
+///
+/// let mut img = RgbaImage::new(100, 100);
+/// let start = Rgba::from_slice(&[0, 128, 0, 0]);
+/// let end = Rgba::from_slice(&[255, 255, 255, 255]);
+///
+/// image::imageops::horizontal_gradient(&mut img, start, end);
+/// img.save("horizontal_gradient.png").unwrap();
+/// ```
+pub fn horizontal_gradient<S, P, I>(img: &mut I, start: &P, stop: &P)
+where
+ I: GenericImage<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + Lerp + 'static,
+{
+ for x in 0..img.width() {
+ let pixel = start.map2(stop, |a, b| {
+ let x = <S::Ratio as num_traits::NumCast>::from(x).unwrap();
+ let width = <S::Ratio as num_traits::NumCast>::from(img.width() - 1).unwrap();
+ S::lerp(a, b, x / width)
+ });
+
+ for y in 0..img.height() {
+ img.put_pixel(x, y, pixel);
+ }
+ }
+}
+
+/// Replace the contents of an image at a given coordinate (x, y)
+pub fn replace<I, J>(bottom: &mut I, top: &J, x: i64, y: i64)
+where
+ I: GenericImage,
+ J: GenericImageView<Pixel = I::Pixel>,
+{
+ let bottom_dims = bottom.dimensions();
+ let top_dims = top.dimensions();
+
+ // Crop our top image if we're going out of bounds
+ let (origin_bottom_x, origin_bottom_y, origin_top_x, origin_top_y, range_width, range_height) =
+ overlay_bounds_ext(bottom_dims, top_dims, x, y);
+
+ for y in 0..range_height {
+ for x in 0..range_width {
+ let p = top.get_pixel(origin_top_x + x, origin_top_y + y);
+ bottom.put_pixel(origin_bottom_x + x, origin_bottom_y + y, p);
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+
+ use super::{overlay, overlay_bounds_ext};
+ use crate::color::Rgb;
+ use crate::ImageBuffer;
+ use crate::RgbaImage;
+
+ #[test]
+ fn test_overlay_bounds_ext() {
+ assert_eq!(
+ overlay_bounds_ext((10, 10), (10, 10), 0, 0),
+ (0, 0, 0, 0, 10, 10)
+ );
+ assert_eq!(
+ overlay_bounds_ext((10, 10), (10, 10), 1, 0),
+ (1, 0, 0, 0, 9, 10)
+ );
+ assert_eq!(
+ overlay_bounds_ext((10, 10), (10, 10), 0, 11),
+ (0, 0, 0, 0, 0, 0)
+ );
+ assert_eq!(
+ overlay_bounds_ext((10, 10), (10, 10), -1, 0),
+ (0, 0, 1, 0, 9, 10)
+ );
+ assert_eq!(
+ overlay_bounds_ext((10, 10), (10, 10), -10, 0),
+ (0, 0, 0, 0, 0, 0)
+ );
+ assert_eq!(
+ overlay_bounds_ext((10, 10), (10, 10), 1i64 << 50, 0),
+ (0, 0, 0, 0, 0, 0)
+ );
+ assert_eq!(
+ overlay_bounds_ext((10, 10), (10, 10), -(1i64 << 50), 0),
+ (0, 0, 0, 0, 0, 0)
+ );
+ assert_eq!(
+ overlay_bounds_ext((10, 10), (u32::MAX, 10), 10 - i64::from(u32::MAX), 0),
+ (0, 0, u32::MAX - 10, 0, 10, 10)
+ );
+ }
+
+ #[test]
+ /// Test that images written into other images works
+ fn test_image_in_image() {
+ let mut target = ImageBuffer::new(32, 32);
+ let source = ImageBuffer::from_pixel(16, 16, Rgb([255u8, 0, 0]));
+ overlay(&mut target, &source, 0, 0);
+ assert!(*target.get_pixel(0, 0) == Rgb([255u8, 0, 0]));
+ assert!(*target.get_pixel(15, 0) == Rgb([255u8, 0, 0]));
+ assert!(*target.get_pixel(16, 0) == Rgb([0u8, 0, 0]));
+ assert!(*target.get_pixel(0, 15) == Rgb([255u8, 0, 0]));
+ assert!(*target.get_pixel(0, 16) == Rgb([0u8, 0, 0]));
+ }
+
+ #[test]
+ /// Test that images written outside of a frame doesn't blow up
+ fn test_image_in_image_outside_of_bounds() {
+ let mut target = ImageBuffer::new(32, 32);
+ let source = ImageBuffer::from_pixel(32, 32, Rgb([255u8, 0, 0]));
+ overlay(&mut target, &source, 1, 1);
+ assert!(*target.get_pixel(0, 0) == Rgb([0, 0, 0]));
+ assert!(*target.get_pixel(1, 1) == Rgb([255u8, 0, 0]));
+ assert!(*target.get_pixel(31, 31) == Rgb([255u8, 0, 0]));
+ }
+
+ #[test]
+ /// Test that images written to coordinates out of the frame doesn't blow up
+ /// (issue came up in #848)
+ fn test_image_outside_image_no_wrap_around() {
+ let mut target = ImageBuffer::new(32, 32);
+ let source = ImageBuffer::from_pixel(32, 32, Rgb([255u8, 0, 0]));
+ overlay(&mut target, &source, 33, 33);
+ assert!(*target.get_pixel(0, 0) == Rgb([0, 0, 0]));
+ assert!(*target.get_pixel(1, 1) == Rgb([0, 0, 0]));
+ assert!(*target.get_pixel(31, 31) == Rgb([0, 0, 0]));
+ }
+
+ #[test]
+ /// Test that images written to coordinates with overflow works
+ fn test_image_coordinate_overflow() {
+ let mut target = ImageBuffer::new(16, 16);
+ let source = ImageBuffer::from_pixel(32, 32, Rgb([255u8, 0, 0]));
+ // Overflows to 'sane' coordinates, but the top image is larger than the bottom one.
+ overlay(
+ &mut target,
+ &source,
+ i64::from(u32::max_value() - 31),
+ i64::from(u32::max_value() - 31),
+ );
+ assert!(*target.get_pixel(0, 0) == Rgb([0, 0, 0]));
+ assert!(*target.get_pixel(1, 1) == Rgb([0, 0, 0]));
+ assert!(*target.get_pixel(15, 15) == Rgb([0, 0, 0]));
+ }
+
+ use super::{horizontal_gradient, vertical_gradient};
+
+ #[test]
+ /// Test that horizontal gradients are correctly generated
+ fn test_image_horizontal_gradient_limits() {
+ let mut img = ImageBuffer::new(100, 1);
+
+ let start = Rgb([0u8, 128, 0]);
+ let end = Rgb([255u8, 255, 255]);
+
+ horizontal_gradient(&mut img, &start, &end);
+
+ assert_eq!(img.get_pixel(0, 0), &start);
+ assert_eq!(img.get_pixel(img.width() - 1, 0), &end);
+ }
+
+ #[test]
+ /// Test that vertical gradients are correctly generated
+ fn test_image_vertical_gradient_limits() {
+ let mut img = ImageBuffer::new(1, 100);
+
+ let start = Rgb([0u8, 128, 0]);
+ let end = Rgb([255u8, 255, 255]);
+
+ vertical_gradient(&mut img, &start, &end);
+
+ assert_eq!(img.get_pixel(0, 0), &start);
+ assert_eq!(img.get_pixel(0, img.height() - 1), &end);
+ }
+
+ #[test]
+ /// Test that blur doesn't panic when passed 0.0
+ fn test_blur_zero() {
+ let image = RgbaImage::new(50, 50);
+ let _ = super::blur(&image, 0.0);
+ }
+}
diff --git a/vendor/image/src/imageops/sample.rs b/vendor/image/src/imageops/sample.rs
new file mode 100644
index 0000000..a362f83
--- /dev/null
+++ b/vendor/image/src/imageops/sample.rs
@@ -0,0 +1,1228 @@
+//! Functions and filters for the sampling of pixels.
+
+// See http://cs.brown.edu/courses/cs123/lectures/08_Image_Processing_IV.pdf
+// for some of the theory behind image scaling and convolution
+
+use std::f32;
+
+use num_traits::{NumCast, ToPrimitive, Zero};
+
+use crate::image::{GenericImage, GenericImageView};
+use crate::traits::{Enlargeable, Pixel, Primitive};
+use crate::utils::clamp;
+use crate::{ImageBuffer, Rgba32FImage};
+
+/// Available Sampling Filters.
+///
+/// ## Examples
+///
+/// To test the different sampling filters on a real example, you can find two
+/// examples called
+/// [`scaledown`](https://github.com/image-rs/image/tree/master/examples/scaledown)
+/// and
+/// [`scaleup`](https://github.com/image-rs/image/tree/master/examples/scaleup)
+/// in the `examples` directory of the crate source code.
+///
+/// Here is a 3.58 MiB
+/// [test image](https://github.com/image-rs/image/blob/master/examples/scaledown/test.jpg)
+/// that has been scaled down to 300x225 px:
+///
+/// <!-- NOTE: To test new test images locally, replace the GitHub path with `../../../docs/` -->
+/// <div style="display: flex; flex-wrap: wrap; align-items: flex-start;">
+/// <div style="margin: 0 8px 8px 0;">
+/// <img src="https://raw.githubusercontent.com/image-rs/image/master/examples/scaledown/scaledown-test-near.png" title="Nearest"><br>
+/// Nearest Neighbor
+/// </div>
+/// <div style="margin: 0 8px 8px 0;">
+/// <img src="https://raw.githubusercontent.com/image-rs/image/master/examples/scaledown/scaledown-test-tri.png" title="Triangle"><br>
+/// Linear: Triangle
+/// </div>
+/// <div style="margin: 0 8px 8px 0;">
+/// <img src="https://raw.githubusercontent.com/image-rs/image/master/examples/scaledown/scaledown-test-cmr.png" title="CatmullRom"><br>
+/// Cubic: Catmull-Rom
+/// </div>
+/// <div style="margin: 0 8px 8px 0;">
+/// <img src="https://raw.githubusercontent.com/image-rs/image/master/examples/scaledown/scaledown-test-gauss.png" title="Gaussian"><br>
+/// Gaussian
+/// </div>
+/// <div style="margin: 0 8px 8px 0;">
+/// <img src="https://raw.githubusercontent.com/image-rs/image/master/examples/scaledown/scaledown-test-lcz2.png" title="Lanczos3"><br>
+/// Lanczos with window 3
+/// </div>
+/// </div>
+///
+/// ## Speed
+///
+/// Time required to create each of the examples above, tested on an Intel
+/// i7-4770 CPU with Rust 1.37 in release mode:
+///
+/// <table style="width: auto;">
+/// <tr>
+/// <th>Nearest</th>
+/// <td>31 ms</td>
+/// </tr>
+/// <tr>
+/// <th>Triangle</th>
+/// <td>414 ms</td>
+/// </tr>
+/// <tr>
+/// <th>CatmullRom</th>
+/// <td>817 ms</td>
+/// </tr>
+/// <tr>
+/// <th>Gaussian</th>
+/// <td>1180 ms</td>
+/// </tr>
+/// <tr>
+/// <th>Lanczos3</th>
+/// <td>1170 ms</td>
+/// </tr>
+/// </table>
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub enum FilterType {
+ /// Nearest Neighbor
+ Nearest,
+
+ /// Linear Filter
+ Triangle,
+
+ /// Cubic Filter
+ CatmullRom,
+
+ /// Gaussian Filter
+ Gaussian,
+
+ /// Lanczos with window 3
+ Lanczos3,
+}
+
+/// A Representation of a separable filter.
+pub(crate) struct Filter<'a> {
+ /// The filter's filter function.
+ pub(crate) kernel: Box<dyn Fn(f32) -> f32 + 'a>,
+
+ /// The window on which this filter operates.
+ pub(crate) support: f32,
+}
+
+struct FloatNearest(f32);
+
+// to_i64, to_u64, and to_f64 implicitly affect all other lower conversions.
+// Note that to_f64 by default calls to_i64 and thus needs to be overridden.
+impl ToPrimitive for FloatNearest {
+ // to_{i,u}64 is required, to_{i,u}{8,16} are useful.
+ // If a use case for the full 32 bits is found, it's trivial to add.
+ fn to_i8(&self) -> Option<i8> {
+ self.0.round().to_i8()
+ }
+ fn to_i16(&self) -> Option<i16> {
+ self.0.round().to_i16()
+ }
+ fn to_i64(&self) -> Option<i64> {
+ self.0.round().to_i64()
+ }
+ fn to_u8(&self) -> Option<u8> {
+ self.0.round().to_u8()
+ }
+ fn to_u16(&self) -> Option<u16> {
+ self.0.round().to_u16()
+ }
+ fn to_u64(&self) -> Option<u64> {
+ self.0.round().to_u64()
+ }
+ fn to_f64(&self) -> Option<f64> {
+ self.0.to_f64()
+ }
+}
+
+// sinc function: the ideal sampling filter.
+fn sinc(t: f32) -> f32 {
+ let a = t * f32::consts::PI;
+
+ if t == 0.0 {
+ 1.0
+ } else {
+ a.sin() / a
+ }
+}
+
+// lanczos kernel function. A windowed sinc function.
+fn lanczos(x: f32, t: f32) -> f32 {
+ if x.abs() < t {
+ sinc(x) * sinc(x / t)
+ } else {
+ 0.0
+ }
+}
+
+// Calculate a cubic spline based on the b and c parameters,
+// as described by Mitchell and Netravali.
+fn bc_cubic_spline(x: f32, b: f32, c: f32) -> f32 {
+ let a = x.abs();
+
+ let k = if a < 1.0 {
+ (12.0 - 9.0 * b - 6.0 * c) * a.powi(3)
+ + (-18.0 + 12.0 * b + 6.0 * c) * a.powi(2)
+ + (6.0 - 2.0 * b)
+ } else if a < 2.0 {
+ (-b - 6.0 * c) * a.powi(3)
+ + (6.0 * b + 30.0 * c) * a.powi(2)
+ + (-12.0 * b - 48.0 * c) * a
+ + (8.0 * b + 24.0 * c)
+ } else {
+ 0.0
+ };
+
+ k / 6.0
+}
+
+/// The Gaussian Function.
+/// `r` is the standard deviation.
+pub(crate) fn gaussian(x: f32, r: f32) -> f32 {
+ ((2.0 * f32::consts::PI).sqrt() * r).recip() * (-x.powi(2) / (2.0 * r.powi(2))).exp()
+}
+
+/// Calculate the lanczos kernel with a window of 3
+pub(crate) fn lanczos3_kernel(x: f32) -> f32 {
+ lanczos(x, 3.0)
+}
+
+/// Calculate the gaussian function with a
+/// standard deviation of 0.5
+pub(crate) fn gaussian_kernel(x: f32) -> f32 {
+ gaussian(x, 0.5)
+}
+
+/// Calculate the Catmull-Rom cubic spline.
+/// Also known as a form of `BiCubic` sampling in two dimensions.
+pub(crate) fn catmullrom_kernel(x: f32) -> f32 {
+ bc_cubic_spline(x, 0.0, 0.5)
+}
+
+/// Calculate the triangle function.
+/// Also known as `BiLinear` sampling in two dimensions.
+pub(crate) fn triangle_kernel(x: f32) -> f32 {
+ if x.abs() < 1.0 {
+ 1.0 - x.abs()
+ } else {
+ 0.0
+ }
+}
+
+/// Calculate the box kernel.
+/// Only pixels inside the box should be considered, and those
+/// contribute equally. So this method simply returns 1.
+pub(crate) fn box_kernel(_x: f32) -> f32 {
+ 1.0
+}
+
+// Sample the rows of the supplied image using the provided filter.
+// The height of the image remains unchanged.
+// ```new_width``` is the desired width of the new image
+// ```filter``` is the filter to use for sampling.
+// ```image``` is not necessarily Rgba and the order of channels is passed through.
+fn horizontal_sample<P, S>(
+ image: &Rgba32FImage,
+ new_width: u32,
+ filter: &mut Filter,
+) -> ImageBuffer<P, Vec<S>>
+where
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(new_width, height);
+ let mut ws = Vec::new();
+
+ let max: f32 = NumCast::from(S::DEFAULT_MAX_VALUE).unwrap();
+ let min: f32 = NumCast::from(S::DEFAULT_MIN_VALUE).unwrap();
+ let ratio = width as f32 / new_width as f32;
+ let sratio = if ratio < 1.0 { 1.0 } else { ratio };
+ let src_support = filter.support * sratio;
+
+ for outx in 0..new_width {
+ // Find the point in the input image corresponding to the centre
+ // of the current pixel in the output image.
+ let inputx = (outx as f32 + 0.5) * ratio;
+
+ // Left and right are slice bounds for the input pixels relevant
+ // to the output pixel we are calculating. Pixel x is relevant
+ // if and only if (x >= left) && (x < right).
+
+ // Invariant: 0 <= left < right <= width
+
+ let left = (inputx - src_support).floor() as i64;
+ let left = clamp(left, 0, <i64 as From<_>>::from(width) - 1) as u32;
+
+ let right = (inputx + src_support).ceil() as i64;
+ let right = clamp(
+ right,
+ <i64 as From<_>>::from(left) + 1,
+ <i64 as From<_>>::from(width),
+ ) as u32;
+
+ // Go back to left boundary of pixel, to properly compare with i
+ // below, as the kernel treats the centre of a pixel as 0.
+ let inputx = inputx - 0.5;
+
+ ws.clear();
+ let mut sum = 0.0;
+ for i in left..right {
+ let w = (filter.kernel)((i as f32 - inputx) / sratio);
+ ws.push(w);
+ sum += w;
+ }
+ ws.iter_mut().for_each(|w| *w /= sum);
+
+ for y in 0..height {
+ let mut t = (0.0, 0.0, 0.0, 0.0);
+
+ for (i, w) in ws.iter().enumerate() {
+ let p = image.get_pixel(left + i as u32, y);
+
+ #[allow(deprecated)]
+ let vec = p.channels4();
+
+ t.0 += vec.0 * w;
+ t.1 += vec.1 * w;
+ t.2 += vec.2 * w;
+ t.3 += vec.3 * w;
+ }
+
+ #[allow(deprecated)]
+ let t = Pixel::from_channels(
+ NumCast::from(FloatNearest(clamp(t.0, min, max))).unwrap(),
+ NumCast::from(FloatNearest(clamp(t.1, min, max))).unwrap(),
+ NumCast::from(FloatNearest(clamp(t.2, min, max))).unwrap(),
+ NumCast::from(FloatNearest(clamp(t.3, min, max))).unwrap(),
+ );
+
+ out.put_pixel(outx, y, t);
+ }
+ }
+
+ out
+}
+
+/// Linearly sample from an image using coordinates in [0, 1].
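+///
+/// A minimal sketch with a 2x1 grayscale image holding the values 0 and 100:
+///
+/// ```
+/// use image::imageops::sample_bilinear;
+/// use image::{GrayImage, Luma};
+///
+/// let img = GrayImage::from_fn(2, 1, |x, _| Luma([(x * 100) as u8]));
+/// // u = 0.25 and u = 0.75 hit the centres of the two pixels exactly.
+/// assert_eq!(sample_bilinear(&img, 0.25, 0.5), Some(Luma([0u8])));
+/// assert_eq!(sample_bilinear(&img, 0.75, 0.5), Some(Luma([100u8])));
+/// // Coordinates outside [0, 1] are rejected.
+/// assert_eq!(sample_bilinear(&img, 1.5, 0.5), None);
+/// ```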
+pub fn sample_bilinear<P: Pixel>(
+ img: &impl GenericImageView<Pixel = P>,
+ u: f32,
+ v: f32,
+) -> Option<P> {
+ if ![u, v].iter().all(|c| (0.0..=1.0).contains(c)) {
+ return None;
+ }
+
+ let (w, h) = img.dimensions();
+ if w == 0 || h == 0 {
+ return None;
+ }
+
+ let ui = w as f32 * u - 0.5;
+ let vi = h as f32 * v - 0.5;
+ interpolate_bilinear(
+ img,
+ ui.max(0.).min((w - 1) as f32),
+ vi.max(0.).min((h - 1) as f32),
+ )
+}
+
+/// Sample from an image using coordinates in [0, 1], taking the nearest coordinate.
+pub fn sample_nearest<P: Pixel>(
+ img: &impl GenericImageView<Pixel = P>,
+ u: f32,
+ v: f32,
+) -> Option<P> {
+ if ![u, v].iter().all(|c| (0.0..=1.0).contains(c)) {
+ return None;
+ }
+
+ let (w, h) = img.dimensions();
+ let ui = w as f32 * u - 0.5;
+ let ui = ui.max(0.).min((w.saturating_sub(1)) as f32);
+
+ let vi = h as f32 * v - 0.5;
+ let vi = vi.max(0.).min((h.saturating_sub(1)) as f32);
+ interpolate_nearest(img, ui, vi)
+}
+
+/// Sample from an image using coordinates in [0, w-1] and [0, h-1], taking the
+/// nearest pixel.
+///
+/// Coordinates outside the image bounds will return `None`, however the
+/// behavior for points within half a pixel of the image bounds may change in
+/// the future.
+pub fn interpolate_nearest<P: Pixel>(
+ img: &impl GenericImageView<Pixel = P>,
+ x: f32,
+ y: f32,
+) -> Option<P> {
+ let (w, h) = img.dimensions();
+ if w == 0 || h == 0 {
+ return None;
+ }
+ if !(0.0..=((w - 1) as f32)).contains(&x) {
+ return None;
+ }
+ if !(0.0..=((h - 1) as f32)).contains(&y) {
+ return None;
+ }
+
+ Some(img.get_pixel(x.round() as u32, y.round() as u32))
+}
+
+/// Linearly sample from an image using coordinates in [0, w-1] and [0, h-1].
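+///
+/// A minimal sketch: interpolating halfway between two grayscale pixels (0 and 100)
+/// yields their average.
+///
+/// ```
+/// use image::imageops::interpolate_bilinear;
+/// use image::{GrayImage, Luma};
+///
+/// let img = GrayImage::from_fn(2, 1, |x, _| Luma([(x * 100) as u8]));
+/// assert_eq!(interpolate_bilinear(&img, 0.5, 0.0), Some(Luma([50u8])));
+/// // Out-of-bounds coordinates return `None`.
+/// assert_eq!(interpolate_bilinear(&img, 2.0, 0.0), None);
+/// ```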
+pub fn interpolate_bilinear<P: Pixel>(
+ img: &impl GenericImageView<Pixel = P>,
+ x: f32,
+ y: f32,
+) -> Option<P> {
+ let (w, h) = img.dimensions();
+ if w == 0 || h == 0 {
+ return None;
+ }
+ if !(0.0..=((w - 1) as f32)).contains(&x) {
+ return None;
+ }
+ if !(0.0..=((h - 1) as f32)).contains(&y) {
+ return None;
+ }
+
+ let uf = x.floor();
+ let vf = y.floor();
+ let uc = (x + 1.).min((w - 1) as f32);
+ let vc = (y + 1.).min((h - 1) as f32);
+
+ // clamp coords to the range of the image
+ let coords = [[uf, vf], [uf, vc], [uc, vf], [uc, vc]];
+
+ assert!(coords
+ .iter()
+ .all(|&[u, v]| { img.in_bounds(u as u32, v as u32) }));
+ let samples = coords.map(|[u, v]| img.get_pixel(u as u32, v as u32));
+ assert!(P::CHANNEL_COUNT <= 4);
+
+ // convert samples to f32
+ // currently rgba is the largest one,
+ // so just store as many items as necessary,
+ // because there's not a simple way to be generic over all of them.
+ let [sff, sfc, scf, scc] = samples.map(|s| {
+ let mut out = [0.; 4];
+ for (i, c) in s.channels().iter().enumerate() {
+ out[i] = c.to_f32().unwrap();
+ }
+ out
+ });
+ // weights
+ let [ufw, vfw] = [x - uf, y - vf];
+ let [ucw, vcw] = [1. - ufw, 1. - vfw];
+
+ // https://en.wikipedia.org/wiki/Bilinear_interpolation#Weighted_mean
+ // the distance between pixels is 1 so there is no denominator
+ let wff = ucw * vcw;
+ let wfc = ucw * vfw;
+ let wcf = ufw * vcw;
+ let wcc = ufw * vfw;
+ assert!(f32::abs((wff + wfc + wcf + wcc) - 1.) < 1e-3);
+
+ // hack to get around not being able to construct a generic Pixel
+ let mut out = samples[0];
+ for (i, c) in out.channels_mut().iter_mut().enumerate() {
+ let v = wff * sff[i] + wfc * sfc[i] + wcf * scf[i] + wcc * scc[i];
+ // This rounding may introduce quantization errors,
+ // but there is not much we can do about it here.
+ *c = <P::Subpixel as NumCast>::from(v.round()).unwrap_or({
+ if v < 0.0 {
+ P::Subpixel::DEFAULT_MIN_VALUE
+ } else {
+ P::Subpixel::DEFAULT_MAX_VALUE
+ }
+ })
+ }
+ Some(out)
+}
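+
+// Worked example, not from the upstream source: for a 2x2 image sampled at
+// (x, y) = (0.5, 0.5) the floor/ceil coordinates are uf = vf = 0 and uc = vc = 1,
+// so ufw = vfw = 0.5 and ucw = vcw = 0.5. All four weights are
+// wff = wfc = wcf = wcc = 0.25, and a corner value of 255 contributes
+// 0.25 * 255 = 63.75, which rounds to 64 - matching the
+// `test_sample_bilinear_correctness` test at the bottom of this file.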
+
+// Sample the columns of the supplied image using the provided filter.
+// The width of the image remains unchanged.
+// ```new_height``` is the desired height of the new image
+// ```filter``` is the filter to use for sampling.
+// The return value is not necessarily Rgba; the underlying order of channels in ```image``` is
+// preserved.
+fn vertical_sample<I, P, S>(image: &I, new_height: u32, filter: &mut Filter) -> Rgba32FImage
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(width, new_height);
+ let mut ws = Vec::new();
+
+ let ratio = height as f32 / new_height as f32;
+ let sratio = if ratio < 1.0 { 1.0 } else { ratio };
+ let src_support = filter.support * sratio;
+
+ for outy in 0..new_height {
+ // For an explanation of this algorithm, see the comments
+ // in horizontal_sample.
+ let inputy = (outy as f32 + 0.5) * ratio;
+
+ let left = (inputy - src_support).floor() as i64;
+ let left = clamp(left, 0, <i64 as From<_>>::from(height) - 1) as u32;
+
+ let right = (inputy + src_support).ceil() as i64;
+ let right = clamp(
+ right,
+ <i64 as From<_>>::from(left) + 1,
+ <i64 as From<_>>::from(height),
+ ) as u32;
+
+ let inputy = inputy - 0.5;
+
+ ws.clear();
+ let mut sum = 0.0;
+ for i in left..right {
+ let w = (filter.kernel)((i as f32 - inputy) / sratio);
+ ws.push(w);
+ sum += w;
+ }
+ ws.iter_mut().for_each(|w| *w /= sum);
+
+ for x in 0..width {
+ let mut t = (0.0, 0.0, 0.0, 0.0);
+
+ for (i, w) in ws.iter().enumerate() {
+ let p = image.get_pixel(x, left + i as u32);
+
+ #[allow(deprecated)]
+ let (k1, k2, k3, k4) = p.channels4();
+ let vec: (f32, f32, f32, f32) = (
+ NumCast::from(k1).unwrap(),
+ NumCast::from(k2).unwrap(),
+ NumCast::from(k3).unwrap(),
+ NumCast::from(k4).unwrap(),
+ );
+
+ t.0 += vec.0 * w;
+ t.1 += vec.1 * w;
+ t.2 += vec.2 * w;
+ t.3 += vec.3 * w;
+ }
+
+ #[allow(deprecated)]
+ // This is not necessarily Rgba.
+ let t = Pixel::from_channels(t.0, t.1, t.2, t.3);
+
+ out.put_pixel(x, outy, t);
+ }
+ }
+
+ out
+}
+
+/// Local struct for keeping track of pixel sums for fast thumbnail averaging
+struct ThumbnailSum<S: Primitive + Enlargeable>(S::Larger, S::Larger, S::Larger, S::Larger);
+
+impl<S: Primitive + Enlargeable> ThumbnailSum<S> {
+ fn zeroed() -> Self {
+ ThumbnailSum(
+ S::Larger::zero(),
+ S::Larger::zero(),
+ S::Larger::zero(),
+ S::Larger::zero(),
+ )
+ }
+
+ fn sample_val(val: S) -> S::Larger {
+ <S::Larger as NumCast>::from(val).unwrap()
+ }
+
+ fn add_pixel<P: Pixel<Subpixel = S>>(&mut self, pixel: P) {
+ #[allow(deprecated)]
+ let pixel = pixel.channels4();
+ self.0 += Self::sample_val(pixel.0);
+ self.1 += Self::sample_val(pixel.1);
+ self.2 += Self::sample_val(pixel.2);
+ self.3 += Self::sample_val(pixel.3);
+ }
+}
+
+/// Resize the supplied image to the specific dimensions.
+///
+/// For downscaling, this method uses a fast integer algorithm where each source pixel contributes
+/// to exactly one target pixel. May give aliasing artifacts if new size is close to old size.
+///
+/// If the current width is smaller than the new width (or likewise for the height), another
+/// strategy is used instead. For each pixel in the output, a rectangular region of the input is
+/// determined, just as before. When no input pixel falls within this region, the nearest
+/// pixels are interpolated instead.
+///
+/// For speed reasons, all interpolation is performed linearly over the colour values. It will not
+/// take the pixel colour spaces into account.
+pub fn thumbnail<I, P, S>(image: &I, new_width: u32, new_height: u32) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + Enlargeable + 'static,
+{
+ let (width, height) = image.dimensions();
+ let mut out = ImageBuffer::new(new_width, new_height);
+
+ let x_ratio = width as f32 / new_width as f32;
+ let y_ratio = height as f32 / new_height as f32;
+
+ for outy in 0..new_height {
+ let bottomf = outy as f32 * y_ratio;
+ let topf = bottomf + y_ratio;
+
+ let bottom = clamp(bottomf.ceil() as u32, 0, height - 1);
+ let top = clamp(topf.ceil() as u32, bottom, height);
+
+ for outx in 0..new_width {
+ let leftf = outx as f32 * x_ratio;
+ let rightf = leftf + x_ratio;
+
+ let left = clamp(leftf.ceil() as u32, 0, width - 1);
+ let right = clamp(rightf.ceil() as u32, left, width);
+
+ let avg = if bottom != top && left != right {
+ thumbnail_sample_block(image, left, right, bottom, top)
+ } else if bottom != top {
+ // && left == right
+ // In the first column we have left == 0 and right > ceil(y_scale) > 0 so this
+ // assertion can never trigger.
+ debug_assert!(
+ left > 0 && right > 0,
+ "First output column must have corresponding pixels"
+ );
+
+ let fraction_horizontal = (leftf.fract() + rightf.fract()) / 2.;
+ thumbnail_sample_fraction_horizontal(
+ image,
+ right - 1,
+ fraction_horizontal,
+ bottom,
+ top,
+ )
+ } else if left != right {
+ // && bottom == top
+ // In the first line we have bottom == 0 and top > ceil(x_scale) > 0 so this
+ // assertion can never trigger.
+ debug_assert!(
+ bottom > 0 && top > 0,
+ "First output row must have corresponding pixels"
+ );
+
+ let fraction_vertical = (topf.fract() + bottomf.fract()) / 2.;
+ thumbnail_sample_fraction_vertical(image, left, right, top - 1, fraction_vertical)
+ } else {
+ // bottom == top && left == right
+ let fraction_horizontal = (topf.fract() + bottomf.fract()) / 2.;
+ let fraction_vertical = (leftf.fract() + rightf.fract()) / 2.;
+
+ thumbnail_sample_fraction_both(
+ image,
+ right - 1,
+ fraction_horizontal,
+ top - 1,
+ fraction_vertical,
+ )
+ };
+
+ #[allow(deprecated)]
+ let pixel = Pixel::from_channels(avg.0, avg.1, avg.2, avg.3);
+ out.put_pixel(outx, outy, pixel);
+ }
+ }
+
+ out
+}
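+
+// Illustrative sketch, not from the upstream source: typical use of `thumbnail`,
+// assuming the usual `image::imageops` re-export. The dimensions are arbitrary.
+//
+//     let img: image::RgbImage = image::ImageBuffer::new(640, 480);
+//     let thumb = image::imageops::thumbnail(&img, 128, 96); // fast averaging downscale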
+
+/// Get a pixel for a thumbnail where the input window encloses at least a full pixel.
+fn thumbnail_sample_block<I, P, S>(
+ image: &I,
+ left: u32,
+ right: u32,
+ bottom: u32,
+ top: u32,
+) -> (S, S, S, S)
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S>,
+ S: Primitive + Enlargeable,
+{
+ let mut sum = ThumbnailSum::zeroed();
+
+ for y in bottom..top {
+ for x in left..right {
+ let k = image.get_pixel(x, y);
+ sum.add_pixel(k);
+ }
+ }
+
+ let n = <S::Larger as NumCast>::from((right - left) * (top - bottom)).unwrap();
+ let round = <S::Larger as NumCast>::from(n / NumCast::from(2).unwrap()).unwrap();
+ (
+ S::clamp_from((sum.0 + round) / n),
+ S::clamp_from((sum.1 + round) / n),
+ S::clamp_from((sum.2 + round) / n),
+ S::clamp_from((sum.3 + round) / n),
+ )
+}
+
+/// Get a thumbnail pixel where the input window covers at least one full pixel vertically but
+/// only a fraction of a pixel horizontally.
+fn thumbnail_sample_fraction_horizontal<I, P, S>(
+ image: &I,
+ left: u32,
+ fraction_horizontal: f32,
+ bottom: u32,
+ top: u32,
+) -> (S, S, S, S)
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S>,
+ S: Primitive + Enlargeable,
+{
+ let fract = fraction_horizontal;
+
+ let mut sum_left = ThumbnailSum::zeroed();
+ let mut sum_right = ThumbnailSum::zeroed();
+ for x in bottom..top {
+ let k_left = image.get_pixel(left, x);
+ sum_left.add_pixel(k_left);
+
+ let k_right = image.get_pixel(left + 1, x);
+ sum_right.add_pixel(k_right);
+ }
+
+ // Now we approximate: left/n*(1-fract) + right/n*fract
+ let fact_right = fract / ((top - bottom) as f32);
+ let fact_left = (1. - fract) / ((top - bottom) as f32);
+
+ let mix_left_and_right = |leftv: S::Larger, rightv: S::Larger| {
+ <S as NumCast>::from(
+ fact_left * leftv.to_f32().unwrap() + fact_right * rightv.to_f32().unwrap(),
+ )
+ .expect("Average sample value should fit into sample type")
+ };
+
+ (
+ mix_left_and_right(sum_left.0, sum_right.0),
+ mix_left_and_right(sum_left.1, sum_right.1),
+ mix_left_and_right(sum_left.2, sum_right.2),
+ mix_left_and_right(sum_left.3, sum_right.3),
+ )
+}
+
+/// Get a thumbnail pixel where the input window covers at least one full pixel horizontally but
+/// only a fraction of a pixel vertically.
+fn thumbnail_sample_fraction_vertical<I, P, S>(
+ image: &I,
+ left: u32,
+ right: u32,
+ bottom: u32,
+ fraction_vertical: f32,
+) -> (S, S, S, S)
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S>,
+ S: Primitive + Enlargeable,
+{
+ let fract = fraction_vertical;
+
+ let mut sum_bot = ThumbnailSum::zeroed();
+ let mut sum_top = ThumbnailSum::zeroed();
+ for x in left..right {
+ let k_bot = image.get_pixel(x, bottom);
+ sum_bot.add_pixel(k_bot);
+
+ let k_top = image.get_pixel(x, bottom + 1);
+ sum_top.add_pixel(k_top);
+ }
+
+ // Now we approximate: bot/n*fract + top/n*(1-fract)
+ let fact_top = fract / ((right - left) as f32);
+ let fact_bot = (1. - fract) / ((right - left) as f32);
+
+ let mix_bot_and_top = |botv: S::Larger, topv: S::Larger| {
+ <S as NumCast>::from(fact_bot * botv.to_f32().unwrap() + fact_top * topv.to_f32().unwrap())
+ .expect("Average sample value should fit into sample type")
+ };
+
+ (
+ mix_bot_and_top(sum_bot.0, sum_top.0),
+ mix_bot_and_top(sum_bot.1, sum_top.1),
+ mix_bot_and_top(sum_bot.2, sum_top.2),
+ mix_bot_and_top(sum_bot.3, sum_top.3),
+ )
+}
+
+/// Get a single pixel for a thumbnail where the input window does not enclose any full pixel.
+fn thumbnail_sample_fraction_both<I, P, S>(
+ image: &I,
+ left: u32,
+ fraction_vertical: f32,
+ bottom: u32,
+ fraction_horizontal: f32,
+) -> (S, S, S, S)
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S>,
+ S: Primitive + Enlargeable,
+{
+ #[allow(deprecated)]
+ let k_bl = image.get_pixel(left, bottom).channels4();
+ #[allow(deprecated)]
+ let k_tl = image.get_pixel(left, bottom + 1).channels4();
+ #[allow(deprecated)]
+ let k_br = image.get_pixel(left + 1, bottom).channels4();
+ #[allow(deprecated)]
+ let k_tr = image.get_pixel(left + 1, bottom + 1).channels4();
+
+ let frac_v = fraction_vertical;
+ let frac_h = fraction_horizontal;
+
+ let fact_tr = frac_v * frac_h;
+ let fact_tl = frac_v * (1. - frac_h);
+ let fact_br = (1. - frac_v) * frac_h;
+ let fact_bl = (1. - frac_v) * (1. - frac_h);
+
+ let mix = |br: S, tr: S, bl: S, tl: S| {
+ <S as NumCast>::from(
+ fact_br * br.to_f32().unwrap()
+ + fact_tr * tr.to_f32().unwrap()
+ + fact_bl * bl.to_f32().unwrap()
+ + fact_tl * tl.to_f32().unwrap(),
+ )
+ .expect("Average sample value should fit into sample type")
+ };
+
+ (
+ mix(k_br.0, k_tr.0, k_bl.0, k_tl.0),
+ mix(k_br.1, k_tr.1, k_bl.1, k_tl.1),
+ mix(k_br.2, k_tr.2, k_bl.2, k_tl.2),
+ mix(k_br.3, k_tr.3, k_bl.3, k_tl.3),
+ )
+}
+
+/// Perform a 3x3 box filter on the supplied image.
+/// ```kernel``` is an array of the filter weights of length 9.
+pub fn filter3x3<I, P, S>(image: &I, kernel: &[f32]) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ // The kernel's input positions relative to the current pixel.
+ let taps: &[(isize, isize)] = &[
+ (-1, -1),
+ (0, -1),
+ (1, -1),
+ (-1, 0),
+ (0, 0),
+ (1, 0),
+ (-1, 1),
+ (0, 1),
+ (1, 1),
+ ];
+
+ let (width, height) = image.dimensions();
+
+ let mut out = ImageBuffer::new(width, height);
+
+ let max = S::DEFAULT_MAX_VALUE;
+ let max: f32 = NumCast::from(max).unwrap();
+
+ let sum = match kernel.iter().fold(0.0, |s, &item| s + item) {
+ x if x == 0.0 => 1.0,
+ sum => sum,
+ };
+ let sum = (sum, sum, sum, sum);
+
+ for y in 1..height - 1 {
+ for x in 1..width - 1 {
+ let mut t = (0.0, 0.0, 0.0, 0.0);
+
+ // TODO: There is no need to recalculate the kernel for each pixel.
+ // Only a subtract and addition is needed for pixels after the first
+ // in each row.
+ for (&k, &(a, b)) in kernel.iter().zip(taps.iter()) {
+ let k = (k, k, k, k);
+ let x0 = x as isize + a;
+ let y0 = y as isize + b;
+
+ let p = image.get_pixel(x0 as u32, y0 as u32);
+
+ #[allow(deprecated)]
+ let (k1, k2, k3, k4) = p.channels4();
+
+ let vec: (f32, f32, f32, f32) = (
+ NumCast::from(k1).unwrap(),
+ NumCast::from(k2).unwrap(),
+ NumCast::from(k3).unwrap(),
+ NumCast::from(k4).unwrap(),
+ );
+
+ t.0 += vec.0 * k.0;
+ t.1 += vec.1 * k.1;
+ t.2 += vec.2 * k.2;
+ t.3 += vec.3 * k.3;
+ }
+
+ let (t1, t2, t3, t4) = (t.0 / sum.0, t.1 / sum.1, t.2 / sum.2, t.3 / sum.3);
+
+ #[allow(deprecated)]
+ let t = Pixel::from_channels(
+ NumCast::from(clamp(t1, 0.0, max)).unwrap(),
+ NumCast::from(clamp(t2, 0.0, max)).unwrap(),
+ NumCast::from(clamp(t3, 0.0, max)).unwrap(),
+ NumCast::from(clamp(t4, 0.0, max)).unwrap(),
+ );
+
+ out.put_pixel(x, y, t);
+ }
+ }
+
+ out
+}
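+
+// Illustrative sketch, not from the upstream source: a 3x3 sharpening kernel applied
+// with `filter3x3`, assuming the usual `image::imageops` re-export. The kernel values
+// are illustrative only; any 9-element slice works.
+//
+//     let kernel = [ 0.0f32, -1.0,  0.0,
+//                   -1.0,     5.0, -1.0,
+//                    0.0,    -1.0,  0.0];
+//     let img: image::GrayImage = image::ImageBuffer::new(32, 32);
+//     let sharpened = image::imageops::filter3x3(&img, &kernel);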
+
+/// Resize the supplied image to the specified dimensions.
+/// ```nwidth``` and ```nheight``` are the new dimensions.
+/// ```filter``` is the sampling filter to use.
+pub fn resize<I: GenericImageView>(
+ image: &I,
+ nwidth: u32,
+ nheight: u32,
+ filter: FilterType,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+where
+ I::Pixel: 'static,
+ <I::Pixel as Pixel>::Subpixel: 'static,
+{
+ // Check if the new dimensions are the same as the old; if they are, make a copy instead of resampling.
+ if (nwidth, nheight) == image.dimensions() {
+ let mut tmp = ImageBuffer::new(image.width(), image.height());
+ tmp.copy_from(image, 0, 0).unwrap();
+ return tmp;
+ }
+
+ let mut method = match filter {
+ FilterType::Nearest => Filter {
+ kernel: Box::new(box_kernel),
+ support: 0.0,
+ },
+ FilterType::Triangle => Filter {
+ kernel: Box::new(triangle_kernel),
+ support: 1.0,
+ },
+ FilterType::CatmullRom => Filter {
+ kernel: Box::new(catmullrom_kernel),
+ support: 2.0,
+ },
+ FilterType::Gaussian => Filter {
+ kernel: Box::new(gaussian_kernel),
+ support: 3.0,
+ },
+ FilterType::Lanczos3 => Filter {
+ kernel: Box::new(lanczos3_kernel),
+ support: 3.0,
+ },
+ };
+
+ // Note: tmp is not necessarily actually Rgba
+ let tmp: Rgba32FImage = vertical_sample(image, nheight, &mut method);
+ horizontal_sample(&tmp, nwidth, &mut method)
+}
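+
+// Illustrative sketch, not from the upstream source: resizing with an explicit filter,
+// assuming the usual `image::imageops` re-exports. The target size is arbitrary.
+//
+//     use image::imageops::{resize, FilterType};
+//     let img: image::RgbaImage = image::ImageBuffer::new(800, 600);
+//     let small = resize(&img, 200, 150, FilterType::Lanczos3);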
+
+/// Performs a Gaussian blur on the supplied image.
+/// ```sigma``` is a measure of how much to blur by.
+pub fn blur<I: GenericImageView>(
+ image: &I,
+ sigma: f32,
+) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
+where
+ I::Pixel: 'static,
+{
+ let sigma = if sigma <= 0.0 { 1.0 } else { sigma };
+
+ let mut method = Filter {
+ kernel: Box::new(|x| gaussian(x, sigma)),
+ support: 2.0 * sigma,
+ };
+
+ let (width, height) = image.dimensions();
+
+ // Keep width and height the same for horizontal and
+ // vertical sampling.
+ // Note: tmp is not necessarily actually Rgba
+ let tmp: Rgba32FImage = vertical_sample(image, height, &mut method);
+ horizontal_sample(&tmp, width, &mut method)
+}
+
+/// Performs an unsharpen mask on the supplied image.
+/// ```sigma``` is the amount to blur the image by.
+/// ```threshold``` is the threshold for minimal brightness change that will be sharpened.
+///
+/// See <https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking>
+pub fn unsharpen<I, P, S>(image: &I, sigma: f32, threshold: i32) -> ImageBuffer<P, Vec<S>>
+where
+ I: GenericImageView<Pixel = P>,
+ P: Pixel<Subpixel = S> + 'static,
+ S: Primitive + 'static,
+{
+ let mut tmp = blur(image, sigma);
+
+ let max = S::DEFAULT_MAX_VALUE;
+ let max: i32 = NumCast::from(max).unwrap();
+ let (width, height) = image.dimensions();
+
+ for y in 0..height {
+ for x in 0..width {
+ let a = image.get_pixel(x, y);
+ let b = tmp.get_pixel_mut(x, y);
+
+ let p = a.map2(b, |c, d| {
+ let ic: i32 = NumCast::from(c).unwrap();
+ let id: i32 = NumCast::from(d).unwrap();
+
+ let diff = (ic - id).abs();
+
+ if diff > threshold {
+ let e = clamp(ic + diff, 0, max); // FIXME what does this do for f32? clamp 0-1 integers??
+
+ NumCast::from(e).unwrap()
+ } else {
+ c
+ }
+ });
+
+ *b = p;
+ }
+ }
+
+ tmp
+}
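+
+// Illustrative sketch, not from the upstream source: blur and unsharpen used together,
+// assuming the usual `image::imageops` re-exports. Sigma and threshold are arbitrary.
+//
+//     let img: image::GrayImage = image::ImageBuffer::new(64, 64);
+//     let soft = image::imageops::blur(&img, 2.0);            // gaussian blur, sigma = 2
+//     let sharp = image::imageops::unsharpen(&img, 2.0, 10);  // blur, then re-add contrast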
+
+#[cfg(test)]
+mod tests {
+ use super::{resize, sample_bilinear, sample_nearest, FilterType};
+ use crate::{GenericImageView, ImageBuffer, RgbImage};
+ #[cfg(feature = "benchmarks")]
+ use test;
+
+ #[bench]
+ #[cfg(all(feature = "benchmarks", feature = "png"))]
+ fn bench_resize(b: &mut test::Bencher) {
+ use std::path::Path;
+ let img = crate::open(&Path::new("./examples/fractal.png")).unwrap();
+ b.iter(|| {
+ test::black_box(resize(&img, 200, 200, FilterType::Nearest));
+ });
+ b.bytes = 800 * 800 * 3 + 200 * 200 * 3;
+ }
+
+ #[test]
+ #[cfg(feature = "png")]
+ fn test_resize_same_size() {
+ use std::path::Path;
+ let img = crate::open(&Path::new("./examples/fractal.png")).unwrap();
+ let resize = img.resize(img.width(), img.height(), FilterType::Triangle);
+ assert!(img.pixels().eq(resize.pixels()))
+ }
+
+ #[test]
+ #[cfg(feature = "png")]
+ fn test_sample_bilinear() {
+ use std::path::Path;
+ let img = crate::open(&Path::new("./examples/fractal.png")).unwrap();
+ assert!(sample_bilinear(&img, 0., 0.).is_some());
+ assert!(sample_bilinear(&img, 1., 0.).is_some());
+ assert!(sample_bilinear(&img, 0., 1.).is_some());
+ assert!(sample_bilinear(&img, 1., 1.).is_some());
+ assert!(sample_bilinear(&img, 0.5, 0.5).is_some());
+
+ assert!(sample_bilinear(&img, 1.2, 0.5).is_none());
+ assert!(sample_bilinear(&img, 0.5, 1.2).is_none());
+ assert!(sample_bilinear(&img, 1.2, 1.2).is_none());
+
+ assert!(sample_bilinear(&img, -0.1, 0.2).is_none());
+ assert!(sample_bilinear(&img, 0.2, -0.1).is_none());
+ assert!(sample_bilinear(&img, -0.1, -0.1).is_none());
+ }
+ #[test]
+ #[cfg(feature = "png")]
+ fn test_sample_nearest() {
+ use std::path::Path;
+ let img = crate::open(&Path::new("./examples/fractal.png")).unwrap();
+ assert!(sample_nearest(&img, 0., 0.).is_some());
+ assert!(sample_nearest(&img, 1., 0.).is_some());
+ assert!(sample_nearest(&img, 0., 1.).is_some());
+ assert!(sample_nearest(&img, 1., 1.).is_some());
+ assert!(sample_nearest(&img, 0.5, 0.5).is_some());
+
+ assert!(sample_nearest(&img, 1.2, 0.5).is_none());
+ assert!(sample_nearest(&img, 0.5, 1.2).is_none());
+ assert!(sample_nearest(&img, 1.2, 1.2).is_none());
+
+ assert!(sample_nearest(&img, -0.1, 0.2).is_none());
+ assert!(sample_nearest(&img, 0.2, -0.1).is_none());
+ assert!(sample_nearest(&img, -0.1, -0.1).is_none());
+ }
+ #[test]
+ fn test_sample_bilinear_correctness() {
+ use crate::Rgba;
+ let img = ImageBuffer::from_fn(2, 2, |x, y| match (x, y) {
+ (0, 0) => Rgba([255, 0, 0, 0]),
+ (0, 1) => Rgba([0, 255, 0, 0]),
+ (1, 0) => Rgba([0, 0, 255, 0]),
+ (1, 1) => Rgba([0, 0, 0, 255]),
+ _ => panic!(),
+ });
+ assert_eq!(sample_bilinear(&img, 0.5, 0.5), Some(Rgba([64; 4])));
+ assert_eq!(sample_bilinear(&img, 0.0, 0.0), Some(Rgba([255, 0, 0, 0])));
+ assert_eq!(sample_bilinear(&img, 0.0, 1.0), Some(Rgba([0, 255, 0, 0])));
+ assert_eq!(sample_bilinear(&img, 1.0, 0.0), Some(Rgba([0, 0, 255, 0])));
+ assert_eq!(sample_bilinear(&img, 1.0, 1.0), Some(Rgba([0, 0, 0, 255])));
+
+ assert_eq!(
+ sample_bilinear(&img, 0.5, 0.0),
+ Some(Rgba([128, 0, 128, 0]))
+ );
+ assert_eq!(
+ sample_bilinear(&img, 0.0, 0.5),
+ Some(Rgba([128, 128, 0, 0]))
+ );
+ assert_eq!(
+ sample_bilinear(&img, 0.5, 1.0),
+ Some(Rgba([0, 128, 0, 128]))
+ );
+ assert_eq!(
+ sample_bilinear(&img, 1.0, 0.5),
+ Some(Rgba([0, 0, 128, 128]))
+ );
+ }
+ #[test]
+ fn test_sample_nearest_correctness() {
+ use crate::Rgba;
+ let img = ImageBuffer::from_fn(2, 2, |x, y| match (x, y) {
+ (0, 0) => Rgba([255, 0, 0, 0]),
+ (0, 1) => Rgba([0, 255, 0, 0]),
+ (1, 0) => Rgba([0, 0, 255, 0]),
+ (1, 1) => Rgba([0, 0, 0, 255]),
+ _ => panic!(),
+ });
+
+ assert_eq!(sample_nearest(&img, 0.0, 0.0), Some(Rgba([255, 0, 0, 0])));
+ assert_eq!(sample_nearest(&img, 0.0, 1.0), Some(Rgba([0, 255, 0, 0])));
+ assert_eq!(sample_nearest(&img, 1.0, 0.0), Some(Rgba([0, 0, 255, 0])));
+ assert_eq!(sample_nearest(&img, 1.0, 1.0), Some(Rgba([0, 0, 0, 255])));
+
+ assert_eq!(sample_nearest(&img, 0.5, 0.5), Some(Rgba([0, 0, 0, 255])));
+ assert_eq!(sample_nearest(&img, 0.5, 0.0), Some(Rgba([0, 0, 255, 0])));
+ assert_eq!(sample_nearest(&img, 0.0, 0.5), Some(Rgba([0, 255, 0, 0])));
+ assert_eq!(sample_nearest(&img, 0.5, 1.0), Some(Rgba([0, 0, 0, 255])));
+ assert_eq!(sample_nearest(&img, 1.0, 0.5), Some(Rgba([0, 0, 0, 255])));
+ }
+
+ #[bench]
+ #[cfg(all(feature = "benchmarks", feature = "tiff"))]
+ fn bench_resize_same_size(b: &mut test::Bencher) {
+ let path = concat!(
+ env!("CARGO_MANIFEST_DIR"),
+ "/tests/images/tiff/testsuite/mandrill.tiff"
+ );
+ let image = crate::open(path).unwrap();
+ b.iter(|| {
+ test::black_box(image.resize(image.width(), image.height(), FilterType::CatmullRom));
+ });
+ b.bytes = (image.width() * image.height() * 3) as u64;
+ }
+
+ #[test]
+ fn test_issue_186() {
+ let img: RgbImage = ImageBuffer::new(100, 100);
+ let _ = resize(&img, 50, 50, FilterType::Lanczos3);
+ }
+
+ #[bench]
+ #[cfg(all(feature = "benchmarks", feature = "tiff"))]
+ fn bench_thumbnail(b: &mut test::Bencher) {
+ let path = concat!(
+ env!("CARGO_MANIFEST_DIR"),
+ "/tests/images/tiff/testsuite/mandrill.tiff"
+ );
+ let image = crate::open(path).unwrap();
+ b.iter(|| {
+ test::black_box(image.thumbnail(256, 256));
+ });
+ b.bytes = 512 * 512 * 4 + 256 * 256 * 4;
+ }
+
+ #[bench]
+ #[cfg(all(feature = "benchmarks", feature = "tiff"))]
+ fn bench_thumbnail_upsize(b: &mut test::Bencher) {
+ let path = concat!(
+ env!("CARGO_MANIFEST_DIR"),
+ "/tests/images/tiff/testsuite/mandrill.tiff"
+ );
+ let image = crate::open(path).unwrap().thumbnail(256, 256);
+ b.iter(|| {
+ test::black_box(image.thumbnail(512, 512));
+ });
+ b.bytes = 512 * 512 * 4 + 256 * 256 * 4;
+ }
+
+ #[bench]
+ #[cfg(all(feature = "benchmarks", feature = "tiff"))]
+ fn bench_thumbnail_upsize_irregular(b: &mut test::Bencher) {
+ let path = concat!(
+ env!("CARGO_MANIFEST_DIR"),
+ "/tests/images/tiff/testsuite/mandrill.tiff"
+ );
+ let image = crate::open(path).unwrap().thumbnail(193, 193);
+ b.iter(|| {
+ test::black_box(image.thumbnail(256, 256));
+ });
+ b.bytes = 193 * 193 * 4 + 256 * 256 * 4;
+ }
+
+ #[test]
+ #[cfg(feature = "png")]
+ fn resize_transparent_image() {
+ use super::FilterType::{CatmullRom, Gaussian, Lanczos3, Nearest, Triangle};
+ use crate::imageops::crop_imm;
+ use crate::RgbaImage;
+
+ fn assert_resize(image: &RgbaImage, filter: FilterType) {
+ let resized = resize(image, 16, 16, filter);
+ let cropped = crop_imm(&resized, 5, 5, 6, 6).to_image();
+ for pixel in cropped.pixels() {
+ let alpha = pixel.0[3];
+ assert!(
+ alpha != 254 && alpha != 253,
+ "alpha value: {}, {:?}",
+ alpha,
+ filter
+ );
+ }
+ }
+
+ let path = concat!(
+ env!("CARGO_MANIFEST_DIR"),
+ "/tests/images/png/transparency/tp1n3p08.png"
+ );
+ let img = crate::open(path).unwrap();
+ let rgba8 = img.as_rgba8().unwrap();
+ let filters = &[Nearest, Triangle, CatmullRom, Gaussian, Lanczos3];
+ for filter in filters {
+ assert_resize(rgba8, *filter);
+ }
+ }
+
+ #[test]
+ fn bug_1600() {
+ let image = crate::RgbaImage::from_raw(629, 627, vec![255; 629 * 627 * 4]).unwrap();
+ let result = resize(&image, 22, 22, FilterType::Lanczos3);
+ assert!(result.into_raw().into_iter().any(|c| c != 0));
+ }
+}
diff --git a/vendor/image/src/io/free_functions.rs b/vendor/image/src/io/free_functions.rs
new file mode 100644
index 0000000..d6047d7
--- /dev/null
+++ b/vendor/image/src/io/free_functions.rs
@@ -0,0 +1,312 @@
+use std::fs::File;
+use std::io::{BufRead, BufReader, BufWriter, Seek};
+use std::path::Path;
+use std::u32;
+
+use crate::codecs::*;
+
+use crate::dynimage::DynamicImage;
+use crate::error::{ImageError, ImageFormatHint, ImageResult};
+use crate::image;
+use crate::image::ImageFormat;
+#[allow(unused_imports)] // When no features are supported
+use crate::image::{ImageDecoder, ImageEncoder};
+use crate::{
+ color,
+ error::{UnsupportedError, UnsupportedErrorKind},
+ ImageOutputFormat,
+};
+
+pub(crate) fn open_impl(path: &Path) -> ImageResult<DynamicImage> {
+ let buffered_read = BufReader::new(File::open(path).map_err(ImageError::IoError)?);
+
+ load(buffered_read, ImageFormat::from_path(path)?)
+}
+
+/// Create a new image from a Reader.
+///
+/// Assumes the reader is already buffered. For optimal performance,
+/// consider wrapping the reader with a `BufReader::new()`.
+///
+/// Try [`io::Reader`] for more advanced uses.
+///
+/// [`io::Reader`]: io/struct.Reader.html
+#[allow(unused_variables)]
+// r is unused if no features are supported.
+pub fn load<R: BufRead + Seek>(r: R, format: ImageFormat) -> ImageResult<DynamicImage> {
+ load_inner(r, super::Limits::default(), format)
+}
+
+pub(crate) trait DecoderVisitor {
+ type Result;
+ fn visit_decoder<'a, D: ImageDecoder<'a>>(self, decoder: D) -> ImageResult<Self::Result>;
+}
+
+pub(crate) fn load_decoder<R: BufRead + Seek, V: DecoderVisitor>(
+ r: R,
+ format: ImageFormat,
+ limits: super::Limits,
+ visitor: V,
+) -> ImageResult<V::Result> {
+ #[allow(unreachable_patterns)]
+ // Default is unreachable if all features are supported.
+ match format {
+ #[cfg(feature = "avif-decoder")]
+ image::ImageFormat::Avif => visitor.visit_decoder(avif::AvifDecoder::new(r)?),
+ #[cfg(feature = "png")]
+ image::ImageFormat::Png => visitor.visit_decoder(png::PngDecoder::with_limits(r, limits)?),
+ #[cfg(feature = "gif")]
+ image::ImageFormat::Gif => visitor.visit_decoder(gif::GifDecoder::new(r)?),
+ #[cfg(feature = "jpeg")]
+ image::ImageFormat::Jpeg => visitor.visit_decoder(jpeg::JpegDecoder::new(r)?),
+ #[cfg(feature = "webp")]
+ image::ImageFormat::WebP => visitor.visit_decoder(webp::WebPDecoder::new(r)?),
+ #[cfg(feature = "tiff")]
+ image::ImageFormat::Tiff => visitor.visit_decoder(tiff::TiffDecoder::new(r)?),
+ #[cfg(feature = "tga")]
+ image::ImageFormat::Tga => visitor.visit_decoder(tga::TgaDecoder::new(r)?),
+ #[cfg(feature = "dds")]
+ image::ImageFormat::Dds => visitor.visit_decoder(dds::DdsDecoder::new(r)?),
+ #[cfg(feature = "bmp")]
+ image::ImageFormat::Bmp => visitor.visit_decoder(bmp::BmpDecoder::new(r)?),
+ #[cfg(feature = "ico")]
+ image::ImageFormat::Ico => visitor.visit_decoder(ico::IcoDecoder::new(r)?),
+ #[cfg(feature = "hdr")]
+ image::ImageFormat::Hdr => visitor.visit_decoder(hdr::HdrAdapter::new(BufReader::new(r))?),
+ #[cfg(feature = "exr")]
+ image::ImageFormat::OpenExr => visitor.visit_decoder(openexr::OpenExrDecoder::new(r)?),
+ #[cfg(feature = "pnm")]
+ image::ImageFormat::Pnm => visitor.visit_decoder(pnm::PnmDecoder::new(r)?),
+ #[cfg(feature = "farbfeld")]
+ image::ImageFormat::Farbfeld => visitor.visit_decoder(farbfeld::FarbfeldDecoder::new(r)?),
+ #[cfg(feature = "qoi")]
+ image::ImageFormat::Qoi => visitor.visit_decoder(qoi::QoiDecoder::new(r)?),
+ _ => Err(ImageError::Unsupported(
+ ImageFormatHint::Exact(format).into(),
+ )),
+ }
+}
+
+pub(crate) fn load_inner<R: BufRead + Seek>(
+ r: R,
+ limits: super::Limits,
+ format: ImageFormat,
+) -> ImageResult<DynamicImage> {
+ struct LoadVisitor(super::Limits);
+
+ impl DecoderVisitor for LoadVisitor {
+ type Result = DynamicImage;
+
+ fn visit_decoder<'a, D: ImageDecoder<'a>>(
+ self,
+ mut decoder: D,
+ ) -> ImageResult<Self::Result> {
+ let mut limits = self.0;
+ // Check that we do not allocate a bigger buffer than we are allowed to
+ // FIXME: should this rather go in `DynamicImage::from_decoder` somehow?
+ limits.reserve(decoder.total_bytes())?;
+ decoder.set_limits(limits)?;
+ DynamicImage::from_decoder(decoder)
+ }
+ }
+
+ load_decoder(r, format, limits.clone(), LoadVisitor(limits))
+}
+
+pub(crate) fn image_dimensions_impl(path: &Path) -> ImageResult<(u32, u32)> {
+ let format = image::ImageFormat::from_path(path)?;
+ let reader = BufReader::new(File::open(path)?);
+ image_dimensions_with_format_impl(reader, format)
+}
+
+#[allow(unused_variables)]
+// buffered_read is unused if no features are supported.
+pub(crate) fn image_dimensions_with_format_impl<R: BufRead + Seek>(
+ buffered_read: R,
+ format: ImageFormat,
+) -> ImageResult<(u32, u32)> {
+ struct DimVisitor;
+
+ impl DecoderVisitor for DimVisitor {
+ type Result = (u32, u32);
+ fn visit_decoder<'a, D: ImageDecoder<'a>>(self, decoder: D) -> ImageResult<Self::Result> {
+ Ok(decoder.dimensions())
+ }
+ }
+
+ load_decoder(buffered_read, format, super::Limits::default(), DimVisitor)
+}
+
+#[allow(unused_variables)]
+// Most variables are unused when no features are supported
+pub(crate) fn save_buffer_impl(
+ path: &Path,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color: color::ColorType,
+) -> ImageResult<()> {
+ let format = ImageFormat::from_path(path)?;
+ save_buffer_with_format_impl(path, buf, width, height, color, format)
+}
+
+#[allow(unused_variables)]
+// Most variables are unused when no features are supported
+pub(crate) fn save_buffer_with_format_impl(
+ path: &Path,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color: color::ColorType,
+ format: ImageFormat,
+) -> ImageResult<()> {
+ let buffered_file_write = &mut BufWriter::new(File::create(path)?); // always seekable
+
+ let format = match format {
+ #[cfg(feature = "pnm")]
+ image::ImageFormat::Pnm => {
+ let ext = path
+ .extension()
+ .and_then(|s| s.to_str())
+ .map_or("".to_string(), |s| s.to_ascii_lowercase());
+ ImageOutputFormat::Pnm(match &*ext {
+ "pbm" => pnm::PnmSubtype::Bitmap(pnm::SampleEncoding::Binary),
+ "pgm" => pnm::PnmSubtype::Graymap(pnm::SampleEncoding::Binary),
+ "ppm" => pnm::PnmSubtype::Pixmap(pnm::SampleEncoding::Binary),
+ "pam" => pnm::PnmSubtype::ArbitraryMap,
+ _ => {
+ return Err(ImageError::Unsupported(
+ ImageFormatHint::Exact(format).into(),
+ ))
+ } // Unsupported Pnm subtype.
+ })
+ }
+ // #[cfg(feature = "hdr")]
+ // image::ImageFormat::Hdr => hdr::HdrEncoder::new(fout).encode(&[Rgb<f32>], width, height), // usize
+ format => format.into(),
+ };
+
+ write_buffer_impl(buffered_file_write, buf, width, height, color, format)
+}
+
+#[allow(unused_variables)]
+// Most variables are unused when no features are supported
+pub(crate) fn write_buffer_impl<W: std::io::Write + Seek>(
+ buffered_write: &mut W,
+ buf: &[u8],
+ width: u32,
+ height: u32,
+ color: color::ColorType,
+ format: ImageOutputFormat,
+) -> ImageResult<()> {
+ match format {
+ #[cfg(feature = "png")]
+ ImageOutputFormat::Png => {
+ png::PngEncoder::new(buffered_write).write_image(buf, width, height, color)
+ }
+ #[cfg(feature = "jpeg")]
+ ImageOutputFormat::Jpeg(quality) => {
+ jpeg::JpegEncoder::new_with_quality(buffered_write, quality)
+ .write_image(buf, width, height, color)
+ }
+ #[cfg(feature = "pnm")]
+ ImageOutputFormat::Pnm(subtype) => pnm::PnmEncoder::new(buffered_write)
+ .with_subtype(subtype)
+ .write_image(buf, width, height, color),
+ #[cfg(feature = "gif")]
+ ImageOutputFormat::Gif => {
+ gif::GifEncoder::new(buffered_write).encode(buf, width, height, color)
+ }
+ #[cfg(feature = "ico")]
+ ImageOutputFormat::Ico => {
+ ico::IcoEncoder::new(buffered_write).write_image(buf, width, height, color)
+ }
+ #[cfg(feature = "bmp")]
+ ImageOutputFormat::Bmp => {
+ bmp::BmpEncoder::new(buffered_write).write_image(buf, width, height, color)
+ }
+ #[cfg(feature = "farbfeld")]
+ ImageOutputFormat::Farbfeld => {
+ farbfeld::FarbfeldEncoder::new(buffered_write).write_image(buf, width, height, color)
+ }
+ #[cfg(feature = "tga")]
+ ImageOutputFormat::Tga => {
+ tga::TgaEncoder::new(buffered_write).write_image(buf, width, height, color)
+ }
+ #[cfg(feature = "exr")]
+ ImageOutputFormat::OpenExr => {
+ openexr::OpenExrEncoder::new(buffered_write).write_image(buf, width, height, color)
+ }
+ #[cfg(feature = "tiff")]
+ ImageOutputFormat::Tiff => {
+ tiff::TiffEncoder::new(buffered_write).write_image(buf, width, height, color)
+ }
+ #[cfg(feature = "avif-encoder")]
+ ImageOutputFormat::Avif => {
+ avif::AvifEncoder::new(buffered_write).write_image(buf, width, height, color)
+ }
+ #[cfg(feature = "qoi")]
+ ImageOutputFormat::Qoi => {
+ qoi::QoiEncoder::new(buffered_write).write_image(buf, width, height, color)
+ }
+ #[cfg(feature = "webp-encoder")]
+ ImageOutputFormat::WebP => {
+ webp::WebPEncoder::new(buffered_write).write_image(buf, width, height, color)
+ }
+
+ image::ImageOutputFormat::Unsupported(msg) => Err(ImageError::Unsupported(
+ UnsupportedError::from_format_and_kind(
+ ImageFormatHint::Unknown,
+ UnsupportedErrorKind::Format(ImageFormatHint::Name(msg)),
+ ),
+ )),
+ }
+}
+
+static MAGIC_BYTES: [(&[u8], ImageFormat); 23] = [
+ (b"\x89PNG\r\n\x1a\n", ImageFormat::Png),
+ (&[0xff, 0xd8, 0xff], ImageFormat::Jpeg),
+ (b"GIF89a", ImageFormat::Gif),
+ (b"GIF87a", ImageFormat::Gif),
+ (b"RIFF", ImageFormat::WebP), // TODO: better magic byte detection, see https://github.com/image-rs/image/issues/660
+ (b"MM\x00*", ImageFormat::Tiff),
+ (b"II*\x00", ImageFormat::Tiff),
+ (b"DDS ", ImageFormat::Dds),
+ (b"BM", ImageFormat::Bmp),
+ (&[0, 0, 1, 0], ImageFormat::Ico),
+ (b"#?RADIANCE", ImageFormat::Hdr),
+ (b"P1", ImageFormat::Pnm),
+ (b"P2", ImageFormat::Pnm),
+ (b"P3", ImageFormat::Pnm),
+ (b"P4", ImageFormat::Pnm),
+ (b"P5", ImageFormat::Pnm),
+ (b"P6", ImageFormat::Pnm),
+ (b"P7", ImageFormat::Pnm),
+ (b"farbfeld", ImageFormat::Farbfeld),
+ (b"\0\0\0 ftypavif", ImageFormat::Avif),
+ (b"\0\0\0\x1cftypavif", ImageFormat::Avif),
+ (&[0x76, 0x2f, 0x31, 0x01], ImageFormat::OpenExr), // = &exr::meta::magic_number::BYTES
+ (b"qoif", ImageFormat::Qoi),
+];
+
+/// Guess image format from memory block
+///
+/// Makes an educated guess about the image format based on the magic bytes at the beginning.
+/// TGA is not supported by this function.
+/// A successful guess says nothing about the validity of the rest of the memory block.
+pub fn guess_format(buffer: &[u8]) -> ImageResult<ImageFormat> {
+ match guess_format_impl(buffer) {
+ Some(format) => Ok(format),
+ None => Err(ImageError::Unsupported(ImageFormatHint::Unknown.into())),
+ }
+}
+
+pub(crate) fn guess_format_impl(buffer: &[u8]) -> Option<ImageFormat> {
+ for &(signature, format) in &MAGIC_BYTES {
+ if buffer.starts_with(signature) {
+ return Some(format);
+ }
+ }
+
+ None
+}
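+
+// Illustrative sketch, not from the upstream source: `guess_format` matching the PNG
+// signature from the `MAGIC_BYTES` table above; the function is re-exported at the crate root.
+//
+//     let header = b"\x89PNG\r\n\x1a\n\x00\x00\x00\x0dIHDR";
+//     assert_eq!(image::guess_format(header).unwrap(), image::ImageFormat::Png);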
diff --git a/vendor/image/src/io/mod.rs b/vendor/image/src/io/mod.rs
new file mode 100644
index 0000000..8fbc6e2
--- /dev/null
+++ b/vendor/image/src/io/mod.rs
@@ -0,0 +1,166 @@
+//! Input and output of images.
+
+use std::convert::TryFrom;
+
+use crate::{error, ImageError, ImageResult};
+
+pub(crate) mod free_functions;
+mod reader;
+
+pub use self::reader::Reader;
+
+/// Set of supported strict limits for a decoder.
+#[derive(Clone, Debug, Eq, PartialEq, Hash)]
+#[allow(missing_copy_implementations)]
+#[allow(clippy::manual_non_exhaustive)]
+pub struct LimitSupport {
+ _non_exhaustive: (),
+}
+
+#[allow(clippy::derivable_impls)]
+impl Default for LimitSupport {
+ fn default() -> LimitSupport {
+ LimitSupport {
+ _non_exhaustive: (),
+ }
+ }
+}
+
+/// Resource limits for decoding.
+///
+/// Limits can be either *strict* or *non-strict*. Non-strict limits are best-effort
+/// limits where the library does not guarantee that the limit will not be exceeded. Do note
+/// that it is still considered a bug if a non-strict limit is exceeded; however, as
+/// some of the underlying decoders do not support such limits, one cannot
+/// rely on these limits being upheld. For strict limits the library makes a stronger
+/// guarantee that the limit will not be exceeded. Exceeding a strict limit is considered
+/// a critical bug. If a decoder cannot guarantee that it will uphold a strict limit it
+/// *must* fail with `image::error::LimitErrorKind::Unsupported`.
+///
+/// Currently the only strict limits supported are the `max_image_width` and `max_image_height`
+/// limits; more will be added in the future. [`LimitSupport`] defaults to no limits being
+/// supported, and decoders should enable support for the limits they do support in
+/// [`ImageDecoder::set_limits`].
+///
+/// The limit check should only ever fail if a limit will be exceeded or an unsupported strict
+/// limit is used.
+///
+/// [`LimitSupport`]: ./struct.LimitSupport.html
+/// [`ImageDecoder::set_limits`]: ../trait.ImageDecoder.html#method.set_limits
+#[derive(Clone, Debug, Eq, PartialEq, Hash)]
+#[allow(missing_copy_implementations)]
+#[allow(clippy::manual_non_exhaustive)]
+pub struct Limits {
+ /// The maximum allowed image width. This limit is strict. The default is no limit.
+ pub max_image_width: Option<u32>,
+ /// The maximum allowed image height. This limit is strict. The default is no limit.
+ pub max_image_height: Option<u32>,
+ /// The maximum allowed total size of allocations made by the decoder at any one time, excluding
+ /// allocator overhead. This limit is non-strict by default and some decoders may ignore it.
+ /// The default is 512 MiB.
+ pub max_alloc: Option<u64>,
+ _non_exhaustive: (),
+}
+
+impl Default for Limits {
+ fn default() -> Limits {
+ Limits {
+ max_image_width: None,
+ max_image_height: None,
+ max_alloc: Some(512 * 1024 * 1024),
+ _non_exhaustive: (),
+ }
+ }
+}
+
+impl Limits {
+ /// Disable all limits.
+ pub fn no_limits() -> Limits {
+ Limits {
+ max_image_width: None,
+ max_image_height: None,
+ max_alloc: None,
+ _non_exhaustive: (),
+ }
+ }
+
+ /// This function checks that all currently set strict limits are supported.
+ pub fn check_support(&self, _supported: &LimitSupport) -> ImageResult<()> {
+ Ok(())
+ }
+
+ /// This function checks the `max_image_width` and `max_image_height` limits given
+ /// the image width and height.
+ pub fn check_dimensions(&self, width: u32, height: u32) -> ImageResult<()> {
+ if let Some(max_width) = self.max_image_width {
+ if width > max_width {
+ return Err(ImageError::Limits(error::LimitError::from_kind(
+ error::LimitErrorKind::DimensionError,
+ )));
+ }
+ }
+
+ if let Some(max_height) = self.max_image_height {
+ if height > max_height {
+ return Err(ImageError::Limits(error::LimitError::from_kind(
+ error::LimitErrorKind::DimensionError,
+ )));
+ }
+ }
+
+ Ok(())
+ }
+
+ /// This function checks that the current limit allows for reserving the given amount
+ /// of bytes; it then reduces the limit accordingly.
+ pub fn reserve(&mut self, amount: u64) -> ImageResult<()> {
+ if let Some(max_alloc) = self.max_alloc.as_mut() {
+ if *max_alloc < amount {
+ return Err(ImageError::Limits(error::LimitError::from_kind(
+ error::LimitErrorKind::InsufficientMemory,
+ )));
+ }
+
+ *max_alloc -= amount;
+ }
+
+ Ok(())
+ }
+
+ /// This function acts identically to [`reserve`], but takes a `usize` for convenience.
+ pub fn reserve_usize(&mut self, amount: usize) -> ImageResult<()> {
+ match u64::try_from(amount) {
+ Ok(n) => self.reserve(n),
+ Err(_) if self.max_alloc.is_some() => Err(ImageError::Limits(
+ error::LimitError::from_kind(error::LimitErrorKind::InsufficientMemory),
+ )),
+ Err(_) => {
+ // Out of bounds, but we weren't asked to consider any limit.
+ Ok(())
+ }
+ }
+ }
+
+ /// This function increases the `max_alloc` limit by `amount`. Should only be used
+ /// together with [`reserve`].
+ ///
+ /// [`reserve`]: #method.reserve
+ pub fn free(&mut self, amount: u64) {
+ if let Some(max_alloc) = self.max_alloc.as_mut() {
+ *max_alloc = max_alloc.saturating_add(amount);
+ }
+ }
+
+ /// This function acts identically to [`free`], but takes a `usize` for convenience.
+ pub fn free_usize(&mut self, amount: usize) {
+ match u64::try_from(amount) {
+ Ok(n) => self.free(n),
+ Err(_) if self.max_alloc.is_some() => {
+ panic!("max_alloc is set, we should have exited earlier when the reserve failed");
+ }
+ Err(_) => {
+ // Out of bounds, but we weren't asked to consider any limit.
+ }
+ }
+ }
+}
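+
+// Illustrative sketch, not from the upstream source: applying the limits defined above
+// through `io::Reader` (see reader.rs). The path and dimension caps are arbitrary, and
+// the `?` operators assume a caller returning a boxed error.
+//
+//     let mut reader = image::io::Reader::open("big.png")?;
+//     let mut limits = image::io::Limits::default();
+//     limits.max_image_width = Some(4096);
+//     limits.max_image_height = Some(4096);
+//     reader.limits(limits);
+//     let img = reader.decode()?;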
diff --git a/vendor/image/src/io/reader.rs b/vendor/image/src/io/reader.rs
new file mode 100644
index 0000000..660780f
--- /dev/null
+++ b/vendor/image/src/io/reader.rs
@@ -0,0 +1,239 @@
+use std::fs::File;
+use std::io::{self, BufRead, BufReader, Cursor, Read, Seek, SeekFrom};
+use std::path::Path;
+
+use crate::dynimage::DynamicImage;
+use crate::error::{ImageFormatHint, UnsupportedError, UnsupportedErrorKind};
+use crate::image::ImageFormat;
+use crate::{ImageError, ImageResult};
+
+use super::free_functions;
+
+/// A multi-format image reader.
+///
+/// Wraps an input reader, facilitates automatic detection of an image's format and of the
+/// appropriate decoding method, and dispatches into the set of supported [`ImageDecoder`] implementations.
+///
+/// ## Usage
+///
+/// Opening a file, deducing the format based on the file path automatically, and trying to decode
+/// the image contained can be performed by constructing the reader and immediately consuming it.
+///
+/// ```no_run
+/// # use image::ImageError;
+/// # use image::io::Reader;
+/// # fn main() -> Result<(), ImageError> {
+/// let image = Reader::open("path/to/image.png")?
+/// .decode()?;
+/// # Ok(()) }
+/// ```
+///
+/// It is also possible to make a guess based on the content. This is especially handy if the
+/// source is some blob in memory and you have constructed the reader in another way. Here is an
+/// example with a `pnm` black-and-white subformat that encodes its pixel matrix with ASCII values.
+///
+/// ```
+/// # use image::ImageError;
+/// # use image::io::Reader;
+/// # fn main() -> Result<(), ImageError> {
+/// use std::io::Cursor;
+/// use image::ImageFormat;
+///
+/// let raw_data = b"P1 2 2\n\
+/// 0 1\n\
+/// 1 0\n";
+///
+/// let mut reader = Reader::new(Cursor::new(raw_data))
+/// .with_guessed_format()
+/// .expect("Cursor io never fails");
+/// assert_eq!(reader.format(), Some(ImageFormat::Pnm));
+///
+/// # #[cfg(feature = "pnm")]
+/// let image = reader.decode()?;
+/// # Ok(()) }
+/// ```
+///
+/// As a final fallback or if only a specific format must be used, the reader always allows manual
+/// specification of the supposed image format with [`set_format`].
+///
+/// [`set_format`]: #method.set_format
+/// [`ImageDecoder`]: ../trait.ImageDecoder.html
+pub struct Reader<R: Read> {
+ /// The reader. Should be buffered.
+ inner: R,
+ /// The format, if one has been set or deduced.
+ format: Option<ImageFormat>,
+ /// Decoding limits
+ limits: super::Limits,
+}
+
+impl<R: Read> Reader<R> {
+ /// Create a new image reader without a preset format.
+ ///
+ /// Assumes the reader is already buffered. For optimal performance,
+ /// consider wrapping the reader with a `BufReader::new()`.
+ ///
+ /// It is possible to guess the format based on the content of the read object with
+ /// [`with_guessed_format`], or to set the format directly with [`set_format`].
+ ///
+ /// [`with_guessed_format`]: #method.with_guessed_format
+ /// [`set_format`]: #method.set_format
+ pub fn new(buffered_reader: R) -> Self {
+ Reader {
+ inner: buffered_reader,
+ format: None,
+ limits: super::Limits::default(),
+ }
+ }
+
+ /// Construct a reader with specified format.
+ ///
+ /// Assumes the reader is already buffered. For optimal performance,
+ /// consider wrapping the reader with a `BufReader::new()`.
+ pub fn with_format(buffered_reader: R, format: ImageFormat) -> Self {
+ Reader {
+ inner: buffered_reader,
+ format: Some(format),
+ limits: super::Limits::default(),
+ }
+ }
+
+ /// Get the currently determined format.
+ pub fn format(&self) -> Option<ImageFormat> {
+ self.format
+ }
+
+ /// Supply the format as which to interpret the read image.
+ pub fn set_format(&mut self, format: ImageFormat) {
+ self.format = Some(format);
+ }
+
+ /// Remove the current information on the image format.
+ ///
+ /// Note that many operations require format information to be present and will return e.g. an
+ /// `ImageError::Unsupported` when the image format has not been set.
+ pub fn clear_format(&mut self) {
+ self.format = None;
+ }
+
+ /// Disable all decoding limits.
+ pub fn no_limits(&mut self) {
+ self.limits = super::Limits::no_limits();
+ }
+
+ /// Set a custom set of decoding limits.
+ pub fn limits(&mut self, limits: super::Limits) {
+ self.limits = limits;
+ }
+
+ /// Unwrap the reader.
+ pub fn into_inner(self) -> R {
+ self.inner
+ }
+}
+
+impl Reader<BufReader<File>> {
+ /// Open a file to read, format will be guessed from path.
+ ///
+ /// This will not attempt any io operation on the opened file.
+ ///
+ /// If you want to inspect the content for a better guess on the format, which does not depend
+ /// on file extensions, follow this call with a call to [`with_guessed_format`].
+ ///
+ /// [`with_guessed_format`]: #method.with_guessed_format
+ pub fn open<P>(path: P) -> io::Result<Self>
+ where
+ P: AsRef<Path>,
+ {
+ Self::open_impl(path.as_ref())
+ }
+
+ fn open_impl(path: &Path) -> io::Result<Self> {
+ Ok(Reader {
+ inner: BufReader::new(File::open(path)?),
+ format: ImageFormat::from_path(path).ok(),
+ limits: super::Limits::default(),
+ })
+ }
+}
+
+impl<R: BufRead + Seek> Reader<R> {
+ /// Make a format guess based on the content, replacing it on success.
+ ///
+ /// Returns `Ok` with the guess if no io error occurs. Additionally, replaces the current
+ /// format if the guess was successful. If the guess was unable to determine a format then
+ /// the current format of the reader is unchanged.
+ ///
+ /// Returns an error if the underlying reader fails. The format is unchanged. The error is a
+ /// `std::io::Error` and not `ImageError` since the only error case is an error when the
+ /// underlying reader seeks.
+ ///
+ /// When an error occurs, the reader may not have been properly reset and it is potentially
+ /// hazardous to continue with more io.
+ ///
+ /// ## Usage
+ ///
+ /// This supplements the path-based format deduction from [`open`](Reader::open) with content-based deduction.
+ /// This is more common on Linux and UNIX operating systems, and also helpful if the path cannot
+ /// be directly controlled.
+ ///
+ /// ```no_run
+ /// # use image::ImageError;
+ /// # use image::io::Reader;
+ /// # fn main() -> Result<(), ImageError> {
+ /// let image = Reader::open("image.unknown")?
+ /// .with_guessed_format()?
+ /// .decode()?;
+ /// # Ok(()) }
+ /// ```
+ pub fn with_guessed_format(mut self) -> io::Result<Self> {
+ let format = self.guess_format()?;
+ // Replace format if found, keep current state if not.
+ self.format = format.or(self.format);
+ Ok(self)
+ }
+
+ fn guess_format(&mut self) -> io::Result<Option<ImageFormat>> {
+ let mut start = [0; 16];
+
+ // Save current offset, read start, restore offset.
+ let cur = self.inner.stream_position()?;
+ let len = io::copy(
+ // Accept shorter files but read at most 16 bytes.
+ &mut self.inner.by_ref().take(16),
+ &mut Cursor::new(&mut start[..]),
+ )?;
+ self.inner.seek(SeekFrom::Start(cur))?;
+
+ Ok(free_functions::guess_format_impl(&start[..len as usize]))
+ }
+
+ /// Read the image dimensions.
+ ///
+ /// Uses the current format to construct the correct reader for the format.
+ ///
+ /// If no format was determined, returns an `ImageError::Unsupported`.
+ pub fn into_dimensions(mut self) -> ImageResult<(u32, u32)> {
+ let format = self.require_format()?;
+ free_functions::image_dimensions_with_format_impl(self.inner, format)
+ }
+
+ /// Read the image (replaces `load`).
+ ///
+ /// Uses the current format to construct the correct reader for the format.
+ ///
+ /// If no format was determined, returns an `ImageError::Unsupported`.
+ pub fn decode(mut self) -> ImageResult<DynamicImage> {
+ let format = self.require_format()?;
+ free_functions::load_inner(self.inner, self.limits, format)
+ }
+
+ fn require_format(&mut self) -> ImageResult<ImageFormat> {
+ self.format.ok_or_else(|| {
+ ImageError::Unsupported(UnsupportedError::from_format_and_kind(
+ ImageFormatHint::Unknown,
+ UnsupportedErrorKind::Format(ImageFormatHint::Unknown),
+ ))
+ })
+ }
+}
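+
+// Illustrative sketch, not from the upstream source: probing the dimensions without
+// decoding the pixels, via `into_dimensions` above. The path is hypothetical, and the
+// `?` operators assume a caller returning a boxed error.
+//
+//     let (w, h) = image::io::Reader::open("photo.jpg")?
+//         .with_guessed_format()?
+//         .into_dimensions()?;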
diff --git a/vendor/image/src/lib.rs b/vendor/image/src/lib.rs
new file mode 100644
index 0000000..88ad3c4
--- /dev/null
+++ b/vendor/image/src/lib.rs
@@ -0,0 +1,310 @@
+//! # Overview
+//!
+//! This crate provides native rust implementations of image encoding and decoding as well as some
+//! basic image manipulation functions. Additional documentation can currently also be found in the
+//! [README.md file which is most easily viewed on
+//! github](https://github.com/image-rs/image/blob/master/README.md).
+//!
+//! There are two core problems for which this library provides solutions: a unified interface for image
+//! encodings and simple generic buffers for their content. It's possible to use either feature
+//! without the other. The focus is on a small and stable set of common operations that can be
+//! supplemented by other specialized crates. The library also prefers safe solutions with few
+//! dependencies.
+//!
+//! # High level API
+//!
+//! Load images using [`io::Reader`]:
+//!
+//! ```rust,no_run
+//! use std::io::Cursor;
+//! use image::io::Reader as ImageReader;
+//! # fn main() -> Result<(), image::ImageError> {
+//! # let bytes = vec![0u8];
+//!
+//! let img = ImageReader::open("myimage.png")?.decode()?;
+//! let img2 = ImageReader::new(Cursor::new(bytes)).with_guessed_format()?.decode()?;
+//! # Ok(())
+//! # }
+//! ```
+//!
+//! And save them using [`save`] or [`write_to`] methods:
+//!
+//! ```rust,no_run
+//! # use std::io::{Write, Cursor};
+//! # use image::{DynamicImage, ImageOutputFormat};
+//! # #[cfg(feature = "png")]
+//! # fn main() -> Result<(), image::ImageError> {
+//! # let img: DynamicImage = unimplemented!();
+//! # let img2: DynamicImage = unimplemented!();
+//! img.save("empty.jpg")?;
+//!
+//! let mut bytes: Vec<u8> = Vec::new();
+//! img2.write_to(&mut Cursor::new(&mut bytes), image::ImageOutputFormat::Png)?;
+//! # Ok(())
+//! # }
+//! # #[cfg(not(feature = "png"))] fn main() {}
+//! ```
+//!
+//! With default features, the crate includes support for [many common image formats](codecs/index.html#supported-formats).
+//!
+//! [`save`]: enum.DynamicImage.html#method.save
+//! [`write_to`]: enum.DynamicImage.html#method.write_to
+//! [`io::Reader`]: io/struct.Reader.html
+//!
+//! # Image buffers
+//!
+//! The two main types for storing images:
+//! * [`ImageBuffer`] which holds statically typed image contents.
+//! * [`DynamicImage`] which is an enum over the supported ImageBuffer formats
+//! and supports conversions between them.
+//!
+//! As well as a few more specialized options:
+//! * [`GenericImage`] trait for a mutable image buffer.
+//! * [`GenericImageView`] trait for read only references to a GenericImage.
+//! * [`flat`] module containing types for interoperability with generic channel
+//! matrices and foreign interfaces.
+//!
+//! [`GenericImageView`]: trait.GenericImageView.html
+//! [`GenericImage`]: trait.GenericImage.html
+//! [`ImageBuffer`]: struct.ImageBuffer.html
+//! [`DynamicImage`]: enum.DynamicImage.html
+//! [`flat`]: flat/index.html
+//!
+//! # Low level encoding/decoding API
+//!
+//! Implementations of [`ImageEncoder`] provide low-level control over encoding:
+//! ```rust,no_run
+//! # use std::io::Write;
+//! # use image::DynamicImage;
+//! # use image::ImageEncoder;
+//! # #[cfg(feature = "jpeg")]
+//! # fn main() -> Result<(), image::ImageError> {
+//! # use image::codecs::jpeg::JpegEncoder;
+//! # let img: DynamicImage = unimplemented!();
+//! # let writer: Box<dyn Write> = unimplemented!();
+//! let encoder = JpegEncoder::new_with_quality(&mut writer, 95);
+//! img.write_with_encoder(encoder)?;
+//! # Ok(())
+//! # }
+//! # #[cfg(not(feature = "jpeg"))] fn main() {}
+//! ```
+//! While [`ImageDecoder`] and [`ImageDecoderRect`] give access to more advanced decoding options:
+//!
+//! ```rust,no_run
+//! # use std::io::Read;
+//! # use image::DynamicImage;
+//! # use image::ImageDecoder;
+//! # #[cfg(feature = "png")]
+//! # fn main() -> Result<(), image::ImageError> {
+//! # use image::codecs::png::PngDecoder;
+//! # let img: DynamicImage = unimplemented!();
+//! # let reader: Box<dyn Read> = unimplemented!();
+//! let decoder = PngDecoder::new(&mut reader)?;
+//! let icc = decoder.icc_profile();
+//! let img = DynamicImage::from_decoder(decoder)?;
+//! # Ok(())
+//! # }
+//! # #[cfg(not(feature = "png"))] fn main() {}
+//! ```
+//!
+//! [`DynamicImage::from_decoder`]: enum.DynamicImage.html#method.from_decoder
+//! [`ImageDecoderRect`]: trait.ImageDecoderRect.html
+//! [`ImageDecoder`]: trait.ImageDecoder.html
+//! [`ImageEncoder`]: trait.ImageEncoder.html
+#![warn(missing_docs)]
+#![warn(unused_qualifications)]
+#![deny(unreachable_pub)]
+#![deny(deprecated)]
+#![deny(missing_copy_implementations)]
+#![cfg_attr(all(test, feature = "benchmarks"), feature(test))]
+// it's a bit of a pain otherwise
+#![allow(clippy::many_single_char_names)]
+// it's a backwards compatibility break
+#![allow(clippy::wrong_self_convention, clippy::enum_variant_names)]
+
+#[cfg(all(test, feature = "benchmarks"))]
+extern crate test;
+
+#[cfg(test)]
+#[macro_use]
+extern crate quickcheck;
+
+pub use crate::color::{ColorType, ExtendedColorType};
+
+pub use crate::color::{Luma, LumaA, Rgb, Rgba};
+
+pub use crate::error::{ImageError, ImageResult};
+
+pub use crate::image::{
+ AnimationDecoder,
+ GenericImage,
+ GenericImageView,
+ ImageDecoder,
+ ImageDecoderRect,
+ ImageEncoder,
+ ImageFormat,
+ ImageOutputFormat,
+ // Iterators
+ Pixels,
+ Progress,
+ SubImage,
+};
+
+pub use crate::buffer_::{
+ GrayAlphaImage,
+ GrayImage,
+ // Image types
+ ImageBuffer,
+ Rgb32FImage,
+ RgbImage,
+ Rgba32FImage,
+ RgbaImage,
+};
+
+pub use crate::flat::FlatSamples;
+
+// Traits
+pub use crate::traits::{EncodableLayout, Pixel, PixelWithColorType, Primitive};
+
+// Opening and loading images
+pub use crate::dynimage::{
+ image_dimensions, load_from_memory, load_from_memory_with_format, open, save_buffer,
+ save_buffer_with_format, write_buffer_with_format,
+};
+pub use crate::io::free_functions::{guess_format, load};
+
+pub use crate::dynimage::DynamicImage;
+
+pub use crate::animation::{Delay, Frame, Frames};
+
+// More detailed error type
+pub mod error;
+
+/// Iterators and other auxiliary structure for the `ImageBuffer` type.
+pub mod buffer {
+ // Only those not exported at the top-level
+ pub use crate::buffer_::{
+ ConvertBuffer, EnumeratePixels, EnumeratePixelsMut, EnumerateRows, EnumerateRowsMut,
+ Pixels, PixelsMut, Rows, RowsMut,
+ };
+}
+
+// Math utils
+pub mod math;
+
+// Image processing functions
+pub mod imageops;
+
+// Io bindings
+pub mod io;
+
+// Buffer representations for ffi.
+pub mod flat;
+
+/// Encoding and decoding for various image file formats.
+///
+/// # Supported formats
+///
+/// <!--- NOTE: Make sure to keep this table in sync with the README -->
+///
+/// | Format | Decoding | Encoding |
+/// | ------ | -------- | -------- |
+/// | AVIF | Only 8-bit | Lossy |
+/// | BMP | Yes | Rgb8, Rgba8, Gray8, GrayA8 |
+/// | DDS | DXT1, DXT3, DXT5 | No |
+/// | Farbfeld | Yes | Yes |
+/// | GIF | Yes | Yes |
+/// | ICO | Yes | Yes |
+/// | JPEG | Baseline and progressive | Baseline JPEG |
+/// | OpenEXR | Rgb32F, Rgba32F (no dwa compression) | Rgb32F, Rgba32F (no dwa compression) |
+/// | PNG | All supported color types | Same as decoding |
+/// | PNM | PBM, PGM, PPM, standard PAM | Yes |
+/// | QOI | Yes | Yes |
+/// | TGA | Yes | Rgb8, Rgba8, Bgr8, Bgra8, Gray8, GrayA8 |
+/// | TIFF | Baseline (no fax support) + LZW + PackBits | Rgb8, Rgba8, Gray8 |
+/// | WebP | Yes | Rgb8, Rgba8 |
+///
+/// ## A note on format specific features
+///
+/// One of the main goals of `image` is stability, in runtime but also for programmers. This
+/// ensures that performance as well as safety fixes reach a majority of its user base with little
+/// effort. Re-exporting all details of its dependencies would run counter to this goal as it
+/// would link _all_ major version bumps between them and `image`. As such, we are wary of exposing too
+/// many details, or configuration options, that are not shared between different image formats.
+///
+/// Nevertheless, the advantage of precise control is hard to ignore. We will thus consider
+/// _wrappers_, not direct re-exports, in either of the following cases:
+///
+/// 1. A standard specifies that configuration _x_ is required for decoders/encoders and there
+/// exists an essentially canonical way to control it.
+/// 2. At least two different implementations agree on some (sub-)set of features in practice.
+/// 3. A technical argument including measurements of the performance, space benefits, or otherwise
+/// objectively quantified benefits can be made, and the added interface is unlikely to require
+/// breaking changes.
+///
+/// Features that fulfill two or more criteria are preferred.
+///
+/// Re-exports of dependencies that reach version `1` will be discussed when that happens.
+pub mod codecs {
+ #[cfg(any(feature = "avif-encoder", feature = "avif-decoder"))]
+ pub mod avif;
+ #[cfg(feature = "bmp")]
+ pub mod bmp;
+ #[cfg(feature = "dds")]
+ pub mod dds;
+ #[cfg(feature = "dxt")]
+ #[deprecated = "DXT support will be removed or reworked in a future version. Prefer the `squish` crate instead. See https://github.com/image-rs/image/issues/1623"]
+ pub mod dxt;
+ #[cfg(feature = "farbfeld")]
+ pub mod farbfeld;
+ #[cfg(feature = "gif")]
+ pub mod gif;
+ #[cfg(feature = "hdr")]
+ pub mod hdr;
+ #[cfg(feature = "ico")]
+ pub mod ico;
+ #[cfg(feature = "jpeg")]
+ pub mod jpeg;
+ #[cfg(feature = "exr")]
+ pub mod openexr;
+ #[cfg(feature = "png")]
+ pub mod png;
+ #[cfg(feature = "pnm")]
+ pub mod pnm;
+ #[cfg(feature = "qoi")]
+ pub mod qoi;
+ #[cfg(feature = "tga")]
+ pub mod tga;
+ #[cfg(feature = "tiff")]
+ pub mod tiff;
+ #[cfg(any(feature = "webp", feature = "webp-encoder"))]
+ pub mod webp;
+}
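A minimal sketch of a downstream program using the re-exports and formats documented above; the
file names are placeholders and the snippet is illustrative, not part of this vendored file.

use image::ImageFormat;

fn convert() -> image::ImageResult<()> {
    // Decode: `open` guesses the format from the path extension and file contents.
    let img = image::open("input.png")?;
    // Re-encode through another format from the table above (requires the `jpeg` feature).
    img.save_with_format("output.jpg", ImageFormat::Jpeg)?;
    Ok(())
}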
+
+mod animation;
+#[path = "buffer.rs"]
+mod buffer_;
+mod color;
+mod dynimage;
+mod image;
+mod traits;
+mod utils;
+
+// Can't use the macro-call itself within the `doc` attribute. So force it to be evaluated as
+// part of the macro invocation.
+//
+// The inspiration for the macro and implementation is from
+// <https://github.com/GuillaumeGomez/doc-comment>
+//
+// MIT License
+//
+// Copyright (c) 2018 Guillaume Gomez
+macro_rules! insert_as_doc {
+ { $content:expr } => {
+ #[allow(unused_doc_comments)]
+ #[doc = $content] extern { }
+ }
+}
+
+// Provides the README.md as doc, to ensure the example works!
+insert_as_doc!(include_str!("../README.md"));
diff --git a/vendor/image/src/math/mod.rs b/vendor/image/src/math/mod.rs
new file mode 100644
index 0000000..43b5b82
--- /dev/null
+++ b/vendor/image/src/math/mod.rs
@@ -0,0 +1,6 @@
+//! Mathematical helper functions and types.
+mod rect;
+mod utils;
+
+pub use self::rect::Rect;
+pub(super) use utils::resize_dimensions;
diff --git a/vendor/image/src/math/rect.rs b/vendor/image/src/math/rect.rs
new file mode 100644
index 0000000..74696be
--- /dev/null
+++ b/vendor/image/src/math/rect.rs
@@ -0,0 +1,12 @@
+/// A Rectangle defined by its top left corner, width and height.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub struct Rect {
+ /// The x coordinate of the top left corner.
+ pub x: u32,
+ /// The y coordinate of the top left corner.
+ pub y: u32,
+ /// The rectangle's width.
+ pub width: u32,
+ /// The rectangle's height.
+ pub height: u32,
+}
diff --git a/vendor/image/src/math/utils.rs b/vendor/image/src/math/utils.rs
new file mode 100644
index 0000000..3a1f121
--- /dev/null
+++ b/vendor/image/src/math/utils.rs
@@ -0,0 +1,123 @@
+//! Shared mathematical utility functions.
+
+use std::cmp::max;
+
+/// Calculates the width and height an image should be resized to.
+/// This preserves aspect ratio. Based on the `fill` parameter, the result either
+/// covers the requested `nwidth` x `nheight` bounds, overflowing them on one axis
+/// to preserve the aspect ratio, or fits completely within the bounds, leaving
+/// empty space on one axis.
+pub(crate) fn resize_dimensions(
+ width: u32,
+ height: u32,
+ nwidth: u32,
+ nheight: u32,
+ fill: bool,
+) -> (u32, u32) {
+ let wratio = nwidth as f64 / width as f64;
+ let hratio = nheight as f64 / height as f64;
+
+ let ratio = if fill {
+ f64::max(wratio, hratio)
+ } else {
+ f64::min(wratio, hratio)
+ };
+
+ let nw = max((width as f64 * ratio).round() as u64, 1);
+ let nh = max((height as f64 * ratio).round() as u64, 1);
+
+ if nw > u64::from(u32::MAX) {
+ let ratio = u32::MAX as f64 / width as f64;
+ (u32::MAX, max((height as f64 * ratio).round() as u32, 1))
+ } else if nh > u64::from(u32::MAX) {
+ let ratio = u32::MAX as f64 / height as f64;
+ (max((width as f64 * ratio).round() as u32, 1), u32::MAX)
+ } else {
+ (nw as u32, nh as u32)
+ }
+}
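A worked example of the `fill` distinction described above (an illustrative sketch mirroring the
tests below, not part of the file): resizing a 100x200 image into a 200x500 box gives
wratio = 2.0 and hratio = 2.5.

// fit (fill = false) uses min(2.0, 2.5) = 2.0, so both dimensions stay inside the box.
assert_eq!(resize_dimensions(100, 200, 200, 500, false), (200, 400));
// fill (fill = true) uses max(2.0, 2.5) = 2.5, so the width overflows the box.
assert_eq!(resize_dimensions(100, 200, 200, 500, true), (250, 500));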
+
+#[cfg(test)]
+mod test {
+ quickcheck! {
+ fn resize_bounds_correctly_width(old_w: u32, new_w: u32) -> bool {
+ if old_w == 0 || new_w == 0 { return true; }
+ // In this case, the scaling is limited by scaling of height.
+ // We could check that case separately but it does not conform to the same expectation.
+ if new_w as u64 * 400u64 >= old_w as u64 * u64::from(u32::MAX) { return true; }
+
+ let result = super::resize_dimensions(old_w, 400, new_w, ::std::u32::MAX, false);
+ let exact = (400 as f64 * new_w as f64 / old_w as f64).round() as u32;
+ result.0 == new_w && result.1 == exact.max(1)
+ }
+ }
+
+ quickcheck! {
+ fn resize_bounds_correctly_height(old_h: u32, new_h: u32) -> bool {
+ if old_h == 0 || new_h == 0 { return true; }
+ // In this case, the scaling is limited by scaling of width.
+ // We could check that case separately but it does not conform to the same expectation.
+ if 400u64 * new_h as u64 >= old_h as u64 * u64::from(u32::MAX) { return true; }
+
+ let result = super::resize_dimensions(400, old_h, ::std::u32::MAX, new_h, false);
+ let exact = (400 as f64 * new_h as f64 / old_h as f64).round() as u32;
+ result.1 == new_h && result.0 == exact.max(1)
+ }
+ }
+
+ #[test]
+ fn resize_handles_fill() {
+ let result = super::resize_dimensions(100, 200, 200, 500, true);
+ assert!(result.0 == 250);
+ assert!(result.1 == 500);
+
+ let result = super::resize_dimensions(200, 100, 500, 200, true);
+ assert!(result.0 == 500);
+ assert!(result.1 == 250);
+ }
+
+ #[test]
+ fn resize_never_rounds_to_zero() {
+ let result = super::resize_dimensions(1, 150, 128, 128, false);
+ assert!(result.0 > 0);
+ assert!(result.1 > 0);
+ }
+
+ #[test]
+ fn resize_handles_overflow() {
+ let result = super::resize_dimensions(100, ::std::u32::MAX, 200, ::std::u32::MAX, true);
+ assert!(result.0 == 100);
+ assert!(result.1 == ::std::u32::MAX);
+
+ let result = super::resize_dimensions(::std::u32::MAX, 100, ::std::u32::MAX, 200, true);
+ assert!(result.0 == ::std::u32::MAX);
+ assert!(result.1 == 100);
+ }
+
+ #[test]
+ fn resize_rounds() {
+ // Only truncation will result in (3840, 2229) and (2160, 3719)
+ let result = super::resize_dimensions(4264, 2476, 3840, 2160, true);
+ assert_eq!(result, (3840, 2230));
+
+ let result = super::resize_dimensions(2476, 4264, 2160, 3840, false);
+ assert_eq!(result, (2160, 3720));
+ }
+
+ #[test]
+ fn resize_handles_zero() {
+ let result = super::resize_dimensions(0, 100, 100, 100, false);
+ assert_eq!(result, (1, 100));
+
+ let result = super::resize_dimensions(100, 0, 100, 100, false);
+ assert_eq!(result, (100, 1));
+
+ let result = super::resize_dimensions(100, 100, 0, 100, false);
+ assert_eq!(result, (1, 1));
+
+ let result = super::resize_dimensions(100, 100, 100, 0, false);
+ assert_eq!(result, (1, 1));
+ }
+}
diff --git a/vendor/image/src/traits.rs b/vendor/image/src/traits.rs
new file mode 100644
index 0000000..56daaa0
--- /dev/null
+++ b/vendor/image/src/traits.rs
@@ -0,0 +1,370 @@
+//! This module provides useful traits that were deprecated in Rust.
+
+// Note copied from the stdlib under MIT license
+
+use num_traits::{Bounded, Num, NumCast};
+use std::ops::AddAssign;
+
+use crate::color::{ColorType, Luma, LumaA, Rgb, Rgba};
+
+/// Types which are safe to treat as an immutable byte slice in a pixel layout
+/// for image encoding.
+pub trait EncodableLayout: seals::EncodableLayout {
+ /// Get the bytes of this value.
+ fn as_bytes(&self) -> &[u8];
+}
+
+impl EncodableLayout for [u8] {
+ fn as_bytes(&self) -> &[u8] {
+ bytemuck::cast_slice(self)
+ }
+}
+
+impl EncodableLayout for [u16] {
+ fn as_bytes(&self) -> &[u8] {
+ bytemuck::cast_slice(self)
+ }
+}
+
+impl EncodableLayout for [f32] {
+ fn as_bytes(&self) -> &[u8] {
+ bytemuck::cast_slice(self)
+ }
+}
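As a quick illustration of the byte view above (a sketch assuming the surrounding module's
`EncodableLayout` is in scope; the exact byte values depend on native endianness, so only the
length is asserted):

let samples: &[u16] = &[0x0102, 0x0304];
let bytes: &[u8] = samples.as_bytes();
assert_eq!(bytes.len(), 4); // two u16 samples become four bytes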
+
+/// The type of each channel in a pixel. For example, this can be `u8`, `u16`, `f32`.
+// TODO rename to `PixelComponent`? Split up into separate traits? Seal?
+pub trait Primitive: Copy + NumCast + Num + PartialOrd<Self> + Clone + Bounded {
+ /// The maximum value for this type of primitive within the context of color.
+ /// For floats, the maximum is `1.0`, whereas the integer types inherit their usual maximum values.
+ const DEFAULT_MAX_VALUE: Self;
+
+ /// The minimum value for this type of primitive within the context of color.
+ /// For floats, the minimum is `0.0`, whereas the integer types inherit their usual minimum values.
+ const DEFAULT_MIN_VALUE: Self;
+}
+
+macro_rules! declare_primitive {
+ ($base:ty: ($from:expr)..$to:expr) => {
+ impl Primitive for $base {
+ const DEFAULT_MAX_VALUE: Self = $to;
+ const DEFAULT_MIN_VALUE: Self = $from;
+ }
+ };
+}
+
+declare_primitive!(usize: (0)..Self::MAX);
+declare_primitive!(u8: (0)..Self::MAX);
+declare_primitive!(u16: (0)..Self::MAX);
+declare_primitive!(u32: (0)..Self::MAX);
+declare_primitive!(u64: (0)..Self::MAX);
+
+declare_primitive!(isize: (Self::MIN)..Self::MAX);
+declare_primitive!(i8: (Self::MIN)..Self::MAX);
+declare_primitive!(i16: (Self::MIN)..Self::MAX);
+declare_primitive!(i32: (Self::MIN)..Self::MAX);
+declare_primitive!(i64: (Self::MIN)..Self::MAX);
+declare_primitive!(f32: (0.0)..1.0);
+declare_primitive!(f64: (0.0)..1.0);
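A quick check of the defaults declared above (illustrative only, assuming `Primitive` is in scope):

assert_eq!(<u8 as Primitive>::DEFAULT_MAX_VALUE, 255);
assert_eq!(<i16 as Primitive>::DEFAULT_MIN_VALUE, i16::MIN);
assert_eq!(<f32 as Primitive>::DEFAULT_MAX_VALUE, 1.0);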
+
+/// An `Enlargeable::Larger` value should be enough to calculate
+/// the sum (average) of a few hundred or thousand `Enlargeable` values.
+pub trait Enlargeable: Sized + Bounded + NumCast {
+ type Larger: Copy + NumCast + Num + PartialOrd<Self::Larger> + Clone + Bounded + AddAssign;
+
+ fn clamp_from(n: Self::Larger) -> Self {
+ if n > Self::max_value().to_larger() {
+ Self::max_value()
+ } else if n < Self::min_value().to_larger() {
+ Self::min_value()
+ } else {
+ NumCast::from(n).unwrap()
+ }
+ }
+
+ fn to_larger(self) -> Self::Larger {
+ NumCast::from(self).unwrap()
+ }
+}
+
+impl Enlargeable for u8 {
+ type Larger = u32;
+}
+impl Enlargeable for u16 {
+ type Larger = u32;
+}
+impl Enlargeable for u32 {
+ type Larger = u64;
+}
+impl Enlargeable for u64 {
+ type Larger = u128;
+}
+impl Enlargeable for usize {
+ // Note: On 32-bit architectures, u64 should be enough here.
+ type Larger = u128;
+}
+impl Enlargeable for i8 {
+ type Larger = i32;
+}
+impl Enlargeable for i16 {
+ type Larger = i32;
+}
+impl Enlargeable for i32 {
+ type Larger = i64;
+}
+impl Enlargeable for i64 {
+ type Larger = i128;
+}
+impl Enlargeable for isize {
+ // Note: On 32-bit architectures, i64 should be enough here.
+ type Larger = i128;
+}
+impl Enlargeable for f32 {
+ type Larger = f64;
+}
+impl Enlargeable for f64 {
+ type Larger = f64;
+}
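The doc comment above motivates `Enlargeable` with sums and averages; a hypothetical helper (not
part of the crate, assuming the module's `num_traits` imports and a non-empty input) could look
like this:

fn average<T: Enlargeable>(values: &[T]) -> T {
    // Accumulate in the wider `Larger` type so a few thousand values cannot overflow.
    let mut sum = <T::Larger as num_traits::Zero>::zero();
    for v in values {
        sum += v.to_larger();
    }
    // Divide in the wider type, then clamp back into T's range.
    let count = <T::Larger as NumCast>::from(values.len()).unwrap();
    T::clamp_from(sum / count)
}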
+
+/// Linear interpolation between two values, generic over the `Ratio` type used as the blend factor.
+pub trait Lerp: Bounded + NumCast {
+ type Ratio: Primitive;
+
+ fn lerp(a: Self, b: Self, ratio: Self::Ratio) -> Self {
+ let a = <Self::Ratio as NumCast>::from(a).unwrap();
+ let b = <Self::Ratio as NumCast>::from(b).unwrap();
+
+ let res = a + (b - a) * ratio;
+
+ if res > NumCast::from(Self::max_value()).unwrap() {
+ Self::max_value()
+ } else if res < NumCast::from(0).unwrap() {
+ NumCast::from(0).unwrap()
+ } else {
+ NumCast::from(res).unwrap()
+ }
+ }
+}
+
+impl Lerp for u8 {
+ type Ratio = f32;
+}
+
+impl Lerp for u16 {
+ type Ratio = f32;
+}
+
+impl Lerp for u32 {
+ type Ratio = f64;
+}
+
+impl Lerp for f32 {
+ type Ratio = f32;
+
+ fn lerp(a: Self, b: Self, ratio: Self::Ratio) -> Self {
+ a + (b - a) * ratio
+ }
+}
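A brief illustration of the default `lerp` above, including the clamping branch (a sketch
assuming `Lerp` is in scope, not part of the file):

assert_eq!(u8::lerp(10, 20, 0.5), 15);  // exact midpoint
assert_eq!(u8::lerp(0, 200, 2.0), 255); // 400.0 exceeds u8::MAX and is clamped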
+
+/// The pixel with an associated `ColorType`.
+/// Not all possible pixels represent one of the predefined `ColorType`s.
+pub trait PixelWithColorType: Pixel + self::private::SealedPixelWithColorType {
+ /// This pixel has the format of one of the predefined `ColorType`s,
+ /// such as `Rgb8`, `La16` or `Rgba32F`.
+ /// This is needed for automatically detecting
+ /// a color format when saving an image as a file.
+ const COLOR_TYPE: ColorType;
+}
+
+impl PixelWithColorType for Rgb<u8> {
+ const COLOR_TYPE: ColorType = ColorType::Rgb8;
+}
+impl PixelWithColorType for Rgb<u16> {
+ const COLOR_TYPE: ColorType = ColorType::Rgb16;
+}
+impl PixelWithColorType for Rgb<f32> {
+ const COLOR_TYPE: ColorType = ColorType::Rgb32F;
+}
+
+impl PixelWithColorType for Rgba<u8> {
+ const COLOR_TYPE: ColorType = ColorType::Rgba8;
+}
+impl PixelWithColorType for Rgba<u16> {
+ const COLOR_TYPE: ColorType = ColorType::Rgba16;
+}
+impl PixelWithColorType for Rgba<f32> {
+ const COLOR_TYPE: ColorType = ColorType::Rgba32F;
+}
+
+impl PixelWithColorType for Luma<u8> {
+ const COLOR_TYPE: ColorType = ColorType::L8;
+}
+impl PixelWithColorType for Luma<u16> {
+ const COLOR_TYPE: ColorType = ColorType::L16;
+}
+impl PixelWithColorType for LumaA<u8> {
+ const COLOR_TYPE: ColorType = ColorType::La8;
+}
+impl PixelWithColorType for LumaA<u16> {
+ const COLOR_TYPE: ColorType = ColorType::La16;
+}
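As an illustration of why `COLOR_TYPE` matters when saving (a hypothetical downstream sketch; the
path is a placeholder): `ImageBuffer::save` can pick an output color format automatically because
pixel types such as `Rgb<u8>` implement `PixelWithColorType`.

use image::{ImageBuffer, Rgb};

fn write_black_square() -> image::ImageResult<()> {
    // Zero-initialized buffer, i.e. a 16x16 black image.
    let img: ImageBuffer<Rgb<u8>, Vec<u8>> = ImageBuffer::new(16, 16);
    // Saving works because Rgb<u8> carries an associated COLOR_TYPE (Rgb8).
    img.save("black.png")
}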
+
+/// Prevents downstream users from implementing the `PixelWithColorType` trait
+mod private {
+ use crate::color::*;
+
+ pub trait SealedPixelWithColorType {}
+ impl SealedPixelWithColorType for Rgb<u8> {}
+ impl SealedPixelWithColorType for Rgb<u16> {}
+ impl SealedPixelWithColorType for Rgb<f32> {}
+
+ impl SealedPixelWithColorType for Rgba<u8> {}
+ impl SealedPixelWithColorType for Rgba<u16> {}
+ impl SealedPixelWithColorType for Rgba<f32> {}
+
+ impl SealedPixelWithColorType for Luma<u8> {}
+ impl SealedPixelWithColorType for LumaA<u8> {}
+
+ impl SealedPixelWithColorType for Luma<u16> {}
+ impl SealedPixelWithColorType for LumaA<u16> {}
+}
+
+/// A generalized pixel.
+///
+/// A pixel object is usually not used standalone but as a view into an image buffer.
+pub trait Pixel: Copy + Clone {
+ /// The scalar type that is used to store each channel in this pixel.
+ type Subpixel: Primitive;
+
+ /// The number of channels of this pixel type.
+ const CHANNEL_COUNT: u8;
+
+ /// Returns the components as a slice.
+ fn channels(&self) -> &[Self::Subpixel];
+
+ /// Returns the components as a mutable slice
+ fn channels_mut(&mut self) -> &mut [Self::Subpixel];
+
+    /// A string that can help to interpret the meaning of each channel.
+    /// See [gimp babl](http://gegl.org/babl/).
+ const COLOR_MODEL: &'static str;
+
+    /// Returns the channels of this pixel as a 4-tuple. If the pixel
+    /// has fewer than 4 channels, the remainder is filled with the maximum value.
+ #[deprecated(since = "0.24.0", note = "Use `channels()` or `channels_mut()`")]
+ fn channels4(
+ &self,
+ ) -> (
+ Self::Subpixel,
+ Self::Subpixel,
+ Self::Subpixel,
+ Self::Subpixel,
+ );
+
+    /// Construct a pixel from the 4 channels a, b, c and d.
+    /// If the pixel does not contain 4 channels, the extra ones are ignored.
+ #[deprecated(
+ since = "0.24.0",
+ note = "Use the constructor of the pixel, for example `Rgba([r,g,b,a])` or `Pixel::from_slice`"
+ )]
+ fn from_channels(
+ a: Self::Subpixel,
+ b: Self::Subpixel,
+ c: Self::Subpixel,
+ d: Self::Subpixel,
+ ) -> Self;
+
+ /// Returns a view into a slice.
+ ///
+ /// Note: The slice length is not checked on creation. Thus the caller has to ensure
+ /// that the slice is long enough to prevent panics if the pixel is used later on.
+ fn from_slice(slice: &[Self::Subpixel]) -> &Self;
+
+    /// Returns a mutable view into a mutable slice.
+ ///
+ /// Note: The slice length is not checked on creation. Thus the caller has to ensure
+ /// that the slice is long enough to prevent panics if the pixel is used later on.
+ fn from_slice_mut(slice: &mut [Self::Subpixel]) -> &mut Self;
+
+ /// Convert this pixel to RGB
+ fn to_rgb(&self) -> Rgb<Self::Subpixel>;
+
+ /// Convert this pixel to RGB with an alpha channel
+ fn to_rgba(&self) -> Rgba<Self::Subpixel>;
+
+ /// Convert this pixel to luma
+ fn to_luma(&self) -> Luma<Self::Subpixel>;
+
+ /// Convert this pixel to luma with an alpha channel
+ fn to_luma_alpha(&self) -> LumaA<Self::Subpixel>;
+
+ /// Apply the function ```f``` to each channel of this pixel.
+ fn map<F>(&self, f: F) -> Self
+ where
+ F: FnMut(Self::Subpixel) -> Self::Subpixel;
+
+ /// Apply the function ```f``` to each channel of this pixel.
+ fn apply<F>(&mut self, f: F)
+ where
+ F: FnMut(Self::Subpixel) -> Self::Subpixel;
+
+ /// Apply the function ```f``` to each channel except the alpha channel.
+ /// Apply the function ```g``` to the alpha channel.
+ fn map_with_alpha<F, G>(&self, f: F, g: G) -> Self
+ where
+ F: FnMut(Self::Subpixel) -> Self::Subpixel,
+ G: FnMut(Self::Subpixel) -> Self::Subpixel;
+
+ /// Apply the function ```f``` to each channel except the alpha channel.
+ /// Apply the function ```g``` to the alpha channel. Works in-place.
+ fn apply_with_alpha<F, G>(&mut self, f: F, g: G)
+ where
+ F: FnMut(Self::Subpixel) -> Self::Subpixel,
+ G: FnMut(Self::Subpixel) -> Self::Subpixel;
+
+ /// Apply the function ```f``` to each channel except the alpha channel.
+ fn map_without_alpha<F>(&self, f: F) -> Self
+ where
+ F: FnMut(Self::Subpixel) -> Self::Subpixel,
+ {
+ let mut this = *self;
+ this.apply_with_alpha(f, |x| x);
+ this
+ }
+
+ /// Apply the function ```f``` to each channel except the alpha channel.
+ /// Works in place.
+ fn apply_without_alpha<F>(&mut self, f: F)
+ where
+ F: FnMut(Self::Subpixel) -> Self::Subpixel,
+ {
+ self.apply_with_alpha(f, |x| x);
+ }
+
+ /// Apply the function ```f``` to each channel of this pixel and
+ /// ```other``` pairwise.
+ fn map2<F>(&self, other: &Self, f: F) -> Self
+ where
+ F: FnMut(Self::Subpixel, Self::Subpixel) -> Self::Subpixel;
+
+ /// Apply the function ```f``` to each channel of this pixel and
+ /// ```other``` pairwise. Works in-place.
+ fn apply2<F>(&mut self, other: &Self, f: F)
+ where
+ F: FnMut(Self::Subpixel, Self::Subpixel) -> Self::Subpixel;
+
+ /// Invert this pixel
+ fn invert(&mut self);
+
+ /// Blend the color of a given pixel into ourself, taking into account alpha channels
+ fn blend(&mut self, other: &Self);
+}
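A small usage sketch of the `Pixel` trait with the publicly re-exported `Rgba` type (illustrative,
not part of the file):

use image::{Pixel, Rgba};

let px = Rgba([10u8, 20, 30, 255]);
// Brighten the color channels while leaving alpha untouched.
let brighter = px.map_without_alpha(|c| c.saturating_add(100));
assert_eq!(brighter, Rgba([110, 120, 130, 255]));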
+
+/// Private module for supertraits of sealed traits.
+mod seals {
+ pub trait EncodableLayout {}
+
+ impl EncodableLayout for [u8] {}
+ impl EncodableLayout for [u16] {}
+ impl EncodableLayout for [f32] {}
+}
diff --git a/vendor/image/src/utils/mod.rs b/vendor/image/src/utils/mod.rs
new file mode 100644
index 0000000..529c60f
--- /dev/null
+++ b/vendor/image/src/utils/mod.rs
@@ -0,0 +1,128 @@
+//! Utilities
+
+use std::iter::repeat;
+
+#[inline(always)]
+pub(crate) fn expand_packed<F>(buf: &mut [u8], channels: usize, bit_depth: u8, mut func: F)
+where
+ F: FnMut(u8, &mut [u8]),
+{
+ let pixels = buf.len() / channels * bit_depth as usize;
+ let extra = pixels % 8;
+ let entries = pixels / 8
+ + match extra {
+ 0 => 0,
+ _ => 1,
+ };
+ let mask = ((1u16 << bit_depth) - 1) as u8;
+ let i = (0..entries)
+ .rev() // Reverse iterator
+ .flat_map(|idx|
+ // This has to be reversed to
+ (0..8/bit_depth).map(|i| i*bit_depth).zip(repeat(idx)))
+ .skip(extra);
+ let buf_len = buf.len();
+ let j_inv = (channels..buf_len).step_by(channels);
+ for ((shift, i), j_inv) in i.zip(j_inv) {
+ let j = buf_len - j_inv;
+ let pixel = (buf[i] & (mask << shift)) >> shift;
+ func(pixel, &mut buf[j..(j + channels)])
+ }
+}
+
+/// Expand a buffer of packed 1-, 2-, or 4-bit integers into u8's. Assumes that
+/// after every `row_size` entries there are padding bits up to the next byte boundary.
+#[allow(dead_code)]
+// When no image formats that use it are enabled
+pub(crate) fn expand_bits(bit_depth: u8, row_size: u32, buf: &[u8]) -> Vec<u8> {
+ // Note: this conversion assumes that the scanlines begin on byte boundaries
+ let mask = (1u8 << bit_depth as usize) - 1;
+ let scaling_factor = 255 / ((1 << bit_depth as usize) - 1);
+ let bit_width = row_size * u32::from(bit_depth);
+ let skip = if bit_width % 8 == 0 {
+ 0
+ } else {
+ (8 - bit_width % 8) / u32::from(bit_depth)
+ };
+ let row_len = row_size + skip;
+ let mut p = Vec::new();
+ let mut i = 0;
+ for v in buf {
+ for shift_inv in 1..=8 / bit_depth {
+ let shift = 8 - bit_depth * shift_inv;
+ // skip the pixels that can be neglected because scanlines should
+ // start at byte boundaries
+ if i % (row_len as usize) < (row_size as usize) {
+ let pixel = (v & mask << shift as usize) >> shift as usize;
+ p.push(pixel * scaling_factor);
+ }
+ i += 1;
+ }
+ }
+ p
+}
+
+/// Checks if the provided dimensions would cause an overflow.
+#[allow(dead_code)]
+// When no image formats that use it are enabled
+pub(crate) fn check_dimension_overflow(width: u32, height: u32, bytes_per_pixel: u8) -> bool {
+ width as u64 * height as u64 > std::u64::MAX / bytes_per_pixel as u64
+}
+
+#[allow(dead_code)]
+// When no image formats that use it are enabled
+pub(crate) fn vec_copy_to_u8<T>(vec: &[T]) -> Vec<u8>
+where
+ T: bytemuck::Pod,
+{
+ bytemuck::cast_slice(vec).to_owned()
+}
+
+#[inline]
+pub(crate) fn clamp<N>(a: N, min: N, max: N) -> N
+where
+ N: PartialOrd,
+{
+ if a < min {
+ min
+ } else if a > max {
+ max
+ } else {
+ a
+ }
+}
+
+#[cfg(test)]
+mod test {
+ #[test]
+ fn gray_to_luma8_skip() {
+ let check = |bit_depth, w, from, to| {
+ assert_eq!(super::expand_bits(bit_depth, w, from), to);
+ };
+ // Bit depth 1, skip is more than half a byte
+ check(
+ 1,
+ 10,
+ &[0b11110000, 0b11000000, 0b00001111, 0b11000000],
+ vec![
+ 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255,
+ ],
+ );
+ // Bit depth 2, skip is more than half a byte
+ check(
+ 2,
+ 5,
+ &[0b11110000, 0b11000000, 0b00001111, 0b11000000],
+ vec![255, 255, 0, 0, 255, 0, 0, 255, 255, 255],
+ );
+ // Bit depth 2, skip is 0
+ check(
+ 2,
+ 4,
+ &[0b11110000, 0b00001111],
+ vec![255, 255, 0, 0, 0, 0, 255, 255],
+ );
+ // Bit depth 4, skip is half a byte
+ check(4, 1, &[0b11110011, 0b00001100], vec![255, 0]);
+ }
+}