path: root/vendor/num-traits/src/ops
author    Valentin Popov <valentin@popov.link>  2024-01-08 00:21:28 +0300
committer Valentin Popov <valentin@popov.link>  2024-01-08 00:21:28 +0300
commit    1b6a04ca5504955c571d1c97504fb45ea0befee4 (patch)
tree      7579f518b23313e8a9748a88ab6173d5e030b227 /vendor/num-traits/src/ops
parent    5ecd8cf2cba827454317368b68571df0d13d7842 (diff)
download  fparkan-1b6a04ca5504955c571d1c97504fb45ea0befee4.tar.xz
          fparkan-1b6a04ca5504955c571d1c97504fb45ea0befee4.zip
Initial vendor packages
Signed-off-by: Valentin Popov <valentin@popov.link>
Diffstat (limited to 'vendor/num-traits/src/ops')
-rw-r--r--  vendor/num-traits/src/ops/bytes.rs        | 403
-rw-r--r--  vendor/num-traits/src/ops/checked.rs      | 261
-rw-r--r--  vendor/num-traits/src/ops/euclid.rs       | 339
-rw-r--r--  vendor/num-traits/src/ops/inv.rs          |  47
-rw-r--r--  vendor/num-traits/src/ops/mod.rs          |   8
-rw-r--r--  vendor/num-traits/src/ops/mul_add.rs      | 149
-rw-r--r--  vendor/num-traits/src/ops/overflowing.rs  |  96
-rw-r--r--  vendor/num-traits/src/ops/saturating.rs   | 130
-rw-r--r--  vendor/num-traits/src/ops/wrapping.rs     | 327
9 files changed, 1760 insertions, 0 deletions
diff --git a/vendor/num-traits/src/ops/bytes.rs b/vendor/num-traits/src/ops/bytes.rs
new file mode 100644
index 0000000..4df9ecd
--- /dev/null
+++ b/vendor/num-traits/src/ops/bytes.rs
@@ -0,0 +1,403 @@
+use core::borrow::{Borrow, BorrowMut};
+use core::cmp::{Eq, Ord, PartialEq, PartialOrd};
+use core::fmt::Debug;
+use core::hash::Hash;
+#[cfg(not(has_int_to_from_bytes))]
+use core::mem::transmute;
+
+pub trait NumBytes:
+ Debug
+ + AsRef<[u8]>
+ + AsMut<[u8]>
+ + PartialEq
+ + Eq
+ + PartialOrd
+ + Ord
+ + Hash
+ + Borrow<[u8]>
+ + BorrowMut<[u8]>
+{
+}
+
+impl<T> NumBytes for T where
+ T: Debug
+ + AsRef<[u8]>
+ + AsMut<[u8]>
+ + PartialEq
+ + Eq
+ + PartialOrd
+ + Ord
+ + Hash
+ + Borrow<[u8]>
+ + BorrowMut<[u8]>
+ + ?Sized
+{
+}
+
+pub trait ToBytes {
+ type Bytes: NumBytes;
+
+ /// Return the memory representation of this number as a byte array in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::ToBytes;
+ ///
+ /// let bytes = ToBytes::to_be_bytes(&0x12345678u32);
+ /// assert_eq!(bytes, [0x12, 0x34, 0x56, 0x78]);
+ /// ```
+ fn to_be_bytes(&self) -> Self::Bytes;
+
+ /// Return the memory representation of this number as a byte array in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::ToBytes;
+ ///
+ /// let bytes = ToBytes::to_le_bytes(&0x12345678u32);
+ /// assert_eq!(bytes, [0x78, 0x56, 0x34, 0x12]);
+ /// ```
+ fn to_le_bytes(&self) -> Self::Bytes;
+
+ /// Return the memory representation of this number as a byte array in native byte order.
+ ///
+ /// As the target platform's native endianness is used,
+ /// portable code should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
+ ///
+ /// [`to_be_bytes`]: #method.to_be_bytes
+ /// [`to_le_bytes`]: #method.to_le_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::ToBytes;
+ ///
+ /// #[cfg(target_endian = "big")]
+ /// let expected = [0x12, 0x34, 0x56, 0x78];
+ ///
+ /// #[cfg(target_endian = "little")]
+ /// let expected = [0x78, 0x56, 0x34, 0x12];
+ ///
+ /// let bytes = ToBytes::to_ne_bytes(&0x12345678u32);
+ /// assert_eq!(bytes, expected)
+ /// ```
+ fn to_ne_bytes(&self) -> Self::Bytes {
+ #[cfg(target_endian = "big")]
+ let bytes = self.to_be_bytes();
+ #[cfg(target_endian = "little")]
+ let bytes = self.to_le_bytes();
+ bytes
+ }
+}
+
+pub trait FromBytes: Sized {
+ type Bytes: NumBytes + ?Sized;
+
+ /// Create a number from its representation as a byte array in big endian.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::FromBytes;
+ ///
+ /// let value: u32 = FromBytes::from_be_bytes(&[0x12, 0x34, 0x56, 0x78]);
+ /// assert_eq!(value, 0x12345678);
+ /// ```
+ fn from_be_bytes(bytes: &Self::Bytes) -> Self;
+
+ /// Create a number from its representation as a byte array in little endian.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::FromBytes;
+ ///
+ /// let value: u32 = FromBytes::from_le_bytes(&[0x78, 0x56, 0x34, 0x12]);
+ /// assert_eq!(value, 0x12345678);
+ /// ```
+ fn from_le_bytes(bytes: &Self::Bytes) -> Self;
+
+ /// Create a number from its memory representation as a byte array in native endianness.
+ ///
+ /// As the target platform's native endianness is used,
+ /// portable code likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as appropriate, instead.
+ ///
+ /// [`from_be_bytes`]: #method.from_be_bytes
+ /// [`from_le_bytes`]: #method.from_le_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::FromBytes;
+ ///
+ /// #[cfg(target_endian = "big")]
+ /// let bytes = [0x12, 0x34, 0x56, 0x78];
+ ///
+ /// #[cfg(target_endian = "little")]
+ /// let bytes = [0x78, 0x56, 0x34, 0x12];
+ ///
+ /// let value: u32 = FromBytes::from_ne_bytes(&bytes);
+ /// assert_eq!(value, 0x12345678)
+ /// ```
+ fn from_ne_bytes(bytes: &Self::Bytes) -> Self {
+ #[cfg(target_endian = "big")]
+ let this = Self::from_be_bytes(bytes);
+ #[cfg(target_endian = "little")]
+ let this = Self::from_le_bytes(bytes);
+ this
+ }
+}
+
+macro_rules! float_to_from_bytes_impl {
+ ($T:ty, $L:expr) => {
+ #[cfg(has_float_to_from_bytes)]
+ impl ToBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn to_be_bytes(&self) -> Self::Bytes {
+ <$T>::to_be_bytes(*self)
+ }
+
+ #[inline]
+ fn to_le_bytes(&self) -> Self::Bytes {
+ <$T>::to_le_bytes(*self)
+ }
+
+ #[inline]
+ fn to_ne_bytes(&self) -> Self::Bytes {
+ <$T>::to_ne_bytes(*self)
+ }
+ }
+
+ #[cfg(has_float_to_from_bytes)]
+ impl FromBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn from_be_bytes(bytes: &Self::Bytes) -> Self {
+ <$T>::from_be_bytes(*bytes)
+ }
+
+ #[inline]
+ fn from_le_bytes(bytes: &Self::Bytes) -> Self {
+ <$T>::from_le_bytes(*bytes)
+ }
+
+ #[inline]
+ fn from_ne_bytes(bytes: &Self::Bytes) -> Self {
+ <$T>::from_ne_bytes(*bytes)
+ }
+ }
+
+ #[cfg(not(has_float_to_from_bytes))]
+ impl ToBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn to_be_bytes(&self) -> Self::Bytes {
+ ToBytes::to_be_bytes(&self.to_bits())
+ }
+
+ #[inline]
+ fn to_le_bytes(&self) -> Self::Bytes {
+ ToBytes::to_le_bytes(&self.to_bits())
+ }
+
+ #[inline]
+ fn to_ne_bytes(&self) -> Self::Bytes {
+ ToBytes::to_ne_bytes(&self.to_bits())
+ }
+ }
+
+ #[cfg(not(has_float_to_from_bytes))]
+ impl FromBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn from_be_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_bits(FromBytes::from_be_bytes(bytes))
+ }
+
+ #[inline]
+ fn from_le_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_bits(FromBytes::from_le_bytes(bytes))
+ }
+
+ #[inline]
+ fn from_ne_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_bits(FromBytes::from_ne_bytes(bytes))
+ }
+ }
+ };
+}
+
+macro_rules! int_to_from_bytes_impl {
+ ($T:ty, $L:expr) => {
+ #[cfg(has_int_to_from_bytes)]
+ impl ToBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn to_be_bytes(&self) -> Self::Bytes {
+ <$T>::to_be_bytes(*self)
+ }
+
+ #[inline]
+ fn to_le_bytes(&self) -> Self::Bytes {
+ <$T>::to_le_bytes(*self)
+ }
+
+ #[inline]
+ fn to_ne_bytes(&self) -> Self::Bytes {
+ <$T>::to_ne_bytes(*self)
+ }
+ }
+
+ #[cfg(has_int_to_from_bytes)]
+ impl FromBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn from_be_bytes(bytes: &Self::Bytes) -> Self {
+ <$T>::from_be_bytes(*bytes)
+ }
+
+ #[inline]
+ fn from_le_bytes(bytes: &Self::Bytes) -> Self {
+ <$T>::from_le_bytes(*bytes)
+ }
+
+ #[inline]
+ fn from_ne_bytes(bytes: &Self::Bytes) -> Self {
+ <$T>::from_ne_bytes(*bytes)
+ }
+ }
+
+ #[cfg(not(has_int_to_from_bytes))]
+ impl ToBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn to_be_bytes(&self) -> Self::Bytes {
+ <$T as ToBytes>::to_ne_bytes(&<$T>::to_be(*self))
+ }
+
+ #[inline]
+ fn to_le_bytes(&self) -> Self::Bytes {
+ <$T as ToBytes>::to_ne_bytes(&<$T>::to_le(*self))
+ }
+
+ #[inline]
+ fn to_ne_bytes(&self) -> Self::Bytes {
+ unsafe { transmute(*self) }
+ }
+ }
+
+ #[cfg(not(has_int_to_from_bytes))]
+ impl FromBytes for $T {
+ type Bytes = [u8; $L];
+
+ #[inline]
+ fn from_be_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_be(<Self as FromBytes>::from_ne_bytes(bytes))
+ }
+
+ #[inline]
+ fn from_le_bytes(bytes: &Self::Bytes) -> Self {
+ Self::from_le(<Self as FromBytes>::from_ne_bytes(bytes))
+ }
+
+ #[inline]
+ fn from_ne_bytes(bytes: &Self::Bytes) -> Self {
+ unsafe { transmute(*bytes) }
+ }
+ }
+ };
+}
+
+int_to_from_bytes_impl!(u8, 1);
+int_to_from_bytes_impl!(u16, 2);
+int_to_from_bytes_impl!(u32, 4);
+int_to_from_bytes_impl!(u64, 8);
+int_to_from_bytes_impl!(u128, 16);
+#[cfg(target_pointer_width = "64")]
+int_to_from_bytes_impl!(usize, 8);
+#[cfg(target_pointer_width = "32")]
+int_to_from_bytes_impl!(usize, 4);
+
+int_to_from_bytes_impl!(i8, 1);
+int_to_from_bytes_impl!(i16, 2);
+int_to_from_bytes_impl!(i32, 4);
+int_to_from_bytes_impl!(i64, 8);
+int_to_from_bytes_impl!(i128, 16);
+#[cfg(target_pointer_width = "64")]
+int_to_from_bytes_impl!(isize, 8);
+#[cfg(target_pointer_width = "32")]
+int_to_from_bytes_impl!(isize, 4);
+
+float_to_from_bytes_impl!(f32, 4);
+float_to_from_bytes_impl!(f64, 8);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ macro_rules! check_to_from_bytes {
+ ($( $ty:ty )+) => {$({
+ let n = 1;
+ let be = <$ty as ToBytes>::to_be_bytes(&n);
+ let le = <$ty as ToBytes>::to_le_bytes(&n);
+ let ne = <$ty as ToBytes>::to_ne_bytes(&n);
+
+ assert_eq!(*be.last().unwrap(), 1);
+ assert_eq!(*le.first().unwrap(), 1);
+ if cfg!(target_endian = "big") {
+ assert_eq!(*ne.last().unwrap(), 1);
+ } else {
+ assert_eq!(*ne.first().unwrap(), 1);
+ }
+
+ assert_eq!(<$ty as FromBytes>::from_be_bytes(&be), n);
+ assert_eq!(<$ty as FromBytes>::from_le_bytes(&le), n);
+ if cfg!(target_endian = "big") {
+ assert_eq!(<$ty as FromBytes>::from_ne_bytes(&be), n);
+ } else {
+ assert_eq!(<$ty as FromBytes>::from_ne_bytes(&le), n);
+ }
+ })+}
+ }
+
+ #[test]
+ fn convert_between_int_and_bytes() {
+ check_to_from_bytes!(u8 u16 u32 u64 u128 usize);
+ check_to_from_bytes!(i8 i16 i32 i64 i128 isize);
+ }
+
+ #[test]
+ fn convert_between_float_and_bytes() {
+ macro_rules! check_to_from_bytes {
+ ($( $ty:ty )+) => {$(
+ let n: $ty = 3.14;
+
+ let be = <$ty as ToBytes>::to_be_bytes(&n);
+ let le = <$ty as ToBytes>::to_le_bytes(&n);
+ let ne = <$ty as ToBytes>::to_ne_bytes(&n);
+
+ assert_eq!(<$ty as FromBytes>::from_be_bytes(&be), n);
+ assert_eq!(<$ty as FromBytes>::from_le_bytes(&le), n);
+ if cfg!(target_endian = "big") {
+ assert_eq!(ne, be);
+ assert_eq!(<$ty as FromBytes>::from_ne_bytes(&be), n);
+ } else {
+ assert_eq!(ne, le);
+ assert_eq!(<$ty as FromBytes>::from_ne_bytes(&le), n);
+ }
+ )+}
+ }
+
+ check_to_from_bytes!(f32 f64);
+ }
+}
diff --git a/vendor/num-traits/src/ops/checked.rs b/vendor/num-traits/src/ops/checked.rs
new file mode 100644
index 0000000..da1eb3e
--- /dev/null
+++ b/vendor/num-traits/src/ops/checked.rs
@@ -0,0 +1,261 @@
+use core::ops::{Add, Div, Mul, Rem, Shl, Shr, Sub};
+
+/// Performs addition that returns `None` instead of wrapping around on
+/// overflow.
+pub trait CheckedAdd: Sized + Add<Self, Output = Self> {
+ /// Adds two numbers, checking for overflow. If overflow happens, `None` is
+ /// returned.
+ fn checked_add(&self, v: &Self) -> Option<Self>;
+}
+
+macro_rules! checked_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, v: &$t) -> Option<$t> {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+}
+
+checked_impl!(CheckedAdd, checked_add, u8);
+checked_impl!(CheckedAdd, checked_add, u16);
+checked_impl!(CheckedAdd, checked_add, u32);
+checked_impl!(CheckedAdd, checked_add, u64);
+checked_impl!(CheckedAdd, checked_add, usize);
+checked_impl!(CheckedAdd, checked_add, u128);
+
+checked_impl!(CheckedAdd, checked_add, i8);
+checked_impl!(CheckedAdd, checked_add, i16);
+checked_impl!(CheckedAdd, checked_add, i32);
+checked_impl!(CheckedAdd, checked_add, i64);
+checked_impl!(CheckedAdd, checked_add, isize);
+checked_impl!(CheckedAdd, checked_add, i128);
+
+/// Performs subtraction that returns `None` instead of wrapping around on underflow.
+pub trait CheckedSub: Sized + Sub<Self, Output = Self> {
+ /// Subtracts two numbers, checking for underflow. If underflow happens,
+ /// `None` is returned.
+ fn checked_sub(&self, v: &Self) -> Option<Self>;
+}
+
+checked_impl!(CheckedSub, checked_sub, u8);
+checked_impl!(CheckedSub, checked_sub, u16);
+checked_impl!(CheckedSub, checked_sub, u32);
+checked_impl!(CheckedSub, checked_sub, u64);
+checked_impl!(CheckedSub, checked_sub, usize);
+checked_impl!(CheckedSub, checked_sub, u128);
+
+checked_impl!(CheckedSub, checked_sub, i8);
+checked_impl!(CheckedSub, checked_sub, i16);
+checked_impl!(CheckedSub, checked_sub, i32);
+checked_impl!(CheckedSub, checked_sub, i64);
+checked_impl!(CheckedSub, checked_sub, isize);
+checked_impl!(CheckedSub, checked_sub, i128);
+
+/// Performs multiplication that returns `None` instead of wrapping around on underflow or
+/// overflow.
+pub trait CheckedMul: Sized + Mul<Self, Output = Self> {
+ /// Multiplies two numbers, checking for underflow or overflow. If underflow
+ /// or overflow happens, `None` is returned.
+ fn checked_mul(&self, v: &Self) -> Option<Self>;
+}
+
+checked_impl!(CheckedMul, checked_mul, u8);
+checked_impl!(CheckedMul, checked_mul, u16);
+checked_impl!(CheckedMul, checked_mul, u32);
+checked_impl!(CheckedMul, checked_mul, u64);
+checked_impl!(CheckedMul, checked_mul, usize);
+checked_impl!(CheckedMul, checked_mul, u128);
+
+checked_impl!(CheckedMul, checked_mul, i8);
+checked_impl!(CheckedMul, checked_mul, i16);
+checked_impl!(CheckedMul, checked_mul, i32);
+checked_impl!(CheckedMul, checked_mul, i64);
+checked_impl!(CheckedMul, checked_mul, isize);
+checked_impl!(CheckedMul, checked_mul, i128);
+
+/// Performs division that returns `None` instead of panicking on division by zero and instead of
+/// wrapping around on underflow and overflow.
+pub trait CheckedDiv: Sized + Div<Self, Output = Self> {
+ /// Divides two numbers, checking for underflow, overflow and division by
+ /// zero. If any of that happens, `None` is returned.
+ fn checked_div(&self, v: &Self) -> Option<Self>;
+}
+
+checked_impl!(CheckedDiv, checked_div, u8);
+checked_impl!(CheckedDiv, checked_div, u16);
+checked_impl!(CheckedDiv, checked_div, u32);
+checked_impl!(CheckedDiv, checked_div, u64);
+checked_impl!(CheckedDiv, checked_div, usize);
+checked_impl!(CheckedDiv, checked_div, u128);
+
+checked_impl!(CheckedDiv, checked_div, i8);
+checked_impl!(CheckedDiv, checked_div, i16);
+checked_impl!(CheckedDiv, checked_div, i32);
+checked_impl!(CheckedDiv, checked_div, i64);
+checked_impl!(CheckedDiv, checked_div, isize);
+checked_impl!(CheckedDiv, checked_div, i128);
+
+/// Performs an integral remainder that returns `None` instead of panicking on division by zero and
+/// instead of wrapping around on underflow and overflow.
+pub trait CheckedRem: Sized + Rem<Self, Output = Self> {
+ /// Finds the remainder of dividing two numbers, checking for underflow, overflow and division
+ /// by zero. If any of that happens, `None` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::CheckedRem;
+ /// use std::i32::MIN;
+ ///
+ /// assert_eq!(CheckedRem::checked_rem(&10, &7), Some(3));
+ /// assert_eq!(CheckedRem::checked_rem(&10, &-7), Some(3));
+ /// assert_eq!(CheckedRem::checked_rem(&-10, &7), Some(-3));
+ /// assert_eq!(CheckedRem::checked_rem(&-10, &-7), Some(-3));
+ ///
+ /// assert_eq!(CheckedRem::checked_rem(&10, &0), None);
+ ///
+ /// assert_eq!(CheckedRem::checked_rem(&MIN, &1), Some(0));
+ /// assert_eq!(CheckedRem::checked_rem(&MIN, &-1), None);
+ /// ```
+ fn checked_rem(&self, v: &Self) -> Option<Self>;
+}
+
+checked_impl!(CheckedRem, checked_rem, u8);
+checked_impl!(CheckedRem, checked_rem, u16);
+checked_impl!(CheckedRem, checked_rem, u32);
+checked_impl!(CheckedRem, checked_rem, u64);
+checked_impl!(CheckedRem, checked_rem, usize);
+checked_impl!(CheckedRem, checked_rem, u128);
+
+checked_impl!(CheckedRem, checked_rem, i8);
+checked_impl!(CheckedRem, checked_rem, i16);
+checked_impl!(CheckedRem, checked_rem, i32);
+checked_impl!(CheckedRem, checked_rem, i64);
+checked_impl!(CheckedRem, checked_rem, isize);
+checked_impl!(CheckedRem, checked_rem, i128);
+
+macro_rules! checked_impl_unary {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self) -> Option<$t> {
+ <$t>::$method(*self)
+ }
+ }
+ };
+}
+
+/// Performs negation that returns `None` if the result can't be represented.
+pub trait CheckedNeg: Sized {
+ /// Negates a number, returning `None` for results that can't be represented, like signed `MIN`
+ /// values that can't be positive, or non-zero unsigned values that can't be negative.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::CheckedNeg;
+ /// use std::i32::MIN;
+ ///
+ /// assert_eq!(CheckedNeg::checked_neg(&1_i32), Some(-1));
+ /// assert_eq!(CheckedNeg::checked_neg(&-1_i32), Some(1));
+ /// assert_eq!(CheckedNeg::checked_neg(&MIN), None);
+ ///
+ /// assert_eq!(CheckedNeg::checked_neg(&0_u32), Some(0));
+ /// assert_eq!(CheckedNeg::checked_neg(&1_u32), None);
+ /// ```
+ fn checked_neg(&self) -> Option<Self>;
+}
+
+checked_impl_unary!(CheckedNeg, checked_neg, u8);
+checked_impl_unary!(CheckedNeg, checked_neg, u16);
+checked_impl_unary!(CheckedNeg, checked_neg, u32);
+checked_impl_unary!(CheckedNeg, checked_neg, u64);
+checked_impl_unary!(CheckedNeg, checked_neg, usize);
+checked_impl_unary!(CheckedNeg, checked_neg, u128);
+
+checked_impl_unary!(CheckedNeg, checked_neg, i8);
+checked_impl_unary!(CheckedNeg, checked_neg, i16);
+checked_impl_unary!(CheckedNeg, checked_neg, i32);
+checked_impl_unary!(CheckedNeg, checked_neg, i64);
+checked_impl_unary!(CheckedNeg, checked_neg, isize);
+checked_impl_unary!(CheckedNeg, checked_neg, i128);
+
+/// Performs a left shift that returns `None` on shifts larger than
+/// or equal to the type width.
+pub trait CheckedShl: Sized + Shl<u32, Output = Self> {
+ /// Checked shift left. Computes `self << rhs`, returning `None`
+ /// if `rhs` is larger than or equal to the number of bits in `self`.
+ ///
+ /// ```
+ /// use num_traits::CheckedShl;
+ ///
+ /// let x: u16 = 0x0001;
+ ///
+ /// assert_eq!(CheckedShl::checked_shl(&x, 0), Some(0x0001));
+ /// assert_eq!(CheckedShl::checked_shl(&x, 1), Some(0x0002));
+ /// assert_eq!(CheckedShl::checked_shl(&x, 15), Some(0x8000));
+ /// assert_eq!(CheckedShl::checked_shl(&x, 16), None);
+ /// ```
+ fn checked_shl(&self, rhs: u32) -> Option<Self>;
+}
+
+macro_rules! checked_shift_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, rhs: u32) -> Option<$t> {
+ <$t>::$method(*self, rhs)
+ }
+ }
+ };
+}
+
+checked_shift_impl!(CheckedShl, checked_shl, u8);
+checked_shift_impl!(CheckedShl, checked_shl, u16);
+checked_shift_impl!(CheckedShl, checked_shl, u32);
+checked_shift_impl!(CheckedShl, checked_shl, u64);
+checked_shift_impl!(CheckedShl, checked_shl, usize);
+checked_shift_impl!(CheckedShl, checked_shl, u128);
+
+checked_shift_impl!(CheckedShl, checked_shl, i8);
+checked_shift_impl!(CheckedShl, checked_shl, i16);
+checked_shift_impl!(CheckedShl, checked_shl, i32);
+checked_shift_impl!(CheckedShl, checked_shl, i64);
+checked_shift_impl!(CheckedShl, checked_shl, isize);
+checked_shift_impl!(CheckedShl, checked_shl, i128);
+
+/// Performs a right shift that returns `None` on shifts larger than
+/// or equal to the type width.
+pub trait CheckedShr: Sized + Shr<u32, Output = Self> {
+ /// Checked shift right. Computes `self >> rhs`, returning `None`
+ /// if `rhs` is larger than or equal to the number of bits in `self`.
+ ///
+ /// ```
+ /// use num_traits::CheckedShr;
+ ///
+ /// let x: u16 = 0x8000;
+ ///
+ /// assert_eq!(CheckedShr::checked_shr(&x, 0), Some(0x8000));
+ /// assert_eq!(CheckedShr::checked_shr(&x, 1), Some(0x4000));
+ /// assert_eq!(CheckedShr::checked_shr(&x, 15), Some(0x0001));
+ /// assert_eq!(CheckedShr::checked_shr(&x, 16), None);
+ /// ```
+ fn checked_shr(&self, rhs: u32) -> Option<Self>;
+}
+
+checked_shift_impl!(CheckedShr, checked_shr, u8);
+checked_shift_impl!(CheckedShr, checked_shr, u16);
+checked_shift_impl!(CheckedShr, checked_shr, u32);
+checked_shift_impl!(CheckedShr, checked_shr, u64);
+checked_shift_impl!(CheckedShr, checked_shr, usize);
+checked_shift_impl!(CheckedShr, checked_shr, u128);
+
+checked_shift_impl!(CheckedShr, checked_shr, i8);
+checked_shift_impl!(CheckedShr, checked_shr, i16);
+checked_shift_impl!(CheckedShr, checked_shr, i32);
+checked_shift_impl!(CheckedShr, checked_shr, i64);
+checked_shift_impl!(CheckedShr, checked_shr, isize);
+checked_shift_impl!(CheckedShr, checked_shr, i128);
diff --git a/vendor/num-traits/src/ops/euclid.rs b/vendor/num-traits/src/ops/euclid.rs
new file mode 100644
index 0000000..4547fee
--- /dev/null
+++ b/vendor/num-traits/src/ops/euclid.rs
@@ -0,0 +1,339 @@
+use core::ops::{Div, Rem};
+
+pub trait Euclid: Sized + Div<Self, Output = Self> + Rem<Self, Output = Self> {
+ /// Calculates Euclidean division, the matching method for `rem_euclid`.
+ ///
+ /// This computes the integer `n` such that
+ /// `self = n * v + self.rem_euclid(v)`.
+ /// In other words, the result is `self / v` rounded to the integer `n`
+ /// such that `self >= n * v`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::Euclid;
+ ///
+ /// let a: i32 = 7;
+ /// let b: i32 = 4;
+ /// assert_eq!(Euclid::div_euclid(&a, &b), 1); // 7 > 4 * 1
+ /// assert_eq!(Euclid::div_euclid(&-a, &b), -2); // -7 >= 4 * -2
+ /// assert_eq!(Euclid::div_euclid(&a, &-b), -1); // 7 >= -4 * -1
+ /// assert_eq!(Euclid::div_euclid(&-a, &-b), 2); // -7 >= -4 * 2
+ /// ```
+ fn div_euclid(&self, v: &Self) -> Self;
+
+ /// Calculates the least nonnegative remainder of `self (mod v)`.
+ ///
+ /// In particular, the return value `r` satisfies `0.0 <= r < v.abs()` in
+ /// most cases. However, due to a floating point round-off error it can
+ /// result in `r == v.abs()`, violating the mathematical definition, if
+ /// `self` is much smaller than `v.abs()` in magnitude and `self < 0.0`.
+ /// This result is not an element of the function's codomain, but it is the
+ /// closest floating point number in the real numbers and thus fulfills the
+ /// property `self == self.div_euclid(v) * v + self.rem_euclid(v)`
+ /// approximatively.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::Euclid;
+ ///
+ /// let a: i32 = 7;
+ /// let b: i32 = 4;
+ /// assert_eq!(Euclid::rem_euclid(&a, &b), 3);
+ /// assert_eq!(Euclid::rem_euclid(&-a, &b), 1);
+ /// assert_eq!(Euclid::rem_euclid(&a, &-b), 3);
+ /// assert_eq!(Euclid::rem_euclid(&-a, &-b), 1);
+ /// ```
+ fn rem_euclid(&self, v: &Self) -> Self;
+}
+
+macro_rules! euclid_forward_impl {
+ ($($t:ty)*) => {$(
+ #[cfg(has_div_euclid)]
+ impl Euclid for $t {
+ #[inline]
+ fn div_euclid(&self, v: &$t) -> Self {
+ <$t>::div_euclid(*self, *v)
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &$t) -> Self {
+ <$t>::rem_euclid(*self, *v)
+ }
+ }
+ )*}
+}
+
+macro_rules! euclid_int_impl {
+ ($($t:ty)*) => {$(
+ euclid_forward_impl!($t);
+
+ #[cfg(not(has_div_euclid))]
+ impl Euclid for $t {
+ #[inline]
+ fn div_euclid(&self, v: &$t) -> Self {
+ let q = self / v;
+ if self % v < 0 {
+ return if *v > 0 { q - 1 } else { q + 1 }
+ }
+ q
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &$t) -> Self {
+ let r = self % v;
+ if r < 0 {
+ if *v < 0 {
+ r - v
+ } else {
+ r + v
+ }
+ } else {
+ r
+ }
+ }
+ }
+ )*}
+}
+
+macro_rules! euclid_uint_impl {
+ ($($t:ty)*) => {$(
+ euclid_forward_impl!($t);
+
+ #[cfg(not(has_div_euclid))]
+ impl Euclid for $t {
+ #[inline]
+ fn div_euclid(&self, v: &$t) -> Self {
+ self / v
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &$t) -> Self {
+ self % v
+ }
+ }
+ )*}
+}
+
+euclid_int_impl!(isize i8 i16 i32 i64 i128);
+euclid_uint_impl!(usize u8 u16 u32 u64 u128);
+
+#[cfg(all(has_div_euclid, feature = "std"))]
+euclid_forward_impl!(f32 f64);
+
+#[cfg(not(all(has_div_euclid, feature = "std")))]
+impl Euclid for f32 {
+ #[inline]
+ fn div_euclid(&self, v: &f32) -> f32 {
+ let q = <f32 as crate::float::FloatCore>::trunc(self / v);
+ if self % v < 0.0 {
+ return if *v > 0.0 { q - 1.0 } else { q + 1.0 };
+ }
+ q
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &f32) -> f32 {
+ let r = self % v;
+ if r < 0.0 {
+ r + <f32 as crate::float::FloatCore>::abs(*v)
+ } else {
+ r
+ }
+ }
+}
+
+#[cfg(not(all(has_div_euclid, feature = "std")))]
+impl Euclid for f64 {
+ #[inline]
+ fn div_euclid(&self, v: &f64) -> f64 {
+ let q = <f64 as crate::float::FloatCore>::trunc(self / v);
+ if self % v < 0.0 {
+ return if *v > 0.0 { q - 1.0 } else { q + 1.0 };
+ }
+ q
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &f64) -> f64 {
+ let r = self % v;
+ if r < 0.0 {
+ r + <f64 as crate::float::FloatCore>::abs(*v)
+ } else {
+ r
+ }
+ }
+}
+
+pub trait CheckedEuclid: Euclid {
+ /// Performs Euclidean division that returns `None` instead of panicking on division by zero
+ /// and instead of wrapping around on underflow and overflow.
+ fn checked_div_euclid(&self, v: &Self) -> Option<Self>;
+
+ /// Finds the Euclidean remainder of dividing two numbers, checking for underflow, overflow and
+ /// division by zero. If any of that happens, `None` is returned.
+ fn checked_rem_euclid(&self, v: &Self) -> Option<Self>;
+}
+
+macro_rules! checked_euclid_forward_impl {
+ ($($t:ty)*) => {$(
+ #[cfg(has_div_euclid)]
+ impl CheckedEuclid for $t {
+ #[inline]
+ fn checked_div_euclid(&self, v: &$t) -> Option<Self> {
+ <$t>::checked_div_euclid(*self, *v)
+ }
+
+ #[inline]
+ fn checked_rem_euclid(&self, v: &$t) -> Option<Self> {
+ <$t>::checked_rem_euclid(*self, *v)
+ }
+ }
+ )*}
+}
+
+macro_rules! checked_euclid_int_impl {
+ ($($t:ty)*) => {$(
+ checked_euclid_forward_impl!($t);
+
+ #[cfg(not(has_div_euclid))]
+ impl CheckedEuclid for $t {
+ #[inline]
+ fn checked_div_euclid(&self, v: &$t) -> Option<$t> {
+ if *v == 0 || (*self == Self::min_value() && *v == -1) {
+ None
+ } else {
+ Some(Euclid::div_euclid(self, v))
+ }
+ }
+
+ #[inline]
+ fn checked_rem_euclid(&self, v: &$t) -> Option<$t> {
+ if *v == 0 || (*self == Self::min_value() && *v == -1) {
+ None
+ } else {
+ Some(Euclid::rem_euclid(self, v))
+ }
+ }
+ }
+ )*}
+}
+
+macro_rules! checked_euclid_uint_impl {
+ ($($t:ty)*) => {$(
+ checked_euclid_forward_impl!($t);
+
+ #[cfg(not(has_div_euclid))]
+ impl CheckedEuclid for $t {
+ #[inline]
+ fn checked_div_euclid(&self, v: &$t) -> Option<$t> {
+ if *v == 0 {
+ None
+ } else {
+ Some(Euclid::div_euclid(self, v))
+ }
+ }
+
+ #[inline]
+ fn checked_rem_euclid(&self, v: &$t) -> Option<$t> {
+ if *v == 0 {
+ None
+ } else {
+ Some(Euclid::rem_euclid(self, v))
+ }
+ }
+ }
+ )*}
+}
+
+checked_euclid_int_impl!(isize i8 i16 i32 i64 i128);
+checked_euclid_uint_impl!(usize u8 u16 u32 u64 u128);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn euclid_unsigned() {
+ macro_rules! test_euclid {
+ ($($t:ident)+) => {
+ $(
+ {
+ let x: $t = 10;
+ let y: $t = 3;
+ assert_eq!(Euclid::div_euclid(&x, &y), 3);
+ assert_eq!(Euclid::rem_euclid(&x, &y), 1);
+ }
+ )+
+ };
+ }
+
+ test_euclid!(usize u8 u16 u32 u64);
+ }
+
+ #[test]
+ fn euclid_signed() {
+ macro_rules! test_euclid {
+ ($($t:ident)+) => {
+ $(
+ {
+ let x: $t = 10;
+ let y: $t = -3;
+ assert_eq!(Euclid::div_euclid(&x, &y), -3);
+ assert_eq!(Euclid::div_euclid(&-x, &y), 4);
+ assert_eq!(Euclid::rem_euclid(&x, &y), 1);
+ assert_eq!(Euclid::rem_euclid(&-x, &y), 2);
+ let x: $t = $t::min_value() + 1;
+ let y: $t = -1;
+ assert_eq!(Euclid::div_euclid(&x, &y), $t::max_value());
+ }
+ )+
+ };
+ }
+
+ test_euclid!(isize i8 i16 i32 i64 i128);
+ }
+
+ #[test]
+ fn euclid_float() {
+ macro_rules! test_euclid {
+ ($($t:ident)+) => {
+ $(
+ {
+ let x: $t = 12.1;
+ let y: $t = 3.2;
+ assert!(Euclid::div_euclid(&x, &y) * y + Euclid::rem_euclid(&x, &y) - x
+ <= 46.4 * <$t as crate::float::FloatCore>::epsilon());
+ assert!(Euclid::div_euclid(&x, &-y) * -y + Euclid::rem_euclid(&x, &-y) - x
+ <= 46.4 * <$t as crate::float::FloatCore>::epsilon());
+ assert!(Euclid::div_euclid(&-x, &y) * y + Euclid::rem_euclid(&-x, &y) + x
+ <= 46.4 * <$t as crate::float::FloatCore>::epsilon());
+ assert!(Euclid::div_euclid(&-x, &-y) * -y + Euclid::rem_euclid(&-x, &-y) + x
+ <= 46.4 * <$t as crate::float::FloatCore>::epsilon());
+ }
+ )+
+ };
+ }
+
+ test_euclid!(f32 f64);
+ }
+
+ #[test]
+ fn euclid_checked() {
+ macro_rules! test_euclid_checked {
+ ($($t:ident)+) => {
+ $(
+ {
+ assert_eq!(CheckedEuclid::checked_div_euclid(&$t::min_value(), &-1), None);
+ assert_eq!(CheckedEuclid::checked_rem_euclid(&$t::min_value(), &-1), None);
+ assert_eq!(CheckedEuclid::checked_div_euclid(&1, &0), None);
+ assert_eq!(CheckedEuclid::checked_rem_euclid(&1, &0), None);
+ }
+ )+
+ };
+ }
+
+ test_euclid_checked!(isize i8 i16 i32 i64 i128);
+ }
+}
diff --git a/vendor/num-traits/src/ops/inv.rs b/vendor/num-traits/src/ops/inv.rs
new file mode 100644
index 0000000..7087d09
--- /dev/null
+++ b/vendor/num-traits/src/ops/inv.rs
@@ -0,0 +1,47 @@
+/// Unary operator for retrieving the multiplicative inverse, or reciprocal, of a value.
+pub trait Inv {
+ /// The result after applying the operator.
+ type Output;
+
+ /// Returns the multiplicative inverse of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::f64::INFINITY;
+ /// use num_traits::Inv;
+ ///
+ /// assert_eq!(7.0.inv() * 7.0, 1.0);
+ /// assert_eq!((-0.0).inv(), -INFINITY);
+ /// ```
+ fn inv(self) -> Self::Output;
+}
+
+impl Inv for f32 {
+ type Output = f32;
+ #[inline]
+ fn inv(self) -> f32 {
+ 1.0 / self
+ }
+}
+impl Inv for f64 {
+ type Output = f64;
+ #[inline]
+ fn inv(self) -> f64 {
+ 1.0 / self
+ }
+}
+impl<'a> Inv for &'a f32 {
+ type Output = f32;
+ #[inline]
+ fn inv(self) -> f32 {
+ 1.0 / *self
+ }
+}
+impl<'a> Inv for &'a f64 {
+ type Output = f64;
+ #[inline]
+ fn inv(self) -> f64 {
+ 1.0 / *self
+ }
+}
diff --git a/vendor/num-traits/src/ops/mod.rs b/vendor/num-traits/src/ops/mod.rs
new file mode 100644
index 0000000..2128d86
--- /dev/null
+++ b/vendor/num-traits/src/ops/mod.rs
@@ -0,0 +1,8 @@
+pub mod bytes;
+pub mod checked;
+pub mod euclid;
+pub mod inv;
+pub mod mul_add;
+pub mod overflowing;
+pub mod saturating;
+pub mod wrapping;
diff --git a/vendor/num-traits/src/ops/mul_add.rs b/vendor/num-traits/src/ops/mul_add.rs
new file mode 100644
index 0000000..51beb55
--- /dev/null
+++ b/vendor/num-traits/src/ops/mul_add.rs
@@ -0,0 +1,149 @@
+/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
+/// error, yielding a more accurate result than an unfused multiply-add.
+///
+/// Using `mul_add` can be more performant than an unfused multiply-add if
+/// the target architecture has a dedicated `fma` CPU instruction.
+///
+/// Note that `A` and `B` are `Self` by default, but this is not mandatory.
+///
+/// # Example
+///
+/// ```
+/// use std::f32;
+///
+/// let m = 10.0_f32;
+/// let x = 4.0_f32;
+/// let b = 60.0_f32;
+///
+/// // 100.0
+/// let abs_difference = (m.mul_add(x, b) - (m*x + b)).abs();
+///
+/// assert!(abs_difference <= 100.0 * f32::EPSILON);
+/// ```
+pub trait MulAdd<A = Self, B = Self> {
+ /// The resulting type after applying the fused multiply-add.
+ type Output;
+
+ /// Performs the fused multiply-add operation `(self * a) + b`
+ fn mul_add(self, a: A, b: B) -> Self::Output;
+}
+
+/// The fused multiply-add assignment operation `*self = (*self * a) + b`
+pub trait MulAddAssign<A = Self, B = Self> {
+ /// Performs the fused multiply-add assignment operation `*self = (*self * a) + b`
+ fn mul_add_assign(&mut self, a: A, b: B);
+}
+
+#[cfg(any(feature = "std", feature = "libm"))]
+impl MulAdd<f32, f32> for f32 {
+ type Output = Self;
+
+ #[inline]
+ fn mul_add(self, a: Self, b: Self) -> Self::Output {
+ <Self as crate::Float>::mul_add(self, a, b)
+ }
+}
+
+#[cfg(any(feature = "std", feature = "libm"))]
+impl MulAdd<f64, f64> for f64 {
+ type Output = Self;
+
+ #[inline]
+ fn mul_add(self, a: Self, b: Self) -> Self::Output {
+ <Self as crate::Float>::mul_add(self, a, b)
+ }
+}
+
+macro_rules! mul_add_impl {
+ ($trait_name:ident for $($t:ty)*) => {$(
+ impl $trait_name for $t {
+ type Output = Self;
+
+ #[inline]
+ fn mul_add(self, a: Self, b: Self) -> Self::Output {
+ (self * a) + b
+ }
+ }
+ )*}
+}
+
+mul_add_impl!(MulAdd for isize i8 i16 i32 i64 i128);
+mul_add_impl!(MulAdd for usize u8 u16 u32 u64 u128);
+
+#[cfg(any(feature = "std", feature = "libm"))]
+impl MulAddAssign<f32, f32> for f32 {
+ #[inline]
+ fn mul_add_assign(&mut self, a: Self, b: Self) {
+ *self = <Self as crate::Float>::mul_add(*self, a, b)
+ }
+}
+
+#[cfg(any(feature = "std", feature = "libm"))]
+impl MulAddAssign<f64, f64> for f64 {
+ #[inline]
+ fn mul_add_assign(&mut self, a: Self, b: Self) {
+ *self = <Self as crate::Float>::mul_add(*self, a, b)
+ }
+}
+
+macro_rules! mul_add_assign_impl {
+ ($trait_name:ident for $($t:ty)*) => {$(
+ impl $trait_name for $t {
+ #[inline]
+ fn mul_add_assign(&mut self, a: Self, b: Self) {
+ *self = (*self * a) + b
+ }
+ }
+ )*}
+}
+
+mul_add_assign_impl!(MulAddAssign for isize i8 i16 i32 i64 i128);
+mul_add_assign_impl!(MulAddAssign for usize u8 u16 u32 u64 u128);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn mul_add_integer() {
+ macro_rules! test_mul_add {
+ ($($t:ident)+) => {
+ $(
+ {
+ let m: $t = 2;
+ let x: $t = 3;
+ let b: $t = 4;
+
+ assert_eq!(MulAdd::mul_add(m, x, b), (m*x + b));
+ }
+ )+
+ };
+ }
+
+ test_mul_add!(usize u8 u16 u32 u64 isize i8 i16 i32 i64);
+ }
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn mul_add_float() {
+ macro_rules! test_mul_add {
+ ($($t:ident)+) => {
+ $(
+ {
+ use core::$t;
+
+ let m: $t = 12.0;
+ let x: $t = 3.4;
+ let b: $t = 5.6;
+
+ let abs_difference = (MulAdd::mul_add(m, x, b) - (m*x + b)).abs();
+
+ assert!(abs_difference <= 46.4 * $t::EPSILON);
+ }
+ )+
+ };
+ }
+
+ test_mul_add!(f32 f64);
+ }
+}
diff --git a/vendor/num-traits/src/ops/overflowing.rs b/vendor/num-traits/src/ops/overflowing.rs
new file mode 100644
index 0000000..c7a35a5
--- /dev/null
+++ b/vendor/num-traits/src/ops/overflowing.rs
@@ -0,0 +1,96 @@
+use core::ops::{Add, Mul, Sub};
+use core::{i128, i16, i32, i64, i8, isize};
+use core::{u128, u16, u32, u64, u8, usize};
+
+macro_rules! overflowing_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, v: &Self) -> (Self, bool) {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+}
+
+/// Performs addition with a flag for overflow.
+pub trait OverflowingAdd: Sized + Add<Self, Output = Self> {
+ /// Returns a tuple of the sum along with a boolean indicating whether an arithmetic overflow would occur.
+ /// If an overflow would have occurred then the wrapped value is returned.
+ fn overflowing_add(&self, v: &Self) -> (Self, bool);
+}
+
+overflowing_impl!(OverflowingAdd, overflowing_add, u8);
+overflowing_impl!(OverflowingAdd, overflowing_add, u16);
+overflowing_impl!(OverflowingAdd, overflowing_add, u32);
+overflowing_impl!(OverflowingAdd, overflowing_add, u64);
+overflowing_impl!(OverflowingAdd, overflowing_add, usize);
+overflowing_impl!(OverflowingAdd, overflowing_add, u128);
+
+overflowing_impl!(OverflowingAdd, overflowing_add, i8);
+overflowing_impl!(OverflowingAdd, overflowing_add, i16);
+overflowing_impl!(OverflowingAdd, overflowing_add, i32);
+overflowing_impl!(OverflowingAdd, overflowing_add, i64);
+overflowing_impl!(OverflowingAdd, overflowing_add, isize);
+overflowing_impl!(OverflowingAdd, overflowing_add, i128);
+
+/// Performs subtraction with a flag for overflow.
+pub trait OverflowingSub: Sized + Sub<Self, Output = Self> {
+ /// Returns a tuple of the difference along with a boolean indicating whether an arithmetic overflow would occur.
+ /// If an overflow would have occurred then the wrapped value is returned.
+ fn overflowing_sub(&self, v: &Self) -> (Self, bool);
+}
+
+overflowing_impl!(OverflowingSub, overflowing_sub, u8);
+overflowing_impl!(OverflowingSub, overflowing_sub, u16);
+overflowing_impl!(OverflowingSub, overflowing_sub, u32);
+overflowing_impl!(OverflowingSub, overflowing_sub, u64);
+overflowing_impl!(OverflowingSub, overflowing_sub, usize);
+overflowing_impl!(OverflowingSub, overflowing_sub, u128);
+
+overflowing_impl!(OverflowingSub, overflowing_sub, i8);
+overflowing_impl!(OverflowingSub, overflowing_sub, i16);
+overflowing_impl!(OverflowingSub, overflowing_sub, i32);
+overflowing_impl!(OverflowingSub, overflowing_sub, i64);
+overflowing_impl!(OverflowingSub, overflowing_sub, isize);
+overflowing_impl!(OverflowingSub, overflowing_sub, i128);
+
+/// Performs multiplication with a flag for overflow.
+pub trait OverflowingMul: Sized + Mul<Self, Output = Self> {
+ /// Returns a tuple of the product along with a boolean indicating whether an arithmetic overflow would occur.
+ /// If an overflow would have occurred then the wrapped value is returned.
+ fn overflowing_mul(&self, v: &Self) -> (Self, bool);
+}
+
+overflowing_impl!(OverflowingMul, overflowing_mul, u8);
+overflowing_impl!(OverflowingMul, overflowing_mul, u16);
+overflowing_impl!(OverflowingMul, overflowing_mul, u32);
+overflowing_impl!(OverflowingMul, overflowing_mul, u64);
+overflowing_impl!(OverflowingMul, overflowing_mul, usize);
+overflowing_impl!(OverflowingMul, overflowing_mul, u128);
+
+overflowing_impl!(OverflowingMul, overflowing_mul, i8);
+overflowing_impl!(OverflowingMul, overflowing_mul, i16);
+overflowing_impl!(OverflowingMul, overflowing_mul, i32);
+overflowing_impl!(OverflowingMul, overflowing_mul, i64);
+overflowing_impl!(OverflowingMul, overflowing_mul, isize);
+overflowing_impl!(OverflowingMul, overflowing_mul, i128);
+
+#[test]
+fn test_overflowing_traits() {
+ fn overflowing_add<T: OverflowingAdd>(a: T, b: T) -> (T, bool) {
+ a.overflowing_add(&b)
+ }
+ fn overflowing_sub<T: OverflowingSub>(a: T, b: T) -> (T, bool) {
+ a.overflowing_sub(&b)
+ }
+ fn overflowing_mul<T: OverflowingMul>(a: T, b: T) -> (T, bool) {
+ a.overflowing_mul(&b)
+ }
+ assert_eq!(overflowing_add(5i16, 2), (7, false));
+ assert_eq!(overflowing_add(i16::MAX, 1), (i16::MIN, true));
+ assert_eq!(overflowing_sub(5i16, 2), (3, false));
+ assert_eq!(overflowing_sub(i16::MIN, 1), (i16::MAX, true));
+ assert_eq!(overflowing_mul(5i16, 2), (10, false));
+ assert_eq!(overflowing_mul(1_000_000_000i32, 10), (1410065408, true));
+}
diff --git a/vendor/num-traits/src/ops/saturating.rs b/vendor/num-traits/src/ops/saturating.rs
new file mode 100644
index 0000000..16a0045
--- /dev/null
+++ b/vendor/num-traits/src/ops/saturating.rs
@@ -0,0 +1,130 @@
+use core::ops::{Add, Mul, Sub};
+
+/// Saturating math operations. Deprecated, use `SaturatingAdd`, `SaturatingSub` and
+/// `SaturatingMul` instead.
+pub trait Saturating {
+ /// Saturating addition operator.
+ /// Returns a+b, saturating at the numeric bounds instead of overflowing.
+ fn saturating_add(self, v: Self) -> Self;
+
+ /// Saturating subtraction operator.
+ /// Returns a-b, saturating at the numeric bounds instead of overflowing.
+ fn saturating_sub(self, v: Self) -> Self;
+}
+
+macro_rules! deprecated_saturating_impl {
+ ($trait_name:ident for $($t:ty)*) => {$(
+ impl $trait_name for $t {
+ #[inline]
+ fn saturating_add(self, v: Self) -> Self {
+ Self::saturating_add(self, v)
+ }
+
+ #[inline]
+ fn saturating_sub(self, v: Self) -> Self {
+ Self::saturating_sub(self, v)
+ }
+ }
+ )*}
+}
+
+deprecated_saturating_impl!(Saturating for isize i8 i16 i32 i64 i128);
+deprecated_saturating_impl!(Saturating for usize u8 u16 u32 u64 u128);
+
+macro_rules! saturating_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, v: &Self) -> Self {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+}
+
+/// Performs addition that saturates at the numeric bounds instead of overflowing.
+pub trait SaturatingAdd: Sized + Add<Self, Output = Self> {
+ /// Saturating addition. Computes `self + other`, saturating at the relevant high or low boundary of
+ /// the type.
+ fn saturating_add(&self, v: &Self) -> Self;
+}
+
+saturating_impl!(SaturatingAdd, saturating_add, u8);
+saturating_impl!(SaturatingAdd, saturating_add, u16);
+saturating_impl!(SaturatingAdd, saturating_add, u32);
+saturating_impl!(SaturatingAdd, saturating_add, u64);
+saturating_impl!(SaturatingAdd, saturating_add, usize);
+saturating_impl!(SaturatingAdd, saturating_add, u128);
+
+saturating_impl!(SaturatingAdd, saturating_add, i8);
+saturating_impl!(SaturatingAdd, saturating_add, i16);
+saturating_impl!(SaturatingAdd, saturating_add, i32);
+saturating_impl!(SaturatingAdd, saturating_add, i64);
+saturating_impl!(SaturatingAdd, saturating_add, isize);
+saturating_impl!(SaturatingAdd, saturating_add, i128);
+
+/// Performs subtraction that saturates at the numeric bounds instead of overflowing.
+pub trait SaturatingSub: Sized + Sub<Self, Output = Self> {
+ /// Saturating subtraction. Computes `self - other`, saturating at the relevant high or low boundary of
+ /// the type.
+ fn saturating_sub(&self, v: &Self) -> Self;
+}
+
+saturating_impl!(SaturatingSub, saturating_sub, u8);
+saturating_impl!(SaturatingSub, saturating_sub, u16);
+saturating_impl!(SaturatingSub, saturating_sub, u32);
+saturating_impl!(SaturatingSub, saturating_sub, u64);
+saturating_impl!(SaturatingSub, saturating_sub, usize);
+saturating_impl!(SaturatingSub, saturating_sub, u128);
+
+saturating_impl!(SaturatingSub, saturating_sub, i8);
+saturating_impl!(SaturatingSub, saturating_sub, i16);
+saturating_impl!(SaturatingSub, saturating_sub, i32);
+saturating_impl!(SaturatingSub, saturating_sub, i64);
+saturating_impl!(SaturatingSub, saturating_sub, isize);
+saturating_impl!(SaturatingSub, saturating_sub, i128);
+
+/// Performs multiplication that saturates at the numeric bounds instead of overflowing.
+pub trait SaturatingMul: Sized + Mul<Self, Output = Self> {
+ /// Saturating multiplication. Computes `self * other`, saturating at the relevant high or low boundary of
+ /// the type.
+ fn saturating_mul(&self, v: &Self) -> Self;
+}
+
+saturating_impl!(SaturatingMul, saturating_mul, u8);
+saturating_impl!(SaturatingMul, saturating_mul, u16);
+saturating_impl!(SaturatingMul, saturating_mul, u32);
+saturating_impl!(SaturatingMul, saturating_mul, u64);
+saturating_impl!(SaturatingMul, saturating_mul, usize);
+saturating_impl!(SaturatingMul, saturating_mul, u128);
+
+saturating_impl!(SaturatingMul, saturating_mul, i8);
+saturating_impl!(SaturatingMul, saturating_mul, i16);
+saturating_impl!(SaturatingMul, saturating_mul, i32);
+saturating_impl!(SaturatingMul, saturating_mul, i64);
+saturating_impl!(SaturatingMul, saturating_mul, isize);
+saturating_impl!(SaturatingMul, saturating_mul, i128);
+
+// TODO: add SaturatingNeg for signed integer primitives once the saturating_neg() API is stable.
+
+#[test]
+fn test_saturating_traits() {
+ fn saturating_add<T: SaturatingAdd>(a: T, b: T) -> T {
+ a.saturating_add(&b)
+ }
+ fn saturating_sub<T: SaturatingSub>(a: T, b: T) -> T {
+ a.saturating_sub(&b)
+ }
+ fn saturating_mul<T: SaturatingMul>(a: T, b: T) -> T {
+ a.saturating_mul(&b)
+ }
+ assert_eq!(saturating_add(255, 1), 255u8);
+ assert_eq!(saturating_add(127, 1), 127i8);
+ assert_eq!(saturating_add(-128, -1), -128i8);
+ assert_eq!(saturating_sub(0, 1), 0u8);
+ assert_eq!(saturating_sub(-128, 1), -128i8);
+ assert_eq!(saturating_sub(127, -1), 127i8);
+ assert_eq!(saturating_mul(255, 2), 255u8);
+ assert_eq!(saturating_mul(127, 2), 127i8);
+ assert_eq!(saturating_mul(-128, 2), -128i8);
+}
diff --git a/vendor/num-traits/src/ops/wrapping.rs b/vendor/num-traits/src/ops/wrapping.rs
new file mode 100644
index 0000000..3a8b331
--- /dev/null
+++ b/vendor/num-traits/src/ops/wrapping.rs
@@ -0,0 +1,327 @@
+use core::num::Wrapping;
+use core::ops::{Add, Mul, Neg, Shl, Shr, Sub};
+
+macro_rules! wrapping_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, v: &Self) -> Self {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+ ($trait_name:ident, $method:ident, $t:ty, $rhs:ty) => {
+ impl $trait_name<$rhs> for $t {
+ #[inline]
+ fn $method(&self, v: &$rhs) -> Self {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+}
+
+/// Performs addition that wraps around on overflow.
+pub trait WrappingAdd: Sized + Add<Self, Output = Self> {
+ /// Wrapping (modular) addition. Computes `self + other`, wrapping around at the boundary of
+ /// the type.
+ fn wrapping_add(&self, v: &Self) -> Self;
+}
+
+wrapping_impl!(WrappingAdd, wrapping_add, u8);
+wrapping_impl!(WrappingAdd, wrapping_add, u16);
+wrapping_impl!(WrappingAdd, wrapping_add, u32);
+wrapping_impl!(WrappingAdd, wrapping_add, u64);
+wrapping_impl!(WrappingAdd, wrapping_add, usize);
+wrapping_impl!(WrappingAdd, wrapping_add, u128);
+
+wrapping_impl!(WrappingAdd, wrapping_add, i8);
+wrapping_impl!(WrappingAdd, wrapping_add, i16);
+wrapping_impl!(WrappingAdd, wrapping_add, i32);
+wrapping_impl!(WrappingAdd, wrapping_add, i64);
+wrapping_impl!(WrappingAdd, wrapping_add, isize);
+wrapping_impl!(WrappingAdd, wrapping_add, i128);
+
+/// Performs subtraction that wraps around on overflow.
+pub trait WrappingSub: Sized + Sub<Self, Output = Self> {
+ /// Wrapping (modular) subtraction. Computes `self - other`, wrapping around at the boundary
+ /// of the type.
+ fn wrapping_sub(&self, v: &Self) -> Self;
+}
+
+wrapping_impl!(WrappingSub, wrapping_sub, u8);
+wrapping_impl!(WrappingSub, wrapping_sub, u16);
+wrapping_impl!(WrappingSub, wrapping_sub, u32);
+wrapping_impl!(WrappingSub, wrapping_sub, u64);
+wrapping_impl!(WrappingSub, wrapping_sub, usize);
+wrapping_impl!(WrappingSub, wrapping_sub, u128);
+
+wrapping_impl!(WrappingSub, wrapping_sub, i8);
+wrapping_impl!(WrappingSub, wrapping_sub, i16);
+wrapping_impl!(WrappingSub, wrapping_sub, i32);
+wrapping_impl!(WrappingSub, wrapping_sub, i64);
+wrapping_impl!(WrappingSub, wrapping_sub, isize);
+wrapping_impl!(WrappingSub, wrapping_sub, i128);
+
+/// Performs multiplication that wraps around on overflow.
+pub trait WrappingMul: Sized + Mul<Self, Output = Self> {
+ /// Wrapping (modular) multiplication. Computes `self * other`, wrapping around at the boundary
+ /// of the type.
+ fn wrapping_mul(&self, v: &Self) -> Self;
+}
+
+wrapping_impl!(WrappingMul, wrapping_mul, u8);
+wrapping_impl!(WrappingMul, wrapping_mul, u16);
+wrapping_impl!(WrappingMul, wrapping_mul, u32);
+wrapping_impl!(WrappingMul, wrapping_mul, u64);
+wrapping_impl!(WrappingMul, wrapping_mul, usize);
+wrapping_impl!(WrappingMul, wrapping_mul, u128);
+
+wrapping_impl!(WrappingMul, wrapping_mul, i8);
+wrapping_impl!(WrappingMul, wrapping_mul, i16);
+wrapping_impl!(WrappingMul, wrapping_mul, i32);
+wrapping_impl!(WrappingMul, wrapping_mul, i64);
+wrapping_impl!(WrappingMul, wrapping_mul, isize);
+wrapping_impl!(WrappingMul, wrapping_mul, i128);
+
+macro_rules! wrapping_unary_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self) -> $t {
+ <$t>::$method(*self)
+ }
+ }
+ };
+}
+
+/// Performs a negation that does not panic.
+pub trait WrappingNeg: Sized {
+ /// Wrapping (modular) negation. Computes `-self`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// Since unsigned types do not have negative equivalents,
+ /// all applications of this function will wrap (except for `-0`).
+ /// For values smaller than the corresponding signed type's maximum,
+ /// the result is the same as casting the corresponding signed value.
+ /// Any larger values are equivalent to `MAX + 1 - (val - MAX - 1)` where
+ /// `MAX` is the corresponding signed type's maximum.
+ ///
+ /// ```
+ /// use num_traits::WrappingNeg;
+ ///
+ /// assert_eq!(100i8.wrapping_neg(), -100);
+ /// assert_eq!((-100i8).wrapping_neg(), 100);
+ /// assert_eq!((-128i8).wrapping_neg(), -128); // wrapped!
+ /// ```
+ fn wrapping_neg(&self) -> Self;
+}
+
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u8);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u16);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u32);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u64);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, usize);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u128);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i8);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i16);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i32);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i64);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, isize);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i128);
+
+macro_rules! wrapping_shift_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, rhs: u32) -> $t {
+ <$t>::$method(*self, rhs)
+ }
+ }
+ };
+}
+
+/// Performs a left shift that does not panic.
+pub trait WrappingShl: Sized + Shl<usize, Output = Self> {
+ /// Panic-free bitwise shift-left; yields `self << mask(rhs)`,
+ /// where `mask` removes any high order bits of `rhs` that would
+ /// cause the shift to exceed the bitwidth of the type.
+ ///
+ /// ```
+ /// use num_traits::WrappingShl;
+ ///
+ /// let x: u16 = 0x0001;
+ ///
+ /// assert_eq!(WrappingShl::wrapping_shl(&x, 0), 0x0001);
+ /// assert_eq!(WrappingShl::wrapping_shl(&x, 1), 0x0002);
+ /// assert_eq!(WrappingShl::wrapping_shl(&x, 15), 0x8000);
+ /// assert_eq!(WrappingShl::wrapping_shl(&x, 16), 0x0001);
+ /// ```
+ fn wrapping_shl(&self, rhs: u32) -> Self;
+}
+
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u8);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u16);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u32);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u64);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, usize);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u128);
+
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i8);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i16);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i32);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i64);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, isize);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i128);
+
+/// Performs a right shift that does not panic.
+pub trait WrappingShr: Sized + Shr<usize, Output = Self> {
+ /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`,
+ /// where `mask` removes any high order bits of `rhs` that would
+ /// cause the shift to exceed the bitwidth of the type.
+ ///
+ /// ```
+ /// use num_traits::WrappingShr;
+ ///
+ /// let x: u16 = 0x8000;
+ ///
+ /// assert_eq!(WrappingShr::wrapping_shr(&x, 0), 0x8000);
+ /// assert_eq!(WrappingShr::wrapping_shr(&x, 1), 0x4000);
+ /// assert_eq!(WrappingShr::wrapping_shr(&x, 15), 0x0001);
+ /// assert_eq!(WrappingShr::wrapping_shr(&x, 16), 0x8000);
+ /// ```
+ fn wrapping_shr(&self, rhs: u32) -> Self;
+}
+
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u8);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u16);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u32);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u64);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, usize);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u128);
+
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i8);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i16);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i32);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i64);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, isize);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i128);
+
+// Well this is a bit funny, but all the more appropriate.
+impl<T: WrappingAdd> WrappingAdd for Wrapping<T>
+where
+ Wrapping<T>: Add<Output = Wrapping<T>>,
+{
+ fn wrapping_add(&self, v: &Self) -> Self {
+ Wrapping(self.0.wrapping_add(&v.0))
+ }
+}
+impl<T: WrappingSub> WrappingSub for Wrapping<T>
+where
+ Wrapping<T>: Sub<Output = Wrapping<T>>,
+{
+ fn wrapping_sub(&self, v: &Self) -> Self {
+ Wrapping(self.0.wrapping_sub(&v.0))
+ }
+}
+impl<T: WrappingMul> WrappingMul for Wrapping<T>
+where
+ Wrapping<T>: Mul<Output = Wrapping<T>>,
+{
+ fn wrapping_mul(&self, v: &Self) -> Self {
+ Wrapping(self.0.wrapping_mul(&v.0))
+ }
+}
+impl<T: WrappingNeg> WrappingNeg for Wrapping<T>
+where
+ Wrapping<T>: Neg<Output = Wrapping<T>>,
+{
+ fn wrapping_neg(&self) -> Self {
+ Wrapping(self.0.wrapping_neg())
+ }
+}
+impl<T: WrappingShl> WrappingShl for Wrapping<T>
+where
+ Wrapping<T>: Shl<usize, Output = Wrapping<T>>,
+{
+ fn wrapping_shl(&self, rhs: u32) -> Self {
+ Wrapping(self.0.wrapping_shl(rhs))
+ }
+}
+impl<T: WrappingShr> WrappingShr for Wrapping<T>
+where
+ Wrapping<T>: Shr<usize, Output = Wrapping<T>>,
+{
+ fn wrapping_shr(&self, rhs: u32) -> Self {
+ Wrapping(self.0.wrapping_shr(rhs))
+ }
+}
+
+#[test]
+fn test_wrapping_traits() {
+ fn wrapping_add<T: WrappingAdd>(a: T, b: T) -> T {
+ a.wrapping_add(&b)
+ }
+ fn wrapping_sub<T: WrappingSub>(a: T, b: T) -> T {
+ a.wrapping_sub(&b)
+ }
+ fn wrapping_mul<T: WrappingMul>(a: T, b: T) -> T {
+ a.wrapping_mul(&b)
+ }
+ fn wrapping_neg<T: WrappingNeg>(a: T) -> T {
+ a.wrapping_neg()
+ }
+ fn wrapping_shl<T: WrappingShl>(a: T, b: u32) -> T {
+ a.wrapping_shl(b)
+ }
+ fn wrapping_shr<T: WrappingShr>(a: T, b: u32) -> T {
+ a.wrapping_shr(b)
+ }
+ assert_eq!(wrapping_add(255, 1), 0u8);
+ assert_eq!(wrapping_sub(0, 1), 255u8);
+ assert_eq!(wrapping_mul(255, 2), 254u8);
+ assert_eq!(wrapping_neg(255), 1u8);
+ assert_eq!(wrapping_shl(255, 8), 255u8);
+ assert_eq!(wrapping_shr(255, 8), 255u8);
+ assert_eq!(wrapping_add(255, 1), (Wrapping(255u8) + Wrapping(1u8)).0);
+ assert_eq!(wrapping_sub(0, 1), (Wrapping(0u8) - Wrapping(1u8)).0);
+ assert_eq!(wrapping_mul(255, 2), (Wrapping(255u8) * Wrapping(2u8)).0);
+ assert_eq!(wrapping_neg(255), (-Wrapping(255u8)).0);
+ assert_eq!(wrapping_shl(255, 8), (Wrapping(255u8) << 8).0);
+ assert_eq!(wrapping_shr(255, 8), (Wrapping(255u8) >> 8).0);
+}
+
+#[test]
+fn wrapping_is_wrappingadd() {
+ fn require_wrappingadd<T: WrappingAdd>(_: &T) {}
+ require_wrappingadd(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingsub() {
+ fn require_wrappingsub<T: WrappingSub>(_: &T) {}
+ require_wrappingsub(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingmul() {
+ fn require_wrappingmul<T: WrappingMul>(_: &T) {}
+ require_wrappingmul(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingneg() {
+ fn require_wrappingneg<T: WrappingNeg>(_: &T) {}
+ require_wrappingneg(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingshl() {
+ fn require_wrappingshl<T: WrappingShl>(_: &T) {}
+ require_wrappingshl(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingshr() {
+ fn require_wrappingshr<T: WrappingShr>(_: &T) {}
+ require_wrappingshr(&Wrapping(42));
+}