Diffstat (limited to 'vendor/serde_json/src/lexical')
-rw-r--r--  vendor/serde_json/src/lexical/algorithm.rs        196
-rw-r--r--  vendor/serde_json/src/lexical/bhcomp.rs           218
-rw-r--r--  vendor/serde_json/src/lexical/bignum.rs            33
-rw-r--r--  vendor/serde_json/src/lexical/cached.rs            82
-rw-r--r--  vendor/serde_json/src/lexical/cached_float80.rs   206
-rw-r--r--  vendor/serde_json/src/lexical/digit.rs             18
-rw-r--r--  vendor/serde_json/src/lexical/errors.rs           132
-rw-r--r--  vendor/serde_json/src/lexical/exponent.rs          50
-rw-r--r--  vendor/serde_json/src/lexical/float.rs            183
-rw-r--r--  vendor/serde_json/src/lexical/large_powers.rs       9
-rw-r--r--  vendor/serde_json/src/lexical/large_powers32.rs   183
-rw-r--r--  vendor/serde_json/src/lexical/large_powers64.rs   625
-rw-r--r--  vendor/serde_json/src/lexical/math.rs             886
-rw-r--r--  vendor/serde_json/src/lexical/mod.rs               38
-rw-r--r--  vendor/serde_json/src/lexical/num.rs              440
-rw-r--r--  vendor/serde_json/src/lexical/parse.rs             83
-rw-r--r--  vendor/serde_json/src/lexical/rounding.rs         231
-rw-r--r--  vendor/serde_json/src/lexical/shift.rs             46
-rw-r--r--  vendor/serde_json/src/lexical/small_powers.rs      70
19 files changed, 0 insertions, 3729 deletions
diff --git a/vendor/serde_json/src/lexical/algorithm.rs b/vendor/serde_json/src/lexical/algorithm.rs
deleted file mode 100644
index eaa5e7e..0000000
--- a/vendor/serde_json/src/lexical/algorithm.rs
+++ /dev/null
@@ -1,196 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Algorithms to efficiently convert strings to floats.
-
-use super::bhcomp::*;
-use super::cached::*;
-use super::errors::*;
-use super::float::ExtendedFloat;
-use super::num::*;
-use super::small_powers::*;
-
-// FAST
-// ----
-
-/// Convert mantissa to exact value for a non-base2 power.
-///
- /// Returns the resulting float and whether the value can be represented exactly.
-pub(crate) fn fast_path<F>(mantissa: u64, exponent: i32) -> Option<F>
-where
- F: Float,
-{
- // `mantissa >> (F::MANTISSA_SIZE+1) != 0` effectively checks if the
- // value has no bits above the hidden bit, which is what we want.
- let (min_exp, max_exp) = F::exponent_limit();
- let shift_exp = F::mantissa_limit();
- let mantissa_size = F::MANTISSA_SIZE + 1;
- if mantissa == 0 {
- Some(F::ZERO)
- } else if mantissa >> mantissa_size != 0 {
- // Would require truncation of the mantissa.
- None
- } else if exponent == 0 {
- // 0 exponent, same as value, exact representation.
- let float = F::as_cast(mantissa);
- Some(float)
- } else if exponent >= min_exp && exponent <= max_exp {
- // Value can be exactly represented, return the value.
- // Do not use powi, since powi can incrementally introduce
- // error.
- let float = F::as_cast(mantissa);
- Some(float.pow10(exponent))
- } else if exponent >= 0 && exponent <= max_exp + shift_exp {
- // Check to see if we have a disguised fast-path, where the
- // number of digits in the mantissa is very small, and
- // so digits can be shifted from the exponent to the mantissa.
- // https://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
- let small_powers = POW10_64;
- let shift = exponent - max_exp;
- let power = small_powers[shift as usize];
-
- // Compute the product with the power. If it overflows,
- // return early; otherwise, if we didn't overshoot,
- // we can get an exact value.
- let value = match mantissa.checked_mul(power) {
- None => return None,
- Some(value) => value,
- };
- if value >> mantissa_size != 0 {
- None
- } else {
- // Use powi, since it's correct, and faster on
- // the fast-path.
- let float = F::as_cast(value);
- Some(float.pow10(max_exp))
- }
- } else {
- // Cannot be exactly represented, exponent too small or too big,
- // would require truncation.
- None
- }
-}
-
-// MODERATE
-// --------
-
-/// Multiply the floating-point by the exponent.
-///
- /// Multiply by pre-calculated powers of the base, modify the extended-
- /// float in place, and return whether the value can be represented
- /// accurately.
-fn multiply_exponent_extended<F>(fp: &mut ExtendedFloat, exponent: i32, truncated: bool) -> bool
-where
- F: Float,
-{
- let powers = ExtendedFloat::get_powers();
- let exponent = exponent.saturating_add(powers.bias);
- let small_index = exponent % powers.step;
- let large_index = exponent / powers.step;
- if exponent < 0 {
- // Guaranteed underflow (assign 0).
- fp.mant = 0;
- true
- } else if large_index as usize >= powers.large.len() {
- // Overflow (assign infinity)
- fp.mant = 1 << 63;
- fp.exp = 0x7FF;
- true
- } else {
- // Within the valid exponent range, multiply by the large and small
- // exponents and return the resulting value.
-
- // Track errors as a factor of the unit in the last place (ULP).
- let mut errors: u32 = 0;
- if truncated {
- errors += u64::error_halfscale();
- }
-
- // Multiply by the small power.
- // Check if we can directly multiply by an integer, if not,
- // use extended-precision multiplication.
- match fp
- .mant
- .overflowing_mul(powers.get_small_int(small_index as usize))
- {
- // Overflow, multiplication unsuccessful, go slow path.
- (_, true) => {
- fp.normalize();
- fp.imul(&powers.get_small(small_index as usize));
- errors += u64::error_halfscale();
- }
- // No overflow, multiplication successful.
- (mant, false) => {
- fp.mant = mant;
- fp.normalize();
- }
- }
-
- // Multiply by the large power
- fp.imul(&powers.get_large(large_index as usize));
- if errors > 0 {
- errors += 1;
- }
- errors += u64::error_halfscale();
-
- // Normalize the floating point (and the errors).
- let shift = fp.normalize();
- errors <<= shift;
-
- u64::error_is_accurate::<F>(errors, fp)
- }
-}
-
-/// Create a precise native float using an intermediate extended-precision float.
-///
- /// Return the float approximation and whether the value can be accurately
-/// represented with mantissa bits of precision.
-#[inline]
-pub(crate) fn moderate_path<F>(
- mantissa: u64,
- exponent: i32,
- truncated: bool,
-) -> (ExtendedFloat, bool)
-where
- F: Float,
-{
- let mut fp = ExtendedFloat {
- mant: mantissa,
- exp: 0,
- };
- let valid = multiply_exponent_extended::<F>(&mut fp, exponent, truncated);
- (fp, valid)
-}
-
-// FALLBACK
-// --------
-
-/// Fallback path when the fast path does not work.
-///
-/// Uses the moderate path, if applicable, otherwise, uses the slow path
-/// as required.
-pub(crate) fn fallback_path<F>(
- integer: &[u8],
- fraction: &[u8],
- mantissa: u64,
- exponent: i32,
- mantissa_exponent: i32,
- truncated: bool,
-) -> F
-where
- F: Float,
-{
- // Moderate path (use an extended 80-bit representation).
- let (fp, valid) = moderate_path::<F>(mantissa, mantissa_exponent, truncated);
- if valid {
- return fp.into_float::<F>();
- }
-
- // Slow path, fast path didn't work.
- let b = fp.into_downward_float::<F>();
- if b.is_special() {
- // We have a non-finite number, we get to leave early.
- b
- } else {
- bhcomp(b, integer, fraction, exponent)
- }
-}
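The fast path removed above follows the classic exact-conversion shortcut: when the decimal mantissa fits in the float's significand and the power of ten is itself exactly representable, a single multiplication or division is already correctly rounded, and the "disguised" variant additionally shifts spare digits from the exponent into the mantissa. A minimal standalone sketch of the plain case for f64, assuming the standard binary64 limits (2^53 mantissa bound, exact powers up to 10^22); this is not the deleted implementation itself:

    // Exact f64 powers of ten: 10^0 through 10^22 have no rounding error.
    const POW10: [f64; 23] = [
        1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11,
        1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, 1e20, 1e21, 1e22,
    ];

    /// Convert `mantissa * 10^exponent` exactly, or return None to defer to slower paths.
    fn fast_path_f64(mantissa: u64, exponent: i32) -> Option<f64> {
        if mantissa >= (1u64 << 53) {
            return None; // the mantissa itself would be truncated
        }
        let m = mantissa as f64;
        match exponent {
            0 => Some(m),
            1..=22 => Some(m * POW10[exponent as usize]),      // exact * exact is correctly rounded
            -22..=-1 => Some(m / POW10[(-exponent) as usize]), // so is division by an exact power
            _ => None,
        }
    }

    fn main() {
        assert_eq!(fast_path_f64(12345, -2), Some(123.45));
        assert_eq!(fast_path_f64(123, 5), Some(12_300_000.0));
        assert_eq!(fast_path_f64(1, 300), None); // out of fast-path range
    }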
diff --git a/vendor/serde_json/src/lexical/bhcomp.rs b/vendor/serde_json/src/lexical/bhcomp.rs
deleted file mode 100644
index 1f2a7bb..0000000
--- a/vendor/serde_json/src/lexical/bhcomp.rs
+++ /dev/null
@@ -1,218 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Compare the mantissa to the halfway representation of the float.
-//!
-//! Compares the actual significant digits of the mantissa to the
-//! theoretical digits from `b+h`, scaled into the proper range.
-
-use super::bignum::*;
-use super::digit::*;
-use super::exponent::*;
-use super::float::*;
-use super::math::*;
-use super::num::*;
-use super::rounding::*;
-use core::{cmp, mem};
-
-// MANTISSA
-
-/// Parse the full mantissa into a big integer.
-///
-/// Max digits is the maximum number of digits plus one.
-fn parse_mantissa<F>(integer: &[u8], fraction: &[u8]) -> Bigint
-where
- F: Float,
-{
- // Main loop
- let small_powers = POW10_LIMB;
- let step = small_powers.len() - 2;
- let max_digits = F::MAX_DIGITS - 1;
- let mut counter = 0;
- let mut value: Limb = 0;
- let mut i: usize = 0;
- let mut result = Bigint::default();
-
- // Iteratively process all the data in the mantissa.
- for &digit in integer.iter().chain(fraction) {
- // We've parsed the max digits using small values, add to bignum
- if counter == step {
- result.imul_small(small_powers[counter]);
- result.iadd_small(value);
- counter = 0;
- value = 0;
- }
-
- value *= 10;
- value += as_limb(to_digit(digit).unwrap());
-
- i += 1;
- counter += 1;
- if i == max_digits {
- break;
- }
- }
-
- // We will always have a remainder, as long as we entered the loop
- // at least once.
- if counter != 0 {
- result.imul_small(small_powers[counter]);
- result.iadd_small(value);
- }
-
- // If we have any remaining digits after the last value, we need
- // to add a 1 after the rest of the array, it doesn't matter where,
- // just move it up. This is good for the worst-possible float
- // representation.
- // Since we already trimmed trailing zeros, we know there has
- // to be a non-zero digit if there are any left.
- if i < integer.len() + fraction.len() {
- result.imul_small(10);
- result.iadd_small(1);
- }
-
- result
-}
-
-// FLOAT OPS
-
- /// Calculate `b` from a representation of `b` as a float.
-#[inline]
-pub(super) fn b_extended<F: Float>(f: F) -> ExtendedFloat {
- ExtendedFloat::from_float(f)
-}
-
- /// Calculate `b+h` from a representation of `b` as a float.
-#[inline]
-pub(super) fn bh_extended<F: Float>(f: F) -> ExtendedFloat {
- // None of these can overflow.
- let b = b_extended(f);
- ExtendedFloat {
- mant: (b.mant << 1) + 1,
- exp: b.exp - 1,
- }
-}
-
-// ROUNDING
-
- /// Custom round-nearest, tie-even algorithm for bhcomp.
-#[inline]
-fn round_nearest_tie_even(fp: &mut ExtendedFloat, shift: i32, is_truncated: bool) {
- let (mut is_above, mut is_halfway) = round_nearest(fp, shift);
- if is_halfway && is_truncated {
- is_above = true;
- is_halfway = false;
- }
- tie_even(fp, is_above, is_halfway);
-}
-
-// BHCOMP
-
-/// Calculate the mantissa for a big integer with a positive exponent.
-fn large_atof<F>(mantissa: Bigint, exponent: i32) -> F
-where
- F: Float,
-{
- let bits = mem::size_of::<u64>() * 8;
-
- // Simple, we just need to multiply by the power of the radix.
- // Now, we can calculate the mantissa and the exponent from this.
- // The binary exponent is the binary exponent for the mantissa
- // shifted to the hidden bit.
- let mut bigmant = mantissa;
- bigmant.imul_pow10(exponent as u32);
-
- // Get the exact representation of the float from the big integer.
- let (mant, is_truncated) = bigmant.hi64();
- let exp = bigmant.bit_length() as i32 - bits as i32;
- let mut fp = ExtendedFloat { mant, exp };
- fp.round_to_native::<F, _>(|fp, shift| round_nearest_tie_even(fp, shift, is_truncated));
- into_float(fp)
-}
-
-/// Calculate the mantissa for a big integer with a negative exponent.
-///
-/// This invokes the comparison with `b+h`.
-fn small_atof<F>(mantissa: Bigint, exponent: i32, f: F) -> F
-where
- F: Float,
-{
- // Get the significant digits and radix exponent for the real digits.
- let mut real_digits = mantissa;
- let real_exp = exponent;
- debug_assert!(real_exp < 0);
-
- // Get the significant digits and the binary exponent for `b+h`.
- let theor = bh_extended(f);
- let mut theor_digits = Bigint::from_u64(theor.mant);
- let theor_exp = theor.exp;
-
- // We need to scale the real digits and `b+h` digits to be the same
- // order. We currently have `real_exp`, in `radix`, that needs to be
- // shifted to `theor_digits` (since it is negative), and `theor_exp`
- // to either `theor_digits` or `real_digits` as a power of 2 (since it
- // may be positive or negative). Try to remove as many powers of 2
- // as possible. All values are relative to `theor_digits`, that is,
- // reflect the power you need to multiply `theor_digits` by.
-
- // Can remove a power-of-two, since the radix is 10.
- // Both are on opposite-sides of equation, can factor out a
- // power of two.
- //
- // Example: 10^-10, 2^-10 -> ( 0, 10, 0)
- // Example: 10^-10, 2^-15 -> (-5, 10, 0)
- // Example: 10^-10, 2^-5 -> ( 5, 10, 0)
- // Example: 10^-10, 2^5 -> (15, 10, 0)
- let binary_exp = theor_exp - real_exp;
- let halfradix_exp = -real_exp;
- let radix_exp = 0;
-
- // Carry out our multiplication.
- if halfradix_exp != 0 {
- theor_digits.imul_pow5(halfradix_exp as u32);
- }
- if radix_exp != 0 {
- theor_digits.imul_pow10(radix_exp as u32);
- }
- if binary_exp > 0 {
- theor_digits.imul_pow2(binary_exp as u32);
- } else if binary_exp < 0 {
- real_digits.imul_pow2(-binary_exp as u32);
- }
-
- // Compare real digits to theoretical digits and round the float.
- match real_digits.compare(&theor_digits) {
- cmp::Ordering::Greater => f.next_positive(),
- cmp::Ordering::Less => f,
- cmp::Ordering::Equal => f.round_positive_even(),
- }
-}
-
-/// Calculate the exact value of the float.
-///
-/// Note: fraction must not have trailing zeros.
-pub(crate) fn bhcomp<F>(b: F, integer: &[u8], mut fraction: &[u8], exponent: i32) -> F
-where
- F: Float,
-{
- // Calculate the number of integer digits and use that to determine
- // where the significant digits start in the fraction.
- let integer_digits = integer.len();
- let fraction_digits = fraction.len();
- let digits_start = if integer_digits == 0 {
- let start = fraction.iter().take_while(|&x| *x == b'0').count();
- fraction = &fraction[start..];
- start
- } else {
- 0
- };
- let sci_exp = scientific_exponent(exponent, integer_digits, digits_start);
- let count = F::MAX_DIGITS.min(integer_digits + fraction_digits - digits_start);
- let scaled_exponent = sci_exp + 1 - count as i32;
-
- let mantissa = parse_mantissa::<F>(integer, fraction);
- if scaled_exponent >= 0 {
- large_atof(mantissa, scaled_exponent)
- } else {
- small_atof(mantissa, scaled_exponent, b)
- }
-}
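The slow path above compares the exact decimal digits against `b+h`, the value exactly halfway between the candidate float `b` and the next float up. A small sketch of how `b` and `b+h` can be read out of an f64's bit pattern as (mantissa, binary exponent) pairs; the field constants are the usual IEEE-754 binary64 layout and the function names are illustrative:

    /// Decompose a finite, positive f64 into (mantissa, exponent) with value == mantissa * 2^exponent.
    fn b_parts(f: f64) -> (u64, i32) {
        const MANT_BITS: i32 = 52;
        const MANT_MASK: u64 = (1 << MANT_BITS) - 1;
        const BIAS: i32 = 1023;
        let bits = f.to_bits();
        let raw_exp = ((bits >> MANT_BITS) & 0x7FF) as i32;
        let frac = bits & MANT_MASK;
        if raw_exp == 0 {
            (frac, 1 - BIAS - MANT_BITS) // subnormal: no hidden bit
        } else {
            (frac | (1 << MANT_BITS), raw_exp - BIAS - MANT_BITS)
        }
    }

    /// `b+h`: the midpoint between `f` and the next larger float, as (mantissa, exponent).
    fn bh_parts(f: f64) -> (u64, i32) {
        let (mant, exp) = b_parts(f);
        ((mant << 1) + 1, exp - 1)
    }

    fn main() {
        // 1.0 == 2^52 * 2^-52; the midpoint to the next float is (2^53 + 1) * 2^-53.
        assert_eq!(b_parts(1.0), (1 << 52, -52));
        assert_eq!(bh_parts(1.0), ((1 << 53) + 1, -53));
    }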
diff --git a/vendor/serde_json/src/lexical/bignum.rs b/vendor/serde_json/src/lexical/bignum.rs
deleted file mode 100644
index f9551f5..0000000
--- a/vendor/serde_json/src/lexical/bignum.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Big integer type definition.
-
-use super::math::*;
-use alloc::vec::Vec;
-
-/// Storage for a big integer type.
-#[derive(Clone, PartialEq, Eq)]
-pub(crate) struct Bigint {
- /// Internal storage for the Bigint, in little-endian order.
- pub(crate) data: Vec<Limb>,
-}
-
-impl Default for Bigint {
- fn default() -> Self {
- Bigint {
- data: Vec::with_capacity(20),
- }
- }
-}
-
-impl Math for Bigint {
- #[inline]
- fn data(&self) -> &Vec<Limb> {
- &self.data
- }
-
- #[inline]
- fn data_mut(&mut self) -> &mut Vec<Limb> {
- &mut self.data
- }
-}
diff --git a/vendor/serde_json/src/lexical/cached.rs b/vendor/serde_json/src/lexical/cached.rs
deleted file mode 100644
index ef5a9fe..0000000
--- a/vendor/serde_json/src/lexical/cached.rs
+++ /dev/null
@@ -1,82 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Cached powers trait for extended-precision floats.
-
-use super::cached_float80;
-use super::float::ExtendedFloat;
-
-// POWERS
-
- /// Precalculated powers that use two separate arrays for memory efficiency.
-#[doc(hidden)]
-pub(crate) struct ExtendedFloatArray {
- // Pre-calculated mantissa for the powers.
- pub mant: &'static [u64],
- // Pre-calculated binary exponents for the powers.
- pub exp: &'static [i32],
-}
-
-/// Allow indexing of values without bounds checking
-impl ExtendedFloatArray {
- #[inline]
- pub fn get_extended_float(&self, index: usize) -> ExtendedFloat {
- let mant = self.mant[index];
- let exp = self.exp[index];
- ExtendedFloat { mant, exp }
- }
-
- #[inline]
- pub fn len(&self) -> usize {
- self.mant.len()
- }
-}
-
-// MODERATE PATH POWERS
-
-/// Precalculated powers of base N for the moderate path.
-#[doc(hidden)]
-pub(crate) struct ModeratePathPowers {
- // Pre-calculated small powers.
- pub small: ExtendedFloatArray,
- // Pre-calculated large powers.
- pub large: ExtendedFloatArray,
- /// Pre-calculated small powers as 64-bit integers
- pub small_int: &'static [u64],
- // Step between large powers and number of small powers.
- pub step: i32,
- // Exponent bias for the large powers.
- pub bias: i32,
-}
-
-/// Allow indexing of values without bounds checking
-impl ModeratePathPowers {
- #[inline]
- pub fn get_small(&self, index: usize) -> ExtendedFloat {
- self.small.get_extended_float(index)
- }
-
- #[inline]
- pub fn get_large(&self, index: usize) -> ExtendedFloat {
- self.large.get_extended_float(index)
- }
-
- #[inline]
- pub fn get_small_int(&self, index: usize) -> u64 {
- self.small_int[index]
- }
-}
-
-// CACHED EXTENDED POWERS
-
-/// Cached powers as a trait for a floating-point type.
-pub(crate) trait ModeratePathCache {
- /// Get cached powers.
- fn get_powers() -> &'static ModeratePathPowers;
-}
-
-impl ModeratePathCache for ExtendedFloat {
- #[inline]
- fn get_powers() -> &'static ModeratePathPowers {
- cached_float80::get_powers()
- }
-}
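The `step` and `bias` fields drive how a decimal exponent is split into one "large" power (spaced every `step` exponents) and one "small" remainder power, which is what keeps the tables in the next file compact. A sketch of that index arithmetic, using the step of 10 and bias of 350 shipped below:

    const STEP: i32 = 10;  // spacing of the large powers
    const BIAS: i32 = 350; // the large table starts at 10^-350

    /// Split 10^exponent so that 10^exponent == large[large_index] * small[small_index].
    fn power_indexes(exponent: i32) -> (usize, usize) {
        let e = exponent + BIAS;
        assert!(e >= 0, "exponent below the cached range");
        ((e / STEP) as usize, (e % STEP) as usize)
    }

    fn main() {
        // 10^0 is 35 large steps above 10^-350, with no small remainder.
        assert_eq!(power_indexes(0), (35, 0));
        // 10^-7 == 10^-10 * 10^3.
        assert_eq!(power_indexes(-7), (34, 3));
    }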
diff --git a/vendor/serde_json/src/lexical/cached_float80.rs b/vendor/serde_json/src/lexical/cached_float80.rs
deleted file mode 100644
index 9beda3d..0000000
--- a/vendor/serde_json/src/lexical/cached_float80.rs
+++ /dev/null
@@ -1,206 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
- //! Cached exponents for base**n values with 80-bit extended floats.
-//!
-//! Exact versions of base**n as an extended-precision float, with both
-//! large and small powers. Use the large powers to minimize the amount
-//! of compounded error.
-//!
-//! These values were calculated using Python, using the arbitrary-precision
-//! integer to calculate exact extended-representation of each value.
-//! These values are all normalized.
-
-use super::cached::{ExtendedFloatArray, ModeratePathPowers};
-
-// LOW-LEVEL
-// ---------
-
-// BASE10
-
-const BASE10_SMALL_MANTISSA: [u64; 10] = [
- 9223372036854775808, // 10^0
- 11529215046068469760, // 10^1
- 14411518807585587200, // 10^2
- 18014398509481984000, // 10^3
- 11258999068426240000, // 10^4
- 14073748835532800000, // 10^5
- 17592186044416000000, // 10^6
- 10995116277760000000, // 10^7
- 13743895347200000000, // 10^8
- 17179869184000000000, // 10^9
-];
-const BASE10_SMALL_EXPONENT: [i32; 10] = [
- -63, // 10^0
- -60, // 10^1
- -57, // 10^2
- -54, // 10^3
- -50, // 10^4
- -47, // 10^5
- -44, // 10^6
- -40, // 10^7
- -37, // 10^8
- -34, // 10^9
-];
-const BASE10_LARGE_MANTISSA: [u64; 66] = [
- 11555125961253852697, // 10^-350
- 13451937075301367670, // 10^-340
- 15660115838168849784, // 10^-330
- 18230774251475056848, // 10^-320
- 10611707258198326947, // 10^-310
- 12353653155963782858, // 10^-300
- 14381545078898527261, // 10^-290
- 16742321987285426889, // 10^-280
- 9745314011399999080, // 10^-270
- 11345038669416679861, // 10^-260
- 13207363278391631158, // 10^-250
- 15375394465392026070, // 10^-240
- 17899314949046850752, // 10^-230
- 10418772551374772303, // 10^-220
- 12129047596099288555, // 10^-210
- 14120069793541087484, // 10^-200
- 16437924692338667210, // 10^-190
- 9568131466127621947, // 10^-180
- 11138771039116687545, // 10^-170
- 12967236152753102995, // 10^-160
- 15095849699286165408, // 10^-150
- 17573882009934360870, // 10^-140
- 10229345649675443343, // 10^-130
- 11908525658859223294, // 10^-120
- 13863348470604074297, // 10^-110
- 16139061738043178685, // 10^-100
- 9394170331095332911, // 10^-90
- 10936253623915059621, // 10^-80
- 12731474852090538039, // 10^-70
- 14821387422376473014, // 10^-60
- 17254365866976409468, // 10^-50
- 10043362776618689222, // 10^-40
- 11692013098647223345, // 10^-30
- 13611294676837538538, // 10^-20
- 15845632502852867518, // 10^-10
- 9223372036854775808, // 10^0
- 10737418240000000000, // 10^10
- 12500000000000000000, // 10^20
- 14551915228366851806, // 10^30
- 16940658945086006781, // 10^40
- 9860761315262647567, // 10^50
- 11479437019748901445, // 10^60
- 13363823550460978230, // 10^70
- 15557538194652854267, // 10^80
- 18111358157653424735, // 10^90
- 10542197943230523224, // 10^100
- 12272733663244316382, // 10^110
- 14287342391028437277, // 10^120
- 16632655625031838749, // 10^130
- 9681479787123295682, // 10^140
- 11270725851789228247, // 10^150
- 13120851772591970218, // 10^160
- 15274681817498023410, // 10^170
- 17782069995880619867, // 10^180
- 10350527006597618960, // 10^190
- 12049599325514420588, // 10^200
- 14027579833653779454, // 10^210
- 16330252207878254650, // 10^220
- 9505457831475799117, // 10^230
- 11065809325636130661, // 10^240
- 12882297539194266616, // 10^250
- 14996968138956309548, // 10^260
- 17458768723248864463, // 10^270
- 10162340898095201970, // 10^280
- 11830521861667747109, // 10^290
- 13772540099066387756, // 10^300
-];
-const BASE10_LARGE_EXPONENT: [i32; 66] = [
- -1226, // 10^-350
- -1193, // 10^-340
- -1160, // 10^-330
- -1127, // 10^-320
- -1093, // 10^-310
- -1060, // 10^-300
- -1027, // 10^-290
- -994, // 10^-280
- -960, // 10^-270
- -927, // 10^-260
- -894, // 10^-250
- -861, // 10^-240
- -828, // 10^-230
- -794, // 10^-220
- -761, // 10^-210
- -728, // 10^-200
- -695, // 10^-190
- -661, // 10^-180
- -628, // 10^-170
- -595, // 10^-160
- -562, // 10^-150
- -529, // 10^-140
- -495, // 10^-130
- -462, // 10^-120
- -429, // 10^-110
- -396, // 10^-100
- -362, // 10^-90
- -329, // 10^-80
- -296, // 10^-70
- -263, // 10^-60
- -230, // 10^-50
- -196, // 10^-40
- -163, // 10^-30
- -130, // 10^-20
- -97, // 10^-10
- -63, // 10^0
- -30, // 10^10
- 3, // 10^20
- 36, // 10^30
- 69, // 10^40
- 103, // 10^50
- 136, // 10^60
- 169, // 10^70
- 202, // 10^80
- 235, // 10^90
- 269, // 10^100
- 302, // 10^110
- 335, // 10^120
- 368, // 10^130
- 402, // 10^140
- 435, // 10^150
- 468, // 10^160
- 501, // 10^170
- 534, // 10^180
- 568, // 10^190
- 601, // 10^200
- 634, // 10^210
- 667, // 10^220
- 701, // 10^230
- 734, // 10^240
- 767, // 10^250
- 800, // 10^260
- 833, // 10^270
- 867, // 10^280
- 900, // 10^290
- 933, // 10^300
-];
-const BASE10_SMALL_INT_POWERS: [u64; 10] = [
- 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000,
-];
-const BASE10_STEP: i32 = 10;
-const BASE10_BIAS: i32 = 350;
-
-// HIGH LEVEL
-// ----------
-
-const BASE10_POWERS: ModeratePathPowers = ModeratePathPowers {
- small: ExtendedFloatArray {
- mant: &BASE10_SMALL_MANTISSA,
- exp: &BASE10_SMALL_EXPONENT,
- },
- large: ExtendedFloatArray {
- mant: &BASE10_LARGE_MANTISSA,
- exp: &BASE10_LARGE_EXPONENT,
- },
- small_int: &BASE10_SMALL_INT_POWERS,
- step: BASE10_STEP,
- bias: BASE10_BIAS,
-};
-
-/// Get powers from base.
-pub(crate) fn get_powers() -> &'static ModeratePathPowers {
- &BASE10_POWERS
-}
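The small-power entries above are simply 10^k shifted left until the top bit of the 64-bit mantissa is set, with the binary exponent adjusted to compensate. A few lines of Rust reproduce them (a sketch; only the constants listed above are assumed):

    /// Normalized extended-float form of 10^k: value == mant * 2^exp with the top bit of mant set.
    fn normalized_pow10(k: u32) -> (u64, i32) {
        let p = 10u64.pow(k); // exact for k <= 19
        let shift = p.leading_zeros();
        (p << shift, -(shift as i32))
    }

    fn main() {
        assert_eq!(normalized_pow10(0), (9223372036854775808, -63));  // matches the 10^0 entry
        assert_eq!(normalized_pow10(9), (17179869184000000000, -34)); // matches the 10^9 entry
    }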
diff --git a/vendor/serde_json/src/lexical/digit.rs b/vendor/serde_json/src/lexical/digit.rs
deleted file mode 100644
index 3d150a1..0000000
--- a/vendor/serde_json/src/lexical/digit.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Helpers to convert and add digits from characters.
-
-// Convert u8 to digit.
-#[inline]
-pub(crate) fn to_digit(c: u8) -> Option<u32> {
- (c as char).to_digit(10)
-}
-
-// Add digit to mantissa.
-#[inline]
-pub(crate) fn add_digit(value: u64, digit: u32) -> Option<u64> {
- match value.checked_mul(10) {
- None => None,
- Some(n) => n.checked_add(digit as u64),
- }
-}
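These two helpers are usually chained over a digit string, stopping as soon as the u64 accumulator would overflow. A minimal usage sketch (the `parse_digits` wrapper is illustrative, not part of the deleted module):

    fn to_digit(c: u8) -> Option<u32> {
        (c as char).to_digit(10)
    }

    fn add_digit(value: u64, digit: u32) -> Option<u64> {
        value.checked_mul(10)?.checked_add(digit as u64)
    }

    /// Accumulate decimal digits into a u64; None on a non-digit or on overflow.
    fn parse_digits(bytes: &[u8]) -> Option<u64> {
        bytes.iter().try_fold(0u64, |acc, &b| add_digit(acc, to_digit(b)?))
    }

    fn main() {
        assert_eq!(parse_digits(b"18446744073709551615"), Some(u64::MAX));
        assert_eq!(parse_digits(b"18446744073709551616"), None); // one past u64::MAX
    }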
diff --git a/vendor/serde_json/src/lexical/errors.rs b/vendor/serde_json/src/lexical/errors.rs
deleted file mode 100644
index f4f41cd..0000000
--- a/vendor/serde_json/src/lexical/errors.rs
+++ /dev/null
@@ -1,132 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Estimate the error in an 80-bit approximation of a float.
-//!
-//! This estimates the error in a floating-point representation.
-//!
-//! This implementation is loosely based off the Golang implementation,
-//! found here: <https://golang.org/src/strconv/atof.go>
-
-use super::float::*;
-use super::num::*;
-use super::rounding::*;
-
-pub(crate) trait FloatErrors {
- /// Get the full error scale.
- fn error_scale() -> u32;
- /// Get the half error scale.
- fn error_halfscale() -> u32;
- /// Determine if the number of errors is tolerable for float precision.
- fn error_is_accurate<F: Float>(count: u32, fp: &ExtendedFloat) -> bool;
-}
-
-/// Check if the error is accurate with a round-nearest rounding scheme.
-#[inline]
-fn nearest_error_is_accurate(errors: u64, fp: &ExtendedFloat, extrabits: u64) -> bool {
- // Round-to-nearest, need to use the halfway point.
- if extrabits == 65 {
- // Underflow, we have a shift larger than the mantissa.
- // The representation is valid **only** if adding the error bound to the
- // mantissa does not overflow into the next bit. If it overflows,
- // the representation is **not** valid.
- !fp.mant.overflowing_add(errors).1
- } else {
- let mask: u64 = lower_n_mask(extrabits);
- let extra: u64 = fp.mant & mask;
-
- // Round-to-nearest, need to check if we're close to halfway.
- // IE, b10100 | 100000, where `|` signifies the truncation point.
- let halfway: u64 = lower_n_halfway(extrabits);
- let cmp1 = halfway.wrapping_sub(errors) < extra;
- let cmp2 = extra < halfway.wrapping_add(errors);
-
- // If both comparisons are true, we have significant rounding error,
- // and the value cannot be exactly represented. Otherwise, the
- // representation is valid.
- !(cmp1 && cmp2)
- }
-}
-
-impl FloatErrors for u64 {
- #[inline]
- fn error_scale() -> u32 {
- 8
- }
-
- #[inline]
- fn error_halfscale() -> u32 {
- u64::error_scale() / 2
- }
-
- #[inline]
- fn error_is_accurate<F: Float>(count: u32, fp: &ExtendedFloat) -> bool {
- // Determine if extended-precision float is a good approximation.
- // If the error has affected too many units, the float will be
- // inaccurate, or if the representation is too close to halfway
- // that any operations could affect this halfway representation.
- // See the documentation for dtoa for more information.
- let bias = -(F::EXPONENT_BIAS - F::MANTISSA_SIZE);
- let denormal_exp = bias - 63;
- // This is always a valid u32, since (denormal_exp - fp.exp)
- // will always be positive and the significand size is {23, 52}.
- let extrabits = if fp.exp <= denormal_exp {
- 64 - F::MANTISSA_SIZE + denormal_exp - fp.exp
- } else {
- 63 - F::MANTISSA_SIZE
- };
-
- // Our logic is as follows: we want to determine if the actual
- // mantissa and the errors during calculation differ significantly
- // from the rounding point. The rounding point for round-nearest
- // is the halfway point, IE, this when the truncated bits start
- // with b1000..., while the rounding point for the round-toward
- // is when the truncated bits are equal to 0.
- // To do so, we can check whether the rounding point +/- the error
- // are >/< the actual lower n bits.
- //
- // For whether we need to use signed or unsigned types for this
- // analysis, see this example, using u8 rather than u64 to simplify
- // things.
- //
- // # Comparisons
- // cmp1 = (halfway - errors) < extra
- // cmp2 = extra < (halfway + errors)
- //
- // # Large Extrabits, Low Errors
- //
- // extrabits = 8
- // halfway = 0b10000000
- // extra = 0b10000010
- // errors = 0b00000100
- // halfway - errors = 0b01111100
- // halfway + errors = 0b10000100
- //
- // Unsigned:
- // halfway - errors = 124
- // halfway + errors = 132
- // extra = 130
- // cmp1 = true
- // cmp2 = true
- // Signed:
- // halfway - errors = 124
- // halfway + errors = -124
- // extra = -126
- // cmp1 = false
- // cmp2 = true
- //
- // # Conclusion
- //
- // Since errors will always be small, and since we want to detect
- // if the representation is accurate, we need to use an **unsigned**
- // type for comparisons.
-
- let extrabits = extrabits as u64;
- let errors = count as u64;
- if extrabits > 65 {
- // Underflow, we have a literal 0.
- return true;
- }
-
- nearest_error_is_accurate(errors, fp, extrabits)
- }
-}
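The accuracy test above asks whether the truncated low bits, once widened by the accumulated error, could straddle the round-nearest halfway point. A self-contained sketch of that check for fewer than 64 extra bits; the two mask helpers mirror what the deleted code imports from its rounding module:

    /// Mask selecting the lowest `n` bits (valid for n < 64).
    fn lower_n_mask(n: u64) -> u64 {
        (1u64 << n) - 1
    }

    /// The halfway point of the lowest `n` bits: a 1 in the top truncated bit.
    fn lower_n_halfway(n: u64) -> u64 {
        1u64 << (n - 1)
    }

    /// True when the low `extrabits` of `mant`, +/- the error bound, cannot
    /// cross the halfway point, so round-nearest is unambiguous.
    fn nearest_is_accurate(mant: u64, errors: u64, extrabits: u64) -> bool {
        let extra = mant & lower_n_mask(extrabits);
        let halfway = lower_n_halfway(extrabits);
        let maybe_above = halfway.wrapping_sub(errors) < extra;
        let maybe_below = extra < halfway.wrapping_add(errors);
        !(maybe_above && maybe_below)
    }

    fn main() {
        // Truncating 8 bits: halfway is 0b1000_0000. With an error of 4 units,
        // 0b1000_0010 could sit on either side of halfway, so it is not accurate...
        assert!(!nearest_is_accurate(0b1000_0010, 4, 8));
        // ...while 0b0000_0010 is comfortably below halfway and is accurate.
        assert!(nearest_is_accurate(0b0000_0010, 4, 8));
    }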
diff --git a/vendor/serde_json/src/lexical/exponent.rs b/vendor/serde_json/src/lexical/exponent.rs
deleted file mode 100644
index 6fc5197..0000000
--- a/vendor/serde_json/src/lexical/exponent.rs
+++ /dev/null
@@ -1,50 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Utilities to calculate exponents.
-
-/// Convert usize into i32 without overflow.
-///
-/// This is needed to ensure when adjusting the exponent relative to
-/// the mantissa we do not overflow for comically-long exponents.
-#[inline]
-fn into_i32(value: usize) -> i32 {
- if value > i32::max_value() as usize {
- i32::max_value()
- } else {
- value as i32
- }
-}
-
-// EXPONENT CALCULATION
-
-// Calculate the scientific notation exponent without overflow.
-//
-// For example, 0.1 would be -1, and 10 would be 1 in base 10.
-#[inline]
-pub(crate) fn scientific_exponent(
- exponent: i32,
- integer_digits: usize,
- fraction_start: usize,
-) -> i32 {
- if integer_digits == 0 {
- let fraction_start = into_i32(fraction_start);
- exponent.saturating_sub(fraction_start).saturating_sub(1)
- } else {
- let integer_shift = into_i32(integer_digits - 1);
- exponent.saturating_add(integer_shift)
- }
-}
-
-// Calculate the mantissa exponent without overflow.
-//
-// Remove the number of digits that contributed to the mantissa past
-// the dot, and add the number of truncated digits from the mantissa,
-// to calculate the scaling factor for the mantissa from a raw exponent.
-#[inline]
-pub(crate) fn mantissa_exponent(exponent: i32, fraction_digits: usize, truncated: usize) -> i32 {
- if fraction_digits > truncated {
- exponent.saturating_sub(into_i32(fraction_digits - truncated))
- } else {
- exponent.saturating_add(into_i32(truncated - fraction_digits))
- }
-}
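A concrete example helps here: parsing "0.001234e2" gives no integer digits, two skipped leading fraction zeros, and a raw exponent of 2, which normalizes to 1.234e-1. A short sketch restating the calculation without the saturating-arithmetic guards of the deleted function:

    /// Scientific-notation exponent of the parsed number (simplified, no overflow guards).
    fn scientific_exponent(exponent: i32, integer_digits: usize, fraction_start: usize) -> i32 {
        if integer_digits == 0 {
            exponent - fraction_start as i32 - 1
        } else {
            exponent + integer_digits as i32 - 1
        }
    }

    fn main() {
        // "0.001234e2" == 0.1234 == 1.234e-1
        assert_eq!(scientific_exponent(2, 0, 2), -1);
        // "1234.5" == 1.2345e3
        assert_eq!(scientific_exponent(0, 4, 0), 3);
    }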
diff --git a/vendor/serde_json/src/lexical/float.rs b/vendor/serde_json/src/lexical/float.rs
deleted file mode 100644
index 2d434a2..0000000
--- a/vendor/serde_json/src/lexical/float.rs
+++ /dev/null
@@ -1,183 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-// FLOAT TYPE
-
-use super::num::*;
-use super::rounding::*;
-use super::shift::*;
-
-/// Extended precision floating-point type.
-///
-/// Private implementation, exposed only for testing purposes.
-#[doc(hidden)]
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub(crate) struct ExtendedFloat {
- /// Mantissa for the extended-precision float.
- pub mant: u64,
- /// Binary exponent for the extended-precision float.
- pub exp: i32,
-}
-
-impl ExtendedFloat {
- // PROPERTIES
-
- // OPERATIONS
-
- /// Multiply two normalized extended-precision floats, as if by `a*b`.
- ///
- /// The precision is maximal when the numbers are normalized, however,
- /// decent precision will occur as long as both values have high bits
- /// set. The result is not normalized.
- ///
- /// Algorithm:
- /// 1. Non-signed multiplication of mantissas (requires 2x as many bits as input).
- /// 2. Normalization of the result (not done here).
- /// 3. Addition of exponents.
- pub(crate) fn mul(&self, b: &ExtendedFloat) -> ExtendedFloat {
- // Logic check, values must be decently normalized prior to multiplication.
- debug_assert!((self.mant & u64::HIMASK != 0) && (b.mant & u64::HIMASK != 0));
-
- // Extract high-and-low masks.
- let ah = self.mant >> u64::HALF;
- let al = self.mant & u64::LOMASK;
- let bh = b.mant >> u64::HALF;
- let bl = b.mant & u64::LOMASK;
-
- // Get our products
- let ah_bl = ah * bl;
- let al_bh = al * bh;
- let al_bl = al * bl;
- let ah_bh = ah * bh;
-
- let mut tmp = (ah_bl & u64::LOMASK) + (al_bh & u64::LOMASK) + (al_bl >> u64::HALF);
- // round up
- tmp += 1 << (u64::HALF - 1);
-
- ExtendedFloat {
- mant: ah_bh + (ah_bl >> u64::HALF) + (al_bh >> u64::HALF) + (tmp >> u64::HALF),
- exp: self.exp + b.exp + u64::FULL,
- }
- }
-
- /// Multiply in-place, as if by `a*b`.
- ///
- /// The result is not normalized.
- #[inline]
- pub(crate) fn imul(&mut self, b: &ExtendedFloat) {
- *self = self.mul(b);
- }
-
- // NORMALIZE
-
- /// Normalize float-point number.
- ///
- /// Shift the mantissa so the number of leading zeros is 0, or the value
- /// itself is 0.
- ///
- /// Get the number of bits shifted.
- #[inline]
- pub(crate) fn normalize(&mut self) -> u32 {
- // Note:
- // Using the ctlz intrinsic via leading_zeros is way faster (~10x)
- // than shifting 1-bit at a time, via while loop, and also way
- // faster (~2x) than an unrolled loop that checks at 32, 16, 4,
- // 2, and 1 bit.
- //
- // Using a modulus of pow2 (which will get optimized to a bitwise
- // and with 0x3F or faster) is slightly slower than an if/then,
- // however, removing the if/then will likely optimize more branched
- // code as it removes conditional logic.
-
- // Calculate the number of leading zeros, and then zero-out
- // any overflowing bits, to avoid shl overflow when self.mant == 0.
- let shift = if self.mant == 0 {
- 0
- } else {
- self.mant.leading_zeros()
- };
- shl(self, shift as i32);
- shift
- }
-
- // ROUND
-
- /// Lossy round float-point number to native mantissa boundaries.
- #[inline]
- pub(crate) fn round_to_native<F, Algorithm>(&mut self, algorithm: Algorithm)
- where
- F: Float,
- Algorithm: FnOnce(&mut ExtendedFloat, i32),
- {
- round_to_native::<F, _>(self, algorithm);
- }
-
- // FROM
-
- /// Create extended float from native float.
- #[inline]
- pub fn from_float<F: Float>(f: F) -> ExtendedFloat {
- from_float(f)
- }
-
- // INTO
-
- /// Convert into default-rounded, lower-precision native float.
- #[inline]
- pub(crate) fn into_float<F: Float>(mut self) -> F {
- self.round_to_native::<F, _>(round_nearest_tie_even);
- into_float(self)
- }
-
- /// Convert into downward-rounded, lower-precision native float.
- #[inline]
- pub(crate) fn into_downward_float<F: Float>(mut self) -> F {
- self.round_to_native::<F, _>(round_downward);
- into_float(self)
- }
-}
-
-// FROM FLOAT
-
-// Import ExtendedFloat from native float.
-#[inline]
-pub(crate) fn from_float<F>(f: F) -> ExtendedFloat
-where
- F: Float,
-{
- ExtendedFloat {
- mant: u64::as_cast(f.mantissa()),
- exp: f.exponent(),
- }
-}
-
-// INTO FLOAT
-
-// Export extended-precision float to native float.
-//
-// The extended-precision float must be in native float representation,
-// with overflow/underflow appropriately handled.
-#[inline]
-pub(crate) fn into_float<F>(fp: ExtendedFloat) -> F
-where
- F: Float,
-{
- // Export floating-point number.
- if fp.mant == 0 || fp.exp < F::DENORMAL_EXPONENT {
- // sub-denormal, underflow
- F::ZERO
- } else if fp.exp >= F::MAX_EXPONENT {
- // overflow
- F::from_bits(F::INFINITY_BITS)
- } else {
- // calculate the exp and fraction bits, and return a float from bits.
- let exp: u64;
- if (fp.exp == F::DENORMAL_EXPONENT) && (fp.mant & F::HIDDEN_BIT_MASK.as_u64()) == 0 {
- exp = 0;
- } else {
- exp = (fp.exp + F::EXPONENT_BIAS) as u64;
- }
- let exp = exp << F::MANTISSA_SIZE;
- let mant = fp.mant & F::MANTISSA_MASK.as_u64();
- F::from_bits(F::Unsigned::as_cast(mant | exp))
- }
-}
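The `mul` above is a 64x64 -> 128-bit multiply that keeps the rounded high 64 bits of the product and sums the exponents (the `u64::FULL`, i.e. `+ 64`, accounts for the discarded low half). Where u128 is available, the same result can be computed directly; a sketch assuming both inputs are normalized, as the deleted code requires:

    #[derive(Debug, PartialEq)]
    struct Ext { mant: u64, exp: i32 }

    /// Rounded high 64 bits of the 128-bit product, with exponents summed.
    fn mul(a: &Ext, b: &Ext) -> Ext {
        let wide = (a.mant as u128) * (b.mant as u128);
        let rounded = wide + (1u128 << 63); // round the discarded low half at its midpoint
        Ext { mant: (rounded >> 64) as u64, exp: a.exp + b.exp + 64 }
    }

    fn main() {
        // 1.0 * 1.0, both normalized as 2^63 * 2^-63; the (unnormalized) result 2^62 * 2^-62 is still 1.0.
        let one = Ext { mant: 1 << 63, exp: -63 };
        assert_eq!(mul(&one, &one), Ext { mant: 1 << 62, exp: -62 });
    }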
diff --git a/vendor/serde_json/src/lexical/large_powers.rs b/vendor/serde_json/src/lexical/large_powers.rs
deleted file mode 100644
index c63ce1c..0000000
--- a/vendor/serde_json/src/lexical/large_powers.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Precalculated large powers for limbs.
-
-#[cfg(limb_width_32)]
-pub(crate) use super::large_powers32::*;
-
-#[cfg(limb_width_64)]
-pub(crate) use super::large_powers64::*;
diff --git a/vendor/serde_json/src/lexical/large_powers32.rs b/vendor/serde_json/src/lexical/large_powers32.rs
deleted file mode 100644
index 7991197..0000000
--- a/vendor/serde_json/src/lexical/large_powers32.rs
+++ /dev/null
@@ -1,183 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Precalculated large powers for 32-bit limbs.
-
-/// Large powers (&[u32]) for base5 operations.
-const POW5_1: [u32; 1] = [5];
-const POW5_2: [u32; 1] = [25];
-const POW5_3: [u32; 1] = [625];
-const POW5_4: [u32; 1] = [390625];
-const POW5_5: [u32; 2] = [2264035265, 35];
-const POW5_6: [u32; 3] = [2242703233, 762134875, 1262];
-const POW5_7: [u32; 5] = [3211403009, 1849224548, 3668416493, 3913284084, 1593091];
-const POW5_8: [u32; 10] = [
- 781532673, 64985353, 253049085, 594863151, 3553621484, 3288652808, 3167596762, 2788392729,
- 3911132675, 590,
-];
-const POW5_9: [u32; 19] = [
- 2553183233, 3201533787, 3638140786, 303378311, 1809731782, 3477761648, 3583367183, 649228654,
- 2915460784, 487929380, 1011012442, 1677677582, 3428152256, 1710878487, 1438394610, 2161952759,
- 4100910556, 1608314830, 349175,
-];
-const POW5_10: [u32; 38] = [
- 4234999809, 2012377703, 2408924892, 1570150255, 3090844311, 3273530073, 1187251475, 2498123591,
- 3364452033, 1148564857, 687371067, 2854068671, 1883165473, 505794538, 2988060450, 3159489326,
- 2531348317, 3215191468, 849106862, 3892080979, 3288073877, 2242451748, 4183778142, 2995818208,
- 2477501924, 325481258, 2487842652, 1774082830, 1933815724, 2962865281, 1168579910, 2724829000,
- 2360374019, 2315984659, 2360052375, 3251779801, 1664357844, 28,
-];
-const POW5_11: [u32; 75] = [
- 689565697, 4116392818, 1853628763, 516071302, 2568769159, 365238920, 336250165, 1283268122,
- 3425490969, 248595470, 2305176814, 2111925499, 507770399, 2681111421, 589114268, 591287751,
- 1708941527, 4098957707, 475844916, 3378731398, 2452339615, 2817037361, 2678008327, 1656645978,
- 2383430340, 73103988, 448667107, 2329420453, 3124020241, 3625235717, 3208634035, 2412059158,
- 2981664444, 4117622508, 838560765, 3069470027, 270153238, 1802868219, 3692709886, 2161737865,
- 2159912357, 2585798786, 837488486, 4237238160, 2540319504, 3798629246, 3748148874, 1021550776,
- 2386715342, 1973637538, 1823520457, 1146713475, 833971519, 3277251466, 905620390, 26278816,
- 2680483154, 2294040859, 373297482, 5996609, 4109575006, 512575049, 917036550, 1942311753,
- 2816916778, 3248920332, 1192784020, 3537586671, 2456567643, 2925660628, 759380297, 888447942,
- 3559939476, 3654687237, 805,
-];
-const POW5_12: [u32; 149] = [
- 322166785, 3809044581, 2994556223, 1239584207, 3962455841, 4001882964, 3053876612, 915114683,
- 2783289745, 785739093, 4253185907, 3931164994, 1370983858, 2553556126, 3360742076, 2255410929,
- 422849554, 2457422215, 3539495362, 1720790602, 1908931983, 1470596141, 592794347, 4219465164,
- 4085652704, 941661409, 2534650953, 885063988, 2355909854, 2812815516, 767256131, 3821757683,
- 2155151105, 3817418473, 281116564, 2834395026, 2821201622, 2524625843, 1511330880, 2572352493,
- 330571332, 2951088579, 2730271766, 4044456479, 4212286644, 2444937588, 3603420843, 2387148597,
- 1142537539, 3299235429, 1751012624, 861228086, 2873722519, 230498814, 1023297821, 2553128038,
- 3421129895, 2651917435, 2042981258, 1606787143, 2228751918, 447345732, 1930371132, 1784132011,
- 3612538790, 2275925090, 2487567871, 1080427616, 2009179183, 3383506781, 3899054063, 1950782960,
- 2168622213, 2717674390, 3616636027, 2079341593, 1530129217, 1461057425, 2406264415, 3674671357,
- 2972036238, 2019354295, 1455849819, 1866918619, 1324269294, 424891864, 2722422332, 2641594816,
- 1400249021, 3482963993, 3734946379, 225889849, 1891545473, 777383150, 3589824633, 4117601611,
- 4220028667, 334453379, 1083130821, 1060342180, 4208163139, 1489826908, 4163762246, 1096580926,
- 689301528, 2336054516, 1782865703, 4175148410, 3398369392, 2329412588, 3001580596, 59740741,
- 3202189932, 3351895776, 246185302, 718535188, 3772647488, 4151666556, 4055698133, 2461934110,
- 2281316281, 3466396836, 3536023465, 1064267812, 2955456354, 2423805422, 3627960790, 1325057500,
- 3876919979, 2009959531, 175455101, 184092852, 2358785571, 3842977831, 2485266289, 487121622,
- 4159252710, 4075707558, 459389244, 300652075, 2521346588, 3458976673, 888631636, 2076098096,
- 3844514585, 2363697580, 3729421522, 3051115477, 649395,
-];
-const POW5_13: [u32; 298] = [
- 711442433, 3564261005, 2399042279, 4170849936, 4010295575, 1423987028, 330414929, 1349249065,
- 4213813618, 3852031822, 4040843590, 2154565331, 3094013374, 1159028371, 3227065538, 2115927092,
- 2085102554, 488590542, 2609619432, 3602898805, 3812736528, 3269439096, 23816114, 253984538,
- 1035905997, 2942969204, 3400787671, 338562688, 1637191975, 740509713, 2264962817, 3410753922,
- 4162231428, 2282041228, 1759373012, 3155367777, 4278913285, 1420532801, 1981002276, 438054990,
- 1006507643, 1142697287, 1332538012, 2029019521, 3949305784, 818392641, 2491288846, 2716584663,
- 3648886102, 556814413, 444795339, 4071412999, 1066321706, 4253169466, 2510832316, 672091442,
- 4083256000, 2165985028, 1841538484, 3549854235, 364431512, 3707648143, 1162785440, 2268641545,
- 281340310, 735693841, 848809228, 1700785200, 2919703985, 4094234344, 58530286, 965505005,
- 1000010347, 3381961808, 3040089923, 1973852082, 2890971585, 1019960210, 4292895237, 2821887841,
- 3756675650, 3951282907, 3885870583, 1008791145, 503998487, 1881258362, 1949332730, 392996726,
- 2012973814, 3970014187, 2461725150, 2942547730, 3728066699, 2766901132, 3778532841, 1085564064,
- 2278673896, 1116879805, 3448726271, 774279411, 157211670, 1506320155, 531168605, 1362654525,
- 956967721, 2148871960, 769186085, 4186232894, 2055679604, 3248365487, 3981268013, 3975787984,
- 2489510517, 3309046495, 212771124, 933418041, 3371839114, 562115198, 1853601831, 757336096,
- 1354633440, 1486083256, 2872126393, 522920738, 1141587749, 3210903262, 1926940553, 3054024853,
- 2021162538, 2262742000, 1877899947, 3147002868, 669840763, 4158174590, 4238502559, 1023731922,
- 3386840011, 829588074, 3449720188, 2835142880, 2999162007, 813056473, 482949569, 638108879,
- 3067201471, 1026714238, 4004452838, 2383667807, 3999477803, 771648919, 630660440, 3827121348,
- 176185980, 2878191002, 2666149832, 3909811063, 2429163983, 2665690412, 907266128, 4269332098,
- 2022665808, 1527122180, 3072053668, 1072477492, 3006022924, 549664855, 2800340954, 37352654,
- 1212772743, 2711280533, 3029527946, 2511120040, 1305308377, 3474662224, 4226330922, 442988428,
- 954940108, 3274548099, 4212288177, 2688499880, 3982226758, 3922609956, 1279948029, 1939943640,
- 3650489901, 2733364929, 2494263275, 1864579964, 1225941120, 2390465139, 1267503249, 3533240729,
- 904410805, 2842550015, 2517736241, 1796069820, 3335274381, 673539835, 1924694759, 3598098235,
- 2792633405, 16535707, 3703535497, 3592841791, 2929082877, 1317622811, 294990855, 1396706563,
- 2383271770, 3853857605, 277813677, 277580220, 1101318484, 3761974115, 1132150143, 2544692622,
- 3419825776, 743770306, 1695464553, 1548693232, 2421159615, 2575672031, 2678971806, 1591267897,
- 626546738, 3823443129, 267710932, 1455435162, 2353985540, 3248523795, 335348168, 3872552561,
- 2814522612, 2634118860, 3503767026, 1301019273, 1414467789, 722985138, 3070909565, 4253482569,
- 3744939841, 558142907, 2229819389, 13833173, 77003966, 2763671364, 3905603970, 2931990126,
- 2280419384, 1879090457, 2934846267, 4284933164, 2331863845, 62191163, 3178861020, 1522063815,
- 785672270, 1215568492, 2936443917, 802972489, 2956820173, 3916732783, 2893572089, 1391232801,
- 3168640330, 2396859648, 894950918, 1103583736, 961991865, 2807302642, 305977505, 3054505899,
- 1048256994, 781017659, 2459278754, 3164823415, 537658277, 905753687, 464963300, 4149131560,
- 1029507924, 2278300961, 1231291503, 414073408, 3630740085, 2345841814, 475358196, 3258243317,
- 4167625072, 4178911231, 2927355042, 655438830, 3138378018, 623200562, 2785714112, 273403236,
- 807993669, 98,
-];
-const POW5_14: [u32; 595] = [
- 1691320321, 2671006246, 1682531301, 2072858707, 1240508969, 3108358191, 1125119096, 2470144952,
- 1610099978, 1690632660, 1941696884, 2663506355, 1006364675, 3909158537, 4147711374, 1072663936,
- 4078768933, 745751659, 4123687570, 471458681, 655028926, 4113407388, 3945524552, 985625313,
- 1254424514, 2127508744, 570530434, 945388122, 3194649404, 2589065070, 2731705399, 202030749,
- 2090780394, 3348662271, 1481754777, 1130635472, 4025144705, 1924486271, 2578567861, 125491448,
- 1558036315, 994248173, 3817216711, 763950077, 1030439870, 959586474, 3845661701, 483795093,
- 1637944470, 2275463649, 3398804829, 1758016486, 2665513698, 2004912571, 1094885097, 4223064276,
- 3307819021, 651121777, 1757003305, 3603542336, 129917786, 2215974994, 3042386306, 2205352757,
- 3944939700, 3710987569, 97967515, 1217242524, 930630949, 3660328512, 1787663098, 1784141600,
- 2500542892, 4034561586, 3444961378, 785043562, 3869499367, 885623728, 2625011087, 3053789617,
- 1965731793, 3900511934, 2648823592, 3851062028, 3321968688, 799195417, 1011847510, 1369129160,
- 1348009103, 2876796955, 2915408967, 3305284948, 263399535, 1715990604, 2645821294, 1587844552,
- 2624912049, 3035631499, 2306636348, 3499275462, 675152704, 854794152, 4004972748, 1739996642,
- 1333476491, 4012621867, 3658792931, 3297985728, 2864481726, 3066357406, 785287846, 1671499798,
- 433044045, 1919608025, 264833858, 3999983367, 1116778570, 1301982149, 4213901070, 4081649357,
- 536169226, 1389008649, 188923873, 373495152, 2551132278, 1800758715, 3951840330, 2632334454,
- 3118778225, 1034046547, 1862428410, 3037609062, 1994608505, 29051798, 2571685694, 264151332,
- 2260643090, 2717535964, 3508441116, 3283713017, 1903365635, 923575694, 1219598101, 2288281570,
- 3676533911, 1014136356, 555142354, 2389170030, 4185108175, 884862419, 836141292, 2957159173,
- 1997444768, 4233903127, 2876184692, 3089125070, 1480848293, 1097600237, 299700527, 2507669891,
- 2982628312, 2114881043, 2529576251, 2812279824, 2987750993, 4241938954, 2204775591, 1037094060,
- 829315638, 1231047149, 52608178, 3735136637, 3455232602, 962039123, 488286513, 50685385,
- 3516451821, 843975207, 1572355722, 675489076, 2428445672, 1555117248, 3708476086, 10375249,
- 4172112346, 2117510871, 2227658327, 3187664554, 3050656558, 328034318, 3179601324, 1247769761,
- 3439263953, 1431538938, 2962525068, 1213366289, 3813013550, 2651093719, 1860661503, 3933716208,
- 264320617, 789980519, 2257856172, 102000748, 977269860, 1113845122, 3008928583, 1461738106,
- 557786285, 2926560363, 1038106190, 3643478847, 828004507, 457818698, 1933056971, 373408056,
- 2076808229, 3160935130, 2781854874, 2519636100, 177606000, 4237103862, 3977834316, 1621936232,
- 2599050516, 319893558, 3343370366, 765044144, 976657331, 7026264, 294277429, 3829376742,
- 3029627280, 2705178718, 3614653880, 230519152, 3288033233, 293525479, 3805751881, 3227511198,
- 2520308544, 3648103003, 1111086184, 437622105, 2232033852, 3239146386, 584244184, 1450926016,
- 2462430443, 3226534010, 298582169, 4214576928, 1762099469, 964985185, 1585788148, 1641127666,
- 787006566, 2315956284, 3258232694, 2275058964, 2541003317, 1508235863, 2613339827, 4080647514,
- 1152057965, 3149266279, 731345410, 914737650, 65395712, 1884566942, 1379520432, 2611027720,
- 4163073378, 2619704967, 2746552541, 1388822415, 3005141199, 843440249, 4288674003, 3136174279,
- 4051522914, 4144149433, 3427566947, 3419023197, 3758479825, 3893877676, 96899594, 1657725776,
- 253618880, 434129337, 1499045748, 2996992534, 4036042074, 2110713869, 906222950, 928326225,
- 2541827893, 1604330202, 226792470, 4022228930, 815850898, 1466012310, 3377712199, 292769859,
- 2822055597, 3225701344, 3052947004, 385831222, 705324593, 4030158636, 3540280538, 2982120874,
- 2136414455, 255762046, 3852783591, 3262064164, 2358991588, 3756586117, 4143612643, 3326743817,
- 2897365738, 807711264, 3719310016, 3721264861, 3627337076, 944539331, 3640975513, 3712525681,
- 1162911839, 2008243316, 2179489649, 2867584109, 261861553, 3570253908, 2062868357, 2220328623,
- 3857004679, 3744109002, 4138041873, 1451860932, 2364975637, 2802161722, 2680106834, 753401584,
- 1223182946, 1245401957, 4163377735, 3565815922, 2216942838, 4036140094, 71979081, 3924559643,
- 400477238, 551750683, 1174153235, 859969898, 1185921017, 1711399735, 812991545, 4051735761,
- 3549118738, 1631653329, 3631835958, 3648867800, 1206500363, 2155893137, 361030362, 3454286017,
- 2505909489, 1083595169, 453595313, 1510564703, 1706163902, 1632924345, 1381875722, 1661526119,
- 1082778324, 3571910052, 1140625929, 851544870, 1145546234, 2938573139, 907528924, 1304752338,
- 1764668294, 1788942063, 1700368828, 104979467, 1413911959, 3327497828, 1956384744, 1272712474,
- 2815637534, 3307809377, 1320574940, 1111968962, 4073107827, 434096622, 169451929, 3201183459,
- 3331028877, 2852366972, 3369830128, 2924794558, 3106537952, 3739481231, 1612955817, 4138608722,
- 2721281595, 2755775390, 843505117, 982234295, 1157276611, 814674632, 4246504726, 3532006708,
- 992340967, 1647538031, 204696133, 193866982, 3899126129, 300851698, 1379496684, 1759463683,
- 1354782756, 1374637239, 3410883240, 1073406229, 3038431791, 1053909855, 3607043270, 173719711,
- 3733903830, 171820911, 1573050589, 932781534, 4183534770, 2158849555, 372245998, 3573073830,
- 841339264, 2759200520, 1610547277, 2603293319, 3890906486, 1557138278, 3964109906, 677238797,
- 537994297, 1124184993, 4287078344, 4207654540, 2943022776, 2977947524, 3255359985, 4098397558,
- 2274666217, 2915862060, 243524940, 2467726756, 2869020032, 507521339, 3403121914, 522051455,
- 1803903108, 3471254194, 473535371, 1948602036, 3352095732, 3116527002, 1795743673, 775867940,
- 2551469548, 3757442064, 3162525227, 3765412747, 3040105484, 1927625810, 48214767, 2997207130,
- 1342349989, 2536583992, 1501320191, 3592287317, 887432730, 967585477, 3334212779, 948663609,
- 1064513472, 15386372, 2465931737, 3230242590, 3036652803, 2063155087, 1927500726, 2821790499,
- 2187774383, 501520074, 3688568496, 3606711121, 2576459247, 3176542345, 378322447, 156541411,
- 1400607301, 1406179107, 677848877, 2253753529, 193196070, 4207435024, 4166396241, 509467541,
- 2906024136, 1221753746, 3375413222, 431327897, 2749265123, 2848827671, 3412997614, 2051920238,
- 1283516885, 1300498239, 1957256104, 2634010560, 3531900395, 360276850, 1461184973, 2012063967,
- 2873572430, 2914608609, 4289554777, 1539331673, 1859532928, 4213441063, 538215691, 3512720863,
- 4258743698, 3040408445, 982396546, 343095663, 4138069496, 1021581857, 214185242, 1968079460,
- 2864275059, 3347192726, 4096783459, 3259169450, 3707808869, 142485006, 399610869, 230556456,
- 2219467721, 4191227798, 2242548189, 3136366572, 179755707, 3464881829, 452317775, 3887426070,
- 3446430233, 1473370015, 1576807208, 3964523248, 419325089, 2373067114, 1596072055, 1928415752,
- 3635452689, 1005598891, 3335462724, 3290848636, 3669078247, 1178176812, 2110774376, 3068593619,
- 1253036518, 908857731, 3631223047, 4138506423, 2903592318, 3596915748, 3289036113, 3721512676,
- 2704409359, 3386016968, 3676268074, 2185259502, 1096257611, 3360076717, 3548676554, 170167319,
- 3360064287, 3899940843, 9640,
-];
-
-pub(crate) const POW5: [&'static [u32]; 14] = [
- &POW5_1, &POW5_2, &POW5_3, &POW5_4, &POW5_5, &POW5_6, &POW5_7, &POW5_8, &POW5_9, &POW5_10,
- &POW5_11, &POW5_12, &POW5_13, &POW5_14,
-];
diff --git a/vendor/serde_json/src/lexical/large_powers64.rs b/vendor/serde_json/src/lexical/large_powers64.rs
deleted file mode 100644
index ee36561..0000000
--- a/vendor/serde_json/src/lexical/large_powers64.rs
+++ /dev/null
@@ -1,625 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Precalculated large powers for 64-bit limbs.
-
-/// Large powers (&[u64]) for base5 operations.
-const POW5_1: [u64; 1] = [5];
-const POW5_2: [u64; 1] = [25];
-const POW5_3: [u64; 1] = [625];
-const POW5_4: [u64; 1] = [390625];
-const POW5_5: [u64; 1] = [152587890625];
-const POW5_6: [u64; 2] = [3273344365508751233, 1262];
-const POW5_7: [u64; 3] = [7942358959831785217, 16807427164405733357, 1593091];
-const POW5_8: [u64; 5] = [
- 279109966635548161,
- 2554917779393558781,
- 14124656261812188652,
- 11976055582626787546,
- 2537941837315,
-];
-const POW5_9: [u64; 10] = [
- 13750482914757213185,
- 1302999927698857842,
- 14936872543252795590,
- 2788415840139466767,
- 2095640732773017264,
- 7205570348933370714,
- 7348167152523113408,
- 9285516396840364274,
- 6907659600622710236,
- 349175,
-];
-const POW5_10: [u64; 19] = [
- 8643096425819600897,
- 6743743997439985372,
- 14059704609098336919,
- 10729359125898331411,
- 4933048501514368705,
- 12258131603170554683,
- 2172371001088594721,
- 13569903330219142946,
- 13809142207969578845,
- 16716360519037769646,
- 9631256923806107285,
- 12866941232305103710,
- 1397931361048440292,
- 7619627737732970332,
- 12725409486282665900,
- 11703051443360963910,
- 9947078370803086083,
- 13966287901448440471,
- 121923442132,
-];
-const POW5_11: [u64; 38] = [
- 17679772531488845825,
- 2216509366347768155,
- 1568689219195129479,
- 5511594616325588277,
- 1067709417009240089,
- 9070650952098657518,
- 11515285870634858015,
- 2539561553659505564,
- 17604889300961091799,
- 14511540856854204724,
- 12099083339557485471,
- 7115240299237943815,
- 313979240050606788,
- 10004784664717172195,
- 15570268847930131473,
- 10359715202835930803,
- 17685054012115162812,
- 13183273382855797757,
- 7743260039872919062,
- 9284593436392572926,
- 11105921222066415013,
- 18198799323400703846,
- 16314988383739458320,
- 4387527177871570570,
- 8476708682254672590,
- 4925096874831034057,
- 14075687868072027455,
- 112866656203221926,
- 9852830467773230418,
- 25755239915196746,
- 2201493076310172510,
- 8342165458688466438,
- 13954006576066379050,
- 15193819059903295636,
- 12565616718911389531,
- 3815854855847885129,
- 15696762163583540628,
- 805,
-];
-const POW5_12: [u64; 75] = [
- 16359721904723189761,
- 5323973632697650495,
- 17187956456762001185,
- 3930387638628283780,
- 3374723710406992273,
- 16884225088663222131,
- 10967440051041439154,
- 9686916182456720060,
- 10554548046311730194,
- 7390739362393647554,
- 6316162333127736719,
- 18122464886584070891,
- 4044404959645932768,
- 3801320885861987401,
- 12080950653257274590,
- 16414324262488991299,
- 16395687498836410113,
- 12173633940896186260,
- 10843185433142632150,
- 11048169832730399808,
- 12674828934734683716,
- 17370808310130582550,
- 10500926985433408692,
- 10252725158410704555,
- 14170108270502067523,
- 3698946465517688080,
- 989984870770509463,
- 10965601426733943069,
- 11389898658438335655,
- 6901098232861256586,
- 1921335291173932590,
- 7662788640922083388,
- 9775023833308395430,
- 4640401278902814207,
- 14532050972198413359,
- 8378549018693130223,
- 11672322628395371653,
- 8930704142764178555,
- 6275193859483102017,
- 15782593304269205087,
- 8673060659034172558,
- 8018354414354334043,
- 1824896661540749038,
- 11345563346725559868,
- 14959216444480821949,
- 970189517688324683,
- 3338835207603007873,
- 17684964260791738489,
- 1436466329061721851,
- 4554134986752476101,
- 6398757850768963907,
- 4709779218751158342,
- 10033277748582410264,
- 17932125878679265063,
- 10004750887749091440,
- 256584531835386932,
- 14396282740722731628,
- 3086085133731396950,
- 17831272085689600064,
- 10573926491412564693,
- 14888061047859191737,
- 4570995450261499817,
- 10410165022312935266,
- 5691078631447480790,
- 8632710455805418155,
- 790672778942823293,
- 16505464105756800547,
- 2092171438149740401,
- 17505030673829275878,
- 1291290830058928444,
- 14856191690683232796,
- 8916773426496500052,
- 10152003807578858265,
- 13104441193763861714,
- 649395,
-];
-const POW5_13: [u64; 149] = [
- 15308384451594534913,
- 17913664074042735335,
- 6115977719198531863,
- 5794980608663993169,
- 16544350702855106930,
- 9253787637781258566,
- 4977988951675168190,
- 9087837664087448770,
- 2098480401110016986,
- 15474332540882100712,
- 14042133997396540944,
- 1090855284423485362,
- 12639956485351058381,
- 1454115676006639319,
- 3180465001342538023,
- 14649076551958697729,
- 9801292446545910916,
- 13552201410826594004,
- 6101141927469189381,
- 1881431857880609316,
- 4907847477899433595,
- 8714572486973123228,
- 3514969632331374520,
- 11667642286891470094,
- 2391499697425323350,
- 17486585679659076043,
- 18267223761882105642,
- 2886610765822313148,
- 9302834862968900288,
- 15246507846733637044,
- 15924227519624562840,
- 9743741243284697760,
- 3159780987244964246,
- 7304816812369628428,
- 17584602612559717809,
- 4146812420657846766,
- 14525415362681041515,
- 8477630142371600195,
- 4380695748062263745,
- 12119915994367943173,
- 16970630866565485122,
- 4332724980155264503,
- 8079943140620527639,
- 1687908087554405626,
- 17051081099834002166,
- 12638146269730763230,
- 11883749876933445771,
- 4662462156371383785,
- 4796962238316531176,
- 3325504751659868927,
- 6469595803187862550,
- 5852556621152583005,
- 9229334792448387881,
- 17979733373938620709,
- 13951623534175792756,
- 17075879371091039277,
- 14212246479457938037,
- 4008999959804158260,
- 2414266395366403722,
- 3252733766253918247,
- 6382678985007829216,
- 2245927470982310841,
- 13790724502051307301,
- 13116936866733148041,
- 9718402891306794538,
- 13516274400356104875,
- 17859223875778049403,
- 4396895129099725471,
- 3563053650368467915,
- 12176845952536972668,
- 3492050964335269015,
- 2740656767075170753,
- 4409704077614761919,
- 10237775279597492710,
- 3314206875098230827,
- 16437361028114095448,
- 12361736225407656572,
- 16792510651790145480,
- 11449053143229929935,
- 18336641737580333136,
- 6558939822118891088,
- 4606255756908155300,
- 2360792578991605004,
- 160428430149144538,
- 11644861220729221511,
- 10785178451159739786,
- 14923560618031934681,
- 1902620814992781610,
- 14064076995338910412,
- 11547019064112212657,
- 16847481479966225734,
- 8331994491163145469,
- 11739712981738851885,
- 8008309968651120619,
- 10266969595459035264,
- 15175153381217702033,
- 12208659352573720245,
- 7714061140750342961,
- 2892831567213510541,
- 15453714249045017319,
- 71020323573871677,
- 15431137995750602633,
- 5659146884637671933,
- 5998809010488554503,
- 16552192379299157850,
- 1192197967194298797,
- 16157555793424861524,
- 10929371590994640255,
- 3194469143425738352,
- 6651586784672005225,
- 11062427140788057791,
- 6834443579468668318,
- 16421563197797455922,
- 6251046422506172884,
- 13952303462156793860,
- 16632486601871393224,
- 11313454360291325172,
- 5587835232504462834,
- 3105197524618514637,
- 18268568531031972989,
- 2397205535804309313,
- 59413027864729597,
- 11869878125348715710,
- 12592801707270523266,
- 8070632061321113656,
- 18403647807860650811,
- 267109013517069093,
- 6537214311028855260,
- 5220826919973709902,
- 3448740582779163661,
- 16822239213112884941,
- 5975299384311048185,
- 10294433804430712138,
- 4739856055412448774,
- 12057273038326387897,
- 13119002941950056609,
- 3354445304051737058,
- 13592813067499314594,
- 3890182464434078629,
- 17820384357466425060,
- 9785228118969879380,
- 1778431746734556271,
- 10075313876350055029,
- 13994048489400919028,
- 17948287074199726448,
- 2815088342305858722,
- 2676626035777198370,
- 1174257960026283968,
- 421714788677,
-];
-const POW5_14: [u64; 298] = [
- 11471884475673051137,
- 8902860357476377573,
- 13350296775839230505,
- 10609191786344608888,
- 7261211985859587338,
- 11439672689354862964,
- 16789708072300570627,
- 4607056528866348430,
- 3202978990421512997,
- 2024899620433984146,
- 17666950207239811774,
- 4233228489390288200,
- 9137580478688460738,
- 4060411066587388546,
- 11119949806060600124,
- 867715462473090103,
- 14382394941384869610,
- 4856042377419278489,
- 8265605599571137921,
- 538981667666252469,
- 4270263388700786523,
- 3281140600308898503,
- 4121392524544394174,
- 2077884106245940229,
- 9773041957329767574,
- 7550623316597646685,
- 8611033926449791714,
- 18137922955420802793,
- 2796546741236224013,
- 15477096484628446761,
- 9517540128113714010,
- 9471917970500821378,
- 15938570248662483124,
- 5228016831978462619,
- 15720991252586974501,
- 7662829825220776698,
- 17328310068068434348,
- 3371736428170309730,
- 3803724952191098855,
- 13115926536504376719,
- 16752571196153442257,
- 16540185467776259880,
- 3432518182450051120,
- 5880364967211798870,
- 12355748840305392783,
- 14196090758536469575,
- 7370123524686686319,
- 6819740424617592686,
- 13037938013537368753,
- 15029273671291927100,
- 3671312928327205696,
- 7473228676544792780,
- 17234079691312938123,
- 14164740848093544419,
- 13169904779481875902,
- 7179036968465894054,
- 8244653688947194445,
- 17179797746073799490,
- 5591970751047577674,
- 17530550506268329742,
- 5965746721852312330,
- 1604149463243472865,
- 7734199791463116918,
- 11305790396015856714,
- 4441196105025505137,
- 13046431581185664762,
- 124776524294606713,
- 1134521334706523966,
- 11671728093344476434,
- 14103440020972933148,
- 3966727403013869059,
- 9828094508409132821,
- 4355682486381147287,
- 10261407143988481234,
- 3800455155249557199,
- 12700901937937547500,
- 18184475466894579360,
- 13267691151779895412,
- 4714157123477697445,
- 10770360171308585263,
- 9083344917597998040,
- 12078649873810212155,
- 18218989082046199377,
- 4454285072780637351,
- 5287307245618354742,
- 16042289702059031730,
- 4131926574212754010,
- 217692071448455473,
- 3624845916216282093,
- 2901203491797614218,
- 6679177724033967080,
- 44561358851332790,
- 9094639944041587162,
- 13690915012276084311,
- 1408896670826320686,
- 5359130319612337580,
- 6148412925099835601,
- 5211368532286409612,
- 11386360825549027374,
- 16895182466965795071,
- 3392940493846427241,
- 438089879085393580,
- 4783928372776399972,
- 6278117363595909959,
- 12569481049412674733,
- 15648622492570893902,
- 1966316336235305115,
- 1603775390515993547,
- 13576113010204316709,
- 10821754650102840474,
- 18198222517222903152,
- 6966163076615302988,
- 1373932372410129684,
- 3285839581819684990,
- 30177575069719475,
- 16447047871247307061,
- 11618654126674833808,
- 990072222556306872,
- 1260682336135768017,
- 13862055046689532489,
- 15668483092844698432,
- 1879572630092764264,
- 13912027797058626108,
- 6231679788219816920,
- 13857858054844167403,
- 18101470072534728857,
- 4144579812461609229,
- 7048589655616599284,
- 9946956499532694630,
- 9771303850109874038,
- 6477823708780339765,
- 17526247621747041971,
- 13525995675852669549,
- 3928768291901239810,
- 8094153383078124544,
- 11214278667728965552,
- 11251547162596832610,
- 5964946855123292381,
- 3622548288590237903,
- 13469765967150053587,
- 17798986288523466082,
- 14684592818807932259,
- 16724077276802963921,
- 7119877993753121290,
- 1864571304902781632,
- 12871984921385213812,
- 9065447042604670298,
- 3987130777300360550,
- 6890545752116901685,
- 17275341711601865750,
- 6296474927799264658,
- 1257436973037243463,
- 13854281781965301421,
- 1657132483318662716,
- 17309399540017292849,
- 12808111630089217242,
- 1098489625264462071,
- 14010458905686364135,
- 16134414519481621220,
- 14288255900328821475,
- 3469093466388187882,
- 15982710881468295872,
- 4056765540058056052,
- 15945176389096104089,
- 8625339365793505375,
- 12316179968863788913,
- 15334123773538054321,
- 9536238824220581765,
- 16080825720106203271,
- 6235695225418121745,
- 12035192956458019349,
- 3235835166714703698,
- 5348960676912581218,
- 15315062772709464647,
- 17335089708021308662,
- 16855855317958414409,
- 2369751139431140406,
- 3693542588628609043,
- 7350405893393987577,
- 17402072586341663801,
- 7007897690013647122,
- 15671767872059304758,
- 9259490518292347915,
- 14836045474406130394,
- 4654005815464502513,
- 6487825998330548401,
- 7013356660323385022,
- 7136200343936679946,
- 15341236858676437716,
- 3657357368867197449,
- 12621075530054608378,
- 5603868621997066972,
- 7683447656788439942,
- 450883379216880060,
- 14291494350184945047,
- 5466258454997635048,
- 14206933098432772126,
- 4775870327277641692,
- 1864430798867181939,
- 13748978265070608793,
- 12250822864261576589,
- 12561896977498605296,
- 16060949594257359328,
- 17775189113543311529,
- 11835965177892927035,
- 4218664174878121437,
- 3499000902478111683,
- 15169853304359126294,
- 7076121963053575143,
- 832652347668916805,
- 1292148207755194737,
- 7556838978364207852,
- 5904021986723518500,
- 4610244652288570024,
- 4526508363195533871,
- 746120481022614726,
- 737965197247830486,
- 4006266184415762653,
- 9272188239892688050,
- 15346235246415709678,
- 11850675997347533184,
- 11181059668610842701,
- 6687857983250662774,
- 2908718488661492818,
- 4828337780126983225,
- 18071738646453002184,
- 12790187227727197880,
- 17602483480871623153,
- 12523532189621855977,
- 10598805712727696716,
- 2179787555896149376,
- 2242193929457337594,
- 14908923241136742532,
- 8369182018012550027,
- 13385381554043022324,
- 3332327430110633913,
- 16138090784046208492,
- 16172324607469047339,
- 8279089815915615244,
- 12872906602736235247,
- 10894545290539475621,
- 15428756545851905023,
- 4155747980686992922,
- 4074479178894544043,
- 66083965608603584,
- 13873786284662268377,
- 8861183628277687555,
- 12119497911296021430,
- 2154012318305274287,
- 15490706314503067312,
- 13643145488710608367,
- 672340241093017103,
- 6039493278284091973,
- 9679797700977436461,
- 18070795828318171174,
- 2188146431134935377,
- 5247392385741514952,
- 1852539214842869734,
- 12235621681634112739,
- 8812930319623534062,
- 5585597406294108629,
- 11312989214475901864,
- 1547377291787797995,
- 8641748937186208205,
- 12518148659168623694,
- 6611379197521520985,
- 18096591571068008576,
- 15087021227100112139,
- 13058454842015958418,
- 1473584652966833794,
- 4387660670140018168,
- 8452836916843525402,
- 14376083294443363955,
- 13998026203969090659,
- 611968444648172645,
- 990232438801273845,
- 18001186324715561929,
- 13470591857250177501,
- 14881554140239420091,
- 16696367836720124495,
- 6328076032778459673,
- 17027497695968504616,
- 10192245646262428833,
- 8282482589527318647,
- 4319014353374321425,
- 14134087271041670980,
- 5060230880114618599,
- 13179509240430058600,
- 3903514232614801894,
- 17774749744702165255,
- 15448635507030969726,
- 15983775238358480209,
- 14542832143965487887,
- 9385618098039514666,
- 14431419612662304843,
- 730863073501675978,
- 16750118380379734815,
- 9640,
-];
-
-pub(crate) const POW5: [&[u64]; 14] = [
- &POW5_1, &POW5_2, &POW5_3, &POW5_4, &POW5_5, &POW5_6, &POW5_7, &POW5_8, &POW5_9, &POW5_10,
- &POW5_11, &POW5_12, &POW5_13, &POW5_14,
-];
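
Each entry `POW5[i]` in the table above stores `5^(2^i)` as little-endian limbs, and `imul_pow5` in math.rs below multiplies by the entries matching the set bits of the exponent. A minimal standalone sketch of that bit-decomposition idea, using `u128` in place of the bignum limbs (the table and function here are illustrative stand-ins, not part of the vendored code):

// Illustrative only: stand-in table of 5^(2^i) as u128 values.
fn pow5_from_table(n: u32) -> u128 {
    debug_assert!(n < 32); // the 5-entry demo table only covers bits 0..5
    let table: [u128; 5] = [5, 25, 625, 390_625, 152_587_890_625];
    let mut acc: u128 = 1;
    for (i, &p) in table.iter().enumerate() {
        if n & (1 << i) != 0 {
            acc *= p; // multiply in 5^(2^i) for each set bit of n
        }
    }
    acc
}

fn main() {
    assert_eq!(pow5_from_table(13), 5u128.pow(13));
}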
diff --git a/vendor/serde_json/src/lexical/math.rs b/vendor/serde_json/src/lexical/math.rs
deleted file mode 100644
index d7122bf..0000000
--- a/vendor/serde_json/src/lexical/math.rs
+++ /dev/null
@@ -1,886 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Building-blocks for arbitrary-precision math.
-//!
-//! These algorithms assume little-endian order for the large integer
-//! buffers, so for a `vec![0, 1, 2, 3]`, `3` is the most significant limb,
-//! and `0` is the least significant limb.
-
-use super::large_powers;
-use super::num::*;
-use super::small_powers::*;
-use alloc::vec::Vec;
-use core::{cmp, iter, mem};
-
-// ALIASES
-// -------
-
-// Type for a single limb of the big integer.
-//
-// A limb is analogous to a digit in base 10, except that it stores 32-bit
-// or 64-bit numbers instead.
-//
-// This list should cover all known 64-bit platforms supported by Rust.
-// https://forge.rust-lang.org/platform-support.html
-//
-// Platforms where native 128-bit multiplication is explicitly supported:
-// - x86_64 (Supported via `MUL`).
-// - mips64 (Supported via `DMULTU`, which `HI` and `LO` can be read-from).
-//
-// Platforms where native 64-bit multiplication is supported and
-// you can extract hi-lo for 64-bit multiplications.
-// aarch64 (Requires `UMULH` and `MUL` to capture high and low bits).
-// powerpc64 (Requires `MULHDU` and `MULLD` to capture high and low bits).
-//
-// Platforms where native 128-bit multiplication is not supported,
-// requiring software emulation.
-// sparc64 (`UMUL` only supported double-word arguments).
-
-// 32-BIT LIMB
-#[cfg(limb_width_32)]
-pub type Limb = u32;
-
-#[cfg(limb_width_32)]
-pub const POW5_LIMB: &[Limb] = &POW5_32;
-
-#[cfg(limb_width_32)]
-pub const POW10_LIMB: &[Limb] = &POW10_32;
-
-#[cfg(limb_width_32)]
-type Wide = u64;
-
-// 64-BIT LIMB
-#[cfg(limb_width_64)]
-pub type Limb = u64;
-
-#[cfg(limb_width_64)]
-pub const POW5_LIMB: &[Limb] = &POW5_64;
-
-#[cfg(limb_width_64)]
-pub const POW10_LIMB: &[Limb] = &POW10_64;
-
-#[cfg(limb_width_64)]
-type Wide = u128;
-
-/// Cast to limb type.
-#[inline]
-pub(crate) fn as_limb<T: Integer>(t: T) -> Limb {
- Limb::as_cast(t)
-}
-
-/// Cast to wide type.
-#[inline]
-fn as_wide<T: Integer>(t: T) -> Wide {
- Wide::as_cast(t)
-}
-
-// SPLIT
-// -----
-
-/// Split u64 into limbs, in little-endian order.
-#[inline]
-#[cfg(limb_width_32)]
-fn split_u64(x: u64) -> [Limb; 2] {
- [as_limb(x), as_limb(x >> 32)]
-}
-
-/// Split u64 into limbs, in little-endian order.
-#[inline]
-#[cfg(limb_width_64)]
-fn split_u64(x: u64) -> [Limb; 1] {
- [as_limb(x)]
-}
-
-// HI64
-// ----
-
-// NONZERO
-
-/// Check if any of the remaining bits are non-zero.
-#[inline]
-pub fn nonzero<T: Integer>(x: &[T], rindex: usize) -> bool {
- let len = x.len();
- let slc = &x[..len - rindex];
- slc.iter().rev().any(|&x| x != T::ZERO)
-}
-
-/// Shift 64-bit integer to high 64-bits.
-#[inline]
-fn u64_to_hi64_1(r0: u64) -> (u64, bool) {
- debug_assert!(r0 != 0);
- let ls = r0.leading_zeros();
- (r0 << ls, false)
-}
-
-/// Shift 2 64-bit integers to high 64-bits.
-#[inline]
-fn u64_to_hi64_2(r0: u64, r1: u64) -> (u64, bool) {
- debug_assert!(r0 != 0);
- let ls = r0.leading_zeros();
- let rs = 64 - ls;
- let v = match ls {
- 0 => r0,
- _ => (r0 << ls) | (r1 >> rs),
- };
- let n = r1 << ls != 0;
- (v, n)
-}
-
-/// Trait to export the high 64-bits from a little-endian slice.
-trait Hi64<T>: AsRef<[T]> {
- /// Get the hi64 bits from a 1-limb slice.
- fn hi64_1(&self) -> (u64, bool);
-
- /// Get the hi64 bits from a 2-limb slice.
- fn hi64_2(&self) -> (u64, bool);
-
- /// Get the hi64 bits from a 3-limb slice.
- fn hi64_3(&self) -> (u64, bool);
-
- /// High-level exporter to extract the high 64 bits from a little-endian slice.
- #[inline]
- fn hi64(&self) -> (u64, bool) {
- match self.as_ref().len() {
- 0 => (0, false),
- 1 => self.hi64_1(),
- 2 => self.hi64_2(),
- _ => self.hi64_3(),
- }
- }
-}
-
-impl Hi64<u32> for [u32] {
- #[inline]
- fn hi64_1(&self) -> (u64, bool) {
- debug_assert!(self.len() == 1);
- let r0 = self[0] as u64;
- u64_to_hi64_1(r0)
- }
-
- #[inline]
- fn hi64_2(&self) -> (u64, bool) {
- debug_assert!(self.len() == 2);
- let r0 = (self[1] as u64) << 32;
- let r1 = self[0] as u64;
- u64_to_hi64_1(r0 | r1)
- }
-
- #[inline]
- fn hi64_3(&self) -> (u64, bool) {
- debug_assert!(self.len() >= 3);
- let r0 = self[self.len() - 1] as u64;
- let r1 = (self[self.len() - 2] as u64) << 32;
- let r2 = self[self.len() - 3] as u64;
- let (v, n) = u64_to_hi64_2(r0, r1 | r2);
- (v, n || nonzero(self, 3))
- }
-}
-
-impl Hi64<u64> for [u64] {
- #[inline]
- fn hi64_1(&self) -> (u64, bool) {
- debug_assert!(self.len() == 1);
- let r0 = self[0];
- u64_to_hi64_1(r0)
- }
-
- #[inline]
- fn hi64_2(&self) -> (u64, bool) {
- debug_assert!(self.len() >= 2);
- let r0 = self[self.len() - 1];
- let r1 = self[self.len() - 2];
- let (v, n) = u64_to_hi64_2(r0, r1);
- (v, n || nonzero(self, 2))
- }
-
- #[inline]
- fn hi64_3(&self) -> (u64, bool) {
- self.hi64_2()
- }
-}
-
-// SCALAR
-// ------
-
-// Scalar-to-scalar operations, for building-blocks for arbitrary-precision
-// operations.
-
-mod scalar {
- use super::*;
-
- // ADDITION
-
- /// Add two small integers and return the resulting value and if overflow happens.
- #[inline]
- pub fn add(x: Limb, y: Limb) -> (Limb, bool) {
- x.overflowing_add(y)
- }
-
- /// AddAssign two small integers and return if overflow happens.
- #[inline]
- pub fn iadd(x: &mut Limb, y: Limb) -> bool {
- let t = add(*x, y);
- *x = t.0;
- t.1
- }
-
- // SUBTRACTION
-
- /// Subtract two small integers and return the resulting value and if overflow happens.
- #[inline]
- pub fn sub(x: Limb, y: Limb) -> (Limb, bool) {
- x.overflowing_sub(y)
- }
-
- /// SubAssign two small integers and return if overflow happens.
- #[inline]
- pub fn isub(x: &mut Limb, y: Limb) -> bool {
- let t = sub(*x, y);
- *x = t.0;
- t.1
- }
-
- // MULTIPLICATION
-
- /// Multiply two small integers (with carry) (and return the overflow contribution).
- ///
- /// Returns the (low, high) components.
- #[inline]
- pub fn mul(x: Limb, y: Limb, carry: Limb) -> (Limb, Limb) {
- // Cannot overflow, as long as wide is 2x as wide. This is because
- // the following is always true:
- // `Wide::max_value() - (Narrow::max_value() * Narrow::max_value()) >= Narrow::max_value()`
- let z: Wide = as_wide(x) * as_wide(y) + as_wide(carry);
- let bits = mem::size_of::<Limb>() * 8;
- (as_limb(z), as_limb(z >> bits))
- }
-
- /// MulAssign two small integers (with carry) and return the resulting carry.
- #[inline]
- pub fn imul(x: &mut Limb, y: Limb, carry: Limb) -> Limb {
- let t = mul(*x, y, carry);
- *x = t.0;
- t.1
- }
-} // scalar
-
-// SMALL
-// -----
-
-// Large-to-small operations, to modify a big integer from a native scalar.
-
-mod small {
- use super::*;
-
- // ADDITION
-
- /// Implied AddAssign implementation for adding a small integer to bigint.
- ///
- /// Allows us to choose a start-index in x to store, to allow incrementing
- /// from a non-zero start.
- #[inline]
- pub fn iadd_impl(x: &mut Vec<Limb>, y: Limb, xstart: usize) {
- if x.len() <= xstart {
- x.push(y);
- } else {
- // Initial add
- let mut carry = scalar::iadd(&mut x[xstart], y);
-
- // Increment until overflow stops occurring.
- let mut size = xstart + 1;
- while carry && size < x.len() {
- carry = scalar::iadd(&mut x[size], 1);
- size += 1;
- }
-
- // If we overflowed the buffer entirely, need to add 1 to the end
- // of the buffer.
- if carry {
- x.push(1);
- }
- }
- }
-
- /// AddAssign small integer to bigint.
- #[inline]
- pub fn iadd(x: &mut Vec<Limb>, y: Limb) {
- iadd_impl(x, y, 0);
- }
-
- // SUBTRACTION
-
- /// SubAssign small integer to bigint.
- /// Does not do overflowing subtraction.
- #[inline]
- pub fn isub_impl(x: &mut Vec<Limb>, y: Limb, xstart: usize) {
- debug_assert!(x.len() > xstart && (x[xstart] >= y || x.len() > xstart + 1));
-
- // Initial subtraction
- let mut carry = scalar::isub(&mut x[xstart], y);
-
- // Propagate the borrow until it stops occurring.
- let mut size = xstart + 1;
- while carry && size < x.len() {
- carry = scalar::isub(&mut x[size], 1);
- size += 1;
- }
- normalize(x);
- }
-
- // MULTIPLICATION
-
- /// MulAssign small integer to bigint.
- #[inline]
- pub fn imul(x: &mut Vec<Limb>, y: Limb) {
- // Multiply iteratively over all elements, adding the carry each time.
- let mut carry: Limb = 0;
- for xi in &mut *x {
- carry = scalar::imul(xi, y, carry);
- }
-
- // Overflow of value, add to end.
- if carry != 0 {
- x.push(carry);
- }
- }
-
- /// Mul small integer to bigint.
- #[inline]
- pub fn mul(x: &[Limb], y: Limb) -> Vec<Limb> {
- let mut z = Vec::<Limb>::default();
- z.extend_from_slice(x);
- imul(&mut z, y);
- z
- }
-
- /// MulAssign by a power.
- ///
- /// Theoretically...
- ///
- /// Use an exponentiation by squaring method, since it reduces the time
- /// complexity of the multiplication to ~`O(log(n))` for the squaring,
- /// and `O(n*m)` for the result. Since `m` is typically a lower-order
- /// factor, this significantly reduces the number of multiplications
- /// we need to do. Iteratively multiplying by small powers follows
- /// the nth triangular number series, which scales as `O(p^2)`, but
- /// where `p` is `n+m`. In short, it scales very poorly.
- ///
- /// Practically....
- ///
- /// Exponentiation by Squaring:
- /// running 2 tests
- /// test bigcomp_f32_lexical ... bench: 1,018 ns/iter (+/- 78)
- /// test bigcomp_f64_lexical ... bench: 3,639 ns/iter (+/- 1,007)
- ///
- /// Exponentiation by Iterative Small Powers:
- /// running 2 tests
- /// test bigcomp_f32_lexical ... bench: 518 ns/iter (+/- 31)
- /// test bigcomp_f64_lexical ... bench: 583 ns/iter (+/- 47)
- ///
- /// Exponentiation by Iterative Large Powers (of 2):
- /// running 2 tests
- /// test bigcomp_f32_lexical ... bench: 671 ns/iter (+/- 31)
- /// test bigcomp_f64_lexical ... bench: 1,394 ns/iter (+/- 47)
- ///
- /// Even using worst-case scenarios, exponentiation by squaring is
- /// significantly slower for our workloads. Just multiply by small powers,
- /// in simple cases, and use precalculated large powers in other cases.
- pub fn imul_pow5(x: &mut Vec<Limb>, n: u32) {
- use super::large::KARATSUBA_CUTOFF;
-
- let small_powers = POW5_LIMB;
- let large_powers = large_powers::POW5;
-
- if n == 0 {
- // No exponent, just return early. The bit-length based lookup below
- // assumes `n != 0`, and index 0 of the large powers holds `5^(2^0)`,
- // not `5^0`.
- return;
- }
-
- // Use the asymptotically faster, precalculated large powers if we're
- // going to hit Karatsuba multiplication at some point during the
- // computation; otherwise, just multiply iteratively by small powers.
- let bit_length = 32 - n.leading_zeros() as usize;
- debug_assert!(bit_length != 0 && bit_length <= large_powers.len());
- if x.len() + large_powers[bit_length - 1].len() < 2 * KARATSUBA_CUTOFF {
- // We can use iterative small powers to make this faster for the
- // easy cases.
-
- // Multiply by the largest small power until n < step.
- let step = small_powers.len() - 1;
- let power = small_powers[step];
- let mut n = n as usize;
- while n >= step {
- imul(x, power);
- n -= step;
- }
-
- // Multiply by the remainder.
- imul(x, small_powers[n]);
- } else {
- // In theory, this code should be asymptotically a lot faster; in
- // practice, our small::imul seems to be the limiting step, and the
- // large imul is slow as well.
-
- // Multiply by higher order powers.
- let mut idx: usize = 0;
- let mut bit: usize = 1;
- let mut n = n as usize;
- while n != 0 {
- if n & bit != 0 {
- debug_assert!(idx < large_powers.len());
- large::imul(x, large_powers[idx]);
- n ^= bit;
- }
- idx += 1;
- bit <<= 1;
- }
- }
- }
-
- // BIT LENGTH
-
- /// Get number of leading zero bits in the storage.
- #[inline]
- pub fn leading_zeros(x: &[Limb]) -> usize {
- x.last().map_or(0, |x| x.leading_zeros() as usize)
- }
-
- /// Calculate the bit-length of the big-integer.
- #[inline]
- pub fn bit_length(x: &[Limb]) -> usize {
- let bits = mem::size_of::<Limb>() * 8;
- // Avoid overflowing, calculate via total number of bits
- // minus leading zero bits.
- let nlz = leading_zeros(x);
- bits.checked_mul(x.len())
- .map_or_else(usize::max_value, |v| v - nlz)
- }
-
- // SHL
-
- /// Shift-left bits inside a buffer.
- ///
- /// Assumes `n < Limb::BITS`, IE, internally shifting bits.
- #[inline]
- pub fn ishl_bits(x: &mut Vec<Limb>, n: usize) {
- // Need to shift by the number of `bits % Limb::BITS`.
- let bits = mem::size_of::<Limb>() * 8;
- debug_assert!(n < bits);
- if n == 0 {
- return;
- }
-
- // Internally, for each item, we shift left by n, and add the previous
- // right shifted limb-bits.
- // For example, with u8 limbs shifted left by 2, we transform:
- // b10100100 b01000010
- // b10 b10010001 b00001000
- let rshift = bits - n;
- let lshift = n;
- let mut prev: Limb = 0;
- for xi in &mut *x {
- let tmp = *xi;
- *xi <<= lshift;
- *xi |= prev >> rshift;
- prev = tmp;
- }
-
- // Push any carry out of the most-significant limb.
- let carry = prev >> rshift;
- if carry != 0 {
- x.push(carry);
- }
- }
-
- /// Shift-left `n` digits inside a buffer.
- ///
- /// Assumes `n` is not 0.
- #[inline]
- pub fn ishl_limbs(x: &mut Vec<Limb>, n: usize) {
- debug_assert!(n != 0);
- if !x.is_empty() {
- x.reserve(n);
- x.splice(..0, iter::repeat(0).take(n));
- }
- }
-
- /// Shift-left buffer by n bits.
- #[inline]
- pub fn ishl(x: &mut Vec<Limb>, n: usize) {
- let bits = mem::size_of::<Limb>() * 8;
- // Need to pad with zeros for the number of `bits / Limb::BITS`,
- // and shift-left with carry for `bits % Limb::BITS`.
- let rem = n % bits;
- let div = n / bits;
- ishl_bits(x, rem);
- if div != 0 {
- ishl_limbs(x, div);
- }
- }
-
- // NORMALIZE
-
- /// Normalize the container by popping any leading zeros.
- #[inline]
- pub fn normalize(x: &mut Vec<Limb>) {
- // Pop any most-significant limbs that are zero, so the buffer
- // stays normalized.
- while x.last() == Some(&0) {
- x.pop();
- }
- }
-} // small
-
-// LARGE
-// -----
-
-// Large-to-large operations, to modify a big integer from a native scalar.
-
-mod large {
- use super::*;
-
- // RELATIVE OPERATORS
-
- /// Compare `x` to `y`, in little-endian order.
- #[inline]
- pub fn compare(x: &[Limb], y: &[Limb]) -> cmp::Ordering {
- if x.len() > y.len() {
- cmp::Ordering::Greater
- } else if x.len() < y.len() {
- cmp::Ordering::Less
- } else {
- let iter = x.iter().rev().zip(y.iter().rev());
- for (&xi, &yi) in iter {
- if xi > yi {
- return cmp::Ordering::Greater;
- } else if xi < yi {
- return cmp::Ordering::Less;
- }
- }
- // Equal case.
- cmp::Ordering::Equal
- }
- }
-
- /// Check if x is less than y.
- #[inline]
- pub fn less(x: &[Limb], y: &[Limb]) -> bool {
- compare(x, y) == cmp::Ordering::Less
- }
-
- /// Check if x is greater than or equal to y.
- #[inline]
- pub fn greater_equal(x: &[Limb], y: &[Limb]) -> bool {
- !less(x, y)
- }
-
- // ADDITION
-
- /// Implied AddAssign implementation for bigints.
- ///
- /// Allows us to choose a start-index in x to store, so we can avoid
- /// padding the buffer with zeros when not needed, optimized for vectors.
- pub fn iadd_impl(x: &mut Vec<Limb>, y: &[Limb], xstart: usize) {
- // The effective x buffer is from `xstart..x.len()`, so we need to treat
- // that as the current range. If the effective y buffer is longer, we
- // need to resize x to that length plus the start index.
- if y.len() > x.len() - xstart {
- x.resize(y.len() + xstart, 0);
- }
-
- // Iteratively add elements from y to x.
- let mut carry = false;
- for (xi, yi) in x[xstart..].iter_mut().zip(y.iter()) {
- // Only one op of the two can overflow, since we added at max
- // Limb::max_value() + Limb::max_value(). Add the previous carry,
- // and store the current carry for the next.
- let mut tmp = scalar::iadd(xi, *yi);
- if carry {
- tmp |= scalar::iadd(xi, 1);
- }
- carry = tmp;
- }
-
- // Overflow from the previous bit.
- if carry {
- small::iadd_impl(x, 1, y.len() + xstart);
- }
- }
-
- /// AddAssign bigint to bigint.
- #[inline]
- pub fn iadd(x: &mut Vec<Limb>, y: &[Limb]) {
- iadd_impl(x, y, 0);
- }
-
- /// Add bigint to bigint.
- #[inline]
- pub fn add(x: &[Limb], y: &[Limb]) -> Vec<Limb> {
- let mut z = Vec::<Limb>::default();
- z.extend_from_slice(x);
- iadd(&mut z, y);
- z
- }
-
- // SUBTRACTION
-
- /// SubAssign bigint to bigint.
- pub fn isub(x: &mut Vec<Limb>, y: &[Limb]) {
- // Basic underflow checks.
- debug_assert!(greater_equal(x, y));
-
- // Iteratively subtract elements of y from x.
- let mut carry = false;
- for (xi, yi) in x.iter_mut().zip(y.iter()) {
- // At most one of the two ops can underflow, since we subtract at
- // most Limb::max_value() + 1 in total. Apply the previous borrow,
- // and store the current borrow for the next limb.
- let mut tmp = scalar::isub(xi, *yi);
- if carry {
- tmp |= scalar::isub(xi, 1);
- }
- carry = tmp;
- }
-
- if carry {
- small::isub_impl(x, 1, y.len());
- } else {
- small::normalize(x);
- }
- }
-
- // MULTIPLICATION
-
- /// Number of limbs below which we bottom out to the asymptotically slower algorithms.
- ///
- /// Karatsuba tends to out-perform long-multiplication at ~320-640 bits,
- /// so we go halfway, while Newton division tends to out-perform
- /// Algorithm D at ~1024 bits. We can toggle this for optimal performance.
- pub const KARATSUBA_CUTOFF: usize = 32;
-
- /// Grade-school multiplication algorithm.
- ///
- /// Slow, naive algorithm, using limb-bit bases and just shifting left for
- /// each iteration. This could be optimized with numerous other algorithms,
- /// but it's extremely simple, and works in O(n*m) time, which is fine
- /// by me. Each iteration, of which there are `m` iterations, requires
- /// `n` multiplications, and `n` additions, or grade-school multiplication.
- fn long_mul(x: &[Limb], y: &[Limb]) -> Vec<Limb> {
- // Using the immutable value, multiply by all the scalars in y, using
- // the algorithm defined above. Use a single buffer to avoid
- // frequent reallocations. Handle the first case to avoid a redundant
- // addition, since we know y.len() >= 1.
- let mut z: Vec<Limb> = small::mul(x, y[0]);
- z.resize(x.len() + y.len(), 0);
-
- // Handle the iterative cases.
- for (i, &yi) in y[1..].iter().enumerate() {
- let zi: Vec<Limb> = small::mul(x, yi);
- iadd_impl(&mut z, &zi, i + 1);
- }
-
- small::normalize(&mut z);
-
- z
- }
-
- /// Split two buffers into halfway, into (lo, hi).
- #[inline]
- pub fn karatsuba_split(z: &[Limb], m: usize) -> (&[Limb], &[Limb]) {
- (&z[..m], &z[m..])
- }
-
- /// Karatsuba multiplication algorithm with roughly equal input sizes.
- ///
- /// Assumes `y.len() >= x.len()`.
- fn karatsuba_mul(x: &[Limb], y: &[Limb]) -> Vec<Limb> {
- if y.len() <= KARATSUBA_CUTOFF {
- // Bottom-out to long multiplication for small cases.
- long_mul(x, y)
- } else if x.len() < y.len() / 2 {
- karatsuba_uneven_mul(x, y)
- } else {
- // Do our 3 multiplications.
- let m = y.len() / 2;
- let (xl, xh) = karatsuba_split(x, m);
- let (yl, yh) = karatsuba_split(y, m);
- let sumx = add(xl, xh);
- let sumy = add(yl, yh);
- let z0 = karatsuba_mul(xl, yl);
- let mut z1 = karatsuba_mul(&sumx, &sumy);
- let z2 = karatsuba_mul(xh, yh);
- // Properly scale z1, which is `z1 - z2 - z0`.
- isub(&mut z1, &z2);
- isub(&mut z1, &z0);
-
- // Create our result, which is equal to, in little-endian order:
- // [z0, z1 - z2 - z0, z2]
- // z1 must be shifted m digits (2^(32m)) over.
- // z2 must be shifted 2*m digits (2^(64m)) over.
- let len = z0.len().max(m + z1.len()).max(2 * m + z2.len());
- let mut result = z0;
- result.reserve_exact(len - result.len());
- iadd_impl(&mut result, &z1, m);
- iadd_impl(&mut result, &z2, 2 * m);
-
- result
- }
- }
-
- /// Karatsuba multiplication algorithm where y is substantially larger than x.
- ///
- /// Assumes `y.len() >= x.len()`.
- fn karatsuba_uneven_mul(x: &[Limb], mut y: &[Limb]) -> Vec<Limb> {
- let mut result = Vec::<Limb>::default();
- result.resize(x.len() + y.len(), 0);
-
- // This effectively is like grade-school multiplication between
- // two numbers, except we're using splits on `y`, and the intermediate
- // step is a Karatsuba multiplication.
- let mut start = 0;
- while !y.is_empty() {
- let m = x.len().min(y.len());
- let (yl, yh) = karatsuba_split(y, m);
- let prod = karatsuba_mul(x, yl);
- iadd_impl(&mut result, &prod, start);
- y = yh;
- start += m;
- }
- small::normalize(&mut result);
-
- result
- }
-
- /// Forwarder to the proper Karatsuba algorithm.
- #[inline]
- fn karatsuba_mul_fwd(x: &[Limb], y: &[Limb]) -> Vec<Limb> {
- if x.len() < y.len() {
- karatsuba_mul(x, y)
- } else {
- karatsuba_mul(y, x)
- }
- }
-
- /// MulAssign bigint to bigint.
- #[inline]
- pub fn imul(x: &mut Vec<Limb>, y: &[Limb]) {
- if y.len() == 1 {
- small::imul(x, y[0]);
- } else {
- // Forward to the Karatsuba implementation, which orders the
- // operands and bottoms out to long multiplication below the
- // cutoff.
- *x = karatsuba_mul_fwd(x, y);
- }
- }
-} // large
-
-// TRAITS
-// ------
-
-/// Traits for shared operations for big integers.
-///
-/// None of these are implemented using normal traits, since these
-/// are very expensive operations, and we want to deliberately
-/// and explicitly use these functions.
-pub(crate) trait Math: Clone + Sized + Default {
- // DATA
-
- /// Get access to the underlying data
- fn data(&self) -> &Vec<Limb>;
-
- /// Get access to the underlying data
- fn data_mut(&mut self) -> &mut Vec<Limb>;
-
- // RELATIVE OPERATIONS
-
- /// Compare self to y.
- #[inline]
- fn compare(&self, y: &Self) -> cmp::Ordering {
- large::compare(self.data(), y.data())
- }
-
- // PROPERTIES
-
- /// Get the high 64-bits from the bigint and if there are remaining bits.
- #[inline]
- fn hi64(&self) -> (u64, bool) {
- self.data().as_slice().hi64()
- }
-
- /// Calculate the bit-length of the big-integer.
- /// Returns usize::max_value() if the value overflows,
- /// IE, if `self.data().len() > usize::max_value() / 8`.
- #[inline]
- fn bit_length(&self) -> usize {
- small::bit_length(self.data())
- }
-
- // INTEGER CONVERSIONS
-
- /// Create new big integer from u64.
- #[inline]
- fn from_u64(x: u64) -> Self {
- let mut v = Self::default();
- let slc = split_u64(x);
- v.data_mut().extend_from_slice(&slc);
- v.normalize();
- v
- }
-
- // NORMALIZE
-
- /// Normalize the integer, so any leading zero values are removed.
- #[inline]
- fn normalize(&mut self) {
- small::normalize(self.data_mut());
- }
-
- // ADDITION
-
- /// AddAssign small integer.
- #[inline]
- fn iadd_small(&mut self, y: Limb) {
- small::iadd(self.data_mut(), y);
- }
-
- // MULTIPLICATION
-
- /// MulAssign small integer.
- #[inline]
- fn imul_small(&mut self, y: Limb) {
- small::imul(self.data_mut(), y);
- }
-
- /// Multiply by a power of 2.
- #[inline]
- fn imul_pow2(&mut self, n: u32) {
- self.ishl(n as usize);
- }
-
- /// Multiply by a power of 5.
- #[inline]
- fn imul_pow5(&mut self, n: u32) {
- small::imul_pow5(self.data_mut(), n);
- }
-
- /// MulAssign by a power of 10.
- #[inline]
- fn imul_pow10(&mut self, n: u32) {
- self.imul_pow5(n);
- self.imul_pow2(n);
- }
-
- // SHIFTS
-
- /// Shift-left the entire buffer n bits.
- #[inline]
- fn ishl(&mut self, n: usize) {
- small::ishl(self.data_mut(), n);
- }
-}
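
For reference, the decomposition used by `imul_pow10` above — multiply by 5^n, then shift left by n bits — can be sketched on a plain `u128` acting as a simplified stand-in for the limb vector (illustrative only, not the vendored API):

// Sketch of the imul_pow10 = imul_pow5 + imul_pow2 decomposition on u128.
fn mul_pow10(x: u128, n: u32) -> u128 {
    let pow5 = 5u128.pow(n); // assumes 5^n and the final product fit in u128
    (x * pow5) << n          // 10^n == 5^n * 2^n
}

fn main() {
    assert_eq!(mul_pow10(7, 3), 7_000);
}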
diff --git a/vendor/serde_json/src/lexical/mod.rs b/vendor/serde_json/src/lexical/mod.rs
deleted file mode 100644
index b1a45e2..0000000
--- a/vendor/serde_json/src/lexical/mod.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-// The code in this module is derived from the `lexical` crate by @Alexhuszagh
-// which the author condensed into this minimal subset for use in serde_json.
-// For the serde_json use case we care more about reliably round tripping all
-// possible floating point values than about parsing any arbitrarily long string
-// of digits with perfect accuracy, as the latter would take a high cost in
-// compile time and performance.
-//
-// Dual licensed as MIT and Apache 2.0 just like the rest of serde_json, but
-// copyright Alexander Huszagh.
-
-//! Fast, minimal float-parsing algorithm.
-
-// MODULES
-pub(crate) mod algorithm;
-mod bhcomp;
-mod bignum;
-mod cached;
-mod cached_float80;
-mod digit;
-mod errors;
-pub(crate) mod exponent;
-pub(crate) mod float;
-mod large_powers;
-pub(crate) mod math;
-pub(crate) mod num;
-pub(crate) mod parse;
-pub(crate) mod rounding;
-mod shift;
-mod small_powers;
-
-#[cfg(limb_width_32)]
-mod large_powers32;
-
-#[cfg(limb_width_64)]
-mod large_powers64;
-
-// API
-pub use self::parse::{parse_concise_float, parse_truncated_float};
diff --git a/vendor/serde_json/src/lexical/num.rs b/vendor/serde_json/src/lexical/num.rs
deleted file mode 100644
index e47e003..0000000
--- a/vendor/serde_json/src/lexical/num.rs
+++ /dev/null
@@ -1,440 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Utilities for Rust numbers.
-
-use core::ops;
-
-/// Precalculated values of radix**i for i in range [0, arr.len()-1].
-/// Each value can be **exactly** represented as that type.
-const F32_POW10: [f32; 11] = [
- 1.0,
- 10.0,
- 100.0,
- 1000.0,
- 10000.0,
- 100000.0,
- 1000000.0,
- 10000000.0,
- 100000000.0,
- 1000000000.0,
- 10000000000.0,
-];
-
-/// Precalculated values of radix**i for i in range [0, arr.len()-1].
-/// Each value can be **exactly** represented as that type.
-const F64_POW10: [f64; 23] = [
- 1.0,
- 10.0,
- 100.0,
- 1000.0,
- 10000.0,
- 100000.0,
- 1000000.0,
- 10000000.0,
- 100000000.0,
- 1000000000.0,
- 10000000000.0,
- 100000000000.0,
- 1000000000000.0,
- 10000000000000.0,
- 100000000000000.0,
- 1000000000000000.0,
- 10000000000000000.0,
- 100000000000000000.0,
- 1000000000000000000.0,
- 10000000000000000000.0,
- 100000000000000000000.0,
- 1000000000000000000000.0,
- 10000000000000000000000.0,
-];
-
-/// Type that can be converted to primitive with `as`.
-pub trait AsPrimitive: Sized + Copy + PartialOrd {
- fn as_u32(self) -> u32;
- fn as_u64(self) -> u64;
- fn as_u128(self) -> u128;
- fn as_usize(self) -> usize;
- fn as_f32(self) -> f32;
- fn as_f64(self) -> f64;
-}
-
-macro_rules! as_primitive_impl {
- ($($ty:ident)*) => {
- $(
- impl AsPrimitive for $ty {
- #[inline]
- fn as_u32(self) -> u32 {
- self as u32
- }
-
- #[inline]
- fn as_u64(self) -> u64 {
- self as u64
- }
-
- #[inline]
- fn as_u128(self) -> u128 {
- self as u128
- }
-
- #[inline]
- fn as_usize(self) -> usize {
- self as usize
- }
-
- #[inline]
- fn as_f32(self) -> f32 {
- self as f32
- }
-
- #[inline]
- fn as_f64(self) -> f64 {
- self as f64
- }
- }
- )*
- };
-}
-
-as_primitive_impl! { u32 u64 u128 usize f32 f64 }
-
-/// An interface for casting between machine scalars.
-pub trait AsCast: AsPrimitive {
- /// Creates a number from another value that can be converted into
- /// a primitive via the `AsPrimitive` trait.
- fn as_cast<N: AsPrimitive>(n: N) -> Self;
-}
-
-macro_rules! as_cast_impl {
- ($ty:ident, $method:ident) => {
- impl AsCast for $ty {
- #[inline]
- fn as_cast<N: AsPrimitive>(n: N) -> Self {
- n.$method()
- }
- }
- };
-}
-
-as_cast_impl!(u32, as_u32);
-as_cast_impl!(u64, as_u64);
-as_cast_impl!(u128, as_u128);
-as_cast_impl!(usize, as_usize);
-as_cast_impl!(f32, as_f32);
-as_cast_impl!(f64, as_f64);
-
-/// Numerical type trait.
-pub trait Number: AsCast + ops::Add<Output = Self> {}
-
-macro_rules! number_impl {
- ($($ty:ident)*) => {
- $(
- impl Number for $ty {}
- )*
- };
-}
-
-number_impl! { u32 u64 u128 usize f32 f64 }
-
-/// Defines a trait that supports integral operations.
-pub trait Integer: Number + ops::BitAnd<Output = Self> + ops::Shr<i32, Output = Self> {
- const ZERO: Self;
-}
-
-macro_rules! integer_impl {
- ($($ty:tt)*) => {
- $(
- impl Integer for $ty {
- const ZERO: Self = 0;
- }
- )*
- };
-}
-
-integer_impl! { u32 u64 u128 usize }
-
-/// Type trait for the mantissa type.
-pub trait Mantissa: Integer {
- /// Mask to extract the high bits from the integer.
- const HIMASK: Self;
- /// Mask to extract the low bits from the integer.
- const LOMASK: Self;
- /// Full size of the integer, in bits.
- const FULL: i32;
- /// Half size of the integer, in bits.
- const HALF: i32 = Self::FULL / 2;
-}
-
-impl Mantissa for u64 {
- const HIMASK: u64 = 0xFFFFFFFF00000000;
- const LOMASK: u64 = 0x00000000FFFFFFFF;
- const FULL: i32 = 64;
-}
-
-/// Get exact exponent limit for radix.
-pub trait Float: Number {
- /// Unsigned type of the same size.
- type Unsigned: Integer;
-
- /// Literal zero.
- const ZERO: Self;
- /// Maximum number of digits that can contribute in the mantissa.
- ///
- /// We can exactly represent a float in radix `b` from radix 2 if
- /// `b` is divisible by 2. This function calculates the exact number of
- /// digits required to exactly represent that float.
- ///
- /// According to the "Handbook of Floating Point Arithmetic",
- /// for IEEE754, with emin being the min exponent, p2 being the
- /// precision, and b being the radix, the number of digits follows as:
- ///
- /// `−emin + p2 + ⌊(emin + 1) log(2, b) − log(1 − 2^(−p2), b)⌋`
- ///
- /// For f32, this follows as:
- /// emin = -126
- /// p2 = 24
- ///
- /// For f64, this follows as:
- /// emin = -1022
- /// p2 = 53
- ///
- /// In Python:
- /// `-emin + p2 + math.floor((emin+1)*math.log(2, b) - math.log(1-2**(-p2), b))`
- ///
- /// This was used to calculate the maximum number of digits for [2, 36].
- const MAX_DIGITS: usize;
-
- // MASKS
-
- /// Bitmask for the sign bit.
- const SIGN_MASK: Self::Unsigned;
- /// Bitmask for the exponent, including the hidden bit.
- const EXPONENT_MASK: Self::Unsigned;
- /// Bitmask for the hidden bit in exponent, which is an implicit 1 in the fraction.
- const HIDDEN_BIT_MASK: Self::Unsigned;
- /// Bitmask for the mantissa (fraction), excluding the hidden bit.
- const MANTISSA_MASK: Self::Unsigned;
-
- // PROPERTIES
-
- /// Positive infinity as bits.
- const INFINITY_BITS: Self::Unsigned;
- /// Negative infinity as bits.
- const NEGATIVE_INFINITY_BITS: Self::Unsigned;
- /// Size of the significand (mantissa) without hidden bit.
- const MANTISSA_SIZE: i32;
- /// Bias of the exponent.
- const EXPONENT_BIAS: i32;
- /// Exponent portion of a denormal float.
- const DENORMAL_EXPONENT: i32;
- /// Maximum exponent value in float.
- const MAX_EXPONENT: i32;
-
- // ROUNDING
-
- /// Default number of bits to shift (or 64 - mantissa size - 1).
- const DEFAULT_SHIFT: i32;
- /// Mask to determine if a full-carry occurred (1 in bit above hidden bit).
- const CARRY_MASK: u64;
-
- /// Get min and max exponent limits (exact) from radix.
- fn exponent_limit() -> (i32, i32);
-
- /// Get the number of digits that can be shifted from exponent to mantissa.
- fn mantissa_limit() -> i32;
-
- // Re-exported methods from std.
- fn pow10(self, n: i32) -> Self;
- fn from_bits(u: Self::Unsigned) -> Self;
- fn to_bits(self) -> Self::Unsigned;
- fn is_sign_positive(self) -> bool;
- fn is_sign_negative(self) -> bool;
-
- /// Returns true if the float is a denormal.
- #[inline]
- fn is_denormal(self) -> bool {
- self.to_bits() & Self::EXPONENT_MASK == Self::Unsigned::ZERO
- }
-
- /// Returns true if the float is a NaN or Infinite.
- #[inline]
- fn is_special(self) -> bool {
- self.to_bits() & Self::EXPONENT_MASK == Self::EXPONENT_MASK
- }
-
- /// Returns true if the float is infinite.
- #[inline]
- fn is_inf(self) -> bool {
- self.is_special() && (self.to_bits() & Self::MANTISSA_MASK) == Self::Unsigned::ZERO
- }
-
- /// Get exponent component from the float.
- #[inline]
- fn exponent(self) -> i32 {
- if self.is_denormal() {
- return Self::DENORMAL_EXPONENT;
- }
-
- let bits = self.to_bits();
- let biased_e = ((bits & Self::EXPONENT_MASK) >> Self::MANTISSA_SIZE).as_u32();
- biased_e as i32 - Self::EXPONENT_BIAS
- }
-
- /// Get mantissa (significand) component from float.
- #[inline]
- fn mantissa(self) -> Self::Unsigned {
- let bits = self.to_bits();
- let s = bits & Self::MANTISSA_MASK;
- if !self.is_denormal() {
- s + Self::HIDDEN_BIT_MASK
- } else {
- s
- }
- }
-
- /// Get next greater float for a positive float.
- /// Value must be >= 0.0 and < INFINITY.
- #[inline]
- fn next_positive(self) -> Self {
- debug_assert!(self.is_sign_positive() && !self.is_inf());
- Self::from_bits(self.to_bits() + Self::Unsigned::as_cast(1u32))
- }
-
- /// Round a positive number to even.
- #[inline]
- fn round_positive_even(self) -> Self {
- if self.mantissa() & Self::Unsigned::as_cast(1u32) == Self::Unsigned::as_cast(1u32) {
- self.next_positive()
- } else {
- self
- }
- }
-}
-
-impl Float for f32 {
- type Unsigned = u32;
-
- const ZERO: f32 = 0.0;
- const MAX_DIGITS: usize = 114;
- const SIGN_MASK: u32 = 0x80000000;
- const EXPONENT_MASK: u32 = 0x7F800000;
- const HIDDEN_BIT_MASK: u32 = 0x00800000;
- const MANTISSA_MASK: u32 = 0x007FFFFF;
- const INFINITY_BITS: u32 = 0x7F800000;
- const NEGATIVE_INFINITY_BITS: u32 = Self::INFINITY_BITS | Self::SIGN_MASK;
- const MANTISSA_SIZE: i32 = 23;
- const EXPONENT_BIAS: i32 = 127 + Self::MANTISSA_SIZE;
- const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS;
- const MAX_EXPONENT: i32 = 0xFF - Self::EXPONENT_BIAS;
- const DEFAULT_SHIFT: i32 = u64::FULL - f32::MANTISSA_SIZE - 1;
- const CARRY_MASK: u64 = 0x1000000;
-
- #[inline]
- fn exponent_limit() -> (i32, i32) {
- (-10, 10)
- }
-
- #[inline]
- fn mantissa_limit() -> i32 {
- 7
- }
-
- #[inline]
- fn pow10(self, n: i32) -> f32 {
- // Check the exponent is within bounds in debug builds.
- debug_assert!({
- let (min, max) = Self::exponent_limit();
- n >= min && n <= max
- });
-
- if n > 0 {
- self * F32_POW10[n as usize]
- } else {
- self / F32_POW10[-n as usize]
- }
- }
-
- #[inline]
- fn from_bits(u: u32) -> f32 {
- f32::from_bits(u)
- }
-
- #[inline]
- fn to_bits(self) -> u32 {
- f32::to_bits(self)
- }
-
- #[inline]
- fn is_sign_positive(self) -> bool {
- f32::is_sign_positive(self)
- }
-
- #[inline]
- fn is_sign_negative(self) -> bool {
- f32::is_sign_negative(self)
- }
-}
-
-impl Float for f64 {
- type Unsigned = u64;
-
- const ZERO: f64 = 0.0;
- const MAX_DIGITS: usize = 769;
- const SIGN_MASK: u64 = 0x8000000000000000;
- const EXPONENT_MASK: u64 = 0x7FF0000000000000;
- const HIDDEN_BIT_MASK: u64 = 0x0010000000000000;
- const MANTISSA_MASK: u64 = 0x000FFFFFFFFFFFFF;
- const INFINITY_BITS: u64 = 0x7FF0000000000000;
- const NEGATIVE_INFINITY_BITS: u64 = Self::INFINITY_BITS | Self::SIGN_MASK;
- const MANTISSA_SIZE: i32 = 52;
- const EXPONENT_BIAS: i32 = 1023 + Self::MANTISSA_SIZE;
- const DENORMAL_EXPONENT: i32 = 1 - Self::EXPONENT_BIAS;
- const MAX_EXPONENT: i32 = 0x7FF - Self::EXPONENT_BIAS;
- const DEFAULT_SHIFT: i32 = u64::FULL - f64::MANTISSA_SIZE - 1;
- const CARRY_MASK: u64 = 0x20000000000000;
-
- #[inline]
- fn exponent_limit() -> (i32, i32) {
- (-22, 22)
- }
-
- #[inline]
- fn mantissa_limit() -> i32 {
- 15
- }
-
- #[inline]
- fn pow10(self, n: i32) -> f64 {
- // Check the exponent is within bounds in debug builds.
- debug_assert!({
- let (min, max) = Self::exponent_limit();
- n >= min && n <= max
- });
-
- if n > 0 {
- self * F64_POW10[n as usize]
- } else {
- self / F64_POW10[-n as usize]
- }
- }
-
- #[inline]
- fn from_bits(u: u64) -> f64 {
- f64::from_bits(u)
- }
-
- #[inline]
- fn to_bits(self) -> u64 {
- f64::to_bits(self)
- }
-
- #[inline]
- fn is_sign_positive(self) -> bool {
- f64::is_sign_positive(self)
- }
-
- #[inline]
- fn is_sign_negative(self) -> bool {
- f64::is_sign_negative(self)
- }
-}
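
The `pow10` implementations above rely on powers of ten up to 10^22 being exactly representable in an f64, so a single multiply or divide by a table entry is correctly rounded. A small standalone illustration of the same idea (hypothetical helper, not the trait method itself):

// Powers of ten up to 1e22 are exact in f64, so one multiply/divide suffices.
const POW10: [f64; 23] = [
    1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11,
    1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, 1e20, 1e21, 1e22,
];

fn pow10(x: f64, n: i32) -> f64 {
    debug_assert!((-22..=22).contains(&n)); // mirrors exponent_limit() for f64
    if n >= 0 {
        x * POW10[n as usize]
    } else {
        x / POW10[(-n) as usize]
    }
}

fn main() {
    assert_eq!(pow10(12345.0, -2), 123.45);
}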
diff --git a/vendor/serde_json/src/lexical/parse.rs b/vendor/serde_json/src/lexical/parse.rs
deleted file mode 100644
index e3d7f1e..0000000
--- a/vendor/serde_json/src/lexical/parse.rs
+++ /dev/null
@@ -1,83 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-use super::algorithm::*;
-use super::bhcomp::*;
-use super::digit::*;
-use super::exponent::*;
-use super::num::*;
-
-// PARSERS
-// -------
-
-/// Parse float for which the entire integer and fraction parts fit into a 64
-/// bit mantissa.
-pub fn parse_concise_float<F>(mantissa: u64, mant_exp: i32) -> F
-where
- F: Float,
-{
- if let Some(float) = fast_path(mantissa, mant_exp) {
- return float;
- }
-
- // Moderate path (use an extended 80-bit representation).
- let truncated = false;
- let (fp, valid) = moderate_path::<F>(mantissa, mant_exp, truncated);
- if valid {
- return fp.into_float::<F>();
- }
-
- let b = fp.into_downward_float::<F>();
- if b.is_special() {
- // We have a non-finite number, we get to leave early.
- return b;
- }
-
- // Slow path; neither the fast nor the moderate path worked.
- let mut buffer = itoa::Buffer::new();
- let integer = buffer.format(mantissa).as_bytes();
- let fraction = &[];
- bhcomp(b, integer, fraction, mant_exp)
-}
-
-/// Parse float from extracted float components.
-///
-/// * `integer` - Slice containing the integer digits.
-/// * `fraction` - Slice containing the fraction digits.
-/// * `exponent` - Parsed, 32-bit exponent.
-///
-/// Precondition: The integer must not have leading zeros.
-pub fn parse_truncated_float<F>(integer: &[u8], mut fraction: &[u8], exponent: i32) -> F
-where
- F: Float,
-{
- // Trim trailing zeroes from the fraction part.
- while fraction.last() == Some(&b'0') {
- fraction = &fraction[..fraction.len() - 1];
- }
-
- // Calculate the number of truncated digits.
- let mut truncated = 0;
- let mut mantissa: u64 = 0;
- let mut iter = integer.iter().chain(fraction);
- for &c in &mut iter {
- mantissa = match add_digit(mantissa, to_digit(c).unwrap()) {
- Some(v) => v,
- None => {
- truncated = 1 + iter.count();
- break;
- }
- };
- }
-
- let mant_exp = mantissa_exponent(exponent, fraction.len(), truncated);
- let is_truncated = true;
-
- fallback_path(
- integer,
- fraction,
- mantissa,
- exponent,
- mant_exp,
- is_truncated,
- )
-}
diff --git a/vendor/serde_json/src/lexical/rounding.rs b/vendor/serde_json/src/lexical/rounding.rs
deleted file mode 100644
index 6ec1292..0000000
--- a/vendor/serde_json/src/lexical/rounding.rs
+++ /dev/null
@@ -1,231 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Defines rounding schemes for floating-point numbers.
-
-use super::float::ExtendedFloat;
-use super::num::*;
-use super::shift::*;
-use core::mem;
-
-// MASKS
-
-/// Return the `n`th bit set as a mask, i.e. `1 << n`.
-#[inline]
-pub(crate) fn nth_bit(n: u64) -> u64 {
- let bits: u64 = mem::size_of::<u64>() as u64 * 8;
- debug_assert!(n < bits, "nth_bit() overflow in shl.");
-
- 1 << n
-}
-
-/// Generate a bitwise mask for the lower `n` bits.
-#[inline]
-pub(crate) fn lower_n_mask(n: u64) -> u64 {
- let bits: u64 = mem::size_of::<u64>() as u64 * 8;
- debug_assert!(n <= bits, "lower_n_mask() overflow in shl.");
-
- if n == bits {
- u64::max_value()
- } else {
- (1 << n) - 1
- }
-}
-
-/// Calculate the halfway point for the lower `n` bits.
-#[inline]
-pub(crate) fn lower_n_halfway(n: u64) -> u64 {
- let bits: u64 = mem::size_of::<u64>() as u64 * 8;
- debug_assert!(n <= bits, "lower_n_halfway() overflow in shl.");
-
- if n == 0 {
- 0
- } else {
- nth_bit(n - 1)
- }
-}
-
-/// Calculate a bitwise mask with `n` 1 bits starting at the `bit` position.
-#[inline]
-pub(crate) fn internal_n_mask(bit: u64, n: u64) -> u64 {
- let bits: u64 = mem::size_of::<u64>() as u64 * 8;
- debug_assert!(bit <= bits, "internal_n_mask() overflow in shl.");
- debug_assert!(n <= bits, "internal_n_mask() overflow in shl.");
- debug_assert!(bit >= n, "internal_n_mask() overflow in sub.");
-
- lower_n_mask(bit) ^ lower_n_mask(bit - n)
-}
-
-// NEAREST ROUNDING
-
-// Shift right N bits and round to the nearest.
-//
-// Return if we are above halfway and if we are halfway.
-#[inline]
-pub(crate) fn round_nearest(fp: &mut ExtendedFloat, shift: i32) -> (bool, bool) {
- // Extract the truncated bits using mask.
- // Calculate if the value of the truncated bits are either above
- // the mid-way point, or equal to it.
- //
- // For example, for 4 truncated bits, the mask would be b1111
- // and the midway point would be b1000.
- let mask: u64 = lower_n_mask(shift as u64);
- let halfway: u64 = lower_n_halfway(shift as u64);
-
- let truncated_bits = fp.mant & mask;
- let is_above = truncated_bits > halfway;
- let is_halfway = truncated_bits == halfway;
-
- // Bit shift so the leading bit is in the hidden bit.
- overflowing_shr(fp, shift);
-
- (is_above, is_halfway)
-}
-
-// Tie the rounded floating-point value to even.
-#[inline]
-pub(crate) fn tie_even(fp: &mut ExtendedFloat, is_above: bool, is_halfway: bool) {
- // Extract the last bit after shifting (and determine if it is odd).
- let is_odd = fp.mant & 1 == 1;
-
- // Calculate if we need to roundup.
- // We need to roundup if we are above halfway, or if we are odd
- // and at half-way (need to tie-to-even).
- if is_above || (is_odd && is_halfway) {
- fp.mant += 1;
- }
-}
-
-// Shift right N bits and round nearest, tie-to-even.
-//
-// Floating-point arithmetic uses round to nearest, ties to even,
-// which rounds to the nearest value, if the value is halfway in between,
-// round to an even value.
-#[inline]
-pub(crate) fn round_nearest_tie_even(fp: &mut ExtendedFloat, shift: i32) {
- let (is_above, is_halfway) = round_nearest(fp, shift);
- tie_even(fp, is_above, is_halfway);
-}
-
-// DIRECTED ROUNDING
-
-// Shift right N bits and round towards a direction.
-//
-// Return if we have any truncated bits.
-#[inline]
-fn round_toward(fp: &mut ExtendedFloat, shift: i32) -> bool {
- let mask: u64 = lower_n_mask(shift as u64);
- let truncated_bits = fp.mant & mask;
-
- // Bit shift so the leading bit is in the hidden bit.
- overflowing_shr(fp, shift);
-
- truncated_bits != 0
-}
-
-// Round down.
-#[inline]
-fn downward(_: &mut ExtendedFloat, _: bool) {}
-
-// Shift right N bits and round toward zero.
-//
-// Floating-point arithmetic defines round-toward-zero, which simply
-// truncates the discarded bits.
-#[inline]
-pub(crate) fn round_downward(fp: &mut ExtendedFloat, shift: i32) {
- // Bit shift so the leading bit is in the hidden bit.
- // No rounding schemes, so we just ignore everything else.
- let is_truncated = round_toward(fp, shift);
- downward(fp, is_truncated);
-}
-
-// ROUND TO FLOAT
-
-// Shift the ExtendedFloat fraction to the fraction bits in a native float.
-//
-// Floating-point arithmetic uses round to nearest, ties to even,
-// which rounds to the nearest value, if the value is halfway in between,
-// round to an even value.
-#[inline]
-pub(crate) fn round_to_float<F, Algorithm>(fp: &mut ExtendedFloat, algorithm: Algorithm)
-where
- F: Float,
- Algorithm: FnOnce(&mut ExtendedFloat, i32),
-{
- // Calculate the difference to allow a single calculation
- // rather than a loop, to minimize the number of ops required.
- // This does underflow detection.
- let final_exp = fp.exp + F::DEFAULT_SHIFT;
- if final_exp < F::DENORMAL_EXPONENT {
- // We would end up with a denormal exponent, so shift off extra
- // digits. Only shift right if we can avoid zeroing out the value,
- // which requires the exponent diff to be at most the mantissa
- // width (`u64::FULL`); otherwise, the value would be shifted to 0.
- let diff = F::DENORMAL_EXPONENT - fp.exp;
- if diff <= u64::FULL {
- // We can avoid underflow, can get a valid representation.
- algorithm(fp, diff);
- } else {
- // Certain underflow, assign literal 0s.
- fp.mant = 0;
- fp.exp = 0;
- }
- } else {
- algorithm(fp, F::DEFAULT_SHIFT);
- }
-
- if fp.mant & F::CARRY_MASK == F::CARRY_MASK {
- // Roundup carried over to 1 past the hidden bit.
- shr(fp, 1);
- }
-}
-
-// AVOID OVERFLOW/UNDERFLOW
-
-// Avoid overflow for large values, shift left as needed.
-//
-// Shift until a 1-bit is in the hidden bit, if the mantissa is not 0.
-#[inline]
-pub(crate) fn avoid_overflow<F>(fp: &mut ExtendedFloat)
-where
- F: Float,
-{
- // Calculate the difference to allow a single calculation
- // rather than a loop, minimizing the number of ops required.
- if fp.exp >= F::MAX_EXPONENT {
- let diff = fp.exp - F::MAX_EXPONENT;
- if diff <= F::MANTISSA_SIZE {
- // Our overflow mask needs to start at the hidden bit, or at
- // `F::MANTISSA_SIZE+1`, and needs to have `diff+1` bits set,
- // to see if our value overflows.
- let bit = (F::MANTISSA_SIZE + 1) as u64;
- let n = (diff + 1) as u64;
- let mask = internal_n_mask(bit, n);
- if (fp.mant & mask) == 0 {
- // None of the checked bits are set, so we can shift left by
- // `diff + 1` to bring the exponent back below the maximum.
- let shift = diff + 1;
- shl(fp, shift);
- }
- }
- }
-}
-
-// ROUND TO NATIVE
-
-// Round an extended-precision float to a native float representation.
-#[inline]
-pub(crate) fn round_to_native<F, Algorithm>(fp: &mut ExtendedFloat, algorithm: Algorithm)
-where
- F: Float,
- Algorithm: FnOnce(&mut ExtendedFloat, i32),
-{
- // Shift all the way left, to ensure a consistent representation.
- // The following right-shifts do not work for a non-normalized number.
- fp.normalize();
-
- // Round so the fraction is in a native mantissa representation,
- // and avoid overflow/underflow.
- round_to_float::<F, _>(fp, algorithm);
- avoid_overflow::<F>(fp);
-}
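
As a reading aid, the nearest/ties-to-even scheme implemented by `round_nearest` and `tie_even` above boils down to the following standalone sketch on a raw `u64` mantissa (simplified: no exponent bookkeeping, and assuming `0 < shift < 64`):

// Minimal sketch of shift-right with round-to-nearest, ties-to-even.
fn shr_round_nearest_even(mant: u64, shift: u32) -> u64 {
    let mask = (1u64 << shift) - 1;    // lower `shift` bits
    let halfway = 1u64 << (shift - 1); // midpoint of the truncated bits
    let truncated = mant & mask;
    let mut out = mant >> shift;
    let is_above = truncated > halfway;
    let is_halfway = truncated == halfway;
    let is_odd = out & 1 == 1;
    if is_above || (is_odd && is_halfway) {
        out += 1; // round up past the midpoint, or break the tie toward even
    }
    out
}

fn main() {
    assert_eq!(shr_round_nearest_even(0b1011, 2), 0b11);  // 2.75 -> 3
    assert_eq!(shr_round_nearest_even(0b1010, 2), 0b10);  // 2.5  -> 2 (tie to even)
    assert_eq!(shr_round_nearest_even(0b1110, 2), 0b100); // 3.5  -> 4 (tie to even)
}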
diff --git a/vendor/serde_json/src/lexical/shift.rs b/vendor/serde_json/src/lexical/shift.rs
deleted file mode 100644
index a0bae01..0000000
--- a/vendor/serde_json/src/lexical/shift.rs
+++ /dev/null
@@ -1,46 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Bit-shift helpers.
-
-use super::float::ExtendedFloat;
-use core::mem;
-
-// Shift extended-precision float right `shift` bits.
-#[inline]
-pub(crate) fn shr(fp: &mut ExtendedFloat, shift: i32) {
- let bits: u64 = mem::size_of::<u64>() as u64 * 8;
- debug_assert!((shift as u64) < bits, "shr() overflow in shift right.");
-
- fp.mant >>= shift;
- fp.exp += shift;
-}
-
-// Shift extended-precision float right `shift` bits.
-//
-// Also accepts a shift equal to the type size, in which case the
-// mantissa is set to 0.
-#[inline]
-pub(crate) fn overflowing_shr(fp: &mut ExtendedFloat, shift: i32) {
- let bits: u64 = mem::size_of::<u64>() as u64 * 8;
- debug_assert!(
- (shift as u64) <= bits,
- "overflowing_shr() overflow in shift right."
- );
-
- fp.mant = if shift as u64 == bits {
- 0
- } else {
- fp.mant >> shift
- };
- fp.exp += shift;
-}
-
-// Shift extended-precision float left `shift` bits.
-#[inline]
-pub(crate) fn shl(fp: &mut ExtendedFloat, shift: i32) {
- let bits: u64 = mem::size_of::<u64>() as u64 * 8;
- debug_assert!((shift as u64) < bits, "shl() overflow in shift left.");
-
- fp.mant <<= shift;
- fp.exp -= shift;
-}
diff --git a/vendor/serde_json/src/lexical/small_powers.rs b/vendor/serde_json/src/lexical/small_powers.rs
deleted file mode 100644
index 219d826..0000000
--- a/vendor/serde_json/src/lexical/small_powers.rs
+++ /dev/null
@@ -1,70 +0,0 @@
-// Adapted from https://github.com/Alexhuszagh/rust-lexical.
-
-//! Pre-computed small powers.
-
-// 32 BIT
-#[cfg(limb_width_32)]
-pub(crate) const POW5_32: [u32; 14] = [
- 1, 5, 25, 125, 625, 3125, 15625, 78125, 390625, 1953125, 9765625, 48828125, 244140625,
- 1220703125,
-];
-
-#[cfg(limb_width_32)]
-pub(crate) const POW10_32: [u32; 10] = [
- 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000,
-];
-
-// 64 BIT
-#[cfg(limb_width_64)]
-pub(crate) const POW5_64: [u64; 28] = [
- 1,
- 5,
- 25,
- 125,
- 625,
- 3125,
- 15625,
- 78125,
- 390625,
- 1953125,
- 9765625,
- 48828125,
- 244140625,
- 1220703125,
- 6103515625,
- 30517578125,
- 152587890625,
- 762939453125,
- 3814697265625,
- 19073486328125,
- 95367431640625,
- 476837158203125,
- 2384185791015625,
- 11920928955078125,
- 59604644775390625,
- 298023223876953125,
- 1490116119384765625,
- 7450580596923828125,
-];
-pub(crate) const POW10_64: [u64; 20] = [
- 1,
- 10,
- 100,
- 1000,
- 10000,
- 100000,
- 1000000,
- 10000000,
- 100000000,
- 1000000000,
- 10000000000,
- 100000000000,
- 1000000000000,
- 10000000000000,
- 100000000000000,
- 1000000000000000,
- 10000000000000000,
- 100000000000000000,
- 1000000000000000000,
- 10000000000000000000,
-];
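
A quick sanity check on the table sizes above: `POW5_64` stops at 5^27 and `POW10_64` at 10^19 because those are the largest powers of 5 and 10 that still fit in a u64 (a standalone check, not part of the vendored code):

fn main() {
    // 5^27 fits in a u64; 5^28 does not.
    assert_eq!(5u64.checked_pow(27), Some(7_450_580_596_923_828_125));
    assert_eq!(5u64.checked_pow(28), None);
    // 10^19 fits in a u64; 10^20 does not.
    assert_eq!(10u64.checked_pow(19), Some(10_000_000_000_000_000_000));
    assert_eq!(10u64.checked_pow(20), None);
}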