path: root/vendor/rand_core-0.3.1/src
Diffstat (limited to 'vendor/rand_core-0.3.1/src')
-rw-r--r--  vendor/rand_core-0.3.1/src/block.rs  499
-rw-r--r--  vendor/rand_core-0.3.1/src/error.rs  177
-rw-r--r--  vendor/rand_core-0.3.1/src/impls.rs  165
-rw-r--r--  vendor/rand_core-0.3.1/src/le.rs      68
-rw-r--r--  vendor/rand_core-0.3.1/src/lib.rs     46
5 files changed, 955 insertions, 0 deletions
diff --git a/vendor/rand_core-0.3.1/src/block.rs b/vendor/rand_core-0.3.1/src/block.rs
new file mode 100644
index 0000000..3045b94
--- /dev/null
+++ b/vendor/rand_core-0.3.1/src/block.rs
@@ -0,0 +1,499 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The `BlockRngCore` trait and implementation helpers
+//!
+//! The [`BlockRngCore`] trait exists to assist in the implementation of RNGs
+//! which generate a block of data in a cache instead of returning generated
+//! values directly.
+//!
+//! Usage of this trait is optional, but provides two advantages:
+//! implementations only need to concern themselves with generation of the
+//! block, not the various [`RngCore`] methods (especially [`fill_bytes`], where
+//! the optimal implementations are not trivial), and this allows
+//! `ReseedingRng` (see the [`rand`](https://docs.rs/rand) crate) to perform periodic
+//! reseeding with very low overhead.
+//!
+//! # Example
+//!
+//! ```no_run
+//! use rand_core::SeedableRng;
+//! use rand_core::block::{BlockRngCore, BlockRng};
+//!
+//! struct MyRngCore;
+//!
+//! impl BlockRngCore for MyRngCore {
+//!     type Item = u32;
+//!     type Results = [u32; 16];
+//!
+//! fn generate(&mut self, results: &mut Self::Results) {
+//! unimplemented!()
+//! }
+//! }
+//!
+//! impl SeedableRng for MyRngCore {
+//!     type Seed = [u8; 32];
+//! fn from_seed(seed: Self::Seed) -> Self {
+//! unimplemented!()
+//! }
+//! }
+//!
+//! // optionally, also implement CryptoRng for MyRngCore
+//!
+//! // Final RNG.
+//! type MyRng = BlockRng<MyRngCore>;
+//! ```
+//!
+//! [`BlockRngCore`]: crate::block::BlockRngCore
+//! [`fill_bytes`]: RngCore::fill_bytes
+
+use core::convert::AsRef;
+use core::fmt;
+use {RngCore, CryptoRng, SeedableRng, Error};
+use impls::{fill_via_u32_chunks, fill_via_u64_chunks};
+
+/// A trait for RNGs which do not generate random numbers individually, but in
+/// blocks (typically `[u32; N]`). This technique is commonly used by
+/// cryptographic RNGs to improve performance.
+///
+/// See the [module][crate::block] documentation for details.
+pub trait BlockRngCore {
+ /// Results element type, e.g. `u32`.
+ type Item;
+
+ /// Results type. This is the 'block' an RNG implementing `BlockRngCore`
+ /// generates, which will usually be an array like `[u32; 16]`.
+ type Results: AsRef<[Self::Item]> + AsMut<[Self::Item]> + Default;
+
+ /// Generate a new block of results.
+ fn generate(&mut self, results: &mut Self::Results);
+}
+
+
+/// A wrapper type implementing [`RngCore`] for some type implementing
+/// [`BlockRngCore`] with `u32` array buffer; i.e. this can be used to implement
+/// a full RNG from just a `generate` function.
+///
+/// The `core` field may be accessed directly but the results buffer may not.
+/// PRNG implementations can simply use a type alias
+/// (`pub type MyRng = BlockRng<MyRngCore>;`) but might prefer to use a
+/// wrapper type (`pub struct MyRng(BlockRng<MyRngCore>);`); the latter must
+/// re-implement `RngCore` but hides the implementation details and allows
+/// extra functionality to be defined on the RNG
+/// (e.g. `impl MyRng { fn set_stream(...){...} }`).
+///
+/// `BlockRng` has heavily optimized implementations of the [`RngCore`] methods
+/// reading values from the results buffer, as well as
+/// calling [`BlockRngCore::generate`] directly on the output array when
+/// [`fill_bytes`] / [`try_fill_bytes`] is called on a large array. These methods
+/// also handle the bookkeeping of when to generate a new batch of values.
+///
+/// No whole generated `u32` values are thrown away and all values are consumed
+/// in-order. [`next_u32`] simply takes the next available `u32` value.
+/// [`next_u64`] is implemented by combining two `u32` values, least
+/// significant first. [`fill_bytes`] and [`try_fill_bytes`] consume a whole
+/// number of `u32` values, converting each `u32` to a byte slice in
+/// little-endian order. If the requested byte length is not a multiple of 4,
+/// some bytes will be discarded.
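+///
+/// For example (an illustrative note): if the two words at the read position
+/// are `0x11111111` and `0x22222222`, then `next_u64` returns
+/// `0x2222222211111111` and advances past both of them.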
+///
+/// See also [`BlockRng64`] which uses `u64` array buffers. Currently there is
+/// no direct support for other buffer types.
+///
+/// For easy initialization `BlockRng` also implements [`SeedableRng`].
+///
+/// [`next_u32`]: RngCore::next_u32
+/// [`next_u64`]: RngCore::next_u64
+/// [`fill_bytes`]: RngCore::fill_bytes
+/// [`try_fill_bytes`]: RngCore::try_fill_bytes
+#[derive(Clone)]
+#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
+pub struct BlockRng<R: BlockRngCore + ?Sized> {
+ results: R::Results,
+ index: usize,
+ /// The *core* part of the RNG, implementing the `generate` function.
+ pub core: R,
+}
+
+// Custom Debug implementation that does not expose the contents of `results`.
+impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng<R> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("BlockRng")
+ .field("core", &self.core)
+ .field("result_len", &self.results.as_ref().len())
+ .field("index", &self.index)
+ .finish()
+ }
+}
+
+impl<R: BlockRngCore> BlockRng<R> {
+ /// Create a new `BlockRng` from an existing RNG implementing
+ /// `BlockRngCore`. Results will be generated on first use.
+    pub fn new(core: R) -> BlockRng<R> {
+ let results_empty = R::Results::default();
+ BlockRng {
+ core,
+ index: results_empty.as_ref().len(),
+ results: results_empty,
+ }
+ }
+
+ /// Get the index into the result buffer.
+ ///
+ /// If this is equal to or larger than the size of the result buffer then
+ /// the buffer is "empty" and `generate()` must be called to produce new
+ /// results.
+ pub fn index(&self) -> usize {
+ self.index
+ }
+
+ /// Reset the number of available results.
+ /// This will force a new set of results to be generated on next use.
+ pub fn reset(&mut self) {
+ self.index = self.results.as_ref().len();
+ }
+
+ /// Generate a new set of results immediately, setting the index to the
+ /// given value.
+ pub fn generate_and_set(&mut self, index: usize) {
+ assert!(index < self.results.as_ref().len());
+ self.core.generate(&mut self.results);
+ self.index = index;
+ }
+}
+
+impl<R: BlockRngCore<Item=u32>> RngCore for BlockRng<R>
+where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>
+{
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ if self.index >= self.results.as_ref().len() {
+ self.generate_and_set(0);
+ }
+
+ let value = self.results.as_ref()[self.index];
+ self.index += 1;
+ value
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ let read_u64 = |results: &[u32], index| {
+ if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
+                // requires a little-endian CPU; the source is only 4-byte
+                // aligned, so an unaligned read must be used:
+                unsafe { (&results[index] as *const u32 as *const u64).read_unaligned() }
+ } else {
+ let x = u64::from(results[index]);
+ let y = u64::from(results[index + 1]);
+ (y << 32) | x
+ }
+ };
+
+ let len = self.results.as_ref().len();
+
+ let index = self.index;
+ if index < len-1 {
+ self.index += 2;
+ // Read an u64 from the current index
+ read_u64(self.results.as_ref(), index)
+ } else if index >= len {
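+            // Buffer exhausted: generate a fresh block, consume its first
+            // two words and leave the index pointing past them.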
+ self.generate_and_set(2);
+ read_u64(self.results.as_ref(), 0)
+ } else {
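+            // Only one u32 is left: combine it (as the least-significant
+            // half) with the first word of a freshly generated block.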
+ let x = u64::from(self.results.as_ref()[len-1]);
+ self.generate_and_set(1);
+ let y = u64::from(self.results.as_ref()[0]);
+ (y << 32) | x
+ }
+ }
+
+ // As an optimization we try to write directly into the output buffer.
+ // This is only enabled for little-endian platforms where unaligned writes
+ // are known to be safe and fast.
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ let mut filled = 0;
+
+ // Continue filling from the current set of results
+ if self.index < self.results.as_ref().len() {
+ let (consumed_u32, filled_u8) =
+ fill_via_u32_chunks(&self.results.as_ref()[self.index..],
+ dest);
+
+ self.index += consumed_u32;
+ filled += filled_u8;
+ }
+
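+        // Bytes at the end of `dest` that do not fill a whole results buffer
+        // are handled separately below; everything before `end_direct` is
+        // generated directly into `dest`.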
+ let len_remainder =
+ (dest.len() - filled) % (self.results.as_ref().len() * 4);
+ let end_direct = dest.len() - len_remainder;
+
+ while filled < end_direct {
+ let dest_u32: &mut R::Results = unsafe {
+ &mut *(dest[filled..].as_mut_ptr() as
+ *mut <R as BlockRngCore>::Results)
+ };
+ self.core.generate(dest_u32);
+ filled += self.results.as_ref().len() * 4;
+ self.index = self.results.as_ref().len();
+ }
+
+ if len_remainder > 0 {
+ self.core.generate(&mut self.results);
+ let (consumed_u32, _) =
+ fill_via_u32_chunks(self.results.as_ref(),
+ &mut dest[filled..]);
+
+ self.index = consumed_u32;
+ }
+ }
+
+ #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ let mut read_len = 0;
+ while read_len < dest.len() {
+ if self.index >= self.results.as_ref().len() {
+ self.generate_and_set(0);
+ }
+ let (consumed_u32, filled_u8) =
+ fill_via_u32_chunks(&self.results.as_ref()[self.index..],
+ &mut dest[read_len..]);
+
+ self.index += consumed_u32;
+ read_len += filled_u8;
+ }
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.fill_bytes(dest);
+ Ok(())
+ }
+}
+
+impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> {
+ type Seed = R::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ Self::new(R::from_seed(seed))
+ }
+
+ fn seed_from_u64(seed: u64) -> Self {
+ Self::new(R::seed_from_u64(seed))
+ }
+
+ fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
+ Ok(Self::new(R::from_rng(rng)?))
+ }
+}
+
+
+
+/// A wrapper type implementing [`RngCore`] for some type implementing
+/// [`BlockRngCore`] with `u64` array buffer; i.e. this can be used to implement
+/// a full RNG from just a `generate` function.
+///
+/// This is similar to [`BlockRng`], but specialized for algorithms that operate
+/// on `u64` values.
+///
+/// No whole generated `u64` values are thrown away and all values are consumed
+/// in-order. [`next_u64`] simply takes the next available `u64` value.
+/// [`next_u32`] is however a bit special: half of a `u64` is consumed, leaving
+/// the other half in the buffer. If the next function called is [`next_u32`]
+/// then the other half is then consumed, however both [`next_u64`] and
+/// [`fill_bytes`] discard the rest of any half-consumed `u64`s when called.
+///
+/// [`fill_bytes`] and [`try_fill_bytes`] consume a whole number of `u64`
+/// values. If the requested length is not a multiple of 8, some bytes will be
+/// discarded.
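+///
+/// For example (an illustrative note): if the next buffered `u64` is
+/// `0x2222222211111111`, one `next_u32` call returns `0x11111111` and a
+/// second returns `0x22222222`; if `next_u64` or `fill_bytes` is called
+/// between them instead, the `0x22222222` half is discarded.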
+///
+/// [`next_u32`]: RngCore::next_u32
+/// [`next_u64`]: RngCore::next_u64
+/// [`fill_bytes`]: RngCore::fill_bytes
+/// [`try_fill_bytes`]: RngCore::try_fill_bytes
+#[derive(Clone)]
+#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
+pub struct BlockRng64<R: BlockRngCore + ?Sized> {
+ results: R::Results,
+ index: usize,
+ half_used: bool, // true if only half of the previous result is used
+ /// The *core* part of the RNG, implementing the `generate` function.
+ pub core: R,
+}
+
+// Custom Debug implementation that does not expose the contents of `results`.
+impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng64<R> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_struct("BlockRng64")
+ .field("core", &self.core)
+ .field("result_len", &self.results.as_ref().len())
+ .field("index", &self.index)
+ .field("half_used", &self.half_used)
+ .finish()
+ }
+}
+
+impl<R: BlockRngCore> BlockRng64<R> {
+    /// Create a new `BlockRng64` from an existing RNG implementing
+ /// `BlockRngCore`. Results will be generated on first use.
+    pub fn new(core: R) -> BlockRng64<R> {
+ let results_empty = R::Results::default();
+ BlockRng64 {
+ core,
+ index: results_empty.as_ref().len(),
+ half_used: false,
+ results: results_empty,
+ }
+ }
+
+ /// Get the index into the result buffer.
+ ///
+ /// If this is equal to or larger than the size of the result buffer then
+ /// the buffer is "empty" and `generate()` must be called to produce new
+ /// results.
+ pub fn index(&self) -> usize {
+ self.index
+ }
+
+ /// Reset the number of available results.
+ /// This will force a new set of results to be generated on next use.
+ pub fn reset(&mut self) {
+ self.index = self.results.as_ref().len();
+ self.half_used = false;
+ }
+
+ /// Generate a new set of results immediately, setting the index to the
+ /// given value.
+ pub fn generate_and_set(&mut self, index: usize) {
+ assert!(index < self.results.as_ref().len());
+ self.core.generate(&mut self.results);
+ self.index = index;
+ self.half_used = false;
+ }
+}
+
+impl<R: BlockRngCore<Item=u64>> RngCore for BlockRng64<R>
+where <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]>
+{
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
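+        // Work with an index in units of `u32` halves: each `u64` yields two
+        // `u32` values, and `half_used` steps back to the half that is still
+        // unconsumed.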
+ let mut index = self.index * 2 - self.half_used as usize;
+ if index >= self.results.as_ref().len() * 2 {
+ self.core.generate(&mut self.results);
+ self.index = 0;
+ // `self.half_used` is by definition `false`
+ self.half_used = false;
+ index = 0;
+ }
+
+ self.half_used = !self.half_used;
+ self.index += self.half_used as usize;
+
+ // Index as if this is a u32 slice.
+ unsafe {
+ let results =
+ &*(self.results.as_ref() as *const [u64] as *const [u32]);
+ if cfg!(target_endian = "little") {
+ *results.get_unchecked(index)
+ } else {
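+                // On big-endian targets the least-significant half of each
+                // u64 is the second u32, hence `index ^ 1`.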
+ *results.get_unchecked(index ^ 1)
+ }
+ }
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ if self.index >= self.results.as_ref().len() {
+ self.core.generate(&mut self.results);
+ self.index = 0;
+ }
+
+ let value = self.results.as_ref()[self.index];
+ self.index += 1;
+ self.half_used = false;
+ value
+ }
+
+ // As an optimization we try to write directly into the output buffer.
+ // This is only enabled for little-endian platforms where unaligned writes
+ // are known to be safe and fast.
+ #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ let mut filled = 0;
+ self.half_used = false;
+
+ // Continue filling from the current set of results
+ if self.index < self.results.as_ref().len() {
+ let (consumed_u64, filled_u8) =
+ fill_via_u64_chunks(&self.results.as_ref()[self.index..],
+ dest);
+
+ self.index += consumed_u64;
+ filled += filled_u8;
+ }
+
+ let len_remainder =
+ (dest.len() - filled) % (self.results.as_ref().len() * 8);
+ let end_direct = dest.len() - len_remainder;
+
+ while filled < end_direct {
+ let dest_u64: &mut R::Results = unsafe {
+ ::core::mem::transmute(dest[filled..].as_mut_ptr())
+ };
+ self.core.generate(dest_u64);
+ filled += self.results.as_ref().len() * 8;
+ self.index = self.results.as_ref().len();
+ }
+
+ if len_remainder > 0 {
+ self.core.generate(&mut self.results);
+ let (consumed_u64, _) =
+                fill_via_u64_chunks(self.results.as_ref(),
+ &mut dest[filled..]);
+
+ self.index = consumed_u64;
+ }
+ }
+
+ #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ let mut read_len = 0;
+ self.half_used = false;
+ while read_len < dest.len() {
+            if self.index >= self.results.as_ref().len() {
+ self.core.generate(&mut self.results);
+ self.index = 0;
+ }
+
+ let (consumed_u64, filled_u8) =
+                fill_via_u64_chunks(&self.results.as_ref()[self.index..],
+ &mut dest[read_len..]);
+
+ self.index += consumed_u64;
+ read_len += filled_u8;
+ }
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+        self.fill_bytes(dest);
+        Ok(())
+ }
+}
+
+impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> {
+ type Seed = R::Seed;
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ Self::new(R::from_seed(seed))
+ }
+
+ fn seed_from_u64(seed: u64) -> Self {
+ Self::new(R::seed_from_u64(seed))
+ }
+
+ fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
+ Ok(Self::new(R::from_rng(rng)?))
+ }
+}
+
+impl<R: BlockRngCore + CryptoRng> CryptoRng for BlockRng<R> {}
+
+impl<R: BlockRngCore + CryptoRng> CryptoRng for BlockRng64<R> {}
diff --git a/vendor/rand_core-0.3.1/src/error.rs b/vendor/rand_core-0.3.1/src/error.rs
new file mode 100644
index 0000000..5a8459e
--- /dev/null
+++ b/vendor/rand_core-0.3.1/src/error.rs
@@ -0,0 +1,177 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Error types
+
+use core::fmt;
+
+#[cfg(feature="std")]
+use std::error::Error as stdError;
+#[cfg(feature="std")]
+use std::io;
+
+/// Error kind which can be matched over.
+#[derive(PartialEq, Eq, Debug, Copy, Clone)]
+pub enum ErrorKind {
+ /// Feature is not available; not recoverable.
+ ///
+ /// This is the most permanent failure type and implies the error cannot be
+ /// resolved simply by retrying (e.g. the feature may not exist in this
+ /// build of the application or on the current platform).
+ Unavailable,
+ /// General failure; there may be a chance of recovery on retry.
+ ///
+ /// This is the catch-all kind for errors from known and unknown sources
+ /// which do not have a more specific kind / handling method.
+ ///
+ /// It is suggested to retry a couple of times or retry later when
+ /// handling; some error sources may be able to resolve themselves,
+ /// although this is not likely.
+ Unexpected,
+ /// A transient failure which likely can be resolved or worked around.
+ ///
+ /// This error kind exists for a few specific cases where it is known that
+ /// the error likely can be resolved internally, but is reported anyway.
+ Transient,
+ /// Not ready yet: recommended to try again a little later.
+ ///
+ /// This error kind implies the generator needs more time or needs some
+ /// other part of the application to do something else first before it is
+ /// ready for use; for example this may be used by external generators
+ /// which require time for initialization.
+ NotReady,
+ #[doc(hidden)]
+ __Nonexhaustive,
+}
+
+impl ErrorKind {
+ /// True if this kind of error may resolve itself on retry.
+ ///
+ /// See also `should_wait()`.
+ pub fn should_retry(self) -> bool {
+ self != ErrorKind::Unavailable
+ }
+
+ /// True if we should retry but wait before retrying
+ ///
+ /// This implies `should_retry()` is true.
+ pub fn should_wait(self) -> bool {
+ self == ErrorKind::NotReady
+ }
+
+ /// A description of this error kind
+ pub fn description(self) -> &'static str {
+ match self {
+ ErrorKind::Unavailable => "permanently unavailable",
+ ErrorKind::Unexpected => "unexpected failure",
+ ErrorKind::Transient => "transient failure",
+ ErrorKind::NotReady => "not ready yet",
+ ErrorKind::__Nonexhaustive => unreachable!(),
+ }
+ }
+}
+
+
+/// Error type of random number generators
+///
+/// This is a relatively simple error type, designed for compatibility with and
+/// without the Rust `std` library. It embeds a "kind" code, a message (static
+/// string only), and an optional chained cause (`std` only). The `kind` and
+/// `msg` fields can be accessed directly; cause can be accessed via
+/// `std::error::Error::cause` or `Error::take_cause`. Construction can only be
+/// done via `Error::new` or `Error::with_cause`.
+#[derive(Debug)]
+pub struct Error {
+ /// The error kind
+ pub kind: ErrorKind,
+ /// The error message
+ pub msg: &'static str,
+ #[cfg(feature="std")]
+ cause: Option<Box<stdError + Send + Sync>>,
+}
+
+impl Error {
+ /// Create a new instance, with specified kind and a message.
+ pub fn new(kind: ErrorKind, msg: &'static str) -> Self {
+ #[cfg(feature="std")] {
+ Error { kind, msg, cause: None }
+ }
+ #[cfg(not(feature="std"))] {
+ Error { kind, msg }
+ }
+ }
+
+ /// Create a new instance, with specified kind, message, and a
+ /// chained cause.
+ ///
+ /// Note: `stdError` is an alias for `std::error::Error`.
+ ///
+    /// If not targeting `std` (i.e. `no_std`), this function is replaced by
+ /// another with the same prototype, except that there are no bounds on the
+ /// type `E` (because both `Box` and `stdError` are unavailable), and the
+ /// `cause` is ignored.
+ #[cfg(feature="std")]
+ pub fn with_cause<E>(kind: ErrorKind, msg: &'static str, cause: E) -> Self
+ where E: Into<Box<stdError + Send + Sync>>
+ {
+ Error { kind, msg, cause: Some(cause.into()) }
+ }
+
+ /// Create a new instance, with specified kind, message, and a
+ /// chained cause.
+ ///
+ /// In `no_std` mode the *cause* is ignored.
+ #[cfg(not(feature="std"))]
+ pub fn with_cause<E>(kind: ErrorKind, msg: &'static str, _cause: E) -> Self {
+ Error { kind, msg }
+ }
+
+ /// Take the cause, if any. This allows the embedded cause to be extracted.
+ /// This uses `Option::take`, leaving `self` with no cause.
+ #[cfg(feature="std")]
+ pub fn take_cause(&mut self) -> Option<Box<stdError + Send + Sync>> {
+ self.cause.take()
+ }
+}
+
+impl fmt::Display for Error {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ #[cfg(feature="std")] {
+ if let Some(ref cause) = self.cause {
+ return write!(f, "{} ({}); cause: {}",
+ self.msg, self.kind.description(), cause);
+ }
+ }
+ write!(f, "{} ({})", self.msg, self.kind.description())
+ }
+}
+
+#[cfg(feature="std")]
+impl stdError for Error {
+ fn description(&self) -> &str {
+ self.msg
+ }
+
+ fn cause(&self) -> Option<&stdError> {
+ self.cause.as_ref().map(|e| e.as_ref() as &stdError)
+ }
+}
+
+#[cfg(feature="std")]
+impl From<Error> for io::Error {
+ fn from(error: Error) -> Self {
+ use std::io::ErrorKind::*;
+ match error.kind {
+ ErrorKind::Unavailable => io::Error::new(NotFound, error),
+ ErrorKind::Unexpected |
+ ErrorKind::Transient => io::Error::new(Other, error),
+ ErrorKind::NotReady => io::Error::new(WouldBlock, error),
+ ErrorKind::__Nonexhaustive => unreachable!(),
+ }
+ }
+}
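+
+// A small sanity check for the retry hints above (an added sketch, not part
+// of the upstream source).
+#[test]
+fn test_error_kind_retry() {
+    assert!(ErrorKind::NotReady.should_retry());
+    assert!(ErrorKind::NotReady.should_wait());
+    assert!(ErrorKind::Transient.should_retry());
+    assert!(!ErrorKind::Unavailable.should_retry());
+    assert!(!ErrorKind::Unexpected.should_wait());
+}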
diff --git a/vendor/rand_core-0.3.1/src/impls.rs b/vendor/rand_core-0.3.1/src/impls.rs
new file mode 100644
index 0000000..57bdd07
--- /dev/null
+++ b/vendor/rand_core-0.3.1/src/impls.rs
@@ -0,0 +1,165 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Helper functions for implementing `RngCore` functions.
+//!
+//! For cross-platform reproducibility, these functions all use Little Endian:
+//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
+//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
+//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
+//!
+//! Byte-swapping (like the std `to_le` functions) is only needed to convert
+//! to/from byte sequences, and since its purpose is reproducibility,
+//! non-reproducible sources (e.g. `OsRng`) need not bother with it.
+
+use core::ptr::copy_nonoverlapping;
+use core::slice;
+use core::cmp::min;
+use core::mem::{size_of, transmute};
+use RngCore;
+
+
+/// Implement `next_u64` via `next_u32`, little-endian order.
+pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
+ // Use LE; we explicitly generate one value before the next.
+ let x = u64::from(rng.next_u32());
+ let y = u64::from(rng.next_u32());
+ (y << 32) | x
+}
+
+/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order.
+///
+/// The fastest way to fill a slice is usually to work as long as possible with
+/// integers. That is why this method mostly uses `next_u64`, and only calls
+/// `next_u32` once, when four or fewer bytes remain at the end of the slice.
+pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
+ let mut left = dest;
+ while left.len() >= 8 {
+ let (l, r) = {left}.split_at_mut(8);
+ left = r;
+ let chunk: [u8; 8] = unsafe {
+ transmute(rng.next_u64().to_le())
+ };
+ l.copy_from_slice(&chunk);
+ }
+ let n = left.len();
+ if n > 4 {
+ let chunk: [u8; 8] = unsafe {
+ transmute(rng.next_u64().to_le())
+ };
+ left.copy_from_slice(&chunk[..n]);
+ } else if n > 0 {
+ let chunk: [u8; 4] = unsafe {
+ transmute(rng.next_u32().to_le())
+ };
+ left.copy_from_slice(&chunk[..n]);
+ }
+}
+
+macro_rules! impl_uint_from_fill {
+ ($rng:expr, $ty:ty, $N:expr) => ({
+ debug_assert!($N == size_of::<$ty>());
+
+ let mut int: $ty = 0;
+ unsafe {
+ let ptr = &mut int as *mut $ty as *mut u8;
+ let slice = slice::from_raw_parts_mut(ptr, $N);
+ $rng.fill_bytes(slice);
+ }
+ int
+ });
+}
+
+macro_rules! fill_via_chunks {
+ ($src:expr, $dst:expr, $ty:ty, $size:expr) => ({
+ let chunk_size_u8 = min($src.len() * $size, $dst.len());
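+        // Round up: a trailing word that is only partially copied still
+        // counts as consumed.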
+ let chunk_size = (chunk_size_u8 + $size - 1) / $size;
+ if cfg!(target_endian="little") {
+ unsafe {
+ copy_nonoverlapping(
+ $src.as_ptr() as *const u8,
+ $dst.as_mut_ptr(),
+ chunk_size_u8);
+ }
+ } else {
+ for (&n, chunk) in $src.iter().zip($dst.chunks_mut($size)) {
+ let tmp = n.to_le();
+ let src_ptr = &tmp as *const $ty as *const u8;
+ unsafe {
+ copy_nonoverlapping(src_ptr,
+ chunk.as_mut_ptr(),
+ chunk.len());
+ }
+ }
+ }
+
+ (chunk_size, chunk_size_u8)
+ });
+}
+
+/// Implement `fill_bytes` by reading chunks from the output buffer of a block
+/// based RNG.
+///
+/// The return values are `(consumed_u32, filled_u8)`.
+///
+/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
+/// the length of `dest`.
+/// `consumed_u32` is the number of words consumed from `src`, which is the same
+/// as `filled_u8 / 4` rounded up.
+///
+/// # Example
+/// (from `IsaacRng`)
+///
+/// ```ignore
+/// fn fill_bytes(&mut self, dest: &mut [u8]) {
+/// let mut read_len = 0;
+/// while read_len < dest.len() {
+/// if self.index >= self.rsl.len() {
+/// self.isaac();
+/// }
+///
+/// let (consumed_u32, filled_u8) =
+/// impls::fill_via_u32_chunks(&mut self.rsl[self.index..],
+/// &mut dest[read_len..]);
+///
+/// self.index += consumed_u32;
+/// read_len += filled_u8;
+/// }
+/// }
+/// ```
+pub fn fill_via_u32_chunks(src: &[u32], dest: &mut [u8]) -> (usize, usize) {
+ fill_via_chunks!(src, dest, u32, 4)
+}
+
+/// Implement `fill_bytes` by reading chunks from the output buffer of a block
+/// based RNG.
+///
+/// The return values are `(consumed_u64, filled_u8)`.
+/// `filled_u8` is the number of filled bytes in `dest`, which may be less than
+/// the length of `dest`.
+/// `consumed_u64` is the number of words consumed from `src`, which is the same
+/// as `filled_u8 / 8` rounded up.
+///
+/// See `fill_via_u32_chunks` for an example.
+pub fn fill_via_u64_chunks(src: &[u64], dest: &mut [u8]) -> (usize, usize) {
+ fill_via_chunks!(src, dest, u64, 8)
+}
+
+/// Implement `next_u32` via `fill_bytes`, little-endian order.
+pub fn next_u32_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u32 {
+ impl_uint_from_fill!(rng, u32, 4)
+}
+
+/// Implement `next_u64` via `fill_bytes`, little-endian order.
+pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
+ impl_uint_from_fill!(rng, u64, 8)
+}
+
+// TODO: implement tests for the above
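+
+// A first test for the chunk-filling helpers above (an added sketch toward
+// the TODO; the expected values follow the little-endian doc comments).
+#[test]
+fn test_fill_via_u32_chunks() {
+    let src = [1u32, 2, 3];
+    let mut dst = [0u8; 11];
+    // Three words are consumed to fill 11 bytes; the last byte of the third
+    // word is dropped.
+    assert_eq!(fill_via_u32_chunks(&src, &mut dst), (3, 11));
+    assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0]);
+}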
diff --git a/vendor/rand_core-0.3.1/src/le.rs b/vendor/rand_core-0.3.1/src/le.rs
new file mode 100644
index 0000000..266651f
--- /dev/null
+++ b/vendor/rand_core-0.3.1/src/le.rs
@@ -0,0 +1,68 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Little-Endian utilities
+//!
+//! Little-Endian order has been chosen for internal usage; the functions here
+//! read integer values from byte sequences in that order.
+
+use core::ptr;
+
+macro_rules! read_slice {
+ ($src:expr, $dst:expr, $size:expr, $which:ident) => {{
+ assert_eq!($src.len(), $size * $dst.len());
+
+ unsafe {
+ ptr::copy_nonoverlapping(
+ $src.as_ptr(),
+ $dst.as_mut_ptr() as *mut u8,
+ $src.len());
+ }
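+        // The raw copy above stored little-endian bytes; `$which` (`to_le`)
+        // byte-swaps each element on big-endian targets and is a no-op on
+        // little-endian ones.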
+ for v in $dst.iter_mut() {
+ *v = v.$which();
+ }
+ }};
+}
+
+/// Reads unsigned 32 bit integers from `src` into `dst`.
+/// Borrowed from the `byteorder` crate.
+#[inline]
+pub fn read_u32_into(src: &[u8], dst: &mut [u32]) {
+ read_slice!(src, dst, 4, to_le);
+}
+
+/// Reads unsigned 64 bit integers from `src` into `dst`.
+/// Borrowed from the `byteorder` crate.
+#[inline]
+pub fn read_u64_into(src: &[u8], dst: &mut [u64]) {
+ read_slice!(src, dst, 8, to_le);
+}
+
+#[test]
+fn test_read() {
+ let bytes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
+
+ let mut buf = [0u32; 4];
+ read_u32_into(&bytes, &mut buf);
+ assert_eq!(buf[0], 0x04030201);
+ assert_eq!(buf[3], 0x100F0E0D);
+
+ let mut buf = [0u32; 3];
+ read_u32_into(&bytes[1..13], &mut buf); // unaligned
+ assert_eq!(buf[0], 0x05040302);
+ assert_eq!(buf[2], 0x0D0C0B0A);
+
+ let mut buf = [0u64; 2];
+ read_u64_into(&bytes, &mut buf);
+ assert_eq!(buf[0], 0x0807060504030201);
+ assert_eq!(buf[1], 0x100F0E0D0C0B0A09);
+
+ let mut buf = [0u64; 1];
+ read_u64_into(&bytes[7..15], &mut buf); // unaligned
+ assert_eq!(buf[0], 0x0F0E0D0C0B0A0908);
+}
diff --git a/vendor/rand_core-0.3.1/src/lib.rs b/vendor/rand_core-0.3.1/src/lib.rs
new file mode 100644
index 0000000..8923142
--- /dev/null
+++ b/vendor/rand_core-0.3.1/src/lib.rs
@@ -0,0 +1,46 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2017-2018 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Random number generation traits
+//!
+//! This version of `rand_core` is a compatibility shim around version 0.4.
+//!
+//! This crate is mainly of interest to crates publishing implementations of
+//! [`RngCore`]. Other users are encouraged to use the [`rand`] crate instead
+//! which re-exports the main traits and error types.
+//!
+//! [`RngCore`] is the core trait implemented by algorithmic pseudo-random number
+//! generators and external random-number sources.
+//!
+//! [`SeedableRng`] is an extension trait for construction from fixed seeds and
+//! other random number generators.
+//!
+//! [`Error`] is provided for error-handling. It is safe to use in `no_std`
+//! environments.
+//!
+//! The [`impls`] and [`le`] sub-modules include a few small functions to assist
+//! implementation of [`RngCore`].
+//!
+//! [`rand`]: https://docs.rs/rand
+
+#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
+ html_favicon_url = "https://www.rust-lang.org/favicon.ico",
+ html_root_url = "https://rust-random.github.io/rand/")]
+
+#![deny(missing_docs)]
+#![deny(missing_debug_implementations)]
+#![doc(test(attr(allow(unused_variables), deny(warnings))))]
+
+#![no_std]
+
+extern crate rand_core as core4;
+
+pub use core4::{ErrorKind, Error};
+pub use core4::{block, impls, le};
+pub use core4::{RngCore, CryptoRng, SeedableRng};