author     Valentin Popov <valentin@popov.link>  2024-01-08 00:21:28 +0300
committer  Valentin Popov <valentin@popov.link>  2024-01-08 00:21:28 +0300
commit     1b6a04ca5504955c571d1c97504fb45ea0befee4 (patch)
tree       7579f518b23313e8a9748a88ab6173d5e030b227 /vendor/bytemuck/src
parent     5ecd8cf2cba827454317368b68571df0d13d7842 (diff)
download   fparkan-1b6a04ca5504955c571d1c97504fb45ea0befee4.tar.xz
           fparkan-1b6a04ca5504955c571d1c97504fb45ea0befee4.zip
Initial vendor packages
Signed-off-by: Valentin Popov <valentin@popov.link>
Diffstat (limited to 'vendor/bytemuck/src')
-rw-r--r--  vendor/bytemuck/src/allocation.rs          689
-rw-r--r--  vendor/bytemuck/src/anybitpattern.rs        61
-rw-r--r--  vendor/bytemuck/src/checked.rs             522
-rw-r--r--  vendor/bytemuck/src/contiguous.rs          202
-rw-r--r--  vendor/bytemuck/src/internal.rs            402
-rw-r--r--  vendor/bytemuck/src/lib.rs                 457
-rw-r--r--  vendor/bytemuck/src/must.rs                203
-rw-r--r--  vendor/bytemuck/src/no_uninit.rs            80
-rw-r--r--  vendor/bytemuck/src/offset_of.rs           135
-rw-r--r--  vendor/bytemuck/src/pod.rs                 165
-rw-r--r--  vendor/bytemuck/src/pod_in_option.rs        27
-rw-r--r--  vendor/bytemuck/src/transparent.rs         288
-rw-r--r--  vendor/bytemuck/src/zeroable.rs            245
-rw-r--r--  vendor/bytemuck/src/zeroable_in_option.rs   35
14 files changed, 3511 insertions, 0 deletions
diff --git a/vendor/bytemuck/src/allocation.rs b/vendor/bytemuck/src/allocation.rs
new file mode 100644
index 0000000..a2633b5
--- /dev/null
+++ b/vendor/bytemuck/src/allocation.rs
@@ -0,0 +1,689 @@
+#![cfg(feature = "extern_crate_alloc")]
+
+//! Helpers for using bytemuck's traits with types from the `alloc` crate.
+//!
+//! * You must enable the `extern_crate_alloc` feature of `bytemuck` or you will
+//! not be able to use this module! This is generally done by adding the
+//! feature to the dependency in Cargo.toml like so:
+//!
+//! `bytemuck = { version = "VERSION_YOU_ARE_USING", features =
+//! ["extern_crate_alloc"]}`
+
+use super::*;
+#[cfg(target_has_atomic = "ptr")]
+use alloc::sync::Arc;
+use alloc::{
+ alloc::{alloc_zeroed, Layout},
+ boxed::Box,
+ rc::Rc,
+ vec,
+ vec::Vec,
+};
+
+/// As [`try_cast_box`](try_cast_box), but unwraps for you.
+#[inline]
+pub fn cast_box<A: NoUninit, B: AnyBitPattern>(input: Box<A>) -> Box<B> {
+ try_cast_box(input).map_err(|(e, _v)| e).unwrap()
+}
+
+/// Attempts to cast the content type of a [`Box`](alloc::boxed::Box).
+///
+/// On failure you get back an error along with the starting `Box`.
+///
+/// ## Failure
+///
+/// * The start and end content type of the `Box` must have the exact same
+/// alignment.
+/// * The start and end content type of the `Box` must have the exact same
+///   size.
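+///
+/// ## Example
+///
+/// A minimal sketch of a same-size, same-alignment cast:
+///
+/// ```rust
+/// # use bytemuck::*;
+/// let boxed: Box<u32> = Box::new(12345);
+/// let signed: Box<i32> = try_cast_box(boxed).unwrap();
+/// assert_eq!(*signed, 12345);
+/// ```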
+#[inline]
+pub fn try_cast_box<A: NoUninit, B: AnyBitPattern>(
+ input: Box<A>,
+) -> Result<Box<B>, (PodCastError, Box<A>)> {
+ if align_of::<A>() != align_of::<B>() {
+ Err((PodCastError::AlignmentMismatch, input))
+ } else if size_of::<A>() != size_of::<B>() {
+ Err((PodCastError::SizeMismatch, input))
+ } else {
+ // Note(Lokathor): This is much simpler than with the Vec casting!
+ let ptr: *mut B = Box::into_raw(input) as *mut B;
+ Ok(unsafe { Box::from_raw(ptr) })
+ }
+}
+
+/// Allocates a `Box<T>` with all of the contents being zeroed out.
+///
+/// This uses the global allocator to create a zeroed allocation and _then_
+/// turns it into a Box. In other words, it's 100% assured that the zeroed data
+/// won't be put temporarily on the stack. You can make a box of any size
+/// without fear of a stack overflow.
+///
+/// ## Failure
+///
+/// This fails if the allocation fails.
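+///
+/// ## Example
+///
+/// A minimal sketch, zeroing an array directly on the heap:
+///
+/// ```rust
+/// # use bytemuck::*;
+/// let zeroes: Box<[u64; 32]> = try_zeroed_box().unwrap();
+/// assert!(zeroes.iter().all(|&x| x == 0));
+/// ```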
+#[inline]
+pub fn try_zeroed_box<T: Zeroable>() -> Result<Box<T>, ()> {
+ if size_of::<T>() == 0 {
+ // This will not allocate but simply create a dangling pointer.
+ let dangling = core::ptr::NonNull::dangling().as_ptr();
+ return Ok(unsafe { Box::from_raw(dangling) });
+ }
+ let layout = Layout::new::<T>();
+ let ptr = unsafe { alloc_zeroed(layout) };
+ if ptr.is_null() {
+ // we don't know what the error is because `alloc_zeroed` is a dumb API
+ Err(())
+ } else {
+ Ok(unsafe { Box::<T>::from_raw(ptr as *mut T) })
+ }
+}
+
+/// As [`try_zeroed_box`], but unwraps for you.
+#[inline]
+pub fn zeroed_box<T: Zeroable>() -> Box<T> {
+ try_zeroed_box().unwrap()
+}
+
+/// Allocates a `Vec<T>` of length and capacity exactly equal to `length` and
+/// all elements zeroed.
+///
+/// ## Failure
+///
+/// This fails if the allocation fails, or if a layout cannot be calculated for
+/// the allocation.
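+///
+/// ## Example
+///
+/// A minimal sketch:
+///
+/// ```rust
+/// # use bytemuck::*;
+/// let zeroes: Vec<u32> = try_zeroed_vec(16).unwrap();
+/// assert_eq!(zeroes, vec![0u32; 16]);
+/// ```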
+pub fn try_zeroed_vec<T: Zeroable>(length: usize) -> Result<Vec<T>, ()> {
+ if length == 0 {
+ Ok(Vec::new())
+ } else {
+ let boxed_slice = try_zeroed_slice_box(length)?;
+ Ok(boxed_slice.into_vec())
+ }
+}
+
+/// As [`try_zeroed_vec`], but unwraps for you.
+pub fn zeroed_vec<T: Zeroable>(length: usize) -> Vec<T> {
+ try_zeroed_vec(length).unwrap()
+}
+
+/// Allocates a `Box<[T]>` with all contents being zeroed out.
+///
+/// This uses the global allocator to create a zeroed allocation and _then_
+/// turns it into a Box. In other words, it's 100% assured that the zeroed data
+/// won't be put temporarily on the stack. You can make a box of any size
+/// without fear of a stack overflow.
+///
+/// ## Failure
+///
+/// This fails if the allocation fails, or if a layout cannot be calculated for
+/// the allocation.
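+///
+/// ## Example
+///
+/// A minimal sketch:
+///
+/// ```rust
+/// # use bytemuck::*;
+/// let zeroes: Box<[u16]> = try_zeroed_slice_box(8).unwrap();
+/// assert_eq!(&zeroes[..], &[0u16; 8][..]);
+/// ```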
+#[inline]
+pub fn try_zeroed_slice_box<T: Zeroable>(
+ length: usize,
+) -> Result<Box<[T]>, ()> {
+ if size_of::<T>() == 0 || length == 0 {
+ // This will not allocate but simply create a dangling slice pointer.
+ let dangling = core::ptr::NonNull::dangling().as_ptr();
+ let dangling_slice = core::ptr::slice_from_raw_parts_mut(dangling, length);
+ return Ok(unsafe { Box::from_raw(dangling_slice) });
+ }
+ let layout = core::alloc::Layout::array::<T>(length).map_err(|_| ())?;
+ let ptr = unsafe { alloc_zeroed(layout) };
+ if ptr.is_null() {
+ // we don't know what the error is because `alloc_zeroed` is a dumb API
+ Err(())
+ } else {
+ let slice =
+ unsafe { core::slice::from_raw_parts_mut(ptr as *mut T, length) };
+ Ok(unsafe { Box::<[T]>::from_raw(slice) })
+ }
+}
+
+/// As [`try_zeroed_slice_box`](try_zeroed_slice_box), but unwraps for you.
+pub fn zeroed_slice_box<T: Zeroable>(length: usize) -> Box<[T]> {
+ try_zeroed_slice_box(length).unwrap()
+}
+
+/// As [`try_cast_slice_box`](try_cast_slice_box), but unwraps for you.
+#[inline]
+pub fn cast_slice_box<A: NoUninit, B: AnyBitPattern>(
+ input: Box<[A]>,
+) -> Box<[B]> {
+ try_cast_slice_box(input).map_err(|(e, _v)| e).unwrap()
+}
+
+/// Attempts to cast the content type of a `Box<[T]>`.
+///
+/// On failure you get back an error along with the starting `Box<[T]>`.
+///
+/// ## Failure
+///
+/// * The start and end content type of the `Box<[T]>` must have the exact same
+/// alignment.
+/// * The start and end content size in bytes of the `Box<[T]>` must be the
+/// exact same.
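+///
+/// ## Example
+///
+/// A minimal sketch of a length-changing cast between two types with the
+/// same alignment:
+///
+/// ```rust
+/// # use bytemuck::*;
+/// let pairs: Box<[[u16; 2]]> = vec![[1, 2], [3, 4]].into_boxed_slice();
+/// let flat: Box<[u16]> = try_cast_slice_box(pairs).unwrap();
+/// assert_eq!(&flat[..], &[1, 2, 3, 4][..]);
+/// ```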
+#[inline]
+pub fn try_cast_slice_box<A: NoUninit, B: AnyBitPattern>(
+ input: Box<[A]>,
+) -> Result<Box<[B]>, (PodCastError, Box<[A]>)> {
+ if align_of::<A>() != align_of::<B>() {
+ Err((PodCastError::AlignmentMismatch, input))
+ } else if size_of::<A>() != size_of::<B>() {
+ if size_of::<A>() * input.len() % size_of::<B>() != 0 {
+ // If the size in bytes of the underlying buffer does not match an exact
+ // multiple of the size of B, we cannot cast between them.
+ Err((PodCastError::SizeMismatch, input))
+ } else {
+ // Because the size is an exact multiple, we can now change the length
+ // of the slice and recreate the Box
+ // NOTE: This is a valid operation because according to the docs of
+ // std::alloc::GlobalAlloc::dealloc(), the Layout that was used to alloc
+ // the block must be the same Layout that is used to dealloc the block.
+ // Luckily, Layout only stores two things, the alignment, and the size in
+ // bytes. So as long as both of those stay the same, the Layout will
+ // remain a valid input to dealloc.
+ let length = size_of::<A>() * input.len() / size_of::<B>();
+ let box_ptr: *mut A = Box::into_raw(input) as *mut A;
+ let ptr: *mut [B] =
+ unsafe { core::slice::from_raw_parts_mut(box_ptr as *mut B, length) };
+ Ok(unsafe { Box::<[B]>::from_raw(ptr) })
+ }
+ } else {
+ let box_ptr: *mut [A] = Box::into_raw(input);
+ let ptr: *mut [B] = box_ptr as *mut [B];
+ Ok(unsafe { Box::<[B]>::from_raw(ptr) })
+ }
+}
+
+/// As [`try_cast_vec`](try_cast_vec), but unwraps for you.
+#[inline]
+pub fn cast_vec<A: NoUninit, B: AnyBitPattern>(input: Vec<A>) -> Vec<B> {
+ try_cast_vec(input).map_err(|(e, _v)| e).unwrap()
+}
+
+/// Attempts to cast the content type of a [`Vec`](alloc::vec::Vec).
+///
+/// On failure you get back an error along with the starting `Vec`.
+///
+/// ## Failure
+///
+/// * The start and end content type of the `Vec` must have the exact same
+/// alignment.
+/// * The start and end content size in bytes of the `Vec` must be the exact
+/// same.
+/// * The start and end capacity in bytes of the `Vec` must be the exact same.
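+///
+/// ## Example
+///
+/// A minimal same-size sketch (a size-changing cast must also keep the
+/// length and capacity in bytes exact multiples, as described above):
+///
+/// ```rust
+/// # use bytemuck::*;
+/// let ints: Vec<u32> = vec![1, 2, 3];
+/// let floats: Vec<f32> = try_cast_vec(ints).unwrap();
+/// assert_eq!(floats.len(), 3);
+/// ```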
+#[inline]
+pub fn try_cast_vec<A: NoUninit, B: AnyBitPattern>(
+ input: Vec<A>,
+) -> Result<Vec<B>, (PodCastError, Vec<A>)> {
+ if align_of::<A>() != align_of::<B>() {
+ Err((PodCastError::AlignmentMismatch, input))
+ } else if size_of::<A>() != size_of::<B>() {
+ if size_of::<A>() * input.len() % size_of::<B>() != 0
+ || size_of::<A>() * input.capacity() % size_of::<B>() != 0
+ {
+ // If the size in bytes of the underlying buffer does not match an exact
+ // multiple of the size of B, we cannot cast between them.
+ // Note that we have to pay special attention to make sure that both
+ // length and capacity are valid under B, as we do not want to
+ // change which bytes are considered part of the initialized slice
+ // of the Vec
+ Err((PodCastError::SizeMismatch, input))
+ } else {
+ // Because the size is an exact multiple, we can now change the length and
+ // capacity and recreate the Vec
+ // NOTE: This is a valid operation because according to the docs of
+ // std::alloc::GlobalAlloc::dealloc(), the Layout that was used to alloc
+ // the block must be the same Layout that is used to dealloc the block.
+ // Luckily, Layout only stores two things, the alignment, and the size in
+ // bytes. So as long as both of those stay the same, the Layout will
+ // remain a valid input to dealloc.
+
+ // Note(Lokathor): First we record the length and capacity, which don't
+ // have any secret provenance metadata.
+ let length: usize = size_of::<A>() * input.len() / size_of::<B>();
+ let capacity: usize = size_of::<A>() * input.capacity() / size_of::<B>();
+ // Note(Lokathor): Next we "pre-forget" the old Vec by wrapping with
+ // ManuallyDrop, because if we used `core::mem::forget` after taking the
+ // pointer then that would invalidate our pointer. In nightly there's a
+ // "into raw parts" method, which we can switch this too eventually.
+ let mut manual_drop_vec = ManuallyDrop::new(input);
+ let vec_ptr: *mut A = manual_drop_vec.as_mut_ptr();
+ let ptr: *mut B = vec_ptr as *mut B;
+ Ok(unsafe { Vec::from_raw_parts(ptr, length, capacity) })
+ }
+ } else {
+ // Note(Lokathor): First we record the length and capacity, which don't have
+ // any secret provenance metadata.
+ let length: usize = input.len();
+ let capacity: usize = input.capacity();
+ // Note(Lokathor): Next we "pre-forget" the old Vec by wrapping with
+ // ManuallyDrop, because if we used `core::mem::forget` after taking the
+ // pointer then that would invalidate our pointer. In nightly there's a
+ // "into raw parts" method, which we can switch this too eventually.
+ let mut manual_drop_vec = ManuallyDrop::new(input);
+ let vec_ptr: *mut A = manual_drop_vec.as_mut_ptr();
+ let ptr: *mut B = vec_ptr as *mut B;
+ Ok(unsafe { Vec::from_raw_parts(ptr, length, capacity) })
+ }
+}
+
+/// This "collects" a slice of pod data into a vec of a different pod type.
+///
+/// Unlike with [`cast_slice`] and [`cast_slice_mut`], this will always work.
+///
+/// The output vec will be of a minimal size/capacity to hold the slice given.
+///
+/// ```rust
+/// # use bytemuck::*;
+/// let halfwords: [u16; 4] = [5, 6, 7, 8];
+/// let vec_of_words: Vec<u32> = pod_collect_to_vec(&halfwords);
+/// if cfg!(target_endian = "little") {
+/// assert_eq!(&vec_of_words[..], &[0x0006_0005, 0x0008_0007][..])
+/// } else {
+/// assert_eq!(&vec_of_words[..], &[0x0005_0006, 0x0007_0008][..])
+/// }
+/// ```
+pub fn pod_collect_to_vec<A: NoUninit, B: NoUninit + AnyBitPattern>(
+ src: &[A],
+) -> Vec<B> {
+ let src_size = size_of_val(src);
+ // Note(Lokathor): dst_count is rounded up so that the dest will always be at
+ // least as many bytes as the src.
+ let dst_count = src_size / size_of::<B>()
+ + if src_size % size_of::<B>() != 0 { 1 } else { 0 };
+ let mut dst = vec![B::zeroed(); dst_count];
+
+ let src_bytes: &[u8] = cast_slice(src);
+ let dst_bytes: &mut [u8] = cast_slice_mut(&mut dst[..]);
+ dst_bytes[..src_size].copy_from_slice(src_bytes);
+ dst
+}
+
+/// As [`try_cast_rc`](try_cast_rc), but unwraps for you.
+#[inline]
+pub fn cast_rc<A: NoUninit + AnyBitPattern, B: NoUninit + AnyBitPattern>(
+ input: Rc<A>,
+) -> Rc<B> {
+ try_cast_rc(input).map_err(|(e, _v)| e).unwrap()
+}
+
+/// Attempts to cast the content type of a [`Rc`](alloc::rc::Rc).
+///
+/// On failure you get back an error along with the starting `Rc`.
+///
+/// The bounds on this function are the same as [`cast_mut`], because a user
+/// could call `Rc::get_unchecked_mut` on the output, which could be observable
+/// in the input.
+///
+/// ## Failure
+///
+/// * The start and end content type of the `Rc` must have the exact same
+/// alignment.
+/// * The start and end content type of the `Rc` must have the exact same
+///   size.
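+///
+/// ## Example
+///
+/// A minimal sketch of a same-size, same-alignment cast:
+///
+/// ```rust
+/// # use bytemuck::*;
+/// # use std::rc::Rc;
+/// let rc: Rc<u32> = Rc::new(1);
+/// let signed: Rc<i32> = try_cast_rc(rc).unwrap();
+/// assert_eq!(*signed, 1);
+/// ```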
+#[inline]
+pub fn try_cast_rc<A: NoUninit + AnyBitPattern, B: NoUninit + AnyBitPattern>(
+ input: Rc<A>,
+) -> Result<Rc<B>, (PodCastError, Rc<A>)> {
+ if align_of::<A>() != align_of::<B>() {
+ Err((PodCastError::AlignmentMismatch, input))
+ } else if size_of::<A>() != size_of::<B>() {
+ Err((PodCastError::SizeMismatch, input))
+ } else {
+ // Safety: Rc::from_raw requires size and alignment match, which is met.
+ let ptr: *const B = Rc::into_raw(input) as *const B;
+ Ok(unsafe { Rc::from_raw(ptr) })
+ }
+}
+
+/// As [`try_cast_arc`](try_cast_arc), but unwraps for you.
+#[inline]
+#[cfg(target_has_atomic = "ptr")]
+pub fn cast_arc<A: NoUninit + AnyBitPattern, B: NoUninit + AnyBitPattern>(
+ input: Arc<A>,
+) -> Arc<B> {
+ try_cast_arc(input).map_err(|(e, _v)| e).unwrap()
+}
+
+/// Attempts to cast the content type of an [`Arc`](alloc::sync::Arc).
+///
+/// On failure you get back an error along with the starting `Arc`.
+///
+/// The bounds on this function are the same as [`cast_mut`], because a user
+/// could call `Arc::get_unchecked_mut` on the output, which could be
+/// observable
+/// in the input.
+///
+/// ## Failure
+///
+/// * The start and end content type of the `Arc` must have the exact same
+/// alignment.
+/// * The start and end content type of the `Arc` must have the exact same
+///   size.
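+///
+/// ## Example
+///
+/// A minimal sketch of a same-size, same-alignment cast:
+///
+/// ```rust
+/// # use bytemuck::*;
+/// # use std::sync::Arc;
+/// let arc: Arc<u32> = Arc::new(1);
+/// let signed: Arc<i32> = try_cast_arc(arc).unwrap();
+/// assert_eq!(*signed, 1);
+/// ```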
+#[inline]
+#[cfg(target_has_atomic = "ptr")]
+pub fn try_cast_arc<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + AnyBitPattern,
+>(
+ input: Arc<A>,
+) -> Result<Arc<B>, (PodCastError, Arc<A>)> {
+ if align_of::<A>() != align_of::<B>() {
+ Err((PodCastError::AlignmentMismatch, input))
+ } else if size_of::<A>() != size_of::<B>() {
+ Err((PodCastError::SizeMismatch, input))
+ } else {
+ // Safety: Arc::from_raw requires size and alignment match, which is met.
+ let ptr: *const B = Arc::into_raw(input) as *const B;
+ Ok(unsafe { Arc::from_raw(ptr) })
+ }
+}
+
+/// As [`try_cast_slice_rc`](try_cast_slice_rc), but unwraps for you.
+#[inline]
+pub fn cast_slice_rc<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + AnyBitPattern,
+>(
+ input: Rc<[A]>,
+) -> Rc<[B]> {
+ try_cast_slice_rc(input).map_err(|(e, _v)| e).unwrap()
+}
+
+/// Attempts to cast the content type of a `Rc<[T]>`.
+///
+/// On failure you get back an error along with the starting `Rc<[T]>`.
+///
+/// The bounds on this function are the same as [`cast_mut`], because a user
+/// could call `Rc::get_unchecked_mut` on the output, which could be observable
+/// in the input.
+///
+/// ## Failure
+///
+/// * The start and end content type of the `Rc<[T]>` must have the exact same
+/// alignment.
+/// * The start and end content size in bytes of the `Rc<[T]>` must be the exact
+/// same.
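+///
+/// ## Example
+///
+/// A minimal sketch of a length-changing cast between two types with the
+/// same alignment:
+///
+/// ```rust
+/// # use bytemuck::*;
+/// # use std::rc::Rc;
+/// let rc: Rc<[[u16; 2]]> = Rc::from(vec![[1, 2], [3, 4]]);
+/// let flat: Rc<[u16]> = try_cast_slice_rc(rc).unwrap();
+/// assert_eq!(&flat[..], &[1, 2, 3, 4][..]);
+/// ```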
+#[inline]
+pub fn try_cast_slice_rc<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + AnyBitPattern,
+>(
+ input: Rc<[A]>,
+) -> Result<Rc<[B]>, (PodCastError, Rc<[A]>)> {
+ if align_of::<A>() != align_of::<B>() {
+ Err((PodCastError::AlignmentMismatch, input))
+ } else if size_of::<A>() != size_of::<B>() {
+ if size_of::<A>() * input.len() % size_of::<B>() != 0 {
+ // If the size in bytes of the underlying buffer does not match an exact
+ // multiple of the size of B, we cannot cast between them.
+ Err((PodCastError::SizeMismatch, input))
+ } else {
+ // Because the size is an exact multiple, we can now change the length
+ // of the slice and recreate the Rc
+ // NOTE: This is a valid operation because according to the docs of
+ // std::rc::Rc::from_raw(), the type U that was in the original Rc<U>
+      // acquired from Rc::into_raw() must have the same size and alignment
+      // as the type T in the new Rc<T>. So as long as both the size
+ // and alignment stay the same, the Rc will remain a valid Rc.
+ let length = size_of::<A>() * input.len() / size_of::<B>();
+ let rc_ptr: *const A = Rc::into_raw(input) as *const A;
+ // Must use ptr::slice_from_raw_parts, because we cannot make an
+ // intermediate const reference, because it has mutable provenance,
+ // nor an intermediate mutable reference, because it could be aliased.
+ let ptr = core::ptr::slice_from_raw_parts(rc_ptr as *const B, length);
+ Ok(unsafe { Rc::<[B]>::from_raw(ptr) })
+ }
+ } else {
+ let rc_ptr: *const [A] = Rc::into_raw(input);
+ let ptr: *const [B] = rc_ptr as *const [B];
+ Ok(unsafe { Rc::<[B]>::from_raw(ptr) })
+ }
+}
+
+/// As [`try_cast_slice_arc`](try_cast_slice_arc), but unwraps for you.
+#[inline]
+#[cfg(target_has_atomic = "ptr")]
+pub fn cast_slice_arc<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + AnyBitPattern,
+>(
+ input: Arc<[A]>,
+) -> Arc<[B]> {
+ try_cast_slice_arc(input).map_err(|(e, _v)| e).unwrap()
+}
+
+/// Attempts to cast the content type of an `Arc<[T]>`.
+///
+/// On failure you get back an error along with the starting `Arc<[T]>`.
+///
+/// The bounds on this function are the same as [`cast_mut`], because a user
+/// could call `Arc::get_unchecked_mut` on the output, which could be
+/// observable
+/// in the input.
+///
+/// ## Failure
+///
+/// * The start and end content type of the `Arc<[T]>` must have the exact same
+/// alignment.
+/// * The start and end content size in bytes of the `Arc<[T]>` must be the
+/// exact same.
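+///
+/// ## Example
+///
+/// A minimal sketch of a length-changing cast between two types with the
+/// same alignment:
+///
+/// ```rust
+/// # use bytemuck::*;
+/// # use std::sync::Arc;
+/// let arc: Arc<[[u16; 2]]> = Arc::from(vec![[1, 2], [3, 4]]);
+/// let flat: Arc<[u16]> = try_cast_slice_arc(arc).unwrap();
+/// assert_eq!(&flat[..], &[1, 2, 3, 4][..]);
+/// ```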
+#[inline]
+#[cfg(target_has_atomic = "ptr")]
+pub fn try_cast_slice_arc<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + AnyBitPattern,
+>(
+ input: Arc<[A]>,
+) -> Result<Arc<[B]>, (PodCastError, Arc<[A]>)> {
+ if align_of::<A>() != align_of::<B>() {
+ Err((PodCastError::AlignmentMismatch, input))
+ } else if size_of::<A>() != size_of::<B>() {
+ if size_of::<A>() * input.len() % size_of::<B>() != 0 {
+ // If the size in bytes of the underlying buffer does not match an exact
+ // multiple of the size of B, we cannot cast between them.
+ Err((PodCastError::SizeMismatch, input))
+ } else {
+ // Because the size is an exact multiple, we can now change the length
+ // of the slice and recreate the Arc
+ // NOTE: This is a valid operation because according to the docs of
+ // std::sync::Arc::from_raw(), the type U that was in the original Arc<U>
+      // acquired from Arc::into_raw() must have the same size and alignment
+      // as the type T in the new Arc<T>. So as long as both the size
+ // and alignment stay the same, the Arc will remain a valid Arc.
+ let length = size_of::<A>() * input.len() / size_of::<B>();
+ let arc_ptr: *const A = Arc::into_raw(input) as *const A;
+ // Must use ptr::slice_from_raw_parts, because we cannot make an
+ // intermediate const reference, because it has mutable provenance,
+ // nor an intermediate mutable reference, because it could be aliased.
+ let ptr = core::ptr::slice_from_raw_parts(arc_ptr as *const B, length);
+ Ok(unsafe { Arc::<[B]>::from_raw(ptr) })
+ }
+ } else {
+ let arc_ptr: *const [A] = Arc::into_raw(input);
+ let ptr: *const [B] = arc_ptr as *const [B];
+ Ok(unsafe { Arc::<[B]>::from_raw(ptr) })
+ }
+}
+
+/// An extension trait for `TransparentWrapper` and alloc types.
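+///
+/// A minimal usage sketch (`Wrapper` here is a hypothetical example type):
+///
+/// ```rust
+/// use bytemuck::{TransparentWrapper, TransparentWrapperAlloc};
+///
+/// // A hypothetical transparent newtype over `u8`.
+/// #[repr(transparent)]
+/// struct Wrapper(u8);
+/// unsafe impl TransparentWrapper<u8> for Wrapper {}
+///
+/// let wrapped: Vec<Wrapper> = Wrapper::wrap_vec(vec![1, 2, 3]);
+/// let inner: Vec<u8> = Wrapper::peel_vec(wrapped);
+/// assert_eq!(inner, [1, 2, 3]);
+/// ```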
+pub trait TransparentWrapperAlloc<Inner: ?Sized>:
+ TransparentWrapper<Inner>
+{
+ /// Convert a vec of the inner type into a vec of the wrapper type.
+ fn wrap_vec(s: Vec<Inner>) -> Vec<Self>
+ where
+ Self: Sized,
+ Inner: Sized,
+ {
+ let mut s = core::mem::ManuallyDrop::new(s);
+
+ let length = s.len();
+ let capacity = s.capacity();
+ let ptr = s.as_mut_ptr();
+
+ unsafe {
+ // SAFETY:
+ // * ptr comes from Vec (and will not be double-dropped)
+ // * the two types have the identical representation
+ // * the len and capacity fields are valid
+ Vec::from_raw_parts(ptr as *mut Self, length, capacity)
+ }
+ }
+
+  /// Convert a box of the inner type into a box of the wrapper type.
+ #[inline]
+ fn wrap_box(s: Box<Inner>) -> Box<Self> {
+ assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
+
+ unsafe {
+ // A pointer cast doesn't work here because rustc can't tell that
+ // the vtables match (because of the `?Sized` restriction relaxation).
+ // A `transmute` doesn't work because the sizes are unspecified.
+ //
+ // SAFETY:
+ // * The unsafe contract requires that pointers to Inner and Self have
+ // identical representations
+ // * Box is guaranteed to have representation identical to a (non-null)
+ // pointer
+ // * The pointer comes from a box (and thus satisfies all safety
+ // requirements of Box)
+ let inner_ptr: *mut Inner = Box::into_raw(s);
+ let wrapper_ptr: *mut Self = transmute!(inner_ptr);
+ Box::from_raw(wrapper_ptr)
+ }
+ }
+
+  /// Convert an [`Rc`](alloc::rc::Rc) of the inner type into an `Rc` of the
+ /// wrapper type.
+ #[inline]
+ fn wrap_rc(s: Rc<Inner>) -> Rc<Self> {
+ assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
+
+ unsafe {
+ // A pointer cast doesn't work here because rustc can't tell that
+ // the vtables match (because of the `?Sized` restriction relaxation).
+ // A `transmute` doesn't work because the layout of Rc is unspecified.
+ //
+ // SAFETY:
+ // * The unsafe contract requires that pointers to Inner and Self have
+ // identical representations, and that the size and alignment of Inner
+ // and Self are the same, which meets the safety requirements of
+ // Rc::from_raw
+ let inner_ptr: *const Inner = Rc::into_raw(s);
+ let wrapper_ptr: *const Self = transmute!(inner_ptr);
+ Rc::from_raw(wrapper_ptr)
+ }
+ }
+
+  /// Convert an [`Arc`](alloc::sync::Arc) of the inner type into an `Arc` of
+  /// the wrapper type.
+ #[inline]
+ #[cfg(target_has_atomic = "ptr")]
+ fn wrap_arc(s: Arc<Inner>) -> Arc<Self> {
+ assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
+
+ unsafe {
+ // A pointer cast doesn't work here because rustc can't tell that
+ // the vtables match (because of the `?Sized` restriction relaxation).
+ // A `transmute` doesn't work because the layout of Arc is unspecified.
+ //
+ // SAFETY:
+ // * The unsafe contract requires that pointers to Inner and Self have
+ // identical representations, and that the size and alignment of Inner
+ // and Self are the same, which meets the safety requirements of
+ // Arc::from_raw
+ let inner_ptr: *const Inner = Arc::into_raw(s);
+ let wrapper_ptr: *const Self = transmute!(inner_ptr);
+ Arc::from_raw(wrapper_ptr)
+ }
+ }
+
+ /// Convert a vec of the wrapper type into a vec of the inner type.
+ fn peel_vec(s: Vec<Self>) -> Vec<Inner>
+ where
+ Self: Sized,
+ Inner: Sized,
+ {
+ let mut s = core::mem::ManuallyDrop::new(s);
+
+ let length = s.len();
+ let capacity = s.capacity();
+ let ptr = s.as_mut_ptr();
+
+ unsafe {
+ // SAFETY:
+ // * ptr comes from Vec (and will not be double-dropped)
+ // * the two types have the identical representation
+ // * the len and capacity fields are valid
+ Vec::from_raw_parts(ptr as *mut Inner, length, capacity)
+ }
+ }
+
+  /// Convert a box of the wrapper type into a box of the inner type.
+ #[inline]
+ fn peel_box(s: Box<Self>) -> Box<Inner> {
+ assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
+
+ unsafe {
+ // A pointer cast doesn't work here because rustc can't tell that
+ // the vtables match (because of the `?Sized` restriction relaxation).
+ // A `transmute` doesn't work because the sizes are unspecified.
+ //
+ // SAFETY:
+ // * The unsafe contract requires that pointers to Inner and Self have
+ // identical representations
+ // * Box is guaranteed to have representation identical to a (non-null)
+ // pointer
+ // * The pointer comes from a box (and thus satisfies all safety
+ // requirements of Box)
+ let wrapper_ptr: *mut Self = Box::into_raw(s);
+ let inner_ptr: *mut Inner = transmute!(wrapper_ptr);
+ Box::from_raw(inner_ptr)
+ }
+ }
+
+  /// Convert an [`Rc`](alloc::rc::Rc) of the wrapper type into an `Rc` of the
+ /// inner type.
+ #[inline]
+ fn peel_rc(s: Rc<Self>) -> Rc<Inner> {
+ assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
+
+ unsafe {
+ // A pointer cast doesn't work here because rustc can't tell that
+ // the vtables match (because of the `?Sized` restriction relaxation).
+ // A `transmute` doesn't work because the layout of Rc is unspecified.
+ //
+ // SAFETY:
+ // * The unsafe contract requires that pointers to Inner and Self have
+ // identical representations, and that the size and alignment of Inner
+ // and Self are the same, which meets the safety requirements of
+ // Rc::from_raw
+ let wrapper_ptr: *const Self = Rc::into_raw(s);
+ let inner_ptr: *const Inner = transmute!(wrapper_ptr);
+ Rc::from_raw(inner_ptr)
+ }
+ }
+
+  /// Convert an [`Arc`](alloc::sync::Arc) of the wrapper type into an `Arc`
+  /// of the inner type.
+ #[inline]
+ #[cfg(target_has_atomic = "ptr")]
+ fn peel_arc(s: Arc<Self>) -> Arc<Inner> {
+ assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
+
+ unsafe {
+ // A pointer cast doesn't work here because rustc can't tell that
+ // the vtables match (because of the `?Sized` restriction relaxation).
+ // A `transmute` doesn't work because the layout of Arc is unspecified.
+ //
+ // SAFETY:
+ // * The unsafe contract requires that pointers to Inner and Self have
+ // identical representations, and that the size and alignment of Inner
+ // and Self are the same, which meets the safety requirements of
+ // Arc::from_raw
+ let wrapper_ptr: *const Self = Arc::into_raw(s);
+ let inner_ptr: *const Inner = transmute!(wrapper_ptr);
+ Arc::from_raw(inner_ptr)
+ }
+ }
+}
+
+impl<I: ?Sized, T: ?Sized + TransparentWrapper<I>> TransparentWrapperAlloc<I> for T {}
diff --git a/vendor/bytemuck/src/anybitpattern.rs b/vendor/bytemuck/src/anybitpattern.rs
new file mode 100644
index 0000000..a759738
--- /dev/null
+++ b/vendor/bytemuck/src/anybitpattern.rs
@@ -0,0 +1,61 @@
+use crate::{Pod, Zeroable};
+
+/// Marker trait for "plain old data" types that are valid for any bit pattern.
+///
+/// The requirements for this are very similar to [`Pod`],
+/// except that the type can allow uninit (or padding) bytes.
+/// This limits what you can do with a type of this kind, but also broadens the
+/// included types to `repr(C)` `struct`s that contain padding as well as
+/// `union`s. Notably, you can only cast *immutable* references and *owned*
+/// values into [`AnyBitPattern`] types, not *mutable* references.
+///
+/// [`Pod`] is a subset of [`AnyBitPattern`], meaning that any `T: Pod` is also
+/// [`AnyBitPattern`] but any `T: AnyBitPattern` is not necessarily [`Pod`].
+///
+/// [`AnyBitPattern`] is a subset of [`Zeroable`], meaning that any `T:
+/// AnyBitPattern` is also [`Zeroable`], but any `T: Zeroable` is not
+/// necessarily [`AnyBitPattern`].
+///
+/// # Derive
+///
+/// A `#[derive(AnyBitPattern)]` macro is provided under the `derive` feature
+/// flag which will automatically validate the requirements of this trait and
+/// implement the trait for you for both structs and enums. This is the
+/// recommended method for implementing the trait, however it's also possible
+/// to do so manually. If you implement it manually, you *must* carefully
+/// follow the safety rules below.
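+///
+/// For example, a sketch assuming the `derive` feature is enabled:
+///
+/// ```rust,ignore
+/// use bytemuck::AnyBitPattern;
+///
+/// // Any 4-byte bit pattern is a valid `Rgba8`, since every field is
+/// // itself `AnyBitPattern`.
+/// #[derive(Copy, Clone, AnyBitPattern)]
+/// #[repr(C)]
+/// struct Rgba8 {
+///   r: u8,
+///   g: u8,
+///   b: u8,
+///   a: u8,
+/// }
+/// ```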
+///
+/// **NOTE:** even `C-style`, fieldless enums are intentionally **excluded**
+/// from this trait, since it is **unsound** for an enum to have a discriminant
+/// value that is not one of its defined variants.
+///
+/// # Safety
+///
+/// Similar to [`Pod`], except we disregard the rule that the type must not
+/// contain uninit bytes. Still, this is a quite strong guarantee about a
+/// type, so *be careful* when implementing it manually.
+///
+/// * The type must be inhabited (eg: no
+/// [Infallible](core::convert::Infallible)).
+/// * The type must be valid for any bit pattern of its backing memory.
+/// * Structs need to have all fields also be `AnyBitPattern`.
+/// * It is disallowed for types to contain pointer types, `Cell`, `UnsafeCell`,
+/// atomics, and any other forms of interior mutability.
+/// * More precisely: A shared reference to the type must allow reads, and
+/// *only* reads. RustBelt's separation logic is based on the notion that a
+/// type is allowed to define a sharing predicate, its own invariant that must
+///   hold for shared references, and this predicate is the reasoning that
+///   allows it to deal with atomics and cells etc. We require the sharing
+///   predicate to
+/// be trivial and permit only read-only access.
+/// * There's probably more, don't mess it up (I mean it).
+pub unsafe trait AnyBitPattern:
+ Zeroable + Sized + Copy + 'static
+{
+}
+
+unsafe impl<T: Pod> AnyBitPattern for T {}
+
+#[cfg(feature = "zeroable_maybe_uninit")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "zeroable_maybe_uninit")))]
+unsafe impl<T> AnyBitPattern for core::mem::MaybeUninit<T> where T: AnyBitPattern
+{}
diff --git a/vendor/bytemuck/src/checked.rs b/vendor/bytemuck/src/checked.rs
new file mode 100644
index 0000000..722c31d
--- /dev/null
+++ b/vendor/bytemuck/src/checked.rs
@@ -0,0 +1,522 @@
+//! Checked versions of the casting functions exposed in crate root
+//! that support [`CheckedBitPattern`] types.
+
+use crate::{
+ internal::{self, something_went_wrong},
+ AnyBitPattern, NoUninit,
+};
+
+/// A marker trait that allows types that have some invalid bit patterns to be
+/// used in places that otherwise require [`AnyBitPattern`] or [`Pod`] types by
+/// performing a runtime check on a particular set of bits. This is particularly
+/// useful for types like fieldless ('C-style') enums, [`char`], bool, and
+/// structs containing them.
+///
+/// To do this, we define a `Bits` type which is a type with equivalent layout
+/// to `Self` other than the invalid bit patterns which disallow `Self` from
+/// being [`AnyBitPattern`]. This `Bits` type must itself implement
+/// [`AnyBitPattern`]. Then, we implement a function that checks whether a
+/// certain instance of the `Bits` is also a valid bit pattern of `Self`. If
+/// this check passes, then we can allow casting from the `Bits` to `Self` (and
+/// therefore, any type which is able to be cast to `Bits` is also able to be
+/// cast to `Self`).
+///
+/// [`AnyBitPattern`] is a subset of [`CheckedBitPattern`], meaning that any `T:
+/// AnyBitPattern` is also [`CheckedBitPattern`]. This means you can also use
+/// any [`AnyBitPattern`] type in the checked versions of casting functions in
+/// this module. If it's possible, prefer implementing [`AnyBitPattern`] for
+/// your type directly instead of [`CheckedBitPattern`] as it gives greater
+/// flexibility.
+///
+/// # Derive
+///
+/// A `#[derive(CheckedBitPattern)]` macro is provided under the `derive`
+/// feature flag which will automatically validate the requirements of this
+/// trait and implement the trait for you for both enums and structs. This is
+/// the recommended method for implementing the trait, however it's also
+/// possible to do manually.
+///
+/// # Example
+///
+/// If manually implementing the trait, we can do something like so:
+///
+/// ```rust
+/// use bytemuck::{CheckedBitPattern, NoUninit};
+///
+/// #[repr(u32)]
+/// #[derive(Copy, Clone)]
+/// enum MyEnum {
+/// Variant0 = 0,
+/// Variant1 = 1,
+/// Variant2 = 2,
+/// }
+///
+/// unsafe impl CheckedBitPattern for MyEnum {
+/// type Bits = u32;
+///
+/// fn is_valid_bit_pattern(bits: &u32) -> bool {
+/// match *bits {
+/// 0 | 1 | 2 => true,
+/// _ => false,
+/// }
+/// }
+/// }
+///
+/// // It is often useful to also implement `NoUninit` on our `CheckedBitPattern` types.
+/// // This will allow us to do casting of mutable references (and mutable slices).
+/// // It is not always possible to do so, but in this case we have no padding so it is.
+/// unsafe impl NoUninit for MyEnum {}
+/// ```
+///
+/// We can now use relevant casting functions. For example,
+///
+/// ```rust
+/// # use bytemuck::{CheckedBitPattern, NoUninit};
+/// # #[repr(u32)]
+/// # #[derive(Copy, Clone, PartialEq, Eq, Debug)]
+/// # enum MyEnum {
+/// # Variant0 = 0,
+/// # Variant1 = 1,
+/// # Variant2 = 2,
+/// # }
+/// # unsafe impl NoUninit for MyEnum {}
+/// # unsafe impl CheckedBitPattern for MyEnum {
+/// # type Bits = u32;
+/// # fn is_valid_bit_pattern(bits: &u32) -> bool {
+/// # match *bits {
+/// # 0 | 1 | 2 => true,
+/// # _ => false,
+/// # }
+/// # }
+/// # }
+/// use bytemuck::{bytes_of, bytes_of_mut};
+/// use bytemuck::checked;
+///
+/// let bytes = bytes_of(&2u32);
+/// let result = checked::try_from_bytes::<MyEnum>(bytes);
+/// assert_eq!(result, Ok(&MyEnum::Variant2));
+///
+/// // Fails for invalid discriminant
+/// let bytes = bytes_of(&100u32);
+/// let result = checked::try_from_bytes::<MyEnum>(bytes);
+/// assert!(result.is_err());
+///
+/// // Since we implemented NoUninit, we can also cast mutably from an original type
+/// // that is `NoUninit + AnyBitPattern`:
+/// let mut my_u32 = 2u32;
+/// {
+/// let as_enum_mut = checked::cast_mut::<_, MyEnum>(&mut my_u32);
+/// assert_eq!(as_enum_mut, &mut MyEnum::Variant2);
+/// *as_enum_mut = MyEnum::Variant0;
+/// }
+/// assert_eq!(my_u32, 0u32);
+/// ```
+///
+/// # Safety
+///
+/// * `Self` *must* have the same layout as the specified `Bits` except for
+/// the possible invalid bit patterns being checked during
+/// [`is_valid_bit_pattern`].
+/// * This almost certainly means your type must be `#[repr(C)]` or a similar
+/// specified repr, but if you think you know better, you probably don't. If
+/// you still think you know better, be careful and have fun. And don't mess
+/// it up (I mean it).
+/// * If [`is_valid_bit_pattern`] returns true, then the bit pattern contained
+/// in `bits` must also be valid for an instance of `Self`.
+/// * Probably more, don't mess it up (I mean it 2.0)
+///
+/// [`is_valid_bit_pattern`]: CheckedBitPattern::is_valid_bit_pattern
+/// [`Pod`]: crate::Pod
+pub unsafe trait CheckedBitPattern: Copy {
+ /// `Self` *must* have the same layout as the specified `Bits` except for
+ /// the possible invalid bit patterns being checked during
+ /// [`is_valid_bit_pattern`].
+ ///
+ /// [`is_valid_bit_pattern`]: CheckedBitPattern::is_valid_bit_pattern
+ type Bits: AnyBitPattern;
+
+ /// If this function returns true, then it must be valid to reinterpret `bits`
+ /// as `&Self`.
+ fn is_valid_bit_pattern(bits: &Self::Bits) -> bool;
+}
+
+unsafe impl<T: AnyBitPattern> CheckedBitPattern for T {
+ type Bits = T;
+
+ #[inline(always)]
+ fn is_valid_bit_pattern(_bits: &T) -> bool {
+ true
+ }
+}
+
+unsafe impl CheckedBitPattern for char {
+ type Bits = u32;
+
+ #[inline]
+ fn is_valid_bit_pattern(bits: &Self::Bits) -> bool {
+ core::char::from_u32(*bits).is_some()
+ }
+}
+
+unsafe impl CheckedBitPattern for bool {
+ type Bits = u8;
+
+ #[inline]
+ fn is_valid_bit_pattern(bits: &Self::Bits) -> bool {
+ match *bits {
+ 0 | 1 => true,
+ _ => false,
+ }
+ }
+}
+
+// Rust 1.70.0 documents that NonZero[int] has the same layout as [int].
+macro_rules! impl_checked_for_nonzero {
+ ($($nonzero:ty: $primitive:ty),* $(,)?) => {
+ $(
+ unsafe impl CheckedBitPattern for $nonzero {
+ type Bits = $primitive;
+
+ #[inline]
+ fn is_valid_bit_pattern(bits: &Self::Bits) -> bool {
+ *bits != 0
+ }
+ }
+ )*
+ };
+}
+impl_checked_for_nonzero! {
+ core::num::NonZeroU8: u8,
+ core::num::NonZeroI8: i8,
+ core::num::NonZeroU16: u16,
+ core::num::NonZeroI16: i16,
+ core::num::NonZeroU32: u32,
+ core::num::NonZeroI32: i32,
+ core::num::NonZeroU64: u64,
+ core::num::NonZeroI64: i64,
+ core::num::NonZeroI128: i128,
+ core::num::NonZeroU128: u128,
+ core::num::NonZeroUsize: usize,
+ core::num::NonZeroIsize: isize,
+}
+
+/// The things that can go wrong when casting between [`CheckedBitPattern`] data
+/// forms.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum CheckedCastError {
+ /// An error occurred during a true-[`Pod`] cast
+ ///
+ /// [`Pod`]: crate::Pod
+ PodCastError(crate::PodCastError),
+ /// When casting to a [`CheckedBitPattern`] type, it is possible that the
+ /// original data contains an invalid bit pattern. If so, the cast will
+ /// fail and this error will be returned. Will never happen on casts
+ /// between [`Pod`] types.
+ ///
+ /// [`Pod`]: crate::Pod
+ InvalidBitPattern,
+}
+
+#[cfg(not(target_arch = "spirv"))]
+impl core::fmt::Display for CheckedCastError {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ write!(f, "{:?}", self)
+ }
+}
+#[cfg(feature = "extern_crate_std")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "extern_crate_std")))]
+impl std::error::Error for CheckedCastError {}
+
+impl From<crate::PodCastError> for CheckedCastError {
+ fn from(err: crate::PodCastError) -> CheckedCastError {
+ CheckedCastError::PodCastError(err)
+ }
+}
+
+/// Re-interprets `&[u8]` as `&T`.
+///
+/// ## Failure
+///
+/// * If the slice isn't aligned for the new type
+/// * If the slice's length isn’t exactly the size of the new type
+/// * If the slice contains an invalid bit pattern for `T`
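+///
+/// ## Example
+///
+/// A minimal sketch using `bool`, whose only valid bit patterns are 0 and 1:
+///
+/// ```rust
+/// # use bytemuck::checked;
+/// assert_eq!(checked::try_from_bytes::<bool>(&[1]), Ok(&true));
+/// assert!(checked::try_from_bytes::<bool>(&[3]).is_err());
+/// ```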
+#[inline]
+pub fn try_from_bytes<T: CheckedBitPattern>(
+ s: &[u8],
+) -> Result<&T, CheckedCastError> {
+ let pod = crate::try_from_bytes(s)?;
+
+ if <T as CheckedBitPattern>::is_valid_bit_pattern(pod) {
+ Ok(unsafe { &*(pod as *const <T as CheckedBitPattern>::Bits as *const T) })
+ } else {
+ Err(CheckedCastError::InvalidBitPattern)
+ }
+}
+
+/// Re-interprets `&mut [u8]` as `&mut T`.
+///
+/// ## Failure
+///
+/// * If the slice isn't aligned for the new type
+/// * If the slice's length isn’t exactly the size of the new type
+/// * If the slice contains an invalid bit pattern for `T`
+#[inline]
+pub fn try_from_bytes_mut<T: CheckedBitPattern + NoUninit>(
+ s: &mut [u8],
+) -> Result<&mut T, CheckedCastError> {
+ let pod = unsafe { internal::try_from_bytes_mut(s) }?;
+
+ if <T as CheckedBitPattern>::is_valid_bit_pattern(pod) {
+ Ok(unsafe { &mut *(pod as *mut <T as CheckedBitPattern>::Bits as *mut T) })
+ } else {
+ Err(CheckedCastError::InvalidBitPattern)
+ }
+}
+
+/// Reads from the bytes as if they were a `T`.
+///
+/// ## Failure
+/// * If the `bytes` length is not equal to `size_of::<T>()`.
+/// * If the slice contains an invalid bit pattern for `T`
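+///
+/// ## Example
+///
+/// A minimal sketch, reading a `char` out of its native-endian bytes:
+///
+/// ```rust
+/// # use bytemuck::checked;
+/// let bytes = 0x41u32.to_ne_bytes();
+/// assert_eq!(checked::try_pod_read_unaligned::<char>(&bytes), Ok('A'));
+/// ```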
+#[inline]
+pub fn try_pod_read_unaligned<T: CheckedBitPattern>(
+ bytes: &[u8],
+) -> Result<T, CheckedCastError> {
+ let pod = crate::try_pod_read_unaligned(bytes)?;
+
+ if <T as CheckedBitPattern>::is_valid_bit_pattern(&pod) {
+ Ok(unsafe { transmute!(pod) })
+ } else {
+ Err(CheckedCastError::InvalidBitPattern)
+ }
+}
+
+/// Try to cast `T` into `U`.
+///
+/// Note that for this particular type of cast, alignment isn't a factor. The
+/// input value is semantically copied into the function and then returned to a
+/// new memory location which will have whatever the required alignment of the
+/// output type is.
+///
+/// ## Failure
+///
+/// * If the types don't have the same size this fails.
+/// * If `a` contains an invalid bit pattern for `B` this fails.
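+///
+/// ## Example
+///
+/// A minimal sketch casting `u32` to `char`:
+///
+/// ```rust
+/// # use bytemuck::checked;
+/// assert_eq!(checked::try_cast::<u32, char>(0x0058), Ok('X'));
+/// // 0xD800 is a UTF-16 surrogate, which is not a valid `char`.
+/// assert!(checked::try_cast::<u32, char>(0xD800).is_err());
+/// ```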
+#[inline]
+pub fn try_cast<A: NoUninit, B: CheckedBitPattern>(
+ a: A,
+) -> Result<B, CheckedCastError> {
+ let pod = crate::try_cast(a)?;
+
+ if <B as CheckedBitPattern>::is_valid_bit_pattern(&pod) {
+ Ok(unsafe { transmute!(pod) })
+ } else {
+ Err(CheckedCastError::InvalidBitPattern)
+ }
+}
+
+/// Try to convert a `&T` into `&U`.
+///
+/// ## Failure
+///
+/// * If the reference isn't aligned in the new type
+/// * If the source type and target type aren't the same size.
+/// * If `a` contains an invalid bit pattern for `B` this fails.
+#[inline]
+pub fn try_cast_ref<A: NoUninit, B: CheckedBitPattern>(
+ a: &A,
+) -> Result<&B, CheckedCastError> {
+ let pod = crate::try_cast_ref(a)?;
+
+ if <B as CheckedBitPattern>::is_valid_bit_pattern(pod) {
+ Ok(unsafe { &*(pod as *const <B as CheckedBitPattern>::Bits as *const B) })
+ } else {
+ Err(CheckedCastError::InvalidBitPattern)
+ }
+}
+
+/// Try to convert a `&mut T` into `&mut U`.
+///
+/// As [`try_cast_ref`], but `mut`.
+#[inline]
+pub fn try_cast_mut<
+ A: NoUninit + AnyBitPattern,
+ B: CheckedBitPattern + NoUninit,
+>(
+ a: &mut A,
+) -> Result<&mut B, CheckedCastError> {
+ let pod = unsafe { internal::try_cast_mut(a) }?;
+
+ if <B as CheckedBitPattern>::is_valid_bit_pattern(pod) {
+ Ok(unsafe { &mut *(pod as *mut <B as CheckedBitPattern>::Bits as *mut B) })
+ } else {
+ Err(CheckedCastError::InvalidBitPattern)
+ }
+}
+
+/// Try to convert `&[A]` into `&[B]` (possibly with a change in length).
+///
+/// * `input.as_ptr() as usize == output.as_ptr() as usize`
+/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
+///
+/// ## Failure
+///
+/// * If the target type has a greater alignment requirement and the input slice
+/// isn't aligned.
+/// * If the target element type is a different size from the current element
+/// type, and the output slice wouldn't be a whole number of elements when
+/// accounting for the size change (eg: 3 `u16` values is 1.5 `u32` values, so
+/// that's a failure).
+/// * Similarly, you can't convert between a [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
+/// and a non-ZST.
+/// * If any element of the converted slice would contain an invalid bit pattern
+/// for `B` this fails.
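+///
+/// ## Example
+///
+/// A minimal sketch casting `&[u8]` to `&[bool]`:
+///
+/// ```rust
+/// # use bytemuck::checked;
+/// let bytes: &[u8] = &[1, 0, 1];
+/// let bools: &[bool] = checked::try_cast_slice(bytes).unwrap();
+/// assert_eq!(bools, &[true, false, true][..]);
+/// // Any byte other than 0 or 1 is rejected.
+/// assert!(checked::try_cast_slice::<u8, bool>(&[2]).is_err());
+/// ```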
+#[inline]
+pub fn try_cast_slice<A: NoUninit, B: CheckedBitPattern>(
+ a: &[A],
+) -> Result<&[B], CheckedCastError> {
+ let pod = crate::try_cast_slice(a)?;
+
+ if pod.iter().all(|pod| <B as CheckedBitPattern>::is_valid_bit_pattern(pod)) {
+ Ok(unsafe {
+ core::slice::from_raw_parts(pod.as_ptr() as *const B, pod.len())
+ })
+ } else {
+ Err(CheckedCastError::InvalidBitPattern)
+ }
+}
+
+/// Try to convert `&mut [A]` into `&mut [B]` (possibly with a change in
+/// length).
+///
+/// As [`try_cast_slice`], but `&mut`.
+#[inline]
+pub fn try_cast_slice_mut<
+ A: NoUninit + AnyBitPattern,
+ B: CheckedBitPattern + NoUninit,
+>(
+ a: &mut [A],
+) -> Result<&mut [B], CheckedCastError> {
+ let pod = unsafe { internal::try_cast_slice_mut(a) }?;
+
+ if pod.iter().all(|pod| <B as CheckedBitPattern>::is_valid_bit_pattern(pod)) {
+ Ok(unsafe {
+ core::slice::from_raw_parts_mut(pod.as_mut_ptr() as *mut B, pod.len())
+ })
+ } else {
+ Err(CheckedCastError::InvalidBitPattern)
+ }
+}
+
+/// Re-interprets `&[u8]` as `&T`.
+///
+/// ## Panics
+///
+/// This is [`try_from_bytes`] but will panic on error.
+#[inline]
+pub fn from_bytes<T: CheckedBitPattern>(s: &[u8]) -> &T {
+ match try_from_bytes(s) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("from_bytes", e),
+ }
+}
+
+/// Re-interprets `&mut [u8]` as `&mut T`.
+///
+/// ## Panics
+///
+/// This is [`try_from_bytes_mut`] but will panic on error.
+#[inline]
+pub fn from_bytes_mut<T: NoUninit + CheckedBitPattern>(s: &mut [u8]) -> &mut T {
+ match try_from_bytes_mut(s) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("from_bytes_mut", e),
+ }
+}
+
+/// Reads the slice into a `T` value.
+///
+/// ## Panics
+/// * This is like `try_pod_read_unaligned` but will panic on failure.
+#[inline]
+pub fn pod_read_unaligned<T: CheckedBitPattern>(bytes: &[u8]) -> T {
+ match try_pod_read_unaligned(bytes) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("pod_read_unaligned", e),
+ }
+}
+
+/// Cast `T` into `U`
+///
+/// ## Panics
+///
+/// * This is like [`try_cast`](try_cast), but will panic on a size mismatch.
+#[inline]
+pub fn cast<A: NoUninit, B: CheckedBitPattern>(a: A) -> B {
+ match try_cast(a) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("cast", e),
+ }
+}
+
+/// Cast `&mut T` into `&mut U`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_mut`] but will panic on error.
+#[inline]
+pub fn cast_mut<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + CheckedBitPattern,
+>(
+ a: &mut A,
+) -> &mut B {
+ match try_cast_mut(a) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("cast_mut", e),
+ }
+}
+
+/// Cast `&T` into `&U`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_ref`] but will panic on error.
+#[inline]
+pub fn cast_ref<A: NoUninit, B: CheckedBitPattern>(a: &A) -> &B {
+ match try_cast_ref(a) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("cast_ref", e),
+ }
+}
+
+/// Cast `&[A]` into `&[B]`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_slice`] but will panic on error.
+#[inline]
+pub fn cast_slice<A: NoUninit, B: CheckedBitPattern>(a: &[A]) -> &[B] {
+ match try_cast_slice(a) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("cast_slice", e),
+ }
+}
+
+/// Cast `&mut [T]` into `&mut [U]`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_slice_mut`] but will panic on error.
+#[inline]
+pub fn cast_slice_mut<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + CheckedBitPattern,
+>(
+ a: &mut [A],
+) -> &mut [B] {
+ match try_cast_slice_mut(a) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("cast_slice_mut", e),
+ }
+}
diff --git a/vendor/bytemuck/src/contiguous.rs b/vendor/bytemuck/src/contiguous.rs
new file mode 100644
index 0000000..538514b
--- /dev/null
+++ b/vendor/bytemuck/src/contiguous.rs
@@ -0,0 +1,202 @@
+use super::*;
+
+/// A trait indicating that:
+///
+/// 1. A type has an equivalent representation to some known integral type.
+/// 2. All instances of this type fall in a fixed range of values.
+/// 3. Within that range, there are no gaps.
+///
+/// This is generally useful for fieldless enums (aka "c-style" enums), however
+/// it's important that it only be used for those with an explicit `#[repr]`, as
+/// `#[repr(Rust)]` fieldless enums have an unspecified layout.
+///
+/// Additionally, you shouldn't assume that all implementations are enums. Any
+/// type which meets the requirements above while following the rules under
+/// "Safety" below is valid.
+///
+/// # Example
+///
+/// ```
+/// # use bytemuck::Contiguous;
+/// #[repr(u8)]
+/// #[derive(Debug, Copy, Clone, PartialEq)]
+/// enum Foo {
+/// A = 0,
+/// B = 1,
+/// C = 2,
+/// D = 3,
+/// E = 4,
+/// }
+/// unsafe impl Contiguous for Foo {
+/// type Int = u8;
+/// const MIN_VALUE: u8 = Foo::A as u8;
+/// const MAX_VALUE: u8 = Foo::E as u8;
+/// }
+/// assert_eq!(Foo::from_integer(3).unwrap(), Foo::D);
+/// assert_eq!(Foo::from_integer(8), None);
+/// assert_eq!(Foo::C.into_integer(), 2);
+/// ```
+/// # Safety
+///
+/// This is an unsafe trait, and incorrectly implementing it is undefined
+/// behavior.
+///
+/// Informally, by implementing it, you're asserting that `C` is identical to
+/// the integral type `C::Int`, and that every `C` falls between `C::MIN_VALUE`
+/// and `C::MAX_VALUE` exactly once, without any gaps.
+///
+/// Precisely, the guarantees you must uphold when implementing `Contiguous` for
+/// some type `C` are:
+///
+/// 1. The size of `C` and `C::Int` must be the same, and neither may be a ZST.
+/// (Note: alignment is explicitly allowed to differ)
+///
+/// 2. `C::Int` must be a primitive integer, and not a wrapper type. In the
+/// future, this may be lifted to include cases where the behavior is
+/// identical for a relevant set of traits (Ord, arithmetic, ...).
+///
+/// 3. All `C::Int`s which are in the *inclusive* range between `C::MIN_VALUE`
+/// and `C::MAX_VALUE` are bitwise identical to unique valid instances of
+/// `C`.
+///
+/// 4. There exist no instances of `C` such that their bitpatterns, when
+/// interpreted as instances of `C::Int`, fall outside of the `MAX_VALUE` /
+/// `MIN_VALUE` range -- It is legal for unsafe code to assume that if it
+/// gets a `C` that implements `Contiguous`, it is in the appropriate range.
+///
+/// 5. Finally, you promise not to provide overridden implementations of
+/// `Contiguous::from_integer` and `Contiguous::into_integer`.
+///
+/// For clarity, the following rules could be derived from the above, but are
+/// listed explicitly:
+///
+/// - `C::MAX_VALUE` must be greater or equal to `C::MIN_VALUE` (therefore, `C`
+/// must be an inhabited type).
+///
+/// - There exist no two values between `MIN_VALUE` and `MAX_VALUE` such that
+/// when interpreted as a `C` they are considered identical (by, say, match).
+pub unsafe trait Contiguous: Copy + 'static {
+ /// The primitive integer type with an identical representation to this
+ /// type.
+ ///
+ /// Contiguous is broadly intended for use with fieldless enums, and for
+ /// these the correct integer type is easy: The enum should have a
+  /// `#[repr(Int)]` or `#[repr(C)]` attribute (if it does not, it is
+ /// *unsound* to implement `Contiguous`!).
+ ///
+ /// - For `#[repr(Int)]`, use the listed `Int`. e.g. `#[repr(u8)]` should use
+ /// `type Int = u8`.
+ ///
+ /// - For `#[repr(C)]`, use whichever type the C compiler will use to
+ /// represent the given enum. This is usually `c_int` (from `std::os::raw`
+ /// or `libc`), but it's up to you to make the determination as the
+ /// implementer of the unsafe trait.
+ ///
+ /// For precise rules, see the list under "Safety" above.
+ type Int: Copy + Ord;
+
+ /// The upper *inclusive* bound for valid instances of this type.
+ const MAX_VALUE: Self::Int;
+
+ /// The lower *inclusive* bound for valid instances of this type.
+ const MIN_VALUE: Self::Int;
+
+ /// If `value` is within the range for valid instances of this type,
+ /// returns `Some(converted_value)`, otherwise, returns `None`.
+ ///
+  /// This is a trait method so that you can write `Self::from_integer(value)`
+  /// in
+ /// your code. It is a contract of this trait that if you implement
+ /// `Contiguous` on your type you **must not** override this method.
+ ///
+ /// # Panics
+ ///
+ /// We will not panic for any correct implementation of `Contiguous`, but
+ /// *may* panic if we detect an incorrect one.
+ ///
+ /// This is undefined behavior regardless, so it could have been the nasal
+ /// demons at that point anyway ;).
+ #[inline]
+ fn from_integer(value: Self::Int) -> Option<Self> {
+ // Guard against an illegal implementation of Contiguous. Annoyingly we
+ // can't rely on `transmute` to do this for us (see below), but
+ // whatever, this gets compiled into nothing in release.
+ assert!(size_of::<Self>() == size_of::<Self::Int>());
+ if Self::MIN_VALUE <= value && value <= Self::MAX_VALUE {
+ // SAFETY: We've checked their bounds (and their size, even though
+ // they've sworn under the Oath Of Unsafe Rust that that already
+ // matched) so this is allowed by `Contiguous`'s unsafe contract.
+ //
+      // So, the `transmute!`. Ideally we'd use `transmute` here, which
+ // is more obviously safe. Sadly, we can't, as these types still
+ // have unspecified sizes.
+ Some(unsafe { transmute!(value) })
+ } else {
+ None
+ }
+ }
+
+ /// Perform the conversion from `C` into the underlying integral type. This
+  /// mostly exists because otherwise generic code would need `unsafe` to do
+  /// `value as Int`.
+ ///
+ /// This is a trait method so that you can write `value.into_integer()` in
+ /// your code. It is a contract of this trait that if you implement
+ /// `Contiguous` on your type you **must not** override this method.
+ ///
+ /// # Panics
+ ///
+ /// We will not panic for any correct implementation of `Contiguous`, but
+ /// *may* panic if we detect an incorrect one.
+ ///
+ /// This is undefined behavior regardless, so it could have been the nasal
+ /// demons at that point anyway ;).
+ #[inline]
+ fn into_integer(self) -> Self::Int {
+ // Guard against an illegal implementation of Contiguous. Annoyingly we
+ // can't rely on `transmute` to do the size check for us (see
+ // `from_integer's comment`), but whatever, this gets compiled into
+ // nothing in release. Note that we don't check the result of cast
+ assert!(size_of::<Self>() == size_of::<Self::Int>());
+
+ // SAFETY: The unsafe contract requires that these have identical
+ // representations, and that the range be entirely valid. Using
+ // transmute! instead of transmute here is annoying, but is required
+ // as `Self` and `Self::Int` have unspecified sizes still.
+ unsafe { transmute!(self) }
+ }
+}
+
+macro_rules! impl_contiguous {
+ ($($src:ty as $repr:ident in [$min:expr, $max:expr];)*) => {$(
+ unsafe impl Contiguous for $src {
+ type Int = $repr;
+ const MAX_VALUE: $repr = $max;
+ const MIN_VALUE: $repr = $min;
+ }
+ )*};
+}
+
+impl_contiguous! {
+ bool as u8 in [0, 1];
+
+ u8 as u8 in [0, u8::max_value()];
+ u16 as u16 in [0, u16::max_value()];
+ u32 as u32 in [0, u32::max_value()];
+ u64 as u64 in [0, u64::max_value()];
+ u128 as u128 in [0, u128::max_value()];
+ usize as usize in [0, usize::max_value()];
+
+ i8 as i8 in [i8::min_value(), i8::max_value()];
+ i16 as i16 in [i16::min_value(), i16::max_value()];
+ i32 as i32 in [i32::min_value(), i32::max_value()];
+ i64 as i64 in [i64::min_value(), i64::max_value()];
+ i128 as i128 in [i128::min_value(), i128::max_value()];
+ isize as isize in [isize::min_value(), isize::max_value()];
+
+ NonZeroU8 as u8 in [1, u8::max_value()];
+ NonZeroU16 as u16 in [1, u16::max_value()];
+ NonZeroU32 as u32 in [1, u32::max_value()];
+ NonZeroU64 as u64 in [1, u64::max_value()];
+ NonZeroU128 as u128 in [1, u128::max_value()];
+ NonZeroUsize as usize in [1, usize::max_value()];
+}
diff --git a/vendor/bytemuck/src/internal.rs b/vendor/bytemuck/src/internal.rs
new file mode 100644
index 0000000..3ede50f
--- /dev/null
+++ b/vendor/bytemuck/src/internal.rs
@@ -0,0 +1,402 @@
+//! Internal implementation of casting functions not bound by marker traits
+//! and therefore marked as unsafe. This is used so that we don't need to
+//! duplicate the business logic contained in these functions between the
+//! versions exported in the crate root, `checked`, and `relaxed` modules.
+#![allow(unused_unsafe)]
+
+use crate::PodCastError;
+use core::{marker::*, mem::*};
+
+/*
+
+Note(Lokathor): We've switched all of the `unwrap` to `match` because there is
+apparently a bug: https://github.com/rust-lang/rust/issues/68667
+and it doesn't seem to show up in simple godbolt examples but has been reported
+as having an impact when there's a cast mixed in with other more complicated
+code around it. Rustc/LLVM ends up missing that the `Err` can't ever happen for
+particular type combinations, and then it doesn't fully eliminate the panic
+possibility code branch.
+
+*/
+
+/// Immediately panics.
+#[cfg(not(target_arch = "spirv"))]
+#[cold]
+#[inline(never)]
+pub(crate) fn something_went_wrong<D: core::fmt::Display>(
+ _src: &str, _err: D,
+) -> ! {
+ // Note(Lokathor): Keeping the panic here makes the panic _formatting_ go
+ // here too, which helps assembly readability and also helps keep down
+ // the inline pressure.
+ panic!("{src}>{err}", src = _src, err = _err);
+}
+
+/// Immediately panics.
+#[cfg(target_arch = "spirv")]
+#[cold]
+#[inline(never)]
+pub(crate) fn something_went_wrong<D>(_src: &str, _err: D) -> ! {
+ // Note: On the spirv targets from [rust-gpu](https://github.com/EmbarkStudios/rust-gpu)
+  // panic formatting cannot be used, so we just give a generic error message.
+ // The chance that the panicking version of these functions will ever get
+ // called on spir-v targets with invalid inputs is small, but giving a
+ // simple error message is better than no error message at all.
+ panic!("Called a panicing helper from bytemuck which paniced");
+}
+
+/// Re-interprets `&T` as `&[u8]`.
+///
+/// Any ZST becomes an empty slice, and in that case the pointer value of that
+/// empty slice might not match the pointer value of the input reference.
+#[inline(always)]
+pub(crate) unsafe fn bytes_of<T: Copy>(t: &T) -> &[u8] {
+ if size_of::<T>() == 0 {
+ &[]
+ } else {
+ match try_cast_slice::<T, u8>(core::slice::from_ref(t)) {
+ Ok(s) => s,
+ Err(_) => unreachable!(),
+ }
+ }
+}
+
+/// Re-interprets `&mut T` as `&mut [u8]`.
+///
+/// Any ZST becomes an empty slice, and in that case the pointer value of that
+/// empty slice might not match the pointer value of the input reference.
+#[inline]
+pub(crate) unsafe fn bytes_of_mut<T: Copy>(t: &mut T) -> &mut [u8] {
+ if size_of::<T>() == 0 {
+ &mut []
+ } else {
+ match try_cast_slice_mut::<T, u8>(core::slice::from_mut(t)) {
+ Ok(s) => s,
+ Err(_) => unreachable!(),
+ }
+ }
+}
+
+/// Re-interprets `&[u8]` as `&T`.
+///
+/// ## Panics
+///
+/// This is [`try_from_bytes`] but will panic on error.
+#[inline]
+pub(crate) unsafe fn from_bytes<T: Copy>(s: &[u8]) -> &T {
+ match try_from_bytes(s) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("from_bytes", e),
+ }
+}
+
+/// Re-interprets `&mut [u8]` as `&mut T`.
+///
+/// ## Panics
+///
+/// This is [`try_from_bytes_mut`] but will panic on error.
+#[inline]
+pub(crate) unsafe fn from_bytes_mut<T: Copy>(s: &mut [u8]) -> &mut T {
+ match try_from_bytes_mut(s) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("from_bytes_mut", e),
+ }
+}
+
+/// Reads from the bytes as if they were a `T`.
+///
+/// ## Failure
+/// * If the `bytes` length is not equal to `size_of::<T>()`.
+#[inline]
+pub(crate) unsafe fn try_pod_read_unaligned<T: Copy>(
+ bytes: &[u8],
+) -> Result<T, PodCastError> {
+ if bytes.len() != size_of::<T>() {
+ Err(PodCastError::SizeMismatch)
+ } else {
+ Ok(unsafe { (bytes.as_ptr() as *const T).read_unaligned() })
+ }
+}
+
+/// Reads the slice into a `T` value.
+///
+/// ## Panics
+/// * This is like `try_pod_read_unaligned` but will panic on failure.
+#[inline]
+pub(crate) unsafe fn pod_read_unaligned<T: Copy>(bytes: &[u8]) -> T {
+ match try_pod_read_unaligned(bytes) {
+ Ok(t) => t,
+ Err(e) => something_went_wrong("pod_read_unaligned", e),
+ }
+}
+
+/// Checks if `ptr` is aligned to an `align` memory boundary.
+///
+/// ## Panics
+/// * If `align` is not a power of two. This includes when `align` is zero.
+#[inline]
+pub(crate) fn is_aligned_to(ptr: *const (), align: usize) -> bool {
+ #[cfg(feature = "align_offset")]
+ {
+ // This is in a way better than `ptr as usize % align == 0`,
+ // because casting a pointer to an integer has the side effect that it
+ // exposes the pointer's provenance, which may theoretically inhibit
+ // some compiler optimizations.
+ ptr.align_offset(align) == 0
+ }
+ #[cfg(not(feature = "align_offset"))]
+ {
+ ((ptr as usize) % align) == 0
+ }
+}
+
+/// Re-interprets `&[u8]` as `&T`.
+///
+/// ## Failure
+///
+/// * If the slice isn't aligned for the new type
+/// * If the slice's length isn't exactly the size of the new type
+#[inline]
+pub(crate) unsafe fn try_from_bytes<T: Copy>(
+ s: &[u8],
+) -> Result<&T, PodCastError> {
+ if s.len() != size_of::<T>() {
+ Err(PodCastError::SizeMismatch)
+ } else if !is_aligned_to(s.as_ptr() as *const (), align_of::<T>()) {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else {
+ Ok(unsafe { &*(s.as_ptr() as *const T) })
+ }
+}
+
+/// Re-interprets `&mut [u8]` as `&mut T`.
+///
+/// ## Failure
+///
+/// * If the slice isn't aligned for the new type
+/// * If the slice's length isn't exactly the size of the new type
+#[inline]
+pub(crate) unsafe fn try_from_bytes_mut<T: Copy>(
+ s: &mut [u8],
+) -> Result<&mut T, PodCastError> {
+ if s.len() != size_of::<T>() {
+ Err(PodCastError::SizeMismatch)
+ } else if !is_aligned_to(s.as_ptr() as *const (), align_of::<T>()) {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else {
+ Ok(unsafe { &mut *(s.as_mut_ptr() as *mut T) })
+ }
+}
+
+/// Cast `T` into `U`
+///
+/// ## Panics
+///
+/// * This is like [`try_cast`](try_cast), but will panic on a size mismatch.
+#[inline]
+pub(crate) unsafe fn cast<A: Copy, B: Copy>(a: A) -> B {
+ if size_of::<A>() == size_of::<B>() {
+ unsafe { transmute!(a) }
+ } else {
+ something_went_wrong("cast", PodCastError::SizeMismatch)
+ }
+}
+
+/// Cast `&mut T` into `&mut U`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_mut`] but will panic on error.
+#[inline]
+pub(crate) unsafe fn cast_mut<A: Copy, B: Copy>(a: &mut A) -> &mut B {
+ if size_of::<A>() == size_of::<B>() && align_of::<A>() >= align_of::<B>() {
+ // Plz mr compiler, just notice that we can't ever hit Err in this case.
+ match try_cast_mut(a) {
+ Ok(b) => b,
+ Err(_) => unreachable!(),
+ }
+ } else {
+ match try_cast_mut(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast_mut", e),
+ }
+ }
+}
+
+/// Cast `&T` into `&U`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_ref`] but will panic on error.
+#[inline]
+pub(crate) unsafe fn cast_ref<A: Copy, B: Copy>(a: &A) -> &B {
+ if size_of::<A>() == size_of::<B>() && align_of::<A>() >= align_of::<B>() {
+ // Plz mr compiler, just notice that we can't ever hit Err in this case.
+ match try_cast_ref(a) {
+ Ok(b) => b,
+ Err(_) => unreachable!(),
+ }
+ } else {
+ match try_cast_ref(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast_ref", e),
+ }
+ }
+}
+
+/// Cast `&[A]` into `&[B]`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_slice`] but will panic on error.
+#[inline]
+pub(crate) unsafe fn cast_slice<A: Copy, B: Copy>(a: &[A]) -> &[B] {
+ match try_cast_slice(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast_slice", e),
+ }
+}
+
+/// Cast `&mut [T]` into `&mut [U]`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_slice_mut`] but will panic on error.
+#[inline]
+pub(crate) unsafe fn cast_slice_mut<A: Copy, B: Copy>(a: &mut [A]) -> &mut [B] {
+ match try_cast_slice_mut(a) {
+ Ok(b) => b,
+ Err(e) => something_went_wrong("cast_slice_mut", e),
+ }
+}
+
+/// Try to cast `T` into `U`.
+///
+/// Note that for this particular type of cast, alignment isn't a factor. The
+/// input value is semantically copied into the function and then returned to a
+/// new memory location which will have whatever the required alignment of the
+/// output type is.
+///
+/// ## Failure
+///
+/// * If the types don't have the same size this fails.
+#[inline]
+pub(crate) unsafe fn try_cast<A: Copy, B: Copy>(
+ a: A,
+) -> Result<B, PodCastError> {
+ if size_of::<A>() == size_of::<B>() {
+ Ok(unsafe { transmute!(a) })
+ } else {
+ Err(PodCastError::SizeMismatch)
+ }
+}
+
+/// Try to convert a `&T` into `&U`.
+///
+/// ## Failure
+///
+/// * If the reference isn't aligned in the new type
+/// * If the source type and target type aren't the same size.
+#[inline]
+pub(crate) unsafe fn try_cast_ref<A: Copy, B: Copy>(
+ a: &A,
+) -> Result<&B, PodCastError> {
+ // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
+ // after monomorphization.
+ if align_of::<B>() > align_of::<A>()
+ && !is_aligned_to(a as *const A as *const (), align_of::<B>())
+ {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else if size_of::<B>() == size_of::<A>() {
+ Ok(unsafe { &*(a as *const A as *const B) })
+ } else {
+ Err(PodCastError::SizeMismatch)
+ }
+}
+
+/// Try to convert a `&mut T` into `&mut U`.
+///
+/// As [`try_cast_ref`], but `mut`.
+#[inline]
+pub(crate) unsafe fn try_cast_mut<A: Copy, B: Copy>(
+ a: &mut A,
+) -> Result<&mut B, PodCastError> {
+ // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
+ // after monomorphization.
+ if align_of::<B>() > align_of::<A>()
+ && !is_aligned_to(a as *const A as *const (), align_of::<B>())
+ {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else if size_of::<B>() == size_of::<A>() {
+ Ok(unsafe { &mut *(a as *mut A as *mut B) })
+ } else {
+ Err(PodCastError::SizeMismatch)
+ }
+}
+
+/// Try to convert `&[A]` into `&[B]` (possibly with a change in length).
+///
+/// * `input.as_ptr() as usize == output.as_ptr() as usize`
+/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
+///
+/// ## Failure
+///
+/// * If the target type has a greater alignment requirement and the input slice
+/// isn't aligned.
+/// * If the target element type is a different size from the current element
+/// type, and the output slice wouldn't be a whole number of elements when
+/// accounting for the size change (eg: 3 `u16` values is 1.5 `u32` values, so
+/// that's a failure).
+/// * Similarly, you can't convert between a [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
+/// and a non-ZST.
+#[inline]
+pub(crate) unsafe fn try_cast_slice<A: Copy, B: Copy>(
+ a: &[A],
+) -> Result<&[B], PodCastError> {
+ // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
+ // after monomorphization.
+ if align_of::<B>() > align_of::<A>()
+ && !is_aligned_to(a.as_ptr() as *const (), align_of::<B>())
+ {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else if size_of::<B>() == size_of::<A>() {
+ Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, a.len()) })
+ } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
+ Err(PodCastError::SizeMismatch)
+ } else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
+ let new_len = core::mem::size_of_val(a) / size_of::<B>();
+ Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) })
+ } else {
+ Err(PodCastError::OutputSliceWouldHaveSlop)
+ }
+}
+
+/// Try to convert `&mut [A]` into `&mut [B]` (possibly with a change in
+/// length).
+///
+/// As [`try_cast_slice`], but `&mut`.
+#[inline]
+pub(crate) unsafe fn try_cast_slice_mut<A: Copy, B: Copy>(
+ a: &mut [A],
+) -> Result<&mut [B], PodCastError> {
+ // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
+ // after monomorphization.
+ if align_of::<B>() > align_of::<A>()
+ && !is_aligned_to(a.as_ptr() as *const (), align_of::<B>())
+ {
+ Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
+ } else if size_of::<B>() == size_of::<A>() {
+ Ok(unsafe {
+ core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, a.len())
+ })
+ } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
+ Err(PodCastError::SizeMismatch)
+ } else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
+ let new_len = core::mem::size_of_val(a) / size_of::<B>();
+ Ok(unsafe {
+ core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len)
+ })
+ } else {
+ Err(PodCastError::OutputSliceWouldHaveSlop)
+ }
+}
diff --git a/vendor/bytemuck/src/lib.rs b/vendor/bytemuck/src/lib.rs
new file mode 100644
index 0000000..000dacb
--- /dev/null
+++ b/vendor/bytemuck/src/lib.rs
@@ -0,0 +1,457 @@
+#![no_std]
+#![warn(missing_docs)]
+#![allow(clippy::match_like_matches_macro)]
+#![allow(clippy::uninlined_format_args)]
+#![cfg_attr(feature = "nightly_docs", feature(doc_cfg))]
+#![cfg_attr(feature = "nightly_portable_simd", feature(portable_simd))]
+#![cfg_attr(feature = "nightly_stdsimd", feature(stdsimd))]
+
+//! This crate gives small utilities for casting between plain data types.
+//!
+//! ## Basics
+//!
+//! Data comes in five basic forms in Rust, so we have five basic casting
+//! functions:
+//!
+//! * `T` uses [`cast`]
+//! * `&T` uses [`cast_ref`]
+//! * `&mut T` uses [`cast_mut`]
+//! * `&[T]` uses [`cast_slice`]
+//! * `&mut [T]` uses [`cast_slice_mut`]
+//!
+//! Some casts will never fail (eg: `cast::<u32, f32>` always works), other
+//! casts might fail (eg: `cast_ref::<[u8; 4], u32>` will fail if the reference
+//! isn't already aligned to 4). Each casting function has a "try" version which
+//! will return a `Result`, and the "normal" version which will simply panic on
+//! invalid input.
+//!
+//! ## Using Your Own Types
+//!
+//! All the functions here are guarded by the [`Pod`] trait, which is a
+//! sub-trait of the [`Zeroable`] trait.
+//!
+//! If you're very sure that your type is eligible, you can implement those
+//! traits for your type and then they'll have full casting support. However,
+//! these traits are `unsafe`, and you should carefully read the requirements
+//! before adding the them to your own types.
+//!
+//! ## Features
+//!
+//! * This crate is core only by default, but if you're using Rust 1.36 or later
+//! you can enable the `extern_crate_alloc` cargo feature for some additional
+//! methods related to `Box` and `Vec`. Note that the `docs.rs` documentation
+//! is always built with `extern_crate_alloc` cargo feature enabled.
+
+#[cfg(all(target_arch = "aarch64", feature = "aarch64_simd"))]
+use core::arch::aarch64;
+#[cfg(all(target_arch = "wasm32", feature = "wasm_simd"))]
+use core::arch::wasm32;
+#[cfg(target_arch = "x86")]
+use core::arch::x86;
+#[cfg(target_arch = "x86_64")]
+use core::arch::x86_64;
+//
+use core::{marker::*, mem::*, num::*, ptr::*};
+
+// Used from macros to ensure we aren't using some locally defined name and
+// actually are referencing libcore. This also would allow pre-2018 edition
+// crates to use our macros, but I'm not sure how important that is.
+#[doc(hidden)]
+pub use ::core as __core;
+
+#[cfg(not(feature = "min_const_generics"))]
+macro_rules! impl_unsafe_marker_for_array {
+ ( $marker:ident , $( $n:expr ),* ) => {
+ $(unsafe impl<T> $marker for [T; $n] where T: $marker {})*
+ }
+}
+
+/// A macro to transmute between two types without the compiler statically
+/// checking that their sizes match.
+macro_rules! transmute {
+ ($val:expr) => {
+ ::core::mem::transmute_copy(&::core::mem::ManuallyDrop::new($val))
+ };
+}
+
+/// A macro to implement marker traits for various simd types.
+/// #[allow(unused)] because the impls are only compiled on relevant platforms
+/// with relevant cargo features enabled.
+#[allow(unused)]
+macro_rules! impl_unsafe_marker_for_simd {
+ ($(#[cfg($cfg_predicate:meta)])? unsafe impl $trait:ident for $platform:ident :: {}) => {};
+ ($(#[cfg($cfg_predicate:meta)])? unsafe impl $trait:ident for $platform:ident :: { $first_type:ident $(, $types:ident)* $(,)? }) => {
+ $( #[cfg($cfg_predicate)] )?
+ $( #[cfg_attr(feature = "nightly_docs", doc(cfg($cfg_predicate)))] )?
+ unsafe impl $trait for $platform::$first_type {}
+ $( #[cfg($cfg_predicate)] )? // To prevent recursion errors if nothing is going to be expanded anyway.
+ impl_unsafe_marker_for_simd!($( #[cfg($cfg_predicate)] )? unsafe impl $trait for $platform::{ $( $types ),* });
+ };
+}
+
+#[cfg(feature = "extern_crate_std")]
+extern crate std;
+
+#[cfg(feature = "extern_crate_alloc")]
+extern crate alloc;
+#[cfg(feature = "extern_crate_alloc")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "extern_crate_alloc")))]
+pub mod allocation;
+#[cfg(feature = "extern_crate_alloc")]
+pub use allocation::*;
+
+mod anybitpattern;
+pub use anybitpattern::*;
+
+pub mod checked;
+pub use checked::CheckedBitPattern;
+
+mod internal;
+
+mod zeroable;
+pub use zeroable::*;
+mod zeroable_in_option;
+pub use zeroable_in_option::*;
+
+mod pod;
+pub use pod::*;
+mod pod_in_option;
+pub use pod_in_option::*;
+
+#[cfg(feature = "must_cast")]
+mod must;
+#[cfg(feature = "must_cast")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "must_cast")))]
+pub use must::*;
+
+mod no_uninit;
+pub use no_uninit::*;
+
+mod contiguous;
+pub use contiguous::*;
+
+mod offset_of;
+pub use offset_of::*;
+
+mod transparent;
+pub use transparent::*;
+
+#[cfg(feature = "derive")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "derive")))]
+pub use bytemuck_derive::{
+ AnyBitPattern, ByteEq, ByteHash, CheckedBitPattern, Contiguous, NoUninit,
+ Pod, TransparentWrapper, Zeroable,
+};
+
+/// The things that can go wrong when casting between [`Pod`] data forms.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum PodCastError {
+ /// You tried to cast a slice to an element type with a higher alignment
+ /// requirement but the slice wasn't aligned.
+ TargetAlignmentGreaterAndInputNotAligned,
+ /// If the element size changes then the output slice changes length
+ /// accordingly. If the output slice wouldn't be a whole number of elements
+ /// then the conversion fails.
+ OutputSliceWouldHaveSlop,
+ /// When casting a slice you can't convert between ZST elements and non-ZST
+ /// elements. When casting an individual `T`, `&T`, or `&mut T` value the
+ /// source size and destination size must be an exact match.
+ SizeMismatch,
+ /// For this type of cast the alignments must be exactly the same and they
+ /// were not so now you're sad.
+ ///
+ /// This error is generated **only** by operations that cast allocated types
+ /// (such as `Box` and `Vec`), because in that case the alignment must stay
+ /// exact.
+ AlignmentMismatch,
+}
+#[cfg(not(target_arch = "spirv"))]
+impl core::fmt::Display for PodCastError {
+ fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+ write!(f, "{:?}", self)
+ }
+}
+#[cfg(feature = "extern_crate_std")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "extern_crate_std")))]
+impl std::error::Error for PodCastError {}
+
+/// Re-interprets `&T` as `&[u8]`.
+///
+/// Any ZST becomes an empty slice, and in that case the pointer value of that
+/// empty slice might not match the pointer value of the input reference.
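+///
+/// ## Examples
+///
+/// A minimal sketch of typical usage; the exact byte values depend on the
+/// target's endianness, so only the length is checked here:
+///
+/// ```
+/// let x = 0xAABBCCDD_u32;
+/// let bytes: &[u8] = bytemuck::bytes_of(&x);
+/// assert_eq!(bytes.len(), 4);
+/// ```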
+#[inline]
+pub fn bytes_of<T: NoUninit>(t: &T) -> &[u8] {
+ unsafe { internal::bytes_of(t) }
+}
+
+/// Re-interprets `&mut T` as `&mut [u8]`.
+///
+/// Any ZST becomes an empty slice, and in that case the pointer value of that
+/// empty slice might not match the pointer value of the input reference.
+#[inline]
+pub fn bytes_of_mut<T: NoUninit + AnyBitPattern>(t: &mut T) -> &mut [u8] {
+ unsafe { internal::bytes_of_mut(t) }
+}
+
+/// Re-interprets `&[u8]` as `&T`.
+///
+/// ## Panics
+///
+/// This is [`try_from_bytes`] but will panic on error.
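+///
+/// ## Examples
+///
+/// A small sketch; the input is produced via [`bytes_of`] so that it is
+/// guaranteed to be correctly sized and aligned for `u32`:
+///
+/// ```
+/// let x = 5_u32;
+/// let bytes = bytemuck::bytes_of(&x);
+/// let y: &u32 = bytemuck::from_bytes(bytes);
+/// assert_eq!(*y, 5);
+/// ```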
+#[inline]
+pub fn from_bytes<T: AnyBitPattern>(s: &[u8]) -> &T {
+ unsafe { internal::from_bytes(s) }
+}
+
+/// Re-interprets `&mut [u8]` as `&mut T`.
+///
+/// ## Panics
+///
+/// This is [`try_from_bytes_mut`] but will panic on error.
+#[inline]
+pub fn from_bytes_mut<T: NoUninit + AnyBitPattern>(s: &mut [u8]) -> &mut T {
+ unsafe { internal::from_bytes_mut(s) }
+}
+
+/// Reads from the bytes as if they were a `T`.
+///
+/// ## Failure
+/// * If the `bytes` length is not equal to `size_of::<T>()`.
+#[inline]
+pub fn try_pod_read_unaligned<T: AnyBitPattern>(
+ bytes: &[u8],
+) -> Result<T, PodCastError> {
+ unsafe { internal::try_pod_read_unaligned(bytes) }
+}
+
+/// Reads the slice into a `T` value.
+///
+/// ## Panics
+/// * This is like `try_pod_read_unaligned` but will panic on failure.
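+///
+/// ## Examples
+///
+/// A minimal sketch; all-zero input reads as zero regardless of endianness,
+/// and no particular alignment of the input is required:
+///
+/// ```
+/// let v: u32 = bytemuck::pod_read_unaligned(&[0_u8; 4]);
+/// assert_eq!(v, 0);
+/// ```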
+#[inline]
+pub fn pod_read_unaligned<T: AnyBitPattern>(bytes: &[u8]) -> T {
+ unsafe { internal::pod_read_unaligned(bytes) }
+}
+
+/// Re-interprets `&[u8]` as `&T`.
+///
+/// ## Failure
+///
+/// * If the slice isn't aligned for the new type
+/// * If the slice's length isn't exactly the size of the new type
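+///
+/// ## Examples
+///
+/// A small sketch of the size failure mode: three bytes can never be a `u32`:
+///
+/// ```
+/// let bytes = [0_u8; 3];
+/// assert!(bytemuck::try_from_bytes::<u32>(&bytes).is_err());
+/// ```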
+#[inline]
+pub fn try_from_bytes<T: AnyBitPattern>(s: &[u8]) -> Result<&T, PodCastError> {
+ unsafe { internal::try_from_bytes(s) }
+}
+
+/// Re-interprets `&mut [u8]` as `&mut T`.
+///
+/// ## Failure
+///
+/// * If the slice isn't aligned for the new type
+/// * If the slice's length isn't exactly the size of the new type
+#[inline]
+pub fn try_from_bytes_mut<T: NoUninit + AnyBitPattern>(
+ s: &mut [u8],
+) -> Result<&mut T, PodCastError> {
+ unsafe { internal::try_from_bytes_mut(s) }
+}
+
+/// Cast `T` into `U`
+///
+/// ## Panics
+///
+/// * This is like [`try_cast`](try_cast), but will panic on a size mismatch.
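+///
+/// ## Examples
+///
+/// A minimal sketch: a same-size value cast between `f32` and `u32`:
+///
+/// ```
+/// let bits: u32 = bytemuck::cast(1.0_f32);
+/// assert_eq!(bits, 1.0_f32.to_bits());
+/// ```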
+#[inline]
+pub fn cast<A: NoUninit, B: AnyBitPattern>(a: A) -> B {
+ unsafe { internal::cast(a) }
+}
+
+/// Cast `&mut T` into `&mut U`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_mut`] but will panic on error.
+#[inline]
+pub fn cast_mut<A: NoUninit + AnyBitPattern, B: NoUninit + AnyBitPattern>(
+ a: &mut A,
+) -> &mut B {
+ unsafe { internal::cast_mut(a) }
+}
+
+/// Cast `&T` into `&U`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_ref`] but will panic on error.
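+///
+/// ## Examples
+///
+/// A minimal sketch: viewing a `u32` as a same-size byte array (the array's
+/// contents depend on endianness, so only the length is checked):
+///
+/// ```
+/// let x = 7_u32;
+/// let arr: &[u8; 4] = bytemuck::cast_ref(&x);
+/// assert_eq!(arr.len(), 4);
+/// ```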
+#[inline]
+pub fn cast_ref<A: NoUninit, B: AnyBitPattern>(a: &A) -> &B {
+ unsafe { internal::cast_ref(a) }
+}
+
+/// Cast `&[A]` into `&[B]`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_slice`] but will panic on error.
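+///
+/// ## Examples
+///
+/// A minimal sketch: three `u16` elements re-viewed as six bytes:
+///
+/// ```
+/// let ints = [1_u16, 2, 3];
+/// let bytes: &[u8] = bytemuck::cast_slice(&ints);
+/// assert_eq!(bytes.len(), 6);
+/// ```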
+#[inline]
+pub fn cast_slice<A: NoUninit, B: AnyBitPattern>(a: &[A]) -> &[B] {
+ unsafe { internal::cast_slice(a) }
+}
+
+/// Cast `&mut [T]` into `&mut [U]`.
+///
+/// ## Panics
+///
+/// This is [`try_cast_slice_mut`] but will panic on error.
+#[inline]
+pub fn cast_slice_mut<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + AnyBitPattern,
+>(
+ a: &mut [A],
+) -> &mut [B] {
+ unsafe { internal::cast_slice_mut(a) }
+}
+
+/// As `align_to`, but safe because of the [`Pod`] bound.
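+///
+/// ## Examples
+///
+/// A small sketch; the split point depends on the buffer's runtime address,
+/// so we only check that no bytes are lost across the three pieces:
+///
+/// ```
+/// let bytes = [0_u8; 13];
+/// let (prefix, middle, suffix) = bytemuck::pod_align_to::<u8, u32>(&bytes);
+/// assert_eq!(prefix.len() + middle.len() * 4 + suffix.len(), 13);
+/// ```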
+#[inline]
+pub fn pod_align_to<T: NoUninit, U: AnyBitPattern>(
+ vals: &[T],
+) -> (&[T], &[U], &[T]) {
+ unsafe { vals.align_to::<U>() }
+}
+
+/// As `align_to_mut`, but safe because of the [`Pod`] bound.
+#[inline]
+pub fn pod_align_to_mut<
+ T: NoUninit + AnyBitPattern,
+ U: NoUninit + AnyBitPattern,
+>(
+ vals: &mut [T],
+) -> (&mut [T], &mut [U], &mut [T]) {
+ unsafe { vals.align_to_mut::<U>() }
+}
+
+/// Try to cast `T` into `U`.
+///
+/// Note that for this particular type of cast, alignment isn't a factor. The
+/// input value is semantically copied into the function and then returned to a
+/// new memory location which will have whatever the required alignment of the
+/// output type is.
+///
+/// ## Failure
+///
+/// * If the types don't have the same size this fails.
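+///
+/// ## Examples
+///
+/// A minimal sketch of both outcomes:
+///
+/// ```
+/// use bytemuck::{try_cast, PodCastError};
+/// assert_eq!(try_cast::<u32, f32>(0_u32), Ok(0.0_f32));
+/// assert_eq!(try_cast::<u32, u16>(0_u32), Err(PodCastError::SizeMismatch));
+/// ```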
+#[inline]
+pub fn try_cast<A: NoUninit, B: AnyBitPattern>(
+ a: A,
+) -> Result<B, PodCastError> {
+ unsafe { internal::try_cast(a) }
+}
+
+/// Try to convert a `&T` into `&U`.
+///
+/// ## Failure
+///
+/// * If the reference isn't aligned in the new type
+/// * If the source type and target type aren't the same size.
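+///
+/// ## Examples
+///
+/// A minimal sketch of the size-mismatch failure:
+///
+/// ```
+/// let x = 7_u32;
+/// assert!(bytemuck::try_cast_ref::<u32, u16>(&x).is_err());
+/// ```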
+#[inline]
+pub fn try_cast_ref<A: NoUninit, B: AnyBitPattern>(
+ a: &A,
+) -> Result<&B, PodCastError> {
+ unsafe { internal::try_cast_ref(a) }
+}
+
+/// Try to convert a `&mut T` into `&mut U`.
+///
+/// As [`try_cast_ref`], but `mut`.
+#[inline]
+pub fn try_cast_mut<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + AnyBitPattern,
+>(
+ a: &mut A,
+) -> Result<&mut B, PodCastError> {
+ unsafe { internal::try_cast_mut(a) }
+}
+
+/// Try to convert `&[A]` into `&[B]` (possibly with a change in length).
+///
+/// * `input.as_ptr() as usize == output.as_ptr() as usize`
+/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
+///
+/// ## Failure
+///
+/// * If the target type has a greater alignment requirement and the input slice
+/// isn't aligned.
+/// * If the target element type is a different size from the current element
+/// type, and the output slice wouldn't be a whole number of elements when
+/// accounting for the size change (eg: 3 `u16` values is 1.5 `u32` values, so
+/// that's a failure).
+/// * Similarly, you can't convert between a [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
+/// and a non-ZST.
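+///
+/// ## Examples
+///
+/// A small sketch: widening three `u16`s into `u32`s fails (six bytes is not
+/// a whole number of `u32`s, and the input may also be under-aligned), while
+/// narrowing into bytes always succeeds:
+///
+/// ```
+/// let ints = [1_u16, 2, 3];
+/// assert!(bytemuck::try_cast_slice::<u16, u32>(&ints).is_err());
+/// let bytes: &[u8] = bytemuck::try_cast_slice(&ints).unwrap();
+/// assert_eq!(bytes.len(), 6);
+/// ```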
+#[inline]
+pub fn try_cast_slice<A: NoUninit, B: AnyBitPattern>(
+ a: &[A],
+) -> Result<&[B], PodCastError> {
+ unsafe { internal::try_cast_slice(a) }
+}
+
+/// Try to convert `&mut [A]` into `&mut [B]` (possibly with a change in
+/// length).
+///
+/// As [`try_cast_slice`], but `&mut`.
+#[inline]
+pub fn try_cast_slice_mut<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + AnyBitPattern,
+>(
+ a: &mut [A],
+) -> Result<&mut [B], PodCastError> {
+ unsafe { internal::try_cast_slice_mut(a) }
+}
+
+/// Fill all bytes of `target` with zeroes (see [`Zeroable`]).
+///
+/// This is similar to `*target = Zeroable::zeroed()`, but guarantees that any
+/// padding bytes in `target` are zeroed as well.
+///
+/// See also [`fill_zeroes`], if you have a slice rather than a single value.
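+///
+/// ## Examples
+///
+/// A minimal sketch:
+///
+/// ```
+/// let mut x = 42_u32;
+/// bytemuck::write_zeroes(&mut x);
+/// assert_eq!(x, 0);
+/// ```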
+#[inline]
+pub fn write_zeroes<T: Zeroable>(target: &mut T) {
+ struct EnsureZeroWrite<T>(*mut T);
+ impl<T> Drop for EnsureZeroWrite<T> {
+ #[inline(always)]
+ fn drop(&mut self) {
+ unsafe {
+ core::ptr::write_bytes(self.0, 0u8, 1);
+ }
+ }
+ }
+ unsafe {
+ let guard = EnsureZeroWrite(target);
+ core::ptr::drop_in_place(guard.0);
+ drop(guard);
+ }
+}
+
+/// Fill all bytes of `slice` with zeroes (see [`Zeroable`]).
+///
+/// This is similar to `slice.fill(Zeroable::zeroed())`, but guarantees that any
+/// padding bytes in `slice` are zeroed as well.
+///
+/// See also [`write_zeroes`], which zeroes all bytes of a single value rather
+/// than a slice.
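+///
+/// ## Examples
+///
+/// A minimal sketch:
+///
+/// ```
+/// let mut buf = [1_u8, 2, 3];
+/// bytemuck::fill_zeroes(&mut buf);
+/// assert_eq!(buf, [0, 0, 0]);
+/// ```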
+#[inline]
+pub fn fill_zeroes<T: Zeroable>(slice: &mut [T]) {
+ if core::mem::needs_drop::<T>() {
+ // If `T` needs to be dropped then we have to do this one item at a time, in
+ // case one of the intermediate drops does a panic.
+ slice.iter_mut().for_each(write_zeroes);
+ } else {
+ // Otherwise we can be really fast and just fill everything with zeros.
+ let len = core::mem::size_of_val::<[T]>(slice);
+ unsafe { core::ptr::write_bytes(slice.as_mut_ptr() as *mut u8, 0u8, len) }
+ }
+}
diff --git a/vendor/bytemuck/src/must.rs b/vendor/bytemuck/src/must.rs
new file mode 100644
index 0000000..8373e71
--- /dev/null
+++ b/vendor/bytemuck/src/must.rs
@@ -0,0 +1,203 @@
+#![allow(clippy::module_name_repetitions)]
+#![allow(clippy::let_unit_value)]
+#![allow(clippy::let_underscore_untyped)]
+#![allow(clippy::ptr_as_ptr)]
+
+use crate::{AnyBitPattern, NoUninit};
+use core::mem::{align_of, size_of};
+
+struct Cast<A, B>((A, B));
+impl<A, B> Cast<A, B> {
+ const ASSERT_ALIGN_GREATER_THAN_EQUAL: () =
+ assert!(align_of::<A>() >= align_of::<B>());
+ const ASSERT_SIZE_EQUAL: () = assert!(size_of::<A>() == size_of::<B>());
+ const ASSERT_SIZE_MULTIPLE_OF: () = assert!(
+ (size_of::<A>() == 0) == (size_of::<B>() == 0)
+ && (size_of::<A>() % size_of::<B>() == 0)
+ );
+}
+
+// Workaround for https://github.com/rust-lang/miri/issues/2423.
+// Miri currently doesn't see post-monomorphization errors until runtime,
+// so `compile_fail` tests relying on post-monomorphization errors don't
+// actually fail. Instead use `should_panic` under miri as a workaround.
+#[cfg(miri)]
+macro_rules! post_mono_compile_fail_doctest {
+ () => {
+ "```should_panic"
+ };
+}
+#[cfg(not(miri))]
+macro_rules! post_mono_compile_fail_doctest {
+ () => {
+ "```compile_fail,E0080"
+ };
+}
+
+/// Cast `A` into `B` if infallible, or fail to compile.
+///
+/// Note that for this particular type of cast, alignment isn't a factor. The
+/// input value is semantically copied into the function and then returned to a
+/// new memory location which will have whatever the required alignment of the
+/// output type is.
+///
+/// ## Failure
+///
+/// * If the types don't have the same size this fails to compile.
+///
+/// ## Examples
+/// ```
+/// // compiles:
+/// let bytes: [u8; 2] = bytemuck::must_cast(12_u16);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// // fails to compile (size mismatch):
+/// let bytes : [u8; 3] = bytemuck::must_cast(12_u16);
+/// ```
+#[inline]
+pub fn must_cast<A: NoUninit, B: AnyBitPattern>(a: A) -> B {
+ let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
+ unsafe { transmute!(a) }
+}
+
+/// Convert `&A` into `&B` if infallible, or fail to compile.
+///
+/// ## Failure
+///
+/// * If the target type has a greater alignment requirement.
+/// * If the source type and target type aren't the same size.
+///
+/// ## Examples
+/// ```
+/// // compiles:
+/// let bytes: &[u8; 2] = bytemuck::must_cast_ref(&12_u16);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// // fails to compile (size mismatch):
+/// let bytes : &[u8; 3] = bytemuck::must_cast_ref(&12_u16);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// // fails to compile (alignment requirements increased):
+/// let bytes : &u16 = bytemuck::must_cast_ref(&[1u8, 2u8]);
+/// ```
+#[inline]
+pub fn must_cast_ref<A: NoUninit, B: AnyBitPattern>(a: &A) -> &B {
+ let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
+ let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
+ unsafe { &*(a as *const A as *const B) }
+}
+
+/// Convert a `&mut A` into `&mut B` if infallible, or fail to compile.
+///
+/// As [`must_cast_ref`], but `mut`.
+///
+/// ## Examples
+/// ```
+/// let mut i = 12_u16;
+/// // compiles:
+/// let bytes: &mut [u8; 2] = bytemuck::must_cast_mut(&mut i);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// # let mut bytes: &mut [u8; 2] = &mut [1, 2];
+/// // fails to compile (alignment requirements increased):
+/// let i : &mut u16 = bytemuck::must_cast_mut(bytes);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// # let mut i = 12_u16;
+/// // fails to compile (size mismatch):
+/// let bytes : &mut [u8; 3] = bytemuck::must_cast_mut(&mut i);
+/// ```
+#[inline]
+pub fn must_cast_mut<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + AnyBitPattern,
+>(
+ a: &mut A,
+) -> &mut B {
+ let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
+ let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
+ unsafe { &mut *(a as *mut A as *mut B) }
+}
+
+/// Convert `&[A]` into `&[B]` (possibly with a change in length) if
+/// infallible, or fail to compile.
+///
+/// * `input.as_ptr() as usize == output.as_ptr() as usize`
+/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
+///
+/// ## Failure
+///
+/// * If the target type has a greater alignment requirement.
+/// * If the target element type doesn't evenly fit into the current element
+/// type (eg: 3 `u16` values is 1.5 `u32` values, so that's a failure).
+/// * Similarly, you can't convert between a [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
+/// and a non-ZST.
+///
+/// ## Examples
+/// ```
+/// let indices: &[u16] = &[1, 2, 3];
+/// // compiles:
+/// let bytes: &[u8] = bytemuck::must_cast_slice(indices);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// # let bytes : &[u8] = &[1, 0, 2, 0, 3, 0];
+/// // fails to compile (bytes.len() might not be a multiple of 2):
+/// let byte_pairs : &[[u8; 2]] = bytemuck::must_cast_slice(bytes);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// # let byte_pairs : &[[u8; 2]] = &[[1, 0], [2, 0], [3, 0]];
+/// // fails to compile (alignment requirements increased):
+/// let indices : &[u16] = bytemuck::must_cast_slice(byte_pairs);
+/// ```
+#[inline]
+pub fn must_cast_slice<A: NoUninit, B: AnyBitPattern>(a: &[A]) -> &[B] {
+ let _ = Cast::<A, B>::ASSERT_SIZE_MULTIPLE_OF;
+ let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
+ let new_len = if size_of::<A>() == size_of::<B>() {
+ a.len()
+ } else {
+ a.len() * (size_of::<A>() / size_of::<B>())
+ };
+ unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) }
+}
+
+/// Convert `&mut [A]` into `&mut [B]` (possibly with a change in length) if
+/// infallible, or fail to compile.
+///
+/// As [`must_cast_slice`], but `&mut`.
+///
+/// ## Examples
+/// ```
+/// let mut indices = [1, 2, 3];
+/// let indices: &mut [u16] = &mut indices;
+/// // compiles:
+/// let bytes: &mut [u8] = bytemuck::must_cast_slice_mut(indices);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// # let mut bytes = [1, 0, 2, 0, 3, 0];
+/// # let bytes : &mut [u8] = &mut bytes[..];
+/// // fails to compile (bytes.len() might not be a multiple of 2):
+/// let byte_pairs : &mut [[u8; 2]] = bytemuck::must_cast_slice_mut(bytes);
+/// ```
+#[doc = post_mono_compile_fail_doctest!()]
+/// # let mut byte_pairs = [[1, 0], [2, 0], [3, 0]];
+/// # let byte_pairs : &mut [[u8; 2]] = &mut byte_pairs[..];
+/// // fails to compile (alignment requirements increased):
+/// let indices : &mut [u16] = bytemuck::must_cast_slice_mut(byte_pairs);
+/// ```
+#[inline]
+pub fn must_cast_slice_mut<
+ A: NoUninit + AnyBitPattern,
+ B: NoUninit + AnyBitPattern,
+>(
+ a: &mut [A],
+) -> &mut [B] {
+ let _ = Cast::<A, B>::ASSERT_SIZE_MULTIPLE_OF;
+ let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
+ let new_len = if size_of::<A>() == size_of::<B>() {
+ a.len()
+ } else {
+ a.len() * (size_of::<A>() / size_of::<B>())
+ };
+ unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len) }
+}
diff --git a/vendor/bytemuck/src/no_uninit.rs b/vendor/bytemuck/src/no_uninit.rs
new file mode 100644
index 0000000..5fda0c9
--- /dev/null
+++ b/vendor/bytemuck/src/no_uninit.rs
@@ -0,0 +1,80 @@
+use crate::Pod;
+use core::num::{
+ NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize,
+ NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize,
+};
+
+/// Marker trait for "plain old data" types with no uninit (or padding) bytes.
+///
+/// The requirements for this are very similar to [`Pod`],
+/// except that it doesn't require that all bit patterns of the type are valid,
+/// i.e. it does not require the type to be [`Zeroable`][crate::Zeroable].
+/// This limits what you can do with a type of this kind, but also broadens the
+/// included types to things like C-style enums. Notably, you can only cast from
+/// *immutable* references to a [`NoUninit`] type into *immutable* references of
+/// any other type, no casting of mutable references or mutable references to
+/// slices etc.
+///
+/// [`Pod`] is a subset of [`NoUninit`], meaning that any `T: Pod` is also
+/// [`NoUninit`] but any `T: NoUninit` is not necessarily [`Pod`]. If possible,
+/// prefer implementing [`Pod`] directly. To get more [`Pod`]-like functionality
+/// for a type that is only [`NoUninit`], consider also implementing
+/// [`CheckedBitPattern`][crate::CheckedBitPattern].
+///
+/// # Derive
+///
+/// A `#[derive(NoUninit)]` macro is provided under the `derive` feature flag
+/// which will automatically validate the requirements of this trait and
+/// implement the trait for you for both enums and structs. This is the
+/// recommended method for implementing the trait, however it's also possible to
+/// do manually. If you implement it manually, you *must* carefully follow the
+/// below safety rules.
+///
+/// # Safety
+///
+/// The same as [`Pod`] except we disregard the rule that the type must
+/// allow any bit pattern (i.e. it does not need to be
+/// [`Zeroable`][crate::Zeroable]). Still, this is quite a strong guarantee
+/// about a type, so *be careful* when implementing it manually.
+///
+/// * The type must be inhabited (eg: no
+/// [Infallible](core::convert::Infallible)).
+/// * The type must not contain any uninit (or padding) bytes, either in the
+/// middle or on the end (eg: no `#[repr(C)] struct Foo(u8, u16)`, which has
+/// padding in the middle, and also no `#[repr(C)] struct Foo(u16, u8)`, which
+/// has padding on the end).
+/// * Structs need to have all fields also be `NoUninit`.
+/// * Structs need to be `repr(C)` or `repr(transparent)`. In the case of
+/// `repr(C)`, the `packed` and `align` repr modifiers can be used as long as
+/// all other rules end up being followed.
+/// * Enums need to have an explicit `#[repr(Int)]`
+/// * Enums must have only fieldless variants
+/// * It is disallowed for types to contain pointer types, `Cell`, `UnsafeCell`,
+/// atomics, and any other forms of interior mutability.
+/// * More precisely: A shared reference to the type must allow reads, and
+/// *only* reads. RustBelt's separation logic is based on the notion that a
+/// type is allowed to define a sharing predicate, its own invariant that must
+/// hold for shared references, and this predicate is the reasoning that allow
+/// it to deal with atomic and cells etc. We require the sharing predicate to
+/// be trivial and permit only read-only access.
+/// * There's probably more, don't mess it up (I mean it).
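+///
+/// ## Examples
+///
+/// A minimal sketch of a manual implementation (the `Rgb` type here is a
+/// made-up illustration; the derive remains the recommended route):
+///
+/// ```
+/// # use bytemuck::NoUninit;
+/// // Three `u8` fields: repr(C), Copy, and no padding anywhere.
+/// #[repr(C)]
+/// #[derive(Clone, Copy)]
+/// struct Rgb { r: u8, g: u8, b: u8 }
+///
+/// unsafe impl NoUninit for Rgb {}
+/// ```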
+pub unsafe trait NoUninit: Sized + Copy + 'static {}
+
+unsafe impl<T: Pod> NoUninit for T {}
+
+unsafe impl NoUninit for char {}
+
+unsafe impl NoUninit for bool {}
+
+unsafe impl NoUninit for NonZeroU8 {}
+unsafe impl NoUninit for NonZeroI8 {}
+unsafe impl NoUninit for NonZeroU16 {}
+unsafe impl NoUninit for NonZeroI16 {}
+unsafe impl NoUninit for NonZeroU32 {}
+unsafe impl NoUninit for NonZeroI32 {}
+unsafe impl NoUninit for NonZeroU64 {}
+unsafe impl NoUninit for NonZeroI64 {}
+unsafe impl NoUninit for NonZeroU128 {}
+unsafe impl NoUninit for NonZeroI128 {}
+unsafe impl NoUninit for NonZeroUsize {}
+unsafe impl NoUninit for NonZeroIsize {}
diff --git a/vendor/bytemuck/src/offset_of.rs b/vendor/bytemuck/src/offset_of.rs
new file mode 100644
index 0000000..7e8aedf
--- /dev/null
+++ b/vendor/bytemuck/src/offset_of.rs
@@ -0,0 +1,135 @@
+#![forbid(unsafe_code)]
+
+/// Find the offset in bytes of the given `$field` of `$Type`. Requires an
+/// already initialized `$instance` value to work with.
+///
+/// This is similar to the macro from [`memoffset`](https://docs.rs/memoffset),
+/// however it uses no `unsafe` code.
+///
+/// This macro has a 3-argument and 2-argument version.
+/// * In the 3-arg version you specify an instance of the type, the type itself,
+/// and the field name.
+/// * In the 2-arg version the macro will call the [`default`](Default::default)
+/// method to make a temporary instance of the type for you.
+///
+/// The output of this macro is the byte offset of the field (as a `usize`). The
+/// calculations of the macro are fixed across the entire program, but if the
+/// type used is `repr(Rust)` then they're *not* fixed across compilations or
+/// compilers.
+///
+/// ## Examples
+///
+/// ### 3-arg Usage
+///
+/// ```rust
+/// # use bytemuck::offset_of;
+/// // enums can't derive default, and for this example we don't pick one
+/// enum MyExampleEnum {
+/// A,
+/// B,
+/// C,
+/// }
+///
+/// // so now our struct here doesn't have Default
+/// #[repr(C)]
+/// struct MyNotDefaultType {
+/// pub counter: i32,
+/// pub some_field: MyExampleEnum,
+/// }
+///
+/// // but we provide an instance of the type and it's all good.
+/// let val = MyNotDefaultType { counter: 5, some_field: MyExampleEnum::A };
+/// assert_eq!(offset_of!(val, MyNotDefaultType, some_field), 4);
+/// ```
+///
+/// ### 2-arg Usage
+///
+/// ```rust
+/// # use bytemuck::offset_of;
+/// #[derive(Default)]
+/// #[repr(C)]
+/// struct Vertex {
+/// pub loc: [f32; 3],
+/// pub color: [f32; 3],
+/// }
+/// // if the type impls Default the macro can make its own default instance.
+/// assert_eq!(offset_of!(Vertex, loc), 0);
+/// assert_eq!(offset_of!(Vertex, color), 12);
+/// ```
+///
+/// # Usage with `#[repr(packed)]` structs
+///
+/// Attempting to compute the offset of a `#[repr(packed)]` struct with
+/// `bytemuck::offset_of!` requires an `unsafe` block. We hope to relax this in
+/// the future, but currently it is required to work around a soundness hole in
+/// Rust (See [rust-lang/rust#27060]).
+///
+/// [rust-lang/rust#27060]: https://github.com/rust-lang/rust/issues/27060
+///
+/// <p style="background:rgba(255,181,77,0.16);padding:0.75em;">
+/// <strong>Warning:</strong> This is only true for versions of bytemuck >
+/// 1.4.0. Previous versions of
+/// <code style="background:rgba(41,24,0,0.1);">bytemuck::offset_of!</code>
+/// will only emit a warning when used on the field of a packed struct in safe
+/// code, which can lead to unsoundness.
+/// </p>
+///
+/// For example, the following will fail to compile:
+///
+/// ```compile_fail
+/// #[repr(C, packed)]
+/// #[derive(Default)]
+/// struct Example {
+/// field: u32,
+/// }
+/// // Doesn't compile:
+/// let _offset = bytemuck::offset_of!(Example, field);
+/// ```
+///
+/// While the error message this generates will mention the
+/// `safe_packed_borrows` lint, the macro will still fail to compile even if
+/// that lint is `#[allow]`ed:
+///
+/// ```compile_fail
+/// # #[repr(C, packed)] #[derive(Default)] struct Example { field: u32 }
+/// // Still doesn't compile:
+/// #[allow(safe_packed_borrows)]
+/// {
+/// let _offset = bytemuck::offset_of!(Example, field);
+/// }
+/// ```
+///
+/// This *can* be worked around by using `unsafe`, but it is only sound to do so
+/// if you can guarantee that taking a reference to the field is sound.
+///
+/// In practice, this means it only works for fields of align(1) types, or if
+/// you know the field's offset in advance (defeating the point of `offset_of`)
+/// and can prove that the struct's alignment and the field's offset are enough
+/// to prove the field's alignment.
+///
+/// Once the `raw_ref` macros are available, a future version of this crate will
+/// use them to lift the limitations of packed structs. For the duration of the
+/// `1.x` version of this crate that will be behind an on-by-default cargo
+/// feature (to maintain minimum rust version support).
+#[macro_export]
+macro_rules! offset_of {
+ ($instance:expr, $Type:path, $field:tt) => {{
+ #[forbid(safe_packed_borrows)]
+ {
+ // This helps us guard against field access going through a Deref impl.
+ #[allow(clippy::unneeded_field_pattern)]
+ let $Type { $field: _, .. };
+ let reference: &$Type = &$instance;
+ let address = reference as *const _ as usize;
+ let field_pointer = &reference.$field as *const _ as usize;
+ // These asserts/unwraps are compiled away at release, and defend against
+ // the case where somehow a deref impl is still invoked.
+ let result = field_pointer.checked_sub(address).unwrap();
+ assert!(result <= $crate::__core::mem::size_of::<$Type>());
+ result
+ }
+ }};
+ ($Type:path, $field:tt) => {{
+ $crate::offset_of!(<$Type as Default>::default(), $Type, $field)
+ }};
+}
diff --git a/vendor/bytemuck/src/pod.rs b/vendor/bytemuck/src/pod.rs
new file mode 100644
index 0000000..2cec1c2
--- /dev/null
+++ b/vendor/bytemuck/src/pod.rs
@@ -0,0 +1,165 @@
+use super::*;
+
+/// Marker trait for "plain old data".
+///
+/// The point of this trait is that once something is marked "plain old data"
+/// you can really go to town with the bit fiddling and bit casting. Therefore,
+/// it's a relatively strong claim to make about a type. Do not add this to your
+/// type casually.
+///
+/// **Reminder:** The results of casting around bytes between data types are
+/// _endian dependant_. Little-endian machines are the most common, but
+/// big-endian machines do exist (and big-endian is also used for "network
+/// order" bytes).
+///
+/// ## Safety
+///
+/// * The type must be inhabited (eg: no
+/// [Infallible](core::convert::Infallible)).
+/// * The type must allow any bit pattern (eg: no `bool` or `char`, which have
+/// illegal bit patterns).
+/// * The type must not contain any uninit (or padding) bytes, either in the
+/// middle or on the end (eg: no `#[repr(C)] struct Foo(u8, u16)`, which has
+/// padding in the middle, and also no `#[repr(C)] struct Foo(u16, u8)`, which
+/// has padding on the end).
+/// * The type needs to have all fields also be `Pod`.
+/// * The type needs to be `repr(C)` or `repr(transparent)`. In the case of
+/// `repr(C)`, the `packed` and `align` repr modifiers can be used as long as
+/// all other rules end up being followed.
+/// * It is disallowed for types to contain pointer types, `Cell`, `UnsafeCell`,
+/// atomics, and any other forms of interior mutability.
+/// * More precisely: A shared reference to the type must allow reads, and
+/// *only* reads. RustBelt's separation logic is based on the notion that a
+/// type is allowed to define a sharing predicate, its own invariant that must
+/// hold for shared references, and this predicate is the reasoning that allow
+/// it to deal with atomic and cells etc. We require the sharing predicate to
+/// be trivial and permit only read-only access.
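+///
+/// ## Examples
+///
+/// A minimal sketch of a manual implementation (the `Vec2` type is a made-up
+/// illustration; with the `derive` feature you would derive these instead):
+///
+/// ```
+/// # use bytemuck::{Pod, Zeroable};
+/// // Two `f32` fields: repr(C), Copy, no padding, and any bits are valid.
+/// #[repr(C)]
+/// #[derive(Clone, Copy)]
+/// struct Vec2 { x: f32, y: f32 }
+///
+/// unsafe impl Zeroable for Vec2 {}
+/// unsafe impl Pod for Vec2 {}
+/// ```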
+pub unsafe trait Pod: Zeroable + Copy + 'static {}
+
+unsafe impl Pod for () {}
+unsafe impl Pod for u8 {}
+unsafe impl Pod for i8 {}
+unsafe impl Pod for u16 {}
+unsafe impl Pod for i16 {}
+unsafe impl Pod for u32 {}
+unsafe impl Pod for i32 {}
+unsafe impl Pod for u64 {}
+unsafe impl Pod for i64 {}
+unsafe impl Pod for usize {}
+unsafe impl Pod for isize {}
+unsafe impl Pod for u128 {}
+unsafe impl Pod for i128 {}
+unsafe impl Pod for f32 {}
+unsafe impl Pod for f64 {}
+unsafe impl<T: Pod> Pod for Wrapping<T> {}
+
+#[cfg(feature = "unsound_ptr_pod_impl")]
+#[cfg_attr(
+ feature = "nightly_docs",
+ doc(cfg(feature = "unsound_ptr_pod_impl"))
+)]
+unsafe impl<T: 'static> Pod for *mut T {}
+#[cfg(feature = "unsound_ptr_pod_impl")]
+#[cfg_attr(
+ feature = "nightly_docs",
+ doc(cfg(feature = "unsound_ptr_pod_impl"))
+)]
+unsafe impl<T: 'static> Pod for *const T {}
+#[cfg(feature = "unsound_ptr_pod_impl")]
+#[cfg_attr(
+ feature = "nightly_docs",
+ doc(cfg(feature = "unsound_ptr_pod_impl"))
+)]
+unsafe impl<T: 'static> PodInOption for NonNull<T> {}
+
+unsafe impl<T: ?Sized + 'static> Pod for PhantomData<T> {}
+unsafe impl Pod for PhantomPinned {}
+unsafe impl<T: Pod> Pod for ManuallyDrop<T> {}
+
+// Note(Lokathor): MaybeUninit can NEVER be Pod.
+
+#[cfg(feature = "min_const_generics")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "min_const_generics")))]
+unsafe impl<T, const N: usize> Pod for [T; N] where T: Pod {}
+
+#[cfg(not(feature = "min_const_generics"))]
+impl_unsafe_marker_for_array!(
+ Pod, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
+ 512, 1024, 2048, 4096
+);
+
+impl_unsafe_marker_for_simd!(
+ #[cfg(all(target_arch = "wasm32", feature = "wasm_simd"))]
+ unsafe impl Pod for wasm32::{v128}
+);
+
+impl_unsafe_marker_for_simd!(
+ #[cfg(all(target_arch = "aarch64", feature = "aarch64_simd"))]
+ unsafe impl Pod for aarch64::{
+ float32x2_t, float32x2x2_t, float32x2x3_t, float32x2x4_t, float32x4_t,
+ float32x4x2_t, float32x4x3_t, float32x4x4_t, float64x1_t, float64x1x2_t,
+ float64x1x3_t, float64x1x4_t, float64x2_t, float64x2x2_t, float64x2x3_t,
+ float64x2x4_t, int16x4_t, int16x4x2_t, int16x4x3_t, int16x4x4_t, int16x8_t,
+ int16x8x2_t, int16x8x3_t, int16x8x4_t, int32x2_t, int32x2x2_t, int32x2x3_t,
+ int32x2x4_t, int32x4_t, int32x4x2_t, int32x4x3_t, int32x4x4_t, int64x1_t,
+ int64x1x2_t, int64x1x3_t, int64x1x4_t, int64x2_t, int64x2x2_t, int64x2x3_t,
+ int64x2x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int8x8_t,
+ int8x8x2_t, int8x8x3_t, int8x8x4_t, poly16x4_t, poly16x4x2_t, poly16x4x3_t,
+ poly16x4x4_t, poly16x8_t, poly16x8x2_t, poly16x8x3_t, poly16x8x4_t,
+ poly64x1_t, poly64x1x2_t, poly64x1x3_t, poly64x1x4_t, poly64x2_t,
+ poly64x2x2_t, poly64x2x3_t, poly64x2x4_t, poly8x16_t, poly8x16x2_t,
+ poly8x16x3_t, poly8x16x4_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, poly8x8x4_t,
+ uint16x4_t, uint16x4x2_t, uint16x4x3_t, uint16x4x4_t, uint16x8_t,
+ uint16x8x2_t, uint16x8x3_t, uint16x8x4_t, uint32x2_t, uint32x2x2_t,
+ uint32x2x3_t, uint32x2x4_t, uint32x4_t, uint32x4x2_t, uint32x4x3_t,
+ uint32x4x4_t, uint64x1_t, uint64x1x2_t, uint64x1x3_t, uint64x1x4_t,
+ uint64x2_t, uint64x2x2_t, uint64x2x3_t, uint64x2x4_t, uint8x16_t,
+ uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint8x8_t, uint8x8x2_t,
+ uint8x8x3_t, uint8x8x4_t,
+ }
+);
+
+impl_unsafe_marker_for_simd!(
+ #[cfg(target_arch = "x86")]
+ unsafe impl Pod for x86::{
+ __m128i, __m128, __m128d,
+ __m256i, __m256, __m256d,
+ }
+);
+
+impl_unsafe_marker_for_simd!(
+ #[cfg(target_arch = "x86_64")]
+ unsafe impl Pod for x86_64::{
+ __m128i, __m128, __m128d,
+ __m256i, __m256, __m256d,
+ }
+);
+
+#[cfg(feature = "nightly_portable_simd")]
+#[cfg_attr(
+ feature = "nightly_docs",
+ doc(cfg(feature = "nightly_portable_simd"))
+)]
+unsafe impl<T, const N: usize> Pod for core::simd::Simd<T, N>
+where
+ T: core::simd::SimdElement + Pod,
+ core::simd::LaneCount<N>: core::simd::SupportedLaneCount,
+{
+}
+
+impl_unsafe_marker_for_simd!(
+ #[cfg(all(target_arch = "x86", feature = "nightly_stdsimd"))]
+ unsafe impl Pod for x86::{
+ __m128bh, __m256bh, __m512,
+ __m512bh, __m512d, __m512i,
+ }
+);
+
+impl_unsafe_marker_for_simd!(
+ #[cfg(all(target_arch = "x86_64", feature = "nightly_stdsimd"))]
+ unsafe impl Pod for x86_64::{
+ __m128bh, __m256bh, __m512,
+ __m512bh, __m512d, __m512i,
+ }
+);
diff --git a/vendor/bytemuck/src/pod_in_option.rs b/vendor/bytemuck/src/pod_in_option.rs
new file mode 100644
index 0000000..3327e99
--- /dev/null
+++ b/vendor/bytemuck/src/pod_in_option.rs
@@ -0,0 +1,27 @@
+use super::*;
+
+// Note(Lokathor): This is the neat part!!
+unsafe impl<T: PodInOption> Pod for Option<T> {}
+
+/// Trait for types which are [Pod](Pod) when wrapped in
+/// [Option](core::option::Option).
+///
+/// ## Safety
+///
+/// * `Option<T>` must uphold the same invariants as [Pod](Pod).
+/// * **Reminder:** pointers are **not** pod! **Do not** mix this trait with a
+/// newtype over [NonNull](core::ptr::NonNull).
+pub unsafe trait PodInOption: ZeroableInOption + Copy + 'static {}
+
+unsafe impl PodInOption for NonZeroI8 {}
+unsafe impl PodInOption for NonZeroI16 {}
+unsafe impl PodInOption for NonZeroI32 {}
+unsafe impl PodInOption for NonZeroI64 {}
+unsafe impl PodInOption for NonZeroI128 {}
+unsafe impl PodInOption for NonZeroIsize {}
+unsafe impl PodInOption for NonZeroU8 {}
+unsafe impl PodInOption for NonZeroU16 {}
+unsafe impl PodInOption for NonZeroU32 {}
+unsafe impl PodInOption for NonZeroU64 {}
+unsafe impl PodInOption for NonZeroU128 {}
+unsafe impl PodInOption for NonZeroUsize {}
diff --git a/vendor/bytemuck/src/transparent.rs b/vendor/bytemuck/src/transparent.rs
new file mode 100644
index 0000000..5b9fe0e
--- /dev/null
+++ b/vendor/bytemuck/src/transparent.rs
@@ -0,0 +1,288 @@
+use super::*;
+
+/// A trait which indicates that a type is a `#[repr(transparent)]` wrapper
+/// around the `Inner` value.
+///
+/// This allows safely copy transmuting between the `Inner` type and the
+/// `TransparentWrapper` type. Functions like `wrap_{}` convert from the inner
+/// type to the wrapper type and `peel_{}` functions do the inverse conversion
+/// from the wrapper type to the inner type. We deliberately do not call the
+/// wrapper-removing methods "unwrap" because at this point that word is too
+/// strongly tied to the Option/Result methods.
+///
+/// # Safety
+///
+/// The safety contract of `TransparentWrapper` is relatively simple:
+///
+/// For a given `Wrapper` which implements `TransparentWrapper<Inner>`:
+///
+/// 1. `Wrapper` must be a wrapper around `Inner` with an identical data
+///    representation. This means that it must be a `#[repr(transparent)]`
+///    struct which contains either a field of type `Inner` (or a field of
+///    some other transparent wrapper for `Inner`) as the only non-ZST
+///    field.
+///
+/// 2. Any fields *other* than the `Inner` field must be trivially constructable
+/// ZSTs, for example `PhantomData`, `PhantomPinned`, etc. (When deriving
+/// `TransparentWrapper` on a type with ZST fields, the ZST fields must be
+/// [`Zeroable`]).
+///
+/// 3. The `Wrapper` may not impose additional alignment requirements over
+/// `Inner`.
+/// - Note: this is currently guaranteed by `repr(transparent)`, but there
+/// have been discussions of lifting it, so it's stated here explicitly.
+///
+/// 4. All functions on `TransparentWrapper` **may not** be overridden.
+///
+/// ## Caveats
+///
+/// If the wrapper imposes additional constraints upon the inner type which are
+/// required for safety, it's responsible for ensuring those still hold -- this
+/// generally requires preventing access to instances of the inner type, as
+/// implementing `TransparentWrapper<U> for T` means anybody can call
+/// `T::cast_ref(any_instance_of_u)`.
+///
+/// For example, it would be invalid to implement `TransparentWrapper<[u8]>`
+/// for `str` because of this.
+///
+/// # Examples
+///
+/// ## Basic
+///
+/// ```
+/// use bytemuck::TransparentWrapper;
+/// # #[derive(Default)]
+/// # struct SomeStruct(u32);
+///
+/// #[repr(transparent)]
+/// struct MyWrapper(SomeStruct);
+///
+/// unsafe impl TransparentWrapper<SomeStruct> for MyWrapper {}
+///
+/// // interpret a reference to &SomeStruct as a &MyWrapper
+/// let thing = SomeStruct::default();
+/// let inner_ref: &MyWrapper = MyWrapper::wrap_ref(&thing);
+///
+/// // Works with &mut too.
+/// let mut mut_thing = SomeStruct::default();
+/// let inner_mut: &mut MyWrapper = MyWrapper::wrap_mut(&mut mut_thing);
+///
+/// # let _ = (inner_ref, inner_mut); // silence warnings
+/// ```
+///
+/// ## Use with dynamically sized types
+///
+/// ```
+/// use bytemuck::TransparentWrapper;
+///
+/// #[repr(transparent)]
+/// struct Slice<T>([T]);
+///
+/// unsafe impl<T> TransparentWrapper<[T]> for Slice<T> {}
+///
+/// let s = Slice::wrap_ref(&[1u32, 2, 3]);
+/// assert_eq!(&s.0, &[1, 2, 3]);
+///
+/// let mut buf = [1, 2, 3u8];
+/// let sm = Slice::wrap_mut(&mut buf);
+/// ```
+///
+/// ## Deriving
+///
+/// When deriving, the non-wrapped fields must uphold all the normal requirements,
+/// and must also be `Zeroable`.
+///
+#[cfg_attr(feature = "derive", doc = "```")]
+#[cfg_attr(
+ not(feature = "derive"),
+ doc = "```ignore
+// This example requires the `derive` feature."
+)]
+/// use bytemuck::TransparentWrapper;
+/// use std::marker::PhantomData;
+///
+/// #[derive(TransparentWrapper)]
+/// #[repr(transparent)]
+/// #[transparent(usize)]
+/// struct Wrapper<T: ?Sized>(usize, PhantomData<T>); // PhantomData<T> implements Zeroable for all T
+/// ```
+///
+/// Here, an error will occur, because `MyZst` does not implement `Zeroable`.
+///
+#[cfg_attr(feature = "derive", doc = "```compile_fail")]
+#[cfg_attr(
+ not(feature = "derive"),
+ doc = "```ignore
+// This example requires the `derive` feature."
+)]
+/// use bytemuck::TransparentWrapper;
+/// struct MyZst;
+///
+/// #[derive(TransparentWrapper)]
+/// #[repr(transparent)]
+/// #[transparent(usize)]
+/// struct Wrapper(usize, MyZst); // MyZst does not implement Zeroable
+/// ```
+pub unsafe trait TransparentWrapper<Inner: ?Sized> {
+ /// Convert the inner type into the wrapper type.
+ #[inline]
+ fn wrap(s: Inner) -> Self
+ where
+ Self: Sized,
+ Inner: Sized,
+ {
+ // SAFETY: The unsafe contract requires that `Self` and `Inner` have
+ // identical representations.
+ unsafe { transmute!(s) }
+ }
+
+ /// Convert a reference to the inner type into a reference to the wrapper
+ /// type.
+ #[inline]
+ fn wrap_ref(s: &Inner) -> &Self {
+ unsafe {
+ assert!(size_of::<*const Inner>() == size_of::<*const Self>());
+ // A pointer cast doesn't work here because rustc can't tell that
+ // the vtables match (because of the `?Sized` restriction relaxation).
+ // A `transmute` doesn't work because the sizes are unspecified.
+ //
+ // SAFETY: The unsafe contract requires that these two have
+ // identical representations.
+ let inner_ptr = s as *const Inner;
+ let wrapper_ptr: *const Self = transmute!(inner_ptr);
+ &*wrapper_ptr
+ }
+ }
+
+ /// Convert a mutable reference to the inner type into a mutable reference to
+ /// the wrapper type.
+ #[inline]
+ fn wrap_mut(s: &mut Inner) -> &mut Self {
+ unsafe {
+ assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
+ // A pointer cast doesn't work here because rustc can't tell that
+ // the vtables match (because of the `?Sized` restriction relaxation).
+ // A `transmute` doesn't work because the sizes are unspecified.
+ //
+ // SAFETY: The unsafe contract requires that these two have
+ // identical representations.
+ let inner_ptr = s as *mut Inner;
+ let wrapper_ptr: *mut Self = transmute!(inner_ptr);
+ &mut *wrapper_ptr
+ }
+ }
+
+ /// Convert a slice to the inner type into a slice to the wrapper type.
+ #[inline]
+ fn wrap_slice(s: &[Inner]) -> &[Self]
+ where
+ Self: Sized,
+ Inner: Sized,
+ {
+ unsafe {
+ assert!(size_of::<*const Inner>() == size_of::<*const Self>());
+ assert!(align_of::<*const Inner>() == align_of::<*const Self>());
+ // SAFETY: The unsafe contract requires that these two have
+ // identical representations (size and alignment).
+ core::slice::from_raw_parts(s.as_ptr() as *const Self, s.len())
+ }
+ }
+
+ /// Convert a mutable slice to the inner type into a mutable slice to the
+ /// wrapper type.
+ #[inline]
+ fn wrap_slice_mut(s: &mut [Inner]) -> &mut [Self]
+ where
+ Self: Sized,
+ Inner: Sized,
+ {
+ unsafe {
+ assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
+ assert!(align_of::<*mut Inner>() == align_of::<*mut Self>());
+ // SAFETY: The unsafe contract requires that these two have
+ // identical representations (size and alignment).
+ core::slice::from_raw_parts_mut(s.as_mut_ptr() as *mut Self, s.len())
+ }
+ }
+
+ /// Convert the wrapper type into the inner type.
+ #[inline]
+ fn peel(s: Self) -> Inner
+ where
+ Self: Sized,
+ Inner: Sized,
+ {
+ unsafe { transmute!(s) }
+ }
+
+ /// Convert a reference to the wrapper type into a reference to the inner
+ /// type.
+ #[inline]
+ fn peel_ref(s: &Self) -> &Inner {
+ unsafe {
+ assert!(size_of::<*const Inner>() == size_of::<*const Self>());
+ // A pointer cast doesn't work here because rustc can't tell that
+ // the vtables match (because of the `?Sized` restriction relaxation).
+ // A `transmute` doesn't work because the sizes are unspecified.
+ //
+ // SAFETY: The unsafe contract requires that these two have
+ // identical representations.
+ let wrapper_ptr = s as *const Self;
+ let inner_ptr: *const Inner = transmute!(wrapper_ptr);
+ &*inner_ptr
+ }
+ }
+
+ /// Convert a mutable reference to the wrapper type into a mutable reference
+ /// to the inner type.
+ #[inline]
+ fn peel_mut(s: &mut Self) -> &mut Inner {
+ unsafe {
+ assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
+ // A pointer cast doesn't work here because rustc can't tell that
+ // the vtables match (because of the `?Sized` restriction relaxation).
+ // A `transmute` doesn't work because the sizes are unspecified.
+ //
+ // SAFETY: The unsafe contract requires that these two have
+ // identical representations.
+ let wrapper_ptr = s as *mut Self;
+ let inner_ptr: *mut Inner = transmute!(wrapper_ptr);
+ &mut *inner_ptr
+ }
+ }
+
+ /// Convert a slice of the wrapper type into a slice of the inner type.
+ #[inline]
+ fn peel_slice(s: &[Self]) -> &[Inner]
+ where
+ Self: Sized,
+ Inner: Sized,
+ {
+ unsafe {
+ assert!(size_of::<*const Inner>() == size_of::<*const Self>());
+ assert!(align_of::<*const Inner>() == align_of::<*const Self>());
+ // SAFETY: The unsafe contract requires that these two have
+ // identical representations (size and alignment).
+ core::slice::from_raw_parts(s.as_ptr() as *const Inner, s.len())
+ }
+ }
+
+ /// Convert a mutable slice of the wrapper type into a mutable slice of the
+ /// inner type.
+ #[inline]
+ fn peel_slice_mut(s: &mut [Self]) -> &mut [Inner]
+ where
+ Self: Sized,
+ Inner: Sized,
+ {
+ unsafe {
+ assert!(size_of::<*mut Inner>() == size_of::<*mut Self>());
+ assert!(align_of::<*mut Inner>() == align_of::<*mut Self>());
+ // SAFETY: The unsafe contract requires that these two have
+ // identical representations (size and alignment).
+ core::slice::from_raw_parts_mut(s.as_mut_ptr() as *mut Inner, s.len())
+ }
+ }
+}
+
+unsafe impl<T> TransparentWrapper<T> for core::num::Wrapping<T> {}
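+
+// A minimal usage sketch (the `Meters` newtype is hypothetical, not part of
+// this crate): a `#[repr(transparent)]` wrapper opts in with an empty unsafe
+// impl and then gets all of the `wrap_*`/`peel_*` helpers for free.
+#[cfg(test)]
+mod transparent_wrapper_sketch {
+  use super::*;
+
+  #[derive(Copy, Clone, Debug, PartialEq)]
+  #[repr(transparent)]
+  struct Meters(f32);
+
+  // SAFETY: `Meters` is `repr(transparent)` over `f32` and has no other
+  // fields, so the two types have identical representations.
+  unsafe impl TransparentWrapper<f32> for Meters {}
+
+  #[test]
+  fn wrap_and_peel_round_trip() {
+    let raw: &[f32] = &[1.0, 2.5, 4.0];
+    let wrapped: &[Meters] = Meters::wrap_slice(raw);
+    assert_eq!(wrapped[1], Meters(2.5));
+    assert_eq!(Meters::peel_slice(wrapped), raw);
+  }
+}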
diff --git a/vendor/bytemuck/src/zeroable.rs b/vendor/bytemuck/src/zeroable.rs
new file mode 100644
index 0000000..b64a9bf
--- /dev/null
+++ b/vendor/bytemuck/src/zeroable.rs
@@ -0,0 +1,245 @@
+use super::*;
+
+/// Trait for types that can be safely created with
+/// [`zeroed`](core::mem::zeroed).
+///
+/// An all-zeroes value may or may not be the same value as the
+/// [Default](core::default::Default) value of the type.
+///
+/// ## Safety
+///
+/// * Your type must be inhabited (e.g. no
+/// [Infallible](core::convert::Infallible)).
+/// * Your type must be allowed to be an "all zeroes" bit pattern (e.g. no
+/// [`NonNull<T>`](core::ptr::NonNull)).
+///
+/// ## Features
+///
+/// Some `impl`s are feature-gated due to the MSRV policy:
+///
+/// * `MaybeUninit<T>` was not available in 1.34.0, but is available under the
+/// `zeroable_maybe_uninit` feature flag.
+/// * `Atomic*` types require Rust 1.60.0 or later to work on certain platforms,
+/// but are available under the `zeroable_atomics` feature flag.
+/// * `[T; N]` for arbitrary `N` requires the `min_const_generics` feature flag.
+pub unsafe trait Zeroable: Sized {
+ /// Calls [`zeroed`](core::mem::zeroed).
+ ///
+ /// This is a trait method so that you can write `MyType::zeroed()` in your
+ /// code. It is a contract of this trait that, if you implement it for your
+ /// type, you **must not** override this method.
+ #[inline]
+ fn zeroed() -> Self {
+ unsafe { core::mem::zeroed() }
+ }
+}
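+
+// A minimal opt-in sketch (the `Coords` struct is hypothetical): a type
+// whose fields are all `Zeroable` may implement the trait, after which
+// `Coords::zeroed()` is safe to call.
+#[cfg(test)]
+mod zeroable_sketch {
+  use super::Zeroable;
+
+  #[derive(Copy, Clone)]
+  #[repr(C)]
+  struct Coords {
+    x: f32,
+    y: f32,
+  }
+
+  // SAFETY: `Coords` is inhabited and every field accepts the all-zeroes
+  // bit pattern.
+  unsafe impl Zeroable for Coords {}
+
+  #[test]
+  fn zeroed_is_all_zeroes() {
+    let c = Coords::zeroed();
+    assert_eq!(c.x, 0.0);
+    assert_eq!(c.y, 0.0);
+  }
+}
+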
+unsafe impl Zeroable for () {}
+unsafe impl Zeroable for bool {}
+unsafe impl Zeroable for char {}
+unsafe impl Zeroable for u8 {}
+unsafe impl Zeroable for i8 {}
+unsafe impl Zeroable for u16 {}
+unsafe impl Zeroable for i16 {}
+unsafe impl Zeroable for u32 {}
+unsafe impl Zeroable for i32 {}
+unsafe impl Zeroable for u64 {}
+unsafe impl Zeroable for i64 {}
+unsafe impl Zeroable for usize {}
+unsafe impl Zeroable for isize {}
+unsafe impl Zeroable for u128 {}
+unsafe impl Zeroable for i128 {}
+unsafe impl Zeroable for f32 {}
+unsafe impl Zeroable for f64 {}
+unsafe impl<T: Zeroable> Zeroable for Wrapping<T> {}
+unsafe impl<T: Zeroable> Zeroable for core::cmp::Reverse<T> {}
+
+// Note: we can't implement this for all `T: ?Sized` types, because for trait
+// objects the all-zeroes pattern would produce null vtable pointers.
+// Maybe one day this could be changed to be implemented for
+// `T: ?Sized where <T as core::ptr::Pointee>::Metadata: Zeroable`.
+unsafe impl<T> Zeroable for *mut T {}
+unsafe impl<T> Zeroable for *const T {}
+unsafe impl<T> Zeroable for *mut [T] {}
+unsafe impl<T> Zeroable for *const [T] {}
+unsafe impl Zeroable for *mut str {}
+unsafe impl Zeroable for *const str {}
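+
+// A small test-only illustration of the sized case: the all-zeroes bit
+// pattern for a thin raw pointer is simply the null pointer.
+#[cfg(test)]
+mod zeroable_pointer_sketch {
+  use super::Zeroable;
+
+  #[test]
+  fn zeroed_pointer_is_null() {
+    let p: *const u8 = Zeroable::zeroed();
+    assert!(p.is_null());
+  }
+}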
+
+unsafe impl<T: ?Sized> Zeroable for PhantomData<T> {}
+unsafe impl Zeroable for PhantomPinned {}
+unsafe impl<T: Zeroable> Zeroable for ManuallyDrop<T> {}
+unsafe impl<T: Zeroable> Zeroable for core::cell::UnsafeCell<T> {}
+unsafe impl<T: Zeroable> Zeroable for core::cell::Cell<T> {}
+
+#[cfg(feature = "zeroable_atomics")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "zeroable_atomics")))]
+mod atomic_impls {
+ use super::Zeroable;
+
+ #[cfg(target_has_atomic = "8")]
+ unsafe impl Zeroable for core::sync::atomic::AtomicBool {}
+ #[cfg(target_has_atomic = "8")]
+ unsafe impl Zeroable for core::sync::atomic::AtomicU8 {}
+ #[cfg(target_has_atomic = "8")]
+ unsafe impl Zeroable for core::sync::atomic::AtomicI8 {}
+
+ #[cfg(target_has_atomic = "16")]
+ unsafe impl Zeroable for core::sync::atomic::AtomicU16 {}
+ #[cfg(target_has_atomic = "16")]
+ unsafe impl Zeroable for core::sync::atomic::AtomicI16 {}
+
+ #[cfg(target_has_atomic = "32")]
+ unsafe impl Zeroable for core::sync::atomic::AtomicU32 {}
+ #[cfg(target_has_atomic = "32")]
+ unsafe impl Zeroable for core::sync::atomic::AtomicI32 {}
+
+ #[cfg(target_has_atomic = "64")]
+ unsafe impl Zeroable for core::sync::atomic::AtomicU64 {}
+ #[cfg(target_has_atomic = "64")]
+ unsafe impl Zeroable for core::sync::atomic::AtomicI64 {}
+
+ #[cfg(target_has_atomic = "ptr")]
+ unsafe impl Zeroable for core::sync::atomic::AtomicUsize {}
+ #[cfg(target_has_atomic = "ptr")]
+ unsafe impl Zeroable for core::sync::atomic::AtomicIsize {}
+
+ #[cfg(target_has_atomic = "ptr")]
+ unsafe impl<T> Zeroable for core::sync::atomic::AtomicPtr<T> {}
+}
+
+#[cfg(feature = "zeroable_maybe_uninit")]
+#[cfg_attr(
+ feature = "nightly_docs",
+ doc(cfg(feature = "zeroable_maybe_uninit"))
+)]
+unsafe impl<T> Zeroable for core::mem::MaybeUninit<T> {}
+
+unsafe impl<A: Zeroable> Zeroable for (A,) {}
+unsafe impl<A: Zeroable, B: Zeroable> Zeroable for (A, B) {}
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable> Zeroable for (A, B, C) {}
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable> Zeroable
+ for (A, B, C, D)
+{
+}
+unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable>
+ Zeroable for (A, B, C, D, E)
+{
+}
+unsafe impl<
+ A: Zeroable,
+ B: Zeroable,
+ C: Zeroable,
+ D: Zeroable,
+ E: Zeroable,
+ F: Zeroable,
+ > Zeroable for (A, B, C, D, E, F)
+{
+}
+unsafe impl<
+ A: Zeroable,
+ B: Zeroable,
+ C: Zeroable,
+ D: Zeroable,
+ E: Zeroable,
+ F: Zeroable,
+ G: Zeroable,
+ > Zeroable for (A, B, C, D, E, F, G)
+{
+}
+unsafe impl<
+ A: Zeroable,
+ B: Zeroable,
+ C: Zeroable,
+ D: Zeroable,
+ E: Zeroable,
+ F: Zeroable,
+ G: Zeroable,
+ H: Zeroable,
+ > Zeroable for (A, B, C, D, E, F, G, H)
+{
+}
+
+#[cfg(feature = "min_const_generics")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "min_const_generics")))]
+unsafe impl<T, const N: usize> Zeroable for [T; N] where T: Zeroable {}
+
+#[cfg(not(feature = "min_const_generics"))]
+impl_unsafe_marker_for_array!(
+ Zeroable, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
+ 512, 1024, 2048, 4096
+);
+
+impl_unsafe_marker_for_simd!(
+ #[cfg(all(target_arch = "wasm32", feature = "wasm_simd"))]
+ unsafe impl Zeroable for wasm32::{v128}
+);
+
+impl_unsafe_marker_for_simd!(
+ #[cfg(all(target_arch = "aarch64", feature = "aarch64_simd"))]
+ unsafe impl Zeroable for aarch64::{
+ float32x2_t, float32x2x2_t, float32x2x3_t, float32x2x4_t, float32x4_t,
+ float32x4x2_t, float32x4x3_t, float32x4x4_t, float64x1_t, float64x1x2_t,
+ float64x1x3_t, float64x1x4_t, float64x2_t, float64x2x2_t, float64x2x3_t,
+ float64x2x4_t, int16x4_t, int16x4x2_t, int16x4x3_t, int16x4x4_t, int16x8_t,
+ int16x8x2_t, int16x8x3_t, int16x8x4_t, int32x2_t, int32x2x2_t, int32x2x3_t,
+ int32x2x4_t, int32x4_t, int32x4x2_t, int32x4x3_t, int32x4x4_t, int64x1_t,
+ int64x1x2_t, int64x1x3_t, int64x1x4_t, int64x2_t, int64x2x2_t, int64x2x3_t,
+ int64x2x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int8x8_t,
+ int8x8x2_t, int8x8x3_t, int8x8x4_t, poly16x4_t, poly16x4x2_t, poly16x4x3_t,
+ poly16x4x4_t, poly16x8_t, poly16x8x2_t, poly16x8x3_t, poly16x8x4_t,
+ poly64x1_t, poly64x1x2_t, poly64x1x3_t, poly64x1x4_t, poly64x2_t,
+ poly64x2x2_t, poly64x2x3_t, poly64x2x4_t, poly8x16_t, poly8x16x2_t,
+ poly8x16x3_t, poly8x16x4_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, poly8x8x4_t,
+ uint16x4_t, uint16x4x2_t, uint16x4x3_t, uint16x4x4_t, uint16x8_t,
+ uint16x8x2_t, uint16x8x3_t, uint16x8x4_t, uint32x2_t, uint32x2x2_t,
+ uint32x2x3_t, uint32x2x4_t, uint32x4_t, uint32x4x2_t, uint32x4x3_t,
+ uint32x4x4_t, uint64x1_t, uint64x1x2_t, uint64x1x3_t, uint64x1x4_t,
+ uint64x2_t, uint64x2x2_t, uint64x2x3_t, uint64x2x4_t, uint8x16_t,
+ uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint8x8_t, uint8x8x2_t,
+ uint8x8x3_t, uint8x8x4_t,
+ }
+);
+
+impl_unsafe_marker_for_simd!(
+ #[cfg(target_arch = "x86")]
+ unsafe impl Zeroable for x86::{
+ __m128i, __m128, __m128d,
+ __m256i, __m256, __m256d,
+ }
+);
+
+impl_unsafe_marker_for_simd!(
+ #[cfg(target_arch = "x86_64")]
+ unsafe impl Zeroable for x86_64::{
+ __m128i, __m128, __m128d,
+ __m256i, __m256, __m256d,
+ }
+);
+
+#[cfg(feature = "nightly_portable_simd")]
+#[cfg_attr(
+ feature = "nightly_docs",
+ doc(cfg(feature = "nightly_portable_simd"))
+)]
+unsafe impl<T, const N: usize> Zeroable for core::simd::Simd<T, N>
+where
+ T: core::simd::SimdElement + Zeroable,
+ core::simd::LaneCount<N>: core::simd::SupportedLaneCount,
+{
+}
+
+impl_unsafe_marker_for_simd!(
+ #[cfg(all(target_arch = "x86", feature = "nightly_stdsimd"))]
+ unsafe impl Zeroable for x86::{
+ __m128bh, __m256bh, __m512,
+ __m512bh, __m512d, __m512i,
+ }
+);
+
+impl_unsafe_marker_for_simd!(
+ #[cfg(all(target_arch = "x86_64", feature = "nightly_stdsimd"))]
+ unsafe impl Zeroable for x86_64::{
+ __m128bh, __m256bh, __m512,
+ __m512bh, __m512d, __m512i,
+ }
+);
diff --git a/vendor/bytemuck/src/zeroable_in_option.rs b/vendor/bytemuck/src/zeroable_in_option.rs
new file mode 100644
index 0000000..c4cf158
--- /dev/null
+++ b/vendor/bytemuck/src/zeroable_in_option.rs
@@ -0,0 +1,35 @@
+use super::*;
+
+// Note(Lokathor): This is the neat part!!
+unsafe impl<T: ZeroableInOption> Zeroable for Option<T> {}
+
+/// Trait for types which are [Zeroable](Zeroable) when wrapped in
+/// [Option](core::option::Option).
+///
+/// ## Safety
+///
+/// * `Option<YourType>` must uphold the same invariants as
+/// [Zeroable](Zeroable).
+pub unsafe trait ZeroableInOption: Sized {}
+
+unsafe impl ZeroableInOption for NonZeroI8 {}
+unsafe impl ZeroableInOption for NonZeroI16 {}
+unsafe impl ZeroableInOption for NonZeroI32 {}
+unsafe impl ZeroableInOption for NonZeroI64 {}
+unsafe impl ZeroableInOption for NonZeroI128 {}
+unsafe impl ZeroableInOption for NonZeroIsize {}
+unsafe impl ZeroableInOption for NonZeroU8 {}
+unsafe impl ZeroableInOption for NonZeroU16 {}
+unsafe impl ZeroableInOption for NonZeroU32 {}
+unsafe impl ZeroableInOption for NonZeroU64 {}
+unsafe impl ZeroableInOption for NonZeroU128 {}
+unsafe impl ZeroableInOption for NonZeroUsize {}
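+
+// A minimal sketch of the payoff (test-only, illustrative): because
+// `NonZeroU8: ZeroableInOption`, `Option<NonZeroU8>` is `Zeroable`, and its
+// all-zeroes bit pattern is exactly `None`.
+#[cfg(test)]
+mod zeroable_in_option_sketch {
+  use super::*;
+
+  #[test]
+  fn zeroed_option_is_none() {
+    let x: Option<NonZeroU8> = Zeroable::zeroed();
+    assert!(x.is_none());
+  }
+}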
+
+// Note: this does not create a null vtable pointer, because we get `None` anyway.
+unsafe impl<T: ?Sized> ZeroableInOption for NonNull<T> {}
+unsafe impl<T: ?Sized> ZeroableInOption for &'_ T {}
+unsafe impl<T: ?Sized> ZeroableInOption for &'_ mut T {}
+
+#[cfg(feature = "extern_crate_alloc")]
+#[cfg_attr(feature = "nightly_docs", doc(cfg(feature = "extern_crate_alloc")))]
+unsafe impl<T: ?Sized> ZeroableInOption for alloc::boxed::Box<T> {}