author     Valentin Popov <valentin@popov.link>    2024-01-08 00:21:28 +0300
committer  Valentin Popov <valentin@popov.link>    2024-01-08 00:21:28 +0300
commit     1b6a04ca5504955c571d1c97504fb45ea0befee4 (patch)
tree       7579f518b23313e8a9748a88ab6173d5e030b227 /vendor/portable-atomic/src/imp/float.rs
parent     5ecd8cf2cba827454317368b68571df0d13d7842 (diff)
download   fparkan-1b6a04ca5504955c571d1c97504fb45ea0befee4.tar.xz
           fparkan-1b6a04ca5504955c571d1c97504fb45ea0befee4.zip
Initial vendor packages
Signed-off-by: Valentin Popov <valentin@popov.link>
Diffstat (limited to 'vendor/portable-atomic/src/imp/float.rs')
-rw-r--r--   vendor/portable-atomic/src/imp/float.rs   215
1 file changed, 215 insertions, 0 deletions
diff --git a/vendor/portable-atomic/src/imp/float.rs b/vendor/portable-atomic/src/imp/float.rs
new file mode 100644
index 0000000..965f983
--- /dev/null
+++ b/vendor/portable-atomic/src/imp/float.rs
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// AtomicF{32,64} implementation based on AtomicU{32,64}.
+//
+// This module provides atomic float implementations using atomic integer.
+//
+// Note that most of `fetch_*` operations of atomic floats are implemented using
+// CAS loops, which can be slower than equivalent operations of atomic integers.
+//
+// GPU targets have atomic instructions for float, so GPU targets will use
+// architecture-specific implementations instead of this implementation in the
+// future: https://github.com/taiki-e/portable-atomic/issues/34
+
+#![cfg_attr(
+    all(target_pointer_width = "16", not(feature = "fallback")),
+    allow(unused_imports, unused_macros)
+)]
+
+use core::{cell::UnsafeCell, sync::atomic::Ordering};
+
+macro_rules! atomic_float {
+    (
+        $atomic_type:ident, $float_type:ident, $atomic_int_type:ident, $int_type:ident,
+        $align:literal
+    ) => {
+        #[repr(C, align($align))]
+        pub(crate) struct $atomic_type {
+            v: UnsafeCell<$float_type>,
+        }
+
+        // Send is implicitly implemented.
+        // SAFETY: any data races are prevented by atomic operations.
+        unsafe impl Sync for $atomic_type {}
+
+        impl $atomic_type {
+            #[inline]
+            pub(crate) const fn new(v: $float_type) -> Self {
+                Self { v: UnsafeCell::new(v) }
+            }
+
+            #[inline]
+            pub(crate) fn is_lock_free() -> bool {
+                crate::$atomic_int_type::is_lock_free()
+            }
+            #[inline]
+            pub(crate) const fn is_always_lock_free() -> bool {
+                crate::$atomic_int_type::is_always_lock_free()
+            }
+
+            #[inline]
+            pub(crate) fn get_mut(&mut self) -> &mut $float_type {
+                // SAFETY: the mutable reference guarantees unique ownership.
+                // (UnsafeCell::get_mut requires Rust 1.50)
+                unsafe { &mut *self.v.get() }
+            }
+
+            #[inline]
+            pub(crate) fn into_inner(self) -> $float_type {
+                self.v.into_inner()
+            }
+
+            #[inline]
+            #[cfg_attr(
+                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
+                track_caller
+            )]
+            pub(crate) fn load(&self, order: Ordering) -> $float_type {
+                $float_type::from_bits(self.as_bits().load(order))
+            }
+
+            #[inline]
+            #[cfg_attr(
+                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
+                track_caller
+            )]
+            pub(crate) fn store(&self, val: $float_type, order: Ordering) {
+                self.as_bits().store(val.to_bits(), order)
+            }
+
+            const_fn! {
+                const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
+                #[inline]
+                pub(crate) const fn as_bits(&self) -> &crate::$atomic_int_type {
+                    // SAFETY: $atomic_type and $atomic_int_type have the same layout,
+                    // and there is no concurrent access to the value that does not go
+                    // through this method.
+                    unsafe { &*(self as *const Self as *const crate::$atomic_int_type) }
+                }
+            }
+
+            #[inline]
+            pub(crate) const fn as_ptr(&self) -> *mut $float_type {
+                self.v.get()
+            }
+        }
+
+        cfg_has_atomic_cas! {
+            impl $atomic_type {
+                #[inline]
+                #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+                pub(crate) fn swap(&self, val: $float_type, order: Ordering) -> $float_type {
+                    $float_type::from_bits(self.as_bits().swap(val.to_bits(), order))
+                }
+
+                #[inline]
+                #[cfg_attr(
+                    any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
+                    track_caller
+                )]
+                pub(crate) fn compare_exchange(
+                    &self,
+                    current: $float_type,
+                    new: $float_type,
+                    success: Ordering,
+                    failure: Ordering,
+                ) -> Result<$float_type, $float_type> {
+                    match self.as_bits().compare_exchange(
+                        current.to_bits(),
+                        new.to_bits(),
+                        success,
+                        failure,
+                    ) {
+                        Ok(v) => Ok($float_type::from_bits(v)),
+                        Err(v) => Err($float_type::from_bits(v)),
+                    }
+                }
+
+                #[inline]
+                #[cfg_attr(
+                    any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
+                    track_caller
+                )]
+                pub(crate) fn compare_exchange_weak(
+                    &self,
+                    current: $float_type,
+                    new: $float_type,
+                    success: Ordering,
+                    failure: Ordering,
+                ) -> Result<$float_type, $float_type> {
+                    match self.as_bits().compare_exchange_weak(
+                        current.to_bits(),
+                        new.to_bits(),
+                        success,
+                        failure,
+                    ) {
+                        Ok(v) => Ok($float_type::from_bits(v)),
+                        Err(v) => Err($float_type::from_bits(v)),
+                    }
+                }
+
+                #[inline]
+                #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+                pub(crate) fn fetch_add(&self, val: $float_type, order: Ordering) -> $float_type {
+                    self.fetch_update_(order, |x| x + val)
+                }
+
+                #[inline]
+                #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+                pub(crate) fn fetch_sub(&self, val: $float_type, order: Ordering) -> $float_type {
+                    self.fetch_update_(order, |x| x - val)
+                }
+
+                #[inline]
+                #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+                fn fetch_update_<F>(&self, order: Ordering, mut f: F) -> $float_type
+                where
+                    F: FnMut($float_type) -> $float_type,
+                {
+                    // This is a private function and all instances of `f` only operate on
+                    // the value loaded, so there is no need to synchronize the first
+                    // load/failed CAS.
+                    let mut prev = self.load(Ordering::Relaxed);
+                    loop {
+                        let next = f(prev);
+                        match self.compare_exchange_weak(prev, next, order, Ordering::Relaxed) {
+                            Ok(x) => return x,
+                            Err(next_prev) => prev = next_prev,
+                        }
+                    }
+                }
+
+                #[inline]
+                #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+                pub(crate) fn fetch_max(&self, val: $float_type, order: Ordering) -> $float_type {
+                    self.fetch_update_(order, |x| x.max(val))
+                }
+
+                #[inline]
+                #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+                pub(crate) fn fetch_min(&self, val: $float_type, order: Ordering) -> $float_type {
+                    self.fetch_update_(order, |x| x.min(val))
+                }
+
+                #[inline]
+                #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+                pub(crate) fn fetch_neg(&self, order: Ordering) -> $float_type {
+                    const NEG_MASK: $int_type = !0 / 2 + 1;
+                    $float_type::from_bits(self.as_bits().fetch_xor(NEG_MASK, order))
+                }
+
+                #[inline]
+                #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+                pub(crate) fn fetch_abs(&self, order: Ordering) -> $float_type {
+                    const ABS_MASK: $int_type = !0 / 2;
+                    $float_type::from_bits(self.as_bits().fetch_and(ABS_MASK, order))
+                }
+            }
+        } // cfg_has_atomic_cas!
+    };
+}
+
+cfg_has_atomic_32! {
+    atomic_float!(AtomicF32, f32, AtomicU32, u32, 4);
+}
+cfg_has_atomic_64! {
+    atomic_float!(AtomicF64, f64, AtomicU64, u64, 8);
+}
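The interesting part of this file is how float atomics are built from integer atomics: loads, stores, swaps, and compare-exchanges reinterpret bits via `to_bits`/`from_bits`; arithmetic `fetch_*` operations run a CAS loop (the module comment notes these can be slower than integer equivalents); and `fetch_neg`/`fetch_abs` reduce to a single `fetch_xor`/`fetch_and` on the sign bit. Below is a minimal, self-contained sketch of those same techniques on `core::sync::atomic::AtomicU32`; the function names are illustrative and not part of the vendored API.

```rust
use core::sync::atomic::{AtomicU32, Ordering};

// Sign-bit masks for u32/f32, written the way the macro computes them:
// !0 / 2 + 1 == 0x8000_0000 (just the sign bit), !0 / 2 == 0x7FFF_FFFF (everything else).
const NEG_MASK: u32 = !0 / 2 + 1;
const ABS_MASK: u32 = !0 / 2;

// CAS-loop `fetch_add`, the pattern behind fetch_add/fetch_sub/fetch_max/fetch_min:
// load relaxed, compute on the float value, and retry with compare_exchange_weak
// until no other thread raced in between. Returns the previous value.
fn fetch_add_f32(a: &AtomicU32, val: f32, order: Ordering) -> f32 {
    let mut prev = a.load(Ordering::Relaxed);
    loop {
        let next = f32::to_bits(f32::from_bits(prev) + val);
        match a.compare_exchange_weak(prev, next, order, Ordering::Relaxed) {
            Ok(x) => return f32::from_bits(x),
            Err(next_prev) => prev = next_prev,
        }
    }
}

// Negation and absolute value need no CAS loop at all: they are pure bit
// operations on the sign bit, so one atomic XOR/AND suffices.
fn fetch_neg_f32(a: &AtomicU32, order: Ordering) -> f32 {
    f32::from_bits(a.fetch_xor(NEG_MASK, order))
}
fn fetch_abs_f32(a: &AtomicU32, order: Ordering) -> f32 {
    f32::from_bits(a.fetch_and(ABS_MASK, order))
}

fn main() {
    let a = AtomicU32::new(2.0f32.to_bits());
    assert_eq!(fetch_add_f32(&a, 0.5, Ordering::SeqCst), 2.0); // returns previous value
    assert_eq!(fetch_neg_f32(&a, Ordering::SeqCst), 2.5);
    assert_eq!(f32::from_bits(a.load(Ordering::Relaxed)), -2.5);
    assert_eq!(fetch_abs_f32(&a, Ordering::SeqCst), -2.5);
    assert_eq!(f32::from_bits(a.load(Ordering::Relaxed)), 2.5);
}
```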
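For completeness, here is how the `AtomicF32`/`AtomicF64` types generated by `atomic_float!` are used from downstream code. This sketch assumes the `portable-atomic` crate with its `float` feature enabled; the feature gate and crate version are assumptions, as they are not visible in this diff.

```rust
// Cargo.toml (assumed): portable-atomic = { version = "1", features = ["float"] }
use portable_atomic::{AtomicF32, Ordering};

fn main() {
    // AtomicF32 is produced by atomic_float!(AtomicF32, f32, AtomicU32, u32, 4).
    let a = AtomicF32::new(1.5);

    // Plain load/store go straight through the underlying AtomicU32 bits.
    a.store(2.0, Ordering::Relaxed);
    assert_eq!(a.load(Ordering::Relaxed), 2.0);

    // fetch_add is a CAS loop internally and returns the previous value.
    let prev = a.fetch_add(0.5, Ordering::SeqCst);
    assert_eq!(prev, 2.0);
    assert_eq!(a.load(Ordering::Relaxed), 2.5);

    // fetch_neg / fetch_abs are single fetch_xor / fetch_and on the sign bit.
    a.fetch_neg(Ordering::SeqCst);
    assert_eq!(a.load(Ordering::Relaxed), -2.5);
    a.fetch_abs(Ordering::SeqCst);
    assert_eq!(a.load(Ordering::Relaxed), 2.5);
}
```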