path: root/vendor/portable-atomic/src/imp/float.rs
author     Valentin Popov <valentin@popov.link>  2024-07-19 15:37:58 +0300
committer  Valentin Popov <valentin@popov.link>  2024-07-19 15:37:58 +0300
commit     a990de90fe41456a23e58bd087d2f107d321f3a1 (patch)
tree       15afc392522a9e85dc3332235e311b7d39352ea9 /vendor/portable-atomic/src/imp/float.rs
parent     3d48cd3f81164bbfc1a755dc1d4a9a02f98c8ddd (diff)
download   fparkan-a990de90fe41456a23e58bd087d2f107d321f3a1.tar.xz
           fparkan-a990de90fe41456a23e58bd087d2f107d321f3a1.zip
Deleted vendor folder
Diffstat (limited to 'vendor/portable-atomic/src/imp/float.rs')
-rw-r--r--  vendor/portable-atomic/src/imp/float.rs  215
1 file changed, 0 insertions, 215 deletions
diff --git a/vendor/portable-atomic/src/imp/float.rs b/vendor/portable-atomic/src/imp/float.rs
deleted file mode 100644
index 965f983..0000000
--- a/vendor/portable-atomic/src/imp/float.rs
+++ /dev/null
@@ -1,215 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-// AtomicF{32,64} implementation based on AtomicU{32,64}.
-//
-// This module provides atomic float implementations using atomic integer.
-//
-// Note that most of `fetch_*` operations of atomic floats are implemented using
-// CAS loops, which can be slower than equivalent operations of atomic integers.
-//
-// GPU targets have atomic instructions for float, so GPU targets will use
-// architecture-specific implementations instead of this implementation in the
-// future: https://github.com/taiki-e/portable-atomic/issues/34
-
-#![cfg_attr(
- all(target_pointer_width = "16", not(feature = "fallback")),
- allow(unused_imports, unused_macros)
-)]
-
-use core::{cell::UnsafeCell, sync::atomic::Ordering};
-
-macro_rules! atomic_float {
- (
- $atomic_type:ident, $float_type:ident, $atomic_int_type:ident, $int_type:ident,
- $align:literal
- ) => {
- #[repr(C, align($align))]
- pub(crate) struct $atomic_type {
- v: UnsafeCell<$float_type>,
- }
-
- // Send is implicitly implemented.
- // SAFETY: any data races are prevented by atomic operations.
- unsafe impl Sync for $atomic_type {}
-
- impl $atomic_type {
- #[inline]
- pub(crate) const fn new(v: $float_type) -> Self {
- Self { v: UnsafeCell::new(v) }
- }
-
- #[inline]
- pub(crate) fn is_lock_free() -> bool {
- crate::$atomic_int_type::is_lock_free()
- }
- #[inline]
- pub(crate) const fn is_always_lock_free() -> bool {
- crate::$atomic_int_type::is_always_lock_free()
- }
-
- #[inline]
- pub(crate) fn get_mut(&mut self) -> &mut $float_type {
- // SAFETY: the mutable reference guarantees unique ownership.
- // (UnsafeCell::get_mut requires Rust 1.50)
- unsafe { &mut *self.v.get() }
- }
-
- #[inline]
- pub(crate) fn into_inner(self) -> $float_type {
- self.v.into_inner()
- }
-
- #[inline]
- #[cfg_attr(
- any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
- track_caller
- )]
- pub(crate) fn load(&self, order: Ordering) -> $float_type {
- $float_type::from_bits(self.as_bits().load(order))
- }
-
- #[inline]
- #[cfg_attr(
- any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
- track_caller
- )]
- pub(crate) fn store(&self, val: $float_type, order: Ordering) {
- self.as_bits().store(val.to_bits(), order)
- }
-
- const_fn! {
- const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
- #[inline]
- pub(crate) const fn as_bits(&self) -> &crate::$atomic_int_type {
- // SAFETY: $atomic_type and $atomic_int_type have the same layout,
- // and there is no concurrent access to the value that does not go through this method.
- unsafe { &*(self as *const Self as *const crate::$atomic_int_type) }
- }
- }
-
- #[inline]
- pub(crate) const fn as_ptr(&self) -> *mut $float_type {
- self.v.get()
- }
- }
-
- cfg_has_atomic_cas! {
- impl $atomic_type {
- #[inline]
- #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub(crate) fn swap(&self, val: $float_type, order: Ordering) -> $float_type {
- $float_type::from_bits(self.as_bits().swap(val.to_bits(), order))
- }
-
- #[inline]
- #[cfg_attr(
- any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
- track_caller
- )]
- pub(crate) fn compare_exchange(
- &self,
- current: $float_type,
- new: $float_type,
- success: Ordering,
- failure: Ordering,
- ) -> Result<$float_type, $float_type> {
- match self.as_bits().compare_exchange(
- current.to_bits(),
- new.to_bits(),
- success,
- failure,
- ) {
- Ok(v) => Ok($float_type::from_bits(v)),
- Err(v) => Err($float_type::from_bits(v)),
- }
- }
-
- #[inline]
- #[cfg_attr(
- any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
- track_caller
- )]
- pub(crate) fn compare_exchange_weak(
- &self,
- current: $float_type,
- new: $float_type,
- success: Ordering,
- failure: Ordering,
- ) -> Result<$float_type, $float_type> {
- match self.as_bits().compare_exchange_weak(
- current.to_bits(),
- new.to_bits(),
- success,
- failure,
- ) {
- Ok(v) => Ok($float_type::from_bits(v)),
- Err(v) => Err($float_type::from_bits(v)),
- }
- }
-
- #[inline]
- #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub(crate) fn fetch_add(&self, val: $float_type, order: Ordering) -> $float_type {
- self.fetch_update_(order, |x| x + val)
- }
-
- #[inline]
- #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub(crate) fn fetch_sub(&self, val: $float_type, order: Ordering) -> $float_type {
- self.fetch_update_(order, |x| x - val)
- }
-
- #[inline]
- #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- fn fetch_update_<F>(&self, order: Ordering, mut f: F) -> $float_type
- where
- F: FnMut($float_type) -> $float_type,
- {
- // This is a private function and all instances of `f` only operate on the value
- // loaded, so there is no need to synchronize the first load/failed CAS.
- let mut prev = self.load(Ordering::Relaxed);
- loop {
- let next = f(prev);
- match self.compare_exchange_weak(prev, next, order, Ordering::Relaxed) {
- Ok(x) => return x,
- Err(next_prev) => prev = next_prev,
- }
- }
- }
-
- #[inline]
- #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub(crate) fn fetch_max(&self, val: $float_type, order: Ordering) -> $float_type {
- self.fetch_update_(order, |x| x.max(val))
- }
-
- #[inline]
- #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub(crate) fn fetch_min(&self, val: $float_type, order: Ordering) -> $float_type {
- self.fetch_update_(order, |x| x.min(val))
- }
-
- #[inline]
- #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub(crate) fn fetch_neg(&self, order: Ordering) -> $float_type {
- const NEG_MASK: $int_type = !0 / 2 + 1;
- $float_type::from_bits(self.as_bits().fetch_xor(NEG_MASK, order))
- }
-
- #[inline]
- #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
- pub(crate) fn fetch_abs(&self, order: Ordering) -> $float_type {
- const ABS_MASK: $int_type = !0 / 2;
- $float_type::from_bits(self.as_bits().fetch_and(ABS_MASK, order))
- }
- }
- } // cfg_has_atomic_cas!
- };
-}
-
-cfg_has_atomic_32! {
- atomic_float!(AtomicF32, f32, AtomicU32, u32, 4);
-}
-cfg_has_atomic_64! {
- atomic_float!(AtomicF64, f64, AtomicU64, u64, 8);
-}
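
The deleted module's comment notes that most `fetch_*` operations on atomic floats are built from compare-and-swap loops over the underlying atomic integer (see `fetch_update_` in the diff above). A minimal standalone sketch of that technique, using only `core::sync::atomic::AtomicU32` and the `f32` bit conversions; the free function `fetch_add_f32` is illustrative and not part of portable-atomic's API:

use core::sync::atomic::{AtomicU32, Ordering};

/// Sketch: add `val` to an f32 stored as bits in an AtomicU32, returning the
/// previous value, via a compare-exchange loop.
fn fetch_add_f32(bits: &AtomicU32, val: f32, order: Ordering) -> f32 {
    // The first load and the failure ordering are Relaxed; only the
    // successful exchange needs to publish the new value with the caller's
    // ordering, mirroring the rationale in the deleted `fetch_update_`.
    let mut prev = bits.load(Ordering::Relaxed);
    loop {
        let next = f32::from_bits(prev) + val;
        match bits.compare_exchange_weak(prev, next.to_bits(), order, Ordering::Relaxed) {
            Ok(old) => return f32::from_bits(old),
            Err(observed) => prev = observed, // another thread won the race; retry with its value
        }
    }
}

fn main() {
    let a = AtomicU32::new(1.5f32.to_bits());
    let old = fetch_add_f32(&a, 2.25, Ordering::SeqCst);
    assert_eq!(old, 1.5);
    assert_eq!(f32::from_bits(a.load(Ordering::SeqCst)), 3.75);
}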
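
By contrast, `fetch_neg` and `fetch_abs` avoid a CAS loop entirely: they flip or clear the IEEE 754 sign bit with a single atomic `xor`/`and` on the bit representation. For `u32`, `!0 / 2 + 1` is 0x8000_0000 (the sign bit alone) and `!0 / 2` is 0x7FFF_FFFF (everything but the sign bit). A small check of that bit-level identity, independent of any atomics:

fn main() {
    const NEG_MASK: u32 = !0 / 2 + 1; // 0x8000_0000: just the sign bit
    const ABS_MASK: u32 = !0 / 2;     // 0x7FFF_FFFF: everything but the sign bit

    let x = -3.5f32;
    // XOR with the sign bit flips the sign: -3.5 -> 3.5
    assert_eq!(f32::from_bits(x.to_bits() ^ NEG_MASK), 3.5);
    // AND with the complement clears the sign bit: abs(-3.5) = 3.5
    assert_eq!(f32::from_bits(x.to_bits() & ABS_MASK), 3.5);
    // A positive value is left unchanged by the abs mask.
    assert_eq!(f32::from_bits(2.0f32.to_bits() & ABS_MASK), 2.0);
}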
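
This file was only the vendored copy of the implementation behind the crate's public `AtomicF32`/`AtomicF64` types; those types remain available from the upstream crate. A usage sketch, assuming `portable-atomic` is taken as a regular dependency with its `float` feature enabled:

// Cargo.toml (assumed): portable-atomic = { version = "1", features = ["float"] }
use core::sync::atomic::Ordering;
use portable_atomic::AtomicF32;

fn main() {
    let a = AtomicF32::new(1.0);
    a.fetch_add(0.5, Ordering::SeqCst);       // implemented as a CAS loop
    a.fetch_max(3.0, Ordering::SeqCst);       // also a CAS loop
    let prev = a.fetch_neg(Ordering::SeqCst); // a single atomic XOR of the sign bit
    assert_eq!(prev, 3.0);
    assert_eq!(a.load(Ordering::SeqCst), -3.0);
}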