path: root/vendor/portable-atomic/src/imp/interrupt
author    Valentin Popov <valentin@popov.link>  2024-07-19 15:37:58 +0300
committer Valentin Popov <valentin@popov.link>  2024-07-19 15:37:58 +0300
commit    a990de90fe41456a23e58bd087d2f107d321f3a1 (patch)
tree      15afc392522a9e85dc3332235e311b7d39352ea9 /vendor/portable-atomic/src/imp/interrupt
parent    3d48cd3f81164bbfc1a755dc1d4a9a02f98c8ddd (diff)
download  fparkan-a990de90fe41456a23e58bd087d2f107d321f3a1.tar.xz
          fparkan-a990de90fe41456a23e58bd087d2f107d321f3a1.zip
Deleted vendor folder
Diffstat (limited to 'vendor/portable-atomic/src/imp/interrupt')
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/README.md  |  27
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/armv4t.rs  | 158
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/armv6m.rs  |  48
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/avr.rs     |  57
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/mod.rs     | 903
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/msp430.rs  |  64
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/riscv.rs   |  81
-rw-r--r--  vendor/portable-atomic/src/imp/interrupt/xtensa.rs  |  47
8 files changed, 0 insertions, 1385 deletions
diff --git a/vendor/portable-atomic/src/imp/interrupt/README.md b/vendor/portable-atomic/src/imp/interrupt/README.md
deleted file mode 100644
index edc5fbf..0000000
--- a/vendor/portable-atomic/src/imp/interrupt/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Implementation of disabling interrupts
-
-This module is used to provide atomic CAS for targets where atomic CAS is not available in the standard library.
-
-- MSP430 and AVR are always single-core, so this module is always used.
-- ARMv6-M (thumbv6m), pre-v6 ARM (e.g., thumbv4t, thumbv5te), RISC-V without A-extension, and Xtensa may be multi-core, so this module is used only when the `unsafe-assume-single-core` feature is enabled.
-
-The implementation uses privileged instructions to disable interrupts, so it usually doesn't work in unprivileged mode.
-Enabling this feature in an environment where privileged instructions are not available, or where the instructions used are not sufficient to disable interrupts on the system, is also usually considered **unsound**, although the details are system-dependent.
-
-Consider using the [`critical-section` feature](../../../README.md#optional-features-critical-section) for systems that cannot use the `unsafe-assume-single-core` feature.
-
-For some targets, the implementation can be changed by explicitly enabling features.
-
-- On ARMv6-M, this disables interrupts by modifying the PRIMASK register.
-- On pre-v6 ARM, this disables interrupts by modifying the I (IRQ mask) bit of the CPSR.
-- On pre-v6 ARM with the `disable-fiq` feature, this disables interrupts by modifying the I (IRQ mask) bit and F (FIQ mask) bit of the CPSR.
-- On RISC-V (without A-extension), this disables interrupts by modifying the MIE (Machine Interrupt Enable) bit of the `mstatus` register.
-- On RISC-V (without A-extension) with the `s-mode` feature, this disables interrupts by modifying the SIE (Supervisor Interrupt Enable) bit of the `sstatus` register.
-- On RISC-V (without A-extension) with the `force-amo` feature, this uses AMO instructions for RMWs that have corresponding AMO instructions even if A-extension is disabled. For other RMWs, this disables interrupts as usual.
-- On MSP430, this disables interrupts by modifying the GIE (Global Interrupt Enable) bit of the status register (SR).
-- On AVR, this disables interrupts by modifying the I (Global Interrupt Enable) bit of the status register (SREG).
-- On Xtensa, this disables interrupts by modifying the PS special register.
-
-Some operations don't require disabling interrupts: loads and stores on all targets except AVR; additionally, on MSP430, {8,16}-bit `add,sub,and,or,xor,not`; and on RISC-V with the `force-amo` feature, 32-bit(RV32)/{32,64}-bit(RV64) `swap,fetch_{add,sub,and,or,xor,not,max,min},add,sub,and,or,xor,not` and {8,16}-bit `fetch_{and,or,xor,not},and,or,xor,not`. However, when the `critical-section` feature is enabled, critical sections are taken for all atomic operations.
-
-Feel free to submit an issue if your target is not supported yet.
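
For orientation, the files below all follow the same disable–run–restore pattern. The following is a minimal sketch of that pattern, illustrative only: `disable`/`restore` stand in for the per-architecture functions defined in this directory, and the real crate wraps them in the `with` helper in mod.rs.

fn with<R>(f: impl FnOnce() -> R) -> R {
    // Save the current interrupt state and mask interrupts (per-architecture).
    let state = disable();
    // Run the read-modify-write body; nothing can preempt it on a single-core system.
    let r = f();
    // SAFETY: `state` came from the matching `disable` call above.
    // Interrupts are re-enabled only if they were enabled before `disable`.
    unsafe { restore(state) }
    r
}

// Example: an emulated fetch_add built on the pattern above.
fn fetch_add(v: &core::cell::UnsafeCell<u32>, val: u32) -> u32 {
    with(|| unsafe {
        let prev = v.get().read();
        v.get().write(prev.wrapping_add(val));
        prev
    })
}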
diff --git a/vendor/portable-atomic/src/imp/interrupt/armv4t.rs b/vendor/portable-atomic/src/imp/interrupt/armv4t.rs
deleted file mode 100644
index 20f7089..0000000
--- a/vendor/portable-atomic/src/imp/interrupt/armv4t.rs
+++ /dev/null
@@ -1,158 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-// Refs: https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/The-System-Level-Programmers--Model/ARM-processor-modes-and-ARM-core-registers/Program-Status-Registers--PSRs-?lang=en
-//
-// Generated asm:
-// - armv5te https://godbolt.org/z/Teh7WajMs
-
-#[cfg(not(portable_atomic_no_asm))]
-use core::arch::asm;
-
-// - 0x80 - I (IRQ mask) bit (1 << 7)
-// - 0x40 - F (FIQ mask) bit (1 << 6)
-// We disable only IRQs by default. See also https://github.com/taiki-e/portable-atomic/pull/28#issuecomment-1214146912.
-#[cfg(not(portable_atomic_disable_fiq))]
-macro_rules! mask {
- () => {
- "0x80"
- };
-}
-#[cfg(portable_atomic_disable_fiq)]
-macro_rules! mask {
- () => {
- "0xC0" // 0x80 | 0x40
- };
-}
-
-pub(super) type State = u32;
-
-/// Disables interrupts and returns the previous interrupt state.
-#[inline]
-#[instruction_set(arm::a32)]
-pub(super) fn disable() -> State {
- let cpsr: State;
- // SAFETY: reading CPSR and disabling interrupts are safe.
- // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
- unsafe {
- asm!(
- "mrs {prev}, cpsr",
- concat!("orr {new}, {prev}, ", mask!()),
- "msr cpsr_c, {new}",
- prev = out(reg) cpsr,
- new = out(reg) _,
-            // Do not use `nomem` and `readonly`, to prevent subsequent memory accesses from being reordered before interrupts are disabled.
- options(nostack, preserves_flags),
- );
- }
- cpsr
-}
-
-/// Restores the previous interrupt state.
-///
-/// # Safety
-///
-/// The state must be the one retrieved by the previous `disable`.
-#[inline]
-#[instruction_set(arm::a32)]
-pub(super) unsafe fn restore(cpsr: State) {
-    // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`.
-    //
-    // This clobbers the control field mask byte of CPSR. See msp430.rs for notes on the safety of this.
- // (preserves_flags is fine because we only clobber the I, F, T, and M bits of CPSR.)
- //
- // Refs: https://developer.arm.com/documentation/dui0473/m/arm-and-thumb-instructions/msr--general-purpose-register-to-psr-
- unsafe {
-        // Do not use `nomem` and `readonly`, to prevent preceding memory accesses from being reordered after interrupts are enabled.
- asm!("msr cpsr_c, {0}", in(reg) cpsr, options(nostack, preserves_flags));
- }
-}
-
-// On pre-v6 ARM, we cannot use core::sync::atomic here because its non-relaxed
-// loads and stores call the `__sync_*` builtins (pre-v6 ARM doesn't have the
-// Data Memory Barrier instruction).
-//
-// Generated asm:
-// - armv5te https://godbolt.org/z/bMxK7M8Ta
-pub(crate) mod atomic {
- #[cfg(not(portable_atomic_no_asm))]
- use core::arch::asm;
- use core::{cell::UnsafeCell, sync::atomic::Ordering};
-
- macro_rules! atomic {
- ($([$($generics:tt)*])? $atomic_type:ident, $value_type:ty, $asm_suffix:tt) => {
- #[repr(transparent)]
- pub(crate) struct $atomic_type $(<$($generics)*>)? {
- v: UnsafeCell<$value_type>,
- }
-
- // Send is implicitly implemented for atomic integers, but not for atomic pointers.
- // SAFETY: any data races are prevented by atomic operations.
- unsafe impl $(<$($generics)*>)? Send for $atomic_type $(<$($generics)*>)? {}
- // SAFETY: any data races are prevented by atomic operations.
- unsafe impl $(<$($generics)*>)? Sync for $atomic_type $(<$($generics)*>)? {}
-
- impl $(<$($generics)*>)? $atomic_type $(<$($generics)*>)? {
- #[inline]
- pub(crate) fn load(&self, order: Ordering) -> $value_type {
- let src = self.v.get();
- // SAFETY: any data races are prevented by atomic intrinsics and the raw
- // pointer passed in is valid because we got it from a reference.
- unsafe {
- let out;
- match order {
- Ordering::Relaxed => {
- asm!(
- concat!("ldr", $asm_suffix, " {out}, [{src}]"),
- src = in(reg) src,
- out = lateout(reg) out,
- options(nostack, preserves_flags, readonly),
- );
- }
- Ordering::Acquire | Ordering::SeqCst => {
- // inline asm without nomem/readonly implies compiler fence.
- // And compiler fence is fine because the user explicitly declares that
- // the system is single-core by using an unsafe cfg.
- asm!(
- concat!("ldr", $asm_suffix, " {out}, [{src}]"),
- src = in(reg) src,
- out = lateout(reg) out,
- options(nostack, preserves_flags),
- );
- }
- _ => unreachable!("{:?}", order),
- }
- out
- }
- }
-
- #[inline]
- pub(crate) fn store(&self, val: $value_type, _order: Ordering) {
- let dst = self.v.get();
- // SAFETY: any data races are prevented by atomic intrinsics and the raw
- // pointer passed in is valid because we got it from a reference.
- unsafe {
- // inline asm without nomem/readonly implies compiler fence.
- // And compiler fence is fine because the user explicitly declares that
- // the system is single-core by using an unsafe cfg.
- asm!(
- concat!("str", $asm_suffix, " {val}, [{dst}]"),
- dst = in(reg) dst,
- val = in(reg) val,
- options(nostack, preserves_flags),
- );
- }
- }
- }
- };
- }
-
- atomic!(AtomicI8, i8, "b");
- atomic!(AtomicU8, u8, "b");
- atomic!(AtomicI16, i16, "h");
- atomic!(AtomicU16, u16, "h");
- atomic!(AtomicI32, i32, "");
- atomic!(AtomicU32, u32, "");
- atomic!(AtomicIsize, isize, "");
- atomic!(AtomicUsize, usize, "");
- atomic!([T] AtomicPtr, *mut T, "");
-}
diff --git a/vendor/portable-atomic/src/imp/interrupt/armv6m.rs b/vendor/portable-atomic/src/imp/interrupt/armv6m.rs
deleted file mode 100644
index 85037a3..0000000
--- a/vendor/portable-atomic/src/imp/interrupt/armv6m.rs
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-// Adapted from https://github.com/rust-embedded/cortex-m.
-//
-// Generated asm:
-// - armv6-m https://godbolt.org/z/YxME38xcM
-
-#[cfg(not(portable_atomic_no_asm))]
-use core::arch::asm;
-
-pub(super) use core::sync::atomic;
-
-pub(super) type State = u32;
-
-/// Disables interrupts and returns the previous interrupt state.
-#[inline]
-pub(super) fn disable() -> State {
- let r: State;
- // SAFETY: reading the priority mask register and disabling interrupts are safe.
- // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
- unsafe {
-        // Do not use `nomem` and `readonly`, to prevent subsequent memory accesses from being reordered before interrupts are disabled.
- asm!(
- "mrs {0}, PRIMASK",
- "cpsid i",
- out(reg) r,
- options(nostack, preserves_flags),
- );
- }
- r
-}
-
-/// Restores the previous interrupt state.
-///
-/// # Safety
-///
-/// The state must be the one retrieved by the previous `disable`.
-#[inline]
-pub(super) unsafe fn restore(r: State) {
- if r & 0x1 == 0 {
- // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`,
- // and we've checked that interrupts were enabled before disabling interrupts.
- unsafe {
-            // Do not use `nomem` and `readonly`, to prevent preceding memory accesses from being reordered after interrupts are enabled.
- asm!("cpsie i", options(nostack, preserves_flags));
- }
- }
-}
diff --git a/vendor/portable-atomic/src/imp/interrupt/avr.rs b/vendor/portable-atomic/src/imp/interrupt/avr.rs
deleted file mode 100644
index 76d99c1..0000000
--- a/vendor/portable-atomic/src/imp/interrupt/avr.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-// Adapted from https://github.com/Rahix/avr-device.
-//
-// Refs:
-// - AVR Instruction Set Manual https://ww1.microchip.com/downloads/en/DeviceDoc/AVR-InstructionSet-Manual-DS40002198.pdf
-
-#[cfg(not(portable_atomic_no_asm))]
-use core::arch::asm;
-
-pub(super) type State = u8;
-
-/// Disables interrupts and returns the previous interrupt state.
-#[inline]
-pub(super) fn disable() -> State {
- let sreg: State;
- // SAFETY: reading the status register (SREG) and disabling interrupts are safe.
- // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
- unsafe {
-        // Do not use `nomem` and `readonly`, to prevent subsequent memory accesses from being reordered before interrupts are disabled.
- // Do not use `preserves_flags` because CLI modifies the I bit of the status register (SREG).
- // Refs: https://ww1.microchip.com/downloads/en/DeviceDoc/AVR-InstructionSet-Manual-DS40002198.pdf#page=58
- #[cfg(not(portable_atomic_no_asm))]
- asm!(
- "in {0}, 0x3F",
- "cli",
- out(reg) sreg,
- options(nostack),
- );
- #[cfg(portable_atomic_no_asm)]
- {
- llvm_asm!("in $0, 0x3F" : "=r"(sreg) ::: "volatile");
- llvm_asm!("cli" ::: "memory" : "volatile");
- }
- }
- sreg
-}
-
-/// Restores the previous interrupt state.
-///
-/// # Safety
-///
-/// The state must be the one retrieved by the previous `disable`.
-#[inline]
-pub(super) unsafe fn restore(sreg: State) {
-    // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`.
- unsafe {
-        // This clobbers the entire status register. See msp430.rs for notes on the safety of this.
- //
-        // Do not use `nomem` and `readonly`, to prevent preceding memory accesses from being reordered after interrupts are enabled.
- // Do not use `preserves_flags` because OUT modifies the status register (SREG).
- #[cfg(not(portable_atomic_no_asm))]
- asm!("out 0x3F, {0}", in(reg) sreg, options(nostack));
- #[cfg(portable_atomic_no_asm)]
- llvm_asm!("out 0x3F, $0" :: "r"(sreg) : "memory" : "volatile");
- }
-}
diff --git a/vendor/portable-atomic/src/imp/interrupt/mod.rs b/vendor/portable-atomic/src/imp/interrupt/mod.rs
deleted file mode 100644
index e0ed0f6..0000000
--- a/vendor/portable-atomic/src/imp/interrupt/mod.rs
+++ /dev/null
@@ -1,903 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-// Critical section based fallback implementations
-//
-// This module supports two different critical section implementations:
-// - Built-in "disable all interrupts".
-// - Call into the `critical-section` crate (which allows the user to plug any implementation).
-//
-// The `critical-section`-based fallback is enabled when the user asks for it with the `critical-section`
-// Cargo feature.
-//
-// The "disable interrupts" fallback is not sound on multi-core systems.
-// Also, this uses privileged instructions to disable interrupts, so it usually
-// doesn't work in unprivileged mode. Using this fallback in an environment where
-// instructions are not available is also usually considered **unsound**,
-// although the details are system-dependent.
-//
-// Therefore, this implementation will only be enabled in one of the following cases:
-//
-// - When the user explicitly declares that the system is single-core and that
-// privileged instructions are available using an unsafe cfg.
-// - When we can safely assume that the system is single-core and that
-// privileged instructions are available on the system.
-//
-// AVR, which is single-core[^avr1] and for which LLVM also generates code that
-// disables interrupts in atomic ops by default[^avr2], is considered the latter.
-// MSP430 is as well.
-//
-// See also README.md of this directory.
-//
-// [^avr1]: https://github.com/llvm/llvm-project/blob/llvmorg-17.0.0-rc2/llvm/lib/Target/AVR/AVRExpandPseudoInsts.cpp#L1074
-// [^avr2]: https://github.com/llvm/llvm-project/blob/llvmorg-17.0.0-rc2/llvm/test/CodeGen/AVR/atomics/load16.ll#L5
-
-// On some platforms, atomic load/store can be implemented in a more efficient
-// way than disabling interrupts. On MSP430, some RMWs that do not return the
-// previous value can also be optimized.
-//
-// Note: On single-core systems, it is okay to use critical section-based
-// CAS together with atomic load/store. The load/store will not be
-// called while interrupts are disabled, and since the load/store is
-// atomic, it is not affected by interrupts even if interrupts are enabled.
-#[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
-use arch::atomic;
-
-#[cfg(not(feature = "critical-section"))]
-#[cfg_attr(
- all(
- target_arch = "arm",
- any(target_feature = "mclass", portable_atomic_target_feature = "mclass"),
- ),
- path = "armv6m.rs"
-)]
-#[cfg_attr(
- all(
- target_arch = "arm",
- not(any(target_feature = "mclass", portable_atomic_target_feature = "mclass")),
- ),
- path = "armv4t.rs"
-)]
-#[cfg_attr(target_arch = "avr", path = "avr.rs")]
-#[cfg_attr(target_arch = "msp430", path = "msp430.rs")]
-#[cfg_attr(any(target_arch = "riscv32", target_arch = "riscv64"), path = "riscv.rs")]
-#[cfg_attr(target_arch = "xtensa", path = "xtensa.rs")]
-mod arch;
-
-use core::{cell::UnsafeCell, sync::atomic::Ordering};
-
-// Critical section implementations might use locks internally.
-#[cfg(feature = "critical-section")]
-const IS_ALWAYS_LOCK_FREE: bool = false;
-
-// We consider atomic operations based on disabling interrupts on single-core
-// systems to be lock-free. (We also consider the atomic operations that the
-// Linux kernel provides in a similar way on pre-v6 ARM to be lock-free.)
-#[cfg(not(feature = "critical-section"))]
-const IS_ALWAYS_LOCK_FREE: bool = true;
-
-#[cfg(feature = "critical-section")]
-#[inline]
-fn with<F, R>(f: F) -> R
-where
- F: FnOnce() -> R,
-{
- critical_section::with(|_| f())
-}
-
-#[cfg(not(feature = "critical-section"))]
-#[inline]
-fn with<F, R>(f: F) -> R
-where
- F: FnOnce() -> R,
-{
- // Get current interrupt state and disable interrupts
- let state = arch::disable();
-
- let r = f();
-
- // Restore interrupt state
- // SAFETY: the state was retrieved by the previous `disable`.
- unsafe { arch::restore(state) }
-
- r
-}
-
-#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
-#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
-#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
-#[cfg_attr(target_pointer_width = "128", repr(C, align(16)))]
-pub(crate) struct AtomicPtr<T> {
- p: UnsafeCell<*mut T>,
-}
-
-// SAFETY: any data races are prevented by disabling interrupts or
-// atomic intrinsics (see module-level comments).
-unsafe impl<T> Send for AtomicPtr<T> {}
-// SAFETY: any data races are prevented by disabling interrupts or
-// atomic intrinsics (see module-level comments).
-unsafe impl<T> Sync for AtomicPtr<T> {}
-
-impl<T> AtomicPtr<T> {
- #[inline]
- pub(crate) const fn new(p: *mut T) -> Self {
- Self { p: UnsafeCell::new(p) }
- }
-
- #[inline]
- pub(crate) fn is_lock_free() -> bool {
- Self::is_always_lock_free()
- }
- #[inline]
- pub(crate) const fn is_always_lock_free() -> bool {
- IS_ALWAYS_LOCK_FREE
- }
-
- #[inline]
- pub(crate) fn get_mut(&mut self) -> &mut *mut T {
- // SAFETY: the mutable reference guarantees unique ownership.
- // (UnsafeCell::get_mut requires Rust 1.50)
- unsafe { &mut *self.p.get() }
- }
-
- #[inline]
- pub(crate) fn into_inner(self) -> *mut T {
- self.p.into_inner()
- }
-
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn load(&self, order: Ordering) -> *mut T {
- crate::utils::assert_load_ordering(order);
- #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
- {
- self.as_native().load(order)
- }
- #[cfg(any(target_arch = "avr", feature = "critical-section"))]
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe { self.p.get().read() })
- }
-
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn store(&self, ptr: *mut T, order: Ordering) {
- crate::utils::assert_store_ordering(order);
- #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
- {
- self.as_native().store(ptr, order);
- }
- #[cfg(any(target_arch = "avr", feature = "critical-section"))]
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe { self.p.get().write(ptr) });
- }
-
- #[inline]
- pub(crate) fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
- let _ = order;
- #[cfg(portable_atomic_force_amo)]
- {
- self.as_native().swap(ptr, order)
- }
- #[cfg(not(portable_atomic_force_amo))]
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.p.get().read();
- self.p.get().write(ptr);
- prev
- })
- }
-
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn compare_exchange(
- &self,
- current: *mut T,
- new: *mut T,
- success: Ordering,
- failure: Ordering,
- ) -> Result<*mut T, *mut T> {
- crate::utils::assert_compare_exchange_ordering(success, failure);
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.p.get().read();
- if prev == current {
- self.p.get().write(new);
- Ok(prev)
- } else {
- Err(prev)
- }
- })
- }
-
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn compare_exchange_weak(
- &self,
- current: *mut T,
- new: *mut T,
- success: Ordering,
- failure: Ordering,
- ) -> Result<*mut T, *mut T> {
- self.compare_exchange(current, new, success, failure)
- }
-
- #[inline]
- pub(crate) const fn as_ptr(&self) -> *mut *mut T {
- self.p.get()
- }
-
- #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
- #[inline]
- fn as_native(&self) -> &atomic::AtomicPtr<T> {
- // SAFETY: AtomicPtr and atomic::AtomicPtr have the same layout and
- // guarantee atomicity in a compatible way. (see module-level comments)
- unsafe { &*(self as *const Self as *const atomic::AtomicPtr<T>) }
- }
-}
-
-macro_rules! atomic_int {
- (base, $atomic_type:ident, $int_type:ident, $align:literal) => {
- #[repr(C, align($align))]
- pub(crate) struct $atomic_type {
- v: UnsafeCell<$int_type>,
- }
-
- // Send is implicitly implemented.
- // SAFETY: any data races are prevented by disabling interrupts or
- // atomic intrinsics (see module-level comments).
- unsafe impl Sync for $atomic_type {}
-
- impl $atomic_type {
- #[inline]
- pub(crate) const fn new(v: $int_type) -> Self {
- Self { v: UnsafeCell::new(v) }
- }
-
- #[inline]
- pub(crate) fn is_lock_free() -> bool {
- Self::is_always_lock_free()
- }
- #[inline]
- pub(crate) const fn is_always_lock_free() -> bool {
- IS_ALWAYS_LOCK_FREE
- }
-
- #[inline]
- pub(crate) fn get_mut(&mut self) -> &mut $int_type {
- // SAFETY: the mutable reference guarantees unique ownership.
- // (UnsafeCell::get_mut requires Rust 1.50)
- unsafe { &mut *self.v.get() }
- }
-
- #[inline]
- pub(crate) fn into_inner(self) -> $int_type {
- self.v.into_inner()
- }
-
- #[inline]
- pub(crate) const fn as_ptr(&self) -> *mut $int_type {
- self.v.get()
- }
- }
- };
- (load_store_atomic $([$kind:ident])?, $atomic_type:ident, $int_type:ident, $align:literal) => {
- atomic_int!(base, $atomic_type, $int_type, $align);
- #[cfg(not(portable_atomic_force_amo))]
- atomic_int!(cas[emulate], $atomic_type, $int_type);
- #[cfg(portable_atomic_force_amo)]
- atomic_int!(cas $([$kind])?, $atomic_type, $int_type);
- impl $atomic_type {
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn load(&self, order: Ordering) -> $int_type {
- crate::utils::assert_load_ordering(order);
- #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
- {
- self.as_native().load(order)
- }
- #[cfg(any(target_arch = "avr", feature = "critical-section"))]
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe { self.v.get().read() })
- }
-
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn store(&self, val: $int_type, order: Ordering) {
- crate::utils::assert_store_ordering(order);
- #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
- {
- self.as_native().store(val, order);
- }
- #[cfg(any(target_arch = "avr", feature = "critical-section"))]
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe { self.v.get().write(val) });
- }
-
- #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
- #[inline]
- fn as_native(&self) -> &atomic::$atomic_type {
- // SAFETY: $atomic_type and atomic::$atomic_type have the same layout and
- // guarantee atomicity in a compatible way. (see module-level comments)
- unsafe { &*(self as *const Self as *const atomic::$atomic_type) }
- }
- }
-
- #[cfg(not(all(target_arch = "msp430", not(feature = "critical-section"))))]
- impl_default_no_fetch_ops!($atomic_type, $int_type);
- impl_default_bit_opts!($atomic_type, $int_type);
- #[cfg(not(all(target_arch = "msp430", not(feature = "critical-section"))))]
- impl $atomic_type {
- #[inline]
- pub(crate) fn not(&self, order: Ordering) {
- self.fetch_not(order);
- }
- }
- #[cfg(all(target_arch = "msp430", not(feature = "critical-section")))]
- impl $atomic_type {
- #[inline]
- pub(crate) fn add(&self, val: $int_type, order: Ordering) {
- self.as_native().add(val, order);
- }
- #[inline]
- pub(crate) fn sub(&self, val: $int_type, order: Ordering) {
- self.as_native().sub(val, order);
- }
- #[inline]
- pub(crate) fn and(&self, val: $int_type, order: Ordering) {
- self.as_native().and(val, order);
- }
- #[inline]
- pub(crate) fn or(&self, val: $int_type, order: Ordering) {
- self.as_native().or(val, order);
- }
- #[inline]
- pub(crate) fn xor(&self, val: $int_type, order: Ordering) {
- self.as_native().xor(val, order);
- }
- #[inline]
- pub(crate) fn not(&self, order: Ordering) {
- self.as_native().not(order);
- }
- }
- };
- (load_store_critical_session, $atomic_type:ident, $int_type:ident, $align:literal) => {
- atomic_int!(base, $atomic_type, $int_type, $align);
- atomic_int!(cas[emulate], $atomic_type, $int_type);
- impl_default_no_fetch_ops!($atomic_type, $int_type);
- impl_default_bit_opts!($atomic_type, $int_type);
- impl $atomic_type {
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn load(&self, order: Ordering) -> $int_type {
- crate::utils::assert_load_ordering(order);
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe { self.v.get().read() })
- }
-
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn store(&self, val: $int_type, order: Ordering) {
- crate::utils::assert_store_ordering(order);
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe { self.v.get().write(val) });
- }
-
- #[inline]
- pub(crate) fn not(&self, order: Ordering) {
- self.fetch_not(order);
- }
- }
- };
- (cas[emulate], $atomic_type:ident, $int_type:ident) => {
- impl $atomic_type {
- #[inline]
- pub(crate) fn swap(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(val);
- prev
- })
- }
-
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn compare_exchange(
- &self,
- current: $int_type,
- new: $int_type,
- success: Ordering,
- failure: Ordering,
- ) -> Result<$int_type, $int_type> {
- crate::utils::assert_compare_exchange_ordering(success, failure);
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- if prev == current {
- self.v.get().write(new);
- Ok(prev)
- } else {
- Err(prev)
- }
- })
- }
-
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn compare_exchange_weak(
- &self,
- current: $int_type,
- new: $int_type,
- success: Ordering,
- failure: Ordering,
- ) -> Result<$int_type, $int_type> {
- self.compare_exchange(current, new, success, failure)
- }
-
- #[inline]
- pub(crate) fn fetch_add(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(prev.wrapping_add(val));
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_sub(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(prev.wrapping_sub(val));
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_and(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(prev & val);
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_nand(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(!(prev & val));
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_or(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(prev | val);
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_xor(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(prev ^ val);
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_max(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(core::cmp::max(prev, val));
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_min(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(core::cmp::min(prev, val));
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_not(&self, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(!prev);
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(prev.wrapping_neg());
- prev
- })
- }
- #[inline]
- pub(crate) fn neg(&self, order: Ordering) {
- self.fetch_neg(order);
- }
- }
- };
- // cfg(portable_atomic_force_amo) 32-bit(RV32)/{32,64}-bit(RV64) RMW
- (cas, $atomic_type:ident, $int_type:ident) => {
- impl $atomic_type {
- #[inline]
- pub(crate) fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
- self.as_native().swap(val, order)
- }
-
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn compare_exchange(
- &self,
- current: $int_type,
- new: $int_type,
- success: Ordering,
- failure: Ordering,
- ) -> Result<$int_type, $int_type> {
- crate::utils::assert_compare_exchange_ordering(success, failure);
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- if prev == current {
- self.v.get().write(new);
- Ok(prev)
- } else {
- Err(prev)
- }
- })
- }
-
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn compare_exchange_weak(
- &self,
- current: $int_type,
- new: $int_type,
- success: Ordering,
- failure: Ordering,
- ) -> Result<$int_type, $int_type> {
- self.compare_exchange(current, new, success, failure)
- }
-
- #[inline]
- pub(crate) fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
- self.as_native().fetch_add(val, order)
- }
- #[inline]
- pub(crate) fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
- self.as_native().fetch_sub(val, order)
- }
- #[inline]
- pub(crate) fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
- self.as_native().fetch_and(val, order)
- }
-
- #[inline]
- pub(crate) fn fetch_nand(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(!(prev & val));
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
- self.as_native().fetch_or(val, order)
- }
- #[inline]
- pub(crate) fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
- self.as_native().fetch_xor(val, order)
- }
- #[inline]
- pub(crate) fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
- self.as_native().fetch_max(val, order)
- }
- #[inline]
- pub(crate) fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
- self.as_native().fetch_min(val, order)
- }
- #[inline]
- pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
- self.as_native().fetch_not(order)
- }
-
- #[inline]
- pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(prev.wrapping_neg());
- prev
- })
- }
- #[inline]
- pub(crate) fn neg(&self, order: Ordering) {
- self.fetch_neg(order);
- }
- }
- };
- // cfg(portable_atomic_force_amo) {8,16}-bit RMW
- (cas[sub_word], $atomic_type:ident, $int_type:ident) => {
- impl $atomic_type {
- #[inline]
- pub(crate) fn swap(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(val);
- prev
- })
- }
-
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn compare_exchange(
- &self,
- current: $int_type,
- new: $int_type,
- success: Ordering,
- failure: Ordering,
- ) -> Result<$int_type, $int_type> {
- crate::utils::assert_compare_exchange_ordering(success, failure);
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- if prev == current {
- self.v.get().write(new);
- Ok(prev)
- } else {
- Err(prev)
- }
- })
- }
-
- #[inline]
- #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
- pub(crate) fn compare_exchange_weak(
- &self,
- current: $int_type,
- new: $int_type,
- success: Ordering,
- failure: Ordering,
- ) -> Result<$int_type, $int_type> {
- self.compare_exchange(current, new, success, failure)
- }
-
- #[inline]
- pub(crate) fn fetch_add(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(prev.wrapping_add(val));
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_sub(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(prev.wrapping_sub(val));
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
- self.as_native().fetch_and(val, order)
- }
-
- #[inline]
- pub(crate) fn fetch_nand(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(!(prev & val));
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
- self.as_native().fetch_or(val, order)
- }
- #[inline]
- pub(crate) fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
- self.as_native().fetch_xor(val, order)
- }
-
- #[inline]
- pub(crate) fn fetch_max(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(core::cmp::max(prev, val));
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_min(&self, val: $int_type, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(core::cmp::min(prev, val));
- prev
- })
- }
-
- #[inline]
- pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
- self.as_native().fetch_not(order)
- }
-
- #[inline]
- pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type {
- // SAFETY: any data races are prevented by disabling interrupts (see
- // module-level comments) and the raw pointer is valid because we got it
- // from a reference.
- with(|| unsafe {
- let prev = self.v.get().read();
- self.v.get().write(prev.wrapping_neg());
- prev
- })
- }
- #[inline]
- pub(crate) fn neg(&self, order: Ordering) {
- self.fetch_neg(order);
- }
- }
- };
-}
-
-#[cfg(target_pointer_width = "16")]
-atomic_int!(load_store_atomic, AtomicIsize, isize, 2);
-#[cfg(target_pointer_width = "16")]
-atomic_int!(load_store_atomic, AtomicUsize, usize, 2);
-#[cfg(target_pointer_width = "32")]
-atomic_int!(load_store_atomic, AtomicIsize, isize, 4);
-#[cfg(target_pointer_width = "32")]
-atomic_int!(load_store_atomic, AtomicUsize, usize, 4);
-#[cfg(target_pointer_width = "64")]
-atomic_int!(load_store_atomic, AtomicIsize, isize, 8);
-#[cfg(target_pointer_width = "64")]
-atomic_int!(load_store_atomic, AtomicUsize, usize, 8);
-#[cfg(target_pointer_width = "128")]
-atomic_int!(load_store_atomic, AtomicIsize, isize, 16);
-#[cfg(target_pointer_width = "128")]
-atomic_int!(load_store_atomic, AtomicUsize, usize, 16);
-
-atomic_int!(load_store_atomic[sub_word], AtomicI8, i8, 1);
-atomic_int!(load_store_atomic[sub_word], AtomicU8, u8, 1);
-atomic_int!(load_store_atomic[sub_word], AtomicI16, i16, 2);
-atomic_int!(load_store_atomic[sub_word], AtomicU16, u16, 2);
-
-#[cfg(not(target_pointer_width = "16"))]
-atomic_int!(load_store_atomic, AtomicI32, i32, 4);
-#[cfg(not(target_pointer_width = "16"))]
-atomic_int!(load_store_atomic, AtomicU32, u32, 4);
-#[cfg(target_pointer_width = "16")]
-#[cfg(any(test, feature = "fallback"))]
-atomic_int!(load_store_critical_session, AtomicI32, i32, 4);
-#[cfg(target_pointer_width = "16")]
-#[cfg(any(test, feature = "fallback"))]
-atomic_int!(load_store_critical_session, AtomicU32, u32, 4);
-
-#[cfg(not(any(target_pointer_width = "16", target_pointer_width = "32")))]
-atomic_int!(load_store_atomic, AtomicI64, i64, 8);
-#[cfg(not(any(target_pointer_width = "16", target_pointer_width = "32")))]
-atomic_int!(load_store_atomic, AtomicU64, u64, 8);
-#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))]
-#[cfg(any(test, feature = "fallback"))]
-atomic_int!(load_store_critical_session, AtomicI64, i64, 8);
-#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))]
-#[cfg(any(test, feature = "fallback"))]
-atomic_int!(load_store_critical_session, AtomicU64, u64, 8);
-
-#[cfg(any(test, feature = "fallback"))]
-atomic_int!(load_store_critical_session, AtomicI128, i128, 16);
-#[cfg(any(test, feature = "fallback"))]
-atomic_int!(load_store_critical_session, AtomicU128, u128, 16);
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- test_atomic_ptr_single_thread!();
- test_atomic_int_single_thread!(i8);
- test_atomic_int_single_thread!(u8);
- test_atomic_int_single_thread!(i16);
- test_atomic_int_single_thread!(u16);
- test_atomic_int_single_thread!(i32);
- test_atomic_int_single_thread!(u32);
- test_atomic_int_single_thread!(i64);
- test_atomic_int_single_thread!(u64);
- test_atomic_int_single_thread!(i128);
- test_atomic_int_single_thread!(u128);
- test_atomic_int_single_thread!(isize);
- test_atomic_int_single_thread!(usize);
-}
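
As a usage sketch (illustrative; it assumes the crate's public `AtomicUsize`/`Ordering` re-exports, which on the targets covered here forward to the fallback types above), a CAS loop built on the emulated compare_exchange looks like ordinary atomic code, and since each attempt runs with interrupts disabled on this single-core fallback, it normally succeeds on the first try:

use portable_atomic::{AtomicUsize, Ordering};

fn increment(counter: &AtomicUsize) -> usize {
    let mut cur = counter.load(Ordering::Relaxed);
    loop {
        // compare_exchange_weak maps to the emulated compare_exchange above.
        match counter.compare_exchange_weak(
            cur,
            cur.wrapping_add(1),
            Ordering::Relaxed,
            Ordering::Relaxed,
        ) {
            Ok(prev) => return prev,
            Err(actual) => cur = actual,
        }
    }
}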
diff --git a/vendor/portable-atomic/src/imp/interrupt/msp430.rs b/vendor/portable-atomic/src/imp/interrupt/msp430.rs
deleted file mode 100644
index 8c1ca80..0000000
--- a/vendor/portable-atomic/src/imp/interrupt/msp430.rs
+++ /dev/null
@@ -1,64 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-// Adapted from https://github.com/rust-embedded/msp430.
-//
-// See also src/imp/msp430.rs.
-//
-// Refs: https://www.ti.com/lit/ug/slau208q/slau208q.pdf
-
-#[cfg(not(portable_atomic_no_asm))]
-use core::arch::asm;
-
-pub(super) use super::super::msp430 as atomic;
-
-pub(super) type State = u16;
-
-/// Disables interrupts and returns the previous interrupt state.
-#[inline]
-pub(super) fn disable() -> State {
- let r: State;
- // SAFETY: reading the status register and disabling interrupts are safe.
- // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
- unsafe {
-        // Do not use `nomem` and `readonly`, to prevent subsequent memory accesses from being reordered before interrupts are disabled.
- // Do not use `preserves_flags` because DINT modifies the GIE (global interrupt enable) bit of the status register.
- #[cfg(not(portable_atomic_no_asm))]
- asm!(
- "mov R2, {0}",
- "dint {{ nop",
- out(reg) r,
- options(nostack),
- );
- #[cfg(portable_atomic_no_asm)]
- {
- llvm_asm!("mov R2, $0" : "=r"(r) ::: "volatile");
- llvm_asm!("dint { nop" ::: "memory" : "volatile");
- }
- }
- r
-}
-
-/// Restores the previous interrupt state.
-///
-/// # Safety
-///
-/// The state must be the one retrieved by the previous `disable`.
-#[inline]
-pub(super) unsafe fn restore(r: State) {
-    // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`.
- unsafe {
- // This clobbers the entire status register, but we never explicitly modify
-        // flags within a critical section, and the only flags that may be changed
-        // within a critical section are the arithmetic flags that are changed as
- // a side effect of arithmetic operations, etc., which LLVM recognizes,
- // so it is safe to clobber them here.
- // See also the discussion at https://github.com/taiki-e/portable-atomic/pull/40.
- //
-        // Do not use `nomem` and `readonly`, to prevent preceding memory accesses from being reordered after interrupts are enabled.
- // Do not use `preserves_flags` because MOV modifies the status register.
- #[cfg(not(portable_atomic_no_asm))]
- asm!("nop {{ mov {0}, R2 {{ nop", in(reg) r, options(nostack));
- #[cfg(portable_atomic_no_asm)]
- llvm_asm!("nop { mov $0, R2 { nop" :: "r"(r) : "memory" : "volatile");
- }
-}
diff --git a/vendor/portable-atomic/src/imp/interrupt/riscv.rs b/vendor/portable-atomic/src/imp/interrupt/riscv.rs
deleted file mode 100644
index 65b1af2..0000000
--- a/vendor/portable-atomic/src/imp/interrupt/riscv.rs
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-// Refs:
-// - https://five-embeddev.com/riscv-isa-manual/latest/machine.html#machine-status-registers-mstatus-and-mstatush
-// - https://five-embeddev.com/riscv-isa-manual/latest/supervisor.html#sstatus
-//
-// Generated asm:
-// - riscv64gc https://godbolt.org/z/osbzsT679
-
-#[cfg(not(portable_atomic_no_asm))]
-use core::arch::asm;
-
-pub(super) use super::super::riscv as atomic;
-
-// Status register
-#[cfg(not(portable_atomic_s_mode))]
-macro_rules! status {
- () => {
- "mstatus"
- };
-}
-#[cfg(portable_atomic_s_mode)]
-macro_rules! status {
- () => {
- "sstatus"
- };
-}
-
-// MIE (Machine Interrupt Enable) bit (1 << 3)
-#[cfg(not(portable_atomic_s_mode))]
-const MASK: State = 0x8;
-#[cfg(not(portable_atomic_s_mode))]
-macro_rules! mask {
- () => {
- "0x8"
- };
-}
-// SIE (Supervisor Interrupt Enable) bit (1 << 1)
-#[cfg(portable_atomic_s_mode)]
-const MASK: State = 0x2;
-#[cfg(portable_atomic_s_mode)]
-macro_rules! mask {
- () => {
- "0x2"
- };
-}
-
-#[cfg(target_arch = "riscv32")]
-pub(super) type State = u32;
-#[cfg(target_arch = "riscv64")]
-pub(super) type State = u64;
-
-/// Disables interrupts and returns the previous interrupt state.
-#[inline]
-pub(super) fn disable() -> State {
- let r: State;
- // SAFETY: reading mstatus and disabling interrupts is safe.
- // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
- unsafe {
-        // Do not use `nomem` and `readonly`, to prevent subsequent memory accesses from being reordered before interrupts are disabled.
- asm!(concat!("csrrci {0}, ", status!(), ", ", mask!()), out(reg) r, options(nostack, preserves_flags));
- }
- r
-}
-
-/// Restores the previous interrupt state.
-///
-/// # Safety
-///
-/// The state must be the one retrieved by the previous `disable`.
-#[inline]
-pub(super) unsafe fn restore(r: State) {
- if r & MASK != 0 {
- // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`,
- // and we've checked that interrupts were enabled before disabling interrupts.
- unsafe {
-            // Do not use `nomem` and `readonly`, to prevent preceding memory accesses from being reordered after interrupts are enabled.
- asm!(concat!("csrsi ", status!(), ", ", mask!()), options(nostack, preserves_flags));
- }
- }
-}
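
For reference, a sketch of what the CSR instructions used above do (illustrative pseudocode of the ISA semantics, shown for the default M-mode configuration, i.e. `mstatus` and the MIE mask 0x8):

// csrrci {0}, mstatus, 0x8   -- used in `disable`
//   r       = mstatus;            // old value, including the MIE bit
//   mstatus = mstatus & !0x8;     // clear MIE: machine-level interrupts masked
//
// csrsi mstatus, 0x8          -- used in `restore` (only when MIE was previously set)
//   mstatus = mstatus | 0x8;      // set MIE: machine-level interrupts re-enabled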
diff --git a/vendor/portable-atomic/src/imp/interrupt/xtensa.rs b/vendor/portable-atomic/src/imp/interrupt/xtensa.rs
deleted file mode 100644
index bc6d117..0000000
--- a/vendor/portable-atomic/src/imp/interrupt/xtensa.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: Apache-2.0 OR MIT
-
-// Refs:
-// - Xtensa Instruction Set Architecture (ISA) Reference Manual https://0x04.net/~mwk/doc/xtensa.pdf
-// - Linux kernel's Xtensa atomic implementation https://github.com/torvalds/linux/blob/v6.1/arch/xtensa/include/asm/atomic.h
-
-use core::arch::asm;
-
-pub(super) use core::sync::atomic;
-
-pub(super) type State = u32;
-
-/// Disables interrupts and returns the previous interrupt state.
-#[inline]
-pub(super) fn disable() -> State {
- let r: State;
- // SAFETY: reading the PS special register and disabling all interrupts is safe.
- // (see module-level comments of interrupt/mod.rs on the safety of using privileged instructions)
- unsafe {
-        // Do not use `nomem` and `readonly`, to prevent subsequent memory accesses from being reordered before interrupts are disabled.
- // Interrupt level 15 to disable all interrupts.
- // SYNC after RSIL is not required.
- asm!("rsil {0}, 15", out(reg) r, options(nostack));
- }
- r
-}
-
-/// Restores the previous interrupt state.
-///
-/// # Safety
-///
-/// The state must be the one retrieved by the previous `disable`.
-#[inline]
-pub(super) unsafe fn restore(r: State) {
-    // SAFETY: the caller must guarantee that the state was retrieved by the previous `disable`;
-    // writing the saved PS value back restores the previous interrupt level unconditionally.
- unsafe {
-        // Do not use `nomem` and `readonly`, to prevent preceding memory accesses from being reordered after interrupts are enabled.
-        // SYNC after WSR is required to guarantee that a subsequent RSIL reads the written value.
- asm!(
- "wsr.ps {0}",
- "rsync",
- in(reg) r,
- options(nostack),
- );
- }
-}