summaryrefslogtreecommitdiff
path: root/vendor/portable-atomic/src/imp/x86.rs
diff options
context:
space:
mode:
authorValentin Popov <valentin@popov.link>2024-01-08 00:21:28 +0300
committerValentin Popov <valentin@popov.link>2024-01-08 00:21:28 +0300
commit1b6a04ca5504955c571d1c97504fb45ea0befee4 (patch)
tree7579f518b23313e8a9748a88ab6173d5e030b227 /vendor/portable-atomic/src/imp/x86.rs
parent5ecd8cf2cba827454317368b68571df0d13d7842 (diff)
downloadfparkan-1b6a04ca5504955c571d1c97504fb45ea0befee4.tar.xz
fparkan-1b6a04ca5504955c571d1c97504fb45ea0befee4.zip
Initial vendor packages
Signed-off-by: Valentin Popov <valentin@popov.link>
Diffstat (limited to 'vendor/portable-atomic/src/imp/x86.rs')
-rw-r--r--vendor/portable-atomic/src/imp/x86.rs227
1 file changed, 227 insertions, 0 deletions
diff --git a/vendor/portable-atomic/src/imp/x86.rs b/vendor/portable-atomic/src/imp/x86.rs
new file mode 100644
index 0000000..9ef4d74
--- /dev/null
+++ b/vendor/portable-atomic/src/imp/x86.rs
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// Atomic operations implementation on x86/x86_64.
+//
+// This module provides atomic operations not supported by LLVM or optimizes
+// cases where LLVM code generation is not optimal.
+//
+// Note: On Miri and ThreadSanitizer which do not support inline assembly, we don't use
+// this module and use CAS loop instead.
+//
+// Refs:
+// - x86 and amd64 instruction reference https://www.felixcloutier.com/x86
+//
+// Generated asm:
+// - x86_64 https://godbolt.org/z/d17eTs5Ec
+
+use core::{arch::asm, sync::atomic::Ordering};
+
+use super::core_atomic::{
+ AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, AtomicU32, AtomicU64,
+ AtomicU8, AtomicUsize,
+};
+
+// Asm template modifier appended to the `dst` register operand so that the
+// pointer register is formatted at the target's pointer width:
+// on 32-bit targets, `:e` forces the 32-bit register name (e.g. `eax`);
+// on 64-bit targets, no modifier is needed (the full 64-bit name is the default).
+#[cfg(target_pointer_width = "32")]
+macro_rules! ptr_modifier {
+    () => {
+        ":e"
+    };
+}
+#[cfg(target_pointer_width = "64")]
+macro_rules! ptr_modifier {
+    () => {
+        ""
+    };
+}
+
+// Implements in-place atomic `not` and `neg` for `$atomic_type` using a single
+// `lock`-prefixed RMW instruction on the memory operand.
+// `$ptr_size` is the asm operand-size keyword matching the integer width
+// ("byte" / "word" / "dword" / "qword").
+macro_rules! atomic_int {
+    ($atomic_type:ident, $ptr_size:tt) => {
+        impl $atomic_type {
+            // Atomically bitwise-inverts the value in place.
+            // `_order` is ignored: the `lock`-prefixed RMW below is always SeqCst.
+            #[inline]
+            pub(crate) fn not(&self, _order: Ordering) {
+                let dst = self.as_ptr();
+                // SAFETY: any data races are prevented by atomic intrinsics and the raw
+                // pointer passed in is valid because we got it from a reference.
+                //
+                // https://www.felixcloutier.com/x86/not
+                unsafe {
+                    // atomic RMW is always SeqCst.
+                    asm!(
+                        concat!("lock not ", $ptr_size, " ptr [{dst", ptr_modifier!(), "}]"),
+                        dst = in(reg) dst,
+                        // NOT does not affect any flags, so `preserves_flags` is sound here.
+                        options(nostack, preserves_flags),
+                    );
+                }
+            }
+            // Atomically negates (two's complement) the value in place.
+            // `_order` is ignored: the `lock`-prefixed RMW below is always SeqCst.
+            #[inline]
+            pub(crate) fn neg(&self, _order: Ordering) {
+                let dst = self.as_ptr();
+                // SAFETY: any data races are prevented by atomic intrinsics and the raw
+                // pointer passed in is valid because we got it from a reference.
+                //
+                // https://www.felixcloutier.com/x86/neg
+                unsafe {
+                    // atomic RMW is always SeqCst.
+                    asm!(
+                        concat!("lock neg ", $ptr_size, " ptr [{dst", ptr_modifier!(), "}]"),
+                        dst = in(reg) dst,
+                        // Do not use `preserves_flags` because NEG modifies the CF, OF, SF, ZF, AF, and PF flag.
+                        options(nostack),
+                    );
+                }
+            }
+        }
+    };
+}
+
+// Instantiate `not`/`neg` for each integer width. The 64-bit (qword) variants
+// are only emitted on x86_64; on 32-bit x86 the fallback impls below are used
+// instead. Isize/Usize pick dword or qword based on the pointer width.
+atomic_int!(AtomicI8, "byte");
+atomic_int!(AtomicU8, "byte");
+atomic_int!(AtomicI16, "word");
+atomic_int!(AtomicU16, "word");
+atomic_int!(AtomicI32, "dword");
+atomic_int!(AtomicU32, "dword");
+#[cfg(target_arch = "x86_64")]
+atomic_int!(AtomicI64, "qword");
+#[cfg(target_arch = "x86_64")]
+atomic_int!(AtomicU64, "qword");
+#[cfg(target_pointer_width = "32")]
+atomic_int!(AtomicIsize, "dword");
+#[cfg(target_pointer_width = "32")]
+atomic_int!(AtomicUsize, "dword");
+#[cfg(target_pointer_width = "64")]
+atomic_int!(AtomicIsize, "qword");
+#[cfg(target_pointer_width = "64")]
+atomic_int!(AtomicUsize, "qword");
+
+// On 32-bit x86 the single-instruction asm above cannot operate on 64-bit
+// memory operands, so `not`/`neg` delegate to `fetch_not`/`fetch_neg`
+// (presumably the generic CAS-loop-based implementations — defined outside
+// this module; see core_atomic/fallback), discarding the returned value.
+#[cfg(target_arch = "x86")]
+impl AtomicI64 {
+    #[inline]
+    pub(crate) fn not(&self, order: Ordering) {
+        self.fetch_not(order);
+    }
+    #[inline]
+    pub(crate) fn neg(&self, order: Ordering) {
+        self.fetch_neg(order);
+    }
+}
+#[cfg(target_arch = "x86")]
+impl AtomicU64 {
+    #[inline]
+    pub(crate) fn not(&self, order: Ordering) {
+        self.fetch_not(order);
+    }
+    #[inline]
+    pub(crate) fn neg(&self, order: Ordering) {
+        self.fetch_neg(order);
+    }
+}
+
+// Implements `bit_set` / `bit_clear` / `bit_toggle` for `$atomic_type` using
+// single `lock bts` / `lock btr` / `lock btc` instructions. Each returns the
+// previous value of the targeted bit, captured from CF via `setb`.
+// `$val_modifier` is the asm template modifier sizing the bit-offset register
+// (":x"/":e"/""), and `$ptr_size` is the memory operand-size keyword.
+macro_rules! atomic_bit_opts {
+    ($atomic_type:ident, $int_type:ident, $val_modifier:tt, $ptr_size:tt) => {
+        // LLVM 14 and older don't support generating `lock bt{s,r,c}`.
+        // LLVM 15 only supports generating `lock bt{s,r,c}` for immediate bit offsets.
+        // LLVM 16+ can generate `lock bt{s,r,c}` for both immediate and register bit offsets.
+        // https://godbolt.org/z/TGhr5z4ds
+        // So, use fetch_* based implementations on LLVM 16+, otherwise use asm based implementations.
+        #[cfg(portable_atomic_llvm_16)]
+        impl_default_bit_opts!($atomic_type, $int_type);
+        #[cfg(not(portable_atomic_llvm_16))]
+        impl $atomic_type {
+            // Atomically sets bit `bit` (masked to the type's bit width) and
+            // returns whether it was previously set.
+            #[inline]
+            pub(crate) fn bit_set(&self, bit: u32, _order: Ordering) -> bool {
+                let dst = self.as_ptr();
+                // SAFETY: any data races are prevented by atomic intrinsics and the raw
+                // pointer passed in is valid because we got it from a reference.
+                // the masking by the bit size of the type ensures that we do not shift
+                // out of bounds.
+                //
+                // https://www.felixcloutier.com/x86/bts
+                unsafe {
+                    let r: u8;
+                    // atomic RMW is always SeqCst.
+                    asm!(
+                        concat!("lock bts ", $ptr_size, " ptr [{dst", ptr_modifier!(), "}], {bit", $val_modifier, "}"),
+                        // CF holds the bit's previous value; materialize it into `r`.
+                        "setb {r}",
+                        dst = in(reg) dst,
+                        bit = in(reg) (bit & ($int_type::BITS - 1)) as $int_type,
+                        r = out(reg_byte) r,
+                        // Do not use `preserves_flags` because BTS modifies the CF flag.
+                        options(nostack),
+                    );
+                    r != 0
+                }
+            }
+            // Atomically clears bit `bit` (masked to the type's bit width) and
+            // returns whether it was previously set.
+            #[inline]
+            pub(crate) fn bit_clear(&self, bit: u32, _order: Ordering) -> bool {
+                let dst = self.as_ptr();
+                // SAFETY: any data races are prevented by atomic intrinsics and the raw
+                // pointer passed in is valid because we got it from a reference.
+                // the masking by the bit size of the type ensures that we do not shift
+                // out of bounds.
+                //
+                // https://www.felixcloutier.com/x86/btr
+                unsafe {
+                    let r: u8;
+                    // atomic RMW is always SeqCst.
+                    asm!(
+                        concat!("lock btr ", $ptr_size, " ptr [{dst", ptr_modifier!(), "}], {bit", $val_modifier, "}"),
+                        // CF holds the bit's previous value; materialize it into `r`.
+                        "setb {r}",
+                        dst = in(reg) dst,
+                        bit = in(reg) (bit & ($int_type::BITS - 1)) as $int_type,
+                        r = out(reg_byte) r,
+                        // Do not use `preserves_flags` because BTR modifies the CF flag.
+                        options(nostack),
+                    );
+                    r != 0
+                }
+            }
+            // Atomically toggles bit `bit` (masked to the type's bit width) and
+            // returns whether it was previously set.
+            #[inline]
+            pub(crate) fn bit_toggle(&self, bit: u32, _order: Ordering) -> bool {
+                let dst = self.as_ptr();
+                // SAFETY: any data races are prevented by atomic intrinsics and the raw
+                // pointer passed in is valid because we got it from a reference.
+                // the masking by the bit size of the type ensures that we do not shift
+                // out of bounds.
+                //
+                // https://www.felixcloutier.com/x86/btc
+                unsafe {
+                    let r: u8;
+                    // atomic RMW is always SeqCst.
+                    asm!(
+                        concat!("lock btc ", $ptr_size, " ptr [{dst", ptr_modifier!(), "}], {bit", $val_modifier, "}"),
+                        // CF holds the bit's previous value; materialize it into `r`.
+                        "setb {r}",
+                        dst = in(reg) dst,
+                        bit = in(reg) (bit & ($int_type::BITS - 1)) as $int_type,
+                        r = out(reg_byte) r,
+                        // Do not use `preserves_flags` because BTC modifies the CF flag.
+                        options(nostack),
+                    );
+                    r != 0
+                }
+            }
+        }
+    };
+}
+
+// Byte-sized types use the default (fetch_*-based) implementation — the
+// bt{s,r,c} instructions have no byte-operand form (see the x86 reference
+// linked in the module header). 64-bit types use the asm path only on x86_64;
+// on 32-bit x86 they also fall back to the default implementation.
+impl_default_bit_opts!(AtomicI8, i8);
+impl_default_bit_opts!(AtomicU8, u8);
+atomic_bit_opts!(AtomicI16, i16, ":x", "word");
+atomic_bit_opts!(AtomicU16, u16, ":x", "word");
+atomic_bit_opts!(AtomicI32, i32, ":e", "dword");
+atomic_bit_opts!(AtomicU32, u32, ":e", "dword");
+#[cfg(target_arch = "x86_64")]
+atomic_bit_opts!(AtomicI64, i64, "", "qword");
+#[cfg(target_arch = "x86_64")]
+atomic_bit_opts!(AtomicU64, u64, "", "qword");
+#[cfg(target_arch = "x86")]
+impl_default_bit_opts!(AtomicI64, i64);
+#[cfg(target_arch = "x86")]
+impl_default_bit_opts!(AtomicU64, u64);
+#[cfg(target_pointer_width = "32")]
+atomic_bit_opts!(AtomicIsize, isize, ":e", "dword");
+#[cfg(target_pointer_width = "32")]
+atomic_bit_opts!(AtomicUsize, usize, ":e", "dword");
+#[cfg(target_pointer_width = "64")]
+atomic_bit_opts!(AtomicIsize, isize, "", "qword");
+#[cfg(target_pointer_width = "64")]
+atomic_bit_opts!(AtomicUsize, usize, "", "qword");