path: root/vendor/half/src/binary16/convert.rs
author    Valentin Popov <valentin@popov.link>  2024-07-19 15:37:58 +0300
committer Valentin Popov <valentin@popov.link>  2024-07-19 15:37:58 +0300
commit    a990de90fe41456a23e58bd087d2f107d321f3a1 (patch)
tree      15afc392522a9e85dc3332235e311b7d39352ea9  /vendor/half/src/binary16/convert.rs
parent    3d48cd3f81164bbfc1a755dc1d4a9a02f98c8ddd (diff)
Deleted vendor folder
Diffstat (limited to 'vendor/half/src/binary16/convert.rs')
-rw-r--r--  vendor/half/src/binary16/convert.rs | 752
1 file changed, 0 insertions(+), 752 deletions(-)
diff --git a/vendor/half/src/binary16/convert.rs b/vendor/half/src/binary16/convert.rs
deleted file mode 100644
index b96910f..0000000
--- a/vendor/half/src/binary16/convert.rs
+++ /dev/null
@@ -1,752 +0,0 @@
-#![allow(dead_code, unused_imports)]
-use crate::leading_zeros::leading_zeros_u16;
-use core::mem;
-
-macro_rules! convert_fn {
- (fn $name:ident($($var:ident : $vartype:ty),+) -> $restype:ty {
- if feature("f16c") { $f16c:expr }
- else { $fallback:expr }}) => {
- #[inline]
- pub(crate) fn $name($($var: $vartype),+) -> $restype {
- // Use CPU feature detection if using std
- #[cfg(all(
- feature = "use-intrinsics",
- feature = "std",
- any(target_arch = "x86", target_arch = "x86_64"),
- not(target_feature = "f16c")
- ))]
- {
- if is_x86_feature_detected!("f16c") {
- $f16c
- } else {
- $fallback
- }
- }
- // Use intrinsics directly when a compile target or using no_std
- #[cfg(all(
- feature = "use-intrinsics",
- any(target_arch = "x86", target_arch = "x86_64"),
- target_feature = "f16c"
- ))]
- {
- $f16c
- }
- // Fallback to software
- #[cfg(any(
- not(feature = "use-intrinsics"),
- not(any(target_arch = "x86", target_arch = "x86_64")),
- all(not(feature = "std"), not(target_feature = "f16c"))
- ))]
- {
- $fallback
- }
- }
- };
-}
-
-convert_fn! {
- fn f32_to_f16(f: f32) -> u16 {
- if feature("f16c") {
- unsafe { x86::f32_to_f16_x86_f16c(f) }
- } else {
- f32_to_f16_fallback(f)
- }
- }
-}
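As an illustration (not part of the deleted source), the invocation above roughly corresponds to the following hand-written dispatcher for std builds on x86/x86_64 without a compile-time f16c target feature; `f32_to_f16_dispatch_sketch` is a hypothetical name, the other identifiers come from this file:

#[cfg(all(
    feature = "use-intrinsics",
    feature = "std",
    any(target_arch = "x86", target_arch = "x86_64"),
    not(target_feature = "f16c")
))]
fn f32_to_f16_dispatch_sketch(f: f32) -> u16 {
    // Detect F16C at runtime; otherwise take the software path.
    if is_x86_feature_detected!("f16c") {
        // SAFETY: only reached when the CPU reports F16C support.
        unsafe { x86::f32_to_f16_x86_f16c(f) }
    } else {
        f32_to_f16_fallback(f)
    }
}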
-
-convert_fn! {
- fn f64_to_f16(f: f64) -> u16 {
- if feature("f16c") {
- unsafe { x86::f32_to_f16_x86_f16c(f as f32) }
- } else {
- f64_to_f16_fallback(f)
- }
- }
-}
-
-convert_fn! {
- fn f16_to_f32(i: u16) -> f32 {
- if feature("f16c") {
- unsafe { x86::f16_to_f32_x86_f16c(i) }
- } else {
- f16_to_f32_fallback(i)
- }
- }
-}
-
-convert_fn! {
- fn f16_to_f64(i: u16) -> f64 {
- if feature("f16c") {
- unsafe { x86::f16_to_f32_x86_f16c(i) as f64 }
- } else {
- f16_to_f64_fallback(i)
- }
- }
-}
-
-convert_fn! {
- fn f32x4_to_f16x4(f: &[f32; 4]) -> [u16; 4] {
- if feature("f16c") {
- unsafe { x86::f32x4_to_f16x4_x86_f16c(f) }
- } else {
- f32x4_to_f16x4_fallback(f)
- }
- }
-}
-
-convert_fn! {
- fn f16x4_to_f32x4(i: &[u16; 4]) -> [f32; 4] {
- if feature("f16c") {
- unsafe { x86::f16x4_to_f32x4_x86_f16c(i) }
- } else {
- f16x4_to_f32x4_fallback(i)
- }
- }
-}
-
-convert_fn! {
- fn f64x4_to_f16x4(f: &[f64; 4]) -> [u16; 4] {
- if feature("f16c") {
- unsafe { x86::f64x4_to_f16x4_x86_f16c(f) }
- } else {
- f64x4_to_f16x4_fallback(f)
- }
- }
-}
-
-convert_fn! {
- fn f16x4_to_f64x4(i: &[u16; 4]) -> [f64; 4] {
- if feature("f16c") {
- unsafe { x86::f16x4_to_f64x4_x86_f16c(i) }
- } else {
- f16x4_to_f64x4_fallback(i)
- }
- }
-}
-
-convert_fn! {
- fn f32x8_to_f16x8(f: &[f32; 8]) -> [u16; 8] {
- if feature("f16c") {
- unsafe { x86::f32x8_to_f16x8_x86_f16c(f) }
- } else {
- f32x8_to_f16x8_fallback(f)
- }
- }
-}
-
-convert_fn! {
- fn f16x8_to_f32x8(i: &[u16; 8]) -> [f32; 8] {
- if feature("f16c") {
- unsafe { x86::f16x8_to_f32x8_x86_f16c(i) }
- } else {
- f16x8_to_f32x8_fallback(i)
- }
- }
-}
-
-convert_fn! {
- fn f64x8_to_f16x8(f: &[f64; 8]) -> [u16; 8] {
- if feature("f16c") {
- unsafe { x86::f64x8_to_f16x8_x86_f16c(f) }
- } else {
- f64x8_to_f16x8_fallback(f)
- }
- }
-}
-
-convert_fn! {
- fn f16x8_to_f64x8(i: &[u16; 8]) -> [f64; 8] {
- if feature("f16c") {
- unsafe { x86::f16x8_to_f64x8_x86_f16c(i) }
- } else {
- f16x8_to_f64x8_fallback(i)
- }
- }
-}
-
-convert_fn! {
- fn f32_to_f16_slice(src: &[f32], dst: &mut [u16]) -> () {
- if feature("f16c") {
- convert_chunked_slice_8(src, dst, x86::f32x8_to_f16x8_x86_f16c,
- x86::f32x4_to_f16x4_x86_f16c)
- } else {
- slice_fallback(src, dst, f32_to_f16_fallback)
- }
- }
-}
-
-convert_fn! {
- fn f16_to_f32_slice(src: &[u16], dst: &mut [f32]) -> () {
- if feature("f16c") {
- convert_chunked_slice_8(src, dst, x86::f16x8_to_f32x8_x86_f16c,
- x86::f16x4_to_f32x4_x86_f16c)
- } else {
- slice_fallback(src, dst, f16_to_f32_fallback)
- }
- }
-}
-
-convert_fn! {
- fn f64_to_f16_slice(src: &[f64], dst: &mut [u16]) -> () {
- if feature("f16c") {
- convert_chunked_slice_8(src, dst, x86::f64x8_to_f16x8_x86_f16c,
- x86::f64x4_to_f16x4_x86_f16c)
- } else {
- slice_fallback(src, dst, f64_to_f16_fallback)
- }
- }
-}
-
-convert_fn! {
- fn f16_to_f64_slice(src: &[u16], dst: &mut [f64]) -> () {
- if feature("f16c") {
- convert_chunked_slice_8(src, dst, x86::f16x8_to_f64x8_x86_f16c,
- x86::f16x4_to_f64x4_x86_f16c)
- } else {
- slice_fallback(src, dst, f16_to_f64_fallback)
- }
- }
-}
-
-/// Converts a slice in chunks of x8 arrays, with an x4 path for the remainder
-#[inline]
-fn convert_chunked_slice_8<S: Copy + Default, D: Copy>(
- src: &[S],
- dst: &mut [D],
- fn8: unsafe fn(&[S; 8]) -> [D; 8],
- fn4: unsafe fn(&[S; 4]) -> [D; 4],
-) {
- assert_eq!(src.len(), dst.len());
-
- // TODO: Can be further optimized with array_chunks when it becomes stabilized
-
- let src_chunks = src.chunks_exact(8);
- let mut dst_chunks = dst.chunks_exact_mut(8);
- let src_remainder = src_chunks.remainder();
- for (s, d) in src_chunks.zip(&mut dst_chunks) {
- let chunk: &[S; 8] = s.try_into().unwrap();
- d.copy_from_slice(unsafe { &fn8(chunk) });
- }
-
- // Process remainder
- if src_remainder.len() > 4 {
- let mut buf: [S; 8] = Default::default();
- buf[..src_remainder.len()].copy_from_slice(src_remainder);
- let vec = unsafe { fn8(&buf) };
- let dst_remainder = dst_chunks.into_remainder();
- dst_remainder.copy_from_slice(&vec[..dst_remainder.len()]);
- } else if !src_remainder.is_empty() {
- let mut buf: [S; 4] = Default::default();
- buf[..src_remainder.len()].copy_from_slice(src_remainder);
- let vec = unsafe { fn4(&buf) };
- let dst_remainder = dst_chunks.into_remainder();
- dst_remainder.copy_from_slice(&vec[..dst_remainder.len()]);
- }
-}
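A rough usage sketch of the remainder handling above (illustrative only, not part of the deleted source); the safe fallbacks defined later in this file stand in for the intrinsic function pointers, and the test name is hypothetical:

#[test]
fn chunked_slice_remainder_sketch() {
    // Wrap the safe fallbacks so they match the `unsafe fn` parameter types.
    unsafe fn to8(v: &[f32; 8]) -> [u16; 8] { f32x8_to_f16x8_fallback(v) }
    unsafe fn to4(v: &[f32; 4]) -> [u16; 4] { f32x4_to_f16x4_fallback(v) }

    // 11 elements: one full 8-chunk plus a remainder of 3, which exercises the x4 path.
    let src = [1.0f32; 11];
    let mut dst = [0u16; 11];
    convert_chunked_slice_8(&src, &mut dst, to8, to4);
    assert!(dst.iter().all(|&h| h == 0x3C00)); // 1.0 in binary16
}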
-
-/// Converts a slice in chunks of x4 arrays
-#[inline]
-fn convert_chunked_slice_4<S: Copy + Default, D: Copy>(
- src: &[S],
- dst: &mut [D],
- f: unsafe fn(&[S; 4]) -> [D; 4],
-) {
- assert_eq!(src.len(), dst.len());
-
- // TODO: Can be further optimized with array_chunks when it becomes stabilized
-
- let src_chunks = src.chunks_exact(4);
- let mut dst_chunks = dst.chunks_exact_mut(4);
- let src_remainder = src_chunks.remainder();
- for (s, d) in src_chunks.zip(&mut dst_chunks) {
- let chunk: &[S; 4] = s.try_into().unwrap();
- d.copy_from_slice(unsafe { &f(chunk) });
- }
-
- // Process remainder
- if !src_remainder.is_empty() {
- let mut buf: [S; 4] = Default::default();
- buf[..src_remainder.len()].copy_from_slice(src_remainder);
- let vec = unsafe { f(&buf) };
- let dst_remainder = dst_chunks.into_remainder();
- dst_remainder.copy_from_slice(&vec[..dst_remainder.len()]);
- }
-}
-
-/////////////// Fallbacks ////////////////
-
-// The functions below round to nearest, with ties to even.
-// Let us call the most significant bit that will be shifted out the round_bit.
-//
-// Round up if either
-// a) Removed part > tie.
-// (mantissa & round_bit) != 0 && (mantissa & (round_bit - 1)) != 0
-// b) Removed part == tie, and retained part is odd.
-// (mantissa & round_bit) != 0 && (mantissa & (2 * round_bit)) != 0
-// (If removed part == tie and retained part is even, do not round up.)
-// These two conditions can be combined into one:
-// (mantissa & round_bit) != 0 && (mantissa & ((round_bit - 1) | (2 * round_bit))) != 0
-// which can be simplified into
-// (mantissa & round_bit) != 0 && (mantissa & (3 * round_bit - 1)) != 0
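A small worked check of the combined condition (illustrative only, not part of the deleted source), with round_bit = 0x1000 as used in the f32-to-f16 normal path below; module and function names are hypothetical:

#[cfg(test)]
mod rounding_condition_example {
    fn rounds_up(man: u32) -> bool {
        let round_bit = 0x0000_1000u32;
        (man & round_bit) != 0 && (man & (3 * round_bit - 1)) != 0
    }

    #[test]
    fn ties_to_even() {
        assert!(rounds_up(0x1001));  // removed part > tie: round up
        assert!(!rounds_up(0x1000)); // exact tie, retained LSB (bit 13) clear: keep
        assert!(rounds_up(0x3000));  // exact tie, retained LSB (bit 13) set: round up to even
    }
}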
-
-#[inline]
-pub(crate) const fn f32_to_f16_fallback(value: f32) -> u16 {
- // TODO: Replace mem::transmute with to_bits() once to_bits is const-stabilized
- // Convert to raw bytes
- let x: u32 = unsafe { mem::transmute(value) };
-
- // Extract IEEE754 components
- let sign = x & 0x8000_0000u32;
- let exp = x & 0x7F80_0000u32;
- let man = x & 0x007F_FFFFu32;
-
- // Check for all exponent bits being set, which is Infinity or NaN
- if exp == 0x7F80_0000u32 {
- // Set mantissa MSB for NaN (and also keep shifted mantissa bits)
- let nan_bit = if man == 0 { 0 } else { 0x0200u32 };
- return ((sign >> 16) | 0x7C00u32 | nan_bit | (man >> 13)) as u16;
- }
-
- // The number is normalized, start assembling half precision version
- let half_sign = sign >> 16;
- // Unbias the exponent, then bias for half precision
- let unbiased_exp = ((exp >> 23) as i32) - 127;
- let half_exp = unbiased_exp + 15;
-
- // Check for exponent overflow, return +infinity
- if half_exp >= 0x1F {
- return (half_sign | 0x7C00u32) as u16;
- }
-
- // Check for underflow
- if half_exp <= 0 {
- // Check mantissa for what we can do
- if 14 - half_exp > 24 {
- // No rounding possibility, so this is a full underflow, return signed zero
- return half_sign as u16;
- }
- // Don't forget about hidden leading mantissa bit when assembling mantissa
- let man = man | 0x0080_0000u32;
- let mut half_man = man >> (14 - half_exp);
- // Check for rounding (see comment above functions)
- let round_bit = 1 << (13 - half_exp);
- if (man & round_bit) != 0 && (man & (3 * round_bit - 1)) != 0 {
- half_man += 1;
- }
- // No exponent for subnormals
- return (half_sign | half_man) as u16;
- }
-
- // Rebias the exponent
- let half_exp = (half_exp as u32) << 10;
- let half_man = man >> 13;
- // Check for rounding (see comment above functions)
- let round_bit = 0x0000_1000u32;
- if (man & round_bit) != 0 && (man & (3 * round_bit - 1)) != 0 {
- // Round it
- ((half_sign | half_exp | half_man) + 1) as u16
- } else {
- (half_sign | half_exp | half_man) as u16
- }
-}
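A few spot checks for the fallback above (an illustrative test, not part of the deleted source); the expected bit patterns follow from the IEEE 754 binary16 encoding:

#[test]
fn f32_to_f16_fallback_spot_checks() {
    assert_eq!(f32_to_f16_fallback(1.0), 0x3C00);           // exponent bias 15, zero mantissa
    assert_eq!(f32_to_f16_fallback(-2.0), 0xC000);          // sign bit plus exponent field 16
    assert_eq!(f32_to_f16_fallback(65504.0), 0x7BFF);       // largest finite binary16 value
    assert_eq!(f32_to_f16_fallback(f32::INFINITY), 0x7C00); // overflow saturates to +infinity
    assert_eq!(f32_to_f16_fallback(1.0e-8), 0x0000);        // full underflow to signed zero
}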
-
-#[inline]
-pub(crate) const fn f64_to_f16_fallback(value: f64) -> u16 {
- // Convert to raw bytes, truncating the last 32-bits of mantissa; that precision will always
- // be lost on half-precision.
- // TODO: Replace mem::transmute with to_bits() once to_bits is const-stabilized
- let val: u64 = unsafe { mem::transmute(value) };
- let x = (val >> 32) as u32;
-
- // Extract IEEE754 components
- let sign = x & 0x8000_0000u32;
- let exp = x & 0x7FF0_0000u32;
- let man = x & 0x000F_FFFFu32;
-
- // Check for all exponent bits being set, which is Infinity or NaN
- if exp == 0x7FF0_0000u32 {
- // Set mantissa MSB for NaN (and also keep shifted mantissa bits).
- // We also have to check the last 32 bits.
- let nan_bit = if man == 0 && (val as u32 == 0) {
- 0
- } else {
- 0x0200u32
- };
- return ((sign >> 16) | 0x7C00u32 | nan_bit | (man >> 10)) as u16;
- }
-
- // The number is normalized, start assembling half precision version
- let half_sign = sign >> 16;
- // Unbias the exponent, then bias for half precision
- let unbiased_exp = ((exp >> 20) as i64) - 1023;
- let half_exp = unbiased_exp + 15;
-
- // Check for exponent overflow, return +infinity
- if half_exp >= 0x1F {
- return (half_sign | 0x7C00u32) as u16;
- }
-
- // Check for underflow
- if half_exp <= 0 {
- // Check mantissa for what we can do
- if 10 - half_exp > 21 {
- // No rounding possibility, so this is a full underflow, return signed zero
- return half_sign as u16;
- }
- // Don't forget about hidden leading mantissa bit when assembling mantissa
- let man = man | 0x0010_0000u32;
- let mut half_man = man >> (11 - half_exp);
- // Check for rounding (see comment above functions)
- let round_bit = 1 << (10 - half_exp);
- if (man & round_bit) != 0 && (man & (3 * round_bit - 1)) != 0 {
- half_man += 1;
- }
- // No exponent for subnormals
- return (half_sign | half_man) as u16;
- }
-
- // Rebias the exponent
- let half_exp = (half_exp as u32) << 10;
- let half_man = man >> 10;
- // Check for rounding (see comment above functions)
- let round_bit = 0x0000_0200u32;
- if (man & round_bit) != 0 && (man & (3 * round_bit - 1)) != 0 {
- // Round it
- ((half_sign | half_exp | half_man) + 1) as u16
- } else {
- (half_sign | half_exp | half_man) as u16
- }
-}
-
-#[inline]
-pub(crate) const fn f16_to_f32_fallback(i: u16) -> f32 {
- // Check for signed zero
- // TODO: Replace mem::transmute with from_bits() once from_bits is const-stabilized
- if i & 0x7FFFu16 == 0 {
- return unsafe { mem::transmute((i as u32) << 16) };
- }
-
- let half_sign = (i & 0x8000u16) as u32;
- let half_exp = (i & 0x7C00u16) as u32;
- let half_man = (i & 0x03FFu16) as u32;
-
- // Check for an infinity or NaN when all exponent bits set
- if half_exp == 0x7C00u32 {
- // Check for signed infinity if mantissa is zero
- if half_man == 0 {
- return unsafe { mem::transmute((half_sign << 16) | 0x7F80_0000u32) };
- } else {
- // NaN, keep current mantissa but also set most significant mantissa bit
- return unsafe {
- mem::transmute((half_sign << 16) | 0x7FC0_0000u32 | (half_man << 13))
- };
- }
- }
-
- // Calculate single-precision components with adjusted exponent
- let sign = half_sign << 16;
- // Unbias exponent
- let unbiased_exp = ((half_exp as i32) >> 10) - 15;
-
- // Check for subnormals, which will be normalized by adjusting exponent
- if half_exp == 0 {
- // Calculate how much to adjust the exponent by
- let e = leading_zeros_u16(half_man as u16) - 6;
-
- // Rebias and adjust exponent
- let exp = (127 - 15 - e) << 23;
- let man = (half_man << (14 + e)) & 0x7F_FF_FFu32;
- return unsafe { mem::transmute(sign | exp | man) };
- }
-
- // Rebias exponent for a normalized normal
- let exp = ((unbiased_exp + 127) as u32) << 23;
- let man = (half_man & 0x03FFu32) << 13;
- unsafe { mem::transmute(sign | exp | man) }
-}
-
-#[inline]
-pub(crate) const fn f16_to_f64_fallback(i: u16) -> f64 {
- // Check for signed zero
- // TODO: Replace mem::transmute with from_bits() once from_bits is const-stabilized
- if i & 0x7FFFu16 == 0 {
- return unsafe { mem::transmute((i as u64) << 48) };
- }
-
- let half_sign = (i & 0x8000u16) as u64;
- let half_exp = (i & 0x7C00u16) as u64;
- let half_man = (i & 0x03FFu16) as u64;
-
- // Check for an infinity or NaN when all exponent bits set
- if half_exp == 0x7C00u64 {
- // Check for signed infinity if mantissa is zero
- if half_man == 0 {
- return unsafe { mem::transmute((half_sign << 48) | 0x7FF0_0000_0000_0000u64) };
- } else {
- // NaN, keep current mantissa but also set most significant mantissa bit
- return unsafe {
- mem::transmute((half_sign << 48) | 0x7FF8_0000_0000_0000u64 | (half_man << 42))
- };
- }
- }
-
- // Calculate double-precision components with adjusted exponent
- let sign = half_sign << 48;
- // Unbias exponent
- let unbiased_exp = ((half_exp as i64) >> 10) - 15;
-
- // Check for subnormals, which will be normalized by adjusting exponent
- if half_exp == 0 {
- // Calculate how much to adjust the exponent by
- let e = leading_zeros_u16(half_man as u16) - 6;
-
- // Rebias and adjust exponent
- let exp = ((1023 - 15 - e) as u64) << 52;
- let man = (half_man << (43 + e)) & 0xF_FFFF_FFFF_FFFFu64;
- return unsafe { mem::transmute(sign | exp | man) };
- }
-
- // Rebias exponent for a normalized normal
- let exp = ((unbiased_exp + 1023) as u64) << 52;
- let man = (half_man & 0x03FFu64) << 42;
- unsafe { mem::transmute(sign | exp | man) }
-}
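Matching spot checks for the two widening fallbacks above (illustrative only, not part of the deleted source):

#[test]
fn f16_to_float_fallback_spot_checks() {
    assert_eq!(f16_to_f32_fallback(0x3C00), 1.0f32);
    assert_eq!(f16_to_f64_fallback(0xC000), -2.0f64);
    // 0x0001 is the smallest positive subnormal, 2^-24 (f32 bits 0x3380_0000).
    assert_eq!(f16_to_f32_fallback(0x0001), f32::from_bits(0x3380_0000));
    assert!(f16_to_f32_fallback(0x7C00).is_infinite());
    assert!(f16_to_f64_fallback(0x7E00).is_nan());
}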
-
-#[inline]
-fn f16x4_to_f32x4_fallback(v: &[u16; 4]) -> [f32; 4] {
- [
- f16_to_f32_fallback(v[0]),
- f16_to_f32_fallback(v[1]),
- f16_to_f32_fallback(v[2]),
- f16_to_f32_fallback(v[3]),
- ]
-}
-
-#[inline]
-fn f32x4_to_f16x4_fallback(v: &[f32; 4]) -> [u16; 4] {
- [
- f32_to_f16_fallback(v[0]),
- f32_to_f16_fallback(v[1]),
- f32_to_f16_fallback(v[2]),
- f32_to_f16_fallback(v[3]),
- ]
-}
-
-#[inline]
-fn f16x4_to_f64x4_fallback(v: &[u16; 4]) -> [f64; 4] {
- [
- f16_to_f64_fallback(v[0]),
- f16_to_f64_fallback(v[1]),
- f16_to_f64_fallback(v[2]),
- f16_to_f64_fallback(v[3]),
- ]
-}
-
-#[inline]
-fn f64x4_to_f16x4_fallback(v: &[f64; 4]) -> [u16; 4] {
- [
- f64_to_f16_fallback(v[0]),
- f64_to_f16_fallback(v[1]),
- f64_to_f16_fallback(v[2]),
- f64_to_f16_fallback(v[3]),
- ]
-}
-
-#[inline]
-fn f16x8_to_f32x8_fallback(v: &[u16; 8]) -> [f32; 8] {
- [
- f16_to_f32_fallback(v[0]),
- f16_to_f32_fallback(v[1]),
- f16_to_f32_fallback(v[2]),
- f16_to_f32_fallback(v[3]),
- f16_to_f32_fallback(v[4]),
- f16_to_f32_fallback(v[5]),
- f16_to_f32_fallback(v[6]),
- f16_to_f32_fallback(v[7]),
- ]
-}
-
-#[inline]
-fn f32x8_to_f16x8_fallback(v: &[f32; 8]) -> [u16; 8] {
- [
- f32_to_f16_fallback(v[0]),
- f32_to_f16_fallback(v[1]),
- f32_to_f16_fallback(v[2]),
- f32_to_f16_fallback(v[3]),
- f32_to_f16_fallback(v[4]),
- f32_to_f16_fallback(v[5]),
- f32_to_f16_fallback(v[6]),
- f32_to_f16_fallback(v[7]),
- ]
-}
-
-#[inline]
-fn f16x8_to_f64x8_fallback(v: &[u16; 8]) -> [f64; 8] {
- [
- f16_to_f64_fallback(v[0]),
- f16_to_f64_fallback(v[1]),
- f16_to_f64_fallback(v[2]),
- f16_to_f64_fallback(v[3]),
- f16_to_f64_fallback(v[4]),
- f16_to_f64_fallback(v[5]),
- f16_to_f64_fallback(v[6]),
- f16_to_f64_fallback(v[7]),
- ]
-}
-
-#[inline]
-fn f64x8_to_f16x8_fallback(v: &[f64; 8]) -> [u16; 8] {
- [
- f64_to_f16_fallback(v[0]),
- f64_to_f16_fallback(v[1]),
- f64_to_f16_fallback(v[2]),
- f64_to_f16_fallback(v[3]),
- f64_to_f16_fallback(v[4]),
- f64_to_f16_fallback(v[5]),
- f64_to_f16_fallback(v[6]),
- f64_to_f16_fallback(v[7]),
- ]
-}
-
-#[inline]
-fn slice_fallback<S: Copy, D>(src: &[S], dst: &mut [D], f: fn(S) -> D) {
- assert_eq!(src.len(), dst.len());
- for (s, d) in src.iter().copied().zip(dst.iter_mut()) {
- *d = f(s);
- }
-}
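A brief usage sketch of the scalar slice path (illustrative only, not part of the deleted source):

#[test]
fn slice_fallback_sketch() {
    let src = [0.0f32, 1.0, -2.0, 65504.0, f32::INFINITY];
    let mut dst = [0u16; 5];
    slice_fallback(&src, &mut dst, f32_to_f16_fallback);
    assert_eq!(dst, [0x0000, 0x3C00, 0xC000, 0x7BFF, 0x7C00]);
}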
-
-/////////////// x86/x86_64 f16c ////////////////
-#[cfg(all(
- feature = "use-intrinsics",
- any(target_arch = "x86", target_arch = "x86_64")
-))]
-mod x86 {
- use core::{mem::MaybeUninit, ptr};
-
- #[cfg(target_arch = "x86")]
- use core::arch::x86::{
- __m128, __m128i, __m256, _mm256_cvtph_ps, _mm256_cvtps_ph, _mm_cvtph_ps,
- _MM_FROUND_TO_NEAREST_INT,
- };
- #[cfg(target_arch = "x86_64")]
- use core::arch::x86_64::{
- __m128, __m128i, __m256, _mm256_cvtph_ps, _mm256_cvtps_ph, _mm_cvtph_ps, _mm_cvtps_ph,
- _MM_FROUND_TO_NEAREST_INT,
- };
-
- use super::convert_chunked_slice_8;
-
- #[target_feature(enable = "f16c")]
- #[inline]
- pub(super) unsafe fn f16_to_f32_x86_f16c(i: u16) -> f32 {
- let mut vec = MaybeUninit::<__m128i>::zeroed();
- vec.as_mut_ptr().cast::<u16>().write(i);
- let retval = _mm_cvtph_ps(vec.assume_init());
- *(&retval as *const __m128).cast()
- }
-
- #[target_feature(enable = "f16c")]
- #[inline]
- pub(super) unsafe fn f32_to_f16_x86_f16c(f: f32) -> u16 {
- let mut vec = MaybeUninit::<__m128>::zeroed();
- vec.as_mut_ptr().cast::<f32>().write(f);
- let retval = _mm_cvtps_ph(vec.assume_init(), _MM_FROUND_TO_NEAREST_INT);
- *(&retval as *const __m128i).cast()
- }
-
- #[target_feature(enable = "f16c")]
- #[inline]
- pub(super) unsafe fn f16x4_to_f32x4_x86_f16c(v: &[u16; 4]) -> [f32; 4] {
- let mut vec = MaybeUninit::<__m128i>::zeroed();
- ptr::copy_nonoverlapping(v.as_ptr(), vec.as_mut_ptr().cast(), 4);
- let retval = _mm_cvtph_ps(vec.assume_init());
- *(&retval as *const __m128).cast()
- }
-
- #[target_feature(enable = "f16c")]
- #[inline]
- pub(super) unsafe fn f32x4_to_f16x4_x86_f16c(v: &[f32; 4]) -> [u16; 4] {
- let mut vec = MaybeUninit::<__m128>::uninit();
- ptr::copy_nonoverlapping(v.as_ptr(), vec.as_mut_ptr().cast(), 4);
- let retval = _mm_cvtps_ph(vec.assume_init(), _MM_FROUND_TO_NEAREST_INT);
- *(&retval as *const __m128i).cast()
- }
-
- #[target_feature(enable = "f16c")]
- #[inline]
- pub(super) unsafe fn f16x4_to_f64x4_x86_f16c(v: &[u16; 4]) -> [f64; 4] {
- let array = f16x4_to_f32x4_x86_f16c(v);
- // Let compiler vectorize this regular cast for now.
- // TODO: investigate auto-detecting sse2/avx convert features
- [
- array[0] as f64,
- array[1] as f64,
- array[2] as f64,
- array[3] as f64,
- ]
- }
-
- #[target_feature(enable = "f16c")]
- #[inline]
- pub(super) unsafe fn f64x4_to_f16x4_x86_f16c(v: &[f64; 4]) -> [u16; 4] {
- // Let compiler vectorize this regular cast for now.
- // TODO: investigate auto-detecting sse2/avx convert features
- let v = [v[0] as f32, v[1] as f32, v[2] as f32, v[3] as f32];
- f32x4_to_f16x4_x86_f16c(&v)
- }
-
- #[target_feature(enable = "f16c")]
- #[inline]
- pub(super) unsafe fn f16x8_to_f32x8_x86_f16c(v: &[u16; 8]) -> [f32; 8] {
- let mut vec = MaybeUninit::<__m128i>::zeroed();
- ptr::copy_nonoverlapping(v.as_ptr(), vec.as_mut_ptr().cast(), 8);
- let retval = _mm256_cvtph_ps(vec.assume_init());
- *(&retval as *const __m256).cast()
- }
-
- #[target_feature(enable = "f16c")]
- #[inline]
- pub(super) unsafe fn f32x8_to_f16x8_x86_f16c(v: &[f32; 8]) -> [u16; 8] {
- let mut vec = MaybeUninit::<__m256>::uninit();
- ptr::copy_nonoverlapping(v.as_ptr(), vec.as_mut_ptr().cast(), 8);
- let retval = _mm256_cvtps_ph(vec.assume_init(), _MM_FROUND_TO_NEAREST_INT);
- *(&retval as *const __m128i).cast()
- }
-
- #[target_feature(enable = "f16c")]
- #[inline]
- pub(super) unsafe fn f16x8_to_f64x8_x86_f16c(v: &[u16; 8]) -> [f64; 8] {
- let array = f16x8_to_f32x8_x86_f16c(v);
- // Let compiler vectorize this regular cast for now.
- // TODO: investigate auto-detecting sse2/avx convert features
- [
- array[0] as f64,
- array[1] as f64,
- array[2] as f64,
- array[3] as f64,
- array[4] as f64,
- array[5] as f64,
- array[6] as f64,
- array[7] as f64,
- ]
- }
-
- #[target_feature(enable = "f16c")]
- #[inline]
- pub(super) unsafe fn f64x8_to_f16x8_x86_f16c(v: &[f64; 8]) -> [u16; 8] {
- // Let compiler vectorize this regular cast for now.
- // TODO: investigate auto-detecting sse2/avx convert features
- let v = [
- v[0] as f32,
- v[1] as f32,
- v[2] as f32,
- v[3] as f32,
- v[4] as f32,
- v[5] as f32,
- v[6] as f32,
- v[7] as f32,
- ];
- f32x8_to_f16x8_x86_f16c(&v)
- }
-}