author    Valentin Popov <valentin@popov.link>  2024-01-08 00:21:28 +0300
committer Valentin Popov <valentin@popov.link>  2024-01-08 00:21:28 +0300
commit    1b6a04ca5504955c571d1c97504fb45ea0befee4 (patch)
tree      7579f518b23313e8a9748a88ab6173d5e030b227 /vendor/redox_syscall/src/io
parent    5ecd8cf2cba827454317368b68571df0d13d7842 (diff)
Initial vendor packages
Signed-off-by: Valentin Popov <valentin@popov.link>
Diffstat (limited to 'vendor/redox_syscall/src/io')
-rw-r--r--  vendor/redox_syscall/src/io/dma.rs    219
-rw-r--r--  vendor/redox_syscall/src/io/io.rs      71
-rw-r--r--  vendor/redox_syscall/src/io/mmio.rs   168
-rw-r--r--  vendor/redox_syscall/src/io/mod.rs     15
-rw-r--r--  vendor/redox_syscall/src/io/pio.rs     90
5 files changed, 563 insertions, 0 deletions
diff --git a/vendor/redox_syscall/src/io/dma.rs b/vendor/redox_syscall/src/io/dma.rs
new file mode 100644
index 0000000..0613fc9
--- /dev/null
+++ b/vendor/redox_syscall/src/io/dma.rs
@@ -0,0 +1,219 @@
+use core::mem::{self, MaybeUninit};
+use core::ops::{Deref, DerefMut};
+use core::{ptr, slice};
+
+use crate::Result;
+use crate::{PartialAllocStrategy, PhysallocFlags, PhysmapFlags};
+use crate::arch::PAGE_SIZE;
+
+/// An RAII guard of a physical memory allocation. Currently all physically allocated memory is
+/// page-aligned and takes up at least 4 KiB of space (on x86_64).
+#[derive(Debug)]
+pub struct PhysBox {
+ address: usize,
+ size: usize
+}
+
+const fn round_up(x: usize) -> usize {
+ (x + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE
+}
+fn assert_aligned(x: usize) {
+ assert_eq!(x % PAGE_SIZE, 0);
+}
+
+#[cfg(target_arch = "aarch64")]
+fn physmap_flags() -> PhysmapFlags {
+ // aarch64 currently must map DMA memory without caching to ensure coherence
+ crate::PHYSMAP_NO_CACHE | crate::PHYSMAP_WRITE
+}
+
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+fn physmap_flags() -> PhysmapFlags {
+ // x86 ensures cache coherence with DMA memory
+ crate::PHYSMAP_WRITE
+}
+
+impl PhysBox {
+ /// Construct a PhysBox from an address and a size. The address must be page-aligned, and the
+ /// size must similarly be a multiple of the page size.
+ ///
+ /// # Safety
+    /// This function is unsafe because the address and size must describe a valid physical
+    /// allocation: the region is passed to `physfree` when `Self` is dropped.
+ pub unsafe fn from_raw_parts(address: usize, size: usize) -> Self {
+ assert_aligned(address);
+ assert_aligned(size);
+
+ Self {
+ address,
+ size,
+ }
+ }
+
+    /// Retrieve the byte address in physical memory of this allocation.
+ pub fn address(&self) -> usize {
+ self.address
+ }
+
+    /// Retrieve the size of the allocation in bytes.
+ pub fn size(&self) -> usize {
+ self.size
+ }
+
+ /// Allocate physical memory that must reside in 32-bit space.
+ pub fn new_in_32bit_space(size: usize) -> Result<Self> {
+ Self::new_with_flags(size, PhysallocFlags::SPACE_32)
+ }
+
+ pub fn new_with_flags(size: usize, flags: PhysallocFlags) -> Result<Self> {
+ assert!(!flags.contains(PhysallocFlags::PARTIAL_ALLOC));
+ assert_aligned(size);
+
+ let address = unsafe { crate::physalloc2(size, flags.bits())? };
+ Ok(unsafe { Self::from_raw_parts(address, size) })
+ }
+
+ /// "Partially" allocate physical memory, in the sense that the allocation may be smaller than
+ /// expected, but still with a minimum limit. This is particularly useful when the physical
+ /// memory space is fragmented, and a device supports scatter-gather I/O. In that case, the
+    /// driver can optimistically request e.g. one allocation of 1 MiB, with a minimum of
+    /// 512 KiB. If that first allocation only returns half the requested size, the driver can
+    /// make another allocation and then let the device use both buffers.
+ pub fn new_partial_allocation(size: usize, flags: PhysallocFlags, strategy: Option<PartialAllocStrategy>, mut min: usize) -> Result<Self> {
+ assert_aligned(size);
+ debug_assert!(!(flags.contains(PhysallocFlags::PARTIAL_ALLOC) && strategy.is_none()));
+
+ let address = unsafe { crate::physalloc3(size, flags.bits() | strategy.map_or(0, |s| s as usize), &mut min)? };
+        // `physalloc3` writes the actual allocated size back into `min`, which may be smaller
+        // than `size`; use it so that `Drop` frees exactly what was allocated.
+        Ok(unsafe { Self::from_raw_parts(address, min) })
+ }
+
+ pub fn new(size: usize) -> Result<Self> {
+ assert_aligned(size);
+
+ let address = unsafe { crate::physalloc(size)? };
+ Ok(unsafe { Self::from_raw_parts(address, size) })
+ }
+}
+
+impl Drop for PhysBox {
+ fn drop(&mut self) {
+ let _ = unsafe { crate::physfree(self.address, self.size) };
+ }
+}
+
+pub struct Dma<T: ?Sized> {
+ phys: PhysBox,
+ virt: *mut T,
+}
+
+impl<T> Dma<T> {
+ pub fn from_physbox_uninit(phys: PhysBox) -> Result<Dma<MaybeUninit<T>>> {
+ let virt = unsafe { crate::physmap(phys.address, phys.size, physmap_flags())? } as *mut MaybeUninit<T>;
+
+ Ok(Dma {
+ phys,
+ virt,
+ })
+ }
+ pub fn from_physbox_zeroed(phys: PhysBox) -> Result<Dma<MaybeUninit<T>>> {
+ let this = Self::from_physbox_uninit(phys)?;
+ unsafe { ptr::write_bytes(this.virt as *mut MaybeUninit<u8>, 0, this.phys.size) }
+ Ok(this)
+ }
+
+ pub fn from_physbox(phys: PhysBox, value: T) -> Result<Self> {
+ let this = Self::from_physbox_uninit(phys)?;
+
+ Ok(unsafe {
+ ptr::write(this.virt, MaybeUninit::new(value));
+ this.assume_init()
+ })
+ }
+
+ pub fn new(value: T) -> Result<Self> {
+ let phys = PhysBox::new(round_up(mem::size_of::<T>()))?;
+ Self::from_physbox(phys, value)
+ }
+ pub fn zeroed() -> Result<Dma<MaybeUninit<T>>> {
+ let phys = PhysBox::new(round_up(mem::size_of::<T>()))?;
+ Self::from_physbox_zeroed(phys)
+ }
+}
+
+impl<T> Dma<MaybeUninit<T>> {
+ pub unsafe fn assume_init(self) -> Dma<T> {
+ let &Dma { phys: PhysBox { address, size }, virt } = &self;
+ mem::forget(self);
+
+ Dma {
+ phys: PhysBox { address, size },
+ virt: virt as *mut T,
+ }
+ }
+}
+impl<T: ?Sized> Dma<T> {
+ pub fn physical(&self) -> usize {
+ self.phys.address()
+ }
+ pub fn size(&self) -> usize {
+ self.phys.size()
+ }
+ pub fn phys(&self) -> &PhysBox {
+ &self.phys
+ }
+}
+
+impl<T> Dma<[T]> {
+ pub fn from_physbox_uninit_unsized(phys: PhysBox, len: usize) -> Result<Dma<[MaybeUninit<T>]>> {
+ let max_len = phys.size() / mem::size_of::<T>();
+ assert!(len <= max_len);
+
+ Ok(Dma {
+ virt: unsafe { slice::from_raw_parts_mut(crate::physmap(phys.address, phys.size, physmap_flags())? as *mut MaybeUninit<T>, len) } as *mut [MaybeUninit<T>],
+ phys,
+ })
+ }
+ pub fn from_physbox_zeroed_unsized(phys: PhysBox, len: usize) -> Result<Dma<[MaybeUninit<T>]>> {
+ let this = Self::from_physbox_uninit_unsized(phys, len)?;
+ unsafe { ptr::write_bytes(this.virt as *mut MaybeUninit<u8>, 0, this.phys.size()) }
+ Ok(this)
+ }
+ /// Creates a new DMA buffer with a size only known at runtime.
+    /// # Safety
+ /// * `T` must be properly aligned.
+ /// * `T` must be valid as zeroed (i.e. no NonNull pointers).
+ pub unsafe fn zeroed_unsized(count: usize) -> Result<Self> {
+ let phys = PhysBox::new(round_up(mem::size_of::<T>() * count))?;
+ Ok(Self::from_physbox_zeroed_unsized(phys, count)?.assume_init())
+ }
+}
+impl<T> Dma<[MaybeUninit<T>]> {
+ pub unsafe fn assume_init(self) -> Dma<[T]> {
+ let &Dma { phys: PhysBox { address, size }, virt } = &self;
+ mem::forget(self);
+
+ Dma {
+ phys: PhysBox { address, size },
+ virt: virt as *mut [T],
+ }
+ }
+}
+
+impl<T: ?Sized> Deref for Dma<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ unsafe { &*self.virt }
+ }
+}
+
+impl<T: ?Sized> DerefMut for Dma<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.virt }
+ }
+}
+
+impl<T: ?Sized> Drop for Dma<T> {
+ fn drop(&mut self) {
+ unsafe { ptr::drop_in_place(self.virt) }
+ let _ = unsafe { crate::funmap(self.virt as *mut u8 as usize, self.phys.size) };
+ }
+}
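
A minimal usage sketch of the `Dma` API added above, assuming a Redox driver context where the process may call `physalloc`/`physmap` (the crate's lib name is `syscall`; the `ExampleCommand` type and its fields are hypothetical):

    use syscall::io::Dma;

    // Hypothetical device command block; any Sized type works.
    #[repr(C)]
    #[derive(Clone, Copy, Default)]
    struct ExampleCommand {
        opcode: u32,
        flags: u32,
        buffer_addr: u64,
    }

    fn submit_one() -> syscall::Result<()> {
        // Dma::new rounds size_of::<ExampleCommand>() up to a whole page,
        // allocates physical memory, maps it, and writes the value into it.
        let mut cmd = Dma::new(ExampleCommand::default())?;

        // DerefMut gives access to the mapped value; the device is handed
        // the physical address instead of the virtual one.
        cmd.opcode = 1;
        let _device_visible = cmd.physical();

        // Dropping `cmd` runs drop_in_place, funmap, and physfree in turn.
        Ok(())
    }
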
diff --git a/vendor/redox_syscall/src/io/io.rs b/vendor/redox_syscall/src/io/io.rs
new file mode 100644
index 0000000..2c4acd3
--- /dev/null
+++ b/vendor/redox_syscall/src/io/io.rs
@@ -0,0 +1,71 @@
+use core::cmp::PartialEq;
+use core::ops::{BitAnd, BitOr, Not};
+
+pub trait Io {
+ type Value: Copy + PartialEq + BitAnd<Output = Self::Value> + BitOr<Output = Self::Value> + Not<Output = Self::Value>;
+
+ fn read(&self) -> Self::Value;
+ fn write(&mut self, value: Self::Value);
+
+ #[inline(always)]
+ fn readf(&self, flags: Self::Value) -> bool {
+        (self.read() & flags) == flags
+ }
+
+ #[inline(always)]
+ fn writef(&mut self, flags: Self::Value, value: bool) {
+ let tmp: Self::Value = match value {
+ true => self.read() | flags,
+ false => self.read() & !flags,
+ };
+ self.write(tmp);
+ }
+}
+
+pub struct ReadOnly<I> {
+ inner: I
+}
+
+impl<I> ReadOnly<I> {
+ pub const fn new(inner: I) -> ReadOnly<I> {
+ ReadOnly {
+            inner
+ }
+ }
+}
+
+impl<I: Io> ReadOnly<I> {
+ #[inline(always)]
+ pub fn read(&self) -> I::Value {
+ self.inner.read()
+ }
+
+ #[inline(always)]
+ pub fn readf(&self, flags: I::Value) -> bool {
+ self.inner.readf(flags)
+ }
+}
+
+pub struct WriteOnly<I> {
+ inner: I
+}
+
+impl<I> WriteOnly<I> {
+ pub const fn new(inner: I) -> WriteOnly<I> {
+ WriteOnly {
+            inner
+ }
+ }
+}
+
+impl<I: Io> WriteOnly<I> {
+ #[inline(always)]
+ pub fn write(&mut self, value: I::Value) {
+ self.inner.write(value)
+ }
+
+ #[inline(always)]
+ pub fn writef(&mut self, flags: I::Value, value: bool) {
+ self.inner.writef(flags, value)
+ }
+}
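
Since `readf` and `writef` are default methods, a type only has to supply `read` and `write` to get the flag helpers. A small sketch with a register faked by ordinary memory (illustrative only, not part of the crate):

    use syscall::io::{Io, ReadOnly};

    // A fake 8-bit register backed by a plain field, useful in tests.
    struct FakeReg(u8);

    impl Io for FakeReg {
        type Value = u8;

        fn read(&self) -> u8 { self.0 }
        fn write(&mut self, value: u8) { self.0 = value; }
    }

    fn demo() {
        let mut reg = FakeReg(0);
        reg.writef(0b100, true);     // read-modify-write: set bit 2
        assert!(reg.readf(0b100));   // true iff all requested bits are set
        assert!(!reg.readf(0b101));  // bit 0 is still clear

        // ReadOnly statically hides the write half of the trait.
        let ro = ReadOnly::new(FakeReg(0xFF));
        assert!(ro.readf(0x0F));
    }
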
diff --git a/vendor/redox_syscall/src/io/mmio.rs b/vendor/redox_syscall/src/io/mmio.rs
new file mode 100644
index 0000000..ef8f603
--- /dev/null
+++ b/vendor/redox_syscall/src/io/mmio.rs
@@ -0,0 +1,168 @@
+use core::mem::MaybeUninit;
+use core::ptr;
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+use core::ops::{BitAnd, BitOr, Not};
+
+use super::io::Io;
+
+#[repr(packed)]
+pub struct Mmio<T> {
+ value: MaybeUninit<T>,
+}
+
+impl<T> Mmio<T> {
+ /// Create a new Mmio without initializing
+ #[deprecated = "unsound because it's possible to read even though it's uninitialized"]
+ pub fn new() -> Self {
+ unsafe { Self::uninit() }
+ }
+ pub unsafe fn zeroed() -> Self {
+ Self {
+ value: MaybeUninit::zeroed(),
+ }
+ }
+ pub unsafe fn uninit() -> Self {
+ Self {
+ value: MaybeUninit::uninit(),
+ }
+ }
+ pub const fn from(value: T) -> Self {
+ Self {
+ value: MaybeUninit::new(value),
+ }
+ }
+}
+
+// Generic implementation (WARNING: requires aligned pointers!)
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+impl<T> Io for Mmio<T> where T: Copy + PartialEq + BitAnd<Output = T> + BitOr<Output = T> + Not<Output = T> {
+ type Value = T;
+
+ fn read(&self) -> T {
+ unsafe { ptr::read_volatile(ptr::addr_of!(self.value).cast::<T>()) }
+ }
+
+ fn write(&mut self, value: T) {
+ unsafe { ptr::write_volatile(ptr::addr_of_mut!(self.value).cast::<T>(), value) };
+ }
+}
+
+// x86 u8 implementation
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+impl Io for Mmio<u8> {
+ type Value = u8;
+
+ fn read(&self) -> Self::Value {
+ unsafe {
+ let value: Self::Value;
+ let ptr: *const Self::Value = ptr::addr_of!(self.value).cast::<Self::Value>();
+ core::arch::asm!(
+ "mov {}, [{}]",
+ out(reg_byte) value,
+ in(reg) ptr
+ );
+ value
+ }
+ }
+
+ fn write(&mut self, value: Self::Value) {
+ unsafe {
+ let ptr: *mut Self::Value = ptr::addr_of_mut!(self.value).cast::<Self::Value>();
+ core::arch::asm!(
+ "mov [{}], {}",
+ in(reg) ptr,
+ in(reg_byte) value,
+ );
+ }
+ }
+}
+
+// x86 u16 implementation
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+impl Io for Mmio<u16> {
+ type Value = u16;
+
+ fn read(&self) -> Self::Value {
+ unsafe {
+ let value: Self::Value;
+ let ptr: *const Self::Value = ptr::addr_of!(self.value).cast::<Self::Value>();
+ core::arch::asm!(
+ "mov {:x}, [{}]",
+ out(reg) value,
+ in(reg) ptr
+ );
+ value
+ }
+ }
+
+ fn write(&mut self, value: Self::Value) {
+ unsafe {
+ let ptr: *mut Self::Value = ptr::addr_of_mut!(self.value).cast::<Self::Value>();
+ core::arch::asm!(
+ "mov [{}], {:x}",
+ in(reg) ptr,
+ in(reg) value,
+ );
+ }
+ }
+}
+
+// x86 u32 implementation
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+impl Io for Mmio<u32> {
+ type Value = u32;
+
+ fn read(&self) -> Self::Value {
+ unsafe {
+ let value: Self::Value;
+ let ptr: *const Self::Value = ptr::addr_of!(self.value).cast::<Self::Value>();
+ core::arch::asm!(
+ "mov {:e}, [{}]",
+ out(reg) value,
+ in(reg) ptr
+ );
+ value
+ }
+ }
+
+ fn write(&mut self, value: Self::Value) {
+ unsafe {
+ let ptr: *mut Self::Value = ptr::addr_of_mut!(self.value).cast::<Self::Value>();
+ core::arch::asm!(
+ "mov [{}], {:e}",
+ in(reg) ptr,
+ in(reg) value,
+ );
+ }
+ }
+}
+
+// x86 u64 implementation (x86_64 only)
+#[cfg(target_arch = "x86_64")]
+impl Io for Mmio<u64> {
+ type Value = u64;
+
+ fn read(&self) -> Self::Value {
+ unsafe {
+ let value: Self::Value;
+ let ptr: *const Self::Value = ptr::addr_of!(self.value).cast::<Self::Value>();
+ core::arch::asm!(
+ "mov {:r}, [{}]",
+ out(reg) value,
+ in(reg) ptr
+ );
+ value
+ }
+ }
+
+ fn write(&mut self, value: Self::Value) {
+ unsafe {
+ let ptr: *mut Self::Value = ptr::addr_of_mut!(self.value).cast::<Self::Value>();
+ core::arch::asm!(
+ "mov [{}], {:r}",
+ in(reg) ptr,
+ in(reg) value,
+ );
+ }
+ }
+}
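
These wrappers are normally laid out over a device's register block and accessed through a pointer obtained from `physmap`. A sketch under that assumption (the register names are invented for illustration):

    use syscall::io::{Io, Mmio};

    // Hypothetical register layout; Mmio<T> is repr(packed) and align-1,
    // so the fields sit at exact byte offsets with no padding.
    #[repr(packed)]
    struct DeviceRegs {
        control: Mmio<u32>,
        status: Mmio<u32>,
        dma_addr: Mmio<u64>,
    }

    // Caller must pass a pointer to a live, correctly mapped register block.
    unsafe fn reset(base: *mut DeviceRegs) {
        let regs = &mut *base;
        regs.control.write(1);          // volatile store to the device
        while regs.status.readf(1 << 0) {
            core::hint::spin_loop();    // status is re-read volatilely each pass
        }
    }
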
diff --git a/vendor/redox_syscall/src/io/mod.rs b/vendor/redox_syscall/src/io/mod.rs
new file mode 100644
index 0000000..a225f06
--- /dev/null
+++ b/vendor/redox_syscall/src/io/mod.rs
@@ -0,0 +1,15 @@
+//! I/O functions
+
+pub use self::dma::*;
+pub use self::io::*;
+pub use self::mmio::*;
+
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+pub use self::pio::*;
+
+mod dma;
+mod io;
+mod mmio;
+
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+mod pio;
diff --git a/vendor/redox_syscall/src/io/pio.rs b/vendor/redox_syscall/src/io/pio.rs
new file mode 100644
index 0000000..8b837bc
--- /dev/null
+++ b/vendor/redox_syscall/src/io/pio.rs
@@ -0,0 +1,90 @@
+use core::arch::asm;
+use core::marker::PhantomData;
+
+use super::io::Io;
+
+/// Generic PIO
+#[derive(Copy, Clone)]
+pub struct Pio<T> {
+ port: u16,
+ value: PhantomData<T>,
+}
+
+impl<T> Pio<T> {
+ /// Create a PIO from a given port
+ pub const fn new(port: u16) -> Self {
+ Pio::<T> {
+ port,
+ value: PhantomData,
+ }
+ }
+}
+
+/// Read/Write for byte PIO
+impl Io for Pio<u8> {
+ type Value = u8;
+
+ /// Read
+ #[inline(always)]
+ fn read(&self) -> u8 {
+ let value: u8;
+ unsafe {
+ asm!("in al, dx", in("dx") self.port, out("al") value, options(nostack, nomem, preserves_flags));
+ }
+ value
+ }
+
+ /// Write
+ #[inline(always)]
+ fn write(&mut self, value: u8) {
+ unsafe {
+ asm!("out dx, al", in("dx") self.port, in("al") value, options(nostack, nomem, preserves_flags));
+ }
+ }
+}
+
+/// Read/Write for word PIO
+impl Io for Pio<u16> {
+ type Value = u16;
+
+ /// Read
+ #[inline(always)]
+ fn read(&self) -> u16 {
+ let value: u16;
+ unsafe {
+ asm!("in ax, dx", in("dx") self.port, out("ax") value, options(nostack, nomem, preserves_flags));
+ }
+ value
+ }
+
+ /// Write
+ #[inline(always)]
+ fn write(&mut self, value: u16) {
+ unsafe {
+ asm!("out dx, ax", in("dx") self.port, in("ax") value, options(nostack, nomem, preserves_flags));
+ }
+ }
+}
+
+/// Read/Write for doubleword PIO
+impl Io for Pio<u32> {
+ type Value = u32;
+
+ /// Read
+ #[inline(always)]
+ fn read(&self) -> u32 {
+ let value: u32;
+ unsafe {
+ asm!("in eax, dx", in("dx") self.port, out("eax") value, options(nostack, nomem, preserves_flags));
+ }
+ value
+ }
+
+ /// Write
+ #[inline(always)]
+ fn write(&mut self, value: u32) {
+ unsafe {
+ asm!("out dx, eax", in("dx") self.port, in("eax") value, options(nostack, nomem, preserves_flags));
+ }
+ }
+}
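
Finally, a short sketch of `Pio` in use, reading the legacy PS/2 controller status port 0x64 (a well-known x86 port; this only works with I/O privileges, e.g. inside a Redox driver):

    use syscall::io::{Io, Pio, ReadOnly};

    fn ps2_status() -> u8 {
        // Reads from port 0x64 return the PS/2 controller status byte.
        let status = ReadOnly::new(Pio::<u8>::new(0x64));
        status.read()
    }

    fn ps2_command(cmd: u8) {
        // Writes to port 0x64 are interpreted as controller commands.
        Pio::<u8>::new(0x64).write(cmd)
    }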