kernel/io.rs

// SPDX-License-Identifier: GPL-2.0

//! Memory-mapped IO.
//!
//! C header: [`include/asm-generic/io.h`](srctree/include/asm-generic/io.h)

use crate::{
    bindings,
    prelude::*, //
};

pub mod mem;
pub mod poll;
pub mod resource;

pub use resource::Resource;

/// Physical address type.
///
/// This is a type alias to either `u32` or `u64` depending on the config option
/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be `u64` even on 32-bit architectures.
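///
/// # Examples
///
/// A minimal sketch, only meant to show that code should use the alias rather than hard-coding a
/// fixed-width integer type; the value below is arbitrary.
///
/// ```
/// use kernel::io::PhysAddr;
///
/// // The width of `PhysAddr` follows the kernel configuration.
/// let _addr: PhysAddr = 0x1000;
/// ```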
pub type PhysAddr = bindings::phys_addr_t;

/// Resource size type.
///
/// This is a type alias to either `u32` or `u64` depending on the config option
/// `CONFIG_PHYS_ADDR_T_64BIT`, and it can be `u64` even on 32-bit architectures.
pub type ResourceSize = bindings::resource_size_t;

/// Raw representation of an MMIO region.
///
/// By itself, an instance of this structure does not guarantee that the represented MMIO region
/// exists or is properly mapped.
///
/// Instead, the bus-specific MMIO implementation must convert this raw representation into an
/// `Io` instance, which provides the actual memory accessors. Only this conversion into an `Io`
/// instance gives any guarantees.
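///
/// # Examples
///
/// A minimal sketch of the size check performed by `IoRaw::new`; the address below is arbitrary
/// and does not refer to a real mapping, which is fine because `IoRaw` on its own gives no
/// mapping guarantees.
///
/// ```
/// use kernel::io::IoRaw;
///
/// // `maxsize` must be at least `SIZE`, otherwise construction fails.
/// assert!(IoRaw::<4>::new(0x1000, 4).is_ok());
/// assert!(IoRaw::<4>::new(0x1000, 2).is_err());
/// ```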
pub struct IoRaw<const SIZE: usize = 0> {
    addr: usize,
    maxsize: usize,
}

impl<const SIZE: usize> IoRaw<SIZE> {
    /// Returns a new `IoRaw` instance on success, an error otherwise.
    pub fn new(addr: usize, maxsize: usize) -> Result<Self> {
        if maxsize < SIZE {
            return Err(EINVAL);
        }

        Ok(Self { addr, maxsize })
    }

    /// Returns the base address of the MMIO region.
    #[inline]
    pub fn addr(&self) -> usize {
        self.addr
    }

    /// Returns the maximum size of the MMIO region.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.maxsize
    }
}

/// IO-mapped memory region.
///
/// The creator (usually a subsystem / bus such as PCI) is responsible for creating the
/// mapping, performing an additional region request, etc.
///
/// # Invariant
///
/// `addr` is the start and `maxsize` the length of a valid I/O mapped memory region.
///
/// # Examples
///
/// ```no_run
/// use kernel::{
///     bindings,
///     ffi::c_void,
///     io::{
///         Io,
///         IoRaw,
///         PhysAddr,
///     },
/// };
/// use core::ops::Deref;
///
/// // See also `pci::Bar` for a real example.
/// struct IoMem<const SIZE: usize>(IoRaw<SIZE>);
///
/// impl<const SIZE: usize> IoMem<SIZE> {
///     /// # Safety
///     ///
///     /// [`paddr`, `paddr` + `SIZE`) must be a valid MMIO region that is mappable into the
///     /// CPU's virtual address space.
///     unsafe fn new(paddr: usize) -> Result<Self> {
///         // SAFETY: By the safety requirements of this function [`paddr`, `paddr` + `SIZE`) is
///         // valid for `ioremap`.
///         let addr = unsafe { bindings::ioremap(paddr as PhysAddr, SIZE) };
///         if addr.is_null() {
///             return Err(ENOMEM);
///         }
///
///         Ok(IoMem(IoRaw::new(addr as usize, SIZE)?))
///     }
/// }
///
/// impl<const SIZE: usize> Drop for IoMem<SIZE> {
///     fn drop(&mut self) {
///         // SAFETY: `self.0.addr()` is guaranteed to be properly mapped by `Self::new`.
///         unsafe { bindings::iounmap(self.0.addr() as *mut c_void); };
///     }
/// }
///
/// impl<const SIZE: usize> Deref for IoMem<SIZE> {
///     type Target = Io<SIZE>;
///
///     fn deref(&self) -> &Self::Target {
///         // SAFETY: The memory range stored in `self` has been properly mapped in `Self::new`.
///         unsafe { Io::from_raw(&self.0) }
///     }
/// }
///
/// # fn no_run() -> Result<(), Error> {
/// // SAFETY: Invalid usage for example purposes.
/// let iomem = unsafe { IoMem::<{ core::mem::size_of::<u32>() }>::new(0xBAAAAAAD)? };
/// iomem.write32(0x42, 0x0);
/// assert!(iomem.try_write32(0x42, 0x0).is_ok());
/// assert!(iomem.try_write32(0x42, 0x4).is_err());
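///
/// // Reads follow the same pattern (sketch only; `0xBAAAAAAD` is not a real device): `read32`
/// // is bounds-checked at compile time, while `try_read32` and friends are checked at run time,
/// // including the natural alignment of the access.
/// let _value: u32 = iomem.read32(0x0);
/// assert!(iomem.try_read32(0x0).is_ok());
/// assert!(iomem.try_read32(0x4).is_err());
/// assert!(iomem.try_read16(0x1).is_err()); // Misaligned for a 16-bit access.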
/// # Ok(())
/// # }
/// ```
#[repr(transparent)]
pub struct Io<const SIZE: usize = 0>(IoRaw<SIZE>);

macro_rules! define_read {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident -> $type_name:ty) => {
        /// Read IO data from a given offset known at compile time.
        ///
        /// Bounds checks are performed at compile time; hence, if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        // Always inline to optimize out the error path of `io_addr_assert`.
        #[inline(always)]
        pub fn $name(&self, offset: usize) -> $type_name {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(addr as *const c_void) }
        }

        /// Read IO data from a given offset.
        ///
        /// Bounds checks are performed at runtime; it fails if the offset (plus the type size)
        /// is out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            Ok(unsafe { bindings::$c_fn(addr as *const c_void) })
        }
    };
}

macro_rules! define_write {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $c_fn:ident <- $type_name:ty) => {
        /// Write IO data to a given offset known at compile time.
        ///
        /// Bounds checks are performed at compile time; hence, if the offset is not known at
        /// compile time, the build will fail.
        $(#[$attr])*
        // Always inline to optimize out the error path of `io_addr_assert`.
        #[inline(always)]
        pub fn $name(&self, value: $type_name, offset: usize) {
            let addr = self.io_addr_assert::<$type_name>(offset);

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
        }

        /// Write IO data to a given offset.
        ///
        /// Bounds checks are performed at runtime; it fails if the offset (plus the type size)
        /// is out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
            let addr = self.io_addr::<$type_name>(offset)?;

            // SAFETY: By the type invariant `addr` is a valid address for MMIO operations.
            unsafe { bindings::$c_fn(value, addr as *mut c_void) }
            Ok(())
        }
    };
}

impl<const SIZE: usize> Io<SIZE> {
    /// Converts an `IoRaw` into an `Io` instance, providing the accessors to the MMIO mapping.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `raw.addr()` is the start of a valid I/O mapped memory region of
    /// size `raw.maxsize()`.
    pub unsafe fn from_raw(raw: &IoRaw<SIZE>) -> &Self {
        // SAFETY: `Io` is a transparent wrapper around `IoRaw`.
        unsafe { &*core::ptr::from_ref(raw).cast() }
    }

    /// Returns the base address of this mapping.
    #[inline]
    pub fn addr(&self) -> usize {
        self.0.addr()
    }

    /// Returns the maximum size of this mapping.
    #[inline]
    pub fn maxsize(&self) -> usize {
        self.0.maxsize()
    }

    /// Returns `true` if an access of `size_of::<U>()` bytes at `offset` is within `size` bytes
    /// and naturally aligned.
    #[inline]
    const fn offset_valid<U>(offset: usize, size: usize) -> bool {
        let type_size = core::mem::size_of::<U>();
        if let Some(end) = offset.checked_add(type_size) {
            end <= size && offset % type_size == 0
        } else {
            false
        }
    }

    /// Returns the absolute I/O address for `offset`, or `EINVAL` if the access would be out of
    /// bounds or misaligned.
    #[inline]
    fn io_addr<U>(&self, offset: usize) -> Result<usize> {
        if !Self::offset_valid::<U>(offset, self.maxsize()) {
            return Err(EINVAL);
        }

        // Probably no need to check, since the safety requirements of `Self::from_raw` guarantee
        // that this can't overflow.
        self.addr().checked_add(offset).ok_or(EINVAL)
    }

    /// Like [`Self::io_addr`], but performs the bounds check at compile time via `build_assert!`.
    // Always inline to optimize out the error path of `build_assert`.
    #[inline(always)]
    fn io_addr_assert<U>(&self, offset: usize) -> usize {
        build_assert!(Self::offset_valid::<U>(offset, SIZE));

        self.addr() + offset
    }

    define_read!(read8, try_read8, readb -> u8);
    define_read!(read16, try_read16, readw -> u16);
    define_read!(read32, try_read32, readl -> u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        read64,
        try_read64,
        readq -> u64
    );

    // The `_relaxed` variants map to the relaxed MMIO helpers, which do not provide the ordering
    // guarantees of the plain accessors.
    define_read!(read8_relaxed, try_read8_relaxed, readb_relaxed -> u8);
    define_read!(read16_relaxed, try_read16_relaxed, readw_relaxed -> u16);
    define_read!(read32_relaxed, try_read32_relaxed, readl_relaxed -> u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        read64_relaxed,
        try_read64_relaxed,
        readq_relaxed -> u64
    );

    define_write!(write8, try_write8, writeb <- u8);
    define_write!(write16, try_write16, writew <- u16);
    define_write!(write32, try_write32, writel <- u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        write64,
        try_write64,
        writeq <- u64
    );

    define_write!(write8_relaxed, try_write8_relaxed, writeb_relaxed <- u8);
    define_write!(write16_relaxed, try_write16_relaxed, writew_relaxed <- u16);
    define_write!(write32_relaxed, try_write32_relaxed, writel_relaxed <- u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        write64_relaxed,
        try_write64_relaxed,
        writeq_relaxed <- u64
    );
}