// SPDX-License-Identifier: GPL-2.0

//! Direct memory access (DMA).
//!
//! C header: [`include/linux/dma-mapping.h`](srctree/include/linux/dma-mapping.h)

use crate::{
    bindings, build_assert, device,
    device::{Bound, Core},
    error::{to_result, Result},
    prelude::*,
    sync::aref::ARef,
    transmute::{AsBytes, FromBytes},
};
use core::ptr::NonNull;

/// DMA address type.
///
/// Represents a bus address used for Direct Memory Access (DMA) operations.
///
/// This is an alias of the kernel's `dma_addr_t`, which may be `u32` or `u64` depending on
/// `CONFIG_ARCH_DMA_ADDR_T_64BIT`.
///
/// Note that this may be `u64` even on 32-bit architectures.
pub type DmaAddress = bindings::dma_addr_t;

/// Trait to be implemented by DMA-capable bus devices.
///
/// The [`dma::Device`](Device) trait should be implemented by bus-specific device representations,
/// where the underlying bus is DMA capable, such as:
#[cfg_attr(CONFIG_PCI, doc = "* [`pci::Device`](kernel::pci::Device)")]
/// * [`platform::Device`](::kernel::platform::Device)
pub trait Device: AsRef<device::Device<Core>> {
    /// Set up the device's DMA streaming addressing capabilities.
    ///
    /// This method is usually called once from `probe()` as soon as the device capabilities are
    /// known.
    ///
    /// # Safety
    ///
    /// This method must not be called concurrently with any DMA allocation or mapping primitives,
    /// such as [`CoherentAllocation::alloc_attrs`].
    unsafe fn dma_set_mask(&self, mask: DmaMask) -> Result {
        // SAFETY:
        // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
        // - The safety requirement of this function guarantees that there are no concurrent calls
        //   to DMA allocation and mapping primitives using this mask.
        to_result(unsafe { bindings::dma_set_mask(self.as_ref().as_raw(), mask.value()) })
    }

    /// Set up the device's DMA coherent addressing capabilities.
    ///
    /// This method is usually called once from `probe()` as soon as the device capabilities are
    /// known.
    ///
    /// # Safety
    ///
    /// This method must not be called concurrently with any DMA allocation or mapping primitives,
    /// such as [`CoherentAllocation::alloc_attrs`].
    unsafe fn dma_set_coherent_mask(&self, mask: DmaMask) -> Result {
        // SAFETY:
        // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
        // - The safety requirement of this function guarantees that there are no concurrent calls
        //   to DMA allocation and mapping primitives using this mask.
        to_result(unsafe { bindings::dma_set_coherent_mask(self.as_ref().as_raw(), mask.value()) })
    }

    /// Set up the device's DMA addressing capabilities.
    ///
    /// This is a combination of [`Device::dma_set_mask`] and [`Device::dma_set_coherent_mask`].
    ///
    /// This method is usually called once from `probe()` as soon as the device capabilities are
    /// known.
    ///
    /// # Safety
    ///
    /// This method must not be called concurrently with any DMA allocation or mapping primitives,
    /// such as [`CoherentAllocation::alloc_attrs`].
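    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming some bus device type implementing this trait; the 64-bit mask
    /// width is illustrative, and the call typically happens in `probe()`:
    ///
    /// ```
    /// use kernel::dma::{Device, DmaMask};
    ///
    /// fn setup_dma(dev: &impl Device) -> Result {
    ///     // SAFETY: Called before any DMA allocation or mapping primitives are used for this
    ///     // device.
    ///     unsafe { dev.dma_set_mask_and_coherent(DmaMask::new::<64>()) }
    /// }
    /// ```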
    unsafe fn dma_set_mask_and_coherent(&self, mask: DmaMask) -> Result {
        // SAFETY:
        // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
        // - The safety requirement of this function guarantees that there are no concurrent calls
        //   to DMA allocation and mapping primitives using this mask.
        to_result(unsafe {
            bindings::dma_set_mask_and_coherent(self.as_ref().as_raw(), mask.value())
        })
    }

    /// Set the maximum size of a single DMA segment the device may request.
    ///
    /// This method is usually called once from `probe()` as soon as the device capabilities are
    /// known.
    ///
    /// # Safety
    ///
    /// This method must not be called concurrently with any DMA allocation or mapping primitives,
    /// such as [`CoherentAllocation::alloc_attrs`].
    unsafe fn dma_set_max_seg_size(&self, size: u32) {
        // SAFETY:
        // - By the type invariant of `device::Device`, `self.as_ref().as_raw()` is valid.
        // - The safety requirement of this function guarantees that there are no concurrent calls
        //   to DMA allocation and mapping primitives using this parameter.
        unsafe { bindings::dma_set_max_seg_size(self.as_ref().as_raw(), size) }
    }
}

/// A DMA mask that holds a bitmask with the lowest `n` bits set.
///
/// Use [`DmaMask::new`] or [`DmaMask::try_new`] to construct a value. Values
/// are guaranteed to never exceed the bit width of `u64`.
///
/// This is the Rust equivalent of the C macro `DMA_BIT_MASK()`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DmaMask(u64);

impl DmaMask {
    /// Constructs a `DmaMask` with the lowest `n` bits set to `1`.
    ///
    /// For `n <= 64`, sets exactly the lowest `n` bits.
    /// For `n > 64`, results in a build error.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::dma::DmaMask;
    ///
    /// let mask0 = DmaMask::new::<0>();
    /// assert_eq!(mask0.value(), 0);
    ///
    /// let mask1 = DmaMask::new::<1>();
    /// assert_eq!(mask1.value(), 0b1);
    ///
    /// let mask64 = DmaMask::new::<64>();
    /// assert_eq!(mask64.value(), u64::MAX);
    ///
    /// // Build failure.
    /// // let mask_overflow = DmaMask::new::<100>();
    /// ```
    #[inline]
    pub const fn new<const N: u32>() -> Self {
        let Ok(mask) = Self::try_new(N) else {
            build_error!("Invalid DMA Mask.");
        };

        mask
    }

    /// Constructs a `DmaMask` with the lowest `n` bits set to `1`.
    ///
    /// For `n <= 64`, sets exactly the lowest `n` bits.
    /// For `n > 64`, returns [`EINVAL`].
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::dma::DmaMask;
    ///
    /// let mask0 = DmaMask::try_new(0)?;
    /// assert_eq!(mask0.value(), 0);
    ///
    /// let mask1 = DmaMask::try_new(1)?;
    /// assert_eq!(mask1.value(), 0b1);
    ///
    /// let mask64 = DmaMask::try_new(64)?;
    /// assert_eq!(mask64.value(), u64::MAX);
    ///
    /// let mask_overflow = DmaMask::try_new(100);
    /// assert!(mask_overflow.is_err());
    /// # Ok::<(), Error>(())
    /// ```
    #[inline]
    pub const fn try_new(n: u32) -> Result<Self> {
        Ok(Self(match n {
            0 => 0,
            1..=64 => u64::MAX >> (64 - n),
            _ => return Err(EINVAL),
        }))
    }

    /// Returns the underlying `u64` bitmask value.
    #[inline]
    pub const fn value(&self) -> u64 {
        self.0
    }
}

/// Possible attributes associated with a DMA mapping.
///
/// They can be combined with the operators `|`, `&`, and `!`.
///
/// Values can be used from the [`attrs`] module.
///
/// # Examples
///
/// ```
/// # use kernel::device::{Bound, Device};
/// use kernel::dma::{attrs::*, CoherentAllocation};
///
/// # fn test(dev: &Device<Bound>) -> Result {
/// let attribs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
/// let c: CoherentAllocation<u64> =
///     CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, attribs)?;
/// # Ok::<(), Error>(()) }
/// ```
#[derive(Clone, Copy, PartialEq)]
#[repr(transparent)]
pub struct Attrs(u32);

impl Attrs {
    /// Get the raw representation of this attribute.
    pub(crate) fn as_raw(self) -> crate::ffi::c_ulong {
        self.0 as crate::ffi::c_ulong
    }

    /// Check whether `flags` is contained in `self`.
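    ///
    /// # Examples
    ///
    /// A short example of combining attributes and testing membership:
    ///
    /// ```
    /// use kernel::dma::attrs::*;
    ///
    /// let attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
    /// assert!(attrs.contains(DMA_ATTR_NO_WARN));
    /// assert!(!attrs.contains(DMA_ATTR_WEAK_ORDERING));
    /// ```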
    pub fn contains(self, flags: Attrs) -> bool {
        (self & flags) == flags
    }
}

impl core::ops::BitOr for Attrs {
    type Output = Self;
    fn bitor(self, rhs: Self) -> Self::Output {
        Self(self.0 | rhs.0)
    }
}

impl core::ops::BitAnd for Attrs {
    type Output = Self;
    fn bitand(self, rhs: Self) -> Self::Output {
        Self(self.0 & rhs.0)
    }
}

impl core::ops::Not for Attrs {
    type Output = Self;
    fn not(self) -> Self::Output {
        Self(!self.0)
    }
}

/// DMA mapping attributes.
pub mod attrs {
    use super::Attrs;

    /// Specifies that reads and writes to the mapping may be weakly ordered, that is, reads and
    /// writes may pass each other.
    pub const DMA_ATTR_WEAK_ORDERING: Attrs = Attrs(bindings::DMA_ATTR_WEAK_ORDERING);

    /// Specifies that writes to the mapping may be buffered to improve performance.
    pub const DMA_ATTR_WRITE_COMBINE: Attrs = Attrs(bindings::DMA_ATTR_WRITE_COMBINE);

    /// Lets the platform avoid creating a kernel virtual mapping for the allocated buffer.
    pub const DMA_ATTR_NO_KERNEL_MAPPING: Attrs = Attrs(bindings::DMA_ATTR_NO_KERNEL_MAPPING);

    /// Allows platform code to skip synchronization of the CPU cache for the given buffer,
    /// assuming that it has already been transferred to the 'device' domain.
    pub const DMA_ATTR_SKIP_CPU_SYNC: Attrs = Attrs(bindings::DMA_ATTR_SKIP_CPU_SYNC);

    /// Forces contiguous allocation of the buffer in physical memory.
    pub const DMA_ATTR_FORCE_CONTIGUOUS: Attrs = Attrs(bindings::DMA_ATTR_FORCE_CONTIGUOUS);

    /// Hints to the DMA-mapping subsystem that it's probably not worth the time trying to
    /// allocate memory in a way that gives better TLB efficiency.
    pub const DMA_ATTR_ALLOC_SINGLE_PAGES: Attrs = Attrs(bindings::DMA_ATTR_ALLOC_SINGLE_PAGES);

    /// Tells the DMA-mapping subsystem to suppress allocation failure reports (similarly to
    /// `__GFP_NOWARN`).
    pub const DMA_ATTR_NO_WARN: Attrs = Attrs(bindings::DMA_ATTR_NO_WARN);

    /// Indicates that the buffer is fully accessible at an elevated privilege level (and
    /// ideally inaccessible or at least read-only at lesser-privileged levels).
    pub const DMA_ATTR_PRIVILEGED: Attrs = Attrs(bindings::DMA_ATTR_PRIVILEGED);

    /// Indicates that the buffer is MMIO memory.
    pub const DMA_ATTR_MMIO: Attrs = Attrs(bindings::DMA_ATTR_MMIO);
}

/// DMA data direction.
///
/// Corresponds to the C [`enum dma_data_direction`].
///
/// [`enum dma_data_direction`]: srctree/include/linux/dma-direction.h
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(u32)]
pub enum DataDirection {
    /// The DMA mapping is for bidirectional data transfer.
    ///
    /// This is used when the buffer can be both read from and written to by the device.
    /// The cache for the corresponding memory region is both flushed and invalidated.
    Bidirectional = Self::const_cast(bindings::dma_data_direction_DMA_BIDIRECTIONAL),

    /// The DMA mapping is for data transfer from memory to the device (write).
    ///
    /// The CPU has prepared data in the buffer, and the device will read it.
    /// The cache for the corresponding memory region is flushed before device access.
    ToDevice = Self::const_cast(bindings::dma_data_direction_DMA_TO_DEVICE),

    /// The DMA mapping is for data transfer from the device to memory (read).
    ///
    /// The device will write data into the buffer for the CPU to read.
    /// The cache for the corresponding memory region is invalidated before CPU access.
    FromDevice = Self::const_cast(bindings::dma_data_direction_DMA_FROM_DEVICE),

    /// The DMA mapping is not for data transfer.
    ///
    /// This is primarily for debugging purposes. With this direction, the DMA mapping API
    /// will not perform any cache coherency operations.
    None = Self::const_cast(bindings::dma_data_direction_DMA_NONE),
}

impl DataDirection {
    /// Casts the bindgen-generated enum type to a `u32` at compile time.
    ///
    /// This function will cause a compile-time error if the underlying value of the
    /// C enum is out of bounds for `u32`.
    const fn const_cast(val: bindings::dma_data_direction) -> u32 {
        // CAST: The C standard allows compilers to choose different integer types for enums.
        // To safely check the value, we cast it to a wide signed integer type (`i128`)
        // which can hold any standard C integer enum type without truncation.
        let wide_val = val as i128;

        // Check if the value is outside the valid range for the target type `u32`.
        // CAST: `u32::MAX` is cast to `i128` to match the type of `wide_val` for the comparison.
        if wide_val < 0 || wide_val > u32::MAX as i128 {
            // Trigger a compile-time error in a const context.
            build_error!("C enum value is out of bounds for the target type `u32`.");
        }

        // CAST: This cast is valid because the check above guarantees that `wide_val`
        // is within the representable range of `u32`.
        wide_val as u32
    }
}

impl From<DataDirection> for bindings::dma_data_direction {
    /// Returns the raw representation of [`enum dma_data_direction`].
    fn from(direction: DataDirection) -> Self {
        // CAST: `direction as u32` gets the underlying representation of our `#[repr(u32)]` enum.
        // The subsequent cast to `Self` (the bindgen type) assumes the C enum is compatible
        // with the enum variants of `DataDirection`, which is a valid assumption given our
        // compile-time checks.
        direction as u32 as Self
    }
}

/// An abstraction of the `dma_alloc_coherent` API.
///
/// This is an abstraction around the `dma_alloc_coherent` API which is used to allocate and map
/// large coherent DMA regions.
///
/// A [`CoherentAllocation`] instance contains a pointer to the allocated region (in the
/// processor's virtual address space) and the device address which can be given to the device
/// as the DMA address base of the region. The region is released once [`CoherentAllocation`]
/// is dropped.
///
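/// # Examples
///
/// A minimal sketch of the typical flow; the element type, count, and data below are
/// illustrative:
///
/// ```
/// # use kernel::device::{Bound, Device};
/// use kernel::dma::CoherentAllocation;
///
/// # fn test(dev: &Device<Bound>) -> Result {
/// let mut alloc: CoherentAllocation<u32> =
///     CoherentAllocation::alloc_coherent(dev, 16, GFP_KERNEL)?;
///
/// // Fill the region from the CPU side before handing it to the device.
/// // SAFETY: No other read or write to the region races with this write, and the device is not
/// // yet accessing the region.
/// unsafe { alloc.write(&[0x0; 16], 0)? };
///
/// // The bus address that can be programmed into the device as the base of the region.
/// let dma_base = alloc.dma_handle();
/// # Ok::<(), Error>(()) }
/// ```
///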
/// # Invariants
///
/// - For the lifetime of an instance of [`CoherentAllocation`], the `cpu_addr` is a valid pointer
///   to an allocated region of coherent memory and `dma_handle` is the DMA address base of the
///   region.
/// - The size in bytes of the allocation is equal to `size_of::<T> * count`.
/// - `size_of::<T> * count` fits into a `usize`.
// TODO
//
// DMA allocations potentially carry device resources (e.g. IOMMU mappings), hence for soundness
// reasons DMA allocation would need to be embedded in a `Devres` container, in order to ensure
// that device resources can never survive device unbind.
//
// However, it is neither desirable nor necessary to protect the allocated memory of the DMA
// allocation from surviving device unbind; it would require RCU read side critical sections to
// access the memory, which may require subsequent unnecessary copies.
//
// Hence, find a way to revoke the device resources of a `CoherentAllocation`, but not the
// entire `CoherentAllocation` including the allocated memory itself.
pub struct CoherentAllocation<T: AsBytes + FromBytes> {
    dev: ARef<device::Device>,
    dma_handle: DmaAddress,
    count: usize,
    cpu_addr: NonNull<T>,
    dma_attrs: Attrs,
}

impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
    /// Allocates a region of `size_of::<T> * count` of coherent memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use kernel::device::{Bound, Device};
    /// use kernel::dma::{attrs::*, CoherentAllocation};
    ///
    /// # fn test(dev: &Device<Bound>) -> Result {
    /// let c: CoherentAllocation<u64> =
    ///     CoherentAllocation::alloc_attrs(dev, 4, GFP_KERNEL, DMA_ATTR_NO_WARN)?;
    /// # Ok::<(), Error>(()) }
    /// ```
    pub fn alloc_attrs(
        dev: &device::Device<Bound>,
        count: usize,
        gfp_flags: kernel::alloc::Flags,
        dma_attrs: Attrs,
    ) -> Result<CoherentAllocation<T>> {
        build_assert!(
            core::mem::size_of::<T>() > 0,
            "It doesn't make sense for the allocated type to be a ZST"
        );

        let size = count
            .checked_mul(core::mem::size_of::<T>())
            .ok_or(EOVERFLOW)?;
        let mut dma_handle = 0;
        // SAFETY: The device pointer is guaranteed to be valid by the type invariant on `Device`.
        let addr = unsafe {
            bindings::dma_alloc_attrs(
                dev.as_raw(),
                size,
                &mut dma_handle,
                gfp_flags.as_raw(),
                dma_attrs.as_raw(),
            )
        };
        let addr = NonNull::new(addr).ok_or(ENOMEM)?;
        // INVARIANT:
        // - We just successfully allocated a coherent region which is accessible for
        //   `count` elements, hence the cpu address is valid. We also hold a refcounted reference
        //   to the device.
        // - The allocated `size` is equal to `size_of::<T> * count`.
        // - The allocated `size` fits into a `usize`.
        Ok(Self {
            dev: dev.into(),
            dma_handle,
            count,
            cpu_addr: addr.cast(),
            dma_attrs,
        })
    }

    /// Performs the same functionality as [`CoherentAllocation::alloc_attrs`], except that
    /// `dma_attrs` defaults to 0.
    pub fn alloc_coherent(
        dev: &device::Device<Bound>,
        count: usize,
        gfp_flags: kernel::alloc::Flags,
    ) -> Result<CoherentAllocation<T>> {
        CoherentAllocation::alloc_attrs(dev, count, gfp_flags, Attrs(0))
    }

    /// Returns the number of elements `T` in this allocation.
    ///
    /// Note that this is not the size of the allocation in bytes, which is provided by
    /// [`Self::size`].
    pub fn count(&self) -> usize {
        self.count
    }

    /// Returns the size in bytes of this allocation.
    pub fn size(&self) -> usize {
        // INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count` fits into
        // a `usize`.
        self.count * core::mem::size_of::<T>()
    }

    /// Returns the raw pointer to the allocated region in the CPU's virtual address space.
    #[inline]
    pub fn as_ptr(&self) -> *const [T] {
        core::ptr::slice_from_raw_parts(self.cpu_addr.as_ptr(), self.count)
    }

    /// Returns the raw pointer to the allocated region in the CPU's virtual address space as
    /// a mutable pointer.
    #[inline]
    pub fn as_mut_ptr(&self) -> *mut [T] {
        core::ptr::slice_from_raw_parts_mut(self.cpu_addr.as_ptr(), self.count)
    }

    /// Returns the base address of the allocated region in the CPU's virtual address space.
    pub fn start_ptr(&self) -> *const T {
        self.cpu_addr.as_ptr()
    }

    /// Returns the base address of the allocated region in the CPU's virtual address space as
    /// a mutable pointer.
    pub fn start_ptr_mut(&mut self) -> *mut T {
        self.cpu_addr.as_ptr()
    }

    /// Returns a DMA handle which may be given to the device as the DMA address base of
    /// the region.
    pub fn dma_handle(&self) -> DmaAddress {
        self.dma_handle
    }

    /// Returns a DMA handle starting at `offset` (in units of `T`) which may be given to the
    /// device as the DMA address base of the region.
    ///
    /// Returns `EINVAL` if `offset` is not within the bounds of the allocation.
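    ///
    /// # Examples
    ///
    /// A short sketch, assuming an allocation that holds more than two elements:
    ///
    /// ```
    /// # fn test(alloc: &kernel::dma::CoherentAllocation<u64>) -> Result {
    /// // The handle for element 2 is the base handle plus two times the element size in bytes.
    /// let handle = alloc.dma_handle_with_offset(2)?;
    /// assert_eq!(handle, alloc.dma_handle() + 16);
    /// # Ok::<(), Error>(()) }
    /// ```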
    pub fn dma_handle_with_offset(&self, offset: usize) -> Result<DmaAddress> {
        if offset >= self.count {
            Err(EINVAL)
        } else {
            // INVARIANT: The type invariant of `Self` guarantees that `size_of::<T> * count` fits
            // into a `usize`, and `offset` is less than `count`.
            Ok(self.dma_handle + (offset * core::mem::size_of::<T>()) as DmaAddress)
        }
    }

    /// Common helper to validate a range within the allocated region in the CPU's virtual
    /// address space.
    fn validate_range(&self, offset: usize, count: usize) -> Result {
        if offset.checked_add(count).ok_or(EOVERFLOW)? > self.count {
            return Err(EINVAL);
        }
        Ok(())
    }

    /// Returns the data from the region starting from `offset` as a slice.
    /// `offset` and `count` are in units of `T`, not the number of bytes.
    ///
    /// For ringbuffer-style read/write access, or use cases where a pointer to the live data is
    /// needed, [`CoherentAllocation::start_ptr`] or [`CoherentAllocation::start_ptr_mut`] can be
    /// used instead.
    ///
    /// # Safety
    ///
    /// * Callers must ensure that the device does not read/write to/from memory while the returned
    ///   slice is live.
    /// * Callers must ensure that this call does not race with a write to the same region while
    ///   the returned slice is live.
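    ///
    /// # Examples
    ///
    /// A short sketch; the range is illustrative and the `SAFETY` comment states the conditions
    /// the caller is assumed to uphold:
    ///
    /// ```
    /// # fn test(alloc: &kernel::dma::CoherentAllocation<u8>) -> Result {
    /// // SAFETY: The device is not accessing the region, and nothing writes to the region while
    /// // the returned slice is live.
    /// let bytes = unsafe { alloc.as_slice(0, 4)? };
    /// let first = bytes[0];
    /// # Ok::<(), Error>(()) }
    /// ```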
    pub unsafe fn as_slice(&self, offset: usize, count: usize) -> Result<&[T]> {
        self.validate_range(offset, count)?;
        // SAFETY:
        // - The pointer is valid due to the type invariant on `CoherentAllocation`, and we've
        //   just checked that the range and index are within bounds. The immutability of the
        //   data is also guaranteed by the safety requirements of the function.
        // - `offset + count` can't overflow since it is no larger than `self.count`, and the
        //   constructor checked that `self.count * size_of::<T>()` doesn't overflow a `usize`.
        Ok(unsafe { core::slice::from_raw_parts(self.start_ptr().add(offset), count) })
    }

    /// Performs the same functionality as [`CoherentAllocation::as_slice`], except that a mutable
    /// slice is returned.
    ///
    /// # Safety
    ///
    /// * Callers must ensure that the device does not read/write to/from memory while the returned
    ///   slice is live.
    /// * Callers must ensure that this call does not race with a read or write to the same region
    ///   while the returned slice is live.
    pub unsafe fn as_slice_mut(&mut self, offset: usize, count: usize) -> Result<&mut [T]> {
        self.validate_range(offset, count)?;
        // SAFETY:
        // - The pointer is valid due to the type invariant on `CoherentAllocation`, and we've
        //   just checked that the range and index are within bounds. Exclusive access to the
        //   data is also guaranteed by the safety requirements of the function.
        // - `offset + count` can't overflow since it is no larger than `self.count`, and the
        //   constructor checked that `self.count * size_of::<T>()` doesn't overflow a `usize`.
        Ok(unsafe { core::slice::from_raw_parts_mut(self.start_ptr_mut().add(offset), count) })
    }

    /// Writes data to the region starting from `offset`. `offset` is in units of `T`, not the
    /// number of bytes.
    ///
    /// # Safety
    ///
    /// * Callers must ensure that this call does not race with a read or write to the same region
    ///   that overlaps with this write.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn test(alloc: &mut kernel::dma::CoherentAllocation<u8>) -> Result {
    /// let somedata: [u8; 4] = [0xf; 4];
    /// let buf: &[u8] = &somedata;
    /// // SAFETY: There is no concurrent HW operation on the device and no other R/W access to the
    /// // region.
    /// unsafe { alloc.write(buf, 0)?; }
    /// # Ok::<(), Error>(()) }
    /// ```
    pub unsafe fn write(&mut self, src: &[T], offset: usize) -> Result {
        self.validate_range(offset, src.len())?;
        // SAFETY:
        // - The pointer is valid due to the type invariant on `CoherentAllocation`,
        //   and we've just checked that the range and index are within bounds.
        // - `offset + src.len()` can't overflow since it is no larger than `self.count`, and the
        //   constructor checked that `self.count * size_of::<T>()` doesn't overflow a `usize`.
        unsafe {
            core::ptr::copy_nonoverlapping(
                src.as_ptr(),
                self.start_ptr_mut().add(offset),
                src.len(),
            )
        };
        Ok(())
    }

    /// Reads the value of `field` and ensures that its type is [`FromBytes`].
    ///
    /// # Safety
    ///
    /// This must be called from the [`dma_read`] macro which ensures that the `field` pointer is
    /// validated beforehand.
    ///
    /// Public but hidden since it should only be used from [`dma_read`] macro.
    #[doc(hidden)]
    pub unsafe fn field_read<F: FromBytes>(&self, field: *const F) -> F {
        // SAFETY:
        // - By the safety requirements, `field` is valid.
        // - Using `read_volatile()` here is not sound as per the usual rules; the usage here is
        //   a special exception with the following notes in place. When dealing with a potential
        //   race from hardware or code outside the kernel (e.g. a user-space program), we need
        //   reads of valid memory to not be UB. Currently `read_volatile()` is used for this, the
        //   rationale being that it should generate the same code as `READ_ONCE()`, which the
        //   kernel already relies on to avoid UB on data races. Note that the usage of
        //   `read_volatile()` is limited to this particular case; it cannot be used to prevent
        //   the UB caused by racing between two kernel functions, nor does it provide atomicity.
        unsafe { field.read_volatile() }
    }

    /// Writes a value to `field` and ensures that its type is [`AsBytes`].
    ///
    /// # Safety
    ///
    /// This must be called from the [`dma_write`] macro which ensures that the `field` pointer is
    /// validated beforehand.
    ///
    /// Public but hidden since it should only be used from [`dma_write`] macro.
    #[doc(hidden)]
    pub unsafe fn field_write<F: AsBytes>(&self, field: *mut F, val: F) {
        // SAFETY:
        // - By the safety requirements, `field` is valid.
        // - Using `write_volatile()` here is not sound as per the usual rules; the usage here is
        //   a special exception with the following notes in place. When dealing with a potential
        //   race from hardware or code outside the kernel (e.g. a user-space program), we need
        //   writes to valid memory to not be UB. Currently `write_volatile()` is used for this,
        //   the rationale being that it should generate the same code as `WRITE_ONCE()`, which the
        //   kernel already relies on to avoid UB on data races. Note that the usage of
        //   `write_volatile()` is limited to this particular case; it cannot be used to prevent
        //   the UB caused by racing between two kernel functions, nor does it provide atomicity.
        unsafe { field.write_volatile(val) }
    }
}

/// Note that the device configured for DMA must be halted before this object is dropped.
impl<T: AsBytes + FromBytes> Drop for CoherentAllocation<T> {
    fn drop(&mut self) {
        let size = self.count * core::mem::size_of::<T>();
        // SAFETY: The device pointer is guaranteed to be valid by the type invariant on `Device`.
        // The cpu address and the dma handle are valid due to the type invariants on
        // `CoherentAllocation`.
        unsafe {
            bindings::dma_free_attrs(
                self.dev.as_raw(),
                size,
                self.start_ptr_mut().cast(),
                self.dma_handle,
                self.dma_attrs.as_raw(),
            )
        }
    }
}

// SAFETY: It is safe to send a `CoherentAllocation` to another thread if `T`
// can be sent to another thread.
unsafe impl<T: AsBytes + FromBytes + Send> Send for CoherentAllocation<T> {}

/// Reads a field of an item from an allocated region of structs.
///
/// The syntax is of the form `kernel::dma_read!(dma, proj)` where `dma` is an expression evaluating
/// to a [`CoherentAllocation`] and `proj` is a [projection specification](kernel::ptr::project!).
///
/// # Examples
///
/// ```
/// use kernel::device::Device;
/// use kernel::dma::{attrs::*, CoherentAllocation};
///
/// struct MyStruct { field: u32, }
///
/// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
/// unsafe impl kernel::transmute::FromBytes for MyStruct {}
/// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
/// unsafe impl kernel::transmute::AsBytes for MyStruct {}
///
/// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
/// let whole = kernel::dma_read!(alloc, [2]?);
/// let field = kernel::dma_read!(alloc, [1]?.field);
/// # Ok::<(), Error>(()) }
/// ```
#[macro_export]
macro_rules! dma_read {
    ($dma:expr, $($proj:tt)*) => {{
        let dma = &$dma;
        let ptr = $crate::ptr::project!(
            $crate::dma::CoherentAllocation::as_ptr(dma), $($proj)*
        );
        // SAFETY: The pointer created by the projection is within the DMA region.
        unsafe { $crate::dma::CoherentAllocation::field_read(dma, ptr) }
    }};
}

/// Writes to a field of an item from an allocated region of structs.
///
/// The syntax is of the form `kernel::dma_write!(dma, proj, val)` where `dma` is an expression
/// evaluating to a [`CoherentAllocation`], `proj` is a
/// [projection specification](kernel::ptr::project!), and `val` is the value to be written to the
/// projected location.
///
/// # Examples
///
/// ```
/// use kernel::device::Device;
/// use kernel::dma::{attrs::*, CoherentAllocation};
///
/// struct MyStruct { member: u32, }
///
/// // SAFETY: All bit patterns are acceptable values for `MyStruct`.
/// unsafe impl kernel::transmute::FromBytes for MyStruct {}
/// // SAFETY: Instances of `MyStruct` have no uninitialized portions.
/// unsafe impl kernel::transmute::AsBytes for MyStruct {}
///
/// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
/// kernel::dma_write!(alloc, [2]?.member, 0xf);
/// kernel::dma_write!(alloc, [1]?, MyStruct { member: 0xf });
/// # Ok::<(), Error>(()) }
/// ```
#[macro_export]
macro_rules! dma_write {
    (@parse [$dma:expr] [$($proj:tt)*] [, $val:expr]) => {{
        let dma = &$dma;
        let ptr = $crate::ptr::project!(
            mut $crate::dma::CoherentAllocation::as_mut_ptr(dma), $($proj)*
        );
        let val = $val;
        // SAFETY: The pointer created by the projection is within the DMA region.
        unsafe { $crate::dma::CoherentAllocation::field_write(dma, ptr, val) }
    }};
    (@parse [$dma:expr] [$($proj:tt)*] [.$field:tt $($rest:tt)*]) => {
        $crate::dma_write!(@parse [$dma] [$($proj)* .$field] [$($rest)*])
    };
    (@parse [$dma:expr] [$($proj:tt)*] [[$index:expr]? $($rest:tt)*]) => {
        $crate::dma_write!(@parse [$dma] [$($proj)* [$index]?] [$($rest)*])
    };
    (@parse [$dma:expr] [$($proj:tt)*] [[$index:expr] $($rest:tt)*]) => {
        $crate::dma_write!(@parse [$dma] [$($proj)* [$index]] [$($rest)*])
    };
    ($dma:expr, $($rest:tt)*) => {
        $crate::dma_write!(@parse [$dma] [] [$($rest)*])
    };
}