kernel/uaccess.rs
1// SPDX-License-Identifier: GPL-2.0
2
3//! Slices to user space memory regions.
4//!
5//! C header: [`include/linux/uaccess.h`](srctree/include/linux/uaccess.h)
6
7use crate::{
8 alloc::{Allocator, Flags},
9 bindings,
10 dma::Coherent,
11 error::Result,
12 ffi::{c_char, c_void},
13 fs::file,
14 prelude::*,
15 transmute::{AsBytes, FromBytes},
16};
17use core::mem::{size_of, MaybeUninit};
18
/// A pointer into userspace.
///
/// This is the Rust equivalent to C pointers tagged with `__user`.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub struct UserPtr(*mut c_void);

impl UserPtr {
    /// Create a `UserPtr` from an integer representing the userspace address.
    #[inline]
    pub fn from_addr(addr: usize) -> Self {
        // Funnel through `from_ptr` so there is a single construction site.
        Self::from_ptr(addr as *mut c_void)
    }

    /// Create a `UserPtr` from a pointer representing the userspace address.
    #[inline]
    pub fn from_ptr(addr: *mut c_void) -> Self {
        Self(addr)
    }

    /// Cast this userspace pointer to a raw const void pointer.
    ///
    /// It is up to the caller to use the returned pointer correctly.
    #[inline]
    pub fn as_const_ptr(self) -> *const c_void {
        self.0.cast_const()
    }

    /// Cast this userspace pointer to a raw mutable void pointer.
    ///
    /// It is up to the caller to use the returned pointer correctly.
    #[inline]
    pub fn as_mut_ptr(self) -> *mut c_void {
        let UserPtr(raw) = self;
        raw
    }

    /// Increment this user pointer by `add` bytes.
    ///
    /// This addition is wrapping, so wrapping around the address space does not result in a panic
    /// even if `CONFIG_RUST_OVERFLOW_CHECKS` is enabled.
    #[inline]
    pub fn wrapping_byte_add(self, add: usize) -> UserPtr {
        let UserPtr(raw) = self;
        UserPtr(raw.wrapping_byte_add(add))
    }
}
64
/// A pointer to an area in userspace memory, which can be either read-only or read-write.
///
/// All methods on this struct are safe: attempting to read or write on bad addresses (either out of
/// the bound of the slice or unmapped addresses) will return [`EFAULT`]. Concurrent access,
/// *including data races to/from userspace memory*, is permitted, because fundamentally another
/// userspace thread/process could always be modifying memory at the same time (in the same way that
/// userspace Rust's [`std::io`] permits data races with the contents of files on disk). In the
/// presence of a race, the exact byte values read/written are unspecified but the operation is
/// well-defined. Kernelspace code should validate its copy of data after completing a read, and not
/// expect that multiple reads of the same address will return the same value.
///
/// These APIs are designed to make it difficult to accidentally write TOCTOU (time-of-check to
/// time-of-use) bugs. Every time a memory location is read, the reader's position is advanced by
/// the read length and the next read will start from there. This helps prevent accidentally reading
/// the same location twice and causing a TOCTOU bug.
///
/// Creating a [`UserSliceReader`] and/or [`UserSliceWriter`] consumes the `UserSlice`, helping
/// ensure that there aren't multiple readers or writers to the same location.
///
/// If double-fetching a memory location is necessary for some reason, then that is done by creating
/// multiple readers to the same memory location, e.g. using [`clone_reader`].
///
/// # Examples
///
/// Takes a region of userspace memory from the current process, and modifies it by adding one to
/// every byte in the region.
///
/// ```no_run
/// use kernel::ffi::c_void;
/// use kernel::uaccess::{UserPtr, UserSlice};
///
/// fn bytes_add_one(uptr: UserPtr, len: usize) -> Result {
///     let (read, mut write) = UserSlice::new(uptr, len).reader_writer();
///
///     let mut buf = KVec::new();
///     read.read_all(&mut buf, GFP_KERNEL)?;
///
///     for b in &mut buf {
///         *b = b.wrapping_add(1);
///     }
///
///     write.write_slice(&buf)?;
///     Ok(())
/// }
/// ```
///
/// Example illustrating a TOCTOU (time-of-check to time-of-use) bug.
///
/// ```no_run
/// use kernel::ffi::c_void;
/// use kernel::uaccess::{UserPtr, UserSlice};
///
/// /// Returns whether the data in this region is valid.
/// fn is_valid(uptr: UserPtr, len: usize) -> Result<bool> {
///     let read = UserSlice::new(uptr, len).reader();
///
///     let mut buf = KVec::new();
///     read.read_all(&mut buf, GFP_KERNEL)?;
///
///     todo!()
/// }
///
/// /// Returns the bytes behind this user pointer if they are valid.
/// fn get_bytes_if_valid(uptr: UserPtr, len: usize) -> Result<KVec<u8>> {
///     if !is_valid(uptr, len)? {
///         return Err(EINVAL);
///     }
///
///     let read = UserSlice::new(uptr, len).reader();
///
///     let mut buf = KVec::new();
///     read.read_all(&mut buf, GFP_KERNEL)?;
///
///     // THIS IS A BUG! The bytes could have changed since we checked them.
///     //
///     // To avoid this kind of bug, don't call `UserSlice::new` multiple
///     // times with the same address.
///     Ok(buf)
/// }
/// ```
///
/// [`std::io`]: https://doc.rust-lang.org/std/io/index.html
/// [`clone_reader`]: UserSliceReader::clone_reader
pub struct UserSlice {
    // Userspace base address of the region. Not validated until a read/write is attempted.
    ptr: UserPtr,
    // Length of the region in bytes.
    length: usize,
}
152
153impl UserSlice {
154 /// Constructs a user slice from a raw pointer and a length in bytes.
155 ///
156 /// Constructing a [`UserSlice`] performs no checks on the provided address and length, it can
157 /// safely be constructed inside a kernel thread with no current userspace process. Reads and
158 /// writes wrap the kernel APIs `copy_from_user` and `copy_to_user`, which check the memory map
159 /// of the current process and enforce that the address range is within the user range (no
160 /// additional calls to `access_ok` are needed). Validity of the pointer is checked when you
161 /// attempt to read or write, not in the call to `UserSlice::new`.
162 ///
163 /// Callers must be careful to avoid time-of-check-time-of-use (TOCTOU) issues. The simplest way
164 /// is to create a single instance of [`UserSlice`] per user memory block as it reads each byte
165 /// at most once.
166 pub fn new(ptr: UserPtr, length: usize) -> Self {
167 UserSlice { ptr, length }
168 }
169
170 /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
171 ///
172 /// Fails with [`EFAULT`] if the read happens on a bad address.
173 pub fn read_all<A: Allocator>(self, buf: &mut Vec<u8, A>, flags: Flags) -> Result {
174 self.reader().read_all(buf, flags)
175 }
176
177 /// Constructs a [`UserSliceReader`].
178 pub fn reader(self) -> UserSliceReader {
179 UserSliceReader {
180 ptr: self.ptr,
181 length: self.length,
182 }
183 }
184
185 /// Constructs a [`UserSliceWriter`].
186 pub fn writer(self) -> UserSliceWriter {
187 UserSliceWriter {
188 ptr: self.ptr,
189 length: self.length,
190 }
191 }
192
193 /// Constructs both a [`UserSliceReader`] and a [`UserSliceWriter`].
194 ///
195 /// Usually when this is used, you will first read the data, and then overwrite it afterwards.
196 pub fn reader_writer(self) -> (UserSliceReader, UserSliceWriter) {
197 (
198 UserSliceReader {
199 ptr: self.ptr,
200 length: self.length,
201 },
202 UserSliceWriter {
203 ptr: self.ptr,
204 length: self.length,
205 },
206 )
207 }
208}
209
/// A reader for [`UserSlice`].
///
/// Used to incrementally read from the user slice.
pub struct UserSliceReader {
    // Userspace address the next read starts at; advanced by each successful read.
    ptr: UserPtr,
    // Number of bytes remaining to be read.
    length: usize,
}
217
impl UserSliceReader {
    /// Skip the provided number of bytes.
    ///
    /// Returns an error if skipping more than the length of the buffer.
    pub fn skip(&mut self, num_skip: usize) -> Result {
        // Update `self.length` first since that's the fallible part of this operation.
        self.length = self.length.checked_sub(num_skip).ok_or(EFAULT)?;
        self.ptr = self.ptr.wrapping_byte_add(num_skip);
        Ok(())
    }

    /// Create a reader that can access the same range of data.
    ///
    /// Reading from the clone does not advance the current reader.
    ///
    /// The caller should take care to not introduce TOCTOU issues, as described in the
    /// documentation for [`UserSlice`].
    pub fn clone_reader(&self) -> UserSliceReader {
        UserSliceReader {
            ptr: self.ptr,
            length: self.length,
        }
    }

    /// Returns the number of bytes left to be read from this reader.
    ///
    /// Note that even reading less than this number of bytes may fail.
    pub fn len(&self) -> usize {
        self.length
    }

    /// Returns `true` if no data is available in the io buffer.
    pub fn is_empty(&self) -> bool {
        self.length == 0
    }

    /// Reads raw data from the user slice into a kernel buffer.
    ///
    /// For a version that uses `&mut [u8]`, please see [`UserSliceReader::read_slice`].
    ///
    /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
    /// bounds of this [`UserSliceReader`]. This call may modify `out` even if it returns an error.
    ///
    /// # Guarantees
    ///
    /// After a successful call to this method, all bytes in `out` are initialized.
    pub fn read_raw(&mut self, out: &mut [MaybeUninit<u8>]) -> Result {
        let len = out.len();
        let out_ptr = out.as_mut_ptr().cast::<c_void>();
        if len > self.length {
            return Err(EFAULT);
        }
        // SAFETY: `out_ptr` points into a mutable slice of length `len`, so we may write
        // that many bytes to it.
        let res = unsafe { bindings::copy_from_user(out_ptr, self.ptr.as_const_ptr(), len) };
        if res != 0 {
            return Err(EFAULT);
        }
        // Advance past the bytes just consumed so the next read starts after them.
        self.ptr = self.ptr.wrapping_byte_add(len);
        self.length -= len;
        Ok(())
    }

    /// Reads raw data from the user slice into a kernel buffer.
    ///
    /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
    /// bounds of this [`UserSliceReader`]. This call may modify `out` even if it returns an error.
    pub fn read_slice(&mut self, out: &mut [u8]) -> Result {
        // SAFETY: The types are compatible and `read_raw` doesn't write uninitialized bytes to
        // `out`.
        let out = unsafe { &mut *(core::ptr::from_mut(out) as *mut [MaybeUninit<u8>]) };
        self.read_raw(out)
    }

    /// Reads raw data from the user slice into a kernel buffer partially.
    ///
    /// This is the same as [`Self::read_slice`] but considers the given `offset` into `out` and
    /// truncates the read to the boundaries of `self` and `out`.
    ///
    /// On success, returns the number of bytes read.
    pub fn read_slice_partial(&mut self, out: &mut [u8], offset: usize) -> Result<usize> {
        // Truncate to whichever limit is hit first: the remaining reader length or the end of
        // `out`. Saturating add: a huge `offset` must not wrap around.
        let end = offset.saturating_add(self.len()).min(out.len());

        // If `offset` is past the end of `out`, there is nothing to read into.
        let Some(dst) = out.get_mut(offset..end) else {
            return Ok(0);
        };

        self.read_slice(dst)?;
        Ok(dst.len())
    }

    /// Reads raw data from the user slice into a kernel buffer partially.
    ///
    /// This is the same as [`Self::read_slice_partial`] but updates the given [`file::Offset`] by
    /// the number of bytes read.
    ///
    /// This is equivalent to C's `simple_write_to_buffer()`.
    ///
    /// On success, returns the number of bytes read.
    pub fn read_slice_file(&mut self, out: &mut [u8], offset: &mut file::Offset) -> Result<usize> {
        if offset.is_negative() {
            return Err(EINVAL);
        }

        // A non-negative offset that does not fit in `usize` is necessarily past the end of
        // `out`, so there is nothing to read.
        let Ok(offset_index) = (*offset).try_into() else {
            return Ok(0);
        };

        let read = self.read_slice_partial(out, offset_index)?;

        // OVERFLOW: `offset + read <= out.len() <= isize::MAX <= Offset::MAX`
        *offset += read as i64;

        Ok(read)
    }

    /// Reads a value of the specified type.
    ///
    /// Fails with [`EFAULT`] if the read happens on a bad address, or if the read goes out of
    /// bounds of this [`UserSliceReader`].
    pub fn read<T: FromBytes>(&mut self) -> Result<T> {
        let len = size_of::<T>();
        if len > self.length {
            return Err(EFAULT);
        }
        let mut out: MaybeUninit<T> = MaybeUninit::uninit();
        // SAFETY: The local variable `out` is valid for writing `size_of::<T>()` bytes.
        //
        // By using the _copy_from_user variant, we skip the check_object_size check that verifies
        // the kernel pointer. This mirrors the logic on the C side that skips the check when the
        // length is a compile-time constant.
        let res = unsafe {
            bindings::_copy_from_user(
                out.as_mut_ptr().cast::<c_void>(),
                self.ptr.as_const_ptr(),
                len,
            )
        };
        if res != 0 {
            return Err(EFAULT);
        }
        self.ptr = self.ptr.wrapping_byte_add(len);
        self.length -= len;
        // SAFETY: The read above has initialized all bytes in `out`, and since `T` implements
        // `FromBytes`, any bit-pattern is a valid value for this type.
        Ok(unsafe { out.assume_init() })
    }

    /// Reads the entirety of the user slice, appending it to the end of the provided buffer.
    ///
    /// Fails with [`EFAULT`] if the read happens on a bad address.
    pub fn read_all<A: Allocator>(mut self, buf: &mut Vec<u8, A>, flags: Flags) -> Result {
        let len = self.length;
        buf.reserve(len, flags)?;

        // The call to `reserve` was successful, so the spare capacity is at least `len` bytes long.
        self.read_raw(&mut buf.spare_capacity_mut()[..len])?;

        // SAFETY: Since the call to `read_raw` was successful, so the next `len` bytes of the
        // vector have been initialized.
        unsafe { buf.inc_len(len) };
        Ok(())
    }

    /// Read a NUL-terminated string from userspace and return it.
    ///
    /// The string is read into `buf` and a NUL-terminator is added if the end of `buf` is reached.
    /// Since there must be space to add a NUL-terminator, the buffer must not be empty. The
    /// returned `&CStr` points into `buf`.
    ///
    /// Fails with [`EFAULT`] if the read happens on a bad address (some data may have been
    /// copied).
    #[doc(alias = "strncpy_from_user")]
    pub fn strcpy_into_buf<'buf>(self, buf: &'buf mut [u8]) -> Result<&'buf CStr> {
        if buf.is_empty() {
            return Err(EINVAL);
        }

        // SAFETY: The types are compatible and `strncpy_from_user` doesn't write uninitialized
        // bytes to `buf`.
        let mut dst = unsafe { &mut *(core::ptr::from_mut(buf) as *mut [MaybeUninit<u8>]) };

        // We never read more than `self.length` bytes.
        if dst.len() > self.length {
            dst = &mut dst[..self.length];
        }

        let mut len = raw_strncpy_from_user(dst, self.ptr)?;
        if len < dst.len() {
            // Add one to include the NUL-terminator.
            len += 1;
        } else if len < buf.len() {
            // This implies that `len == dst.len() < buf.len()`.
            //
            // This means that we could not fill the entire buffer, but we had to stop reading
            // because we hit the `self.length` limit of this `UserSliceReader`. Since we did not
            // fill the buffer, we treat this case as if we tried to read past the `self.length`
            // limit and received a page fault, which is consistent with other `UserSliceReader`
            // methods that also return page faults when you exceed `self.length`.
            return Err(EFAULT);
        } else {
            // This implies that `len == buf.len()`.
            //
            // This means that we filled the buffer exactly. In this case, we add a NUL-terminator
            // and return it. Unlike the `len < dst.len()` branch, don't modify `len` because it
            // already represents the length including the NUL-terminator.
            //
            // SAFETY: Due to the check at the beginning, the buffer is not empty.
            unsafe { *buf.last_mut().unwrap_unchecked() = 0 };
        }

        // This method consumes `self`, so it can only be called once, thus we do not need to
        // update `self.length`. This sidesteps concerns such as whether `self.length` should be
        // incremented by `len` or `len-1` in the `len == buf.len()` case.

        // SAFETY: There are two cases:
        // * If we hit the `len < dst.len()` case, then `raw_strncpy_from_user` guarantees that
        //   this slice contains exactly one NUL byte at the end of the string.
        // * Otherwise, `raw_strncpy_from_user` guarantees that the string contained no NUL bytes,
        //   and we have since added a NUL byte at the end.
        Ok(unsafe { CStr::from_bytes_with_nul_unchecked(&buf[..len]) })
    }
}
441
/// A writer for [`UserSlice`].
///
/// Used to incrementally write into the user slice.
pub struct UserSliceWriter {
    // Userspace address the next write starts at; advanced by each successful write.
    ptr: UserPtr,
    // Number of bytes of space remaining in the slice.
    length: usize,
}
449
impl UserSliceWriter {
    /// Returns the amount of space remaining in this buffer.
    ///
    /// Note that even writing less than this number of bytes may fail.
    pub fn len(&self) -> usize {
        self.length
    }

    /// Returns `true` if no more data can be written to this buffer.
    pub fn is_empty(&self) -> bool {
        self.length == 0
    }

    /// Low-level write from a raw pointer.
    ///
    /// Fails with [`EFAULT`] if the write happens on a bad address or exceeds the remaining
    /// length of this writer. On success, advances the writer past the written bytes.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `from` is valid for reads of `len` bytes.
    unsafe fn write_raw(&mut self, from: *const u8, len: usize) -> Result {
        if len > self.length {
            return Err(EFAULT);
        }

        // SAFETY: Caller guarantees `from` is valid for `len` bytes (see this function's
        // safety contract).
        let res = unsafe { bindings::copy_to_user(self.ptr.as_mut_ptr(), from.cast(), len) };
        if res != 0 {
            return Err(EFAULT);
        }
        self.ptr = self.ptr.wrapping_byte_add(len);
        self.length -= len;
        Ok(())
    }

    /// Writes raw data to this user pointer from a kernel buffer.
    ///
    /// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
    /// bounds of this [`UserSliceWriter`]. This call may modify the associated userspace slice even
    /// if it returns an error.
    pub fn write_slice(&mut self, data: &[u8]) -> Result {
        // SAFETY: `data` is a valid slice, so `data.as_ptr()` is valid for
        // reading `data.len()` bytes.
        unsafe { self.write_raw(data.as_ptr(), data.len()) }
    }

    /// Writes raw data to this user pointer from a DMA coherent allocation.
    ///
    /// Copies `count` bytes from `alloc` starting from `offset` into this userspace slice.
    ///
    /// # Errors
    ///
    /// - [`EOVERFLOW`]: `offset + count` overflows.
    /// - [`ERANGE`]: `offset + count` exceeds the size of `alloc`, or `count` exceeds the
    ///   size of the user-space buffer.
    /// - [`EFAULT`]: the write hits a bad address or goes out of bounds of this
    ///   [`UserSliceWriter`].
    ///
    /// This call may modify the associated userspace slice even if it returns an error.
    ///
    /// Note: The memory may be concurrently modified by hardware (e.g., DMA). In such cases,
    /// the copied data may be inconsistent, but this does not cause undefined behavior.
    ///
    /// # Example
    ///
    /// Copy the first 256 bytes of a DMA coherent allocation into a userspace buffer:
    ///
    /// ```no_run
    /// use kernel::uaccess::UserSliceWriter;
    /// use kernel::dma::Coherent;
    ///
    /// fn copy_dma_to_user(
    ///     mut writer: UserSliceWriter,
    ///     alloc: &Coherent<[u8]>,
    /// ) -> Result {
    ///     writer.write_dma(alloc, 0, 256)
    /// }
    /// ```
    pub fn write_dma(&mut self, alloc: &Coherent<[u8]>, offset: usize, count: usize) -> Result {
        let len = alloc.size();
        if offset.checked_add(count).ok_or(EOVERFLOW)? > len {
            return Err(ERANGE);
        }

        if count > self.len() {
            return Err(ERANGE);
        }

        // SAFETY: `as_ptr()` returns a valid pointer to a memory region of `size()` bytes, as
        // guaranteed by the `Coherent` invariants. The check above ensures `offset + count <= len`.
        let src_ptr = unsafe { alloc.as_ptr().cast::<u8>().add(offset) };

        // Note: Use `write_raw` instead of `write_slice` because the allocation is coherent
        // memory that hardware may modify (e.g., DMA); we cannot form a `&[u8]` slice over
        // such volatile memory.
        //
        // SAFETY: `src_ptr` points into the allocation and is valid for `count` bytes (see above).
        unsafe { self.write_raw(src_ptr, count) }
    }

    /// Writes raw data to this user pointer from a kernel buffer partially.
    ///
    /// This is the same as [`Self::write_slice`] but considers the given `offset` into `data` and
    /// truncates the write to the boundaries of `self` and `data`.
    ///
    /// On success, returns the number of bytes written.
    pub fn write_slice_partial(&mut self, data: &[u8], offset: usize) -> Result<usize> {
        // Truncate to whichever limit is hit first: the remaining writer length or the end of
        // `data`. Saturating add: a huge `offset` must not wrap around.
        let end = offset.saturating_add(self.len()).min(data.len());

        // If `offset` is past the end of `data`, there is nothing to write.
        let Some(src) = data.get(offset..end) else {
            return Ok(0);
        };

        self.write_slice(src)?;
        Ok(src.len())
    }

    /// Writes raw data to this user pointer from a kernel buffer partially.
    ///
    /// This is the same as [`Self::write_slice_partial`] but updates the given [`file::Offset`] by
    /// the number of bytes written.
    ///
    /// This is equivalent to C's `simple_read_from_buffer()`.
    ///
    /// On success, returns the number of bytes written.
    pub fn write_slice_file(&mut self, data: &[u8], offset: &mut file::Offset) -> Result<usize> {
        if offset.is_negative() {
            return Err(EINVAL);
        }

        // A non-negative offset that does not fit in `usize` is necessarily past the end of
        // `data`, so there is nothing to write.
        let Ok(offset_index) = (*offset).try_into() else {
            return Ok(0);
        };

        let written = self.write_slice_partial(data, offset_index)?;

        // OVERFLOW: `offset + written <= data.len() <= isize::MAX <= Offset::MAX`
        *offset += written as i64;

        Ok(written)
    }

    /// Writes the provided Rust value to this userspace pointer.
    ///
    /// Fails with [`EFAULT`] if the write happens on a bad address, or if the write goes out of
    /// bounds of this [`UserSliceWriter`]. This call may modify the associated userspace slice even
    /// if it returns an error.
    pub fn write<T: AsBytes>(&mut self, value: &T) -> Result {
        let len = size_of::<T>();
        if len > self.length {
            return Err(EFAULT);
        }
        // SAFETY: The reference points to a value of type `T`, so it is valid for reading
        // `size_of::<T>()` bytes.
        //
        // By using the _copy_to_user variant, we skip the check_object_size check that verifies the
        // kernel pointer. This mirrors the logic on the C side that skips the check when the length
        // is a compile-time constant.
        let res = unsafe {
            bindings::_copy_to_user(
                self.ptr.as_mut_ptr(),
                core::ptr::from_ref(value).cast::<c_void>(),
                len,
            )
        };
        if res != 0 {
            return Err(EFAULT);
        }
        self.ptr = self.ptr.wrapping_byte_add(len);
        self.length -= len;
        Ok(())
    }
}
622
/// Reads a nul-terminated string into `dst` and returns the length.
///
/// This reads from userspace until a NUL byte is encountered, or until `dst.len()` bytes have been
/// read. Fails with [`EFAULT`] if a read happens on a bad address (some data may have been
/// copied). When the end of the buffer is encountered, no NUL byte is added, so the string is
/// *not* guaranteed to be NUL-terminated when `Ok(dst.len())` is returned.
///
/// # Guarantees
///
/// When this function returns `Ok(len)`, it is guaranteed that the first `len` bytes of `dst` are
/// initialized and non-zero. Furthermore, if `len < dst.len()`, then `dst[len]` is a NUL byte.
#[inline]
fn raw_strncpy_from_user(dst: &mut [MaybeUninit<u8>], src: UserPtr) -> Result<usize> {
    // CAST: Slice lengths are guaranteed to be `<= isize::MAX`.
    let len = dst.len() as isize;

    // SAFETY: `dst` is valid for writing `dst.len()` bytes.
    let res = unsafe {
        bindings::strncpy_from_user(
            dst.as_mut_ptr().cast::<c_char>(),
            src.as_const_ptr().cast::<c_char>(),
            len,
        )
    };

    // A negative return value is an errno from the C helper; propagate it.
    if res < 0 {
        return Err(Error::from_errno(res as i32));
    }

    // `strncpy_from_user` never reports reading more than `len` bytes; double-check that
    // invariant when overflow checks are enabled, since the cast below relies on it.
    #[cfg(CONFIG_RUST_OVERFLOW_CHECKS)]
    assert!(res <= len);

    // GUARANTEES: `strncpy_from_user` was successful, so `dst` has contents in accordance with the
    // guarantees of this function.
    Ok(res as usize)
}
658}