// kernel/sync/lock.rs

1// SPDX-License-Identifier: GPL-2.0
2
3//! Generic kernel lock and guard.
4//!
5//! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
6//! spinlocks, raw spinlocks) to be provided with minimal effort.
7
8use super::LockClassKey;
9use crate::{
10    str::CStr,
11    types::{NotThreadSafe, Opaque, ScopeGuard},
12};
13use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
14use pin_init::{pin_data, pin_init, PinInit};
15
16pub mod mutex;
17pub mod spinlock;
18
19pub(super) mod global;
20pub use global::{GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
21
/// The "backend" of a lock.
///
/// It is the actual implementation of the lock, without the need to repeat patterns used in all
/// locks.
///
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
///   is owned, that is, between calls to [`lock`] and [`unlock`].
/// - Implementers must also ensure that [`relock`] uses the same locking method as the original
///   lock operation.
///
/// [`lock`]: Backend::lock
/// [`unlock`]: Backend::unlock
/// [`relock`]: Backend::relock
pub unsafe trait Backend {
    /// The state required by the lock.
    ///
    /// [`Lock`] stores a value of this type (wrapped in an [`Opaque`]) and passes a pointer to it
    /// to every other method of this trait.
    type State;

    /// The state required to be kept between [`lock`] and [`unlock`].
    ///
    /// Returned by [`lock`] and handed back to [`unlock`]/[`relock`], so a backend can carry
    /// per-acquisition data across an acquire/release pair.
    ///
    /// [`lock`]: Backend::lock
    /// [`unlock`]: Backend::unlock
    /// [`relock`]: Backend::relock
    type GuardState;

    /// Initialises the lock.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for write for the duration of the call, while `name` and `key` must
    /// remain valid for read indefinitely.
    unsafe fn init(
        ptr: *mut Self::State,
        name: *const crate::ffi::c_char,
        key: *mut bindings::lock_class_key,
    );

    /// Acquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    #[must_use]
    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;

    /// Tries to acquire the lock.
    ///
    /// Returns the guard state if the lock was acquired, or `None` if it could not be acquired.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState>;

    /// Releases the lock, giving up its ownership.
    ///
    /// # Safety
    ///
    /// It must only be called by the current owner of the lock.
    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);

    /// Reacquires the lock, making the caller its owner.
    ///
    /// The default implementation simply acquires the lock again via [`Backend::lock`]; backends
    /// whose relock must differ from a plain lock (see the trait's safety requirements) have to
    /// override this.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or
    /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now.
    unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
        // SAFETY: The safety requirements ensure that the lock is initialised.
        *guard_state = unsafe { Self::lock(ptr) };
    }

    /// Asserts that the lock is held using lockdep.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    unsafe fn assert_is_held(ptr: *mut Self::State);
}
99
/// A mutual exclusion primitive.
///
/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
/// [`Backend`] specified as the generic parameter `B`.
// `#[repr(C)]` keeps `state` at offset zero, which [`Lock::from_raw`] relies on for its
// pointer cast.
#[repr(C)]
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
    /// The kernel lock object.
    #[pin]
    state: Opaque<B::State>,

    /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
    /// or config defined (e.g., spinlocks). So we conservatively require them to be pinned in case
    /// some architecture uses self-references now or in the future.
    #[pin]
    _pin: PhantomPinned,

    /// The data protected by the lock.
    ///
    /// `UnsafeCell` is needed because `Guard` hands out `&mut T` from a `&Lock`.
    pub(crate) data: UnsafeCell<T>,
}
120
// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can, since
// moving the lock moves the `T` it owns.
unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {}
123
// SAFETY: `Lock` serialises the interior mutability it provides (the data is only reachable
// through a `Guard`, i.e. with the lock held), so it is `Sync` as long as the data it protects is
// `Send`.
unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}
127
128impl<T, B: Backend> Lock<T, B> {
129    /// Constructs a new lock initialiser.
130    pub fn new(t: T, name: &'static CStr, key: Pin<&'static LockClassKey>) -> impl PinInit<Self> {
131        pin_init!(Self {
132            data: UnsafeCell::new(t),
133            _pin: PhantomPinned,
134            // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
135            // static lifetimes so they live indefinitely.
136            state <- Opaque::ffi_init(|slot| unsafe {
137                B::init(slot, name.as_char_ptr(), key.as_ptr())
138            }),
139        })
140    }
141}
142
143impl<B: Backend> Lock<(), B> {
144    /// Constructs a [`Lock`] from a raw pointer.
145    ///
146    /// This can be useful for interacting with a lock which was initialised outside of Rust.
147    ///
148    /// # Safety
149    ///
150    /// The caller promises that `ptr` points to a valid initialised instance of [`State`] during
151    /// the whole lifetime of `'a`.
152    ///
153    /// [`State`]: Backend::State
154    pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
155        // SAFETY:
156        // - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
157        // - Since the lock data type is `()` which is a ZST, `state` is the only non-ZST member of
158        //   the struct
159        // - Combined with `#[repr(C)]`, this guarantees `Self` has an equivalent data layout to
160        //   `B::State`.
161        unsafe { &*ptr.cast() }
162    }
163}
164
165impl<T: ?Sized, B: Backend> Lock<T, B> {
166    /// Acquires the lock and gives the caller access to the data protected by it.
167    pub fn lock(&self) -> Guard<'_, T, B> {
168        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
169        // that `init` was called.
170        let state = unsafe { B::lock(self.state.get()) };
171        // SAFETY: The lock was just acquired.
172        unsafe { Guard::new(self, state) }
173    }
174
175    /// Tries to acquire the lock.
176    ///
177    /// Returns a guard that can be used to access the data protected by the lock if successful.
178    pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
179        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
180        // that `init` was called.
181        unsafe { B::try_lock(self.state.get()).map(|state| Guard::new(self, state)) }
182    }
183}
184
/// A lock guard.
///
/// Allows mutual exclusion primitives that implement the [`Backend`] trait to automatically unlock
/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
/// protected by the lock.
#[must_use = "the lock unlocks immediately when the guard is unused"]
pub struct Guard<'a, T: ?Sized, B: Backend> {
    /// The lock this guard was created from; unlocked again when the guard is dropped.
    pub(crate) lock: &'a Lock<T, B>,
    /// Backend-specific state produced by [`Backend::lock`] and consumed by [`Backend::unlock`].
    pub(crate) state: B::GuardState,
    /// Keeps the guard `!Send`: lock ownership must not migrate to another thread/CPU, since
    /// [`Backend::unlock`] may only be called by the acquiring owner.
    _not_send: NotThreadSafe,
}
196
// SAFETY: `Guard` is sync when the data protected by the lock is also sync: a `&Guard` only hands
// out `&T` (via `Deref`), never `&mut T`.
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}
199
impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
    /// Returns the lock that this guard originates from.
    ///
    /// # Examples
    ///
    /// The following example shows how to use [`Guard::lock_ref()`] to assert the corresponding
    /// lock is held.
    ///
    /// ```
    /// # use kernel::{new_spinlock, sync::lock::{Backend, Guard, Lock}};
    /// # use pin_init::stack_pin_init;
    ///
    /// fn assert_held<T, B: Backend>(guard: &Guard<'_, T, B>, lock: &Lock<T, B>) {
    ///     // Address-equal means the same lock.
    ///     assert!(core::ptr::eq(guard.lock_ref(), lock));
    /// }
    ///
    /// // Creates a new lock on the stack.
    /// stack_pin_init!{
    ///     let l = new_spinlock!(42)
    /// }
    ///
    /// let g = l.lock();
    ///
    /// // `g` originates from `l`.
    /// assert_held(&g, &l);
    /// ```
    pub fn lock_ref(&self) -> &'a Lock<T, B> {
        self.lock
    }

    /// Temporarily releases the lock, runs `cb`, then reacquires the lock and returns `cb`'s
    /// result.
    pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };

        // The `ScopeGuard` runs its closure on drop, so the lock is reacquired before this
        // function returns regardless of how `cb` completes.
        let _relock = ScopeGuard::new(||
                // SAFETY: The lock was just unlocked above and is being relocked now.
                unsafe { B::relock(self.lock.state.get(), &mut self.state) });

        cb()
    }
}
242
243impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
244    type Target = T;
245
246    fn deref(&self) -> &Self::Target {
247        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
248        unsafe { &*self.lock.data.get() }
249    }
250}
251
252impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
253    fn deref_mut(&mut self) -> &mut Self::Target {
254        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
255        unsafe { &mut *self.lock.data.get() }
256    }
257}
258
259impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
260    fn drop(&mut self) {
261        // SAFETY: The caller owns the lock, so it is safe to unlock it.
262        unsafe { B::unlock(self.lock.state.get(), &self.state) };
263    }
264}
265
266impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
267    /// Constructs a new immutable lock guard.
268    ///
269    /// # Safety
270    ///
271    /// The caller must ensure that it owns the lock.
272    pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
273        // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
274        unsafe { B::assert_is_held(lock.state.get()) };
275
276        Self {
277            lock,
278            state,
279            _not_send: NotThreadSafe,
280        }
281    }
282}