// SPDX-License-Identifier: GPL-2.0

//! Generic kernel lock and guard.
//!
//! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
//! spinlocks, raw spinlocks) to be provided with minimal effort.

use super::LockClassKey;
use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque, types::ScopeGuard};
use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};
use macros::pin_data;

pub mod mutex;
pub mod spinlock;

/// The "backend" of a lock.
///
/// It is the actual implementation of the lock, without the need to repeat patterns used in all
/// locks.
///
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
///   is owned, that is, between calls to `lock` and `unlock`.
/// - Implementers must also ensure that `relock` uses the same locking method as the original
///   lock operation.
pub unsafe trait Backend {
    /// The state required by the lock.
    type State;

    /// The state required to be kept between lock and unlock.
    type GuardState;

    /// Initialises the lock.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for write for the duration of the call, while `name` and `key` must
    /// remain valid for read indefinitely.
    unsafe fn init(
        ptr: *mut Self::State,
        name: *const core::ffi::c_char,
        key: *mut bindings::lock_class_key,
    );

    /// Acquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    #[must_use]
    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;

    /// Releases the lock, giving up its ownership.
    ///
    /// # Safety
    ///
    /// It must only be called by the current owner of the lock.
    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);

    /// Reacquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or
    /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now.
    unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
        // SAFETY: The safety requirements ensure that the lock is initialised.
        *guard_state = unsafe { Self::lock(ptr) };
    }
}
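
// Illustrative sketch (not part of the upstream file): a minimal `Backend` implementation built
// on a busy-wait flag, showing how `State`, `GuardState` and the safety contract fit together.
// The real backends in the `mutex` and `spinlock` submodules delegate to the kernel's C
// primitives via `bindings`; `BusyWaitBackend` below is hypothetical and purely illustrative.
//
//     use core::sync::atomic::{AtomicBool, Ordering};
//
//     struct BusyWaitBackend;
//
//     // SAFETY: The spin loop in `lock` ensures that only one owner holds the flag between
//     // `lock` and `unlock`, and the default `relock` uses the same locking method.
//     unsafe impl Backend for BusyWaitBackend {
//         type State = AtomicBool;
//         type GuardState = ();
//
//         unsafe fn init(
//             ptr: *mut Self::State,
//             _name: *const core::ffi::c_char,
//             _key: *mut bindings::lock_class_key,
//         ) {
//             // SAFETY: Per this function's safety requirements, `ptr` is valid for writes.
//             unsafe { ptr.write(AtomicBool::new(false)) };
//         }
//
//         unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
//             // SAFETY: `init` has been called, so `ptr` points to a live `AtomicBool`.
//             let flag = unsafe { &*ptr };
//             // Spin until the flag transitions from unlocked (false) to locked (true).
//             while flag
//                 .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
//                 .is_err()
//             {
//                 core::hint::spin_loop();
//             }
//         }
//
//         unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
//             // SAFETY: Only the current owner calls `unlock`, so `ptr` is valid and the flag
//             // is currently set.
//             unsafe { &*ptr }.store(false, Ordering::Release);
//         }
//     }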

/// A mutual exclusion primitive.
///
/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock backend
/// specified as the generic parameter `B`.
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
    /// The kernel lock object.
    #[pin]
    state: Opaque<B::State>,

    /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
    /// or config defined (e.g., spinlocks). So we conservatively require them to be pinned in case
    /// some architecture uses self-references now or in the future.
    #[pin]
    _pin: PhantomPinned,

    /// The data protected by the lock.
    pub(crate) data: UnsafeCell<T>,
}

// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can.
unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {}

// SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the
// data it protects is `Send`.
unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}

impl<T, B: Backend> Lock<T, B> {
    /// Constructs a new lock initialiser.
    #[allow(clippy::new_ret_no_self)]
    pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
        pin_init!(Self {
            data: UnsafeCell::new(t),
            _pin: PhantomPinned,
            // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
            // static lifetimes so they live indefinitely.
            state <- Opaque::ffi_init(|slot| unsafe {
                B::init(slot, name.as_char_ptr(), key.as_ptr())
            }),
        })
    }
}

impl<T: ?Sized, B: Backend> Lock<T, B> {
    /// Acquires the lock and gives the caller access to the data protected by it.
    pub fn lock(&self) -> Guard<'_, T, B> {
        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
        // that `init` was called.
        let state = unsafe { B::lock(self.state.get()) };
        // SAFETY: The lock was just acquired.
        unsafe { Guard::new(self, state) }
    }
}

/// A lock guard.
///
/// Allows mutual exclusion primitives that implement the `Backend` trait to automatically unlock
/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
/// protected by the lock.
#[must_use = "the lock unlocks immediately when the guard is unused"]
pub struct Guard<'a, T: ?Sized, B: Backend> {
    pub(crate) lock: &'a Lock<T, B>,
    pub(crate) state: B::GuardState,
    _not_send: PhantomData<*mut ()>,
}

// SAFETY: `Guard` is sync when the data protected by the lock is also sync.
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}

impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
    #[allow(dead_code)]
    pub(crate) fn do_unlocked(&mut self, cb: impl FnOnce()) {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };

        // SAFETY: The lock was just unlocked above and is being relocked now.
        let _relock =
            ScopeGuard::new(|| unsafe { B::relock(self.lock.state.get(), &mut self.state) });

        cb();
    }
}

impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &*self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
    fn drop(&mut self) {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };
    }
}

impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
    /// Constructs a new immutable lock guard.
    ///
    /// # Safety
    ///
    /// The caller must ensure that it owns the lock.
    pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
        Self {
            lock,
            state,
            _not_send: PhantomData,
        }
    }
}
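
// Illustrative usage sketch (not part of the upstream file): how a lock built on this generic
// `Lock`/`Guard` pair is typically used, assuming the `Mutex` alias from the `mutex` submodule
// and the `new_mutex!` initialiser macro; the `Example` type, field names and exact import paths
// below are hypothetical/abbreviated.
//
//     use kernel::{init::PinInit, new_mutex, pin_init, sync::Mutex};
//
//     #[pin_data]
//     struct Example {
//         #[pin]
//         value: Mutex<u32>,
//     }
//
//     impl Example {
//         fn new() -> impl PinInit<Self> {
//             pin_init!(Self {
//                 value <- new_mutex!(0),
//             })
//         }
//     }
//
//     fn add_one(example: &Example) -> u32 {
//         // `lock()` returns a `Guard` that derefs to the protected `u32` and releases the
//         // mutex when it goes out of scope.
//         let mut guard = example.value.lock();
//         *guard += 1;
//         *guard
//     }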