// SPDX-License-Identifier: GPL-2.0

//! Generic kernel lock and guard.
//!
//! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
//! spinlocks, raw spinlocks) to be provided with minimal effort.

use super::LockClassKey;
use crate::{bindings, init::PinInit, pin_init, str::CStr, types::Opaque};
use core::{cell::UnsafeCell, marker::PhantomData, marker::PhantomPinned};
use macros::pin_data;

pub mod mutex;

/// The "backend" of a lock.
///
/// It is the actual implementation of the lock, without the need to repeat patterns used in all
/// locks.
///
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
///   is owned, that is, between calls to `lock` and `unlock`.
pub unsafe trait Backend {
    /// The state required by the lock.
    ///
    /// This is the C lock object itself (wrapped in [`Opaque`] by [`Lock`]).
    type State;

    /// The state required to be kept between lock and unlock.
    ///
    /// Returned by [`Backend::lock`] and handed back to [`Backend::unlock`].
    type GuardState;

    /// Initialises the lock.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for write for the duration of the call, while `name` and `key` must
    /// remain valid for read indefinitely.
    unsafe fn init(
        ptr: *mut Self::State,
        name: *const core::ffi::c_char,
        key: *mut bindings::lock_class_key,
    );

    /// Acquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    #[must_use]
    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;

    /// Releases the lock, giving up its ownership.
    ///
    /// # Safety
    ///
    /// It must only be called by the current owner of the lock.
    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);
}

/// A mutual exclusion primitive.
///
/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock backend
/// specified as the generic parameter `B`.
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
    /// The kernel lock object.
    #[pin]
    state: Opaque<B::State>,

    /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
    /// or config defined (e.g., spinlocks). So we conservatively require them to be pinned in case
    /// some architecture uses self-references now or in the future.
    #[pin]
    _pin: PhantomPinned,

    /// The data protected by the lock.
    data: UnsafeCell<T>,
}

// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can.
unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {}

// SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the
// data it protects is `Send`.
unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}

impl<T, B: Backend> Lock<T, B> {
    /// Constructs a new lock initialiser.
    ///
    /// The returned [`PinInit`] must be pinned before the lock is usable; `name` and `key` are
    /// forwarded to the backend (and lockdep) and must therefore live forever, hence the
    /// `'static` bounds.
    #[allow(clippy::new_ret_no_self)]
    pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
        pin_init!(Self {
            data: UnsafeCell::new(t),
            _pin: PhantomPinned,
            // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
            // static lifetimes so they live indefinitely.
            state <- Opaque::ffi_init(|slot| unsafe {
                B::init(slot, name.as_char_ptr(), key.as_ptr())
            }),
        })
    }
}

impl<T: ?Sized, B: Backend> Lock<T, B> {
    /// Acquires the lock and gives the caller access to the data protected by it.
    ///
    /// The lock is released when the returned [`Guard`] is dropped.
    pub fn lock(&self) -> Guard<'_, T, B> {
        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
        // that `init` was called.
        let state = unsafe { B::lock(self.state.get()) };
        // SAFETY: The lock was just acquired.
        unsafe { Guard::new(self, state) }
    }
}

/// A lock guard.
/// Allows mutual exclusion primitives that implement the `Backend` trait to automatically unlock
/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
/// protected by the lock.
#[must_use = "the lock unlocks immediately when the guard is unused"]
pub struct Guard<'a, T: ?Sized, B: Backend> {
    pub(crate) lock: &'a Lock<T, B>,
    pub(crate) state: B::GuardState,
    // `*mut ()` is `!Send`, so the guard cannot be sent to another thread; the lock must be
    // released on the thread/CPU that acquired it.
    _not_send: PhantomData<*mut ()>,
}

// SAFETY: `Guard` is sync when the data protected by the lock is also sync.
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}

impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &*self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
    fn drop(&mut self) {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };
    }
}

impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
    /// Constructs a new immutable lock guard.
    ///
    /// # Safety
    ///
    /// The caller must ensure that it owns the lock.
    pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
        Self {
            lock,
            state,
            _not_send: PhantomData,
        }
    }
}