// SPDX-License-Identifier: GPL-2.0

//! Generic kernel lock and guard.
//!
//! It contains a generic Rust lock and guard that allow for different backends (e.g., mutexes,
//! spinlocks, raw spinlocks) to be provided with minimal effort.

use super::LockClassKey;
use crate::{
    init::PinInit,
    pin_init,
    str::CStr,
    types::{NotThreadSafe, Opaque, ScopeGuard},
};
use core::{cell::UnsafeCell, marker::PhantomPinned};
use macros::pin_data;

pub mod mutex;
pub mod spinlock;

pub(super) mod global;
pub use global::{GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};

/// The "backend" of a lock.
///
/// It is the actual implementation of the lock, without the need to repeat patterns used in all
/// locks.
///
/// # Safety
///
/// - Implementers must ensure that only one thread/CPU may access the protected data once the lock
///   is owned, that is, between calls to [`lock`] and [`unlock`].
/// - Implementers must also ensure that [`relock`] uses the same locking method as the original
///   lock operation.
///
/// [`lock`]: Backend::lock
/// [`unlock`]: Backend::unlock
/// [`relock`]: Backend::relock
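///
/// # Example
///
/// A schematic backend implementation, loosely modelled on the spinlock backend in [`spinlock`].
/// This is an illustrative sketch only; the FFI calls are assumed to match the usual kernel C
/// helpers, and the default [`relock`] implementation is kept.
///
/// ```ignore
/// pub struct SpinLockBackend;
///
/// // SAFETY: The underlying kernel `spinlock_t` ensures mutual exclusion between `lock` and
/// // `unlock`, and the default `relock` acquires the lock the same way `lock` does.
/// unsafe impl Backend for SpinLockBackend {
///     type State = bindings::spinlock_t;
///     type GuardState = ();
///
///     unsafe fn init(
///         ptr: *mut Self::State,
///         name: *const crate::ffi::c_char,
///         key: *mut bindings::lock_class_key,
///     ) {
///         // SAFETY: Per the `init` safety requirements, `ptr` is valid for writes and `name`
///         // and `key` remain valid for reads.
///         unsafe { bindings::__spin_lock_init(ptr, name, key) }
///     }
///
///     unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
///         // SAFETY: Per the safety requirements, `ptr` points to an initialised lock.
///         unsafe { bindings::spin_lock(ptr) }
///     }
///
///     unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
///         // SAFETY: Per the safety requirements, `ptr` points to an initialised lock.
///         if unsafe { bindings::spin_trylock(ptr) } != 0 {
///             Some(())
///         } else {
///             None
///         }
///     }
///
///     unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
///         // SAFETY: Per the safety requirements, the caller is the current owner of the lock.
///         unsafe { bindings::spin_unlock(ptr) }
///     }
/// }
/// ```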
pub unsafe trait Backend {
    /// The state required by the lock.
    type State;

    /// The state required to be kept between [`lock`] and [`unlock`].
    ///
    /// [`lock`]: Backend::lock
    /// [`unlock`]: Backend::unlock
    type GuardState;

    /// Initialises the lock.
    ///
    /// # Safety
    ///
    /// `ptr` must be valid for writes for the duration of the call, while `name` and `key` must
    /// remain valid for reads indefinitely.
    unsafe fn init(
        ptr: *mut Self::State,
        name: *const crate::ffi::c_char,
        key: *mut bindings::lock_class_key,
    );

    /// Acquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    #[must_use]
    unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState;

    /// Tries to acquire the lock.
    ///
    /// # Safety
    ///
    /// Callers must ensure that [`Backend::init`] has been previously called.
    unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState>;

    /// Releases the lock, giving up its ownership.
    ///
    /// # Safety
    ///
    /// It must only be called by the current owner of the lock.
    unsafe fn unlock(ptr: *mut Self::State, guard_state: &Self::GuardState);

    /// Reacquires the lock, making the caller its owner.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `guard_state` comes from a previous call to [`Backend::lock`] (or
    /// variant) that has been unlocked with [`Backend::unlock`] and will be relocked now.
    unsafe fn relock(ptr: *mut Self::State, guard_state: &mut Self::GuardState) {
        // SAFETY: The safety requirements ensure that the lock is initialised.
        *guard_state = unsafe { Self::lock(ptr) };
    }
}

/// A mutual exclusion primitive.
///
/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
/// [`Backend`] specified as the generic parameter `B`.
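///
/// # Examples
///
/// A minimal usage sketch using the [`mutex`] backend through its `new_mutex!` constructor macro
/// (see [`mutex`] for complete examples). `#[pin_data]`, `pin_init!` and `PinInit` are assumed to
/// be available via the kernel prelude.
///
/// ```ignore
/// use kernel::sync::{new_mutex, Mutex};
///
/// struct Inner {
///     a: u32,
/// }
///
/// #[pin_data]
/// struct Example {
///     #[pin]
///     inner: Mutex<Inner>,
/// }
///
/// impl Example {
///     fn new() -> impl PinInit<Self> {
///         pin_init!(Self {
///             inner <- new_mutex!(Inner { a: 20 }),
///         })
///     }
/// }
///
/// fn bump(e: &Example) -> u32 {
///     // `lock()` returns a `Guard` that derefs to `Inner` and unlocks on drop.
///     let mut guard = e.inner.lock();
///     guard.a += 1;
///     guard.a
/// }
/// ```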
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
    /// The kernel lock object.
    #[pin]
    state: Opaque<B::State>,

    /// Some locks are known to be self-referential (e.g., mutexes), while others are architecture
    /// or config defined (e.g., spinlocks). So we conservatively require them to be pinned in case
    /// some architecture uses self-references now or in the future.
    #[pin]
    _pin: PhantomPinned,

    /// The data protected by the lock.
    pub(crate) data: UnsafeCell<T>,
}

// SAFETY: `Lock` can be transferred across thread boundaries iff the data it protects can.
unsafe impl<T: ?Sized + Send, B: Backend> Send for Lock<T, B> {}

// SAFETY: `Lock` serialises the interior mutability it provides, so it is `Sync` as long as the
// data it protects is `Send`.
unsafe impl<T: ?Sized + Send, B: Backend> Sync for Lock<T, B> {}

impl<T, B: Backend> Lock<T, B> {
    /// Constructs a new lock initialiser.
    pub fn new(t: T, name: &'static CStr, key: &'static LockClassKey) -> impl PinInit<Self> {
        pin_init!(Self {
            data: UnsafeCell::new(t),
            _pin: PhantomPinned,
            // SAFETY: `slot` is valid while the closure is called and both `name` and `key` have
            // static lifetimes so they live indefinitely.
            state <- Opaque::ffi_init(|slot| unsafe {
                B::init(slot, name.as_char_ptr(), key.as_ptr())
            }),
        })
    }
}

impl<T: ?Sized, B: Backend> Lock<T, B> {
    /// Acquires the lock and gives the caller access to the data protected by it.
    pub fn lock(&self) -> Guard<'_, T, B> {
        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
        // that `init` was called.
        let state = unsafe { B::lock(self.state.get()) };
        // SAFETY: The lock was just acquired.
        unsafe { Guard::new(self, state) }
    }

    /// Tries to acquire the lock.
    ///
    /// Returns a guard that can be used to access the data protected by the lock if successful.
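    ///
    /// # Examples
    ///
    /// A sketch of non-blocking acquisition, assuming `flags` is a `Mutex<u32>` (a [`Lock`] using
    /// the [`mutex`] backend):
    ///
    /// ```ignore
    /// if let Some(mut guard) = flags.try_lock() {
    ///     // The lock was free; the guard releases it again at the end of this block.
    ///     *guard |= 0x1;
    /// } else {
    ///     // Somebody else holds the lock; carry on without blocking.
    /// }
    /// ```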
    pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
        // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
        // that `init` was called.
        unsafe { B::try_lock(self.state.get()).map(|state| Guard::new(self, state)) }
    }
}

/// A lock guard.
///
/// Allows mutual exclusion primitives that implement the [`Backend`] trait to automatically unlock
/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
/// protected by the lock.
#[must_use = "the lock unlocks immediately when the guard is unused"]
pub struct Guard<'a, T: ?Sized, B: Backend> {
    pub(crate) lock: &'a Lock<T, B>,
    pub(crate) state: B::GuardState,
    _not_send: NotThreadSafe,
}

// SAFETY: `Guard` is sync when the data protected by the lock is also sync.
unsafe impl<T: Sync + ?Sized, B: Backend> Sync for Guard<'_, T, B> {}

impl<T: ?Sized, B: Backend> Guard<'_, T, B> {
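    /// Releases the lock, runs `cb`, and then reacquires the lock before returning.
    ///
    /// This is used to temporarily drop the lock around a blocking operation (e.g., while waiting
    /// on a condition variable) without giving up the guard itself.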
    pub(crate) fn do_unlocked<U>(&mut self, cb: impl FnOnce() -> U) -> U {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };

        let _relock = ScopeGuard::new(||
            // SAFETY: The lock was just unlocked above and is being relocked now.
            unsafe { B::relock(self.lock.state.get(), &mut self.state) });

        cb()
    }
}

impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &*self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
    fn drop(&mut self) {
        // SAFETY: The caller owns the lock, so it is safe to unlock it.
        unsafe { B::unlock(self.lock.state.get(), &self.state) };
    }
}

impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
    /// Constructs a new lock guard.
    ///
    /// # Safety
    ///
    /// The caller must ensure that it owns the lock.
    pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
        Self {
            lock,
            state,
            _not_send: NotThreadSafe,
        }
    }
}