Loading...
Note: File does not exist in v4.10.11.
1// SPDX-License-Identifier: GPL-2.0
2
3//! This module provides a wrapper for the C `struct request` type.
4//!
5//! C header: [`include/linux/blk-mq.h`](srctree/include/linux/blk-mq.h)
6
7use crate::{
8 bindings,
9 block::mq::Operations,
10 error::Result,
11 types::{ARef, AlwaysRefCounted, Opaque},
12};
13use core::{
14 marker::PhantomData,
15 ptr::{addr_of_mut, NonNull},
16 sync::atomic::{AtomicU64, Ordering},
17};
18
/// A wrapper around a blk-mq [`struct request`]. This represents an IO request.
///
/// # Implementation details
///
/// There are four states for a request that the Rust bindings care about:
///
/// 1. Request is owned by block layer (refcount 0).
/// 2. Request is owned by driver but with zero [`ARef`]s in existence
///    (refcount 1).
/// 3. Request is owned by driver with exactly one [`ARef`] in existence
///    (refcount 2).
/// 4. Request is owned by driver with more than one [`ARef`] in existence
///    (refcount > 2).
///
/// We need to track 1 and 2 to ensure we fail tag to request conversions for
/// requests that are not owned by the driver.
///
/// We need to track 3 and 4 to ensure that it is safe to end the request and hand
/// back ownership to the block layer.
///
/// The states are tracked through the private `refcount` field of
/// `RequestDataWrapper`. This structure lives in the private data area of the C
/// [`struct request`].
///
/// # Invariants
///
/// * `self.0` is a valid [`struct request`] created by the C portion of the
///   kernel.
/// * The private data area associated with this request must be an initialized
///   and valid `RequestDataWrapper`.
/// * `self` is reference counted by atomic modification of
///   `self.wrapper_ref().refcount()`.
///
/// [`struct request`]: srctree/include/linux/blk-mq.h
#[repr(transparent)]
pub struct Request<T: Operations>(Opaque<bindings::request>, PhantomData<T>);
57
58impl<T: Operations> Request<T> {
59 /// Create an [`ARef<Request>`] from a [`struct request`] pointer.
60 ///
61 /// # Safety
62 ///
63 /// * The caller must own a refcount on `ptr` that is transferred to the
64 /// returned [`ARef`].
65 /// * The type invariants for [`Request`] must hold for the pointee of `ptr`.
66 ///
67 /// [`struct request`]: srctree/include/linux/blk-mq.h
68 pub(crate) unsafe fn aref_from_raw(ptr: *mut bindings::request) -> ARef<Self> {
69 // INVARIANT: By the safety requirements of this function, invariants are upheld.
70 // SAFETY: By the safety requirement of this function, we own a
71 // reference count that we can pass to `ARef`.
72 unsafe { ARef::from_raw(NonNull::new_unchecked(ptr as *const Self as *mut Self)) }
73 }
74
75 /// Notify the block layer that a request is going to be processed now.
76 ///
77 /// The block layer uses this hook to do proper initializations such as
78 /// starting the timeout timer. It is a requirement that block device
79 /// drivers call this function when starting to process a request.
80 ///
81 /// # Safety
82 ///
83 /// The caller must have exclusive ownership of `self`, that is
84 /// `self.wrapper_ref().refcount() == 2`.
85 pub(crate) unsafe fn start_unchecked(this: &ARef<Self>) {
86 // SAFETY: By type invariant, `self.0` is a valid `struct request` and
87 // we have exclusive access.
88 unsafe { bindings::blk_mq_start_request(this.0.get()) };
89 }
90
91 /// Try to take exclusive ownership of `this` by dropping the refcount to 0.
92 /// This fails if `this` is not the only [`ARef`] pointing to the underlying
93 /// [`Request`].
94 ///
95 /// If the operation is successful, [`Ok`] is returned with a pointer to the
96 /// C [`struct request`]. If the operation fails, `this` is returned in the
97 /// [`Err`] variant.
98 ///
99 /// [`struct request`]: srctree/include/linux/blk-mq.h
100 fn try_set_end(this: ARef<Self>) -> Result<*mut bindings::request, ARef<Self>> {
101 // We can race with `TagSet::tag_to_rq`
102 if let Err(_old) = this.wrapper_ref().refcount().compare_exchange(
103 2,
104 0,
105 Ordering::Relaxed,
106 Ordering::Relaxed,
107 ) {
108 return Err(this);
109 }
110
111 let request_ptr = this.0.get();
112 core::mem::forget(this);
113
114 Ok(request_ptr)
115 }
116
117 /// Notify the block layer that the request has been completed without errors.
118 ///
119 /// This function will return [`Err`] if `this` is not the only [`ARef`]
120 /// referencing the request.
121 pub fn end_ok(this: ARef<Self>) -> Result<(), ARef<Self>> {
122 let request_ptr = Self::try_set_end(this)?;
123
124 // SAFETY: By type invariant, `this.0` was a valid `struct request`. The
125 // success of the call to `try_set_end` guarantees that there are no
126 // `ARef`s pointing to this request. Therefore it is safe to hand it
127 // back to the block layer.
128 unsafe { bindings::blk_mq_end_request(request_ptr, bindings::BLK_STS_OK as _) };
129
130 Ok(())
131 }
132
133 /// Return a pointer to the [`RequestDataWrapper`] stored in the private area
134 /// of the request structure.
135 ///
136 /// # Safety
137 ///
138 /// - `this` must point to a valid allocation of size at least size of
139 /// [`Self`] plus size of [`RequestDataWrapper`].
140 pub(crate) unsafe fn wrapper_ptr(this: *mut Self) -> NonNull<RequestDataWrapper> {
141 let request_ptr = this.cast::<bindings::request>();
142 // SAFETY: By safety requirements for this function, `this` is a
143 // valid allocation.
144 let wrapper_ptr =
145 unsafe { bindings::blk_mq_rq_to_pdu(request_ptr).cast::<RequestDataWrapper>() };
146 // SAFETY: By C API contract, wrapper_ptr points to a valid allocation
147 // and is not null.
148 unsafe { NonNull::new_unchecked(wrapper_ptr) }
149 }
150
151 /// Return a reference to the [`RequestDataWrapper`] stored in the private
152 /// area of the request structure.
153 pub(crate) fn wrapper_ref(&self) -> &RequestDataWrapper {
154 // SAFETY: By type invariant, `self.0` is a valid allocation. Further,
155 // the private data associated with this request is initialized and
156 // valid. The existence of `&self` guarantees that the private data is
157 // valid as a shared reference.
158 unsafe { Self::wrapper_ptr(self as *const Self as *mut Self).as_ref() }
159 }
160}
161
/// A wrapper around data stored in the private area of the C [`struct request`].
///
/// An instance of this type lives in the per-request private data area
/// (`blk_mq_rq_to_pdu`), alongside the C request it describes.
///
/// [`struct request`]: srctree/include/linux/blk-mq.h
pub(crate) struct RequestDataWrapper {
    /// The Rust request refcount has the following states:
    ///
    /// - 0: The request is owned by C block layer.
    /// - 1: The request is owned by Rust abstractions but there are no [`ARef`] references to it.
    /// - 2+: There are [`ARef`] references to the request.
    refcount: AtomicU64,
}
173
impl RequestDataWrapper {
    /// Return a reference to the refcount of the request that is embedding
    /// `self`.
    pub(crate) fn refcount(&self) -> &AtomicU64 {
        &self.refcount
    }

    /// Return a pointer to the refcount of the request that is embedding the
    /// pointee of `this`.
    ///
    /// # Safety
    ///
    /// - `this` must point to a live allocation of at least the size of `Self`.
    pub(crate) unsafe fn refcount_ptr(this: *mut Self) -> *mut AtomicU64 {
        // SAFETY: Because of the safety requirements of this function, the
        // field projection is safe. `addr_of_mut!` produces the field pointer
        // without creating an intermediate reference.
        unsafe { addr_of_mut!((*this).refcount) }
    }
}
193
// SAFETY: Exclusive access is thread-safe for `Request`. `Request` has no `&mut
// self` methods and `&self` methods that mutate `self` are internally
// synchronized.
unsafe impl<T: Operations> Send for Request<T> {}

// SAFETY: Shared access is thread-safe for `Request`. `&self` methods that
// mutate `self` are internally synchronized.
unsafe impl<T: Operations> Sync for Request<T> {}
202
/// Store the result of `op(target.load())` in `target`, returning the new
/// value of `target`.
fn atomic_relaxed_op_return(target: &AtomicU64, op: impl Fn(u64) -> u64) -> u64 {
    let previous =
        target.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |value| Some(op(value)));

    // SAFETY: The closure passed to `fetch_update` above always returns
    // `Some`, so the update cannot fail and `previous` is always `Ok`.
    let previous = unsafe { previous.unwrap_unchecked() };

    op(previous)
}
214
/// Store the result of `op(target.load())` in `target` unless `target`
/// currently holds `pred`, returning [`true`] if the target was updated.
fn atomic_relaxed_op_unless(target: &AtomicU64, op: impl Fn(u64) -> u64, pred: u64) -> bool {
    let result = target.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |value| {
        // Refuse the update when the current value matches the predicate.
        (value != pred).then(|| op(value))
    });
    result.is_ok()
}
228
// SAFETY: All instances of `Request<T>` are reference counted. This
// implementation of `AlwaysRefCounted` ensures that increments to the ref count
// keep the object alive in memory at least until a matching reference count
// decrement is executed.
unsafe impl<T: Operations> AlwaysRefCounted for Request<T> {
    fn inc_ref(&self) {
        let refcount = &self.wrapper_ref().refcount();

        // Only increment while the refcount is nonzero: a refcount of 0 means
        // the request is owned by the block layer, so handing out another
        // `ARef` would be a bug.
        #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
        let updated = atomic_relaxed_op_unless(refcount, |x| x + 1, 0);

        #[cfg(CONFIG_DEBUG_MISC)]
        if !updated {
            panic!("Request refcount zero on clone")
        }
    }

    unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
        // SAFETY: The type invariants of `ARef` guarantee that `obj` is valid
        // for read.
        let wrapper_ptr = unsafe { Self::wrapper_ptr(obj.as_ptr()).as_ptr() };
        // SAFETY: The type invariant of `Request` guarantees that the private
        // data area is initialized and valid.
        let refcount = unsafe { &*RequestDataWrapper::refcount_ptr(wrapper_ptr) };

        // Dropping the last `ARef` should leave the refcount at 1 (owned by
        // the driver with no `ARef`s); reaching 0 here indicates a bug.
        #[cfg_attr(not(CONFIG_DEBUG_MISC), allow(unused_variables))]
        let new_refcount = atomic_relaxed_op_return(refcount, |x| x - 1);

        #[cfg(CONFIG_DEBUG_MISC)]
        if new_refcount == 0 {
            panic!("Request reached refcount zero in Rust abstractions");
        }
    }
}