/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

/*
 * Bits for request->gstate. The lower two bits carry MQ_RQ_* state value
 * and the upper bits the generation number.
 */
enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,

	MQ_RQ_STATE_BITS	= 2,
	MQ_RQ_STATE_MASK	= (1 << MQ_RQ_STATE_BITS) - 1,
	MQ_RQ_GEN_INC		= 1 << MQ_RQ_STATE_BITS,
};
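
/*
 * Worked example (illustrative, not part of the original header): with
 * MQ_RQ_STATE_BITS == 2, a gstate value of 0x9 (0b1001) decodes as
 * generation 2 (0x9 >> MQ_RQ_STATE_BITS) in state MQ_RQ_IN_FLIGHT
 * (0x9 & MQ_RQ_STATE_MASK). Each IDLE -> IN_FLIGHT transition adds
 * MQ_RQ_GEN_INC (4), so the generation advances every time the request
 * is (re)issued.
 */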

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline int blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
}

/**
 * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
 * @rq: target request.
 * @state: new state to set.
 *
 * Set @rq's state to @state. The caller is responsible for ensuring that
 * there are no other updaters. A request can transition into IN_FLIGHT
 * only from IDLE and doing so increments the generation number.
 */
static inline void blk_mq_rq_update_state(struct request *rq,
					  enum mq_rq_state state)
{
	u64 old_val = READ_ONCE(rq->gstate);
	u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;

	if (state == MQ_RQ_IN_FLIGHT) {
		WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
		new_val += MQ_RQ_GEN_INC;
	}

	/* avoid exposing interim values */
	WRITE_ONCE(rq->gstate, new_val);
}
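
/*
 * Illustrative call site (an assumption, not taken from this header): the
 * issue path is expected to mark a request in flight before handing it to
 * the driver, which also bumps the generation:
 *
 *	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
 *	blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);
 */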

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
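
/*
 * Illustrative usage (an assumption, not taken from this header): since
 * blk_mq_get_ctx() calls get_cpu() and thus disables preemption, callers
 * must pair it with blk_mq_put_ctx():
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	... queue work on ctx ...
 *
 *	blk_mq_put_ctx(ctx);
 */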

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}
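
/*
 * Illustrative dispatch-side usage (an assumption, not taken from this
 * header): a dispatcher acquires a budget before issuing a request and
 * returns it if the request is never handed to ->queue_rq():
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		break;
 *	if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		break;
 *	}
 */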

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/blk-mq.h>
#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs	*ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};

#define BLK_MQ_CPU_WORK_BATCH	(8)

typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD		((__force blk_insert_t)0x01)

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_POLLED is set, polling is enabled.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}
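
/*
 * Illustrative example (not part of the original header): a polled read,
 * i.e. opf == (REQ_OP_READ | REQ_POLLED), maps to HCTX_TYPE_POLL, a plain
 * read maps to HCTX_TYPE_READ, and everything else (e.g. REQ_OP_WRITE)
 * falls back to HCTX_TYPE_DEFAULT:
 *
 *	hctx = blk_mq_map_queue(q, REQ_OP_READ | REQ_POLLED, ctx);
 */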

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well,
 * for instance. For now this is hardcoded as-is. Note that we don't care
 * about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct rq_list *cached_rqs;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
		unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
		struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
		unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->rq_flags & RQF_SCHED_TAGS)
		return data->hctx->sched_tags;
	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	/* Fast path: hardware queue is not stopped most of the time. */
	if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return false;

	/*
	 * This barrier orders adding requests to the dispatch list against
	 * the test of BLK_MQ_S_STOPPED below. It pairs with the memory
	 * barrier in blk_mq_start_stopped_hw_queue() so that the dispatch
	 * code either sees BLK_MQ_S_STOPPED cleared or sees a non-empty
	 * dispatch list, and thus cannot miss dispatching requests.
	 */
	smp_mb();

	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
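
/*
 * Sketch of the race being closed (illustrative, an assumption rather than
 * text from this header):
 *
 *	dispatcher				restart path
 *	----------				------------
 *	add request to dispatch list		clear BLK_MQ_S_STOPPED
 *	smp_mb()				smp_mb()
 *	test BLK_MQ_S_STOPPED			check dispatch list / rerun queue
 *
 * At least one side observes the other's store, so a request cannot sit on
 * the dispatch list of a stopped queue without anybody rerunning it.
 */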

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
			      struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}
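
/*
 * Illustrative dispatch-side usage (an assumption, not taken from this
 * header): a dispatcher obtains a budget token, stores it in the request,
 * and returns the budget if the request is not queued after all:
 *
 *	int budget_token = blk_mq_get_dispatch_budget(q);
 *
 *	if (budget_token < 0)
 *		return false;
 *	blk_mq_set_rq_budget_token(rq, budget_token);
 *	...
 *	if (the request cannot be issued)
 *		blk_mq_put_dispatch_budget(q, budget_token);
 */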

static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_add(val, &hctx->nr_active);
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_add_active_requests(hctx, 1);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_add_active_requests(hctx, val);
}

static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_inc_active_requests(hctx);
}

static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, val);
}

static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_dec_active_requests(hctx);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_dec_active_requests(hctx);
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_alloc_driver_tag(struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
		return false;

	return true;
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = READ_ONCE(hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
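
/*
 * Worked example (illustrative, not part of the original header): with a
 * shared bitmap depth of 256 and 8 active queues, each queue may have up to
 * max((256 + 7) / 8, 4) = 32 requests in flight; with a depth of 16 and 8
 * active queues the floor of 4 tags per queue applies instead.
 */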

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {									\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {			\
		struct blk_mq_tag_set *__tag_set = (q)->tag_set;	\
		int srcu_idx;						\
									\
		might_sleep_if(check_sleep);				\
		srcu_idx = srcu_read_lock(__tag_set->srcu);		\
		(dispatch_ops);						\
		srcu_read_unlock(__tag_set->srcu, srcu_idx);		\
	} else {							\
		rcu_read_lock();					\
		(dispatch_ops);						\
		rcu_read_unlock();					\
	}								\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)	\

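/*
 * Illustrative usage (an assumption, not taken from this header): wrap a
 * direct issue of a request in the rcu/srcu protected section, e.g. with a
 * hypothetical dispatch_one() helper:
 *
 *	blk_mq_run_dispatch_ops(rq->q,
 *		ret = dispatch_one(rq));
 */
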
static inline bool blk_mq_can_poll(struct request_queue *q)
{
	return (q->limits.features & BLK_FEAT_POLL) &&
		q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
}

#endif