v6.2
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs      *ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);
/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that polling is enabled if REQ_POLLED is set.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}
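
/*
 * Descriptive note (not part of the original header): with the type selection
 * in blk_mq_get_hctx_type() above, a request marked REQ_POLLED is routed to
 * the HCTX_TYPE_POLL queue, a plain REQ_OP_READ to the HCTX_TYPE_READ queue,
 * and everything else (writes, flushes, discards, ...) to HCTX_TYPE_DEFAULT;
 * ctx->hctxs[] caches one hardware queue pointer per type for the submitting
 * CPU.
 */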

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct request **cached_rq;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

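
/*
 * Illustrative sketch (not part of the original header): blk_mq_alloc_data is
 * normally built on the stack by the request allocation paths in blk-mq.c,
 * roughly as below; example_prepare_alloc_data() is a hypothetical helper and
 * the field values are examples only.
 */
static inline void example_prepare_alloc_data(struct request_queue *q,
					      blk_opf_t opf,
					      blk_mq_req_flags_t flags,
					      struct blk_mq_alloc_data *data)
{
	*data = (struct blk_mq_alloc_data) {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= opf,
		.nr_tags	= 1,	/* allocate a single request/tag */
	};

	/* resolve the software and hardware queue for the submitting CPU */
	data->ctx = blk_mq_get_ctx(q);
	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
}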

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (!(data->rq_flags & RQF_ELV))
		return data->hctx->tags;
	return data->hctx->sched_tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_inc(&hctx->nr_active);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
		int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		__blk_mq_dec_active_requests(hctx);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	if (rq->tag != BLK_MQ_NO_TAG &&
	    !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
		hctx->tags->rqs[rq->tag] = rq;
		return true;
	}

	return __blk_mq_get_driver_tag(hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @bio : the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices, this
 * ordering change can cause write BIO failures with zoned block devices as
 * these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if it is for a
 * zoned block device and the BIO to plug is a write operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct bio *bio)
{
	/* Zoned block device write operation case: do not plug the BIO */
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
		return NULL;

	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	return current->plug;
}
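
/*
 * Illustrative sketch (not part of the original header): blk_mq_plug() simply
 * returns current->plug (or NULL for zoned writes), so a plug only exists if
 * the issuing context set one up around submission, e.g. as below;
 * example_submit_bios() and its arguments are hypothetical.
 */
static inline void example_submit_bios(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* makes current->plug non-NULL */
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);		/* flush the plugged requests */
}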

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = atomic_read(&hctx->tags->active_queues);

	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}
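
/*
 * Worked example (descriptive note, not part of the original header): with a
 * shared bitmap depth of 256 and 8 active queues, the check above caps each
 * queue at max((256 + 7) / 8, 4U) = 32 in-flight requests; with 128 active
 * queues the ceiling division would give 2, so the floor of 4 tags applies.
 */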

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {								\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {		\
		int srcu_idx;					\
								\
		might_sleep_if(check_sleep);			\
		srcu_idx = srcu_read_lock((q)->tag_set->srcu);	\
		(dispatch_ops);					\
		srcu_read_unlock((q)->tag_set->srcu, srcu_idx);	\
	} else {						\
		rcu_read_lock();				\
		(dispatch_ops);					\
		rcu_read_unlock();				\
	}							\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)	\

#endif
v6.13.7
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include <linux/blk-mq.h>
#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	struct request_queue	*queue;
	struct blk_mq_ctxs      *ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

enum {
	BLK_MQ_NO_TAG		= -1U,
	BLK_MQ_TAG_MIN		= 1,
	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
};
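
/*
 * Descriptive note (not part of the original header): tags are small array
 * indices, BLK_MQ_NO_TAG (all bits set) is the "no tag assigned" sentinel,
 * and reserved tags occupy the low range [0, nr_reserved_tags) as checked by
 * blk_mq_tag_is_reserved() below.
 */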

#define BLK_MQ_CPU_WORK_BATCH	(8)

typedef unsigned int __bitwise blk_insert_t;
#define BLK_MQ_INSERT_AT_HEAD		((__force blk_insert_t)0x01)

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}

static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that polling is enabled if REQ_POLLED is set.
	 */
	if (opf & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
 * @ctx: software queue cpu ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     blk_opf_t opf,
						     struct blk_mq_ctx *ctx)
{
	return ctx->hctxs[blk_mq_get_hctx_type(opf)];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
int blk_mq_sysfs_register(struct gendisk *disk);
void blk_mq_sysfs_unregister(struct gendisk *disk);
int blk_mq_sysfs_register_hctxs(struct request_queue *q);
void blk_mq_sysfs_unregister_hctxs(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_cancel_work_sync(struct request_queue *q);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	blk_opf_t cmd_flags;
	req_flags_t rq_flags;

	/* allocate multiple requests/tags in one go */
	unsigned int nr_tags;
	struct rq_list *cached_rqs;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
		unsigned int reserved_tags, int node, int alloc_policy);
void blk_mq_free_tags(struct blk_mq_tags *tags);
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
		struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
		unsigned int reserved, int node, int alloc_policy);

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
		unsigned int *offset);
void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
		unsigned int size);
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);

void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv);

static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
						 struct blk_mq_hw_ctx *hctx)
{
	if (!hctx)
		return &bt->ws[0];
	return sbq_wait_ptr(bt, &hctx->wait_index);
}

void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);

static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_busy(hctx);
}

static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_tag_idle(hctx);
}

static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
					  unsigned int tag)
{
	return tag < tags->nr_reserved_tags;
}

static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->rq_flags & RQF_SCHED_TAGS)
		return data->hctx->sched_tags;
	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	/* Fast path: hardware queue is not stopped most of the time. */
	if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return false;

	/*
	 * This barrier orders adding a request to the dispatch list before
	 * the test of BLK_MQ_S_STOPPED below. It pairs with the memory
	 * barrier in blk_mq_start_stopped_hw_queue(), so that dispatch code
	 * either sees BLK_MQ_S_STOPPED cleared or sees a non-empty dispatch
	 * list, and therefore does not miss requests that need dispatching.
	 */
	smp_mb();

	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q,
		struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
		unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
					      int budget_token)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q, budget_token);
}

static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return 0;
}

static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
{
	if (token < 0)
		return;

	if (rq->q->mq_ops->set_rq_budget_token)
		rq->q->mq_ops->set_rq_budget_token(rq, token);
}

static inline int blk_mq_get_rq_budget_token(struct request *rq)
{
	if (rq->q->mq_ops->get_rq_budget_token)
		return rq->q->mq_ops->get_rq_budget_token(rq);
	return -1;
}

static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_add(val, &hctx->nr_active);
}

static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_add_active_requests(hctx, 1);
}

static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
		int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_add_active_requests(hctx, val);
}

static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_inc_active_requests(hctx);
}

static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, val);
}

static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_dec_active_requests(hctx);
}

static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_dec_active_requests(hctx);
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

bool __blk_mq_alloc_driver_tag(struct request *rq);

static inline bool blk_mq_get_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
		return false;

	return true;
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/* Free all requests on the list */
static inline void blk_mq_free_requests(struct list_head *list)
{
	while (!list_empty(list)) {
		struct request *rq = list_entry_rq(list->next);

		list_del_init(&rq->queuelist);
		blk_mq_free_request(rq);
	}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = READ_ONCE(hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
do {								\
	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {		\
		struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
		int srcu_idx;					\
								\
		might_sleep_if(check_sleep);			\
		srcu_idx = srcu_read_lock(__tag_set->srcu);	\
		(dispatch_ops);					\
		srcu_read_unlock(__tag_set->srcu, srcu_idx);	\
	} else {						\
		rcu_read_lock();				\
		(dispatch_ops);					\
		rcu_read_unlock();				\
	}							\
} while (0)

#define blk_mq_run_dispatch_ops(q, dispatch_ops)		\
	__blk_mq_run_dispatch_ops(q, true, dispatch_ops)	\

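
/*
 * Illustrative sketch (not part of the original header), modeled on how
 * blk-mq.c runs a hardware queue: the dispatch work is wrapped in
 * blk_mq_run_dispatch_ops() so it executes under rcu_read_lock(), or under an
 * SRCU read lock for BLK_MQ_F_BLOCKING tag sets. example_run_hw_queue() is
 * hypothetical; blk_mq_sched_dispatch_requests() is declared in blk-mq-sched.h.
 */
static inline void example_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	blk_mq_run_dispatch_ops(hctx->queue,
				blk_mq_sched_dispatch_requests(hctx));
}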

static inline bool blk_mq_can_poll(struct request_queue *q)
{
	return (q->limits.features & BLK_FEAT_POLL) &&
		q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
}
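
/*
 * Illustrative sketch (not part of the original header): a minimal polling
 * call, roughly how the bio_poll() path drives completions. The cookie comes
 * from the submission side; passing a NULL io_comp_batch and 0 flags is an
 * assumption of this sketch, and example_poll_once() is hypothetical.
 */
static inline int example_poll_once(struct request_queue *q, blk_qc_t cookie)
{
	if (!blk_mq_can_poll(q))
		return 0;

	/* positive return means at least one completion was found */
	return blk_mq_poll(q, cookie, NULL, 0);
}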

#endif