v4.17
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	}  ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

/*
 * Bits for request->gstate.  The lower two bits carry MQ_RQ_* state value
 * and the upper bits the generation number.
 */
enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,

	MQ_RQ_STATE_BITS	= 2,
	MQ_RQ_STATE_MASK	= (1 << MQ_RQ_STATE_BITS) - 1,
	MQ_RQ_GEN_INC		= 1 << MQ_RQ_STATE_BITS,
};
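
/*
 * Worked example (an added illustration, not part of the original
 * header): with MQ_RQ_STATE_BITS == 2, a gstate value of 0x9 (0b1001)
 * decodes as generation 2 (0x9 >> MQ_RQ_STATE_BITS) in state
 * MQ_RQ_IN_FLIGHT (0x9 & MQ_RQ_STATE_MASK).  Each IDLE -> IN_FLIGHT
 * transition adds MQ_RQ_GEN_INC (1 << 2), so a completion carrying a
 * stale generation can be told apart from one for the current use of
 * the request.
 */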

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline int blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
}

/**
 * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
 * @rq: target request.
 * @state: new state to set.
 *
 * Set @rq's state to @state.  The caller is responsible for ensuring that
 * there are no other updaters.  A request can transition into IN_FLIGHT
 * only from IDLE and doing so increments the generation number.
 */
static inline void blk_mq_rq_update_state(struct request *rq,
					  enum mq_rq_state state)
{
	u64 old_val = READ_ONCE(rq->gstate);
	u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;

	if (state == MQ_RQ_IN_FLIGHT) {
		WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
		new_val += MQ_RQ_GEN_INC;
	}

	/* avoid exposing interim values */
	WRITE_ONCE(rq->gstate, new_val);
}
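
/*
 * Typical lifecycle (a sketch added for illustration, not code from
 * this file): blk-mq moves a request IDLE -> IN_FLIGHT when it is
 * started (which bumps the generation), IN_FLIGHT -> COMPLETE when it
 * finishes, and back to IDLE when it is freed or requeued, e.g.:
 *
 *	blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);
 *	...
 *	blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);
 *	...
 *	blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
 */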

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
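
/*
 * Usage sketch (illustration only): blk_mq_get_ctx() pins the caller to
 * its CPU via get_cpu(), so every call must be balanced with
 * blk_mq_put_ctx() once the software queue pointer is no longer needed:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	(work on ctx with preemption disabled)
 *	blk_mq_put_ctx(ctx);
 */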

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}
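
/*
 * Dispatch budget contract, as a rough sketch (the real callers live in
 * blk-mq.c and blk-mq-sched.c): a driver that implements ->get_budget()
 * is asked for a budget before each request is dispatched, and the
 * budget is handed back with ->put_budget() whenever the request is not
 * actually issued, e.g.:
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		break;
 *	if (!blk_mq_get_driver_tag(rq, NULL, false)) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		break;
 *	}
 */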

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

#endif
v5.9
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctxs {
	struct kobject kobj;
	struct blk_mq_ctx __percpu	*queue_ctx;
};

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_lists[HCTX_MAX_TYPES];
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned short		index_hw[HCTX_MAX_TYPES];
	struct blk_mq_hw_ctx	*hctxs[HCTX_MAX_TYPES];

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct blk_mq_ctxs      *ctxs;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
			     unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
				  bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
							  enum hctx_type type,
							  unsigned int cpu)
{
	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @ctx: software queue ctx
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     unsigned int flags,
						     struct blk_mq_ctx *ctx)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	/*
	 * The caller ensures that if REQ_HIPRI is set, polling is enabled.
	 */
	if (flags & REQ_HIPRI)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;

	return ctx->hctxs[type];
}
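
/*
 * Example (an added illustration, not from the original file): with a
 * driver that registers separate default, read and poll maps, a
 * REQ_HIPRI request lands on ctx->hctxs[HCTX_TYPE_POLL], a plain read
 * on ctx->hctxs[HCTX_TYPE_READ], and writes/flushes/discards on
 * ctx->hctxs[HCTX_TYPE_DEFAULT].  When only a single map is registered,
 * the unused type entries are set up to point at the default mapping,
 * so the lookup above still works unchanged.
 */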

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;
	unsigned int cmd_flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->q->elevator)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(q);
}

static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
{
	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(q);
	return true;
}

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	if (rq->tag == BLK_MQ_NO_TAG || rq->internal_tag == BLK_MQ_NO_TAG)
		return;

	__blk_mq_put_driver_tag(rq->mq_hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
	int cpu;

	for_each_possible_cpu(cpu)
		qmap->mq_map[cpu] = 0;
}

/*
 * blk_mq_plug() - Get caller context plug
 * @q: request queue
 * @bio: the bio being submitted by the caller context
 *
 * Plugging, by design, may delay the insertion of BIOs into the elevator in
 * order to increase BIO merging opportunities. This however can cause BIO
 * insertion order to change from the order in which submit_bio() is being
 * executed in the case of multiple contexts concurrently issuing BIOs to a
 * device, even if these contexts are synchronized to tightly control BIO
 * issuing order. While this is not a problem with regular block devices, this
 * ordering change can cause write BIO failures with zoned block devices as
 * these require sequential write patterns to zones. Prevent this from
 * happening by ignoring the plug state of a BIO issuing context if the target
 * request queue is for a zoned block device and the BIO to plug is a write
 * operation.
 *
 * Return current->plug if the bio can be plugged and NULL otherwise
 */
static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
					   struct bio *bio)
{
	/*
	 * For regular block devices or read operations, use the context plug
	 * which may be NULL if blk_start_plug() was not executed.
	 */
	if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
		return current->plug;

	/* Zoned block device write operation case: do not plug the BIO */
	return NULL;
}
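
/*
 * Usage sketch (illustration only; the real caller is the bio
 * submission path in blk-mq.c): submitters obtain the plug through this
 * helper rather than reading current->plug directly, so writes to zoned
 * devices automatically bypass plugging:
 *
 *	struct blk_plug *plug = blk_mq_plug(q, bio);
 *
 *	if (plug)
 *		(batch the request on the plug list)
 *	else
 *		(insert and run the hardware queue directly)
 */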

#endif