/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

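/*
 * Look up the io_cq for this request's I/O context, creating one if none
 * exists yet, and attach it to the request for the elevator's use.
 */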
void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_inc(&q->shared_hctx_restart);
	} else
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

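/*
 * Clear a hardware queue's restart mark, dropping the shared restart count
 * if the tag set is shared, and rerun the queue. Returns false if no restart
 * was pending.
 */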
static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return false;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_dec(&q->shared_hctx_restart);
	} else
		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	return blk_mq_run_hw_queue(hctx, true);
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);

	do {
		struct request *rq;

		if (e->type->ops.mq.has_work &&
		    !e->type->ops.mq.has_work(hctx))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = e->type->ops.mq.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget, which has to be released
		 * if the rq isn't queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
}

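/*
 * Return the software queue that follows @ctx in @hctx's ctx array, wrapping
 * around to the first one, for round-robin dispatch.
 */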
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned idx = ctx->index_hw;

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);

	do {
		struct request *rq;

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget, which has to be released
		 * if the rq isn't queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	WRITE_ONCE(hctx->dispatch_from, ctx);
}

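/*
 * Main dispatch entry point for a hardware queue: drain any leftovers on the
 * dispatch list first, then pull from the IO scheduler or the software
 * queues as appropriate.
 */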
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
	LIST_HEAD(rq_list);

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
			if (has_sched_dispatch)
				blk_mq_do_dispatch_sched(hctx);
			else
				blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		blk_mq_do_dispatch_sched(hctx);
	} else if (q->mq_ops->get_budget) {
		/*
		 * If we need to get budget before queuing a request, dequeue
		 * requests one by one from the sw queue to avoid messing up
		 * I/O merging when dispatch runs out of resources.
		 *
		 * TODO: get more budget and dequeue more requests at a time.
		 */
		blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}
}

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	lockdep_assert_held(&ctx->lock);

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		if (merged)
			ctx->rq_merged++;
		return merged;
	}

	return false;
}

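/*
 * Try to merge @bio into an existing request, either via the elevator's
 * ->bio_merge() hook or against the CPU's software queue.
 */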
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	bool ret = false;

	if (e && e->type->ops.mq.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

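/*
 * Decide whether @rq must bypass the elevator: requests from the flush
 * machinery go straight to the dispatch list. Everything else is marked
 * RQF_SORTED when a scheduler is attached.
 */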
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/* dispatch flush rq directly */
	if (rq->rq_flags & RQF_FLUSH_SEQ) {
		spin_lock(&hctx->lock);
		list_add(&rq->queuelist, &hctx->dispatch);
		spin_unlock(&hctx->lock);
		return true;
	}

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

/**
 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
 * @pos:    loop cursor.
 * @skip:   the list element that will not be examined. Iteration starts at
 *          @skip->next.
 * @head:   head of the list to examine. This list must have at least one
 *          element, namely @skip.
 * @member: name of the list_head structure within typeof(*pos).
 */
#define list_for_each_entry_rcu_rr(pos, skip, head, member)		\
	for ((pos) = (skip);						\
	     (pos = (pos)->member.next != (head) ? list_entry_rcu(	\
			(pos)->member.next, typeof(*pos), member) :	\
			list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
	     (pos) != (skip); )

/*
 * Called after a driver tag has been freed to check whether a hctx needs to
 * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
 * queues in a round-robin fashion if the tag set of @hctx is shared with other
 * hardware queues.
 */
void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
{
	struct blk_mq_tags *const tags = hctx->tags;
	struct blk_mq_tag_set *const set = hctx->queue->tag_set;
	struct request_queue *const queue = hctx->queue, *q;
	struct blk_mq_hw_ctx *hctx2;
	unsigned int i, j;

	if (set->flags & BLK_MQ_F_TAG_SHARED) {
		/*
		 * If this is 0, then we know that no hardware queues
		 * have RESTART marked. We're done.
		 */
		if (!atomic_read(&queue->shared_hctx_restart))
			return;

		rcu_read_lock();
		list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
					   tag_set_list) {
			queue_for_each_hw_ctx(q, hctx2, i)
				if (hctx2->tags == tags &&
				    blk_mq_sched_restart_hctx(hctx2))
					goto done;
		}
		j = hctx->queue_num + 1;
		for (i = 0; i < queue->nr_hw_queues; i++, j++) {
			if (j == queue->nr_hw_queues)
				j = 0;
			hctx2 = queue->queue_hw_ctx[j];
			if (hctx2->tags == tags &&
			    blk_mq_sched_restart_hctx(hctx2))
				break;
		}
done:
		rcu_read_unlock();
	} else {
		blk_mq_sched_restart_hctx(hctx);
	}
}

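/*
 * Insert @rq, routing flush requests through the flush machinery and
 * everything else to the elevator or the software queue, then optionally
 * run the hardware queue.
 */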
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	/* flush requests need to be dispatched directly via the flush machinery */
	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
		blk_insert_flush(rq);
		goto run;
	}

	WARN_ON(e && (rq->tag != -1));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else
		blk_mq_insert_requests(hctx, ctx, list);

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

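/*
 * Free a hardware queue's scheduler tag map and the requests allocated
 * from it.
 */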
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;
	int ret;

	if (!e)
		return 0;

	ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
	if (ret)
		return ret;

	if (e->type->ops.mq.init_hctx) {
		ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
		if (ret) {
			blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
			return ret;
		}
	}

	blk_mq_debugfs_register_sched_hctx(q, hctx);

	return 0;
}

void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;

	if (!e)
		return;

	blk_mq_debugfs_unregister_sched_hctx(hctx);

	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
		hctx->sched_data = NULL;
	}

	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hardware queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.mq.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.mq.init_hctx) {
			ret = e->ops.mq.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
			e->type->ops.mq.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}

int blk_mq_sched_init(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = elevator_init(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the
	 * list_empty_careful(&hctx->dispatch) check in blk_mq_run_hw_queue().
	 * Its pair is the barrier in blk_mq_dispatch_rq_list(). This prevents
	 * the case where the dispatch code fails to see SCHED_RESTART while,
	 * at the same time, a new request added to hctx->dispatch is missed
	 * by the check in blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

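/*
 * list_sort() comparator: order requests by their hardware queue so that
 * requests from the same hctx end up adjacent in the list.
 */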
static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

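/*
 * Cut the leading run of requests sharing the first request's hctx off
 * @rq_list and dispatch it; the remainder, if any, stays on @rq_list.
 */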
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue, and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget, which has to be released
		 * if the rq isn't queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;

		/*
		 * If we cannot get a tag for the request, stop dequeueing
		 * requests from the IO scheduler. We are unlikely to be able
		 * to submit them anyway, and it creates a false impression
		 * for scheduling heuristics that the device can take more IO.
		 */
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctxs may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx,
		 * then dispatch batches of requests from the same hctx
		 * at a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

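/*
 * Keep dispatching from the scheduler until it makes no more progress,
 * passing any -EAGAIN (non-empty hctx->dispatch) back to the caller.
 */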
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
	} while (ret == 1);

	return ret;
}

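/*
 * Return the software queue that follows @ctx for @hctx's queue type,
 * wrapping around, for round-robin dispatch.
 */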
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue, and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget, which has to be released
		 * if the rq isn't queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

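/*
 * One dispatch pass over @hctx: drain leftovers from the dispatch list
 * first, then pull from the scheduler or the software queues. Propagates
 * -EAGAIN from the helpers above when a rerun is needed.
 */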
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	const bool has_sched = q->elevator;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue requests one by one from the sw queue if the queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge)
		return e->type->ops.bio_merge(q, bio, nr_segs);

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		return false;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
		ctx->rq_merged++;
		ret = true;
	}

	spin_unlock(&ctx->lock);

	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	/*
	 * Dispatch flush and passthrough rq directly.
	 *
	 * A passthrough request has to be added to hctx->dispatch directly.
	 * The device may be in a situation where it can't handle FS requests,
	 * so STS_RESOURCE is always returned and the FS request ends up on
	 * hctx->dispatch. However, a passthrough request may be required at
	 * that time to fix the problem. If the passthrough request were added
	 * to the scheduler queue, there wouldn't be any chance to dispatch it
	 * given that we prioritize requests on hctx->dispatch.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	return false;
}

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

	if (blk_mq_sched_bypass_insert(hctx, rq)) {
		/*
		 * First, a normal IO request is inserted into the scheduler
		 * queue or sw queue, while we add the flush request to the
		 * dispatch queue (hctx->dispatch) directly. Since there is at
		 * most one in-flight flush request per hw queue, it doesn't
		 * matter whether the flush request is added to the tail or
		 * the front of the dispatch queue.
		 *
		 * Second, in case of NCQ, a flush request is a non-NCQ
		 * command, and queueing it will fail when there is any
		 * in-flight normal IO request (NCQ command). Adding the flush
		 * rq to the front of hctx->dispatch tends to add extra time
		 * to the flush rq's latency (because of S_SCHED_RESTART)
		 * compared with adding it to the tail of the dispatch queue;
		 * that increases the chance of flush merging, so fewer flush
		 * requests will be issued to the controller. It is observed
		 * that ~10% of the time is saved in blktests block/004 on a
		 * disk attached to an AHCI/NCQ drive when adding the flush rq
		 * to the front of hctx->dispatch.
		 *
		 * Simply queue the flush rq to the front of hctx->dispatch so
		 * that flush-intensive workloads can benefit in case of NCQ
		 * hardware.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from the flush plug
	 * context only, and holds one usage counter to prevent the queue
	 * from being released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e) {
		e->type->ops.insert_requests(hctx, list, false);
	} else {
		/*
		 * Try to issue requests directly if the hw queue isn't busy
		 * in case of the 'none' scheduler; this may save us an extra
		 * enqueue and dequeue to the sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
 out:
	percpu_ref_put(&q->q_usage_counter);
}

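/*
 * Allocate a scheduler tag map, sized by q->nr_requests, for one hardware
 * queue, along with the requests backing it.
 */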
static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags, set->flags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret) {
		blk_mq_free_rq_map(hctx->sched_tags, set->flags);
		hctx->sched_tags = NULL;
	}

	return ret;
}

/* called from the queue's release handler; the tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			blk_mq_free_rq_map(hctx->sched_tags, hctx->flags);
			hctx->sched_tags = NULL;
		}
	}
}

static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	/*
	 * Set the initial depth to max so that we don't need to reallocate
	 * when updating nr_requests.
	 */
	ret = blk_mq_init_bitmaps(&queue->sched_bitmap_tags,
				  &queue->sched_breserved_tags,
				  MAX_SCHED_RQ, set->reserved_tags,
				  set->numa_node, alloc_policy);
	if (ret)
		return ret;

	queue_for_each_hw_ctx(queue, hctx, i) {
		hctx->sched_tags->bitmap_tags =
			&queue->sched_bitmap_tags;
		hctx->sched_tags->breserved_tags =
			&queue->sched_breserved_tags;
	}

	sbitmap_queue_resize(&queue->sched_bitmap_tags,
			     queue->nr_requests - set->reserved_tags);

	return 0;
}

static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue)
{
	sbitmap_queue_free(&queue->sched_bitmap_tags);
	sbitmap_queue_free(&queue->sched_breserved_tags);
}

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hardware queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err_free_tags;
	}

	if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
		ret = blk_mq_init_sched_shared_sbitmap(q);
		if (ret)
			goto err_free_tags;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_sbitmap;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_requests(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err_free_sbitmap:
	if (blk_mq_is_sbitmap_shared(q->tag_set->flags))
		blk_mq_exit_sched_shared_sbitmap(q);
err_free_tags:
	blk_mq_sched_free_requests(q);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

/*
 * Called from either blk_cleanup_queue() or elevator_switch(); the tagset
 * is required for freeing the requests.
 */
void blk_mq_sched_free_requests(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags)
			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
	}
}

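/*
 * Tear down the elevator for @q: per-hctx exit hooks, debugfs entries,
 * scheduler tag maps and, for a shared sbitmap, the queue-wide tags.
 */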
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	if (blk_mq_is_sbitmap_shared(flags))
		blk_mq_exit_sched_shared_sbitmap(q);
	q->elevator = NULL;
}