1/*
2 * Block device elevator/IO-scheduler.
3 *
4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 *
6 * 30042000 Jens Axboe <axboe@kernel.dk> :
7 *
8 * Split the elevator a bit so that it is possible to choose a different
9 * one or even write a new "plug in". There are three pieces:
10 * - elevator_fn, inserts a new request in the queue list
11 * - elevator_merge_fn, decides whether a new buffer can be merged with
12 * an existing request
13 * - elevator_dequeue_fn, called when a request is taken off the active list
14 *
15 * 20082000 Dave Jones <davej@suse.de> :
16 * Removed tests for max-bomb-segments, which was breaking elvtune
17 * when run without -bN
18 *
19 * Jens:
20 * - Rework again to work with bio instead of buffer_heads
 21 * - lose bi_dev comparisons, partition handling is correct now
22 * - completely modularize elevator setup and teardown
23 *
24 */
25#include <linux/kernel.h>
26#include <linux/fs.h>
27#include <linux/blkdev.h>
28#include <linux/elevator.h>
29#include <linux/bio.h>
30#include <linux/module.h>
31#include <linux/slab.h>
32#include <linux/init.h>
33#include <linux/compiler.h>
34#include <linux/delay.h>
35#include <linux/blktrace_api.h>
36#include <linux/hash.h>
37#include <linux/uaccess.h>
38
39#include <trace/events/block.h>
40
41#include "blk.h"
42
43static DEFINE_SPINLOCK(elv_list_lock);
44static LIST_HEAD(elv_list);
45
46/*
47 * Merge hash stuff.
48 */
49static const int elv_hash_shift = 6;
50#define ELV_HASH_BLOCK(sec) ((sec) >> 3)
51#define ELV_HASH_FN(sec) \
52 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
53#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
54#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
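/*
 * The hash key is the request's end sector (start + size), so a bio whose
 * start sector equals rq_hash_key(rq) is a back-merge candidate; this is
 * exactly what elv_rqhash_find() is asked for in elv_merge() below.
 */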
55
56/*
57 * Query io scheduler to see if the current process issuing bio may be
58 * merged with rq.
59 */
60static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
61{
62 struct request_queue *q = rq->q;
63 struct elevator_queue *e = q->elevator;
64
65 if (e->ops->elevator_allow_merge_fn)
66 return e->ops->elevator_allow_merge_fn(q, rq, bio);
67
68 return 1;
69}
70
71/*
72 * can we safely merge with this request?
73 */
74int elv_rq_merge_ok(struct request *rq, struct bio *bio)
75{
76 if (!rq_mergeable(rq))
77 return 0;
78
79 /*
80 * Don't merge file system requests and discard requests
81 */
82 if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
83 return 0;
84
85 /*
86 * Don't merge discard requests and secure discard requests
87 */
88 if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
89 return 0;
90
91 /*
92 * different data direction or already started, don't merge
93 */
94 if (bio_data_dir(bio) != rq_data_dir(rq))
95 return 0;
96
97 /*
98 * must be same device and not a special request
99 */
100 if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
101 return 0;
102
103 /*
104 * only merge integrity protected bio into ditto rq
105 */
106 if (bio_integrity(bio) != blk_integrity_rq(rq))
107 return 0;
108
109 if (!elv_iosched_allow_merge(rq, bio))
110 return 0;
111
112 return 1;
113}
114EXPORT_SYMBOL(elv_rq_merge_ok);
115
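/*
 * Classify a possible merge of bio into __rq purely by sector arithmetic:
 * a back merge when the bio starts right where __rq ends, a front merge
 * when the bio ends right where __rq starts, otherwise no merge.
 */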
116int elv_try_merge(struct request *__rq, struct bio *bio)
117{
118 int ret = ELEVATOR_NO_MERGE;
119
120 /*
121 * we can merge and sequence is ok, check if it's possible
122 */
123 if (elv_rq_merge_ok(__rq, bio)) {
124 if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
125 ret = ELEVATOR_BACK_MERGE;
126 else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
127 ret = ELEVATOR_FRONT_MERGE;
128 }
129
130 return ret;
131}
132
133static struct elevator_type *elevator_find(const char *name)
134{
135 struct elevator_type *e;
136
137 list_for_each_entry(e, &elv_list, list) {
138 if (!strcmp(e->elevator_name, name))
139 return e;
140 }
141
142 return NULL;
143}
144
145static void elevator_put(struct elevator_type *e)
146{
147 module_put(e->elevator_owner);
148}
149
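/*
 * Look up an elevator type by name and take a reference on its module.
 * If it is not registered yet, try loading "<name>-iosched" and look it
 * up again. Returns NULL if the type cannot be found or pinned.
 */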
150static struct elevator_type *elevator_get(const char *name)
151{
152 struct elevator_type *e;
153
154 spin_lock(&elv_list_lock);
155
156 e = elevator_find(name);
157 if (!e) {
158 spin_unlock(&elv_list_lock);
159 request_module("%s-iosched", name);
160 spin_lock(&elv_list_lock);
161 e = elevator_find(name);
162 }
163
164 if (e && !try_module_get(e->elevator_owner))
165 e = NULL;
166
167 spin_unlock(&elv_list_lock);
168
169 return e;
170}
171
172static void *elevator_init_queue(struct request_queue *q,
173 struct elevator_queue *eq)
174{
175 return eq->ops->elevator_init_fn(q);
176}
177
178static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
179 void *data)
180{
181 q->elevator = eq;
182 eq->elevator_data = data;
183}
184
185static char chosen_elevator[16];
186
187static int __init elevator_setup(char *str)
188{
189 /*
190 * Be backwards-compatible with previous kernels, so users
191 * won't get the wrong elevator.
192 */
193 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
194 return 1;
195}
196
197__setup("elevator=", elevator_setup);
198
199static struct kobj_type elv_ktype;
200
201static struct elevator_queue *elevator_alloc(struct request_queue *q,
202 struct elevator_type *e)
203{
204 struct elevator_queue *eq;
205 int i;
206
207 eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
208 if (unlikely(!eq))
209 goto err;
210
211 eq->ops = &e->ops;
212 eq->elevator_type = e;
213 kobject_init(&eq->kobj, &elv_ktype);
214 mutex_init(&eq->sysfs_lock);
215
216 eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
217 GFP_KERNEL, q->node);
218 if (!eq->hash)
219 goto err;
220
221 for (i = 0; i < ELV_HASH_ENTRIES; i++)
222 INIT_HLIST_HEAD(&eq->hash[i]);
223
224 return eq;
225err:
226 kfree(eq);
227 elevator_put(e);
228 return NULL;
229}
230
231static void elevator_release(struct kobject *kobj)
232{
233 struct elevator_queue *e;
234
235 e = container_of(kobj, struct elevator_queue, kobj);
236 elevator_put(e->elevator_type);
237 kfree(e->hash);
238 kfree(e);
239}
240
241int elevator_init(struct request_queue *q, char *name)
242{
243 struct elevator_type *e = NULL;
244 struct elevator_queue *eq;
245 void *data;
246
247 if (unlikely(q->elevator))
248 return 0;
249
250 INIT_LIST_HEAD(&q->queue_head);
251 q->last_merge = NULL;
252 q->end_sector = 0;
253 q->boundary_rq = NULL;
254
255 if (name) {
256 e = elevator_get(name);
257 if (!e)
258 return -EINVAL;
259 }
260
261 if (!e && *chosen_elevator) {
262 e = elevator_get(chosen_elevator);
263 if (!e)
264 printk(KERN_ERR "I/O scheduler %s not found\n",
265 chosen_elevator);
266 }
267
268 if (!e) {
269 e = elevator_get(CONFIG_DEFAULT_IOSCHED);
270 if (!e) {
271 printk(KERN_ERR
272 "Default I/O scheduler not found. " \
273 "Using noop.\n");
274 e = elevator_get("noop");
275 }
276 }
277
278 eq = elevator_alloc(q, e);
279 if (!eq)
280 return -ENOMEM;
281
282 data = elevator_init_queue(q, eq);
283 if (!data) {
284 kobject_put(&eq->kobj);
285 return -ENOMEM;
286 }
287
288 elevator_attach(q, eq, data);
289 return 0;
290}
291EXPORT_SYMBOL(elevator_init);
292
293void elevator_exit(struct elevator_queue *e)
294{
295 mutex_lock(&e->sysfs_lock);
296 if (e->ops->elevator_exit_fn)
297 e->ops->elevator_exit_fn(e);
298 e->ops = NULL;
299 mutex_unlock(&e->sysfs_lock);
300
301 kobject_put(&e->kobj);
302}
303EXPORT_SYMBOL(elevator_exit);
304
305static inline void __elv_rqhash_del(struct request *rq)
306{
307 hlist_del_init(&rq->hash);
308}
309
310static void elv_rqhash_del(struct request_queue *q, struct request *rq)
311{
312 if (ELV_ON_HASH(rq))
313 __elv_rqhash_del(rq);
314}
315
316static void elv_rqhash_add(struct request_queue *q, struct request *rq)
317{
318 struct elevator_queue *e = q->elevator;
319
320 BUG_ON(ELV_ON_HASH(rq));
321 hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
322}
323
324static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
325{
326 __elv_rqhash_del(rq);
327 elv_rqhash_add(q, rq);
328}
329
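/*
 * Look up a request whose end sector matches @offset. Entries that are no
 * longer mergeable are opportunistically dropped from the hash while the
 * bucket is scanned.
 */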
330static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
331{
332 struct elevator_queue *e = q->elevator;
333 struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
334 struct hlist_node *entry, *next;
335 struct request *rq;
336
337 hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
338 BUG_ON(!ELV_ON_HASH(rq));
339
340 if (unlikely(!rq_mergeable(rq))) {
341 __elv_rqhash_del(rq);
342 continue;
343 }
344
345 if (rq_hash_key(rq) == offset)
346 return rq;
347 }
348
349 return NULL;
350}
351
352/*
353 * RB-tree support functions for inserting/lookup/removal of requests
354 * in a sorted RB tree.
355 */
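/*
 * Note: elv_rb_add() sends equal sector keys to the right subtree, so
 * duplicates are allowed; elv_rb_find() returns whichever request with a
 * matching start sector it reaches first.
 */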
356void elv_rb_add(struct rb_root *root, struct request *rq)
357{
358 struct rb_node **p = &root->rb_node;
359 struct rb_node *parent = NULL;
360 struct request *__rq;
361
362 while (*p) {
363 parent = *p;
364 __rq = rb_entry(parent, struct request, rb_node);
365
366 if (blk_rq_pos(rq) < blk_rq_pos(__rq))
367 p = &(*p)->rb_left;
368 else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
369 p = &(*p)->rb_right;
370 }
371
372 rb_link_node(&rq->rb_node, parent, p);
373 rb_insert_color(&rq->rb_node, root);
374}
375EXPORT_SYMBOL(elv_rb_add);
376
377void elv_rb_del(struct rb_root *root, struct request *rq)
378{
379 BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
380 rb_erase(&rq->rb_node, root);
381 RB_CLEAR_NODE(&rq->rb_node);
382}
383EXPORT_SYMBOL(elv_rb_del);
384
385struct request *elv_rb_find(struct rb_root *root, sector_t sector)
386{
387 struct rb_node *n = root->rb_node;
388 struct request *rq;
389
390 while (n) {
391 rq = rb_entry(n, struct request, rb_node);
392
393 if (sector < blk_rq_pos(rq))
394 n = n->rb_left;
395 else if (sector > blk_rq_pos(rq))
396 n = n->rb_right;
397 else
398 return rq;
399 }
400
401 return NULL;
402}
403EXPORT_SYMBOL(elv_rb_find);
404
405/*
406 * Insert rq into dispatch queue of q. Queue lock must be held on
 407 * entry. rq is sorted into the dispatch queue. To be used by
408 * specific elevators.
409 */
410void elv_dispatch_sort(struct request_queue *q, struct request *rq)
411{
412 sector_t boundary;
413 struct list_head *entry;
414 int stop_flags;
415
416 if (q->last_merge == rq)
417 q->last_merge = NULL;
418
419 elv_rqhash_del(q, rq);
420
421 q->nr_sorted--;
422
423 boundary = q->end_sector;
424 stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
425 list_for_each_prev(entry, &q->queue_head) {
426 struct request *pos = list_entry_rq(entry);
427
428 if ((rq->cmd_flags & REQ_DISCARD) !=
429 (pos->cmd_flags & REQ_DISCARD))
430 break;
431 if (rq_data_dir(rq) != rq_data_dir(pos))
432 break;
433 if (pos->cmd_flags & stop_flags)
434 break;
435 if (blk_rq_pos(rq) >= boundary) {
436 if (blk_rq_pos(pos) < boundary)
437 continue;
438 } else {
439 if (blk_rq_pos(pos) >= boundary)
440 break;
441 }
442 if (blk_rq_pos(rq) >= blk_rq_pos(pos))
443 break;
444 }
445
446 list_add(&rq->queuelist, entry);
447}
448EXPORT_SYMBOL(elv_dispatch_sort);
449
450/*
451 * Insert rq into dispatch queue of q. Queue lock must be held on
452 * entry. rq is added to the back of the dispatch queue. To be used by
453 * specific elevators.
454 */
455void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
456{
457 if (q->last_merge == rq)
458 q->last_merge = NULL;
459
460 elv_rqhash_del(q, rq);
461
462 q->nr_sorted--;
463
464 q->end_sector = rq_end_sector(rq);
465 q->boundary_rq = rq;
466 list_add_tail(&rq->queuelist, &q->queue_head);
467}
468EXPORT_SYMBOL(elv_dispatch_add_tail);
469
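/*
 * Merging strategy, cheapest first: try the one-hit last_merge cache, then
 * a hash lookup for a back merge, and finally ask the io scheduler itself
 * (e.g. a front merge via its own sorted structures).
 */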
470int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
471{
472 struct elevator_queue *e = q->elevator;
473 struct request *__rq;
474 int ret;
475
476 /*
477 * Levels of merges:
478 * nomerges: No merges at all attempted
479 * noxmerges: Only simple one-hit cache try
480 * merges: All merge tries attempted
481 */
482 if (blk_queue_nomerges(q))
483 return ELEVATOR_NO_MERGE;
484
485 /*
486 * First try one-hit cache.
487 */
488 if (q->last_merge) {
489 ret = elv_try_merge(q->last_merge, bio);
490 if (ret != ELEVATOR_NO_MERGE) {
491 *req = q->last_merge;
492 return ret;
493 }
494 }
495
496 if (blk_queue_noxmerges(q))
497 return ELEVATOR_NO_MERGE;
498
499 /*
500 * See if our hash lookup can find a potential backmerge.
501 */
502 __rq = elv_rqhash_find(q, bio->bi_sector);
503 if (__rq && elv_rq_merge_ok(__rq, bio)) {
504 *req = __rq;
505 return ELEVATOR_BACK_MERGE;
506 }
507
508 if (e->ops->elevator_merge_fn)
509 return e->ops->elevator_merge_fn(q, req, bio);
510
511 return ELEVATOR_NO_MERGE;
512}
513
514/*
515 * Attempt to do an insertion back merge. Only check for the case where
516 * we can append 'rq' to an existing request, so we can throw 'rq' away
517 * afterwards.
518 *
519 * Returns true if we merged, false otherwise
520 */
521static bool elv_attempt_insert_merge(struct request_queue *q,
522 struct request *rq)
523{
524 struct request *__rq;
525
526 if (blk_queue_nomerges(q))
527 return false;
528
529 /*
530 * First try one-hit cache.
531 */
532 if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
533 return true;
534
535 if (blk_queue_noxmerges(q))
536 return false;
537
538 /*
539 * See if our hash lookup can find a potential backmerge.
540 */
541 __rq = elv_rqhash_find(q, blk_rq_pos(rq));
542 if (__rq && blk_attempt_req_merge(q, __rq, rq))
543 return true;
544
545 return false;
546}
547
548void elv_merged_request(struct request_queue *q, struct request *rq, int type)
549{
550 struct elevator_queue *e = q->elevator;
551
552 if (e->ops->elevator_merged_fn)
553 e->ops->elevator_merged_fn(q, rq, type);
554
555 if (type == ELEVATOR_BACK_MERGE)
556 elv_rqhash_reposition(q, rq);
557
558 q->last_merge = rq;
559}
560
561void elv_merge_requests(struct request_queue *q, struct request *rq,
562 struct request *next)
563{
564 struct elevator_queue *e = q->elevator;
565 const int next_sorted = next->cmd_flags & REQ_SORTED;
566
567 if (next_sorted && e->ops->elevator_merge_req_fn)
568 e->ops->elevator_merge_req_fn(q, rq, next);
569
570 elv_rqhash_reposition(q, rq);
571
572 if (next_sorted) {
573 elv_rqhash_del(q, next);
574 q->nr_sorted--;
575 }
576
577 q->last_merge = rq;
578}
579
580void elv_bio_merged(struct request_queue *q, struct request *rq,
581 struct bio *bio)
582{
583 struct elevator_queue *e = q->elevator;
584
585 if (e->ops->elevator_bio_merged_fn)
586 e->ops->elevator_bio_merged_fn(q, rq, bio);
587}
588
589void elv_requeue_request(struct request_queue *q, struct request *rq)
590{
591 /*
592 * it already went through dequeue, we need to decrement the
593 * in_flight count again
594 */
595 if (blk_account_rq(rq)) {
596 q->in_flight[rq_is_sync(rq)]--;
597 if (rq->cmd_flags & REQ_SORTED)
598 elv_deactivate_rq(q, rq);
599 }
600
601 rq->cmd_flags &= ~REQ_STARTED;
602
603 __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
604}
605
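/*
 * Force the io scheduler to move everything it is holding onto the
 * dispatch queue. Afterwards nr_sorted should be zero; if it isn't, the
 * scheduler's forced dispatch is broken and we warn about it below.
 */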
606void elv_drain_elevator(struct request_queue *q)
607{
608 static int printed;
609 while (q->elevator->ops->elevator_dispatch_fn(q, 1))
610 ;
611 if (q->nr_sorted == 0)
612 return;
613 if (printed++ < 10) {
614 printk(KERN_ERR "%s: forced dispatching is broken "
615 "(nr_sorted=%u), please report this\n",
616 q->elevator->elevator_type->elevator_name, q->nr_sorted);
617 }
618}
619
620/*
621 * Call with queue lock held, interrupts disabled
622 */
623void elv_quiesce_start(struct request_queue *q)
624{
625 if (!q->elevator)
626 return;
627
628 queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
629
630 /*
631 * make sure we don't have any requests in flight
632 */
633 elv_drain_elevator(q);
634 while (q->rq.elvpriv) {
635 __blk_run_queue(q);
636 spin_unlock_irq(q->queue_lock);
637 msleep(10);
638 spin_lock_irq(q->queue_lock);
639 elv_drain_elevator(q);
640 }
641}
642
643void elv_quiesce_end(struct request_queue *q)
644{
645 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
646}
647
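/*
 * Insert rq according to 'where': REQUEUE/FRONT go straight to the head of
 * the dispatch list, BACK drains the scheduler and appends, SORT and
 * SORT_MERGE hand the request to the io scheduler (after trying an insert
 * merge for SORT_MERGE), and FLUSH routes it through the flush machinery.
 */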
648void __elv_add_request(struct request_queue *q, struct request *rq, int where)
649{
650 trace_block_rq_insert(q, rq);
651
652 rq->q = q;
653
654 if (rq->cmd_flags & REQ_SOFTBARRIER) {
655 /* barriers are scheduling boundary, update end_sector */
656 if (rq->cmd_type == REQ_TYPE_FS ||
657 (rq->cmd_flags & REQ_DISCARD)) {
658 q->end_sector = rq_end_sector(rq);
659 q->boundary_rq = rq;
660 }
661 } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
662 (where == ELEVATOR_INSERT_SORT ||
663 where == ELEVATOR_INSERT_SORT_MERGE))
664 where = ELEVATOR_INSERT_BACK;
665
666 switch (where) {
667 case ELEVATOR_INSERT_REQUEUE:
668 case ELEVATOR_INSERT_FRONT:
669 rq->cmd_flags |= REQ_SOFTBARRIER;
670 list_add(&rq->queuelist, &q->queue_head);
671 break;
672
673 case ELEVATOR_INSERT_BACK:
674 rq->cmd_flags |= REQ_SOFTBARRIER;
675 elv_drain_elevator(q);
676 list_add_tail(&rq->queuelist, &q->queue_head);
677 /*
678 * We kick the queue here for the following reasons.
679 * - The elevator might have returned NULL previously
680 * to delay requests and returned them now. As the
681 * queue wasn't empty before this request, ll_rw_blk
 682 * won't run the queue on return, resulting in a hang.
683 * - Usually, back inserted requests won't be merged
684 * with anything. There's no point in delaying queue
685 * processing.
686 */
687 __blk_run_queue(q);
688 break;
689
690 case ELEVATOR_INSERT_SORT_MERGE:
691 /*
692 * If we succeed in merging this request with one in the
693 * queue already, we are done - rq has now been freed,
694 * so no need to do anything further.
695 */
696 if (elv_attempt_insert_merge(q, rq))
697 break;
698 case ELEVATOR_INSERT_SORT:
699 BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
700 !(rq->cmd_flags & REQ_DISCARD));
701 rq->cmd_flags |= REQ_SORTED;
702 q->nr_sorted++;
703 if (rq_mergeable(rq)) {
704 elv_rqhash_add(q, rq);
705 if (!q->last_merge)
706 q->last_merge = rq;
707 }
708
709 /*
710 * Some ioscheds (cfq) run q->request_fn directly, so
711 * rq cannot be accessed after calling
712 * elevator_add_req_fn.
713 */
714 q->elevator->ops->elevator_add_req_fn(q, rq);
715 break;
716
717 case ELEVATOR_INSERT_FLUSH:
718 rq->cmd_flags |= REQ_SOFTBARRIER;
719 blk_insert_flush(rq);
720 break;
721 default:
722 printk(KERN_ERR "%s: bad insertion point %d\n",
723 __func__, where);
724 BUG();
725 }
726}
727EXPORT_SYMBOL(__elv_add_request);
728
729void elv_add_request(struct request_queue *q, struct request *rq, int where)
730{
731 unsigned long flags;
732
733 spin_lock_irqsave(q->queue_lock, flags);
734 __elv_add_request(q, rq, where);
735 spin_unlock_irqrestore(q->queue_lock, flags);
736}
737EXPORT_SYMBOL(elv_add_request);
738
739struct request *elv_latter_request(struct request_queue *q, struct request *rq)
740{
741 struct elevator_queue *e = q->elevator;
742
743 if (e->ops->elevator_latter_req_fn)
744 return e->ops->elevator_latter_req_fn(q, rq);
745 return NULL;
746}
747
748struct request *elv_former_request(struct request_queue *q, struct request *rq)
749{
750 struct elevator_queue *e = q->elevator;
751
752 if (e->ops->elevator_former_req_fn)
753 return e->ops->elevator_former_req_fn(q, rq);
754 return NULL;
755}
756
757int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
758{
759 struct elevator_queue *e = q->elevator;
760
761 if (e->ops->elevator_set_req_fn)
762 return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
763
764 rq->elevator_private[0] = NULL;
765 return 0;
766}
767
768void elv_put_request(struct request_queue *q, struct request *rq)
769{
770 struct elevator_queue *e = q->elevator;
771
772 if (e->ops->elevator_put_req_fn)
773 e->ops->elevator_put_req_fn(rq);
774}
775
776int elv_may_queue(struct request_queue *q, int rw)
777{
778 struct elevator_queue *e = q->elevator;
779
780 if (e->ops->elevator_may_queue_fn)
781 return e->ops->elevator_may_queue_fn(q, rw);
782
783 return ELV_MQUEUE_MAY;
784}
785
786void elv_abort_queue(struct request_queue *q)
787{
788 struct request *rq;
789
790 blk_abort_flushes(q);
791
792 while (!list_empty(&q->queue_head)) {
793 rq = list_entry_rq(q->queue_head.next);
794 rq->cmd_flags |= REQ_QUIET;
795 trace_block_rq_abort(q, rq);
796 /*
797 * Mark this request as started so we don't trigger
798 * any debug logic in the end I/O path.
799 */
800 blk_start_request(rq);
801 __blk_end_request_all(rq, -EIO);
802 }
803}
804EXPORT_SYMBOL(elv_abort_queue);
805
806void elv_completed_request(struct request_queue *q, struct request *rq)
807{
808 struct elevator_queue *e = q->elevator;
809
810 /*
811 * request is released from the driver, io must be done
812 */
813 if (blk_account_rq(rq)) {
814 q->in_flight[rq_is_sync(rq)]--;
815 if ((rq->cmd_flags & REQ_SORTED) &&
816 e->ops->elevator_completed_req_fn)
817 e->ops->elevator_completed_req_fn(q, rq);
818 }
819}
820
821#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
822
823static ssize_t
824elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
825{
826 struct elv_fs_entry *entry = to_elv(attr);
827 struct elevator_queue *e;
828 ssize_t error;
829
830 if (!entry->show)
831 return -EIO;
832
833 e = container_of(kobj, struct elevator_queue, kobj);
834 mutex_lock(&e->sysfs_lock);
835 error = e->ops ? entry->show(e, page) : -ENOENT;
836 mutex_unlock(&e->sysfs_lock);
837 return error;
838}
839
840static ssize_t
841elv_attr_store(struct kobject *kobj, struct attribute *attr,
842 const char *page, size_t length)
843{
844 struct elv_fs_entry *entry = to_elv(attr);
845 struct elevator_queue *e;
846 ssize_t error;
847
848 if (!entry->store)
849 return -EIO;
850
851 e = container_of(kobj, struct elevator_queue, kobj);
852 mutex_lock(&e->sysfs_lock);
853 error = e->ops ? entry->store(e, page, length) : -ENOENT;
854 mutex_unlock(&e->sysfs_lock);
855 return error;
856}
857
858static const struct sysfs_ops elv_sysfs_ops = {
859 .show = elv_attr_show,
860 .store = elv_attr_store,
861};
862
863static struct kobj_type elv_ktype = {
864 .sysfs_ops = &elv_sysfs_ops,
865 .release = elevator_release,
866};
867
868int elv_register_queue(struct request_queue *q)
869{
870 struct elevator_queue *e = q->elevator;
871 int error;
872
873 error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
874 if (!error) {
875 struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
876 if (attr) {
877 while (attr->attr.name) {
878 if (sysfs_create_file(&e->kobj, &attr->attr))
879 break;
880 attr++;
881 }
882 }
883 kobject_uevent(&e->kobj, KOBJ_ADD);
884 e->registered = 1;
885 }
886 return error;
887}
888EXPORT_SYMBOL(elv_register_queue);
889
890static void __elv_unregister_queue(struct elevator_queue *e)
891{
892 kobject_uevent(&e->kobj, KOBJ_REMOVE);
893 kobject_del(&e->kobj);
894 e->registered = 0;
895}
896
897void elv_unregister_queue(struct request_queue *q)
898{
899 if (q)
900 __elv_unregister_queue(q->elevator);
901}
902EXPORT_SYMBOL(elv_unregister_queue);
903
904void elv_register(struct elevator_type *e)
905{
906 char *def = "";
907
908 spin_lock(&elv_list_lock);
909 BUG_ON(elevator_find(e->elevator_name));
910 list_add_tail(&e->list, &elv_list);
911 spin_unlock(&elv_list_lock);
912
913 if (!strcmp(e->elevator_name, chosen_elevator) ||
914 (!*chosen_elevator &&
915 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
916 def = " (default)";
917
918 printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
919 def);
920}
921EXPORT_SYMBOL_GPL(elv_register);
922
923void elv_unregister(struct elevator_type *e)
924{
925 struct task_struct *g, *p;
926
927 /*
 928 * Iterate over every thread in the system and remove its io context.
929 */
930 if (e->ops.trim) {
931 read_lock(&tasklist_lock);
932 do_each_thread(g, p) {
933 task_lock(p);
934 if (p->io_context)
935 e->ops.trim(p->io_context);
936 task_unlock(p);
937 } while_each_thread(g, p);
938 read_unlock(&tasklist_lock);
939 }
940
941 spin_lock(&elv_list_lock);
942 list_del_init(&e->list);
943 spin_unlock(&elv_list_lock);
944}
945EXPORT_SYMBOL_GPL(elv_unregister);
946
947/*
 948 * Switch to new_e io scheduler. Be careful not to introduce deadlocks -
 949 * we don't free the old io scheduler before we have allocated what we
 950 * need for the new one. This way we have a chance of going back to the
 951 * old one if the new one fails init for some reason.
952 */
953static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
954{
955 struct elevator_queue *old_elevator, *e;
956 void *data;
957 int err;
958
959 /*
960 * Allocate new elevator
961 */
962 e = elevator_alloc(q, new_e);
963 if (!e)
964 return -ENOMEM;
965
966 data = elevator_init_queue(q, e);
967 if (!data) {
968 kobject_put(&e->kobj);
969 return -ENOMEM;
970 }
971
972 /*
973 * Turn on BYPASS and drain all requests w/ elevator private data
974 */
975 spin_lock_irq(q->queue_lock);
976 elv_quiesce_start(q);
977
978 /*
979 * Remember old elevator.
980 */
981 old_elevator = q->elevator;
982
983 /*
984 * attach and start new elevator
985 */
986 elevator_attach(q, e, data);
987
988 spin_unlock_irq(q->queue_lock);
989
990 if (old_elevator->registered) {
991 __elv_unregister_queue(old_elevator);
992
993 err = elv_register_queue(q);
994 if (err)
995 goto fail_register;
996 }
997
998 /*
999 * finally exit old elevator and turn off BYPASS.
1000 */
1001 elevator_exit(old_elevator);
1002 spin_lock_irq(q->queue_lock);
1003 elv_quiesce_end(q);
1004 spin_unlock_irq(q->queue_lock);
1005
1006 blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
1007
1008 return 0;
1009
1010fail_register:
1011 /*
1012 * switch failed, exit the new io scheduler and reattach the old
1013 * one again (along with re-adding the sysfs dir)
1014 */
1015 elevator_exit(e);
1016 q->elevator = old_elevator;
1017 elv_register_queue(q);
1018
1019 spin_lock_irq(q->queue_lock);
1020 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1021 spin_unlock_irq(q->queue_lock);
1022
1023 return err;
1024}
1025
1026/*
1027 * Switch this queue to the given IO scheduler.
1028 */
1029int elevator_change(struct request_queue *q, const char *name)
1030{
1031 char elevator_name[ELV_NAME_MAX];
1032 struct elevator_type *e;
1033
1034 if (!q->elevator)
1035 return -ENXIO;
1036
1037 strlcpy(elevator_name, name, sizeof(elevator_name));
1038 e = elevator_get(strstrip(elevator_name));
1039 if (!e) {
1040 printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
1041 return -EINVAL;
1042 }
1043
1044 if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
1045 elevator_put(e);
1046 return 0;
1047 }
1048
1049 return elevator_switch(q, e);
1050}
1051EXPORT_SYMBOL(elevator_change);
1052
1053ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1054 size_t count)
1055{
1056 int ret;
1057
1058 if (!q->elevator)
1059 return count;
1060
1061 ret = elevator_change(q, name);
1062 if (!ret)
1063 return count;
1064
1065 printk(KERN_ERR "elevator: switch to %s failed\n", name);
1066 return ret;
1067}
1068
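/*
 * sysfs "scheduler" show: list every registered elevator separated by
 * spaces, with the active one in square brackets, e.g. "noop [cfq]".
 */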
1069ssize_t elv_iosched_show(struct request_queue *q, char *name)
1070{
1071 struct elevator_queue *e = q->elevator;
1072 struct elevator_type *elv;
1073 struct elevator_type *__e;
1074 int len = 0;
1075
1076 if (!q->elevator || !blk_queue_stackable(q))
1077 return sprintf(name, "none\n");
1078
1079 elv = e->elevator_type;
1080
1081 spin_lock(&elv_list_lock);
1082 list_for_each_entry(__e, &elv_list, list) {
1083 if (!strcmp(elv->elevator_name, __e->elevator_name))
1084 len += sprintf(name+len, "[%s] ", elv->elevator_name);
1085 else
1086 len += sprintf(name+len, "%s ", __e->elevator_name);
1087 }
1088 spin_unlock(&elv_list_lock);
1089
1090 len += sprintf(len+name, "\n");
1091 return len;
1092}
1093
1094struct request *elv_rb_former_request(struct request_queue *q,
1095 struct request *rq)
1096{
1097 struct rb_node *rbprev = rb_prev(&rq->rb_node);
1098
1099 if (rbprev)
1100 return rb_entry_rq(rbprev);
1101
1102 return NULL;
1103}
1104EXPORT_SYMBOL(elv_rb_former_request);
1105
1106struct request *elv_rb_latter_request(struct request_queue *q,
1107 struct request *rq)
1108{
1109 struct rb_node *rbnext = rb_next(&rq->rb_node);
1110
1111 if (rbnext)
1112 return rb_entry_rq(rbnext);
1113
1114 return NULL;
1115}
1116EXPORT_SYMBOL(elv_rb_latter_request);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Block device elevator/IO-scheduler.
4 *
5 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
6 *
7 * 30042000 Jens Axboe <axboe@kernel.dk> :
8 *
9 * Split the elevator a bit so that it is possible to choose a different
10 * one or even write a new "plug in". There are three pieces:
11 * - elevator_fn, inserts a new request in the queue list
12 * - elevator_merge_fn, decides whether a new buffer can be merged with
13 * an existing request
14 * - elevator_dequeue_fn, called when a request is taken off the active list
15 *
16 * 20082000 Dave Jones <davej@suse.de> :
17 * Removed tests for max-bomb-segments, which was breaking elvtune
18 * when run without -bN
19 *
20 * Jens:
21 * - Rework again to work with bio instead of buffer_heads
 22 * - lose bi_dev comparisons, partition handling is correct now
23 * - completely modularize elevator setup and teardown
24 *
25 */
26#include <linux/kernel.h>
27#include <linux/fs.h>
28#include <linux/blkdev.h>
29#include <linux/bio.h>
30#include <linux/module.h>
31#include <linux/slab.h>
32#include <linux/init.h>
33#include <linux/compiler.h>
34#include <linux/blktrace_api.h>
35#include <linux/hash.h>
36#include <linux/uaccess.h>
37#include <linux/pm_runtime.h>
38
39#include <trace/events/block.h>
40
41#include "elevator.h"
42#include "blk.h"
43#include "blk-mq-sched.h"
44#include "blk-pm.h"
45#include "blk-wbt.h"
46#include "blk-cgroup.h"
47
48static DEFINE_SPINLOCK(elv_list_lock);
49static LIST_HEAD(elv_list);
50
51/*
52 * Merge hash stuff.
53 */
54#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
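/*
 * The hash key is the request's end sector, so looking up a bio's start
 * sector in elv_rqhash_find() yields requests the bio could back-merge
 * into (see elv_merge() below).
 */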
55
56/*
57 * Query io scheduler to see if the current process issuing bio may be
58 * merged with rq.
59 */
60static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
61{
62 struct request_queue *q = rq->q;
63 struct elevator_queue *e = q->elevator;
64
65 if (e->type->ops.allow_merge)
66 return e->type->ops.allow_merge(q, rq, bio);
67
68 return true;
69}
70
71/*
72 * can we safely merge with this request?
73 */
74bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
75{
76 if (!blk_rq_merge_ok(rq, bio))
77 return false;
78
79 if (!elv_iosched_allow_bio_merge(rq, bio))
80 return false;
81
82 return true;
83}
84EXPORT_SYMBOL(elv_bio_merge_ok);
85
86/**
87 * elevator_match - Check whether @e's name or alias matches @name
88 * @e: Scheduler to test
89 * @name: Elevator name to test
90 *
91 * Return true if the elevator @e's name or alias matches @name.
92 */
93static bool elevator_match(const struct elevator_type *e, const char *name)
94{
95 return !strcmp(e->elevator_name, name) ||
96 (e->elevator_alias && !strcmp(e->elevator_alias, name));
97}
98
99static struct elevator_type *__elevator_find(const char *name)
100{
101 struct elevator_type *e;
102
103 list_for_each_entry(e, &elv_list, list)
104 if (elevator_match(e, name))
105 return e;
106 return NULL;
107}
108
109static struct elevator_type *elevator_find_get(const char *name)
110{
111 struct elevator_type *e;
112
113 spin_lock(&elv_list_lock);
114 e = __elevator_find(name);
115 if (e && (!elevator_tryget(e)))
116 e = NULL;
117 spin_unlock(&elv_list_lock);
118 return e;
119}
120
121static const struct kobj_type elv_ktype;
122
123struct elevator_queue *elevator_alloc(struct request_queue *q,
124 struct elevator_type *e)
125{
126 struct elevator_queue *eq;
127
128 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
129 if (unlikely(!eq))
130 return NULL;
131
132 __elevator_get(e);
133 eq->type = e;
134 kobject_init(&eq->kobj, &elv_ktype);
135 mutex_init(&eq->sysfs_lock);
136 hash_init(eq->hash);
137
138 return eq;
139}
140EXPORT_SYMBOL(elevator_alloc);
141
142static void elevator_release(struct kobject *kobj)
143{
144 struct elevator_queue *e;
145
146 e = container_of(kobj, struct elevator_queue, kobj);
147 elevator_put(e->type);
148 kfree(e);
149}
150
151void elevator_exit(struct request_queue *q)
152{
153 struct elevator_queue *e = q->elevator;
154
155 ioc_clear_queue(q);
156 blk_mq_sched_free_rqs(q);
157
158 mutex_lock(&e->sysfs_lock);
159 blk_mq_exit_sched(q, e);
160 mutex_unlock(&e->sysfs_lock);
161
162 kobject_put(&e->kobj);
163}
164
165static inline void __elv_rqhash_del(struct request *rq)
166{
167 hash_del(&rq->hash);
168 rq->rq_flags &= ~RQF_HASHED;
169}
170
171void elv_rqhash_del(struct request_queue *q, struct request *rq)
172{
173 if (ELV_ON_HASH(rq))
174 __elv_rqhash_del(rq);
175}
176EXPORT_SYMBOL_GPL(elv_rqhash_del);
177
178void elv_rqhash_add(struct request_queue *q, struct request *rq)
179{
180 struct elevator_queue *e = q->elevator;
181
182 BUG_ON(ELV_ON_HASH(rq));
183 hash_add(e->hash, &rq->hash, rq_hash_key(rq));
184 rq->rq_flags |= RQF_HASHED;
185}
186EXPORT_SYMBOL_GPL(elv_rqhash_add);
187
188void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
189{
190 __elv_rqhash_del(rq);
191 elv_rqhash_add(q, rq);
192}
193
194struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
195{
196 struct elevator_queue *e = q->elevator;
197 struct hlist_node *next;
198 struct request *rq;
199
200 hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
201 BUG_ON(!ELV_ON_HASH(rq));
202
203 if (unlikely(!rq_mergeable(rq))) {
204 __elv_rqhash_del(rq);
205 continue;
206 }
207
208 if (rq_hash_key(rq) == offset)
209 return rq;
210 }
211
212 return NULL;
213}
214
215/*
216 * RB-tree support functions for inserting/lookup/removal of requests
217 * in a sorted RB tree.
218 */
219void elv_rb_add(struct rb_root *root, struct request *rq)
220{
221 struct rb_node **p = &root->rb_node;
222 struct rb_node *parent = NULL;
223 struct request *__rq;
224
225 while (*p) {
226 parent = *p;
227 __rq = rb_entry(parent, struct request, rb_node);
228
229 if (blk_rq_pos(rq) < blk_rq_pos(__rq))
230 p = &(*p)->rb_left;
231 else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
232 p = &(*p)->rb_right;
233 }
234
235 rb_link_node(&rq->rb_node, parent, p);
236 rb_insert_color(&rq->rb_node, root);
237}
238EXPORT_SYMBOL(elv_rb_add);
239
240void elv_rb_del(struct rb_root *root, struct request *rq)
241{
242 BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
243 rb_erase(&rq->rb_node, root);
244 RB_CLEAR_NODE(&rq->rb_node);
245}
246EXPORT_SYMBOL(elv_rb_del);
247
248struct request *elv_rb_find(struct rb_root *root, sector_t sector)
249{
250 struct rb_node *n = root->rb_node;
251 struct request *rq;
252
253 while (n) {
254 rq = rb_entry(n, struct request, rb_node);
255
256 if (sector < blk_rq_pos(rq))
257 n = n->rb_left;
258 else if (sector > blk_rq_pos(rq))
259 n = n->rb_right;
260 else
261 return rq;
262 }
263
264 return NULL;
265}
266EXPORT_SYMBOL(elv_rb_find);
267
268enum elv_merge elv_merge(struct request_queue *q, struct request **req,
269 struct bio *bio)
270{
271 struct elevator_queue *e = q->elevator;
272 struct request *__rq;
273
274 /*
275 * Levels of merges:
276 * nomerges: No merges at all attempted
277 * noxmerges: Only simple one-hit cache try
278 * merges: All merge tries attempted
279 */
280 if (blk_queue_nomerges(q) || !bio_mergeable(bio))
281 return ELEVATOR_NO_MERGE;
282
283 /*
284 * First try one-hit cache.
285 */
286 if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
287 enum elv_merge ret = blk_try_merge(q->last_merge, bio);
288
289 if (ret != ELEVATOR_NO_MERGE) {
290 *req = q->last_merge;
291 return ret;
292 }
293 }
294
295 if (blk_queue_noxmerges(q))
296 return ELEVATOR_NO_MERGE;
297
298 /*
299 * See if our hash lookup can find a potential backmerge.
300 */
301 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
302 if (__rq && elv_bio_merge_ok(__rq, bio)) {
303 *req = __rq;
304
305 if (blk_discard_mergable(__rq))
306 return ELEVATOR_DISCARD_MERGE;
307 return ELEVATOR_BACK_MERGE;
308 }
309
310 if (e->type->ops.request_merge)
311 return e->type->ops.request_merge(q, req, bio);
312
313 return ELEVATOR_NO_MERGE;
314}
315
316/*
317 * Attempt to do an insertion back merge. Only check for the case where
318 * we can append 'rq' to an existing request, so we can throw 'rq' away
319 * afterwards.
320 *
321 * Returns true if we merged, false otherwise. 'free' will contain all
322 * requests that need to be freed.
323 */
324bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
325 struct list_head *free)
326{
327 struct request *__rq;
328 bool ret;
329
330 if (blk_queue_nomerges(q))
331 return false;
332
333 /*
334 * First try one-hit cache.
335 */
336 if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
337 list_add(&rq->queuelist, free);
338 return true;
339 }
340
341 if (blk_queue_noxmerges(q))
342 return false;
343
344 ret = false;
345 /*
346 * See if our hash lookup can find a potential backmerge.
347 */
348 while (1) {
349 __rq = elv_rqhash_find(q, blk_rq_pos(rq));
350 if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
351 break;
352
353 list_add(&rq->queuelist, free);
354 /* The merged request could be merged with others, try again */
355 ret = true;
356 rq = __rq;
357 }
358
359 return ret;
360}
361
362void elv_merged_request(struct request_queue *q, struct request *rq,
363 enum elv_merge type)
364{
365 struct elevator_queue *e = q->elevator;
366
367 if (e->type->ops.request_merged)
368 e->type->ops.request_merged(q, rq, type);
369
370 if (type == ELEVATOR_BACK_MERGE)
371 elv_rqhash_reposition(q, rq);
372
373 q->last_merge = rq;
374}
375
376void elv_merge_requests(struct request_queue *q, struct request *rq,
377 struct request *next)
378{
379 struct elevator_queue *e = q->elevator;
380
381 if (e->type->ops.requests_merged)
382 e->type->ops.requests_merged(q, rq, next);
383
384 elv_rqhash_reposition(q, rq);
385 q->last_merge = rq;
386}
387
388struct request *elv_latter_request(struct request_queue *q, struct request *rq)
389{
390 struct elevator_queue *e = q->elevator;
391
392 if (e->type->ops.next_request)
393 return e->type->ops.next_request(q, rq);
394
395 return NULL;
396}
397
398struct request *elv_former_request(struct request_queue *q, struct request *rq)
399{
400 struct elevator_queue *e = q->elevator;
401
402 if (e->type->ops.former_request)
403 return e->type->ops.former_request(q, rq);
404
405 return NULL;
406}
407
408#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
409
410static ssize_t
411elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
412{
413 struct elv_fs_entry *entry = to_elv(attr);
414 struct elevator_queue *e;
415 ssize_t error;
416
417 if (!entry->show)
418 return -EIO;
419
420 e = container_of(kobj, struct elevator_queue, kobj);
421 mutex_lock(&e->sysfs_lock);
422 error = e->type ? entry->show(e, page) : -ENOENT;
423 mutex_unlock(&e->sysfs_lock);
424 return error;
425}
426
427static ssize_t
428elv_attr_store(struct kobject *kobj, struct attribute *attr,
429 const char *page, size_t length)
430{
431 struct elv_fs_entry *entry = to_elv(attr);
432 struct elevator_queue *e;
433 ssize_t error;
434
435 if (!entry->store)
436 return -EIO;
437
438 e = container_of(kobj, struct elevator_queue, kobj);
439 mutex_lock(&e->sysfs_lock);
440 error = e->type ? entry->store(e, page, length) : -ENOENT;
441 mutex_unlock(&e->sysfs_lock);
442 return error;
443}
444
445static const struct sysfs_ops elv_sysfs_ops = {
446 .show = elv_attr_show,
447 .store = elv_attr_store,
448};
449
450static const struct kobj_type elv_ktype = {
451 .sysfs_ops = &elv_sysfs_ops,
452 .release = elevator_release,
453};
454
455int elv_register_queue(struct request_queue *q, bool uevent)
456{
457 struct elevator_queue *e = q->elevator;
458 int error;
459
460 lockdep_assert_held(&q->sysfs_lock);
461
462 error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
463 if (!error) {
464 struct elv_fs_entry *attr = e->type->elevator_attrs;
465 if (attr) {
466 while (attr->attr.name) {
467 if (sysfs_create_file(&e->kobj, &attr->attr))
468 break;
469 attr++;
470 }
471 }
472 if (uevent)
473 kobject_uevent(&e->kobj, KOBJ_ADD);
474
475 set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
476 }
477 return error;
478}
479
480void elv_unregister_queue(struct request_queue *q)
481{
482 struct elevator_queue *e = q->elevator;
483
484 lockdep_assert_held(&q->sysfs_lock);
485
486 if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
487 kobject_uevent(&e->kobj, KOBJ_REMOVE);
488 kobject_del(&e->kobj);
489 }
490}
491
492int elv_register(struct elevator_type *e)
493{
494 /* finish request is mandatory */
495 if (WARN_ON_ONCE(!e->ops.finish_request))
496 return -EINVAL;
497 /* insert_requests and dispatch_request are mandatory */
498 if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
499 return -EINVAL;
500
501 /* create icq_cache if requested */
502 if (e->icq_size) {
503 if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
504 WARN_ON(e->icq_align < __alignof__(struct io_cq)))
505 return -EINVAL;
506
507 snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
508 "%s_io_cq", e->elevator_name);
509 e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
510 e->icq_align, 0, NULL);
511 if (!e->icq_cache)
512 return -ENOMEM;
513 }
514
515 /* register, don't allow duplicate names */
516 spin_lock(&elv_list_lock);
517 if (__elevator_find(e->elevator_name)) {
518 spin_unlock(&elv_list_lock);
519 kmem_cache_destroy(e->icq_cache);
520 return -EBUSY;
521 }
522 list_add_tail(&e->list, &elv_list);
523 spin_unlock(&elv_list_lock);
524
525 printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
526
527 return 0;
528}
529EXPORT_SYMBOL_GPL(elv_register);
530
531void elv_unregister(struct elevator_type *e)
532{
533 /* unregister */
534 spin_lock(&elv_list_lock);
535 list_del_init(&e->list);
536 spin_unlock(&elv_list_lock);
537
538 /*
539 * Destroy icq_cache if it exists. icq's are RCU managed. Make
540 * sure all RCU operations are complete before proceeding.
541 */
542 if (e->icq_cache) {
543 rcu_barrier();
544 kmem_cache_destroy(e->icq_cache);
545 e->icq_cache = NULL;
546 }
547}
548EXPORT_SYMBOL_GPL(elv_unregister);
549
550static inline bool elv_support_iosched(struct request_queue *q)
551{
552 if (!queue_is_mq(q) ||
553 (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
554 return false;
555 return true;
556}
557
558/*
559 * For single queue devices, default to using mq-deadline. If we have multiple
560 * queues or mq-deadline is not available, default to "none".
561 */
562static struct elevator_type *elevator_get_default(struct request_queue *q)
563{
564 if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
565 return NULL;
566
567 if (q->nr_hw_queues != 1 &&
568 !blk_mq_is_shared_tags(q->tag_set->flags))
569 return NULL;
570
571 return elevator_find_get("mq-deadline");
572}
573
574/*
575 * Use the default elevator settings. If the chosen elevator initialization
576 * fails, fall back to the "none" elevator (no elevator).
577 */
578void elevator_init_mq(struct request_queue *q)
579{
580 struct elevator_type *e;
581 int err;
582
583 if (!elv_support_iosched(q))
584 return;
585
586 WARN_ON_ONCE(blk_queue_registered(q));
587
588 if (unlikely(q->elevator))
589 return;
590
591 e = elevator_get_default(q);
592 if (!e)
593 return;
594
595 /*
 596 * We are called before the disk is added, when there isn't any FS I/O,
 597 * so freezing the queue plus canceling the dispatch work is enough to
 598 * drain any dispatch activity originating from passthrough requests.
 599 * There is then no need to quiesce the queue, which could add long
 600 * boot latency, especially when lots of disks are involved.
601 *
602 * Disk isn't added yet, so verifying queue lock only manually.
603 */
604 blk_freeze_queue_start_non_owner(q);
605 blk_freeze_acquire_lock(q, true, false);
606 blk_mq_freeze_queue_wait(q);
607
608 blk_mq_cancel_work_sync(q);
609
610 err = blk_mq_init_sched(q, e);
611
612 blk_unfreeze_release_lock(q, true, false);
613 blk_mq_unfreeze_queue_non_owner(q);
614
615 if (err) {
616 pr_warn("\"%s\" elevator initialization failed, "
617 "falling back to \"none\"\n", e->elevator_name);
618 }
619
620 elevator_put(e);
621}
622
623/*
624 * Switch to new_e io scheduler.
625 *
 626 * If switching fails, we are most likely out of memory and unable to
 627 * restore the old io scheduler, so the io scheduler is left as "none".
628 */
629int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
630{
631 int ret;
632
633 lockdep_assert_held(&q->sysfs_lock);
634
635 blk_mq_freeze_queue(q);
636 blk_mq_quiesce_queue(q);
637
638 if (q->elevator) {
639 elv_unregister_queue(q);
640 elevator_exit(q);
641 }
642
643 ret = blk_mq_init_sched(q, new_e);
644 if (ret)
645 goto out_unfreeze;
646
647 ret = elv_register_queue(q, true);
648 if (ret) {
649 elevator_exit(q);
650 goto out_unfreeze;
651 }
652 blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
653
654out_unfreeze:
655 blk_mq_unquiesce_queue(q);
656 blk_mq_unfreeze_queue(q);
657
658 if (ret) {
659 pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
660 new_e->elevator_name);
661 }
662
663 return ret;
664}
665
666void elevator_disable(struct request_queue *q)
667{
668 lockdep_assert_held(&q->sysfs_lock);
669
670 blk_mq_freeze_queue(q);
671 blk_mq_quiesce_queue(q);
672
673 elv_unregister_queue(q);
674 elevator_exit(q);
675 blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
676 q->elevator = NULL;
677 q->nr_requests = q->tag_set->queue_depth;
678 blk_add_trace_msg(q, "elv switch: none");
679
680 blk_mq_unquiesce_queue(q);
681 blk_mq_unfreeze_queue(q);
682}
683
684/*
685 * Switch this queue to the given IO scheduler.
686 */
687static int elevator_change(struct request_queue *q, const char *elevator_name)
688{
689 struct elevator_type *e;
690 int ret;
691
692 /* Make sure queue is not in the middle of being removed */
693 if (!blk_queue_registered(q))
694 return -ENOENT;
695
696 if (!strncmp(elevator_name, "none", 4)) {
697 if (q->elevator)
698 elevator_disable(q);
699 return 0;
700 }
701
702 if (q->elevator && elevator_match(q->elevator->type, elevator_name))
703 return 0;
704
705 e = elevator_find_get(elevator_name);
706 if (!e)
707 return -EINVAL;
708 ret = elevator_switch(q, e);
709 elevator_put(e);
710 return ret;
711}
712
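/*
 * Best-effort module load for a scheduler named in a sysfs write: if the
 * name is not on elv_list yet, ask modprobe for "<name>-iosched" so that a
 * later elevator_change() can find it. Failures are ignored.
 */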
713void elv_iosched_load_module(struct gendisk *disk, const char *buf,
714 size_t count)
715{
716 char elevator_name[ELV_NAME_MAX];
717 struct elevator_type *found;
718 const char *name;
719
720 if (!elv_support_iosched(disk->queue))
721 return;
722
723 strscpy(elevator_name, buf, sizeof(elevator_name));
724 name = strstrip(elevator_name);
725
726 spin_lock(&elv_list_lock);
727 found = __elevator_find(name);
728 spin_unlock(&elv_list_lock);
729
730 if (!found)
731 request_module("%s-iosched", name);
732}
733
734ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
735 size_t count)
736{
737 char elevator_name[ELV_NAME_MAX];
738 int ret;
739
740 if (!elv_support_iosched(disk->queue))
741 return count;
742
743 strscpy(elevator_name, buf, sizeof(elevator_name));
744 ret = elevator_change(disk->queue, strstrip(elevator_name));
745 if (!ret)
746 return count;
747 return ret;
748}
749
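/*
 * sysfs "scheduler" show: print "none" plus every registered elevator,
 * with the active entry in square brackets, e.g. "[none] mq-deadline kyber".
 */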
750ssize_t elv_iosched_show(struct gendisk *disk, char *name)
751{
752 struct request_queue *q = disk->queue;
753 struct elevator_queue *eq = q->elevator;
754 struct elevator_type *cur = NULL, *e;
755 int len = 0;
756
757 if (!elv_support_iosched(q))
758 return sprintf(name, "none\n");
759
760 if (!q->elevator) {
761 len += sprintf(name+len, "[none] ");
762 } else {
763 len += sprintf(name+len, "none ");
764 cur = eq->type;
765 }
766
767 spin_lock(&elv_list_lock);
768 list_for_each_entry(e, &elv_list, list) {
769 if (e == cur)
770 len += sprintf(name+len, "[%s] ", e->elevator_name);
771 else
772 len += sprintf(name+len, "%s ", e->elevator_name);
773 }
774 spin_unlock(&elv_list_lock);
775
776 len += sprintf(name+len, "\n");
777 return len;
778}
779
780struct request *elv_rb_former_request(struct request_queue *q,
781 struct request *rq)
782{
783 struct rb_node *rbprev = rb_prev(&rq->rb_node);
784
785 if (rbprev)
786 return rb_entry_rq(rbprev);
787
788 return NULL;
789}
790EXPORT_SYMBOL(elv_rb_former_request);
791
792struct request *elv_rb_latter_request(struct request_queue *q,
793 struct request *rq)
794{
795 struct rb_node *rbnext = rb_next(&rq->rb_node);
796
797 if (rbnext)
798 return rb_entry_rq(rbnext);
799
800 return NULL;
801}
802EXPORT_SYMBOL(elv_rb_latter_request);
803
804static int __init elevator_setup(char *str)
805{
806 pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
807 "Please use sysfs to set IO scheduler for individual devices.\n");
808 return 1;
809}
810
811__setup("elevator=", elevator_setup);