// SPDX-License-Identifier: GPL-2.0
/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))
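
/*
 * Example: a request covering sectors [2048, 2056) hashes under key
 * 2056, so a bio whose first sector is 2056 can be found via
 * elv_rqhash_find() and appended as a back merge.
 */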

/*
 * Query the io scheduler to see if the bio being issued by the current
 * process may be merged with rq.
 */
static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.allow_merge)
                return e->type->ops.allow_merge(q, rq, bio);

        return true;
}

/*
 * Can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
        if (!blk_rq_merge_ok(rq, bio))
                return false;

        if (!elv_iosched_allow_bio_merge(rq, bio))
                return false;

        return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static inline bool elv_support_features(struct request_queue *q,
                                        const struct elevator_type *e)
{
        return (q->required_elevator_features & e->elevator_features) ==
                q->required_elevator_features;
}
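
/*
 * That is, @e is usable on @q iff the queue's required feature bits are
 * a subset of the elevator's advertised bits. For example, a zoned
 * device requiring ELEVATOR_F_ZBD_SEQ_WRITE matches mq-deadline, which
 * advertises that feature.
 */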

/**
 * elevator_match - Check whether @e's name or alias matches @name
 * @e: Scheduler to test
 * @name: Elevator name to test
 *
 * Return true if the elevator @e's name or alias matches @name.
 */
static bool elevator_match(const struct elevator_type *e, const char *name)
{
        return !strcmp(e->elevator_name, name) ||
                (e->elevator_alias && !strcmp(e->elevator_alias, name));
}
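
/*
 * The alias lets a scheduler answer to a legacy name as well; e.g.
 * mq-deadline registers "deadline" as its elevator_alias.
 */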

static struct elevator_type *__elevator_find(const char *name)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list)
                if (elevator_match(e, name))
                        return e;
        return NULL;
}

static struct elevator_type *elevator_find_get(struct request_queue *q,
                const char *name)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);
        e = __elevator_find(name);
        if (e && (!elv_support_features(q, e) || !elevator_tryget(e)))
                e = NULL;
        spin_unlock(&elv_list_lock);
        return e;
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
                struct elevator_type *e)
{
        struct elevator_queue *eq;

        eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                return NULL;

        __elevator_get(e);
        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);
        hash_init(eq->hash);

        return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
        kfree(e);
}

void elevator_exit(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;

        ioc_clear_queue(q);
        blk_mq_sched_free_rqs(q);

        mutex_lock(&e->sysfs_lock);
        blk_mq_exit_sched(q, e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}

static inline void __elv_rqhash_del(struct request *rq)
{
        hash_del(&rq->hash);
        rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hash_add(e->hash, &rq->hash, rq_hash_key(rq));
        rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_node *next;
        struct request *rq;

        hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for insertion/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
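
/*
 * Sketch of typical usage (modelled on mq-deadline, which keeps one
 * sector-sorted tree per data direction):
 *
 *      elv_rb_add(&sort_list[dir], rq);                add on insertion
 *      elv_rb_del(&sort_list[dir], rq);                remove on dispatch
 *      rq = elv_rb_find(&sort_list[dir], sector);      front-merge lookup
 */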

enum elv_merge elv_merge(struct request_queue *q, struct request **req,
                struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;

        /*
         * Levels of merges:
         *      nomerges:  No merges at all attempted
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
                enum elv_merge ret = blk_try_merge(q->last_merge, bio);

                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;

                if (blk_discard_mergable(__rq))
                        return ELEVATOR_DISCARD_MERGE;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->type->ops.request_merge)
                return e->type->ops.request_merge(q, req, bio);

        return ELEVATOR_NO_MERGE;
}
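
/*
 * Rough sketch of the caller side (see blk_mq_sched_try_merge() in
 * blk-mq-sched.c for the real thing):
 *
 *      switch (elv_merge(q, &rq, bio)) {
 *      case ELEVATOR_BACK_MERGE:
 *              append bio to rq, then call elv_merged_request()
 *      case ELEVATOR_FRONT_MERGE:
 *              prepend bio to rq, then call elv_merged_request()
 *      case ELEVATOR_DISCARD_MERGE:
 *              merge the discard ranges
 *      default:
 *              no merge; insert bio as a new request
 *      }
 */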

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise. 'free' will contain all
 * requests that need to be freed.
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
                              struct list_head *free)
{
        struct request *__rq;
        bool ret;

        if (blk_queue_nomerges(q))
                return false;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
                list_add(&rq->queuelist, free);
                return true;
        }

        if (blk_queue_noxmerges(q))
                return false;

        ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
        while (1) {
                __rq = elv_rqhash_find(q, blk_rq_pos(rq));
                if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
                        break;

                list_add(&rq->queuelist, free);
                /* The merged request could be merged with others, try again */
                ret = true;
                rq = __rq;
        }

        return ret;
}
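
/*
 * Example of the chained case above: with requests for sectors [0, 8)
 * and [8, 16) already hashed, inserting rq = [16, 24) first appends it
 * to [8, 16); the loop then retries with the merged request's front
 * sector 8, finds [0, 8), and collapses all three into one request,
 * with both swallowed request structures ending up on 'free'.
 */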

void elv_merged_request(struct request_queue *q, struct request *rq,
                enum elv_merge type)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.request_merged)
                e->type->ops.request_merged(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
                struct request *next)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.requests_merged)
                e->type->ops.requests_merged(q, rq, next);

        elv_rqhash_reposition(q, rq);
        q->last_merge = rq;
}

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.next_request)
                return e->type->ops.next_request(q, rq);

        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.former_request)
                return e->type->ops.former_request(q, rq);

        return NULL;
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

int elv_register_queue(struct request_queue *q, bool uevent)
{
        struct elevator_queue *e = q->elevator;
        int error;

        lockdep_assert_held(&q->sysfs_lock);

        error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                if (uevent)
                        kobject_uevent(&e->kobj, KOBJ_ADD);

                set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
        }
        return error;
}

void elv_unregister_queue(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;

        lockdep_assert_held(&q->sysfs_lock);

        if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);
        }
}

int elv_register(struct elevator_type *e)
{
        /* insert_requests and dispatch_request are mandatory */
        if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
                return -EINVAL;

        /* create icq_cache if requested */
        if (e->icq_size) {
                if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
                    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
                        return -EINVAL;

                snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                         "%s_io_cq", e->elevator_name);
                e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                                 e->icq_align, 0, NULL);
                if (!e->icq_cache)
                        return -ENOMEM;
        }

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (__elevator_find(e->elevator_name)) {
                spin_unlock(&elv_list_lock);
                kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
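
/*
 * A minimal, hypothetical scheduler module registers itself like this
 * (sketch only; the "example" names are not part of the kernel):
 *
 *      static struct elevator_type example_sched = {
 *              .ops = {
 *                      .insert_requests        = example_insert_requests,
 *                      .dispatch_request       = example_dispatch_request,
 *              },
 *              .elevator_name  = "example",
 *              .elevator_owner = THIS_MODULE,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return elv_register(&example_sched);
 *      }
 *      module_init(example_init);
 *
 *      static void __exit example_exit(void)
 *      {
 *              elv_unregister(&example_sched);
 *      }
 *      module_exit(example_exit);
 */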

void elv_unregister(struct elevator_type *e)
{
        /* unregister */
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

        /*
         * Destroy icq_cache if it exists. icq's are RCU managed. Make
         * sure all RCU operations are complete before proceeding.
         */
        if (e->icq_cache) {
                rcu_barrier();
                kmem_cache_destroy(e->icq_cache);
                e->icq_cache = NULL;
        }
}
EXPORT_SYMBOL_GPL(elv_unregister);

static inline bool elv_support_iosched(struct request_queue *q)
{
        if (!queue_is_mq(q) ||
            (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
                return false;
        return true;
}

/*
 * Default to mq-deadline for single queue devices, and for devices whose
 * tags are shared across hardware queues. If there are multiple,
 * independently tagged queues, or mq-deadline is not available, default
 * to "none".
 */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
        if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
                return NULL;

        if (q->nr_hw_queues != 1 &&
            !blk_mq_is_shared_tags(q->tag_set->flags))
                return NULL;

        return elevator_find_get(q, "mq-deadline");
}

/*
 * Get the first elevator providing the features required by the request queue.
 * Default to "none" if no matching elevator is found.
 */
static struct elevator_type *elevator_get_by_features(struct request_queue *q)
{
        struct elevator_type *e, *found = NULL;

        spin_lock(&elv_list_lock);

        list_for_each_entry(e, &elv_list, list) {
                if (elv_support_features(q, e)) {
                        found = e;
                        break;
                }
        }

        if (found && !elevator_tryget(found))
                found = NULL;

        spin_unlock(&elv_list_lock);
        return found;
}

/*
 * For a device queue that has no required features, use the default elevator
 * settings. Otherwise, use the first available elevator matching the required
 * features. If no suitable elevator is found, or if the chosen elevator's
 * initialization fails, fall back to the "none" elevator (no elevator).
 */
void elevator_init_mq(struct request_queue *q)
{
        struct elevator_type *e;
        int err;

        if (!elv_support_iosched(q))
                return;

        WARN_ON_ONCE(blk_queue_registered(q));

        if (unlikely(q->elevator))
                return;

        if (!q->required_elevator_features)
                e = elevator_get_default(q);
        else
                e = elevator_get_by_features(q);
        if (!e)
                return;

        /*
         * We are called before the disk is added, when there isn't any FS
         * I/O, so freezing the queue plus canceling dispatch work is enough
         * to drain any dispatch activity originating from passthrough
         * requests. There is then no need to quiesce the queue, which could
         * add long boot latency, especially when lots of disks are involved.
         */
        blk_mq_freeze_queue(q);
        blk_mq_cancel_work_sync(q);

        err = blk_mq_init_sched(q, e);

        blk_mq_unfreeze_queue(q);

        if (err) {
                pr_warn("\"%s\" elevator initialization failed, "
                        "falling back to \"none\"\n", e->elevator_name);
        }

        elevator_put(e);
}

/*
 * Switch to new_e io scheduler.
 *
 * If switching fails, we are most likely running out of memory and not able
 * to restore the old io scheduler, so leave the io scheduler as none.
 */
int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        int ret;

        lockdep_assert_held(&q->sysfs_lock);

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        if (q->elevator) {
                elv_unregister_queue(q);
                elevator_exit(q);
        }

        ret = blk_mq_init_sched(q, new_e);
        if (ret)
                goto out_unfreeze;

        ret = elv_register_queue(q, true);
        if (ret) {
                elevator_exit(q);
                goto out_unfreeze;
        }
        blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

out_unfreeze:
        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);

        if (ret) {
                pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
                        new_e->elevator_name);
        }

        return ret;
}

void elevator_disable(struct request_queue *q)
{
        lockdep_assert_held(&q->sysfs_lock);

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        elv_unregister_queue(q);
        elevator_exit(q);
        blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
        q->elevator = NULL;
        q->nr_requests = q->tag_set->queue_depth;
        blk_add_trace_msg(q, "elv switch: none");

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int elevator_change(struct request_queue *q, const char *elevator_name)
{
        struct elevator_type *e;
        int ret;

        /* Make sure queue is not in the middle of being removed */
        if (!blk_queue_registered(q))
                return -ENOENT;

        if (!strncmp(elevator_name, "none", 4)) {
                if (q->elevator)
                        elevator_disable(q);
                return 0;
        }

        if (q->elevator && elevator_match(q->elevator->type, elevator_name))
                return 0;

        e = elevator_find_get(q, elevator_name);
        if (!e) {
                request_module("%s-iosched", elevator_name);
                e = elevator_find_get(q, elevator_name);
                if (!e)
                        return -EINVAL;
        }
        ret = elevator_switch(q, e);
        elevator_put(e);
        return ret;
}
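
/*
 * Writes to /sys/block/<dev>/queue/scheduler arrive via
 * elv_iosched_store() below and funnel into elevator_change(); e.g.
 * writing "bfq" triggers request_module("bfq-iosched") when the
 * scheduler is not yet registered.
 */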

ssize_t elv_iosched_store(struct request_queue *q, const char *buf,
                          size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        int ret;

        if (!elv_support_iosched(q))
                return count;

        strlcpy(elevator_name, buf, sizeof(elevator_name));
        ret = elevator_change(q, strstrip(elevator_name));
        if (!ret)
                return count;
        return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *eq = q->elevator;
        struct elevator_type *cur = NULL, *e;
        int len = 0;

        if (!elv_support_iosched(q))
                return sprintf(name, "none\n");

        if (!q->elevator) {
                len += sprintf(name+len, "[none] ");
        } else {
                len += sprintf(name+len, "none ");
                cur = eq->type;
        }

        spin_lock(&elv_list_lock);
        list_for_each_entry(e, &elv_list, list) {
                if (e == cur)
                        len += sprintf(name+len, "[%s] ", e->elevator_name);
                else if (elv_support_features(q, e))
                        len += sprintf(name+len, "%s ", e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        len += sprintf(name+len, "\n");
        return len;
}
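
/*
 * The resulting sysfs output lists "none" plus every elevator whose
 * features the queue supports, with the active entry in brackets, e.g.:
 *
 *      none [mq-deadline] kyber bfq
 */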

struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);

static int __init elevator_setup(char *str)
{
        pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
                "Please use sysfs to set IO scheduler for individual devices.\n");
        return 1;
}

__setup("elevator=", elevator_setup);