Linux Audio

Check our new training course

Loading...
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  Block device elevator/IO-scheduler.
  4 *
  5 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  6 *
  7 * 30042000 Jens Axboe <axboe@kernel.dk> :
  8 *
  9 * Split the elevator a bit so that it is possible to choose a different
 10 * one or even write a new "plug in". There are three pieces:
 11 * - elevator_fn, inserts a new request in the queue list
 12 * - elevator_merge_fn, decides whether a new buffer can be merged with
 13 *   an existing request
 14 * - elevator_dequeue_fn, called when a request is taken off the active list
 15 *
 16 * 20082000 Dave Jones <davej@suse.de> :
 17 * Removed tests for max-bomb-segments, which was breaking elvtune
 18 *  when run without -bN
 19 *
 20 * Jens:
 21 * - Rework again to work with bio instead of buffer_heads
 22 * - loose bi_dev comparisons, partition handling is right now
 23 * - completely modularize elevator setup and teardown
 24 *
 25 */
 26#include <linux/kernel.h>
 27#include <linux/fs.h>
 28#include <linux/blkdev.h>
 
 29#include <linux/bio.h>
 30#include <linux/module.h>
 31#include <linux/slab.h>
 32#include <linux/init.h>
 33#include <linux/compiler.h>
 34#include <linux/blktrace_api.h>
 35#include <linux/hash.h>
 36#include <linux/uaccess.h>
 37#include <linux/pm_runtime.h>
 
 38
 39#include <trace/events/block.h>
 40
 41#include "elevator.h"
 42#include "blk.h"
 43#include "blk-mq-sched.h"
 44#include "blk-pm.h"
 45#include "blk-wbt.h"
 46#include "blk-cgroup.h"
 47
 48static DEFINE_SPINLOCK(elv_list_lock);
 49static LIST_HEAD(elv_list);
 50
 51/*
 52 * Merge hash stuff.
 53 */
 54#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
 55
 56/*
 57 * Query io scheduler to see if the current process issuing bio may be
 58 * merged with rq.
 59 */
 60static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
 61{
 62	struct request_queue *q = rq->q;
 63	struct elevator_queue *e = q->elevator;
 64
 65	if (e->type->ops.allow_merge)
 66		return e->type->ops.allow_merge(q, rq, bio);
 67
 68	return true;
 69}
 70
 71/*
 72 * can we safely merge with this request?
 73 */
 74bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
 75{
 76	if (!blk_rq_merge_ok(rq, bio))
 77		return false;
 78
 79	if (!elv_iosched_allow_bio_merge(rq, bio))
 80		return false;
 81
 82	return true;
 83}
 84EXPORT_SYMBOL(elv_bio_merge_ok);
 85
 86static inline bool elv_support_features(struct request_queue *q,
 87		const struct elevator_type *e)
 88{
 89	return (q->required_elevator_features & e->elevator_features) ==
 90		q->required_elevator_features;
 91}
 92
 93/**
 94 * elevator_match - Check whether @e's name or alias matches @name
 95 * @e: Scheduler to test
 96 * @name: Elevator name to test
 
 97 *
 98 * Return true if the elevator @e's name or alias matches @name.
 
 99 */
100static bool elevator_match(const struct elevator_type *e, const char *name)
 
101{
102	return !strcmp(e->elevator_name, name) ||
103		(e->elevator_alias && !strcmp(e->elevator_alias, name));
 
 
 
 
 
 
104}
105
106static struct elevator_type *__elevator_find(const char *name)
 
 
 
 
 
 
 
 
 
107{
108	struct elevator_type *e;
109
110	list_for_each_entry(e, &elv_list, list)
111		if (elevator_match(e, name))
112			return e;
 
 
113	return NULL;
114}
115
116static struct elevator_type *elevator_find_get(struct request_queue *q,
117		const char *name)
 
 
 
 
 
118{
119	struct elevator_type *e;
120
121	spin_lock(&elv_list_lock);
122	e = __elevator_find(name);
123	if (e && (!elv_support_features(q, e) || !elevator_tryget(e)))
 
 
 
 
 
 
 
 
124		e = NULL;
 
125	spin_unlock(&elv_list_lock);
126	return e;
127}
128
129static struct kobj_type elv_ktype;
130
131struct elevator_queue *elevator_alloc(struct request_queue *q,
132				  struct elevator_type *e)
133{
134	struct elevator_queue *eq;
135
136	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
137	if (unlikely(!eq))
138		return NULL;
139
140	__elevator_get(e);
141	eq->type = e;
142	kobject_init(&eq->kobj, &elv_ktype);
143	mutex_init(&eq->sysfs_lock);
144	hash_init(eq->hash);
145
146	return eq;
147}
148EXPORT_SYMBOL(elevator_alloc);
149
150static void elevator_release(struct kobject *kobj)
151{
152	struct elevator_queue *e;
153
154	e = container_of(kobj, struct elevator_queue, kobj);
155	elevator_put(e->type);
156	kfree(e);
157}
158
159void elevator_exit(struct request_queue *q)
160{
161	struct elevator_queue *e = q->elevator;
162
163	ioc_clear_queue(q);
164	blk_mq_sched_free_rqs(q);
165
166	mutex_lock(&e->sysfs_lock);
167	blk_mq_exit_sched(q, e);
168	mutex_unlock(&e->sysfs_lock);
169
170	kobject_put(&e->kobj);
171}
172
173static inline void __elv_rqhash_del(struct request *rq)
174{
175	hash_del(&rq->hash);
176	rq->rq_flags &= ~RQF_HASHED;
177}
178
179void elv_rqhash_del(struct request_queue *q, struct request *rq)
180{
181	if (ELV_ON_HASH(rq))
182		__elv_rqhash_del(rq);
183}
184EXPORT_SYMBOL_GPL(elv_rqhash_del);
185
186void elv_rqhash_add(struct request_queue *q, struct request *rq)
187{
188	struct elevator_queue *e = q->elevator;
189
190	BUG_ON(ELV_ON_HASH(rq));
191	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
192	rq->rq_flags |= RQF_HASHED;
193}
194EXPORT_SYMBOL_GPL(elv_rqhash_add);
195
196void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
197{
198	__elv_rqhash_del(rq);
199	elv_rqhash_add(q, rq);
200}
201
202struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
203{
204	struct elevator_queue *e = q->elevator;
205	struct hlist_node *next;
206	struct request *rq;
207
208	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
209		BUG_ON(!ELV_ON_HASH(rq));
210
211		if (unlikely(!rq_mergeable(rq))) {
212			__elv_rqhash_del(rq);
213			continue;
214		}
215
216		if (rq_hash_key(rq) == offset)
217			return rq;
218	}
219
220	return NULL;
221}
222
223/*
224 * RB-tree support functions for inserting/lookup/removal of requests
225 * in a sorted RB tree.
226 */
227void elv_rb_add(struct rb_root *root, struct request *rq)
228{
229	struct rb_node **p = &root->rb_node;
230	struct rb_node *parent = NULL;
231	struct request *__rq;
232
233	while (*p) {
234		parent = *p;
235		__rq = rb_entry(parent, struct request, rb_node);
236
237		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
238			p = &(*p)->rb_left;
239		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
240			p = &(*p)->rb_right;
241	}
242
243	rb_link_node(&rq->rb_node, parent, p);
244	rb_insert_color(&rq->rb_node, root);
245}
246EXPORT_SYMBOL(elv_rb_add);
247
248void elv_rb_del(struct rb_root *root, struct request *rq)
249{
250	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
251	rb_erase(&rq->rb_node, root);
252	RB_CLEAR_NODE(&rq->rb_node);
253}
254EXPORT_SYMBOL(elv_rb_del);
255
256struct request *elv_rb_find(struct rb_root *root, sector_t sector)
257{
258	struct rb_node *n = root->rb_node;
259	struct request *rq;
260
261	while (n) {
262		rq = rb_entry(n, struct request, rb_node);
263
264		if (sector < blk_rq_pos(rq))
265			n = n->rb_left;
266		else if (sector > blk_rq_pos(rq))
267			n = n->rb_right;
268		else
269			return rq;
270	}
271
272	return NULL;
273}
274EXPORT_SYMBOL(elv_rb_find);
275
276enum elv_merge elv_merge(struct request_queue *q, struct request **req,
277		struct bio *bio)
278{
279	struct elevator_queue *e = q->elevator;
280	struct request *__rq;
281
282	/*
283	 * Levels of merges:
284	 * 	nomerges:  No merges at all attempted
285	 * 	noxmerges: Only simple one-hit cache try
286	 * 	merges:	   All merge tries attempted
287	 */
288	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
289		return ELEVATOR_NO_MERGE;
290
291	/*
292	 * First try one-hit cache.
293	 */
294	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
295		enum elv_merge ret = blk_try_merge(q->last_merge, bio);
296
297		if (ret != ELEVATOR_NO_MERGE) {
298			*req = q->last_merge;
299			return ret;
300		}
301	}
302
303	if (blk_queue_noxmerges(q))
304		return ELEVATOR_NO_MERGE;
305
306	/*
307	 * See if our hash lookup can find a potential backmerge.
308	 */
309	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
310	if (__rq && elv_bio_merge_ok(__rq, bio)) {
311		*req = __rq;
312
313		if (blk_discard_mergable(__rq))
314			return ELEVATOR_DISCARD_MERGE;
315		return ELEVATOR_BACK_MERGE;
316	}
317
318	if (e->type->ops.request_merge)
319		return e->type->ops.request_merge(q, req, bio);
320
321	return ELEVATOR_NO_MERGE;
322}
323
324/*
325 * Attempt to do an insertion back merge. Only check for the case where
326 * we can append 'rq' to an existing request, so we can throw 'rq' away
327 * afterwards.
328 *
329 * Returns true if we merged, false otherwise. 'free' will contain all
330 * requests that need to be freed.
331 */
332bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
333			      struct list_head *free)
334{
335	struct request *__rq;
336	bool ret;
337
338	if (blk_queue_nomerges(q))
339		return false;
340
341	/*
342	 * First try one-hit cache.
343	 */
344	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
345		list_add(&rq->queuelist, free);
346		return true;
347	}
348
349	if (blk_queue_noxmerges(q))
350		return false;
351
352	ret = false;
353	/*
354	 * See if our hash lookup can find a potential backmerge.
355	 */
356	while (1) {
357		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
358		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
359			break;
360
361		list_add(&rq->queuelist, free);
362		/* The merged request could be merged with others, try again */
363		ret = true;
364		rq = __rq;
365	}
366
367	return ret;
368}
369
370void elv_merged_request(struct request_queue *q, struct request *rq,
371		enum elv_merge type)
372{
373	struct elevator_queue *e = q->elevator;
374
375	if (e->type->ops.request_merged)
376		e->type->ops.request_merged(q, rq, type);
377
378	if (type == ELEVATOR_BACK_MERGE)
379		elv_rqhash_reposition(q, rq);
380
381	q->last_merge = rq;
382}
383
384void elv_merge_requests(struct request_queue *q, struct request *rq,
385			     struct request *next)
386{
387	struct elevator_queue *e = q->elevator;
388
389	if (e->type->ops.requests_merged)
390		e->type->ops.requests_merged(q, rq, next);
391
392	elv_rqhash_reposition(q, rq);
393	q->last_merge = rq;
394}
395
396struct request *elv_latter_request(struct request_queue *q, struct request *rq)
397{
398	struct elevator_queue *e = q->elevator;
399
400	if (e->type->ops.next_request)
401		return e->type->ops.next_request(q, rq);
402
403	return NULL;
404}
405
406struct request *elv_former_request(struct request_queue *q, struct request *rq)
407{
408	struct elevator_queue *e = q->elevator;
409
410	if (e->type->ops.former_request)
411		return e->type->ops.former_request(q, rq);
412
413	return NULL;
414}
415
416#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
417
418static ssize_t
419elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
420{
421	struct elv_fs_entry *entry = to_elv(attr);
422	struct elevator_queue *e;
423	ssize_t error;
424
425	if (!entry->show)
426		return -EIO;
427
428	e = container_of(kobj, struct elevator_queue, kobj);
429	mutex_lock(&e->sysfs_lock);
430	error = e->type ? entry->show(e, page) : -ENOENT;
431	mutex_unlock(&e->sysfs_lock);
432	return error;
433}
434
435static ssize_t
436elv_attr_store(struct kobject *kobj, struct attribute *attr,
437	       const char *page, size_t length)
438{
439	struct elv_fs_entry *entry = to_elv(attr);
440	struct elevator_queue *e;
441	ssize_t error;
442
443	if (!entry->store)
444		return -EIO;
445
446	e = container_of(kobj, struct elevator_queue, kobj);
447	mutex_lock(&e->sysfs_lock);
448	error = e->type ? entry->store(e, page, length) : -ENOENT;
449	mutex_unlock(&e->sysfs_lock);
450	return error;
451}
452
453static const struct sysfs_ops elv_sysfs_ops = {
454	.show	= elv_attr_show,
455	.store	= elv_attr_store,
456};
457
458static struct kobj_type elv_ktype = {
459	.sysfs_ops	= &elv_sysfs_ops,
460	.release	= elevator_release,
461};
462
463int elv_register_queue(struct request_queue *q, bool uevent)
464{
465	struct elevator_queue *e = q->elevator;
466	int error;
467
468	lockdep_assert_held(&q->sysfs_lock);
469
470	error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
471	if (!error) {
472		struct elv_fs_entry *attr = e->type->elevator_attrs;
473		if (attr) {
474			while (attr->attr.name) {
475				if (sysfs_create_file(&e->kobj, &attr->attr))
476					break;
477				attr++;
478			}
479		}
480		if (uevent)
481			kobject_uevent(&e->kobj, KOBJ_ADD);
482
483		set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
484	}
485	return error;
486}
487
488void elv_unregister_queue(struct request_queue *q)
489{
490	struct elevator_queue *e = q->elevator;
491
492	lockdep_assert_held(&q->sysfs_lock);
493
494	if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
 
 
495		kobject_uevent(&e->kobj, KOBJ_REMOVE);
496		kobject_del(&e->kobj);
 
 
 
 
497	}
498}
499
500int elv_register(struct elevator_type *e)
501{
502	/* insert_requests and dispatch_request are mandatory */
503	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
504		return -EINVAL;
505
506	/* create icq_cache if requested */
507	if (e->icq_size) {
508		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
509		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
510			return -EINVAL;
511
512		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
513			 "%s_io_cq", e->elevator_name);
514		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
515						 e->icq_align, 0, NULL);
516		if (!e->icq_cache)
517			return -ENOMEM;
518	}
519
520	/* register, don't allow duplicate names */
521	spin_lock(&elv_list_lock);
522	if (__elevator_find(e->elevator_name)) {
523		spin_unlock(&elv_list_lock);
524		kmem_cache_destroy(e->icq_cache);
525		return -EBUSY;
526	}
527	list_add_tail(&e->list, &elv_list);
528	spin_unlock(&elv_list_lock);
529
530	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
531
532	return 0;
533}
534EXPORT_SYMBOL_GPL(elv_register);
535
536void elv_unregister(struct elevator_type *e)
537{
538	/* unregister */
539	spin_lock(&elv_list_lock);
540	list_del_init(&e->list);
541	spin_unlock(&elv_list_lock);
542
543	/*
544	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
545	 * sure all RCU operations are complete before proceeding.
546	 */
547	if (e->icq_cache) {
548		rcu_barrier();
549		kmem_cache_destroy(e->icq_cache);
550		e->icq_cache = NULL;
551	}
552}
553EXPORT_SYMBOL_GPL(elv_unregister);
554
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
555static inline bool elv_support_iosched(struct request_queue *q)
556{
557	if (!queue_is_mq(q) ||
558	    (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
559		return false;
560	return true;
561}
562
563/*
564 * For single queue devices, default to using mq-deadline. If we have multiple
565 * queues or mq-deadline is not available, default to "none".
566 */
567static struct elevator_type *elevator_get_default(struct request_queue *q)
568{
569	if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
570		return NULL;
571
572	if (q->nr_hw_queues != 1 &&
573	    !blk_mq_is_shared_tags(q->tag_set->flags))
574		return NULL;
575
576	return elevator_find_get(q, "mq-deadline");
577}
578
579/*
580 * Get the first elevator providing the features required by the request queue.
581 * Default to "none" if no matching elevator is found.
582 */
583static struct elevator_type *elevator_get_by_features(struct request_queue *q)
584{
585	struct elevator_type *e, *found = NULL;
586
587	spin_lock(&elv_list_lock);
588
589	list_for_each_entry(e, &elv_list, list) {
590		if (elv_support_features(q, e)) {
 
591			found = e;
592			break;
593		}
594	}
595
596	if (found && !elevator_tryget(found))
597		found = NULL;
598
599	spin_unlock(&elv_list_lock);
600	return found;
601}
602
603/*
604 * For a device queue that has no required features, use the default elevator
605 * settings. Otherwise, use the first elevator available matching the required
606 * features. If no suitable elevator is find or if the chosen elevator
607 * initialization fails, fall back to the "none" elevator (no elevator).
608 */
609void elevator_init_mq(struct request_queue *q)
610{
611	struct elevator_type *e;
612	int err;
613
614	if (!elv_support_iosched(q))
615		return;
616
617	WARN_ON_ONCE(blk_queue_registered(q));
618
619	if (unlikely(q->elevator))
620		return;
621
622	if (!q->required_elevator_features)
623		e = elevator_get_default(q);
624	else
625		e = elevator_get_by_features(q);
626	if (!e)
627		return;
628
629	/*
630	 * We are called before adding disk, when there isn't any FS I/O,
631	 * so freezing queue plus canceling dispatch work is enough to
632	 * drain any dispatch activities originated from passthrough
633	 * requests, then no need to quiesce queue which may add long boot
634	 * latency, especially when lots of disks are involved.
635	 */
636	blk_mq_freeze_queue(q);
637	blk_mq_cancel_work_sync(q);
638
639	err = blk_mq_init_sched(q, e);
640
 
641	blk_mq_unfreeze_queue(q);
642
643	if (err) {
644		pr_warn("\"%s\" elevator initialization failed, "
645			"falling back to \"none\"\n", e->elevator_name);
 
646	}
647
648	elevator_put(e);
649}
 
650
651/*
652 * Switch to new_e io scheduler.
653 *
654 * If switching fails, we are most likely running out of memory and not able
655 * to restore the old io scheduler, so leaving the io scheduler being none.
656 */
657int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
658{
659	int ret;
660
661	lockdep_assert_held(&q->sysfs_lock);
662
663	blk_mq_freeze_queue(q);
664	blk_mq_quiesce_queue(q);
665
666	if (q->elevator) {
667		elv_unregister_queue(q);
668		elevator_exit(q);
669	}
670
671	ret = blk_mq_init_sched(q, new_e);
672	if (ret)
673		goto out_unfreeze;
674
675	ret = elv_register_queue(q, true);
676	if (ret) {
677		elevator_exit(q);
678		goto out_unfreeze;
679	}
680	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
681
682out_unfreeze:
683	blk_mq_unquiesce_queue(q);
684	blk_mq_unfreeze_queue(q);
685
686	if (ret) {
687		pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
688			new_e->elevator_name);
689	}
690
691	return ret;
692}
693
694void elevator_disable(struct request_queue *q)
695{
696	lockdep_assert_held(&q->sysfs_lock);
697
698	blk_mq_freeze_queue(q);
699	blk_mq_quiesce_queue(q);
700
701	elv_unregister_queue(q);
702	elevator_exit(q);
703	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
704	q->elevator = NULL;
705	q->nr_requests = q->tag_set->queue_depth;
706	blk_add_trace_msg(q, "elv switch: none");
707
708	blk_mq_unquiesce_queue(q);
709	blk_mq_unfreeze_queue(q);
710}
711
712/*
713 * Switch this queue to the given IO scheduler.
714 */
715static int elevator_change(struct request_queue *q, const char *elevator_name)
716{
 
717	struct elevator_type *e;
718	int ret;
719
720	/* Make sure queue is not in the middle of being removed */
721	if (!blk_queue_registered(q))
722		return -ENOENT;
723
724	if (!strncmp(elevator_name, "none", 4)) {
725		if (q->elevator)
726			elevator_disable(q);
727		return 0;
 
 
 
728	}
729
730	if (q->elevator && elevator_match(q->elevator->type, elevator_name))
731		return 0;
 
 
732
733	e = elevator_find_get(q, elevator_name);
734	if (!e) {
735		request_module("%s-iosched", elevator_name);
736		e = elevator_find_get(q, elevator_name);
737		if (!e)
738			return -EINVAL;
739	}
740	ret = elevator_switch(q, e);
741	elevator_put(e);
742	return ret;
743}
744
745ssize_t elv_iosched_store(struct request_queue *q, const char *buf,
746			  size_t count)
747{
748	char elevator_name[ELV_NAME_MAX];
749	int ret;
750
751	if (!elv_support_iosched(q))
752		return count;
753
754	strlcpy(elevator_name, buf, sizeof(elevator_name));
755	ret = elevator_change(q, strstrip(elevator_name));
756	if (!ret)
757		return count;
 
758	return ret;
759}
760
761ssize_t elv_iosched_show(struct request_queue *q, char *name)
762{
763	struct elevator_queue *eq = q->elevator;
764	struct elevator_type *cur = NULL, *e;
 
765	int len = 0;
766
767	if (!elv_support_iosched(q))
768		return sprintf(name, "none\n");
769
770	if (!q->elevator) {
771		len += sprintf(name+len, "[none] ");
772	} else {
773		len += sprintf(name+len, "none ");
774		cur = eq->type;
775	}
776
777	spin_lock(&elv_list_lock);
778	list_for_each_entry(e, &elv_list, list) {
779		if (e == cur)
780			len += sprintf(name+len, "[%s] ", e->elevator_name);
781		else if (elv_support_features(q, e))
782			len += sprintf(name+len, "%s ", e->elevator_name);
 
 
 
 
783	}
784	spin_unlock(&elv_list_lock);
785
786	len += sprintf(name+len, "\n");
 
 
 
787	return len;
788}
789
790struct request *elv_rb_former_request(struct request_queue *q,
791				      struct request *rq)
792{
793	struct rb_node *rbprev = rb_prev(&rq->rb_node);
794
795	if (rbprev)
796		return rb_entry_rq(rbprev);
797
798	return NULL;
799}
800EXPORT_SYMBOL(elv_rb_former_request);
801
802struct request *elv_rb_latter_request(struct request_queue *q,
803				      struct request *rq)
804{
805	struct rb_node *rbnext = rb_next(&rq->rb_node);
806
807	if (rbnext)
808		return rb_entry_rq(rbnext);
809
810	return NULL;
811}
812EXPORT_SYMBOL(elv_rb_latter_request);
813
814static int __init elevator_setup(char *str)
815{
816	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
817		"Please use sysfs to set IO scheduler for individual devices.\n");
818	return 1;
819}
820
821__setup("elevator=", elevator_setup);
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  Block device elevator/IO-scheduler.
  4 *
  5 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  6 *
  7 * 30042000 Jens Axboe <axboe@kernel.dk> :
  8 *
  9 * Split the elevator a bit so that it is possible to choose a different
 10 * one or even write a new "plug in". There are three pieces:
 11 * - elevator_fn, inserts a new request in the queue list
 12 * - elevator_merge_fn, decides whether a new buffer can be merged with
 13 *   an existing request
 14 * - elevator_dequeue_fn, called when a request is taken off the active list
 15 *
 16 * 20082000 Dave Jones <davej@suse.de> :
 17 * Removed tests for max-bomb-segments, which was breaking elvtune
 18 *  when run without -bN
 19 *
 20 * Jens:
 21 * - Rework again to work with bio instead of buffer_heads
 22 * - loose bi_dev comparisons, partition handling is right now
 23 * - completely modularize elevator setup and teardown
 24 *
 25 */
 26#include <linux/kernel.h>
 27#include <linux/fs.h>
 28#include <linux/blkdev.h>
 29#include <linux/elevator.h>
 30#include <linux/bio.h>
 31#include <linux/module.h>
 32#include <linux/slab.h>
 33#include <linux/init.h>
 34#include <linux/compiler.h>
 35#include <linux/blktrace_api.h>
 36#include <linux/hash.h>
 37#include <linux/uaccess.h>
 38#include <linux/pm_runtime.h>
 39#include <linux/blk-cgroup.h>
 40
 41#include <trace/events/block.h>
 42
 
 43#include "blk.h"
 44#include "blk-mq-sched.h"
 45#include "blk-pm.h"
 46#include "blk-wbt.h"
 
 47
 48static DEFINE_SPINLOCK(elv_list_lock);
 49static LIST_HEAD(elv_list);
 50
 51/*
 52 * Merge hash stuff.
 53 */
 54#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
 55
 56/*
 57 * Query io scheduler to see if the current process issuing bio may be
 58 * merged with rq.
 59 */
 60static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
 61{
 62	struct request_queue *q = rq->q;
 63	struct elevator_queue *e = q->elevator;
 64
 65	if (e->type->ops.allow_merge)
 66		return e->type->ops.allow_merge(q, rq, bio);
 67
 68	return 1;
 69}
 70
 71/*
 72 * can we safely merge with this request?
 73 */
 74bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
 75{
 76	if (!blk_rq_merge_ok(rq, bio))
 77		return false;
 78
 79	if (!elv_iosched_allow_bio_merge(rq, bio))
 80		return false;
 81
 82	return true;
 83}
 84EXPORT_SYMBOL(elv_bio_merge_ok);
 85
 86static inline bool elv_support_features(unsigned int elv_features,
 87					unsigned int required_features)
 88{
 89	return (required_features & elv_features) == required_features;
 
 90}
 91
 92/**
 93 * elevator_match - Test an elevator name and features
 94 * @e: Scheduler to test
 95 * @name: Elevator name to test
 96 * @required_features: Features that the elevator must provide
 97 *
 98 * Return true if the elevator @e name matches @name and if @e provides all
 99 * the features specified by @required_features.
100 */
101static bool elevator_match(const struct elevator_type *e, const char *name,
102			   unsigned int required_features)
103{
104	if (!elv_support_features(e->elevator_features, required_features))
105		return false;
106	if (!strcmp(e->elevator_name, name))
107		return true;
108	if (e->elevator_alias && !strcmp(e->elevator_alias, name))
109		return true;
110
111	return false;
112}
113
114/**
115 * elevator_find - Find an elevator
116 * @name: Name of the elevator to find
117 * @required_features: Features that the elevator must provide
118 *
119 * Return the first registered scheduler with name @name and supporting the
120 * features @required_features and NULL otherwise.
121 */
122static struct elevator_type *elevator_find(const char *name,
123					   unsigned int required_features)
124{
125	struct elevator_type *e;
126
127	list_for_each_entry(e, &elv_list, list) {
128		if (elevator_match(e, name, required_features))
129			return e;
130	}
131
132	return NULL;
133}
134
135static void elevator_put(struct elevator_type *e)
136{
137	module_put(e->elevator_owner);
138}
139
140static struct elevator_type *elevator_get(struct request_queue *q,
141					  const char *name, bool try_loading)
142{
143	struct elevator_type *e;
144
145	spin_lock(&elv_list_lock);
146
147	e = elevator_find(name, q->required_elevator_features);
148	if (!e && try_loading) {
149		spin_unlock(&elv_list_lock);
150		request_module("%s-iosched", name);
151		spin_lock(&elv_list_lock);
152		e = elevator_find(name, q->required_elevator_features);
153	}
154
155	if (e && !try_module_get(e->elevator_owner))
156		e = NULL;
157
158	spin_unlock(&elv_list_lock);
159	return e;
160}
161
162static struct kobj_type elv_ktype;
163
164struct elevator_queue *elevator_alloc(struct request_queue *q,
165				  struct elevator_type *e)
166{
167	struct elevator_queue *eq;
168
169	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
170	if (unlikely(!eq))
171		return NULL;
172
 
173	eq->type = e;
174	kobject_init(&eq->kobj, &elv_ktype);
175	mutex_init(&eq->sysfs_lock);
176	hash_init(eq->hash);
177
178	return eq;
179}
180EXPORT_SYMBOL(elevator_alloc);
181
182static void elevator_release(struct kobject *kobj)
183{
184	struct elevator_queue *e;
185
186	e = container_of(kobj, struct elevator_queue, kobj);
187	elevator_put(e->type);
188	kfree(e);
189}
190
191void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
192{
 
 
 
 
 
193	mutex_lock(&e->sysfs_lock);
194	blk_mq_exit_sched(q, e);
195	mutex_unlock(&e->sysfs_lock);
196
197	kobject_put(&e->kobj);
198}
199
200static inline void __elv_rqhash_del(struct request *rq)
201{
202	hash_del(&rq->hash);
203	rq->rq_flags &= ~RQF_HASHED;
204}
205
206void elv_rqhash_del(struct request_queue *q, struct request *rq)
207{
208	if (ELV_ON_HASH(rq))
209		__elv_rqhash_del(rq);
210}
211EXPORT_SYMBOL_GPL(elv_rqhash_del);
212
213void elv_rqhash_add(struct request_queue *q, struct request *rq)
214{
215	struct elevator_queue *e = q->elevator;
216
217	BUG_ON(ELV_ON_HASH(rq));
218	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
219	rq->rq_flags |= RQF_HASHED;
220}
221EXPORT_SYMBOL_GPL(elv_rqhash_add);
222
223void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
224{
225	__elv_rqhash_del(rq);
226	elv_rqhash_add(q, rq);
227}
228
229struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
230{
231	struct elevator_queue *e = q->elevator;
232	struct hlist_node *next;
233	struct request *rq;
234
235	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
236		BUG_ON(!ELV_ON_HASH(rq));
237
238		if (unlikely(!rq_mergeable(rq))) {
239			__elv_rqhash_del(rq);
240			continue;
241		}
242
243		if (rq_hash_key(rq) == offset)
244			return rq;
245	}
246
247	return NULL;
248}
249
250/*
251 * RB-tree support functions for inserting/lookup/removal of requests
252 * in a sorted RB tree.
253 */
254void elv_rb_add(struct rb_root *root, struct request *rq)
255{
256	struct rb_node **p = &root->rb_node;
257	struct rb_node *parent = NULL;
258	struct request *__rq;
259
260	while (*p) {
261		parent = *p;
262		__rq = rb_entry(parent, struct request, rb_node);
263
264		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
265			p = &(*p)->rb_left;
266		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
267			p = &(*p)->rb_right;
268	}
269
270	rb_link_node(&rq->rb_node, parent, p);
271	rb_insert_color(&rq->rb_node, root);
272}
273EXPORT_SYMBOL(elv_rb_add);
274
275void elv_rb_del(struct rb_root *root, struct request *rq)
276{
277	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
278	rb_erase(&rq->rb_node, root);
279	RB_CLEAR_NODE(&rq->rb_node);
280}
281EXPORT_SYMBOL(elv_rb_del);
282
283struct request *elv_rb_find(struct rb_root *root, sector_t sector)
284{
285	struct rb_node *n = root->rb_node;
286	struct request *rq;
287
288	while (n) {
289		rq = rb_entry(n, struct request, rb_node);
290
291		if (sector < blk_rq_pos(rq))
292			n = n->rb_left;
293		else if (sector > blk_rq_pos(rq))
294			n = n->rb_right;
295		else
296			return rq;
297	}
298
299	return NULL;
300}
301EXPORT_SYMBOL(elv_rb_find);
302
303enum elv_merge elv_merge(struct request_queue *q, struct request **req,
304		struct bio *bio)
305{
306	struct elevator_queue *e = q->elevator;
307	struct request *__rq;
308
309	/*
310	 * Levels of merges:
311	 * 	nomerges:  No merges at all attempted
312	 * 	noxmerges: Only simple one-hit cache try
313	 * 	merges:	   All merge tries attempted
314	 */
315	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
316		return ELEVATOR_NO_MERGE;
317
318	/*
319	 * First try one-hit cache.
320	 */
321	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
322		enum elv_merge ret = blk_try_merge(q->last_merge, bio);
323
324		if (ret != ELEVATOR_NO_MERGE) {
325			*req = q->last_merge;
326			return ret;
327		}
328	}
329
330	if (blk_queue_noxmerges(q))
331		return ELEVATOR_NO_MERGE;
332
333	/*
334	 * See if our hash lookup can find a potential backmerge.
335	 */
336	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
337	if (__rq && elv_bio_merge_ok(__rq, bio)) {
338		*req = __rq;
339
340		if (blk_discard_mergable(__rq))
341			return ELEVATOR_DISCARD_MERGE;
342		return ELEVATOR_BACK_MERGE;
343	}
344
345	if (e->type->ops.request_merge)
346		return e->type->ops.request_merge(q, req, bio);
347
348	return ELEVATOR_NO_MERGE;
349}
350
351/*
352 * Attempt to do an insertion back merge. Only check for the case where
353 * we can append 'rq' to an existing request, so we can throw 'rq' away
354 * afterwards.
355 *
356 * Returns true if we merged, false otherwise. 'free' will contain all
357 * requests that need to be freed.
358 */
359bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
360			      struct list_head *free)
361{
362	struct request *__rq;
363	bool ret;
364
365	if (blk_queue_nomerges(q))
366		return false;
367
368	/*
369	 * First try one-hit cache.
370	 */
371	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
372		list_add(&rq->queuelist, free);
373		return true;
374	}
375
376	if (blk_queue_noxmerges(q))
377		return false;
378
379	ret = false;
380	/*
381	 * See if our hash lookup can find a potential backmerge.
382	 */
383	while (1) {
384		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
385		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
386			break;
387
388		list_add(&rq->queuelist, free);
389		/* The merged request could be merged with others, try again */
390		ret = true;
391		rq = __rq;
392	}
393
394	return ret;
395}
396
397void elv_merged_request(struct request_queue *q, struct request *rq,
398		enum elv_merge type)
399{
400	struct elevator_queue *e = q->elevator;
401
402	if (e->type->ops.request_merged)
403		e->type->ops.request_merged(q, rq, type);
404
405	if (type == ELEVATOR_BACK_MERGE)
406		elv_rqhash_reposition(q, rq);
407
408	q->last_merge = rq;
409}
410
411void elv_merge_requests(struct request_queue *q, struct request *rq,
412			     struct request *next)
413{
414	struct elevator_queue *e = q->elevator;
415
416	if (e->type->ops.requests_merged)
417		e->type->ops.requests_merged(q, rq, next);
418
419	elv_rqhash_reposition(q, rq);
420	q->last_merge = rq;
421}
422
423struct request *elv_latter_request(struct request_queue *q, struct request *rq)
424{
425	struct elevator_queue *e = q->elevator;
426
427	if (e->type->ops.next_request)
428		return e->type->ops.next_request(q, rq);
429
430	return NULL;
431}
432
433struct request *elv_former_request(struct request_queue *q, struct request *rq)
434{
435	struct elevator_queue *e = q->elevator;
436
437	if (e->type->ops.former_request)
438		return e->type->ops.former_request(q, rq);
439
440	return NULL;
441}
442
443#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
444
445static ssize_t
446elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
447{
448	struct elv_fs_entry *entry = to_elv(attr);
449	struct elevator_queue *e;
450	ssize_t error;
451
452	if (!entry->show)
453		return -EIO;
454
455	e = container_of(kobj, struct elevator_queue, kobj);
456	mutex_lock(&e->sysfs_lock);
457	error = e->type ? entry->show(e, page) : -ENOENT;
458	mutex_unlock(&e->sysfs_lock);
459	return error;
460}
461
462static ssize_t
463elv_attr_store(struct kobject *kobj, struct attribute *attr,
464	       const char *page, size_t length)
465{
466	struct elv_fs_entry *entry = to_elv(attr);
467	struct elevator_queue *e;
468	ssize_t error;
469
470	if (!entry->store)
471		return -EIO;
472
473	e = container_of(kobj, struct elevator_queue, kobj);
474	mutex_lock(&e->sysfs_lock);
475	error = e->type ? entry->store(e, page, length) : -ENOENT;
476	mutex_unlock(&e->sysfs_lock);
477	return error;
478}
479
480static const struct sysfs_ops elv_sysfs_ops = {
481	.show	= elv_attr_show,
482	.store	= elv_attr_store,
483};
484
485static struct kobj_type elv_ktype = {
486	.sysfs_ops	= &elv_sysfs_ops,
487	.release	= elevator_release,
488};
489
490int elv_register_queue(struct request_queue *q, bool uevent)
491{
492	struct elevator_queue *e = q->elevator;
493	int error;
494
495	lockdep_assert_held(&q->sysfs_lock);
496
497	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
498	if (!error) {
499		struct elv_fs_entry *attr = e->type->elevator_attrs;
500		if (attr) {
501			while (attr->attr.name) {
502				if (sysfs_create_file(&e->kobj, &attr->attr))
503					break;
504				attr++;
505			}
506		}
507		if (uevent)
508			kobject_uevent(&e->kobj, KOBJ_ADD);
509
510		e->registered = 1;
511	}
512	return error;
513}
514
515void elv_unregister_queue(struct request_queue *q)
516{
 
 
517	lockdep_assert_held(&q->sysfs_lock);
518
519	if (q) {
520		struct elevator_queue *e = q->elevator;
521
522		kobject_uevent(&e->kobj, KOBJ_REMOVE);
523		kobject_del(&e->kobj);
524
525		e->registered = 0;
526		/* Re-enable throttling in case elevator disabled it */
527		wbt_enable_default(q);
528	}
529}
530
531int elv_register(struct elevator_type *e)
532{
533	/* insert_requests and dispatch_request are mandatory */
534	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
535		return -EINVAL;
536
537	/* create icq_cache if requested */
538	if (e->icq_size) {
539		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
540		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
541			return -EINVAL;
542
543		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
544			 "%s_io_cq", e->elevator_name);
545		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
546						 e->icq_align, 0, NULL);
547		if (!e->icq_cache)
548			return -ENOMEM;
549	}
550
551	/* register, don't allow duplicate names */
552	spin_lock(&elv_list_lock);
553	if (elevator_find(e->elevator_name, 0)) {
554		spin_unlock(&elv_list_lock);
555		kmem_cache_destroy(e->icq_cache);
556		return -EBUSY;
557	}
558	list_add_tail(&e->list, &elv_list);
559	spin_unlock(&elv_list_lock);
560
561	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
562
563	return 0;
564}
565EXPORT_SYMBOL_GPL(elv_register);
566
567void elv_unregister(struct elevator_type *e)
568{
569	/* unregister */
570	spin_lock(&elv_list_lock);
571	list_del_init(&e->list);
572	spin_unlock(&elv_list_lock);
573
574	/*
575	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
576	 * sure all RCU operations are complete before proceeding.
577	 */
578	if (e->icq_cache) {
579		rcu_barrier();
580		kmem_cache_destroy(e->icq_cache);
581		e->icq_cache = NULL;
582	}
583}
584EXPORT_SYMBOL_GPL(elv_unregister);
585
586int elevator_switch_mq(struct request_queue *q,
587			      struct elevator_type *new_e)
588{
589	int ret;
590
591	lockdep_assert_held(&q->sysfs_lock);
592
593	if (q->elevator) {
594		if (q->elevator->registered)
595			elv_unregister_queue(q);
596
597		ioc_clear_queue(q);
598		elevator_exit(q, q->elevator);
599	}
600
601	ret = blk_mq_init_sched(q, new_e);
602	if (ret)
603		goto out;
604
605	if (new_e) {
606		ret = elv_register_queue(q, true);
607		if (ret) {
608			elevator_exit(q, q->elevator);
609			goto out;
610		}
611	}
612
613	if (new_e)
614		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
615	else
616		blk_add_trace_msg(q, "elv switch: none");
617
618out:
619	return ret;
620}
621
622static inline bool elv_support_iosched(struct request_queue *q)
623{
624	if (!queue_is_mq(q) ||
625	    (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
626		return false;
627	return true;
628}
629
630/*
631 * For single queue devices, default to using mq-deadline. If we have multiple
632 * queues or mq-deadline is not available, default to "none".
633 */
634static struct elevator_type *elevator_get_default(struct request_queue *q)
635{
 
 
 
636	if (q->nr_hw_queues != 1 &&
637			!blk_mq_is_sbitmap_shared(q->tag_set->flags))
638		return NULL;
639
640	return elevator_get(q, "mq-deadline", false);
641}
642
643/*
644 * Get the first elevator providing the features required by the request queue.
645 * Default to "none" if no matching elevator is found.
646 */
647static struct elevator_type *elevator_get_by_features(struct request_queue *q)
648{
649	struct elevator_type *e, *found = NULL;
650
651	spin_lock(&elv_list_lock);
652
653	list_for_each_entry(e, &elv_list, list) {
654		if (elv_support_features(e->elevator_features,
655					 q->required_elevator_features)) {
656			found = e;
657			break;
658		}
659	}
660
661	if (found && !try_module_get(found->elevator_owner))
662		found = NULL;
663
664	spin_unlock(&elv_list_lock);
665	return found;
666}
667
668/*
669 * For a device queue that has no required features, use the default elevator
670 * settings. Otherwise, use the first elevator available matching the required
671 * features. If no suitable elevator is find or if the chosen elevator
672 * initialization fails, fall back to the "none" elevator (no elevator).
673 */
674void elevator_init_mq(struct request_queue *q)
675{
676	struct elevator_type *e;
677	int err;
678
679	if (!elv_support_iosched(q))
680		return;
681
682	WARN_ON_ONCE(blk_queue_registered(q));
683
684	if (unlikely(q->elevator))
685		return;
686
687	if (!q->required_elevator_features)
688		e = elevator_get_default(q);
689	else
690		e = elevator_get_by_features(q);
691	if (!e)
692		return;
693
 
 
 
 
 
 
 
694	blk_mq_freeze_queue(q);
695	blk_mq_quiesce_queue(q);
696
697	err = blk_mq_init_sched(q, e);
698
699	blk_mq_unquiesce_queue(q);
700	blk_mq_unfreeze_queue(q);
701
702	if (err) {
703		pr_warn("\"%s\" elevator initialization failed, "
704			"falling back to \"none\"\n", e->elevator_name);
705		elevator_put(e);
706	}
 
 
707}
708EXPORT_SYMBOL_GPL(elevator_init_mq); /* only for dm-rq */
709
710/*
711 * switch to new_e io scheduler. be careful not to introduce deadlocks -
712 * we don't free the old io scheduler, before we have allocated what we
713 * need for the new one. this way we have a chance of going back to the old
714 * one, if the new one fails init for some reason.
715 */
716static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
717{
718	int err;
719
720	lockdep_assert_held(&q->sysfs_lock);
721
722	blk_mq_freeze_queue(q);
723	blk_mq_quiesce_queue(q);
724
725	err = elevator_switch_mq(q, new_e);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
726
 
727	blk_mq_unquiesce_queue(q);
728	blk_mq_unfreeze_queue(q);
729
730	return err;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
731}
732
733/*
734 * Switch this queue to the given IO scheduler.
735 */
736static int __elevator_change(struct request_queue *q, const char *name)
737{
738	char elevator_name[ELV_NAME_MAX];
739	struct elevator_type *e;
 
740
741	/* Make sure queue is not in the middle of being removed */
742	if (!blk_queue_registered(q))
743		return -ENOENT;
744
745	/*
746	 * Special case for mq, turn off scheduling
747	 */
748	if (!strncmp(name, "none", 4)) {
749		if (!q->elevator)
750			return 0;
751		return elevator_switch(q, NULL);
752	}
753
754	strlcpy(elevator_name, name, sizeof(elevator_name));
755	e = elevator_get(q, strstrip(elevator_name), true);
756	if (!e)
757		return -EINVAL;
758
759	if (q->elevator &&
760	    elevator_match(q->elevator->type, elevator_name, 0)) {
761		elevator_put(e);
762		return 0;
 
 
763	}
764
765	return elevator_switch(q, e);
 
766}
767
768ssize_t elv_iosched_store(struct request_queue *q, const char *name,
769			  size_t count)
770{
 
771	int ret;
772
773	if (!elv_support_iosched(q))
774		return count;
775
776	ret = __elevator_change(q, name);
 
777	if (!ret)
778		return count;
779
780	return ret;
781}
782
783ssize_t elv_iosched_show(struct request_queue *q, char *name)
784{
785	struct elevator_queue *e = q->elevator;
786	struct elevator_type *elv = NULL;
787	struct elevator_type *__e;
788	int len = 0;
789
790	if (!queue_is_mq(q))
791		return sprintf(name, "none\n");
792
793	if (!q->elevator)
794		len += sprintf(name+len, "[none] ");
795	else
796		elv = e->type;
 
 
797
798	spin_lock(&elv_list_lock);
799	list_for_each_entry(__e, &elv_list, list) {
800		if (elv && elevator_match(elv, __e->elevator_name, 0)) {
801			len += sprintf(name+len, "[%s] ", elv->elevator_name);
802			continue;
803		}
804		if (elv_support_iosched(q) &&
805		    elevator_match(__e, __e->elevator_name,
806				   q->required_elevator_features))
807			len += sprintf(name+len, "%s ", __e->elevator_name);
808	}
809	spin_unlock(&elv_list_lock);
810
811	if (q->elevator)
812		len += sprintf(name+len, "none");
813
814	len += sprintf(len+name, "\n");
815	return len;
816}
817
818struct request *elv_rb_former_request(struct request_queue *q,
819				      struct request *rq)
820{
821	struct rb_node *rbprev = rb_prev(&rq->rb_node);
822
823	if (rbprev)
824		return rb_entry_rq(rbprev);
825
826	return NULL;
827}
828EXPORT_SYMBOL(elv_rb_former_request);
829
830struct request *elv_rb_latter_request(struct request_queue *q,
831				      struct request *rq)
832{
833	struct rb_node *rbnext = rb_next(&rq->rb_node);
834
835	if (rbnext)
836		return rb_entry_rq(rbnext);
837
838	return NULL;
839}
840EXPORT_SYMBOL(elv_rb_latter_request);
841
842static int __init elevator_setup(char *str)
843{
844	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
845		"Please use sysfs to set IO scheduler for individual devices.\n");
846	return 1;
847}
848
849__setup("elevator=", elevator_setup);