v6.2 (block/elevator.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  Block device elevator/IO-scheduler.
  4 *
  5 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  6 *
  7 * 30042000 Jens Axboe <axboe@kernel.dk> :
  8 *
  9 * Split the elevator a bit so that it is possible to choose a different
 10 * one or even write a new "plug in". There are three pieces:
 11 * - elevator_fn, inserts a new request in the queue list
 12 * - elevator_merge_fn, decides whether a new buffer can be merged with
 13 *   an existing request
 14 * - elevator_dequeue_fn, called when a request is taken off the active list
 15 *
 16 * 20082000 Dave Jones <davej@suse.de> :
 17 * Removed tests for max-bomb-segments, which was breaking elvtune
 18 *  when run without -bN
 19 *
 20 * Jens:
 21 * - Rework again to work with bio instead of buffer_heads
  22 * - lose bi_dev comparisons, partition handling is right now
 23 * - completely modularize elevator setup and teardown
 24 *
 25 */
 26#include <linux/kernel.h>
 27#include <linux/fs.h>
 28#include <linux/blkdev.h>
 29#include <linux/bio.h>
 30#include <linux/module.h>
 31#include <linux/slab.h>
 32#include <linux/init.h>
 33#include <linux/compiler.h>
 34#include <linux/blktrace_api.h>
 35#include <linux/hash.h>
 36#include <linux/uaccess.h>
 37#include <linux/pm_runtime.h>
 38
 39#include <trace/events/block.h>
 40
 41#include "elevator.h"
 42#include "blk.h"
 43#include "blk-mq-sched.h"
 44#include "blk-pm.h"
 45#include "blk-wbt.h"
 46#include "blk-cgroup.h"
 47
 48static DEFINE_SPINLOCK(elv_list_lock);
 49static LIST_HEAD(elv_list);
 50
 51/*
 52 * Merge hash stuff.
 53 */
 54#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
 55
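/*
 * Hedged illustration (not part of elevator.c): rq_hash_key() keys a
 * request by the sector just past its end, so a bio whose bi_sector
 * equals that key can be appended directly.  All names below are made
 * up for the demo.
 */
static inline bool demo_is_back_merge_candidate(unsigned long long rq_pos,
						unsigned long long rq_sectors,
						unsigned long long bio_sector)
{
	/* mirrors rq_hash_key(): blk_rq_pos(rq) + blk_rq_sectors(rq) */
	return rq_pos + rq_sectors == bio_sector; /* e.g. 2048 + 8 == 2056 */
}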
 56/*
 57 * Query io scheduler to see if the current process issuing bio may be
 58 * merged with rq.
 59 */
 60static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
 61{
 62	struct request_queue *q = rq->q;
 63	struct elevator_queue *e = q->elevator;
 64
 65	if (e->type->ops.allow_merge)
 66		return e->type->ops.allow_merge(q, rq, bio);
 67
 68	return true;
 69}
 70
 71/*
 72 * can we safely merge with this request?
 73 */
 74bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
 75{
 76	if (!blk_rq_merge_ok(rq, bio))
 77		return false;
 78
 79	if (!elv_iosched_allow_bio_merge(rq, bio))
 80		return false;
 81
 82	return true;
 83}
 84EXPORT_SYMBOL(elv_bio_merge_ok);
 85
 86static inline bool elv_support_features(struct request_queue *q,
 87		const struct elevator_type *e)
 88{
 89	return (q->required_elevator_features & e->elevator_features) ==
 90		q->required_elevator_features;
 91}
 92
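/*
 * Worked example (hedged): a zoned host-managed disk may set
 * q->required_elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE.  The mask
 * check above, (required & supported) == required, then passes only for
 * schedulers whose elevator_features advertises that bit (mq-deadline
 * does); everything else is filtered out here and in elevator_find_get().
 */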
 93/**
 94 * elevator_match - Check whether @e's name or alias matches @name
 95 * @e: Scheduler to test
 96 * @name: Elevator name to test
 97 *
 98 * Return true if the elevator @e's name or alias matches @name.
 99 */
100static bool elevator_match(const struct elevator_type *e, const char *name)
101{
102	return !strcmp(e->elevator_name, name) ||
103		(e->elevator_alias && !strcmp(e->elevator_alias, name));
104}
105
106static struct elevator_type *__elevator_find(const char *name)
107{
108	struct elevator_type *e;
109
110	list_for_each_entry(e, &elv_list, list)
111		if (elevator_match(e, name))
112			return e;
113	return NULL;
114}
115
116static struct elevator_type *elevator_find_get(struct request_queue *q,
117		const char *name)
118{
119	struct elevator_type *e;
120
121	spin_lock(&elv_list_lock);
122	e = __elevator_find(name);
123	if (e && (!elv_support_features(q, e) || !elevator_tryget(e)))
124		e = NULL;
125	spin_unlock(&elv_list_lock);
126	return e;
127}
128
129static struct kobj_type elv_ktype;
130
131struct elevator_queue *elevator_alloc(struct request_queue *q,
132				  struct elevator_type *e)
133{
134	struct elevator_queue *eq;
135
136	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
137	if (unlikely(!eq))
138		return NULL;
139
140	__elevator_get(e);
141	eq->type = e;
142	kobject_init(&eq->kobj, &elv_ktype);
143	mutex_init(&eq->sysfs_lock);
144	hash_init(eq->hash);
145
146	return eq;
147}
148EXPORT_SYMBOL(elevator_alloc);
149
150static void elevator_release(struct kobject *kobj)
151{
152	struct elevator_queue *e;
153
154	e = container_of(kobj, struct elevator_queue, kobj);
155	elevator_put(e->type);
156	kfree(e);
157}
158
159void elevator_exit(struct request_queue *q)
160{
161	struct elevator_queue *e = q->elevator;
162
163	ioc_clear_queue(q);
164	blk_mq_sched_free_rqs(q);
165
166	mutex_lock(&e->sysfs_lock);
167	blk_mq_exit_sched(q, e);
168	mutex_unlock(&e->sysfs_lock);
169
170	kobject_put(&e->kobj);
171}
172
173static inline void __elv_rqhash_del(struct request *rq)
174{
175	hash_del(&rq->hash);
176	rq->rq_flags &= ~RQF_HASHED;
177}
178
179void elv_rqhash_del(struct request_queue *q, struct request *rq)
180{
181	if (ELV_ON_HASH(rq))
182		__elv_rqhash_del(rq);
183}
184EXPORT_SYMBOL_GPL(elv_rqhash_del);
185
186void elv_rqhash_add(struct request_queue *q, struct request *rq)
187{
188	struct elevator_queue *e = q->elevator;
189
190	BUG_ON(ELV_ON_HASH(rq));
191	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
192	rq->rq_flags |= RQF_HASHED;
193}
194EXPORT_SYMBOL_GPL(elv_rqhash_add);
195
196void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
197{
198	__elv_rqhash_del(rq);
199	elv_rqhash_add(q, rq);
200}
201
202struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
203{
204	struct elevator_queue *e = q->elevator;
205	struct hlist_node *next;
206	struct request *rq;
207
208	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
209		BUG_ON(!ELV_ON_HASH(rq));
210
211		if (unlikely(!rq_mergeable(rq))) {
212			__elv_rqhash_del(rq);
213			continue;
214		}
215
216		if (rq_hash_key(rq) == offset)
217			return rq;
218	}
219
220	return NULL;
221}
222
223/*
224 * RB-tree support functions for inserting/lookup/removal of requests
225 * in a sorted RB tree.
226 */
227void elv_rb_add(struct rb_root *root, struct request *rq)
228{
229	struct rb_node **p = &root->rb_node;
230	struct rb_node *parent = NULL;
231	struct request *__rq;
232
233	while (*p) {
234		parent = *p;
235		__rq = rb_entry(parent, struct request, rb_node);
236
237		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
238			p = &(*p)->rb_left;
239		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
240			p = &(*p)->rb_right;
241	}
242
243	rb_link_node(&rq->rb_node, parent, p);
244	rb_insert_color(&rq->rb_node, root);
245}
246EXPORT_SYMBOL(elv_rb_add);
247
248void elv_rb_del(struct rb_root *root, struct request *rq)
249{
250	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
251	rb_erase(&rq->rb_node, root);
252	RB_CLEAR_NODE(&rq->rb_node);
253}
254EXPORT_SYMBOL(elv_rb_del);
255
256struct request *elv_rb_find(struct rb_root *root, sector_t sector)
257{
258	struct rb_node *n = root->rb_node;
259	struct request *rq;
260
261	while (n) {
262		rq = rb_entry(n, struct request, rb_node);
263
264		if (sector < blk_rq_pos(rq))
265			n = n->rb_left;
266		else if (sector > blk_rq_pos(rq))
267			n = n->rb_right;
268		else
269			return rq;
270	}
271
272	return NULL;
273}
274EXPORT_SYMBOL(elv_rb_find);
275
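/*
 * Hedged sketch (not part of elevator.c): how a scheduler might keep a
 * sector-sorted tree with the helpers above, loosely in the spirit of
 * mq-deadline's per-direction sort lists.  The demo_* names are made up.
 */
struct demo_sched_data {
	struct rb_root sort_list;	/* requests ordered by blk_rq_pos() */
};

static void demo_add_request(struct demo_sched_data *dd, struct request *rq)
{
	elv_rb_add(&dd->sort_list, rq);		/* insert in sector order */
}

static struct request *demo_request_at(struct demo_sched_data *dd,
				       sector_t sector)
{
	return elv_rb_find(&dd->sort_list, sector);	/* exact-match lookup */
}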
276enum elv_merge elv_merge(struct request_queue *q, struct request **req,
277		struct bio *bio)
278{
279	struct elevator_queue *e = q->elevator;
280	struct request *__rq;
281
282	/*
283	 * Levels of merges:
284	 * 	nomerges:  No merges at all attempted
285	 * 	noxmerges: Only simple one-hit cache try
286	 * 	merges:	   All merge tries attempted
287	 */
288	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
289		return ELEVATOR_NO_MERGE;
290
291	/*
292	 * First try one-hit cache.
293	 */
294	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
295		enum elv_merge ret = blk_try_merge(q->last_merge, bio);
296
297		if (ret != ELEVATOR_NO_MERGE) {
298			*req = q->last_merge;
299			return ret;
300		}
301	}
302
303	if (blk_queue_noxmerges(q))
304		return ELEVATOR_NO_MERGE;
305
306	/*
307	 * See if our hash lookup can find a potential backmerge.
308	 */
309	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
310	if (__rq && elv_bio_merge_ok(__rq, bio)) {
311		*req = __rq;
312
313		if (blk_discard_mergable(__rq))
314			return ELEVATOR_DISCARD_MERGE;
315		return ELEVATOR_BACK_MERGE;
316	}
317
318	if (e->type->ops.request_merge)
319		return e->type->ops.request_merge(q, req, bio);
320
321	return ELEVATOR_NO_MERGE;
322}
323
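/*
 * Hedged sketch (not part of elevator.c): the shape of a caller such as
 * blk_mq_sched_try_merge().  The real path performs the merge itself
 * (bio_attempt_back_merge() and friends) under the proper locks; this
 * only shows how the elv_merge() verdict is consumed.
 */
static bool demo_try_merge(struct request_queue *q, struct bio *bio)
{
	struct request *rq = NULL;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:	/* append bio to rq */
	case ELEVATOR_FRONT_MERGE:	/* prepend bio to rq */
	case ELEVATOR_DISCARD_MERGE:	/* extend a discard request */
		return rq != NULL;	/* merge would be attempted on rq */
	case ELEVATOR_NO_MERGE:
	default:
		return false;
	}
}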
324/*
325 * Attempt to do an insertion back merge. Only check for the case where
326 * we can append 'rq' to an existing request, so we can throw 'rq' away
327 * afterwards.
328 *
329 * Returns true if we merged, false otherwise. 'free' will contain all
330 * requests that need to be freed.
331 */
332bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
333			      struct list_head *free)
334{
335	struct request *__rq;
336	bool ret;
337
338	if (blk_queue_nomerges(q))
339		return false;
340
341	/*
342	 * First try one-hit cache.
343	 */
344	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
345		list_add(&rq->queuelist, free);
346		return true;
347	}
348
349	if (blk_queue_noxmerges(q))
350		return false;
351
352	ret = false;
353	/*
354	 * See if our hash lookup can find a potential backmerge.
355	 */
356	while (1) {
357		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
358		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
359			break;
360
361		list_add(&rq->queuelist, free);
362		/* The merged request could be merged with others, try again */
363		ret = true;
364		rq = __rq;
365	}
366
367	return ret;
368}
369
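/*
 * Usage note (hedged): the caller owns 'free'.  A typical caller passes
 * a local LIST_HEAD and, once its locks are dropped, releases every
 * request on the list (blk_mq_free_requests() serves that purpose in
 * this tree).
 */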
370void elv_merged_request(struct request_queue *q, struct request *rq,
371		enum elv_merge type)
372{
373	struct elevator_queue *e = q->elevator;
374
375	if (e->type->ops.request_merged)
376		e->type->ops.request_merged(q, rq, type);
377
378	if (type == ELEVATOR_BACK_MERGE)
379		elv_rqhash_reposition(q, rq);
380
381	q->last_merge = rq;
382}
383
384void elv_merge_requests(struct request_queue *q, struct request *rq,
385			     struct request *next)
386{
387	struct elevator_queue *e = q->elevator;
388
389	if (e->type->ops.requests_merged)
390		e->type->ops.requests_merged(q, rq, next);
391
392	elv_rqhash_reposition(q, rq);
393	q->last_merge = rq;
394}
395
396struct request *elv_latter_request(struct request_queue *q, struct request *rq)
397{
398	struct elevator_queue *e = q->elevator;
399
400	if (e->type->ops.next_request)
401		return e->type->ops.next_request(q, rq);
402
403	return NULL;
404}
405
406struct request *elv_former_request(struct request_queue *q, struct request *rq)
407{
408	struct elevator_queue *e = q->elevator;
409
410	if (e->type->ops.former_request)
411		return e->type->ops.former_request(q, rq);
412
413	return NULL;
414}
415
416#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
417
418static ssize_t
419elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
420{
421	struct elv_fs_entry *entry = to_elv(attr);
422	struct elevator_queue *e;
423	ssize_t error;
424
425	if (!entry->show)
426		return -EIO;
427
428	e = container_of(kobj, struct elevator_queue, kobj);
429	mutex_lock(&e->sysfs_lock);
430	error = e->type ? entry->show(e, page) : -ENOENT;
431	mutex_unlock(&e->sysfs_lock);
432	return error;
433}
434
435static ssize_t
436elv_attr_store(struct kobject *kobj, struct attribute *attr,
437	       const char *page, size_t length)
438{
439	struct elv_fs_entry *entry = to_elv(attr);
440	struct elevator_queue *e;
441	ssize_t error;
442
443	if (!entry->store)
444		return -EIO;
445
446	e = container_of(kobj, struct elevator_queue, kobj);
447	mutex_lock(&e->sysfs_lock);
448	error = e->type ? entry->store(e, page, length) : -ENOENT;
449	mutex_unlock(&e->sysfs_lock);
450	return error;
451}
452
453static const struct sysfs_ops elv_sysfs_ops = {
454	.show	= elv_attr_show,
455	.store	= elv_attr_store,
456};
457
458static struct kobj_type elv_ktype = {
459	.sysfs_ops	= &elv_sysfs_ops,
460	.release	= elevator_release,
461};
462
463int elv_register_queue(struct request_queue *q, bool uevent)
464{
465	struct elevator_queue *e = q->elevator;
466	int error;
467
468	lockdep_assert_held(&q->sysfs_lock);
469
470	error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
471	if (!error) {
472		struct elv_fs_entry *attr = e->type->elevator_attrs;
473		if (attr) {
474			while (attr->attr.name) {
475				if (sysfs_create_file(&e->kobj, &attr->attr))
476					break;
477				attr++;
478			}
479		}
480		if (uevent)
481			kobject_uevent(&e->kobj, KOBJ_ADD);
482
483		set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
484	}
485	return error;
486}
487
488void elv_unregister_queue(struct request_queue *q)
489{
490	struct elevator_queue *e = q->elevator;
491
492	lockdep_assert_held(&q->sysfs_lock);
493
494	if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
495		kobject_uevent(&e->kobj, KOBJ_REMOVE);
496		kobject_del(&e->kobj);
497	}
498}
499
500int elv_register(struct elevator_type *e)
501{
502	/* insert_requests and dispatch_request are mandatory */
503	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
504		return -EINVAL;
505
506	/* create icq_cache if requested */
507	if (e->icq_size) {
508		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
509		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
510			return -EINVAL;
511
512		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
513			 "%s_io_cq", e->elevator_name);
514		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
515						 e->icq_align, 0, NULL);
516		if (!e->icq_cache)
517			return -ENOMEM;
518	}
519
520	/* register, don't allow duplicate names */
521	spin_lock(&elv_list_lock);
522	if (__elevator_find(e->elevator_name)) {
523		spin_unlock(&elv_list_lock);
524		kmem_cache_destroy(e->icq_cache);
525		return -EBUSY;
526	}
527	list_add_tail(&e->list, &elv_list);
528	spin_unlock(&elv_list_lock);
529
530	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
531
532	return 0;
533}
534EXPORT_SYMBOL_GPL(elv_register);
535
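/*
 * Hedged sketch (not part of elevator.c): the minimal shape of a
 * scheduler module registering through elv_register().  All demo_*
 * names are invented; the two mandatory ops mirror the WARN_ON_ONCE
 * check above, with prototypes as declared in elevator.h of this tree.
 */
static void demo_insert_requests(struct blk_mq_hw_ctx *hctx,
				 struct list_head *list, bool at_head)
{
	/* move requests from 'list' onto private scheduler state */
}

static struct request *demo_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	return NULL;			/* stub: nothing to dispatch */
}

static struct elevator_type demo_iosched = {
	.ops = {
		.insert_requests	= demo_insert_requests,
		.dispatch_request	= demo_dispatch_request,
	},
	.elevator_name	= "demo",
	.elevator_owner	= THIS_MODULE,
};

static int __init demo_iosched_init(void)
{
	return elv_register(&demo_iosched);
}

static void __exit demo_iosched_exit(void)
{
	elv_unregister(&demo_iosched);
}
module_init(demo_iosched_init);
module_exit(demo_iosched_exit);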
536void elv_unregister(struct elevator_type *e)
537{
538	/* unregister */
539	spin_lock(&elv_list_lock);
540	list_del_init(&e->list);
541	spin_unlock(&elv_list_lock);
542
543	/*
544	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
545	 * sure all RCU operations are complete before proceeding.
546	 */
547	if (e->icq_cache) {
548		rcu_barrier();
549		kmem_cache_destroy(e->icq_cache);
550		e->icq_cache = NULL;
551	}
552}
553EXPORT_SYMBOL_GPL(elv_unregister);
554
555static inline bool elv_support_iosched(struct request_queue *q)
556{
557	if (!queue_is_mq(q) ||
558	    (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
559		return false;
560	return true;
561}
562
563/*
564 * For single queue devices, default to using mq-deadline. If we have multiple
565 * queues or mq-deadline is not available, default to "none".
566 */
567static struct elevator_type *elevator_get_default(struct request_queue *q)
568{
569	if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
570		return NULL;
571
572	if (q->nr_hw_queues != 1 &&
573	    !blk_mq_is_shared_tags(q->tag_set->flags))
574		return NULL;
575
576	return elevator_find_get(q, "mq-deadline");
577}
578
579/*
580 * Get the first elevator providing the features required by the request queue.
581 * Default to "none" if no matching elevator is found.
582 */
583static struct elevator_type *elevator_get_by_features(struct request_queue *q)
584{
585	struct elevator_type *e, *found = NULL;
586
587	spin_lock(&elv_list_lock);
588
589	list_for_each_entry(e, &elv_list, list) {
590		if (elv_support_features(q, e)) {
591			found = e;
592			break;
593		}
594	}
595
596	if (found && !elevator_tryget(found))
597		found = NULL;
598
599	spin_unlock(&elv_list_lock);
600	return found;
601}
602
603/*
604 * For a device queue that has no required features, use the default elevator
605 * settings. Otherwise, use the first elevator available matching the required
 606 * features. If no suitable elevator is found or if the chosen elevator
607 * initialization fails, fall back to the "none" elevator (no elevator).
608 */
609void elevator_init_mq(struct request_queue *q)
610{
611	struct elevator_type *e;
612	int err;
613
614	if (!elv_support_iosched(q))
615		return;
616
617	WARN_ON_ONCE(blk_queue_registered(q));
618
619	if (unlikely(q->elevator))
620		return;
621
622	if (!q->required_elevator_features)
623		e = elevator_get_default(q);
624	else
625		e = elevator_get_by_features(q);
626	if (!e)
627		return;
628
629	/*
630	 * We are called before adding disk, when there isn't any FS I/O,
631	 * so freezing queue plus canceling dispatch work is enough to
632	 * drain any dispatch activities originated from passthrough
633	 * requests, then no need to quiesce queue which may add long boot
634	 * latency, especially when lots of disks are involved.
635	 */
636	blk_mq_freeze_queue(q);
637	blk_mq_cancel_work_sync(q);
638
639	err = blk_mq_init_sched(q, e);
640
641	blk_mq_unfreeze_queue(q);
642
643	if (err) {
644		pr_warn("\"%s\" elevator initialization failed, "
645			"falling back to \"none\"\n", e->elevator_name);
646	}
647
648	elevator_put(e);
649}
650
651/*
652 * Switch to new_e io scheduler.
653 *
654 * If switching fails, we are most likely running out of memory and not able
 655 * to restore the old io scheduler, so we leave the io scheduler set to none.
656 */
657int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
658{
659	int ret;
660
661	lockdep_assert_held(&q->sysfs_lock);
662
663	blk_mq_freeze_queue(q);
664	blk_mq_quiesce_queue(q);
665
666	if (q->elevator) {
667		elv_unregister_queue(q);
668		elevator_exit(q);
669	}
670
671	ret = blk_mq_init_sched(q, new_e);
672	if (ret)
673		goto out_unfreeze;
674
675	ret = elv_register_queue(q, true);
676	if (ret) {
677		elevator_exit(q);
678		goto out_unfreeze;
679	}
680	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
681
682out_unfreeze:
683	blk_mq_unquiesce_queue(q);
684	blk_mq_unfreeze_queue(q);
685
686	if (ret) {
687		pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
688			new_e->elevator_name);
689	}
690
691	return ret;
692}
693
694void elevator_disable(struct request_queue *q)
695{
696	lockdep_assert_held(&q->sysfs_lock);
697
698	blk_mq_freeze_queue(q);
699	blk_mq_quiesce_queue(q);
700
701	elv_unregister_queue(q);
702	elevator_exit(q);
703	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
704	q->elevator = NULL;
705	q->nr_requests = q->tag_set->queue_depth;
706	blk_add_trace_msg(q, "elv switch: none");
707
708	blk_mq_unquiesce_queue(q);
709	blk_mq_unfreeze_queue(q);
710}
711
712/*
713 * Switch this queue to the given IO scheduler.
714 */
715static int elevator_change(struct request_queue *q, const char *elevator_name)
716{
717	struct elevator_type *e;
718	int ret;
719
720	/* Make sure queue is not in the middle of being removed */
721	if (!blk_queue_registered(q))
722		return -ENOENT;
723
724	if (!strncmp(elevator_name, "none", 4)) {
725		if (q->elevator)
726			elevator_disable(q);
727		return 0;
728	}
729
730	if (q->elevator && elevator_match(q->elevator->type, elevator_name))
731		return 0;
732
733	e = elevator_find_get(q, elevator_name);
734	if (!e) {
735		request_module("%s-iosched", elevator_name);
736		e = elevator_find_get(q, elevator_name);
737		if (!e)
738			return -EINVAL;
739	}
740	ret = elevator_switch(q, e);
741	elevator_put(e);
742	return ret;
743}
744
745ssize_t elv_iosched_store(struct request_queue *q, const char *buf,
746			  size_t count)
747{
748	char elevator_name[ELV_NAME_MAX];
749	int ret;
750
751	if (!elv_support_iosched(q))
752		return count;
753
754	strlcpy(elevator_name, buf, sizeof(elevator_name));
755	ret = elevator_change(q, strstrip(elevator_name));
756	if (!ret)
757		return count;
758	return ret;
759}
760
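/*
 * Usage note (hedged): this store hook backs the per-device scheduler
 * attribute, i.e. what runs on:
 *
 *	# echo mq-deadline > /sys/block/<dev>/queue/scheduler
 *	# echo none > /sys/block/<dev>/queue/scheduler
 *
 * "none" tears the current elevator down via elevator_disable(); any
 * other name is resolved (auto-loading "<name>-iosched" if needed) and
 * switched in by elevator_switch().
 */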
761ssize_t elv_iosched_show(struct request_queue *q, char *name)
762{
763	struct elevator_queue *eq = q->elevator;
764	struct elevator_type *cur = NULL, *e;
765	int len = 0;
766
767	if (!elv_support_iosched(q))
768		return sprintf(name, "none\n");
769
770	if (!q->elevator) {
771		len += sprintf(name+len, "[none] ");
772	} else {
773		len += sprintf(name+len, "none ");
774		cur = eq->type;
775	}
776
777	spin_lock(&elv_list_lock);
778	list_for_each_entry(e, &elv_list, list) {
779		if (e == cur)
780			len += sprintf(name+len, "[%s] ", e->elevator_name);
781		else if (elv_support_features(q, e))
782			len += sprintf(name+len, "%s ", e->elevator_name);
783	}
784	spin_unlock(&elv_list_lock);
785
786	len += sprintf(name+len, "\n");
787	return len;
788}
789
790struct request *elv_rb_former_request(struct request_queue *q,
791				      struct request *rq)
792{
793	struct rb_node *rbprev = rb_prev(&rq->rb_node);
794
795	if (rbprev)
796		return rb_entry_rq(rbprev);
797
798	return NULL;
799}
800EXPORT_SYMBOL(elv_rb_former_request);
801
802struct request *elv_rb_latter_request(struct request_queue *q,
803				      struct request *rq)
804{
805	struct rb_node *rbnext = rb_next(&rq->rb_node);
806
807	if (rbnext)
808		return rb_entry_rq(rbnext);
809
810	return NULL;
811}
812EXPORT_SYMBOL(elv_rb_latter_request);
813
814static int __init elevator_setup(char *str)
815{
816	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
817		"Please use sysfs to set IO scheduler for individual devices.\n");
818	return 1;
819}
820
821__setup("elevator=", elevator_setup);
v5.9 (block/elevator.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  Block device elevator/IO-scheduler.
  4 *
  5 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  6 *
  7 * 30042000 Jens Axboe <axboe@kernel.dk> :
  8 *
  9 * Split the elevator a bit so that it is possible to choose a different
 10 * one or even write a new "plug in". There are three pieces:
 11 * - elevator_fn, inserts a new request in the queue list
 12 * - elevator_merge_fn, decides whether a new buffer can be merged with
 13 *   an existing request
 14 * - elevator_dequeue_fn, called when a request is taken off the active list
 15 *
 16 * 20082000 Dave Jones <davej@suse.de> :
 17 * Removed tests for max-bomb-segments, which was breaking elvtune
 18 *  when run without -bN
 19 *
 20 * Jens:
 21 * - Rework again to work with bio instead of buffer_heads
  22 * - lose bi_dev comparisons, partition handling is right now
 23 * - completely modularize elevator setup and teardown
 24 *
 25 */
 26#include <linux/kernel.h>
 27#include <linux/fs.h>
 28#include <linux/blkdev.h>
 29#include <linux/elevator.h>
 30#include <linux/bio.h>
 31#include <linux/module.h>
 32#include <linux/slab.h>
 33#include <linux/init.h>
 34#include <linux/compiler.h>
 35#include <linux/blktrace_api.h>
 36#include <linux/hash.h>
 37#include <linux/uaccess.h>
 38#include <linux/pm_runtime.h>
 39#include <linux/blk-cgroup.h>
 40
 41#include <trace/events/block.h>
 42
 43#include "blk.h"
 44#include "blk-mq-sched.h"
 45#include "blk-pm.h"
 46#include "blk-wbt.h"
 47
 48static DEFINE_SPINLOCK(elv_list_lock);
 49static LIST_HEAD(elv_list);
 50
 51/*
 52 * Merge hash stuff.
 53 */
 54#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
 55
 56/*
 57 * Query io scheduler to see if the current process issuing bio may be
 58 * merged with rq.
 59 */
 60static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
 61{
 62	struct request_queue *q = rq->q;
 63	struct elevator_queue *e = q->elevator;
 64
 65	if (e->type->ops.allow_merge)
 66		return e->type->ops.allow_merge(q, rq, bio);
 67
 68	return 1;
 69}
 70
 71/*
 72 * can we safely merge with this request?
 73 */
 74bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
 75{
 76	if (!blk_rq_merge_ok(rq, bio))
 77		return false;
 78
 79	if (!elv_iosched_allow_bio_merge(rq, bio))
 80		return false;
 81
 82	return true;
 83}
 84EXPORT_SYMBOL(elv_bio_merge_ok);
 85
 86static inline bool elv_support_features(unsigned int elv_features,
 87					unsigned int required_features)
 88{
 89	return (required_features & elv_features) == required_features;
 90}
 91
 92/**
 93 * elevator_match - Test an elevator name and features
 94 * @e: Scheduler to test
 95 * @name: Elevator name to test
 96 * @required_features: Features that the elevator must provide
 97 *
 98 * Return true if the elevator @e name matches @name and if @e provides all
 99 * the features specified by @required_features.
100 */
101static bool elevator_match(const struct elevator_type *e, const char *name,
102			   unsigned int required_features)
103{
104	if (!elv_support_features(e->elevator_features, required_features))
105		return false;
106	if (!strcmp(e->elevator_name, name))
107		return true;
108	if (e->elevator_alias && !strcmp(e->elevator_alias, name))
109		return true;
110
111	return false;
112}
113
114/**
115 * elevator_find - Find an elevator
116 * @name: Name of the elevator to find
117 * @required_features: Features that the elevator must provide
118 *
119 * Return the first registered scheduler with name @name and supporting the
120 * features @required_features and NULL otherwise.
121 */
122static struct elevator_type *elevator_find(const char *name,
123					   unsigned int required_features)
124{
125	struct elevator_type *e;
126
127	list_for_each_entry(e, &elv_list, list) {
128		if (elevator_match(e, name, required_features))
129			return e;
130	}
131
132	return NULL;
133}
134
135static void elevator_put(struct elevator_type *e)
136{
137	module_put(e->elevator_owner);
138}
139
140static struct elevator_type *elevator_get(struct request_queue *q,
141					  const char *name, bool try_loading)
142{
143	struct elevator_type *e;
144
145	spin_lock(&elv_list_lock);
146
147	e = elevator_find(name, q->required_elevator_features);
148	if (!e && try_loading) {
149		spin_unlock(&elv_list_lock);
150		request_module("%s-iosched", name);
151		spin_lock(&elv_list_lock);
152		e = elevator_find(name, q->required_elevator_features);
153	}
154
155	if (e && !try_module_get(e->elevator_owner))
156		e = NULL;
157
158	spin_unlock(&elv_list_lock);
159	return e;
160}
161
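/*
 * Note (hedged): request_module("%s-iosched", name) only works if the
 * scheduler module declares the matching alias; mq-deadline, for
 * instance, carries MODULE_ALIAS("mq-deadline-iosched").  That is what
 * lets a sysfs write pull in a not-yet-loaded scheduler.
 */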
162static struct kobj_type elv_ktype;
163
164struct elevator_queue *elevator_alloc(struct request_queue *q,
165				  struct elevator_type *e)
166{
167	struct elevator_queue *eq;
168
169	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
170	if (unlikely(!eq))
171		return NULL;
172
173	eq->type = e;
174	kobject_init(&eq->kobj, &elv_ktype);
175	mutex_init(&eq->sysfs_lock);
176	hash_init(eq->hash);
177
178	return eq;
179}
180EXPORT_SYMBOL(elevator_alloc);
181
182static void elevator_release(struct kobject *kobj)
183{
184	struct elevator_queue *e;
185
186	e = container_of(kobj, struct elevator_queue, kobj);
187	elevator_put(e->type);
188	kfree(e);
189}
190
191void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
192{
193	mutex_lock(&e->sysfs_lock);
194	if (e->type->ops.exit_sched)
195		blk_mq_exit_sched(q, e);
196	mutex_unlock(&e->sysfs_lock);
197
198	kobject_put(&e->kobj);
199}
200
201static inline void __elv_rqhash_del(struct request *rq)
202{
203	hash_del(&rq->hash);
204	rq->rq_flags &= ~RQF_HASHED;
205}
206
207void elv_rqhash_del(struct request_queue *q, struct request *rq)
208{
209	if (ELV_ON_HASH(rq))
210		__elv_rqhash_del(rq);
211}
212EXPORT_SYMBOL_GPL(elv_rqhash_del);
213
214void elv_rqhash_add(struct request_queue *q, struct request *rq)
215{
216	struct elevator_queue *e = q->elevator;
217
218	BUG_ON(ELV_ON_HASH(rq));
219	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
220	rq->rq_flags |= RQF_HASHED;
221}
222EXPORT_SYMBOL_GPL(elv_rqhash_add);
223
224void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
225{
226	__elv_rqhash_del(rq);
227	elv_rqhash_add(q, rq);
228}
229
230struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
231{
232	struct elevator_queue *e = q->elevator;
233	struct hlist_node *next;
234	struct request *rq;
235
236	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
237		BUG_ON(!ELV_ON_HASH(rq));
238
239		if (unlikely(!rq_mergeable(rq))) {
240			__elv_rqhash_del(rq);
241			continue;
242		}
243
244		if (rq_hash_key(rq) == offset)
245			return rq;
246	}
247
248	return NULL;
249}
250
251/*
252 * RB-tree support functions for inserting/lookup/removal of requests
253 * in a sorted RB tree.
254 */
255void elv_rb_add(struct rb_root *root, struct request *rq)
256{
257	struct rb_node **p = &root->rb_node;
258	struct rb_node *parent = NULL;
259	struct request *__rq;
260
261	while (*p) {
262		parent = *p;
263		__rq = rb_entry(parent, struct request, rb_node);
264
265		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
266			p = &(*p)->rb_left;
267		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
268			p = &(*p)->rb_right;
269	}
270
271	rb_link_node(&rq->rb_node, parent, p);
272	rb_insert_color(&rq->rb_node, root);
273}
274EXPORT_SYMBOL(elv_rb_add);
275
276void elv_rb_del(struct rb_root *root, struct request *rq)
277{
278	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
279	rb_erase(&rq->rb_node, root);
280	RB_CLEAR_NODE(&rq->rb_node);
281}
282EXPORT_SYMBOL(elv_rb_del);
283
284struct request *elv_rb_find(struct rb_root *root, sector_t sector)
285{
286	struct rb_node *n = root->rb_node;
287	struct request *rq;
288
289	while (n) {
290		rq = rb_entry(n, struct request, rb_node);
291
292		if (sector < blk_rq_pos(rq))
293			n = n->rb_left;
294		else if (sector > blk_rq_pos(rq))
295			n = n->rb_right;
296		else
297			return rq;
298	}
299
300	return NULL;
301}
302EXPORT_SYMBOL(elv_rb_find);
303
304enum elv_merge elv_merge(struct request_queue *q, struct request **req,
305		struct bio *bio)
306{
307	struct elevator_queue *e = q->elevator;
308	struct request *__rq;
309
310	/*
311	 * Levels of merges:
312	 * 	nomerges:  No merges at all attempted
313	 * 	noxmerges: Only simple one-hit cache try
314	 * 	merges:	   All merge tries attempted
315	 */
316	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
317		return ELEVATOR_NO_MERGE;
318
319	/*
320	 * First try one-hit cache.
321	 */
322	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
323		enum elv_merge ret = blk_try_merge(q->last_merge, bio);
324
325		if (ret != ELEVATOR_NO_MERGE) {
326			*req = q->last_merge;
327			return ret;
328		}
329	}
330
331	if (blk_queue_noxmerges(q))
332		return ELEVATOR_NO_MERGE;
333
334	/*
335	 * See if our hash lookup can find a potential backmerge.
336	 */
337	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
338	if (__rq && elv_bio_merge_ok(__rq, bio)) {
339		*req = __rq;
340		return ELEVATOR_BACK_MERGE;
341	}
342
343	if (e->type->ops.request_merge)
344		return e->type->ops.request_merge(q, req, bio);
345
346	return ELEVATOR_NO_MERGE;
347}
348
349/*
350 * Attempt to do an insertion back merge. Only check for the case where
351 * we can append 'rq' to an existing request, so we can throw 'rq' away
352 * afterwards.
353 *
354 * Returns true if we merged, false otherwise
355 */
356bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
357{
358	struct request *__rq;
359	bool ret;
360
361	if (blk_queue_nomerges(q))
362		return false;
363
364	/*
365	 * First try one-hit cache.
366	 */
367	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
368		return true;
369
370	if (blk_queue_noxmerges(q))
371		return false;
372
373	ret = false;
374	/*
375	 * See if our hash lookup can find a potential backmerge.
376	 */
377	while (1) {
378		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
379		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
380			break;
381
382		/* The merged request could be merged with others, try again */
383		ret = true;
384		rq = __rq;
385	}
386
387	return ret;
388}
389
390void elv_merged_request(struct request_queue *q, struct request *rq,
391		enum elv_merge type)
392{
393	struct elevator_queue *e = q->elevator;
394
395	if (e->type->ops.request_merged)
396		e->type->ops.request_merged(q, rq, type);
397
398	if (type == ELEVATOR_BACK_MERGE)
399		elv_rqhash_reposition(q, rq);
400
401	q->last_merge = rq;
402}
403
404void elv_merge_requests(struct request_queue *q, struct request *rq,
405			     struct request *next)
406{
407	struct elevator_queue *e = q->elevator;
408
409	if (e->type->ops.requests_merged)
410		e->type->ops.requests_merged(q, rq, next);
411
412	elv_rqhash_reposition(q, rq);
413	q->last_merge = rq;
414}
415
416struct request *elv_latter_request(struct request_queue *q, struct request *rq)
417{
418	struct elevator_queue *e = q->elevator;
419
420	if (e->type->ops.next_request)
421		return e->type->ops.next_request(q, rq);
422
423	return NULL;
424}
425
426struct request *elv_former_request(struct request_queue *q, struct request *rq)
427{
428	struct elevator_queue *e = q->elevator;
429
430	if (e->type->ops.former_request)
431		return e->type->ops.former_request(q, rq);
432
433	return NULL;
434}
435
436#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
437
438static ssize_t
439elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
440{
441	struct elv_fs_entry *entry = to_elv(attr);
442	struct elevator_queue *e;
443	ssize_t error;
444
445	if (!entry->show)
446		return -EIO;
447
448	e = container_of(kobj, struct elevator_queue, kobj);
449	mutex_lock(&e->sysfs_lock);
450	error = e->type ? entry->show(e, page) : -ENOENT;
451	mutex_unlock(&e->sysfs_lock);
452	return error;
453}
454
455static ssize_t
456elv_attr_store(struct kobject *kobj, struct attribute *attr,
457	       const char *page, size_t length)
458{
459	struct elv_fs_entry *entry = to_elv(attr);
460	struct elevator_queue *e;
461	ssize_t error;
462
463	if (!entry->store)
464		return -EIO;
465
466	e = container_of(kobj, struct elevator_queue, kobj);
467	mutex_lock(&e->sysfs_lock);
468	error = e->type ? entry->store(e, page, length) : -ENOENT;
469	mutex_unlock(&e->sysfs_lock);
470	return error;
471}
472
473static const struct sysfs_ops elv_sysfs_ops = {
474	.show	= elv_attr_show,
475	.store	= elv_attr_store,
476};
477
478static struct kobj_type elv_ktype = {
479	.sysfs_ops	= &elv_sysfs_ops,
480	.release	= elevator_release,
481};
482
483/*
484 * elv_register_queue is called from either blk_register_queue or
 485 * elevator_switch; an elevator switch is prevented from happening in
 486 * either path, so it is safe not to hold q->sysfs_lock.
487 */
488int elv_register_queue(struct request_queue *q, bool uevent)
489{
490	struct elevator_queue *e = q->elevator;
491	int error;
492
493	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
494	if (!error) {
495		struct elv_fs_entry *attr = e->type->elevator_attrs;
496		if (attr) {
497			while (attr->attr.name) {
498				if (sysfs_create_file(&e->kobj, &attr->attr))
499					break;
500				attr++;
501			}
502		}
503		if (uevent)
504			kobject_uevent(&e->kobj, KOBJ_ADD);
505
506		e->registered = 1;
507	}
508	return error;
509}
510
511/*
512 * elv_unregister_queue is called from either blk_unregister_queue or
 513 * elevator_switch; an elevator switch is prevented from happening in
 514 * either path, so it is safe not to hold q->sysfs_lock.
515 */
516void elv_unregister_queue(struct request_queue *q)
517{
518	if (q) {
519		struct elevator_queue *e = q->elevator;
520
521		kobject_uevent(&e->kobj, KOBJ_REMOVE);
522		kobject_del(&e->kobj);
523
524		e->registered = 0;
525		/* Re-enable throttling in case elevator disabled it */
526		wbt_enable_default(q);
527	}
528}
529
530int elv_register(struct elevator_type *e)
531{
532	/* create icq_cache if requested */
533	if (e->icq_size) {
534		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
535		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
536			return -EINVAL;
537
538		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
539			 "%s_io_cq", e->elevator_name);
540		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
541						 e->icq_align, 0, NULL);
542		if (!e->icq_cache)
543			return -ENOMEM;
544	}
545
546	/* register, don't allow duplicate names */
547	spin_lock(&elv_list_lock);
548	if (elevator_find(e->elevator_name, 0)) {
549		spin_unlock(&elv_list_lock);
550		kmem_cache_destroy(e->icq_cache);
551		return -EBUSY;
552	}
553	list_add_tail(&e->list, &elv_list);
554	spin_unlock(&elv_list_lock);
555
556	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
557
558	return 0;
559}
560EXPORT_SYMBOL_GPL(elv_register);
561
562void elv_unregister(struct elevator_type *e)
563{
564	/* unregister */
565	spin_lock(&elv_list_lock);
566	list_del_init(&e->list);
567	spin_unlock(&elv_list_lock);
568
569	/*
570	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
571	 * sure all RCU operations are complete before proceeding.
572	 */
573	if (e->icq_cache) {
574		rcu_barrier();
575		kmem_cache_destroy(e->icq_cache);
576		e->icq_cache = NULL;
577	}
578}
579EXPORT_SYMBOL_GPL(elv_unregister);
580
581int elevator_switch_mq(struct request_queue *q,
582			      struct elevator_type *new_e)
583{
584	int ret;
585
586	lockdep_assert_held(&q->sysfs_lock);
587
588	if (q->elevator) {
589		if (q->elevator->registered)
590			elv_unregister_queue(q);
591
592		ioc_clear_queue(q);
593		elevator_exit(q, q->elevator);
594	}
595
596	ret = blk_mq_init_sched(q, new_e);
597	if (ret)
598		goto out;
599
600	if (new_e) {
601		ret = elv_register_queue(q, true);
602		if (ret) {
603			elevator_exit(q, q->elevator);
604			goto out;
605		}
606	}
607
608	if (new_e)
609		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
610	else
611		blk_add_trace_msg(q, "elv switch: none");
612
613out:
614	return ret;
615}
616
617static inline bool elv_support_iosched(struct request_queue *q)
618{
619	if (!q->mq_ops ||
620	    (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
621		return false;
622	return true;
623}
624
625/*
626 * For single queue devices, default to using mq-deadline. If we have multiple
627 * queues or mq-deadline is not available, default to "none".
628 */
629static struct elevator_type *elevator_get_default(struct request_queue *q)
630{
631	if (q->nr_hw_queues != 1)
632		return NULL;
633
634	return elevator_get(q, "mq-deadline", false);
635}
636
637/*
638 * Get the first elevator providing the features required by the request queue.
639 * Default to "none" if no matching elevator is found.
640 */
641static struct elevator_type *elevator_get_by_features(struct request_queue *q)
642{
643	struct elevator_type *e, *found = NULL;
644
645	spin_lock(&elv_list_lock);
646
647	list_for_each_entry(e, &elv_list, list) {
648		if (elv_support_features(e->elevator_features,
649					 q->required_elevator_features)) {
650			found = e;
651			break;
652		}
653	}
654
655	if (found && !try_module_get(found->elevator_owner))
656		found = NULL;
657
658	spin_unlock(&elv_list_lock);
659	return found;
660}
661
662/*
663 * For a device queue that has no required features, use the default elevator
664 * settings. Otherwise, use the first elevator available matching the required
 665 * features. If no suitable elevator is found or if the chosen elevator
666 * initialization fails, fall back to the "none" elevator (no elevator).
667 */
668void elevator_init_mq(struct request_queue *q)
669{
670	struct elevator_type *e;
671	int err;
672
673	if (!elv_support_iosched(q))
674		return;
675
676	WARN_ON_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags));
677
678	if (unlikely(q->elevator))
679		return;
680
681	if (!q->required_elevator_features)
682		e = elevator_get_default(q);
683	else
684		e = elevator_get_by_features(q);
685	if (!e)
686		return;
687
688	blk_mq_freeze_queue(q);
689	blk_mq_quiesce_queue(q);
690
691	err = blk_mq_init_sched(q, e);
692
693	blk_mq_unquiesce_queue(q);
694	blk_mq_unfreeze_queue(q);
695
696	if (err) {
697		pr_warn("\"%s\" elevator initialization failed, "
698			"falling back to \"none\"\n", e->elevator_name);
699		elevator_put(e);
700	}
701}
702
703
704/*
 705 * Switch to new_e io scheduler. Be careful not to introduce deadlocks -
 706 * we don't free the old io scheduler before we have allocated what we
 707 * need for the new one. This way we have a chance of going back to the old
 708 * one, if the new one fails init for some reason.
709 */
710static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
711{
712	int err;
713
714	lockdep_assert_held(&q->sysfs_lock);
715
716	blk_mq_freeze_queue(q);
717	blk_mq_quiesce_queue(q);
718
719	err = elevator_switch_mq(q, new_e);
720
721	blk_mq_unquiesce_queue(q);
722	blk_mq_unfreeze_queue(q);
723
724	return err;
725}
726
727/*
728 * Switch this queue to the given IO scheduler.
729 */
730static int __elevator_change(struct request_queue *q, const char *name)
731{
732	char elevator_name[ELV_NAME_MAX];
733	struct elevator_type *e;
734
735	/* Make sure queue is not in the middle of being removed */
736	if (!blk_queue_registered(q))
737		return -ENOENT;
738
739	/*
740	 * Special case for mq, turn off scheduling
741	 */
742	if (!strncmp(name, "none", 4)) {
743		if (!q->elevator)
744			return 0;
745		return elevator_switch(q, NULL);
746	}
747
748	strlcpy(elevator_name, name, sizeof(elevator_name));
749	e = elevator_get(q, strstrip(elevator_name), true);
750	if (!e)
751		return -EINVAL;
752
753	if (q->elevator &&
754	    elevator_match(q->elevator->type, elevator_name, 0)) {
755		elevator_put(e);
756		return 0;
757	}
758
759	return elevator_switch(q, e);
760}
761
762ssize_t elv_iosched_store(struct request_queue *q, const char *name,
763			  size_t count)
764{
765	int ret;
766
767	if (!queue_is_mq(q) || !elv_support_iosched(q))
768		return count;
769
770	ret = __elevator_change(q, name);
771	if (!ret)
772		return count;
773
774	return ret;
775}
776
777ssize_t elv_iosched_show(struct request_queue *q, char *name)
778{
779	struct elevator_queue *e = q->elevator;
780	struct elevator_type *elv = NULL;
781	struct elevator_type *__e;
782	int len = 0;
783
784	if (!queue_is_mq(q))
785		return sprintf(name, "none\n");
786
787	if (!q->elevator)
788		len += sprintf(name+len, "[none] ");
789	else
790		elv = e->type;
791
792	spin_lock(&elv_list_lock);
793	list_for_each_entry(__e, &elv_list, list) {
794		if (elv && elevator_match(elv, __e->elevator_name, 0)) {
795			len += sprintf(name+len, "[%s] ", elv->elevator_name);
796			continue;
797		}
798		if (elv_support_iosched(q) &&
799		    elevator_match(__e, __e->elevator_name,
800				   q->required_elevator_features))
801			len += sprintf(name+len, "%s ", __e->elevator_name);
802	}
803	spin_unlock(&elv_list_lock);
804
805	if (q->elevator)
806		len += sprintf(name+len, "none");
807
808	len += sprintf(len+name, "\n");
809	return len;
810}
811
812struct request *elv_rb_former_request(struct request_queue *q,
813				      struct request *rq)
814{
815	struct rb_node *rbprev = rb_prev(&rq->rb_node);
816
817	if (rbprev)
818		return rb_entry_rq(rbprev);
819
820	return NULL;
821}
822EXPORT_SYMBOL(elv_rb_former_request);
823
824struct request *elv_rb_latter_request(struct request_queue *q,
825				      struct request *rq)
826{
827	struct rb_node *rbnext = rb_next(&rq->rb_node);
828
829	if (rbnext)
830		return rb_entry_rq(rbnext);
831
832	return NULL;
833}
834EXPORT_SYMBOL(elv_rb_latter_request);
835
836static int __init elevator_setup(char *str)
837{
838	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
839		"Please use sysfs to set IO scheduler for individual devices.\n");
840	return 1;
841}
842
843__setup("elevator=", elevator_setup);