v4.17
   1/*
   2 *  Block device elevator/IO-scheduler.
   3 *
   4 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
   5 *
   6 * 30042000 Jens Axboe <axboe@kernel.dk> :
   7 *
   8 * Split the elevator a bit so that it is possible to choose a different
   9 * one or even write a new "plug in". There are three pieces:
  10 * - elevator_fn, inserts a new request in the queue list
  11 * - elevator_merge_fn, decides whether a new buffer can be merged with
  12 *   an existing request
  13 * - elevator_dequeue_fn, called when a request is taken off the active list
  14 *
  15 * 20082000 Dave Jones <davej@suse.de> :
  16 * Removed tests for max-bomb-segments, which was breaking elvtune
  17 *  when run without -bN
  18 *
  19 * Jens:
  20 * - Rework again to work with bio instead of buffer_heads
  21 * - loose bi_dev comparisons, partition handling is right now
  22 * - completely modularize elevator setup and teardown
  23 *
  24 */
  25#include <linux/kernel.h>
  26#include <linux/fs.h>
  27#include <linux/blkdev.h>
  28#include <linux/elevator.h>
  29#include <linux/bio.h>
  30#include <linux/module.h>
  31#include <linux/slab.h>
  32#include <linux/init.h>
  33#include <linux/compiler.h>
 
  34#include <linux/blktrace_api.h>
  35#include <linux/hash.h>
  36#include <linux/uaccess.h>
  37#include <linux/pm_runtime.h>
  38#include <linux/blk-cgroup.h>
  39
  40#include <trace/events/block.h>
  41
  42#include "blk.h"
  43#include "blk-mq-sched.h"
  44#include "blk-wbt.h"
  45
  46static DEFINE_SPINLOCK(elv_list_lock);
  47static LIST_HEAD(elv_list);
  48
  49/*
  50 * Merge hash stuff.
  51 */
  52#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
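/*
 * Requests are hashed on their end sector (start + length), so a bio's
 * start sector can be used to look up a request it could be appended to.
 */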
  53
  54/*
  55 * Query io scheduler to see if the current process issuing bio may be
  56 * merged with rq.
  57 */
  58static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
  59{
  60	struct request_queue *q = rq->q;
  61	struct elevator_queue *e = q->elevator;
  62
  63	if (e->uses_mq && e->type->ops.mq.allow_merge)
  64		return e->type->ops.mq.allow_merge(q, rq, bio);
  65	else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
  66		return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);
  67
  68	return 1;
  69}
  70
  71/*
  72 * can we safely merge with this request?
  73 */
  74bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
  75{
  76	if (!blk_rq_merge_ok(rq, bio))
  77		return false;
  78
  79	if (!elv_iosched_allow_bio_merge(rq, bio))
  80		return false;
  81
  82	return true;
  83}
  84EXPORT_SYMBOL(elv_bio_merge_ok);
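/*
 * An elevator matches if @name equals either its canonical elevator_name
 * or its optional elevator_alias.
 */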
  85
  86static bool elevator_match(const struct elevator_type *e, const char *name)
  87{
  88	if (!strcmp(e->elevator_name, name))
  89		return true;
  90	if (e->elevator_alias && !strcmp(e->elevator_alias, name))
  91		return true;
  92
  93	return false;
  94}
  95
  96/*
   97 * Return the scheduler with name 'name' and with matching 'mq' capability
  98 */
  99static struct elevator_type *elevator_find(const char *name, bool mq)
 100{
 101	struct elevator_type *e;
 102
 103	list_for_each_entry(e, &elv_list, list) {
 104		if (elevator_match(e, name) && (mq == e->uses_mq))
 105			return e;
 106	}
 107
 108	return NULL;
 109}
 110
 111static void elevator_put(struct elevator_type *e)
 112{
 113	module_put(e->elevator_owner);
 114}
 115
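/*
 * Look up an elevator type by name, optionally loading its module, and take
 * a reference on the owning module.  Callers drop it again via elevator_put().
 */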
 116static struct elevator_type *elevator_get(struct request_queue *q,
 117					  const char *name, bool try_loading)
 118{
 119	struct elevator_type *e;
 120
 121	spin_lock(&elv_list_lock);
 122
 123	e = elevator_find(name, q->mq_ops != NULL);
 124	if (!e && try_loading) {
 125		spin_unlock(&elv_list_lock);
 126		request_module("%s-iosched", name);
 127		spin_lock(&elv_list_lock);
 128		e = elevator_find(name, q->mq_ops != NULL);
 129	}
 130
 131	if (e && !try_module_get(e->elevator_owner))
 132		e = NULL;
 133
 134	spin_unlock(&elv_list_lock);
 
 135	return e;
 136}
 137
 138static char chosen_elevator[ELV_NAME_MAX];
 139
 140static int __init elevator_setup(char *str)
 141{
 142	/*
 143	 * Be backwards-compatible with previous kernels, so users
 144	 * won't get the wrong elevator.
 145	 */
 146	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
 147	return 1;
 148}
 149
 150__setup("elevator=", elevator_setup);
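/*
 * Example (legacy request_fn queues only): booting with "elevator=deadline"
 * records "deadline" here, and elevator_init() below uses it as the default
 * scheduler for non-mq queues.
 */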
 151
 152/* called during boot to load the elevator chosen by the elevator param */
 153void __init load_default_elevator_module(void)
 154{
 155	struct elevator_type *e;
 156
 157	if (!chosen_elevator[0])
 158		return;
 159
 160	/*
  161	 * The boot parameter is deprecated; it has never been supported for MQ.
 162	 * Only look for non-mq schedulers from here.
 163	 */
 164	spin_lock(&elv_list_lock);
 165	e = elevator_find(chosen_elevator, false);
 166	spin_unlock(&elv_list_lock);
 167
 168	if (!e)
 169		request_module("%s-iosched", chosen_elevator);
 170}
 171
 172static struct kobj_type elv_ktype;
 173
 174struct elevator_queue *elevator_alloc(struct request_queue *q,
 175				  struct elevator_type *e)
 176{
 177	struct elevator_queue *eq;
 
 178
 179	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
 180	if (unlikely(!eq))
 181		return NULL;
 182
 183	eq->type = e;
 
 184	kobject_init(&eq->kobj, &elv_ktype);
 185	mutex_init(&eq->sysfs_lock);
 186	hash_init(eq->hash);
 187	eq->uses_mq = e->uses_mq;
 188
 189	return eq;
 190}
 191EXPORT_SYMBOL(elevator_alloc);
 192
 193static void elevator_release(struct kobject *kobj)
 194{
 195	struct elevator_queue *e;
 196
 197	e = container_of(kobj, struct elevator_queue, kobj);
 198	elevator_put(e->type);
 
 199	kfree(e);
 200}
 201
 202int elevator_init(struct request_queue *q, char *name)
 203{
 204	struct elevator_type *e = NULL;
 205	int err;
 206
 207	/*
 208	 * q->sysfs_lock must be held to provide mutual exclusion between
 209	 * elevator_switch() and here.
 210	 */
 211	lockdep_assert_held(&q->sysfs_lock);
 212
 213	if (unlikely(q->elevator))
 214		return 0;
 215
 216	INIT_LIST_HEAD(&q->queue_head);
 217	q->last_merge = NULL;
 218	q->end_sector = 0;
 219	q->boundary_rq = NULL;
 220
 221	if (name) {
 222		e = elevator_get(q, name, true);
 223		if (!e)
 224			return -EINVAL;
 225	}
 226
 227	/*
  228	 * Use the default elevator specified by the boot parameter for
  229	 * non-mq devices, or the one selected by config option. Don't try to
  230	 * load modules, as we could be running off an async path and
  231	 * request_module() isn't allowed from async context.
 232	 */
 233	if (!e && !q->mq_ops && *chosen_elevator) {
 234		e = elevator_get(q, chosen_elevator, false);
 235		if (!e)
 236			printk(KERN_ERR "I/O scheduler %s not found\n",
 237							chosen_elevator);
 238	}
 239
 240	if (!e) {
 241		/*
 242		 * For blk-mq devices, we default to using mq-deadline,
 243		 * if available, for single queue devices. If deadline
 244		 * isn't available OR we have multiple queues, default
 245		 * to "none".
 246		 */
 247		if (q->mq_ops) {
 248			if (q->nr_hw_queues == 1)
 249				e = elevator_get(q, "mq-deadline", false);
 250			if (!e)
 251				return 0;
 252		} else
 253			e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
 254
 255		if (!e) {
 256			printk(KERN_ERR
 257				"Default I/O scheduler not found. " \
 258				"Using noop.\n");
 259			e = elevator_get(q, "noop", false);
 260		}
 261	}
 262
 263	if (e->uses_mq)
 264		err = blk_mq_init_sched(q, e);
 265	else
 266		err = e->ops.sq.elevator_init_fn(q, e);
 267	if (err)
 268		elevator_put(e);
 269	return err;
 270}
 271EXPORT_SYMBOL(elevator_init);
 272
 273void elevator_exit(struct request_queue *q, struct elevator_queue *e)
 274{
 275	mutex_lock(&e->sysfs_lock);
 276	if (e->uses_mq && e->type->ops.mq.exit_sched)
 277		blk_mq_exit_sched(q, e);
 278	else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
 279		e->type->ops.sq.elevator_exit_fn(e);
 280	mutex_unlock(&e->sysfs_lock);
 281
 282	kobject_put(&e->kobj);
 283}
 284EXPORT_SYMBOL(elevator_exit);
 285
 286static inline void __elv_rqhash_del(struct request *rq)
 287{
 288	hash_del(&rq->hash);
 289	rq->rq_flags &= ~RQF_HASHED;
 290}
 291
 292void elv_rqhash_del(struct request_queue *q, struct request *rq)
 293{
 294	if (ELV_ON_HASH(rq))
 295		__elv_rqhash_del(rq);
 296}
 297EXPORT_SYMBOL_GPL(elv_rqhash_del);
 298
 299void elv_rqhash_add(struct request_queue *q, struct request *rq)
 300{
 301	struct elevator_queue *e = q->elevator;
 302
 303	BUG_ON(ELV_ON_HASH(rq));
 304	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
 305	rq->rq_flags |= RQF_HASHED;
 306}
 307EXPORT_SYMBOL_GPL(elv_rqhash_add);
 308
 309void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
 310{
 311	__elv_rqhash_del(rq);
 312	elv_rqhash_add(q, rq);
 313}
 314
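/*
 * Find a hashed request whose end sector equals @offset, pruning entries
 * that are no longer mergeable while walking the bucket.
 */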
 315struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 316{
 317	struct elevator_queue *e = q->elevator;
 318	struct hlist_node *next;
 
 319	struct request *rq;
 320
 321	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
 322		BUG_ON(!ELV_ON_HASH(rq));
 323
 324		if (unlikely(!rq_mergeable(rq))) {
 325			__elv_rqhash_del(rq);
 326			continue;
 327		}
 328
 329		if (rq_hash_key(rq) == offset)
 330			return rq;
 331	}
 332
 333	return NULL;
 334}
 335
 336/*
 337 * RB-tree support functions for inserting/lookup/removal of requests
 338 * in a sorted RB tree.
 339 */
 340void elv_rb_add(struct rb_root *root, struct request *rq)
 341{
 342	struct rb_node **p = &root->rb_node;
 343	struct rb_node *parent = NULL;
 344	struct request *__rq;
 345
 346	while (*p) {
 347		parent = *p;
 348		__rq = rb_entry(parent, struct request, rb_node);
 349
 350		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
 351			p = &(*p)->rb_left;
 352		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
 353			p = &(*p)->rb_right;
 354	}
 355
 356	rb_link_node(&rq->rb_node, parent, p);
 357	rb_insert_color(&rq->rb_node, root);
 358}
 359EXPORT_SYMBOL(elv_rb_add);
 360
 361void elv_rb_del(struct rb_root *root, struct request *rq)
 362{
 363	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
 364	rb_erase(&rq->rb_node, root);
 365	RB_CLEAR_NODE(&rq->rb_node);
 366}
 367EXPORT_SYMBOL(elv_rb_del);
 368
 369struct request *elv_rb_find(struct rb_root *root, sector_t sector)
 370{
 371	struct rb_node *n = root->rb_node;
 372	struct request *rq;
 373
 374	while (n) {
 375		rq = rb_entry(n, struct request, rb_node);
 376
 377		if (sector < blk_rq_pos(rq))
 378			n = n->rb_left;
 379		else if (sector > blk_rq_pos(rq))
 380			n = n->rb_right;
 381		else
 382			return rq;
 383	}
 384
 385	return NULL;
 386}
 387EXPORT_SYMBOL(elv_rb_find);
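/*
 * Usage sketch (not part of this file): an I/O scheduler typically keeps a
 * per-queue RB tree sorted by sector and lets the core hash requests for
 * back-merge lookups.  "example_data" and "sort_list" below are hypothetical
 * names used only for illustration.
 */
#if 0
static void example_add_request(struct request_queue *q, struct request *rq)
{
	struct example_data *ed = q->elevator->elevator_data;

	/* keyed by blk_rq_pos(rq); front-merge candidates come from here */
	elv_rb_add(&ed->sort_list, rq);
	/* keyed by the end sector; back-merge candidates come from here */
	elv_rqhash_add(q, rq);
}
#endif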
 388
 389/*
 390 * Insert rq into dispatch queue of q.  Queue lock must be held on
  391 * entry.  rq is sorted into the dispatch queue. To be used by
 392 * specific elevators.
 393 */
 394void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 395{
 396	sector_t boundary;
 397	struct list_head *entry;
 
 398
 399	if (q->last_merge == rq)
 400		q->last_merge = NULL;
 401
 402	elv_rqhash_del(q, rq);
 403
 404	q->nr_sorted--;
 405
 406	boundary = q->end_sector;
 
 407	list_for_each_prev(entry, &q->queue_head) {
 408		struct request *pos = list_entry_rq(entry);
 409
 410		if (req_op(rq) != req_op(pos))
 
 411			break;
 412		if (rq_data_dir(rq) != rq_data_dir(pos))
 413			break;
 414		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
 415			break;
 416		if (blk_rq_pos(rq) >= boundary) {
 417			if (blk_rq_pos(pos) < boundary)
 418				continue;
 419		} else {
 420			if (blk_rq_pos(pos) >= boundary)
 421				break;
 422		}
 423		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
 424			break;
 425	}
 426
 427	list_add(&rq->queuelist, entry);
 428}
 429EXPORT_SYMBOL(elv_dispatch_sort);
 430
 431/*
 432 * Insert rq into dispatch queue of q.  Queue lock must be held on
 433 * entry.  rq is added to the back of the dispatch queue. To be used by
 434 * specific elevators.
 435 */
 436void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
 437{
 438	if (q->last_merge == rq)
 439		q->last_merge = NULL;
 440
 441	elv_rqhash_del(q, rq);
 442
 443	q->nr_sorted--;
 444
 445	q->end_sector = rq_end_sector(rq);
 446	q->boundary_rq = rq;
 447	list_add_tail(&rq->queuelist, &q->queue_head);
 448}
 449EXPORT_SYMBOL(elv_dispatch_add_tail);
 450
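/*
 * Work out whether @bio can be merged into an existing request: first the
 * one-hit q->last_merge cache, then the back-merge hash, then the scheduler's
 * own merge hook.  On success *req points at the request to merge into and
 * the return value says whether it is a front or back merge.
 */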
 451enum elv_merge elv_merge(struct request_queue *q, struct request **req,
 452		struct bio *bio)
 453{
 454	struct elevator_queue *e = q->elevator;
 455	struct request *__rq;
 
 456
 457	/*
 458	 * Levels of merges:
 459	 * 	nomerges:  No merges at all attempted
 460	 * 	noxmerges: Only simple one-hit cache try
 461	 * 	merges:	   All merge tries attempted
 462	 */
 463	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
 464		return ELEVATOR_NO_MERGE;
 465
 466	/*
 467	 * First try one-hit cache.
 468	 */
 469	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
 470		enum elv_merge ret = blk_try_merge(q->last_merge, bio);
 471
 472		if (ret != ELEVATOR_NO_MERGE) {
 473			*req = q->last_merge;
 474			return ret;
 475		}
 476	}
 477
 478	if (blk_queue_noxmerges(q))
 479		return ELEVATOR_NO_MERGE;
 480
 481	/*
 482	 * See if our hash lookup can find a potential backmerge.
 483	 */
 484	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
 485	if (__rq && elv_bio_merge_ok(__rq, bio)) {
 486		*req = __rq;
 487		return ELEVATOR_BACK_MERGE;
 488	}
 489
 490	if (e->uses_mq && e->type->ops.mq.request_merge)
 491		return e->type->ops.mq.request_merge(q, req, bio);
 492	else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
 493		return e->type->ops.sq.elevator_merge_fn(q, req, bio);
 494
 495	return ELEVATOR_NO_MERGE;
 496}
 497
 498/*
 499 * Attempt to do an insertion back merge. Only check for the case where
 500 * we can append 'rq' to an existing request, so we can throw 'rq' away
 501 * afterwards.
 502 *
 503 * Returns true if we merged, false otherwise
 504 */
 505bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
 
 506{
 507	struct request *__rq;
 508	bool ret;
 509
 510	if (blk_queue_nomerges(q))
 511		return false;
 512
 513	/*
 514	 * First try one-hit cache.
 515	 */
 516	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
 517		return true;
 518
 519	if (blk_queue_noxmerges(q))
 520		return false;
 521
 522	ret = false;
 523	/*
 524	 * See if our hash lookup can find a potential backmerge.
 525	 */
 526	while (1) {
 527		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
 528		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
 529			break;
 530
 531		/* The merged request could be merged with others, try again */
 532		ret = true;
 533		rq = __rq;
 534	}
 535
 536	return ret;
 537}
 538
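/*
 * Notify the scheduler that @bio was merged into @rq.  A back merge changes
 * the request's end sector, so the request must be repositioned in the hash.
 */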
 539void elv_merged_request(struct request_queue *q, struct request *rq,
 540		enum elv_merge type)
 541{
 542	struct elevator_queue *e = q->elevator;
 543
 544	if (e->uses_mq && e->type->ops.mq.request_merged)
 545		e->type->ops.mq.request_merged(q, rq, type);
 546	else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
 547		e->type->ops.sq.elevator_merged_fn(q, rq, type);
 548
 549	if (type == ELEVATOR_BACK_MERGE)
 550		elv_rqhash_reposition(q, rq);
 551
 552	q->last_merge = rq;
 553}
 554
 555void elv_merge_requests(struct request_queue *q, struct request *rq,
 556			     struct request *next)
 557{
 558	struct elevator_queue *e = q->elevator;
 559	bool next_sorted = false;
 560
 561	if (e->uses_mq && e->type->ops.mq.requests_merged)
 562		e->type->ops.mq.requests_merged(q, rq, next);
 563	else if (e->type->ops.sq.elevator_merge_req_fn) {
 564		next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
 565		if (next_sorted)
 566			e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
 567	}
 568
 569	elv_rqhash_reposition(q, rq);
 570
 571	if (next_sorted) {
 572		elv_rqhash_del(q, next);
 573		q->nr_sorted--;
 574	}
 575
 576	q->last_merge = rq;
 577}
 578
 579void elv_bio_merged(struct request_queue *q, struct request *rq,
 580			struct bio *bio)
 581{
 582	struct elevator_queue *e = q->elevator;
 583
 584	if (WARN_ON_ONCE(e->uses_mq))
 585		return;
 586
 587	if (e->type->ops.sq.elevator_bio_merged_fn)
 588		e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
 589}
 590
 591#ifdef CONFIG_PM
 592static void blk_pm_requeue_request(struct request *rq)
 593{
 594	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 595		rq->q->nr_pending--;
 596}
 597
 598static void blk_pm_add_request(struct request_queue *q, struct request *rq)
 599{
 600	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
 601	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
 602		pm_request_resume(q->dev);
 603}
 604#else
 605static inline void blk_pm_requeue_request(struct request *rq) {}
 606static inline void blk_pm_add_request(struct request_queue *q,
 607				      struct request *rq)
 608{
 609}
 610#endif
 611
 612void elv_requeue_request(struct request_queue *q, struct request *rq)
 613{
 614	/*
 615	 * it already went through dequeue, we need to decrement the
 616	 * in_flight count again
 617	 */
 618	if (blk_account_rq(rq)) {
 619		q->in_flight[rq_is_sync(rq)]--;
 620		if (rq->rq_flags & RQF_SORTED)
 621			elv_deactivate_rq(q, rq);
 622	}
 623
 624	rq->rq_flags &= ~RQF_STARTED;
 625
 626	blk_pm_requeue_request(rq);
 627
 628	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
 629}
 630
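/*
 * Force the legacy scheduler to dispatch everything it is holding back.
 * Used before ELEVATOR_INSERT_BACK insertions (see __elv_add_request() below)
 * and when draining a queue.
 */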
 631void elv_drain_elevator(struct request_queue *q)
 632{
 633	struct elevator_queue *e = q->elevator;
 634	static int printed;
 635
 636	if (WARN_ON_ONCE(e->uses_mq))
 637		return;
 638
 639	lockdep_assert_held(q->queue_lock);
 640
 641	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
 642		;
 643	if (q->nr_sorted && printed++ < 10) {
 644		printk(KERN_ERR "%s: forced dispatching is broken "
 645		       "(nr_sorted=%u), please report this\n",
 646		       q->elevator->type->elevator_name, q->nr_sorted);
 647	}
 648}
 649
 650void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 651{
 652	trace_block_rq_insert(q, rq);
 653
 654	blk_pm_add_request(q, rq);
 655
 656	rq->q = q;
 657
 658	if (rq->rq_flags & RQF_SOFTBARRIER) {
 659		/* barriers are scheduling boundary, update end_sector */
 660		if (!blk_rq_is_passthrough(rq)) {
 
 661			q->end_sector = rq_end_sector(rq);
 662			q->boundary_rq = rq;
 663		}
 664	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
 665		    (where == ELEVATOR_INSERT_SORT ||
 666		     where == ELEVATOR_INSERT_SORT_MERGE))
 667		where = ELEVATOR_INSERT_BACK;
 668
 669	switch (where) {
 670	case ELEVATOR_INSERT_REQUEUE:
 671	case ELEVATOR_INSERT_FRONT:
 672		rq->rq_flags |= RQF_SOFTBARRIER;
 673		list_add(&rq->queuelist, &q->queue_head);
 674		break;
 675
 676	case ELEVATOR_INSERT_BACK:
 677		rq->rq_flags |= RQF_SOFTBARRIER;
 678		elv_drain_elevator(q);
 679		list_add_tail(&rq->queuelist, &q->queue_head);
 680		/*
 681		 * We kick the queue here for the following reasons.
 682		 * - The elevator might have returned NULL previously
 683		 *   to delay requests and returned them now.  As the
 684		 *   queue wasn't empty before this request, ll_rw_blk
 685		 *   won't run the queue on return, resulting in hang.
 686		 * - Usually, back inserted requests won't be merged
 687		 *   with anything.  There's no point in delaying queue
 688		 *   processing.
 689		 */
 690		__blk_run_queue(q);
 691		break;
 692
 693	case ELEVATOR_INSERT_SORT_MERGE:
 694		/*
 695		 * If we succeed in merging this request with one in the
 696		 * queue already, we are done - rq has now been freed,
 697		 * so no need to do anything further.
 698		 */
 699		if (elv_attempt_insert_merge(q, rq))
 700			break;
 701		/* fall through */
 702	case ELEVATOR_INSERT_SORT:
 703		BUG_ON(blk_rq_is_passthrough(rq));
 704		rq->rq_flags |= RQF_SORTED;
 
 705		q->nr_sorted++;
 706		if (rq_mergeable(rq)) {
 707			elv_rqhash_add(q, rq);
 708			if (!q->last_merge)
 709				q->last_merge = rq;
 710		}
 711
 712		/*
 713		 * Some ioscheds (cfq) run q->request_fn directly, so
 714		 * rq cannot be accessed after calling
 715		 * elevator_add_req_fn.
 716		 */
 717		q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
 718		break;
 719
 720	case ELEVATOR_INSERT_FLUSH:
 721		rq->rq_flags |= RQF_SOFTBARRIER;
 722		blk_insert_flush(rq);
 723		break;
 724	default:
 725		printk(KERN_ERR "%s: bad insertion point %d\n",
 726		       __func__, where);
 727		BUG();
 728	}
 729}
 730EXPORT_SYMBOL(__elv_add_request);
 731
 732void elv_add_request(struct request_queue *q, struct request *rq, int where)
 733{
 734	unsigned long flags;
 735
 736	spin_lock_irqsave(q->queue_lock, flags);
 737	__elv_add_request(q, rq, where);
 738	spin_unlock_irqrestore(q->queue_lock, flags);
 739}
 740EXPORT_SYMBOL(elv_add_request);
 741
 742struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 743{
 744	struct elevator_queue *e = q->elevator;
 745
 746	if (e->uses_mq && e->type->ops.mq.next_request)
 747		return e->type->ops.mq.next_request(q, rq);
 748	else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
 749		return e->type->ops.sq.elevator_latter_req_fn(q, rq);
 750
 751	return NULL;
 752}
 753
 754struct request *elv_former_request(struct request_queue *q, struct request *rq)
 755{
 756	struct elevator_queue *e = q->elevator;
 757
 758	if (e->uses_mq && e->type->ops.mq.former_request)
 759		return e->type->ops.mq.former_request(q, rq);
 760	if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
 761		return e->type->ops.sq.elevator_former_req_fn(q, rq);
 762	return NULL;
 763}
 764
 765int elv_set_request(struct request_queue *q, struct request *rq,
 766		    struct bio *bio, gfp_t gfp_mask)
 767{
 768	struct elevator_queue *e = q->elevator;
 769
 770	if (WARN_ON_ONCE(e->uses_mq))
 771		return 0;
 772
 773	if (e->type->ops.sq.elevator_set_req_fn)
 774		return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
 775	return 0;
 776}
 777
 778void elv_put_request(struct request_queue *q, struct request *rq)
 779{
 780	struct elevator_queue *e = q->elevator;
 781
 782	if (WARN_ON_ONCE(e->uses_mq))
 783		return;
 784
 785	if (e->type->ops.sq.elevator_put_req_fn)
 786		e->type->ops.sq.elevator_put_req_fn(rq);
 787}
 788
 789int elv_may_queue(struct request_queue *q, unsigned int op)
 790{
 791	struct elevator_queue *e = q->elevator;
 792
 793	if (WARN_ON_ONCE(e->uses_mq))
 794		return 0;
 795
 796	if (e->type->ops.sq.elevator_may_queue_fn)
 797		return e->type->ops.sq.elevator_may_queue_fn(q, op);
 798
 799	return ELV_MQUEUE_MAY;
 800}
 801
 802void elv_completed_request(struct request_queue *q, struct request *rq)
 803{
 804	struct elevator_queue *e = q->elevator;
 805
 806	if (WARN_ON_ONCE(e->uses_mq))
 807		return;
 808
 809	/*
 810	 * request is released from the driver, io must be done
 811	 */
 812	if (blk_account_rq(rq)) {
 813		q->in_flight[rq_is_sync(rq)]--;
 814		if ((rq->rq_flags & RQF_SORTED) &&
 815		    e->type->ops.sq.elevator_completed_req_fn)
 816			e->type->ops.sq.elevator_completed_req_fn(q, rq);
 817	}
 818}
 819
 820#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
 821
 822static ssize_t
 823elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 824{
 825	struct elv_fs_entry *entry = to_elv(attr);
 826	struct elevator_queue *e;
 827	ssize_t error;
 828
 829	if (!entry->show)
 830		return -EIO;
 831
 832	e = container_of(kobj, struct elevator_queue, kobj);
 833	mutex_lock(&e->sysfs_lock);
 834	error = e->type ? entry->show(e, page) : -ENOENT;
 835	mutex_unlock(&e->sysfs_lock);
 836	return error;
 837}
 838
 839static ssize_t
 840elv_attr_store(struct kobject *kobj, struct attribute *attr,
 841	       const char *page, size_t length)
 842{
 843	struct elv_fs_entry *entry = to_elv(attr);
 844	struct elevator_queue *e;
 845	ssize_t error;
 846
 847	if (!entry->store)
 848		return -EIO;
 849
 850	e = container_of(kobj, struct elevator_queue, kobj);
 851	mutex_lock(&e->sysfs_lock);
 852	error = e->type ? entry->store(e, page, length) : -ENOENT;
 853	mutex_unlock(&e->sysfs_lock);
 854	return error;
 855}
 856
 857static const struct sysfs_ops elv_sysfs_ops = {
 858	.show	= elv_attr_show,
 859	.store	= elv_attr_store,
 860};
 861
 862static struct kobj_type elv_ktype = {
 863	.sysfs_ops	= &elv_sysfs_ops,
 864	.release	= elevator_release,
 865};
 866
 867int elv_register_queue(struct request_queue *q)
 868{
 869	struct elevator_queue *e = q->elevator;
 870	int error;
 871
 872	lockdep_assert_held(&q->sysfs_lock);
 873
 874	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
 875	if (!error) {
 876		struct elv_fs_entry *attr = e->type->elevator_attrs;
 877		if (attr) {
 878			while (attr->attr.name) {
 879				if (sysfs_create_file(&e->kobj, &attr->attr))
 880					break;
 881				attr++;
 882			}
 883		}
 884		kobject_uevent(&e->kobj, KOBJ_ADD);
 885		e->registered = 1;
 886		if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
 887			e->type->ops.sq.elevator_registered_fn(q);
 888	}
 889	return error;
 890}
 
 891
 892void elv_unregister_queue(struct request_queue *q)
 893{
 894	lockdep_assert_held(&q->sysfs_lock);
 895
 896	if (q) {
 897		struct elevator_queue *e = q->elevator;
 898
 899		kobject_uevent(&e->kobj, KOBJ_REMOVE);
 900		kobject_del(&e->kobj);
 901		e->registered = 0;
 902		/* Re-enable throttling in case elevator disabled it */
 903		wbt_enable_default(q);
 904	}
 905}
 
 906
 907int elv_register(struct elevator_type *e)
 908{
 909	char *def = "";
 910
 911	/* create icq_cache if requested */
 912	if (e->icq_size) {
 913		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
 914		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
 915			return -EINVAL;
 916
 917		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
 918			 "%s_io_cq", e->elevator_name);
 919		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
 920						 e->icq_align, 0, NULL);
 921		if (!e->icq_cache)
 922			return -ENOMEM;
 923	}
 924
 925	/* register, don't allow duplicate names */
 926	spin_lock(&elv_list_lock);
 927	if (elevator_find(e->elevator_name, e->uses_mq)) {
 928		spin_unlock(&elv_list_lock);
 929		if (e->icq_cache)
 930			kmem_cache_destroy(e->icq_cache);
 931		return -EBUSY;
 932	}
 933	list_add_tail(&e->list, &elv_list);
 934	spin_unlock(&elv_list_lock);
 935
 936	/* print pretty message */
 937	if (elevator_match(e, chosen_elevator) ||
 938			(!*chosen_elevator &&
 939			 elevator_match(e, CONFIG_DEFAULT_IOSCHED)))
 940				def = " (default)";
 941
 942	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
 943								def);
 944	return 0;
 945}
 946EXPORT_SYMBOL_GPL(elv_register);
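/*
 * Registration sketch (not from this file): a scheduler module fills in a
 * struct elevator_type and registers it from its module init.  The structure
 * below is abbreviated; see a real scheduler such as noop-iosched.c for the
 * full set of fields.
 */
#if 0
static struct elevator_type elevator_example = {
	.ops.sq = {
		/* .elevator_init_fn, .elevator_add_req_fn, ... */
	},
	.elevator_name = "example",
	.elevator_owner = THIS_MODULE,
};

static int __init example_init(void)
{
	return elv_register(&elevator_example);
}

static void __exit example_exit(void)
{
	elv_unregister(&elevator_example);
}
#endif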
 947
 948void elv_unregister(struct elevator_type *e)
 949{
 950	/* unregister */
 951	spin_lock(&elv_list_lock);
 952	list_del_init(&e->list);
 953	spin_unlock(&elv_list_lock);
 954
 955	/*
 956	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
 957	 * sure all RCU operations are complete before proceeding.
 958	 */
 959	if (e->icq_cache) {
 960		rcu_barrier();
 961		kmem_cache_destroy(e->icq_cache);
 962		e->icq_cache = NULL;
 963	}
 964}
 965EXPORT_SYMBOL_GPL(elv_unregister);
 966
 967static int elevator_switch_mq(struct request_queue *q,
 968			      struct elevator_type *new_e)
 969{
 970	int ret;
 971
 972	lockdep_assert_held(&q->sysfs_lock);
 973
 974	blk_mq_freeze_queue(q);
 975	blk_mq_quiesce_queue(q);
 976
 977	if (q->elevator) {
 978		if (q->elevator->registered)
 979			elv_unregister_queue(q);
 980		ioc_clear_queue(q);
 981		elevator_exit(q, q->elevator);
 982	}
 983
 984	ret = blk_mq_init_sched(q, new_e);
 985	if (ret)
 986		goto out;
 987
 988	if (new_e) {
 989		ret = elv_register_queue(q);
 990		if (ret) {
 991			elevator_exit(q, q->elevator);
 992			goto out;
 993		}
 994	}
 995
 996	if (new_e)
 997		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
 998	else
 999		blk_add_trace_msg(q, "elv switch: none");
1000
1001out:
1002	blk_mq_unquiesce_queue(q);
1003	blk_mq_unfreeze_queue(q);
1004	return ret;
1005}
 
1006
1007/*
1008 * Switch to the new_e io scheduler. Be careful not to introduce deadlocks:
1009 * we don't free the old io scheduler before we have allocated what we
1010 * need for the new one. This way we have a chance of going back to the old
1011 * one if the new one fails init for some reason.
1012 */
1013static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1014{
1015	struct elevator_queue *old = q->elevator;
1016	bool old_registered = false;
1017	int err;
1018
1019	lockdep_assert_held(&q->sysfs_lock);
1020
1021	if (q->mq_ops)
1022		return elevator_switch_mq(q, new_e);
1023
1024	/*
1025	 * Turn on BYPASS and drain all requests w/ elevator private data.
1026	 * Block layer doesn't call into a quiesced elevator - all requests
1027	 * are directly put on the dispatch list without elevator data
1028	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
1029	 * merge happens either.
1030	 */
1031	if (old) {
1032		old_registered = old->registered;
1033
1034		blk_queue_bypass_start(q);
1035
1036		/* unregister and clear all auxiliary data of the old elevator */
1037		if (old_registered)
1038			elv_unregister_queue(q);
 
1039
1040		ioc_clear_queue(q);
1041	}
 
 
1042
1043	/* allocate, init and register new elevator */
1044	err = new_e->ops.sq.elevator_init_fn(q, new_e);
1045	if (err)
1046		goto fail_init;
1047
1048	err = elv_register_queue(q);
1049	if (err)
1050		goto fail_register;
1051
1052	/* done, kill the old one and finish */
1053	if (old) {
1054		elevator_exit(q, old);
1055		blk_queue_bypass_end(q);
1056	}
1057
1058	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
1059
1060	return 0;
1061
1062fail_register:
1063	elevator_exit(q, q->elevator);
1064fail_init:
1065	/* switch failed, restore and re-register old elevator */
1066	if (old) {
1067		q->elevator = old;
1068		elv_register_queue(q);
1069		blk_queue_bypass_end(q);
1070	}
1071
1072	return err;
1073}
1074
1075/*
1076 * Switch this queue to the given IO scheduler.
1077 */
1078static int __elevator_change(struct request_queue *q, const char *name)
1079{
1080	char elevator_name[ELV_NAME_MAX];
1081	struct elevator_type *e;
1082
1083	/* Make sure queue is not in the middle of being removed */
1084	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
1085		return -ENOENT;
1086
1087	/*
1088	 * Special case for mq, turn off scheduling
1089	 */
1090	if (q->mq_ops && !strncmp(name, "none", 4))
1091		return elevator_switch(q, NULL);
1092
1093	strlcpy(elevator_name, name, sizeof(elevator_name));
1094	e = elevator_get(q, strstrip(elevator_name), true);
1095	if (!e)
 
1096		return -EINVAL;
 
1097
1098	if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
1099		elevator_put(e);
1100		return 0;
1101	}
1102
1103	return elevator_switch(q, e);
1104}
1105
1106static inline bool elv_support_iosched(struct request_queue *q)
1107{
1108	if (q->mq_ops && q->tag_set && (q->tag_set->flags &
1109				BLK_MQ_F_NO_SCHED))
1110		return false;
1111	return true;
1112}
1113
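/*
 * sysfs interface: writing a scheduler name to
 * /sys/block/<dev>/queue/scheduler ends up here and triggers the switch;
 * reading it lists the registered schedulers with the active one in brackets.
 */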
1114ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1115			  size_t count)
1116{
1117	int ret;
1118
1119	if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
1120		return count;
1121
1122	ret = __elevator_change(q, name);
1123	if (!ret)
1124		return count;
1125
 
1126	return ret;
1127}
1128
1129ssize_t elv_iosched_show(struct request_queue *q, char *name)
1130{
1131	struct elevator_queue *e = q->elevator;
1132	struct elevator_type *elv = NULL;
1133	struct elevator_type *__e;
1134	bool uses_mq = q->mq_ops != NULL;
1135	int len = 0;
1136
1137	if (!queue_is_rq_based(q))
1138		return sprintf(name, "none\n");
1139
1140	if (!q->elevator)
1141		len += sprintf(name+len, "[none] ");
1142	else
1143		elv = e->type;
1144
1145	spin_lock(&elv_list_lock);
1146	list_for_each_entry(__e, &elv_list, list) {
1147		if (elv && elevator_match(elv, __e->elevator_name) &&
1148		    (__e->uses_mq == uses_mq)) {
1149			len += sprintf(name+len, "[%s] ", elv->elevator_name);
1150			continue;
1151		}
1152		if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
1153			len += sprintf(name+len, "%s ", __e->elevator_name);
1154		else if (!__e->uses_mq && !q->mq_ops)
1155			len += sprintf(name+len, "%s ", __e->elevator_name);
1156	}
1157	spin_unlock(&elv_list_lock);
1158
1159	if (q->mq_ops && q->elevator)
1160		len += sprintf(name+len, "none");
1161
1162	len += sprintf(len+name, "\n");
1163	return len;
1164}
1165
1166struct request *elv_rb_former_request(struct request_queue *q,
1167				      struct request *rq)
1168{
1169	struct rb_node *rbprev = rb_prev(&rq->rb_node);
1170
1171	if (rbprev)
1172		return rb_entry_rq(rbprev);
1173
1174	return NULL;
1175}
1176EXPORT_SYMBOL(elv_rb_former_request);
1177
1178struct request *elv_rb_latter_request(struct request_queue *q,
1179				      struct request *rq)
1180{
1181	struct rb_node *rbnext = rb_next(&rq->rb_node);
1182
1183	if (rbnext)
1184		return rb_entry_rq(rbnext);
1185
1186	return NULL;
1187}
1188EXPORT_SYMBOL(elv_rb_latter_request);
v3.1
   1/*
   2 *  Block device elevator/IO-scheduler.
   3 *
   4 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
   5 *
   6 * 30042000 Jens Axboe <axboe@kernel.dk> :
   7 *
   8 * Split the elevator a bit so that it is possible to choose a different
   9 * one or even write a new "plug in". There are three pieces:
  10 * - elevator_fn, inserts a new request in the queue list
  11 * - elevator_merge_fn, decides whether a new buffer can be merged with
  12 *   an existing request
  13 * - elevator_dequeue_fn, called when a request is taken off the active list
  14 *
  15 * 20082000 Dave Jones <davej@suse.de> :
  16 * Removed tests for max-bomb-segments, which was breaking elvtune
  17 *  when run without -bN
  18 *
  19 * Jens:
  20 * - Rework again to work with bio instead of buffer_heads
  21 * - loose bi_dev comparisons, partition handling is right now
  22 * - completely modularize elevator setup and teardown
  23 *
  24 */
  25#include <linux/kernel.h>
  26#include <linux/fs.h>
  27#include <linux/blkdev.h>
  28#include <linux/elevator.h>
  29#include <linux/bio.h>
  30#include <linux/module.h>
  31#include <linux/slab.h>
  32#include <linux/init.h>
  33#include <linux/compiler.h>
  34#include <linux/delay.h>
  35#include <linux/blktrace_api.h>
  36#include <linux/hash.h>
  37#include <linux/uaccess.h>
 
 
  38
  39#include <trace/events/block.h>
  40
  41#include "blk.h"
 
 
  42
  43static DEFINE_SPINLOCK(elv_list_lock);
  44static LIST_HEAD(elv_list);
  45
  46/*
  47 * Merge hash stuff.
  48 */
  49static const int elv_hash_shift = 6;
  50#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
  51#define ELV_HASH_FN(sec)	\
  52		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
  53#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
  54#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
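/*
 * Requests are hashed on their end sector into 1 << elv_hash_shift (64)
 * buckets, so a bio's start sector can be used to find a request it could
 * be appended to.
 */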
  55
  56/*
  57 * Query io scheduler to see if the current process issuing bio may be
  58 * merged with rq.
  59 */
  60static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
  61{
  62	struct request_queue *q = rq->q;
  63	struct elevator_queue *e = q->elevator;
  64
  65	if (e->ops->elevator_allow_merge_fn)
  66		return e->ops->elevator_allow_merge_fn(q, rq, bio);
 
 
  67
  68	return 1;
  69}
  70
  71/*
  72 * can we safely merge with this request?
  73 */
  74int elv_rq_merge_ok(struct request *rq, struct bio *bio)
  75{
  76	if (!rq_mergeable(rq))
  77		return 0;
  78
  79	/*
  80	 * Don't merge file system requests and discard requests
  81	 */
  82	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
  83		return 0;
  84
  85	/*
  86	 * Don't merge discard requests and secure discard requests
  87	 */
  88	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
  89		return 0;
  90
  91	/*
  92	 * different data direction or already started, don't merge
  93	 */
  94	if (bio_data_dir(bio) != rq_data_dir(rq))
  95		return 0;
  96
  97	/*
  98	 * must be same device and not a special request
  99	 */
 100	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
 101		return 0;
 102
 103	/*
 104	 * only merge integrity protected bio into ditto rq
 105	 */
 106	if (bio_integrity(bio) != blk_integrity_rq(rq))
 107		return 0;
 108
 109	if (!elv_iosched_allow_merge(rq, bio))
 110		return 0;
 111
 112	return 1;
 113}
 114EXPORT_SYMBOL(elv_rq_merge_ok);
 115
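/*
 * Classify a possible merge of @bio with @__rq purely by sector adjacency:
 * a back merge if the bio starts where the request ends, a front merge if
 * the request starts where the bio ends.
 */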
 116int elv_try_merge(struct request *__rq, struct bio *bio)
 117{
 118	int ret = ELEVATOR_NO_MERGE;
 119
 120	/*
  121	 * we can merge and the sequence is ok; check which type of merge is possible
 122	 */
 123	if (elv_rq_merge_ok(__rq, bio)) {
 124		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
 125			ret = ELEVATOR_BACK_MERGE;
 126		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
 127			ret = ELEVATOR_FRONT_MERGE;
 128	}
 129
 130	return ret;
 131}
 132
 133static struct elevator_type *elevator_find(const char *name)
 134{
 135	struct elevator_type *e;
 136
 137	list_for_each_entry(e, &elv_list, list) {
 138		if (!strcmp(e->elevator_name, name))
 139			return e;
 140	}
 141
 142	return NULL;
 143}
 144
 145static void elevator_put(struct elevator_type *e)
 146{
 147	module_put(e->elevator_owner);
 148}
 149
 150static struct elevator_type *elevator_get(const char *name)
 
 151{
 152	struct elevator_type *e;
 153
 154	spin_lock(&elv_list_lock);
 155
 156	e = elevator_find(name);
 157	if (!e) {
 158		spin_unlock(&elv_list_lock);
 159		request_module("%s-iosched", name);
 160		spin_lock(&elv_list_lock);
 161		e = elevator_find(name);
 162	}
 163
 164	if (e && !try_module_get(e->elevator_owner))
 165		e = NULL;
 166
 167	spin_unlock(&elv_list_lock);
 168
 169	return e;
 170}
 171
 172static void *elevator_init_queue(struct request_queue *q,
 173				 struct elevator_queue *eq)
 174{
 175	return eq->ops->elevator_init_fn(q);
 176}
 177
 178static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
 179			   void *data)
 180{
 181	q->elevator = eq;
 182	eq->elevator_data = data;
 183}
 184
 185static char chosen_elevator[16];
 186
 187static int __init elevator_setup(char *str)
 188{
 189	/*
 190	 * Be backwards-compatible with previous kernels, so users
 191	 * won't get the wrong elevator.
 192	 */
 193	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
 194	return 1;
 195}
 196
 197__setup("elevator=", elevator_setup);
 198
 199static struct kobj_type elv_ktype;
 200
 201static struct elevator_queue *elevator_alloc(struct request_queue *q,
 202				  struct elevator_type *e)
 203{
 204	struct elevator_queue *eq;
 205	int i;
 206
 207	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
 208	if (unlikely(!eq))
 209		goto err;
 210
 211	eq->ops = &e->ops;
 212	eq->elevator_type = e;
 213	kobject_init(&eq->kobj, &elv_ktype);
 214	mutex_init(&eq->sysfs_lock);
 215
 216	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
 217					GFP_KERNEL, q->node);
 218	if (!eq->hash)
 219		goto err;
 220
 221	for (i = 0; i < ELV_HASH_ENTRIES; i++)
 222		INIT_HLIST_HEAD(&eq->hash[i]);
 223
 224	return eq;
 225err:
 226	kfree(eq);
 227	elevator_put(e);
 228	return NULL;
 229}
 
 230
 231static void elevator_release(struct kobject *kobj)
 232{
 233	struct elevator_queue *e;
 234
 235	e = container_of(kobj, struct elevator_queue, kobj);
 236	elevator_put(e->elevator_type);
 237	kfree(e->hash);
 238	kfree(e);
 239}
 240
 241int elevator_init(struct request_queue *q, char *name)
 242{
 243	struct elevator_type *e = NULL;
 244	struct elevator_queue *eq;
 245	void *data;
 246
 247	if (unlikely(q->elevator))
 248		return 0;
 249
 250	INIT_LIST_HEAD(&q->queue_head);
 251	q->last_merge = NULL;
 252	q->end_sector = 0;
 253	q->boundary_rq = NULL;
 254
 255	if (name) {
 256		e = elevator_get(name);
 257		if (!e)
 258			return -EINVAL;
 259	}
 260
 261	if (!e && *chosen_elevator) {
 262		e = elevator_get(chosen_elevator);
 263		if (!e)
 264			printk(KERN_ERR "I/O scheduler %s not found\n",
 265							chosen_elevator);
 266	}
 267
 268	if (!e) {
 269		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
 270		if (!e) {
 271			printk(KERN_ERR
 272				"Default I/O scheduler not found. " \
 273				"Using noop.\n");
 274			e = elevator_get("noop");
 275		}
 276	}
 277
 278	eq = elevator_alloc(q, e);
 279	if (!eq)
 280		return -ENOMEM;
 281
 282	data = elevator_init_queue(q, eq);
 283	if (!data) {
 284		kobject_put(&eq->kobj);
 285		return -ENOMEM;
 286	}
 287
 288	elevator_attach(q, eq, data);
 289	return 0;
 290}
 291EXPORT_SYMBOL(elevator_init);
 292
 293void elevator_exit(struct elevator_queue *e)
 294{
 295	mutex_lock(&e->sysfs_lock);
 296	if (e->ops->elevator_exit_fn)
 297		e->ops->elevator_exit_fn(e);
 298	e->ops = NULL;
 
 299	mutex_unlock(&e->sysfs_lock);
 300
 301	kobject_put(&e->kobj);
 302}
 303EXPORT_SYMBOL(elevator_exit);
 304
 305static inline void __elv_rqhash_del(struct request *rq)
 306{
 307	hlist_del_init(&rq->hash);
 
 308}
 309
 310static void elv_rqhash_del(struct request_queue *q, struct request *rq)
 311{
 312	if (ELV_ON_HASH(rq))
 313		__elv_rqhash_del(rq);
 314}
 
 315
 316static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 317{
 318	struct elevator_queue *e = q->elevator;
 319
 320	BUG_ON(ELV_ON_HASH(rq));
 321	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
 
 322}
 
 323
 324static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
 325{
 326	__elv_rqhash_del(rq);
 327	elv_rqhash_add(q, rq);
 328}
 329
 330static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 331{
 332	struct elevator_queue *e = q->elevator;
 333	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
 334	struct hlist_node *entry, *next;
 335	struct request *rq;
 336
 337	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
 338		BUG_ON(!ELV_ON_HASH(rq));
 339
 340		if (unlikely(!rq_mergeable(rq))) {
 341			__elv_rqhash_del(rq);
 342			continue;
 343		}
 344
 345		if (rq_hash_key(rq) == offset)
 346			return rq;
 347	}
 348
 349	return NULL;
 350}
 351
 352/*
 353 * RB-tree support functions for inserting/lookup/removal of requests
 354 * in a sorted RB tree.
 355 */
 356void elv_rb_add(struct rb_root *root, struct request *rq)
 357{
 358	struct rb_node **p = &root->rb_node;
 359	struct rb_node *parent = NULL;
 360	struct request *__rq;
 361
 362	while (*p) {
 363		parent = *p;
 364		__rq = rb_entry(parent, struct request, rb_node);
 365
 366		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
 367			p = &(*p)->rb_left;
 368		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
 369			p = &(*p)->rb_right;
 370	}
 371
 372	rb_link_node(&rq->rb_node, parent, p);
 373	rb_insert_color(&rq->rb_node, root);
 374}
 375EXPORT_SYMBOL(elv_rb_add);
 376
 377void elv_rb_del(struct rb_root *root, struct request *rq)
 378{
 379	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
 380	rb_erase(&rq->rb_node, root);
 381	RB_CLEAR_NODE(&rq->rb_node);
 382}
 383EXPORT_SYMBOL(elv_rb_del);
 384
 385struct request *elv_rb_find(struct rb_root *root, sector_t sector)
 386{
 387	struct rb_node *n = root->rb_node;
 388	struct request *rq;
 389
 390	while (n) {
 391		rq = rb_entry(n, struct request, rb_node);
 392
 393		if (sector < blk_rq_pos(rq))
 394			n = n->rb_left;
 395		else if (sector > blk_rq_pos(rq))
 396			n = n->rb_right;
 397		else
 398			return rq;
 399	}
 400
 401	return NULL;
 402}
 403EXPORT_SYMBOL(elv_rb_find);
 404
 405/*
 406 * Insert rq into dispatch queue of q.  Queue lock must be held on
  407 * entry.  rq is sorted into the dispatch queue. To be used by
 408 * specific elevators.
 409 */
 410void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 411{
 412	sector_t boundary;
 413	struct list_head *entry;
 414	int stop_flags;
 415
 416	if (q->last_merge == rq)
 417		q->last_merge = NULL;
 418
 419	elv_rqhash_del(q, rq);
 420
 421	q->nr_sorted--;
 422
 423	boundary = q->end_sector;
 424	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
 425	list_for_each_prev(entry, &q->queue_head) {
 426		struct request *pos = list_entry_rq(entry);
 427
 428		if ((rq->cmd_flags & REQ_DISCARD) !=
 429		    (pos->cmd_flags & REQ_DISCARD))
 430			break;
 431		if (rq_data_dir(rq) != rq_data_dir(pos))
 432			break;
 433		if (pos->cmd_flags & stop_flags)
 434			break;
 435		if (blk_rq_pos(rq) >= boundary) {
 436			if (blk_rq_pos(pos) < boundary)
 437				continue;
 438		} else {
 439			if (blk_rq_pos(pos) >= boundary)
 440				break;
 441		}
 442		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
 443			break;
 444	}
 445
 446	list_add(&rq->queuelist, entry);
 447}
 448EXPORT_SYMBOL(elv_dispatch_sort);
 449
 450/*
 451 * Insert rq into dispatch queue of q.  Queue lock must be held on
 452 * entry.  rq is added to the back of the dispatch queue. To be used by
 453 * specific elevators.
 454 */
 455void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
 456{
 457	if (q->last_merge == rq)
 458		q->last_merge = NULL;
 459
 460	elv_rqhash_del(q, rq);
 461
 462	q->nr_sorted--;
 463
 464	q->end_sector = rq_end_sector(rq);
 465	q->boundary_rq = rq;
 466	list_add_tail(&rq->queuelist, &q->queue_head);
 467}
 468EXPORT_SYMBOL(elv_dispatch_add_tail);
 469
 470int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 
 471{
 472	struct elevator_queue *e = q->elevator;
 473	struct request *__rq;
 474	int ret;
 475
 476	/*
 477	 * Levels of merges:
 478	 * 	nomerges:  No merges at all attempted
 479	 * 	noxmerges: Only simple one-hit cache try
 480	 * 	merges:	   All merge tries attempted
 481	 */
 482	if (blk_queue_nomerges(q))
 483		return ELEVATOR_NO_MERGE;
 484
 485	/*
 486	 * First try one-hit cache.
 487	 */
 488	if (q->last_merge) {
 489		ret = elv_try_merge(q->last_merge, bio);
 
 490		if (ret != ELEVATOR_NO_MERGE) {
 491			*req = q->last_merge;
 492			return ret;
 493		}
 494	}
 495
 496	if (blk_queue_noxmerges(q))
 497		return ELEVATOR_NO_MERGE;
 498
 499	/*
 500	 * See if our hash lookup can find a potential backmerge.
 501	 */
 502	__rq = elv_rqhash_find(q, bio->bi_sector);
 503	if (__rq && elv_rq_merge_ok(__rq, bio)) {
 504		*req = __rq;
 505		return ELEVATOR_BACK_MERGE;
 506	}
 507
 508	if (e->ops->elevator_merge_fn)
 509		return e->ops->elevator_merge_fn(q, req, bio);
 
 
 510
 511	return ELEVATOR_NO_MERGE;
 512}
 513
 514/*
 515 * Attempt to do an insertion back merge. Only check for the case where
 516 * we can append 'rq' to an existing request, so we can throw 'rq' away
 517 * afterwards.
 518 *
 519 * Returns true if we merged, false otherwise
 520 */
 521static bool elv_attempt_insert_merge(struct request_queue *q,
 522				     struct request *rq)
 523{
 524	struct request *__rq;
 
 525
 526	if (blk_queue_nomerges(q))
 527		return false;
 528
 529	/*
 530	 * First try one-hit cache.
 531	 */
 532	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
 533		return true;
 534
 535	if (blk_queue_noxmerges(q))
 536		return false;
 537
 
 538	/*
 539	 * See if our hash lookup can find a potential backmerge.
 540	 */
 541	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
 542	if (__rq && blk_attempt_req_merge(q, __rq, rq))
 543		return true;
 544
 545	return false;
 546}
 547
 548void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 
 549{
 550	struct elevator_queue *e = q->elevator;
 551
 552	if (e->ops->elevator_merged_fn)
 553		e->ops->elevator_merged_fn(q, rq, type);
 
 
 554
 555	if (type == ELEVATOR_BACK_MERGE)
 556		elv_rqhash_reposition(q, rq);
 557
 558	q->last_merge = rq;
 559}
 560
 561void elv_merge_requests(struct request_queue *q, struct request *rq,
 562			     struct request *next)
 563{
 564	struct elevator_queue *e = q->elevator;
 565	const int next_sorted = next->cmd_flags & REQ_SORTED;
 566
 567	if (next_sorted && e->ops->elevator_merge_req_fn)
 568		e->ops->elevator_merge_req_fn(q, rq, next);
 569
 570	elv_rqhash_reposition(q, rq);
 571
 572	if (next_sorted) {
 573		elv_rqhash_del(q, next);
 574		q->nr_sorted--;
 575	}
 576
 577	q->last_merge = rq;
 578}
 579
 580void elv_bio_merged(struct request_queue *q, struct request *rq,
 581			struct bio *bio)
 582{
 583	struct elevator_queue *e = q->elevator;
 584
 585	if (e->ops->elevator_bio_merged_fn)
 586		e->ops->elevator_bio_merged_fn(q, rq, bio);
 587}
 588
 589void elv_requeue_request(struct request_queue *q, struct request *rq)
 590{
 591	/*
 592	 * it already went through dequeue, we need to decrement the
 593	 * in_flight count again
 594	 */
 595	if (blk_account_rq(rq)) {
 596		q->in_flight[rq_is_sync(rq)]--;
 597		if (rq->cmd_flags & REQ_SORTED)
 598			elv_deactivate_rq(q, rq);
 599	}
 600
 601	rq->cmd_flags &= ~REQ_STARTED;
 
 
 602
 603	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
 604}
 605
 606void elv_drain_elevator(struct request_queue *q)
 607{
 
 608	static int printed;
 609	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
 610		;
 611	if (q->nr_sorted == 0)
 612		return;
 613	if (printed++ < 10) {
 614		printk(KERN_ERR "%s: forced dispatching is broken "
 615		       "(nr_sorted=%u), please report this\n",
 616		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
 617	}
 618}
 619
 620/*
 621 * Call with queue lock held, interrupts disabled
 622 */
 623void elv_quiesce_start(struct request_queue *q)
 624{
 625	if (!q->elevator)
 626		return;
 627
 628	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
 629
 630	/*
 631	 * make sure we don't have any requests in flight
 632	 */
 633	elv_drain_elevator(q);
 634	while (q->rq.elvpriv) {
 635		__blk_run_queue(q);
 636		spin_unlock_irq(q->queue_lock);
 637		msleep(10);
 638		spin_lock_irq(q->queue_lock);
 639		elv_drain_elevator(q);
 640	}
 641}
 642
 643void elv_quiesce_end(struct request_queue *q)
 644{
 645	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
 646}
 647
 648void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 649{
 650	trace_block_rq_insert(q, rq);
 651
 
 
 652	rq->q = q;
 653
 654	if (rq->cmd_flags & REQ_SOFTBARRIER) {
 655		/* barriers are scheduling boundary, update end_sector */
 656		if (rq->cmd_type == REQ_TYPE_FS ||
 657		    (rq->cmd_flags & REQ_DISCARD)) {
 658			q->end_sector = rq_end_sector(rq);
 659			q->boundary_rq = rq;
 660		}
 661	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
 662		    (where == ELEVATOR_INSERT_SORT ||
 663		     where == ELEVATOR_INSERT_SORT_MERGE))
 664		where = ELEVATOR_INSERT_BACK;
 665
 666	switch (where) {
 667	case ELEVATOR_INSERT_REQUEUE:
 668	case ELEVATOR_INSERT_FRONT:
 669		rq->cmd_flags |= REQ_SOFTBARRIER;
 670		list_add(&rq->queuelist, &q->queue_head);
 671		break;
 672
 673	case ELEVATOR_INSERT_BACK:
 674		rq->cmd_flags |= REQ_SOFTBARRIER;
 675		elv_drain_elevator(q);
 676		list_add_tail(&rq->queuelist, &q->queue_head);
 677		/*
 678		 * We kick the queue here for the following reasons.
 679		 * - The elevator might have returned NULL previously
 680		 *   to delay requests and returned them now.  As the
 681		 *   queue wasn't empty before this request, ll_rw_blk
 682		 *   won't run the queue on return, resulting in hang.
 683		 * - Usually, back inserted requests won't be merged
 684		 *   with anything.  There's no point in delaying queue
 685		 *   processing.
 686		 */
 687		__blk_run_queue(q);
 688		break;
 689
 690	case ELEVATOR_INSERT_SORT_MERGE:
 691		/*
 692		 * If we succeed in merging this request with one in the
 693		 * queue already, we are done - rq has now been freed,
 694		 * so no need to do anything further.
 695		 */
 696		if (elv_attempt_insert_merge(q, rq))
 697			break;
 
 698	case ELEVATOR_INSERT_SORT:
 699		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
 700		       !(rq->cmd_flags & REQ_DISCARD));
 701		rq->cmd_flags |= REQ_SORTED;
 702		q->nr_sorted++;
 703		if (rq_mergeable(rq)) {
 704			elv_rqhash_add(q, rq);
 705			if (!q->last_merge)
 706				q->last_merge = rq;
 707		}
 708
 709		/*
 710		 * Some ioscheds (cfq) run q->request_fn directly, so
 711		 * rq cannot be accessed after calling
 712		 * elevator_add_req_fn.
 713		 */
 714		q->elevator->ops->elevator_add_req_fn(q, rq);
 715		break;
 716
 717	case ELEVATOR_INSERT_FLUSH:
 718		rq->cmd_flags |= REQ_SOFTBARRIER;
 719		blk_insert_flush(rq);
 720		break;
 721	default:
 722		printk(KERN_ERR "%s: bad insertion point %d\n",
 723		       __func__, where);
 724		BUG();
 725	}
 726}
 727EXPORT_SYMBOL(__elv_add_request);
 728
 729void elv_add_request(struct request_queue *q, struct request *rq, int where)
 730{
 731	unsigned long flags;
 732
 733	spin_lock_irqsave(q->queue_lock, flags);
 734	__elv_add_request(q, rq, where);
 735	spin_unlock_irqrestore(q->queue_lock, flags);
 736}
 737EXPORT_SYMBOL(elv_add_request);
 738
 739struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 740{
 741	struct elevator_queue *e = q->elevator;
 742
 743	if (e->ops->elevator_latter_req_fn)
 744		return e->ops->elevator_latter_req_fn(q, rq);
 745	return NULL;
 746}
 747
 748struct request *elv_former_request(struct request_queue *q, struct request *rq)
 749{
 750	struct elevator_queue *e = q->elevator;
 751
 752	if (e->ops->elevator_former_req_fn)
 753		return e->ops->elevator_former_req_fn(q, rq);
 
 
 754	return NULL;
 755}
 756
 757int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 
 758{
 759	struct elevator_queue *e = q->elevator;
 760
 761	if (e->ops->elevator_set_req_fn)
 762		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
 763
 764	rq->elevator_private[0] = NULL;
 
 765	return 0;
 766}
 767
 768void elv_put_request(struct request_queue *q, struct request *rq)
 769{
 770	struct elevator_queue *e = q->elevator;
 771
 772	if (e->ops->elevator_put_req_fn)
 773		e->ops->elevator_put_req_fn(rq);
 774}
 775
 776int elv_may_queue(struct request_queue *q, int rw)
 777{
 778	struct elevator_queue *e = q->elevator;
 779
 780	if (e->ops->elevator_may_queue_fn)
 781		return e->ops->elevator_may_queue_fn(q, rw);
 782
 783	return ELV_MQUEUE_MAY;
 784}
 785
 786void elv_abort_queue(struct request_queue *q)
 787{
 788	struct request *rq;
 789
 790	blk_abort_flushes(q);
 791
 792	while (!list_empty(&q->queue_head)) {
 793		rq = list_entry_rq(q->queue_head.next);
 794		rq->cmd_flags |= REQ_QUIET;
 795		trace_block_rq_abort(q, rq);
 796		/*
 797		 * Mark this request as started so we don't trigger
 798		 * any debug logic in the end I/O path.
 799		 */
 800		blk_start_request(rq);
 801		__blk_end_request_all(rq, -EIO);
 802	}
 803}
 804EXPORT_SYMBOL(elv_abort_queue);
 805
 806void elv_completed_request(struct request_queue *q, struct request *rq)
 807{
 808	struct elevator_queue *e = q->elevator;
 809
 810	/*
 811	 * request is released from the driver, io must be done
 812	 */
 813	if (blk_account_rq(rq)) {
 814		q->in_flight[rq_is_sync(rq)]--;
 815		if ((rq->cmd_flags & REQ_SORTED) &&
 816		    e->ops->elevator_completed_req_fn)
 817			e->ops->elevator_completed_req_fn(q, rq);
 818	}
 819}
 820
 821#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
 822
 823static ssize_t
 824elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 825{
 826	struct elv_fs_entry *entry = to_elv(attr);
 827	struct elevator_queue *e;
 828	ssize_t error;
 829
 830	if (!entry->show)
 831		return -EIO;
 832
 833	e = container_of(kobj, struct elevator_queue, kobj);
 834	mutex_lock(&e->sysfs_lock);
 835	error = e->ops ? entry->show(e, page) : -ENOENT;
 836	mutex_unlock(&e->sysfs_lock);
 837	return error;
 838}
 839
 840static ssize_t
 841elv_attr_store(struct kobject *kobj, struct attribute *attr,
 842	       const char *page, size_t length)
 843{
 844	struct elv_fs_entry *entry = to_elv(attr);
 845	struct elevator_queue *e;
 846	ssize_t error;
 847
 848	if (!entry->store)
 849		return -EIO;
 850
 851	e = container_of(kobj, struct elevator_queue, kobj);
 852	mutex_lock(&e->sysfs_lock);
 853	error = e->ops ? entry->store(e, page, length) : -ENOENT;
 854	mutex_unlock(&e->sysfs_lock);
 855	return error;
 856}
 857
 858static const struct sysfs_ops elv_sysfs_ops = {
 859	.show	= elv_attr_show,
 860	.store	= elv_attr_store,
 861};
 862
 863static struct kobj_type elv_ktype = {
 864	.sysfs_ops	= &elv_sysfs_ops,
 865	.release	= elevator_release,
 866};
 867
 868int elv_register_queue(struct request_queue *q)
 869{
 870	struct elevator_queue *e = q->elevator;
 871	int error;
 872
 
 
 873	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
 874	if (!error) {
 875		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
 876		if (attr) {
 877			while (attr->attr.name) {
 878				if (sysfs_create_file(&e->kobj, &attr->attr))
 879					break;
 880				attr++;
 881			}
 882		}
 883		kobject_uevent(&e->kobj, KOBJ_ADD);
 884		e->registered = 1;
 
 
 885	}
 886	return error;
 887}
 888EXPORT_SYMBOL(elv_register_queue);
 889
 890static void __elv_unregister_queue(struct elevator_queue *e)
 891{
 892	kobject_uevent(&e->kobj, KOBJ_REMOVE);
 893	kobject_del(&e->kobj);
 894	e->registered = 0;
 895}
 896
 897void elv_unregister_queue(struct request_queue *q)
 898{
 899	if (q)
 900		__elv_unregister_queue(q->elevator);
 
 
 901}
 902EXPORT_SYMBOL(elv_unregister_queue);
 903
 904void elv_register(struct elevator_type *e)
 905{
 906	char *def = "";
 907
 908	spin_lock(&elv_list_lock);
 909	BUG_ON(elevator_find(e->elevator_name));
 910	list_add_tail(&e->list, &elv_list);
 911	spin_unlock(&elv_list_lock);
 912
 913	if (!strcmp(e->elevator_name, chosen_elevator) ||
 
 914			(!*chosen_elevator &&
 915			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
 916				def = " (default)";
 917
 918	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
 919								def);
 
 920}
 921EXPORT_SYMBOL_GPL(elv_register);
 922
 923void elv_unregister(struct elevator_type *e)
 924{
 925	struct task_struct *g, *p;
 926
 927	/*
  928	 * Iterate over every thread in the system to trim its io context.
  929	 */
 930	if (e->ops.trim) {
 931		read_lock(&tasklist_lock);
 932		do_each_thread(g, p) {
 933			task_lock(p);
 934			if (p->io_context)
 935				e->ops.trim(p->io_context);
 936			task_unlock(p);
 937		} while_each_thread(g, p);
 938		read_unlock(&tasklist_lock);
 939	}
 940
 941	spin_lock(&elv_list_lock);
 942	list_del_init(&e->list);
 943	spin_unlock(&elv_list_lock);
 944}
 945EXPORT_SYMBOL_GPL(elv_unregister);
 946
 947/*
  948 * Switch to the new_e io scheduler. Be careful not to introduce deadlocks:
  949 * we don't free the old io scheduler before we have allocated what we
  950 * need for the new one. This way we have a chance of going back to the old
  951 * one if the new one fails init for some reason.
 952 */
 953static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 954{
 955	struct elevator_queue *old_elevator, *e;
 956	void *data;
 957	int err;
 958
 959	/*
 960	 * Allocate new elevator
 961	 */
 962	e = elevator_alloc(q, new_e);
 963	if (!e)
 964		return -ENOMEM;
 965
 966	data = elevator_init_queue(q, e);
 967	if (!data) {
 968		kobject_put(&e->kobj);
 969		return -ENOMEM;
 970	}
 971
 972	/*
 973	 * Turn on BYPASS and drain all requests w/ elevator private data
 974	 */
 975	spin_lock_irq(q->queue_lock);
 976	elv_quiesce_start(q);
 
 
 977
 978	/*
 979	 * Remember old elevator.
 980	 */
 981	old_elevator = q->elevator;
 982
 983	/*
 984	 * attach and start new elevator
 985	 */
 986	elevator_attach(q, e, data);
 987
 988	spin_unlock_irq(q->queue_lock);
 989
 990	if (old_elevator->registered) {
 991		__elv_unregister_queue(old_elevator);
 
 992
 993		err = elv_register_queue(q);
 994		if (err)
 995			goto fail_register;
 
 996	}
 997
 998	/*
 999	 * finally exit old elevator and turn off BYPASS.
1000	 */
1001	elevator_exit(old_elevator);
1002	spin_lock_irq(q->queue_lock);
1003	elv_quiesce_end(q);
1004	spin_unlock_irq(q->queue_lock);
1005
1006	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
1007
1008	return 0;
1009
1010fail_register:
1011	/*
1012	 * switch failed, exit the new io scheduler and reattach the old
1013	 * one again (along with re-adding the sysfs dir)
1014	 */
1015	elevator_exit(e);
1016	q->elevator = old_elevator;
1017	elv_register_queue(q);
1018
1019	spin_lock_irq(q->queue_lock);
1020	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1021	spin_unlock_irq(q->queue_lock);
1022
1023	return err;
1024}
1025
1026/*
1027 * Switch this queue to the given IO scheduler.
1028 */
1029int elevator_change(struct request_queue *q, const char *name)
1030{
1031	char elevator_name[ELV_NAME_MAX];
1032	struct elevator_type *e;
1033
1034	if (!q->elevator)
1035		return -ENXIO;
1036
1037	strlcpy(elevator_name, name, sizeof(elevator_name));
1038	e = elevator_get(strstrip(elevator_name));
1039	if (!e) {
1040		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
1041		return -EINVAL;
1042	}
1043
1044	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
1045		elevator_put(e);
1046		return 0;
1047	}
1048
1049	return elevator_switch(q, e);
1050}
1051EXPORT_SYMBOL(elevator_change);
1052
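/*
 * sysfs interface: writing a scheduler name to
 * /sys/block/<dev>/queue/scheduler calls elevator_change() via this helper.
 */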
1053ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1054			  size_t count)
1055{
1056	int ret;
1057
1058	if (!q->elevator)
1059		return count;
1060
1061	ret = elevator_change(q, name);
1062	if (!ret)
1063		return count;
1064
1065	printk(KERN_ERR "elevator: switch to %s failed\n", name);
1066	return ret;
1067}
1068
1069ssize_t elv_iosched_show(struct request_queue *q, char *name)
1070{
1071	struct elevator_queue *e = q->elevator;
1072	struct elevator_type *elv;
1073	struct elevator_type *__e;
 
1074	int len = 0;
1075
1076	if (!q->elevator || !blk_queue_stackable(q))
1077		return sprintf(name, "none\n");
1078
1079	elv = e->elevator_type;
1080
1081	spin_lock(&elv_list_lock);
1082	list_for_each_entry(__e, &elv_list, list) {
1083		if (!strcmp(elv->elevator_name, __e->elevator_name))
 
1084			len += sprintf(name+len, "[%s] ", elv->elevator_name);
1085		else
1086			len += sprintf(name+len, "%s ", __e->elevator_name);
1087	}
1088	spin_unlock(&elv_list_lock);
1089
1090	len += sprintf(len+name, "\n");
1091	return len;
1092}
1093
1094struct request *elv_rb_former_request(struct request_queue *q,
1095				      struct request *rq)
1096{
1097	struct rb_node *rbprev = rb_prev(&rq->rb_node);
1098
1099	if (rbprev)
1100		return rb_entry_rq(rbprev);
1101
1102	return NULL;
1103}
1104EXPORT_SYMBOL(elv_rb_former_request);
1105
1106struct request *elv_rb_latter_request(struct request_queue *q,
1107				      struct request *rq)
1108{
1109	struct rb_node *rbnext = rb_next(&rq->rb_node);
1110
1111	if (rbnext)
1112		return rb_entry_rq(rbnext);
1113
1114	return NULL;
1115}
1116EXPORT_SYMBOL(elv_rb_latter_request);