v4.6
   1/*
   2 *  Block device elevator/IO-scheduler.
   3 *
   4 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
   5 *
   6 * 30042000 Jens Axboe <axboe@kernel.dk> :
   7 *
   8 * Split the elevator a bit so that it is possible to choose a different
   9 * one or even write a new "plug in". There are three pieces:
  10 * - elevator_fn, inserts a new request in the queue list
  11 * - elevator_merge_fn, decides whether a new buffer can be merged with
  12 *   an existing request
  13 * - elevator_dequeue_fn, called when a request is taken off the active list
  14 *
  15 * 20082000 Dave Jones <davej@suse.de> :
  16 * Removed tests for max-bomb-segments, which was breaking elvtune
  17 *  when run without -bN
  18 *
  19 * Jens:
  20 * - Rework again to work with bio instead of buffer_heads
  21 * - loose bi_dev comparisons, partition handling is right now
  22 * - completely modularize elevator setup and teardown
  23 *
  24 */
  25#include <linux/kernel.h>
  26#include <linux/fs.h>
  27#include <linux/blkdev.h>
  28#include <linux/elevator.h>
  29#include <linux/bio.h>
  30#include <linux/module.h>
  31#include <linux/slab.h>
  32#include <linux/init.h>
  33#include <linux/compiler.h>
  34#include <linux/blktrace_api.h>
  35#include <linux/hash.h>
  36#include <linux/uaccess.h>
  37#include <linux/pm_runtime.h>
  38#include <linux/blk-cgroup.h>
  39
  40#include <trace/events/block.h>
  41
  42#include "blk.h"
  43
  44static DEFINE_SPINLOCK(elv_list_lock);
  45static LIST_HEAD(elv_list);
  46
  47/*
  48 * Merge hash stuff.
  49 */
  50#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
  51
  52/*
  53 * Query io scheduler to see if the current process issuing bio may be
  54 * merged with rq.
  55 */
  56static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
  57{
  58	struct request_queue *q = rq->q;
  59	struct elevator_queue *e = q->elevator;
  60
  61	if (e->type->ops.elevator_allow_merge_fn)
  62		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);
  63
  64	return 1;
  65}
  66
  67/*
  68 * can we safely merge with this request?
  69 */
  70bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
  71{
  72	if (!blk_rq_merge_ok(rq, bio))
  73		return 0;
  74
  75	if (!elv_iosched_allow_merge(rq, bio))
  76		return 0;
  77
  78	return 1;
  79}
  80EXPORT_SYMBOL(elv_rq_merge_ok);
  81
  82static struct elevator_type *elevator_find(const char *name)
  83{
  84	struct elevator_type *e;
  85
  86	list_for_each_entry(e, &elv_list, list) {
  87		if (!strcmp(e->elevator_name, name))
  88			return e;
  89	}
  90
  91	return NULL;
  92}
  93
  94static void elevator_put(struct elevator_type *e)
  95{
  96	module_put(e->elevator_owner);
  97}
  98
  99static struct elevator_type *elevator_get(const char *name, bool try_loading)
 100{
 101	struct elevator_type *e;
 102
 103	spin_lock(&elv_list_lock);
 104
 105	e = elevator_find(name);
 106	if (!e && try_loading) {
 107		spin_unlock(&elv_list_lock);
 108		request_module("%s-iosched", name);
 109		spin_lock(&elv_list_lock);
 110		e = elevator_find(name);
 111	}
 112
 113	if (e && !try_module_get(e->elevator_owner))
 114		e = NULL;
 115
 116	spin_unlock(&elv_list_lock);
 117
 118	return e;
 119}
 120
 121static char chosen_elevator[ELV_NAME_MAX];
 122
 123static int __init elevator_setup(char *str)
 124{
 125	/*
 126	 * Be backwards-compatible with previous kernels, so users
 127	 * won't get the wrong elevator.
 128	 */
 129	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
 130	return 1;
 131}
 132
 133__setup("elevator=", elevator_setup);
 134
 135/* called during boot to load the elevator chosen by the elevator param */
 136void __init load_default_elevator_module(void)
 137{
 138	struct elevator_type *e;
 139
 140	if (!chosen_elevator[0])
 141		return;
 142
 143	spin_lock(&elv_list_lock);
 144	e = elevator_find(chosen_elevator);
 145	spin_unlock(&elv_list_lock);
 146
 147	if (!e)
 148		request_module("%s-iosched", chosen_elevator);
 149}
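/*
 * Illustration (not part of this file): booting with "elevator=deadline"
 * on the kernel command line fills chosen_elevator via elevator_setup(),
 * and load_default_elevator_module() then issues
 *
 *	request_module("deadline-iosched");
 *
 * so a scheduler built as a module must provide a "<name>-iosched"
 * module for autoloading to work.  The scheduler name is only an example.
 */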
 150
 151static struct kobj_type elv_ktype;
 152
 153struct elevator_queue *elevator_alloc(struct request_queue *q,
 154				  struct elevator_type *e)
 155{
 156	struct elevator_queue *eq;
 157
 158	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
 159	if (unlikely(!eq))
 160		return NULL;
 161
 162	eq->type = e;
 163	kobject_init(&eq->kobj, &elv_ktype);
 164	mutex_init(&eq->sysfs_lock);
 165	hash_init(eq->hash);
 166
 167	return eq;
 168}
 169EXPORT_SYMBOL(elevator_alloc);
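/*
 * A minimal sketch (not part of this file) of how a scheduler's
 * elevator_init_fn typically uses elevator_alloc(): allocate the queue,
 * attach private data, and publish it as q->elevator under the queue
 * lock.  All "example_*" names are hypothetical.
 */
struct example_data {
	struct list_head fifo;		/* requests in submission order */
};

static int example_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct example_data *ed;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	ed = kmalloc_node(sizeof(*ed), GFP_KERNEL, q->node);
	if (!ed) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = ed;

	INIT_LIST_HEAD(&ed->fifo);

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}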
 170
 171static void elevator_release(struct kobject *kobj)
 172{
 173	struct elevator_queue *e;
 174
 175	e = container_of(kobj, struct elevator_queue, kobj);
 176	elevator_put(e->type);
 177	kfree(e);
 178}
 179
 180int elevator_init(struct request_queue *q, char *name)
 181{
 182	struct elevator_type *e = NULL;
 183	int err;
 184
 185	/*
 186	 * q->sysfs_lock must be held to provide mutual exclusion between
 187	 * elevator_switch() and here.
 188	 */
 189	lockdep_assert_held(&q->sysfs_lock);
 190
 191	if (unlikely(q->elevator))
 192		return 0;
 193
 194	INIT_LIST_HEAD(&q->queue_head);
 195	q->last_merge = NULL;
 196	q->end_sector = 0;
 197	q->boundary_rq = NULL;
 198
 199	if (name) {
 200		e = elevator_get(name, true);
 201		if (!e)
 202			return -EINVAL;
 203	}
 204
 205	/*
 206	 * Use the default elevator specified by config boot param or
 207	 * config option.  Don't try to load modules as we could be running
 208	 * off async and request_module() isn't allowed from async.
 209	 */
 210	if (!e && *chosen_elevator) {
 211		e = elevator_get(chosen_elevator, false);
 212		if (!e)
 213			printk(KERN_ERR "I/O scheduler %s not found\n",
 214							chosen_elevator);
 215	}
 216
 217	if (!e) {
 218		e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
 219		if (!e) {
 220			printk(KERN_ERR
 221				"Default I/O scheduler not found. " \
 222				"Using noop.\n");
 223			e = elevator_get("noop", false);
 224		}
 225	}
 226
 227	err = e->ops.elevator_init_fn(q, e);
 228	if (err)
 229		elevator_put(e);
 230	return err;
 231}
 232EXPORT_SYMBOL(elevator_init);
 233
 234void elevator_exit(struct elevator_queue *e)
 235{
 236	mutex_lock(&e->sysfs_lock);
 237	if (e->type->ops.elevator_exit_fn)
 238		e->type->ops.elevator_exit_fn(e);
 239	mutex_unlock(&e->sysfs_lock);
 240
 241	kobject_put(&e->kobj);
 242}
 243EXPORT_SYMBOL(elevator_exit);
 244
 245static inline void __elv_rqhash_del(struct request *rq)
 246{
 247	hash_del(&rq->hash);
 248	rq->cmd_flags &= ~REQ_HASHED;
 249}
 250
 251static void elv_rqhash_del(struct request_queue *q, struct request *rq)
 252{
 253	if (ELV_ON_HASH(rq))
 254		__elv_rqhash_del(rq);
 255}
 256
 257static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 258{
 259	struct elevator_queue *e = q->elevator;
 260
 261	BUG_ON(ELV_ON_HASH(rq));
 262	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
 263	rq->cmd_flags |= REQ_HASHED;
 264}
 265
 266static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
 267{
 268	__elv_rqhash_del(rq);
 269	elv_rqhash_add(q, rq);
 270}
 271
 272static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 273{
 274	struct elevator_queue *e = q->elevator;
 275	struct hlist_node *next;
 276	struct request *rq;
 277
 278	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
 279		BUG_ON(!ELV_ON_HASH(rq));
 280
 281		if (unlikely(!rq_mergeable(rq))) {
 282			__elv_rqhash_del(rq);
 283			continue;
 284		}
 285
 286		if (rq_hash_key(rq) == offset)
 287			return rq;
 288	}
 289
 290	return NULL;
 291}
 292
 293/*
 294 * RB-tree support functions for inserting/lookup/removal of requests
 295 * in a sorted RB tree.
 296 */
 297void elv_rb_add(struct rb_root *root, struct request *rq)
 298{
 299	struct rb_node **p = &root->rb_node;
 300	struct rb_node *parent = NULL;
 301	struct request *__rq;
 302
 303	while (*p) {
 304		parent = *p;
 305		__rq = rb_entry(parent, struct request, rb_node);
 306
 307		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
 308			p = &(*p)->rb_left;
 309		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
 310			p = &(*p)->rb_right;
 311	}
 312
 313	rb_link_node(&rq->rb_node, parent, p);
 314	rb_insert_color(&rq->rb_node, root);
 315}
 316EXPORT_SYMBOL(elv_rb_add);
 317
 318void elv_rb_del(struct rb_root *root, struct request *rq)
 319{
 320	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
 321	rb_erase(&rq->rb_node, root);
 322	RB_CLEAR_NODE(&rq->rb_node);
 323}
 324EXPORT_SYMBOL(elv_rb_del);
 325
 326struct request *elv_rb_find(struct rb_root *root, sector_t sector)
 327{
 328	struct rb_node *n = root->rb_node;
 329	struct request *rq;
 330
 331	while (n) {
 332		rq = rb_entry(n, struct request, rb_node);
 333
 334		if (sector < blk_rq_pos(rq))
 335			n = n->rb_left;
 336		else if (sector > blk_rq_pos(rq))
 337			n = n->rb_right;
 338		else
 339			return rq;
 340	}
 341
 342	return NULL;
 343}
 344EXPORT_SYMBOL(elv_rb_find);
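/*
 * Illustration (not part of this file): a sector-sorted scheduler such
 * as deadline keeps one rb_root per data direction and maintains it with
 * the helpers above; elv_rb_find() then answers "is there a queued
 * request starting exactly where this bio ends?", i.e. a front-merge
 * candidate.  All "example_*" names are hypothetical.
 */
struct example_sort_data {
	struct rb_root sort_list[2];	/* sorted by sector, per direction */
};

static void example_add_rq(struct example_sort_data *ed, struct request *rq)
{
	elv_rb_add(&ed->sort_list[rq_data_dir(rq)], rq);
}

static void example_del_rq(struct example_sort_data *ed, struct request *rq)
{
	elv_rb_del(&ed->sort_list[rq_data_dir(rq)], rq);
}

static struct request *example_front_merge(struct example_sort_data *ed,
					    struct bio *bio)
{
	return elv_rb_find(&ed->sort_list[bio_data_dir(bio)],
			   bio_end_sector(bio));
}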
 345
 346/*
 347 * Insert rq into dispatch queue of q.  Queue lock must be held on
  348 * entry.  rq is sorted into the dispatch queue. To be used by
 349 * specific elevators.
 350 */
 351void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 352{
 353	sector_t boundary;
 354	struct list_head *entry;
 355	int stop_flags;
 356
 357	if (q->last_merge == rq)
 358		q->last_merge = NULL;
 359
 360	elv_rqhash_del(q, rq);
 361
 362	q->nr_sorted--;
 363
 364	boundary = q->end_sector;
 365	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
 366	list_for_each_prev(entry, &q->queue_head) {
 367		struct request *pos = list_entry_rq(entry);
 368
 369		if ((rq->cmd_flags & REQ_DISCARD) !=
 370		    (pos->cmd_flags & REQ_DISCARD))
 371			break;
 372		if (rq_data_dir(rq) != rq_data_dir(pos))
 373			break;
 374		if (pos->cmd_flags & stop_flags)
 375			break;
 376		if (blk_rq_pos(rq) >= boundary) {
 377			if (blk_rq_pos(pos) < boundary)
 378				continue;
 379		} else {
 380			if (blk_rq_pos(pos) >= boundary)
 381				break;
 382		}
 383		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
 384			break;
 385	}
 386
 387	list_add(&rq->queuelist, entry);
 388}
 389EXPORT_SYMBOL(elv_dispatch_sort);
 390
 391/*
 392 * Insert rq into dispatch queue of q.  Queue lock must be held on
 393 * entry.  rq is added to the back of the dispatch queue. To be used by
 394 * specific elevators.
 395 */
 396void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
 397{
 398	if (q->last_merge == rq)
 399		q->last_merge = NULL;
 400
 401	elv_rqhash_del(q, rq);
 402
 403	q->nr_sorted--;
 404
 405	q->end_sector = rq_end_sector(rq);
 406	q->boundary_rq = rq;
 407	list_add_tail(&rq->queuelist, &q->queue_head);
 408}
 409EXPORT_SYMBOL(elv_dispatch_add_tail);
 410
 411int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 412{
 413	struct elevator_queue *e = q->elevator;
 414	struct request *__rq;
 415	int ret;
 416
 417	/*
 418	 * Levels of merges:
 419	 * 	nomerges:  No merges at all attempted
 420	 * 	noxmerges: Only simple one-hit cache try
 421	 * 	merges:	   All merge tries attempted
 422	 */
 423	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
 424		return ELEVATOR_NO_MERGE;
 425
 426	/*
 427	 * First try one-hit cache.
 428	 */
 429	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
 430		ret = blk_try_merge(q->last_merge, bio);
 431		if (ret != ELEVATOR_NO_MERGE) {
 432			*req = q->last_merge;
 433			return ret;
 434		}
 435	}
 436
 437	if (blk_queue_noxmerges(q))
 438		return ELEVATOR_NO_MERGE;
 439
 440	/*
 441	 * See if our hash lookup can find a potential backmerge.
 442	 */
 443	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
 444	if (__rq && elv_rq_merge_ok(__rq, bio)) {
 445		*req = __rq;
 446		return ELEVATOR_BACK_MERGE;
 447	}
 448
 449	if (e->type->ops.elevator_merge_fn)
 450		return e->type->ops.elevator_merge_fn(q, req, bio);
 451
 452	return ELEVATOR_NO_MERGE;
 453}
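/*
 * Rough sketch (not part of this file) of how a submission path consumes
 * elv_merge(): the return value says which end of the matched request
 * the bio may be glued to.  The bio_attempt_*_merge() helpers mirror the
 * ones blk-core uses, but this is a simplified assumption, not the real
 * call sequence (which also notifies the elevator afterwards).
 */
static bool example_try_merge(struct request_queue *q, struct bio *bio)
{
	struct request *rq = NULL;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		return bio_attempt_back_merge(q, rq, bio);
	case ELEVATOR_FRONT_MERGE:
		return bio_attempt_front_merge(q, rq, bio);
	default:
		return false;
	}
}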
 454
 455/*
 456 * Attempt to do an insertion back merge. Only check for the case where
 457 * we can append 'rq' to an existing request, so we can throw 'rq' away
 458 * afterwards.
 459 *
 460 * Returns true if we merged, false otherwise
 461 */
 462static bool elv_attempt_insert_merge(struct request_queue *q,
 463				     struct request *rq)
 464{
 465	struct request *__rq;
 466	bool ret;
 467
 468	if (blk_queue_nomerges(q))
 469		return false;
 470
 471	/*
 472	 * First try one-hit cache.
 473	 */
 474	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
 475		return true;
 476
 477	if (blk_queue_noxmerges(q))
 478		return false;
 479
 480	ret = false;
 481	/*
 482	 * See if our hash lookup can find a potential backmerge.
 483	 */
 484	while (1) {
 485		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
 486		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
 487			break;
 488
 489		/* The merged request could be merged with others, try again */
 490		ret = true;
 491		rq = __rq;
 492	}
 493
 494	return ret;
 495}
 496
 497void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 498{
 499	struct elevator_queue *e = q->elevator;
 500
 501	if (e->type->ops.elevator_merged_fn)
 502		e->type->ops.elevator_merged_fn(q, rq, type);
 503
 504	if (type == ELEVATOR_BACK_MERGE)
 505		elv_rqhash_reposition(q, rq);
 506
 507	q->last_merge = rq;
 508}
 509
 510void elv_merge_requests(struct request_queue *q, struct request *rq,
 511			     struct request *next)
 512{
 513	struct elevator_queue *e = q->elevator;
 514	const int next_sorted = next->cmd_flags & REQ_SORTED;
 515
 516	if (next_sorted && e->type->ops.elevator_merge_req_fn)
 517		e->type->ops.elevator_merge_req_fn(q, rq, next);
 518
 519	elv_rqhash_reposition(q, rq);
 520
 521	if (next_sorted) {
 522		elv_rqhash_del(q, next);
 523		q->nr_sorted--;
 524	}
 525
 526	q->last_merge = rq;
 527}
 528
 529void elv_bio_merged(struct request_queue *q, struct request *rq,
 530			struct bio *bio)
 531{
 532	struct elevator_queue *e = q->elevator;
 533
 534	if (e->type->ops.elevator_bio_merged_fn)
 535		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
 536}
 537
 538#ifdef CONFIG_PM
 539static void blk_pm_requeue_request(struct request *rq)
 540{
 541	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
 542		rq->q->nr_pending--;
 543}
 544
 545static void blk_pm_add_request(struct request_queue *q, struct request *rq)
 546{
 547	if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
 548	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
 549		pm_request_resume(q->dev);
 550}
 551#else
 552static inline void blk_pm_requeue_request(struct request *rq) {}
 553static inline void blk_pm_add_request(struct request_queue *q,
 554				      struct request *rq)
 555{
 556}
 557#endif
 558
 559void elv_requeue_request(struct request_queue *q, struct request *rq)
 560{
 561	/*
 562	 * it already went through dequeue, we need to decrement the
 563	 * in_flight count again
 564	 */
 565	if (blk_account_rq(rq)) {
 566		q->in_flight[rq_is_sync(rq)]--;
 567		if (rq->cmd_flags & REQ_SORTED)
 568			elv_deactivate_rq(q, rq);
 569	}
 570
 571	rq->cmd_flags &= ~REQ_STARTED;
 572
 573	blk_pm_requeue_request(rq);
 574
 575	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
 576}
 577
 578void elv_drain_elevator(struct request_queue *q)
 579{
 580	static int printed;
 581
 582	lockdep_assert_held(q->queue_lock);
 583
 584	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
 585		;
 586	if (q->nr_sorted && printed++ < 10) {
 587		printk(KERN_ERR "%s: forced dispatching is broken "
 588		       "(nr_sorted=%u), please report this\n",
 589		       q->elevator->type->elevator_name, q->nr_sorted);
 590	}
 591}
 592
 593void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 594{
 595	trace_block_rq_insert(q, rq);
 596
 597	blk_pm_add_request(q, rq);
 598
 599	rq->q = q;
 600
 601	if (rq->cmd_flags & REQ_SOFTBARRIER) {
 602		/* barriers are scheduling boundary, update end_sector */
 603		if (rq->cmd_type == REQ_TYPE_FS) {
 604			q->end_sector = rq_end_sector(rq);
 605			q->boundary_rq = rq;
 606		}
 607	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
 608		    (where == ELEVATOR_INSERT_SORT ||
 609		     where == ELEVATOR_INSERT_SORT_MERGE))
 610		where = ELEVATOR_INSERT_BACK;
 611
 612	switch (where) {
 613	case ELEVATOR_INSERT_REQUEUE:
 614	case ELEVATOR_INSERT_FRONT:
 615		rq->cmd_flags |= REQ_SOFTBARRIER;
 616		list_add(&rq->queuelist, &q->queue_head);
 617		break;
 618
 619	case ELEVATOR_INSERT_BACK:
 620		rq->cmd_flags |= REQ_SOFTBARRIER;
 621		elv_drain_elevator(q);
 622		list_add_tail(&rq->queuelist, &q->queue_head);
 623		/*
 624		 * We kick the queue here for the following reasons.
 625		 * - The elevator might have returned NULL previously
 626		 *   to delay requests and returned them now.  As the
 627		 *   queue wasn't empty before this request, ll_rw_blk
 628		 *   won't run the queue on return, resulting in hang.
 629		 * - Usually, back inserted requests won't be merged
 630		 *   with anything.  There's no point in delaying queue
 631		 *   processing.
 632		 */
 633		__blk_run_queue(q);
 634		break;
 635
 636	case ELEVATOR_INSERT_SORT_MERGE:
 637		/*
 638		 * If we succeed in merging this request with one in the
 639		 * queue already, we are done - rq has now been freed,
 640		 * so no need to do anything further.
 641		 */
 642		if (elv_attempt_insert_merge(q, rq))
 643			break;
 644	case ELEVATOR_INSERT_SORT:
 645		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
 646		rq->cmd_flags |= REQ_SORTED;
 647		q->nr_sorted++;
 648		if (rq_mergeable(rq)) {
 649			elv_rqhash_add(q, rq);
 650			if (!q->last_merge)
 651				q->last_merge = rq;
 652		}
 653
 654		/*
 655		 * Some ioscheds (cfq) run q->request_fn directly, so
 656		 * rq cannot be accessed after calling
 657		 * elevator_add_req_fn.
 658		 */
 659		q->elevator->type->ops.elevator_add_req_fn(q, rq);
 660		break;
 661
 662	case ELEVATOR_INSERT_FLUSH:
 663		rq->cmd_flags |= REQ_SOFTBARRIER;
 664		blk_insert_flush(rq);
 665		break;
 666	default:
 667		printk(KERN_ERR "%s: bad insertion point %d\n",
 668		       __func__, where);
 669		BUG();
 670	}
 671}
 672EXPORT_SYMBOL(__elv_add_request);
 673
 674void elv_add_request(struct request_queue *q, struct request *rq, int where)
 675{
 676	unsigned long flags;
 677
 678	spin_lock_irqsave(q->queue_lock, flags);
 679	__elv_add_request(q, rq, where);
 680	spin_unlock_irqrestore(q->queue_lock, flags);
 681}
 682EXPORT_SYMBOL(elv_add_request);
 683
 684struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 685{
 686	struct elevator_queue *e = q->elevator;
 687
 688	if (e->type->ops.elevator_latter_req_fn)
 689		return e->type->ops.elevator_latter_req_fn(q, rq);
 690	return NULL;
 691}
 692
 693struct request *elv_former_request(struct request_queue *q, struct request *rq)
 694{
 695	struct elevator_queue *e = q->elevator;
 696
 697	if (e->type->ops.elevator_former_req_fn)
 698		return e->type->ops.elevator_former_req_fn(q, rq);
 699	return NULL;
 700}
 701
 702int elv_set_request(struct request_queue *q, struct request *rq,
 703		    struct bio *bio, gfp_t gfp_mask)
 704{
 705	struct elevator_queue *e = q->elevator;
 706
 707	if (e->type->ops.elevator_set_req_fn)
 708		return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
 709	return 0;
 710}
 711
 712void elv_put_request(struct request_queue *q, struct request *rq)
 713{
 714	struct elevator_queue *e = q->elevator;
 715
 716	if (e->type->ops.elevator_put_req_fn)
 717		e->type->ops.elevator_put_req_fn(rq);
 718}
 719
 720int elv_may_queue(struct request_queue *q, int rw)
 721{
 722	struct elevator_queue *e = q->elevator;
 723
 724	if (e->type->ops.elevator_may_queue_fn)
 725		return e->type->ops.elevator_may_queue_fn(q, rw);
 726
 727	return ELV_MQUEUE_MAY;
 728}
 729
 730void elv_completed_request(struct request_queue *q, struct request *rq)
 731{
 732	struct elevator_queue *e = q->elevator;
 733
 734	/*
 735	 * request is released from the driver, io must be done
 736	 */
 737	if (blk_account_rq(rq)) {
 738		q->in_flight[rq_is_sync(rq)]--;
 739		if ((rq->cmd_flags & REQ_SORTED) &&
 740		    e->type->ops.elevator_completed_req_fn)
 741			e->type->ops.elevator_completed_req_fn(q, rq);
 742	}
 743}
 744
 745#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
 746
 747static ssize_t
 748elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 749{
 750	struct elv_fs_entry *entry = to_elv(attr);
 751	struct elevator_queue *e;
 752	ssize_t error;
 753
 754	if (!entry->show)
 755		return -EIO;
 756
 757	e = container_of(kobj, struct elevator_queue, kobj);
 758	mutex_lock(&e->sysfs_lock);
 759	error = e->type ? entry->show(e, page) : -ENOENT;
 760	mutex_unlock(&e->sysfs_lock);
 761	return error;
 762}
 763
 764static ssize_t
 765elv_attr_store(struct kobject *kobj, struct attribute *attr,
 766	       const char *page, size_t length)
 767{
 768	struct elv_fs_entry *entry = to_elv(attr);
 769	struct elevator_queue *e;
 770	ssize_t error;
 771
 772	if (!entry->store)
 773		return -EIO;
 774
 775	e = container_of(kobj, struct elevator_queue, kobj);
 776	mutex_lock(&e->sysfs_lock);
 777	error = e->type ? entry->store(e, page, length) : -ENOENT;
 778	mutex_unlock(&e->sysfs_lock);
 779	return error;
 780}
 781
 782static const struct sysfs_ops elv_sysfs_ops = {
 783	.show	= elv_attr_show,
 784	.store	= elv_attr_store,
 785};
 786
 787static struct kobj_type elv_ktype = {
 788	.sysfs_ops	= &elv_sysfs_ops,
 789	.release	= elevator_release,
 790};
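/*
 * Note (not part of this file): elv_ktype ties the elevator_queue's
 * lifetime to its kobject.  elevator_exit() only drops a reference with
 * kobject_put(); once the last reference is gone, elevator_release()
 * above frees the structure and drops the module reference taken in
 * elevator_get().
 */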
 791
 792int elv_register_queue(struct request_queue *q)
 793{
 794	struct elevator_queue *e = q->elevator;
 795	int error;
 796
 797	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
 798	if (!error) {
 799		struct elv_fs_entry *attr = e->type->elevator_attrs;
 800		if (attr) {
 801			while (attr->attr.name) {
 802				if (sysfs_create_file(&e->kobj, &attr->attr))
 803					break;
 804				attr++;
 805			}
 806		}
 807		kobject_uevent(&e->kobj, KOBJ_ADD);
 808		e->registered = 1;
 809		if (e->type->ops.elevator_registered_fn)
 810			e->type->ops.elevator_registered_fn(q);
 811	}
 812	return error;
 813}
 814EXPORT_SYMBOL(elv_register_queue);
 815
 816void elv_unregister_queue(struct request_queue *q)
 817{
 818	if (q) {
 819		struct elevator_queue *e = q->elevator;
 820
 821		kobject_uevent(&e->kobj, KOBJ_REMOVE);
 822		kobject_del(&e->kobj);
 823		e->registered = 0;
 824	}
 825}
 826EXPORT_SYMBOL(elv_unregister_queue);
 827
 828int elv_register(struct elevator_type *e)
 829{
 830	char *def = "";
 831
 832	/* create icq_cache if requested */
 833	if (e->icq_size) {
 834		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
 835		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
 836			return -EINVAL;
 837
 838		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
 839			 "%s_io_cq", e->elevator_name);
 840		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
 841						 e->icq_align, 0, NULL);
 842		if (!e->icq_cache)
 843			return -ENOMEM;
 844	}
 845
 846	/* register, don't allow duplicate names */
 847	spin_lock(&elv_list_lock);
 848	if (elevator_find(e->elevator_name)) {
 849		spin_unlock(&elv_list_lock);
 850		if (e->icq_cache)
 851			kmem_cache_destroy(e->icq_cache);
 852		return -EBUSY;
 853	}
 854	list_add_tail(&e->list, &elv_list);
 855	spin_unlock(&elv_list_lock);
 856
 857	/* print pretty message */
 858	if (!strcmp(e->elevator_name, chosen_elevator) ||
 859			(!*chosen_elevator &&
 860			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
 861				def = " (default)";
 862
 863	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
 864								def);
 865	return 0;
 866}
 867EXPORT_SYMBOL_GPL(elv_register);
 868
 869void elv_unregister(struct elevator_type *e)
 870{
 871	/* unregister */
 872	spin_lock(&elv_list_lock);
 873	list_del_init(&e->list);
 874	spin_unlock(&elv_list_lock);
 875
 876	/*
 877	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
 878	 * sure all RCU operations are complete before proceeding.
 879	 */
 880	if (e->icq_cache) {
 881		rcu_barrier();
 882		kmem_cache_destroy(e->icq_cache);
 883		e->icq_cache = NULL;
 884	}
 885}
 886EXPORT_SYMBOL_GPL(elv_unregister);
 887
 888/*
 889 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 890 * we don't free the old io scheduler, before we have allocated what we
 891 * need for the new one. this way we have a chance of going back to the old
 892 * one, if the new one fails init for some reason.
 893 */
 894static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 895{
 896	struct elevator_queue *old = q->elevator;
 897	bool registered = old->registered;
 898	int err;
 899
 900	/*
 901	 * Turn on BYPASS and drain all requests w/ elevator private data.
 902	 * Block layer doesn't call into a quiesced elevator - all requests
 903	 * are directly put on the dispatch list without elevator data
 904	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
 905	 * merge happens either.
 906	 */
 907	blk_queue_bypass_start(q);
 908
 909	/* unregister and clear all auxiliary data of the old elevator */
 910	if (registered)
 911		elv_unregister_queue(q);
 912
 913	spin_lock_irq(q->queue_lock);
 914	ioc_clear_queue(q);
 915	spin_unlock_irq(q->queue_lock);
 916
 917	/* allocate, init and register new elevator */
 918	err = new_e->ops.elevator_init_fn(q, new_e);
 919	if (err)
 920		goto fail_init;
 921
 922	if (registered) {
 923		err = elv_register_queue(q);
 924		if (err)
 925			goto fail_register;
 926	}
 927
 928	/* done, kill the old one and finish */
 929	elevator_exit(old);
 930	blk_queue_bypass_end(q);
 931
 932	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
 933
 934	return 0;
 935
 936fail_register:
 937	elevator_exit(q->elevator);
 938fail_init:
 939	/* switch failed, restore and re-register old elevator */
 940	q->elevator = old;
 941	elv_register_queue(q);
 942	blk_queue_bypass_end(q);
 943
 944	return err;
 945}
 946
 947/*
 948 * Switch this queue to the given IO scheduler.
 949 */
 950static int __elevator_change(struct request_queue *q, const char *name)
 951{
 952	char elevator_name[ELV_NAME_MAX];
 953	struct elevator_type *e;
 954
 955	if (!q->elevator)
 956		return -ENXIO;
 957
 958	strlcpy(elevator_name, name, sizeof(elevator_name));
 959	e = elevator_get(strstrip(elevator_name), true);
 960	if (!e) {
 961		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
 962		return -EINVAL;
 963	}
 964
 965	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
 966		elevator_put(e);
 967		return 0;
 968	}
 969
 970	return elevator_switch(q, e);
 971}
 972
 973int elevator_change(struct request_queue *q, const char *name)
 974{
 975	int ret;
 976
 977	/* Protect q->elevator from elevator_init() */
 978	mutex_lock(&q->sysfs_lock);
 979	ret = __elevator_change(q, name);
 980	mutex_unlock(&q->sysfs_lock);
 981
 982	return ret;
 983}
 984EXPORT_SYMBOL(elevator_change);
 985
 986ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 987			  size_t count)
 988{
 989	int ret;
 990
 991	if (!q->elevator)
 992		return count;
 993
 994	ret = __elevator_change(q, name);
 995	if (!ret)
 996		return count;
 997
 998	printk(KERN_ERR "elevator: switch to %s failed\n", name);
 999	return ret;
1000}
1001
1002ssize_t elv_iosched_show(struct request_queue *q, char *name)
1003{
1004	struct elevator_queue *e = q->elevator;
1005	struct elevator_type *elv;
1006	struct elevator_type *__e;
1007	int len = 0;
1008
1009	if (!q->elevator || !blk_queue_stackable(q))
1010		return sprintf(name, "none\n");
1011
1012	elv = e->type;
1013
1014	spin_lock(&elv_list_lock);
1015	list_for_each_entry(__e, &elv_list, list) {
1016		if (!strcmp(elv->elevator_name, __e->elevator_name))
1017			len += sprintf(name+len, "[%s] ", elv->elevator_name);
1018		else
1019			len += sprintf(name+len, "%s ", __e->elevator_name);
1020	}
1021	spin_unlock(&elv_list_lock);
1022
1023	len += sprintf(len+name, "\n");
1024	return len;
1025}
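/*
 * Illustration (not part of this file): elv_iosched_show() and
 * elv_iosched_store() back the per-device sysfs attribute, so the active
 * scheduler can be inspected and switched at run time, e.g.:
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop [deadline] cfq
 *	# echo cfq > /sys/block/sda/queue/scheduler
 *
 * The device and scheduler names above are only examples.
 */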
1026
1027struct request *elv_rb_former_request(struct request_queue *q,
1028				      struct request *rq)
1029{
1030	struct rb_node *rbprev = rb_prev(&rq->rb_node);
1031
1032	if (rbprev)
1033		return rb_entry_rq(rbprev);
1034
1035	return NULL;
1036}
1037EXPORT_SYMBOL(elv_rb_former_request);
1038
1039struct request *elv_rb_latter_request(struct request_queue *q,
1040				      struct request *rq)
1041{
1042	struct rb_node *rbnext = rb_next(&rq->rb_node);
1043
1044	if (rbnext)
1045		return rb_entry_rq(rbnext);
1046
1047	return NULL;
1048}
1049EXPORT_SYMBOL(elv_rb_latter_request);
v4.10.11
   1/*
   2 *  Block device elevator/IO-scheduler.
   3 *
   4 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
   5 *
   6 * 30042000 Jens Axboe <axboe@kernel.dk> :
   7 *
   8 * Split the elevator a bit so that it is possible to choose a different
   9 * one or even write a new "plug in". There are three pieces:
  10 * - elevator_fn, inserts a new request in the queue list
  11 * - elevator_merge_fn, decides whether a new buffer can be merged with
  12 *   an existing request
  13 * - elevator_dequeue_fn, called when a request is taken off the active list
  14 *
  15 * 20082000 Dave Jones <davej@suse.de> :
  16 * Removed tests for max-bomb-segments, which was breaking elvtune
  17 *  when run without -bN
  18 *
  19 * Jens:
  20 * - Rework again to work with bio instead of buffer_heads
  21 * - loose bi_dev comparisons, partition handling is right now
  22 * - completely modularize elevator setup and teardown
  23 *
  24 */
  25#include <linux/kernel.h>
  26#include <linux/fs.h>
  27#include <linux/blkdev.h>
  28#include <linux/elevator.h>
  29#include <linux/bio.h>
  30#include <linux/module.h>
  31#include <linux/slab.h>
  32#include <linux/init.h>
  33#include <linux/compiler.h>
  34#include <linux/blktrace_api.h>
  35#include <linux/hash.h>
  36#include <linux/uaccess.h>
  37#include <linux/pm_runtime.h>
  38#include <linux/blk-cgroup.h>
  39
  40#include <trace/events/block.h>
  41
  42#include "blk.h"
  43
  44static DEFINE_SPINLOCK(elv_list_lock);
  45static LIST_HEAD(elv_list);
  46
  47/*
  48 * Merge hash stuff.
  49 */
  50#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
  51
  52/*
  53 * Query io scheduler to see if the current process issuing bio may be
  54 * merged with rq.
  55 */
  56static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
  57{
  58	struct request_queue *q = rq->q;
  59	struct elevator_queue *e = q->elevator;
  60
  61	if (e->type->ops.elevator_allow_bio_merge_fn)
  62		return e->type->ops.elevator_allow_bio_merge_fn(q, rq, bio);
  63
  64	return 1;
  65}
  66
  67/*
  68 * can we safely merge with this request?
  69 */
  70bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
  71{
  72	if (!blk_rq_merge_ok(rq, bio))
  73		return false;
  74
  75	if (!elv_iosched_allow_bio_merge(rq, bio))
  76		return false;
  77
  78	return true;
  79}
  80EXPORT_SYMBOL(elv_bio_merge_ok);
  81
  82static struct elevator_type *elevator_find(const char *name)
  83{
  84	struct elevator_type *e;
  85
  86	list_for_each_entry(e, &elv_list, list) {
  87		if (!strcmp(e->elevator_name, name))
  88			return e;
  89	}
  90
  91	return NULL;
  92}
  93
  94static void elevator_put(struct elevator_type *e)
  95{
  96	module_put(e->elevator_owner);
  97}
  98
  99static struct elevator_type *elevator_get(const char *name, bool try_loading)
 100{
 101	struct elevator_type *e;
 102
 103	spin_lock(&elv_list_lock);
 104
 105	e = elevator_find(name);
 106	if (!e && try_loading) {
 107		spin_unlock(&elv_list_lock);
 108		request_module("%s-iosched", name);
 109		spin_lock(&elv_list_lock);
 110		e = elevator_find(name);
 111	}
 112
 113	if (e && !try_module_get(e->elevator_owner))
 114		e = NULL;
 115
 116	spin_unlock(&elv_list_lock);
 117
 118	return e;
 119}
 120
 121static char chosen_elevator[ELV_NAME_MAX];
 122
 123static int __init elevator_setup(char *str)
 124{
 125	/*
 126	 * Be backwards-compatible with previous kernels, so users
 127	 * won't get the wrong elevator.
 128	 */
 129	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
 130	return 1;
 131}
 132
 133__setup("elevator=", elevator_setup);
 134
 135/* called during boot to load the elevator chosen by the elevator param */
 136void __init load_default_elevator_module(void)
 137{
 138	struct elevator_type *e;
 139
 140	if (!chosen_elevator[0])
 141		return;
 142
 143	spin_lock(&elv_list_lock);
 144	e = elevator_find(chosen_elevator);
 145	spin_unlock(&elv_list_lock);
 146
 147	if (!e)
 148		request_module("%s-iosched", chosen_elevator);
 149}
 150
 151static struct kobj_type elv_ktype;
 152
 153struct elevator_queue *elevator_alloc(struct request_queue *q,
 154				  struct elevator_type *e)
 155{
 156	struct elevator_queue *eq;
 157
 158	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
 159	if (unlikely(!eq))
 160		return NULL;
 161
 162	eq->type = e;
 163	kobject_init(&eq->kobj, &elv_ktype);
 164	mutex_init(&eq->sysfs_lock);
 165	hash_init(eq->hash);
 166
 167	return eq;
 168}
 169EXPORT_SYMBOL(elevator_alloc);
 170
 171static void elevator_release(struct kobject *kobj)
 172{
 173	struct elevator_queue *e;
 174
 175	e = container_of(kobj, struct elevator_queue, kobj);
 176	elevator_put(e->type);
 177	kfree(e);
 178}
 179
 180int elevator_init(struct request_queue *q, char *name)
 181{
 182	struct elevator_type *e = NULL;
 183	int err;
 184
 185	/*
 186	 * q->sysfs_lock must be held to provide mutual exclusion between
 187	 * elevator_switch() and here.
 188	 */
 189	lockdep_assert_held(&q->sysfs_lock);
 190
 191	if (unlikely(q->elevator))
 192		return 0;
 193
 194	INIT_LIST_HEAD(&q->queue_head);
 195	q->last_merge = NULL;
 196	q->end_sector = 0;
 197	q->boundary_rq = NULL;
 198
 199	if (name) {
 200		e = elevator_get(name, true);
 201		if (!e)
 202			return -EINVAL;
 203	}
 204
 205	/*
 206	 * Use the default elevator specified by config boot param or
 207	 * config option.  Don't try to load modules as we could be running
 208	 * off async and request_module() isn't allowed from async.
 209	 */
 210	if (!e && *chosen_elevator) {
 211		e = elevator_get(chosen_elevator, false);
 212		if (!e)
 213			printk(KERN_ERR "I/O scheduler %s not found\n",
 214							chosen_elevator);
 215	}
 216
 217	if (!e) {
 218		e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
 219		if (!e) {
 220			printk(KERN_ERR
 221				"Default I/O scheduler not found. " \
 222				"Using noop.\n");
 223			e = elevator_get("noop", false);
 224		}
 225	}
 226
 227	err = e->ops.elevator_init_fn(q, e);
 228	if (err)
 229		elevator_put(e);
 230	return err;
 231}
 232EXPORT_SYMBOL(elevator_init);
 233
 234void elevator_exit(struct elevator_queue *e)
 235{
 236	mutex_lock(&e->sysfs_lock);
 237	if (e->type->ops.elevator_exit_fn)
 238		e->type->ops.elevator_exit_fn(e);
 239	mutex_unlock(&e->sysfs_lock);
 240
 241	kobject_put(&e->kobj);
 242}
 243EXPORT_SYMBOL(elevator_exit);
 244
 245static inline void __elv_rqhash_del(struct request *rq)
 246{
 247	hash_del(&rq->hash);
 248	rq->rq_flags &= ~RQF_HASHED;
 249}
 250
 251void elv_rqhash_del(struct request_queue *q, struct request *rq)
 252{
 253	if (ELV_ON_HASH(rq))
 254		__elv_rqhash_del(rq);
 255}
 256
 257void elv_rqhash_add(struct request_queue *q, struct request *rq)
 258{
 259	struct elevator_queue *e = q->elevator;
 260
 261	BUG_ON(ELV_ON_HASH(rq));
 262	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
 263	rq->rq_flags |= RQF_HASHED;
 264}
 265
 266void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
 267{
 268	__elv_rqhash_del(rq);
 269	elv_rqhash_add(q, rq);
 270}
 271
 272struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 273{
 274	struct elevator_queue *e = q->elevator;
 275	struct hlist_node *next;
 276	struct request *rq;
 277
 278	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
 279		BUG_ON(!ELV_ON_HASH(rq));
 280
 281		if (unlikely(!rq_mergeable(rq))) {
 282			__elv_rqhash_del(rq);
 283			continue;
 284		}
 285
 286		if (rq_hash_key(rq) == offset)
 287			return rq;
 288	}
 289
 290	return NULL;
 291}
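/*
 * Illustration (not part of this file): rq_hash_key() is the sector
 * immediately following a request, so looking up a bio's starting sector
 * yields a back-merge candidate:
 *
 *	rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
 *	if (rq && elv_bio_merge_ok(rq, bio))
 *		... bio can be appended to rq (ELEVATOR_BACK_MERGE) ...
 *
 * which is exactly how elv_merge() below uses the hash.
 */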
 292
 293/*
 294 * RB-tree support functions for inserting/lookup/removal of requests
 295 * in a sorted RB tree.
 296 */
 297void elv_rb_add(struct rb_root *root, struct request *rq)
 298{
 299	struct rb_node **p = &root->rb_node;
 300	struct rb_node *parent = NULL;
 301	struct request *__rq;
 302
 303	while (*p) {
 304		parent = *p;
 305		__rq = rb_entry(parent, struct request, rb_node);
 306
 307		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
 308			p = &(*p)->rb_left;
 309		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
 310			p = &(*p)->rb_right;
 311	}
 312
 313	rb_link_node(&rq->rb_node, parent, p);
 314	rb_insert_color(&rq->rb_node, root);
 315}
 316EXPORT_SYMBOL(elv_rb_add);
 317
 318void elv_rb_del(struct rb_root *root, struct request *rq)
 319{
 320	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
 321	rb_erase(&rq->rb_node, root);
 322	RB_CLEAR_NODE(&rq->rb_node);
 323}
 324EXPORT_SYMBOL(elv_rb_del);
 325
 326struct request *elv_rb_find(struct rb_root *root, sector_t sector)
 327{
 328	struct rb_node *n = root->rb_node;
 329	struct request *rq;
 330
 331	while (n) {
 332		rq = rb_entry(n, struct request, rb_node);
 333
 334		if (sector < blk_rq_pos(rq))
 335			n = n->rb_left;
 336		else if (sector > blk_rq_pos(rq))
 337			n = n->rb_right;
 338		else
 339			return rq;
 340	}
 341
 342	return NULL;
 343}
 344EXPORT_SYMBOL(elv_rb_find);
 345
 346/*
 347 * Insert rq into dispatch queue of q.  Queue lock must be held on
  348 * entry.  rq is sorted into the dispatch queue. To be used by
 349 * specific elevators.
 350 */
 351void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 352{
 353	sector_t boundary;
 354	struct list_head *entry;
 355
 356	if (q->last_merge == rq)
 357		q->last_merge = NULL;
 358
 359	elv_rqhash_del(q, rq);
 360
 361	q->nr_sorted--;
 362
 363	boundary = q->end_sector;
 364	list_for_each_prev(entry, &q->queue_head) {
 365		struct request *pos = list_entry_rq(entry);
 366
 367		if (req_op(rq) != req_op(pos))
 368			break;
 369		if (rq_data_dir(rq) != rq_data_dir(pos))
 370			break;
 371		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
 372			break;
 373		if (blk_rq_pos(rq) >= boundary) {
 374			if (blk_rq_pos(pos) < boundary)
 375				continue;
 376		} else {
 377			if (blk_rq_pos(pos) >= boundary)
 378				break;
 379		}
 380		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
 381			break;
 382	}
 383
 384	list_add(&rq->queuelist, entry);
 385}
 386EXPORT_SYMBOL(elv_dispatch_sort);
 387
 388/*
 389 * Insert rq into dispatch queue of q.  Queue lock must be held on
 390 * entry.  rq is added to the back of the dispatch queue. To be used by
 391 * specific elevators.
 392 */
 393void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
 394{
 395	if (q->last_merge == rq)
 396		q->last_merge = NULL;
 397
 398	elv_rqhash_del(q, rq);
 399
 400	q->nr_sorted--;
 401
 402	q->end_sector = rq_end_sector(rq);
 403	q->boundary_rq = rq;
 404	list_add_tail(&rq->queuelist, &q->queue_head);
 405}
 406EXPORT_SYMBOL(elv_dispatch_add_tail);
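/*
 * A minimal sketch (not part of this file) of an elevator_dispatch_fn
 * built on the helpers above: pop one request from a private FIFO and
 * hand it to the dispatch queue, returning 1 if something was moved.
 * All "example_*" names are hypothetical.
 */
struct example_data {
	struct list_head fifo;		/* requests in submission order */
};

static int example_dispatch(struct request_queue *q, int force)
{
	struct example_data *ed = q->elevator->elevator_data;
	struct request *rq;

	rq = list_first_entry_or_null(&ed->fifo, struct request, queuelist);
	if (!rq)
		return 0;

	list_del_init(&rq->queuelist);
	elv_dispatch_add_tail(q, rq);
	return 1;
}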
 407
 408int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 409{
 410	struct elevator_queue *e = q->elevator;
 411	struct request *__rq;
 412	int ret;
 413
 414	/*
 415	 * Levels of merges:
 416	 * 	nomerges:  No merges at all attempted
 417	 * 	noxmerges: Only simple one-hit cache try
 418	 * 	merges:	   All merge tries attempted
 419	 */
 420	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
 421		return ELEVATOR_NO_MERGE;
 422
 423	/*
 424	 * First try one-hit cache.
 425	 */
 426	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
 427		ret = blk_try_merge(q->last_merge, bio);
 428		if (ret != ELEVATOR_NO_MERGE) {
 429			*req = q->last_merge;
 430			return ret;
 431		}
 432	}
 433
 434	if (blk_queue_noxmerges(q))
 435		return ELEVATOR_NO_MERGE;
 436
 437	/*
 438	 * See if our hash lookup can find a potential backmerge.
 439	 */
 440	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
 441	if (__rq && elv_bio_merge_ok(__rq, bio)) {
 442		*req = __rq;
 443		return ELEVATOR_BACK_MERGE;
 444	}
 445
 446	if (e->type->ops.elevator_merge_fn)
 447		return e->type->ops.elevator_merge_fn(q, req, bio);
 448
 449	return ELEVATOR_NO_MERGE;
 450}
 451
 452/*
 453 * Attempt to do an insertion back merge. Only check for the case where
 454 * we can append 'rq' to an existing request, so we can throw 'rq' away
 455 * afterwards.
 456 *
 457 * Returns true if we merged, false otherwise
 458 */
 459static bool elv_attempt_insert_merge(struct request_queue *q,
 460				     struct request *rq)
 461{
 462	struct request *__rq;
 463	bool ret;
 464
 465	if (blk_queue_nomerges(q))
 466		return false;
 467
 468	/*
 469	 * First try one-hit cache.
 470	 */
 471	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
 472		return true;
 473
 474	if (blk_queue_noxmerges(q))
 475		return false;
 476
 477	ret = false;
 478	/*
 479	 * See if our hash lookup can find a potential backmerge.
 480	 */
 481	while (1) {
 482		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
 483		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
 484			break;
 485
 486		/* The merged request could be merged with others, try again */
 487		ret = true;
 488		rq = __rq;
 489	}
 490
 491	return ret;
 492}
 493
 494void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 495{
 496	struct elevator_queue *e = q->elevator;
 497
 498	if (e->type->ops.elevator_merged_fn)
 499		e->type->ops.elevator_merged_fn(q, rq, type);
 500
 501	if (type == ELEVATOR_BACK_MERGE)
 502		elv_rqhash_reposition(q, rq);
 503
 504	q->last_merge = rq;
 505}
 506
 507void elv_merge_requests(struct request_queue *q, struct request *rq,
 508			     struct request *next)
 509{
 510	struct elevator_queue *e = q->elevator;
 511	const int next_sorted = next->rq_flags & RQF_SORTED;
 512
 513	if (next_sorted && e->type->ops.elevator_merge_req_fn)
 514		e->type->ops.elevator_merge_req_fn(q, rq, next);
 515
 516	elv_rqhash_reposition(q, rq);
 517
 518	if (next_sorted) {
 519		elv_rqhash_del(q, next);
 520		q->nr_sorted--;
 521	}
 522
 523	q->last_merge = rq;
 524}
 525
 526void elv_bio_merged(struct request_queue *q, struct request *rq,
 527			struct bio *bio)
 528{
 529	struct elevator_queue *e = q->elevator;
 530
 531	if (e->type->ops.elevator_bio_merged_fn)
 532		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
 533}
 534
 535#ifdef CONFIG_PM
 536static void blk_pm_requeue_request(struct request *rq)
 537{
 538	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 539		rq->q->nr_pending--;
 540}
 541
 542static void blk_pm_add_request(struct request_queue *q, struct request *rq)
 543{
 544	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
 545	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
 546		pm_request_resume(q->dev);
 547}
 548#else
 549static inline void blk_pm_requeue_request(struct request *rq) {}
 550static inline void blk_pm_add_request(struct request_queue *q,
 551				      struct request *rq)
 552{
 553}
 554#endif
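/*
 * Note (not part of this file): with CONFIG_PM the two hooks above keep
 * q->nr_pending in step with the requests handed to the elevator, so
 * runtime PM knows whether the queue is busy; a request added while the
 * device is suspended or suspending kicks pm_request_resume(), and a
 * requeue undoes the count.
 */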
 555
 556void elv_requeue_request(struct request_queue *q, struct request *rq)
 557{
 558	/*
 559	 * it already went through dequeue, we need to decrement the
 560	 * in_flight count again
 561	 */
 562	if (blk_account_rq(rq)) {
 563		q->in_flight[rq_is_sync(rq)]--;
 564		if (rq->rq_flags & RQF_SORTED)
 565			elv_deactivate_rq(q, rq);
 566	}
 567
 568	rq->rq_flags &= ~RQF_STARTED;
 569
 570	blk_pm_requeue_request(rq);
 571
 572	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
 573}
 574
 575void elv_drain_elevator(struct request_queue *q)
 576{
 577	static int printed;
 578
 579	lockdep_assert_held(q->queue_lock);
 580
 581	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
 582		;
 583	if (q->nr_sorted && printed++ < 10) {
 584		printk(KERN_ERR "%s: forced dispatching is broken "
 585		       "(nr_sorted=%u), please report this\n",
 586		       q->elevator->type->elevator_name, q->nr_sorted);
 587	}
 588}
 589
 590void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 591{
 592	trace_block_rq_insert(q, rq);
 593
 594	blk_pm_add_request(q, rq);
 595
 596	rq->q = q;
 597
 598	if (rq->rq_flags & RQF_SOFTBARRIER) {
 599		/* barriers are scheduling boundary, update end_sector */
 600		if (rq->cmd_type == REQ_TYPE_FS) {
 601			q->end_sector = rq_end_sector(rq);
 602			q->boundary_rq = rq;
 603		}
 604	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
 605		    (where == ELEVATOR_INSERT_SORT ||
 606		     where == ELEVATOR_INSERT_SORT_MERGE))
 607		where = ELEVATOR_INSERT_BACK;
 608
 609	switch (where) {
 610	case ELEVATOR_INSERT_REQUEUE:
 611	case ELEVATOR_INSERT_FRONT:
 612		rq->rq_flags |= RQF_SOFTBARRIER;
 613		list_add(&rq->queuelist, &q->queue_head);
 614		break;
 615
 616	case ELEVATOR_INSERT_BACK:
 617		rq->rq_flags |= RQF_SOFTBARRIER;
 618		elv_drain_elevator(q);
 619		list_add_tail(&rq->queuelist, &q->queue_head);
 620		/*
 621		 * We kick the queue here for the following reasons.
 622		 * - The elevator might have returned NULL previously
 623		 *   to delay requests and returned them now.  As the
 624		 *   queue wasn't empty before this request, ll_rw_blk
 625		 *   won't run the queue on return, resulting in hang.
 626		 * - Usually, back inserted requests won't be merged
 627		 *   with anything.  There's no point in delaying queue
 628		 *   processing.
 629		 */
 630		__blk_run_queue(q);
 631		break;
 632
 633	case ELEVATOR_INSERT_SORT_MERGE:
 634		/*
 635		 * If we succeed in merging this request with one in the
 636		 * queue already, we are done - rq has now been freed,
 637		 * so no need to do anything further.
 638		 */
 639		if (elv_attempt_insert_merge(q, rq))
 640			break;
 641	case ELEVATOR_INSERT_SORT:
 642		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
 643		rq->rq_flags |= RQF_SORTED;
 644		q->nr_sorted++;
 645		if (rq_mergeable(rq)) {
 646			elv_rqhash_add(q, rq);
 647			if (!q->last_merge)
 648				q->last_merge = rq;
 649		}
 650
 651		/*
 652		 * Some ioscheds (cfq) run q->request_fn directly, so
 653		 * rq cannot be accessed after calling
 654		 * elevator_add_req_fn.
 655		 */
 656		q->elevator->type->ops.elevator_add_req_fn(q, rq);
 657		break;
 658
 659	case ELEVATOR_INSERT_FLUSH:
 660		rq->rq_flags |= RQF_SOFTBARRIER;
 661		blk_insert_flush(rq);
 662		break;
 663	default:
 664		printk(KERN_ERR "%s: bad insertion point %d\n",
 665		       __func__, where);
 666		BUG();
 667	}
 668}
 669EXPORT_SYMBOL(__elv_add_request);
 670
 671void elv_add_request(struct request_queue *q, struct request *rq, int where)
 672{
 673	unsigned long flags;
 674
 675	spin_lock_irqsave(q->queue_lock, flags);
 676	__elv_add_request(q, rq, where);
 677	spin_unlock_irqrestore(q->queue_lock, flags);
 678}
 679EXPORT_SYMBOL(elv_add_request);
 680
 681struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 682{
 683	struct elevator_queue *e = q->elevator;
 684
 685	if (e->type->ops.elevator_latter_req_fn)
 686		return e->type->ops.elevator_latter_req_fn(q, rq);
 687	return NULL;
 688}
 689
 690struct request *elv_former_request(struct request_queue *q, struct request *rq)
 691{
 692	struct elevator_queue *e = q->elevator;
 693
 694	if (e->type->ops.elevator_former_req_fn)
 695		return e->type->ops.elevator_former_req_fn(q, rq);
 696	return NULL;
 697}
 698
 699int elv_set_request(struct request_queue *q, struct request *rq,
 700		    struct bio *bio, gfp_t gfp_mask)
 701{
 702	struct elevator_queue *e = q->elevator;
 703
 704	if (e->type->ops.elevator_set_req_fn)
 705		return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
 706	return 0;
 707}
 708
 709void elv_put_request(struct request_queue *q, struct request *rq)
 710{
 711	struct elevator_queue *e = q->elevator;
 712
 713	if (e->type->ops.elevator_put_req_fn)
 714		e->type->ops.elevator_put_req_fn(rq);
 715}
 716
 717int elv_may_queue(struct request_queue *q, unsigned int op)
 718{
 719	struct elevator_queue *e = q->elevator;
 720
 721	if (e->type->ops.elevator_may_queue_fn)
 722		return e->type->ops.elevator_may_queue_fn(q, op);
 723
 724	return ELV_MQUEUE_MAY;
 725}
 726
 727void elv_completed_request(struct request_queue *q, struct request *rq)
 728{
 729	struct elevator_queue *e = q->elevator;
 730
 731	/*
 732	 * request is released from the driver, io must be done
 733	 */
 734	if (blk_account_rq(rq)) {
 735		q->in_flight[rq_is_sync(rq)]--;
 736		if ((rq->rq_flags & RQF_SORTED) &&
 737		    e->type->ops.elevator_completed_req_fn)
 738			e->type->ops.elevator_completed_req_fn(q, rq);
 739	}
 740}
 741
 742#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
 743
 744static ssize_t
 745elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 746{
 747	struct elv_fs_entry *entry = to_elv(attr);
 748	struct elevator_queue *e;
 749	ssize_t error;
 750
 751	if (!entry->show)
 752		return -EIO;
 753
 754	e = container_of(kobj, struct elevator_queue, kobj);
 755	mutex_lock(&e->sysfs_lock);
 756	error = e->type ? entry->show(e, page) : -ENOENT;
 757	mutex_unlock(&e->sysfs_lock);
 758	return error;
 759}
 760
 761static ssize_t
 762elv_attr_store(struct kobject *kobj, struct attribute *attr,
 763	       const char *page, size_t length)
 764{
 765	struct elv_fs_entry *entry = to_elv(attr);
 766	struct elevator_queue *e;
 767	ssize_t error;
 768
 769	if (!entry->store)
 770		return -EIO;
 771
 772	e = container_of(kobj, struct elevator_queue, kobj);
 773	mutex_lock(&e->sysfs_lock);
 774	error = e->type ? entry->store(e, page, length) : -ENOENT;
 775	mutex_unlock(&e->sysfs_lock);
 776	return error;
 777}
 778
 779static const struct sysfs_ops elv_sysfs_ops = {
 780	.show	= elv_attr_show,
 781	.store	= elv_attr_store,
 782};
 783
 784static struct kobj_type elv_ktype = {
 785	.sysfs_ops	= &elv_sysfs_ops,
 786	.release	= elevator_release,
 787};
 788
 789int elv_register_queue(struct request_queue *q)
 790{
 791	struct elevator_queue *e = q->elevator;
 792	int error;
 793
 794	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
 795	if (!error) {
 796		struct elv_fs_entry *attr = e->type->elevator_attrs;
 797		if (attr) {
 798			while (attr->attr.name) {
 799				if (sysfs_create_file(&e->kobj, &attr->attr))
 800					break;
 801				attr++;
 802			}
 803		}
 804		kobject_uevent(&e->kobj, KOBJ_ADD);
 805		e->registered = 1;
 806		if (e->type->ops.elevator_registered_fn)
 807			e->type->ops.elevator_registered_fn(q);
 808	}
 809	return error;
 810}
 811EXPORT_SYMBOL(elv_register_queue);
 812
 813void elv_unregister_queue(struct request_queue *q)
 814{
 815	if (q) {
 816		struct elevator_queue *e = q->elevator;
 817
 818		kobject_uevent(&e->kobj, KOBJ_REMOVE);
 819		kobject_del(&e->kobj);
 820		e->registered = 0;
 821	}
 822}
 823EXPORT_SYMBOL(elv_unregister_queue);
 824
 825int elv_register(struct elevator_type *e)
 826{
 827	char *def = "";
 828
 829	/* create icq_cache if requested */
 830	if (e->icq_size) {
 831		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
 832		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
 833			return -EINVAL;
 834
 835		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
 836			 "%s_io_cq", e->elevator_name);
 837		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
 838						 e->icq_align, 0, NULL);
 839		if (!e->icq_cache)
 840			return -ENOMEM;
 841	}
 842
 843	/* register, don't allow duplicate names */
 844	spin_lock(&elv_list_lock);
 845	if (elevator_find(e->elevator_name)) {
 846		spin_unlock(&elv_list_lock);
 847		if (e->icq_cache)
 848			kmem_cache_destroy(e->icq_cache);
 849		return -EBUSY;
 850	}
 851	list_add_tail(&e->list, &elv_list);
 852	spin_unlock(&elv_list_lock);
 853
 854	/* print pretty message */
 855	if (!strcmp(e->elevator_name, chosen_elevator) ||
 856			(!*chosen_elevator &&
 857			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
 858				def = " (default)";
 859
 860	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
 861								def);
 862	return 0;
 863}
 864EXPORT_SYMBOL_GPL(elv_register);
 865
 866void elv_unregister(struct elevator_type *e)
 867{
 868	/* unregister */
 869	spin_lock(&elv_list_lock);
 870	list_del_init(&e->list);
 871	spin_unlock(&elv_list_lock);
 872
 873	/*
 874	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
 875	 * sure all RCU operations are complete before proceeding.
 876	 */
 877	if (e->icq_cache) {
 878		rcu_barrier();
 879		kmem_cache_destroy(e->icq_cache);
 880		e->icq_cache = NULL;
 881	}
 882}
 883EXPORT_SYMBOL_GPL(elv_unregister);
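/*
 * Illustrative registration skeleton (not part of this file): a legacy
 * (single-queue) scheduler module pairs elv_register()/elv_unregister()
 * with its module init/exit.  The "example_*" handlers are hypothetical
 * and omitted; elevator_name is the name "elevator=" and sysfs use.
 */
static struct elevator_type elevator_example = {
	.ops = {
		.elevator_dispatch_fn	= example_dispatch,
		.elevator_add_req_fn	= example_add_request,
		.elevator_init_fn	= example_init_queue,
		.elevator_exit_fn	= example_exit_queue,
	},
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};

static int __init example_module_init(void)
{
	return elv_register(&elevator_example);
}

static void __exit example_module_exit(void)
{
	elv_unregister(&elevator_example);
}

module_init(example_module_init);
module_exit(example_module_exit);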
 884
 885/*
 886 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 887 * we don't free the old io scheduler, before we have allocated what we
 888 * need for the new one. this way we have a chance of going back to the old
 889 * one, if the new one fails init for some reason.
 890 */
 891static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 892{
 893	struct elevator_queue *old = q->elevator;
 894	bool registered = old->registered;
 895	int err;
 896
 897	/*
 898	 * Turn on BYPASS and drain all requests w/ elevator private data.
 899	 * Block layer doesn't call into a quiesced elevator - all requests
 900	 * are directly put on the dispatch list without elevator data
 901	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
 902	 * merge happens either.
 903	 */
 904	blk_queue_bypass_start(q);
 905
 906	/* unregister and clear all auxiliary data of the old elevator */
 907	if (registered)
 908		elv_unregister_queue(q);
 909
 910	spin_lock_irq(q->queue_lock);
 911	ioc_clear_queue(q);
 912	spin_unlock_irq(q->queue_lock);
 913
 914	/* allocate, init and register new elevator */
 915	err = new_e->ops.elevator_init_fn(q, new_e);
 916	if (err)
 917		goto fail_init;
 918
 919	if (registered) {
 920		err = elv_register_queue(q);
 921		if (err)
 922			goto fail_register;
 923	}
 924
 925	/* done, kill the old one and finish */
 926	elevator_exit(old);
 927	blk_queue_bypass_end(q);
 928
 929	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
 930
 931	return 0;
 932
 933fail_register:
 934	elevator_exit(q->elevator);
 935fail_init:
 936	/* switch failed, restore and re-register old elevator */
 937	q->elevator = old;
 938	elv_register_queue(q);
 939	blk_queue_bypass_end(q);
 940
 941	return err;
 942}
 943
 944/*
 945 * Switch this queue to the given IO scheduler.
 946 */
 947static int __elevator_change(struct request_queue *q, const char *name)
 948{
 949	char elevator_name[ELV_NAME_MAX];
 950	struct elevator_type *e;
 951
 952	if (!q->elevator)
 953		return -ENXIO;
 954
 955	strlcpy(elevator_name, name, sizeof(elevator_name));
 956	e = elevator_get(strstrip(elevator_name), true);
 957	if (!e) {
 958		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
 959		return -EINVAL;
 960	}
 961
 962	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
 963		elevator_put(e);
 964		return 0;
 965	}
 966
 967	return elevator_switch(q, e);
 968}
 969
 970int elevator_change(struct request_queue *q, const char *name)
 971{
 972	int ret;
 973
 974	/* Protect q->elevator from elevator_init() */
 975	mutex_lock(&q->sysfs_lock);
 976	ret = __elevator_change(q, name);
 977	mutex_unlock(&q->sysfs_lock);
 978
 979	return ret;
 980}
 981EXPORT_SYMBOL(elevator_change);
 982
 983ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 984			  size_t count)
 985{
 986	int ret;
 987
 988	if (!q->elevator)
 989		return count;
 990
 991	ret = __elevator_change(q, name);
 992	if (!ret)
 993		return count;
 994
 995	printk(KERN_ERR "elevator: switch to %s failed\n", name);
 996	return ret;
 997}
 998
 999ssize_t elv_iosched_show(struct request_queue *q, char *name)
1000{
1001	struct elevator_queue *e = q->elevator;
1002	struct elevator_type *elv;
1003	struct elevator_type *__e;
1004	int len = 0;
1005
1006	if (!q->elevator || !blk_queue_stackable(q))
1007		return sprintf(name, "none\n");
1008
1009	elv = e->type;
1010
1011	spin_lock(&elv_list_lock);
1012	list_for_each_entry(__e, &elv_list, list) {
1013		if (!strcmp(elv->elevator_name, __e->elevator_name))
1014			len += sprintf(name+len, "[%s] ", elv->elevator_name);
1015		else
1016			len += sprintf(name+len, "%s ", __e->elevator_name);
1017	}
1018	spin_unlock(&elv_list_lock);
1019
1020	len += sprintf(len+name, "\n");
1021	return len;
1022}
1023
1024struct request *elv_rb_former_request(struct request_queue *q,
1025				      struct request *rq)
1026{
1027	struct rb_node *rbprev = rb_prev(&rq->rb_node);
1028
1029	if (rbprev)
1030		return rb_entry_rq(rbprev);
1031
1032	return NULL;
1033}
1034EXPORT_SYMBOL(elv_rb_former_request);
1035
1036struct request *elv_rb_latter_request(struct request_queue *q,
1037				      struct request *rq)
1038{
1039	struct rb_node *rbnext = rb_next(&rq->rb_node);
1040
1041	if (rbnext)
1042		return rb_entry_rq(rbnext);
1043
1044	return NULL;
1045}
1046EXPORT_SYMBOL(elv_rb_latter_request);