block/elevator.c (v3.5.6)
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
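
/*
 * Note that rq_hash_key() is the sector immediately past the end of rq:
 * hashing requests by their end sector means a lookup on a bio's start
 * sector (see elv_merge()) finds a request the bio can be appended to,
 * i.e. a back merge candidate.
 */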

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_merge_fn)
		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
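
/*
 * Usage: booting with e.g. "elevator=noop" stores the name in
 * chosen_elevator, which elevator_init() below falls back to whenever a
 * queue is initialized without an explicit scheduler name.
 */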

static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	q->elevator = elevator_alloc(q, e);
	if (!q->elevator)
		return -ENOMEM;

	err = e->ops.elevator_init_fn(q);
	if (err) {
		kobject_put(&q->elevator->kobj);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
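
/*
 * The RB-tree helpers above are shared by I/O schedulers (e.g. deadline
 * and cfq) that keep their requests sorted by start sector; elv_rb_find()
 * is the exact-sector lookup they use for front merge candidates.
 */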

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
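
/*
 * Of the two dispatch helpers, elv_dispatch_sort() inserts rq in one-way
 * elevator order relative to what is already queued, treating
 * q->end_sector as the wrap boundary, while elv_dispatch_add_tail()
 * simply appends and advances that boundary.
 */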

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.elevator_merge_fn)
		return e->type->ops.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
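
/*
 * elv_merge() returns ELEVATOR_BACK_MERGE or ELEVATOR_FRONT_MERGE with
 * *req set to the request the bio should be merged into, or
 * ELEVATOR_NO_MERGE if no candidate was found.
 */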

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
	if (__rq && blk_attempt_req_merge(q, __rq, rq))
		return true;

	return false;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_merged_fn)
		e->type->ops.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->type->ops.elevator_merge_req_fn)
		e->type->ops.elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_bio_merged_fn)
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS ||
		    (rq->cmd_flags & REQ_DISCARD)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
		       !(rq->cmd_flags & REQ_DISCARD));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_latter_req_fn)
		return e->type->ops.elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_former_req_fn)
		return e->type->ops.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_set_req_fn)
		return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_put_req_fn)
		e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_may_queue_fn)
		return e->type->ops.elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->type->ops.elevator_completed_req_fn)
			e->type->ops.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool registered = old->registered;
	int err;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	blk_queue_bypass_start(q);

	/* unregister and clear all auxiliary data of the old elevator */
	if (registered)
		elv_unregister_queue(q);

	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	spin_unlock_irq(q->queue_lock);

	/* allocate, init and register new elevator */
	err = -ENOMEM;
	q->elevator = elevator_alloc(q, new_e);
	if (!q->elevator)
		goto fail_init;

	err = new_e->ops.elevator_init_fn(q);
	if (err) {
		kobject_put(&q->elevator->kobj);
		goto fail_init;
	}

	if (registered) {
		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/* done, kill the old one and finish */
	elevator_exit(old);
	blk_queue_bypass_end(q);

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	q->elevator = old;
	elv_register_queue(q);
	blk_queue_bypass_end(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);
block/elevator.c (v3.15)
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_merge_fn)
		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		goto err;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	/*
	 * q->sysfs_lock must be held to provide mutual exclusion between
	 * elevator_switch() and here.
	 */
	lockdep_assert_held(&q->sysfs_lock);

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name, true);
		if (!e)
			return -EINVAL;
	}

	/*
	 * Use the default elevator specified by config boot param or
	 * config option.  Don't try to load modules as we could be running
	 * off async and request_module() isn't allowed from async.
	 */
	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop", false);
		}
	}

	err = e->ops.elevator_init_fn(q, e);
	return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->cmd_flags &= ~REQ_HASHED;
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->cmd_flags |= REQ_HASHED;
}
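
/*
 * In this version hash membership is tracked explicitly: REQ_HASHED is
 * set/cleared alongside hash_add()/hash_del(), and ELV_ON_HASH() tests
 * that flag, so a request is never unhashed twice.
 */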

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.elevator_merge_fn)
		return e->type->ops.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}
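
/*
 * The loop above chains back merges: once rq has been appended to __rq,
 * the combined request may itself be appendable to yet another queued
 * request, so the hash lookup is retried with the surviving request.
 */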

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_merged_fn)
		e->type->ops.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->type->ops.elevator_merge_req_fn)
		e->type->ops.elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_bio_merged_fn)
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}

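/*
 * Runtime PM bookkeeping: q->nr_pending counts queued non-PM requests,
 * and the arrival of the first one while the device is suspended (or
 * suspending) schedules a resume.
 */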
#ifdef CONFIG_PM_RUNTIME
static void blk_pm_requeue_request(struct request *rq)
{
	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
		rq->q->nr_pending--;
}

static void blk_pm_add_request(struct request_queue *q, struct request *rq)
{
	if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
		pm_request_resume(q->dev);
}
#else
static inline void blk_pm_requeue_request(struct request *rq) {}
static inline void blk_pm_add_request(struct request_queue *q,
				      struct request *rq)
{
}
#endif

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	blk_pm_requeue_request(rq);

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	blk_pm_add_request(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_latter_req_fn)
		return e->type->ops.elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_former_req_fn)
		return e->type->ops.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_set_req_fn)
		return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_put_req_fn)
		e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_may_queue_fn)
		return e->type->ops.elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->type->ops.elevator_completed_req_fn)
			e->type->ops.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool registered = old->registered;
	int err;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	blk_queue_bypass_start(q);

	/* unregister and clear all auxiliary data of the old elevator */
	if (registered)
		elv_unregister_queue(q);

	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	spin_unlock_irq(q->queue_lock);

	/* allocate, init and register new elevator */
	err = new_e->ops.elevator_init_fn(q, new_e);
	if (err)
		goto fail_init;

	if (registered) {
		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/* done, kill the old one and finish */
	elevator_exit(old);
	blk_queue_bypass_end(q);

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	q->elevator = old;
	elv_register_queue(q);
	blk_queue_bypass_end(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name), true);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}

int elevator_change(struct request_queue *q, const char *name)
{
	int ret;

	/* Protect q->elevator from elevator_init() */
	mutex_lock(&q->sysfs_lock);
	ret = __elevator_change(q, name);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL(elevator_change);

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}
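
/*
 * elv_iosched_store() above and elv_iosched_show() below back the queue's
 * "scheduler" sysfs attribute, so e.g. writing "noop" to
 * /sys/block/<dev>/queue/scheduler ends up in __elevator_change().
 */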

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);