   1/*
   2 *  CFQ, or complete fairness queueing, disk scheduler.
   3 *
   4 *  Based on ideas from a previously unfinished io
   5 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
   6 *
   7 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   8 */
   9#include <linux/module.h>
  10#include <linux/slab.h>
  11#include <linux/blkdev.h>
  12#include <linux/elevator.h>
  13#include <linux/jiffies.h>
  14#include <linux/rbtree.h>
  15#include <linux/ioprio.h>
  16#include <linux/blktrace_api.h>
  17#include "blk.h"
  18#include "blk-cgroup.h"
  19
  20/*
  21 * tunables
  22 */
  23/* max queue in one round of service */
  24static const int cfq_quantum = 8;
  25static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  26/* maximum backwards seek, in KiB */
  27static const int cfq_back_max = 16 * 1024;
  28/* penalty of a backwards seek */
  29static const int cfq_back_penalty = 2;
  30static const int cfq_slice_sync = HZ / 10;
  31static int cfq_slice_async = HZ / 25;
  32static const int cfq_slice_async_rq = 2;
  33static int cfq_slice_idle = HZ / 125;
  34static int cfq_group_idle = HZ / 125;
  35static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
  36static const int cfq_hist_divisor = 4;
  37
  38/*
  39 * offset from end of service tree
  40 */
  41#define CFQ_IDLE_DELAY		(HZ / 5)
  42
  43/*
  44 * below this threshold, we consider thinktime immediate
  45 */
  46#define CFQ_MIN_TT		(2)
  47
  48#define CFQ_SLICE_SCALE		(5)
  49#define CFQ_HW_QUEUE_MIN	(5)
  50#define CFQ_SERVICE_SHIFT       12
  51
  52#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
  53#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
  54#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
  55#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
  56
  57#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
  58#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
  59#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])
  60
  61static struct kmem_cache *cfq_pool;
  62
  63#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
  64#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  65#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
  66
  67#define sample_valid(samples)	((samples) > 80)
  68#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)
  69
  70struct cfq_ttime {
  71	unsigned long last_end_request;
  72
  73	unsigned long ttime_total;
  74	unsigned long ttime_samples;
  75	unsigned long ttime_mean;
  76};
  77
  78/*
  79 * Most of our rbtree usage is for sorting with min extraction, so
  80 * if we cache the leftmost node we don't have to walk down the tree
  81 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
  82 * move this into the elevator for the rq sorting as well.
  83 */
  84struct cfq_rb_root {
  85	struct rb_root rb;
  86	struct rb_node *left;
  87	unsigned count;
  88	u64 min_vdisktime;
  89	struct cfq_ttime ttime;
  90};
  91#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
  92			.ttime = {.last_end_request = jiffies,},}
  93
  94/*
  95 * Per process-grouping structure
  96 */
  97struct cfq_queue {
  98	/* reference count */
  99	int ref;
 100	/* various state flags, see below */
 101	unsigned int flags;
 102	/* parent cfq_data */
 103	struct cfq_data *cfqd;
 104	/* service_tree member */
 105	struct rb_node rb_node;
 106	/* service_tree key */
 107	unsigned long rb_key;
 108	/* prio tree member */
 109	struct rb_node p_node;
 110	/* prio tree root we belong to, if any */
 111	struct rb_root *p_root;
 112	/* sorted list of pending requests */
 113	struct rb_root sort_list;
 114	/* if fifo isn't expired, next request to serve */
 115	struct request *next_rq;
 116	/* requests queued in sort_list */
 117	int queued[2];
 118	/* currently allocated requests */
 119	int allocated[2];
 120	/* fifo list of requests in sort_list */
 121	struct list_head fifo;
 122
 123	/* time when queue got scheduled in to dispatch first request. */
 124	unsigned long dispatch_start;
 125	unsigned int allocated_slice;
 126	unsigned int slice_dispatch;
 127	/* time when first request from queue completed and slice started. */
 128	unsigned long slice_start;
 129	unsigned long slice_end;
 130	long slice_resid;
 131
 132	/* pending priority requests */
 133	int prio_pending;
 134	/* number of requests that are on the dispatch list or inside driver */
 135	int dispatched;
 136
 137	/* io prio of this group */
 138	unsigned short ioprio, org_ioprio;
 139	unsigned short ioprio_class;
 140
 141	pid_t pid;
 142
 143	u32 seek_history;
 144	sector_t last_request_pos;
 145
 146	struct cfq_rb_root *service_tree;
 147	struct cfq_queue *new_cfqq;
 148	struct cfq_group *cfqg;
 149	/* Number of sectors dispatched from queue in single dispatch round */
 150	unsigned long nr_sectors;
 151};
 152
 153/*
 154 * First index in the service_trees.
 155 * IDLE is handled separately; it is kept on its own service_tree_idle
 156 */
 157enum wl_class_t {
 158	BE_WORKLOAD = 0,
 159	RT_WORKLOAD = 1,
 160	IDLE_WORKLOAD = 2,
 161	CFQ_PRIO_NR,
 162};
 163
 164/*
 165 * Second index in the service_trees.
 166 */
 167enum wl_type_t {
 168	ASYNC_WORKLOAD = 0,
 169	SYNC_NOIDLE_WORKLOAD = 1,
 170	SYNC_WORKLOAD = 2
 171};
 172
 173struct cfqg_stats {
 174#ifdef CONFIG_CFQ_GROUP_IOSCHED
 175	/* total bytes transferred */
 176	struct blkg_rwstat		service_bytes;
 177	/* total IOs serviced, post merge */
 178	struct blkg_rwstat		serviced;
 179	/* number of ios merged */
 180	struct blkg_rwstat		merged;
 181	/* total time spent on device in ns, may not be accurate w/ queueing */
 182	struct blkg_rwstat		service_time;
 183	/* total time spent waiting in scheduler queue in ns */
 184	struct blkg_rwstat		wait_time;
 185	/* number of IOs queued up */
 186	struct blkg_rwstat		queued;
 187	/* total sectors transferred */
 188	struct blkg_stat		sectors;
 189	/* total disk time and nr sectors dispatched by this group */
 190	struct blkg_stat		time;
 191#ifdef CONFIG_DEBUG_BLK_CGROUP
 192	/* time not charged to this cgroup */
 193	struct blkg_stat		unaccounted_time;
 194	/* sum of number of ios queued across all samples */
 195	struct blkg_stat		avg_queue_size_sum;
 196	/* count of samples taken for average */
 197	struct blkg_stat		avg_queue_size_samples;
 198	/* how many times this group has been removed from service tree */
 199	struct blkg_stat		dequeue;
 200	/* total time spent waiting for it to be assigned a timeslice. */
 201	struct blkg_stat		group_wait_time;
 202	/* time spent idling for this blkcg_gq */
 203	struct blkg_stat		idle_time;
 204	/* total time with empty current active q with other requests queued */
 205	struct blkg_stat		empty_time;
 206	/* fields after this shouldn't be cleared on stat reset */
 207	uint64_t			start_group_wait_time;
 208	uint64_t			start_idle_time;
 209	uint64_t			start_empty_time;
 210	uint16_t			flags;
 211#endif	/* CONFIG_DEBUG_BLK_CGROUP */
 212#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 213};
 214
 215/* This is per cgroup per device grouping structure */
 216struct cfq_group {
 217	/* must be the first member */
 218	struct blkg_policy_data pd;
 219
 220	/* group service_tree member */
 221	struct rb_node rb_node;
 222
 223	/* group service_tree key */
 224	u64 vdisktime;
 225
 226	/*
 227	 * The number of active cfqgs and sum of their weights under this
 228	 * cfqg.  This covers this cfqg's leaf_weight and all children's
 229	 * weights, but does not cover weights of further descendants.
 230	 *
 231	 * If a cfqg is on the service tree, it's active.  An active cfqg
 232	 * also activates its parent and contributes to the children_weight
 233	 * of the parent.
 234	 */
 235	int nr_active;
 236	unsigned int children_weight;
 237
 238	/*
 239	 * vfraction is the fraction of vdisktime that the tasks in this
 240	 * cfqg are entitled to.  This is determined by compounding the
 241	 * ratios walking up from this cfqg to the root.
 242	 *
 243	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
 244	 * vfractions on a service tree is approximately 1.  The sum may
 245	 * deviate a bit due to rounding errors and fluctuations caused by
 246	 * cfqgs entering and leaving the service tree.
 247	 */
 248	unsigned int vfraction;
 249
 250	/*
 251	 * There are two weights - (internal) weight is the weight of this
 252	 * cfqg against the sibling cfqgs.  leaf_weight is the weight of
 253	 * this cfqg against the child cfqgs.  For the root cfqg, both
 254	 * weights are kept in sync for backward compatibility.
 255	 */
 256	unsigned int weight;
 257	unsigned int new_weight;
 258	unsigned int dev_weight;
 259
 260	unsigned int leaf_weight;
 261	unsigned int new_leaf_weight;
 262	unsigned int dev_leaf_weight;
 263
 264	/* number of cfqq currently on this group */
 265	int nr_cfqq;
 266
 267	/*
 268	 * Per group busy queues average. Useful for workload slice calc. We
 269	 * create the array for each prio class but at run time it is used
 270	 * only for RT and BE class and slot for IDLE class remains unused.
 271	 * This is primarily done to avoid confusion and a gcc warning.
 272	 */
 273	unsigned int busy_queues_avg[CFQ_PRIO_NR];
 274	/*
 275	 * rr lists of queues with requests. We maintain service trees for
 276	 * RT and BE classes. These trees are subdivided in subclasses
 277	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
 278	 * class there is no subclassification and all the cfq queues go on
 279	 * a single tree service_tree_idle.
 280	 * Counts are embedded in the cfq_rb_root
 281	 */
 282	struct cfq_rb_root service_trees[2][3];
 283	struct cfq_rb_root service_tree_idle;
 284
 285	unsigned long saved_wl_slice;
 286	enum wl_type_t saved_wl_type;
 287	enum wl_class_t saved_wl_class;
 288
 289	/* number of requests that are on the dispatch list or inside driver */
 290	int dispatched;
 291	struct cfq_ttime ttime;
 292	struct cfqg_stats stats;	/* stats for this cfqg */
 293	struct cfqg_stats dead_stats;	/* stats pushed from dead children */
 294};
 295
 296struct cfq_io_cq {
 297	struct io_cq		icq;		/* must be the first member */
 298	struct cfq_queue	*cfqq[2];
 299	struct cfq_ttime	ttime;
 300	int			ioprio;		/* the current ioprio */
 301#ifdef CONFIG_CFQ_GROUP_IOSCHED
 302	uint64_t		blkcg_id;	/* the current blkcg ID */
 303#endif
 304};
 305
 306/*
 307 * Per block device queue structure
 308 */
 309struct cfq_data {
 310	struct request_queue *queue;
 311	/* Root service tree for cfq_groups */
 312	struct cfq_rb_root grp_service_tree;
 313	struct cfq_group *root_group;
 314
 315	/*
 316	 * The priority currently being served
 317	 */
 318	enum wl_class_t serving_wl_class;
 319	enum wl_type_t serving_wl_type;
 320	unsigned long workload_expires;
 321	struct cfq_group *serving_group;
 322
 323	/*
 324	 * Each priority tree is sorted by next_request position.  These
 325	 * trees are used when determining if two or more queues are
 326	 * interleaving requests (see cfq_close_cooperator).
 327	 */
 328	struct rb_root prio_trees[CFQ_PRIO_LISTS];
 329
 330	unsigned int busy_queues;
 331	unsigned int busy_sync_queues;
 332
 333	int rq_in_driver;
 334	int rq_in_flight[2];
 335
 336	/*
 337	 * queue-depth detection
 338	 */
 339	int rq_queued;
 340	int hw_tag;
 341	/*
 342	 * hw_tag can be
 343	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
 344	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
 345	 *  0 => no NCQ
 346	 */
 347	int hw_tag_est_depth;
 348	unsigned int hw_tag_samples;
 349
 350	/*
 351	 * idle window management
 352	 */
 353	struct timer_list idle_slice_timer;
 354	struct work_struct unplug_work;
 355
 356	struct cfq_queue *active_queue;
 357	struct cfq_io_cq *active_cic;
 358
 359	/*
 360	 * async queue for each priority case
 361	 */
 362	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
 363	struct cfq_queue *async_idle_cfqq;
 364
 365	sector_t last_position;
 366
 367	/*
 368	 * tunables, see top of file
 369	 */
 370	unsigned int cfq_quantum;
 371	unsigned int cfq_fifo_expire[2];
 372	unsigned int cfq_back_penalty;
 373	unsigned int cfq_back_max;
 374	unsigned int cfq_slice[2];
 375	unsigned int cfq_slice_async_rq;
 376	unsigned int cfq_slice_idle;
 377	unsigned int cfq_group_idle;
 378	unsigned int cfq_latency;
 379	unsigned int cfq_target_latency;
 380
 381	/*
 382	 * Fallback dummy cfqq for extreme OOM conditions
 383	 */
 384	struct cfq_queue oom_cfqq;
 385
 386	unsigned long last_delayed_sync;
 387};
 388
 389static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 390
 391static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
 392					    enum wl_class_t class,
 393					    enum wl_type_t type)
 394{
 395	if (!cfqg)
 396		return NULL;
 397
 398	if (class == IDLE_WORKLOAD)
 399		return &cfqg->service_tree_idle;
 400
 401	return &cfqg->service_trees[class][type];
 402}
 403
 404enum cfqq_state_flags {
 405	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
 406	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
 407	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
 408	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
 409	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
 410	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
 411	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
 412	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 413	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 414	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
 415	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
 416	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
 417	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
 418};
 419
 420#define CFQ_CFQQ_FNS(name)						\
 421static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
 422{									\
 423	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
 424}									\
 425static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
 426{									\
 427	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
 428}									\
 429static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
 430{									\
 431	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
 432}
 433
 434CFQ_CFQQ_FNS(on_rr);
 435CFQ_CFQQ_FNS(wait_request);
 436CFQ_CFQQ_FNS(must_dispatch);
 437CFQ_CFQQ_FNS(must_alloc_slice);
 438CFQ_CFQQ_FNS(fifo_expire);
 439CFQ_CFQQ_FNS(idle_window);
 440CFQ_CFQQ_FNS(prio_changed);
 441CFQ_CFQQ_FNS(slice_new);
 442CFQ_CFQQ_FNS(sync);
 443CFQ_CFQQ_FNS(coop);
 444CFQ_CFQQ_FNS(split_coop);
 445CFQ_CFQQ_FNS(deep);
 446CFQ_CFQQ_FNS(wait_busy);
 447#undef CFQ_CFQQ_FNS
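
/*
 * For illustration: each CFQ_CFQQ_FNS(name) invocation above generates a
 * mark/clear/test helper triple for one cfqq_state_flags bit.  For example,
 * CFQ_CFQQ_FNS(on_rr) expands to roughly:
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 */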
 448
 449static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
 450{
 451	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 452}
 453
 454static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 455{
 456	return pd_to_blkg(&cfqg->pd);
 457}
 458
 459#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 460
 461/* cfqg stats flags */
 462enum cfqg_stats_flags {
 463	CFQG_stats_waiting = 0,
 464	CFQG_stats_idling,
 465	CFQG_stats_empty,
 466};
 467
 468#define CFQG_FLAG_FNS(name)						\
 469static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
 470{									\
 471	stats->flags |= (1 << CFQG_stats_##name);			\
 472}									\
 473static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
 474{									\
 475	stats->flags &= ~(1 << CFQG_stats_##name);			\
 476}									\
 477static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
 478{									\
 479	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
 480}									\
 481
 482CFQG_FLAG_FNS(waiting)
 483CFQG_FLAG_FNS(idling)
 484CFQG_FLAG_FNS(empty)
 485#undef CFQG_FLAG_FNS
 486
 487/* This should be called with the queue_lock held. */
 488static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
 489{
 490	unsigned long long now;
 491
 492	if (!cfqg_stats_waiting(stats))
 493		return;
 494
 495	now = sched_clock();
 496	if (time_after64(now, stats->start_group_wait_time))
 497		blkg_stat_add(&stats->group_wait_time,
 498			      now - stats->start_group_wait_time);
 499	cfqg_stats_clear_waiting(stats);
 500}
 501
 502/* This should be called with the queue_lock held. */
 503static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
 504						 struct cfq_group *curr_cfqg)
 505{
 506	struct cfqg_stats *stats = &cfqg->stats;
 507
 508	if (cfqg_stats_waiting(stats))
 509		return;
 510	if (cfqg == curr_cfqg)
 511		return;
 512	stats->start_group_wait_time = sched_clock();
 513	cfqg_stats_mark_waiting(stats);
 514}
 515
 516/* This should be called with the queue_lock held. */
 517static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
 518{
 519	unsigned long long now;
 520
 521	if (!cfqg_stats_empty(stats))
 522		return;
 523
 524	now = sched_clock();
 525	if (time_after64(now, stats->start_empty_time))
 526		blkg_stat_add(&stats->empty_time,
 527			      now - stats->start_empty_time);
 528	cfqg_stats_clear_empty(stats);
 529}
 530
 531static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
 532{
 533	blkg_stat_add(&cfqg->stats.dequeue, 1);
 534}
 535
 536static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
 537{
 538	struct cfqg_stats *stats = &cfqg->stats;
 539
 540	if (blkg_rwstat_total(&stats->queued))
 541		return;
 542
 543	/*
 544	 * group is already marked empty. This can happen if cfqq got new
 545	 * request in parent group and moved to this group while being added
 546	 * to service tree. Just ignore the event and move on.
 547	 */
 548	if (cfqg_stats_empty(stats))
 549		return;
 550
 551	stats->start_empty_time = sched_clock();
 552	cfqg_stats_mark_empty(stats);
 553}
 554
 555static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
 556{
 557	struct cfqg_stats *stats = &cfqg->stats;
 558
 559	if (cfqg_stats_idling(stats)) {
 560		unsigned long long now = sched_clock();
 561
 562		if (time_after64(now, stats->start_idle_time))
 563			blkg_stat_add(&stats->idle_time,
 564				      now - stats->start_idle_time);
 565		cfqg_stats_clear_idling(stats);
 566	}
 567}
 568
 569static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
 570{
 571	struct cfqg_stats *stats = &cfqg->stats;
 572
 573	BUG_ON(cfqg_stats_idling(stats));
 574
 575	stats->start_idle_time = sched_clock();
 576	cfqg_stats_mark_idling(stats);
 577}
 578
 579static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
 580{
 581	struct cfqg_stats *stats = &cfqg->stats;
 582
 583	blkg_stat_add(&stats->avg_queue_size_sum,
 584		      blkg_rwstat_total(&stats->queued));
 585	blkg_stat_add(&stats->avg_queue_size_samples, 1);
 586	cfqg_stats_update_group_wait_time(stats);
 587}
 588
 589#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 590
 591static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
 592static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
 593static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
 594static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
 595static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
 596static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
 597static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 598
 599#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 600
 601#ifdef CONFIG_CFQ_GROUP_IOSCHED
 602
 603static struct blkcg_policy blkcg_policy_cfq;
 604
 605static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
 606{
 607	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
 608}
 609
 610static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
 611{
 612	struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
 613
 614	return pblkg ? blkg_to_cfqg(pblkg) : NULL;
 615}
 616
 617static inline void cfqg_get(struct cfq_group *cfqg)
 618{
 619	return blkg_get(cfqg_to_blkg(cfqg));
 620}
 621
 622static inline void cfqg_put(struct cfq_group *cfqg)
 623{
 624	return blkg_put(cfqg_to_blkg(cfqg));
 625}
 626
 627#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
 628	char __pbuf[128];						\
 629									\
 630	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
 631	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
 632			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
 633			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
 634			  __pbuf, ##args);				\
 635} while (0)
 636
 637#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
 638	char __pbuf[128];						\
 639									\
 640	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
 641	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
 642} while (0)
 643
 644static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
 645					    struct cfq_group *curr_cfqg, int rw)
 646{
 647	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
 648	cfqg_stats_end_empty_time(&cfqg->stats);
 649	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
 650}
 651
 652static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 653			unsigned long time, unsigned long unaccounted_time)
 654{
 655	blkg_stat_add(&cfqg->stats.time, time);
 656#ifdef CONFIG_DEBUG_BLK_CGROUP
 657	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
 658#endif
 659}
 660
 661static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
 662{
 663	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
 664}
 665
 666static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
 667{
 668	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
 669}
 670
 671static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
 672					      uint64_t bytes, int rw)
 673{
 674	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
 675	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
 676	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
 677}
 678
 679static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 680			uint64_t start_time, uint64_t io_start_time, int rw)
 681{
 682	struct cfqg_stats *stats = &cfqg->stats;
 683	unsigned long long now = sched_clock();
 684
 685	if (time_after64(now, io_start_time))
 686		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
 687	if (time_after64(io_start_time, start_time))
 688		blkg_rwstat_add(&stats->wait_time, rw,
 689				io_start_time - start_time);
 690}
 691
 692/* @stats = 0 */
 693static void cfqg_stats_reset(struct cfqg_stats *stats)
 694{
 695	/* queued stats shouldn't be cleared */
 696	blkg_rwstat_reset(&stats->service_bytes);
 697	blkg_rwstat_reset(&stats->serviced);
 698	blkg_rwstat_reset(&stats->merged);
 699	blkg_rwstat_reset(&stats->service_time);
 700	blkg_rwstat_reset(&stats->wait_time);
 701	blkg_stat_reset(&stats->time);
 702#ifdef CONFIG_DEBUG_BLK_CGROUP
 703	blkg_stat_reset(&stats->unaccounted_time);
 704	blkg_stat_reset(&stats->avg_queue_size_sum);
 705	blkg_stat_reset(&stats->avg_queue_size_samples);
 706	blkg_stat_reset(&stats->dequeue);
 707	blkg_stat_reset(&stats->group_wait_time);
 708	blkg_stat_reset(&stats->idle_time);
 709	blkg_stat_reset(&stats->empty_time);
 710#endif
 711}
 712
 713/* @to += @from */
 714static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
 715{
 716	/* queued stats shouldn't be cleared */
 717	blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
 718	blkg_rwstat_merge(&to->serviced, &from->serviced);
 719	blkg_rwstat_merge(&to->merged, &from->merged);
 720	blkg_rwstat_merge(&to->service_time, &from->service_time);
 721	blkg_rwstat_merge(&to->wait_time, &from->wait_time);
 722	blkg_stat_merge(&to->time, &from->time);
 723#ifdef CONFIG_DEBUG_BLK_CGROUP
 724	blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
 725	blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
 726	blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
 727	blkg_stat_merge(&to->dequeue, &from->dequeue);
 728	blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
 729	blkg_stat_merge(&to->idle_time, &from->idle_time);
 730	blkg_stat_merge(&to->empty_time, &from->empty_time);
 731#endif
 732}
 733
 734/*
 735 * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
 736 * recursive stats can still account for the amount used by this cfqg after
 737 * it's gone.
 738 */
 739static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
 740{
 741	struct cfq_group *parent = cfqg_parent(cfqg);
 742
 743	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
 744
 745	if (unlikely(!parent))
 746		return;
 747
 748	cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
 749	cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
 750	cfqg_stats_reset(&cfqg->stats);
 751	cfqg_stats_reset(&cfqg->dead_stats);
 752}
 753
 754#else	/* CONFIG_CFQ_GROUP_IOSCHED */
 755
 756static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
 757static inline void cfqg_get(struct cfq_group *cfqg) { }
 758static inline void cfqg_put(struct cfq_group *cfqg) { }
 759
 760#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
 761	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid,	\
 762			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
 763			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
 764				##args)
 765#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
 766
 767static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
 768			struct cfq_group *curr_cfqg, int rw) { }
 769static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 770			unsigned long time, unsigned long unaccounted_time) { }
 771static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
 772static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
 773static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
 774					      uint64_t bytes, int rw) { }
 775static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 776			uint64_t start_time, uint64_t io_start_time, int rw) { }
 777
 778#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 779
 780#define cfq_log(cfqd, fmt, args...)	\
 781	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 782
 783/* Traverses through cfq group service trees */
 784#define for_each_cfqg_st(cfqg, i, j, st) \
 785	for (i = 0; i <= IDLE_WORKLOAD; i++) \
 786		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
 787			: &cfqg->service_tree_idle; \
 788			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
 789			(i == IDLE_WORKLOAD && j == 0); \
 790			j++, st = i < IDLE_WORKLOAD ? \
 791			&cfqg->service_trees[i][j]: NULL) \
 792
 793static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
 794	struct cfq_ttime *ttime, bool group_idle)
 795{
 796	unsigned long slice;
 797	if (!sample_valid(ttime->ttime_samples))
 798		return false;
 799	if (group_idle)
 800		slice = cfqd->cfq_group_idle;
 801	else
 802		slice = cfqd->cfq_slice_idle;
 803	return ttime->ttime_mean > slice;
 804}
 805
 806static inline bool iops_mode(struct cfq_data *cfqd)
 807{
 808	/*
 809	 * If we are not idling on queues and it is an NCQ drive, requests are
 810	 * executed in parallel and measuring time is not possible in most
 811	 * cases unless we drive shallow queue depths, which itself becomes a
 812	 * performance bottleneck. In such cases switch to providing fairness
 813	 * in terms of number of IOs.
 814	 */
 815	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
 816		return true;
 817	else
 818		return false;
 819}
 820
 821static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
 822{
 823	if (cfq_class_idle(cfqq))
 824		return IDLE_WORKLOAD;
 825	if (cfq_class_rt(cfqq))
 826		return RT_WORKLOAD;
 827	return BE_WORKLOAD;
 828}
 829
 830
 831static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
 832{
 833	if (!cfq_cfqq_sync(cfqq))
 834		return ASYNC_WORKLOAD;
 835	if (!cfq_cfqq_idle_window(cfqq))
 836		return SYNC_NOIDLE_WORKLOAD;
 837	return SYNC_WORKLOAD;
 838}
 839
 840static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
 841					struct cfq_data *cfqd,
 842					struct cfq_group *cfqg)
 843{
 844	if (wl_class == IDLE_WORKLOAD)
 845		return cfqg->service_tree_idle.count;
 846
 847	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
 848		cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
 849		cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
 850}
 851
 852static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 853					struct cfq_group *cfqg)
 854{
 855	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
 856		cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
 857}
 858
 859static void cfq_dispatch_insert(struct request_queue *, struct request *);
 860static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
 861				       struct cfq_io_cq *cic, struct bio *bio,
 862				       gfp_t gfp_mask);
 863
 864static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 865{
 866	/* cic->icq is the first member, %NULL will convert to %NULL */
 867	return container_of(icq, struct cfq_io_cq, icq);
 868}
 869
 870static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
 871					       struct io_context *ioc)
 872{
 873	if (ioc)
 874		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
 875	return NULL;
 876}
 877
 878static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
 879{
 880	return cic->cfqq[is_sync];
 881}
 882
 883static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
 884				bool is_sync)
 885{
 886	cic->cfqq[is_sync] = cfqq;
 887}
 888
 889static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
 890{
 891	return cic->icq.q->elevator->elevator_data;
 892}
 893
 894/*
 895 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 896 * set (in which case it could also be direct WRITE).
 897 */
 898static inline bool cfq_bio_sync(struct bio *bio)
 899{
 900	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 901}
 902
 903/*
 904 * Schedule a run of the queue if there are requests pending and no one in
 905 * the driver will restart queueing.
 906 */
 907static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 908{
 909	if (cfqd->busy_queues) {
 910		cfq_log(cfqd, "schedule dispatch");
 911		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 912	}
 913}
 914
 915/*
 916 * Scale schedule slice based on io priority. Use the sync time slice only
 917 * if a queue is marked sync and has sync io queued. A sync queue with async
 918 * io only should not get the full sync slice length.
 919 */
 920static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 921				 unsigned short prio)
 922{
 923	const int base_slice = cfqd->cfq_slice[sync];
 924
 925	WARN_ON(prio >= IOPRIO_BE_NR);
 926
 927	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
 928}
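
/*
 * Worked example for cfq_prio_slice(), assuming HZ == 1000 so the default
 * sync base slice cfq_slice[1] == HZ/10 == 100 jiffies: the per-priority
 * step is base_slice / CFQ_SLICE_SCALE == 100 / 5 == 20 jiffies, giving
 *
 *	prio 0: 100 + 20 * (4 - 0) = 180 jiffies
 *	prio 4: 100 + 20 * (4 - 4) = 100 jiffies  (default priority)
 *	prio 7: 100 + 20 * (4 - 7) =  40 jiffies
 *
 * i.e. every ioprio level away from the default adds or removes one fifth
 * of the base slice.
 */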
 929
 930static inline int
 931cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 932{
 933	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 934}
 935
 936/**
 937 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 938 * @charge: disk time being charged
 939 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 940 *
 941 * Scale @charge according to @vfraction, which is in range (0, 1].  The
 942 * scaling is inversely proportional.
 943 *
 944 * scaled = charge / vfraction
 945 *
 946 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 947 */
 948static inline u64 cfqg_scale_charge(unsigned long charge,
 949				    unsigned int vfraction)
 950{
 951	u64 c = charge << CFQ_SERVICE_SHIFT;	/* make it fixed point */
 952
 953	/* charge / vfraction */
 954	c <<= CFQ_SERVICE_SHIFT;
 955	do_div(c, vfraction);
 956	return c;
 957}
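
/*
 * Worked example for cfqg_scale_charge(): with CFQ_SERVICE_SHIFT == 12, a
 * charge of 100 jiffies against a group whose vfraction is 1 << 11 (half of
 * the device) gives
 *
 *	c = ((100 << 12) << 12) / (1 << 11) = 200 << 12
 *
 * i.e. the group's vdisktime advances as if 200 jiffies had been used, in
 * CFQ_SERVICE_SHIFT fixed point.  A group owning the whole device
 * (vfraction == 1 << 12) is charged exactly charge << CFQ_SERVICE_SHIFT.
 */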
 958
 959static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
 960{
 961	s64 delta = (s64)(vdisktime - min_vdisktime);
 962	if (delta > 0)
 963		min_vdisktime = vdisktime;
 964
 965	return min_vdisktime;
 966}
 967
 968static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
 969{
 970	s64 delta = (s64)(vdisktime - min_vdisktime);
 971	if (delta < 0)
 972		min_vdisktime = vdisktime;
 973
 974	return min_vdisktime;
 975}
 976
 977static void update_min_vdisktime(struct cfq_rb_root *st)
 978{
 979	struct cfq_group *cfqg;
 980
 981	if (st->left) {
 982		cfqg = rb_entry_cfqg(st->left);
 983		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
 984						  cfqg->vdisktime);
 985	}
 986}
 987
 988/*
 989 * Get the averaged number of queues of RT/BE priority.
 990 * The average is updated with a formula that gives more weight to higher
 991 * numbers, so it follows sudden increases quickly and decreases slowly.
 992 */
 993
 994static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
 995					struct cfq_group *cfqg, bool rt)
 996{
 997	unsigned min_q, max_q;
 998	unsigned mult  = cfq_hist_divisor - 1;
 999	unsigned round = cfq_hist_divisor / 2;
1000	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
1001
1002	min_q = min(cfqg->busy_queues_avg[rt], busy);
1003	max_q = max(cfqg->busy_queues_avg[rt], busy);
1004	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
1005		cfq_hist_divisor;
1006	return cfqg->busy_queues_avg[rt];
1007}
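
/*
 * Worked example for cfq_group_get_avg_queues(): with cfq_hist_divisor == 4
 * the update is avg = (3 * max(avg, busy) + min(avg, busy) + 2) / 4.
 * Starting from avg == 2, a jump to busy == 6 yields (3*6 + 2 + 2)/4 == 5
 * immediately, while dropping back to busy == 0 only decays one step per
 * update (5 -> 4 -> 3 -> 2), which is the fast-rise/slow-decay behaviour
 * described above.
 */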
1008
1009static inline unsigned
1010cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
1011{
1012	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
1013}
1014
1015static inline unsigned
1016cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1017{
1018	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
1019	if (cfqd->cfq_latency) {
1020		/*
1021		 * interested queues (we consider only the ones with the same
1022		 * priority class in the cfq group)
1023		 */
1024		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
1025						cfq_class_rt(cfqq));
1026		unsigned sync_slice = cfqd->cfq_slice[1];
1027		unsigned expect_latency = sync_slice * iq;
1028		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
1029
1030		if (expect_latency > group_slice) {
1031			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
1032			/* scale low_slice according to IO priority
1033			 * and sync vs async */
1034			unsigned low_slice =
1035				min(slice, base_low_slice * slice / sync_slice);
1036			/* the adapted slice value is scaled to fit all iqs
1037			 * into the target latency */
1038			slice = max(slice * group_slice / expect_latency,
1039				    low_slice);
1040		}
1041	}
1042	return slice;
1043}
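
/*
 * Worked example for cfq_scaled_cfqq_slice(), assuming HZ == 1000 and the
 * default tunables: sync_slice == 100 jiffies and iq == 4 interested queues
 * give expect_latency == 400, while a group owning the whole device has
 * group_slice == cfq_target_latency == 300.  Since 400 > 300, a
 * default-priority 100-jiffy slice is scaled to 100 * 300 / 400 == 75
 * jiffies (never below low_slice), so one full round through the group
 * stays close to the 300 ms target latency.
 */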
1044
1045static inline void
1046cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1047{
1048	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
1049
1050	cfqq->slice_start = jiffies;
1051	cfqq->slice_end = jiffies + slice;
1052	cfqq->allocated_slice = slice;
1053	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
1054}
1055
1056/*
1057 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
1058 * isn't valid until the first request from the dispatch is activated
1059 * and the slice time set.
1060 */
1061static inline bool cfq_slice_used(struct cfq_queue *cfqq)
1062{
1063	if (cfq_cfqq_slice_new(cfqq))
1064		return false;
1065	if (time_before(jiffies, cfqq->slice_end))
1066		return false;
1067
1068	return true;
1069}
1070
1071/*
1072 * Lifted from AS - choose which of rq1 and rq2 is best served now.
1073 * We choose the request that is closest to the head right now. Distance
1074 * behind the head is penalized and only allowed to a certain extent.
1075 */
1076static struct request *
1077cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
1078{
1079	sector_t s1, s2, d1 = 0, d2 = 0;
1080	unsigned long back_max;
1081#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
1082#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
1083	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1084
1085	if (rq1 == NULL || rq1 == rq2)
1086		return rq2;
1087	if (rq2 == NULL)
1088		return rq1;
1089
1090	if (rq_is_sync(rq1) != rq_is_sync(rq2))
1091		return rq_is_sync(rq1) ? rq1 : rq2;
1092
1093	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
1094		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
1095
1096	s1 = blk_rq_pos(rq1);
1097	s2 = blk_rq_pos(rq2);
1098
1099	/*
1100	 * by definition, 1KiB is 2 sectors
1101	 */
1102	back_max = cfqd->cfq_back_max * 2;
1103
1104	/*
1105	 * Strict one way elevator _except_ in the case where we allow
1106	 * short backward seeks which are biased as twice the cost of a
1107	 * similar forward seek.
1108	 */
1109	if (s1 >= last)
1110		d1 = s1 - last;
1111	else if (s1 + back_max >= last)
1112		d1 = (last - s1) * cfqd->cfq_back_penalty;
1113	else
1114		wrap |= CFQ_RQ1_WRAP;
1115
1116	if (s2 >= last)
1117		d2 = s2 - last;
1118	else if (s2 + back_max >= last)
1119		d2 = (last - s2) * cfqd->cfq_back_penalty;
1120	else
1121		wrap |= CFQ_RQ2_WRAP;
1122
1123	/* Found required data */
1124
1125	/*
1126	 * By doing switch() on the bit mask "wrap" we avoid having to
1127	 * check two variables for all permutations: --> faster!
1128	 */
1129	switch (wrap) {
1130	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1131		if (d1 < d2)
1132			return rq1;
1133		else if (d2 < d1)
1134			return rq2;
1135		else {
1136			if (s1 >= s2)
1137				return rq1;
1138			else
1139				return rq2;
1140		}
1141
1142	case CFQ_RQ2_WRAP:
1143		return rq1;
1144	case CFQ_RQ1_WRAP:
1145		return rq2;
1146	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1147	default:
1148		/*
1149		 * Since both rqs are wrapped,
1150		 * start with the one that's further behind head
1151		 * (--> only *one* back seek required),
1152		 * since back seek takes more time than forward.
1153		 */
1154		if (s1 <= s2)
1155			return rq1;
1156		else
1157			return rq2;
1158	}
1159}
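
/*
 * Worked example for cfq_choose_req(), using the default cfq_back_max of
 * 16384 KiB (i.e. back_max == 32768 sectors) and cfq_back_penalty == 2:
 * with the head at sector 1000, a request at sector 1500 gets d1 == 500
 * while a request at sector 800 lies behind the head and gets
 * d2 == (1000 - 800) * 2 == 400, so the backward request still wins.
 * A request more than back_max sectors behind the head is flagged as
 * wrapping and loses to any non-wrapping request.
 */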
1160
1161/*
1162 * The below is the leftmost-node cache addon for the rbtree.
1163 */
1164static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1165{
1166	/* Service tree is empty */
1167	if (!root->count)
1168		return NULL;
1169
1170	if (!root->left)
1171		root->left = rb_first(&root->rb);
1172
1173	if (root->left)
1174		return rb_entry(root->left, struct cfq_queue, rb_node);
1175
1176	return NULL;
1177}
1178
1179static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1180{
1181	if (!root->left)
1182		root->left = rb_first(&root->rb);
1183
1184	if (root->left)
1185		return rb_entry_cfqg(root->left);
1186
1187	return NULL;
1188}
1189
1190static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1191{
1192	rb_erase(n, root);
1193	RB_CLEAR_NODE(n);
1194}
1195
1196static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1197{
1198	if (root->left == n)
1199		root->left = NULL;
1200	rb_erase_init(n, &root->rb);
1201	--root->count;
1202}
1203
1204/*
1205 * would be nice to take fifo expire time into account as well
1206 */
1207static struct request *
1208cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1209		  struct request *last)
1210{
1211	struct rb_node *rbnext = rb_next(&last->rb_node);
1212	struct rb_node *rbprev = rb_prev(&last->rb_node);
1213	struct request *next = NULL, *prev = NULL;
1214
1215	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1216
1217	if (rbprev)
1218		prev = rb_entry_rq(rbprev);
1219
1220	if (rbnext)
1221		next = rb_entry_rq(rbnext);
1222	else {
1223		rbnext = rb_first(&cfqq->sort_list);
1224		if (rbnext && rbnext != &last->rb_node)
1225			next = rb_entry_rq(rbnext);
1226	}
1227
1228	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1229}
1230
1231static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
1232				      struct cfq_queue *cfqq)
1233{
1234	/*
1235	 * just an approximation, should be ok.
1236	 */
1237	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1238		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1239}
1240
1241static inline s64
1242cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1243{
1244	return cfqg->vdisktime - st->min_vdisktime;
1245}
1246
1247static void
1248__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1249{
1250	struct rb_node **node = &st->rb.rb_node;
1251	struct rb_node *parent = NULL;
1252	struct cfq_group *__cfqg;
1253	s64 key = cfqg_key(st, cfqg);
1254	int left = 1;
1255
1256	while (*node != NULL) {
1257		parent = *node;
1258		__cfqg = rb_entry_cfqg(parent);
1259
1260		if (key < cfqg_key(st, __cfqg))
1261			node = &parent->rb_left;
1262		else {
1263			node = &parent->rb_right;
1264			left = 0;
1265		}
1266	}
1267
1268	if (left)
1269		st->left = &cfqg->rb_node;
1270
1271	rb_link_node(&cfqg->rb_node, parent, node);
1272	rb_insert_color(&cfqg->rb_node, &st->rb);
1273}
1274
1275static void
1276cfq_update_group_weight(struct cfq_group *cfqg)
1277{
1278	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1279
1280	if (cfqg->new_weight) {
1281		cfqg->weight = cfqg->new_weight;
1282		cfqg->new_weight = 0;
1283	}
1284
1285	if (cfqg->new_leaf_weight) {
1286		cfqg->leaf_weight = cfqg->new_leaf_weight;
1287		cfqg->new_leaf_weight = 0;
1288	}
1289}
1290
1291static void
1292cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1293{
1294	unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;	/* start with 1 */
1295	struct cfq_group *pos = cfqg;
1296	struct cfq_group *parent;
1297	bool propagate;
1298
1299	/* add to the service tree */
1300	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1301
1302	cfq_update_group_weight(cfqg);
1303	__cfq_group_service_tree_add(st, cfqg);
1304
1305	/*
1306	 * Activate @cfqg and calculate the portion of vfraction @cfqg is
1307	 * entitled to.  vfraction is calculated by walking the tree
1308	 * towards the root calculating the fraction it has at each level.
1309	 * The compounded ratio is how much vfraction @cfqg owns.
1310	 *
1311	 * Start with the proportion tasks in this cfqg has against active
1312	 * children cfqgs - its leaf_weight against children_weight.
1313	 */
1314	propagate = !pos->nr_active++;
1315	pos->children_weight += pos->leaf_weight;
1316	vfr = vfr * pos->leaf_weight / pos->children_weight;
1317
1318	/*
1319	 * Compound ->weight walking up the tree.  Both activation and
1320	 * vfraction calculation are done in the same loop.  Propagation
1321	 * stops once an already activated node is met.  vfraction
1322	 * calculation should always continue to the root.
1323	 */
1324	while ((parent = cfqg_parent(pos))) {
1325		if (propagate) {
1326			propagate = !parent->nr_active++;
1327			parent->children_weight += pos->weight;
1328		}
1329		vfr = vfr * pos->weight / parent->children_weight;
1330		pos = parent;
1331	}
1332
1333	cfqg->vfraction = max_t(unsigned, vfr, 1);
1334}
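
/*
 * Worked example for the vfraction calculation above: let group C sit under
 * parent P, which sits under the root R, all weights and leaf_weights equal
 * to 500, with P's and R's own tasks also active.  Activating C compounds
 *
 *	vfr  = C->leaf_weight / C->children_weight  = 500 / 500  = 1
 *	vfr *= C->weight / P->children_weight       = 500 / 1000 -> 1/2
 *	vfr *= P->weight / R->children_weight       = 500 / 1000 -> 1/4
 *
 * so C ends up with vfraction == (1 << CFQ_SERVICE_SHIFT) / 4, i.e. it is
 * entitled to a quarter of the device's vdisktime.
 */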
1335
1336static void
1337cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1338{
1339	struct cfq_rb_root *st = &cfqd->grp_service_tree;
1340	struct cfq_group *__cfqg;
1341	struct rb_node *n;
1342
1343	cfqg->nr_cfqq++;
1344	if (!RB_EMPTY_NODE(&cfqg->rb_node))
1345		return;
1346
1347	/*
1348	 * Currently put the group at the end. Later implement something
1349	 * so that groups get a smaller vtime based on their weights, so that
1350	 * a group does not lose everything if it was not continuously backlogged.
1351	 */
1352	n = rb_last(&st->rb);
1353	if (n) {
1354		__cfqg = rb_entry_cfqg(n);
1355		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1356	} else
1357		cfqg->vdisktime = st->min_vdisktime;
1358	cfq_group_service_tree_add(st, cfqg);
1359}
1360
1361static void
1362cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1363{
1364	struct cfq_group *pos = cfqg;
1365	bool propagate;
1366
1367	/*
1368	 * Undo activation from cfq_group_service_tree_add().  Deactivate
1369	 * @cfqg and propagate deactivation upwards.
1370	 */
1371	propagate = !--pos->nr_active;
1372	pos->children_weight -= pos->leaf_weight;
1373
1374	while (propagate) {
1375		struct cfq_group *parent = cfqg_parent(pos);
1376
1377		/* @pos has 0 nr_active at this point */
1378		WARN_ON_ONCE(pos->children_weight);
1379		pos->vfraction = 0;
1380
1381		if (!parent)
1382			break;
1383
1384		propagate = !--parent->nr_active;
1385		parent->children_weight -= pos->weight;
1386		pos = parent;
1387	}
1388
1389	/* remove from the service tree */
1390	if (!RB_EMPTY_NODE(&cfqg->rb_node))
1391		cfq_rb_erase(&cfqg->rb_node, st);
1392}
1393
1394static void
1395cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1396{
1397	struct cfq_rb_root *st = &cfqd->grp_service_tree;
1398
1399	BUG_ON(cfqg->nr_cfqq < 1);
1400	cfqg->nr_cfqq--;
1401
1402	/* If there are other cfq queues under this group, don't delete it */
1403	if (cfqg->nr_cfqq)
1404		return;
1405
1406	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1407	cfq_group_service_tree_del(st, cfqg);
1408	cfqg->saved_wl_slice = 0;
1409	cfqg_stats_update_dequeue(cfqg);
1410}
1411
1412static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1413						unsigned int *unaccounted_time)
1414{
1415	unsigned int slice_used;
1416
1417	/*
1418	 * Queue got expired before even a single request completed or
1419	 * got expired immediately after first request completion.
1420	 */
1421	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
1422		/*
1423		 * Also charge the seek time incurred to the group, otherwise
1424		 * if there are multiple queues in the group, each can dispatch
1425		 * a single request on seeky media and cause lots of seek time
1426		 * and group will never know it.
1427		 */
1428		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1429					1);
1430	} else {
1431		slice_used = jiffies - cfqq->slice_start;
1432		if (slice_used > cfqq->allocated_slice) {
1433			*unaccounted_time = slice_used - cfqq->allocated_slice;
1434			slice_used = cfqq->allocated_slice;
1435		}
1436		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1437			*unaccounted_time += cfqq->slice_start -
1438					cfqq->dispatch_start;
1439	}
1440
1441	return slice_used;
1442}
1443
1444static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1445				struct cfq_queue *cfqq)
1446{
1447	struct cfq_rb_root *st = &cfqd->grp_service_tree;
1448	unsigned int used_sl, charge, unaccounted_sl = 0;
1449	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1450			- cfqg->service_tree_idle.count;
1451	unsigned int vfr;
1452
1453	BUG_ON(nr_sync < 0);
1454	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1455
1456	if (iops_mode(cfqd))
1457		charge = cfqq->slice_dispatch;
1458	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1459		charge = cfqq->allocated_slice;
1460
1461	/*
1462	 * Can't update vdisktime while on service tree and cfqg->vfraction
1463	 * is valid only while on it.  Cache vfr, leave the service tree,
1464	 * update vdisktime and go back on.  The re-addition to the tree
1465	 * will also update the weights as necessary.
1466	 */
1467	vfr = cfqg->vfraction;
1468	cfq_group_service_tree_del(st, cfqg);
1469	cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
1470	cfq_group_service_tree_add(st, cfqg);
1471
1472	/* This group is being expired. Save the context */
1473	if (time_after(cfqd->workload_expires, jiffies)) {
1474		cfqg->saved_wl_slice = cfqd->workload_expires
1475						- jiffies;
1476		cfqg->saved_wl_type = cfqd->serving_wl_type;
1477		cfqg->saved_wl_class = cfqd->serving_wl_class;
1478	} else
1479		cfqg->saved_wl_slice = 0;
1480
1481	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1482					st->min_vdisktime);
1483	cfq_log_cfqq(cfqq->cfqd, cfqq,
1484		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1485		     used_sl, cfqq->slice_dispatch, charge,
1486		     iops_mode(cfqd), cfqq->nr_sectors);
1487	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1488	cfqg_stats_set_start_empty_time(cfqg);
1489}
1490
1491/**
1492 * cfq_init_cfqg_base - initialize base part of a cfq_group
1493 * @cfqg: cfq_group to initialize
1494 *
1495 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1496 * is enabled or not.
1497 */
1498static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1499{
1500	struct cfq_rb_root *st;
1501	int i, j;
1502
1503	for_each_cfqg_st(cfqg, i, j, st)
1504		*st = CFQ_RB_ROOT;
1505	RB_CLEAR_NODE(&cfqg->rb_node);
1506
1507	cfqg->ttime.last_end_request = jiffies;
1508}
1509
1510#ifdef CONFIG_CFQ_GROUP_IOSCHED
1511static void cfqg_stats_init(struct cfqg_stats *stats)
1512{
1513	blkg_rwstat_init(&stats->service_bytes);
1514	blkg_rwstat_init(&stats->serviced);
1515	blkg_rwstat_init(&stats->merged);
1516	blkg_rwstat_init(&stats->service_time);
1517	blkg_rwstat_init(&stats->wait_time);
1518	blkg_rwstat_init(&stats->queued);
1519
1520	blkg_stat_init(&stats->sectors);
1521	blkg_stat_init(&stats->time);
1522
1523#ifdef CONFIG_DEBUG_BLK_CGROUP
1524	blkg_stat_init(&stats->unaccounted_time);
1525	blkg_stat_init(&stats->avg_queue_size_sum);
1526	blkg_stat_init(&stats->avg_queue_size_samples);
1527	blkg_stat_init(&stats->dequeue);
1528	blkg_stat_init(&stats->group_wait_time);
1529	blkg_stat_init(&stats->idle_time);
1530	blkg_stat_init(&stats->empty_time);
1531#endif
1532}
1533
1534static void cfq_pd_init(struct blkcg_gq *blkg)
1535{
1536	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1537
1538	cfq_init_cfqg_base(cfqg);
1539	cfqg->weight = blkg->blkcg->cfq_weight;
1540	cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
1541	cfqg_stats_init(&cfqg->stats);
1542	cfqg_stats_init(&cfqg->dead_stats);
1543}
1544
1545static void cfq_pd_offline(struct blkcg_gq *blkg)
1546{
1547	/*
1548	 * @blkg is going offline and will be ignored by
1549	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
1550	 * that they don't get lost.  If IOs complete after this point, the
1551	 * stats for them will be lost.  Oh well...
1552	 */
1553	cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
1554}
1555
1556/* offset delta from cfqg->stats to cfqg->dead_stats */
1557static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
1558					offsetof(struct cfq_group, stats);
1559
1560/* to be used by recursive prfill, sums live and dead stats recursively */
1561static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
1562{
1563	u64 sum = 0;
1564
1565	sum += blkg_stat_recursive_sum(pd, off);
1566	sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
1567	return sum;
1568}
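
/*
 * For example, when @off is offsetof(struct cfq_group, stats.time),
 * off + dead_stats_off_delta equals offsetof(struct cfq_group,
 * dead_stats.time), so the live counter and the counters inherited from
 * already-removed child groups are summed together.
 */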
1569
1570/* to be used by recursive prfill, sums live and dead rwstats recursively */
1571static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
1572						       int off)
1573{
1574	struct blkg_rwstat a, b;
1575
1576	a = blkg_rwstat_recursive_sum(pd, off);
1577	b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
1578	blkg_rwstat_merge(&a, &b);
1579	return a;
1580}
1581
1582static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
1583{
1584	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1585
1586	cfqg_stats_reset(&cfqg->stats);
1587	cfqg_stats_reset(&cfqg->dead_stats);
1588}
1589
1590/*
1591 * Search for the cfq group the current task belongs to. The request_queue
1592 * lock must be held.
1593 */
1594static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1595						struct blkcg *blkcg)
1596{
1597	struct request_queue *q = cfqd->queue;
1598	struct cfq_group *cfqg = NULL;
1599
1600	/* avoid lookup for the common case where there's no blkcg */
1601	if (blkcg == &blkcg_root) {
1602		cfqg = cfqd->root_group;
1603	} else {
1604		struct blkcg_gq *blkg;
1605
1606		blkg = blkg_lookup_create(blkcg, q);
1607		if (!IS_ERR(blkg))
1608			cfqg = blkg_to_cfqg(blkg);
1609	}
1610
1611	return cfqg;
1612}
1613
1614static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1615{
1616	/* Currently, all async queues are mapped to root group */
1617	if (!cfq_cfqq_sync(cfqq))
1618		cfqg = cfqq->cfqd->root_group;
1619
1620	cfqq->cfqg = cfqg;
1621	/* cfqq reference on cfqg */
1622	cfqg_get(cfqg);
1623}
1624
1625static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1626				     struct blkg_policy_data *pd, int off)
1627{
1628	struct cfq_group *cfqg = pd_to_cfqg(pd);
1629
1630	if (!cfqg->dev_weight)
1631		return 0;
1632	return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1633}
1634
1635static int cfqg_print_weight_device(struct seq_file *sf, void *v)
1636{
1637	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1638			  cfqg_prfill_weight_device, &blkcg_policy_cfq,
1639			  0, false);
1640	return 0;
1641}
1642
1643static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1644					  struct blkg_policy_data *pd, int off)
1645{
1646	struct cfq_group *cfqg = pd_to_cfqg(pd);
1647
1648	if (!cfqg->dev_leaf_weight)
1649		return 0;
1650	return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1651}
1652
1653static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
1654{
1655	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1656			  cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1657			  0, false);
1658	return 0;
1659}
1660
1661static int cfq_print_weight(struct seq_file *sf, void *v)
1662{
1663	seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_weight);
1664	return 0;
1665}
1666
1667static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
1668{
1669	seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_leaf_weight);
1670	return 0;
1671}
1672
1673static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
1674				    struct cftype *cft, const char *buf,
1675				    bool is_leaf_weight)
1676{
1677	struct blkcg *blkcg = css_to_blkcg(css);
1678	struct blkg_conf_ctx ctx;
1679	struct cfq_group *cfqg;
1680	int ret;
1681
1682	ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1683	if (ret)
1684		return ret;
1685
1686	ret = -EINVAL;
1687	cfqg = blkg_to_cfqg(ctx.blkg);
1688	if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
1689		if (!is_leaf_weight) {
1690			cfqg->dev_weight = ctx.v;
1691			cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
1692		} else {
1693			cfqg->dev_leaf_weight = ctx.v;
1694			cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
1695		}
1696		ret = 0;
1697	}
1698
1699	blkg_conf_finish(&ctx);
1700	return ret;
1701}
1702
1703static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
1704				  struct cftype *cft, char *buf)
1705{
1706	return __cfqg_set_weight_device(css, cft, buf, false);
1707}
1708
1709static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
1710				       struct cftype *cft, char *buf)
1711{
1712	return __cfqg_set_weight_device(css, cft, buf, true);
1713}
1714
1715static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1716			    u64 val, bool is_leaf_weight)
1717{
1718	struct blkcg *blkcg = css_to_blkcg(css);
1719	struct blkcg_gq *blkg;
1720
1721	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
1722		return -EINVAL;
1723
1724	spin_lock_irq(&blkcg->lock);
1725
1726	if (!is_leaf_weight)
1727		blkcg->cfq_weight = val;
1728	else
1729		blkcg->cfq_leaf_weight = val;
1730
1731	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1732		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1733
1734		if (!cfqg)
1735			continue;
1736
1737		if (!is_leaf_weight) {
1738			if (!cfqg->dev_weight)
1739				cfqg->new_weight = blkcg->cfq_weight;
1740		} else {
1741			if (!cfqg->dev_leaf_weight)
1742				cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
1743		}
1744	}
1745
1746	spin_unlock_irq(&blkcg->lock);
1747	return 0;
1748}
1749
1750static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1751			  u64 val)
1752{
1753	return __cfq_set_weight(css, cft, val, false);
1754}
1755
1756static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1757			       struct cftype *cft, u64 val)
1758{
1759	return __cfq_set_weight(css, cft, val, true);
1760}
1761
1762static int cfqg_print_stat(struct seq_file *sf, void *v)
1763{
1764	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1765			  &blkcg_policy_cfq, seq_cft(sf)->private, false);
1766	return 0;
1767}
1768
1769static int cfqg_print_rwstat(struct seq_file *sf, void *v)
1770{
1771	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1772			  &blkcg_policy_cfq, seq_cft(sf)->private, true);
1773	return 0;
1774}
1775
1776static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1777				      struct blkg_policy_data *pd, int off)
1778{
1779	u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
1780
1781	return __blkg_prfill_u64(sf, pd, sum);
1782}
1783
1784static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1785					struct blkg_policy_data *pd, int off)
1786{
1787	struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
1788
1789	return __blkg_prfill_rwstat(sf, pd, &sum);
1790}
1791
1792static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
1793{
1794	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1795			  cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1796			  seq_cft(sf)->private, false);
1797	return 0;
1798}
1799
1800static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1801{
1802	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1803			  cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1804			  seq_cft(sf)->private, true);
1805	return 0;
1806}
1807
1808#ifdef CONFIG_DEBUG_BLK_CGROUP
1809static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1810				      struct blkg_policy_data *pd, int off)
1811{
1812	struct cfq_group *cfqg = pd_to_cfqg(pd);
1813	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1814	u64 v = 0;
1815
1816	if (samples) {
1817		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1818		v = div64_u64(v, samples);
1819	}
1820	__blkg_prfill_u64(sf, pd, v);
1821	return 0;
1822}
1823
1824/* print avg_queue_size */
1825static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1826{
1827	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1828			  cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
1829			  0, false);
1830	return 0;
1831}
1832#endif	/* CONFIG_DEBUG_BLK_CGROUP */
1833
1834static struct cftype cfq_blkcg_files[] = {
1835	/* on root, weight is mapped to leaf_weight */
1836	{
1837		.name = "weight_device",
1838		.flags = CFTYPE_ONLY_ON_ROOT,
1839		.seq_show = cfqg_print_leaf_weight_device,
1840		.write_string = cfqg_set_leaf_weight_device,
1841	},
1842	{
1843		.name = "weight",
1844		.flags = CFTYPE_ONLY_ON_ROOT,
1845		.seq_show = cfq_print_leaf_weight,
1846		.write_u64 = cfq_set_leaf_weight,
1847	},
1848
1849	/* no such mapping necessary for !roots */
1850	{
1851		.name = "weight_device",
1852		.flags = CFTYPE_NOT_ON_ROOT,
1853		.seq_show = cfqg_print_weight_device,
1854		.write_string = cfqg_set_weight_device,
1855	},
1856	{
1857		.name = "weight",
1858		.flags = CFTYPE_NOT_ON_ROOT,
1859		.seq_show = cfq_print_weight,
1860		.write_u64 = cfq_set_weight,
1861	},
1862
1863	{
1864		.name = "leaf_weight_device",
1865		.seq_show = cfqg_print_leaf_weight_device,
1866		.write_string = cfqg_set_leaf_weight_device,
1867	},
1868	{
1869		.name = "leaf_weight",
1870		.seq_show = cfq_print_leaf_weight,
1871		.write_u64 = cfq_set_leaf_weight,
1872	},
1873
1874	/* statistics, covers only the tasks in the cfqg */
1875	{
1876		.name = "time",
1877		.private = offsetof(struct cfq_group, stats.time),
1878		.seq_show = cfqg_print_stat,
1879	},
1880	{
1881		.name = "sectors",
1882		.private = offsetof(struct cfq_group, stats.sectors),
1883		.seq_show = cfqg_print_stat,
1884	},
1885	{
1886		.name = "io_service_bytes",
1887		.private = offsetof(struct cfq_group, stats.service_bytes),
1888		.seq_show = cfqg_print_rwstat,
1889	},
1890	{
1891		.name = "io_serviced",
1892		.private = offsetof(struct cfq_group, stats.serviced),
1893		.seq_show = cfqg_print_rwstat,
1894	},
1895	{
1896		.name = "io_service_time",
1897		.private = offsetof(struct cfq_group, stats.service_time),
1898		.seq_show = cfqg_print_rwstat,
1899	},
1900	{
1901		.name = "io_wait_time",
1902		.private = offsetof(struct cfq_group, stats.wait_time),
1903		.seq_show = cfqg_print_rwstat,
1904	},
1905	{
1906		.name = "io_merged",
1907		.private = offsetof(struct cfq_group, stats.merged),
1908		.seq_show = cfqg_print_rwstat,
1909	},
1910	{
1911		.name = "io_queued",
1912		.private = offsetof(struct cfq_group, stats.queued),
1913		.seq_show = cfqg_print_rwstat,
1914	},
1915
1916	/* the same statistics, covering the cfqg and its descendants */
1917	{
1918		.name = "time_recursive",
1919		.private = offsetof(struct cfq_group, stats.time),
1920		.seq_show = cfqg_print_stat_recursive,
1921	},
1922	{
1923		.name = "sectors_recursive",
1924		.private = offsetof(struct cfq_group, stats.sectors),
1925		.seq_show = cfqg_print_stat_recursive,
1926	},
1927	{
1928		.name = "io_service_bytes_recursive",
1929		.private = offsetof(struct cfq_group, stats.service_bytes),
1930		.seq_show = cfqg_print_rwstat_recursive,
1931	},
1932	{
1933		.name = "io_serviced_recursive",
1934		.private = offsetof(struct cfq_group, stats.serviced),
1935		.seq_show = cfqg_print_rwstat_recursive,
1936	},
1937	{
1938		.name = "io_service_time_recursive",
1939		.private = offsetof(struct cfq_group, stats.service_time),
1940		.seq_show = cfqg_print_rwstat_recursive,
1941	},
1942	{
1943		.name = "io_wait_time_recursive",
1944		.private = offsetof(struct cfq_group, stats.wait_time),
1945		.seq_show = cfqg_print_rwstat_recursive,
1946	},
1947	{
1948		.name = "io_merged_recursive",
1949		.private = offsetof(struct cfq_group, stats.merged),
1950		.seq_show = cfqg_print_rwstat_recursive,
1951	},
1952	{
1953		.name = "io_queued_recursive",
1954		.private = offsetof(struct cfq_group, stats.queued),
1955		.seq_show = cfqg_print_rwstat_recursive,
1956	},
1957#ifdef CONFIG_DEBUG_BLK_CGROUP
1958	{
1959		.name = "avg_queue_size",
1960		.seq_show = cfqg_print_avg_queue_size,
1961	},
1962	{
1963		.name = "group_wait_time",
1964		.private = offsetof(struct cfq_group, stats.group_wait_time),
1965		.seq_show = cfqg_print_stat,
1966	},
1967	{
1968		.name = "idle_time",
1969		.private = offsetof(struct cfq_group, stats.idle_time),
1970		.seq_show = cfqg_print_stat,
1971	},
1972	{
1973		.name = "empty_time",
1974		.private = offsetof(struct cfq_group, stats.empty_time),
1975		.seq_show = cfqg_print_stat,
1976	},
1977	{
1978		.name = "dequeue",
1979		.private = offsetof(struct cfq_group, stats.dequeue),
1980		.seq_show = cfqg_print_stat,
1981	},
1982	{
1983		.name = "unaccounted_time",
1984		.private = offsetof(struct cfq_group, stats.unaccounted_time),
1985		.seq_show = cfqg_print_stat,
1986	},
1987#endif	/* CONFIG_DEBUG_BLK_CGROUP */
1988	{ }	/* terminate */
1989};
1990#else /* GROUP_IOSCHED */
1991static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1992						struct blkcg *blkcg)
1993{
1994	return cfqd->root_group;
1995}
1996
1997static inline void
1998cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1999	cfqq->cfqg = cfqg;
2000}
2001
2002#endif /* GROUP_IOSCHED */
2003
2004/*
2005 * The cfqd->service_trees holds all pending cfq_queue's that have
2006 * requests waiting to be processed. It is sorted in the order that
2007 * we will service the queues.
2008 */
2009static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2010				 bool add_front)
2011{
2012	struct rb_node **p, *parent;
2013	struct cfq_queue *__cfqq;
2014	unsigned long rb_key;
2015	struct cfq_rb_root *st;
2016	int left;
2017	int new_cfqq = 1;
2018
2019	st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
2020	if (cfq_class_idle(cfqq)) {
2021		rb_key = CFQ_IDLE_DELAY;
2022		parent = rb_last(&st->rb);
2023		if (parent && parent != &cfqq->rb_node) {
2024			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2025			rb_key += __cfqq->rb_key;
2026		} else
2027			rb_key += jiffies;
2028	} else if (!add_front) {
2029		/*
2030		 * Get our rb key offset. Subtract any residual slice
2031		 * value carried from last service. A negative resid
2032		 * count indicates slice overrun, and this should position
2033		 * the next service time further away in the tree.
2034		 */
2035		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
2036		rb_key -= cfqq->slice_resid;
2037		cfqq->slice_resid = 0;
2038	} else {
2039		rb_key = -HZ;
2040		__cfqq = cfq_rb_first(st);
2041		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
2042	}
2043
2044	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2045		new_cfqq = 0;
2046		/*
2047		 * same position, nothing more to do
2048		 */
2049		if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
2050			return;
2051
2052		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2053		cfqq->service_tree = NULL;
2054	}
2055
2056	left = 1;
2057	parent = NULL;
2058	cfqq->service_tree = st;
2059	p = &st->rb.rb_node;
2060	while (*p) {
2061		parent = *p;
2062		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2063
2064		/*
2065		 * sort by key, that represents service time.
2066		 */
2067		if (time_before(rb_key, __cfqq->rb_key))
2068			p = &parent->rb_left;
2069		else {
2070			p = &parent->rb_right;
2071			left = 0;
2072		}
2073	}
2074
2075	if (left)
2076		st->left = &cfqq->rb_node;
2077
2078	cfqq->rb_key = rb_key;
2079	rb_link_node(&cfqq->rb_node, parent, p);
2080	rb_insert_color(&cfqq->rb_node, &st->rb);
2081	st->count++;
2082	if (add_front || !new_cfqq)
2083		return;
2084	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
2085}
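/*
 * Rough sketch of the ordering above (illustrative, not normative): for a
 * non-idle queue that is not added to the front, the key is roughly
 *
 *	rb_key = jiffies + cfq_slice_offset(cfqd, cfqq) - cfqq->slice_resid
 *
 * so a queue that overran its previous slice (negative resid) sorts later,
 * while the time_before() walk in the insertion loop keeps equal keys in
 * FIFO order to the right and tracks the leftmost (next to serve) node.
 */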
2086
2087static struct cfq_queue *
2088cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2089		     sector_t sector, struct rb_node **ret_parent,
2090		     struct rb_node ***rb_link)
2091{
2092	struct rb_node **p, *parent;
2093	struct cfq_queue *cfqq = NULL;
2094
2095	parent = NULL;
2096	p = &root->rb_node;
2097	while (*p) {
2098		struct rb_node **n;
2099
2100		parent = *p;
2101		cfqq = rb_entry(parent, struct cfq_queue, p_node);
2102
2103		/*
2104		 * Sort strictly based on sector.  Smallest to the left,
2105		 * largest to the right.
2106		 */
2107		if (sector > blk_rq_pos(cfqq->next_rq))
2108			n = &(*p)->rb_right;
2109		else if (sector < blk_rq_pos(cfqq->next_rq))
2110			n = &(*p)->rb_left;
2111		else
2112			break;
2113		p = n;
2114		cfqq = NULL;
2115	}
2116
2117	*ret_parent = parent;
2118	if (rb_link)
2119		*rb_link = p;
2120	return cfqq;
2121}
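/*
 * Note on the lookup above: the prio tree is keyed on the sector of each
 * queue's next_rq.  On an exact match the matching cfqq is returned;
 * otherwise NULL is returned and *ret_parent is the closest leaf, which
 * cfqq_close() below uses to approximate the nearest cooperator.
 */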
2122
2123static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2124{
2125	struct rb_node **p, *parent;
2126	struct cfq_queue *__cfqq;
2127
2128	if (cfqq->p_root) {
2129		rb_erase(&cfqq->p_node, cfqq->p_root);
2130		cfqq->p_root = NULL;
2131	}
2132
2133	if (cfq_class_idle(cfqq))
2134		return;
2135	if (!cfqq->next_rq)
2136		return;
2137
2138	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2139	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2140				      blk_rq_pos(cfqq->next_rq), &parent, &p);
2141	if (!__cfqq) {
2142		rb_link_node(&cfqq->p_node, parent, p);
2143		rb_insert_color(&cfqq->p_node, cfqq->p_root);
2144	} else
2145		cfqq->p_root = NULL;
2146}
2147
2148/*
2149 * Update cfqq's position in the service tree.
2150 */
2151static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2152{
2153	/*
2154	 * Resorting requires the cfqq to be on the RR list already.
2155	 */
2156	if (cfq_cfqq_on_rr(cfqq)) {
2157		cfq_service_tree_add(cfqd, cfqq, 0);
2158		cfq_prio_tree_add(cfqd, cfqq);
2159	}
2160}
2161
2162/*
2163 * add to busy list of queues for service, trying to be fair in ordering
2164 * the pending list according to last request service
2165 */
2166static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2167{
2168	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
2169	BUG_ON(cfq_cfqq_on_rr(cfqq));
2170	cfq_mark_cfqq_on_rr(cfqq);
2171	cfqd->busy_queues++;
2172	if (cfq_cfqq_sync(cfqq))
2173		cfqd->busy_sync_queues++;
2174
2175	cfq_resort_rr_list(cfqd, cfqq);
2176}
2177
2178/*
2179 * Called when the cfqq no longer has requests pending, remove it from
2180 * the service tree.
2181 */
2182static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2183{
2184	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
2185	BUG_ON(!cfq_cfqq_on_rr(cfqq));
2186	cfq_clear_cfqq_on_rr(cfqq);
2187
2188	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2189		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2190		cfqq->service_tree = NULL;
2191	}
2192	if (cfqq->p_root) {
2193		rb_erase(&cfqq->p_node, cfqq->p_root);
2194		cfqq->p_root = NULL;
2195	}
2196
2197	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
2198	BUG_ON(!cfqd->busy_queues);
2199	cfqd->busy_queues--;
2200	if (cfq_cfqq_sync(cfqq))
2201		cfqd->busy_sync_queues--;
2202}
2203
2204/*
2205 * rb tree support functions
2206 */
2207static void cfq_del_rq_rb(struct request *rq)
2208{
2209	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2210	const int sync = rq_is_sync(rq);
2211
2212	BUG_ON(!cfqq->queued[sync]);
2213	cfqq->queued[sync]--;
2214
2215	elv_rb_del(&cfqq->sort_list, rq);
2216
2217	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2218		/*
2219		 * The queue will be deleted from the service tree when we
2220		 * actually expire it later. Right now just remove it from the
2221		 * prio tree as it is empty.
2222		 */
2223		if (cfqq->p_root) {
2224			rb_erase(&cfqq->p_node, cfqq->p_root);
2225			cfqq->p_root = NULL;
2226		}
2227	}
2228}
2229
2230static void cfq_add_rq_rb(struct request *rq)
2231{
2232	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2233	struct cfq_data *cfqd = cfqq->cfqd;
2234	struct request *prev;
2235
2236	cfqq->queued[rq_is_sync(rq)]++;
2237
2238	elv_rb_add(&cfqq->sort_list, rq);
2239
2240	if (!cfq_cfqq_on_rr(cfqq))
2241		cfq_add_cfqq_rr(cfqd, cfqq);
2242
2243	/*
2244	 * check if this request is a better next-serve candidate
2245	 */
2246	prev = cfqq->next_rq;
2247	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
2248
2249	/*
2250	 * adjust priority tree position, if ->next_rq changes
2251	 */
2252	if (prev != cfqq->next_rq)
2253		cfq_prio_tree_add(cfqd, cfqq);
2254
2255	BUG_ON(!cfqq->next_rq);
2256}
2257
2258static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
2259{
2260	elv_rb_del(&cfqq->sort_list, rq);
2261	cfqq->queued[rq_is_sync(rq)]--;
2262	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2263	cfq_add_rq_rb(rq);
2264	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2265				 rq->cmd_flags);
2266}
2267
2268static struct request *
2269cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
2270{
2271	struct task_struct *tsk = current;
2272	struct cfq_io_cq *cic;
2273	struct cfq_queue *cfqq;
2274
2275	cic = cfq_cic_lookup(cfqd, tsk->io_context);
2276	if (!cic)
2277		return NULL;
2278
2279	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2280	if (cfqq)
2281		return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
2282
2283	return NULL;
2284}
2285
2286static void cfq_activate_request(struct request_queue *q, struct request *rq)
2287{
2288	struct cfq_data *cfqd = q->elevator->elevator_data;
2289
2290	cfqd->rq_in_driver++;
2291	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
2292						cfqd->rq_in_driver);
2293
2294	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2295}
2296
2297static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
2298{
2299	struct cfq_data *cfqd = q->elevator->elevator_data;
2300
2301	WARN_ON(!cfqd->rq_in_driver);
2302	cfqd->rq_in_driver--;
2303	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
2304						cfqd->rq_in_driver);
2305}
2306
2307static void cfq_remove_request(struct request *rq)
2308{
2309	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2310
2311	if (cfqq->next_rq == rq)
2312		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
2313
2314	list_del_init(&rq->queuelist);
2315	cfq_del_rq_rb(rq);
2316
2317	cfqq->cfqd->rq_queued--;
2318	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2319	if (rq->cmd_flags & REQ_PRIO) {
2320		WARN_ON(!cfqq->prio_pending);
2321		cfqq->prio_pending--;
2322	}
2323}
2324
2325static int cfq_merge(struct request_queue *q, struct request **req,
2326		     struct bio *bio)
2327{
2328	struct cfq_data *cfqd = q->elevator->elevator_data;
2329	struct request *__rq;
2330
2331	__rq = cfq_find_rq_fmerge(cfqd, bio);
2332	if (__rq && elv_rq_merge_ok(__rq, bio)) {
2333		*req = __rq;
2334		return ELEVATOR_FRONT_MERGE;
2335	}
2336
2337	return ELEVATOR_NO_MERGE;
2338}
2339
2340static void cfq_merged_request(struct request_queue *q, struct request *req,
2341			       int type)
2342{
2343	if (type == ELEVATOR_FRONT_MERGE) {
2344		struct cfq_queue *cfqq = RQ_CFQQ(req);
2345
2346		cfq_reposition_rq_rb(cfqq, req);
2347	}
2348}
2349
2350static void cfq_bio_merged(struct request_queue *q, struct request *req,
2351				struct bio *bio)
2352{
2353	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
2354}
2355
2356static void
2357cfq_merged_requests(struct request_queue *q, struct request *rq,
2358		    struct request *next)
2359{
2360	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2361	struct cfq_data *cfqd = q->elevator->elevator_data;
2362
2363	/*
2364	 * reposition in fifo if next is older than rq
2365	 */
2366	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2367	    time_before(next->fifo_time, rq->fifo_time) &&
2368	    cfqq == RQ_CFQQ(next)) {
2369		list_move(&rq->queuelist, &next->queuelist);
2370		rq->fifo_time = next->fifo_time;
2371	}
2372
2373	if (cfqq->next_rq == next)
2374		cfqq->next_rq = rq;
2375	cfq_remove_request(next);
2376	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
2377
2378	cfqq = RQ_CFQQ(next);
2379	/*
2380	 * all requests of this queue were merged into other queues, so delete
2381	 * it from the service tree. If it's the active_queue,
2382	 * cfq_dispatch_requests() will choose whether to expire it or to idle.
2383	 */
2384	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2385	    cfqq != cfqd->active_queue)
2386		cfq_del_cfqq_rr(cfqd, cfqq);
2387}
2388
2389static int cfq_allow_merge(struct request_queue *q, struct request *rq,
2390			   struct bio *bio)
2391{
2392	struct cfq_data *cfqd = q->elevator->elevator_data;
2393	struct cfq_io_cq *cic;
2394	struct cfq_queue *cfqq;
2395
2396	/*
2397	 * Disallow merge of a sync bio into an async request.
2398	 */
2399	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2400		return false;
2401
2402	/*
2403	 * Lookup the cfqq that this bio will be queued with and allow
2404	 * merge only if rq is queued there.
2405	 */
2406	cic = cfq_cic_lookup(cfqd, current->io_context);
2407	if (!cic)
2408		return false;
2409
2410	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2411	return cfqq == RQ_CFQQ(rq);
2412}
2413
2414static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2415{
2416	del_timer(&cfqd->idle_slice_timer);
2417	cfqg_stats_update_idle_time(cfqq->cfqg);
2418}
2419
2420static void __cfq_set_active_queue(struct cfq_data *cfqd,
2421				   struct cfq_queue *cfqq)
2422{
2423	if (cfqq) {
2424		cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2425				cfqd->serving_wl_class, cfqd->serving_wl_type);
2426		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2427		cfqq->slice_start = 0;
2428		cfqq->dispatch_start = jiffies;
2429		cfqq->allocated_slice = 0;
2430		cfqq->slice_end = 0;
2431		cfqq->slice_dispatch = 0;
2432		cfqq->nr_sectors = 0;
2433
2434		cfq_clear_cfqq_wait_request(cfqq);
2435		cfq_clear_cfqq_must_dispatch(cfqq);
2436		cfq_clear_cfqq_must_alloc_slice(cfqq);
2437		cfq_clear_cfqq_fifo_expire(cfqq);
2438		cfq_mark_cfqq_slice_new(cfqq);
2439
2440		cfq_del_timer(cfqd, cfqq);
2441	}
2442
2443	cfqd->active_queue = cfqq;
2444}
2445
2446/*
2447 * current cfqq expired its slice (or was too idle), select new one
2448 */
2449static void
2450__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2451		    bool timed_out)
2452{
2453	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2454
2455	if (cfq_cfqq_wait_request(cfqq))
2456		cfq_del_timer(cfqd, cfqq);
2457
2458	cfq_clear_cfqq_wait_request(cfqq);
2459	cfq_clear_cfqq_wait_busy(cfqq);
2460
2461	/*
2462	 * If this cfqq is shared between multiple processes, check to
2463	 * make sure that those processes are still issuing I/Os within
2464	 * the mean seek distance.  If not, it may be time to break the
2465	 * queues apart again.
2466	 */
2467	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2468		cfq_mark_cfqq_split_coop(cfqq);
2469
2470	/*
2471	 * store what was left of this slice, if the queue idled/timed out
2472	 */
2473	if (timed_out) {
2474		if (cfq_cfqq_slice_new(cfqq))
2475			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2476		else
2477			cfqq->slice_resid = cfqq->slice_end - jiffies;
2478		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2479	}
2480
2481	cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2482
2483	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2484		cfq_del_cfqq_rr(cfqd, cfqq);
2485
2486	cfq_resort_rr_list(cfqd, cfqq);
2487
2488	if (cfqq == cfqd->active_queue)
2489		cfqd->active_queue = NULL;
2490
2491	if (cfqd->active_cic) {
2492		put_io_context(cfqd->active_cic->icq.ioc);
2493		cfqd->active_cic = NULL;
2494	}
2495}
2496
2497static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2498{
2499	struct cfq_queue *cfqq = cfqd->active_queue;
2500
2501	if (cfqq)
2502		__cfq_slice_expired(cfqd, cfqq, timed_out);
2503}
2504
2505/*
2506 * Get next queue for service. Unless we have a queue preemption,
2507 * we'll simply select the first cfqq in the service tree.
2508 */
2509static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2510{
2511	struct cfq_rb_root *st = st_for(cfqd->serving_group,
2512			cfqd->serving_wl_class, cfqd->serving_wl_type);
2513
2514	if (!cfqd->rq_queued)
2515		return NULL;
2516
2517	/* There is nothing to dispatch */
2518	if (!st)
2519		return NULL;
2520	if (RB_EMPTY_ROOT(&st->rb))
2521		return NULL;
2522	return cfq_rb_first(st);
2523}
2524
2525static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2526{
2527	struct cfq_group *cfqg;
2528	struct cfq_queue *cfqq;
2529	int i, j;
2530	struct cfq_rb_root *st;
2531
2532	if (!cfqd->rq_queued)
2533		return NULL;
2534
2535	cfqg = cfq_get_next_cfqg(cfqd);
2536	if (!cfqg)
2537		return NULL;
2538
2539	for_each_cfqg_st(cfqg, i, j, st)
2540		if ((cfqq = cfq_rb_first(st)) != NULL)
2541			return cfqq;
2542	return NULL;
2543}
2544
2545/*
2546 * Get and set a new active queue for service.
2547 */
2548static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2549					      struct cfq_queue *cfqq)
2550{
2551	if (!cfqq)
2552		cfqq = cfq_get_next_queue(cfqd);
2553
2554	__cfq_set_active_queue(cfqd, cfqq);
2555	return cfqq;
2556}
2557
2558static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2559					  struct request *rq)
2560{
2561	if (blk_rq_pos(rq) >= cfqd->last_position)
2562		return blk_rq_pos(rq) - cfqd->last_position;
2563	else
2564		return cfqd->last_position - blk_rq_pos(rq);
2565}
2566
2567static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2568			       struct request *rq)
2569{
2570	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2571}
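/*
 * For a sense of scale (assuming 512-byte sectors): the CFQQ_CLOSE_THR
 * threshold used above corresponds to roughly 4MB around last_position,
 * so "close" here means close enough that seeking there is cheap.
 */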
2572
2573static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2574				    struct cfq_queue *cur_cfqq)
2575{
2576	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2577	struct rb_node *parent, *node;
2578	struct cfq_queue *__cfqq;
2579	sector_t sector = cfqd->last_position;
2580
2581	if (RB_EMPTY_ROOT(root))
2582		return NULL;
2583
2584	/*
2585	 * First, if we find a request starting at the end of the last
2586	 * request, choose it.
2587	 */
2588	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2589	if (__cfqq)
2590		return __cfqq;
2591
2592	/*
2593	 * If the exact sector wasn't found, the parent of the NULL leaf
2594	 * will contain the closest sector.
2595	 */
2596	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
2597	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2598		return __cfqq;
2599
2600	if (blk_rq_pos(__cfqq->next_rq) < sector)
2601		node = rb_next(&__cfqq->p_node);
2602	else
2603		node = rb_prev(&__cfqq->p_node);
2604	if (!node)
2605		return NULL;
2606
2607	__cfqq = rb_entry(node, struct cfq_queue, p_node);
2608	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2609		return __cfqq;
2610
2611	return NULL;
2612}
2613
2614/*
2615 * cfqd - obvious
2616 * cur_cfqq - passed in so that we don't decide that the current queue is
2617 * 	      closely cooperating with itself.
2618 *
2619 * So, basically we're assuming that cur_cfqq has dispatched at least
2620 * one request, and that cfqd->last_position reflects a position on the disk
2621 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
2622 * assumption.
2623 */
2624static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2625					      struct cfq_queue *cur_cfqq)
2626{
2627	struct cfq_queue *cfqq;
2628
2629	if (cfq_class_idle(cur_cfqq))
2630		return NULL;
2631	if (!cfq_cfqq_sync(cur_cfqq))
2632		return NULL;
2633	if (CFQQ_SEEKY(cur_cfqq))
2634		return NULL;
2635
2636	/*
2637	 * Don't search priority tree if it's the only queue in the group.
2638	 */
2639	if (cur_cfqq->cfqg->nr_cfqq == 1)
2640		return NULL;
2641
2642	/*
2643	 * We should notice if some of the queues are cooperating, e.g.
2644	 * working closely on the same area of the disk. In that case,
2645	 * we can group them together and avoid wasting time idling.
2646	 */
2647	cfqq = cfqq_close(cfqd, cur_cfqq);
2648	if (!cfqq)
2649		return NULL;
2650
2651	/* If new queue belongs to different cfq_group, don't choose it */
2652	if (cur_cfqq->cfqg != cfqq->cfqg)
2653		return NULL;
2654
2655	/*
2656	 * It only makes sense to merge sync queues.
2657	 */
2658	if (!cfq_cfqq_sync(cfqq))
2659		return NULL;
2660	if (CFQQ_SEEKY(cfqq))
2661		return NULL;
2662
2663	/*
2664	 * Do not merge queues of different priority classes
2665	 */
2666	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2667		return NULL;
2668
2669	return cfqq;
2670}
2671
2672/*
2673 * Determine whether we should enforce idle window for this queue.
2674 */
2675
2676static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2677{
2678	enum wl_class_t wl_class = cfqq_class(cfqq);
2679	struct cfq_rb_root *st = cfqq->service_tree;
2680
2681	BUG_ON(!st);
2682	BUG_ON(!st->count);
2683
2684	if (!cfqd->cfq_slice_idle)
2685		return false;
2686
2687	/* We never do for idle class queues. */
2688	if (wl_class == IDLE_WORKLOAD)
2689		return false;
2690
2691	/* We do for queues that were marked with the idle window flag. */
2692	if (cfq_cfqq_idle_window(cfqq) &&
2693	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2694		return true;
2695
2696	/*
2697	 * Otherwise, we do only if they are the last ones
2698	 * in their service tree.
2699	 */
2700	if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2701	   !cfq_io_thinktime_big(cfqd, &st->ttime, false))
2702		return true;
2703	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
2704	return false;
2705}
2706
2707static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2708{
2709	struct cfq_queue *cfqq = cfqd->active_queue;
2710	struct cfq_io_cq *cic;
2711	unsigned long sl, group_idle = 0;
2712
2713	/*
2714	 * SSD device without seek penalty, disable idling. But only do so
2715	 * for devices that support queuing, otherwise we still have a problem
2716	 * with sync vs async workloads.
2717	 */
2718	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2719		return;
2720
2721	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2722	WARN_ON(cfq_cfqq_slice_new(cfqq));
2723
2724	/*
2725	 * idle is disabled, either manually or by past process history
2726	 */
2727	if (!cfq_should_idle(cfqd, cfqq)) {
2728		/* no queue idling. Check for group idling */
2729		if (cfqd->cfq_group_idle)
2730			group_idle = cfqd->cfq_group_idle;
2731		else
2732			return;
2733	}
2734
2735	/*
2736	 * still active requests from this queue, don't idle
2737	 */
2738	if (cfqq->dispatched)
2739		return;
2740
2741	/*
2742	 * task has exited, don't wait
2743	 */
2744	cic = cfqd->active_cic;
2745	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2746		return;
2747
2748	/*
2749	 * If our average think time is larger than the remaining time
2750	 * slice, then don't idle. This avoids overrunning the allotted
2751	 * time slice.
2752	 */
2753	if (sample_valid(cic->ttime.ttime_samples) &&
2754	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2755		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2756			     cic->ttime.ttime_mean);
2757		return;
2758	}
2759
2760	/* There are other queues in the group, don't do group idle */
2761	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2762		return;
2763
2764	cfq_mark_cfqq_wait_request(cfqq);
2765
2766	if (group_idle)
2767		sl = cfqd->cfq_group_idle;
2768	else
2769		sl = cfqd->cfq_slice_idle;
2770
2771	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2772	cfqg_stats_set_start_idle_time(cfqq->cfqg);
2773	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2774			group_idle ? 1 : 0);
2775}
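/*
 * Net effect of the checks above (informal summary): the idle timer is
 * only armed for a sync queue (or, with group idle, a lone queue in its
 * group) that has nothing dispatched, a still-referenced io_context and a
 * mean think time that fits in the remaining slice; everything else falls
 * through so dispatch can move on.
 */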
2776
2777/*
2778 * Move request from internal lists to the request queue dispatch list.
2779 */
2780static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2781{
2782	struct cfq_data *cfqd = q->elevator->elevator_data;
2783	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2784
2785	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2786
2787	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2788	cfq_remove_request(rq);
2789	cfqq->dispatched++;
2790	(RQ_CFQG(rq))->dispatched++;
2791	elv_dispatch_sort(q, rq);
2792
2793	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2794	cfqq->nr_sectors += blk_rq_sectors(rq);
2795	cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
2796}
2797
2798/*
2799 * return expired entry, or NULL to just start from scratch in rbtree
2800 */
2801static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2802{
2803	struct request *rq = NULL;
2804
2805	if (cfq_cfqq_fifo_expire(cfqq))
2806		return NULL;
2807
2808	cfq_mark_cfqq_fifo_expire(cfqq);
2809
2810	if (list_empty(&cfqq->fifo))
2811		return NULL;
2812
2813	rq = rq_entry_fifo(cfqq->fifo.next);
2814	if (time_before(jiffies, rq->fifo_time))
2815		rq = NULL;
2816
2817	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2818	return rq;
2819}
2820
2821static inline int
2822cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2823{
2824	const int base_rq = cfqd->cfq_slice_async_rq;
2825
2826	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2827
2828	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2829}
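/*
 * Worked example for the formula above (assuming the default
 * cfq_slice_async_rq of 2): a best-effort queue at ioprio 4 may dispatch
 * 2 * 2 * (IOPRIO_BE_NR - 4) = 16 requests in its slice, while ioprio 0
 * gets 32, so higher-priority async queues drain more per round.
 */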
2830
2831/*
2832 * Must be called with the queue_lock held.
2833 */
2834static int cfqq_process_refs(struct cfq_queue *cfqq)
2835{
2836	int process_refs, io_refs;
2837
2838	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2839	process_refs = cfqq->ref - io_refs;
2840	BUG_ON(process_refs < 0);
2841	return process_refs;
2842}
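/*
 * i.e. process_refs is the part of cfqq->ref that is not accounted for by
 * allocated requests; cfq_setup_merge() below uses it to pick the merge
 * direction and to keep the target queue pinned.
 */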
2843
2844static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2845{
2846	int process_refs, new_process_refs;
2847	struct cfq_queue *__cfqq;
2848
2849	/*
2850	 * If there are no process references on the new_cfqq, then it is
2851	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2852	 * chain may have dropped their last reference (not just their
2853	 * last process reference).
2854	 */
2855	if (!cfqq_process_refs(new_cfqq))
2856		return;
2857
2858	/* Avoid a circular list and skip interim queue merges */
2859	while ((__cfqq = new_cfqq->new_cfqq)) {
2860		if (__cfqq == cfqq)
2861			return;
2862		new_cfqq = __cfqq;
2863	}
2864
2865	process_refs = cfqq_process_refs(cfqq);
2866	new_process_refs = cfqq_process_refs(new_cfqq);
2867	/*
2868	 * If the process for the cfqq has gone away, there is no
2869	 * sense in merging the queues.
2870	 */
2871	if (process_refs == 0 || new_process_refs == 0)
2872		return;
2873
2874	/*
2875	 * Merge in the direction of the lesser amount of work.
2876	 */
2877	if (new_process_refs >= process_refs) {
2878		cfqq->new_cfqq = new_cfqq;
2879		new_cfqq->ref += process_refs;
2880	} else {
2881		new_cfqq->new_cfqq = cfqq;
2882		cfqq->ref += new_process_refs;
2883	}
2884}
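/*
 * In other words: the less busy queue is redirected (via ->new_cfqq) at
 * the busier one, and the busier queue inherits the redirected queue's
 * process references so it cannot disappear while the merge is pending.
 */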
2885
2886static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
2887			struct cfq_group *cfqg, enum wl_class_t wl_class)
2888{
2889	struct cfq_queue *queue;
2890	int i;
2891	bool key_valid = false;
2892	unsigned long lowest_key = 0;
2893	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2894
2895	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2896		/* select the one with lowest rb_key */
2897		queue = cfq_rb_first(st_for(cfqg, wl_class, i));
2898		if (queue &&
2899		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
2900			lowest_key = queue->rb_key;
2901			cur_best = i;
2902			key_valid = true;
2903		}
2904	}
2905
2906	return cur_best;
2907}
2908
2909static void
2910choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
2911{
2912	unsigned slice;
2913	unsigned count;
2914	struct cfq_rb_root *st;
2915	unsigned group_slice;
2916	enum wl_class_t original_class = cfqd->serving_wl_class;
2917
2918	/* Choose next priority. RT > BE > IDLE */
2919	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2920		cfqd->serving_wl_class = RT_WORKLOAD;
2921	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2922		cfqd->serving_wl_class = BE_WORKLOAD;
2923	else {
2924		cfqd->serving_wl_class = IDLE_WORKLOAD;
2925		cfqd->workload_expires = jiffies + 1;
2926		return;
2927	}
2928
2929	if (original_class != cfqd->serving_wl_class)
2930		goto new_workload;
2931
2932	/*
2933	 * For RT and BE, we have to choose also the type
2934	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2935	 * expiration time
2936	 */
2937	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
2938	count = st->count;
2939
2940	/*
2941	 * check workload expiration, and that we still have other queues ready
2942	 */
2943	if (count && !time_after(jiffies, cfqd->workload_expires))
2944		return;
2945
2946new_workload:
2947	/* otherwise select new workload type */
2948	cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
2949					cfqd->serving_wl_class);
2950	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
2951	count = st->count;
2952
2953	/*
2954	 * the workload slice is computed as a fraction of target latency
2955	 * proportional to the number of queues in that workload, over
2956	 * all the queues in the same priority class
2957	 */
2958	group_slice = cfq_group_slice(cfqd, cfqg);
2959
2960	slice = group_slice * count /
2961		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
2962		      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
2963					cfqg));
2964
2965	if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
2966		unsigned int tmp;
2967
2968		/*
2969		 * Async queues are currently system wide. Just taking the
2970		 * proportion of queues within the same group will lead to a
2971		 * higher async ratio system wide, as the root group generally
2972		 * has a higher weight. A more accurate approach would be to
2973		 * calculate a system wide async/sync ratio.
2974		 */
2975		tmp = cfqd->cfq_target_latency *
2976			cfqg_busy_async_queues(cfqd, cfqg);
2977		tmp = tmp/cfqd->busy_queues;
2978		slice = min_t(unsigned, slice, tmp);
2979
2980		/* async workload slice is scaled down according to
2981		 * the sync/async slice ratio. */
2982		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2983	} else
2984		/* sync workload slice is at least 2 * cfq_slice_idle */
2985		slice = max(slice, 2 * cfqd->cfq_slice_idle);
2986
2987	slice = max_t(unsigned, slice, CFQ_MIN_TT);
2988	cfq_log(cfqd, "workload slice:%d", slice);
2989	cfqd->workload_expires = jiffies + slice;
2990}
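/*
 * Illustrative slice calculation (hypothetical numbers): with a group
 * slice of 100ms, 3 busy queues of the chosen workload type and 6 busy
 * queues of the serving class overall, the workload slice above comes out
 * to 100ms * 3 / 6 = 50ms, further clamped for async workloads and floored
 * at 2 * cfq_slice_idle (sync) and CFQ_MIN_TT.
 */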
2991
2992static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2993{
2994	struct cfq_rb_root *st = &cfqd->grp_service_tree;
2995	struct cfq_group *cfqg;
2996
2997	if (RB_EMPTY_ROOT(&st->rb))
2998		return NULL;
2999	cfqg = cfq_rb_first_group(st);
3000	update_min_vdisktime(st);
3001	return cfqg;
3002}
3003
3004static void cfq_choose_cfqg(struct cfq_data *cfqd)
3005{
3006	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3007
3008	cfqd->serving_group = cfqg;
3009
3010	/* Restore the workload type data */
3011	if (cfqg->saved_wl_slice) {
3012		cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
3013		cfqd->serving_wl_type = cfqg->saved_wl_type;
3014		cfqd->serving_wl_class = cfqg->saved_wl_class;
3015	} else
3016		cfqd->workload_expires = jiffies - 1;
3017
3018	choose_wl_class_and_type(cfqd, cfqg);
3019}
3020
3021/*
3022 * Select a queue for service. If we have a current active queue,
3023 * check whether to continue servicing it, or retrieve and set a new one.
3024 */
3025static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
3026{
3027	struct cfq_queue *cfqq, *new_cfqq = NULL;
3028
3029	cfqq = cfqd->active_queue;
3030	if (!cfqq)
3031		goto new_queue;
3032
3033	if (!cfqd->rq_queued)
3034		return NULL;
3035
3036	/*
3037	 * We were waiting for the group to get backlogged. Expire the queue.
3038	 */
3039	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3040		goto expire;
3041
3042	/*
3043	 * The active queue has run out of time, expire it and select new.
3044	 */
3045	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3046		/*
3047		 * If the slice had not expired at the completion of the last
3048		 * request, we might not have turned on the wait_busy flag.
3049		 * Don't expire the queue yet; allow the group to get backlogged.
3050		 *
3051		 * The very fact that we have used the slice means we have been
3052		 * idling all along on this queue, so it should be ok to wait
3053		 * for this request to complete.
3054		 */
3055		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3056		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3057			cfqq = NULL;
3058			goto keep_queue;
3059		} else
3060			goto check_group_idle;
3061	}
3062
3063	/*
3064	 * The active queue has requests and isn't expired, allow it to
3065	 * dispatch.
3066	 */
3067	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3068		goto keep_queue;
3069
3070	/*
3071	 * If another queue has a request waiting within our mean seek
3072	 * distance, let it run.  The expire code will check for close
3073	 * cooperators and put the close queue at the front of the service
3074	 * tree.  If possible, merge the expiring queue with the new cfqq.
3075	 */
3076	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
3077	if (new_cfqq) {
3078		if (!cfqq->new_cfqq)
3079			cfq_setup_merge(cfqq, new_cfqq);
3080		goto expire;
3081	}
3082
3083	/*
3084	 * No requests pending. If the active queue still has requests in
3085	 * flight or is idling for a new request, allow either of these
3086	 * conditions to happen (or time out) before selecting a new queue.
3087	 */
3088	if (timer_pending(&cfqd->idle_slice_timer)) {
3089		cfqq = NULL;
3090		goto keep_queue;
3091	}
3092
3093	/*
3094	 * This is a deep seek queue, but the device is much faster than
3095	 * the queue can deliver requests, so don't idle.
3096	 */
3097	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3098	    (cfq_cfqq_slice_new(cfqq) ||
3099	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
3100		cfq_clear_cfqq_deep(cfqq);
3101		cfq_clear_cfqq_idle_window(cfqq);
3102	}
3103
3104	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3105		cfqq = NULL;
3106		goto keep_queue;
3107	}
3108
3109	/*
3110	 * If group idle is enabled and there are requests dispatched from
3111	 * this group, wait for requests to complete.
3112	 */
3113check_group_idle:
3114	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3115	    cfqq->cfqg->dispatched &&
3116	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
3117		cfqq = NULL;
3118		goto keep_queue;
3119	}
3120
3121expire:
3122	cfq_slice_expired(cfqd, 0);
3123new_queue:
3124	/*
3125	 * Current queue expired. Check if we have to switch to a new
3126	 * service tree
3127	 */
3128	if (!new_cfqq)
3129		cfq_choose_cfqg(cfqd);
3130
3131	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
3132keep_queue:
3133	return cfqq;
3134}
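/*
 * Informal summary of the selection above: keep the active queue while it
 * has requests and slice left, prefer a close cooperator when it runs dry,
 * otherwise wait out queue/group idling, and only then expire it and pick
 * the next group/workload via cfq_choose_cfqg().
 */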
3135
3136static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
3137{
3138	int dispatched = 0;
3139
3140	while (cfqq->next_rq) {
3141		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3142		dispatched++;
3143	}
3144
3145	BUG_ON(!list_empty(&cfqq->fifo));
3146
3147	/* By default cfqq is not expired if it is empty. Do it explicitly */
3148	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
3149	return dispatched;
3150}
3151
3152/*
3153 * Drain our current requests. Used for barriers and when switching
3154 * io schedulers on-the-fly.
3155 */
3156static int cfq_forced_dispatch(struct cfq_data *cfqd)
3157{
3158	struct cfq_queue *cfqq;
3159	int dispatched = 0;
3160
3161	/* Expire the timeslice of the current active queue first */
3162	cfq_slice_expired(cfqd, 0);
3163	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3164		__cfq_set_active_queue(cfqd, cfqq);
3165		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3166	}
3167
3168	BUG_ON(cfqd->busy_queues);
3169
3170	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
3171	return dispatched;
3172}
3173
3174static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3175	struct cfq_queue *cfqq)
3176{
3177	/* the queue hasn't finished any request, can't estimate */
3178	if (cfq_cfqq_slice_new(cfqq))
3179		return true;
3180	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
3181		cfqq->slice_end))
3182		return true;
3183
3184	return false;
3185}
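/*
 * Heuristic sketch: if each request already dispatched from this queue
 * took about cfq_slice_idle to complete, would that alone run past
 * slice_end?  If so, treat the slice as nearly used up and stop feeding
 * the queue deeper.
 */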
3186
3187static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3188{
3189	unsigned int max_dispatch;
3190
3191	/*
3192	 * Drain async requests before we start sync IO
3193	 */
3194	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
3195		return false;
3196
3197	/*
3198	 * If this is an async queue and we have sync IO in flight, let it wait
3199	 */
3200	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
3201		return false;
3202
3203	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
3204	if (cfq_class_idle(cfqq))
3205		max_dispatch = 1;
3206
3207	/*
3208	 * Does this cfqq already have too much IO in flight?
3209	 */
3210	if (cfqq->dispatched >= max_dispatch) {
3211		bool promote_sync = false;
3212		/*
3213		 * idle queue must always only have a single IO in flight
3214		 */
3215		if (cfq_class_idle(cfqq))
3216			return false;
3217
3218		/*
3219		 * If there is only one sync queue,
3220		 * we can ignore the async queues here and give the sync
3221		 * queue no dispatch limit. The reason is that a sync queue
3222		 * can preempt an async queue, so limiting the sync queue
3223		 * doesn't make sense. This is useful for the aiostress test.
3224		 */
3225		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3226			promote_sync = true;
3227
3228		/*
3229		 * We have other queues, don't allow more IO from this one
3230		 */
3231		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3232				!promote_sync)
3233			return false;
3234
3235		/*
3236		 * Sole queue user, no limit
3237		 */
3238		if (cfqd->busy_queues == 1 || promote_sync)
3239			max_dispatch = -1;
3240		else
3241			/*
3242			 * Normally we start throttling cfqq when cfq_quantum/2
3243			 * requests have been dispatched. But we can drive
3244			 * deeper queue depths at the beginning of the slice,
3245			 * subject to the upper limit of cfq_quantum.
3246			 */
3247			max_dispatch = cfqd->cfq_quantum;
3248	}
3249
3250	/*
3251	 * Async queues must wait a bit before being allowed dispatch.
3252	 * We also ramp up the dispatch depth gradually for async IO,
3253	 * based on the last sync IO we serviced
3254	 */
3255	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
3256		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
3257		unsigned int depth;
3258
3259		depth = last_sync / cfqd->cfq_slice[1];
3260		if (!depth && !cfqq->dispatched)
3261			depth = 1;
3262		if (depth < max_dispatch)
3263			max_dispatch = depth;
3264	}
3265
3266	/*
3267	 * If we're below the current max, allow a dispatch
3268	 */
3269	return cfqq->dispatched < max_dispatch;
3270}
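/*
 * Sketch of the async ramp-up above: the allowed depth grows by one for
 * every sync-slice (cfq_slice[1]) worth of time that has passed since the
 * last delayed sync completion, starting from a single request, so async
 * dispatch stays shallow while sync IO is recent and opens up as the
 * device goes sync-idle.
 */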
3271
3272/*
3273 * Dispatch a request from cfqq, moving them to the request queue
3274 * dispatch list.
3275 */
3276static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3277{
3278	struct request *rq;
3279
3280	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3281
3282	if (!cfq_may_dispatch(cfqd, cfqq))
3283		return false;
3284
3285	/*
3286	 * follow expired path, else get first next available
3287	 */
3288	rq = cfq_check_fifo(cfqq);
3289	if (!rq)
3290		rq = cfqq->next_rq;
3291
3292	/*
3293	 * insert request into driver dispatch list
3294	 */
3295	cfq_dispatch_insert(cfqd->queue, rq);
3296
3297	if (!cfqd->active_cic) {
3298		struct cfq_io_cq *cic = RQ_CIC(rq);
3299
3300		atomic_long_inc(&cic->icq.ioc->refcount);
3301		cfqd->active_cic = cic;
3302	}
3303
3304	return true;
3305}
3306
3307/*
3308 * Find the cfqq that we need to service and move a request from that to the
3309 * dispatch list
3310 */
3311static int cfq_dispatch_requests(struct request_queue *q, int force)
3312{
3313	struct cfq_data *cfqd = q->elevator->elevator_data;
3314	struct cfq_queue *cfqq;
3315
3316	if (!cfqd->busy_queues)
3317		return 0;
3318
3319	if (unlikely(force))
3320		return cfq_forced_dispatch(cfqd);
3321
3322	cfqq = cfq_select_queue(cfqd);
3323	if (!cfqq)
3324		return 0;
3325
3326	/*
3327	 * Dispatch a request from this cfqq, if it is allowed
3328	 */
3329	if (!cfq_dispatch_request(cfqd, cfqq))
3330		return 0;
3331
3332	cfqq->slice_dispatch++;
3333	cfq_clear_cfqq_must_dispatch(cfqq);
3334
3335	/*
3336	 * expire an async queue immediately if it has used up its slice. An
3337	 * idle queue always expires after one dispatch round.
3338	 */
3339	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3340	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3341	    cfq_class_idle(cfqq))) {
3342		cfqq->slice_end = jiffies + 1;
3343		cfq_slice_expired(cfqd, 0);
3344	}
3345
3346	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
3347	return 1;
3348}
3349
3350/*
3351 * task holds one reference to the queue, dropped when task exits. each rq
3352 * in-flight on this queue also holds a reference, dropped when rq is freed.
3353 *
3354 * Each cfq queue took a reference on the parent group. Drop it now.
3355 * queue lock must be held here.
3356 */
3357static void cfq_put_queue(struct cfq_queue *cfqq)
3358{
3359	struct cfq_data *cfqd = cfqq->cfqd;
3360	struct cfq_group *cfqg;
3361
3362	BUG_ON(cfqq->ref <= 0);
3363
3364	cfqq->ref--;
3365	if (cfqq->ref)
3366		return;
3367
3368	cfq_log_cfqq(cfqd, cfqq, "put_queue");
3369	BUG_ON(rb_first(&cfqq->sort_list));
3370	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
3371	cfqg = cfqq->cfqg;
3372
3373	if (unlikely(cfqd->active_queue == cfqq)) {
3374		__cfq_slice_expired(cfqd, cfqq, 0);
3375		cfq_schedule_dispatch(cfqd);
3376	}
3377
3378	BUG_ON(cfq_cfqq_on_rr(cfqq));
3379	kmem_cache_free(cfq_pool, cfqq);
3380	cfqg_put(cfqg);
3381}
3382
3383static void cfq_put_cooperator(struct cfq_queue *cfqq)
3384{
3385	struct cfq_queue *__cfqq, *next;
3386
3387	/*
3388	 * If this queue was scheduled to merge with another queue, be
3389	 * sure to drop the reference taken on that queue (and others in
3390	 * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
3391	 */
3392	__cfqq = cfqq->new_cfqq;
3393	while (__cfqq) {
3394		if (__cfqq == cfqq) {
3395			WARN(1, "cfqq->new_cfqq loop detected\n");
3396			break;
3397		}
3398		next = __cfqq->new_cfqq;
3399		cfq_put_queue(__cfqq);
3400		__cfqq = next;
3401	}
3402}
3403
3404static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3405{
3406	if (unlikely(cfqq == cfqd->active_queue)) {
3407		__cfq_slice_expired(cfqd, cfqq, 0);
3408		cfq_schedule_dispatch(cfqd);
3409	}
3410
3411	cfq_put_cooperator(cfqq);
3412
3413	cfq_put_queue(cfqq);
3414}
3415
3416static void cfq_init_icq(struct io_cq *icq)
3417{
3418	struct cfq_io_cq *cic = icq_to_cic(icq);
3419
3420	cic->ttime.last_end_request = jiffies;
3421}
3422
3423static void cfq_exit_icq(struct io_cq *icq)
3424{
3425	struct cfq_io_cq *cic = icq_to_cic(icq);
3426	struct cfq_data *cfqd = cic_to_cfqd(cic);
3427
3428	if (cic->cfqq[BLK_RW_ASYNC]) {
3429		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
3430		cic->cfqq[BLK_RW_ASYNC] = NULL;
3431	}
3432
3433	if (cic->cfqq[BLK_RW_SYNC]) {
3434		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
3435		cic->cfqq[BLK_RW_SYNC] = NULL;
3436	}
3437}
3438
3439static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3440{
3441	struct task_struct *tsk = current;
3442	int ioprio_class;
3443
3444	if (!cfq_cfqq_prio_changed(cfqq))
3445		return;
3446
3447	ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3448	switch (ioprio_class) {
3449	default:
3450		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3451	case IOPRIO_CLASS_NONE:
3452		/*
3453		 * no prio set, inherit CPU scheduling settings
3454		 */
3455		cfqq->ioprio = task_nice_ioprio(tsk);
3456		cfqq->ioprio_class = task_nice_ioclass(tsk);
3457		break;
3458	case IOPRIO_CLASS_RT:
3459		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3460		cfqq->ioprio_class = IOPRIO_CLASS_RT;
3461		break;
3462	case IOPRIO_CLASS_BE:
3463		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3464		cfqq->ioprio_class = IOPRIO_CLASS_BE;
3465		break;
3466	case IOPRIO_CLASS_IDLE:
3467		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3468		cfqq->ioprio = 7;
3469		cfq_clear_cfqq_idle_window(cfqq);
3470		break;
3471	}
3472
3473	/*
3474	 * keep track of original prio settings in case we have to temporarily
3475	 * elevate the priority of this queue
3476	 */
3477	cfqq->org_ioprio = cfqq->ioprio;
3478	cfq_clear_cfqq_prio_changed(cfqq);
3479}
3480
3481static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3482{
3483	int ioprio = cic->icq.ioc->ioprio;
3484	struct cfq_data *cfqd = cic_to_cfqd(cic);
3485	struct cfq_queue *cfqq;
3486
3487	/*
3488	 * Check whether ioprio has changed.  The condition may trigger
3489	 * spuriously on a newly created cic but there's no harm.
3490	 */
3491	if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3492		return;
3493
3494	cfqq = cic->cfqq[BLK_RW_ASYNC];
3495	if (cfqq) {
3496		struct cfq_queue *new_cfqq;
3497		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
3498					 GFP_ATOMIC);
3499		if (new_cfqq) {
3500			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
3501			cfq_put_queue(cfqq);
3502		}
3503	}
3504
3505	cfqq = cic->cfqq[BLK_RW_SYNC];
3506	if (cfqq)
3507		cfq_mark_cfqq_prio_changed(cfqq);
3508
3509	cic->ioprio = ioprio;
3510}
3511
3512static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3513			  pid_t pid, bool is_sync)
3514{
3515	RB_CLEAR_NODE(&cfqq->rb_node);
3516	RB_CLEAR_NODE(&cfqq->p_node);
3517	INIT_LIST_HEAD(&cfqq->fifo);
3518
3519	cfqq->ref = 0;
3520	cfqq->cfqd = cfqd;
3521
3522	cfq_mark_cfqq_prio_changed(cfqq);
3523
3524	if (is_sync) {
3525		if (!cfq_class_idle(cfqq))
3526			cfq_mark_cfqq_idle_window(cfqq);
3527		cfq_mark_cfqq_sync(cfqq);
3528	}
3529	cfqq->pid = pid;
3530}
3531
3532#ifdef CONFIG_CFQ_GROUP_IOSCHED
3533static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3534{
3535	struct cfq_data *cfqd = cic_to_cfqd(cic);
3536	struct cfq_queue *sync_cfqq;
3537	uint64_t id;
3538
3539	rcu_read_lock();
3540	id = bio_blkcg(bio)->id;
3541	rcu_read_unlock();
3542
3543	/*
3544	 * Check whether blkcg has changed.  The condition may trigger
3545	 * spuriously on a newly created cic but there's no harm.
3546	 */
3547	if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
3548		return;
3549
3550	sync_cfqq = cic_to_cfqq(cic, 1);
3551	if (sync_cfqq) {
3552		/*
3553		 * Drop reference to sync queue. A new sync queue will be
3554		 * assigned in new group upon arrival of a fresh request.
3555		 */
3556		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
3557		cic_set_cfqq(cic, NULL, 1);
3558		cfq_put_queue(sync_cfqq);
3559	}
3560
3561	cic->blkcg_id = id;
3562}
3563#else
3564static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3565#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
3566
3567static struct cfq_queue *
3568cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3569		     struct bio *bio, gfp_t gfp_mask)
3570{
3571	struct blkcg *blkcg;
3572	struct cfq_queue *cfqq, *new_cfqq = NULL;
3573	struct cfq_group *cfqg;
3574
3575retry:
3576	rcu_read_lock();
3577
3578	blkcg = bio_blkcg(bio);
3579	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
3580	cfqq = cic_to_cfqq(cic, is_sync);
3581
3582	/*
3583	 * Always try a new alloc if we fell back to the OOM cfqq
3584	 * originally, since it should just be a temporary situation.
3585	 */
3586	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3587		cfqq = NULL;
3588		if (new_cfqq) {
3589			cfqq = new_cfqq;
3590			new_cfqq = NULL;
3591		} else if (gfp_mask & __GFP_WAIT) {
3592			rcu_read_unlock();
3593			spin_unlock_irq(cfqd->queue->queue_lock);
3594			new_cfqq = kmem_cache_alloc_node(cfq_pool,
3595					gfp_mask | __GFP_ZERO,
3596					cfqd->queue->node);
3597			spin_lock_irq(cfqd->queue->queue_lock);
3598			if (new_cfqq)
3599				goto retry;
3600			else
3601				return &cfqd->oom_cfqq;
3602		} else {
3603			cfqq = kmem_cache_alloc_node(cfq_pool,
3604					gfp_mask | __GFP_ZERO,
3605					cfqd->queue->node);
3606		}
3607
3608		if (cfqq) {
3609			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3610			cfq_init_prio_data(cfqq, cic);
3611			cfq_link_cfqq_cfqg(cfqq, cfqg);
3612			cfq_log_cfqq(cfqd, cfqq, "alloced");
3613		} else
3614			cfqq = &cfqd->oom_cfqq;
3615	}
3616
3617	if (new_cfqq)
3618		kmem_cache_free(cfq_pool, new_cfqq);
3619
3620	rcu_read_unlock();
3621	return cfqq;
3622}
3623
3624static struct cfq_queue **
3625cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
3626{
3627	switch (ioprio_class) {
3628	case IOPRIO_CLASS_RT:
3629		return &cfqd->async_cfqq[0][ioprio];
3630	case IOPRIO_CLASS_NONE:
3631		ioprio = IOPRIO_NORM;
3632		/* fall through */
3633	case IOPRIO_CLASS_BE:
3634		return &cfqd->async_cfqq[1][ioprio];
3635	case IOPRIO_CLASS_IDLE:
3636		return &cfqd->async_idle_cfqq;
3637	default:
3638		BUG();
3639	}
3640}
3641
3642static struct cfq_queue *
3643cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3644	      struct bio *bio, gfp_t gfp_mask)
3645{
3646	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3647	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3648	struct cfq_queue **async_cfqq = NULL;
3649	struct cfq_queue *cfqq = NULL;
3650
3651	if (!is_sync) {
3652		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
3653		cfqq = *async_cfqq;
3654	}
3655
3656	if (!cfqq)
3657		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
3658
3659	/*
3660	 * pin the queue now that it's allocated, scheduler exit will prune it
3661	 */
3662	if (!is_sync && !(*async_cfqq)) {
3663		cfqq->ref++;
3664		*async_cfqq = cfqq;
3665	}
3666
3667	cfqq->ref++;
3668	return cfqq;
3669}
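/*
 * Note that async queues are shared per (class, priority) across
 * processes via the async_cfqq table above, which is why an extra
 * reference is taken for the table before the per-caller reference.
 */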
3670
3671static void
3672__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3673{
3674	unsigned long elapsed = jiffies - ttime->last_end_request;
3675	elapsed = min(elapsed, 2UL * slice_idle);
3676
3677	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3678	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3679	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3680}
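/*
 * The updates above form a 7/8 exponentially weighted moving average.
 * Hypothetical example: once ttime_samples has converged to 256, an old
 * mean of 4 jiffies and a new 8-jiffy sample give a total of
 * (7 * 1024 + 256 * 8) / 8 = 1152, i.e. a mean of ~4.5 (5 after the
 * +128 rounding), moving about 1/8 of the way toward the new sample.
 */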
3681
3682static void
3683cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3684			struct cfq_io_cq *cic)
3685{
3686	if (cfq_cfqq_sync(cfqq)) {
3687		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3688		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3689			cfqd->cfq_slice_idle);
3690	}
3691#ifdef CONFIG_CFQ_GROUP_IOSCHED
3692	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3693#endif
3694}
3695
3696static void
3697cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3698		       struct request *rq)
3699{
3700	sector_t sdist = 0;
3701	sector_t n_sec = blk_rq_sectors(rq);
3702	if (cfqq->last_request_pos) {
3703		if (cfqq->last_request_pos < blk_rq_pos(rq))
3704			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3705		else
3706			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3707	}
3708
3709	cfqq->seek_history <<= 1;
3710	if (blk_queue_nonrot(cfqd->queue))
3711		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3712	else
3713		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3714}
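/*
 * seek_history acts as a 32-bit shift register of "seeky" samples: a bit
 * is set when a request lands far from the previous one (rotational) or
 * is small (non-rotational), and CFQQ_SEEKY() later flags the queue as
 * seeky once more than 1/8 of the recorded samples are set.
 */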
3715
3716/*
3717 * Disable idle window if the process thinks too long or seeks so much that
3718 * it doesn't matter
3719 */
3720static void
3721cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3722		       struct cfq_io_cq *cic)
3723{
3724	int old_idle, enable_idle;
3725
3726	/*
3727	 * Don't idle for async or idle io prio class
3728	 */
3729	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3730		return;
3731
3732	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3733
3734	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3735		cfq_mark_cfqq_deep(cfqq);
3736
3737	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3738		enable_idle = 0;
3739	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3740		 !cfqd->cfq_slice_idle ||
3741		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3742		enable_idle = 0;
3743	else if (sample_valid(cic->ttime.ttime_samples)) {
3744		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3745			enable_idle = 0;
3746		else
3747			enable_idle = 1;
3748	}
3749
3750	if (old_idle != enable_idle) {
3751		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3752		if (enable_idle)
3753			cfq_mark_cfqq_idle_window(cfqq);
3754		else
3755			cfq_clear_cfqq_idle_window(cfqq);
3756	}
3757}
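/*
 * In short: the idle window is only ever granted to sync, non-idle-class
 * queues, and it is revoked when the submitting context is gone, idling is
 * disabled, the queue looks seeky without being "deep", or the sampled mean
 * think time exceeds cfq_slice_idle.
 */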
3758
3759/*
3760 * Check if new_cfqq should preempt the currently active queue. Return false
3761 * for no (or if we aren't sure); returning true will cause a preempt.
3762 */
3763static bool
3764cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3765		   struct request *rq)
3766{
3767	struct cfq_queue *cfqq;
3768
3769	cfqq = cfqd->active_queue;
3770	if (!cfqq)
3771		return false;
3772
3773	if (cfq_class_idle(new_cfqq))
3774		return false;
3775
3776	if (cfq_class_idle(cfqq))
3777		return true;
3778
3779	/*
3780	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3781	 */
3782	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3783		return false;
3784
3785	/*
3786	 * if the new request is sync, but the currently running queue is
3787	 * not, let the sync request have priority.
3788	 */
3789	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3790		return true;
3791
3792	if (new_cfqq->cfqg != cfqq->cfqg)
3793		return false;
3794
3795	if (cfq_slice_used(cfqq))
3796		return true;
3797
3798	/* Allow preemption only if we are idling on sync-noidle tree */
3799	if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
3800	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3801	    new_cfqq->service_tree->count == 2 &&
3802	    RB_EMPTY_ROOT(&cfqq->sort_list))
3803		return true;
3804
3805	/*
3806	 * So both queues are sync. Let the new request get disk time if
3807	 * it's a metadata request and the current queue is doing regular IO.
3808	 */
3809	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3810		return true;
3811
3812	/*
3813	 * Allow an RT request to preempt an ongoing non-RT cfqq timeslice.
3814	 */
3815	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3816		return true;
3817
3818	/* The active queue is empty but not worth idling on; allow preemption */
3819	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3820		return true;
3821
3822	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3823		return false;
3824
3825	/*
3826	 * if this request is as good as one we would expect from the
3827	 * current cfqq, let it preempt
3828	 */
3829	if (cfq_rq_close(cfqd, cfqq, rq))
3830		return true;
3831
3832	return false;
3833}
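/*
 * Summarized, the ladder above prefers: anything over an idle-class queue,
 * sync requests over an async active queue, and then - only within the same
 * group - a queue whose slice is already used up, sync-noidle peers while we
 * idle on that tree, REQ_PRIO (metadata) requests, RT over non-RT, an active
 * queue that is empty but not worth idling on, and finally requests close on
 * disk to what the active queue would issue next.
 */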
3834
3835/*
3836 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3837 * let it have half of its nominal slice.
3838 */
3839static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3840{
3841	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3842
3843	cfq_log_cfqq(cfqd, cfqq, "preempt");
3844	cfq_slice_expired(cfqd, 1);
3845
3846	/*
3847	 * the workload type has changed; don't save the slice, otherwise the
3848	 * preempt doesn't take effect
3849	 */
3850	if (old_type != cfqq_type(cfqq))
3851		cfqq->cfqg->saved_wl_slice = 0;
3852
3853	/*
3854	 * Put the new queue at the front of the current list,
3855	 * so we know that it will be selected next.
3856	 */
3857	BUG_ON(!cfq_cfqq_on_rr(cfqq));
3858
3859	cfq_service_tree_add(cfqd, cfqq, 1);
3860
3861	cfqq->slice_end = 0;
3862	cfq_mark_cfqq_slice_new(cfqq);
3863}
3864
3865/*
3866 * Called when a new fs request (rq) is added (to cfqq). Check if there's
3867 * something we should do about it
3868 */
3869static void
3870cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3871		struct request *rq)
3872{
3873	struct cfq_io_cq *cic = RQ_CIC(rq);
3874
3875	cfqd->rq_queued++;
3876	if (rq->cmd_flags & REQ_PRIO)
3877		cfqq->prio_pending++;
3878
3879	cfq_update_io_thinktime(cfqd, cfqq, cic);
3880	cfq_update_io_seektime(cfqd, cfqq, rq);
3881	cfq_update_idle_window(cfqd, cfqq, cic);
3882
3883	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3884
3885	if (cfqq == cfqd->active_queue) {
3886		/*
3887		 * Remember that we saw a request from this process, but
3888		 * don't start queuing just yet. Otherwise we risk seeing lots
3889		 * of tiny requests, because we disrupt the normal plugging
3890		 * and merging. If the request is already larger than a single
3891		 * page, let it rip immediately. For that case we assume that
3892		 * merging is already done. Ditto for a busy system that
3893		 * has other work pending, don't risk delaying until the
3894		 * idle timer unplug to continue working.
3895		 */
3896		if (cfq_cfqq_wait_request(cfqq)) {
3897			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3898			    cfqd->busy_queues > 1) {
3899				cfq_del_timer(cfqd, cfqq);
3900				cfq_clear_cfqq_wait_request(cfqq);
3901				__blk_run_queue(cfqd->queue);
3902			} else {
3903				cfqg_stats_update_idle_time(cfqq->cfqg);
3904				cfq_mark_cfqq_must_dispatch(cfqq);
3905			}
3906		}
3907	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3908		/*
3909		 * not the active queue - expire the current slice if it is
3910		 * idle and has exceeded its mean thinktime, or if this new queue
3911		 * has some old slice time left and is of higher priority, or if
3912		 * this new queue is RT and the current one is BE
3913		 */
3914		cfq_preempt_queue(cfqd, cfqq);
3915		__blk_run_queue(cfqd->queue);
3916	}
3917}
3918
3919static void cfq_insert_request(struct request_queue *q, struct request *rq)
3920{
3921	struct cfq_data *cfqd = q->elevator->elevator_data;
3922	struct cfq_queue *cfqq = RQ_CFQQ(rq);
3923
3924	cfq_log_cfqq(cfqd, cfqq, "insert_request");
3925	cfq_init_prio_data(cfqq, RQ_CIC(rq));
3926
3927	rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
3928	list_add_tail(&rq->queuelist, &cfqq->fifo);
3929	cfq_add_rq_rb(rq);
3930	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
3931				 rq->cmd_flags);
3932	cfq_rq_enqueued(cfqd, cfqq, rq);
3933}
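/*
 * Each inserted request lands on two structures: the per-queue FIFO, with a
 * deadline of now + fifo_expire[sync/async] that later bounds how long it
 * may sit before it must be dispatched, and the sector-sorted rbtree via
 * cfq_add_rq_rb(), which provides seek-friendly ordering within the queue's
 * slice.
 */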
3934
3935/*
3936 * Update hw_tag based on peak queue depth over 50 samples under
3937 * sufficient load.
3938 */
3939static void cfq_update_hw_tag(struct cfq_data *cfqd)
3940{
3941	struct cfq_queue *cfqq = cfqd->active_queue;
3942
3943	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3944		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3945
3946	if (cfqd->hw_tag == 1)
3947		return;
3948
3949	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3950	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3951		return;
3952
3953	/*
3954	 * If the active queue doesn't have enough requests and can idle, cfq might
3955	 * not dispatch sufficient requests to hardware. Don't zero hw_tag in this
3956	 * case
3957	 */
3958	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3959	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3960	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3961		return;
3962
3963	if (cfqd->hw_tag_samples++ < 50)
3964		return;
3965
3966	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3967		cfqd->hw_tag = 1;
3968	else
3969		cfqd->hw_tag = 0;
3970}
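/*
 * hw_tag is inferred rather than queried from the device: after roughly 50
 * busy samples, a peak observed driver depth of at least CFQ_HW_QUEUE_MIN
 * marks the device as command-queueing (hw_tag = 1), which the idling
 * heuristics elsewhere factor in.
 */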
3971
3972static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3973{
3974	struct cfq_io_cq *cic = cfqd->active_cic;
3975
3976	/* If the queue already has requests, don't wait */
3977	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3978		return false;
3979
3980	/* If there are other queues in the group, don't wait */
3981	if (cfqq->cfqg->nr_cfqq > 1)
3982		return false;
3983
3984	/* the only queue in the group, but think time is big */
3985	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3986		return false;
3987
3988	if (cfq_slice_used(cfqq))
3989		return true;
3990
3991	/* if slice left is less than think time, wait busy */
3992	if (cic && sample_valid(cic->ttime.ttime_samples)
3993	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
3994		return true;
3995
3996	/*
3997	 * If think time is less than a jiffy, then ttime_mean=0 and the check
3998	 * above will not be true. It might happen that the slice has not expired
3999	 * yet but will expire soon (4-5 ns) during select_queue(). To cover the
4000	 * case where think time is less than a jiffy, mark the queue wait
4001	 * busy if only 1 jiffy is left in the slice.
4002	 */
4003	if (cfqq->slice_end - jiffies == 1)
4004		return true;
4005
4006	return false;
4007}
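/*
 * The wait-busy state keeps an otherwise empty queue nominally active for a
 * short while after its last completion, so that a group whose only queue
 * briefly runs dry is not expired immediately and does not lose its share of
 * disk time to other groups.
 */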
4008
4009static void cfq_completed_request(struct request_queue *q, struct request *rq)
4010{
4011	struct cfq_queue *cfqq = RQ_CFQQ(rq);
4012	struct cfq_data *cfqd = cfqq->cfqd;
4013	const int sync = rq_is_sync(rq);
4014	unsigned long now;
4015
4016	now = jiffies;
4017	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
4018		     !!(rq->cmd_flags & REQ_NOIDLE));
4019
4020	cfq_update_hw_tag(cfqd);
4021
4022	WARN_ON(!cfqd->rq_in_driver);
4023	WARN_ON(!cfqq->dispatched);
4024	cfqd->rq_in_driver--;
4025	cfqq->dispatched--;
4026	(RQ_CFQG(rq))->dispatched--;
4027	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4028				     rq_io_start_time_ns(rq), rq->cmd_flags);
4029
4030	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
4031
4032	if (sync) {
4033		struct cfq_rb_root *st;
4034
4035		RQ_CIC(rq)->ttime.last_end_request = now;
4036
4037		if (cfq_cfqq_on_rr(cfqq))
4038			st = cfqq->service_tree;
4039		else
4040			st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4041					cfqq_type(cfqq));
4042
4043		st->ttime.last_end_request = now;
4044		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
4045			cfqd->last_delayed_sync = now;
4046	}
4047
4048#ifdef CONFIG_CFQ_GROUP_IOSCHED
4049	cfqq->cfqg->ttime.last_end_request = now;
4050#endif
4051
4052	/*
4053	 * If this is the active queue, check if it needs to be expired,
4054	 * or if we want to idle in case it has no pending requests.
4055	 */
4056	if (cfqd->active_queue == cfqq) {
4057		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4058
4059		if (cfq_cfqq_slice_new(cfqq)) {
4060			cfq_set_prio_slice(cfqd, cfqq);
4061			cfq_clear_cfqq_slice_new(cfqq);
4062		}
4063
4064		/*
4065		 * Should we wait for the next request to come in before we expire
4066		 * the queue?
4067		 */
4068		if (cfq_should_wait_busy(cfqd, cfqq)) {
4069			unsigned long extend_sl = cfqd->cfq_slice_idle;
4070			if (!cfqd->cfq_slice_idle)
4071				extend_sl = cfqd->cfq_group_idle;
4072			cfqq->slice_end = jiffies + extend_sl;
4073			cfq_mark_cfqq_wait_busy(cfqq);
4074			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
4075		}
4076
4077		/*
4078		 * Idling is not enabled on:
4079		 * - expired queues
4080		 * - idle-priority queues
4081		 * - async queues
4082		 * - queues with still some requests queued
4083		 * - when there is a close cooperator
4084		 */
4085		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
4086			cfq_slice_expired(cfqd, 1);
4087		else if (sync && cfqq_empty &&
4088			 !cfq_close_cooperator(cfqd, cfqq)) {
4089			cfq_arm_slice_timer(cfqd);
4090		}
4091	}
4092
4093	if (!cfqd->rq_in_driver)
4094		cfq_schedule_dispatch(cfqd);
4095}
4096
4097static inline int __cfq_may_queue(struct cfq_queue *cfqq)
4098{
4099	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
4100		cfq_mark_cfqq_must_alloc_slice(cfqq);
4101		return ELV_MQUEUE_MUST;
4102	}
4103
4104	return ELV_MQUEUE_MAY;
4105}
4106
4107static int cfq_may_queue(struct request_queue *q, int rw)
4108{
4109	struct cfq_data *cfqd = q->elevator->elevator_data;
4110	struct task_struct *tsk = current;
4111	struct cfq_io_cq *cic;
4112	struct cfq_queue *cfqq;
4113
4114	/*
4115	 * don't force setup of a queue from here, as a call to may_queue
4116	 * does not necessarily imply that a request actually will be queued.
4117	 * so just lookup a possibly existing queue, or return 'may queue'
4118	 * if that fails
4119	 */
4120	cic = cfq_cic_lookup(cfqd, tsk->io_context);
4121	if (!cic)
4122		return ELV_MQUEUE_MAY;
4123
4124	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
4125	if (cfqq) {
4126		cfq_init_prio_data(cfqq, cic);
4127
4128		return __cfq_may_queue(cfqq);
4129	}
4130
4131	return ELV_MQUEUE_MAY;
4132}
4133
4134/*
4135 * queue lock held here
4136 */
4137static void cfq_put_request(struct request *rq)
4138{
4139	struct cfq_queue *cfqq = RQ_CFQQ(rq);
4140
4141	if (cfqq) {
4142		const int rw = rq_data_dir(rq);
4143
4144		BUG_ON(!cfqq->allocated[rw]);
4145		cfqq->allocated[rw]--;
4146
4147		/* Put down rq reference on cfqg */
4148		cfqg_put(RQ_CFQG(rq));
4149		rq->elv.priv[0] = NULL;
4150		rq->elv.priv[1] = NULL;
4151
4152		cfq_put_queue(cfqq);
4153	}
4154}
4155
4156static struct cfq_queue *
4157cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
4158		struct cfq_queue *cfqq)
4159{
4160	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4161	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
4162	cfq_mark_cfqq_coop(cfqq->new_cfqq);
4163	cfq_put_queue(cfqq);
4164	return cic_to_cfqq(cic, 1);
4165}
4166
4167/*
4168 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4169 * was the last process referring to said cfqq.
4170 */
4171static struct cfq_queue *
4172split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
4173{
4174	if (cfqq_process_refs(cfqq) == 1) {
4175		cfqq->pid = current->pid;
4176		cfq_clear_cfqq_coop(cfqq);
4177		cfq_clear_cfqq_split_coop(cfqq);
4178		return cfqq;
4179	}
4180
4181	cic_set_cfqq(cic, NULL, 1);
4182
4183	cfq_put_cooperator(cfqq);
4184
4185	cfq_put_queue(cfqq);
4186	return NULL;
4187}
4188/*
4189 * Allocate cfq data structures associated with this request.
4190 */
4191static int
4192cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4193		gfp_t gfp_mask)
4194{
4195	struct cfq_data *cfqd = q->elevator->elevator_data;
4196	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
4197	const int rw = rq_data_dir(rq);
4198	const bool is_sync = rq_is_sync(rq);
4199	struct cfq_queue *cfqq;
4200
4201	might_sleep_if(gfp_mask & __GFP_WAIT);
4202
4203	spin_lock_irq(q->queue_lock);
4204
4205	check_ioprio_changed(cic, bio);
4206	check_blkcg_changed(cic, bio);
4207new_queue:
4208	cfqq = cic_to_cfqq(cic, is_sync);
4209	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
4210		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
4211		cic_set_cfqq(cic, cfqq, is_sync);
4212	} else {
4213		/*
4214		 * If the queue was seeky for too long, break it apart.
4215		 */
4216		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
4217			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4218			cfqq = split_cfqq(cic, cfqq);
4219			if (!cfqq)
4220				goto new_queue;
4221		}
4222
4223		/*
4224		 * Check to see if this queue is scheduled to merge with
4225		 * another, closely cooperating queue.  The merging of
4226		 * queues happens here as it must be done in process context.
4227		 * The reference on new_cfqq was taken in merge_cfqqs.
4228		 */
4229		if (cfqq->new_cfqq)
4230			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
4231	}
4232
4233	cfqq->allocated[rw]++;
4234
4235	cfqq->ref++;
4236	cfqg_get(cfqq->cfqg);
4237	rq->elv.priv[0] = cfqq;
4238	rq->elv.priv[1] = cfqq->cfqg;
4239	spin_unlock_irq(q->queue_lock);
4240	return 0;
4241}
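/*
 * The request now carries its owning queue and group in rq->elv.priv[0] and
 * rq->elv.priv[1] (what RQ_CFQQ() and RQ_CFQG() read back), each pinned by a
 * reference that cfq_put_request() drops when the request is torn down.
 */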
4242
4243static void cfq_kick_queue(struct work_struct *work)
4244{
4245	struct cfq_data *cfqd =
4246		container_of(work, struct cfq_data, unplug_work);
4247	struct request_queue *q = cfqd->queue;
4248
4249	spin_lock_irq(q->queue_lock);
4250	__blk_run_queue(cfqd->queue);
4251	spin_unlock_irq(q->queue_lock);
4252}
4253
4254/*
4255 * Timer running if the active_queue is currently idling inside its time slice
4256 */
4257static void cfq_idle_slice_timer(unsigned long data)
4258{
4259	struct cfq_data *cfqd = (struct cfq_data *) data;
4260	struct cfq_queue *cfqq;
4261	unsigned long flags;
4262	int timed_out = 1;
4263
4264	cfq_log(cfqd, "idle timer fired");
4265
4266	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4267
4268	cfqq = cfqd->active_queue;
4269	if (cfqq) {
4270		timed_out = 0;
4271
4272		/*
4273		 * We saw a request before the queue expired, let it through
4274		 */
4275		if (cfq_cfqq_must_dispatch(cfqq))
4276			goto out_kick;
4277
4278		/*
4279		 * expired
4280		 */
4281		if (cfq_slice_used(cfqq))
4282			goto expire;
4283
4284		/*
4285		 * only expire and reinvoke request handler, if there are
4286		 * other queues with pending requests
4287		 */
4288		if (!cfqd->busy_queues)
4289			goto out_cont;
4290
4291		/*
4292		 * not expired and it has a request pending, let it dispatch
4293		 */
4294		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4295			goto out_kick;
4296
4297		/*
4298		 * Queue depth flag is reset only when the idle didn't succeed
4299		 */
4300		cfq_clear_cfqq_deep(cfqq);
4301	}
4302expire:
4303	cfq_slice_expired(cfqd, timed_out);
4304out_kick:
4305	cfq_schedule_dispatch(cfqd);
4306out_cont:
4307	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4308}
4309
4310static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4311{
4312	del_timer_sync(&cfqd->idle_slice_timer);
4313	cancel_work_sync(&cfqd->unplug_work);
4314}
4315
4316static void cfq_put_async_queues(struct cfq_data *cfqd)
4317{
4318	int i;
4319
4320	for (i = 0; i < IOPRIO_BE_NR; i++) {
4321		if (cfqd->async_cfqq[0][i])
4322			cfq_put_queue(cfqd->async_cfqq[0][i]);
4323		if (cfqd->async_cfqq[1][i])
4324			cfq_put_queue(cfqd->async_cfqq[1][i]);
4325	}
4326
4327	if (cfqd->async_idle_cfqq)
4328		cfq_put_queue(cfqd->async_idle_cfqq);
4329}
4330
4331static void cfq_exit_queue(struct elevator_queue *e)
4332{
4333	struct cfq_data *cfqd = e->elevator_data;
4334	struct request_queue *q = cfqd->queue;
4335
4336	cfq_shutdown_timer_wq(cfqd);
4337
4338	spin_lock_irq(q->queue_lock);
4339
4340	if (cfqd->active_queue)
4341		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
4342
4343	cfq_put_async_queues(cfqd);
4344
4345	spin_unlock_irq(q->queue_lock);
4346
4347	cfq_shutdown_timer_wq(cfqd);
4348
4349#ifdef CONFIG_CFQ_GROUP_IOSCHED
4350	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4351#else
4352	kfree(cfqd->root_group);
4353#endif
4354	kfree(cfqd);
4355}
4356
4357static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
4358{
4359	struct cfq_data *cfqd;
4360	struct blkcg_gq *blkg __maybe_unused;
4361	int i, ret;
4362	struct elevator_queue *eq;
4363
4364	eq = elevator_alloc(q, e);
4365	if (!eq)
4366		return -ENOMEM;
4367
4368	cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
4369	if (!cfqd) {
4370		kobject_put(&eq->kobj);
4371		return -ENOMEM;
4372	}
4373	eq->elevator_data = cfqd;
4374
4375	cfqd->queue = q;
4376	spin_lock_irq(q->queue_lock);
4377	q->elevator = eq;
4378	spin_unlock_irq(q->queue_lock);
4379
4380	/* Init root service tree */
4381	cfqd->grp_service_tree = CFQ_RB_ROOT;
4382
4383	/* Init root group and prefer root group over other groups by default */
4384#ifdef CONFIG_CFQ_GROUP_IOSCHED
4385	ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
4386	if (ret)
4387		goto out_free;
4388
4389	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
4390#else
4391	ret = -ENOMEM;
4392	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4393					GFP_KERNEL, cfqd->queue->node);
4394	if (!cfqd->root_group)
4395		goto out_free;
4396
4397	cfq_init_cfqg_base(cfqd->root_group);
4398#endif
4399	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
4400	cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
4401
4402	/*
4403	 * Not strictly needed (since RB_ROOT just clears the node and we
4404	 * zeroed cfqd on alloc), but better be safe in case someone decides
4405	 * to add magic to the rb code
4406	 */
4407	for (i = 0; i < CFQ_PRIO_LISTS; i++)
4408		cfqd->prio_trees[i] = RB_ROOT;
4409
4410	/*
4411	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
4412	 * Grab a permanent reference to it, so that the normal code flow
4413	 * will not attempt to free it.  oom_cfqq is linked to root_group
4414	 * but shouldn't hold a reference as it'll never be unlinked.  Lose
4415	 * the reference from linking right away.
4416	 */
4417	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4418	cfqd->oom_cfqq.ref++;
4419
4420	spin_lock_irq(q->queue_lock);
4421	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4422	cfqg_put(cfqd->root_group);
4423	spin_unlock_irq(q->queue_lock);
4424
4425	init_timer(&cfqd->idle_slice_timer);
4426	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4427	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4428
4429	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4430
4431	cfqd->cfq_quantum = cfq_quantum;
4432	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4433	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4434	cfqd->cfq_back_max = cfq_back_max;
4435	cfqd->cfq_back_penalty = cfq_back_penalty;
4436	cfqd->cfq_slice[0] = cfq_slice_async;
4437	cfqd->cfq_slice[1] = cfq_slice_sync;
4438	cfqd->cfq_target_latency = cfq_target_latency;
4439	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4440	cfqd->cfq_slice_idle = cfq_slice_idle;
4441	cfqd->cfq_group_idle = cfq_group_idle;
4442	cfqd->cfq_latency = 1;
4443	cfqd->hw_tag = -1;
4444	/*
4445	 * we optimistically start assuming sync ops weren't delayed in the last
4446	 * second, in order to have a larger depth for async operations.
4447	 */
4448	cfqd->last_delayed_sync = jiffies - HZ;
4449	return 0;
4450
4451out_free:
4452	kfree(cfqd);
4453	kobject_put(&eq->kobj);
4454	return ret;
4455}
4456
4457/*
4458 * sysfs parts below -->
4459 */
4460static ssize_t
4461cfq_var_show(unsigned int var, char *page)
4462{
4463	return sprintf(page, "%d\n", var);
4464}
4465
4466static ssize_t
4467cfq_var_store(unsigned int *var, const char *page, size_t count)
4468{
4469	char *p = (char *) page;
4470
4471	*var = simple_strtoul(p, &p, 10);
4472	return count;
4473}
4474
4475#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
4476static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
4477{									\
4478	struct cfq_data *cfqd = e->elevator_data;			\
4479	unsigned int __data = __VAR;					\
4480	if (__CONV)							\
4481		__data = jiffies_to_msecs(__data);			\
4482	return cfq_var_show(__data, (page));				\
4483}
4484SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4485SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4486SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4487SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4488SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4489SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4490SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4491SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4492SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4493SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4494SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4495SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4496#undef SHOW_FUNCTION
4497
4498#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
4499static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
4500{									\
4501	struct cfq_data *cfqd = e->elevator_data;			\
4502	unsigned int __data;						\
4503	int ret = cfq_var_store(&__data, (page), count);		\
4504	if (__data < (MIN))						\
4505		__data = (MIN);						\
4506	else if (__data > (MAX))					\
4507		__data = (MAX);						\
4508	if (__CONV)							\
4509		*(__PTR) = msecs_to_jiffies(__data);			\
4510	else								\
4511		*(__PTR) = __data;					\
4512	return ret;							\
4513}
4514STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4515STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4516		UINT_MAX, 1);
4517STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4518		UINT_MAX, 1);
4519STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4520STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4521		UINT_MAX, 0);
4522STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4523STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4524STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4525STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4526STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4527		UINT_MAX, 0);
4528STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4529STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
4530#undef STORE_FUNCTION
4531
4532#define CFQ_ATTR(name) \
4533	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4534
4535static struct elv_fs_entry cfq_attrs[] = {
4536	CFQ_ATTR(quantum),
4537	CFQ_ATTR(fifo_expire_sync),
4538	CFQ_ATTR(fifo_expire_async),
4539	CFQ_ATTR(back_seek_max),
4540	CFQ_ATTR(back_seek_penalty),
4541	CFQ_ATTR(slice_sync),
4542	CFQ_ATTR(slice_async),
4543	CFQ_ATTR(slice_async_rq),
4544	CFQ_ATTR(slice_idle),
4545	CFQ_ATTR(group_idle),
4546	CFQ_ATTR(low_latency),
4547	CFQ_ATTR(target_latency),
4548	__ATTR_NULL
4549};
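/*
 * These attributes show up under /sys/block/<dev>/queue/iosched/ whenever
 * cfq is the active elevator for <dev>.  Values handled with __CONV set are
 * exposed in milliseconds (converted to and from jiffies internally); the
 * rest are plain integers.  For example, something like
 *
 *	echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * disables per-queue idling (sda being just an example device).
 */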
4550
4551static struct elevator_type iosched_cfq = {
4552	.ops = {
4553		.elevator_merge_fn = 		cfq_merge,
4554		.elevator_merged_fn =		cfq_merged_request,
4555		.elevator_merge_req_fn =	cfq_merged_requests,
4556		.elevator_allow_merge_fn =	cfq_allow_merge,
4557		.elevator_bio_merged_fn =	cfq_bio_merged,
4558		.elevator_dispatch_fn =		cfq_dispatch_requests,
4559		.elevator_add_req_fn =		cfq_insert_request,
4560		.elevator_activate_req_fn =	cfq_activate_request,
4561		.elevator_deactivate_req_fn =	cfq_deactivate_request,
4562		.elevator_completed_req_fn =	cfq_completed_request,
4563		.elevator_former_req_fn =	elv_rb_former_request,
4564		.elevator_latter_req_fn =	elv_rb_latter_request,
4565		.elevator_init_icq_fn =		cfq_init_icq,
4566		.elevator_exit_icq_fn =		cfq_exit_icq,
4567		.elevator_set_req_fn =		cfq_set_request,
4568		.elevator_put_req_fn =		cfq_put_request,
4569		.elevator_may_queue_fn =	cfq_may_queue,
4570		.elevator_init_fn =		cfq_init_queue,
4571		.elevator_exit_fn =		cfq_exit_queue,
4572	},
4573	.icq_size	=	sizeof(struct cfq_io_cq),
4574	.icq_align	=	__alignof__(struct cfq_io_cq),
4575	.elevator_attrs =	cfq_attrs,
4576	.elevator_name	=	"cfq",
4577	.elevator_owner =	THIS_MODULE,
4578};
4579
4580#ifdef CONFIG_CFQ_GROUP_IOSCHED
4581static struct blkcg_policy blkcg_policy_cfq = {
4582	.pd_size		= sizeof(struct cfq_group),
4583	.cftypes		= cfq_blkcg_files,
4584
4585	.pd_init_fn		= cfq_pd_init,
4586	.pd_offline_fn		= cfq_pd_offline,
4587	.pd_reset_stats_fn	= cfq_pd_reset_stats,
4588};
4589#endif
4590
4591static int __init cfq_init(void)
4592{
4593	int ret;
4594
4595	/*
4596	 * could be 0 on HZ < 1000 setups
4597	 */
4598	if (!cfq_slice_async)
4599		cfq_slice_async = 1;
4600	if (!cfq_slice_idle)
4601		cfq_slice_idle = 1;
4602
4603#ifdef CONFIG_CFQ_GROUP_IOSCHED
4604	if (!cfq_group_idle)
4605		cfq_group_idle = 1;
4606
4607	ret = blkcg_policy_register(&blkcg_policy_cfq);
4608	if (ret)
4609		return ret;
4610#else
4611	cfq_group_idle = 0;
4612#endif
4613
4614	ret = -ENOMEM;
4615	cfq_pool = KMEM_CACHE(cfq_queue, 0);
4616	if (!cfq_pool)
4617		goto err_pol_unreg;
4618
4619	ret = elv_register(&iosched_cfq);
4620	if (ret)
4621		goto err_free_pool;
4622
4623	return 0;
4624
4625err_free_pool:
4626	kmem_cache_destroy(cfq_pool);
4627err_pol_unreg:
4628#ifdef CONFIG_CFQ_GROUP_IOSCHED
4629	blkcg_policy_unregister(&blkcg_policy_cfq);
4630#endif
4631	return ret;
4632}
4633
4634static void __exit cfq_exit(void)
4635{
4636#ifdef CONFIG_CFQ_GROUP_IOSCHED
4637	blkcg_policy_unregister(&blkcg_policy_cfq);
4638#endif
4639	elv_unregister(&iosched_cfq);
4640	kmem_cache_destroy(cfq_pool);
4641}
4642
4643module_init(cfq_init);
4644module_exit(cfq_exit);
4645
4646MODULE_AUTHOR("Jens Axboe");
4647MODULE_LICENSE("GPL");
4648MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");