   1/*
   2 *  CFQ, or complete fairness queueing, disk scheduler.
   3 *
   4 *  Based on ideas from a previously unfinished io
   5 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
   6 *
   7 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   8 */
   9#include <linux/module.h>
  10#include <linux/slab.h>
  11#include <linux/blkdev.h>
  12#include <linux/elevator.h>
  13#include <linux/jiffies.h>
  14#include <linux/rbtree.h>
  15#include <linux/ioprio.h>
  16#include <linux/blktrace_api.h>
  17#include "blk.h"
  18#include "blk-cgroup.h"
  19
  20/*
  21 * tunables
  22 */
  23/* max queue in one round of service */
  24static const int cfq_quantum = 8;
  25static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  26/* maximum backwards seek, in KiB */
  27static const int cfq_back_max = 16 * 1024;
  28/* penalty of a backwards seek */
  29static const int cfq_back_penalty = 2;
  30static const int cfq_slice_sync = HZ / 10;
  31static int cfq_slice_async = HZ / 25;
  32static const int cfq_slice_async_rq = 2;
  33static int cfq_slice_idle = HZ / 125;
  34static int cfq_group_idle = HZ / 125;
  35static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
  36static const int cfq_hist_divisor = 4;
  37
  38/*
  39 * offset from end of service tree
  40 */
  41#define CFQ_IDLE_DELAY		(HZ / 5)
  42
  43/*
  44 * below this threshold, we consider thinktime immediate
  45 */
  46#define CFQ_MIN_TT		(2)
  47
  48#define CFQ_SLICE_SCALE		(5)
  49#define CFQ_HW_QUEUE_MIN	(5)
  50#define CFQ_SERVICE_SHIFT       12
  51
  52#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
  53#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
  54#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
  55#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
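/*
 * Reading the seekiness test above: ->seek_history acts as a 32-bit shift
 * register of recent requests, with a bit recorded per sampled request
 * (set when the request was issued more than CFQQ_SEEK_THR, i.e. 800
 * sectors or 400 KiB, away from the previous one; the update happens in
 * the seek-time accounting elsewhere in this file).  hweight32() counts
 * the set bits, so a queue is flagged seeky once more than 32/8 = 4 of
 * its last 32 samples were long seeks.
 */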
  56
  57#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
  58#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
  59#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])
  60
  61static struct kmem_cache *cfq_pool;
  62
  63#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
  64#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  65#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
  66
  67#define sample_valid(samples)	((samples) > 80)
  68#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)
  69
  70struct cfq_ttime {
  71	unsigned long last_end_request;
  72
  73	unsigned long ttime_total;
  74	unsigned long ttime_samples;
  75	unsigned long ttime_mean;
  76};
  77
  78/*
  79 * Most of our rbtree usage is for sorting with min extraction, so
  80 * if we cache the leftmost node we don't have to walk down the tree
   81 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
  82 * move this into the elevator for the rq sorting as well.
  83 */
  84struct cfq_rb_root {
  85	struct rb_root rb;
  86	struct rb_node *left;
  87	unsigned count;
  88	u64 min_vdisktime;
  89	struct cfq_ttime ttime;
  90};
  91#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
  92			.ttime = {.last_end_request = jiffies,},}
  93
  94/*
  95 * Per process-grouping structure
  96 */
  97struct cfq_queue {
  98	/* reference count */
  99	int ref;
 100	/* various state flags, see below */
 101	unsigned int flags;
 102	/* parent cfq_data */
 103	struct cfq_data *cfqd;
 104	/* service_tree member */
 105	struct rb_node rb_node;
 106	/* service_tree key */
 107	unsigned long rb_key;
 108	/* prio tree member */
 109	struct rb_node p_node;
 110	/* prio tree root we belong to, if any */
 111	struct rb_root *p_root;
 112	/* sorted list of pending requests */
 113	struct rb_root sort_list;
 114	/* if fifo isn't expired, next request to serve */
 115	struct request *next_rq;
 116	/* requests queued in sort_list */
 117	int queued[2];
 118	/* currently allocated requests */
 119	int allocated[2];
 120	/* fifo list of requests in sort_list */
 121	struct list_head fifo;
 122
 123	/* time when queue got scheduled in to dispatch first request. */
 124	unsigned long dispatch_start;
 125	unsigned int allocated_slice;
 126	unsigned int slice_dispatch;
 127	/* time when first request from queue completed and slice started. */
 128	unsigned long slice_start;
 129	unsigned long slice_end;
 130	long slice_resid;
 131
 132	/* pending priority requests */
 133	int prio_pending;
 134	/* number of requests that are on the dispatch list or inside driver */
 135	int dispatched;
 136
 137	/* io prio of this group */
 138	unsigned short ioprio, org_ioprio;
 139	unsigned short ioprio_class;
 140
 141	pid_t pid;
 142
 143	u32 seek_history;
 144	sector_t last_request_pos;
 145
 146	struct cfq_rb_root *service_tree;
 147	struct cfq_queue *new_cfqq;
 148	struct cfq_group *cfqg;
 149	/* Number of sectors dispatched from queue in single dispatch round */
 150	unsigned long nr_sectors;
 151};
 152
 153/*
 154 * First index in the service_trees.
 155 * IDLE is handled separately, so it has negative index
 156 */
 157enum wl_class_t {
 158	BE_WORKLOAD = 0,
 159	RT_WORKLOAD = 1,
 160	IDLE_WORKLOAD = 2,
 161	CFQ_PRIO_NR,
 162};
 163
 164/*
 165 * Second index in the service_trees.
 166 */
 167enum wl_type_t {
 168	ASYNC_WORKLOAD = 0,
 169	SYNC_NOIDLE_WORKLOAD = 1,
 170	SYNC_WORKLOAD = 2
 171};
 172
 173struct cfqg_stats {
 174#ifdef CONFIG_CFQ_GROUP_IOSCHED
 175	/* total bytes transferred */
 176	struct blkg_rwstat		service_bytes;
 177	/* total IOs serviced, post merge */
 178	struct blkg_rwstat		serviced;
 179	/* number of ios merged */
 180	struct blkg_rwstat		merged;
 181	/* total time spent on device in ns, may not be accurate w/ queueing */
 182	struct blkg_rwstat		service_time;
 183	/* total time spent waiting in scheduler queue in ns */
 184	struct blkg_rwstat		wait_time;
 185	/* number of IOs queued up */
 186	struct blkg_rwstat		queued;
 187	/* total sectors transferred */
 188	struct blkg_stat		sectors;
 189	/* total disk time and nr sectors dispatched by this group */
 190	struct blkg_stat		time;
 191#ifdef CONFIG_DEBUG_BLK_CGROUP
 192	/* time not charged to this cgroup */
 193	struct blkg_stat		unaccounted_time;
 194	/* sum of number of ios queued across all samples */
 195	struct blkg_stat		avg_queue_size_sum;
 196	/* count of samples taken for average */
 197	struct blkg_stat		avg_queue_size_samples;
 198	/* how many times this group has been removed from service tree */
 199	struct blkg_stat		dequeue;
 200	/* total time spent waiting for it to be assigned a timeslice. */
 201	struct blkg_stat		group_wait_time;
 202	/* time spent idling for this blkcg_gq */
 203	struct blkg_stat		idle_time;
 204	/* total time with empty current active q with other requests queued */
 205	struct blkg_stat		empty_time;
 206	/* fields after this shouldn't be cleared on stat reset */
 207	uint64_t			start_group_wait_time;
 208	uint64_t			start_idle_time;
 209	uint64_t			start_empty_time;
 210	uint16_t			flags;
 211#endif	/* CONFIG_DEBUG_BLK_CGROUP */
 212#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 213};
 214
 215/* This is per cgroup per device grouping structure */
 216struct cfq_group {
 217	/* must be the first member */
 218	struct blkg_policy_data pd;
 219
 220	/* group service_tree member */
 221	struct rb_node rb_node;
 222
 223	/* group service_tree key */
 224	u64 vdisktime;
 225
 226	/*
 227	 * The number of active cfqgs and sum of their weights under this
 228	 * cfqg.  This covers this cfqg's leaf_weight and all children's
 229	 * weights, but does not cover weights of further descendants.
 230	 *
 231	 * If a cfqg is on the service tree, it's active.  An active cfqg
 232	 * also activates its parent and contributes to the children_weight
 233	 * of the parent.
 234	 */
 235	int nr_active;
 236	unsigned int children_weight;
 237
 238	/*
 239	 * vfraction is the fraction of vdisktime that the tasks in this
 240	 * cfqg are entitled to.  This is determined by compounding the
 241	 * ratios walking up from this cfqg to the root.
 242	 *
 243	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
 244	 * vfractions on a service tree is approximately 1.  The sum may
 245	 * deviate a bit due to rounding errors and fluctuations caused by
 246	 * cfqgs entering and leaving the service tree.
 247	 */
 248	unsigned int vfraction;
 249
 250	/*
 251	 * There are two weights - (internal) weight is the weight of this
  252	 * cfqg against the sibling cfqgs.  leaf_weight is the weight of
 253	 * this cfqg against the child cfqgs.  For the root cfqg, both
 254	 * weights are kept in sync for backward compatibility.
 255	 */
 256	unsigned int weight;
 257	unsigned int new_weight;
 258	unsigned int dev_weight;
 259
 260	unsigned int leaf_weight;
 261	unsigned int new_leaf_weight;
 262	unsigned int dev_leaf_weight;
 263
 264	/* number of cfqq currently on this group */
 265	int nr_cfqq;
 266
 267	/*
 268	 * Per group busy queues average. Useful for workload slice calc. We
 269	 * create the array for each prio class but at run time it is used
 270	 * only for RT and BE class and slot for IDLE class remains unused.
 271	 * This is primarily done to avoid confusion and a gcc warning.
 272	 */
 273	unsigned int busy_queues_avg[CFQ_PRIO_NR];
 274	/*
 275	 * rr lists of queues with requests. We maintain service trees for
 276	 * RT and BE classes. These trees are subdivided in subclasses
 277	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
 278	 * class there is no subclassification and all the cfq queues go on
 279	 * a single tree service_tree_idle.
 280	 * Counts are embedded in the cfq_rb_root
 281	 */
 282	struct cfq_rb_root service_trees[2][3];
 283	struct cfq_rb_root service_tree_idle;
 284
 285	unsigned long saved_wl_slice;
 286	enum wl_type_t saved_wl_type;
 287	enum wl_class_t saved_wl_class;
 288
 289	/* number of requests that are on the dispatch list or inside driver */
 290	int dispatched;
 291	struct cfq_ttime ttime;
 292	struct cfqg_stats stats;	/* stats for this cfqg */
 293	struct cfqg_stats dead_stats;	/* stats pushed from dead children */
 294};
 295
 296struct cfq_io_cq {
 297	struct io_cq		icq;		/* must be the first member */
 298	struct cfq_queue	*cfqq[2];
 299	struct cfq_ttime	ttime;
 300	int			ioprio;		/* the current ioprio */
 301#ifdef CONFIG_CFQ_GROUP_IOSCHED
 302	uint64_t		blkcg_id;	/* the current blkcg ID */
 303#endif
 304};
 305
 306/*
 307 * Per block device queue structure
 308 */
 309struct cfq_data {
 310	struct request_queue *queue;
 311	/* Root service tree for cfq_groups */
 312	struct cfq_rb_root grp_service_tree;
 313	struct cfq_group *root_group;
 314
 315	/*
 316	 * The priority currently being served
 317	 */
 318	enum wl_class_t serving_wl_class;
 319	enum wl_type_t serving_wl_type;
 320	unsigned long workload_expires;
 321	struct cfq_group *serving_group;
 322
 323	/*
 324	 * Each priority tree is sorted by next_request position.  These
 325	 * trees are used when determining if two or more queues are
 326	 * interleaving requests (see cfq_close_cooperator).
 327	 */
 328	struct rb_root prio_trees[CFQ_PRIO_LISTS];
 329
 330	unsigned int busy_queues;
 331	unsigned int busy_sync_queues;
 332
 333	int rq_in_driver;
 334	int rq_in_flight[2];
 335
 336	/*
 337	 * queue-depth detection
 338	 */
 339	int rq_queued;
 340	int hw_tag;
 341	/*
 342	 * hw_tag can be
 343	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
 344	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
 345	 *  0 => no NCQ
 346	 */
 347	int hw_tag_est_depth;
 348	unsigned int hw_tag_samples;
 349
 350	/*
 351	 * idle window management
 352	 */
 353	struct timer_list idle_slice_timer;
 354	struct work_struct unplug_work;
 355
 356	struct cfq_queue *active_queue;
 357	struct cfq_io_cq *active_cic;
 358
 359	/*
 360	 * async queue for each priority case
 361	 */
 362	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
 363	struct cfq_queue *async_idle_cfqq;
 364
 365	sector_t last_position;
 366
 367	/*
 368	 * tunables, see top of file
 369	 */
 370	unsigned int cfq_quantum;
 371	unsigned int cfq_fifo_expire[2];
 372	unsigned int cfq_back_penalty;
 373	unsigned int cfq_back_max;
 374	unsigned int cfq_slice[2];
 375	unsigned int cfq_slice_async_rq;
 376	unsigned int cfq_slice_idle;
 377	unsigned int cfq_group_idle;
 378	unsigned int cfq_latency;
 379	unsigned int cfq_target_latency;
 380
 381	/*
 382	 * Fallback dummy cfqq for extreme OOM conditions
 383	 */
 384	struct cfq_queue oom_cfqq;
 385
 386	unsigned long last_delayed_sync;
 387};
 388
 389static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 390
 391static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
 392					    enum wl_class_t class,
 393					    enum wl_type_t type)
 394{
 395	if (!cfqg)
 396		return NULL;
 397
 398	if (class == IDLE_WORKLOAD)
 399		return &cfqg->service_tree_idle;
 400
 401	return &cfqg->service_trees[class][type];
 402}
 403
 404enum cfqq_state_flags {
 405	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
 406	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
 407	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
 408	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
 409	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
 410	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
 411	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
 412	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 413	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 414	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
 415	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be splitted */
 416	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
 417	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
 418};
 419
 420#define CFQ_CFQQ_FNS(name)						\
 421static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
 422{									\
 423	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
 424}									\
 425static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
 426{									\
 427	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
 428}									\
 429static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
 430{									\
 431	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
 432}
 433
 434CFQ_CFQQ_FNS(on_rr);
 435CFQ_CFQQ_FNS(wait_request);
 436CFQ_CFQQ_FNS(must_dispatch);
 437CFQ_CFQQ_FNS(must_alloc_slice);
 438CFQ_CFQQ_FNS(fifo_expire);
 439CFQ_CFQQ_FNS(idle_window);
 440CFQ_CFQQ_FNS(prio_changed);
 441CFQ_CFQQ_FNS(slice_new);
 442CFQ_CFQQ_FNS(sync);
 443CFQ_CFQQ_FNS(coop);
 444CFQ_CFQQ_FNS(split_coop);
 445CFQ_CFQQ_FNS(deep);
 446CFQ_CFQQ_FNS(wait_busy);
 447#undef CFQ_CFQQ_FNS
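/*
 * As an illustration, CFQ_CFQQ_FNS(on_rr) above expands to three trivial
 * helpers that set, clear and test CFQ_CFQQ_FLAG_on_rr in cfqq->flags:
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 *
 * and likewise for every other flag listed in enum cfqq_state_flags.
 */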
 448
 449static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
 450{
 451	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 452}
 453
 454static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 455{
 456	return pd_to_blkg(&cfqg->pd);
 457}
 458
 459#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 460
 461/* cfqg stats flags */
 462enum cfqg_stats_flags {
 463	CFQG_stats_waiting = 0,
 464	CFQG_stats_idling,
 465	CFQG_stats_empty,
 466};
 467
 468#define CFQG_FLAG_FNS(name)						\
 469static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
 470{									\
 471	stats->flags |= (1 << CFQG_stats_##name);			\
 472}									\
 473static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
 474{									\
 475	stats->flags &= ~(1 << CFQG_stats_##name);			\
 476}									\
 477static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
 478{									\
 479	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
 480}									\
 481
 482CFQG_FLAG_FNS(waiting)
 483CFQG_FLAG_FNS(idling)
 484CFQG_FLAG_FNS(empty)
 485#undef CFQG_FLAG_FNS
 486
 487/* This should be called with the queue_lock held. */
 488static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
 489{
 490	unsigned long long now;
 491
 492	if (!cfqg_stats_waiting(stats))
 493		return;
 494
 495	now = sched_clock();
 496	if (time_after64(now, stats->start_group_wait_time))
 497		blkg_stat_add(&stats->group_wait_time,
 498			      now - stats->start_group_wait_time);
 499	cfqg_stats_clear_waiting(stats);
 500}
 501
 502/* This should be called with the queue_lock held. */
 503static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
 504						 struct cfq_group *curr_cfqg)
 505{
 506	struct cfqg_stats *stats = &cfqg->stats;
 507
 508	if (cfqg_stats_waiting(stats))
 509		return;
 510	if (cfqg == curr_cfqg)
 511		return;
 512	stats->start_group_wait_time = sched_clock();
 513	cfqg_stats_mark_waiting(stats);
 514}
 515
 516/* This should be called with the queue_lock held. */
 517static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
 518{
 519	unsigned long long now;
 520
 521	if (!cfqg_stats_empty(stats))
 522		return;
 523
 524	now = sched_clock();
 525	if (time_after64(now, stats->start_empty_time))
 526		blkg_stat_add(&stats->empty_time,
 527			      now - stats->start_empty_time);
 528	cfqg_stats_clear_empty(stats);
 529}
 530
 531static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
 532{
 533	blkg_stat_add(&cfqg->stats.dequeue, 1);
 534}
 535
 536static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
 537{
 538	struct cfqg_stats *stats = &cfqg->stats;
 539
 540	if (blkg_rwstat_total(&stats->queued))
 541		return;
 542
 543	/*
 544	 * group is already marked empty. This can happen if cfqq got new
 545	 * request in parent group and moved to this group while being added
 546	 * to service tree. Just ignore the event and move on.
 547	 */
 548	if (cfqg_stats_empty(stats))
 549		return;
 550
 551	stats->start_empty_time = sched_clock();
 552	cfqg_stats_mark_empty(stats);
 553}
 554
 555static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
 556{
 557	struct cfqg_stats *stats = &cfqg->stats;
 558
 559	if (cfqg_stats_idling(stats)) {
 560		unsigned long long now = sched_clock();
 561
 562		if (time_after64(now, stats->start_idle_time))
 563			blkg_stat_add(&stats->idle_time,
 564				      now - stats->start_idle_time);
 565		cfqg_stats_clear_idling(stats);
 566	}
 567}
 568
 569static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
 570{
 571	struct cfqg_stats *stats = &cfqg->stats;
 572
 573	BUG_ON(cfqg_stats_idling(stats));
 574
 575	stats->start_idle_time = sched_clock();
 576	cfqg_stats_mark_idling(stats);
 577}
 578
 579static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
 580{
 581	struct cfqg_stats *stats = &cfqg->stats;
 582
 583	blkg_stat_add(&stats->avg_queue_size_sum,
 584		      blkg_rwstat_total(&stats->queued));
 585	blkg_stat_add(&stats->avg_queue_size_samples, 1);
 586	cfqg_stats_update_group_wait_time(stats);
 587}
 588
 589#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 590
 591static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
 592static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
 593static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
 594static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
 595static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
 596static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
 597static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 598
 599#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 600
 601#ifdef CONFIG_CFQ_GROUP_IOSCHED
 602
 603static struct blkcg_policy blkcg_policy_cfq;
 604
 605static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
 606{
 607	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
 608}
 609
 610static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
 611{
 612	struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
 613
 614	return pblkg ? blkg_to_cfqg(pblkg) : NULL;
 615}
 616
 617static inline void cfqg_get(struct cfq_group *cfqg)
 618{
 619	return blkg_get(cfqg_to_blkg(cfqg));
 620}
 621
 622static inline void cfqg_put(struct cfq_group *cfqg)
 623{
 624	return blkg_put(cfqg_to_blkg(cfqg));
 625}
 626
 627#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
 628	char __pbuf[128];						\
 629									\
 630	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
 631	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
 632			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
 633			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
 634			  __pbuf, ##args);				\
 635} while (0)
 636
 637#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
 638	char __pbuf[128];						\
 639									\
 640	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
 641	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
 642} while (0)
 643
 644static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
 645					    struct cfq_group *curr_cfqg, int rw)
 646{
 647	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
 648	cfqg_stats_end_empty_time(&cfqg->stats);
 649	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
 650}
 651
 652static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 653			unsigned long time, unsigned long unaccounted_time)
 654{
 655	blkg_stat_add(&cfqg->stats.time, time);
 656#ifdef CONFIG_DEBUG_BLK_CGROUP
 657	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
 658#endif
 659}
 660
 661static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
 662{
 663	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
 664}
 665
 666static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
 667{
 668	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
 669}
 670
 671static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
 672					      uint64_t bytes, int rw)
 673{
 674	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
 675	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
 676	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
 677}
 678
 679static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 680			uint64_t start_time, uint64_t io_start_time, int rw)
 681{
 682	struct cfqg_stats *stats = &cfqg->stats;
 683	unsigned long long now = sched_clock();
 684
 685	if (time_after64(now, io_start_time))
 686		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
 687	if (time_after64(io_start_time, start_time))
 688		blkg_rwstat_add(&stats->wait_time, rw,
 689				io_start_time - start_time);
 690}
 691
 692/* @stats = 0 */
 693static void cfqg_stats_reset(struct cfqg_stats *stats)
 694{
 695	/* queued stats shouldn't be cleared */
 696	blkg_rwstat_reset(&stats->service_bytes);
 697	blkg_rwstat_reset(&stats->serviced);
 698	blkg_rwstat_reset(&stats->merged);
 699	blkg_rwstat_reset(&stats->service_time);
 700	blkg_rwstat_reset(&stats->wait_time);
 701	blkg_stat_reset(&stats->time);
 702#ifdef CONFIG_DEBUG_BLK_CGROUP
 703	blkg_stat_reset(&stats->unaccounted_time);
 704	blkg_stat_reset(&stats->avg_queue_size_sum);
 705	blkg_stat_reset(&stats->avg_queue_size_samples);
 706	blkg_stat_reset(&stats->dequeue);
 707	blkg_stat_reset(&stats->group_wait_time);
 708	blkg_stat_reset(&stats->idle_time);
 709	blkg_stat_reset(&stats->empty_time);
 710#endif
 711}
 712
 713/* @to += @from */
 714static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
 715{
 716	/* queued stats shouldn't be cleared */
 717	blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
 718	blkg_rwstat_merge(&to->serviced, &from->serviced);
 719	blkg_rwstat_merge(&to->merged, &from->merged);
 720	blkg_rwstat_merge(&to->service_time, &from->service_time);
 721	blkg_rwstat_merge(&to->wait_time, &from->wait_time);
  722	blkg_stat_merge(&to->time, &from->time);
 723#ifdef CONFIG_DEBUG_BLK_CGROUP
 724	blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
 725	blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
 726	blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
 727	blkg_stat_merge(&to->dequeue, &from->dequeue);
 728	blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
 729	blkg_stat_merge(&to->idle_time, &from->idle_time);
 730	blkg_stat_merge(&to->empty_time, &from->empty_time);
 731#endif
 732}
 733
 734/*
 735 * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
 736 * recursive stats can still account for the amount used by this cfqg after
 737 * it's gone.
 738 */
 739static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
 740{
 741	struct cfq_group *parent = cfqg_parent(cfqg);
 742
 743	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
 744
 745	if (unlikely(!parent))
 746		return;
 747
 748	cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
 749	cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
 750	cfqg_stats_reset(&cfqg->stats);
 751	cfqg_stats_reset(&cfqg->dead_stats);
 752}
 753
 754#else	/* CONFIG_CFQ_GROUP_IOSCHED */
 755
 756static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
 757static inline void cfqg_get(struct cfq_group *cfqg) { }
 758static inline void cfqg_put(struct cfq_group *cfqg) { }
 759
 760#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
 761	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid,	\
 762			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
 763			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
 764				##args)
 765#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
 766
 767static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
 768			struct cfq_group *curr_cfqg, int rw) { }
 769static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 770			unsigned long time, unsigned long unaccounted_time) { }
 771static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
 772static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
 773static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
 774					      uint64_t bytes, int rw) { }
 775static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 776			uint64_t start_time, uint64_t io_start_time, int rw) { }
 777
 778#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 779
 780#define cfq_log(cfqd, fmt, args...)	\
 781	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 782
 783/* Traverses through cfq group service trees */
 784#define for_each_cfqg_st(cfqg, i, j, st) \
 785	for (i = 0; i <= IDLE_WORKLOAD; i++) \
 786		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
 787			: &cfqg->service_tree_idle; \
 788			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
 789			(i == IDLE_WORKLOAD && j == 0); \
 790			j++, st = i < IDLE_WORKLOAD ? \
 791			&cfqg->service_trees[i][j]: NULL) \
 792
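/*
 * Iteration order of for_each_cfqg_st(), given BE_WORKLOAD = 0,
 * RT_WORKLOAD = 1 and IDLE_WORKLOAD = 2: it visits all seven per-group
 * trees in turn:
 *
 *	service_trees[BE_WORKLOAD][ASYNC_WORKLOAD]
 *	service_trees[BE_WORKLOAD][SYNC_NOIDLE_WORKLOAD]
 *	service_trees[BE_WORKLOAD][SYNC_WORKLOAD]
 *	service_trees[RT_WORKLOAD][ASYNC_WORKLOAD]
 *	service_trees[RT_WORKLOAD][SYNC_NOIDLE_WORKLOAD]
 *	service_trees[RT_WORKLOAD][SYNC_WORKLOAD]
 *	service_tree_idle		(once, when i == IDLE_WORKLOAD)
 *
 * which is why cfq_init_cfqg_base() below can reset every service tree of
 * a group with a single loop.
 */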
 793static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
 794	struct cfq_ttime *ttime, bool group_idle)
 795{
 796	unsigned long slice;
 797	if (!sample_valid(ttime->ttime_samples))
 798		return false;
 799	if (group_idle)
 800		slice = cfqd->cfq_group_idle;
 801	else
 802		slice = cfqd->cfq_slice_idle;
 803	return ttime->ttime_mean > slice;
 804}
 805
 806static inline bool iops_mode(struct cfq_data *cfqd)
 807{
 808	/*
 809	 * If we are not idling on queues and it is a NCQ drive, parallel
 810	 * execution of requests is on and measuring time is not possible
 811	 * in most of the cases until and unless we drive shallower queue
 812	 * depths and that becomes a performance bottleneck. In such cases
 813	 * switch to start providing fairness in terms of number of IOs.
 814	 */
 815	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
 816		return true;
 817	else
 818		return false;
 819}
 820
 821static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
 822{
 823	if (cfq_class_idle(cfqq))
 824		return IDLE_WORKLOAD;
 825	if (cfq_class_rt(cfqq))
 826		return RT_WORKLOAD;
 827	return BE_WORKLOAD;
 828}
 829
 830
 831static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
 832{
 833	if (!cfq_cfqq_sync(cfqq))
 834		return ASYNC_WORKLOAD;
 835	if (!cfq_cfqq_idle_window(cfqq))
 836		return SYNC_NOIDLE_WORKLOAD;
 837	return SYNC_WORKLOAD;
 838}
 839
 840static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
 841					struct cfq_data *cfqd,
 842					struct cfq_group *cfqg)
 843{
 844	if (wl_class == IDLE_WORKLOAD)
 845		return cfqg->service_tree_idle.count;
 846
 847	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
 848		cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
 849		cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
 850}
 851
 852static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 853					struct cfq_group *cfqg)
 854{
 855	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
 856		cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
 857}
 858
 859static void cfq_dispatch_insert(struct request_queue *, struct request *);
 860static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
 861				       struct cfq_io_cq *cic, struct bio *bio,
 862				       gfp_t gfp_mask);
 863
 864static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 865{
 866	/* cic->icq is the first member, %NULL will convert to %NULL */
 867	return container_of(icq, struct cfq_io_cq, icq);
 868}
 869
 870static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
 871					       struct io_context *ioc)
 872{
 873	if (ioc)
 874		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
 875	return NULL;
 876}
 877
 878static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
 879{
 880	return cic->cfqq[is_sync];
 881}
 882
 883static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
 884				bool is_sync)
 885{
 886	cic->cfqq[is_sync] = cfqq;
 887}
 888
 889static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
 890{
 891	return cic->icq.q->elevator->elevator_data;
 892}
 893
 894/*
 895 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 896 * set (in which case it could also be direct WRITE).
 897 */
 898static inline bool cfq_bio_sync(struct bio *bio)
 899{
 900	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 901}
 902
 903/*
 904 * scheduler run of queue, if there are requests pending and no one in the
 905 * driver that will restart queueing
 906 */
 907static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 908{
 909	if (cfqd->busy_queues) {
 910		cfq_log(cfqd, "schedule dispatch");
 911		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 912	}
 913}
 914
 915/*
 916 * Scale schedule slice based on io priority. Use the sync time slice only
 917 * if a queue is marked sync and has sync io queued. A sync queue with async
 918 * io only, should not get full sync slice length.
 919 */
 920static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 921				 unsigned short prio)
 922{
 923	const int base_slice = cfqd->cfq_slice[sync];
 924
 925	WARN_ON(prio >= IOPRIO_BE_NR);
 926
 927	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
 928}
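/*
 * Worked example, assuming HZ = 1000 and the default cfq_slice_sync of
 * HZ / 10: base_slice is 100 jiffies (100ms) and base_slice/CFQ_SLICE_SCALE
 * is 20, so a sync queue gets 100 + 20 * (4 - prio) jiffies:
 *
 *	prio 0 (highest) -> 180ms
 *	prio 4 (default) -> 100ms
 *	prio 7 (lowest)  ->  40ms
 *
 * i.e. each ioprio level is worth one 20ms step around the default slice.
 */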
 929
 930static inline int
 931cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 932{
 933	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 934}
 935
 936/**
 937 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 938 * @charge: disk time being charged
 939 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 940 *
 941 * Scale @charge according to @vfraction, which is in range (0, 1].  The
 942 * scaling is inversely proportional.
 943 *
 944 * scaled = charge / vfraction
 945 *
 946 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 947 */
 948static inline u64 cfqg_scale_charge(unsigned long charge,
 949				    unsigned int vfraction)
 950{
 951	u64 c = charge << CFQ_SERVICE_SHIFT;	/* make it fixed point */
 952
 953	/* charge / vfraction */
 954	c <<= CFQ_SERVICE_SHIFT;
 955	do_div(c, vfraction);
 956	return c;
 957}
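/*
 * Worked example with CFQ_SERVICE_SHIFT = 12: a group entitled to half of
 * the device has vfraction = 1 << 11.  Charging it 100 jiffies gives
 *
 *	c = 100 << 12;  c <<= 12;		-> 100 << 24
 *	do_div(c, 1 << 11);			-> 200 << 12
 *
 * i.e. 200 in fixed point: the half-share group is charged twice the
 * wall-clock time in vdisktime, so it falls back in the service tree
 * sooner than a group owning the whole device would.
 */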
 958
 959static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
 960{
 961	s64 delta = (s64)(vdisktime - min_vdisktime);
 962	if (delta > 0)
 963		min_vdisktime = vdisktime;
 964
 965	return min_vdisktime;
 966}
 967
 968static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
 969{
 970	s64 delta = (s64)(vdisktime - min_vdisktime);
 971	if (delta < 0)
 972		min_vdisktime = vdisktime;
 973
 974	return min_vdisktime;
 975}
 976
 977static void update_min_vdisktime(struct cfq_rb_root *st)
 978{
 979	struct cfq_group *cfqg;
 980
 981	if (st->left) {
 982		cfqg = rb_entry_cfqg(st->left);
 983		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
 984						  cfqg->vdisktime);
 985	}
 986}
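/*
 * The (s64) casts above are the usual wrap-safe comparison trick (as CFS
 * does for min_vruntime): for example, if min_vdisktime has advanced to
 * ULLONG_MAX - 10 and a vdisktime of 5 arrives after wrapping, then
 * 5 - (ULLONG_MAX - 10) reduces to 16 mod 2^64, which is positive as an
 * s64, so the value is correctly treated as being ahead even though a
 * plain "vdisktime > min_vdisktime" comparison would say otherwise.
 */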
 987
 988/*
 989 * get averaged number of queues of RT/BE priority.
  990 * the average is updated with a formula that gives more weight to higher numbers,
  991 * so that it follows sudden increases quickly and decays slowly
 992 */
 993
 994static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
 995					struct cfq_group *cfqg, bool rt)
 996{
 997	unsigned min_q, max_q;
 998	unsigned mult  = cfq_hist_divisor - 1;
 999	unsigned round = cfq_hist_divisor / 2;
1000	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
1001
1002	min_q = min(cfqg->busy_queues_avg[rt], busy);
1003	max_q = max(cfqg->busy_queues_avg[rt], busy);
1004	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
1005		cfq_hist_divisor;
1006	return cfqg->busy_queues_avg[rt];
1007}
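/*
 * With cfq_hist_divisor = 4 the update is avg = (3 * max + min + 2) / 4,
 * where min/max are taken between the stored average and the current busy
 * count.  Example: if the average is 2 and busy jumps to 10, the next
 * values are (3*10 + 2 + 2)/4 = 8 and then (3*10 + 8 + 2)/4 = 10, so the
 * estimate catches up within two samples; when busy drops back to 2 it
 * decays one step at a time (10 -> 8 -> 7 -> 6 -> ...), which is the
 * "follow increases quickly, decay slowly" behaviour described above.
 */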
1008
1009static inline unsigned
1010cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
1011{
1012	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
1013}
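/*
 * Example, assuming HZ = 1000 and the default cfq_target_latency of 300
 * jiffies: a group whose vfraction is 1 << 11 (half of the device, with
 * CFQ_SERVICE_SHIFT = 12) gets 300 * 2048 >> 12 = 150 jiffies of group
 * slice, i.e. half of the target latency window.
 */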
1014
1015static inline unsigned
1016cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1017{
1018	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
1019	if (cfqd->cfq_latency) {
1020		/*
1021		 * interested queues (we consider only the ones with the same
1022		 * priority class in the cfq group)
1023		 */
1024		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
1025						cfq_class_rt(cfqq));
1026		unsigned sync_slice = cfqd->cfq_slice[1];
1027		unsigned expect_latency = sync_slice * iq;
1028		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
1029
1030		if (expect_latency > group_slice) {
1031			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
1032			/* scale low_slice according to IO priority
1033			 * and sync vs async */
1034			unsigned low_slice =
1035				min(slice, base_low_slice * slice / sync_slice);
1036			/* the adapted slice value is scaled to fit all iqs
1037			 * into the target latency */
1038			slice = max(slice * group_slice / expect_latency,
1039				    low_slice);
1040		}
1041	}
1042	return slice;
1043}
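/*
 * Worked example, assuming HZ = 1000 and default tunables: a sync queue
 * at the default priority starts with slice = 100 jiffies.  If six queues
 * of the same class are busy in a group whose group_slice is 300 jiffies,
 * expect_latency = 6 * 100 = 600 > 300, so the slice is compressed to
 * max(100 * 300 / 600, low_slice) = 50 jiffies, with low_slice bounded
 * below by 2 * cfq_slice_idle scaled for the queue (16 jiffies here).
 * Six 50ms slices then fit inside the 300ms target latency.
 */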
1044
1045static inline void
1046cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1047{
1048	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
1049
1050	cfqq->slice_start = jiffies;
1051	cfqq->slice_end = jiffies + slice;
1052	cfqq->allocated_slice = slice;
1053	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
1054}
1055
1056/*
1057 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
1058 * isn't valid until the first request from the dispatch is activated
1059 * and the slice time set.
1060 */
1061static inline bool cfq_slice_used(struct cfq_queue *cfqq)
1062{
1063	if (cfq_cfqq_slice_new(cfqq))
1064		return false;
1065	if (time_before(jiffies, cfqq->slice_end))
1066		return false;
1067
1068	return true;
1069}
1070
1071/*
1072 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
1073 * We choose the request that is closest to the head right now. Distance
1074 * behind the head is penalized and only allowed to a certain extent.
1075 */
1076static struct request *
1077cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
1078{
1079	sector_t s1, s2, d1 = 0, d2 = 0;
1080	unsigned long back_max;
1081#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
1082#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
1083	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1084
1085	if (rq1 == NULL || rq1 == rq2)
1086		return rq2;
1087	if (rq2 == NULL)
1088		return rq1;
1089
1090	if (rq_is_sync(rq1) != rq_is_sync(rq2))
1091		return rq_is_sync(rq1) ? rq1 : rq2;
1092
1093	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
1094		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
1095
1096	s1 = blk_rq_pos(rq1);
1097	s2 = blk_rq_pos(rq2);
1098
1099	/*
1100	 * by definition, 1KiB is 2 sectors
1101	 */
1102	back_max = cfqd->cfq_back_max * 2;
1103
1104	/*
1105	 * Strict one way elevator _except_ in the case where we allow
1106	 * short backward seeks which are biased as twice the cost of a
1107	 * similar forward seek.
1108	 */
1109	if (s1 >= last)
1110		d1 = s1 - last;
1111	else if (s1 + back_max >= last)
1112		d1 = (last - s1) * cfqd->cfq_back_penalty;
1113	else
1114		wrap |= CFQ_RQ1_WRAP;
1115
1116	if (s2 >= last)
1117		d2 = s2 - last;
1118	else if (s2 + back_max >= last)
1119		d2 = (last - s2) * cfqd->cfq_back_penalty;
1120	else
1121		wrap |= CFQ_RQ2_WRAP;
1122
1123	/* Found required data */
1124
1125	/*
1126	 * By doing switch() on the bit mask "wrap" we avoid having to
1127	 * check two variables for all permutations: --> faster!
1128	 */
1129	switch (wrap) {
1130	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1131		if (d1 < d2)
1132			return rq1;
1133		else if (d2 < d1)
1134			return rq2;
1135		else {
1136			if (s1 >= s2)
1137				return rq1;
1138			else
1139				return rq2;
1140		}
1141
1142	case CFQ_RQ2_WRAP:
1143		return rq1;
1144	case CFQ_RQ1_WRAP:
1145		return rq2;
1146	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1147	default:
1148		/*
1149		 * Since both rqs are wrapped,
1150		 * start with the one that's further behind head
1151		 * (--> only *one* back seek required),
1152		 * since back seek takes more time than forward.
1153		 */
1154		if (s1 <= s2)
1155			return rq1;
1156		else
1157			return rq2;
1158	}
1159}
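/*
 * Example of the back-seek penalty above, with the default cfq_back_max
 * of 16 MB (32768 sectors) and cfq_back_penalty of 2: if the head is at
 * sector 100000, a request at 100200 gets d = 200, while a request 50
 * sectors *behind* the head gets d = 50 * 2 = 100 and is still preferred;
 * a request 40000 sectors behind exceeds back_max, is marked as wrapping,
 * and loses to any non-wrapping request regardless of distance.
 */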
1160
1161/*
1162 * The below is leftmost cache rbtree addon
1163 */
1164static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1165{
1166	/* Service tree is empty */
1167	if (!root->count)
1168		return NULL;
1169
1170	if (!root->left)
1171		root->left = rb_first(&root->rb);
1172
1173	if (root->left)
1174		return rb_entry(root->left, struct cfq_queue, rb_node);
1175
1176	return NULL;
1177}
1178
1179static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1180{
1181	if (!root->left)
1182		root->left = rb_first(&root->rb);
1183
1184	if (root->left)
1185		return rb_entry_cfqg(root->left);
1186
1187	return NULL;
1188}
1189
1190static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1191{
1192	rb_erase(n, root);
1193	RB_CLEAR_NODE(n);
1194}
1195
1196static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1197{
1198	if (root->left == n)
1199		root->left = NULL;
1200	rb_erase_init(n, &root->rb);
1201	--root->count;
1202}
1203
1204/*
1205 * would be nice to take fifo expire time into account as well
1206 */
1207static struct request *
1208cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1209		  struct request *last)
1210{
1211	struct rb_node *rbnext = rb_next(&last->rb_node);
1212	struct rb_node *rbprev = rb_prev(&last->rb_node);
1213	struct request *next = NULL, *prev = NULL;
1214
1215	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1216
1217	if (rbprev)
1218		prev = rb_entry_rq(rbprev);
1219
1220	if (rbnext)
1221		next = rb_entry_rq(rbnext);
1222	else {
1223		rbnext = rb_first(&cfqq->sort_list);
1224		if (rbnext && rbnext != &last->rb_node)
1225			next = rb_entry_rq(rbnext);
1226	}
1227
1228	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1229}
1230
1231static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
1232				      struct cfq_queue *cfqq)
1233{
1234	/*
1235	 * just an approximation, should be ok.
1236	 */
1237	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1238		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1239}
1240
1241static inline s64
1242cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1243{
1244	return cfqg->vdisktime - st->min_vdisktime;
1245}
1246
1247static void
1248__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1249{
1250	struct rb_node **node = &st->rb.rb_node;
1251	struct rb_node *parent = NULL;
1252	struct cfq_group *__cfqg;
1253	s64 key = cfqg_key(st, cfqg);
1254	int left = 1;
1255
1256	while (*node != NULL) {
1257		parent = *node;
1258		__cfqg = rb_entry_cfqg(parent);
1259
1260		if (key < cfqg_key(st, __cfqg))
1261			node = &parent->rb_left;
1262		else {
1263			node = &parent->rb_right;
1264			left = 0;
1265		}
1266	}
1267
1268	if (left)
1269		st->left = &cfqg->rb_node;
1270
1271	rb_link_node(&cfqg->rb_node, parent, node);
1272	rb_insert_color(&cfqg->rb_node, &st->rb);
1273}
1274
1275static void
1276cfq_update_group_weight(struct cfq_group *cfqg)
1277{
1278	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1279
1280	if (cfqg->new_weight) {
1281		cfqg->weight = cfqg->new_weight;
1282		cfqg->new_weight = 0;
1283	}
1284
1285	if (cfqg->new_leaf_weight) {
1286		cfqg->leaf_weight = cfqg->new_leaf_weight;
1287		cfqg->new_leaf_weight = 0;
1288	}
1289}
1290
1291static void
1292cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1293{
1294	unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;	/* start with 1 */
1295	struct cfq_group *pos = cfqg;
1296	struct cfq_group *parent;
1297	bool propagate;
1298
1299	/* add to the service tree */
1300	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1301
1302	cfq_update_group_weight(cfqg);
1303	__cfq_group_service_tree_add(st, cfqg);
1304
1305	/*
1306	 * Activate @cfqg and calculate the portion of vfraction @cfqg is
1307	 * entitled to.  vfraction is calculated by walking the tree
1308	 * towards the root calculating the fraction it has at each level.
1309	 * The compounded ratio is how much vfraction @cfqg owns.
1310	 *
1311	 * Start with the proportion tasks in this cfqg has against active
1312	 * children cfqgs - its leaf_weight against children_weight.
1313	 */
1314	propagate = !pos->nr_active++;
1315	pos->children_weight += pos->leaf_weight;
1316	vfr = vfr * pos->leaf_weight / pos->children_weight;
1317
1318	/*
1319	 * Compound ->weight walking up the tree.  Both activation and
1320	 * vfraction calculation are done in the same loop.  Propagation
1321	 * stops once an already activated node is met.  vfraction
1322	 * calculation should always continue to the root.
1323	 */
1324	while ((parent = cfqg_parent(pos))) {
1325		if (propagate) {
1326			propagate = !parent->nr_active++;
1327			parent->children_weight += pos->weight;
1328		}
1329		vfr = vfr * pos->weight / parent->children_weight;
1330		pos = parent;
1331	}
1332
1333	cfqg->vfraction = max_t(unsigned, vfr, 1);
1334}
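/*
 * Example of the compounding above, with CFQ_SERVICE_SHIFT = 12: take a
 * cfqg with no active child groups (so its children_weight equals its own
 * leaf_weight and the first factor is 1), weight 500, whose parent ends
 * up with children_weight 1000, and whose parent in turn owns 500 of the
 * root's children_weight of 1000.  Then
 *
 *	vfr = (1 << 12) * 500/1000 * 500/1000 = 1 << 10
 *
 * so cfqg->vfraction is 1024/4096, i.e. the group is entitled to roughly
 * a quarter of the device time.
 */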
1335
1336static void
1337cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1338{
1339	struct cfq_rb_root *st = &cfqd->grp_service_tree;
1340	struct cfq_group *__cfqg;
1341	struct rb_node *n;
1342
1343	cfqg->nr_cfqq++;
1344	if (!RB_EMPTY_NODE(&cfqg->rb_node))
1345		return;
1346
1347	/*
1348	 * Currently put the group at the end. Later implement something
1349	 * so that groups get lesser vtime based on their weights, so that
 1350	 * a group does not lose everything if it was not continuously backlogged.
1351	 */
1352	n = rb_last(&st->rb);
1353	if (n) {
1354		__cfqg = rb_entry_cfqg(n);
1355		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1356	} else
1357		cfqg->vdisktime = st->min_vdisktime;
1358	cfq_group_service_tree_add(st, cfqg);
1359}
1360
1361static void
1362cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1363{
1364	struct cfq_group *pos = cfqg;
1365	bool propagate;
1366
1367	/*
1368	 * Undo activation from cfq_group_service_tree_add().  Deactivate
1369	 * @cfqg and propagate deactivation upwards.
1370	 */
1371	propagate = !--pos->nr_active;
1372	pos->children_weight -= pos->leaf_weight;
1373
1374	while (propagate) {
1375		struct cfq_group *parent = cfqg_parent(pos);
1376
1377		/* @pos has 0 nr_active at this point */
1378		WARN_ON_ONCE(pos->children_weight);
1379		pos->vfraction = 0;
1380
1381		if (!parent)
1382			break;
1383
1384		propagate = !--parent->nr_active;
1385		parent->children_weight -= pos->weight;
1386		pos = parent;
1387	}
1388
1389	/* remove from the service tree */
1390	if (!RB_EMPTY_NODE(&cfqg->rb_node))
1391		cfq_rb_erase(&cfqg->rb_node, st);
1392}
1393
1394static void
1395cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1396{
1397	struct cfq_rb_root *st = &cfqd->grp_service_tree;
1398
1399	BUG_ON(cfqg->nr_cfqq < 1);
1400	cfqg->nr_cfqq--;
1401
1402	/* If there are other cfq queues under this group, don't delete it */
1403	if (cfqg->nr_cfqq)
1404		return;
1405
1406	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1407	cfq_group_service_tree_del(st, cfqg);
1408	cfqg->saved_wl_slice = 0;
1409	cfqg_stats_update_dequeue(cfqg);
1410}
1411
1412static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1413						unsigned int *unaccounted_time)
1414{
1415	unsigned int slice_used;
1416
1417	/*
1418	 * Queue got expired before even a single request completed or
1419	 * got expired immediately after first request completion.
1420	 */
1421	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
1422		/*
1423		 * Also charge the seek time incurred to the group, otherwise
 1424		 * if there are multiple queues in the group, each can dispatch
1425		 * a single request on seeky media and cause lots of seek time
1426		 * and group will never know it.
1427		 */
1428		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1429					1);
1430	} else {
1431		slice_used = jiffies - cfqq->slice_start;
1432		if (slice_used > cfqq->allocated_slice) {
1433			*unaccounted_time = slice_used - cfqq->allocated_slice;
1434			slice_used = cfqq->allocated_slice;
1435		}
1436		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1437			*unaccounted_time += cfqq->slice_start -
1438					cfqq->dispatch_start;
1439	}
1440
1441	return slice_used;
1442}
1443
1444static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1445				struct cfq_queue *cfqq)
1446{
1447	struct cfq_rb_root *st = &cfqd->grp_service_tree;
1448	unsigned int used_sl, charge, unaccounted_sl = 0;
1449	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1450			- cfqg->service_tree_idle.count;
1451	unsigned int vfr;
1452
1453	BUG_ON(nr_sync < 0);
1454	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1455
1456	if (iops_mode(cfqd))
1457		charge = cfqq->slice_dispatch;
1458	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1459		charge = cfqq->allocated_slice;
1460
1461	/*
1462	 * Can't update vdisktime while on service tree and cfqg->vfraction
1463	 * is valid only while on it.  Cache vfr, leave the service tree,
1464	 * update vdisktime and go back on.  The re-addition to the tree
1465	 * will also update the weights as necessary.
1466	 */
1467	vfr = cfqg->vfraction;
1468	cfq_group_service_tree_del(st, cfqg);
1469	cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
1470	cfq_group_service_tree_add(st, cfqg);
1471
1472	/* This group is being expired. Save the context */
1473	if (time_after(cfqd->workload_expires, jiffies)) {
1474		cfqg->saved_wl_slice = cfqd->workload_expires
1475						- jiffies;
1476		cfqg->saved_wl_type = cfqd->serving_wl_type;
1477		cfqg->saved_wl_class = cfqd->serving_wl_class;
1478	} else
1479		cfqg->saved_wl_slice = 0;
1480
1481	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1482					st->min_vdisktime);
1483	cfq_log_cfqq(cfqq->cfqd, cfqq,
1484		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1485		     used_sl, cfqq->slice_dispatch, charge,
1486		     iops_mode(cfqd), cfqq->nr_sectors);
1487	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1488	cfqg_stats_set_start_empty_time(cfqg);
1489}
1490
1491/**
1492 * cfq_init_cfqg_base - initialize base part of a cfq_group
1493 * @cfqg: cfq_group to initialize
1494 *
1495 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1496 * is enabled or not.
1497 */
1498static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1499{
1500	struct cfq_rb_root *st;
1501	int i, j;
1502
1503	for_each_cfqg_st(cfqg, i, j, st)
1504		*st = CFQ_RB_ROOT;
1505	RB_CLEAR_NODE(&cfqg->rb_node);
1506
1507	cfqg->ttime.last_end_request = jiffies;
1508}
1509
1510#ifdef CONFIG_CFQ_GROUP_IOSCHED
1511static void cfqg_stats_init(struct cfqg_stats *stats)
1512{
1513	blkg_rwstat_init(&stats->service_bytes);
1514	blkg_rwstat_init(&stats->serviced);
1515	blkg_rwstat_init(&stats->merged);
1516	blkg_rwstat_init(&stats->service_time);
1517	blkg_rwstat_init(&stats->wait_time);
1518	blkg_rwstat_init(&stats->queued);
1519
1520	blkg_stat_init(&stats->sectors);
1521	blkg_stat_init(&stats->time);
1522
1523#ifdef CONFIG_DEBUG_BLK_CGROUP
1524	blkg_stat_init(&stats->unaccounted_time);
1525	blkg_stat_init(&stats->avg_queue_size_sum);
1526	blkg_stat_init(&stats->avg_queue_size_samples);
1527	blkg_stat_init(&stats->dequeue);
1528	blkg_stat_init(&stats->group_wait_time);
1529	blkg_stat_init(&stats->idle_time);
1530	blkg_stat_init(&stats->empty_time);
1531#endif
1532}
1533
1534static void cfq_pd_init(struct blkcg_gq *blkg)
1535{
1536	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1537
1538	cfq_init_cfqg_base(cfqg);
1539	cfqg->weight = blkg->blkcg->cfq_weight;
1540	cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
1541	cfqg_stats_init(&cfqg->stats);
1542	cfqg_stats_init(&cfqg->dead_stats);
1543}
1544
1545static void cfq_pd_offline(struct blkcg_gq *blkg)
1546{
1547	/*
1548	 * @blkg is going offline and will be ignored by
1549	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
1550	 * that they don't get lost.  If IOs complete after this point, the
1551	 * stats for them will be lost.  Oh well...
1552	 */
1553	cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
1554}
1555
1556/* offset delta from cfqg->stats to cfqg->dead_stats */
1557static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
1558					offsetof(struct cfq_group, stats);
1559
1560/* to be used by recursive prfill, sums live and dead stats recursively */
1561static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
1562{
1563	u64 sum = 0;
1564
1565	sum += blkg_stat_recursive_sum(pd, off);
1566	sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
1567	return sum;
1568}
1569
1570/* to be used by recursive prfill, sums live and dead rwstats recursively */
1571static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
1572						       int off)
1573{
1574	struct blkg_rwstat a, b;
1575
1576	a = blkg_rwstat_recursive_sum(pd, off);
1577	b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
1578	blkg_rwstat_merge(&a, &b);
1579	return a;
1580}
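/*
 * The offset trick above works because ->stats and ->dead_stats are the
 * same struct type: the @off passed in by the cftype entries is the
 * offset of a field inside ->stats relative to struct cfq_group, so
 * off + dead_stats_off_delta is the offset of the very same field inside
 * ->dead_stats.  E.g. offsetof(struct cfq_group, stats.time) +
 * dead_stats_off_delta == offsetof(struct cfq_group, dead_stats.time),
 * letting one helper sum the live and dead variants of any counter.
 */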
1581
1582static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
1583{
1584	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1585
1586	cfqg_stats_reset(&cfqg->stats);
1587	cfqg_stats_reset(&cfqg->dead_stats);
1588}
1589
1590/*
1591 * Search for the cfq group current task belongs to. request_queue lock must
1592 * be held.
1593 */
1594static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1595						struct blkcg *blkcg)
1596{
1597	struct request_queue *q = cfqd->queue;
1598	struct cfq_group *cfqg = NULL;
1599
1600	/* avoid lookup for the common case where there's no blkcg */
1601	if (blkcg == &blkcg_root) {
1602		cfqg = cfqd->root_group;
1603	} else {
1604		struct blkcg_gq *blkg;
1605
1606		blkg = blkg_lookup_create(blkcg, q);
1607		if (!IS_ERR(blkg))
1608			cfqg = blkg_to_cfqg(blkg);
1609	}
1610
1611	return cfqg;
1612}
1613
1614static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1615{
1616	/* Currently, all async queues are mapped to root group */
1617	if (!cfq_cfqq_sync(cfqq))
1618		cfqg = cfqq->cfqd->root_group;
1619
1620	cfqq->cfqg = cfqg;
1621	/* cfqq reference on cfqg */
1622	cfqg_get(cfqg);
1623}
1624
1625static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1626				     struct blkg_policy_data *pd, int off)
1627{
1628	struct cfq_group *cfqg = pd_to_cfqg(pd);
1629
1630	if (!cfqg->dev_weight)
1631		return 0;
1632	return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1633}
1634
1635static int cfqg_print_weight_device(struct seq_file *sf, void *v)
1636{
1637	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1638			  cfqg_prfill_weight_device, &blkcg_policy_cfq,
1639			  0, false);
1640	return 0;
1641}
1642
1643static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1644					  struct blkg_policy_data *pd, int off)
1645{
1646	struct cfq_group *cfqg = pd_to_cfqg(pd);
1647
1648	if (!cfqg->dev_leaf_weight)
1649		return 0;
1650	return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1651}
1652
1653static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
1654{
1655	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1656			  cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1657			  0, false);
1658	return 0;
1659}
1660
1661static int cfq_print_weight(struct seq_file *sf, void *v)
1662{
1663	seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_weight);
1664	return 0;
1665}
1666
1667static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
1668{
1669	seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_leaf_weight);
1670	return 0;
1671}
1672
1673static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
1674				    struct cftype *cft, const char *buf,
1675				    bool is_leaf_weight)
1676{
1677	struct blkcg *blkcg = css_to_blkcg(css);
1678	struct blkg_conf_ctx ctx;
1679	struct cfq_group *cfqg;
1680	int ret;
1681
1682	ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1683	if (ret)
1684		return ret;
1685
1686	ret = -EINVAL;
1687	cfqg = blkg_to_cfqg(ctx.blkg);
1688	if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
1689		if (!is_leaf_weight) {
1690			cfqg->dev_weight = ctx.v;
1691			cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
1692		} else {
1693			cfqg->dev_leaf_weight = ctx.v;
1694			cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
1695		}
1696		ret = 0;
1697	}
1698
1699	blkg_conf_finish(&ctx);
1700	return ret;
1701}
1702
1703static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
1704				  struct cftype *cft, char *buf)
1705{
1706	return __cfqg_set_weight_device(css, cft, buf, false);
1707}
1708
1709static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
1710				       struct cftype *cft, char *buf)
1711{
1712	return __cfqg_set_weight_device(css, cft, buf, true);
1713}
1714
1715static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1716			    u64 val, bool is_leaf_weight)
1717{
1718	struct blkcg *blkcg = css_to_blkcg(css);
1719	struct blkcg_gq *blkg;
1720
1721	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
1722		return -EINVAL;
1723
1724	spin_lock_irq(&blkcg->lock);
1725
1726	if (!is_leaf_weight)
1727		blkcg->cfq_weight = val;
1728	else
1729		blkcg->cfq_leaf_weight = val;
1730
1731	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1732		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1733
1734		if (!cfqg)
1735			continue;
1736
1737		if (!is_leaf_weight) {
1738			if (!cfqg->dev_weight)
1739				cfqg->new_weight = blkcg->cfq_weight;
1740		} else {
1741			if (!cfqg->dev_leaf_weight)
1742				cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
1743		}
1744	}
1745
1746	spin_unlock_irq(&blkcg->lock);
1747	return 0;
1748}
1749
1750static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1751			  u64 val)
1752{
1753	return __cfq_set_weight(css, cft, val, false);
1754}
1755
1756static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1757			       struct cftype *cft, u64 val)
1758{
1759	return __cfq_set_weight(css, cft, val, true);
1760}
1761
1762static int cfqg_print_stat(struct seq_file *sf, void *v)
1763{
1764	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1765			  &blkcg_policy_cfq, seq_cft(sf)->private, false);
1766	return 0;
1767}
1768
1769static int cfqg_print_rwstat(struct seq_file *sf, void *v)
1770{
1771	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1772			  &blkcg_policy_cfq, seq_cft(sf)->private, true);
1773	return 0;
1774}
1775
1776static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1777				      struct blkg_policy_data *pd, int off)
1778{
1779	u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
1780
1781	return __blkg_prfill_u64(sf, pd, sum);
1782}
1783
1784static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1785					struct blkg_policy_data *pd, int off)
1786{
1787	struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
1788
1789	return __blkg_prfill_rwstat(sf, pd, &sum);
1790}
1791
1792static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
1793{
1794	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1795			  cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1796			  seq_cft(sf)->private, false);
1797	return 0;
1798}
1799
1800static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1801{
1802	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1803			  cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1804			  seq_cft(sf)->private, true);
1805	return 0;
1806}
1807
1808#ifdef CONFIG_DEBUG_BLK_CGROUP
1809static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1810				      struct blkg_policy_data *pd, int off)
1811{
1812	struct cfq_group *cfqg = pd_to_cfqg(pd);
1813	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1814	u64 v = 0;
1815
1816	if (samples) {
1817		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1818		v = div64_u64(v, samples);
1819	}
1820	__blkg_prfill_u64(sf, pd, v);
1821	return 0;
1822}
1823
1824/* print avg_queue_size */
1825static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1826{
1827	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1828			  cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
1829			  0, false);
1830	return 0;
1831}
1832#endif	/* CONFIG_DEBUG_BLK_CGROUP */
1833
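/*
 * The table below is surfaced in each cgroup directory as blkio.weight,
 * blkio.weight_device, blkio.leaf_weight and the blkio.io_* statistics
 * files (names assume the usual "blkio" controller prefix). On the root
 * cgroup, "weight" and "weight_device" are mapped to the leaf weights, as
 * noted in the comments below.
 */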
1834static struct cftype cfq_blkcg_files[] = {
1835	/* on root, weight is mapped to leaf_weight */
1836	{
1837		.name = "weight_device",
1838		.flags = CFTYPE_ONLY_ON_ROOT,
1839		.seq_show = cfqg_print_leaf_weight_device,
1840		.write_string = cfqg_set_leaf_weight_device,
1841	},
1842	{
1843		.name = "weight",
1844		.flags = CFTYPE_ONLY_ON_ROOT,
1845		.seq_show = cfq_print_leaf_weight,
1846		.write_u64 = cfq_set_leaf_weight,
1847	},
1848
1849	/* no such mapping necessary for !roots */
1850	{
1851		.name = "weight_device",
1852		.flags = CFTYPE_NOT_ON_ROOT,
1853		.seq_show = cfqg_print_weight_device,
1854		.write_string = cfqg_set_weight_device,
1855	},
1856	{
1857		.name = "weight",
1858		.flags = CFTYPE_NOT_ON_ROOT,
1859		.seq_show = cfq_print_weight,
1860		.write_u64 = cfq_set_weight,
1861	},
1862
1863	{
1864		.name = "leaf_weight_device",
1865		.seq_show = cfqg_print_leaf_weight_device,
1866		.write_string = cfqg_set_leaf_weight_device,
1867	},
1868	{
1869		.name = "leaf_weight",
1870		.seq_show = cfq_print_leaf_weight,
1871		.write_u64 = cfq_set_leaf_weight,
1872	},
1873
1874	/* statistics, covers only the tasks in the cfqg */
1875	{
1876		.name = "time",
1877		.private = offsetof(struct cfq_group, stats.time),
1878		.seq_show = cfqg_print_stat,
1879	},
1880	{
1881		.name = "sectors",
1882		.private = offsetof(struct cfq_group, stats.sectors),
1883		.seq_show = cfqg_print_stat,
1884	},
1885	{
1886		.name = "io_service_bytes",
1887		.private = offsetof(struct cfq_group, stats.service_bytes),
1888		.seq_show = cfqg_print_rwstat,
1889	},
1890	{
1891		.name = "io_serviced",
1892		.private = offsetof(struct cfq_group, stats.serviced),
1893		.seq_show = cfqg_print_rwstat,
1894	},
1895	{
1896		.name = "io_service_time",
1897		.private = offsetof(struct cfq_group, stats.service_time),
1898		.seq_show = cfqg_print_rwstat,
1899	},
1900	{
1901		.name = "io_wait_time",
1902		.private = offsetof(struct cfq_group, stats.wait_time),
1903		.seq_show = cfqg_print_rwstat,
1904	},
1905	{
1906		.name = "io_merged",
1907		.private = offsetof(struct cfq_group, stats.merged),
1908		.seq_show = cfqg_print_rwstat,
1909	},
1910	{
1911		.name = "io_queued",
1912		.private = offsetof(struct cfq_group, stats.queued),
1913		.seq_show = cfqg_print_rwstat,
1914	},
1915
1916	/* the same statistics, covering the cfqg and its descendants */
1917	{
1918		.name = "time_recursive",
1919		.private = offsetof(struct cfq_group, stats.time),
1920		.seq_show = cfqg_print_stat_recursive,
1921	},
1922	{
1923		.name = "sectors_recursive",
1924		.private = offsetof(struct cfq_group, stats.sectors),
1925		.seq_show = cfqg_print_stat_recursive,
1926	},
1927	{
1928		.name = "io_service_bytes_recursive",
1929		.private = offsetof(struct cfq_group, stats.service_bytes),
1930		.seq_show = cfqg_print_rwstat_recursive,
1931	},
1932	{
1933		.name = "io_serviced_recursive",
1934		.private = offsetof(struct cfq_group, stats.serviced),
1935		.seq_show = cfqg_print_rwstat_recursive,
1936	},
1937	{
1938		.name = "io_service_time_recursive",
1939		.private = offsetof(struct cfq_group, stats.service_time),
1940		.seq_show = cfqg_print_rwstat_recursive,
1941	},
1942	{
1943		.name = "io_wait_time_recursive",
1944		.private = offsetof(struct cfq_group, stats.wait_time),
1945		.seq_show = cfqg_print_rwstat_recursive,
1946	},
1947	{
1948		.name = "io_merged_recursive",
1949		.private = offsetof(struct cfq_group, stats.merged),
1950		.seq_show = cfqg_print_rwstat_recursive,
1951	},
1952	{
1953		.name = "io_queued_recursive",
1954		.private = offsetof(struct cfq_group, stats.queued),
1955		.seq_show = cfqg_print_rwstat_recursive,
1956	},
1957#ifdef CONFIG_DEBUG_BLK_CGROUP
1958	{
1959		.name = "avg_queue_size",
1960		.seq_show = cfqg_print_avg_queue_size,
1961	},
1962	{
1963		.name = "group_wait_time",
1964		.private = offsetof(struct cfq_group, stats.group_wait_time),
1965		.seq_show = cfqg_print_stat,
1966	},
1967	{
1968		.name = "idle_time",
1969		.private = offsetof(struct cfq_group, stats.idle_time),
1970		.seq_show = cfqg_print_stat,
1971	},
1972	{
1973		.name = "empty_time",
1974		.private = offsetof(struct cfq_group, stats.empty_time),
1975		.seq_show = cfqg_print_stat,
1976	},
1977	{
1978		.name = "dequeue",
1979		.private = offsetof(struct cfq_group, stats.dequeue),
1980		.seq_show = cfqg_print_stat,
1981	},
1982	{
1983		.name = "unaccounted_time",
1984		.private = offsetof(struct cfq_group, stats.unaccounted_time),
1985		.seq_show = cfqg_print_stat,
1986	},
1987#endif	/* CONFIG_DEBUG_BLK_CGROUP */
1988	{ }	/* terminate */
1989	};
1990#else /* GROUP_IOSCHED */
1991static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1992						struct blkcg *blkcg)
1993{
1994	return cfqd->root_group;
1995}
1996
1997static inline void
1998cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1999	cfqq->cfqg = cfqg;
2000}
2001
2002#endif /* GROUP_IOSCHED */
2003
2004/*
2005 * The cfqd->service_trees hold all pending cfq_queues that have
2006 * requests waiting to be processed. They are sorted in the order in
2007 * which we will service the queues.
2008 */
2009static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2010				 bool add_front)
2011{
2012	struct rb_node **p, *parent;
2013	struct cfq_queue *__cfqq;
2014	unsigned long rb_key;
2015	struct cfq_rb_root *st;
2016	int left;
2017	int new_cfqq = 1;
2018
2019	st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
2020	if (cfq_class_idle(cfqq)) {
2021		rb_key = CFQ_IDLE_DELAY;
2022		parent = rb_last(&st->rb);
2023		if (parent && parent != &cfqq->rb_node) {
2024			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2025			rb_key += __cfqq->rb_key;
2026		} else
2027			rb_key += jiffies;
2028	} else if (!add_front) {
2029		/*
2030		 * Get our rb key offset. Subtract any residual slice
2031		 * value carried from last service. A negative resid
2032		 * count indicates slice overrun, and this should position
2033		 * the next service time further away in the tree.
2034		 */
2035		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
2036		rb_key -= cfqq->slice_resid;
2037		cfqq->slice_resid = 0;
2038	} else {
2039		rb_key = -HZ;
2040		__cfqq = cfq_rb_first(st);
2041		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
2042	}
2043
2044	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2045		new_cfqq = 0;
2046		/*
2047		 * same position, nothing more to do
2048		 */
2049		if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
2050			return;
2051
2052		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2053		cfqq->service_tree = NULL;
2054	}
2055
2056	left = 1;
2057	parent = NULL;
2058	cfqq->service_tree = st;
2059	p = &st->rb.rb_node;
2060	while (*p) {
2061		parent = *p;
2062		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2063
2064		/*
2065		 * sort by key, that represents service time.
2066		 */
2067		if (time_before(rb_key, __cfqq->rb_key))
2068			p = &parent->rb_left;
2069		else {
2070			p = &parent->rb_right;
2071			left = 0;
2072		}
2073	}
2074
2075	if (left)
2076		st->left = &cfqq->rb_node;
2077
2078	cfqq->rb_key = rb_key;
2079	rb_link_node(&cfqq->rb_node, parent, p);
2080	rb_insert_color(&cfqq->rb_node, &st->rb);
2081	st->count++;
2082	if (add_front || !new_cfqq)
2083		return;
2084	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
2085}
2086
2087static struct cfq_queue *
2088cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2089		     sector_t sector, struct rb_node **ret_parent,
2090		     struct rb_node ***rb_link)
2091{
2092	struct rb_node **p, *parent;
2093	struct cfq_queue *cfqq = NULL;
2094
2095	parent = NULL;
2096	p = &root->rb_node;
2097	while (*p) {
2098		struct rb_node **n;
2099
2100		parent = *p;
2101		cfqq = rb_entry(parent, struct cfq_queue, p_node);
2102
2103		/*
2104		 * Sort strictly based on sector.  Smallest to the left,
2105		 * largest to the right.
2106		 */
2107		if (sector > blk_rq_pos(cfqq->next_rq))
2108			n = &(*p)->rb_right;
2109		else if (sector < blk_rq_pos(cfqq->next_rq))
2110			n = &(*p)->rb_left;
2111		else
2112			break;
2113		p = n;
2114		cfqq = NULL;
2115	}
2116
2117	*ret_parent = parent;
2118	if (rb_link)
2119		*rb_link = p;
2120	return cfqq;
2121}
2122
2123static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2124{
2125	struct rb_node **p, *parent;
2126	struct cfq_queue *__cfqq;
2127
2128	if (cfqq->p_root) {
2129		rb_erase(&cfqq->p_node, cfqq->p_root);
2130		cfqq->p_root = NULL;
2131	}
2132
2133	if (cfq_class_idle(cfqq))
2134		return;
2135	if (!cfqq->next_rq)
2136		return;
2137
2138	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2139	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2140				      blk_rq_pos(cfqq->next_rq), &parent, &p);
2141	if (!__cfqq) {
2142		rb_link_node(&cfqq->p_node, parent, p);
2143		rb_insert_color(&cfqq->p_node, cfqq->p_root);
2144	} else
2145		cfqq->p_root = NULL;
2146}
2147
2148/*
2149 * Update cfqq's position in the service tree.
2150 */
2151static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2152{
2153	/*
2154	 * Resorting requires the cfqq to be on the RR list already.
2155	 */
2156	if (cfq_cfqq_on_rr(cfqq)) {
2157		cfq_service_tree_add(cfqd, cfqq, 0);
2158		cfq_prio_tree_add(cfqd, cfqq);
2159	}
2160}
2161
2162/*
2163 * add to busy list of queues for service, trying to be fair in ordering
2164 * the pending list according to last request service
2165 */
2166static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2167{
2168	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
2169	BUG_ON(cfq_cfqq_on_rr(cfqq));
2170	cfq_mark_cfqq_on_rr(cfqq);
2171	cfqd->busy_queues++;
2172	if (cfq_cfqq_sync(cfqq))
2173		cfqd->busy_sync_queues++;
2174
2175	cfq_resort_rr_list(cfqd, cfqq);
2176}
2177
2178/*
2179 * Called when the cfqq no longer has requests pending, remove it from
2180 * the service tree.
2181 */
2182static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2183{
2184	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
2185	BUG_ON(!cfq_cfqq_on_rr(cfqq));
2186	cfq_clear_cfqq_on_rr(cfqq);
2187
2188	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2189		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2190		cfqq->service_tree = NULL;
2191	}
2192	if (cfqq->p_root) {
2193		rb_erase(&cfqq->p_node, cfqq->p_root);
2194		cfqq->p_root = NULL;
2195	}
2196
2197	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
2198	BUG_ON(!cfqd->busy_queues);
2199	cfqd->busy_queues--;
2200	if (cfq_cfqq_sync(cfqq))
2201		cfqd->busy_sync_queues--;
2202}
2203
2204/*
2205 * rb tree support functions
2206 */
2207static void cfq_del_rq_rb(struct request *rq)
2208{
2209	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2210	const int sync = rq_is_sync(rq);
2211
2212	BUG_ON(!cfqq->queued[sync]);
2213	cfqq->queued[sync]--;
2214
2215	elv_rb_del(&cfqq->sort_list, rq);
2216
2217	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2218		/*
2219		 * Queue will be deleted from service tree when we actually
2220		 * expire it later. Right now just remove it from prio tree
2221		 * as it is empty.
2222		 */
2223		if (cfqq->p_root) {
2224			rb_erase(&cfqq->p_node, cfqq->p_root);
2225			cfqq->p_root = NULL;
2226		}
2227	}
2228}
2229
2230static void cfq_add_rq_rb(struct request *rq)
2231{
2232	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2233	struct cfq_data *cfqd = cfqq->cfqd;
2234	struct request *prev;
2235
2236	cfqq->queued[rq_is_sync(rq)]++;
2237
2238	elv_rb_add(&cfqq->sort_list, rq);
2239
2240	if (!cfq_cfqq_on_rr(cfqq))
2241		cfq_add_cfqq_rr(cfqd, cfqq);
2242
2243	/*
2244	 * check if this request is a better next-serve candidate
2245	 */
2246	prev = cfqq->next_rq;
2247	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
2248
2249	/*
2250	 * adjust priority tree position, if ->next_rq changes
2251	 */
2252	if (prev != cfqq->next_rq)
2253		cfq_prio_tree_add(cfqd, cfqq);
2254
2255	BUG_ON(!cfqq->next_rq);
2256}
2257
2258static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
2259{
2260	elv_rb_del(&cfqq->sort_list, rq);
2261	cfqq->queued[rq_is_sync(rq)]--;
2262	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2263	cfq_add_rq_rb(rq);
2264	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2265				 rq->cmd_flags);
2266}
2267
2268static struct request *
2269cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
2270{
2271	struct task_struct *tsk = current;
2272	struct cfq_io_cq *cic;
2273	struct cfq_queue *cfqq;
2274
2275	cic = cfq_cic_lookup(cfqd, tsk->io_context);
2276	if (!cic)
2277		return NULL;
2278
2279	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2280	if (cfqq)
2281		return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
2282
2283	return NULL;
2284}
2285
2286static void cfq_activate_request(struct request_queue *q, struct request *rq)
2287{
2288	struct cfq_data *cfqd = q->elevator->elevator_data;
2289
2290	cfqd->rq_in_driver++;
2291	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
2292						cfqd->rq_in_driver);
2293
2294	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2295}
2296
2297static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
2298{
2299	struct cfq_data *cfqd = q->elevator->elevator_data;
2300
2301	WARN_ON(!cfqd->rq_in_driver);
2302	cfqd->rq_in_driver--;
2303	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
2304						cfqd->rq_in_driver);
2305}
2306
2307static void cfq_remove_request(struct request *rq)
2308{
2309	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2310
2311	if (cfqq->next_rq == rq)
2312		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
2313
2314	list_del_init(&rq->queuelist);
2315	cfq_del_rq_rb(rq);
2316
2317	cfqq->cfqd->rq_queued--;
2318	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2319	if (rq->cmd_flags & REQ_PRIO) {
2320		WARN_ON(!cfqq->prio_pending);
2321		cfqq->prio_pending--;
2322	}
2323}
2324
2325static int cfq_merge(struct request_queue *q, struct request **req,
2326		     struct bio *bio)
2327{
2328	struct cfq_data *cfqd = q->elevator->elevator_data;
2329	struct request *__rq;
2330
2331	__rq = cfq_find_rq_fmerge(cfqd, bio);
2332	if (__rq && elv_rq_merge_ok(__rq, bio)) {
2333		*req = __rq;
2334		return ELEVATOR_FRONT_MERGE;
2335	}
2336
2337	return ELEVATOR_NO_MERGE;
2338}
2339
2340static void cfq_merged_request(struct request_queue *q, struct request *req,
2341			       int type)
2342{
2343	if (type == ELEVATOR_FRONT_MERGE) {
2344		struct cfq_queue *cfqq = RQ_CFQQ(req);
2345
2346		cfq_reposition_rq_rb(cfqq, req);
2347	}
2348}
2349
2350static void cfq_bio_merged(struct request_queue *q, struct request *req,
2351				struct bio *bio)
2352{
2353	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
2354}
2355
2356static void
2357cfq_merged_requests(struct request_queue *q, struct request *rq,
2358		    struct request *next)
2359{
2360	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2361	struct cfq_data *cfqd = q->elevator->elevator_data;
2362
2363	/*
2364	 * reposition in fifo if next is older than rq
2365	 */
2366	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2367	    time_before(next->fifo_time, rq->fifo_time) &&
2368	    cfqq == RQ_CFQQ(next)) {
2369		list_move(&rq->queuelist, &next->queuelist);
2370		rq->fifo_time = next->fifo_time;
2371	}
2372
2373	if (cfqq->next_rq == next)
2374		cfqq->next_rq = rq;
2375	cfq_remove_request(next);
2376	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
2377
2378	cfqq = RQ_CFQQ(next);
2379	/*
2380	 * all requests of this queue are merged to other queues, delete it
2381	 * from the service tree. If it's the active_queue,
2382	 * cfq_dispatch_requests() will choose to expire it or idle on it
2383	 */
2384	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2385	    cfqq != cfqd->active_queue)
2386		cfq_del_cfqq_rr(cfqd, cfqq);
2387}
2388
2389static int cfq_allow_merge(struct request_queue *q, struct request *rq,
2390			   struct bio *bio)
2391{
2392	struct cfq_data *cfqd = q->elevator->elevator_data;
2393	struct cfq_io_cq *cic;
2394	struct cfq_queue *cfqq;
2395
2396	/*
2397	 * Disallow merge of a sync bio into an async request.
2398	 */
2399	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2400		return false;
2401
2402	/*
2403	 * Lookup the cfqq that this bio will be queued with and allow
2404	 * merge only if rq is queued there.
2405	 */
2406	cic = cfq_cic_lookup(cfqd, current->io_context);
2407	if (!cic)
2408		return false;
2409
2410	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2411	return cfqq == RQ_CFQQ(rq);
2412}
2413
2414static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2415{
2416	del_timer(&cfqd->idle_slice_timer);
2417	cfqg_stats_update_idle_time(cfqq->cfqg);
2418}
2419
2420static void __cfq_set_active_queue(struct cfq_data *cfqd,
2421				   struct cfq_queue *cfqq)
2422{
2423	if (cfqq) {
2424		cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2425				cfqd->serving_wl_class, cfqd->serving_wl_type);
2426		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2427		cfqq->slice_start = 0;
2428		cfqq->dispatch_start = jiffies;
2429		cfqq->allocated_slice = 0;
2430		cfqq->slice_end = 0;
2431		cfqq->slice_dispatch = 0;
2432		cfqq->nr_sectors = 0;
2433
2434		cfq_clear_cfqq_wait_request(cfqq);
2435		cfq_clear_cfqq_must_dispatch(cfqq);
2436		cfq_clear_cfqq_must_alloc_slice(cfqq);
2437		cfq_clear_cfqq_fifo_expire(cfqq);
2438		cfq_mark_cfqq_slice_new(cfqq);
2439
2440		cfq_del_timer(cfqd, cfqq);
2441	}
2442
2443	cfqd->active_queue = cfqq;
2444}
2445
2446/*
2447 * current cfqq expired its slice (or was too idle), select new one
2448 */
2449static void
2450__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2451		    bool timed_out)
2452{
2453	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2454
2455	if (cfq_cfqq_wait_request(cfqq))
2456		cfq_del_timer(cfqd, cfqq);
2457
2458	cfq_clear_cfqq_wait_request(cfqq);
2459	cfq_clear_cfqq_wait_busy(cfqq);
2460
2461	/*
2462	 * If this cfqq is shared between multiple processes, check to
2463	 * make sure that those processes are still issuing I/Os within
2464	 * the mean seek distance.  If not, it may be time to break the
2465	 * queues apart again.
2466	 */
2467	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2468		cfq_mark_cfqq_split_coop(cfqq);
2469
2470	/*
2471	 * store what was left of this slice, if the queue idled/timed out
2472	 */
2473	if (timed_out) {
2474		if (cfq_cfqq_slice_new(cfqq))
2475			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2476		else
2477			cfqq->slice_resid = cfqq->slice_end - jiffies;
2478		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2479	}
2480
2481	cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2482
2483	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2484		cfq_del_cfqq_rr(cfqd, cfqq);
2485
2486	cfq_resort_rr_list(cfqd, cfqq);
2487
2488	if (cfqq == cfqd->active_queue)
2489		cfqd->active_queue = NULL;
2490
2491	if (cfqd->active_cic) {
2492		put_io_context(cfqd->active_cic->icq.ioc);
2493		cfqd->active_cic = NULL;
2494	}
2495}
2496
2497static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2498{
2499	struct cfq_queue *cfqq = cfqd->active_queue;
2500
2501	if (cfqq)
2502		__cfq_slice_expired(cfqd, cfqq, timed_out);
2503}
2504
2505/*
2506 * Get next queue for service. Unless we have a queue preemption,
2507 * we'll simply select the first cfqq in the service tree.
2508 */
2509static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2510{
2511	struct cfq_rb_root *st = st_for(cfqd->serving_group,
2512			cfqd->serving_wl_class, cfqd->serving_wl_type);
2513
2514	if (!cfqd->rq_queued)
2515		return NULL;
2516
2517	/* There is nothing to dispatch */
2518	if (!st)
2519		return NULL;
2520	if (RB_EMPTY_ROOT(&st->rb))
2521		return NULL;
2522	return cfq_rb_first(st);
2523}
2524
2525static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2526{
2527	struct cfq_group *cfqg;
2528	struct cfq_queue *cfqq;
2529	int i, j;
2530	struct cfq_rb_root *st;
2531
2532	if (!cfqd->rq_queued)
2533		return NULL;
2534
2535	cfqg = cfq_get_next_cfqg(cfqd);
2536	if (!cfqg)
2537		return NULL;
2538
2539	for_each_cfqg_st(cfqg, i, j, st)
2540		if ((cfqq = cfq_rb_first(st)) != NULL)
2541			return cfqq;
2542	return NULL;
2543}
2544
2545/*
2546 * Get and set a new active queue for service.
2547 */
2548static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2549					      struct cfq_queue *cfqq)
2550{
2551	if (!cfqq)
2552		cfqq = cfq_get_next_queue(cfqd);
2553
2554	__cfq_set_active_queue(cfqd, cfqq);
2555	return cfqq;
2556}
2557
2558static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2559					  struct request *rq)
2560{
2561	if (blk_rq_pos(rq) >= cfqd->last_position)
2562		return blk_rq_pos(rq) - cfqd->last_position;
2563	else
2564		return cfqd->last_position - blk_rq_pos(rq);
2565}
2566
2567static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2568			       struct request *rq)
2569{
2570	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2571}
2572
2573static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2574				    struct cfq_queue *cur_cfqq)
2575{
2576	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2577	struct rb_node *parent, *node;
2578	struct cfq_queue *__cfqq;
2579	sector_t sector = cfqd->last_position;
2580
2581	if (RB_EMPTY_ROOT(root))
2582		return NULL;
2583
2584	/*
2585	 * First, if we find a request starting at the end of the last
2586	 * request, choose it.
2587	 */
2588	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2589	if (__cfqq)
2590		return __cfqq;
2591
2592	/*
2593	 * If the exact sector wasn't found, the parent of the NULL leaf
2594	 * will contain the closest sector.
2595	 */
2596	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
2597	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2598		return __cfqq;
2599
2600	if (blk_rq_pos(__cfqq->next_rq) < sector)
2601		node = rb_next(&__cfqq->p_node);
2602	else
2603		node = rb_prev(&__cfqq->p_node);
2604	if (!node)
2605		return NULL;
2606
2607	__cfqq = rb_entry(node, struct cfq_queue, p_node);
2608	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2609		return __cfqq;
2610
2611	return NULL;
2612}
2613
2614/*
2615 * cfqd - obvious
2616 * cur_cfqq - passed in so that we don't decide that the current queue is
2617 * 	      closely cooperating with itself.
2618 *
2619 * So, basically we're assuming that cur_cfqq has dispatched at least
2620 * one request, and that cfqd->last_position reflects a position on the disk
2621 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
2622 * assumption.
2623 */
2624static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2625					      struct cfq_queue *cur_cfqq)
2626{
2627	struct cfq_queue *cfqq;
2628
2629	if (cfq_class_idle(cur_cfqq))
2630		return NULL;
2631	if (!cfq_cfqq_sync(cur_cfqq))
2632		return NULL;
2633	if (CFQQ_SEEKY(cur_cfqq))
2634		return NULL;
2635
2636	/*
2637	 * Don't search priority tree if it's the only queue in the group.
2638	 */
2639	if (cur_cfqq->cfqg->nr_cfqq == 1)
2640		return NULL;
2641
2642	/*
2643	 * We should notice if some of the queues are cooperating, e.g.
2644	 * working closely on the same area of the disk. In that case,
2645	 * we can group them together so that we don't waste time idling.
2646	 */
2647	cfqq = cfqq_close(cfqd, cur_cfqq);
2648	if (!cfqq)
2649		return NULL;
2650
2651	/* If new queue belongs to different cfq_group, don't choose it */
2652	if (cur_cfqq->cfqg != cfqq->cfqg)
2653		return NULL;
2654
2655	/*
2656	 * It only makes sense to merge sync queues.
2657	 */
2658	if (!cfq_cfqq_sync(cfqq))
2659		return NULL;
2660	if (CFQQ_SEEKY(cfqq))
2661		return NULL;
2662
2663	/*
2664	 * Do not merge queues of different priority classes
2665	 */
2666	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2667		return NULL;
2668
2669	return cfqq;
2670}
2671
2672/*
2673 * Determine whether we should enforce idle window for this queue.
2674 */
2675
2676static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2677{
2678	enum wl_class_t wl_class = cfqq_class(cfqq);
2679	struct cfq_rb_root *st = cfqq->service_tree;
2680
2681	BUG_ON(!st);
2682	BUG_ON(!st->count);
2683
2684	if (!cfqd->cfq_slice_idle)
2685		return false;
2686
2687	/* We never do for idle class queues. */
2688	if (wl_class == IDLE_WORKLOAD)
2689		return false;
2690
2691	/* We do for queues that were marked with idle window flag. */
2692	if (cfq_cfqq_idle_window(cfqq) &&
2693	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2694		return true;
2695
2696	/*
2697	 * Otherwise, we do only if they are the last ones
2698	 * in their service tree.
2699	 */
2700	if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2701	   !cfq_io_thinktime_big(cfqd, &st->ttime, false))
2702		return true;
2703	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
2704	return false;
2705}
2706
2707static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2708{
2709	struct cfq_queue *cfqq = cfqd->active_queue;
2710	struct cfq_io_cq *cic;
2711	unsigned long sl, group_idle = 0;
2712
2713	/*
2714	 * SSD device without seek penalty, disable idling. But only do so
2715	 * for devices that support queuing, otherwise we still have a problem
2716	 * with sync vs async workloads.
2717	 */
2718	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2719		return;
2720
2721	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2722	WARN_ON(cfq_cfqq_slice_new(cfqq));
2723
2724	/*
2725	 * idle is disabled, either manually or by past process history
2726	 */
2727	if (!cfq_should_idle(cfqd, cfqq)) {
2728		/* no queue idling. Check for group idling */
2729		if (cfqd->cfq_group_idle)
2730			group_idle = cfqd->cfq_group_idle;
2731		else
2732			return;
2733	}
2734
2735	/*
2736	 * still active requests from this queue, don't idle
2737	 */
2738	if (cfqq->dispatched)
2739		return;
2740
2741	/*
2742	 * task has exited, don't wait
2743	 */
2744	cic = cfqd->active_cic;
2745	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2746		return;
2747
2748	/*
2749	 * If our average think time is larger than the remaining time
2750	 * slice, then don't idle. This avoids overrunning the allotted
2751	 * time slice.
2752	 */
2753	if (sample_valid(cic->ttime.ttime_samples) &&
2754	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2755		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2756			     cic->ttime.ttime_mean);
2757		return;
2758	}
2759
2760	/* There are other queues in the group, don't do group idle */
2761	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2762		return;
2763
2764	cfq_mark_cfqq_wait_request(cfqq);
2765
2766	if (group_idle)
2767		sl = cfqd->cfq_group_idle;
2768	else
2769		sl = cfqd->cfq_slice_idle;
2770
2771	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2772	cfqg_stats_set_start_idle_time(cfqq->cfqg);
2773	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2774			group_idle ? 1 : 0);
2775}
2776
2777/*
2778 * Move request from internal lists to the request queue dispatch list.
2779 */
2780static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2781{
2782	struct cfq_data *cfqd = q->elevator->elevator_data;
2783	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2784
2785	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2786
2787	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2788	cfq_remove_request(rq);
2789	cfqq->dispatched++;
2790	(RQ_CFQG(rq))->dispatched++;
2791	elv_dispatch_sort(q, rq);
2792
2793	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2794	cfqq->nr_sectors += blk_rq_sectors(rq);
2795	cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
2796}
2797
2798/*
2799 * return expired entry, or NULL to just start from scratch in rbtree
2800 */
2801static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2802{
2803	struct request *rq = NULL;
2804
2805	if (cfq_cfqq_fifo_expire(cfqq))
2806		return NULL;
2807
2808	cfq_mark_cfqq_fifo_expire(cfqq);
2809
2810	if (list_empty(&cfqq->fifo))
2811		return NULL;
2812
2813	rq = rq_entry_fifo(cfqq->fifo.next);
2814	if (time_before(jiffies, rq->fifo_time))
2815		rq = NULL;
2816
2817	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2818	return rq;
2819}
2820
2821static inline int
2822cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2823{
2824	const int base_rq = cfqd->cfq_slice_async_rq;
2825
2826	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2827
2828	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2829}
2830
2831/*
2832 * Must be called with the queue_lock held.
2833 */
2834static int cfqq_process_refs(struct cfq_queue *cfqq)
2835{
2836	int process_refs, io_refs;
2837
2838	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2839	process_refs = cfqq->ref - io_refs;
2840	BUG_ON(process_refs < 0);
2841	return process_refs;
2842}
2843
2844static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2845{
2846	int process_refs, new_process_refs;
2847	struct cfq_queue *__cfqq;
2848
2849	/*
2850	 * If there are no process references on the new_cfqq, then it is
2851	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2852	 * chain may have dropped their last reference (not just their
2853	 * last process reference).
2854	 */
2855	if (!cfqq_process_refs(new_cfqq))
2856		return;
2857
2858	/* Avoid a circular list and skip interim queue merges */
2859	while ((__cfqq = new_cfqq->new_cfqq)) {
2860		if (__cfqq == cfqq)
2861			return;
2862		new_cfqq = __cfqq;
2863	}
2864
2865	process_refs = cfqq_process_refs(cfqq);
2866	new_process_refs = cfqq_process_refs(new_cfqq);
2867	/*
2868	 * If the process for the cfqq has gone away, there is no
2869	 * sense in merging the queues.
2870	 */
2871	if (process_refs == 0 || new_process_refs == 0)
2872		return;
2873
2874	/*
2875	 * Merge in the direction of the lesser amount of work.
2876	 */
2877	if (new_process_refs >= process_refs) {
2878		cfqq->new_cfqq = new_cfqq;
2879		new_cfqq->ref += process_refs;
2880	} else {
2881		new_cfqq->new_cfqq = cfqq;
2882		cfqq->ref += new_process_refs;
2883	}
2884}
2885
2886static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
2887			struct cfq_group *cfqg, enum wl_class_t wl_class)
2888{
2889	struct cfq_queue *queue;
2890	int i;
2891	bool key_valid = false;
2892	unsigned long lowest_key = 0;
2893	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2894
2895	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2896		/* select the one with lowest rb_key */
2897		queue = cfq_rb_first(st_for(cfqg, wl_class, i));
2898		if (queue &&
2899		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
2900			lowest_key = queue->rb_key;
2901			cur_best = i;
2902			key_valid = true;
2903		}
2904	}
2905
2906	return cur_best;
2907}
2908
2909static void
2910choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
2911{
2912	unsigned slice;
2913	unsigned count;
2914	struct cfq_rb_root *st;
2915	unsigned group_slice;
2916	enum wl_class_t original_class = cfqd->serving_wl_class;
2917
2918	/* Choose next priority. RT > BE > IDLE */
2919	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2920		cfqd->serving_wl_class = RT_WORKLOAD;
2921	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2922		cfqd->serving_wl_class = BE_WORKLOAD;
2923	else {
2924		cfqd->serving_wl_class = IDLE_WORKLOAD;
2925		cfqd->workload_expires = jiffies + 1;
2926		return;
2927	}
2928
2929	if (original_class != cfqd->serving_wl_class)
2930		goto new_workload;
2931
2932	/*
2933	 * For RT and BE, we have to choose also the type
2934	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2935	 * expiration time
2936	 */
2937	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
2938	count = st->count;
2939
2940	/*
2941	 * check workload expiration, and that we still have other queues ready
2942	 */
2943	if (count && !time_after(jiffies, cfqd->workload_expires))
2944		return;
2945
2946new_workload:
2947	/* otherwise select new workload type */
2948	cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
2949					cfqd->serving_wl_class);
2950	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
2951	count = st->count;
2952
2953	/*
2954	 * the workload slice is computed as a fraction of target latency
2955	 * proportional to the number of queues in that workload, over
2956	 * all the queues in the same priority class
2957	 */
2958	group_slice = cfq_group_slice(cfqd, cfqg);
2959
2960	slice = group_slice * count /
2961		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
2962		      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
2963					cfqg));
2964
2965	if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
2966		unsigned int tmp;
2967
2968		/*
2969		 * Async queues are currently system wide. Just taking the
2970		 * proportion of queues within the same group would lead to a
2971		 * higher async ratio system wide, as the root group generally
2972		 * has a higher weight. A more accurate approach would be to
2973		 * calculate a system-wide async/sync ratio.
2974		 */
2975		tmp = cfqd->cfq_target_latency *
2976			cfqg_busy_async_queues(cfqd, cfqg);
2977		tmp = tmp/cfqd->busy_queues;
2978		slice = min_t(unsigned, slice, tmp);
2979
2980		/* async workload slice is scaled down according to
2981		 * the sync/async slice ratio. */
2982		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2983	} else
2984		/* sync workload slice is at least 2 * cfq_slice_idle */
2985		slice = max(slice, 2 * cfqd->cfq_slice_idle);
2986
2987	slice = max_t(unsigned, slice, CFQ_MIN_TT);
2988	cfq_log(cfqd, "workload slice:%d", slice);
2989	cfqd->workload_expires = jiffies + slice;
2990}
2991
2992static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2993{
2994	struct cfq_rb_root *st = &cfqd->grp_service_tree;
2995	struct cfq_group *cfqg;
2996
2997	if (RB_EMPTY_ROOT(&st->rb))
2998		return NULL;
2999	cfqg = cfq_rb_first_group(st);
3000	update_min_vdisktime(st);
3001	return cfqg;
3002}
3003
3004static void cfq_choose_cfqg(struct cfq_data *cfqd)
3005{
3006	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3007
3008	cfqd->serving_group = cfqg;
3009
3010	/* Restore the workload type data */
3011	if (cfqg->saved_wl_slice) {
3012		cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
3013		cfqd->serving_wl_type = cfqg->saved_wl_type;
3014		cfqd->serving_wl_class = cfqg->saved_wl_class;
3015	} else
3016		cfqd->workload_expires = jiffies - 1;
3017
3018	choose_wl_class_and_type(cfqd, cfqg);
3019}
3020
3021/*
3022 * Select a queue for service. If we have a current active queue,
3023 * check whether to continue servicing it, or retrieve and set a new one.
3024 */
3025static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
3026{
3027	struct cfq_queue *cfqq, *new_cfqq = NULL;
3028
3029	cfqq = cfqd->active_queue;
3030	if (!cfqq)
3031		goto new_queue;
3032
3033	if (!cfqd->rq_queued)
3034		return NULL;
3035
3036	/*
3037	 * We were waiting for group to get backlogged. Expire the queue
3038	 */
3039	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3040		goto expire;
3041
3042	/*
3043	 * The active queue has run out of time, expire it and select new.
3044	 */
3045	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3046		/*
3047		 * If the slice had not expired at the completion of the last
3048		 * request, we might not have turned on the wait_busy flag.
3049		 * Don't expire the queue yet. Allow the group to get backlogged.
3050		 *
3051		 * The very fact that we have used up the slice means we have
3052		 * been idling all along on this queue, so it should be ok to
3053		 * wait for this request to complete.
3054		 */
3055		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3056		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3057			cfqq = NULL;
3058			goto keep_queue;
3059		} else
3060			goto check_group_idle;
3061	}
3062
3063	/*
3064	 * The active queue has requests and isn't expired, allow it to
3065	 * dispatch.
3066	 */
3067	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3068		goto keep_queue;
3069
3070	/*
3071	 * If another queue has a request waiting within our mean seek
3072	 * distance, let it run.  The expire code will check for close
3073	 * cooperators and put the close queue at the front of the service
3074	 * tree.  If possible, merge the expiring queue with the new cfqq.
3075	 */
3076	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
3077	if (new_cfqq) {
3078		if (!cfqq->new_cfqq)
3079			cfq_setup_merge(cfqq, new_cfqq);
3080		goto expire;
3081	}
3082
3083	/*
3084	 * No requests pending. If the active queue still has requests in
3085	 * flight or is idling for a new request, allow either of these
3086	 * conditions to happen (or time out) before selecting a new queue.
3087	 */
3088	if (timer_pending(&cfqd->idle_slice_timer)) {
3089		cfqq = NULL;
3090		goto keep_queue;
3091	}
3092
3093	/*
3094	 * This is a deep seek queue, but the device is much faster than
3095	 * the queue can deliver; don't idle.
3096	 */
3097	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3098	    (cfq_cfqq_slice_new(cfqq) ||
3099	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
3100		cfq_clear_cfqq_deep(cfqq);
3101		cfq_clear_cfqq_idle_window(cfqq);
3102	}
3103
3104	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3105		cfqq = NULL;
3106		goto keep_queue;
3107	}
3108
3109	/*
3110	 * If group idle is enabled and there are requests dispatched from
3111	 * this group, wait for requests to complete.
3112	 */
3113check_group_idle:
3114	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3115	    cfqq->cfqg->dispatched &&
3116	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
3117		cfqq = NULL;
3118		goto keep_queue;
3119	}
3120
3121expire:
3122	cfq_slice_expired(cfqd, 0);
3123new_queue:
3124	/*
3125	 * Current queue expired. Check if we have to switch to a new
3126	 * service tree
3127	 */
3128	if (!new_cfqq)
3129		cfq_choose_cfqg(cfqd);
3130
3131	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
3132keep_queue:
3133	return cfqq;
3134}
3135
3136static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
3137{
3138	int dispatched = 0;
3139
3140	while (cfqq->next_rq) {
3141		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3142		dispatched++;
3143	}
3144
3145	BUG_ON(!list_empty(&cfqq->fifo));
3146
3147	/* By default cfqq is not expired if it is empty. Do it explicitly */
3148	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
3149	return dispatched;
3150}
3151
3152/*
3153 * Drain our current requests. Used for barriers and when switching
3154 * io schedulers on-the-fly.
3155 */
3156static int cfq_forced_dispatch(struct cfq_data *cfqd)
3157{
3158	struct cfq_queue *cfqq;
3159	int dispatched = 0;
3160
3161	/* Expire the timeslice of the current active queue first */
3162	cfq_slice_expired(cfqd, 0);
3163	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3164		__cfq_set_active_queue(cfqd, cfqq);
3165		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3166	}
3167
3168	BUG_ON(cfqd->busy_queues);
3169
3170	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
3171	return dispatched;
3172}
3173
3174static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3175	struct cfq_queue *cfqq)
3176{
3177	/* the queue hasn't finished any request, can't estimate */
3178	if (cfq_cfqq_slice_new(cfqq))
3179		return true;
3180	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
3181		cfqq->slice_end))
3182		return true;
3183
3184	return false;
3185}
3186
3187static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3188{
3189	unsigned int max_dispatch;
3190
3191	/*
3192	 * Drain async requests before we start sync IO
3193	 */
3194	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
3195		return false;
3196
3197	/*
3198	 * If this is an async queue and we have sync IO in flight, let it wait
3199	 */
3200	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
3201		return false;
3202
3203	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
3204	if (cfq_class_idle(cfqq))
3205		max_dispatch = 1;
3206
3207	/*
3208	 * Does this cfqq already have too much IO in flight?
3209	 */
3210	if (cfqq->dispatched >= max_dispatch) {
3211		bool promote_sync = false;
3212		/*
3213		 * idle queue must always only have a single IO in flight
3214		 */
3215		if (cfq_class_idle(cfqq))
3216			return false;
3217
3218		/*
3219		 * If there is only one sync queue,
3220		 * we can ignore the async queue here and give the sync
3221		 * queue no dispatch limit, since a sync queue can
3222		 * preempt an async queue; limiting the sync queue doesn't
3223		 * make sense. This is useful for the aiostress test.
3224		 */
3225		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3226			promote_sync = true;
3227
3228		/*
3229		 * We have other queues, don't allow more IO from this one
3230		 */
3231		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3232				!promote_sync)
3233			return false;
3234
3235		/*
3236		 * Sole queue user, no limit
3237		 */
3238		if (cfqd->busy_queues == 1 || promote_sync)
3239			max_dispatch = -1;
3240		else
3241			/*
3242			 * Normally we start throttling cfqq when cfq_quantum/2
3243			 * requests have been dispatched. But we can drive
3244			 * deeper queue depths at the beginning of the slice,
3245			 * subject to the upper limit of cfq_quantum.
3246			 */
3247			max_dispatch = cfqd->cfq_quantum;
3248	}
3249
3250	/*
3251	 * Async queues must wait a bit before being allowed dispatch.
3252	 * We also ramp up the dispatch depth gradually for async IO,
3253	 * based on the last sync IO we serviced
3254	 */
3255	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
3256		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
3257		unsigned int depth;
3258
3259		depth = last_sync / cfqd->cfq_slice[1];
3260		if (!depth && !cfqq->dispatched)
3261			depth = 1;
3262		if (depth < max_dispatch)
3263			max_dispatch = depth;
3264	}
3265
3266	/*
3267	 * If we're below the current max, allow a dispatch
3268	 */
3269	return cfqq->dispatched < max_dispatch;
3270}
3271
3272/*
3273 * Dispatch a request from cfqq, moving them to the request queue
3274 * dispatch list.
3275 */
3276static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3277{
3278	struct request *rq;
3279
3280	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3281
3282	if (!cfq_may_dispatch(cfqd, cfqq))
3283		return false;
3284
3285	/*
3286	 * follow expired path, else get first next available
3287	 */
3288	rq = cfq_check_fifo(cfqq);
3289	if (!rq)
3290		rq = cfqq->next_rq;
3291
3292	/*
3293	 * insert request into driver dispatch list
3294	 */
3295	cfq_dispatch_insert(cfqd->queue, rq);
3296
3297	if (!cfqd->active_cic) {
3298		struct cfq_io_cq *cic = RQ_CIC(rq);
3299
3300		atomic_long_inc(&cic->icq.ioc->refcount);
3301		cfqd->active_cic = cic;
3302	}
3303
3304	return true;
3305}
3306
3307/*
3308 * Find the cfqq that we need to service and move a request from that to the
3309 * dispatch list
3310 */
3311static int cfq_dispatch_requests(struct request_queue *q, int force)
3312{
3313	struct cfq_data *cfqd = q->elevator->elevator_data;
3314	struct cfq_queue *cfqq;
3315
3316	if (!cfqd->busy_queues)
3317		return 0;
3318
3319	if (unlikely(force))
3320		return cfq_forced_dispatch(cfqd);
3321
3322	cfqq = cfq_select_queue(cfqd);
3323	if (!cfqq)
3324		return 0;
3325
3326	/*
3327	 * Dispatch a request from this cfqq, if it is allowed
3328	 */
3329	if (!cfq_dispatch_request(cfqd, cfqq))
3330		return 0;
3331
3332	cfqq->slice_dispatch++;
3333	cfq_clear_cfqq_must_dispatch(cfqq);
3334
3335	/*
3336	 * expire an async queue immediately if it has used up its slice. idle
3337	 * queue always expire after 1 dispatch round.
3338	 */
3339	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3340	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3341	    cfq_class_idle(cfqq))) {
3342		cfqq->slice_end = jiffies + 1;
3343		cfq_slice_expired(cfqd, 0);
3344	}
3345
3346	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
3347	return 1;
3348}
3349
3350/*
3351 * The task holds one reference to the queue, dropped when the task exits.
3352 * Each rq in flight on this queue also holds a reference, dropped when the rq is freed.
3353 *
3354 * Each cfq queue took a reference on the parent group. Drop it now.
3355 * The queue lock must be held here.
3356 */
3357static void cfq_put_queue(struct cfq_queue *cfqq)
3358{
3359	struct cfq_data *cfqd = cfqq->cfqd;
3360	struct cfq_group *cfqg;
3361
3362	BUG_ON(cfqq->ref <= 0);
3363
3364	cfqq->ref--;
3365	if (cfqq->ref)
3366		return;
3367
3368	cfq_log_cfqq(cfqd, cfqq, "put_queue");
3369	BUG_ON(rb_first(&cfqq->sort_list));
3370	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
3371	cfqg = cfqq->cfqg;
3372
3373	if (unlikely(cfqd->active_queue == cfqq)) {
3374		__cfq_slice_expired(cfqd, cfqq, 0);
3375		cfq_schedule_dispatch(cfqd);
3376	}
3377
3378	BUG_ON(cfq_cfqq_on_rr(cfqq));
3379	kmem_cache_free(cfq_pool, cfqq);
3380	cfqg_put(cfqg);
3381}
3382
3383static void cfq_put_cooperator(struct cfq_queue *cfqq)
3384{
3385	struct cfq_queue *__cfqq, *next;
3386
3387	/*
3388	 * If this queue was scheduled to merge with another queue, be
3389	 * sure to drop the reference taken on that queue (and others in
3390	 * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
3391	 */
3392	__cfqq = cfqq->new_cfqq;
3393	while (__cfqq) {
3394		if (__cfqq == cfqq) {
3395			WARN(1, "cfqq->new_cfqq loop detected\n");
3396			break;
3397		}
3398		next = __cfqq->new_cfqq;
3399		cfq_put_queue(__cfqq);
3400		__cfqq = next;
3401	}
3402}
3403
3404static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3405{
3406	if (unlikely(cfqq == cfqd->active_queue)) {
3407		__cfq_slice_expired(cfqd, cfqq, 0);
3408		cfq_schedule_dispatch(cfqd);
3409	}
3410
3411	cfq_put_cooperator(cfqq);
3412
3413	cfq_put_queue(cfqq);
3414}
3415
3416static void cfq_init_icq(struct io_cq *icq)
3417{
3418	struct cfq_io_cq *cic = icq_to_cic(icq);
3419
3420	cic->ttime.last_end_request = jiffies;
3421}
3422
3423static void cfq_exit_icq(struct io_cq *icq)
3424{
3425	struct cfq_io_cq *cic = icq_to_cic(icq);
3426	struct cfq_data *cfqd = cic_to_cfqd(cic);
3427
3428	if (cic->cfqq[BLK_RW_ASYNC]) {
3429		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
3430		cic->cfqq[BLK_RW_ASYNC] = NULL;
3431	}
3432
3433	if (cic->cfqq[BLK_RW_SYNC]) {
3434		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
3435		cic->cfqq[BLK_RW_SYNC] = NULL;
3436	}
3437}
3438
3439static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3440{
3441	struct task_struct *tsk = current;
3442	int ioprio_class;
3443
3444	if (!cfq_cfqq_prio_changed(cfqq))
3445		return;
3446
3447	ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3448	switch (ioprio_class) {
3449	default:
3450		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3451	case IOPRIO_CLASS_NONE:
3452		/*
3453		 * no prio set, inherit CPU scheduling settings
3454		 */
3455		cfqq->ioprio = task_nice_ioprio(tsk);
3456		cfqq->ioprio_class = task_nice_ioclass(tsk);
3457		break;
3458	case IOPRIO_CLASS_RT:
3459		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3460		cfqq->ioprio_class = IOPRIO_CLASS_RT;
3461		break;
3462	case IOPRIO_CLASS_BE:
3463		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3464		cfqq->ioprio_class = IOPRIO_CLASS_BE;
3465		break;
3466	case IOPRIO_CLASS_IDLE:
3467		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3468		cfqq->ioprio = 7;
3469		cfq_clear_cfqq_idle_window(cfqq);
3470		break;
3471	}
3472
3473	/*
3474	 * keep track of original prio settings in case we have to temporarily
3475	 * elevate the priority of this queue
3476	 */
3477	cfqq->org_ioprio = cfqq->ioprio;
3478	cfq_clear_cfqq_prio_changed(cfqq);
3479}
3480
3481static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3482{
3483	int ioprio = cic->icq.ioc->ioprio;
3484	struct cfq_data *cfqd = cic_to_cfqd(cic);
3485	struct cfq_queue *cfqq;
3486
3487	/*
3488	 * Check whether ioprio has changed.  The condition may trigger
3489	 * spuriously on a newly created cic but there's no harm.
3490	 */
3491	if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3492		return;
3493
3494	cfqq = cic->cfqq[BLK_RW_ASYNC];
3495	if (cfqq) {
3496		struct cfq_queue *new_cfqq;
3497		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
3498					 GFP_ATOMIC);
3499		if (new_cfqq) {
3500			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
3501			cfq_put_queue(cfqq);
3502		}
3503	}
3504
3505	cfqq = cic->cfqq[BLK_RW_SYNC];
3506	if (cfqq)
3507		cfq_mark_cfqq_prio_changed(cfqq);
3508
3509	cic->ioprio = ioprio;
3510}
3511
3512static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3513			  pid_t pid, bool is_sync)
3514{
3515	RB_CLEAR_NODE(&cfqq->rb_node);
3516	RB_CLEAR_NODE(&cfqq->p_node);
3517	INIT_LIST_HEAD(&cfqq->fifo);
3518
3519	cfqq->ref = 0;
3520	cfqq->cfqd = cfqd;
3521
3522	cfq_mark_cfqq_prio_changed(cfqq);
3523
3524	if (is_sync) {
3525		if (!cfq_class_idle(cfqq))
3526			cfq_mark_cfqq_idle_window(cfqq);
3527		cfq_mark_cfqq_sync(cfqq);
3528	}
3529	cfqq->pid = pid;
3530}
3531
3532#ifdef CONFIG_CFQ_GROUP_IOSCHED
3533static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3534{
3535	struct cfq_data *cfqd = cic_to_cfqd(cic);
3536	struct cfq_queue *sync_cfqq;
3537	uint64_t id;
3538
3539	rcu_read_lock();
3540	id = bio_blkcg(bio)->id;
3541	rcu_read_unlock();
3542
3543	/*
3544	 * Check whether blkcg has changed.  The condition may trigger
3545	 * spuriously on a newly created cic but there's no harm.
3546	 */
3547	if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
3548		return;
3549
3550	sync_cfqq = cic_to_cfqq(cic, 1);
3551	if (sync_cfqq) {
3552		/*
3553		 * Drop reference to sync queue. A new sync queue will be
3554		 * assigned in new group upon arrival of a fresh request.
3555		 */
3556		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
3557		cic_set_cfqq(cic, NULL, 1);
3558		cfq_put_queue(sync_cfqq);
3559	}
3560
3561	cic->blkcg_id = id;
3562}
3563#else
3564static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3565#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
3566
3567static struct cfq_queue *
3568cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3569		     struct bio *bio, gfp_t gfp_mask)
3570{
3571	struct blkcg *blkcg;
3572	struct cfq_queue *cfqq, *new_cfqq = NULL;
3573	struct cfq_group *cfqg;
3574
3575retry:
3576	rcu_read_lock();
3577
3578	blkcg = bio_blkcg(bio);
3579	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
3580	cfqq = cic_to_cfqq(cic, is_sync);
3581
3582	/*
3583	 * Always try a new alloc if we fell back to the OOM cfqq
3584	 * originally, since it should just be a temporary situation.
3585	 */
3586	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3587		cfqq = NULL;
3588		if (new_cfqq) {
3589			cfqq = new_cfqq;
3590			new_cfqq = NULL;
3591		} else if (gfp_mask & __GFP_WAIT) {
3592			rcu_read_unlock();
3593			spin_unlock_irq(cfqd->queue->queue_lock);
3594			new_cfqq = kmem_cache_alloc_node(cfq_pool,
3595					gfp_mask | __GFP_ZERO,
3596					cfqd->queue->node);
3597			spin_lock_irq(cfqd->queue->queue_lock);
3598			if (new_cfqq)
3599				goto retry;
3600			else
3601				return &cfqd->oom_cfqq;
3602		} else {
3603			cfqq = kmem_cache_alloc_node(cfq_pool,
3604					gfp_mask | __GFP_ZERO,
3605					cfqd->queue->node);
3606		}
3607
3608		if (cfqq) {
3609			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3610			cfq_init_prio_data(cfqq, cic);
3611			cfq_link_cfqq_cfqg(cfqq, cfqg);
3612			cfq_log_cfqq(cfqd, cfqq, "alloced");
3613		} else
3614			cfqq = &cfqd->oom_cfqq;
3615	}
3616
3617	if (new_cfqq)
3618	kmem_cache_free(cfq_pool, new_cfqq);
3619
3620	rcu_read_unlock();
3621	return cfqq;
3622	}
3623
3624static struct cfq_queue **
3625cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
3626{
3627	switch (ioprio_class) {
3628	case IOPRIO_CLASS_RT:
3629		return &cfqd->async_cfqq[0][ioprio];
3630	case IOPRIO_CLASS_NONE:
3631		ioprio = IOPRIO_NORM;
3632		/* fall through */
3633	case IOPRIO_CLASS_BE:
3634		return &cfqd->async_cfqq[1][ioprio];
3635	case IOPRIO_CLASS_IDLE:
3636		return &cfqd->async_idle_cfqq;
3637	default:
3638		BUG();
3639	}
3640}
3641
3642static struct cfq_queue *
3643cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3644	      struct bio *bio, gfp_t gfp_mask)
3645{
3646	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3647	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3648	struct cfq_queue **async_cfqq = NULL;
3649	struct cfq_queue *cfqq = NULL;
3650
3651	if (!is_sync) {
3652		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
3653		cfqq = *async_cfqq;
3654	}
3655
3656	if (!cfqq)
3657		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
3658
3659	/*
3660	 * pin the queue now that it's allocated, scheduler exit will prune it
3661	 */
3662	if (!is_sync && !(*async_cfqq)) {
3663		cfqq->ref++;
3664		*async_cfqq = cfqq;
3665	}
3666
3667	cfqq->ref++;
3668	return cfqq;
3669}
3670
3671static void
3672__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3673{
3674	unsigned long elapsed = jiffies - ttime->last_end_request;
3675	elapsed = min(elapsed, 2UL * slice_idle);
3676
3677	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3678	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3679	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3680}
3681
3682static void
3683cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3684			struct cfq_io_cq *cic)
3685{
3686	if (cfq_cfqq_sync(cfqq)) {
3687		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3688		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3689			cfqd->cfq_slice_idle);
3690	}
3691#ifdef CONFIG_CFQ_GROUP_IOSCHED
3692	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3693#endif
3694}
3695
3696static void
3697cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3698		       struct request *rq)
3699{
3700	sector_t sdist = 0;
3701	sector_t n_sec = blk_rq_sectors(rq);
3702	if (cfqq->last_request_pos) {
3703		if (cfqq->last_request_pos < blk_rq_pos(rq))
3704			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3705		else
3706			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3707	}
3708
3709	cfqq->seek_history <<= 1;
3710	if (blk_queue_nonrot(cfqd->queue))
3711		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3712	else
3713		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3714}
3715
3716/*
3717 * Disable idle window if the process thinks too long or seeks so much that
3718 * it doesn't matter
3719 */
3720static void
3721cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3722		       struct cfq_io_cq *cic)
3723{
3724	int old_idle, enable_idle;
3725
3726	/*
3727	 * Don't idle for async or idle io prio class
3728	 */
3729	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3730		return;
3731
3732	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3733
3734	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3735		cfq_mark_cfqq_deep(cfqq);
3736
3737	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3738		enable_idle = 0;
3739	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3740		 !cfqd->cfq_slice_idle ||
3741		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3742		enable_idle = 0;
3743	else if (sample_valid(cic->ttime.ttime_samples)) {
3744		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3745			enable_idle = 0;
3746		else
3747			enable_idle = 1;
3748	}
3749
3750	if (old_idle != enable_idle) {
3751		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3752		if (enable_idle)
3753			cfq_mark_cfqq_idle_window(cfqq);
3754		else
3755			cfq_clear_cfqq_idle_window(cfqq);
3756	}
3757}
3758
3759/*
3760 * Check if new_cfqq should preempt the currently active queue. Returns
3761 * false for no (or if we aren't sure); true will cause a preempt.
3762 */
3763static bool
3764cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3765		   struct request *rq)
3766{
3767	struct cfq_queue *cfqq;
3768
3769	cfqq = cfqd->active_queue;
3770	if (!cfqq)
3771		return false;
3772
3773	if (cfq_class_idle(new_cfqq))
3774		return false;
3775
3776	if (cfq_class_idle(cfqq))
3777		return true;
3778
3779	/*
3780	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3781	 */
3782	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3783		return false;
3784
3785	/*
3786	 * if the new request is sync, but the currently running queue is
3787	 * not, let the sync request have priority.
3788	 */
3789	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3790		return true;
3791
3792	if (new_cfqq->cfqg != cfqq->cfqg)
3793		return false;
3794
3795	if (cfq_slice_used(cfqq))
3796		return true;
3797
3798	/* Allow preemption only if we are idling on sync-noidle tree */
3799	if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
3800	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3801	    new_cfqq->service_tree->count == 2 &&
3802	    RB_EMPTY_ROOT(&cfqq->sort_list))
3803		return true;
3804
3805	/*
3806	 * So both queues are sync. Let the new request get disk time if
3807	 * it's a metadata request and the current queue is doing regular IO.
3808	 */
3809	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3810		return true;
3811
3812	/*
3813	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3814	 */
3815	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3816		return true;
3817
3818	/* The active queue is empty and we shouldn't idle on it - allow preemption */
3819	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3820		return true;
3821
3822	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3823		return false;
3824
3825	/*
3826	 * if this request is as-good as one we would expect from the
3827	 * current cfqq, let it preempt
3828	 */
3829	if (cfq_rq_close(cfqd, cfqq, rq))
3830		return true;
3831
3832	return false;
3833}
3834
3835/*
3836 * cfqq preempts the active queue. If we allowed preempt with no slice left,
3837 * let it have half of its nominal slice.
3838 */
3839static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3840{
3841	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3842
3843	cfq_log_cfqq(cfqd, cfqq, "preempt");
3844	cfq_slice_expired(cfqd, 1);
3845
3846	/*
3847	 * workload type is changed, don't save slice, otherwise preempt
3848	 * doesn't happen
3849	 */
3850	if (old_type != cfqq_type(cfqq))
3851		cfqq->cfqg->saved_wl_slice = 0;
3852
3853	/*
3854	 * Put the new queue at the front of the current list,
3855	 * so we know that it will be selected next.
3856	 */
3857	BUG_ON(!cfq_cfqq_on_rr(cfqq));
3858
3859	cfq_service_tree_add(cfqd, cfqq, 1);
3860
3861	cfqq->slice_end = 0;
3862	cfq_mark_cfqq_slice_new(cfqq);
3863}
3864
3865/*
3866 * Called when a new fs request (rq) is added (to cfqq). Check if there's
3867 * something we should do about it
3868 */
3869static void
3870cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3871		struct request *rq)
3872{
3873	struct cfq_io_cq *cic = RQ_CIC(rq);
3874
3875	cfqd->rq_queued++;
3876	if (rq->cmd_flags & REQ_PRIO)
3877		cfqq->prio_pending++;
3878
3879	cfq_update_io_thinktime(cfqd, cfqq, cic);
3880	cfq_update_io_seektime(cfqd, cfqq, rq);
3881	cfq_update_idle_window(cfqd, cfqq, cic);
3882
3883	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3884
3885	if (cfqq == cfqd->active_queue) {
3886		/*
3887		 * Remember that we saw a request from this process, but
3888		 * don't start queuing just yet. Otherwise we risk seeing lots
3889		 * of tiny requests, because we disrupt the normal plugging
3890		 * and merging. If the request is already larger than a single
3891		 * page, let it rip immediately. For that case we assume that
3892		 * merging is already done. Ditto for a busy system that
3893		 * has other work pending, don't risk delaying until the
3894		 * idle timer unplug to continue working.
3895		 */
3896		if (cfq_cfqq_wait_request(cfqq)) {
3897			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3898			    cfqd->busy_queues > 1) {
3899				cfq_del_timer(cfqd, cfqq);
3900				cfq_clear_cfqq_wait_request(cfqq);
3901				__blk_run_queue(cfqd->queue);
3902			} else {
3903				cfqg_stats_update_idle_time(cfqq->cfqg);
3904				cfq_mark_cfqq_must_dispatch(cfqq);
3905			}
3906		}
3907	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3908		/*
3909		 * not the active queue - expire the current slice if it is
3910		 * idle and has exceeded its mean thinktime, or if this new queue
3911		 * has some old slice time left and is of higher priority, or if
3912		 * this new queue is RT and the current one is BE
3913		 */
3914		cfq_preempt_queue(cfqd, cfqq);
3915		__blk_run_queue(cfqd->queue);
3916	}
3917}
3918
3919static void cfq_insert_request(struct request_queue *q, struct request *rq)
3920{
3921	struct cfq_data *cfqd = q->elevator->elevator_data;
3922	struct cfq_queue *cfqq = RQ_CFQQ(rq);
3923
3924	cfq_log_cfqq(cfqd, cfqq, "insert_request");
3925	cfq_init_prio_data(cfqq, RQ_CIC(rq));
3926
3927	rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
3928	list_add_tail(&rq->queuelist, &cfqq->fifo);
3929	cfq_add_rq_rb(rq);
3930	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
3931				 rq->cmd_flags);
3932	cfq_rq_enqueued(cfqd, cfqq, rq);
3933}
3934
3935/*
3936 * Update hw_tag based on peak queue depth over 50 samples under
3937 * sufficient load.
3938 */
3939static void cfq_update_hw_tag(struct cfq_data *cfqd)
3940{
3941	struct cfq_queue *cfqq = cfqd->active_queue;
3942
3943	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3944		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3945
3946	if (cfqd->hw_tag == 1)
3947		return;
3948
3949	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3950	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3951		return;
3952
3953	/*
3954	 * If the active queue doesn't have enough requests and can idle, cfq might
3955	 * not dispatch sufficient requests to the hardware. Don't zero hw_tag in
3956	 * this case.
3957	 */
3958	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3959	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3960	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3961		return;
3962
3963	if (cfqd->hw_tag_samples++ < 50)
3964		return;
3965
3966	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3967		cfqd->hw_tag = 1;
3968	else
3969		cfqd->hw_tag = 0;
3970}
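
/*
 * Informal summary: hw_tag_est_depth tracks the peak number of requests seen
 * in the driver.  Once 50 samples have been taken under sufficient load,
 * hw_tag is set to 1 (and then stays 1) if that peak reached
 * CFQ_HW_QUEUE_MIN, i.e. the device appears to queue internally (NCQ-style),
 * and to 0 otherwise.
 */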
3971
3972static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3973{
3974	struct cfq_io_cq *cic = cfqd->active_cic;
3975
3976	/* If the queue already has requests, don't wait */
3977	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3978		return false;
3979
3980	/* If there are other queues in the group, don't wait */
3981	if (cfqq->cfqg->nr_cfqq > 1)
3982		return false;
3983
3984	/* the only queue in the group, but think time is big */
3985	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3986		return false;
3987
3988	if (cfq_slice_used(cfqq))
3989		return true;
3990
3991	/* if slice left is less than think time, wait busy */
3992	if (cic && sample_valid(cic->ttime.ttime_samples)
3993	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
3994		return true;
3995
3996	/*
3997	 * If the think time is less than a jiffy, then ttime_mean=0 and the check
3998	 * above will not be true. It might happen that the slice has not expired
3999	 * yet but will expire soon (4-5 ns) during select_queue(). To cover the
4000	 * case where think time is less than a jiffy, mark the queue wait
4001	 * busy if only 1 jiffy is left in the slice.
4002	 */
4003	if (cfqq->slice_end - jiffies == 1)
4004		return true;
4005
4006	return false;
4007}
4008
4009static void cfq_completed_request(struct request_queue *q, struct request *rq)
4010{
4011	struct cfq_queue *cfqq = RQ_CFQQ(rq);
4012	struct cfq_data *cfqd = cfqq->cfqd;
4013	const int sync = rq_is_sync(rq);
4014	unsigned long now;
4015
4016	now = jiffies;
4017	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
4018		     !!(rq->cmd_flags & REQ_NOIDLE));
4019
4020	cfq_update_hw_tag(cfqd);
4021
4022	WARN_ON(!cfqd->rq_in_driver);
4023	WARN_ON(!cfqq->dispatched);
4024	cfqd->rq_in_driver--;
4025	cfqq->dispatched--;
4026	(RQ_CFQG(rq))->dispatched--;
4027	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4028				     rq_io_start_time_ns(rq), rq->cmd_flags);
4029
4030	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
4031
4032	if (sync) {
4033		struct cfq_rb_root *st;
4034
4035		RQ_CIC(rq)->ttime.last_end_request = now;
4036
4037		if (cfq_cfqq_on_rr(cfqq))
4038			st = cfqq->service_tree;
4039		else
4040			st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4041					cfqq_type(cfqq));
4042
4043		st->ttime.last_end_request = now;
4044		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
4045			cfqd->last_delayed_sync = now;
4046	}
4047
4048#ifdef CONFIG_CFQ_GROUP_IOSCHED
4049	cfqq->cfqg->ttime.last_end_request = now;
4050#endif
4051
4052	/*
4053	 * If this is the active queue, check if it needs to be expired,
4054	 * or if we want to idle in case it has no pending requests.
4055	 */
4056	if (cfqd->active_queue == cfqq) {
4057		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4058
4059		if (cfq_cfqq_slice_new(cfqq)) {
4060			cfq_set_prio_slice(cfqd, cfqq);
4061			cfq_clear_cfqq_slice_new(cfqq);
4062		}
4063
4064		/*
4065		 * Should we wait for the next request to come in before we expire
4066		 * the queue?
4067		 */
4068		if (cfq_should_wait_busy(cfqd, cfqq)) {
4069			unsigned long extend_sl = cfqd->cfq_slice_idle;
4070			if (!cfqd->cfq_slice_idle)
4071				extend_sl = cfqd->cfq_group_idle;
4072			cfqq->slice_end = jiffies + extend_sl;
4073			cfq_mark_cfqq_wait_busy(cfqq);
4074			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
4075		}
4076
4077		/*
4078		 * Idling is not enabled on:
4079		 * - expired queues
4080		 * - idle-priority queues
4081		 * - async queues
4082		 * - queues with still some requests queued
4083		 * - when there is a close cooperator
4084		 */
4085		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
4086			cfq_slice_expired(cfqd, 1);
4087		else if (sync && cfqq_empty &&
4088			 !cfq_close_cooperator(cfqd, cfqq)) {
4089			cfq_arm_slice_timer(cfqd);
4090		}
4091	}
4092
4093	if (!cfqd->rq_in_driver)
4094		cfq_schedule_dispatch(cfqd);
4095}
4096
4097static inline int __cfq_may_queue(struct cfq_queue *cfqq)
4098{
4099	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
4100		cfq_mark_cfqq_must_alloc_slice(cfqq);
4101		return ELV_MQUEUE_MUST;
4102	}
4103
4104	return ELV_MQUEUE_MAY;
4105}
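
/*
 * Rough meaning of the return values: ELV_MQUEUE_MUST tells the block layer
 * that this process should be allowed to allocate a request even if the
 * queue is congested (a queue cfq is idling on gets one such guaranteed
 * allocation per slice via the must_alloc_slice flag), while ELV_MQUEUE_MAY
 * leaves the decision to the normal congestion logic.
 */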
4106
4107static int cfq_may_queue(struct request_queue *q, int rw)
4108{
4109	struct cfq_data *cfqd = q->elevator->elevator_data;
4110	struct task_struct *tsk = current;
4111	struct cfq_io_cq *cic;
4112	struct cfq_queue *cfqq;
4113
4114	/*
4115	 * don't force setup of a queue from here, as a call to may_queue
4116	 * does not necessarily imply that a request actually will be queued.
4117	 * So just look up a possibly existing queue, or return 'may queue'
4118	 * if that fails
4119	 */
4120	cic = cfq_cic_lookup(cfqd, tsk->io_context);
4121	if (!cic)
4122		return ELV_MQUEUE_MAY;
4123
4124	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
4125	if (cfqq) {
4126		cfq_init_prio_data(cfqq, cic);
4127
4128		return __cfq_may_queue(cfqq);
4129	}
4130
4131	return ELV_MQUEUE_MAY;
4132}
4133
4134/*
4135 * queue lock held here
4136 */
4137static void cfq_put_request(struct request *rq)
4138{
4139	struct cfq_queue *cfqq = RQ_CFQQ(rq);
4140
4141	if (cfqq) {
4142		const int rw = rq_data_dir(rq);
4143
4144		BUG_ON(!cfqq->allocated[rw]);
4145		cfqq->allocated[rw]--;
4146
4147		/* Put down rq reference on cfqg */
4148		cfqg_put(RQ_CFQG(rq));
4149		rq->elv.priv[0] = NULL;
4150		rq->elv.priv[1] = NULL;
4151
4152		cfq_put_queue(cfqq);
4153	}
4154}
4155
4156static struct cfq_queue *
4157cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
4158		struct cfq_queue *cfqq)
4159{
4160	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4161	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
4162	cfq_mark_cfqq_coop(cfqq->new_cfqq);
4163	cfq_put_queue(cfqq);
4164	return cic_to_cfqq(cic, 1);
4165}
4166
4167/*
4168 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4169 * was the last process referring to said cfqq.
4170 */
4171static struct cfq_queue *
4172split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
4173{
4174	if (cfqq_process_refs(cfqq) == 1) {
4175		cfqq->pid = current->pid;
4176		cfq_clear_cfqq_coop(cfqq);
4177		cfq_clear_cfqq_split_coop(cfqq);
4178		return cfqq;
4179	}
4180
4181	cic_set_cfqq(cic, NULL, 1);
4182
4183	cfq_put_cooperator(cfqq);
4184
4185	cfq_put_queue(cfqq);
4186	return NULL;
4187}
4188/*
4189 * Allocate cfq data structures associated with this request.
4190 */
4191static int
4192cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4193		gfp_t gfp_mask)
4194{
4195	struct cfq_data *cfqd = q->elevator->elevator_data;
4196	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
4197	const int rw = rq_data_dir(rq);
4198	const bool is_sync = rq_is_sync(rq);
4199	struct cfq_queue *cfqq;
4200
4201	might_sleep_if(gfp_mask & __GFP_WAIT);
4202
4203	spin_lock_irq(q->queue_lock);
4204
4205	check_ioprio_changed(cic, bio);
4206	check_blkcg_changed(cic, bio);
4207new_queue:
4208	cfqq = cic_to_cfqq(cic, is_sync);
4209	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
4210		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
4211		cic_set_cfqq(cic, cfqq, is_sync);
4212	} else {
4213		/*
4214		 * If the queue was seeky for too long, break it apart.
4215		 */
4216		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
4217			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4218			cfqq = split_cfqq(cic, cfqq);
4219			if (!cfqq)
4220				goto new_queue;
4221		}
4222
4223		/*
4224		 * Check to see if this queue is scheduled to merge with
4225		 * another, closely cooperating queue.  The merging of
4226		 * queues happens here as it must be done in process context.
4227		 * The reference on new_cfqq was taken in merge_cfqqs.
4228		 */
4229		if (cfqq->new_cfqq)
4230			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
4231	}
4232
4233	cfqq->allocated[rw]++;
4234
4235	cfqq->ref++;
4236	cfqg_get(cfqq->cfqg);
4237	rq->elv.priv[0] = cfqq;
4238	rq->elv.priv[1] = cfqq->cfqg;
4239	spin_unlock_irq(q->queue_lock);
4240	return 0;
4241}
4242
4243static void cfq_kick_queue(struct work_struct *work)
4244{
4245	struct cfq_data *cfqd =
4246		container_of(work, struct cfq_data, unplug_work);
4247	struct request_queue *q = cfqd->queue;
4248
4249	spin_lock_irq(q->queue_lock);
4250	__blk_run_queue(cfqd->queue);
4251	spin_unlock_irq(q->queue_lock);
4252}
4253
4254/*
4255 * Timer running if the active_queue is currently idling inside its time slice
4256 */
4257static void cfq_idle_slice_timer(unsigned long data)
4258{
4259	struct cfq_data *cfqd = (struct cfq_data *) data;
4260	struct cfq_queue *cfqq;
4261	unsigned long flags;
4262	int timed_out = 1;
4263
4264	cfq_log(cfqd, "idle timer fired");
4265
4266	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4267
4268	cfqq = cfqd->active_queue;
4269	if (cfqq) {
4270		timed_out = 0;
4271
4272		/*
4273		 * We saw a request before the queue expired, let it through
4274		 */
4275		if (cfq_cfqq_must_dispatch(cfqq))
4276			goto out_kick;
4277
4278		/*
4279		 * expired
4280		 */
4281		if (cfq_slice_used(cfqq))
4282			goto expire;
4283
4284		/*
4285		 * only expire and reinvoke request handler, if there are
4286		 * other queues with pending requests
4287		 */
4288		if (!cfqd->busy_queues)
4289			goto out_cont;
4290
4291		/*
4292		 * not expired and it has a request pending, let it dispatch
4293		 */
4294		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4295			goto out_kick;
4296
4297		/*
4298		 * Queue depth flag is reset only when idling didn't succeed
4299		 */
4300		cfq_clear_cfqq_deep(cfqq);
4301	}
4302expire:
4303	cfq_slice_expired(cfqd, timed_out);
4304out_kick:
4305	cfq_schedule_dispatch(cfqd);
4306out_cont:
4307	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4308}
4309
4310static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4311{
4312	del_timer_sync(&cfqd->idle_slice_timer);
4313	cancel_work_sync(&cfqd->unplug_work);
4314}
4315
4316static void cfq_put_async_queues(struct cfq_data *cfqd)
4317{
4318	int i;
4319
4320	for (i = 0; i < IOPRIO_BE_NR; i++) {
4321		if (cfqd->async_cfqq[0][i])
4322			cfq_put_queue(cfqd->async_cfqq[0][i]);
4323		if (cfqd->async_cfqq[1][i])
4324			cfq_put_queue(cfqd->async_cfqq[1][i]);
4325	}
4326
4327	if (cfqd->async_idle_cfqq)
4328		cfq_put_queue(cfqd->async_idle_cfqq);
4329}
4330
4331static void cfq_exit_queue(struct elevator_queue *e)
4332{
4333	struct cfq_data *cfqd = e->elevator_data;
4334	struct request_queue *q = cfqd->queue;
4335
4336	cfq_shutdown_timer_wq(cfqd);
4337
4338	spin_lock_irq(q->queue_lock);
4339
4340	if (cfqd->active_queue)
4341		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
4342
4343	cfq_put_async_queues(cfqd);
4344
4345	spin_unlock_irq(q->queue_lock);
4346
4347	cfq_shutdown_timer_wq(cfqd);
4348
4349#ifdef CONFIG_CFQ_GROUP_IOSCHED
4350	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4351#else
4352	kfree(cfqd->root_group);
4353#endif
4354	kfree(cfqd);
4355}
4356
4357static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
4358{
4359	struct cfq_data *cfqd;
4360	struct blkcg_gq *blkg __maybe_unused;
4361	int i, ret;
4362	struct elevator_queue *eq;
4363
4364	eq = elevator_alloc(q, e);
4365	if (!eq)
4366		return -ENOMEM;
4367
4368	cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
4369	if (!cfqd) {
4370		kobject_put(&eq->kobj);
4371		return -ENOMEM;
4372	}
4373	eq->elevator_data = cfqd;
4374
4375	cfqd->queue = q;
4376	spin_lock_irq(q->queue_lock);
4377	q->elevator = eq;
4378	spin_unlock_irq(q->queue_lock);
4379
4380	/* Init root service tree */
4381	cfqd->grp_service_tree = CFQ_RB_ROOT;
4382
4383	/* Init root group and prefer root group over other groups by default */
4384#ifdef CONFIG_CFQ_GROUP_IOSCHED
4385	ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
4386	if (ret)
4387		goto out_free;
4388
4389	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
4390#else
4391	ret = -ENOMEM;
4392	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4393					GFP_KERNEL, cfqd->queue->node);
4394	if (!cfqd->root_group)
4395		goto out_free;
4396
4397	cfq_init_cfqg_base(cfqd->root_group);
4398#endif
4399	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
4400	cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
4401
4402	/*
4403	 * Not strictly needed (since RB_ROOT just clears the node and we
4404	 * zeroed cfqd on alloc), but better to be safe in case someone decides
4405	 * to add magic to the rb code
4406	 */
4407	for (i = 0; i < CFQ_PRIO_LISTS; i++)
4408		cfqd->prio_trees[i] = RB_ROOT;
4409
4410	/*
4411	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
4412	 * Grab a permanent reference to it, so that the normal code flow
4413	 * will not attempt to free it.  oom_cfqq is linked to root_group
4414	 * but shouldn't hold a reference as it'll never be unlinked.  Lose
4415	 * the reference from linking right away.
4416	 */
4417	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4418	cfqd->oom_cfqq.ref++;
4419
4420	spin_lock_irq(q->queue_lock);
4421	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4422	cfqg_put(cfqd->root_group);
4423	spin_unlock_irq(q->queue_lock);
4424
4425	init_timer(&cfqd->idle_slice_timer);
4426	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4427	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4428
4429	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4430
4431	cfqd->cfq_quantum = cfq_quantum;
4432	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4433	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4434	cfqd->cfq_back_max = cfq_back_max;
4435	cfqd->cfq_back_penalty = cfq_back_penalty;
4436	cfqd->cfq_slice[0] = cfq_slice_async;
4437	cfqd->cfq_slice[1] = cfq_slice_sync;
4438	cfqd->cfq_target_latency = cfq_target_latency;
4439	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4440	cfqd->cfq_slice_idle = cfq_slice_idle;
4441	cfqd->cfq_group_idle = cfq_group_idle;
4442	cfqd->cfq_latency = 1;
4443	cfqd->hw_tag = -1;
4444	/*
4445	 * we optimistically start assuming sync ops weren't delayed in the last
4446	 * second, in order to have larger depth for async operations.
4447	 */
4448	cfqd->last_delayed_sync = jiffies - HZ;
4449	return 0;
4450
4451out_free:
4452	kfree(cfqd);
4453	kobject_put(&eq->kobj);
4454	return ret;
4455}
4456
4457/*
4458 * sysfs parts below -->
4459 */
4460static ssize_t
4461cfq_var_show(unsigned int var, char *page)
4462{
4463	return sprintf(page, "%d\n", var);
4464}
4465
4466static ssize_t
4467cfq_var_store(unsigned int *var, const char *page, size_t count)
4468{
4469	char *p = (char *) page;
4470
4471	*var = simple_strtoul(p, &p, 10);
4472	return count;
4473}
4474
4475#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
4476static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
4477{									\
4478	struct cfq_data *cfqd = e->elevator_data;			\
4479	unsigned int __data = __VAR;					\
4480	if (__CONV)							\
4481		__data = jiffies_to_msecs(__data);			\
4482	return cfq_var_show(__data, (page));				\
4483}
4484SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4485SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4486SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4487SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4488SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4489SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4490SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4491SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4492SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4493SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4494SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4495SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4496#undef SHOW_FUNCTION
4497
4498#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
4499static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
4500{									\
4501	struct cfq_data *cfqd = e->elevator_data;			\
4502	unsigned int __data;						\
4503	int ret = cfq_var_store(&__data, (page), count);		\
4504	if (__data < (MIN))						\
4505		__data = (MIN);						\
4506	else if (__data > (MAX))					\
4507		__data = (MAX);						\
4508	if (__CONV)							\
4509		*(__PTR) = msecs_to_jiffies(__data);			\
4510	else								\
4511		*(__PTR) = __data;					\
4512	return ret;							\
4513}
4514STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4515STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4516		UINT_MAX, 1);
4517STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4518		UINT_MAX, 1);
4519STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4520STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4521		UINT_MAX, 0);
4522STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4523STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4524STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4525STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4526STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4527		UINT_MAX, 0);
4528STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4529STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
4530#undef STORE_FUNCTION
4531
4532#define CFQ_ATTR(name) \
4533	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4534
4535static struct elv_fs_entry cfq_attrs[] = {
4536	CFQ_ATTR(quantum),
4537	CFQ_ATTR(fifo_expire_sync),
4538	CFQ_ATTR(fifo_expire_async),
4539	CFQ_ATTR(back_seek_max),
4540	CFQ_ATTR(back_seek_penalty),
4541	CFQ_ATTR(slice_sync),
4542	CFQ_ATTR(slice_async),
4543	CFQ_ATTR(slice_async_rq),
4544	CFQ_ATTR(slice_idle),
4545	CFQ_ATTR(group_idle),
4546	CFQ_ATTR(low_latency),
4547	CFQ_ATTR(target_latency),
4548	__ATTR_NULL
4549};
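
/*
 * These attributes typically appear under /sys/block/<dev>/queue/iosched/
 * once cfq is the active scheduler, e.g.
 *
 *	echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * Values handled with __CONV above are exposed in milliseconds and converted
 * to/from jiffies internally.
 */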
4550
4551static struct elevator_type iosched_cfq = {
4552	.ops = {
4553		.elevator_merge_fn = 		cfq_merge,
4554		.elevator_merged_fn =		cfq_merged_request,
4555		.elevator_merge_req_fn =	cfq_merged_requests,
4556		.elevator_allow_merge_fn =	cfq_allow_merge,
4557		.elevator_bio_merged_fn =	cfq_bio_merged,
4558		.elevator_dispatch_fn =		cfq_dispatch_requests,
4559		.elevator_add_req_fn =		cfq_insert_request,
4560		.elevator_activate_req_fn =	cfq_activate_request,
4561		.elevator_deactivate_req_fn =	cfq_deactivate_request,
4562		.elevator_completed_req_fn =	cfq_completed_request,
4563		.elevator_former_req_fn =	elv_rb_former_request,
4564		.elevator_latter_req_fn =	elv_rb_latter_request,
4565		.elevator_init_icq_fn =		cfq_init_icq,
4566		.elevator_exit_icq_fn =		cfq_exit_icq,
4567		.elevator_set_req_fn =		cfq_set_request,
4568		.elevator_put_req_fn =		cfq_put_request,
4569		.elevator_may_queue_fn =	cfq_may_queue,
4570		.elevator_init_fn =		cfq_init_queue,
4571		.elevator_exit_fn =		cfq_exit_queue,
 
4572	},
4573	.icq_size	=	sizeof(struct cfq_io_cq),
4574	.icq_align	=	__alignof__(struct cfq_io_cq),
4575	.elevator_attrs =	cfq_attrs,
4576	.elevator_name	=	"cfq",
4577	.elevator_owner =	THIS_MODULE,
4578};
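
/*
 * Selecting this scheduler at runtime usually looks like
 *
 *	echo cfq > /sys/block/<dev>/queue/scheduler
 *
 * or, at boot, via the "elevator=cfq" kernel parameter.
 */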
4579
4580#ifdef CONFIG_CFQ_GROUP_IOSCHED
4581static struct blkcg_policy blkcg_policy_cfq = {
4582	.pd_size		= sizeof(struct cfq_group),
4583	.cftypes		= cfq_blkcg_files,
4584
 
4585	.pd_init_fn		= cfq_pd_init,
4586	.pd_offline_fn		= cfq_pd_offline,
 
4587	.pd_reset_stats_fn	= cfq_pd_reset_stats,
4588};
4589#endif
4590
4591static int __init cfq_init(void)
4592{
4593	int ret;
4594
4595	/*
4596	 * could be 0 on HZ < 1000 setups
4597	 */
4598	if (!cfq_slice_async)
4599		cfq_slice_async = 1;
4600	if (!cfq_slice_idle)
4601		cfq_slice_idle = 1;
4602
4603#ifdef CONFIG_CFQ_GROUP_IOSCHED
4604	if (!cfq_group_idle)
4605		cfq_group_idle = 1;
4606
4607	ret = blkcg_policy_register(&blkcg_policy_cfq);
4608	if (ret)
4609		return ret;
4610#else
4611	cfq_group_idle = 0;
4612#endif
4613
4614	ret = -ENOMEM;
4615	cfq_pool = KMEM_CACHE(cfq_queue, 0);
4616	if (!cfq_pool)
4617		goto err_pol_unreg;
4618
4619	ret = elv_register(&iosched_cfq);
4620	if (ret)
4621		goto err_free_pool;
4622
4623	return 0;
4624
4625err_free_pool:
4626	kmem_cache_destroy(cfq_pool);
4627err_pol_unreg:
4628#ifdef CONFIG_CFQ_GROUP_IOSCHED
4629	blkcg_policy_unregister(&blkcg_policy_cfq);
4630#endif
4631	return ret;
4632}
4633
4634static void __exit cfq_exit(void)
4635{
4636#ifdef CONFIG_CFQ_GROUP_IOSCHED
4637	blkcg_policy_unregister(&blkcg_policy_cfq);
4638#endif
4639	elv_unregister(&iosched_cfq);
4640	kmem_cache_destroy(cfq_pool);
4641}
4642
4643module_init(cfq_init);
4644module_exit(cfq_exit);
4645
4646MODULE_AUTHOR("Jens Axboe");
4647MODULE_LICENSE("GPL");
4648MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
v4.6
   1/*
   2 *  CFQ, or complete fairness queueing, disk scheduler.
   3 *
   4 *  Based on ideas from a previously unfinished io
   5 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
   6 *
   7 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   8 */
   9#include <linux/module.h>
  10#include <linux/slab.h>
  11#include <linux/blkdev.h>
  12#include <linux/elevator.h>
  13#include <linux/jiffies.h>
  14#include <linux/rbtree.h>
  15#include <linux/ioprio.h>
  16#include <linux/blktrace_api.h>
  17#include <linux/blk-cgroup.h>
  18#include "blk.h"
 
  19
  20/*
  21 * tunables
  22 */
  23/* max queue in one round of service */
  24static const int cfq_quantum = 8;
  25static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  26/* maximum backwards seek, in KiB */
  27static const int cfq_back_max = 16 * 1024;
  28/* penalty of a backwards seek */
  29static const int cfq_back_penalty = 2;
  30static const int cfq_slice_sync = HZ / 10;
  31static int cfq_slice_async = HZ / 25;
  32static const int cfq_slice_async_rq = 2;
  33static int cfq_slice_idle = HZ / 125;
  34static int cfq_group_idle = HZ / 125;
  35static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
  36static const int cfq_hist_divisor = 4;
  37
  38/*
  39 * offset from end of service tree
  40 */
  41#define CFQ_IDLE_DELAY		(HZ / 5)
  42
  43/*
  44 * below this threshold, we consider thinktime immediate
  45 */
  46#define CFQ_MIN_TT		(2)
  47
  48#define CFQ_SLICE_SCALE		(5)
  49#define CFQ_HW_QUEUE_MIN	(5)
  50#define CFQ_SERVICE_SHIFT       12
  51
  52#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
  53#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
  54#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
  55#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
  56
  57#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
  58#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
  59#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])
  60
  61static struct kmem_cache *cfq_pool;
  62
  63#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
  64#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  65#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
  66
  67#define sample_valid(samples)	((samples) > 80)
  68#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)
  69
  70/* blkio-related constants */
  71#define CFQ_WEIGHT_LEGACY_MIN	10
  72#define CFQ_WEIGHT_LEGACY_DFL	500
  73#define CFQ_WEIGHT_LEGACY_MAX	1000
  74
  75struct cfq_ttime {
  76	unsigned long last_end_request;
  77
  78	unsigned long ttime_total;
  79	unsigned long ttime_samples;
  80	unsigned long ttime_mean;
  81};
  82
  83/*
  84 * Most of our rbtree usage is for sorting with min extraction, so
  85 * if we cache the leftmost node we don't have to walk down the tree
  86 * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
  87 * move this into the elevator for the rq sorting as well.
  88 */
  89struct cfq_rb_root {
  90	struct rb_root rb;
  91	struct rb_node *left;
  92	unsigned count;
  93	u64 min_vdisktime;
  94	struct cfq_ttime ttime;
  95};
  96#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
  97			.ttime = {.last_end_request = jiffies,},}
  98
  99/*
 100 * Per process-grouping structure
 101 */
 102struct cfq_queue {
 103	/* reference count */
 104	int ref;
 105	/* various state flags, see below */
 106	unsigned int flags;
 107	/* parent cfq_data */
 108	struct cfq_data *cfqd;
 109	/* service_tree member */
 110	struct rb_node rb_node;
 111	/* service_tree key */
 112	unsigned long rb_key;
 113	/* prio tree member */
 114	struct rb_node p_node;
 115	/* prio tree root we belong to, if any */
 116	struct rb_root *p_root;
 117	/* sorted list of pending requests */
 118	struct rb_root sort_list;
 119	/* if fifo isn't expired, next request to serve */
 120	struct request *next_rq;
 121	/* requests queued in sort_list */
 122	int queued[2];
 123	/* currently allocated requests */
 124	int allocated[2];
 125	/* fifo list of requests in sort_list */
 126	struct list_head fifo;
 127
 128	/* time when queue got scheduled in to dispatch first request. */
 129	unsigned long dispatch_start;
 130	unsigned int allocated_slice;
 131	unsigned int slice_dispatch;
 132	/* time when first request from queue completed and slice started. */
 133	unsigned long slice_start;
 134	unsigned long slice_end;
 135	long slice_resid;
 136
 137	/* pending priority requests */
 138	int prio_pending;
 139	/* number of requests that are on the dispatch list or inside driver */
 140	int dispatched;
 141
 142	/* io prio of this group */
 143	unsigned short ioprio, org_ioprio;
 144	unsigned short ioprio_class;
 145
 146	pid_t pid;
 147
 148	u32 seek_history;
 149	sector_t last_request_pos;
 150
 151	struct cfq_rb_root *service_tree;
 152	struct cfq_queue *new_cfqq;
 153	struct cfq_group *cfqg;
 154	/* Number of sectors dispatched from queue in single dispatch round */
 155	unsigned long nr_sectors;
 156};
 157
 158/*
 159 * First index in the service_trees.
 160 * IDLE is handled separately, so it has negative index
 161 */
 162enum wl_class_t {
 163	BE_WORKLOAD = 0,
 164	RT_WORKLOAD = 1,
 165	IDLE_WORKLOAD = 2,
 166	CFQ_PRIO_NR,
 167};
 168
 169/*
 170 * Second index in the service_trees.
 171 */
 172enum wl_type_t {
 173	ASYNC_WORKLOAD = 0,
 174	SYNC_NOIDLE_WORKLOAD = 1,
 175	SYNC_WORKLOAD = 2
 176};
 177
 178struct cfqg_stats {
 179#ifdef CONFIG_CFQ_GROUP_IOSCHED
 180	/* number of ios merged */
 181	struct blkg_rwstat		merged;
 182	/* total time spent on device in ns, may not be accurate w/ queueing */
 183	struct blkg_rwstat		service_time;
 184	/* total time spent waiting in scheduler queue in ns */
 185	struct blkg_rwstat		wait_time;
 186	/* number of IOs queued up */
 187	struct blkg_rwstat		queued;
 188	/* total disk time and nr sectors dispatched by this group */
 189	struct blkg_stat		time;
 190#ifdef CONFIG_DEBUG_BLK_CGROUP
 191	/* time not charged to this cgroup */
 192	struct blkg_stat		unaccounted_time;
 193	/* sum of number of ios queued across all samples */
 194	struct blkg_stat		avg_queue_size_sum;
 195	/* count of samples taken for average */
 196	struct blkg_stat		avg_queue_size_samples;
 197	/* how many times this group has been removed from service tree */
 198	struct blkg_stat		dequeue;
 199	/* total time spent waiting for it to be assigned a timeslice. */
 200	struct blkg_stat		group_wait_time;
 201	/* time spent idling for this blkcg_gq */
 202	struct blkg_stat		idle_time;
 203	/* total time the active queue was empty while other requests were queued */
 204	struct blkg_stat		empty_time;
 205	/* fields after this shouldn't be cleared on stat reset */
 206	uint64_t			start_group_wait_time;
 207	uint64_t			start_idle_time;
 208	uint64_t			start_empty_time;
 209	uint16_t			flags;
 210#endif	/* CONFIG_DEBUG_BLK_CGROUP */
 211#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 212};
 213
 214/* Per-cgroup data */
 215struct cfq_group_data {
 216	/* must be the first member */
 217	struct blkcg_policy_data cpd;
 218
 219	unsigned int weight;
 220	unsigned int leaf_weight;
 221};
 222
 223/* This is per cgroup per device grouping structure */
 224struct cfq_group {
 225	/* must be the first member */
 226	struct blkg_policy_data pd;
 227
 228	/* group service_tree member */
 229	struct rb_node rb_node;
 230
 231	/* group service_tree key */
 232	u64 vdisktime;
 233
 234	/*
 235	 * The number of active cfqgs and sum of their weights under this
 236	 * cfqg.  This covers this cfqg's leaf_weight and all children's
 237	 * weights, but does not cover weights of further descendants.
 238	 *
 239	 * If a cfqg is on the service tree, it's active.  An active cfqg
 240	 * also activates its parent and contributes to the children_weight
 241	 * of the parent.
 242	 */
 243	int nr_active;
 244	unsigned int children_weight;
 245
 246	/*
 247	 * vfraction is the fraction of vdisktime that the tasks in this
 248	 * cfqg are entitled to.  This is determined by compounding the
 249	 * ratios walking up from this cfqg to the root.
 250	 *
 251	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
 252	 * vfractions on a service tree is approximately 1.  The sum may
 253	 * deviate a bit due to rounding errors and fluctuations caused by
 254	 * cfqgs entering and leaving the service tree.
 255	 */
 256	unsigned int vfraction;
 257
 258	/*
 259	 * There are two weights - (internal) weight is the weight of this
 260	 * cfqg against the sibling cfqgs.  leaf_weight is the weight of
 261	 * this cfqg against the child cfqgs.  For the root cfqg, both
 262	 * weights are kept in sync for backward compatibility.
 263	 */
 264	unsigned int weight;
 265	unsigned int new_weight;
 266	unsigned int dev_weight;
 267
 268	unsigned int leaf_weight;
 269	unsigned int new_leaf_weight;
 270	unsigned int dev_leaf_weight;
 271
 272	/* number of cfqq currently on this group */
 273	int nr_cfqq;
 274
 275	/*
 276	 * Per group busy queues average. Useful for workload slice calc. We
 277	 * create the array for each prio class, but at run time it is used
 278	 * only for the RT and BE classes; the slot for the IDLE class remains unused.
 279	 * This is primarily done to avoid confusion and a gcc warning.
 280	 */
 281	unsigned int busy_queues_avg[CFQ_PRIO_NR];
 282	/*
 283	 * rr lists of queues with requests. We maintain service trees for
 284	 * RT and BE classes. These trees are subdivided in subclasses
 285	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
 286	 * class there is no subclassification and all the cfq queues go on
 287	 * a single tree service_tree_idle.
 288	 * Counts are embedded in the cfq_rb_root
 289	 */
 290	struct cfq_rb_root service_trees[2][3];
 291	struct cfq_rb_root service_tree_idle;
 292
 293	unsigned long saved_wl_slice;
 294	enum wl_type_t saved_wl_type;
 295	enum wl_class_t saved_wl_class;
 296
 297	/* number of requests that are on the dispatch list or inside driver */
 298	int dispatched;
 299	struct cfq_ttime ttime;
 300	struct cfqg_stats stats;	/* stats for this cfqg */
 301
 302	/* async queue for each priority case */
 303	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
 304	struct cfq_queue *async_idle_cfqq;
 305
 306};
 307
 308struct cfq_io_cq {
 309	struct io_cq		icq;		/* must be the first member */
 310	struct cfq_queue	*cfqq[2];
 311	struct cfq_ttime	ttime;
 312	int			ioprio;		/* the current ioprio */
 313#ifdef CONFIG_CFQ_GROUP_IOSCHED
 314	uint64_t		blkcg_serial_nr; /* the current blkcg serial */
 315#endif
 316};
 317
 318/*
 319 * Per block device queue structure
 320 */
 321struct cfq_data {
 322	struct request_queue *queue;
 323	/* Root service tree for cfq_groups */
 324	struct cfq_rb_root grp_service_tree;
 325	struct cfq_group *root_group;
 326
 327	/*
 328	 * The priority currently being served
 329	 */
 330	enum wl_class_t serving_wl_class;
 331	enum wl_type_t serving_wl_type;
 332	unsigned long workload_expires;
 333	struct cfq_group *serving_group;
 334
 335	/*
 336	 * Each priority tree is sorted by next_request position.  These
 337	 * trees are used when determining if two or more queues are
 338	 * interleaving requests (see cfq_close_cooperator).
 339	 */
 340	struct rb_root prio_trees[CFQ_PRIO_LISTS];
 341
 342	unsigned int busy_queues;
 343	unsigned int busy_sync_queues;
 344
 345	int rq_in_driver;
 346	int rq_in_flight[2];
 347
 348	/*
 349	 * queue-depth detection
 350	 */
 351	int rq_queued;
 352	int hw_tag;
 353	/*
 354	 * hw_tag can be
 355	 * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
 356	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
 357	 *  0 => no NCQ
 358	 */
 359	int hw_tag_est_depth;
 360	unsigned int hw_tag_samples;
 361
 362	/*
 363	 * idle window management
 364	 */
 365	struct timer_list idle_slice_timer;
 366	struct work_struct unplug_work;
 367
 368	struct cfq_queue *active_queue;
 369	struct cfq_io_cq *active_cic;
 370
 371	sector_t last_position;
 372
 373	/*
 374	 * tunables, see top of file
 375	 */
 376	unsigned int cfq_quantum;
 377	unsigned int cfq_fifo_expire[2];
 378	unsigned int cfq_back_penalty;
 379	unsigned int cfq_back_max;
 380	unsigned int cfq_slice[2];
 381	unsigned int cfq_slice_async_rq;
 382	unsigned int cfq_slice_idle;
 383	unsigned int cfq_group_idle;
 384	unsigned int cfq_latency;
 385	unsigned int cfq_target_latency;
 386
 387	/*
 388	 * Fallback dummy cfqq for extreme OOM conditions
 389	 */
 390	struct cfq_queue oom_cfqq;
 391
 392	unsigned long last_delayed_sync;
 393};
 394
 395static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 396static void cfq_put_queue(struct cfq_queue *cfqq);
 397
 398static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
 399					    enum wl_class_t class,
 400					    enum wl_type_t type)
 401{
 402	if (!cfqg)
 403		return NULL;
 404
 405	if (class == IDLE_WORKLOAD)
 406		return &cfqg->service_tree_idle;
 407
 408	return &cfqg->service_trees[class][type];
 409}
 410
 411enum cfqq_state_flags {
 412	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
 413	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
 414	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
 415	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
 416	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
 417	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
 418	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
 419	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 420	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 421	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
 422	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
 423	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
 424	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
 425};
 426
 427#define CFQ_CFQQ_FNS(name)						\
 428static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
 429{									\
 430	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
 431}									\
 432static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
 433{									\
 434	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
 435}									\
 436static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
 437{									\
 438	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
 439}
 440
 441CFQ_CFQQ_FNS(on_rr);
 442CFQ_CFQQ_FNS(wait_request);
 443CFQ_CFQQ_FNS(must_dispatch);
 444CFQ_CFQQ_FNS(must_alloc_slice);
 445CFQ_CFQQ_FNS(fifo_expire);
 446CFQ_CFQQ_FNS(idle_window);
 447CFQ_CFQQ_FNS(prio_changed);
 448CFQ_CFQQ_FNS(slice_new);
 449CFQ_CFQQ_FNS(sync);
 450CFQ_CFQQ_FNS(coop);
 451CFQ_CFQQ_FNS(split_coop);
 452CFQ_CFQQ_FNS(deep);
 453CFQ_CFQQ_FNS(wait_busy);
 454#undef CFQ_CFQQ_FNS
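
/*
 * For example, CFQ_CFQQ_FNS(on_rr) above expands to cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which set, clear and test
 * bit CFQ_CFQQ_FLAG_on_rr in cfqq->flags respectively.
 */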
 455
 456#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 457
 458/* cfqg stats flags */
 459enum cfqg_stats_flags {
 460	CFQG_stats_waiting = 0,
 461	CFQG_stats_idling,
 462	CFQG_stats_empty,
 463};
 464
 465#define CFQG_FLAG_FNS(name)						\
 466static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
 467{									\
 468	stats->flags |= (1 << CFQG_stats_##name);			\
 469}									\
 470static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
 471{									\
 472	stats->flags &= ~(1 << CFQG_stats_##name);			\
 473}									\
 474static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
 475{									\
 476	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
 477}									\
 478
 479CFQG_FLAG_FNS(waiting)
 480CFQG_FLAG_FNS(idling)
 481CFQG_FLAG_FNS(empty)
 482#undef CFQG_FLAG_FNS
 483
 484/* This should be called with the queue_lock held. */
 485static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
 486{
 487	unsigned long long now;
 488
 489	if (!cfqg_stats_waiting(stats))
 490		return;
 491
 492	now = sched_clock();
 493	if (time_after64(now, stats->start_group_wait_time))
 494		blkg_stat_add(&stats->group_wait_time,
 495			      now - stats->start_group_wait_time);
 496	cfqg_stats_clear_waiting(stats);
 497}
 498
 499/* This should be called with the queue_lock held. */
 500static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
 501						 struct cfq_group *curr_cfqg)
 502{
 503	struct cfqg_stats *stats = &cfqg->stats;
 504
 505	if (cfqg_stats_waiting(stats))
 506		return;
 507	if (cfqg == curr_cfqg)
 508		return;
 509	stats->start_group_wait_time = sched_clock();
 510	cfqg_stats_mark_waiting(stats);
 511}
 512
 513/* This should be called with the queue_lock held. */
 514static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
 515{
 516	unsigned long long now;
 517
 518	if (!cfqg_stats_empty(stats))
 519		return;
 520
 521	now = sched_clock();
 522	if (time_after64(now, stats->start_empty_time))
 523		blkg_stat_add(&stats->empty_time,
 524			      now - stats->start_empty_time);
 525	cfqg_stats_clear_empty(stats);
 526}
 527
 528static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
 529{
 530	blkg_stat_add(&cfqg->stats.dequeue, 1);
 531}
 532
 533static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
 534{
 535	struct cfqg_stats *stats = &cfqg->stats;
 536
 537	if (blkg_rwstat_total(&stats->queued))
 538		return;
 539
 540	/*
 541	 * group is already marked empty. This can happen if the cfqq got a new
 542	 * request in the parent group and moved to this group while being added
 543	 * to the service tree. Just ignore the event and move on.
 544	 */
 545	if (cfqg_stats_empty(stats))
 546		return;
 547
 548	stats->start_empty_time = sched_clock();
 549	cfqg_stats_mark_empty(stats);
 550}
 551
 552static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
 553{
 554	struct cfqg_stats *stats = &cfqg->stats;
 555
 556	if (cfqg_stats_idling(stats)) {
 557		unsigned long long now = sched_clock();
 558
 559		if (time_after64(now, stats->start_idle_time))
 560			blkg_stat_add(&stats->idle_time,
 561				      now - stats->start_idle_time);
 562		cfqg_stats_clear_idling(stats);
 563	}
 564}
 565
 566static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
 567{
 568	struct cfqg_stats *stats = &cfqg->stats;
 569
 570	BUG_ON(cfqg_stats_idling(stats));
 571
 572	stats->start_idle_time = sched_clock();
 573	cfqg_stats_mark_idling(stats);
 574}
 575
 576static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
 577{
 578	struct cfqg_stats *stats = &cfqg->stats;
 579
 580	blkg_stat_add(&stats->avg_queue_size_sum,
 581		      blkg_rwstat_total(&stats->queued));
 582	blkg_stat_add(&stats->avg_queue_size_samples, 1);
 583	cfqg_stats_update_group_wait_time(stats);
 584}
 585
 586#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 587
 588static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
 589static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
 590static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
 591static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
 592static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
 593static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
 594static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 595
 596#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 597
 598#ifdef CONFIG_CFQ_GROUP_IOSCHED
 599
 600static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
 601{
 602	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
 603}
 604
 605static struct cfq_group_data
 606*cpd_to_cfqgd(struct blkcg_policy_data *cpd)
 607{
 608	return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
 609}
 610
 611static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
 612{
 613	return pd_to_blkg(&cfqg->pd);
 614}
 615
 616static struct blkcg_policy blkcg_policy_cfq;
 617
 618static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
 619{
 620	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
 621}
 622
 623static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
 624{
 625	return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
 626}
 627
 628static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
 629{
 630	struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
 631
 632	return pblkg ? blkg_to_cfqg(pblkg) : NULL;
 633}
 634
 635static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
 636				      struct cfq_group *ancestor)
 637{
 638	return cgroup_is_descendant(cfqg_to_blkg(cfqg)->blkcg->css.cgroup,
 639				    cfqg_to_blkg(ancestor)->blkcg->css.cgroup);
 640}
 641
 642static inline void cfqg_get(struct cfq_group *cfqg)
 643{
 644	return blkg_get(cfqg_to_blkg(cfqg));
 645}
 646
 647static inline void cfqg_put(struct cfq_group *cfqg)
 648{
 649	return blkg_put(cfqg_to_blkg(cfqg));
 650}
 651
 652#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
 653	char __pbuf[128];						\
 654									\
 655	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
 656	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
 657			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
 658			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
 659			  __pbuf, ##args);				\
 660} while (0)
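
/*
 * With blktrace running, the macro above emits messages of the form
 * "cfq<pid><S|A><N or blank> <cgroup path> <message>", where 'S'/'A' marks
 * sync vs. async and 'N' marks a SYNC_NOIDLE queue.
 */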
 661
 662#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
 663	char __pbuf[128];						\
 664									\
 665	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
 666	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
 667} while (0)
 668
 669static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
 670					    struct cfq_group *curr_cfqg, int rw)
 671{
 672	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
 673	cfqg_stats_end_empty_time(&cfqg->stats);
 674	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
 675}
 676
 677static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 678			unsigned long time, unsigned long unaccounted_time)
 679{
 680	blkg_stat_add(&cfqg->stats.time, time);
 681#ifdef CONFIG_DEBUG_BLK_CGROUP
 682	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
 683#endif
 684}
 685
 686static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
 687{
 688	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
 689}
 690
 691static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
 692{
 693	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
 694}
 695
 696static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 697			uint64_t start_time, uint64_t io_start_time, int rw)
 698{
 699	struct cfqg_stats *stats = &cfqg->stats;
 700	unsigned long long now = sched_clock();
 701
 702	if (time_after64(now, io_start_time))
 703		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
 704	if (time_after64(io_start_time, start_time))
 705		blkg_rwstat_add(&stats->wait_time, rw,
 706				io_start_time - start_time);
 707}
 708
 709/* @stats = 0 */
 710static void cfqg_stats_reset(struct cfqg_stats *stats)
 711{
 712	/* queued stats shouldn't be cleared */
 713	blkg_rwstat_reset(&stats->merged);
 714	blkg_rwstat_reset(&stats->service_time);
 715	blkg_rwstat_reset(&stats->wait_time);
 716	blkg_stat_reset(&stats->time);
 717#ifdef CONFIG_DEBUG_BLK_CGROUP
 718	blkg_stat_reset(&stats->unaccounted_time);
 719	blkg_stat_reset(&stats->avg_queue_size_sum);
 720	blkg_stat_reset(&stats->avg_queue_size_samples);
 721	blkg_stat_reset(&stats->dequeue);
 722	blkg_stat_reset(&stats->group_wait_time);
 723	blkg_stat_reset(&stats->idle_time);
 724	blkg_stat_reset(&stats->empty_time);
 725#endif
 726}
 727
 728/* @to += @from */
 729static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
 730{
 731	/* queued stats shouldn't be cleared */
 732	blkg_rwstat_add_aux(&to->merged, &from->merged);
 733	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
 734	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
 735	blkg_stat_add_aux(&to->time, &from->time);
 736#ifdef CONFIG_DEBUG_BLK_CGROUP
 737	blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
 738	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
 739	blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
 740	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
 741	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
 742	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
 743	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
 744#endif
 745}
 746
 747/*
 748 * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
 749 * recursive stats can still account for the amount used by this cfqg after
 750 * it's gone.
 751 */
 752static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
 753{
 754	struct cfq_group *parent = cfqg_parent(cfqg);
 755
 756	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
 757
 758	if (unlikely(!parent))
 759		return;
 760
 761	cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
 
 762	cfqg_stats_reset(&cfqg->stats);
 
 763}
 764
 765#else	/* CONFIG_CFQ_GROUP_IOSCHED */
 766
 767static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
 768static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
 769				      struct cfq_group *ancestor)
 770{
 771	return true;
 772}
 773static inline void cfqg_get(struct cfq_group *cfqg) { }
 774static inline void cfqg_put(struct cfq_group *cfqg) { }
 775
 776#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
 777	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid,	\
 778			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
 779			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
 780				##args)
 781#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
 782
 783static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
 784			struct cfq_group *curr_cfqg, int rw) { }
 785static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 786			unsigned long time, unsigned long unaccounted_time) { }
 787static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
 788static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
 789static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
 790			uint64_t start_time, uint64_t io_start_time, int rw) { }
 791
 792#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 793
 794#define cfq_log(cfqd, fmt, args...)	\
 795	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 796
 797/* Traverses through cfq group service trees */
 798#define for_each_cfqg_st(cfqg, i, j, st) \
 799	for (i = 0; i <= IDLE_WORKLOAD; i++) \
 800		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
 801			: &cfqg->service_tree_idle; \
 802			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
 803			(i == IDLE_WORKLOAD && j == 0); \
 804			j++, st = i < IDLE_WORKLOAD ? \
 805			&cfqg->service_trees[i][j]: NULL) \
 806
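
/*
 * The iterator above visits, in order, the BE and then RT service trees for
 * the ASYNC, SYNC_NOIDLE and SYNC workload types (service_trees[0..1][0..2])
 * and finally the single service_tree_idle used for the IDLE class.
 */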
 807static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
 808	struct cfq_ttime *ttime, bool group_idle)
 809{
 810	unsigned long slice;
 811	if (!sample_valid(ttime->ttime_samples))
 812		return false;
 813	if (group_idle)
 814		slice = cfqd->cfq_group_idle;
 815	else
 816		slice = cfqd->cfq_slice_idle;
 817	return ttime->ttime_mean > slice;
 818}
 819
 820static inline bool iops_mode(struct cfq_data *cfqd)
 821{
 822	/*
 823	 * If we are not idling on queues and it is an NCQ drive, requests are
 824	 * executed in parallel and measuring time is not possible in most
 825	 * cases unless we drive shallower queue depths, which itself becomes
 826	 * a performance bottleneck. In such cases switch to providing
 827	 * fairness in terms of number of IOs.
 828	 */
 829	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
 830		return true;
 831	else
 832		return false;
 833}
 834
 835static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
 836{
 837	if (cfq_class_idle(cfqq))
 838		return IDLE_WORKLOAD;
 839	if (cfq_class_rt(cfqq))
 840		return RT_WORKLOAD;
 841	return BE_WORKLOAD;
 842}
 843
 844
 845static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
 846{
 847	if (!cfq_cfqq_sync(cfqq))
 848		return ASYNC_WORKLOAD;
 849	if (!cfq_cfqq_idle_window(cfqq))
 850		return SYNC_NOIDLE_WORKLOAD;
 851	return SYNC_WORKLOAD;
 852}
 853
 854static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
 855					struct cfq_data *cfqd,
 856					struct cfq_group *cfqg)
 857{
 858	if (wl_class == IDLE_WORKLOAD)
 859		return cfqg->service_tree_idle.count;
 860
 861	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
 862		cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
 863		cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
 864}
 865
 866static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
 867					struct cfq_group *cfqg)
 868{
 869	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
 870		cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
 871}
 872
 873static void cfq_dispatch_insert(struct request_queue *, struct request *);
 874static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
 875				       struct cfq_io_cq *cic, struct bio *bio);
 876
 877static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
 878{
 879	/* cic->icq is the first member, %NULL will convert to %NULL */
 880	return container_of(icq, struct cfq_io_cq, icq);
 881}
 882
 883static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
 884					       struct io_context *ioc)
 885{
 886	if (ioc)
 887		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
 888	return NULL;
 889}
 890
 891static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
 892{
 893	return cic->cfqq[is_sync];
 894}
 895
 896static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
 897				bool is_sync)
 898{
 899	cic->cfqq[is_sync] = cfqq;
 900}
 901
 902static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
 903{
 904	return cic->icq.q->elevator->elevator_data;
 905}
 906
 907/*
 908 * We regard a request as SYNC, if it's either a read or has the SYNC bit
  909 * set (in which case it could also be a direct WRITE).
 910 */
 911static inline bool cfq_bio_sync(struct bio *bio)
 912{
 913	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 914}
 915
 916/*
 917 * scheduler run of queue, if there are requests pending and no one in the
 918 * driver that will restart queueing
 919 */
 920static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 921{
 922	if (cfqd->busy_queues) {
 923		cfq_log(cfqd, "schedule dispatch");
 924		kblockd_schedule_work(&cfqd->unplug_work);
 925	}
 926}
 927
 928/*
 929 * Scale schedule slice based on io priority. Use the sync time slice only
 930 * if a queue is marked sync and has sync io queued. A sync queue with async
 931 * io only, should not get full sync slice length.
 932 */
 933static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 934				 unsigned short prio)
 935{
 936	const int base_slice = cfqd->cfq_slice[sync];
 937
 938	WARN_ON(prio >= IOPRIO_BE_NR);
 939
 940	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
 941}
 942
 943static inline int
 944cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 945{
 946	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
 947}
 948
 949/**
 950 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 951 * @charge: disk time being charged
 952 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 953 *
 954 * Scale @charge according to @vfraction, which is in range (0, 1].  The
 955 * scaling is inversely proportional.
 956 *
 957 * scaled = charge / vfraction
 958 *
 959 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 960 */
 961static inline u64 cfqg_scale_charge(unsigned long charge,
 962				    unsigned int vfraction)
 963{
 964	u64 c = charge << CFQ_SERVICE_SHIFT;	/* make it fixed point */
 965
 966	/* charge / vfraction */
 967	c <<= CFQ_SERVICE_SHIFT;
 968	do_div(c, vfraction);
 969	return c;
 970}
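/*
 * Editor's illustration, not part of the original source: a worked example
 * of the fixed-point math above, assuming CFQ_SERVICE_SHIFT == 12.  A group
 * that owns half of the device has vfraction == 1 << 11 (0.5 in fixed
 * point).  Charging it 8 jiffies of disk time:
 *
 *	c   = 8 << 12;			c == 32768  (charge in fixed point)
 *	c <<= 12;			c == 134217728
 *	do_div(c, 1 << 11);		c == 65536 == 16 << 12
 *
 * so its vdisktime advances by 16 jiffies worth of vtime - twice the
 * wall-clock charge, because the group only owns half of the device.
 */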
 971
 972static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
 973{
 974	s64 delta = (s64)(vdisktime - min_vdisktime);
 975	if (delta > 0)
 976		min_vdisktime = vdisktime;
 977
 978	return min_vdisktime;
 979}
 980
 981static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
 982{
 983	s64 delta = (s64)(vdisktime - min_vdisktime);
 984	if (delta < 0)
 985		min_vdisktime = vdisktime;
 986
 987	return min_vdisktime;
 988}
 989
 990static void update_min_vdisktime(struct cfq_rb_root *st)
 991{
 992	struct cfq_group *cfqg;
 993
 994	if (st->left) {
 995		cfqg = rb_entry_cfqg(st->left);
 996		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
 997						  cfqg->vdisktime);
 998	}
 999}
1000
1001/*
 1002 * Get the averaged number of queues of RT/BE priority.
 1003 * The average is updated with a formula that gives more weight to higher
 1004 * numbers, so that it follows sudden increases quickly and decays slowly.
1005 */
1006
1007static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
1008					struct cfq_group *cfqg, bool rt)
1009{
1010	unsigned min_q, max_q;
1011	unsigned mult  = cfq_hist_divisor - 1;
1012	unsigned round = cfq_hist_divisor / 2;
1013	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
1014
1015	min_q = min(cfqg->busy_queues_avg[rt], busy);
1016	max_q = max(cfqg->busy_queues_avg[rt], busy);
1017	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
1018		cfq_hist_divisor;
1019	return cfqg->busy_queues_avg[rt];
1020}
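/*
 * Editor's illustration, not part of the original source: with
 * cfq_hist_divisor == 4 the update above is a weighted average biased
 * toward the larger of (old average, current busy count).  Starting from an
 * average of 2 and seeing a burst of 6 busy queues:
 *
 *	avg = (3 * max(2, 6) + min(2, 6) + 2) / 4 = (18 + 2 + 2) / 4 = 5
 *
 * so the estimate jumps almost all the way up in one step, while dropping
 * from an average of 6 back to 2 busy queues only lowers it to
 * (3 * 6 + 2 + 2) / 4 = 5 on the first update, i.e. it decays slowly.
 */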
1021
1022static inline unsigned
1023cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
1024{
1025	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
1026}
1027
1028static inline unsigned
1029cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1030{
1031	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
1032	if (cfqd->cfq_latency) {
1033		/*
1034		 * interested queues (we consider only the ones with the same
1035		 * priority class in the cfq group)
1036		 */
1037		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
1038						cfq_class_rt(cfqq));
1039		unsigned sync_slice = cfqd->cfq_slice[1];
1040		unsigned expect_latency = sync_slice * iq;
1041		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
1042
1043		if (expect_latency > group_slice) {
1044			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
1045			/* scale low_slice according to IO priority
1046			 * and sync vs async */
1047			unsigned low_slice =
1048				min(slice, base_low_slice * slice / sync_slice);
1049			/* the adapted slice value is scaled to fit all iqs
1050			 * into the target latency */
1051			slice = max(slice * group_slice / expect_latency,
1052				    low_slice);
1053		}
1054	}
1055	return slice;
1056}
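/*
 * Editor's illustration, not part of the original source, using the default
 * tunables expressed in milliseconds (sync slice 100ms, slice_idle 8ms,
 * target latency 300ms): suppose the cfq group owns the whole device
 * (group_slice == 300ms) and has iq == 5 interested sync queues, so
 * expect_latency == 500ms > group_slice.  For a queue whose priority slice
 * is 100ms:
 *
 *	base_low_slice = 2 * 8ms = 16ms
 *	low_slice      = min(100ms, 16ms * 100 / 100) = 16ms
 *	slice          = max(100ms * 300 / 500, 16ms) = 60ms
 *
 * i.e. each slice is shrunk so the five queues roughly fit into the 300ms
 * target latency, but never below the low_slice floor.
 */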
1057
1058static inline void
1059cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1060{
1061	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
1062
1063	cfqq->slice_start = jiffies;
1064	cfqq->slice_end = jiffies + slice;
1065	cfqq->allocated_slice = slice;
1066	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
1067}
1068
1069/*
1070 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
1071 * isn't valid until the first request from the dispatch is activated
1072 * and the slice time set.
1073 */
1074static inline bool cfq_slice_used(struct cfq_queue *cfqq)
1075{
1076	if (cfq_cfqq_slice_new(cfqq))
1077		return false;
1078	if (time_before(jiffies, cfqq->slice_end))
1079		return false;
1080
1081	return true;
1082}
1083
1084/*
 1085 * Lifted from AS - choose which of rq1 and rq2 is best served now.
1086 * We choose the request that is closest to the head right now. Distance
1087 * behind the head is penalized and only allowed to a certain extent.
1088 */
1089static struct request *
1090cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
1091{
1092	sector_t s1, s2, d1 = 0, d2 = 0;
1093	unsigned long back_max;
1094#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
1095#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
1096	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1097
1098	if (rq1 == NULL || rq1 == rq2)
1099		return rq2;
1100	if (rq2 == NULL)
1101		return rq1;
1102
1103	if (rq_is_sync(rq1) != rq_is_sync(rq2))
1104		return rq_is_sync(rq1) ? rq1 : rq2;
1105
1106	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
1107		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
1108
1109	s1 = blk_rq_pos(rq1);
1110	s2 = blk_rq_pos(rq2);
1111
1112	/*
1113	 * by definition, 1KiB is 2 sectors
1114	 */
1115	back_max = cfqd->cfq_back_max * 2;
1116
1117	/*
1118	 * Strict one way elevator _except_ in the case where we allow
1119	 * short backward seeks which are biased as twice the cost of a
1120	 * similar forward seek.
1121	 */
1122	if (s1 >= last)
1123		d1 = s1 - last;
1124	else if (s1 + back_max >= last)
1125		d1 = (last - s1) * cfqd->cfq_back_penalty;
1126	else
1127		wrap |= CFQ_RQ1_WRAP;
1128
1129	if (s2 >= last)
1130		d2 = s2 - last;
1131	else if (s2 + back_max >= last)
1132		d2 = (last - s2) * cfqd->cfq_back_penalty;
1133	else
1134		wrap |= CFQ_RQ2_WRAP;
1135
1136	/* Found required data */
1137
1138	/*
1139	 * By doing switch() on the bit mask "wrap" we avoid having to
1140	 * check two variables for all permutations: --> faster!
1141	 */
1142	switch (wrap) {
1143	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
1144		if (d1 < d2)
1145			return rq1;
1146		else if (d2 < d1)
1147			return rq2;
1148		else {
1149			if (s1 >= s2)
1150				return rq1;
1151			else
1152				return rq2;
1153		}
1154
1155	case CFQ_RQ2_WRAP:
1156		return rq1;
1157	case CFQ_RQ1_WRAP:
1158		return rq2;
1159	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
1160	default:
1161		/*
1162		 * Since both rqs are wrapped,
1163		 * start with the one that's further behind head
1164		 * (--> only *one* back seek required),
1165		 * since back seek takes more time than forward.
1166		 */
1167		if (s1 <= s2)
1168			return rq1;
1169		else
1170			return rq2;
1171	}
1172}
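/*
 * Editor's illustration, not part of the original source: with the default
 * cfq_back_penalty of 2 and the head at sector 10000, a request at sector
 * 10100 has forward distance d1 = 100, while a request at sector 9950 lies
 * 50 sectors behind the head and is penalized to d2 = 50 * 2 = 100.  The
 * distances tie, and the "wrap == 0" tie-break picks the higher sector,
 * i.e. the forward request - backward seeks only win when they are clearly
 * closer even after the penalty.
 */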
1173
1174/*
 1175 * The below is the leftmost-cache rbtree addon.
1176 */
1177static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
1178{
1179	/* Service tree is empty */
1180	if (!root->count)
1181		return NULL;
1182
1183	if (!root->left)
1184		root->left = rb_first(&root->rb);
1185
1186	if (root->left)
1187		return rb_entry(root->left, struct cfq_queue, rb_node);
1188
1189	return NULL;
1190}
1191
1192static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1193{
1194	if (!root->left)
1195		root->left = rb_first(&root->rb);
1196
1197	if (root->left)
1198		return rb_entry_cfqg(root->left);
1199
1200	return NULL;
1201}
1202
1203static void rb_erase_init(struct rb_node *n, struct rb_root *root)
1204{
1205	rb_erase(n, root);
1206	RB_CLEAR_NODE(n);
1207}
1208
1209static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
1210{
1211	if (root->left == n)
1212		root->left = NULL;
1213	rb_erase_init(n, &root->rb);
1214	--root->count;
1215}
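/*
 * Editor's note, not part of the original source: the invariant of this
 * leftmost cache is that ->left is either NULL or points at the minimum
 * node of ->rb.  The insertion paths (__cfq_group_service_tree_add() and
 * cfq_service_tree_add() below) update it whenever a new node lands on the
 * far left, cfq_rb_erase() invalidates it when the cached node is removed,
 * and cfq_rb_first()/cfq_rb_first_group() lazily repopulate it via
 * rb_first(), so the common "extract the minimum" operation is O(1).
 */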
1216
1217/*
1218 * would be nice to take fifo expire time into account as well
1219 */
1220static struct request *
1221cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1222		  struct request *last)
1223{
1224	struct rb_node *rbnext = rb_next(&last->rb_node);
1225	struct rb_node *rbprev = rb_prev(&last->rb_node);
1226	struct request *next = NULL, *prev = NULL;
1227
1228	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1229
1230	if (rbprev)
1231		prev = rb_entry_rq(rbprev);
1232
1233	if (rbnext)
1234		next = rb_entry_rq(rbnext);
1235	else {
1236		rbnext = rb_first(&cfqq->sort_list);
1237		if (rbnext && rbnext != &last->rb_node)
1238			next = rb_entry_rq(rbnext);
1239	}
1240
1241	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1242}
1243
1244static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
1245				      struct cfq_queue *cfqq)
1246{
1247	/*
1248	 * just an approximation, should be ok.
1249	 */
1250	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
1251		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
1252}
1253
1254static inline s64
1255cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1256{
1257	return cfqg->vdisktime - st->min_vdisktime;
1258}
1259
1260static void
1261__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1262{
1263	struct rb_node **node = &st->rb.rb_node;
1264	struct rb_node *parent = NULL;
1265	struct cfq_group *__cfqg;
1266	s64 key = cfqg_key(st, cfqg);
1267	int left = 1;
1268
1269	while (*node != NULL) {
1270		parent = *node;
1271		__cfqg = rb_entry_cfqg(parent);
1272
1273		if (key < cfqg_key(st, __cfqg))
1274			node = &parent->rb_left;
1275		else {
1276			node = &parent->rb_right;
1277			left = 0;
1278		}
1279	}
1280
1281	if (left)
1282		st->left = &cfqg->rb_node;
1283
1284	rb_link_node(&cfqg->rb_node, parent, node);
1285	rb_insert_color(&cfqg->rb_node, &st->rb);
1286}
1287
1288/*
1289 * This has to be called only on activation of cfqg
1290 */
1291static void
1292cfq_update_group_weight(struct cfq_group *cfqg)
1293{
1294	if (cfqg->new_weight) {
1295		cfqg->weight = cfqg->new_weight;
1296		cfqg->new_weight = 0;
1297	}
1298}
1299
1300static void
1301cfq_update_group_leaf_weight(struct cfq_group *cfqg)
1302{
1303	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1304
1305	if (cfqg->new_leaf_weight) {
1306		cfqg->leaf_weight = cfqg->new_leaf_weight;
1307		cfqg->new_leaf_weight = 0;
1308	}
1309}
1310
1311static void
1312cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1313{
1314	unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;	/* start with 1 */
1315	struct cfq_group *pos = cfqg;
1316	struct cfq_group *parent;
1317	bool propagate;
1318
1319	/* add to the service tree */
1320	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1321
1322	/*
1323	 * Update leaf_weight.  We cannot update weight at this point
1324	 * because cfqg might already have been activated and is
 1325	 * contributing its current weight to the parent's children_weight.
1326	 */
1327	cfq_update_group_leaf_weight(cfqg);
1328	__cfq_group_service_tree_add(st, cfqg);
1329
1330	/*
1331	 * Activate @cfqg and calculate the portion of vfraction @cfqg is
1332	 * entitled to.  vfraction is calculated by walking the tree
1333	 * towards the root calculating the fraction it has at each level.
1334	 * The compounded ratio is how much vfraction @cfqg owns.
1335	 *
 1336	 * Start with the proportion the tasks in this cfqg have against the
 1337	 * active children cfqgs - its leaf_weight against children_weight.
1338	 */
1339	propagate = !pos->nr_active++;
1340	pos->children_weight += pos->leaf_weight;
1341	vfr = vfr * pos->leaf_weight / pos->children_weight;
1342
1343	/*
1344	 * Compound ->weight walking up the tree.  Both activation and
1345	 * vfraction calculation are done in the same loop.  Propagation
1346	 * stops once an already activated node is met.  vfraction
1347	 * calculation should always continue to the root.
1348	 */
1349	while ((parent = cfqg_parent(pos))) {
1350		if (propagate) {
1351			cfq_update_group_weight(pos);
1352			propagate = !parent->nr_active++;
1353			parent->children_weight += pos->weight;
1354		}
1355		vfr = vfr * pos->weight / parent->children_weight;
1356		pos = parent;
1357	}
1358
1359	cfqg->vfraction = max_t(unsigned, vfr, 1);
1360}
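/*
 * Editor's illustration, not part of the original source: consider a group
 * A with weight == leaf_weight == 500 being activated under the root while
 * a sibling group B of weight 500 is already active.  If A has no active
 * child groups, the first factor above is its leaf_weight against its own
 * children_weight, 500 / 500 = 1; the walk then takes A's weight against
 * the root's children_weight, 500 / (500 + 500) = 1/2, so A's tasks end up
 * with vfraction == 1/2 of the device.  Had A also had an active child
 * group of weight 500, the leaf factor would drop to 500 / 1000 and A's own
 * tasks would be entitled to 1/4.
 */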
1361
1362static void
1363cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1364{
1365	struct cfq_rb_root *st = &cfqd->grp_service_tree;
1366	struct cfq_group *__cfqg;
1367	struct rb_node *n;
1368
1369	cfqg->nr_cfqq++;
1370	if (!RB_EMPTY_NODE(&cfqg->rb_node))
1371		return;
1372
1373	/*
1374	 * Currently put the group at the end. Later implement something
 1375	 * so that groups get a lower vtime based on their weights, so that
 1376	 * a group does not lose everything if it was not continuously backlogged.
1377	 */
1378	n = rb_last(&st->rb);
1379	if (n) {
1380		__cfqg = rb_entry_cfqg(n);
1381		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
1382	} else
1383		cfqg->vdisktime = st->min_vdisktime;
1384	cfq_group_service_tree_add(st, cfqg);
1385}
1386
1387static void
1388cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1389{
1390	struct cfq_group *pos = cfqg;
1391	bool propagate;
1392
1393	/*
1394	 * Undo activation from cfq_group_service_tree_add().  Deactivate
1395	 * @cfqg and propagate deactivation upwards.
1396	 */
1397	propagate = !--pos->nr_active;
1398	pos->children_weight -= pos->leaf_weight;
1399
1400	while (propagate) {
1401		struct cfq_group *parent = cfqg_parent(pos);
1402
1403		/* @pos has 0 nr_active at this point */
1404		WARN_ON_ONCE(pos->children_weight);
1405		pos->vfraction = 0;
1406
1407		if (!parent)
1408			break;
1409
1410		propagate = !--parent->nr_active;
1411		parent->children_weight -= pos->weight;
1412		pos = parent;
1413	}
1414
1415	/* remove from the service tree */
1416	if (!RB_EMPTY_NODE(&cfqg->rb_node))
1417		cfq_rb_erase(&cfqg->rb_node, st);
1418}
1419
1420static void
1421cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1422{
1423	struct cfq_rb_root *st = &cfqd->grp_service_tree;
1424
1425	BUG_ON(cfqg->nr_cfqq < 1);
1426	cfqg->nr_cfqq--;
1427
1428	/* If there are other cfq queues under this group, don't delete it */
1429	if (cfqg->nr_cfqq)
1430		return;
1431
1432	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
1433	cfq_group_service_tree_del(st, cfqg);
1434	cfqg->saved_wl_slice = 0;
1435	cfqg_stats_update_dequeue(cfqg);
1436}
1437
1438static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1439						unsigned int *unaccounted_time)
1440{
1441	unsigned int slice_used;
1442
1443	/*
1444	 * Queue got expired before even a single request completed or
1445	 * got expired immediately after first request completion.
1446	 */
1447	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
1448		/*
1449		 * Also charge the seek time incurred to the group, otherwise
 1450		 * if there are multiple queues in the group, each can dispatch
 1451		 * a single request on seeky media and cause lots of seek time
 1452		 * and the group will never know it.
1453		 */
1454		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
1455					1);
1456	} else {
1457		slice_used = jiffies - cfqq->slice_start;
1458		if (slice_used > cfqq->allocated_slice) {
1459			*unaccounted_time = slice_used - cfqq->allocated_slice;
1460			slice_used = cfqq->allocated_slice;
1461		}
1462		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
1463			*unaccounted_time += cfqq->slice_start -
1464					cfqq->dispatch_start;
1465	}
1466
1467	return slice_used;
1468}
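/*
 * Editor's illustration, not part of the original source: suppose a queue
 * became active at dispatch_start == 1000 jiffies, its slice was set at
 * slice_start == 1004 with allocated_slice == 30, and it is expired at
 * jiffies == 1040.  slice_used starts as 1040 - 1004 = 36, gets clamped to
 * the allocated 30 with the excess 6 recorded as unaccounted, and the 4
 * jiffies spent between dispatch_start and slice_start are added on top,
 * so the function returns 30 with *unaccounted_time == 10.
 */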
1469
1470static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1471				struct cfq_queue *cfqq)
1472{
1473	struct cfq_rb_root *st = &cfqd->grp_service_tree;
1474	unsigned int used_sl, charge, unaccounted_sl = 0;
1475	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1476			- cfqg->service_tree_idle.count;
1477	unsigned int vfr;
1478
1479	BUG_ON(nr_sync < 0);
1480	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
1481
1482	if (iops_mode(cfqd))
1483		charge = cfqq->slice_dispatch;
1484	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1485		charge = cfqq->allocated_slice;
1486
1487	/*
1488	 * Can't update vdisktime while on service tree and cfqg->vfraction
1489	 * is valid only while on it.  Cache vfr, leave the service tree,
1490	 * update vdisktime and go back on.  The re-addition to the tree
1491	 * will also update the weights as necessary.
1492	 */
1493	vfr = cfqg->vfraction;
1494	cfq_group_service_tree_del(st, cfqg);
1495	cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
1496	cfq_group_service_tree_add(st, cfqg);
1497
1498	/* This group is being expired. Save the context */
1499	if (time_after(cfqd->workload_expires, jiffies)) {
1500		cfqg->saved_wl_slice = cfqd->workload_expires
1501						- jiffies;
1502		cfqg->saved_wl_type = cfqd->serving_wl_type;
1503		cfqg->saved_wl_class = cfqd->serving_wl_class;
1504	} else
1505		cfqg->saved_wl_slice = 0;
1506
1507	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1508					st->min_vdisktime);
1509	cfq_log_cfqq(cfqq->cfqd, cfqq,
1510		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1511		     used_sl, cfqq->slice_dispatch, charge,
1512		     iops_mode(cfqd), cfqq->nr_sectors);
1513	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1514	cfqg_stats_set_start_empty_time(cfqg);
1515}
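/*
 * Editor's note, not part of the original source: the charge above is the
 * wall-clock slice usage by default, the number of dispatched requests when
 * iops_mode() is true (no idling on an NCQ drive, so elapsed time is not a
 * meaningful measure), and the full allocated_slice when an async queue is
 * expired in a group with no busy sync queues.  Whatever the unit, it is
 * then scaled by the group's vfraction via cfqg_scale_charge() before being
 * added to vdisktime.
 */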
1516
1517/**
1518 * cfq_init_cfqg_base - initialize base part of a cfq_group
1519 * @cfqg: cfq_group to initialize
1520 *
1521 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1522 * is enabled or not.
1523 */
1524static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1525{
1526	struct cfq_rb_root *st;
1527	int i, j;
1528
1529	for_each_cfqg_st(cfqg, i, j, st)
1530		*st = CFQ_RB_ROOT;
1531	RB_CLEAR_NODE(&cfqg->rb_node);
1532
1533	cfqg->ttime.last_end_request = jiffies;
1534}
1535
1536#ifdef CONFIG_CFQ_GROUP_IOSCHED
1537static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
1538			    bool on_dfl, bool reset_dev, bool is_leaf_weight);
1539
1540static void cfqg_stats_exit(struct cfqg_stats *stats)
1541{
1542	blkg_rwstat_exit(&stats->merged);
1543	blkg_rwstat_exit(&stats->service_time);
1544	blkg_rwstat_exit(&stats->wait_time);
1545	blkg_rwstat_exit(&stats->queued);
1546	blkg_stat_exit(&stats->time);
1547#ifdef CONFIG_DEBUG_BLK_CGROUP
1548	blkg_stat_exit(&stats->unaccounted_time);
1549	blkg_stat_exit(&stats->avg_queue_size_sum);
1550	blkg_stat_exit(&stats->avg_queue_size_samples);
1551	blkg_stat_exit(&stats->dequeue);
1552	blkg_stat_exit(&stats->group_wait_time);
1553	blkg_stat_exit(&stats->idle_time);
1554	blkg_stat_exit(&stats->empty_time);
1555#endif
1556}
1557
1558static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
1559{
1560	if (blkg_rwstat_init(&stats->merged, gfp) ||
1561	    blkg_rwstat_init(&stats->service_time, gfp) ||
1562	    blkg_rwstat_init(&stats->wait_time, gfp) ||
1563	    blkg_rwstat_init(&stats->queued, gfp) ||
1564	    blkg_stat_init(&stats->time, gfp))
1565		goto err;
1566
1567#ifdef CONFIG_DEBUG_BLK_CGROUP
1568	if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
1569	    blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
1570	    blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
1571	    blkg_stat_init(&stats->dequeue, gfp) ||
1572	    blkg_stat_init(&stats->group_wait_time, gfp) ||
1573	    blkg_stat_init(&stats->idle_time, gfp) ||
1574	    blkg_stat_init(&stats->empty_time, gfp))
1575		goto err;
1576#endif
1577	return 0;
1578err:
1579	cfqg_stats_exit(stats);
1580	return -ENOMEM;
1581}
1582
1583static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
1584{
1585	struct cfq_group_data *cgd;
1586
1587	cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
1588	if (!cgd)
1589		return NULL;
1590	return &cgd->cpd;
1591}
1592
1593static void cfq_cpd_init(struct blkcg_policy_data *cpd)
1594{
1595	struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
1596	unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
1597			      CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
1598
1599	if (cpd_to_blkcg(cpd) == &blkcg_root)
1600		weight *= 2;
1601
1602	cgd->weight = weight;
1603	cgd->leaf_weight = weight;
1604}
1605
1606static void cfq_cpd_free(struct blkcg_policy_data *cpd)
1607{
1608	kfree(cpd_to_cfqgd(cpd));
1609}
1610
1611static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
1612{
1613	struct blkcg *blkcg = cpd_to_blkcg(cpd);
1614	bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys);
1615	unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
1616
1617	if (blkcg == &blkcg_root)
1618		weight *= 2;
1619
1620	WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, false));
1621	WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, true));
1622}
1623
1624static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
1625{
1626	struct cfq_group *cfqg;
1627
1628	cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
1629	if (!cfqg)
1630		return NULL;
1631
1632	cfq_init_cfqg_base(cfqg);
1633	if (cfqg_stats_init(&cfqg->stats, gfp)) {
1634		kfree(cfqg);
1635		return NULL;
1636	}
1637
1638	return &cfqg->pd;
1639}
1640
1641static void cfq_pd_init(struct blkg_policy_data *pd)
1642{
1643	struct cfq_group *cfqg = pd_to_cfqg(pd);
1644	struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);
1645
1646	cfqg->weight = cgd->weight;
1647	cfqg->leaf_weight = cgd->leaf_weight;
1648}
1649
1650static void cfq_pd_offline(struct blkg_policy_data *pd)
1651{
1652	struct cfq_group *cfqg = pd_to_cfqg(pd);
1653	int i;
1654
1655	for (i = 0; i < IOPRIO_BE_NR; i++) {
1656		if (cfqg->async_cfqq[0][i])
1657			cfq_put_queue(cfqg->async_cfqq[0][i]);
1658		if (cfqg->async_cfqq[1][i])
1659			cfq_put_queue(cfqg->async_cfqq[1][i]);
1660	}
1661
1662	if (cfqg->async_idle_cfqq)
1663		cfq_put_queue(cfqg->async_idle_cfqq);
1664
1665	/*
1666	 * @blkg is going offline and will be ignored by
1667	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
1668	 * that they don't get lost.  If IOs complete after this point, the
1669	 * stats for them will be lost.  Oh well...
1670	 */
1671	cfqg_stats_xfer_dead(cfqg);
1672}
1673
1674static void cfq_pd_free(struct blkg_policy_data *pd)
1675{
1676	struct cfq_group *cfqg = pd_to_cfqg(pd);
1677
1678	cfqg_stats_exit(&cfqg->stats);
 1679	kfree(cfqg);
1680}
1681
1682static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
1683{
1684	struct cfq_group *cfqg = pd_to_cfqg(pd);
1685
1686	cfqg_stats_reset(&cfqg->stats);
1687}
1688
1689static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
1690					 struct blkcg *blkcg)
1691{
1692	struct blkcg_gq *blkg;
1693
1694	blkg = blkg_lookup(blkcg, cfqd->queue);
1695	if (likely(blkg))
1696		return blkg_to_cfqg(blkg);
1697	return NULL;
1698}
1699
1700static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1701{
1702	cfqq->cfqg = cfqg;
1703	/* cfqq reference on cfqg */
1704	cfqg_get(cfqg);
1705}
1706
1707static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1708				     struct blkg_policy_data *pd, int off)
1709{
1710	struct cfq_group *cfqg = pd_to_cfqg(pd);
1711
1712	if (!cfqg->dev_weight)
1713		return 0;
1714	return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
1715}
1716
1717static int cfqg_print_weight_device(struct seq_file *sf, void *v)
1718{
1719	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1720			  cfqg_prfill_weight_device, &blkcg_policy_cfq,
1721			  0, false);
1722	return 0;
1723}
1724
1725static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1726					  struct blkg_policy_data *pd, int off)
1727{
1728	struct cfq_group *cfqg = pd_to_cfqg(pd);
1729
1730	if (!cfqg->dev_leaf_weight)
1731		return 0;
1732	return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1733}
1734
1735static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
1736{
1737	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1738			  cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1739			  0, false);
1740	return 0;
1741}
1742
1743static int cfq_print_weight(struct seq_file *sf, void *v)
1744{
1745	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1746	struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1747	unsigned int val = 0;
1748
1749	if (cgd)
1750		val = cgd->weight;
1751
1752	seq_printf(sf, "%u\n", val);
1753	return 0;
1754}
1755
1756static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
1757{
1758	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1759	struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1760	unsigned int val = 0;
1761
1762	if (cgd)
1763		val = cgd->leaf_weight;
1764
1765	seq_printf(sf, "%u\n", val);
1766	return 0;
1767}
1768
1769static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
1770					char *buf, size_t nbytes, loff_t off,
1771					bool on_dfl, bool is_leaf_weight)
1772{
1773	unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1774	unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
1775	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1776	struct blkg_conf_ctx ctx;
1777	struct cfq_group *cfqg;
1778	struct cfq_group_data *cfqgd;
1779	int ret;
1780	u64 v;
1781
1782	ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
1783	if (ret)
1784		return ret;
1785
1786	if (sscanf(ctx.body, "%llu", &v) == 1) {
1787		/* require "default" on dfl */
1788		ret = -ERANGE;
1789		if (!v && on_dfl)
1790			goto out_finish;
1791	} else if (!strcmp(strim(ctx.body), "default")) {
1792		v = 0;
1793	} else {
1794		ret = -EINVAL;
1795		goto out_finish;
1796	}
1797
1798	cfqg = blkg_to_cfqg(ctx.blkg);
1799	cfqgd = blkcg_to_cfqgd(blkcg);
1800
1801	ret = -ERANGE;
1802	if (!v || (v >= min && v <= max)) {
1803		if (!is_leaf_weight) {
1804			cfqg->dev_weight = v;
1805			cfqg->new_weight = v ?: cfqgd->weight;
1806		} else {
1807			cfqg->dev_leaf_weight = v;
1808			cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight;
1809		}
1810		ret = 0;
1811	}
1812out_finish:
1813	blkg_conf_finish(&ctx);
1814	return ret ?: nbytes;
1815}
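/*
 * Editor's usage note, not part of the original source: via the cftypes
 * below, this parser backs per-device writes such as
 *
 *	echo "8:16 300" > blkio.weight_device
 *	echo "8:16 0"   > blkio.weight_device	(legacy hierarchy only)
 *
 * where a value of 0 clears the per-device override so the group falls back
 * to the blkcg-wide weight.  On the unified hierarchy the keyword "default"
 * must be used instead of 0, which is why a literal 0 is rejected with
 * -ERANGE when on_dfl is set.
 */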
1816
1817static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
1818				      char *buf, size_t nbytes, loff_t off)
1819{
1820	return __cfqg_set_weight_device(of, buf, nbytes, off, false, false);
1821}
1822
1823static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
1824					   char *buf, size_t nbytes, loff_t off)
1825{
1826	return __cfqg_set_weight_device(of, buf, nbytes, off, false, true);
1827}
1828
1829static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
1830			    bool on_dfl, bool reset_dev, bool is_leaf_weight)
1831{
1832	unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1833	unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
1834	struct blkcg *blkcg = css_to_blkcg(css);
1835	struct blkcg_gq *blkg;
1836	struct cfq_group_data *cfqgd;
1837	int ret = 0;
1838
1839	if (val < min || val > max)
1840		return -ERANGE;
1841
1842	spin_lock_irq(&blkcg->lock);
1843	cfqgd = blkcg_to_cfqgd(blkcg);
1844	if (!cfqgd) {
1845		ret = -EINVAL;
1846		goto out;
1847	}
1848
1849	if (!is_leaf_weight)
1850		cfqgd->weight = val;
1851	else
1852		cfqgd->leaf_weight = val;
1853
1854	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1855		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1856
1857		if (!cfqg)
1858			continue;
1859
1860		if (!is_leaf_weight) {
1861			if (reset_dev)
1862				cfqg->dev_weight = 0;
1863			if (!cfqg->dev_weight)
1864				cfqg->new_weight = cfqgd->weight;
1865		} else {
1866			if (reset_dev)
1867				cfqg->dev_leaf_weight = 0;
1868			if (!cfqg->dev_leaf_weight)
1869				cfqg->new_leaf_weight = cfqgd->leaf_weight;
1870		}
1871	}
1872
1873out:
1874	spin_unlock_irq(&blkcg->lock);
1875	return ret;
1876}
1877
1878static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1879			  u64 val)
1880{
1881	return __cfq_set_weight(css, val, false, false, false);
1882}
1883
1884static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1885			       struct cftype *cft, u64 val)
1886{
1887	return __cfq_set_weight(css, val, false, false, true);
1888}
1889
1890static int cfqg_print_stat(struct seq_file *sf, void *v)
1891{
1892	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1893			  &blkcg_policy_cfq, seq_cft(sf)->private, false);
1894	return 0;
1895}
1896
1897static int cfqg_print_rwstat(struct seq_file *sf, void *v)
1898{
1899	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1900			  &blkcg_policy_cfq, seq_cft(sf)->private, true);
1901	return 0;
1902}
1903
1904static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1905				      struct blkg_policy_data *pd, int off)
1906{
1907	u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
1908					  &blkcg_policy_cfq, off);
1909	return __blkg_prfill_u64(sf, pd, sum);
1910}
1911
1912static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1913					struct blkg_policy_data *pd, int off)
1914{
1915	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
1916							&blkcg_policy_cfq, off);
1917	return __blkg_prfill_rwstat(sf, pd, &sum);
1918}
1919
1920static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
1921{
1922	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1923			  cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1924			  seq_cft(sf)->private, false);
1925	return 0;
1926}
1927
1928static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
1929{
1930	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1931			  cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1932			  seq_cft(sf)->private, true);
1933	return 0;
1934}
1935
1936static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1937			       int off)
1938{
1939	u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
1940
1941	return __blkg_prfill_u64(sf, pd, sum >> 9);
1942}
1943
1944static int cfqg_print_stat_sectors(struct seq_file *sf, void *v)
1945{
1946	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1947			  cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false);
1948	return 0;
1949}
1950
1951static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
1952					 struct blkg_policy_data *pd, int off)
1953{
1954	struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
1955					offsetof(struct blkcg_gq, stat_bytes));
1956	u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
1957		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
1958
1959	return __blkg_prfill_u64(sf, pd, sum >> 9);
1960}
1961
1962static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1963{
1964	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1965			  cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0,
1966			  false);
1967	return 0;
1968}
1969
1970#ifdef CONFIG_DEBUG_BLK_CGROUP
1971static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1972				      struct blkg_policy_data *pd, int off)
1973{
1974	struct cfq_group *cfqg = pd_to_cfqg(pd);
1975	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
1976	u64 v = 0;
1977
1978	if (samples) {
1979		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
1980		v = div64_u64(v, samples);
1981	}
1982	__blkg_prfill_u64(sf, pd, v);
1983	return 0;
1984}
1985
1986/* print avg_queue_size */
1987static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
1988{
1989	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1990			  cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
1991			  0, false);
1992	return 0;
1993}
1994#endif	/* CONFIG_DEBUG_BLK_CGROUP */
1995
1996static struct cftype cfq_blkcg_legacy_files[] = {
1997	/* on root, weight is mapped to leaf_weight */
1998	{
1999		.name = "weight_device",
2000		.flags = CFTYPE_ONLY_ON_ROOT,
2001		.seq_show = cfqg_print_leaf_weight_device,
2002		.write = cfqg_set_leaf_weight_device,
2003	},
2004	{
2005		.name = "weight",
2006		.flags = CFTYPE_ONLY_ON_ROOT,
2007		.seq_show = cfq_print_leaf_weight,
2008		.write_u64 = cfq_set_leaf_weight,
2009	},
2010
2011	/* no such mapping necessary for !roots */
2012	{
2013		.name = "weight_device",
2014		.flags = CFTYPE_NOT_ON_ROOT,
2015		.seq_show = cfqg_print_weight_device,
2016		.write = cfqg_set_weight_device,
2017	},
2018	{
2019		.name = "weight",
2020		.flags = CFTYPE_NOT_ON_ROOT,
2021		.seq_show = cfq_print_weight,
2022		.write_u64 = cfq_set_weight,
2023	},
2024
2025	{
2026		.name = "leaf_weight_device",
2027		.seq_show = cfqg_print_leaf_weight_device,
2028		.write = cfqg_set_leaf_weight_device,
2029	},
2030	{
2031		.name = "leaf_weight",
2032		.seq_show = cfq_print_leaf_weight,
2033		.write_u64 = cfq_set_leaf_weight,
2034	},
2035
 2036	/* statistics, covering only the tasks in the cfqg */
2037	{
2038		.name = "time",
2039		.private = offsetof(struct cfq_group, stats.time),
2040		.seq_show = cfqg_print_stat,
2041	},
2042	{
2043		.name = "sectors",
2044		.seq_show = cfqg_print_stat_sectors,
2045	},
2046	{
2047		.name = "io_service_bytes",
2048		.private = (unsigned long)&blkcg_policy_cfq,
2049		.seq_show = blkg_print_stat_bytes,
2050	},
2051	{
2052		.name = "io_serviced",
2053		.private = (unsigned long)&blkcg_policy_cfq,
2054		.seq_show = blkg_print_stat_ios,
2055	},
2056	{
2057		.name = "io_service_time",
2058		.private = offsetof(struct cfq_group, stats.service_time),
2059		.seq_show = cfqg_print_rwstat,
2060	},
2061	{
2062		.name = "io_wait_time",
2063		.private = offsetof(struct cfq_group, stats.wait_time),
2064		.seq_show = cfqg_print_rwstat,
2065	},
2066	{
2067		.name = "io_merged",
2068		.private = offsetof(struct cfq_group, stats.merged),
2069		.seq_show = cfqg_print_rwstat,
2070	},
2071	{
2072		.name = "io_queued",
2073		.private = offsetof(struct cfq_group, stats.queued),
2074		.seq_show = cfqg_print_rwstat,
2075	},
2076
 2077	/* the same statistics which cover the cfqg and its descendants */
2078	{
2079		.name = "time_recursive",
2080		.private = offsetof(struct cfq_group, stats.time),
2081		.seq_show = cfqg_print_stat_recursive,
2082	},
2083	{
2084		.name = "sectors_recursive",
2085		.seq_show = cfqg_print_stat_sectors_recursive,
2086	},
2087	{
2088		.name = "io_service_bytes_recursive",
2089		.private = (unsigned long)&blkcg_policy_cfq,
2090		.seq_show = blkg_print_stat_bytes_recursive,
2091	},
2092	{
2093		.name = "io_serviced_recursive",
2094		.private = (unsigned long)&blkcg_policy_cfq,
2095		.seq_show = blkg_print_stat_ios_recursive,
2096	},
2097	{
2098		.name = "io_service_time_recursive",
2099		.private = offsetof(struct cfq_group, stats.service_time),
2100		.seq_show = cfqg_print_rwstat_recursive,
2101	},
2102	{
2103		.name = "io_wait_time_recursive",
2104		.private = offsetof(struct cfq_group, stats.wait_time),
2105		.seq_show = cfqg_print_rwstat_recursive,
2106	},
2107	{
2108		.name = "io_merged_recursive",
2109		.private = offsetof(struct cfq_group, stats.merged),
2110		.seq_show = cfqg_print_rwstat_recursive,
2111	},
2112	{
2113		.name = "io_queued_recursive",
2114		.private = offsetof(struct cfq_group, stats.queued),
2115		.seq_show = cfqg_print_rwstat_recursive,
2116	},
2117#ifdef CONFIG_DEBUG_BLK_CGROUP
2118	{
2119		.name = "avg_queue_size",
2120		.seq_show = cfqg_print_avg_queue_size,
2121	},
2122	{
2123		.name = "group_wait_time",
2124		.private = offsetof(struct cfq_group, stats.group_wait_time),
2125		.seq_show = cfqg_print_stat,
2126	},
2127	{
2128		.name = "idle_time",
2129		.private = offsetof(struct cfq_group, stats.idle_time),
2130		.seq_show = cfqg_print_stat,
2131	},
2132	{
2133		.name = "empty_time",
2134		.private = offsetof(struct cfq_group, stats.empty_time),
2135		.seq_show = cfqg_print_stat,
2136	},
2137	{
2138		.name = "dequeue",
2139		.private = offsetof(struct cfq_group, stats.dequeue),
2140		.seq_show = cfqg_print_stat,
2141	},
2142	{
2143		.name = "unaccounted_time",
2144		.private = offsetof(struct cfq_group, stats.unaccounted_time),
2145		.seq_show = cfqg_print_stat,
2146	},
2147#endif	/* CONFIG_DEBUG_BLK_CGROUP */
2148	{ }	/* terminate */
2149};
2150
2151static int cfq_print_weight_on_dfl(struct seq_file *sf, void *v)
2152{
2153	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2154	struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
2155
2156	seq_printf(sf, "default %u\n", cgd->weight);
2157	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_weight_device,
2158			  &blkcg_policy_cfq, 0, false);
2159	return 0;
2160}
2161
2162static ssize_t cfq_set_weight_on_dfl(struct kernfs_open_file *of,
2163				     char *buf, size_t nbytes, loff_t off)
2164{
2165	char *endp;
2166	int ret;
2167	u64 v;
2168
2169	buf = strim(buf);
2170
2171	/* "WEIGHT" or "default WEIGHT" sets the default weight */
2172	v = simple_strtoull(buf, &endp, 0);
2173	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
2174		ret = __cfq_set_weight(of_css(of), v, true, false, false);
2175		return ret ?: nbytes;
2176	}
2177
2178	/* "MAJ:MIN WEIGHT" */
2179	return __cfqg_set_weight_device(of, buf, nbytes, off, true, false);
2180}
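/*
 * Editor's usage note, not part of the original source: on the unified
 * hierarchy this handler accepts a bare weight ("200"), an explicit
 * "default 200", or a per-device override such as "8:16 300".  The first
 * two forms update the blkcg-wide default via __cfq_set_weight(); the third
 * is routed to __cfqg_set_weight_device(), where writing "8:16 default"
 * clears the per-device override again.
 */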
2181
2182static struct cftype cfq_blkcg_files[] = {
2183	{
2184		.name = "weight",
2185		.flags = CFTYPE_NOT_ON_ROOT,
2186		.seq_show = cfq_print_weight_on_dfl,
2187		.write = cfq_set_weight_on_dfl,
2188	},
2189	{ }	/* terminate */
2190};
2191
2192#else /* GROUP_IOSCHED */
2193static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
2194					 struct blkcg *blkcg)
2195{
2196	return cfqd->root_group;
2197}
2198
2199static inline void
2200cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
2201	cfqq->cfqg = cfqg;
2202}
2203
2204#endif /* GROUP_IOSCHED */
2205
2206/*
 2207 * The cfqd->service_trees hold all pending cfq_queues that have
2208 * requests waiting to be processed. It is sorted in the order that
2209 * we will service the queues.
2210 */
2211static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2212				 bool add_front)
2213{
2214	struct rb_node **p, *parent;
2215	struct cfq_queue *__cfqq;
2216	unsigned long rb_key;
2217	struct cfq_rb_root *st;
2218	int left;
2219	int new_cfqq = 1;
2220
2221	st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
2222	if (cfq_class_idle(cfqq)) {
2223		rb_key = CFQ_IDLE_DELAY;
2224		parent = rb_last(&st->rb);
2225		if (parent && parent != &cfqq->rb_node) {
2226			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2227			rb_key += __cfqq->rb_key;
2228		} else
2229			rb_key += jiffies;
2230	} else if (!add_front) {
2231		/*
2232		 * Get our rb key offset. Subtract any residual slice
2233		 * value carried from last service. A negative resid
2234		 * count indicates slice overrun, and this should position
2235		 * the next service time further away in the tree.
2236		 */
2237		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
2238		rb_key -= cfqq->slice_resid;
2239		cfqq->slice_resid = 0;
2240	} else {
2241		rb_key = -HZ;
2242		__cfqq = cfq_rb_first(st);
2243		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
2244	}
2245
2246	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2247		new_cfqq = 0;
2248		/*
2249		 * same position, nothing more to do
2250		 */
2251		if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
2252			return;
2253
2254		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2255		cfqq->service_tree = NULL;
2256	}
2257
2258	left = 1;
2259	parent = NULL;
2260	cfqq->service_tree = st;
2261	p = &st->rb.rb_node;
2262	while (*p) {
2263		parent = *p;
2264		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2265
2266		/*
2267		 * sort by key, that represents service time.
2268		 */
2269		if (time_before(rb_key, __cfqq->rb_key))
2270			p = &parent->rb_left;
2271		else {
2272			p = &parent->rb_right;
2273			left = 0;
2274		}
2275	}
2276
2277	if (left)
2278		st->left = &cfqq->rb_node;
2279
2280	cfqq->rb_key = rb_key;
2281	rb_link_node(&cfqq->rb_node, parent, p);
2282	rb_insert_color(&cfqq->rb_node, &st->rb);
2283	st->count++;
2284	if (add_front || !new_cfqq)
2285		return;
2286	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
2287}
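/*
 * Editor's illustration, not part of the original source, assuming the
 * default 100ms sync slice at HZ == 1000: cfq_prio_slice() yields 180 for
 * the highest priority (prio 0) and 100 for the default prio 4, so a
 * default-priority sync queue in a group already holding three cfqqs gets
 * cfq_slice_offset() == (3 - 1) * (180 - 100) == 160 and is keyed at about
 * jiffies + 160, minus any residual slice it carried over.  Lower-priority
 * queues therefore sort later in the service tree, while add_front queues
 * are keyed just below the current leftmost entry.
 */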
2288
2289static struct cfq_queue *
2290cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2291		     sector_t sector, struct rb_node **ret_parent,
2292		     struct rb_node ***rb_link)
2293{
2294	struct rb_node **p, *parent;
2295	struct cfq_queue *cfqq = NULL;
2296
2297	parent = NULL;
2298	p = &root->rb_node;
2299	while (*p) {
2300		struct rb_node **n;
2301
2302		parent = *p;
2303		cfqq = rb_entry(parent, struct cfq_queue, p_node);
2304
2305		/*
2306		 * Sort strictly based on sector.  Smallest to the left,
2307		 * largest to the right.
2308		 */
2309		if (sector > blk_rq_pos(cfqq->next_rq))
2310			n = &(*p)->rb_right;
2311		else if (sector < blk_rq_pos(cfqq->next_rq))
2312			n = &(*p)->rb_left;
2313		else
2314			break;
2315		p = n;
2316		cfqq = NULL;
2317	}
2318
2319	*ret_parent = parent;
2320	if (rb_link)
2321		*rb_link = p;
2322	return cfqq;
2323}
2324
2325static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2326{
2327	struct rb_node **p, *parent;
2328	struct cfq_queue *__cfqq;
2329
2330	if (cfqq->p_root) {
2331		rb_erase(&cfqq->p_node, cfqq->p_root);
2332		cfqq->p_root = NULL;
2333	}
2334
2335	if (cfq_class_idle(cfqq))
2336		return;
2337	if (!cfqq->next_rq)
2338		return;
2339
2340	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2341	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2342				      blk_rq_pos(cfqq->next_rq), &parent, &p);
2343	if (!__cfqq) {
2344		rb_link_node(&cfqq->p_node, parent, p);
2345		rb_insert_color(&cfqq->p_node, cfqq->p_root);
2346	} else
2347		cfqq->p_root = NULL;
2348}
2349
2350/*
2351 * Update cfqq's position in the service tree.
2352 */
2353static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2354{
2355	/*
2356	 * Resorting requires the cfqq to be on the RR list already.
2357	 */
2358	if (cfq_cfqq_on_rr(cfqq)) {
2359		cfq_service_tree_add(cfqd, cfqq, 0);
2360		cfq_prio_tree_add(cfqd, cfqq);
2361	}
2362}
2363
2364/*
2365 * add to busy list of queues for service, trying to be fair in ordering
2366 * the pending list according to last request service
2367 */
2368static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2369{
2370	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
2371	BUG_ON(cfq_cfqq_on_rr(cfqq));
2372	cfq_mark_cfqq_on_rr(cfqq);
2373	cfqd->busy_queues++;
2374	if (cfq_cfqq_sync(cfqq))
2375		cfqd->busy_sync_queues++;
2376
2377	cfq_resort_rr_list(cfqd, cfqq);
2378}
2379
2380/*
2381 * Called when the cfqq no longer has requests pending, remove it from
2382 * the service tree.
2383 */
2384static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2385{
2386	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
2387	BUG_ON(!cfq_cfqq_on_rr(cfqq));
2388	cfq_clear_cfqq_on_rr(cfqq);
2389
2390	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2391		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2392		cfqq->service_tree = NULL;
2393	}
2394	if (cfqq->p_root) {
2395		rb_erase(&cfqq->p_node, cfqq->p_root);
2396		cfqq->p_root = NULL;
2397	}
2398
2399	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
2400	BUG_ON(!cfqd->busy_queues);
2401	cfqd->busy_queues--;
2402	if (cfq_cfqq_sync(cfqq))
2403		cfqd->busy_sync_queues--;
2404}
2405
2406/*
2407 * rb tree support functions
2408 */
2409static void cfq_del_rq_rb(struct request *rq)
2410{
2411	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2412	const int sync = rq_is_sync(rq);
2413
2414	BUG_ON(!cfqq->queued[sync]);
2415	cfqq->queued[sync]--;
2416
2417	elv_rb_del(&cfqq->sort_list, rq);
2418
2419	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2420		/*
2421		 * Queue will be deleted from service tree when we actually
2422		 * expire it later. Right now just remove it from prio tree
2423		 * as it is empty.
2424		 */
2425		if (cfqq->p_root) {
2426			rb_erase(&cfqq->p_node, cfqq->p_root);
2427			cfqq->p_root = NULL;
2428		}
2429	}
2430}
2431
2432static void cfq_add_rq_rb(struct request *rq)
2433{
2434	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2435	struct cfq_data *cfqd = cfqq->cfqd;
2436	struct request *prev;
2437
2438	cfqq->queued[rq_is_sync(rq)]++;
2439
2440	elv_rb_add(&cfqq->sort_list, rq);
2441
2442	if (!cfq_cfqq_on_rr(cfqq))
2443		cfq_add_cfqq_rr(cfqd, cfqq);
2444
2445	/*
2446	 * check if this request is a better next-serve candidate
2447	 */
2448	prev = cfqq->next_rq;
2449	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
2450
2451	/*
2452	 * adjust priority tree position, if ->next_rq changes
2453	 */
2454	if (prev != cfqq->next_rq)
2455		cfq_prio_tree_add(cfqd, cfqq);
2456
2457	BUG_ON(!cfqq->next_rq);
2458}
2459
2460static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
2461{
2462	elv_rb_del(&cfqq->sort_list, rq);
2463	cfqq->queued[rq_is_sync(rq)]--;
2464	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2465	cfq_add_rq_rb(rq);
2466	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2467				 rq->cmd_flags);
2468}
2469
2470static struct request *
2471cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
2472{
2473	struct task_struct *tsk = current;
2474	struct cfq_io_cq *cic;
2475	struct cfq_queue *cfqq;
2476
2477	cic = cfq_cic_lookup(cfqd, tsk->io_context);
2478	if (!cic)
2479		return NULL;
2480
2481	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2482	if (cfqq)
2483		return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
2484
2485	return NULL;
2486}
2487
2488static void cfq_activate_request(struct request_queue *q, struct request *rq)
2489{
2490	struct cfq_data *cfqd = q->elevator->elevator_data;
2491
2492	cfqd->rq_in_driver++;
2493	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
2494						cfqd->rq_in_driver);
2495
2496	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2497}
2498
2499static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
2500{
2501	struct cfq_data *cfqd = q->elevator->elevator_data;
2502
2503	WARN_ON(!cfqd->rq_in_driver);
2504	cfqd->rq_in_driver--;
2505	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
2506						cfqd->rq_in_driver);
2507}
2508
2509static void cfq_remove_request(struct request *rq)
2510{
2511	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2512
2513	if (cfqq->next_rq == rq)
2514		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
2515
2516	list_del_init(&rq->queuelist);
2517	cfq_del_rq_rb(rq);
2518
2519	cfqq->cfqd->rq_queued--;
2520	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
2521	if (rq->cmd_flags & REQ_PRIO) {
2522		WARN_ON(!cfqq->prio_pending);
2523		cfqq->prio_pending--;
2524	}
2525}
2526
2527static int cfq_merge(struct request_queue *q, struct request **req,
2528		     struct bio *bio)
2529{
2530	struct cfq_data *cfqd = q->elevator->elevator_data;
2531	struct request *__rq;
2532
2533	__rq = cfq_find_rq_fmerge(cfqd, bio);
2534	if (__rq && elv_rq_merge_ok(__rq, bio)) {
2535		*req = __rq;
2536		return ELEVATOR_FRONT_MERGE;
2537	}
2538
2539	return ELEVATOR_NO_MERGE;
2540}
2541
2542static void cfq_merged_request(struct request_queue *q, struct request *req,
2543			       int type)
2544{
2545	if (type == ELEVATOR_FRONT_MERGE) {
2546		struct cfq_queue *cfqq = RQ_CFQQ(req);
2547
2548		cfq_reposition_rq_rb(cfqq, req);
2549	}
2550}
2551
2552static void cfq_bio_merged(struct request_queue *q, struct request *req,
2553				struct bio *bio)
2554{
2555	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
2556}
2557
2558static void
2559cfq_merged_requests(struct request_queue *q, struct request *rq,
2560		    struct request *next)
2561{
2562	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2563	struct cfq_data *cfqd = q->elevator->elevator_data;
2564
2565	/*
2566	 * reposition in fifo if next is older than rq
2567	 */
2568	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2569	    time_before(next->fifo_time, rq->fifo_time) &&
2570	    cfqq == RQ_CFQQ(next)) {
2571		list_move(&rq->queuelist, &next->queuelist);
2572		rq->fifo_time = next->fifo_time;
2573	}
2574
2575	if (cfqq->next_rq == next)
2576		cfqq->next_rq = rq;
2577	cfq_remove_request(next);
2578	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
2579
2580	cfqq = RQ_CFQQ(next);
2581	/*
2582	 * all requests of this queue are merged to other queues, delete it
2583	 * from the service tree. If it's the active_queue,
2584	 * cfq_dispatch_requests() will choose to expire it or do idle
2585	 */
2586	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2587	    cfqq != cfqd->active_queue)
2588		cfq_del_cfqq_rr(cfqd, cfqq);
2589}
2590
2591static int cfq_allow_merge(struct request_queue *q, struct request *rq,
2592			   struct bio *bio)
2593{
2594	struct cfq_data *cfqd = q->elevator->elevator_data;
2595	struct cfq_io_cq *cic;
2596	struct cfq_queue *cfqq;
2597
2598	/*
2599	 * Disallow merge of a sync bio into an async request.
2600	 */
2601	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
2602		return false;
2603
2604	/*
2605	 * Lookup the cfqq that this bio will be queued with and allow
2606	 * merge only if rq is queued there.
2607	 */
2608	cic = cfq_cic_lookup(cfqd, current->io_context);
2609	if (!cic)
2610		return false;
2611
2612	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
2613	return cfqq == RQ_CFQQ(rq);
2614}
2615
2616static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2617{
2618	del_timer(&cfqd->idle_slice_timer);
2619	cfqg_stats_update_idle_time(cfqq->cfqg);
2620}
2621
2622static void __cfq_set_active_queue(struct cfq_data *cfqd,
2623				   struct cfq_queue *cfqq)
2624{
2625	if (cfqq) {
2626		cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
2627				cfqd->serving_wl_class, cfqd->serving_wl_type);
2628		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
2629		cfqq->slice_start = 0;
2630		cfqq->dispatch_start = jiffies;
2631		cfqq->allocated_slice = 0;
2632		cfqq->slice_end = 0;
2633		cfqq->slice_dispatch = 0;
2634		cfqq->nr_sectors = 0;
2635
2636		cfq_clear_cfqq_wait_request(cfqq);
2637		cfq_clear_cfqq_must_dispatch(cfqq);
2638		cfq_clear_cfqq_must_alloc_slice(cfqq);
2639		cfq_clear_cfqq_fifo_expire(cfqq);
2640		cfq_mark_cfqq_slice_new(cfqq);
2641
2642		cfq_del_timer(cfqd, cfqq);
2643	}
2644
2645	cfqd->active_queue = cfqq;
2646}
2647
2648/*
2649 * current cfqq expired its slice (or was too idle), select new one
2650 */
2651static void
2652__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2653		    bool timed_out)
2654{
2655	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2656
2657	if (cfq_cfqq_wait_request(cfqq))
2658		cfq_del_timer(cfqd, cfqq);
2659
2660	cfq_clear_cfqq_wait_request(cfqq);
2661	cfq_clear_cfqq_wait_busy(cfqq);
2662
2663	/*
2664	 * If this cfqq is shared between multiple processes, check to
2665	 * make sure that those processes are still issuing I/Os within
2666	 * the mean seek distance.  If not, it may be time to break the
2667	 * queues apart again.
2668	 */
2669	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2670		cfq_mark_cfqq_split_coop(cfqq);
2671
2672	/*
2673	 * store what was left of this slice, if the queue idled/timed out
2674	 */
2675	if (timed_out) {
2676		if (cfq_cfqq_slice_new(cfqq))
2677			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
2678		else
2679			cfqq->slice_resid = cfqq->slice_end - jiffies;
2680		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2681	}
2682
2683	cfq_group_served(cfqd, cfqq->cfqg, cfqq);
2684
2685	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2686		cfq_del_cfqq_rr(cfqd, cfqq);
2687
2688	cfq_resort_rr_list(cfqd, cfqq);
2689
2690	if (cfqq == cfqd->active_queue)
2691		cfqd->active_queue = NULL;
2692
2693	if (cfqd->active_cic) {
2694		put_io_context(cfqd->active_cic->icq.ioc);
2695		cfqd->active_cic = NULL;
2696	}
2697}
2698
2699static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
2700{
2701	struct cfq_queue *cfqq = cfqd->active_queue;
2702
2703	if (cfqq)
2704		__cfq_slice_expired(cfqd, cfqq, timed_out);
2705}
2706
2707/*
2708 * Get next queue for service. Unless we have a queue preemption,
2709 * we'll simply select the first cfqq in the service tree.
2710 */
2711static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
2712{
2713	struct cfq_rb_root *st = st_for(cfqd->serving_group,
2714			cfqd->serving_wl_class, cfqd->serving_wl_type);
2715
2716	if (!cfqd->rq_queued)
2717		return NULL;
2718
2719	/* There is nothing to dispatch */
2720	if (!st)
2721		return NULL;
2722	if (RB_EMPTY_ROOT(&st->rb))
2723		return NULL;
2724	return cfq_rb_first(st);
2725}
2726
2727static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2728{
2729	struct cfq_group *cfqg;
2730	struct cfq_queue *cfqq;
2731	int i, j;
2732	struct cfq_rb_root *st;
2733
2734	if (!cfqd->rq_queued)
2735		return NULL;
2736
2737	cfqg = cfq_get_next_cfqg(cfqd);
2738	if (!cfqg)
2739		return NULL;
2740
2741	for_each_cfqg_st(cfqg, i, j, st)
2742		if ((cfqq = cfq_rb_first(st)) != NULL)
2743			return cfqq;
2744	return NULL;
2745}
2746
2747/*
2748 * Get and set a new active queue for service.
2749 */
2750static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2751					      struct cfq_queue *cfqq)
2752{
2753	if (!cfqq)
2754		cfqq = cfq_get_next_queue(cfqd);
2755
2756	__cfq_set_active_queue(cfqd, cfqq);
2757	return cfqq;
2758}
2759
2760static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2761					  struct request *rq)
2762{
2763	if (blk_rq_pos(rq) >= cfqd->last_position)
2764		return blk_rq_pos(rq) - cfqd->last_position;
2765	else
2766		return cfqd->last_position - blk_rq_pos(rq);
2767}
2768
2769static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2770			       struct request *rq)
2771{
2772	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
2773}
2774
2775static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2776				    struct cfq_queue *cur_cfqq)
2777{
2778	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
2779	struct rb_node *parent, *node;
2780	struct cfq_queue *__cfqq;
2781	sector_t sector = cfqd->last_position;
2782
2783	if (RB_EMPTY_ROOT(root))
2784		return NULL;
2785
2786	/*
2787	 * First, if we find a request starting at the end of the last
2788	 * request, choose it.
2789	 */
2790	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
2791	if (__cfqq)
2792		return __cfqq;
2793
2794	/*
2795	 * If the exact sector wasn't found, the parent of the NULL leaf
2796	 * will contain the closest sector.
2797	 */
2798	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
2799	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2800		return __cfqq;
2801
2802	if (blk_rq_pos(__cfqq->next_rq) < sector)
2803		node = rb_next(&__cfqq->p_node);
2804	else
2805		node = rb_prev(&__cfqq->p_node);
2806	if (!node)
2807		return NULL;
2808
2809	__cfqq = rb_entry(node, struct cfq_queue, p_node);
2810	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
2811		return __cfqq;
2812
2813	return NULL;
2814}
2815
2816/*
2817 * cfqd - obvious
2818 * cur_cfqq - passed in so that we don't decide that the current queue is
2819 * 	      closely cooperating with itself.
2820 *
2821 * So, basically we're assuming that cur_cfqq has dispatched at least
2822 * one request, and that cfqd->last_position reflects a position on the disk
2823 * associated with the I/O issued by cur_cfqq.  I'm not sure this is a valid
2824 * assumption.
2825 */
2826static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
2827					      struct cfq_queue *cur_cfqq)
2828{
2829	struct cfq_queue *cfqq;
2830
2831	if (cfq_class_idle(cur_cfqq))
2832		return NULL;
2833	if (!cfq_cfqq_sync(cur_cfqq))
2834		return NULL;
2835	if (CFQQ_SEEKY(cur_cfqq))
2836		return NULL;
2837
2838	/*
2839	 * Don't search priority tree if it's the only queue in the group.
2840	 */
2841	if (cur_cfqq->cfqg->nr_cfqq == 1)
2842		return NULL;
2843
2844	/*
2845	 * We should notice if some of the queues are cooperating, e.g.
2846	 * working closely on the same area of the disk. In that case,
2847	 * we can group them together so we don't waste time idling.
2848	 */
2849	cfqq = cfqq_close(cfqd, cur_cfqq);
2850	if (!cfqq)
2851		return NULL;
2852
2853	/* If new queue belongs to different cfq_group, don't choose it */
2854	if (cur_cfqq->cfqg != cfqq->cfqg)
2855		return NULL;
2856
2857	/*
2858	 * It only makes sense to merge sync queues.
2859	 */
2860	if (!cfq_cfqq_sync(cfqq))
2861		return NULL;
2862	if (CFQQ_SEEKY(cfqq))
2863		return NULL;
2864
2865	/*
2866	 * Do not merge queues of different priority classes
2867	 */
2868	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2869		return NULL;
2870
2871	return cfqq;
2872}
2873
2874/*
2875 * Determine whether we should enforce idle window for this queue.
2876 */
2877
2878static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2879{
2880	enum wl_class_t wl_class = cfqq_class(cfqq);
2881	struct cfq_rb_root *st = cfqq->service_tree;
2882
2883	BUG_ON(!st);
2884	BUG_ON(!st->count);
2885
2886	if (!cfqd->cfq_slice_idle)
2887		return false;
2888
2889	/* We never do for idle class queues. */
2890	if (wl_class == IDLE_WORKLOAD)
2891		return false;
2892
2893	/* We do for queues that were marked with idle window flag. */
2894	if (cfq_cfqq_idle_window(cfqq) &&
2895	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
2896		return true;
2897
2898	/*
2899	 * Otherwise, we do only if they are the last ones
2900	 * in their service tree.
2901	 */
2902	if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2903	   !cfq_io_thinktime_big(cfqd, &st->ttime, false))
2904		return true;
2905	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
2906	return false;
2907}
2908
2909static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2910{
2911	struct cfq_queue *cfqq = cfqd->active_queue;
2912	struct cfq_rb_root *st = cfqq->service_tree;
2913	struct cfq_io_cq *cic;
2914	unsigned long sl, group_idle = 0;
2915
2916	/*
2917	 * SSD device without seek penalty, disable idling. But only do so
2918	 * for devices that support queuing, otherwise we still have a problem
2919	 * with sync vs async workloads.
2920	 */
2921	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2922		return;
2923
2924	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2925	WARN_ON(cfq_cfqq_slice_new(cfqq));
2926
2927	/*
2928	 * idle is disabled, either manually or by past process history
2929	 */
2930	if (!cfq_should_idle(cfqd, cfqq)) {
2931		/* no queue idling. Check for group idling */
2932		if (cfqd->cfq_group_idle)
2933			group_idle = cfqd->cfq_group_idle;
2934		else
2935			return;
2936	}
2937
2938	/*
2939	 * still active requests from this queue, don't idle
2940	 */
2941	if (cfqq->dispatched)
2942		return;
2943
2944	/*
2945	 * task has exited, don't wait
2946	 */
2947	cic = cfqd->active_cic;
2948	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
2949		return;
2950
2951	/*
2952	 * If our average think time is larger than the remaining time
2953	 * slice, then don't idle. This avoids overrunning the allotted
2954	 * time slice.
2955	 */
2956	if (sample_valid(cic->ttime.ttime_samples) &&
2957	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2958		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2959			     cic->ttime.ttime_mean);
2960		return;
2961	}
2962
2963	/*
2964	 * If there are other queues in the group, or this is the only group
2965	 * and it has too big a thinktime, don't do group idle.
2966	 */
2967	if (group_idle &&
2968	    (cfqq->cfqg->nr_cfqq > 1 ||
2969	     cfq_io_thinktime_big(cfqd, &st->ttime, true)))
2970		return;
2971
2972	cfq_mark_cfqq_wait_request(cfqq);
2973
2974	if (group_idle)
2975		sl = cfqd->cfq_group_idle;
2976	else
2977		sl = cfqd->cfq_slice_idle;
2978
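	/*
	 * Illustrative note (not in the original source): with the default
	 * tunables both cfq_slice_idle and cfq_group_idle start at HZ / 125,
	 * roughly 8ms at HZ=1000; cfq_registered_queue() later forces
	 * slice_idle to 0 for non-rotational devices.
	 */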
2979	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2980	cfqg_stats_set_start_idle_time(cfqq->cfqg);
2981	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2982			group_idle ? 1 : 0);
2983}
2984
2985/*
2986 * Move request from internal lists to the request queue dispatch list.
2987 */
2988static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2989{
2990	struct cfq_data *cfqd = q->elevator->elevator_data;
2991	struct cfq_queue *cfqq = RQ_CFQQ(rq);
2992
2993	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2994
2995	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2996	cfq_remove_request(rq);
2997	cfqq->dispatched++;
2998	(RQ_CFQG(rq))->dispatched++;
2999	elv_dispatch_sort(q, rq);
3000
3001	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
3002	cfqq->nr_sectors += blk_rq_sectors(rq);
3003}
3004
3005/*
3006 * return expired entry, or NULL to just start from scratch in rbtree
3007 */
3008static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
3009{
3010	struct request *rq = NULL;
3011
3012	if (cfq_cfqq_fifo_expire(cfqq))
3013		return NULL;
3014
3015	cfq_mark_cfqq_fifo_expire(cfqq);
3016
3017	if (list_empty(&cfqq->fifo))
3018		return NULL;
3019
3020	rq = rq_entry_fifo(cfqq->fifo.next);
3021	if (time_before(jiffies, rq->fifo_time))
3022		rq = NULL;
3023
3024	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
3025	return rq;
3026}
3027
3028static inline int
3029cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3030{
3031	const int base_rq = cfqd->cfq_slice_async_rq;
3032
3033	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
3034
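	/*
	 * Illustrative example (not in the original source): with the default
	 * cfq_slice_async_rq of 2 and assuming IOPRIO_BE_NR == 8, a
	 * best-effort ioprio of 4 allows 2 * 2 * (8 - 4) = 16 requests per
	 * slice, ioprio 0 allows 32 and ioprio 7 allows 4.
	 */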
3035	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
3036}
3037
3038/*
3039 * Must be called with the queue_lock held.
3040 */
3041static int cfqq_process_refs(struct cfq_queue *cfqq)
3042{
3043	int process_refs, io_refs;
3044
3045	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
3046	process_refs = cfqq->ref - io_refs;
3047	BUG_ON(process_refs < 0);
3048	return process_refs;
3049}
3050
3051static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
3052{
3053	int process_refs, new_process_refs;
3054	struct cfq_queue *__cfqq;
3055
3056	/*
3057	 * If there are no process references on the new_cfqq, then it is
3058	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
3059	 * chain may have dropped their last reference (not just their
3060	 * last process reference).
3061	 */
3062	if (!cfqq_process_refs(new_cfqq))
3063		return;
3064
3065	/* Avoid a circular list and skip interim queue merges */
3066	while ((__cfqq = new_cfqq->new_cfqq)) {
3067		if (__cfqq == cfqq)
3068			return;
3069		new_cfqq = __cfqq;
3070	}
3071
3072	process_refs = cfqq_process_refs(cfqq);
3073	new_process_refs = cfqq_process_refs(new_cfqq);
3074	/*
3075	 * If the process for the cfqq has gone away, there is no
3076	 * sense in merging the queues.
3077	 */
3078	if (process_refs == 0 || new_process_refs == 0)
3079		return;
3080
3081	/*
3082	 * Merge in the direction of the lesser amount of work.
3083	 */
3084	if (new_process_refs >= process_refs) {
3085		cfqq->new_cfqq = new_cfqq;
3086		new_cfqq->ref += process_refs;
3087	} else {
3088		new_cfqq->new_cfqq = cfqq;
3089		cfqq->ref += new_process_refs;
3090	}
3091}
3092
3093static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
3094			struct cfq_group *cfqg, enum wl_class_t wl_class)
3095{
3096	struct cfq_queue *queue;
3097	int i;
3098	bool key_valid = false;
3099	unsigned long lowest_key = 0;
3100	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
3101
3102	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
3103		/* select the one with lowest rb_key */
3104		queue = cfq_rb_first(st_for(cfqg, wl_class, i));
3105		if (queue &&
3106		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
3107			lowest_key = queue->rb_key;
3108			cur_best = i;
3109			key_valid = true;
3110		}
3111	}
3112
3113	return cur_best;
3114}
3115
3116static void
3117choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
3118{
3119	unsigned slice;
3120	unsigned count;
3121	struct cfq_rb_root *st;
3122	unsigned group_slice;
3123	enum wl_class_t original_class = cfqd->serving_wl_class;
3124
3125	/* Choose next priority. RT > BE > IDLE */
3126	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
3127		cfqd->serving_wl_class = RT_WORKLOAD;
3128	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
3129		cfqd->serving_wl_class = BE_WORKLOAD;
3130	else {
3131		cfqd->serving_wl_class = IDLE_WORKLOAD;
3132		cfqd->workload_expires = jiffies + 1;
3133		return;
3134	}
3135
3136	if (original_class != cfqd->serving_wl_class)
3137		goto new_workload;
3138
3139	/*
3140	 * For RT and BE, we have to choose also the type
3141	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
3142	 * expiration time
3143	 */
3144	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
3145	count = st->count;
3146
3147	/*
3148	 * check workload expiration, and that we still have other queues ready
3149	 */
3150	if (count && !time_after(jiffies, cfqd->workload_expires))
3151		return;
3152
3153new_workload:
3154	/* otherwise select new workload type */
3155	cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
3156					cfqd->serving_wl_class);
3157	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
3158	count = st->count;
3159
3160	/*
3161	 * the workload slice is computed as a fraction of target latency
3162	 * proportional to the number of queues in that workload, over
3163	 * all the queues in the same priority class
3164	 */
3165	group_slice = cfq_group_slice(cfqd, cfqg);
3166
3167	slice = group_slice * count /
3168		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
3169		      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
3170					cfqg));
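	/*
	 * Illustrative example (not in the original source, assuming a group
	 * slice of 300ms): if the serving class has 4 busy queues and 2 of
	 * them are of the chosen workload type, this workload gets
	 * 300 * 2 / 4 = 150ms before we switch to the next type.
	 */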
3171
3172	if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
3173		unsigned int tmp;
3174
3175		/*
3176		 * Async queues are currently system wide. Just taking the
3177		 * proportion of queues within the same group will lead to a
3178		 * higher async ratio system wide, as the root group generally
3179		 * has a higher weight. A more accurate approach would be to
3180		 * calculate a system wide async/sync ratio.
3181		 */
3182		tmp = cfqd->cfq_target_latency *
3183			cfqg_busy_async_queues(cfqd, cfqg);
3184		tmp = tmp/cfqd->busy_queues;
3185		slice = min_t(unsigned, slice, tmp);
3186
3187		/* async workload slice is scaled down according to
3188		 * the sync/async slice ratio. */
3189		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
3190	} else
3191		/* sync workload slice is at least 2 * cfq_slice_idle */
3192		slice = max(slice, 2 * cfqd->cfq_slice_idle);
3193
3194	slice = max_t(unsigned, slice, CFQ_MIN_TT);
3195	cfq_log(cfqd, "workload slice:%d", slice);
3196	cfqd->workload_expires = jiffies + slice;
3197}
3198
3199static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
3200{
3201	struct cfq_rb_root *st = &cfqd->grp_service_tree;
3202	struct cfq_group *cfqg;
3203
3204	if (RB_EMPTY_ROOT(&st->rb))
3205		return NULL;
3206	cfqg = cfq_rb_first_group(st);
3207	update_min_vdisktime(st);
3208	return cfqg;
3209}
3210
3211static void cfq_choose_cfqg(struct cfq_data *cfqd)
3212{
3213	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3214
3215	cfqd->serving_group = cfqg;
3216
3217	/* Restore the workload type data */
3218	if (cfqg->saved_wl_slice) {
3219		cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
3220		cfqd->serving_wl_type = cfqg->saved_wl_type;
3221		cfqd->serving_wl_class = cfqg->saved_wl_class;
3222	} else
3223		cfqd->workload_expires = jiffies - 1;
3224
3225	choose_wl_class_and_type(cfqd, cfqg);
3226}
3227
3228/*
3229 * Select a queue for service. If we have a current active queue,
3230 * check whether to continue servicing it, or retrieve and set a new one.
3231 */
3232static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
3233{
3234	struct cfq_queue *cfqq, *new_cfqq = NULL;
3235
3236	cfqq = cfqd->active_queue;
3237	if (!cfqq)
3238		goto new_queue;
3239
3240	if (!cfqd->rq_queued)
3241		return NULL;
3242
3243	/*
3244	 * We were waiting for group to get backlogged. Expire the queue
3245	 */
3246	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3247		goto expire;
3248
3249	/*
3250	 * The active queue has run out of time, expire it and select new.
3251	 */
3252	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3253		/*
3254		 * If the slice had not expired at the completion of the last
3255		 * request, we might not have turned on the wait_busy flag.
3256		 * Don't expire the queue yet. Allow the group to get backlogged.
3257		 *
3258		 * The very fact that we have used the slice means we have been
3259		 * idling all along on this queue, and it should be ok to wait
3260		 * for this request to complete.
3261		 */
3262		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3263		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3264			cfqq = NULL;
3265			goto keep_queue;
3266		} else
3267			goto check_group_idle;
3268	}
3269
3270	/*
3271	 * The active queue has requests and isn't expired, allow it to
3272	 * dispatch.
3273	 */
3274	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3275		goto keep_queue;
3276
3277	/*
3278	 * If another queue has a request waiting within our mean seek
3279	 * distance, let it run.  The expire code will check for close
3280	 * cooperators and put the close queue at the front of the service
3281	 * tree.  If possible, merge the expiring queue with the new cfqq.
3282	 */
3283	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
3284	if (new_cfqq) {
3285		if (!cfqq->new_cfqq)
3286			cfq_setup_merge(cfqq, new_cfqq);
3287		goto expire;
3288	}
3289
3290	/*
3291	 * No requests pending. If the active queue still has requests in
3292	 * flight or is idling for a new request, allow either of these
3293	 * conditions to happen (or time out) before selecting a new queue.
3294	 */
3295	if (timer_pending(&cfqd->idle_slice_timer)) {
3296		cfqq = NULL;
3297		goto keep_queue;
3298	}
3299
3300	/*
3301	 * This is a deep seek queue, but the device is much faster than
3302	 * the queue can deliver; don't idle.
3303	 */
3304	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3305	    (cfq_cfqq_slice_new(cfqq) ||
3306	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
3307		cfq_clear_cfqq_deep(cfqq);
3308		cfq_clear_cfqq_idle_window(cfqq);
3309	}
3310
3311	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3312		cfqq = NULL;
3313		goto keep_queue;
3314	}
3315
3316	/*
3317	 * If group idle is enabled and there are requests dispatched from
3318	 * this group, wait for requests to complete.
3319	 */
3320check_group_idle:
3321	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3322	    cfqq->cfqg->dispatched &&
3323	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
3324		cfqq = NULL;
3325		goto keep_queue;
3326	}
3327
3328expire:
3329	cfq_slice_expired(cfqd, 0);
3330new_queue:
3331	/*
3332	 * Current queue expired. Check if we have to switch to a new
3333	 * service tree
3334	 */
3335	if (!new_cfqq)
3336		cfq_choose_cfqg(cfqd);
3337
3338	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
3339keep_queue:
3340	return cfqq;
3341}
3342
3343static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
3344{
3345	int dispatched = 0;
3346
3347	while (cfqq->next_rq) {
3348		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3349		dispatched++;
3350	}
3351
3352	BUG_ON(!list_empty(&cfqq->fifo));
3353
3354	/* By default cfqq is not expired if it is empty. Do it explicitly */
3355	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
3356	return dispatched;
3357}
3358
3359/*
3360 * Drain our current requests. Used for barriers and when switching
3361 * io schedulers on-the-fly.
3362 */
3363static int cfq_forced_dispatch(struct cfq_data *cfqd)
3364{
3365	struct cfq_queue *cfqq;
3366	int dispatched = 0;
3367
3368	/* Expire the timeslice of the current active queue first */
3369	cfq_slice_expired(cfqd, 0);
3370	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3371		__cfq_set_active_queue(cfqd, cfqq);
3372		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3373	}
3374
3375	BUG_ON(cfqd->busy_queues);
3376
3377	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
3378	return dispatched;
3379}
3380
3381static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3382	struct cfq_queue *cfqq)
3383{
3384	/* the queue hasn't finished any request, can't estimate */
3385	if (cfq_cfqq_slice_new(cfqq))
3386		return true;
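	/*
	 * Added note (not in the original source): the check below assumes
	 * each already-dispatched request costs roughly one cfq_slice_idle
	 * to complete; if that would push us past slice_end, treat the slice
	 * as good as used.
	 */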
3387	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
3388		cfqq->slice_end))
3389		return true;
3390
3391	return false;
3392}
3393
3394static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3395{
3396	unsigned int max_dispatch;
3397
3398	/*
3399	 * Drain async requests before we start sync IO
3400	 */
3401	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
3402		return false;
3403
3404	/*
3405	 * If this is an async queue and we have sync IO in flight, let it wait
3406	 */
3407	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
3408		return false;
3409
3410	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
3411	if (cfq_class_idle(cfqq))
3412		max_dispatch = 1;
3413
3414	/*
3415	 * Does this cfqq already have too much IO in flight?
3416	 */
3417	if (cfqq->dispatched >= max_dispatch) {
3418		bool promote_sync = false;
3419		/*
3420		 * idle queue must always only have a single IO in flight
3421		 */
3422		if (cfq_class_idle(cfqq))
3423			return false;
3424
3425		/*
3426		 * If there is only one sync queue, we can ignore the async
3427		 * queue here and give the sync queue no dispatch limit. The
3428		 * reason is that a sync queue can preempt an async queue, so
3429		 * limiting the sync queue doesn't make sense. This is useful
3430		 * for the aiostress test.
3431		 */
3432		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3433			promote_sync = true;
3434
3435		/*
3436		 * We have other queues, don't allow more IO from this one
3437		 */
3438		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3439				!promote_sync)
3440			return false;
3441
3442		/*
3443		 * Sole queue user, no limit
3444		 */
3445		if (cfqd->busy_queues == 1 || promote_sync)
3446			max_dispatch = -1;
3447		else
3448			/*
3449			 * Normally we start throttling cfqq when cfq_quantum/2
3450			 * requests have been dispatched. But we can drive
3451			 * deeper queue depths at the beginning of the slice,
3452			 * subject to an upper limit of cfq_quantum.
3453			 */
3454			max_dispatch = cfqd->cfq_quantum;
3455	}
3456
3457	/*
3458	 * Async queues must wait a bit before being allowed dispatch.
3459	 * We also ramp up the dispatch depth gradually for async IO,
3460	 * based on the last sync IO we serviced.
3461	 */
3462	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
3463		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
3464		unsigned int depth;
3465
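		/*
		 * Illustrative note (not in the original source): with the
		 * default sync slice of 100ms (cfq_slice_sync = HZ / 10), the
		 * allowed async depth grows by one for every 100ms that has
		 * passed since the last delayed sync completion, e.g. ~300ms
		 * gives a depth of 3.
		 */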
3466		depth = last_sync / cfqd->cfq_slice[1];
3467		if (!depth && !cfqq->dispatched)
3468			depth = 1;
3469		if (depth < max_dispatch)
3470			max_dispatch = depth;
3471	}
3472
3473	/*
3474	 * If we're below the current max, allow a dispatch
3475	 */
3476	return cfqq->dispatched < max_dispatch;
3477}
3478
3479/*
3480 * Dispatch a request from cfqq, moving them to the request queue
3481 * dispatch list.
3482 */
3483static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3484{
3485	struct request *rq;
3486
3487	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3488
3489	if (!cfq_may_dispatch(cfqd, cfqq))
3490		return false;
3491
3492	/*
3493	 * follow expired path, else get first next available
3494	 */
3495	rq = cfq_check_fifo(cfqq);
3496	if (!rq)
3497		rq = cfqq->next_rq;
3498
3499	/*
3500	 * insert request into driver dispatch list
3501	 */
3502	cfq_dispatch_insert(cfqd->queue, rq);
3503
3504	if (!cfqd->active_cic) {
3505		struct cfq_io_cq *cic = RQ_CIC(rq);
3506
3507		atomic_long_inc(&cic->icq.ioc->refcount);
3508		cfqd->active_cic = cic;
3509	}
3510
3511	return true;
3512}
3513
3514/*
3515 * Find the cfqq that we need to service and move a request from that to the
3516 * dispatch list
3517 */
3518static int cfq_dispatch_requests(struct request_queue *q, int force)
3519{
3520	struct cfq_data *cfqd = q->elevator->elevator_data;
3521	struct cfq_queue *cfqq;
3522
3523	if (!cfqd->busy_queues)
3524		return 0;
3525
3526	if (unlikely(force))
3527		return cfq_forced_dispatch(cfqd);
3528
3529	cfqq = cfq_select_queue(cfqd);
3530	if (!cfqq)
3531		return 0;
3532
3533	/*
3534	 * Dispatch a request from this cfqq, if it is allowed
3535	 */
3536	if (!cfq_dispatch_request(cfqd, cfqq))
3537		return 0;
3538
3539	cfqq->slice_dispatch++;
3540	cfq_clear_cfqq_must_dispatch(cfqq);
3541
3542	/*
3543	 * expire an async queue immediately if it has used up its slice. idle
3544	 * queues always expire after 1 dispatch round.
3545	 */
3546	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3547	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3548	    cfq_class_idle(cfqq))) {
3549		cfqq->slice_end = jiffies + 1;
3550		cfq_slice_expired(cfqd, 0);
3551	}
3552
3553	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
3554	return 1;
3555}
3556
3557/*
3558 * task holds one reference to the queue, dropped when task exits. each rq
3559 * in-flight on this queue also holds a reference, dropped when rq is freed.
3560 *
3561 * Each cfq queue took a reference on the parent group. Drop it now.
3562 * queue lock must be held here.
3563 */
3564static void cfq_put_queue(struct cfq_queue *cfqq)
3565{
3566	struct cfq_data *cfqd = cfqq->cfqd;
3567	struct cfq_group *cfqg;
3568
3569	BUG_ON(cfqq->ref <= 0);
3570
3571	cfqq->ref--;
3572	if (cfqq->ref)
3573		return;
3574
3575	cfq_log_cfqq(cfqd, cfqq, "put_queue");
3576	BUG_ON(rb_first(&cfqq->sort_list));
3577	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
3578	cfqg = cfqq->cfqg;
3579
3580	if (unlikely(cfqd->active_queue == cfqq)) {
3581		__cfq_slice_expired(cfqd, cfqq, 0);
3582		cfq_schedule_dispatch(cfqd);
3583	}
3584
3585	BUG_ON(cfq_cfqq_on_rr(cfqq));
3586	kmem_cache_free(cfq_pool, cfqq);
3587	cfqg_put(cfqg);
3588}
3589
3590static void cfq_put_cooperator(struct cfq_queue *cfqq)
3591{
3592	struct cfq_queue *__cfqq, *next;
3593
3594	/*
3595	 * If this queue was scheduled to merge with another queue, be
3596	 * sure to drop the reference taken on that queue (and others in
3597	 * the merge chain).  See cfq_setup_merge and cfq_merge_cfqqs.
3598	 */
3599	__cfqq = cfqq->new_cfqq;
3600	while (__cfqq) {
3601		if (__cfqq == cfqq) {
3602			WARN(1, "cfqq->new_cfqq loop detected\n");
3603			break;
3604		}
3605		next = __cfqq->new_cfqq;
3606		cfq_put_queue(__cfqq);
3607		__cfqq = next;
3608	}
3609}
3610
3611static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3612{
3613	if (unlikely(cfqq == cfqd->active_queue)) {
3614		__cfq_slice_expired(cfqd, cfqq, 0);
3615		cfq_schedule_dispatch(cfqd);
3616	}
3617
3618	cfq_put_cooperator(cfqq);
3619
3620	cfq_put_queue(cfqq);
3621}
3622
3623static void cfq_init_icq(struct io_cq *icq)
3624{
3625	struct cfq_io_cq *cic = icq_to_cic(icq);
3626
3627	cic->ttime.last_end_request = jiffies;
3628}
3629
3630static void cfq_exit_icq(struct io_cq *icq)
3631{
3632	struct cfq_io_cq *cic = icq_to_cic(icq);
3633	struct cfq_data *cfqd = cic_to_cfqd(cic);
3634
3635	if (cic_to_cfqq(cic, false)) {
3636		cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
3637		cic_set_cfqq(cic, NULL, false);
3638	}
3639
3640	if (cic_to_cfqq(cic, true)) {
3641		cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
3642		cic_set_cfqq(cic, NULL, true);
3643	}
3644}
3645
3646static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
3647{
3648	struct task_struct *tsk = current;
3649	int ioprio_class;
3650
3651	if (!cfq_cfqq_prio_changed(cfqq))
3652		return;
3653
3654	ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3655	switch (ioprio_class) {
3656	default:
3657		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3658	case IOPRIO_CLASS_NONE:
3659		/*
3660		 * no prio set, inherit CPU scheduling settings
3661		 */
3662		cfqq->ioprio = task_nice_ioprio(tsk);
3663		cfqq->ioprio_class = task_nice_ioclass(tsk);
3664		break;
3665	case IOPRIO_CLASS_RT:
3666		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3667		cfqq->ioprio_class = IOPRIO_CLASS_RT;
3668		break;
3669	case IOPRIO_CLASS_BE:
3670		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3671		cfqq->ioprio_class = IOPRIO_CLASS_BE;
3672		break;
3673	case IOPRIO_CLASS_IDLE:
3674		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3675		cfqq->ioprio = 7;
3676		cfq_clear_cfqq_idle_window(cfqq);
3677		break;
3678	}
3679
3680	/*
3681	 * keep track of original prio settings in case we have to temporarily
3682	 * elevate the priority of this queue
3683	 */
3684	cfqq->org_ioprio = cfqq->ioprio;
3685	cfq_clear_cfqq_prio_changed(cfqq);
3686}
3687
3688static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
3689{
3690	int ioprio = cic->icq.ioc->ioprio;
3691	struct cfq_data *cfqd = cic_to_cfqd(cic);
3692	struct cfq_queue *cfqq;
3693
3694	/*
3695	 * Check whether ioprio has changed.  The condition may trigger
3696	 * spuriously on a newly created cic but there's no harm.
3697	 */
3698	if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
3699		return;
3700
3701	cfqq = cic_to_cfqq(cic, false);
3702	if (cfqq) {
3703		cfq_put_queue(cfqq);
3704		cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
3705		cic_set_cfqq(cic, cfqq, false);
3706	}
3707
3708	cfqq = cic_to_cfqq(cic, true);
3709	if (cfqq)
3710		cfq_mark_cfqq_prio_changed(cfqq);
3711
3712	cic->ioprio = ioprio;
3713}
3714
3715static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3716			  pid_t pid, bool is_sync)
3717{
3718	RB_CLEAR_NODE(&cfqq->rb_node);
3719	RB_CLEAR_NODE(&cfqq->p_node);
3720	INIT_LIST_HEAD(&cfqq->fifo);
3721
3722	cfqq->ref = 0;
3723	cfqq->cfqd = cfqd;
3724
3725	cfq_mark_cfqq_prio_changed(cfqq);
3726
3727	if (is_sync) {
3728		if (!cfq_class_idle(cfqq))
3729			cfq_mark_cfqq_idle_window(cfqq);
3730		cfq_mark_cfqq_sync(cfqq);
3731	}
3732	cfqq->pid = pid;
3733}
3734
3735#ifdef CONFIG_CFQ_GROUP_IOSCHED
3736static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3737{
3738	struct cfq_data *cfqd = cic_to_cfqd(cic);
3739	struct cfq_queue *cfqq;
3740	uint64_t serial_nr;
3741
3742	rcu_read_lock();
3743	serial_nr = bio_blkcg(bio)->css.serial_nr;
3744	rcu_read_unlock();
3745
3746	/*
3747	 * Check whether blkcg has changed.  The condition may trigger
3748	 * spuriously on a newly created cic but there's no harm.
3749	 */
3750	if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
3751		return;
3752
3753	/*
3754	 * Drop reference to queues.  New queues will be assigned in new
3755	 * group upon arrival of fresh requests.
3756	 */
3757	cfqq = cic_to_cfqq(cic, false);
3758	if (cfqq) {
3759		cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3760		cic_set_cfqq(cic, NULL, false);
3761		cfq_put_queue(cfqq);
3762	}
3763
3764	cfqq = cic_to_cfqq(cic, true);
3765	if (cfqq) {
3766		cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3767		cic_set_cfqq(cic, NULL, true);
3768		cfq_put_queue(cfqq);
3769	}
3770
3771	cic->blkcg_serial_nr = serial_nr;
3772}
3773#else
3774static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
3775#endif  /* CONFIG_CFQ_GROUP_IOSCHED */
3776
3777static struct cfq_queue **
3778cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
3779{
3780	switch (ioprio_class) {
3781	case IOPRIO_CLASS_RT:
3782		return &cfqg->async_cfqq[0][ioprio];
3783	case IOPRIO_CLASS_NONE:
3784		ioprio = IOPRIO_NORM;
3785		/* fall through */
3786	case IOPRIO_CLASS_BE:
3787		return &cfqg->async_cfqq[1][ioprio];
3788	case IOPRIO_CLASS_IDLE:
3789		return &cfqg->async_idle_cfqq;
3790	default:
3791		BUG();
3792	}
3793}
3794
3795static struct cfq_queue *
3796cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3797	      struct bio *bio)
3798{
3799	int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3800	int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
3801	struct cfq_queue **async_cfqq = NULL;
3802	struct cfq_queue *cfqq;
3803	struct cfq_group *cfqg;
3804
3805	rcu_read_lock();
3806	cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
3807	if (!cfqg) {
3808		cfqq = &cfqd->oom_cfqq;
3809		goto out;
3810	}
3811
3812	if (!is_sync) {
3813		if (!ioprio_valid(cic->ioprio)) {
3814			struct task_struct *tsk = current;
3815			ioprio = task_nice_ioprio(tsk);
3816			ioprio_class = task_nice_ioclass(tsk);
3817		}
3818		async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
3819		cfqq = *async_cfqq;
3820		if (cfqq)
3821			goto out;
3822	}
3823
3824	cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
3825				     cfqd->queue->node);
3826	if (!cfqq) {
3827		cfqq = &cfqd->oom_cfqq;
3828		goto out;
3829	}
3830
3831	cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3832	cfq_init_prio_data(cfqq, cic);
3833	cfq_link_cfqq_cfqg(cfqq, cfqg);
3834	cfq_log_cfqq(cfqd, cfqq, "alloced");
3835
3836	if (async_cfqq) {
3837		/* a new async queue is created, pin and remember */
3838		cfqq->ref++;
3839		*async_cfqq = cfqq;
3840	}
3841out:
3842	cfqq->ref++;
3843	rcu_read_unlock();
3844	return cfqq;
3845}
3846
3847static void
3848__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3849{
3850	unsigned long elapsed = jiffies - ttime->last_end_request;
3851	elapsed = min(elapsed, 2UL * slice_idle);
3852
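	/*
	 * Illustrative note (not in the original source): this is a
	 * fixed-point exponentially decaying average. Each update keeps 7/8
	 * of the old value, so ttime_samples converges towards 256 and
	 * ttime_total towards 256 * elapsed; ttime_mean then approximates
	 * the recent average think time in jiffies (+128 rounds the divide).
	 */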
3853	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3854	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3855	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3856}
3857
3858static void
3859cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3860			struct cfq_io_cq *cic)
3861{
3862	if (cfq_cfqq_sync(cfqq)) {
3863		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3864		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3865			cfqd->cfq_slice_idle);
3866	}
3867#ifdef CONFIG_CFQ_GROUP_IOSCHED
3868	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3869#endif
3870}
3871
3872static void
3873cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3874		       struct request *rq)
3875{
3876	sector_t sdist = 0;
3877	sector_t n_sec = blk_rq_sectors(rq);
3878	if (cfqq->last_request_pos) {
3879		if (cfqq->last_request_pos < blk_rq_pos(rq))
3880			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3881		else
3882			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3883	}
3884
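	/*
	 * Illustrative note (not in the original source): seek_history is a
	 * 32-bit shift register, one bit per request. On rotational devices
	 * a bit is set when the seek distance exceeds CFQQ_SEEK_THR; on
	 * non-rotational devices it is set for small requests. CFQQ_SEEKY()
	 * then flags the queue once more than 32/8 = 4 of those bits are set.
	 */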
3885	cfqq->seek_history <<= 1;
3886	if (blk_queue_nonrot(cfqd->queue))
3887		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3888	else
3889		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3890}
3891
3892/*
3893 * Disable idle window if the process thinks too long or seeks so much that
3894 * it doesn't matter
3895 */
3896static void
3897cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3898		       struct cfq_io_cq *cic)
3899{
3900	int old_idle, enable_idle;
3901
3902	/*
3903	 * Don't idle for async or idle io prio class
3904	 */
3905	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3906		return;
3907
3908	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3909
3910	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3911		cfq_mark_cfqq_deep(cfqq);
3912
3913	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3914		enable_idle = 0;
3915	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
3916		 !cfqd->cfq_slice_idle ||
3917		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3918		enable_idle = 0;
3919	else if (sample_valid(cic->ttime.ttime_samples)) {
3920		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3921			enable_idle = 0;
3922		else
3923			enable_idle = 1;
3924	}
3925
3926	if (old_idle != enable_idle) {
3927		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3928		if (enable_idle)
3929			cfq_mark_cfqq_idle_window(cfqq);
3930		else
3931			cfq_clear_cfqq_idle_window(cfqq);
3932	}
3933}
3934
3935/*
3936 * Check if new_cfqq should preempt the currently active queue. Return false
3937 * for no (or if we aren't sure); returning true will cause a preempt.
3938 */
3939static bool
3940cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3941		   struct request *rq)
3942{
3943	struct cfq_queue *cfqq;
3944
3945	cfqq = cfqd->active_queue;
3946	if (!cfqq)
3947		return false;
3948
3949	if (cfq_class_idle(new_cfqq))
3950		return false;
3951
3952	if (cfq_class_idle(cfqq))
3953		return true;
3954
3955	/*
3956	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3957	 */
3958	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3959		return false;
3960
3961	/*
3962	 * if the new request is sync, but the currently running queue is
3963	 * not, let the sync request have priority.
3964	 */
3965	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3966		return true;
3967
3968	/*
3969	 * Treat ancestors of current cgroup the same way as current cgroup.
3970	 * For anybody else we disallow preemption to guarantee service
3971	 * fairness among cgroups.
3972	 */
3973	if (!cfqg_is_descendant(cfqq->cfqg, new_cfqq->cfqg))
3974		return false;
3975
3976	if (cfq_slice_used(cfqq))
3977		return true;
3978
3979	/*
3980	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3981	 */
3982	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3983		return true;
3984
3985	WARN_ON_ONCE(cfqq->ioprio_class != new_cfqq->ioprio_class);
3986	/* Allow preemption only if we are idling on sync-noidle tree */
3987	if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
3988	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3989	    RB_EMPTY_ROOT(&cfqq->sort_list))
3990		return true;
3991
3992	/*
3993	 * So both queues are sync. Let the new request get disk time if
3994	 * it's a metadata request and the current queue is doing regular IO.
3995	 */
3996	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3997		return true;
3998
3999	/* An idle queue should not be idle now for some reason */
4000	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
4001		return true;
4002
4003	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
4004		return false;
4005
4006	/*
4007	 * if this request is as-good as one we would expect from the
4008	 * current cfqq, let it preempt
4009	 */
4010	if (cfq_rq_close(cfqd, cfqq, rq))
4011		return true;
4012
4013	return false;
4014}
4015
4016/*
4017 * cfqq preempts the active queue. if we allowed preempt with no slice left,
4018 * let it have half of its nominal slice.
4019 */
4020static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4021{
4022	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
4023
4024	cfq_log_cfqq(cfqd, cfqq, "preempt");
4025	cfq_slice_expired(cfqd, 1);
4026
4027	/*
4028	 * workload type is changed, don't save slice, otherwise preempt
4029	 * doesn't happen
4030	 */
4031	if (old_type != cfqq_type(cfqq))
4032		cfqq->cfqg->saved_wl_slice = 0;
4033
4034	/*
4035	 * Put the new queue at the front of the current list,
4036	 * so we know that it will be selected next.
4037	 */
4038	BUG_ON(!cfq_cfqq_on_rr(cfqq));
4039
4040	cfq_service_tree_add(cfqd, cfqq, 1);
4041
4042	cfqq->slice_end = 0;
4043	cfq_mark_cfqq_slice_new(cfqq);
4044}
4045
4046/*
4047 * Called when a new fs request (rq) is added (to cfqq). Check if there's
4048 * something we should do about it
4049 */
4050static void
4051cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
4052		struct request *rq)
4053{
4054	struct cfq_io_cq *cic = RQ_CIC(rq);
4055
4056	cfqd->rq_queued++;
4057	if (rq->cmd_flags & REQ_PRIO)
4058		cfqq->prio_pending++;
4059
4060	cfq_update_io_thinktime(cfqd, cfqq, cic);
4061	cfq_update_io_seektime(cfqd, cfqq, rq);
4062	cfq_update_idle_window(cfqd, cfqq, cic);
4063
4064	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4065
4066	if (cfqq == cfqd->active_queue) {
4067		/*
4068		 * Remember that we saw a request from this process, but
4069		 * don't start queuing just yet. Otherwise we risk seeing lots
4070		 * of tiny requests, because we disrupt the normal plugging
4071		 * and merging. If the request is already larger than a single
4072		 * page, let it rip immediately. For that case we assume that
4073		 * merging is already done. Ditto for a busy system that
4074		 * has other work pending, don't risk delaying until the
4075		 * idle timer unplug to continue working.
4076		 */
4077		if (cfq_cfqq_wait_request(cfqq)) {
4078			if (blk_rq_bytes(rq) > PAGE_SIZE ||
4079			    cfqd->busy_queues > 1) {
4080				cfq_del_timer(cfqd, cfqq);
4081				cfq_clear_cfqq_wait_request(cfqq);
4082				__blk_run_queue(cfqd->queue);
4083			} else {
4084				cfqg_stats_update_idle_time(cfqq->cfqg);
4085				cfq_mark_cfqq_must_dispatch(cfqq);
4086			}
4087		}
4088	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
4089		/*
4090		 * not the active queue - expire current slice if it is
4091		 * idle and has expired its mean thinktime, or this new queue
4092		 * has some old slice time left and is of higher priority or
4093		 * this new queue is RT and the current one is BE
4094		 */
4095		cfq_preempt_queue(cfqd, cfqq);
4096		__blk_run_queue(cfqd->queue);
4097	}
4098}
4099
4100static void cfq_insert_request(struct request_queue *q, struct request *rq)
4101{
4102	struct cfq_data *cfqd = q->elevator->elevator_data;
4103	struct cfq_queue *cfqq = RQ_CFQQ(rq);
4104
4105	cfq_log_cfqq(cfqd, cfqq, "insert_request");
4106	cfq_init_prio_data(cfqq, RQ_CIC(rq));
4107
4108	rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
4109	list_add_tail(&rq->queuelist, &cfqq->fifo);
4110	cfq_add_rq_rb(rq);
4111	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
4112				 rq->cmd_flags);
4113	cfq_rq_enqueued(cfqd, cfqq, rq);
4114}
4115
4116/*
4117 * Update hw_tag based on peak queue depth over 50 samples under
4118 * sufficient load.
4119 */
4120static void cfq_update_hw_tag(struct cfq_data *cfqd)
4121{
4122	struct cfq_queue *cfqq = cfqd->active_queue;
4123
4124	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
4125		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
4126
4127	if (cfqd->hw_tag == 1)
4128		return;
4129
4130	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
4131	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
4132		return;
4133
4134	/*
4135	 * If the active queue doesn't have enough requests and can idle,
4136	 * cfq might not dispatch sufficient requests to hardware. Don't
4137	 * zero hw_tag in this case.
4138	 */
4139	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
4140	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
4141	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
4142		return;
4143
4144	if (cfqd->hw_tag_samples++ < 50)
4145		return;
4146
4147	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
4148		cfqd->hw_tag = 1;
4149	else
4150		cfqd->hw_tag = 0;
4151}
4152
4153static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4154{
4155	struct cfq_io_cq *cic = cfqd->active_cic;
4156
4157	/* If the queue already has requests, don't wait */
4158	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4159		return false;
4160
4161	/* If there are other queues in the group, don't wait */
4162	if (cfqq->cfqg->nr_cfqq > 1)
4163		return false;
4164
4165	/* the only queue in the group, but think time is big */
4166	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
4167		return false;
4168
4169	if (cfq_slice_used(cfqq))
4170		return true;
4171
4172	/* if slice left is less than think time, wait busy */
4173	if (cic && sample_valid(cic->ttime.ttime_samples)
4174	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
4175		return true;
4176
4177	/*
4178	 * If the think time is less than a jiffy then ttime_mean=0 and the
4179	 * above will not be true. It might happen that the slice has not
4180	 * expired yet but will expire soon (4-5 ns) during select_queue().
4181	 * To cover the case where think time is less than a jiffy, mark the
4182	 * queue wait busy if only 1 jiffy is left in the slice.
4183	 */
4184	if (cfqq->slice_end - jiffies == 1)
4185		return true;
4186
4187	return false;
4188}
4189
4190static void cfq_completed_request(struct request_queue *q, struct request *rq)
4191{
4192	struct cfq_queue *cfqq = RQ_CFQQ(rq);
4193	struct cfq_data *cfqd = cfqq->cfqd;
4194	const int sync = rq_is_sync(rq);
4195	unsigned long now;
4196
4197	now = jiffies;
4198	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
4199		     !!(rq->cmd_flags & REQ_NOIDLE));
4200
4201	cfq_update_hw_tag(cfqd);
4202
4203	WARN_ON(!cfqd->rq_in_driver);
4204	WARN_ON(!cfqq->dispatched);
4205	cfqd->rq_in_driver--;
4206	cfqq->dispatched--;
4207	(RQ_CFQG(rq))->dispatched--;
4208	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4209				     rq_io_start_time_ns(rq), rq->cmd_flags);
4210
4211	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
4212
4213	if (sync) {
4214		struct cfq_rb_root *st;
4215
4216		RQ_CIC(rq)->ttime.last_end_request = now;
4217
4218		if (cfq_cfqq_on_rr(cfqq))
4219			st = cfqq->service_tree;
4220		else
4221			st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4222					cfqq_type(cfqq));
4223
4224		st->ttime.last_end_request = now;
4225		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
4226			cfqd->last_delayed_sync = now;
4227	}
4228
4229#ifdef CONFIG_CFQ_GROUP_IOSCHED
4230	cfqq->cfqg->ttime.last_end_request = now;
4231#endif
4232
4233	/*
4234	 * If this is the active queue, check if it needs to be expired,
4235	 * or if we want to idle in case it has no pending requests.
4236	 */
4237	if (cfqd->active_queue == cfqq) {
4238		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4239
4240		if (cfq_cfqq_slice_new(cfqq)) {
4241			cfq_set_prio_slice(cfqd, cfqq);
4242			cfq_clear_cfqq_slice_new(cfqq);
4243		}
4244
4245		/*
4246		 * Should we wait for the next request to come in before we
4247		 * expire the queue?
4248		 */
4249		if (cfq_should_wait_busy(cfqd, cfqq)) {
4250			unsigned long extend_sl = cfqd->cfq_slice_idle;
4251			if (!cfqd->cfq_slice_idle)
4252				extend_sl = cfqd->cfq_group_idle;
4253			cfqq->slice_end = jiffies + extend_sl;
4254			cfq_mark_cfqq_wait_busy(cfqq);
4255			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
4256		}
4257
4258		/*
4259		 * Idling is not enabled on:
4260		 * - expired queues
4261		 * - idle-priority queues
4262		 * - async queues
4263		 * - queues with still some requests queued
4264		 * - when there is a close cooperator
4265		 */
4266		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
4267			cfq_slice_expired(cfqd, 1);
4268		else if (sync && cfqq_empty &&
4269			 !cfq_close_cooperator(cfqd, cfqq)) {
4270			cfq_arm_slice_timer(cfqd);
4271		}
4272	}
4273
4274	if (!cfqd->rq_in_driver)
4275		cfq_schedule_dispatch(cfqd);
4276}
4277
4278static inline int __cfq_may_queue(struct cfq_queue *cfqq)
4279{
4280	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
4281		cfq_mark_cfqq_must_alloc_slice(cfqq);
4282		return ELV_MQUEUE_MUST;
4283	}
4284
4285	return ELV_MQUEUE_MAY;
4286}
4287
4288static int cfq_may_queue(struct request_queue *q, int rw)
4289{
4290	struct cfq_data *cfqd = q->elevator->elevator_data;
4291	struct task_struct *tsk = current;
4292	struct cfq_io_cq *cic;
4293	struct cfq_queue *cfqq;
4294
4295	/*
4296	 * don't force setup of a queue from here, as a call to may_queue
4297	 * does not necessarily imply that a request actually will be queued.
4298	 * So just look up a possibly existing queue, or return 'may queue'
4299	 * if that fails.
4300	 */
4301	cic = cfq_cic_lookup(cfqd, tsk->io_context);
4302	if (!cic)
4303		return ELV_MQUEUE_MAY;
4304
4305	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
4306	if (cfqq) {
4307		cfq_init_prio_data(cfqq, cic);
4308
4309		return __cfq_may_queue(cfqq);
4310	}
4311
4312	return ELV_MQUEUE_MAY;
4313}
4314
4315/*
4316 * queue lock held here
4317 */
4318static void cfq_put_request(struct request *rq)
4319{
4320	struct cfq_queue *cfqq = RQ_CFQQ(rq);
4321
4322	if (cfqq) {
4323		const int rw = rq_data_dir(rq);
4324
4325		BUG_ON(!cfqq->allocated[rw]);
4326		cfqq->allocated[rw]--;
4327
4328		/* Put down rq reference on cfqg */
4329		cfqg_put(RQ_CFQG(rq));
4330		rq->elv.priv[0] = NULL;
4331		rq->elv.priv[1] = NULL;
4332
4333		cfq_put_queue(cfqq);
4334	}
4335}
4336
4337static struct cfq_queue *
4338cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
4339		struct cfq_queue *cfqq)
4340{
4341	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4342	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
4343	cfq_mark_cfqq_coop(cfqq->new_cfqq);
4344	cfq_put_queue(cfqq);
4345	return cic_to_cfqq(cic, 1);
4346}
4347
4348/*
4349 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4350 * was the last process referring to said cfqq.
4351 */
4352static struct cfq_queue *
4353split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
4354{
4355	if (cfqq_process_refs(cfqq) == 1) {
4356		cfqq->pid = current->pid;
4357		cfq_clear_cfqq_coop(cfqq);
4358		cfq_clear_cfqq_split_coop(cfqq);
4359		return cfqq;
4360	}
4361
4362	cic_set_cfqq(cic, NULL, 1);
4363
4364	cfq_put_cooperator(cfqq);
4365
4366	cfq_put_queue(cfqq);
4367	return NULL;
4368}
4369/*
4370 * Allocate cfq data structures associated with this request.
4371 */
4372static int
4373cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4374		gfp_t gfp_mask)
4375{
4376	struct cfq_data *cfqd = q->elevator->elevator_data;
4377	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
4378	const int rw = rq_data_dir(rq);
4379	const bool is_sync = rq_is_sync(rq);
4380	struct cfq_queue *cfqq;
4381
4382	spin_lock_irq(q->queue_lock);
4383
4384	check_ioprio_changed(cic, bio);
4385	check_blkcg_changed(cic, bio);
4386new_queue:
4387	cfqq = cic_to_cfqq(cic, is_sync);
4388	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
4389		if (cfqq)
4390			cfq_put_queue(cfqq);
4391		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
4392		cic_set_cfqq(cic, cfqq, is_sync);
4393	} else {
4394		/*
4395		 * If the queue was seeky for too long, break it apart.
4396		 */
4397		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
4398			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4399			cfqq = split_cfqq(cic, cfqq);
4400			if (!cfqq)
4401				goto new_queue;
4402		}
4403
4404		/*
4405		 * Check to see if this queue is scheduled to merge with
4406		 * another, closely cooperating queue.  The merging of
4407		 * queues happens here as it must be done in process context.
4408		 * The reference on new_cfqq was taken in merge_cfqqs.
4409		 */
4410		if (cfqq->new_cfqq)
4411			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
4412	}
4413
4414	cfqq->allocated[rw]++;
4415
4416	cfqq->ref++;
4417	cfqg_get(cfqq->cfqg);
4418	rq->elv.priv[0] = cfqq;
4419	rq->elv.priv[1] = cfqq->cfqg;
4420	spin_unlock_irq(q->queue_lock);
4421	return 0;
4422}
4423
4424static void cfq_kick_queue(struct work_struct *work)
4425{
4426	struct cfq_data *cfqd =
4427		container_of(work, struct cfq_data, unplug_work);
4428	struct request_queue *q = cfqd->queue;
4429
4430	spin_lock_irq(q->queue_lock);
4431	__blk_run_queue(cfqd->queue);
4432	spin_unlock_irq(q->queue_lock);
4433}
4434
4435/*
4436 * Timer running if the active_queue is currently idling inside its time slice
4437 */
4438static void cfq_idle_slice_timer(unsigned long data)
4439{
4440	struct cfq_data *cfqd = (struct cfq_data *) data;
4441	struct cfq_queue *cfqq;
4442	unsigned long flags;
4443	int timed_out = 1;
4444
4445	cfq_log(cfqd, "idle timer fired");
4446
4447	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4448
4449	cfqq = cfqd->active_queue;
4450	if (cfqq) {
4451		timed_out = 0;
4452
4453		/*
4454		 * We saw a request before the queue expired, let it through
4455		 */
4456		if (cfq_cfqq_must_dispatch(cfqq))
4457			goto out_kick;
4458
4459		/*
4460		 * expired
4461		 */
4462		if (cfq_slice_used(cfqq))
4463			goto expire;
4464
4465		/*
4466		 * only expire and reinvoke request handler, if there are
4467		 * other queues with pending requests
4468		 */
4469		if (!cfqd->busy_queues)
4470			goto out_cont;
4471
4472		/*
4473		 * not expired and it has a request pending, let it dispatch
4474		 */
4475		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4476			goto out_kick;
4477
4478		/*
4479		 * Queue depth flag is reset only when the idle didn't succeed
4480		 */
4481		cfq_clear_cfqq_deep(cfqq);
4482	}
4483expire:
4484	cfq_slice_expired(cfqd, timed_out);
4485out_kick:
4486	cfq_schedule_dispatch(cfqd);
4487out_cont:
4488	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4489}
4490
4491static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4492{
4493	del_timer_sync(&cfqd->idle_slice_timer);
4494	cancel_work_sync(&cfqd->unplug_work);
4495}
4496
4497static void cfq_exit_queue(struct elevator_queue *e)
4498{
4499	struct cfq_data *cfqd = e->elevator_data;
4500	struct request_queue *q = cfqd->queue;
4501
4502	cfq_shutdown_timer_wq(cfqd);
4503
4504	spin_lock_irq(q->queue_lock);
4505
4506	if (cfqd->active_queue)
4507		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
4508
4509	spin_unlock_irq(q->queue_lock);
4510
4511	cfq_shutdown_timer_wq(cfqd);
4512
4513#ifdef CONFIG_CFQ_GROUP_IOSCHED
4514	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4515#else
4516	kfree(cfqd->root_group);
4517#endif
4518	kfree(cfqd);
4519}
4520
4521static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
4522{
4523	struct cfq_data *cfqd;
4524	struct blkcg_gq *blkg __maybe_unused;
4525	int i, ret;
4526	struct elevator_queue *eq;
4527
4528	eq = elevator_alloc(q, e);
4529	if (!eq)
4530		return -ENOMEM;
4531
4532	cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
4533	if (!cfqd) {
4534		kobject_put(&eq->kobj);
4535		return -ENOMEM;
4536	}
4537	eq->elevator_data = cfqd;
4538
4539	cfqd->queue = q;
4540	spin_lock_irq(q->queue_lock);
4541	q->elevator = eq;
4542	spin_unlock_irq(q->queue_lock);
4543
4544	/* Init root service tree */
4545	cfqd->grp_service_tree = CFQ_RB_ROOT;
4546
4547	/* Init root group and prefer root group over other groups by default */
4548#ifdef CONFIG_CFQ_GROUP_IOSCHED
4549	ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
4550	if (ret)
4551		goto out_free;
4552
4553	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
4554#else
4555	ret = -ENOMEM;
4556	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4557					GFP_KERNEL, cfqd->queue->node);
4558	if (!cfqd->root_group)
4559		goto out_free;
4560
4561	cfq_init_cfqg_base(cfqd->root_group);
4562	cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
4563	cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
4564#endif
4565
4566	/*
4567	 * Not strictly needed (since RB_ROOT just clears the node and we
4568	 * zeroed cfqd on alloc), but better be safe in case someone decides
4569	 * to add magic to the rb code
4570	 */
4571	for (i = 0; i < CFQ_PRIO_LISTS; i++)
4572		cfqd->prio_trees[i] = RB_ROOT;
4573
4574	/*
4575	 * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
4576	 * Grab a permanent reference to it, so that the normal code flow
4577	 * will not attempt to free it.  oom_cfqq is linked to root_group
4578	 * but shouldn't hold a reference as it'll never be unlinked.  Lose
4579	 * the reference from linking right away.
4580	 */
4581	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
4582	cfqd->oom_cfqq.ref++;
4583
4584	spin_lock_irq(q->queue_lock);
4585	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
4586	cfqg_put(cfqd->root_group);
4587	spin_unlock_irq(q->queue_lock);
4588
4589	init_timer(&cfqd->idle_slice_timer);
4590	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4591	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4592
4593	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
4594
4595	cfqd->cfq_quantum = cfq_quantum;
4596	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4597	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
4598	cfqd->cfq_back_max = cfq_back_max;
4599	cfqd->cfq_back_penalty = cfq_back_penalty;
4600	cfqd->cfq_slice[0] = cfq_slice_async;
4601	cfqd->cfq_slice[1] = cfq_slice_sync;
4602	cfqd->cfq_target_latency = cfq_target_latency;
4603	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4604	cfqd->cfq_slice_idle = cfq_slice_idle;
4605	cfqd->cfq_group_idle = cfq_group_idle;
4606	cfqd->cfq_latency = 1;
4607	cfqd->hw_tag = -1;
4608	/*
4609	 * we optimistically start assuming sync ops weren't delayed in the last
4610	 * second, in order to have larger depth for async operations.
4611	 */
4612	cfqd->last_delayed_sync = jiffies - HZ;
4613	return 0;
4614
4615out_free:
4616	kfree(cfqd);
4617	kobject_put(&eq->kobj);
4618	return ret;
4619}
4620
4621static void cfq_registered_queue(struct request_queue *q)
4622{
4623	struct elevator_queue *e = q->elevator;
4624	struct cfq_data *cfqd = e->elevator_data;
4625
4626	/*
4627	 * Default to IOPS mode with no idling for SSDs
4628	 */
4629	if (blk_queue_nonrot(q))
4630		cfqd->cfq_slice_idle = 0;
4631}
4632
4633/*
4634 * sysfs parts below -->
4635 */
4636static ssize_t
4637cfq_var_show(unsigned int var, char *page)
4638{
4639	return sprintf(page, "%u\n", var);
4640}
4641
4642static ssize_t
4643cfq_var_store(unsigned int *var, const char *page, size_t count)
4644{
4645	char *p = (char *) page;
4646
4647	*var = simple_strtoul(p, &p, 10);
4648	return count;
4649}
4650
4651#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
4652static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
4653{									\
4654	struct cfq_data *cfqd = e->elevator_data;			\
4655	unsigned int __data = __VAR;					\
4656	if (__CONV)							\
4657		__data = jiffies_to_msecs(__data);			\
4658	return cfq_var_show(__data, (page));				\
4659}
4660SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
4661SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4662SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
4663SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4664SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
4665SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
4666SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4667SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4668SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4669SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4670SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4671SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
4672#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION
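/*
 * The generated store routines clamp the written value to [MIN, MAX] and,
 * when __CONV is set, convert it from milliseconds to jiffies before
 * storing it.  Purely as an example (device name and values are
 * illustrative, not recommendations):
 *
 *	# echo 0 > /sys/block/sda/queue/iosched/low_latency
 *	# echo 200 > /sys/block/sda/queue/iosched/slice_sync
 *
 * The first write disables the low-latency heuristics (range 0..1); the
 * second stores msecs_to_jiffies(200) as the sync slice length.
 */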

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	CFQ_ATTR(target_latency),
	__ATTR_NULL
};
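/*
 * The attribute table above is what appears in the per-queue "iosched"
 * directory once cfq is the active elevator.  For illustration only
 * ("sda" is a placeholder device):
 *
 *	# ls /sys/block/sda/queue/iosched/
 *	back_seek_max      group_idle   slice_async     slice_idle
 *	back_seek_penalty  low_latency  slice_async_rq  slice_sync
 *	fifo_expire_async  quantum      target_latency
 *	fifo_expire_sync
 *
 * Each file is world-readable and root-writable (S_IRUGO | S_IWUSR).
 */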

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn = 		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_icq_fn =		cfq_init_icq,
		.elevator_exit_icq_fn =		cfq_exit_icq,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.elevator_registered_fn =	cfq_registered_queue,
	},
	.icq_size	=	sizeof(struct cfq_io_cq),
	.icq_align	=	__alignof__(struct cfq_io_cq),
	.elevator_attrs =	cfq_attrs,
	.elevator_name	=	"cfq",
	.elevator_owner =	THIS_MODULE,
};
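/*
 * Once elv_register() has run (see cfq_init() below), "cfq" shows up in
 * the per-device scheduler selector and can be switched to at runtime.
 * For illustration only; the set of available schedulers depends on the
 * kernel configuration:
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop deadline [cfq]
 *	# echo cfq > /sys/block/sda/queue/scheduler
 */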

#ifdef CONFIG_CFQ_GROUP_IOSCHED
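/*
 * The blkcg policy below hooks cfq into the block cgroup core: the cpd_*
 * callbacks manage per-cgroup policy data, while the pd_* callbacks
 * manage the per-(cgroup, request_queue) cfq_group state.  The dfl/legacy
 * cftypes export the cgroup interface files, including the group weight.
 * A rough example on the legacy blkio hierarchy (mount point and the
 * "grp" cgroup name are illustrative):
 *
 *	# echo 300 > /sys/fs/cgroup/blkio/grp/blkio.weight
 */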
static struct blkcg_policy blkcg_policy_cfq = {
	.dfl_cftypes		= cfq_blkcg_files,
	.legacy_cftypes		= cfq_blkcg_legacy_files,

	.cpd_alloc_fn		= cfq_cpd_alloc,
	.cpd_init_fn		= cfq_cpd_init,
	.cpd_free_fn		= cfq_cpd_free,
	.cpd_bind_fn		= cfq_cpd_bind,

	.pd_alloc_fn		= cfq_pd_alloc,
	.pd_init_fn		= cfq_pd_init,
	.pd_offline_fn		= cfq_pd_offline,
	.pd_free_fn		= cfq_pd_free,
	.pd_reset_stats_fn	= cfq_pd_reset_stats,
};
#endif

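/*
 * Module init: register the blkcg policy first (when group scheduling is
 * configured in), then create the cfq_queue slab cache, and finally
 * register the elevator itself.  The error labels below unwind in the
 * reverse order.
 */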
static int __init cfq_init(void)
{
	int ret;

	/*
	 * These default slice values could come out as 0 on HZ < 1000
	 * setups, so make sure they are at least one jiffy.
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;

	ret = blkcg_policy_register(&blkcg_policy_cfq);
	if (ret)
		return ret;
#else
	cfq_group_idle = 0;
#endif

	ret = -ENOMEM;
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto err_pol_unreg;

	ret = elv_register(&iosched_cfq);
	if (ret)
		goto err_free_pool;

	return 0;

err_free_pool:
	kmem_cache_destroy(cfq_pool);
err_pol_unreg:
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	return ret;
}

static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
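/*
 * Usage notes (illustrative): when built as a module this becomes
 * cfq-iosched.ko and is typically loaded on demand or via modprobe; cfq
 * can also be made the boot-time default I/O scheduler with the
 * "elevator=cfq" kernel command line parameter.
 */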