v3.5.6
   1/*
   2 * Interface for controlling IO bandwidth on a request queue
   3 *
   4 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/slab.h>
   9#include <linux/blkdev.h>
  10#include <linux/bio.h>
  11#include <linux/blktrace_api.h>
  12#include "blk-cgroup.h"
  13#include "blk.h"
  14
  15/* Max dispatch from a group in 1 round */
  16static int throtl_grp_quantum = 8;
  17
  18/* Total max dispatch from all groups in one round */
  19static int throtl_quantum = 32;
  20
   21/* Throttling is performed over a 100ms slice, after which the slice is renewed */
  22static unsigned long throtl_slice = HZ/10;	/* 100 ms */
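/*
 * Editor's note (illustrative, assuming HZ=1000): throtl_slice = HZ/10 = 100
 * jiffies = 100ms.  Per slice a group may dispatch roughly
 * bps * throtl_slice / HZ bytes and iops * throtl_slice / HZ bios before it
 * has to wait; e.g. with bps = 1 MiB/s that is ~104857 bytes per 100ms slice.
 */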
  23
  24static struct blkcg_policy blkcg_policy_throtl;
  25
  26/* A workqueue to queue throttle related work */
  27static struct workqueue_struct *kthrotld_workqueue;
  28static void throtl_schedule_delayed_work(struct throtl_data *td,
  29				unsigned long delay);
  30
  31struct throtl_rb_root {
  32	struct rb_root rb;
  33	struct rb_node *left;
  34	unsigned int count;
  35	unsigned long min_disptime;
  36};
  37
  38#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
  39			.count = 0, .min_disptime = 0}
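/*
 * Editor's note: throtl_rb_root wraps an rbtree keyed by dispatch time and
 * caches the leftmost node (->left) so the group with the earliest disptime
 * can be found without rewalking the tree; see throtl_rb_first() below.
 * Newer kernels (e.g. the v5.4 listing further down) use rb_root_cached for
 * the same purpose.
 */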
  40
  41#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
  42
  43/* Per-cpu group stats */
  44struct tg_stats_cpu {
  45	/* total bytes transferred */
  46	struct blkg_rwstat		service_bytes;
  47	/* total IOs serviced, post merge */
  48	struct blkg_rwstat		serviced;
  49};
  50
  51struct throtl_grp {
  52	/* must be the first member */
  53	struct blkg_policy_data pd;
  54
  55	/* active throtl group service_tree member */
  56	struct rb_node rb_node;
  57
  58	/*
  59	 * Dispatch time in jiffies. This is the estimated time when group
   60	 * will unthrottle and be ready to dispatch more bios. It is used as
  61	 * key to sort active groups in service tree.
  62	 */
  63	unsigned long disptime;
  64
  65	unsigned int flags;
  66
  67	/* Two lists for READ and WRITE */
  68	struct bio_list bio_lists[2];
  69
  70	/* Number of queued bios on READ and WRITE lists */
  71	unsigned int nr_queued[2];
  72
  73	/* bytes per second rate limits */
  74	uint64_t bps[2];
  75
  76	/* IOPS limits */
  77	unsigned int iops[2];
  78
   79	/* Number of bytes dispatched in current slice */
  80	uint64_t bytes_disp[2];
  81	/* Number of bio's dispatched in current slice */
  82	unsigned int io_disp[2];
  83
  84	/* When did we start a new slice */
  85	unsigned long slice_start[2];
  86	unsigned long slice_end[2];
  87
  88	/* Some throttle limits got updated for the group */
  89	int limits_changed;
  90
  91	/* Per cpu stats pointer */
  92	struct tg_stats_cpu __percpu *stats_cpu;
  93
  94	/* List of tgs waiting for per cpu stats memory to be allocated */
  95	struct list_head stats_alloc_node;
  96};
  97
  98struct throtl_data
  99{
 100	/* service tree for active throtl groups */
 101	struct throtl_rb_root tg_service_tree;
 102
 103	struct request_queue *queue;
 104
 105	/* Total Number of queued bios on READ and WRITE lists */
 106	unsigned int nr_queued[2];
 107
 108	/*
 109	 * number of total undestroyed groups
 110	 */
 111	unsigned int nr_undestroyed_grps;
 112
 113	/* Work for dispatching throttled bios */
 114	struct delayed_work throtl_work;
 115
 116	int limits_changed;
 117};
 118
 119/* list and work item to allocate percpu group stats */
 120static DEFINE_SPINLOCK(tg_stats_alloc_lock);
 121static LIST_HEAD(tg_stats_alloc_list);
 122
 123static void tg_stats_alloc_fn(struct work_struct *);
 124static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
 125
 126static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
 127{
 128	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
 129}
 130
 131static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
 132{
 133	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
 134}
 135
 136static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
 137{
 138	return pd_to_blkg(&tg->pd);
 139}
 140
 141static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
 142{
 143	return blkg_to_tg(td->queue->root_blkg);
 144}
 145
 146enum tg_state_flags {
 147	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
 148};
 149
 150#define THROTL_TG_FNS(name)						\
 151static inline void throtl_mark_tg_##name(struct throtl_grp *tg)		\
 152{									\
 153	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
 154}									\
 155static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
 156{									\
 157	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
 158}									\
 159static inline int throtl_tg_##name(const struct throtl_grp *tg)		\
 160{									\
 161	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
 162}
 163
 164THROTL_TG_FNS(on_rr);
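/*
 * Editor's note: the THROTL_TG_FNS(on_rr) invocation above expands to three
 * helpers used for service-tree membership tracking:
 *
 *	throtl_mark_tg_on_rr(tg);	// tg->flags |= 1 << THROTL_TG_FLAG_on_rr
 *	throtl_clear_tg_on_rr(tg);	// tg->flags &= ~(1 << THROTL_TG_FLAG_on_rr)
 *	throtl_tg_on_rr(tg);		// test the flag, returns 0 or 1
 */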
 165
 166#define throtl_log_tg(td, tg, fmt, args...)	do {			\
 167	char __pbuf[128];						\
 168									\
 169	blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));		\
 170	blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
 171} while (0)
 172
 173#define throtl_log(td, fmt, args...)	\
 174	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
 175
 176static inline unsigned int total_nr_queued(struct throtl_data *td)
 177{
 178	return td->nr_queued[0] + td->nr_queued[1];
 179}
 180
 181/*
 182 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 183 * system_nrt_wq once there are some groups on the alloc_list waiting for
 184 * allocation.
 185 */
 186static void tg_stats_alloc_fn(struct work_struct *work)
 187{
 188	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
 189	struct delayed_work *dwork = to_delayed_work(work);
 190	bool empty = false;
 191
 192alloc_stats:
 193	if (!stats_cpu) {
 194		stats_cpu = alloc_percpu(struct tg_stats_cpu);
 195		if (!stats_cpu) {
 196			/* allocation failed, try again after some time */
 197			queue_delayed_work(system_nrt_wq, dwork,
 198					   msecs_to_jiffies(10));
 199			return;
 200		}
 201	}
 202
 203	spin_lock_irq(&tg_stats_alloc_lock);
 204
 205	if (!list_empty(&tg_stats_alloc_list)) {
 206		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
 207							 struct throtl_grp,
 208							 stats_alloc_node);
 209		swap(tg->stats_cpu, stats_cpu);
 210		list_del_init(&tg->stats_alloc_node);
 211	}
 212
 213	empty = list_empty(&tg_stats_alloc_list);
 214	spin_unlock_irq(&tg_stats_alloc_lock);
 215	if (!empty)
 216		goto alloc_stats;
 217}
 218
 219static void throtl_pd_init(struct blkcg_gq *blkg)
 220{
 221	struct throtl_grp *tg = blkg_to_tg(blkg);
 222	unsigned long flags;
 223
 224	RB_CLEAR_NODE(&tg->rb_node);
 225	bio_list_init(&tg->bio_lists[0]);
 226	bio_list_init(&tg->bio_lists[1]);
 227	tg->limits_changed = false;
 228
 229	tg->bps[READ] = -1;
 230	tg->bps[WRITE] = -1;
 231	tg->iops[READ] = -1;
 232	tg->iops[WRITE] = -1;
 233
 234	/*
 235	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
 236	 * but percpu allocator can't be called from IO path.  Queue tg on
 237	 * tg_stats_alloc_list and allocate from work item.
 238	 */
 239	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 240	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
 241	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
 242	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 243}
 244
 245static void throtl_pd_exit(struct blkcg_gq *blkg)
 246{
 247	struct throtl_grp *tg = blkg_to_tg(blkg);
 248	unsigned long flags;
 249
 250	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 251	list_del_init(&tg->stats_alloc_node);
 252	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 253
 254	free_percpu(tg->stats_cpu);
 255}
 256
 257static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
 258{
 259	struct throtl_grp *tg = blkg_to_tg(blkg);
 260	int cpu;
 261
 262	if (tg->stats_cpu == NULL)
 263		return;
 264
 265	for_each_possible_cpu(cpu) {
 266		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 267
 268		blkg_rwstat_reset(&sc->service_bytes);
 269		blkg_rwstat_reset(&sc->serviced);
 270	}
 271}
 272
 273static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
 274					   struct blkcg *blkcg)
 275{
 276	/*
 277	 * This is the common case when there are no blkcgs.  Avoid lookup
 278	 * in this case
 279	 */
 280	if (blkcg == &blkcg_root)
 281		return td_root_tg(td);
 282
 283	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
 284}
 285
 286static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
 287						  struct blkcg *blkcg)
 288{
 289	struct request_queue *q = td->queue;
 290	struct throtl_grp *tg = NULL;
 291
 292	/*
 293	 * This is the common case when there are no blkcgs.  Avoid lookup
 294	 * in this case
 295	 */
 296	if (blkcg == &blkcg_root) {
 297		tg = td_root_tg(td);
 298	} else {
 299		struct blkcg_gq *blkg;
 300
 301		blkg = blkg_lookup_create(blkcg, q);
 302
 303		/* if %NULL and @q is alive, fall back to root_tg */
 304		if (!IS_ERR(blkg))
 305			tg = blkg_to_tg(blkg);
 306		else if (!blk_queue_dead(q))
 307			tg = td_root_tg(td);
 308	}
 309
 310	return tg;
 311}
 312
 313static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
 314{
 315	/* Service tree is empty */
 316	if (!root->count)
 317		return NULL;
 318
 319	if (!root->left)
 320		root->left = rb_first(&root->rb);
 321
 322	if (root->left)
 323		return rb_entry_tg(root->left);
 324
 325	return NULL;
 326}
 327
 328static void rb_erase_init(struct rb_node *n, struct rb_root *root)
 329{
 330	rb_erase(n, root);
 331	RB_CLEAR_NODE(n);
 332}
 333
 334static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
 335{
 336	if (root->left == n)
 337		root->left = NULL;
 338	rb_erase_init(n, &root->rb);
 339	--root->count;
 340}
 341
 342static void update_min_dispatch_time(struct throtl_rb_root *st)
 343{
 344	struct throtl_grp *tg;
 345
 346	tg = throtl_rb_first(st);
 347	if (!tg)
 348		return;
 349
 350	st->min_disptime = tg->disptime;
 351}
 352
 353static void
 354tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
 355{
 356	struct rb_node **node = &st->rb.rb_node;
 357	struct rb_node *parent = NULL;
 358	struct throtl_grp *__tg;
 359	unsigned long key = tg->disptime;
 360	int left = 1;
 361
 362	while (*node != NULL) {
 363		parent = *node;
 364		__tg = rb_entry_tg(parent);
 365
 366		if (time_before(key, __tg->disptime))
 367			node = &parent->rb_left;
 368		else {
 369			node = &parent->rb_right;
 370			left = 0;
 371		}
 372	}
 373
 374	if (left)
 375		st->left = &tg->rb_node;
 376
 377	rb_link_node(&tg->rb_node, parent, node);
 378	rb_insert_color(&tg->rb_node, &st->rb);
 379}
 380
 381static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
 382{
 383	struct throtl_rb_root *st = &td->tg_service_tree;
 384
 385	tg_service_tree_add(st, tg);
 386	throtl_mark_tg_on_rr(tg);
 387	st->count++;
 388}
 389
 390static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
 391{
 392	if (!throtl_tg_on_rr(tg))
 393		__throtl_enqueue_tg(td, tg);
 394}
 395
 396static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
 397{
 398	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
 399	throtl_clear_tg_on_rr(tg);
 400}
 401
 402static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
 403{
 404	if (throtl_tg_on_rr(tg))
 405		__throtl_dequeue_tg(td, tg);
 406}
 407
 408static void throtl_schedule_next_dispatch(struct throtl_data *td)
 409{
 410	struct throtl_rb_root *st = &td->tg_service_tree;
 411
 412	/*
 413	 * If there are more bios pending, schedule more work.
 414	 */
 415	if (!total_nr_queued(td))
 416		return;
 417
 418	BUG_ON(!st->count);
 419
 420	update_min_dispatch_time(st);
 421
 422	if (time_before_eq(st->min_disptime, jiffies))
 423		throtl_schedule_delayed_work(td, 0);
 424	else
 425		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
 426}
 427
 428static inline void
 429throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 430{
 431	tg->bytes_disp[rw] = 0;
 432	tg->io_disp[rw] = 0;
 433	tg->slice_start[rw] = jiffies;
 434	tg->slice_end[rw] = jiffies + throtl_slice;
 435	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
 436			rw == READ ? 'R' : 'W', tg->slice_start[rw],
 437			tg->slice_end[rw], jiffies);
 438}
 439
 440static inline void throtl_set_slice_end(struct throtl_data *td,
 441		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
 442{
 443	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 444}
 445
 446static inline void throtl_extend_slice(struct throtl_data *td,
 447		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
 448{
 449	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 450	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
 451			rw == READ ? 'R' : 'W', tg->slice_start[rw],
 452			tg->slice_end[rw], jiffies);
 453}
 454
 455/* Determine if previously allocated or extended slice is complete or not */
 456static bool
 457throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 458{
 459	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
 460		return 0;
 461
 462	return 1;
 463}
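/*
 * Editor's note (illustrative): with slice_start = 1000 and slice_end = 1100,
 * jiffies = 1050 is inside the slice, so this returns 0 (slice still in use);
 * jiffies = 1200 is past slice_end, so it returns 1 (slice expired and must
 * be renewed before more accounting is done).
 */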
 464
 465/* Trim the used slices and adjust slice start accordingly */
 466static inline void
 467throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 468{
 469	unsigned long nr_slices, time_elapsed, io_trim;
 470	u64 bytes_trim, tmp;
 471
 472	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
 473
 474	/*
  475	 * If bps is unlimited (-1), then the time slice doesn't get
  476	 * renewed. Don't try to trim the slice if it has already expired. A new
  477	 * slice will start when appropriate.
 478	 */
 479	if (throtl_slice_used(td, tg, rw))
 480		return;
 481
 482	/*
 483	 * A bio has been dispatched. Also adjust slice_end. It might happen
  484	 * that initially the cgroup limit was very low, resulting in a high
  485	 * slice_end, but later the limit was bumped up and the bio was dispatched
  486	 * sooner; then we need to reduce slice_end. A bogus, high slice_end
  487	 * is bad because it does not allow a new slice to start.
 488	 */
 489
 490	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
 491
 492	time_elapsed = jiffies - tg->slice_start[rw];
 493
 494	nr_slices = time_elapsed / throtl_slice;
 495
 496	if (!nr_slices)
 497		return;
 498	tmp = tg->bps[rw] * throtl_slice * nr_slices;
 499	do_div(tmp, HZ);
 500	bytes_trim = tmp;
 501
 502	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
 503
 504	if (!bytes_trim && !io_trim)
 505		return;
 506
 507	if (tg->bytes_disp[rw] >= bytes_trim)
 508		tg->bytes_disp[rw] -= bytes_trim;
 509	else
 510		tg->bytes_disp[rw] = 0;
 511
 512	if (tg->io_disp[rw] >= io_trim)
 513		tg->io_disp[rw] -= io_trim;
 514	else
 515		tg->io_disp[rw] = 0;
 516
 517	tg->slice_start[rw] += nr_slices * throtl_slice;
 518
 519	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
 520			" start=%lu end=%lu jiffies=%lu",
 521			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
 522			tg->slice_start[rw], tg->slice_end[rw], jiffies);
 523}
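/*
 * Editor's note (worked example, assuming HZ=1000, throtl_slice=100): if a
 * group with bps = 1 MiB/s and iops = 100 last started its slice 250 jiffies
 * ago, then nr_slices = 2, bytes_trim = 1048576 * 100 * 2 / 1000 = 209715,
 * io_trim = 100 * 100 * 2 / 1000 = 20, and slice_start is advanced by 200
 * jiffies, so only the most recent partial slice keeps its accounting.
 */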
 524
 525static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
 526		struct bio *bio, unsigned long *wait)
 527{
 528	bool rw = bio_data_dir(bio);
 529	unsigned int io_allowed;
 530	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 531	u64 tmp;
 532
 533	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 534
 535	/* Slice has just started. Consider one slice interval */
 536	if (!jiffy_elapsed)
 537		jiffy_elapsed_rnd = throtl_slice;
 538
 539	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 540
 541	/*
  542	 * jiffy_elapsed_rnd should not be a big value: the minimum iops is 1,
  543	 * so at most the elapsed jiffies should be equivalent to 1 second, as we
  544	 * will allow dispatch after 1 second and by then the slice should
  545	 * have been trimmed.
 546	 */
 547
 548	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
 549	do_div(tmp, HZ);
 550
 551	if (tmp > UINT_MAX)
 552		io_allowed = UINT_MAX;
 553	else
 554		io_allowed = tmp;
 555
 556	if (tg->io_disp[rw] + 1 <= io_allowed) {
 557		if (wait)
 558			*wait = 0;
 559		return 1;
 560	}
 561
 562	/* Calc approx time to dispatch */
 563	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
 564
 565	if (jiffy_wait > jiffy_elapsed)
 566		jiffy_wait = jiffy_wait - jiffy_elapsed;
 567	else
 568		jiffy_wait = 1;
 569
 570	if (wait)
 571		*wait = jiffy_wait;
 572	return 0;
 573}
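/*
 * Editor's note (worked example, assuming HZ=1000, throtl_slice=100): with
 * iops = 10 and a slice that started 30 jiffies ago, jiffy_elapsed_rnd is
 * rounded up to 100, so io_allowed = 10 * 100 / 1000 = 1.  If io_disp is
 * already 1, the bio cannot go now; jiffy_wait = ((1+1) * 1000) / 10 + 1 =
 * 201, minus the 30 jiffies already elapsed, gives *wait = 171 jiffies.
 */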
 574
 575static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
 576		struct bio *bio, unsigned long *wait)
 577{
 578	bool rw = bio_data_dir(bio);
 579	u64 bytes_allowed, extra_bytes, tmp;
 580	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 581
 582	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 583
 584	/* Slice has just started. Consider one slice interval */
 585	if (!jiffy_elapsed)
 586		jiffy_elapsed_rnd = throtl_slice;
 587
 588	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 589
 590	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
 591	do_div(tmp, HZ);
 592	bytes_allowed = tmp;
 593
 594	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
 595		if (wait)
 596			*wait = 0;
 597		return 1;
 598	}
 599
 600	/* Calc approx time to dispatch */
 601	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
 602	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
 603
 604	if (!jiffy_wait)
 605		jiffy_wait = 1;
 606
 607	/*
 608	 * This wait time is without taking into consideration the rounding
 609	 * up we did. Add that time also.
 610	 */
 611	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
 612	if (wait)
 613		*wait = jiffy_wait;
 614	return 0;
 615}
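/*
 * Editor's note (worked example, assuming HZ=1000, throtl_slice=100): with
 * bps = 1 MiB/s and a slice that started 50 jiffies ago, jiffy_elapsed_rnd is
 * rounded up to 100 and bytes_allowed = 1048576 * 100 / 1000 = 104857.  For a
 * 64 KiB bio with bytes_disp = 65536, extra_bytes = 131072 - 104857 = 26215,
 * jiffy_wait = 26215 * 1000 / 1048576 = 25, plus the rounding compensation
 * (100 - 50), gives *wait = 75 jiffies.
 */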
 616
 617static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
 618	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
 619		return 1;
 620	return 0;
 621}
 622
 623/*
  624 * Returns whether one can dispatch a bio or not. Also returns the approximate
  625 * number of jiffies to wait before this bio is within the IO rate and can be dispatched
 626 */
 627static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 628				struct bio *bio, unsigned long *wait)
 629{
 630	bool rw = bio_data_dir(bio);
 631	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
 632
 633	/*
 634 	 * Currently whole state machine of group depends on first bio
 635	 * queued in the group bio list. So one should not be calling
 636	 * this function with a different bio if there are other bios
 637	 * queued.
 638	 */
 639	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
 640
 641	/* If tg->bps = -1, then BW is unlimited */
 642	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
 643		if (wait)
 644			*wait = 0;
 645		return 1;
 646	}
 647
 648	/*
 649	 * If previous slice expired, start a new one otherwise renew/extend
 650	 * existing slice to make sure it is at least throtl_slice interval
 651	 * long since now.
 652	 */
 653	if (throtl_slice_used(td, tg, rw))
 654		throtl_start_new_slice(td, tg, rw);
 655	else {
 656		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
 657			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
 658	}
 659
 660	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
 661	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
 662		if (wait)
 663			*wait = 0;
 664		return 1;
 665	}
 666
 667	max_wait = max(bps_wait, iops_wait);
 668
 669	if (wait)
 670		*wait = max_wait;
 671
 672	if (time_before(tg->slice_end[rw], jiffies + max_wait))
 673		throtl_extend_slice(td, tg, rw, jiffies + max_wait);
 674
 675	return 0;
 676}
 677
 678static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
 679					 int rw)
 680{
 681	struct throtl_grp *tg = blkg_to_tg(blkg);
 682	struct tg_stats_cpu *stats_cpu;
 683	unsigned long flags;
 684
 685	/* If per cpu stats are not allocated yet, don't do any accounting. */
 686	if (tg->stats_cpu == NULL)
 687		return;
 688
 689	/*
 690	 * Disabling interrupts to provide mutual exclusion between two
 691	 * writes on same cpu. It probably is not needed for 64bit. Not
 692	 * optimizing that case yet.
 693	 */
 694	local_irq_save(flags);
 695
 696	stats_cpu = this_cpu_ptr(tg->stats_cpu);
 697
 698	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
 699	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
 700
 701	local_irq_restore(flags);
 702}
 703
 704static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 705{
 706	bool rw = bio_data_dir(bio);
 707
 708	/* Charge the bio to the group */
 709	tg->bytes_disp[rw] += bio->bi_size;
 710	tg->io_disp[rw]++;
 711
 712	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
 713}
 714
 715static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
 716			struct bio *bio)
 717{
 718	bool rw = bio_data_dir(bio);
 719
 720	bio_list_add(&tg->bio_lists[rw], bio);
 721	/* Take a bio reference on tg */
 722	blkg_get(tg_to_blkg(tg));
 723	tg->nr_queued[rw]++;
 724	td->nr_queued[rw]++;
 725	throtl_enqueue_tg(td, tg);
 726}
 727
 728static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
 729{
 730	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
 731	struct bio *bio;
 732
 733	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
 734		tg_may_dispatch(td, tg, bio, &read_wait);
 735
 736	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
 737		tg_may_dispatch(td, tg, bio, &write_wait);
 738
 739	min_wait = min(read_wait, write_wait);
 740	disptime = jiffies + min_wait;
 741
 742	/* Update dispatch time */
 743	throtl_dequeue_tg(td, tg);
 744	tg->disptime = disptime;
 745	throtl_enqueue_tg(td, tg);
 746}
 747
 748static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
 749				bool rw, struct bio_list *bl)
 750{
 751	struct bio *bio;
 752
 753	bio = bio_list_pop(&tg->bio_lists[rw]);
 754	tg->nr_queued[rw]--;
 755	/* Drop bio reference on blkg */
 756	blkg_put(tg_to_blkg(tg));
 757
 758	BUG_ON(td->nr_queued[rw] <= 0);
 759	td->nr_queued[rw]--;
 760
 761	throtl_charge_bio(tg, bio);
 762	bio_list_add(bl, bio);
 763	bio->bi_rw |= REQ_THROTTLED;
 764
 765	throtl_trim_slice(td, tg, rw);
 766}
 767
 768static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
 769				struct bio_list *bl)
 770{
 771	unsigned int nr_reads = 0, nr_writes = 0;
 772	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
 773	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
 774	struct bio *bio;
 775
 776	/* Try to dispatch 75% READS and 25% WRITES */
 777
 778	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
 779		&& tg_may_dispatch(td, tg, bio, NULL)) {
 780
 781		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
 782		nr_reads++;
 783
 784		if (nr_reads >= max_nr_reads)
 785			break;
 786	}
 787
 788	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
 789		&& tg_may_dispatch(td, tg, bio, NULL)) {
 790
 791		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
 792		nr_writes++;
 793
 794		if (nr_writes >= max_nr_writes)
 795			break;
 796	}
 797
 798	return nr_reads + nr_writes;
 799}
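/*
 * Editor's note: with the default throtl_grp_quantum of 8, max_nr_reads is
 * 8 * 3 / 4 = 6 and max_nr_writes is 8 - 6 = 2, which is how the "75% READS
 * and 25% WRITES" split above comes out per group per dispatch round.
 */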
 800
 801static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
 802{
 803	unsigned int nr_disp = 0;
 804	struct throtl_grp *tg;
 805	struct throtl_rb_root *st = &td->tg_service_tree;
 806
 807	while (1) {
 808		tg = throtl_rb_first(st);
 809
 810		if (!tg)
 811			break;
 812
 813		if (time_before(jiffies, tg->disptime))
 814			break;
 815
 816		throtl_dequeue_tg(td, tg);
 817
 818		nr_disp += throtl_dispatch_tg(td, tg, bl);
 819
 820		if (tg->nr_queued[0] || tg->nr_queued[1]) {
 821			tg_update_disptime(td, tg);
 822			throtl_enqueue_tg(td, tg);
 823		}
 824
 825		if (nr_disp >= throtl_quantum)
 826			break;
 827	}
 828
 829	return nr_disp;
 830}
 831
 832static void throtl_process_limit_change(struct throtl_data *td)
 833{
 834	struct request_queue *q = td->queue;
 835	struct blkcg_gq *blkg, *n;
 836
 837	if (!td->limits_changed)
 838		return;
 839
 840	xchg(&td->limits_changed, false);
 841
 842	throtl_log(td, "limits changed");
 843
 844	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 845		struct throtl_grp *tg = blkg_to_tg(blkg);
 846
 847		if (!tg->limits_changed)
 848			continue;
 849
 850		if (!xchg(&tg->limits_changed, false))
 851			continue;
 852
 853		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
 854			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
 855			tg->iops[READ], tg->iops[WRITE]);
 856
 857		/*
 858		 * Restart the slices for both READ and WRITE. It
 859		 * might happen that a group's limits are dropped
 860		 * suddenly, and we don't want to account recently
 861		 * dispatched IO at the new low rate
 862		 */
 863		throtl_start_new_slice(td, tg, 0);
 864		throtl_start_new_slice(td, tg, 1);
 865
 866		if (throtl_tg_on_rr(tg))
 867			tg_update_disptime(td, tg);
 868	}
 869}
 870
 871/* Dispatch throttled bios. Should be called without queue lock held. */
 872static int throtl_dispatch(struct request_queue *q)
 873{
 874	struct throtl_data *td = q->td;
 875	unsigned int nr_disp = 0;
 876	struct bio_list bio_list_on_stack;
 877	struct bio *bio;
 878	struct blk_plug plug;
 879
 880	spin_lock_irq(q->queue_lock);
 881
 882	throtl_process_limit_change(td);
 883
 884	if (!total_nr_queued(td))
 885		goto out;
 886
 887	bio_list_init(&bio_list_on_stack);
 888
 889	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
 890			total_nr_queued(td), td->nr_queued[READ],
 891			td->nr_queued[WRITE]);
 892
 893	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
 894
 895	if (nr_disp)
 896		throtl_log(td, "bios disp=%u", nr_disp);
 897
 898	throtl_schedule_next_dispatch(td);
 899out:
 900	spin_unlock_irq(q->queue_lock);
 901
 902	/*
 903	 * If we dispatched some requests, unplug the queue to ensure
 904	 * immediate dispatch
 905	 */
 906	if (nr_disp) {
 907		blk_start_plug(&plug);
 908		while((bio = bio_list_pop(&bio_list_on_stack)))
 909			generic_make_request(bio);
 910		blk_finish_plug(&plug);
 911	}
 912	return nr_disp;
 913}
 914
 915void blk_throtl_work(struct work_struct *work)
 916{
 917	struct throtl_data *td = container_of(work, struct throtl_data,
 918					throtl_work.work);
 919	struct request_queue *q = td->queue;
 920
 921	throtl_dispatch(q);
 922}
 923
 924/* Call with queue lock held */
 925static void
 926throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 927{
 928
 929	struct delayed_work *dwork = &td->throtl_work;
 930
 931	/* schedule work if limits changed even if no bio is queued */
 932	if (total_nr_queued(td) || td->limits_changed) {
 933		/*
 934		 * We might have a work scheduled to be executed in future.
 935		 * Cancel that and schedule a new one.
 936		 */
 937		__cancel_delayed_work(dwork);
 938		queue_delayed_work(kthrotld_workqueue, dwork, delay);
 939		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 940				delay, jiffies);
 941	}
 942}
 943
 944static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
 945				struct blkg_policy_data *pd, int off)
 946{
 947	struct throtl_grp *tg = pd_to_tg(pd);
 948	struct blkg_rwstat rwstat = { }, tmp;
 949	int i, cpu;
 950
 951	for_each_possible_cpu(cpu) {
 952		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 953
 954		tmp = blkg_rwstat_read((void *)sc + off);
 955		for (i = 0; i < BLKG_RWSTAT_NR; i++)
 956			rwstat.cnt[i] += tmp.cnt[i];
 957	}
 958
 959	return __blkg_prfill_rwstat(sf, pd, &rwstat);
 960}
 961
 962static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
 963			       struct seq_file *sf)
 964{
 965	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 966
 967	blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
 968			  cft->private, true);
 969	return 0;
 970}
 971
 972static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
 973			      int off)
 974{
 975	struct throtl_grp *tg = pd_to_tg(pd);
 976	u64 v = *(u64 *)((void *)tg + off);
 977
 978	if (v == -1)
 979		return 0;
 980	return __blkg_prfill_u64(sf, pd, v);
 981}
 982
 983static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
 984			       int off)
 985{
 986	struct throtl_grp *tg = pd_to_tg(pd);
 987	unsigned int v = *(unsigned int *)((void *)tg + off);
 988
 989	if (v == -1)
 990		return 0;
 991	return __blkg_prfill_u64(sf, pd, v);
 992}
 993
 994static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
 995			     struct seq_file *sf)
 996{
 997	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
 998			  &blkcg_policy_throtl, cft->private, false);
 999	return 0;
1000}
1001
1002static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
1003			      struct seq_file *sf)
1004{
1005	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
1006			  &blkcg_policy_throtl, cft->private, false);
1007	return 0;
1008}
1009
1010static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
1011		       bool is_u64)
1012{
1013	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
1014	struct blkg_conf_ctx ctx;
1015	struct throtl_grp *tg;
1016	struct throtl_data *td;
1017	int ret;
1018
1019	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1020	if (ret)
1021		return ret;
1022
1023	tg = blkg_to_tg(ctx.blkg);
1024	td = ctx.blkg->q->td;
1025
1026	if (!ctx.v)
1027		ctx.v = -1;
1028
1029	if (is_u64)
1030		*(u64 *)((void *)tg + cft->private) = ctx.v;
1031	else
1032		*(unsigned int *)((void *)tg + cft->private) = ctx.v;
1033
1034	/* XXX: we don't need the following deferred processing */
1035	xchg(&tg->limits_changed, true);
1036	xchg(&td->limits_changed, true);
1037	throtl_schedule_delayed_work(td, 0);
1038
1039	blkg_conf_finish(&ctx);
1040	return 0;
1041}
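/*
 * Editor's note (illustrative usage): the files declared below accept
 * "<major>:<minor> <limit>" per line, and writing 0 removes the limit (the
 * code above converts 0 to -1, i.e. unlimited).  Under the blkio controller
 * they appear with a "blkio." prefix, e.g. limiting reads on device 8:16 to
 * 1 MiB/s from a shell:
 *
 *	echo "8:16 1048576" > /sys/fs/cgroup/blkio/<group>/blkio.throttle.read_bps_device
 */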
1042
1043static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
1044			   const char *buf)
1045{
1046	return tg_set_conf(cgrp, cft, buf, true);
1047}
1048
1049static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
1050			    const char *buf)
1051{
1052	return tg_set_conf(cgrp, cft, buf, false);
1053}
1054
1055static struct cftype throtl_files[] = {
1056	{
1057		.name = "throttle.read_bps_device",
1058		.private = offsetof(struct throtl_grp, bps[READ]),
1059		.read_seq_string = tg_print_conf_u64,
1060		.write_string = tg_set_conf_u64,
1061		.max_write_len = 256,
1062	},
1063	{
1064		.name = "throttle.write_bps_device",
1065		.private = offsetof(struct throtl_grp, bps[WRITE]),
1066		.read_seq_string = tg_print_conf_u64,
1067		.write_string = tg_set_conf_u64,
1068		.max_write_len = 256,
1069	},
1070	{
1071		.name = "throttle.read_iops_device",
1072		.private = offsetof(struct throtl_grp, iops[READ]),
1073		.read_seq_string = tg_print_conf_uint,
1074		.write_string = tg_set_conf_uint,
1075		.max_write_len = 256,
1076	},
1077	{
1078		.name = "throttle.write_iops_device",
1079		.private = offsetof(struct throtl_grp, iops[WRITE]),
1080		.read_seq_string = tg_print_conf_uint,
1081		.write_string = tg_set_conf_uint,
1082		.max_write_len = 256,
1083	},
1084	{
1085		.name = "throttle.io_service_bytes",
1086		.private = offsetof(struct tg_stats_cpu, service_bytes),
1087		.read_seq_string = tg_print_cpu_rwstat,
1088	},
1089	{
1090		.name = "throttle.io_serviced",
1091		.private = offsetof(struct tg_stats_cpu, serviced),
1092		.read_seq_string = tg_print_cpu_rwstat,
1093	},
1094	{ }	/* terminate */
1095};
1096
1097static void throtl_shutdown_wq(struct request_queue *q)
1098{
1099	struct throtl_data *td = q->td;
1100
1101	cancel_delayed_work_sync(&td->throtl_work);
1102}
1103
1104static struct blkcg_policy blkcg_policy_throtl = {
1105	.pd_size		= sizeof(struct throtl_grp),
1106	.cftypes		= throtl_files,
1107
1108	.pd_init_fn		= throtl_pd_init,
1109	.pd_exit_fn		= throtl_pd_exit,
1110	.pd_reset_stats_fn	= throtl_pd_reset_stats,
1111};
1112
1113bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
1114{
1115	struct throtl_data *td = q->td;
1116	struct throtl_grp *tg;
1117	bool rw = bio_data_dir(bio), update_disptime = true;
1118	struct blkcg *blkcg;
1119	bool throttled = false;
1120
1121	if (bio->bi_rw & REQ_THROTTLED) {
1122		bio->bi_rw &= ~REQ_THROTTLED;
1123		goto out;
1124	}
1125
1126	/* bio_associate_current() needs ioc, try creating */
1127	create_io_context(GFP_ATOMIC, q->node);
1128
1129	/*
1130	 * A throtl_grp pointer retrieved under rcu can be used to access
1131	 * basic fields like stats and io rates. If a group has no rules,
1132	 * just update the dispatch stats in lockless manner and return.
1133	 */
1134	rcu_read_lock();
1135	blkcg = bio_blkcg(bio);
1136	tg = throtl_lookup_tg(td, blkcg);
1137	if (tg) {
1138		if (tg_no_rule_group(tg, rw)) {
1139			throtl_update_dispatch_stats(tg_to_blkg(tg),
1140						     bio->bi_size, bio->bi_rw);
1141			goto out_unlock_rcu;
1142		}
1143	}
1144
1145	/*
1146	 * Either group has not been allocated yet or it is not an unlimited
1147	 * IO group
1148	 */
1149	spin_lock_irq(q->queue_lock);
1150	tg = throtl_lookup_create_tg(td, blkcg);
1151	if (unlikely(!tg))
1152		goto out_unlock;
1153
1154	if (tg->nr_queued[rw]) {
1155		/*
1156		 * There is already another bio queued in same dir. No
1157		 * need to update dispatch time.
1158		 */
1159		update_disptime = false;
1160		goto queue_bio;
1161
1162	}
1163
 1164	/* Bio is within the rate limit of the group */
1165	if (tg_may_dispatch(td, tg, bio, NULL)) {
1166		throtl_charge_bio(tg, bio);
1167
1168		/*
1169		 * We need to trim slice even when bios are not being queued
1170		 * otherwise it might happen that a bio is not queued for
1171		 * a long time and slice keeps on extending and trim is not
 1172		 * called for a long time. Now if limits are reduced suddenly,
 1173		 * we take into account all the IO dispatched so far at the new
 1174		 * low rate and newly queued IO gets a really long dispatch
 1175		 * time.
1176		 *
1177		 * So keep on trimming slice even if bio is not queued.
1178		 */
1179		throtl_trim_slice(td, tg, rw);
1180		goto out_unlock;
1181	}
1182
1183queue_bio:
1184	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
1185			" iodisp=%u iops=%u queued=%d/%d",
1186			rw == READ ? 'R' : 'W',
1187			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
1188			tg->io_disp[rw], tg->iops[rw],
1189			tg->nr_queued[READ], tg->nr_queued[WRITE]);
1190
1191	bio_associate_current(bio);
1192	throtl_add_bio_tg(q->td, tg, bio);
1193	throttled = true;
1194
1195	if (update_disptime) {
1196		tg_update_disptime(td, tg);
1197		throtl_schedule_next_dispatch(td);
1198	}
1199
1200out_unlock:
1201	spin_unlock_irq(q->queue_lock);
1202out_unlock_rcu:
1203	rcu_read_unlock();
1204out:
1205	return throttled;
1206}
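/*
 * Editor's note: blk_throtl_bio() above has three outcomes: (1) a bio already
 * marked REQ_THROTTLED (i.e. resubmitted by the dispatch work) passes straight
 * through; (2) a bio for a group with no bps/iops rule only gets its stats
 * updated under RCU and is not throttled; and (3) everything else is either
 * charged and allowed through immediately if within its slice budget, or
 * queued on the group and dispatched later by the delayed work.
 */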
1207
1208/**
1209 * blk_throtl_drain - drain throttled bios
1210 * @q: request_queue to drain throttled bios for
1211 *
1212 * Dispatch all currently throttled bios on @q through ->make_request_fn().
1213 */
1214void blk_throtl_drain(struct request_queue *q)
1215	__releases(q->queue_lock) __acquires(q->queue_lock)
1216{
1217	struct throtl_data *td = q->td;
1218	struct throtl_rb_root *st = &td->tg_service_tree;
1219	struct throtl_grp *tg;
1220	struct bio_list bl;
1221	struct bio *bio;
1222
1223	queue_lockdep_assert_held(q);
1224
1225	bio_list_init(&bl);
1226
1227	while ((tg = throtl_rb_first(st))) {
1228		throtl_dequeue_tg(td, tg);
1229
1230		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
1231			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
1232		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
1233			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
1234	}
1235	spin_unlock_irq(q->queue_lock);
1236
1237	while ((bio = bio_list_pop(&bl)))
1238		generic_make_request(bio);
1239
1240	spin_lock_irq(q->queue_lock);
1241}
1242
1243int blk_throtl_init(struct request_queue *q)
1244{
1245	struct throtl_data *td;
1246	int ret;
1247
1248	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1249	if (!td)
1250		return -ENOMEM;
1251
1252	td->tg_service_tree = THROTL_RB_ROOT;
1253	td->limits_changed = false;
1254	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
1255
1256	q->td = td;
1257	td->queue = q;
1258
1259	/* activate policy */
1260	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
1261	if (ret)
1262		kfree(td);
1263	return ret;
1264}
1265
1266void blk_throtl_exit(struct request_queue *q)
1267{
1268	BUG_ON(!q->td);
1269	throtl_shutdown_wq(q);
1270	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
1271	kfree(q->td);
1272}
1273
1274static int __init throtl_init(void)
1275{
1276	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1277	if (!kthrotld_workqueue)
1278		panic("Failed to create kthrotld\n");
1279
1280	return blkcg_policy_register(&blkcg_policy_throtl);
1281}
1282
1283module_init(throtl_init);
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Interface for controlling IO bandwidth on a request queue
   4 *
   5 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
   6 */
   7
   8#include <linux/module.h>
   9#include <linux/slab.h>
  10#include <linux/blkdev.h>
  11#include <linux/bio.h>
  12#include <linux/blktrace_api.h>
  13#include <linux/blk-cgroup.h>
  14#include "blk.h"
  15
  16/* Max dispatch from a group in 1 round */
  17static int throtl_grp_quantum = 8;
  18
  19/* Total max dispatch from all groups in one round */
  20static int throtl_quantum = 32;
  21
  22/* Throttling is performed over a slice and after that slice is renewed */
  23#define DFL_THROTL_SLICE_HD (HZ / 10)
  24#define DFL_THROTL_SLICE_SSD (HZ / 50)
  25#define MAX_THROTL_SLICE (HZ)
  26#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
  27#define MIN_THROTL_BPS (320 * 1024)
  28#define MIN_THROTL_IOPS (10)
  29#define DFL_LATENCY_TARGET (-1L)
  30#define DFL_IDLE_THRESHOLD (0)
  31#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
  32#define LATENCY_FILTERED_SSD (0)
  33/*
   34 * For HD, very small latency comes from sequential IO. Such IO doesn't help
   35 * determine whether its IO is impacted by others, hence we ignore it
  36 */
  37#define LATENCY_FILTERED_HD (1000L) /* 1ms */
  38
  39static struct blkcg_policy blkcg_policy_throtl;
  40
  41/* A workqueue to queue throttle related work */
  42static struct workqueue_struct *kthrotld_workqueue;
  43
  44/*
  45 * To implement hierarchical throttling, throtl_grps form a tree and bios
  46 * are dispatched upwards level by level until they reach the top and get
  47 * issued.  When dispatching bios from the children and local group at each
  48 * level, if the bios are dispatched into a single bio_list, there's a risk
  49 * of a local or child group which can queue many bios at once filling up
  50 * the list starving others.
  51 *
  52 * To avoid such starvation, dispatched bios are queued separately
  53 * according to where they came from.  When they are again dispatched to
  54 * the parent, they're popped in round-robin order so that no single source
  55 * hogs the dispatch window.
  56 *
  57 * throtl_qnode is used to keep the queued bios separated by their sources.
  58 * Bios are queued to throtl_qnode which in turn is queued to
  59 * throtl_service_queue and then dispatched in round-robin order.
  60 *
  61 * It's also used to track the reference counts on blkg's.  A qnode always
  62 * belongs to a throtl_grp and gets queued on itself or the parent, so
  63 * incrementing the reference of the associated throtl_grp when a qnode is
  64 * queued and decrementing when dequeued is enough to keep the whole blkg
  65 * tree pinned while bios are in flight.
  66 */
  67struct throtl_qnode {
  68	struct list_head	node;		/* service_queue->queued[] */
  69	struct bio_list		bios;		/* queued bios */
  70	struct throtl_grp	*tg;		/* tg this qnode belongs to */
  71};
  72
  73struct throtl_service_queue {
  74	struct throtl_service_queue *parent_sq;	/* the parent service_queue */
  75
  76	/*
  77	 * Bios queued directly to this service_queue or dispatched from
  78	 * children throtl_grp's.
  79	 */
  80	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
  81	unsigned int		nr_queued[2];	/* number of queued bios */
  82
  83	/*
  84	 * RB tree of active children throtl_grp's, which are sorted by
  85	 * their ->disptime.
  86	 */
  87	struct rb_root_cached	pending_tree;	/* RB tree of active tgs */
  88	unsigned int		nr_pending;	/* # queued in the tree */
  89	unsigned long		first_pending_disptime;	/* disptime of the first tg */
  90	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
  91};
  92
  93enum tg_state_flags {
  94	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
  95	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
  96};
  97
  98#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
  99
 100enum {
 101	LIMIT_LOW,
 102	LIMIT_MAX,
 103	LIMIT_CNT,
 104};
 105
 106struct throtl_grp {
 107	/* must be the first member */
 108	struct blkg_policy_data pd;
 109
 110	/* active throtl group service_queue member */
 111	struct rb_node rb_node;
 112
 113	/* throtl_data this group belongs to */
 114	struct throtl_data *td;
 115
 116	/* this group's service queue */
 117	struct throtl_service_queue service_queue;
 118
 119	/*
 120	 * qnode_on_self is used when bios are directly queued to this
 121	 * throtl_grp so that local bios compete fairly with bios
 122	 * dispatched from children.  qnode_on_parent is used when bios are
 123	 * dispatched from this throtl_grp into its parent and will compete
 124	 * with the sibling qnode_on_parents and the parent's
 125	 * qnode_on_self.
 126	 */
 127	struct throtl_qnode qnode_on_self[2];
 128	struct throtl_qnode qnode_on_parent[2];
 129
 130	/*
 131	 * Dispatch time in jiffies. This is the estimated time when group
  132	 * will unthrottle and be ready to dispatch more bios. It is used as
 133	 * key to sort active groups in service tree.
 134	 */
 135	unsigned long disptime;
 136
 137	unsigned int flags;
 138
 139	/* are there any throtl rules between this group and td? */
 140	bool has_rules[2];
 141
 142	/* internally used bytes per second rate limits */
 143	uint64_t bps[2][LIMIT_CNT];
 144	/* user configured bps limits */
 145	uint64_t bps_conf[2][LIMIT_CNT];
 146
 147	/* internally used IOPS limits */
 148	unsigned int iops[2][LIMIT_CNT];
 149	/* user configured IOPS limits */
 150	unsigned int iops_conf[2][LIMIT_CNT];
 151
  152	/* Number of bytes dispatched in current slice */
 153	uint64_t bytes_disp[2];
 154	/* Number of bio's dispatched in current slice */
 155	unsigned int io_disp[2];
 156
 157	unsigned long last_low_overflow_time[2];
 158
 159	uint64_t last_bytes_disp[2];
 160	unsigned int last_io_disp[2];
 161
 162	unsigned long last_check_time;
 163
 164	unsigned long latency_target; /* us */
 165	unsigned long latency_target_conf; /* us */
 166	/* When did we start a new slice */
 167	unsigned long slice_start[2];
 168	unsigned long slice_end[2];
 169
 170	unsigned long last_finish_time; /* ns / 1024 */
 171	unsigned long checked_last_finish_time; /* ns / 1024 */
 172	unsigned long avg_idletime; /* ns / 1024 */
 173	unsigned long idletime_threshold; /* us */
 174	unsigned long idletime_threshold_conf; /* us */
 175
 176	unsigned int bio_cnt; /* total bios */
 177	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
 178	unsigned long bio_cnt_reset_time;
 179};
 180
 181/* We measure latency for request size from <= 4k to >= 1M */
 182#define LATENCY_BUCKET_SIZE 9
 183
 184struct latency_bucket {
 185	unsigned long total_latency; /* ns / 1024 */
 186	int samples;
 187};
 188
 189struct avg_latency_bucket {
 190	unsigned long latency; /* ns / 1024 */
 191	bool valid;
 192};
 193
 194struct throtl_data
 195{
 196	/* service tree for active throtl groups */
 197	struct throtl_service_queue service_queue;
 198
 199	struct request_queue *queue;
 200
 201	/* Total Number of queued bios on READ and WRITE lists */
 202	unsigned int nr_queued[2];
 203
 204	unsigned int throtl_slice;
 205
 206	/* Work for dispatching throttled bios */
 207	struct work_struct dispatch_work;
 208	unsigned int limit_index;
 209	bool limit_valid[LIMIT_CNT];
 210
 211	unsigned long low_upgrade_time;
 212	unsigned long low_downgrade_time;
 213
 214	unsigned int scale;
 215
 216	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
 217	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
 218	struct latency_bucket __percpu *latency_buckets[2];
 219	unsigned long last_calculate_time;
 220	unsigned long filtered_latency;
 221
 222	bool track_bio_latency;
 223};
 224
 225static void throtl_pending_timer_fn(struct timer_list *t);
 226
 227static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
 228{
 229	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
 230}
 231
 232static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
 233{
 234	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
 235}
 236
 237static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
 238{
 239	return pd_to_blkg(&tg->pd);
 240}
 241
 242/**
  243 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 244 * @sq: the throtl_service_queue of interest
 245 *
 246 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 247 * embedded in throtl_data, %NULL is returned.
 248 */
 249static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
 250{
 251	if (sq && sq->parent_sq)
 252		return container_of(sq, struct throtl_grp, service_queue);
 253	else
 254		return NULL;
 255}
 256
 257/**
 258 * sq_to_td - return throtl_data the specified service queue belongs to
 259 * @sq: the throtl_service_queue of interest
 260 *
 261 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 262 * Determine the associated throtl_data accordingly and return it.
 263 */
 264static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
 265{
 266	struct throtl_grp *tg = sq_to_tg(sq);
 267
 268	if (tg)
 269		return tg->td;
 270	else
 271		return container_of(sq, struct throtl_data, service_queue);
 272}
 273
 274/*
 275 * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scale is to
 276 * make the IO dispatch more smooth.
 277 * Scale up: linearly scale up according to lapsed time since upgrade. For
 278 *           every throtl_slice, the limit scales up 1/2 .low limit till the
 279 *           limit hits .max limit
 280 * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
 281 */
 282static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
 283{
 284	/* arbitrary value to avoid too big scale */
 285	if (td->scale < 4096 && time_after_eq(jiffies,
 286	    td->low_upgrade_time + td->scale * td->throtl_slice))
 287		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
 288
 289	return low + (low >> 1) * td->scale;
 290}
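/*
 * Editor's note (worked example): with a .low bps limit of 10 MiB/s, every
 * throtl_slice elapsed since an upgrade effectively bumps td->scale by one,
 * so after 4 slices the adjusted limit is low + (low >> 1) * 4 =
 * 10 MiB/s + 5 MiB/s * 4 = 30 MiB/s; tg_bps_limit()/tg_iops_limit() below
 * then clamp this to the .max limit.
 */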
 291
 292static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
 293{
 294	struct blkcg_gq *blkg = tg_to_blkg(tg);
 295	struct throtl_data *td;
 296	uint64_t ret;
 297
 298	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
 299		return U64_MAX;
 300
 301	td = tg->td;
 302	ret = tg->bps[rw][td->limit_index];
 303	if (ret == 0 && td->limit_index == LIMIT_LOW) {
 304		/* intermediate node or iops isn't 0 */
 305		if (!list_empty(&blkg->blkcg->css.children) ||
 306		    tg->iops[rw][td->limit_index])
 307			return U64_MAX;
 308		else
 309			return MIN_THROTL_BPS;
 310	}
 311
 312	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
 313	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
 314		uint64_t adjusted;
 315
 316		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
 317		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
 318	}
 319	return ret;
 320}
 321
 322static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 323{
 324	struct blkcg_gq *blkg = tg_to_blkg(tg);
 325	struct throtl_data *td;
 326	unsigned int ret;
 327
 328	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
 329		return UINT_MAX;
 330
 331	td = tg->td;
 332	ret = tg->iops[rw][td->limit_index];
 333	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
 334		/* intermediate node or bps isn't 0 */
 335		if (!list_empty(&blkg->blkcg->css.children) ||
 336		    tg->bps[rw][td->limit_index])
 337			return UINT_MAX;
 338		else
 339			return MIN_THROTL_IOPS;
 340	}
 341
 342	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
 343	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
 344		uint64_t adjusted;
 345
 346		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
 347		if (adjusted > UINT_MAX)
 348			adjusted = UINT_MAX;
 349		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
 350	}
 351	return ret;
 352}
 353
 354#define request_bucket_index(sectors) \
 355	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
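/*
 * Editor's note (illustrative): request_bucket_index() maps an IO's size in
 * sectors to one of the LATENCY_BUCKET_SIZE (9) buckets: a 4 KiB request is 8
 * sectors, order_base_2(8) - 3 = 0, so it lands in bucket 0; a 1 MiB request
 * is 2048 sectors, order_base_2(2048) - 3 = 8, the last bucket; anything
 * larger is clamped to bucket 8, matching the "<= 4k to >= 1M" comment above.
 */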
 356
 357/**
 358 * throtl_log - log debug message via blktrace
 359 * @sq: the service_queue being reported
 360 * @fmt: printf format string
 361 * @args: printf args
 362 *
 363 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 364 * throtl_grp; otherwise, just "throtl".
 365 */
 366#define throtl_log(sq, fmt, args...)	do {				\
 367	struct throtl_grp *__tg = sq_to_tg((sq));			\
 368	struct throtl_data *__td = sq_to_td((sq));			\
 369									\
 370	(void)__td;							\
 371	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
 372		break;							\
 373	if ((__tg)) {							\
 374		blk_add_cgroup_trace_msg(__td->queue,			\
 375			tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
 376	} else {							\
 377		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
 378	}								\
 379} while (0)
 380
 381static inline unsigned int throtl_bio_data_size(struct bio *bio)
 382{
 383	/* assume it's one sector */
 384	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
 385		return 512;
 386	return bio->bi_iter.bi_size;
 387}
 388
 389static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 390{
 391	INIT_LIST_HEAD(&qn->node);
 392	bio_list_init(&qn->bios);
 393	qn->tg = tg;
 394}
 395
 396/**
 397 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 398 * @bio: bio being added
 399 * @qn: qnode to add bio to
 400 * @queued: the service_queue->queued[] list @qn belongs to
 401 *
 402 * Add @bio to @qn and put @qn on @queued if it's not already on.
 403 * @qn->tg's reference count is bumped when @qn is activated.  See the
 404 * comment on top of throtl_qnode definition for details.
 405 */
 406static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
 407				 struct list_head *queued)
 408{
 409	bio_list_add(&qn->bios, bio);
 410	if (list_empty(&qn->node)) {
 411		list_add_tail(&qn->node, queued);
 412		blkg_get(tg_to_blkg(qn->tg));
 414}
 415
 416/**
 417 * throtl_peek_queued - peek the first bio on a qnode list
 418 * @queued: the qnode list to peek
 419 */
 420static struct bio *throtl_peek_queued(struct list_head *queued)
 421{
 422	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
 423	struct bio *bio;
 424
 425	if (list_empty(queued))
 426		return NULL;
 427
 428	bio = bio_list_peek(&qn->bios);
 429	WARN_ON_ONCE(!bio);
 430	return bio;
 431}
 432
 433/**
  434 * throtl_pop_queued - pop the first bio from a qnode list
 435 * @queued: the qnode list to pop a bio from
 436 * @tg_to_put: optional out argument for throtl_grp to put
 437 *
 438 * Pop the first bio from the qnode list @queued.  After popping, the first
 439 * qnode is removed from @queued if empty or moved to the end of @queued so
 440 * that the popping order is round-robin.
 441 *
 442 * When the first qnode is removed, its associated throtl_grp should be put
 443 * too.  If @tg_to_put is NULL, this function automatically puts it;
 444 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 445 * responsible for putting it.
 446 */
 447static struct bio *throtl_pop_queued(struct list_head *queued,
 448				     struct throtl_grp **tg_to_put)
 449{
 450	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
 451	struct bio *bio;
 452
 453	if (list_empty(queued))
 454		return NULL;
 455
 456	bio = bio_list_pop(&qn->bios);
 457	WARN_ON_ONCE(!bio);
 458
 459	if (bio_list_empty(&qn->bios)) {
 460		list_del_init(&qn->node);
 461		if (tg_to_put)
 462			*tg_to_put = qn->tg;
 463		else
 464			blkg_put(tg_to_blkg(qn->tg));
 465	} else {
 466		list_move_tail(&qn->node, queued);
 467	}
 468
 469	return bio;
 470}
 471
 472/* init a service_queue, assumes the caller zeroed it */
 473static void throtl_service_queue_init(struct throtl_service_queue *sq)
 474{
 475	INIT_LIST_HEAD(&sq->queued[0]);
 476	INIT_LIST_HEAD(&sq->queued[1]);
 477	sq->pending_tree = RB_ROOT_CACHED;
 478	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
 479}
 480
 481static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
 482						struct request_queue *q,
 483						struct blkcg *blkcg)
 484{
 485	struct throtl_grp *tg;
 486	int rw;
 487
 488	tg = kzalloc_node(sizeof(*tg), gfp, q->node);
 489	if (!tg)
 490		return NULL;
 491
 492	throtl_service_queue_init(&tg->service_queue);
 493
 494	for (rw = READ; rw <= WRITE; rw++) {
 495		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
 496		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
 497	}
 498
 499	RB_CLEAR_NODE(&tg->rb_node);
 500	tg->bps[READ][LIMIT_MAX] = U64_MAX;
 501	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
 502	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
 503	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
 504	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
 505	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
 506	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
 507	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
 508	/* LIMIT_LOW will have default value 0 */
 509
 510	tg->latency_target = DFL_LATENCY_TARGET;
 511	tg->latency_target_conf = DFL_LATENCY_TARGET;
 512	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
 513	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
 514
 515	return &tg->pd;
 516}
 517
 518static void throtl_pd_init(struct blkg_policy_data *pd)
 519{
 520	struct throtl_grp *tg = pd_to_tg(pd);
 521	struct blkcg_gq *blkg = tg_to_blkg(tg);
 522	struct throtl_data *td = blkg->q->td;
 523	struct throtl_service_queue *sq = &tg->service_queue;
 524
 525	/*
 526	 * If on the default hierarchy, we switch to properly hierarchical
 527	 * behavior where limits on a given throtl_grp are applied to the
 528	 * whole subtree rather than just the group itself.  e.g. If 16M
 529	 * read_bps limit is set on the root group, the whole system can't
 530	 * exceed 16M for the device.
 531	 *
 532	 * If not on the default hierarchy, the broken flat hierarchy
 533	 * behavior is retained where all throtl_grps are treated as if
 534	 * they're all separate root groups right below throtl_data.
 535	 * Limits of a group don't interact with limits of other groups
 536	 * regardless of the position of the group in the hierarchy.
 537	 */
 538	sq->parent_sq = &td->service_queue;
 539	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
 540		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 541	tg->td = td;
 542}
 543
 544/*
 545 * Set has_rules[] if @tg or any of its parents have limits configured.
 546 * This doesn't require walking up to the top of the hierarchy as the
 547 * parent's has_rules[] is guaranteed to be correct.
 548 */
 549static void tg_update_has_rules(struct throtl_grp *tg)
 550{
 551	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
 552	struct throtl_data *td = tg->td;
 553	int rw;
 554
 555	for (rw = READ; rw <= WRITE; rw++)
 556		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
 557			(td->limit_valid[td->limit_index] &&
 558			 (tg_bps_limit(tg, rw) != U64_MAX ||
 559			  tg_iops_limit(tg, rw) != UINT_MAX));
 560}
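
/*
 * For example, if a parent group has a read bps limit configured, every
 * descendant ends up with has_rules[READ] set even when it has no limit
 * of its own, so its bios still travel through blk-throttle and queue up
 * behind the parent's limit.
 */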
 561
 562static void throtl_pd_online(struct blkg_policy_data *pd)
 563{
 564	struct throtl_grp *tg = pd_to_tg(pd);
 565	/*
 566	 * We don't want new groups to escape the limits of their ancestors.
 567	 * Update has_rules[] after a new group is brought online.
 568	 */
 569	tg_update_has_rules(tg);
 570}
 571
 572static void blk_throtl_update_limit_valid(struct throtl_data *td)
 573{
 574	struct cgroup_subsys_state *pos_css;
 575	struct blkcg_gq *blkg;
 576	bool low_valid = false;
 577
 578	rcu_read_lock();
 579	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
 580		struct throtl_grp *tg = blkg_to_tg(blkg);
 581
 582		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
 583		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
 584			low_valid = true;
 585			break;
 586		}
 587	}
 588	rcu_read_unlock();
 589
 590	td->limit_valid[LIMIT_LOW] = low_valid;
 591}
 592
 593static void throtl_upgrade_state(struct throtl_data *td);
 594static void throtl_pd_offline(struct blkg_policy_data *pd)
 595{
 596	struct throtl_grp *tg = pd_to_tg(pd);
 597
 598	tg->bps[READ][LIMIT_LOW] = 0;
 599	tg->bps[WRITE][LIMIT_LOW] = 0;
 600	tg->iops[READ][LIMIT_LOW] = 0;
 601	tg->iops[WRITE][LIMIT_LOW] = 0;
 602
 603	blk_throtl_update_limit_valid(tg->td);
 604
 605	if (!tg->td->limit_valid[tg->td->limit_index])
 606		throtl_upgrade_state(tg->td);
 607}
 608
 609static void throtl_pd_free(struct blkg_policy_data *pd)
 610{
 611	struct throtl_grp *tg = pd_to_tg(pd);
 612
 613	del_timer_sync(&tg->service_queue.pending_timer);
 614	kfree(tg);
 615}
 616
 617static struct throtl_grp *
 618throtl_rb_first(struct throtl_service_queue *parent_sq)
 619{
 620	struct rb_node *n;
 621	/* Service tree is empty */
 622	if (!parent_sq->nr_pending)
 623		return NULL;
 624
 625	n = rb_first_cached(&parent_sq->pending_tree);
 626	WARN_ON_ONCE(!n);
 627	if (!n)
 628		return NULL;
 629	return rb_entry_tg(n);
 630}
 631
 632static void throtl_rb_erase(struct rb_node *n,
 633			    struct throtl_service_queue *parent_sq)
 634{
 635	rb_erase_cached(n, &parent_sq->pending_tree);
 636	RB_CLEAR_NODE(n);
 637	--parent_sq->nr_pending;
 638}
 639
 640static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
 641{
 642	struct throtl_grp *tg;
 643
 644	tg = throtl_rb_first(parent_sq);
 645	if (!tg)
 646		return;
 647
 648	parent_sq->first_pending_disptime = tg->disptime;
 649}
 650
 651static void tg_service_queue_add(struct throtl_grp *tg)
 652{
 653	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
 654	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
 655	struct rb_node *parent = NULL;
 656	struct throtl_grp *__tg;
 657	unsigned long key = tg->disptime;
 658	bool leftmost = true;
 659
 660	while (*node != NULL) {
 661		parent = *node;
 662		__tg = rb_entry_tg(parent);
 663
 664		if (time_before(key, __tg->disptime))
 665			node = &parent->rb_left;
 666		else {
 667			node = &parent->rb_right;
 668			leftmost = false;
 669		}
 670	}
 671
 672	rb_link_node(&tg->rb_node, parent, node);
 673	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
 674			       leftmost);
 675}
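
/*
 * The pending_tree is keyed by disptime (compared with time_before()),
 * so the cached leftmost node is always the child group that becomes
 * eligible to dispatch soonest.  E.g. with children whose disptime is
 * jiffies + 10 and jiffies + 50, the former is the leftmost node and is
 * the one returned by throtl_rb_first().
 */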
 676
 677static void __throtl_enqueue_tg(struct throtl_grp *tg)
 678{
 679	tg_service_queue_add(tg);
 680	tg->flags |= THROTL_TG_PENDING;
 681	tg->service_queue.parent_sq->nr_pending++;
 682}
 683
 684static void throtl_enqueue_tg(struct throtl_grp *tg)
 685{
 686	if (!(tg->flags & THROTL_TG_PENDING))
 687		__throtl_enqueue_tg(tg);
 688}
 689
 690static void __throtl_dequeue_tg(struct throtl_grp *tg)
 691{
 692	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
 693	tg->flags &= ~THROTL_TG_PENDING;
 694}
 695
 696static void throtl_dequeue_tg(struct throtl_grp *tg)
 697{
 698	if (tg->flags & THROTL_TG_PENDING)
 699		__throtl_dequeue_tg(tg);
 700}
 701
 702/* Call with queue lock held */
 703static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 704					  unsigned long expires)
 705{
 706	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
 707
 708	/*
 709	 * Since we are adjusting the throttle limit dynamically, the sleep
 710	 * time calculated according to previous limit might be invalid. It's
 711	 * possible the cgroup sleep time is very long and no other cgroups
 712	 * have IO running so notify the limit changes. Make sure the cgroup
 713	 * doesn't sleep too long to avoid the missed notification.
 714	 */
 715	if (time_after(expires, max_expire))
 716		expires = max_expire;
 717	mod_timer(&sq->pending_timer, expires);
 718	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
 719		   expires - jiffies, jiffies);
 720}
 721
 722/**
 723 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 724 * @sq: the service_queue to schedule dispatch for
 725 * @force: force scheduling
 726 *
 727 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 728 * dispatch time of the first pending child.  Returns %true if either timer
 729 * is armed or there's no pending child left.  %false if the current
 730 * dispatch window is still open and the caller should continue
 731 * dispatching.
 732 *
 733 * If @force is %true, the dispatch timer is always scheduled and this
 734 * function is guaranteed to return %true.  This is to be used when the
 735 * caller can't dispatch itself and needs to invoke pending_timer
 736 * unconditionally.  Note that forced scheduling is likely to induce short
 737 * delay before dispatch starts even if @sq->first_pending_disptime is not
 738 * in the future and thus shouldn't be used in hot paths.
 739 */
 740static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
 741					  bool force)
 742{
 743	/* any pending children left? */
 744	if (!sq->nr_pending)
 745		return true;
 746
 747	update_min_dispatch_time(sq);
 748
 749	/* is the next dispatch time in the future? */
 750	if (force || time_after(sq->first_pending_disptime, jiffies)) {
 751		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
 752		return true;
 753	}
 754
 755	/* tell the caller to continue dispatching */
 756	return false;
 757}
 758
 759static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
 760		bool rw, unsigned long start)
 761{
 762	tg->bytes_disp[rw] = 0;
 763	tg->io_disp[rw] = 0;
 764
 765	/*
 766	 * Previous slice has expired. We must have trimmed it after last
 767	 * bio dispatch. That means since start of last slice, we never used
 768	 * that bandwidth. Do try to make use of that bandwidth while giving
 769	 * credit.
 770	 */
 771	if (time_after_eq(start, tg->slice_start[rw]))
 772		tg->slice_start[rw] = start;
 773
 774	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
 775	throtl_log(&tg->service_queue,
 776		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
 777		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 778		   tg->slice_end[rw], jiffies);
 779}
 780
 781static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
 782{
 783	tg->bytes_disp[rw] = 0;
 784	tg->io_disp[rw] = 0;
 785	tg->slice_start[rw] = jiffies;
 786	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
 787	throtl_log(&tg->service_queue,
 788		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
 789		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 790		   tg->slice_end[rw], jiffies);
 791}
 792
 793static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
 794					unsigned long jiffy_end)
 795{
 796	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
 797}
 798
 799static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
 800				       unsigned long jiffy_end)
 801{
 802	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
 803	throtl_log(&tg->service_queue,
 804		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
 805		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 806		   tg->slice_end[rw], jiffies);
 807}
 808
 809/* Determine if previously allocated or extended slice is complete or not */
 810static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
 811{
 812	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
 813		return false;
 814
 815	return true;
 816}
 817
 818/* Trim the used slices and adjust slice start accordingly */
 819static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 820{
 821	unsigned long nr_slices, time_elapsed, io_trim;
 822	u64 bytes_trim, tmp;
 823
 824	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
 825
 826	/*
 827	 * If bps are unlimited (-1), then the time slice doesn't get
 828	 * renewed. Don't try to trim the slice if the slice is used up. A new
 829	 * slice will start when appropriate.
 830	 */
 831	if (throtl_slice_used(tg, rw))
 832		return;
 833
 834	/*
 835	 * A bio has been dispatched. Also adjust slice_end. It might happen
 836	 * that initially the cgroup limit was very low resulting in a high
 837	 * slice_end, but later the limit was bumped up and the bio was
 838	 * dispatched sooner, then we need to reduce slice_end. A high bogus
 839	 * slice_end is bad because it does not allow a new slice to start.
 840	 */
 841
 842	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
 843
 844	time_elapsed = jiffies - tg->slice_start[rw];
 845
 846	nr_slices = time_elapsed / tg->td->throtl_slice;
 847
 848	if (!nr_slices)
 849		return;
 850	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
 851	do_div(tmp, HZ);
 852	bytes_trim = tmp;
 853
 854	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
 855		HZ;
 856
 857	if (!bytes_trim && !io_trim)
 858		return;
 859
 860	if (tg->bytes_disp[rw] >= bytes_trim)
 861		tg->bytes_disp[rw] -= bytes_trim;
 862	else
 863		tg->bytes_disp[rw] = 0;
 864
 865	if (tg->io_disp[rw] >= io_trim)
 866		tg->io_disp[rw] -= io_trim;
 867	else
 868		tg->io_disp[rw] = 0;
 869
 870	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
 871
 872	throtl_log(&tg->service_queue,
 873		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
 874		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
 875		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
 876}
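
/*
 * Worked example (illustrative, assuming HZ=1000 and a 100ms
 * throtl_slice, i.e. 100 jiffies): with a 1 MiB/s bps limit and
 * nr_slices == 3, bytes_trim = 1048576 * 100 * 3 / 1000 ~= 314572, so
 * roughly 300 KiB of already-consumed budget is forgiven and
 * slice_start advances by 300 jiffies.
 */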
 877
 878static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
 879				  unsigned long *wait)
 880{
 881	bool rw = bio_data_dir(bio);
 882	unsigned int io_allowed;
 883	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 884	u64 tmp;
 885
 886	jiffy_elapsed = jiffies - tg->slice_start[rw];
 887
 888	/* Round up to the next throttle slice, wait time must be nonzero */
 889	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
 890
 891	/*
 892	 * jiffy_elapsed_rnd should not be a big value as the minimum iops can be
 893	 * 1; at most jiffy_elapsed should be equivalent to 1 second, as we
 894	 * will allow dispatch after 1 second and after that the slice should
 895	 * have been trimmed.
 896	 */
 897
 898	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
 899	do_div(tmp, HZ);
 900
 901	if (tmp > UINT_MAX)
 902		io_allowed = UINT_MAX;
 903	else
 904		io_allowed = tmp;
 905
 906	if (tg->io_disp[rw] + 1 <= io_allowed) {
 907		if (wait)
 908			*wait = 0;
 909		return true;
 910	}
 911
 912	/* Calc approx time to dispatch */
 913	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
 914
 915	if (wait)
 916		*wait = jiffy_wait;
 917	return false;
 918}
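
/*
 * Illustrative numbers (assuming HZ=1000, throtl_slice=100 jiffies): with
 * an iops limit of 100 and jiffy_elapsed == 0, jiffy_elapsed_rnd rounds
 * up to 100, so io_allowed = 100 * 100 / 1000 = 10 bios in this window;
 * the 11th bio gets *wait = 100 - 0 = 100 jiffies, i.e. it must wait for
 * the next slice boundary.
 */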
 919
 920static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 921				 unsigned long *wait)
 922{
 923	bool rw = bio_data_dir(bio);
 924	u64 bytes_allowed, extra_bytes, tmp;
 925	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 926	unsigned int bio_size = throtl_bio_data_size(bio);
 927
 928	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 929
 930	/* Slice has just started. Consider one slice interval */
 931	if (!jiffy_elapsed)
 932		jiffy_elapsed_rnd = tg->td->throtl_slice;
 933
 934	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
 935
 936	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
 937	do_div(tmp, HZ);
 938	bytes_allowed = tmp;
 939
 940	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
 941		if (wait)
 942			*wait = 0;
 943		return true;
 944	}
 945
 946	/* Calc approx time to dispatch */
 947	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
 948	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
 949
 950	if (!jiffy_wait)
 951		jiffy_wait = 1;
 952
 953	/*
 954	 * This wait time is without taking into consideration the rounding
 955	 * up we did. Add that time also.
 956	 */
 957	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
 958	if (wait)
 959		*wait = jiffy_wait;
 960	return false;
 961}
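
/*
 * Illustrative numbers (assuming HZ=1000, throtl_slice=100 jiffies): with
 * a 1 MiB/s (1048576 bps) limit, jiffy_elapsed == 0 and a 1 MiB bio,
 * bytes_allowed = 1048576 * 100 / 1000 = 104857 and
 * extra_bytes = 943719, so jiffy_wait = 943719 * 1000 / 1048576 ~= 900,
 * plus the 100-jiffy rounding, giving roughly one second of wait -- as
 * expected for 1 MiB at 1 MiB/s.
 */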
 962
 963/*
 964 * Returns whether one can dispatch a bio or not. Also returns the approx number
 965 * of jiffies to wait before this bio is within the IO rate limit and can be dispatched
 966 */
 967static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 968			    unsigned long *wait)
 969{
 970	bool rw = bio_data_dir(bio);
 971	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
 972
 973	/*
 974	 * Currently the whole state machine of the group depends on the first bio
 975	 * queued in the group bio list. So one should not be calling
 976	 * this function with a different bio if there are other bios
 977	 * queued.
 978	 */
 979	BUG_ON(tg->service_queue.nr_queued[rw] &&
 980	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
 981
 982	/* If tg->bps = -1, then BW is unlimited */
 983	if (tg_bps_limit(tg, rw) == U64_MAX &&
 984	    tg_iops_limit(tg, rw) == UINT_MAX) {
 985		if (wait)
 986			*wait = 0;
 987		return true;
 988	}
 989
 990	/*
 991	 * If previous slice expired, start a new one otherwise renew/extend
 992	 * existing slice to make sure it is at least throtl_slice interval
 993	 * long since now. New slice is started only for empty throttle group.
 994	 * If there is queued bio, that means there should be an active
 995	 * slice and it should be extended instead.
 996	 */
 997	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
 998		throtl_start_new_slice(tg, rw);
 999	else {
1000		if (time_before(tg->slice_end[rw],
1001		    jiffies + tg->td->throtl_slice))
1002			throtl_extend_slice(tg, rw,
1003				jiffies + tg->td->throtl_slice);
1004	}
1005
1006	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
1007	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
1008		if (wait)
1009			*wait = 0;
1010		return true;
1011	}
1012
1013	max_wait = max(bps_wait, iops_wait);
1014
1015	if (wait)
1016		*wait = max_wait;
1017
1018	if (time_before(tg->slice_end[rw], jiffies + max_wait))
1019		throtl_extend_slice(tg, rw, jiffies + max_wait);
1020
1021	return false;
1022}
1023
1024static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1025{
1026	bool rw = bio_data_dir(bio);
1027	unsigned int bio_size = throtl_bio_data_size(bio);
1028
1029	/* Charge the bio to the group */
1030	tg->bytes_disp[rw] += bio_size;
1031	tg->io_disp[rw]++;
1032	tg->last_bytes_disp[rw] += bio_size;
1033	tg->last_io_disp[rw]++;
1034
1035	/*
1036	 * BIO_THROTTLED is used to prevent the same bio from being throttled
1037	 * more than once as a throttled bio will go through blk-throtl the
1038	 * second time when it eventually gets issued.  Set it when a bio
1039	 * is being charged to a tg.
1040	 */
1041	if (!bio_flagged(bio, BIO_THROTTLED))
1042		bio_set_flag(bio, BIO_THROTTLED);
1043}
1044
1045/**
1046 * throtl_add_bio_tg - add a bio to the specified throtl_grp
1047 * @bio: bio to add
1048 * @qn: qnode to use
1049 * @tg: the target throtl_grp
1050 *
1051 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
1052 * tg->qnode_on_self[] is used.
1053 */
1054static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
1055			      struct throtl_grp *tg)
1056{
1057	struct throtl_service_queue *sq = &tg->service_queue;
1058	bool rw = bio_data_dir(bio);
1059
1060	if (!qn)
1061		qn = &tg->qnode_on_self[rw];
1062
1063	/*
1064	 * If @tg doesn't currently have any bios queued in the same
1065	 * direction, queueing @bio can change when @tg should be
1066	 * dispatched.  Mark that @tg was empty.  This is automatically
1067	 * cleared on the next tg_update_disptime().
1068	 */
1069	if (!sq->nr_queued[rw])
1070		tg->flags |= THROTL_TG_WAS_EMPTY;
1071
1072	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1073
1074	sq->nr_queued[rw]++;
1075	throtl_enqueue_tg(tg);
1076}
1077
1078static void tg_update_disptime(struct throtl_grp *tg)
1079{
1080	struct throtl_service_queue *sq = &tg->service_queue;
1081	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1082	struct bio *bio;
1083
1084	bio = throtl_peek_queued(&sq->queued[READ]);
1085	if (bio)
1086		tg_may_dispatch(tg, bio, &read_wait);
1087
1088	bio = throtl_peek_queued(&sq->queued[WRITE]);
1089	if (bio)
1090		tg_may_dispatch(tg, bio, &write_wait);
1091
1092	min_wait = min(read_wait, write_wait);
1093	disptime = jiffies + min_wait;
1094
1095	/* Update dispatch time */
1096	throtl_dequeue_tg(tg);
1097	tg->disptime = disptime;
1098	throtl_enqueue_tg(tg);
1099
1100	/* see throtl_add_bio_tg() */
1101	tg->flags &= ~THROTL_TG_WAS_EMPTY;
1102}
1103
1104static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1105					struct throtl_grp *parent_tg, bool rw)
1106{
1107	if (throtl_slice_used(parent_tg, rw)) {
1108		throtl_start_new_slice_with_credit(parent_tg, rw,
1109				child_tg->slice_start[rw]);
1110	}
1111
1112}
1113
1114static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1115{
1116	struct throtl_service_queue *sq = &tg->service_queue;
1117	struct throtl_service_queue *parent_sq = sq->parent_sq;
1118	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1119	struct throtl_grp *tg_to_put = NULL;
1120	struct bio *bio;
1121
1122	/*
1123	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1124	 * from @tg may put its reference and @parent_sq might end up
1125	 * getting released prematurely.  Remember the tg to put and put it
1126	 * after @bio is transferred to @parent_sq.
1127	 */
1128	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1129	sq->nr_queued[rw]--;
1130
1131	throtl_charge_bio(tg, bio);
1132
1133	/*
1134	 * If our parent is another tg, we just need to transfer @bio to
1135	 * the parent using throtl_add_bio_tg().  If our parent is
1136	 * @td->service_queue, @bio is ready to be issued.  Put it on its
1137	 * bio_lists[] and decrease total number queued.  The caller is
1138	 * responsible for issuing these bios.
1139	 */
1140	if (parent_tg) {
1141		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1142		start_parent_slice_with_credit(tg, parent_tg, rw);
1143	} else {
1144		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1145				     &parent_sq->queued[rw]);
1146		BUG_ON(tg->td->nr_queued[rw] <= 0);
1147		tg->td->nr_queued[rw]--;
1148	}
1149
1150	throtl_trim_slice(tg, rw);
1151
1152	if (tg_to_put)
1153		blkg_put(tg_to_blkg(tg_to_put));
1154}
1155
1156static int throtl_dispatch_tg(struct throtl_grp *tg)
1157{
1158	struct throtl_service_queue *sq = &tg->service_queue;
1159	unsigned int nr_reads = 0, nr_writes = 0;
1160	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
1161	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
1162	struct bio *bio;
1163
1164	/* Try to dispatch 75% READS and 25% WRITES */
1165
1166	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1167	       tg_may_dispatch(tg, bio, NULL)) {
1168
1169		tg_dispatch_one_bio(tg, bio_data_dir(bio));
1170		nr_reads++;
1171
1172		if (nr_reads >= max_nr_reads)
1173			break;
1174	}
1175
1176	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1177	       tg_may_dispatch(tg, bio, NULL)) {
1178
1179		tg_dispatch_one_bio(tg, bio_data_dir(bio));
1180		nr_writes++;
1181
1182		if (nr_writes >= max_nr_writes)
1183			break;
1184	}
1185
1186	return nr_reads + nr_writes;
1187}
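
/*
 * With throtl_grp_quantum set to 8 this works out to at most
 * max_nr_reads = 6 and max_nr_writes = 2 bios per invocation, which is
 * where the 75%/25% read/write split above comes from.
 */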
1188
1189static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1190{
1191	unsigned int nr_disp = 0;
1192
1193	while (1) {
1194		struct throtl_grp *tg = throtl_rb_first(parent_sq);
1195		struct throtl_service_queue *sq;
1196
1197		if (!tg)
1198			break;
1199
1200		if (time_before(jiffies, tg->disptime))
1201			break;
1202
1203		throtl_dequeue_tg(tg);
1204
1205		nr_disp += throtl_dispatch_tg(tg);
1206
1207		sq = &tg->service_queue;
1208		if (sq->nr_queued[0] || sq->nr_queued[1])
1209			tg_update_disptime(tg);
1210
1211		if (nr_disp >= throtl_quantum)
1212			break;
1213	}
1214
1215	return nr_disp;
1216}
1217
1218static bool throtl_can_upgrade(struct throtl_data *td,
1219	struct throtl_grp *this_tg);
1220/**
1221 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1222 * @t: the pending_timer member of the throtl_service_queue being serviced
1223 *
1224 * This timer is armed when a child throtl_grp with active bio's becomes
1225 * pending and queued on the service_queue's pending_tree and expires when
1226 * the first child throtl_grp should be dispatched.  This function
1227 * dispatches bio's from the children throtl_grps to the parent
1228 * service_queue.
1229 *
1230 * If the parent's parent is another throtl_grp, dispatching is propagated
1231 * by either arming its pending_timer or repeating dispatch directly.  If
1232 * the top-level service_tree is reached, throtl_data->dispatch_work is
1233 * kicked so that the ready bio's are issued.
1234 */
1235static void throtl_pending_timer_fn(struct timer_list *t)
1236{
1237	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1238	struct throtl_grp *tg = sq_to_tg(sq);
1239	struct throtl_data *td = sq_to_td(sq);
1240	struct request_queue *q = td->queue;
1241	struct throtl_service_queue *parent_sq;
1242	bool dispatched;
1243	int ret;
1244
1245	spin_lock_irq(&q->queue_lock);
1246	if (throtl_can_upgrade(td, NULL))
1247		throtl_upgrade_state(td);
1248
1249again:
1250	parent_sq = sq->parent_sq;
1251	dispatched = false;
1252
1253	while (true) {
1254		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1255			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
1256			   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1257
1258		ret = throtl_select_dispatch(sq);
1259		if (ret) {
1260			throtl_log(sq, "bios disp=%u", ret);
1261			dispatched = true;
1262		}
1263
1264		if (throtl_schedule_next_dispatch(sq, false))
1265			break;
1266
1267		/* this dispatch window is still open, relax and repeat */
1268		spin_unlock_irq(&q->queue_lock);
1269		cpu_relax();
1270		spin_lock_irq(&q->queue_lock);
1271	}
1272
1273	if (!dispatched)
1274		goto out_unlock;
1275
1276	if (parent_sq) {
1277		/* @parent_sq is another throtl_grp, propagate dispatch */
1278		if (tg->flags & THROTL_TG_WAS_EMPTY) {
1279			tg_update_disptime(tg);
1280			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1281				/* window is already open, repeat dispatching */
1282				sq = parent_sq;
1283				tg = sq_to_tg(sq);
1284				goto again;
1285			}
1286		}
1287	} else {
1288		/* reached the top-level, queue issuing */
1289		queue_work(kthrotld_workqueue, &td->dispatch_work);
1290	}
1291out_unlock:
1292	spin_unlock_irq(&q->queue_lock);
1293}
1294
1295/**
1296 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1297 * @work: work item being executed
1298 *
1299 * This function is queued for execution when bio's reach the bio_lists[]
1300 * of throtl_data->service_queue.  Those bio's are ready and issued by this
1301 * function.
1302 */
1303static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1304{
1305	struct throtl_data *td = container_of(work, struct throtl_data,
1306					      dispatch_work);
1307	struct throtl_service_queue *td_sq = &td->service_queue;
1308	struct request_queue *q = td->queue;
1309	struct bio_list bio_list_on_stack;
1310	struct bio *bio;
1311	struct blk_plug plug;
1312	int rw;
1313
1314	bio_list_init(&bio_list_on_stack);
1315
1316	spin_lock_irq(&q->queue_lock);
1317	for (rw = READ; rw <= WRITE; rw++)
1318		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1319			bio_list_add(&bio_list_on_stack, bio);
1320	spin_unlock_irq(&q->queue_lock);
1321
1322	if (!bio_list_empty(&bio_list_on_stack)) {
1323		blk_start_plug(&plug);
1324		while((bio = bio_list_pop(&bio_list_on_stack)))
1325			generic_make_request(bio);
1326		blk_finish_plug(&plug);
1327	}
1328}
1329
1330static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1331			      int off)
1332{
1333	struct throtl_grp *tg = pd_to_tg(pd);
1334	u64 v = *(u64 *)((void *)tg + off);
1335
1336	if (v == U64_MAX)
1337		return 0;
1338	return __blkg_prfill_u64(sf, pd, v);
1339}
1340
1341static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1342			       int off)
1343{
1344	struct throtl_grp *tg = pd_to_tg(pd);
1345	unsigned int v = *(unsigned int *)((void *)tg + off);
1346
1347	if (v == UINT_MAX)
1348		return 0;
1349	return __blkg_prfill_u64(sf, pd, v);
1350}
1351
1352static int tg_print_conf_u64(struct seq_file *sf, void *v)
1353{
1354	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1355			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1356	return 0;
1357}
1358
1359static int tg_print_conf_uint(struct seq_file *sf, void *v)
1360{
1361	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1362			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1363	return 0;
1364}
1365
1366static void tg_conf_updated(struct throtl_grp *tg, bool global)
1367{
1368	struct throtl_service_queue *sq = &tg->service_queue;
1369	struct cgroup_subsys_state *pos_css;
1370	struct blkcg_gq *blkg;
1371
1372	throtl_log(&tg->service_queue,
1373		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1374		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1375		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1376
1377	/*
1378	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
1379	 * considered to have rules if either the tg itself or any of its
1380	 * ancestors has rules.  This identifies groups without any
1381	 * restrictions in the whole hierarchy and allows them to bypass
1382	 * blk-throttle.
1383	 */
1384	blkg_for_each_descendant_pre(blkg, pos_css,
1385			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1386		struct throtl_grp *this_tg = blkg_to_tg(blkg);
1387		struct throtl_grp *parent_tg;
1388
1389		tg_update_has_rules(this_tg);
1390		/* ignore root/second level */
1391		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1392		    !blkg->parent->parent)
1393			continue;
1394		parent_tg = blkg_to_tg(blkg->parent);
1395		/*
1396		 * make sure all children have a lower idle time threshold and
1397		 * higher latency target
1398		 */
1399		this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1400				parent_tg->idletime_threshold);
1401		this_tg->latency_target = max(this_tg->latency_target,
1402				parent_tg->latency_target);
1403	}
1404
1405	/*
1406	 * We're already holding queue_lock and know @tg is valid.  Let's
1407	 * apply the new config directly.
1408	 *
1409	 * Restart the slices for both READ and WRITE. It might happen
1410	 * that a group's limits are dropped suddenly and we don't want to
1411	 * account recently dispatched IO at the new low rate.
1412	 */
1413	throtl_start_new_slice(tg, 0);
1414	throtl_start_new_slice(tg, 1);
1415
1416	if (tg->flags & THROTL_TG_PENDING) {
1417		tg_update_disptime(tg);
1418		throtl_schedule_next_dispatch(sq->parent_sq, true);
1419	}
1420}
1421
1422static ssize_t tg_set_conf(struct kernfs_open_file *of,
1423			   char *buf, size_t nbytes, loff_t off, bool is_u64)
1424{
1425	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1426	struct blkg_conf_ctx ctx;
1427	struct throtl_grp *tg;
1428	int ret;
1429	u64 v;
1430
1431	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1432	if (ret)
1433		return ret;
1434
1435	ret = -EINVAL;
1436	if (sscanf(ctx.body, "%llu", &v) != 1)
1437		goto out_finish;
1438	if (!v)
1439		v = U64_MAX;
1440
1441	tg = blkg_to_tg(ctx.blkg);
1442
1443	if (is_u64)
1444		*(u64 *)((void *)tg + of_cft(of)->private) = v;
1445	else
1446		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1447
1448	tg_conf_updated(tg, false);
1449	ret = 0;
1450out_finish:
1451	blkg_conf_finish(&ctx);
1452	return ret ?: nbytes;
1453}
1454
1455static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1456			       char *buf, size_t nbytes, loff_t off)
1457{
1458	return tg_set_conf(of, buf, nbytes, off, true);
1459}
1460
1461static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1462				char *buf, size_t nbytes, loff_t off)
1463{
1464	return tg_set_conf(of, buf, nbytes, off, false);
1465}
1466
1467static struct cftype throtl_legacy_files[] = {
1468	{
1469		.name = "throttle.read_bps_device",
1470		.private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1471		.seq_show = tg_print_conf_u64,
1472		.write = tg_set_conf_u64,
1473	},
1474	{
1475		.name = "throttle.write_bps_device",
1476		.private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1477		.seq_show = tg_print_conf_u64,
1478		.write = tg_set_conf_u64,
1479	},
1480	{
1481		.name = "throttle.read_iops_device",
1482		.private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1483		.seq_show = tg_print_conf_uint,
1484		.write = tg_set_conf_uint,
1485	},
1486	{
1487		.name = "throttle.write_iops_device",
1488		.private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1489		.seq_show = tg_print_conf_uint,
1490		.write = tg_set_conf_uint,
1491	},
1492	{
1493		.name = "throttle.io_service_bytes",
1494		.private = (unsigned long)&blkcg_policy_throtl,
1495		.seq_show = blkg_print_stat_bytes,
1496	},
1497	{
1498		.name = "throttle.io_service_bytes_recursive",
1499		.private = (unsigned long)&blkcg_policy_throtl,
1500		.seq_show = blkg_print_stat_bytes_recursive,
1501	},
1502	{
1503		.name = "throttle.io_serviced",
1504		.private = (unsigned long)&blkcg_policy_throtl,
1505		.seq_show = blkg_print_stat_ios,
1506	},
1507	{
1508		.name = "throttle.io_serviced_recursive",
1509		.private = (unsigned long)&blkcg_policy_throtl,
1510		.seq_show = blkg_print_stat_ios_recursive,
1511	},
1512	{ }	/* terminate */
1513};
1514
1515static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1516			 int off)
1517{
1518	struct throtl_grp *tg = pd_to_tg(pd);
1519	const char *dname = blkg_dev_name(pd->blkg);
1520	char bufs[4][21] = { "max", "max", "max", "max" };
1521	u64 bps_dft;
1522	unsigned int iops_dft;
1523	char idle_time[26] = "";
1524	char latency_time[26] = "";
1525
1526	if (!dname)
1527		return 0;
1528
1529	if (off == LIMIT_LOW) {
1530		bps_dft = 0;
1531		iops_dft = 0;
1532	} else {
1533		bps_dft = U64_MAX;
1534		iops_dft = UINT_MAX;
1535	}
1536
1537	if (tg->bps_conf[READ][off] == bps_dft &&
1538	    tg->bps_conf[WRITE][off] == bps_dft &&
1539	    tg->iops_conf[READ][off] == iops_dft &&
1540	    tg->iops_conf[WRITE][off] == iops_dft &&
1541	    (off != LIMIT_LOW ||
1542	     (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1543	      tg->latency_target_conf == DFL_LATENCY_TARGET)))
1544		return 0;
1545
1546	if (tg->bps_conf[READ][off] != U64_MAX)
1547		snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1548			tg->bps_conf[READ][off]);
1549	if (tg->bps_conf[WRITE][off] != U64_MAX)
1550		snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1551			tg->bps_conf[WRITE][off]);
1552	if (tg->iops_conf[READ][off] != UINT_MAX)
1553		snprintf(bufs[2], sizeof(bufs[2]), "%u",
1554			tg->iops_conf[READ][off]);
1555	if (tg->iops_conf[WRITE][off] != UINT_MAX)
1556		snprintf(bufs[3], sizeof(bufs[3]), "%u",
1557			tg->iops_conf[WRITE][off]);
1558	if (off == LIMIT_LOW) {
1559		if (tg->idletime_threshold_conf == ULONG_MAX)
1560			strcpy(idle_time, " idle=max");
1561		else
1562			snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1563				tg->idletime_threshold_conf);
1564
1565		if (tg->latency_target_conf == ULONG_MAX)
1566			strcpy(latency_time, " latency=max");
1567		else
1568			snprintf(latency_time, sizeof(latency_time),
1569				" latency=%lu", tg->latency_target_conf);
1570	}
1571
1572	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1573		   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1574		   latency_time);
1575	return 0;
1576}
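
/*
 * Example of the line emitted above (hypothetical 8:16 device with only
 * a read bps limit configured on the "max" file):
 *
 *   8:16 rbps=1048576 wbps=max riops=max wiops=max
 *
 * For the "low" file the idle= and latency= fields are appended as well.
 */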
1577
1578static int tg_print_limit(struct seq_file *sf, void *v)
1579{
1580	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1581			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1582	return 0;
1583}
1584
1585static ssize_t tg_set_limit(struct kernfs_open_file *of,
1586			  char *buf, size_t nbytes, loff_t off)
1587{
1588	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1589	struct blkg_conf_ctx ctx;
1590	struct throtl_grp *tg;
1591	u64 v[4];
1592	unsigned long idle_time;
1593	unsigned long latency_time;
1594	int ret;
1595	int index = of_cft(of)->private;
1596
1597	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1598	if (ret)
1599		return ret;
1600
1601	tg = blkg_to_tg(ctx.blkg);
1602
1603	v[0] = tg->bps_conf[READ][index];
1604	v[1] = tg->bps_conf[WRITE][index];
1605	v[2] = tg->iops_conf[READ][index];
1606	v[3] = tg->iops_conf[WRITE][index];
1607
1608	idle_time = tg->idletime_threshold_conf;
1609	latency_time = tg->latency_target_conf;
1610	while (true) {
1611		char tok[27];	/* wiops=18446744073709551616 */
1612		char *p;
1613		u64 val = U64_MAX;
1614		int len;
1615
1616		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1617			break;
1618		if (tok[0] == '\0')
1619			break;
1620		ctx.body += len;
1621
1622		ret = -EINVAL;
1623		p = tok;
1624		strsep(&p, "=");
1625		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1626			goto out_finish;
1627
1628		ret = -ERANGE;
1629		if (!val)
1630			goto out_finish;
1631
1632		ret = -EINVAL;
1633		if (!strcmp(tok, "rbps"))
1634			v[0] = val;
1635		else if (!strcmp(tok, "wbps"))
1636			v[1] = val;
1637		else if (!strcmp(tok, "riops"))
1638			v[2] = min_t(u64, val, UINT_MAX);
1639		else if (!strcmp(tok, "wiops"))
1640			v[3] = min_t(u64, val, UINT_MAX);
1641		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1642			idle_time = val;
1643		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1644			latency_time = val;
1645		else
1646			goto out_finish;
1647	}
1648
1649	tg->bps_conf[READ][index] = v[0];
1650	tg->bps_conf[WRITE][index] = v[1];
1651	tg->iops_conf[READ][index] = v[2];
1652	tg->iops_conf[WRITE][index] = v[3];
1653
1654	if (index == LIMIT_MAX) {
1655		tg->bps[READ][index] = v[0];
1656		tg->bps[WRITE][index] = v[1];
1657		tg->iops[READ][index] = v[2];
1658		tg->iops[WRITE][index] = v[3];
1659	}
1660	tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1661		tg->bps_conf[READ][LIMIT_MAX]);
1662	tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1663		tg->bps_conf[WRITE][LIMIT_MAX]);
1664	tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1665		tg->iops_conf[READ][LIMIT_MAX]);
1666	tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1667		tg->iops_conf[WRITE][LIMIT_MAX]);
1668	tg->idletime_threshold_conf = idle_time;
1669	tg->latency_target_conf = latency_time;
1670
1671	/* force user to configure all settings for low limit  */
1672	if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1673	      tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1674	    tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1675	    tg->latency_target_conf == DFL_LATENCY_TARGET) {
1676		tg->bps[READ][LIMIT_LOW] = 0;
1677		tg->bps[WRITE][LIMIT_LOW] = 0;
1678		tg->iops[READ][LIMIT_LOW] = 0;
1679		tg->iops[WRITE][LIMIT_LOW] = 0;
1680		tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1681		tg->latency_target = DFL_LATENCY_TARGET;
1682	} else if (index == LIMIT_LOW) {
1683		tg->idletime_threshold = tg->idletime_threshold_conf;
1684		tg->latency_target = tg->latency_target_conf;
1685	}
1686
1687	blk_throtl_update_limit_valid(tg->td);
1688	if (tg->td->limit_valid[LIMIT_LOW]) {
1689		if (index == LIMIT_LOW)
1690			tg->td->limit_index = LIMIT_LOW;
1691	} else
1692		tg->td->limit_index = LIMIT_MAX;
1693	tg_conf_updated(tg, index == LIMIT_LOW &&
1694		tg->td->limit_valid[LIMIT_LOW]);
1695	ret = 0;
1696out_finish:
1697	blkg_conf_finish(&ctx);
1698	return ret ?: nbytes;
1699}
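
/*
 * A minimal usage sketch of the parser above (hypothetical 8:16 device
 * numbers), written to the cgroup v2 interface file backed by
 * tg_set_limit():
 *
 *   # echo "8:16 rbps=2097152 wiops=120" > io.max
 *   # echo "8:16 rbps=max" > io.max
 *
 * Unknown keys, a value of 0, or an incomplete low-limit configuration
 * are rejected or reset as handled above.
 */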
1700
1701static struct cftype throtl_files[] = {
1702#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1703	{
1704		.name = "low",
1705		.flags = CFTYPE_NOT_ON_ROOT,
1706		.seq_show = tg_print_limit,
1707		.write = tg_set_limit,
1708		.private = LIMIT_LOW,
1709	},
1710#endif
1711	{
1712		.name = "max",
1713		.flags = CFTYPE_NOT_ON_ROOT,
1714		.seq_show = tg_print_limit,
1715		.write = tg_set_limit,
1716		.private = LIMIT_MAX,
1717	},
1718	{ }	/* terminate */
1719};
1720
1721static void throtl_shutdown_wq(struct request_queue *q)
1722{
1723	struct throtl_data *td = q->td;
1724
1725	cancel_work_sync(&td->dispatch_work);
1726}
1727
1728static struct blkcg_policy blkcg_policy_throtl = {
1729	.dfl_cftypes		= throtl_files,
1730	.legacy_cftypes		= throtl_legacy_files,
1731
1732	.pd_alloc_fn		= throtl_pd_alloc,
1733	.pd_init_fn		= throtl_pd_init,
1734	.pd_online_fn		= throtl_pd_online,
1735	.pd_offline_fn		= throtl_pd_offline,
1736	.pd_free_fn		= throtl_pd_free,
1737};
1738
1739static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1740{
1741	unsigned long rtime = jiffies, wtime = jiffies;
1742
1743	if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1744		rtime = tg->last_low_overflow_time[READ];
1745	if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1746		wtime = tg->last_low_overflow_time[WRITE];
1747	return min(rtime, wtime);
1748}
1749
1750/* tg should not be an intermediate node */
1751static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1752{
1753	struct throtl_service_queue *parent_sq;
1754	struct throtl_grp *parent = tg;
1755	unsigned long ret = __tg_last_low_overflow_time(tg);
1756
1757	while (true) {
1758		parent_sq = parent->service_queue.parent_sq;
1759		parent = sq_to_tg(parent_sq);
1760		if (!parent)
1761			break;
1762
1763		/*
1764		 * If the parent doesn't have a low limit, it always reaches the
1765		 * low limit. Its overflow time is useless for its children.
1766		 */
1767		if (!parent->bps[READ][LIMIT_LOW] &&
1768		    !parent->iops[READ][LIMIT_LOW] &&
1769		    !parent->bps[WRITE][LIMIT_LOW] &&
1770		    !parent->iops[WRITE][LIMIT_LOW])
1771			continue;
1772		if (time_after(__tg_last_low_overflow_time(parent), ret))
1773			ret = __tg_last_low_overflow_time(parent);
1774	}
1775	return ret;
1776}
1777
1778static bool throtl_tg_is_idle(struct throtl_grp *tg)
1779{
1780	/*
1781	 * cgroup is idle if:
1782	 * - a single idle period is too long: longer than a fixed value (in case
1783	 *   the user configures too big a threshold) or 4 times the idletime threshold
1784	 * - average think time is more than threshold
1785	 * - IO latency is largely below threshold
1786	 */
1787	unsigned long time;
1788	bool ret;
1789
1790	time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1791	ret = tg->latency_target == DFL_LATENCY_TARGET ||
1792	      tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1793	      (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1794	      tg->avg_idletime > tg->idletime_threshold ||
1795	      (tg->latency_target && tg->bio_cnt &&
1796		tg->bad_bio_cnt * 5 < tg->bio_cnt);
1797	throtl_log(&tg->service_queue,
1798		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1799		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1800		tg->bio_cnt, ret, tg->td->scale);
1801	return ret;
1802}
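
/*
 * Note that "tg->bad_bio_cnt * 5 < tg->bio_cnt" above means fewer than
 * 20% of the sampled bios exceeded the latency threshold, and the
 * "(ktime_get_ns() >> 10) - tg->last_finish_time > time" term means the
 * group has been completely idle for longer than min(MAX_IDLE_TIME,
 * 4 * idletime_threshold).
 */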
1803
1804static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1805{
1806	struct throtl_service_queue *sq = &tg->service_queue;
1807	bool read_limit, write_limit;
1808
1809	/*
1810	 * if the cgroup reaches its low limit (if the low limit is 0, the cgroup
1811	 * always reaches it), it's ok to upgrade to the next limit
1812	 */
1813	read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1814	write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1815	if (!read_limit && !write_limit)
1816		return true;
1817	if (read_limit && sq->nr_queued[READ] &&
1818	    (!write_limit || sq->nr_queued[WRITE]))
1819		return true;
1820	if (write_limit && sq->nr_queued[WRITE] &&
1821	    (!read_limit || sq->nr_queued[READ]))
1822		return true;
1823
1824	if (time_after_eq(jiffies,
1825		tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1826	    throtl_tg_is_idle(tg))
1827		return true;
1828	return false;
1829}
1830
1831static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1832{
1833	while (true) {
1834		if (throtl_tg_can_upgrade(tg))
1835			return true;
1836		tg = sq_to_tg(tg->service_queue.parent_sq);
1837		if (!tg || !tg_to_blkg(tg)->parent)
1838			return false;
1839	}
1840	return false;
1841}
1842
1843static bool throtl_can_upgrade(struct throtl_data *td,
1844	struct throtl_grp *this_tg)
1845{
1846	struct cgroup_subsys_state *pos_css;
1847	struct blkcg_gq *blkg;
1848
1849	if (td->limit_index != LIMIT_LOW)
1850		return false;
1851
1852	if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1853		return false;
1854
1855	rcu_read_lock();
1856	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1857		struct throtl_grp *tg = blkg_to_tg(blkg);
1858
1859		if (tg == this_tg)
1860			continue;
1861		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1862			continue;
1863		if (!throtl_hierarchy_can_upgrade(tg)) {
1864			rcu_read_unlock();
1865			return false;
1866		}
1867	}
1868	rcu_read_unlock();
1869	return true;
1870}
1871
1872static void throtl_upgrade_check(struct throtl_grp *tg)
1873{
1874	unsigned long now = jiffies;
1875
1876	if (tg->td->limit_index != LIMIT_LOW)
1877		return;
1878
1879	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1880		return;
1881
1882	tg->last_check_time = now;
1883
1884	if (!time_after_eq(now,
1885	     __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1886		return;
1887
1888	if (throtl_can_upgrade(tg->td, NULL))
1889		throtl_upgrade_state(tg->td);
1890}
1891
1892static void throtl_upgrade_state(struct throtl_data *td)
1893{
1894	struct cgroup_subsys_state *pos_css;
1895	struct blkcg_gq *blkg;
1896
1897	throtl_log(&td->service_queue, "upgrade to max");
1898	td->limit_index = LIMIT_MAX;
1899	td->low_upgrade_time = jiffies;
1900	td->scale = 0;
1901	rcu_read_lock();
1902	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1903		struct throtl_grp *tg = blkg_to_tg(blkg);
1904		struct throtl_service_queue *sq = &tg->service_queue;
1905
1906		tg->disptime = jiffies - 1;
1907		throtl_select_dispatch(sq);
1908		throtl_schedule_next_dispatch(sq, true);
1909	}
1910	rcu_read_unlock();
1911	throtl_select_dispatch(&td->service_queue);
1912	throtl_schedule_next_dispatch(&td->service_queue, true);
1913	queue_work(kthrotld_workqueue, &td->dispatch_work);
1914}
1915
1916static void throtl_downgrade_state(struct throtl_data *td, int new)
1917{
1918	td->scale /= 2;
1919
1920	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1921	if (td->scale) {
1922		td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1923		return;
1924	}
1925
1926	td->limit_index = new;
1927	td->low_downgrade_time = jiffies;
1928}
1929
1930static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1931{
1932	struct throtl_data *td = tg->td;
1933	unsigned long now = jiffies;
1934
1935	/*
1936	 * If cgroup is below low limit, consider downgrade and throttle other
1937	 * cgroups
1938	 */
1939	if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1940	    time_after_eq(now, tg_last_low_overflow_time(tg) +
1941					td->throtl_slice) &&
1942	    (!throtl_tg_is_idle(tg) ||
1943	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1944		return true;
1945	return false;
1946}
1947
1948static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1949{
1950	while (true) {
1951		if (!throtl_tg_can_downgrade(tg))
1952			return false;
1953		tg = sq_to_tg(tg->service_queue.parent_sq);
1954		if (!tg || !tg_to_blkg(tg)->parent)
1955			break;
1956	}
1957	return true;
1958}
1959
1960static void throtl_downgrade_check(struct throtl_grp *tg)
1961{
1962	uint64_t bps;
1963	unsigned int iops;
1964	unsigned long elapsed_time;
1965	unsigned long now = jiffies;
1966
1967	if (tg->td->limit_index != LIMIT_MAX ||
1968	    !tg->td->limit_valid[LIMIT_LOW])
1969		return;
1970	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1971		return;
1972	if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1973		return;
1974
1975	elapsed_time = now - tg->last_check_time;
1976	tg->last_check_time = now;
1977
1978	if (time_before(now, tg_last_low_overflow_time(tg) +
1979			tg->td->throtl_slice))
1980		return;
1981
1982	if (tg->bps[READ][LIMIT_LOW]) {
1983		bps = tg->last_bytes_disp[READ] * HZ;
1984		do_div(bps, elapsed_time);
1985		if (bps >= tg->bps[READ][LIMIT_LOW])
1986			tg->last_low_overflow_time[READ] = now;
1987	}
1988
1989	if (tg->bps[WRITE][LIMIT_LOW]) {
1990		bps = tg->last_bytes_disp[WRITE] * HZ;
1991		do_div(bps, elapsed_time);
1992		if (bps >= tg->bps[WRITE][LIMIT_LOW])
1993			tg->last_low_overflow_time[WRITE] = now;
1994	}
1995
1996	if (tg->iops[READ][LIMIT_LOW]) {
1997		iops = tg->last_io_disp[READ] * HZ / elapsed_time;
1998		if (iops >= tg->iops[READ][LIMIT_LOW])
1999			tg->last_low_overflow_time[READ] = now;
2000	}
2001
2002	if (tg->iops[WRITE][LIMIT_LOW]) {
2003		iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2004		if (iops >= tg->iops[WRITE][LIMIT_LOW])
2005			tg->last_low_overflow_time[WRITE] = now;
2006	}
2007
2008	/*
2009	 * If cgroup is below low limit, consider downgrade and throttle other
2010	 * cgroups
2011	 */
2012	if (throtl_hierarchy_can_downgrade(tg))
2013		throtl_downgrade_state(tg->td, LIMIT_LOW);
2014
2015	tg->last_bytes_disp[READ] = 0;
2016	tg->last_bytes_disp[WRITE] = 0;
2017	tg->last_io_disp[READ] = 0;
2018	tg->last_io_disp[WRITE] = 0;
2019}
2020
2021static void blk_throtl_update_idletime(struct throtl_grp *tg)
2022{
2023	unsigned long now = ktime_get_ns() >> 10;
2024	unsigned long last_finish_time = tg->last_finish_time;
2025
2026	if (now <= last_finish_time || last_finish_time == 0 ||
2027	    last_finish_time == tg->checked_last_finish_time)
2028		return;
2029
2030	tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2031	tg->checked_last_finish_time = last_finish_time;
2032}
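
/*
 * avg_idletime is an exponentially weighted moving average with a 7/8
 * decay, in ~microseconds (ns >> 10).  For example, with a previous
 * average of 1000 and a new idle gap of 2000, the updated average is
 * (1000 * 7 + 2000) >> 3 = 1125.
 */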
2033
2034#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2035static void throtl_update_latency_buckets(struct throtl_data *td)
2036{
2037	struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
2038	int i, cpu, rw;
2039	unsigned long last_latency[2] = { 0 };
2040	unsigned long latency[2];
2041
2042	if (!blk_queue_nonrot(td->queue))
2043		return;
2044	if (time_before(jiffies, td->last_calculate_time + HZ))
2045		return;
2046	td->last_calculate_time = jiffies;
2047
2048	memset(avg_latency, 0, sizeof(avg_latency));
2049	for (rw = READ; rw <= WRITE; rw++) {
2050		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2051			struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
2052
2053			for_each_possible_cpu(cpu) {
2054				struct latency_bucket *bucket;
2055
2056				/* this isn't race free, but ok in practice */
2057				bucket = per_cpu_ptr(td->latency_buckets[rw],
2058					cpu);
2059				tmp->total_latency += bucket[i].total_latency;
2060				tmp->samples += bucket[i].samples;
2061				bucket[i].total_latency = 0;
2062				bucket[i].samples = 0;
2063			}
2064
2065			if (tmp->samples >= 32) {
2066				int samples = tmp->samples;
2067
2068				latency[rw] = tmp->total_latency;
2069
2070				tmp->total_latency = 0;
2071				tmp->samples = 0;
2072				latency[rw] /= samples;
2073				if (latency[rw] == 0)
2074					continue;
2075				avg_latency[rw][i].latency = latency[rw];
2076			}
2077		}
2078	}
2079
2080	for (rw = READ; rw <= WRITE; rw++) {
2081		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2082			if (!avg_latency[rw][i].latency) {
2083				if (td->avg_buckets[rw][i].latency < last_latency[rw])
2084					td->avg_buckets[rw][i].latency =
2085						last_latency[rw];
2086				continue;
2087			}
2088
2089			if (!td->avg_buckets[rw][i].valid)
2090				latency[rw] = avg_latency[rw][i].latency;
2091			else
2092				latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2093					avg_latency[rw][i].latency) >> 3;
2094
2095			td->avg_buckets[rw][i].latency = max(latency[rw],
2096				last_latency[rw]);
2097			td->avg_buckets[rw][i].valid = true;
2098			last_latency[rw] = td->avg_buckets[rw][i].latency;
2099		}
2100	}
2101
2102	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2103		throtl_log(&td->service_queue,
2104			"Latency bucket %d: read latency=%ld, read valid=%d, "
2105			"write latency=%ld, write valid=%d", i,
2106			td->avg_buckets[READ][i].latency,
2107			td->avg_buckets[READ][i].valid,
2108			td->avg_buckets[WRITE][i].latency,
2109			td->avg_buckets[WRITE][i].valid);
2110}
2111#else
2112static inline void throtl_update_latency_buckets(struct throtl_data *td)
2113{
2114}
2115#endif
2116
2117bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
2118		    struct bio *bio)
2119{
2120	struct throtl_qnode *qn = NULL;
2121	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
2122	struct throtl_service_queue *sq;
2123	bool rw = bio_data_dir(bio);
2124	bool throttled = false;
2125	struct throtl_data *td = tg->td;
2126
2127	WARN_ON_ONCE(!rcu_read_lock_held());
2128
2129	/* see throtl_charge_bio() */
2130	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
2131		goto out;
2132
2133	spin_lock_irq(&q->queue_lock);
2134
2135	throtl_update_latency_buckets(td);
2136
2137	blk_throtl_update_idletime(tg);
2138
2139	sq = &tg->service_queue;
2140
2141again:
2142	while (true) {
2143		if (tg->last_low_overflow_time[rw] == 0)
2144			tg->last_low_overflow_time[rw] = jiffies;
2145		throtl_downgrade_check(tg);
2146		throtl_upgrade_check(tg);
2147		/* throtl is FIFO - if bios are already queued, should queue */
2148		if (sq->nr_queued[rw])
2149			break;
2150
2151		/* if above limits, break to queue */
2152		if (!tg_may_dispatch(tg, bio, NULL)) {
2153			tg->last_low_overflow_time[rw] = jiffies;
2154			if (throtl_can_upgrade(td, tg)) {
2155				throtl_upgrade_state(td);
2156				goto again;
2157			}
2158			break;
2159		}
2160
2161		/* within limits, let's charge and dispatch directly */
2162		throtl_charge_bio(tg, bio);
2163
2164		/*
2165		 * We need to trim the slice even when bios are not being queued,
2166		 * otherwise it might happen that a bio is not queued for
2167		 * a long time and the slice keeps on extending and trim is not
2168		 * called for a long time. Now if limits are reduced suddenly
2169		 * we take into account all the IO dispatched so far at the new
2170		 * low rate and newly queued IO gets a really long dispatch
2171		 * time.
2172		 *
2173		 * So keep on trimming slice even if bio is not queued.
2174		 */
2175		throtl_trim_slice(tg, rw);
2176
2177		/*
2178		 * @bio passed through this layer without being throttled.
2179		 * Climb up the ladder.  If we're already at the top, it
2180		 * can be executed directly.
2181		 */
2182		qn = &tg->qnode_on_parent[rw];
2183		sq = sq->parent_sq;
2184		tg = sq_to_tg(sq);
2185		if (!tg)
2186			goto out_unlock;
2187	}
2188
2189	/* out-of-limit, queue to @tg */
2190	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2191		   rw == READ ? 'R' : 'W',
2192		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
2193		   tg_bps_limit(tg, rw),
2194		   tg->io_disp[rw], tg_iops_limit(tg, rw),
2195		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
2196
2197	tg->last_low_overflow_time[rw] = jiffies;
2198
2199	td->nr_queued[rw]++;
2200	throtl_add_bio_tg(bio, qn, tg);
2201	throttled = true;
2202
2203	/*
2204	 * Update @tg's dispatch time and force schedule dispatch if @tg
2205	 * was empty before @bio.  The forced scheduling isn't likely to
2206	 * cause undue delay as @bio is likely to be dispatched directly if
2207	 * its @tg's disptime is not in the future.
2208	 */
2209	if (tg->flags & THROTL_TG_WAS_EMPTY) {
2210		tg_update_disptime(tg);
2211		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2212	}
2213
2214out_unlock:
2215	spin_unlock_irq(&q->queue_lock);
2216out:
2217	bio_set_flag(bio, BIO_THROTTLED);
2218
2219#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2220	if (throttled || !td->track_bio_latency)
2221		bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
2222#endif
2223	return throttled;
2224}
2225
2226#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2227static void throtl_track_latency(struct throtl_data *td, sector_t size,
2228	int op, unsigned long time)
2229{
2230	struct latency_bucket *latency;
2231	int index;
2232
2233	if (!td || td->limit_index != LIMIT_LOW ||
2234	    !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
2235	    !blk_queue_nonrot(td->queue))
2236		return;
2237
2238	index = request_bucket_index(size);
2239
2240	latency = get_cpu_ptr(td->latency_buckets[op]);
2241	latency[index].total_latency += time;
2242	latency[index].samples++;
2243	put_cpu_ptr(td->latency_buckets[op]);
2244}
2245
2246void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2247{
2248	struct request_queue *q = rq->q;
2249	struct throtl_data *td = q->td;
2250
2251	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
2252			     time_ns >> 10);
2253}
2254
2255void blk_throtl_bio_endio(struct bio *bio)
2256{
2257	struct blkcg_gq *blkg;
2258	struct throtl_grp *tg;
2259	u64 finish_time_ns;
2260	unsigned long finish_time;
2261	unsigned long start_time;
2262	unsigned long lat;
2263	int rw = bio_data_dir(bio);
2264
2265	blkg = bio->bi_blkg;
2266	if (!blkg)
2267		return;
2268	tg = blkg_to_tg(blkg);
2269
2270	finish_time_ns = ktime_get_ns();
2271	tg->last_finish_time = finish_time_ns >> 10;
2272
2273	start_time = bio_issue_time(&bio->bi_issue) >> 10;
2274	finish_time = __bio_issue_time(finish_time_ns) >> 10;
2275	if (!start_time || finish_time <= start_time)
2276		return;
2277
2278	lat = finish_time - start_time;
2279	/* this is only for bio based driver */
2280	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2281		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2282				     bio_op(bio), lat);
2283
2284	if (tg->latency_target && lat >= tg->td->filtered_latency) {
2285		int bucket;
2286		unsigned int threshold;
2287
2288		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
2289		threshold = tg->td->avg_buckets[rw][bucket].latency +
2290			tg->latency_target;
2291		if (lat > threshold)
2292			tg->bad_bio_cnt++;
2293		/*
2294		 * Not race free, so we could get a wrong count, which means
2295		 * cgroups may be throttled slightly inaccurately
2296		 */
2297		tg->bio_cnt++;
2298	}
2299
2300	if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2301		tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2302		tg->bio_cnt /= 2;
2303		tg->bad_bio_cnt /= 2;
2304	}
2305}
2306#endif
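/*
 * Illustrative sketch only, not part of blk-throttle.c: blk_throtl_bio_endio()
 * above counts a bio as "bad" when its latency exceeds the bucket's average
 * latency plus the group's latency target, and periodically halves both
 * counters so the bad/total ratio reflects recent behaviour rather than
 * all-time history.  A self-contained user-space rendition of that decay
 * scheme, with hypothetical names (the time-based reset is omitted):
 */
struct demo_lat_stats {
	unsigned long bio_cnt;		/* bios seen in the current window */
	unsigned long bad_bio_cnt;	/* bios that exceeded the threshold */
};

static void demo_account_bio(struct demo_lat_stats *s, unsigned long lat,
			     unsigned long threshold)
{
	if (lat > threshold)
		s->bad_bio_cnt++;
	s->bio_cnt++;

	/* Age the counters so old samples lose weight over time. */
	if (s->bio_cnt > 1024) {
		s->bio_cnt /= 2;
		s->bad_bio_cnt /= 2;
	}
}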
2307
2308/*
2309 * Dispatch all bios from all children tg's queued on @parent_sq.  On
2310 * return, @parent_sq is guaranteed to not have any active children tg's
2311 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
2312 */
2313static void tg_drain_bios(struct throtl_service_queue *parent_sq)
2314{
2315	struct throtl_grp *tg;
2316
2317	while ((tg = throtl_rb_first(parent_sq))) {
2318		struct throtl_service_queue *sq = &tg->service_queue;
2319		struct bio *bio;
2320
2321		throtl_dequeue_tg(tg);
2322
2323		while ((bio = throtl_peek_queued(&sq->queued[READ])))
2324			tg_dispatch_one_bio(tg, bio_data_dir(bio));
2325		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
2326			tg_dispatch_one_bio(tg, bio_data_dir(bio));
2327	}
2328}
2329
2330/**
2331 * blk_throtl_drain - drain throttled bios
2332 * @q: request_queue to drain throttled bios for
2333 *
2334 * Dispatch all currently throttled bios on @q through ->make_request_fn().
2335 */
2336void blk_throtl_drain(struct request_queue *q)
2337	__releases(&q->queue_lock) __acquires(&q->queue_lock)
2338{
2339	struct throtl_data *td = q->td;
2340	struct blkcg_gq *blkg;
2341	struct cgroup_subsys_state *pos_css;
2342	struct bio *bio;
2343	int rw;
2344
2345	rcu_read_lock();
2346
2347	/*
2348	 * Drain each tg while doing post-order walk on the blkg tree, so
2349	 * that all bios are propagated to td->service_queue.  It'd be
2350	 * better to walk service_queue tree directly but blkg walk is
2351	 * easier.
2352	 */
2353	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
2354		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
2355
2356	/* finally, transfer bios from top-level tg's into the td */
2357	tg_drain_bios(&td->service_queue);
2358
2359	rcu_read_unlock();
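	/*
	 * Drop queue_lock before issuing: the queued bios are resubmitted via
	 * generic_make_request() below, which may sleep.
	 */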
2360	spin_unlock_irq(&q->queue_lock);
2361
2362	/* all bios now should be in td->service_queue, issue them */
2363	for (rw = READ; rw <= WRITE; rw++)
2364		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
2365						NULL)))
2366			generic_make_request(bio);
2367
2368	spin_lock_irq(&q->queue_lock);
2369}
2370
2371int blk_throtl_init(struct request_queue *q)
2372{
2373	struct throtl_data *td;
2374	int ret;
2375
2376	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2377	if (!td)
2378		return -ENOMEM;
2379	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
2380		LATENCY_BUCKET_SIZE, __alignof__(u64));
2381	if (!td->latency_buckets[READ]) {
2382		kfree(td);
2383		return -ENOMEM;
2384	}
2385	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2386		LATENCY_BUCKET_SIZE, __alignof__(u64));
2387	if (!td->latency_buckets[WRITE]) {
2388		free_percpu(td->latency_buckets[READ]);
2389		kfree(td);
2390		return -ENOMEM;
2391	}
2392
2393	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2394	throtl_service_queue_init(&td->service_queue);
2395
2396	q->td = td;
2397	td->queue = q;
2398
2399	td->limit_valid[LIMIT_MAX] = true;
2400	td->limit_index = LIMIT_MAX;
2401	td->low_upgrade_time = jiffies;
2402	td->low_downgrade_time = jiffies;
2403
2404	/* activate policy */
2405	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2406	if (ret) {
2407		free_percpu(td->latency_buckets[READ]);
2408		free_percpu(td->latency_buckets[WRITE]);
2409		kfree(td);
2410	}
2411	return ret;
2412}
2413
2414void blk_throtl_exit(struct request_queue *q)
2415{
2416	BUG_ON(!q->td);
2417	throtl_shutdown_wq(q);
2418	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2419	free_percpu(q->td->latency_buckets[READ]);
2420	free_percpu(q->td->latency_buckets[WRITE]);
2421	kfree(q->td);
2422}
2423
2424void blk_throtl_register_queue(struct request_queue *q)
2425{
2426	struct throtl_data *td;
2427	int i;
2428
2429	td = q->td;
2430	BUG_ON(!td);
2431
2432	if (blk_queue_nonrot(q)) {
2433		td->throtl_slice = DFL_THROTL_SLICE_SSD;
2434		td->filtered_latency = LATENCY_FILTERED_SSD;
2435	} else {
2436		td->throtl_slice = DFL_THROTL_SLICE_HD;
2437		td->filtered_latency = LATENCY_FILTERED_HD;
2438		for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2439			td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2440			td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2441		}
2442	}
2443#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2444	/* if no low limit, use previous default */
2445	td->throtl_slice = DFL_THROTL_SLICE_HD;
2446#endif
2447
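	/*
	 * bio-based queues track latency per bio (see blk_throtl_bio_endio());
	 * request-based (blk-mq) queues get it from blk-stat instead, hence
	 * the accounting enable below.
	 */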
2448	td->track_bio_latency = !queue_is_mq(q);
2449	if (!td->track_bio_latency)
2450		blk_stat_enable_accounting(q);
2451}
2452
2453#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2454ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2455{
2456	if (!q->td)
2457		return -EINVAL;
2458	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2459}
2460
2461ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2462	const char *page, size_t count)
2463{
2464	unsigned long v;
2465	unsigned long t;
2466
2467	if (!q->td)
2468		return -EINVAL;
2469	if (kstrtoul(page, 10, &v))
2470		return -EINVAL;
2471	t = msecs_to_jiffies(v);
2472	if (t == 0 || t > MAX_THROTL_SLICE)
2473		return -EINVAL;
2474	q->td->throtl_slice = t;
2475	return count;
2476}
2477#endif
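/*
 * Illustrative sketch only, not part of blk-throttle.c: the show/store pair
 * above round-trips the sampling slice between milliseconds (user visible)
 * and jiffies (internal), rejecting zero or over-large values.  A user-space
 * analogue of the store-side validation, with hypothetical names and values
 * (DEMO_HZ and DEMO_MAX_SLICE are made up, msecs_to_jiffies() is approximated):
 */
#define DEMO_HZ		250
#define DEMO_MAX_SLICE	(DEMO_HZ / 2)

static int demo_set_sample_time(unsigned long *slice_jiffies, unsigned long msecs)
{
	unsigned long t = msecs * DEMO_HZ / 1000;	/* ~msecs_to_jiffies() */

	if (t == 0 || t > DEMO_MAX_SLICE)
		return -1;			/* out of range, reject */
	*slice_jiffies = t;
	return 0;
}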
2478
2479static int __init throtl_init(void)
2480{
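	/*
	 * WQ_MEM_RECLAIM gives the workqueue a rescuer thread, so throttle
	 * dispatch work can still make forward progress under memory
	 * pressure (throttled writeback bios must not get stuck).
	 */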
2481	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2482	if (!kthrotld_workqueue)
2483		panic("Failed to create kthrotld\n");
2484
2485	return blkcg_policy_register(&blkcg_policy_throtl);
2486}
2487
2488module_init(throtl_init);