blk-throttle.c, v3.1
   1/*
   2 * Interface for controlling IO bandwidth on a request queue
   3 *
   4 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/slab.h>
   9#include <linux/blkdev.h>
  10#include <linux/bio.h>
  11#include <linux/blktrace_api.h>
  12#include "blk-cgroup.h"
  13
  14/* Max dispatch from a group in 1 round */
  15static int throtl_grp_quantum = 8;
  16
  17/* Total max dispatch from all groups in one round */
  18static int throtl_quantum = 32;
  19
  20/* Throttling is performed over 100ms slice and after that slice is renewed */
  21static unsigned long throtl_slice = HZ/10;	/* 100 ms */
  22
  23/* A workqueue to queue throttle related work */
  24static struct workqueue_struct *kthrotld_workqueue;
  25static void throtl_schedule_delayed_work(struct throtl_data *td,
  26				unsigned long delay);
  27
  28struct throtl_rb_root {
  29	struct rb_root rb;
  30	struct rb_node *left;
  31	unsigned int count;
  32	unsigned long min_disptime;
  33};
  34
  35#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
  36			.count = 0, .min_disptime = 0}
  37
  38#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
  39
  40struct throtl_grp {
   41	/* List of throtl groups on the request queue */
  42	struct hlist_node tg_node;
  43
  44	/* active throtl group service_tree member */
  45	struct rb_node rb_node;
  46
  47	/*
   48	 * Dispatch time in jiffies. This is the estimated time when the group
   49	 * will be unthrottled and ready to dispatch more bios. It is used as
   50	 * the key to sort active groups in the service tree.
  51	 */
  52	unsigned long disptime;
  53
  54	struct blkio_group blkg;
  55	atomic_t ref;
  56	unsigned int flags;
  57
  58	/* Two lists for READ and WRITE */
  59	struct bio_list bio_lists[2];
  60
  61	/* Number of queued bios on READ and WRITE lists */
  62	unsigned int nr_queued[2];
  63
  64	/* bytes per second rate limits */
  65	uint64_t bps[2];
  66
  67	/* IOPS limits */
  68	unsigned int iops[2];
  69
   70	/* Number of bytes dispatched in current slice */
  71	uint64_t bytes_disp[2];
  72	/* Number of bio's dispatched in current slice */
  73	unsigned int io_disp[2];
  74
  75	/* When did we start a new slice */
  76	unsigned long slice_start[2];
  77	unsigned long slice_end[2];
  78
  79	/* Some throttle limits got updated for the group */
  80	int limits_changed;
  81
  82	struct rcu_head rcu_head;
  83};
  84
  85struct throtl_data
  86{
  87	/* List of throtl groups */
  88	struct hlist_head tg_list;
  89
  90	/* service tree for active throtl groups */
  91	struct throtl_rb_root tg_service_tree;
  92
  93	struct throtl_grp *root_tg;
  94	struct request_queue *queue;
  95
   96	/* Total number of queued bios on READ and WRITE lists */
  97	unsigned int nr_queued[2];
  98
  99	/*
 100	 * number of total undestroyed groups
 101	 */
 102	unsigned int nr_undestroyed_grps;
 103
 104	/* Work for dispatching throttled bios */
 105	struct delayed_work throtl_work;
 106
 107	int limits_changed;
 108};
 109
 110enum tg_state_flags {
 111	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
 112};
 113
 114#define THROTL_TG_FNS(name)						\
 115static inline void throtl_mark_tg_##name(struct throtl_grp *tg)		\
 116{									\
 117	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
 118}									\
 119static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
 120{									\
 121	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
 122}									\
 123static inline int throtl_tg_##name(const struct throtl_grp *tg)		\
 124{									\
 125	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
 126}
 127
 128THROTL_TG_FNS(on_rr);
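/*
 * For reference, THROTL_TG_FNS(on_rr) above expands (verbatim) to:
 *
 *	static inline void throtl_mark_tg_on_rr(struct throtl_grp *tg)
 *	{
 *		(tg)->flags |= (1 << THROTL_TG_FLAG_on_rr);
 *	}
 *	static inline void throtl_clear_tg_on_rr(struct throtl_grp *tg)
 *	{
 *		(tg)->flags &= ~(1 << THROTL_TG_FLAG_on_rr);
 *	}
 *	static inline int throtl_tg_on_rr(const struct throtl_grp *tg)
 *	{
 *		return ((tg)->flags & (1 << THROTL_TG_FLAG_on_rr)) != 0;
 *	}
 */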
 129
 130#define throtl_log_tg(td, tg, fmt, args...)				\
 131	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
  132				blkg_path(&(tg)->blkg), ##args);
 133
 134#define throtl_log(td, fmt, args...)	\
 135	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
 136
 137static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
 138{
 139	if (blkg)
 140		return container_of(blkg, struct throtl_grp, blkg);
 141
 142	return NULL;
 143}
 144
 145static inline unsigned int total_nr_queued(struct throtl_data *td)
 146{
 147	return td->nr_queued[0] + td->nr_queued[1];
 148}
 149
 150static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
 151{
 152	atomic_inc(&tg->ref);
 153	return tg;
 154}
 155
 156static void throtl_free_tg(struct rcu_head *head)
 157{
 158	struct throtl_grp *tg;
 159
 160	tg = container_of(head, struct throtl_grp, rcu_head);
 161	free_percpu(tg->blkg.stats_cpu);
 162	kfree(tg);
 163}
 164
 165static void throtl_put_tg(struct throtl_grp *tg)
 166{
 167	BUG_ON(atomic_read(&tg->ref) <= 0);
 168	if (!atomic_dec_and_test(&tg->ref))
 169		return;
 170
 171	/*
 172	 * A group is freed in rcu manner. But having an rcu lock does not
 173	 * mean that one can access all the fields of blkg and assume these
 174	 * are valid. For example, don't try to follow throtl_data and
 175	 * request queue links.
 176	 *
  177	 * Having a reference to blkg under an rcu allows access to only
 178	 * values local to groups like group stats and group rate limits
 179	 */
 180	call_rcu(&tg->rcu_head, throtl_free_tg);
 181}
 182
 183static void throtl_init_group(struct throtl_grp *tg)
 184{
 185	INIT_HLIST_NODE(&tg->tg_node);
 186	RB_CLEAR_NODE(&tg->rb_node);
 187	bio_list_init(&tg->bio_lists[0]);
 188	bio_list_init(&tg->bio_lists[1]);
 189	tg->limits_changed = false;
 190
 191	/* Practically unlimited BW */
 192	tg->bps[0] = tg->bps[1] = -1;
 193	tg->iops[0] = tg->iops[1] = -1;
 194
 195	/*
 196	 * Take the initial reference that will be released on destroy
 197	 * This can be thought of a joint reference by cgroup and
 198	 * request queue which will be dropped by either request queue
 199	 * exit or cgroup deletion path depending on who is exiting first.
 200	 */
 201	atomic_set(&tg->ref, 1);
 202}
 203
 204/* Should be called with rcu read lock held (needed for blkcg) */
 205static void
 206throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
 207{
 208	hlist_add_head(&tg->tg_node, &td->tg_list);
 209	td->nr_undestroyed_grps++;
 210}
 211
 212static void
 213__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
 214{
 215	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
 216	unsigned int major, minor;
 217
 218	if (!tg || tg->blkg.dev)
 219		return;
 220
 221	/*
 222	 * Fill in device details for a group which might not have been
 223	 * filled at group creation time as queue was being instantiated
 224	 * and driver had not attached a device yet
 225	 */
 226	if (bdi->dev && dev_name(bdi->dev)) {
 227		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
 228		tg->blkg.dev = MKDEV(major, minor);
 229	}
 230}
 231
 232/*
  233 * Should be called without queue lock held. Here the queue lock will be
  234 * taken rarely. It will be taken only once during the lifetime of a
  235 * group, if need be.
 236 */
 237static void
 238throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
 239{
 240	if (!tg || tg->blkg.dev)
 241		return;
 242
 243	spin_lock_irq(td->queue->queue_lock);
 244	__throtl_tg_fill_dev_details(td, tg);
 245	spin_unlock_irq(td->queue->queue_lock);
 246}
 247
 248static void throtl_init_add_tg_lists(struct throtl_data *td,
 249			struct throtl_grp *tg, struct blkio_cgroup *blkcg)
 250{
 251	__throtl_tg_fill_dev_details(td, tg);
 252
 253	/* Add group onto cgroup list */
 254	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
 255				tg->blkg.dev, BLKIO_POLICY_THROTL);
 256
 257	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
 258	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
 259	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
 260	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
 261
 262	throtl_add_group_to_td_list(td, tg);
 263}
 264
 265/* Should be called without queue lock and outside of rcu period */
 266static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
 267{
 268	struct throtl_grp *tg = NULL;
 269	int ret;
 270
 271	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
 272	if (!tg)
 273		return NULL;
 274
 275	ret = blkio_alloc_blkg_stats(&tg->blkg);
 276
 277	if (ret) {
 278		kfree(tg);
 279		return NULL;
 280	}
 281
 282	throtl_init_group(tg);
 283	return tg;
 284}
 285
 286static struct
 287throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 288{
 289	struct throtl_grp *tg = NULL;
 290	void *key = td;
 291
 292	/*
 293	 * This is the common case when there are no blkio cgroups.
 294 	 * Avoid lookup in this case
 295 	 */
 296	if (blkcg == &blkio_root_cgroup)
 297		tg = td->root_tg;
 298	else
 299		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
 300
 301	__throtl_tg_fill_dev_details(td, tg);
 302	return tg;
 303}
 304
 305/*
  306 * This function returns with the queue lock unlocked in case of error,
  307 * e.g. when the request queue is gone.
 308 */
  309static struct throtl_grp *throtl_get_tg(struct throtl_data *td)
 310{
 311	struct throtl_grp *tg = NULL, *__tg = NULL;
 312	struct blkio_cgroup *blkcg;
 313	struct request_queue *q = td->queue;
 314
 315	rcu_read_lock();
 316	blkcg = task_blkio_cgroup(current);
 317	tg = throtl_find_tg(td, blkcg);
 318	if (tg) {
 319		rcu_read_unlock();
 320		return tg;
 321	}
 322
 323	/*
  324	 * Need to allocate a group. Allocating a group also requires allocating
  325	 * per-cpu stats, which in turn takes a mutex and can block. Hence
  326	 * we need to drop the rcu lock and queue_lock before calling alloc.
 327	 *
 328	 * Take the request queue reference to make sure queue does not
 329	 * go away once we return from allocation.
 330	 */
 331	blk_get_queue(q);
 332	rcu_read_unlock();
 333	spin_unlock_irq(q->queue_lock);
 334
 335	tg = throtl_alloc_tg(td);
 336	/*
 337	 * We might have slept in group allocation. Make sure queue is not
 338	 * dead
 339	 */
 340	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
 341		blk_put_queue(q);
 342		if (tg)
 343			kfree(tg);
 344
 345		return ERR_PTR(-ENODEV);
 346	}
 347	blk_put_queue(q);
 348
 349	/* Group allocated and queue is still alive. take the lock */
 350	spin_lock_irq(q->queue_lock);
 351
 352	/*
 353	 * Initialize the new group. After sleeping, read the blkcg again.
 354	 */
 355	rcu_read_lock();
 356	blkcg = task_blkio_cgroup(current);
 357
 358	/*
 359	 * If some other thread already allocated the group while we were
 360	 * not holding queue lock, free up the group
 361	 */
 362	__tg = throtl_find_tg(td, blkcg);
 363
 364	if (__tg) {
 365		kfree(tg);
 366		rcu_read_unlock();
 367		return __tg;
 368	}
 369
 370	/* Group allocation failed. Account the IO to root group */
  371	if (!tg) {
  372		tg = td->root_tg;
		rcu_read_unlock();	/* fix: don't return with the rcu read lock held */
  373		return tg;
  374	}
 375
 376	throtl_init_add_tg_lists(td, tg, blkcg);
 377	rcu_read_unlock();
 378	return tg;
 379}
 380
 381static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
 382{
 383	/* Service tree is empty */
 384	if (!root->count)
 385		return NULL;
 386
 387	if (!root->left)
 388		root->left = rb_first(&root->rb);
 389
 390	if (root->left)
 391		return rb_entry_tg(root->left);
 392
 393	return NULL;
 394}
 395
 396static void rb_erase_init(struct rb_node *n, struct rb_root *root)
 397{
 398	rb_erase(n, root);
 399	RB_CLEAR_NODE(n);
 400}
 401
 402static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
 403{
 404	if (root->left == n)
 405		root->left = NULL;
 406	rb_erase_init(n, &root->rb);
 407	--root->count;
 408}
 409
 410static void update_min_dispatch_time(struct throtl_rb_root *st)
 411{
 412	struct throtl_grp *tg;
 413
 414	tg = throtl_rb_first(st);
 415	if (!tg)
 416		return;
 417
 418	st->min_disptime = tg->disptime;
 419}
 420
 421static void
 422tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
 423{
 424	struct rb_node **node = &st->rb.rb_node;
 425	struct rb_node *parent = NULL;
 426	struct throtl_grp *__tg;
 427	unsigned long key = tg->disptime;
 428	int left = 1;
 429
 430	while (*node != NULL) {
 431		parent = *node;
 432		__tg = rb_entry_tg(parent);
 433
 434		if (time_before(key, __tg->disptime))
 435			node = &parent->rb_left;
 436		else {
 437			node = &parent->rb_right;
 438			left = 0;
 439		}
 440	}
 441
 442	if (left)
 443		st->left = &tg->rb_node;
 444
 445	rb_link_node(&tg->rb_node, parent, node);
 446	rb_insert_color(&tg->rb_node, &st->rb);
 447}
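/*
 * Note: st->left caches the leftmost node (smallest disptime), so
 * throtl_rb_first() is O(1) in the common case instead of walking
 * down the tree on every dispatch round.
 */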
 448
 449static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
 450{
 451	struct throtl_rb_root *st = &td->tg_service_tree;
 452
 453	tg_service_tree_add(st, tg);
 454	throtl_mark_tg_on_rr(tg);
 455	st->count++;
 456}
 457
 458static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
 459{
 460	if (!throtl_tg_on_rr(tg))
 461		__throtl_enqueue_tg(td, tg);
 462}
 463
 464static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
 465{
 466	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
 467	throtl_clear_tg_on_rr(tg);
 468}
 469
 470static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
 471{
 472	if (throtl_tg_on_rr(tg))
 473		__throtl_dequeue_tg(td, tg);
 474}
 475
 476static void throtl_schedule_next_dispatch(struct throtl_data *td)
 477{
 478	struct throtl_rb_root *st = &td->tg_service_tree;
 479
 480	/*
 481	 * If there are more bios pending, schedule more work.
 482	 */
 483	if (!total_nr_queued(td))
 484		return;
 485
 486	BUG_ON(!st->count);
 487
 488	update_min_dispatch_time(st);
 489
 490	if (time_before_eq(st->min_disptime, jiffies))
 491		throtl_schedule_delayed_work(td, 0);
 492	else
 493		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
 494}
 495
 496static inline void
 497throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 498{
 499	tg->bytes_disp[rw] = 0;
 500	tg->io_disp[rw] = 0;
 501	tg->slice_start[rw] = jiffies;
 502	tg->slice_end[rw] = jiffies + throtl_slice;
 503	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
 504			rw == READ ? 'R' : 'W', tg->slice_start[rw],
 505			tg->slice_end[rw], jiffies);
 506}
 507
 508static inline void throtl_set_slice_end(struct throtl_data *td,
 509		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
 510{
 511	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 512}
 513
 514static inline void throtl_extend_slice(struct throtl_data *td,
 515		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
 516{
 517	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 518	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
 519			rw == READ ? 'R' : 'W', tg->slice_start[rw],
 520			tg->slice_end[rw], jiffies);
 521}
 522
 523/* Determine if previously allocated or extended slice is complete or not */
 524static bool
 525throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 526{
 527	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
 528		return 0;
 529
 530	return 1;
 531}
 532
 533/* Trim the used slices and adjust slice start accordingly */
 534static inline void
 535throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 536{
 537	unsigned long nr_slices, time_elapsed, io_trim;
 538	u64 bytes_trim, tmp;
 539
 540	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
 541
 542	/*
  543	 * If bps are unlimited (-1), then the time slice doesn't get
  544	 * renewed. Don't try to trim the slice if the slice is used. A new
 545	 * slice will start when appropriate.
 546	 */
 547	if (throtl_slice_used(td, tg, rw))
 548		return;
 549
 550	/*
 551	 * A bio has been dispatched. Also adjust slice_end. It might happen
 552	 * that initially cgroup limit was very low resulting in high
  553	 * slice_end, but later limit was bumped up and bio was dispatched
 554	 * sooner, then we need to reduce slice_end. A high bogus slice_end
 555	 * is bad because it does not allow new slice to start.
 556	 */
 557
 558	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
 559
 560	time_elapsed = jiffies - tg->slice_start[rw];
 561
 562	nr_slices = time_elapsed / throtl_slice;
 563
 564	if (!nr_slices)
 565		return;
 566	tmp = tg->bps[rw] * throtl_slice * nr_slices;
 567	do_div(tmp, HZ);
 568	bytes_trim = tmp;
 569
 570	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
 571
 572	if (!bytes_trim && !io_trim)
 573		return;
 574
 575	if (tg->bytes_disp[rw] >= bytes_trim)
 576		tg->bytes_disp[rw] -= bytes_trim;
 577	else
 578		tg->bytes_disp[rw] = 0;
 579
 580	if (tg->io_disp[rw] >= io_trim)
 581		tg->io_disp[rw] -= io_trim;
 582	else
 583		tg->io_disp[rw] = 0;
 584
 585	tg->slice_start[rw] += nr_slices * throtl_slice;
 586
 587	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
 588			" start=%lu end=%lu jiffies=%lu",
 589			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
 590			tg->slice_start[rw], tg->slice_end[rw], jiffies);
 591}
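/*
 * Worked example (assuming HZ=1000, i.e. throtl_slice=100 jiffies): with
 * tg->bps[rw] = 1000000 (~1MB/s) and two full slices elapsed (nr_slices=2),
 * bytes_trim = 1000000 * 100 * 2 / 1000 = 200000, i.e. exactly the bytes
 * the group was entitled to in the trimmed 200ms window, and slice_start
 * advances by 200 jiffies.
 */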
 592
 593static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
 594		struct bio *bio, unsigned long *wait)
 595{
 596	bool rw = bio_data_dir(bio);
 597	unsigned int io_allowed;
 598	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 599	u64 tmp;
 600
 601	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 602
 603	/* Slice has just started. Consider one slice interval */
 604	if (!jiffy_elapsed)
 605		jiffy_elapsed_rnd = throtl_slice;
 606
 607	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 608
 609	/*
  610	 * jiffy_elapsed_rnd should not be a big value: as the minimum iops can
  611	 * be 1, at most jiffy_elapsed should be equivalent to 1 second, as we
  612	 * will allow dispatch after 1 second and after that the slice should
  613	 * have been trimmed.
 614	 */
 615
 616	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
 617	do_div(tmp, HZ);
 618
 619	if (tmp > UINT_MAX)
 620		io_allowed = UINT_MAX;
 621	else
 622		io_allowed = tmp;
 623
 624	if (tg->io_disp[rw] + 1 <= io_allowed) {
 625		if (wait)
 626			*wait = 0;
 627		return 1;
 628	}
 629
 630	/* Calc approx time to dispatch */
 631	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
 632
 633	if (jiffy_wait > jiffy_elapsed)
 634		jiffy_wait = jiffy_wait - jiffy_elapsed;
 635	else
 636		jiffy_wait = 1;
 637
 638	if (wait)
 639		*wait = jiffy_wait;
 640	return 0;
 641}
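/*
 * Example (assuming HZ=1000): with tg->iops[rw] = 100 and io_disp = 10
 * at jiffy 50 of the slice, jiffy_elapsed_rnd rounds up to 100, so
 * io_allowed = 100 * 100 / 1000 = 10 and the 11th IO must wait:
 * jiffy_wait = 11 * 1000 / 100 + 1 = 111, minus the 50 jiffies already
 * elapsed = 61 jiffies (~61ms).
 */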
 642
 643static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
 644		struct bio *bio, unsigned long *wait)
 645{
 646	bool rw = bio_data_dir(bio);
 647	u64 bytes_allowed, extra_bytes, tmp;
 648	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 649
 650	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 651
 652	/* Slice has just started. Consider one slice interval */
 653	if (!jiffy_elapsed)
 654		jiffy_elapsed_rnd = throtl_slice;
 655
 656	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 657
 658	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
 659	do_div(tmp, HZ);
 660	bytes_allowed = tmp;
 661
 662	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
 663		if (wait)
 664			*wait = 0;
 665		return 1;
 666	}
 667
 668	/* Calc approx time to dispatch */
 669	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
 670	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
 671
 672	if (!jiffy_wait)
 673		jiffy_wait = 1;
 674
 675	/*
 676	 * This wait time is without taking into consideration the rounding
 677	 * up we did. Add that time also.
 678	 */
 679	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
 680	if (wait)
 681		*wait = jiffy_wait;
 682	return 0;
 683}
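/*
 * Example (assuming HZ=1000): with tg->bps[rw] = 1048576 (1MB/s), 30
 * jiffies into the slice (rounded up to 100), bytes_allowed =
 * 1048576 * 100 / 1000 = 104857. A 64KB bio on top of bytes_disp = 65536
 * exceeds that: extra_bytes = 131072 - 104857 = 26215, so jiffy_wait =
 * 26215 * 1000 / 1048576 = 25, plus the rounding slack (100 - 30) for a
 * total of 95 jiffies.
 */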
 684
 685static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
 686	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
 687		return 1;
 688	return 0;
 689}
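/*
 * bps[] and iops[] are unsigned, so the -1 "unlimited" value assigned in
 * throtl_init_group() is really U64_MAX/UINT_MAX; the comparisons against
 * -1 here and in tg_may_dispatch() match that sentinel.
 */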
 690
 691/*
 692 * Returns whether one can dispatch a bio or not. Also returns approx number
  693 * of jiffies to wait before this bio is within the IO rate and can be dispatched
 694 */
 695static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 696				struct bio *bio, unsigned long *wait)
 697{
 698	bool rw = bio_data_dir(bio);
 699	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
 700
 701	/*
  702	 * Currently the whole state machine of the group depends on the first bio
 703	 * queued in the group bio list. So one should not be calling
 704	 * this function with a different bio if there are other bios
 705	 * queued.
 706	 */
 707	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
 708
 709	/* If tg->bps = -1, then BW is unlimited */
 710	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
 711		if (wait)
 712			*wait = 0;
 713		return 1;
 714	}
 715
 716	/*
 717	 * If previous slice expired, start a new one otherwise renew/extend
 718	 * existing slice to make sure it is at least throtl_slice interval
 719	 * long since now.
 720	 */
 721	if (throtl_slice_used(td, tg, rw))
 722		throtl_start_new_slice(td, tg, rw);
 723	else {
 724		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
 725			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
 726	}
 727
 728	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
 729	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
 730		if (wait)
 731			*wait = 0;
 732		return 1;
 733	}
 734
 735	max_wait = max(bps_wait, iops_wait);
 736
 737	if (wait)
 738		*wait = max_wait;
 739
 740	if (time_before(tg->slice_end[rw], jiffies + max_wait))
 741		throtl_extend_slice(td, tg, rw, jiffies + max_wait);
 742
 743	return 0;
 744}
 745
 746static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 747{
 748	bool rw = bio_data_dir(bio);
 749	bool sync = rw_is_sync(bio->bi_rw);
 750
 751	/* Charge the bio to the group */
 752	tg->bytes_disp[rw] += bio->bi_size;
 753	tg->io_disp[rw]++;
 754
 755	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
 756}
 757
 758static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
 759			struct bio *bio)
 760{
 761	bool rw = bio_data_dir(bio);
 762
 763	bio_list_add(&tg->bio_lists[rw], bio);
 764	/* Take a bio reference on tg */
 765	throtl_ref_get_tg(tg);
 766	tg->nr_queued[rw]++;
 767	td->nr_queued[rw]++;
 768	throtl_enqueue_tg(td, tg);
 769}
 770
 771static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
 772{
 773	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
 774	struct bio *bio;
 775
 776	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
 777		tg_may_dispatch(td, tg, bio, &read_wait);
 778
 779	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
 780		tg_may_dispatch(td, tg, bio, &write_wait);
 781
 782	min_wait = min(read_wait, write_wait);
 783	disptime = jiffies + min_wait;
 784
 785	/* Update dispatch time */
 786	throtl_dequeue_tg(td, tg);
 787	tg->disptime = disptime;
 788	throtl_enqueue_tg(td, tg);
 789}
 790
 791static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
 792				bool rw, struct bio_list *bl)
 793{
 794	struct bio *bio;
 795
 796	bio = bio_list_pop(&tg->bio_lists[rw]);
 797	tg->nr_queued[rw]--;
 798	/* Drop bio reference on tg */
 799	throtl_put_tg(tg);
 800
 801	BUG_ON(td->nr_queued[rw] <= 0);
 802	td->nr_queued[rw]--;
 803
 804	throtl_charge_bio(tg, bio);
 805	bio_list_add(bl, bio);
 806	bio->bi_rw |= REQ_THROTTLED;
 807
 808	throtl_trim_slice(td, tg, rw);
 809}
 810
 811static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
 812				struct bio_list *bl)
 813{
 814	unsigned int nr_reads = 0, nr_writes = 0;
 815	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
 816	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
 817	struct bio *bio;
 818
 819	/* Try to dispatch 75% READS and 25% WRITES */
 820
 821	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
 822		&& tg_may_dispatch(td, tg, bio, NULL)) {
 823
 824		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
 825		nr_reads++;
 826
 827		if (nr_reads >= max_nr_reads)
 828			break;
 829	}
 830
 831	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
 832		&& tg_may_dispatch(td, tg, bio, NULL)) {
 833
 834		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
 835		nr_writes++;
 836
 837		if (nr_writes >= max_nr_writes)
 838			break;
 839	}
 840
 841	return nr_reads + nr_writes;
 842}
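/*
 * With throtl_grp_quantum = 8, this works out to max_nr_reads = 8*3/4 = 6
 * and max_nr_writes = 8 - 6 = 2 bios per group per dispatch round.
 */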
 843
 844static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
 845{
 846	unsigned int nr_disp = 0;
 847	struct throtl_grp *tg;
 848	struct throtl_rb_root *st = &td->tg_service_tree;
 849
 850	while (1) {
 851		tg = throtl_rb_first(st);
 852
 853		if (!tg)
 854			break;
 855
 856		if (time_before(jiffies, tg->disptime))
 857			break;
 858
 859		throtl_dequeue_tg(td, tg);
 860
 861		nr_disp += throtl_dispatch_tg(td, tg, bl);
 862
 863		if (tg->nr_queued[0] || tg->nr_queued[1]) {
 864			tg_update_disptime(td, tg);
 865			throtl_enqueue_tg(td, tg);
 866		}
 867
 868		if (nr_disp >= throtl_quantum)
 869			break;
 870	}
 871
 872	return nr_disp;
 873}
 874
 875static void throtl_process_limit_change(struct throtl_data *td)
 876{
 877	struct throtl_grp *tg;
 878	struct hlist_node *pos, *n;
 879
 880	if (!td->limits_changed)
 881		return;
 882
 883	xchg(&td->limits_changed, false);
 884
 885	throtl_log(td, "limits changed");
 886
 887	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
 888		if (!tg->limits_changed)
 889			continue;
 890
 891		if (!xchg(&tg->limits_changed, false))
 892			continue;
 893
 894		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
 895			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
 896			tg->iops[READ], tg->iops[WRITE]);
 897
 898		/*
  899		 * Restart the slices for both READ and WRITE. It
  900		 * might happen that a group's limits are dropped
  901		 * suddenly and we don't want to account recently
  902		 * dispatched IO with the new low rate.
 903		 */
 904		throtl_start_new_slice(td, tg, 0);
 905		throtl_start_new_slice(td, tg, 1);
 906
 907		if (throtl_tg_on_rr(tg))
 908			tg_update_disptime(td, tg);
 909	}
 910}
 911
 912/* Dispatch throttled bios. Should be called without queue lock held. */
 913static int throtl_dispatch(struct request_queue *q)
 914{
 915	struct throtl_data *td = q->td;
 916	unsigned int nr_disp = 0;
 917	struct bio_list bio_list_on_stack;
 918	struct bio *bio;
 919	struct blk_plug plug;
 920
 921	spin_lock_irq(q->queue_lock);
 922
 923	throtl_process_limit_change(td);
 924
 925	if (!total_nr_queued(td))
 926		goto out;
 927
 928	bio_list_init(&bio_list_on_stack);
 929
 930	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
 931			total_nr_queued(td), td->nr_queued[READ],
 932			td->nr_queued[WRITE]);
 933
 934	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
 935
 936	if (nr_disp)
 937		throtl_log(td, "bios disp=%u", nr_disp);
 938
 939	throtl_schedule_next_dispatch(td);
 940out:
 941	spin_unlock_irq(q->queue_lock);
 942
 943	/*
  944	 * If we dispatched some requests, unplug the queue to make sure
  945	 * they are dispatched immediately.
 946	 */
 947	if (nr_disp) {
 948		blk_start_plug(&plug);
  949		while ((bio = bio_list_pop(&bio_list_on_stack)))
 950			generic_make_request(bio);
 951		blk_finish_plug(&plug);
 952	}
 953	return nr_disp;
 954}
 955
 956void blk_throtl_work(struct work_struct *work)
 957{
 958	struct throtl_data *td = container_of(work, struct throtl_data,
 959					throtl_work.work);
 960	struct request_queue *q = td->queue;
 961
 962	throtl_dispatch(q);
 963}
 964
 965/* Call with queue lock held */
 966static void
 967throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 968{
 969
 970	struct delayed_work *dwork = &td->throtl_work;
 971
 972	/* schedule work if limits changed even if no bio is queued */
 973	if (total_nr_queued(td) || td->limits_changed) {
 974		/*
 975		 * We might have a work scheduled to be executed in future.
 976		 * Cancel that and schedule a new one.
 977		 */
 978		__cancel_delayed_work(dwork);
 979		queue_delayed_work(kthrotld_workqueue, dwork, delay);
 980		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 981				delay, jiffies);
 982	}
 983}
 984
 985static void
 986throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
 987{
 988	/* Something wrong if we are trying to remove same group twice */
 989	BUG_ON(hlist_unhashed(&tg->tg_node));
 990
 991	hlist_del_init(&tg->tg_node);
 992
 993	/*
 994	 * Put the reference taken at the time of creation so that when all
 995	 * queues are gone, group can be destroyed.
 996	 */
 997	throtl_put_tg(tg);
 998	td->nr_undestroyed_grps--;
 999}
1000
1001static void throtl_release_tgs(struct throtl_data *td)
1002{
1003	struct hlist_node *pos, *n;
1004	struct throtl_grp *tg;
1005
1006	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
1007		/*
1008		 * If the cgroup removal path got to the blkio_group first and
1009		 * removed it from the cgroup list, then it will take care of
1010		 * destroying the throtl group too.
1011		 */
1012		if (!blkiocg_del_blkio_group(&tg->blkg))
1013			throtl_destroy_tg(td, tg);
1014	}
1015}
1016
1017static void throtl_td_free(struct throtl_data *td)
1018{
1019	kfree(td);
1020}
1021
1022/*
1023 * Blk cgroup controller notification saying that blkio_group object is being
1024 * delinked as associated cgroup object is going away. That also means that
1025 * no new IO will come in this group. So get rid of this group as soon as
1026 * any pending IO in the group is finished.
1027 *
1028 * This function is called under rcu_read_lock(). key is the rcu protected
1029 * pointer. That means "key" is a valid throtl_data pointer as long as we
1030 * hold the rcu read lock.
1031 *
1032 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1033 * it should not be NULL: even if the queue was going away, the cgroup deletion
1034 * path got to it first.
1035 */
1036void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
1037{
1038	unsigned long flags;
1039	struct throtl_data *td = key;
1040
1041	spin_lock_irqsave(td->queue->queue_lock, flags);
1042	throtl_destroy_tg(td, tg_of_blkg(blkg));
1043	spin_unlock_irqrestore(td->queue->queue_lock, flags);
1044}
1045
1046static void throtl_update_blkio_group_common(struct throtl_data *td,
1047				struct throtl_grp *tg)
1048{
1049	xchg(&tg->limits_changed, true);
1050	xchg(&td->limits_changed, true);
1051	/* Schedule a work now to process the limit change */
1052	throtl_schedule_delayed_work(td, 0);
1053}
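/*
 * xchg() is used instead of a plain store so that setting the flag here
 * pairs atomically with the xchg(..., false) test-and-clear in
 * throtl_process_limit_change(), ensuring an update cannot be lost
 * between the per-td and per-group checks.
 */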
1054
1055/*
1056 * For all update functions, key should be a valid pointer because these
1057 * update functions are called under blkcg_lock; that means blkg is
1058 * valid and in turn key is valid. The queue exit path cannot race because
1059 * of blkcg_lock.
1060 *
1061 * Cannot take the queue lock in update functions, as queue lock under blkcg_lock
1062 * is not allowed. Under other paths we take blkcg_lock under queue_lock.
1063 */
1064static void throtl_update_blkio_group_read_bps(void *key,
1065				struct blkio_group *blkg, u64 read_bps)
1066{
1067	struct throtl_data *td = key;
1068	struct throtl_grp *tg = tg_of_blkg(blkg);
1069
1070	tg->bps[READ] = read_bps;
1071	throtl_update_blkio_group_common(td, tg);
1072}
1073
1074static void throtl_update_blkio_group_write_bps(void *key,
1075				struct blkio_group *blkg, u64 write_bps)
1076{
1077	struct throtl_data *td = key;
1078	struct throtl_grp *tg = tg_of_blkg(blkg);
1079
1080	tg->bps[WRITE] = write_bps;
1081	throtl_update_blkio_group_common(td, tg);
1082}
1083
1084static void throtl_update_blkio_group_read_iops(void *key,
1085			struct blkio_group *blkg, unsigned int read_iops)
1086{
1087	struct throtl_data *td = key;
1088	struct throtl_grp *tg = tg_of_blkg(blkg);
1089
1090	tg->iops[READ] = read_iops;
1091	throtl_update_blkio_group_common(td, tg);
1092}
1093
1094static void throtl_update_blkio_group_write_iops(void *key,
1095			struct blkio_group *blkg, unsigned int write_iops)
1096{
1097	struct throtl_data *td = key;
1098	struct throtl_grp *tg = tg_of_blkg(blkg);
1099
1100	tg->iops[WRITE] = write_iops;
1101	throtl_update_blkio_group_common(td, tg);
1102}
1103
1104static void throtl_shutdown_wq(struct request_queue *q)
1105{
1106	struct throtl_data *td = q->td;
1107
1108	cancel_delayed_work_sync(&td->throtl_work);
1109}
1110
1111static struct blkio_policy_type blkio_policy_throtl = {
1112	.ops = {
1113		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
1114		.blkio_update_group_read_bps_fn =
1115					throtl_update_blkio_group_read_bps,
1116		.blkio_update_group_write_bps_fn =
1117					throtl_update_blkio_group_write_bps,
1118		.blkio_update_group_read_iops_fn =
1119					throtl_update_blkio_group_read_iops,
1120		.blkio_update_group_write_iops_fn =
1121					throtl_update_blkio_group_write_iops,
1122	},
1123	.plid = BLKIO_POLICY_THROTL,
1124};
1125
1126int blk_throtl_bio(struct request_queue *q, struct bio **biop)
1127{
1128	struct throtl_data *td = q->td;
1129	struct throtl_grp *tg;
1130	struct bio *bio = *biop;
1131	bool rw = bio_data_dir(bio), update_disptime = true;
1132	struct blkio_cgroup *blkcg;
1133
1134	if (bio->bi_rw & REQ_THROTTLED) {
1135		bio->bi_rw &= ~REQ_THROTTLED;
1136		return 0;
1137	}
1138
1139	/*
1140	 * A throtl_grp pointer retrieved under rcu can be used to access
1141	 * basic fields like stats and io rates. If a group has no rules,
1142	 * just update the dispatch stats in lockless manner and return.
1143	 */
1144
1145	rcu_read_lock();
1146	blkcg = task_blkio_cgroup(current);
1147	tg = throtl_find_tg(td, blkcg);
1148	if (tg) {
1149		throtl_tg_fill_dev_details(td, tg);
1150
1151		if (tg_no_rule_group(tg, rw)) {
1152			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
1153					rw, rw_is_sync(bio->bi_rw));
1154			rcu_read_unlock();
1155			return 0;
1156		}
1157	}
1158	rcu_read_unlock();
1159
1160	/*
1161	 * Either group has not been allocated yet or it is not an unlimited
1162	 * IO group
1163	 */
1164
1165	spin_lock_irq(q->queue_lock);
1166	tg = throtl_get_tg(td);
1167
1168	if (IS_ERR(tg)) {
1169		if (PTR_ERR(tg) == -ENODEV) {
1170			/*
1171			 * Queue is gone. No queue lock held here.
1172			 */
1173			return -ENODEV;
1174		}
1175	}
1176
1177	if (tg->nr_queued[rw]) {
1178		/*
1179		 * There is already another bio queued in same dir. No
1180		 * need to update dispatch time.
1181		 */
1182		update_disptime = false;
1183		goto queue_bio;
1184
1185	}
1186
1187	/* Bio is with-in rate limit of group */
1188	if (tg_may_dispatch(td, tg, bio, NULL)) {
1189		throtl_charge_bio(tg, bio);
1190
1191		/*
1192		 * We need to trim slice even when bios are not being queued
1193		 * otherwise it might happen that a bio is not queued for
1194		 * a long time and slice keeps on extending and trim is not
1195		 * called for a long time. Now if limits are reduced suddenly
1196		 * we take into account all the IO dispatched so far at the new
1197		 * low rate and newly queued IO gets a really long dispatch
1198		 * time.
1199		 *
1200		 * So keep on trimming slice even if bio is not queued.
1201		 */
1202		throtl_trim_slice(td, tg, rw);
1203		goto out;
1204	}
1205
1206queue_bio:
1207	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
1208			" iodisp=%u iops=%u queued=%d/%d",
1209			rw == READ ? 'R' : 'W',
1210			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
1211			tg->io_disp[rw], tg->iops[rw],
1212			tg->nr_queued[READ], tg->nr_queued[WRITE]);
1213
1214	throtl_add_bio_tg(q->td, tg, bio);
1215	*biop = NULL;
1216
1217	if (update_disptime) {
1218		tg_update_disptime(td, tg);
1219		throtl_schedule_next_dispatch(td);
1220	}
1221
1222out:
1223	spin_unlock_irq(q->queue_lock);
1224	return 0;
1225}
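/*
 * Caller contract, as a sketch (the real call site is in the generic
 * make_request path; names below are illustrative only):
 *
 *	struct bio *bio = ...;
 *	if (blk_throtl_bio(q, &bio))
 *		return;		// queue was dying, bio not taken
 *	if (!bio)
 *		return;		// throttled and queued; the dispatch worker
 *				// resubmits it via generic_make_request()
 *	// within limits: submit bio normally
 */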
1226
1227int blk_throtl_init(struct request_queue *q)
1228{
1229	struct throtl_data *td;
1230	struct throtl_grp *tg;
1231
1232	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1233	if (!td)
1234		return -ENOMEM;
1235
1236	INIT_HLIST_HEAD(&td->tg_list);
1237	td->tg_service_tree = THROTL_RB_ROOT;
1238	td->limits_changed = false;
1239	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
1240
1241	/* Alloc and init the root group */
1242	td->queue = q;
1243	tg = throtl_alloc_tg(td);
1244
1245	if (!tg) {
1246		kfree(td);
1247		return -ENOMEM;
1248	}
1249
1250	td->root_tg = tg;
1251
1252	rcu_read_lock();
1253	throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
1254	rcu_read_unlock();
1255
1256	/* Attach throtl data to request queue */
1257	q->td = td;
1258	return 0;
1259}
1260
1261void blk_throtl_exit(struct request_queue *q)
1262{
1263	struct throtl_data *td = q->td;
1264	bool wait = false;
1265
1266	BUG_ON(!td);
1267
1268	throtl_shutdown_wq(q);
1269
1270	spin_lock_irq(q->queue_lock);
1271	throtl_release_tgs(td);
1272
1273	/* If there are other groups */
1274	if (td->nr_undestroyed_grps > 0)
1275		wait = true;
1276
1277	spin_unlock_irq(q->queue_lock);
1278
1279	/*
1280	 * Wait for tg->blkg->key accessors to exit their grace periods.
1281	 * Do this wait only if there are other undestroyed groups out
1282	 * there (other than root group). This can happen if cgroup deletion
1283	 * path claimed the responsibility of cleaning up a group before
1284	 * queue cleanup code get to the group.
1285	 *
1286	 * Do not call synchronize_rcu() unconditionally as there are drivers
1287	 * which create/delete request queue hundreds of times during scan/boot
1288	 * and synchronize_rcu() can take significant time and slow down boot.
1289	 */
1290	if (wait)
1291		synchronize_rcu();
1292
1293	/*
1294	 * Just to be safe: if somebody updated limits through the cgroup
1295	 * interface after the previous flush and another work got queued,
1296	 * cancel it.
1297	 */
1298	throtl_shutdown_wq(q);
1299	throtl_td_free(td);
1300}
1301
1302static int __init throtl_init(void)
1303{
1304	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1305	if (!kthrotld_workqueue)
1306		panic("Failed to create kthrotld\n");
1307
1308	blkio_policy_register(&blkio_policy_throtl);
1309	return 0;
1310}
1311
1312module_init(throtl_init);

blk-throttle.c, v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Interface for controlling IO bandwidth on a request queue
   4 *
   5 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
   6 */
   7
   8#include <linux/module.h>
   9#include <linux/slab.h>
  10#include <linux/blkdev.h>
  11#include <linux/bio.h>
  12#include <linux/blktrace_api.h>
  13#include "blk.h"
  14#include "blk-cgroup-rwstat.h"
  15#include "blk-stat.h"
  16#include "blk-throttle.h"
  17
  18/* Max dispatch from a group in 1 round */
  19#define THROTL_GRP_QUANTUM 8
  20
  21/* Total max dispatch from all groups in one round */
  22#define THROTL_QUANTUM 32
  23
  24/* Throttling is performed over a slice and after that slice is renewed */
  25#define DFL_THROTL_SLICE_HD (HZ / 10)
  26#define DFL_THROTL_SLICE_SSD (HZ / 50)
  27#define MAX_THROTL_SLICE (HZ)
  28
  29/* A workqueue to queue throttle related work */
  30static struct workqueue_struct *kthrotld_workqueue;
  31
  32#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
  33
  34struct throtl_data
  35{
  36	/* service tree for active throtl groups */
  37	struct throtl_service_queue service_queue;
  38
  39	struct request_queue *queue;
  40
  41	/* Total Number of queued bios on READ and WRITE lists */
  42	unsigned int nr_queued[2];
  43
  44	unsigned int throtl_slice;
  45
  46	/* Work for dispatching throttled bios */
  47	struct work_struct dispatch_work;
  48
  49	bool track_bio_latency;
  50};
  51
  52static void throtl_pending_timer_fn(struct timer_list *t);
  53
  54static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
  55{
  56	return pd_to_blkg(&tg->pd);
  57}
  58
  59/**
   60 * sq_to_tg - return the throtl_grp the specified service queue belongs to
  61 * @sq: the throtl_service_queue of interest
  62 *
  63 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
  64 * embedded in throtl_data, %NULL is returned.
  65 */
  66static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
  67{
  68	if (sq && sq->parent_sq)
  69		return container_of(sq, struct throtl_grp, service_queue);
  70	else
  71		return NULL;
  72}
  73
  74/**
  75 * sq_to_td - return throtl_data the specified service queue belongs to
  76 * @sq: the throtl_service_queue of interest
  77 *
  78 * A service_queue can be embedded in either a throtl_grp or throtl_data.
  79 * Determine the associated throtl_data accordingly and return it.
  80 */
  81static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
  82{
  83	struct throtl_grp *tg = sq_to_tg(sq);
  84
  85	if (tg)
  86		return tg->td;
  87	else
  88		return container_of(sq, struct throtl_data, service_queue);
  89}
  90
  91static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
  92{
  93	struct blkcg_gq *blkg = tg_to_blkg(tg);
  94
  95	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
  96		return U64_MAX;
  97
  98	return tg->bps[rw];
  99}
 100
 101static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 102{
 103	struct blkcg_gq *blkg = tg_to_blkg(tg);
 104
 105	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
 106		return UINT_MAX;
 107
 108	return tg->iops[rw];
 109}
 110
 111/**
 112 * throtl_log - log debug message via blktrace
 113 * @sq: the service_queue being reported
 114 * @fmt: printf format string
 115 * @args: printf args
 116 *
 117 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 118 * throtl_grp; otherwise, just "throtl".
 119 */
 120#define throtl_log(sq, fmt, args...)	do {				\
 121	struct throtl_grp *__tg = sq_to_tg((sq));			\
 122	struct throtl_data *__td = sq_to_td((sq));			\
 123									\
 124	(void)__td;							\
 125	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
 126		break;							\
 127	if ((__tg)) {							\
 128		blk_add_cgroup_trace_msg(__td->queue,			\
 129			&tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
 130	} else {							\
 131		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
 132	}								\
 133} while (0)
 134
 135static inline unsigned int throtl_bio_data_size(struct bio *bio)
 136{
 137	/* assume it's one sector */
 138	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
 139		return 512;
 140	return bio->bi_iter.bi_size;
 141}
 142
 143static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 144{
 145	INIT_LIST_HEAD(&qn->node);
 146	bio_list_init(&qn->bios);
 147	qn->tg = tg;
 148}
 149
 150/**
 151 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 152 * @bio: bio being added
 153 * @qn: qnode to add bio to
 154 * @queued: the service_queue->queued[] list @qn belongs to
 155 *
 156 * Add @bio to @qn and put @qn on @queued if it's not already on.
 157 * @qn->tg's reference count is bumped when @qn is activated.  See the
 158 * comment on top of throtl_qnode definition for details.
 159 */
 160static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
 161				 struct list_head *queued)
 162{
 163	bio_list_add(&qn->bios, bio);
 164	if (list_empty(&qn->node)) {
 165		list_add_tail(&qn->node, queued);
 166		blkg_get(tg_to_blkg(qn->tg));
 167	}
 168}
 169
 170/**
 171 * throtl_peek_queued - peek the first bio on a qnode list
 172 * @queued: the qnode list to peek
 173 */
 174static struct bio *throtl_peek_queued(struct list_head *queued)
 175{
 176	struct throtl_qnode *qn;
 177	struct bio *bio;
 178
 179	if (list_empty(queued))
 180		return NULL;
 181
 182	qn = list_first_entry(queued, struct throtl_qnode, node);
 183	bio = bio_list_peek(&qn->bios);
 184	WARN_ON_ONCE(!bio);
 185	return bio;
 186}
 187
 188/**
 189 * throtl_pop_queued - pop the first bio from a qnode list
 190 * @queued: the qnode list to pop a bio from
 191 * @tg_to_put: optional out argument for throtl_grp to put
 192 *
 193 * Pop the first bio from the qnode list @queued.  After popping, the first
 194 * qnode is removed from @queued if empty or moved to the end of @queued so
 195 * that the popping order is round-robin.
 196 *
 197 * When the first qnode is removed, its associated throtl_grp should be put
 198 * too.  If @tg_to_put is NULL, this function automatically puts it;
 199 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 200 * responsible for putting it.
 201 */
 202static struct bio *throtl_pop_queued(struct list_head *queued,
 203				     struct throtl_grp **tg_to_put)
 204{
 205	struct throtl_qnode *qn;
 206	struct bio *bio;
 207
 208	if (list_empty(queued))
 209		return NULL;
 210
 211	qn = list_first_entry(queued, struct throtl_qnode, node);
 212	bio = bio_list_pop(&qn->bios);
 213	WARN_ON_ONCE(!bio);
 214
 215	if (bio_list_empty(&qn->bios)) {
 216		list_del_init(&qn->node);
 217		if (tg_to_put)
 218			*tg_to_put = qn->tg;
 219		else
 220			blkg_put(tg_to_blkg(qn->tg));
 221	} else {
 222		list_move_tail(&qn->node, queued);
 223	}
 224
 225	return bio;
 226}
 227
 228/* init a service_queue, assumes the caller zeroed it */
 229static void throtl_service_queue_init(struct throtl_service_queue *sq)
 230{
 231	INIT_LIST_HEAD(&sq->queued[READ]);
 232	INIT_LIST_HEAD(&sq->queued[WRITE]);
 233	sq->pending_tree = RB_ROOT_CACHED;
 234	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
 235}
 236
 237static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk,
 238		struct blkcg *blkcg, gfp_t gfp)
 239{
 240	struct throtl_grp *tg;
 241	int rw;
 242
 243	tg = kzalloc_node(sizeof(*tg), gfp, disk->node_id);
 244	if (!tg)
 245		return NULL;
 246
 247	if (blkg_rwstat_init(&tg->stat_bytes, gfp))
 248		goto err_free_tg;
 249
 250	if (blkg_rwstat_init(&tg->stat_ios, gfp))
 251		goto err_exit_stat_bytes;
 252
 253	throtl_service_queue_init(&tg->service_queue);
 254
 255	for (rw = READ; rw <= WRITE; rw++) {
 256		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
 257		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
 258	}
 259
 260	RB_CLEAR_NODE(&tg->rb_node);
 261	tg->bps[READ] = U64_MAX;
 262	tg->bps[WRITE] = U64_MAX;
 263	tg->iops[READ] = UINT_MAX;
 264	tg->iops[WRITE] = UINT_MAX;
 265
 266	return &tg->pd;
 267
 268err_exit_stat_bytes:
 269	blkg_rwstat_exit(&tg->stat_bytes);
 270err_free_tg:
 271	kfree(tg);
 272	return NULL;
 273}
 274
 275static void throtl_pd_init(struct blkg_policy_data *pd)
 276{
 277	struct throtl_grp *tg = pd_to_tg(pd);
 278	struct blkcg_gq *blkg = tg_to_blkg(tg);
 279	struct throtl_data *td = blkg->q->td;
 280	struct throtl_service_queue *sq = &tg->service_queue;
 281
 282	/*
 283	 * If on the default hierarchy, we switch to properly hierarchical
 284	 * behavior where limits on a given throtl_grp are applied to the
 285	 * whole subtree rather than just the group itself.  e.g. If 16M
 286	 * read_bps limit is set on a parent group, summary bps of
 287	 * parent group and its subtree groups can't exceed 16M for the
 288	 * device.
 289	 *
 290	 * If not on the default hierarchy, the broken flat hierarchy
 291	 * behavior is retained where all throtl_grps are treated as if
 292	 * they're all separate root groups right below throtl_data.
 293	 * Limits of a group don't interact with limits of other groups
 294	 * regardless of the position of the group in the hierarchy.
 295	 */
 296	sq->parent_sq = &td->service_queue;
 297	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
 298		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 299	tg->td = td;
 300}
 301
 302/*
 303 * Set has_rules[] if @tg or any of its parents have limits configured.
 304 * This doesn't require walking up to the top of the hierarchy as the
 305 * parent's has_rules[] is guaranteed to be correct.
 306 */
 307static void tg_update_has_rules(struct throtl_grp *tg)
 308{
 309	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
 310	int rw;
 311
 312	for (rw = READ; rw <= WRITE; rw++) {
 313		tg->has_rules_iops[rw] =
 314			(parent_tg && parent_tg->has_rules_iops[rw]) ||
 315			tg_iops_limit(tg, rw) != UINT_MAX;
 316		tg->has_rules_bps[rw] =
 317			(parent_tg && parent_tg->has_rules_bps[rw]) ||
 318			tg_bps_limit(tg, rw) != U64_MAX;
 319	}
 320}
 321
 322static void throtl_pd_online(struct blkg_policy_data *pd)
 323{
 324	struct throtl_grp *tg = pd_to_tg(pd);
 325	/*
 326	 * We don't want new groups to escape the limits of their ancestors.
 327	 * Update has_rules[] after a new group is brought online.
 328	 */
 329	tg_update_has_rules(tg);
 330}
 331
 332static void throtl_pd_free(struct blkg_policy_data *pd)
 333{
 334	struct throtl_grp *tg = pd_to_tg(pd);
 335
 336	del_timer_sync(&tg->service_queue.pending_timer);
 337	blkg_rwstat_exit(&tg->stat_bytes);
 338	blkg_rwstat_exit(&tg->stat_ios);
 339	kfree(tg);
 340}
 341
 342static struct throtl_grp *
 343throtl_rb_first(struct throtl_service_queue *parent_sq)
 344{
 345	struct rb_node *n;
 346
 347	n = rb_first_cached(&parent_sq->pending_tree);
 348	WARN_ON_ONCE(!n);
 349	if (!n)
 350		return NULL;
 351	return rb_entry_tg(n);
 352}
 353
 354static void throtl_rb_erase(struct rb_node *n,
 355			    struct throtl_service_queue *parent_sq)
 356{
 357	rb_erase_cached(n, &parent_sq->pending_tree);
 358	RB_CLEAR_NODE(n);
 359}
 360
 361static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
 362{
 363	struct throtl_grp *tg;
 364
 365	tg = throtl_rb_first(parent_sq);
 366	if (!tg)
 367		return;
 368
 369	parent_sq->first_pending_disptime = tg->disptime;
 370}
 371
 372static void tg_service_queue_add(struct throtl_grp *tg)
 373{
 374	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
 375	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
 376	struct rb_node *parent = NULL;
 377	struct throtl_grp *__tg;
 378	unsigned long key = tg->disptime;
 379	bool leftmost = true;
 380
 381	while (*node != NULL) {
 382		parent = *node;
 383		__tg = rb_entry_tg(parent);
 384
 385		if (time_before(key, __tg->disptime))
 386			node = &parent->rb_left;
 387		else {
 388			node = &parent->rb_right;
 389			leftmost = false;
 390		}
 391	}
 392
 393	rb_link_node(&tg->rb_node, parent, node);
 394	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
 395			       leftmost);
 396}
 397
 398static void throtl_enqueue_tg(struct throtl_grp *tg)
 399{
 400	if (!(tg->flags & THROTL_TG_PENDING)) {
 401		tg_service_queue_add(tg);
 402		tg->flags |= THROTL_TG_PENDING;
 403		tg->service_queue.parent_sq->nr_pending++;
 404	}
 405}
 406
 407static void throtl_dequeue_tg(struct throtl_grp *tg)
 408{
 409	if (tg->flags & THROTL_TG_PENDING) {
 410		struct throtl_service_queue *parent_sq =
 411			tg->service_queue.parent_sq;
 412
 413		throtl_rb_erase(&tg->rb_node, parent_sq);
 414		--parent_sq->nr_pending;
 415		tg->flags &= ~THROTL_TG_PENDING;
 416	}
 417}
 418
 419/* Call with queue lock held */
 420static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 421					  unsigned long expires)
 422{
 423	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
 424
 425	/*
 426	 * Since we are adjusting the throttle limit dynamically, the sleep
 427	 * time calculated according to previous limit might be invalid. It's
 428	 * possible the cgroup sleep time is very long and no other cgroups
 429	 * have IO running so notify the limit changes. Make sure the cgroup
 430	 * doesn't sleep too long to avoid the missed notification.
 431	 */
 432	if (time_after(expires, max_expire))
 433		expires = max_expire;
 434	mod_timer(&sq->pending_timer, expires);
 435	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
 436		   expires - jiffies, jiffies);
 437}
 438
 439/**
 440 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 441 * @sq: the service_queue to schedule dispatch for
 442 * @force: force scheduling
 443 *
 444 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 445 * dispatch time of the first pending child.  Returns %true if either timer
 446 * is armed or there's no pending child left.  %false if the current
 447 * dispatch window is still open and the caller should continue
 448 * dispatching.
 449 *
 450 * If @force is %true, the dispatch timer is always scheduled and this
 451 * function is guaranteed to return %true.  This is to be used when the
 452 * caller can't dispatch itself and needs to invoke pending_timer
 453 * unconditionally.  Note that forced scheduling is likely to induce short
 454 * delay before dispatch starts even if @sq->first_pending_disptime is not
 455 * in the future and thus shouldn't be used in hot paths.
 456 */
 457static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
 458					  bool force)
 459{
 460	/* any pending children left? */
 461	if (!sq->nr_pending)
 462		return true;
 463
 464	update_min_dispatch_time(sq);
 465
 466	/* is the next dispatch time in the future? */
 467	if (force || time_after(sq->first_pending_disptime, jiffies)) {
 468		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
 469		return true;
 470	}
 471
 472	/* tell the caller to continue dispatching */
 473	return false;
 474}
 475
 476static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
 477		bool rw, unsigned long start)
 478{
 479	tg->bytes_disp[rw] = 0;
 480	tg->io_disp[rw] = 0;
 481	tg->carryover_bytes[rw] = 0;
 482	tg->carryover_ios[rw] = 0;
 483
 484	/*
 485	 * Previous slice has expired. We must have trimmed it after last
 486	 * bio dispatch. That means since start of last slice, we never used
 487	 * that bandwidth. Do try to make use of that bandwidth while giving
 488	 * credit.
 489	 */
 490	if (time_after(start, tg->slice_start[rw]))
 491		tg->slice_start[rw] = start;
 492
 493	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
 494	throtl_log(&tg->service_queue,
 495		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
 496		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 497		   tg->slice_end[rw], jiffies);
 498}
 499
 500static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
 501					  bool clear_carryover)
 502{
 503	tg->bytes_disp[rw] = 0;
 504	tg->io_disp[rw] = 0;
 505	tg->slice_start[rw] = jiffies;
 506	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
 507	if (clear_carryover) {
 508		tg->carryover_bytes[rw] = 0;
 509		tg->carryover_ios[rw] = 0;
 510	}
 511
 512	throtl_log(&tg->service_queue,
 513		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
 514		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 515		   tg->slice_end[rw], jiffies);
 516}
 517
 518static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
 519					unsigned long jiffy_end)
 520{
 521	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
 522}
 523
 524static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
 525				       unsigned long jiffy_end)
 526{
 527	throtl_set_slice_end(tg, rw, jiffy_end);
 528	throtl_log(&tg->service_queue,
 529		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
 530		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 531		   tg->slice_end[rw], jiffies);
 532}
 533
 534/* Determine if previously allocated or extended slice is complete or not */
 535static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
 536{
 537	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
 538		return false;
 539
 540	return true;
 541}
 542
 543static unsigned int calculate_io_allowed(u32 iops_limit,
 544					 unsigned long jiffy_elapsed)
 545{
 546	unsigned int io_allowed;
 547	u64 tmp;
 548
 549	/*
 550	 * jiffy_elapsed should not be a big value: the minimum iops is 1, so
 551	 * at most jiffy_elapsed should be the equivalent of 1 second, as we
 552	 * allow dispatch after 1 second and by then the slice should have
 553	 * been trimmed.
 554	 */
 555
 556	tmp = (u64)iops_limit * jiffy_elapsed;
 557	do_div(tmp, HZ);
 558
 559	if (tmp > UINT_MAX)
 560		io_allowed = UINT_MAX;
 561	else
 562		io_allowed = tmp;
 563
 564	return io_allowed;
 565}
 566
 567static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
 568{
 569	/*
 570	 * Can result be wider than 64 bits?
 571	 * We check against 62, not 64, due to ilog2 truncation.
 572	 */
 573	if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
 574		return U64_MAX;
 575	return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
 576}
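/*
 * Illustrative arithmetic for the two helpers above (hypothetical,
 * compiled out). With HZ == 1000 and one 100ms slice elapsed, a 1MiB/s
 * bps limit allows 1048576 * 100 / 1000 = 104857 bytes and a 100 iops
 * limit allows 100 * 100 / 1000 = 10 ios.
 */
#if 0
static void throtl_budget_example(void)
{
	u64 bytes = calculate_bytes_allowed(1048576ULL, HZ / 10);
	unsigned int ios = calculate_io_allowed(100, HZ / 10);

	/* with HZ == 1000: bytes == 104857, ios == 10 */
}
#endif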
 577
 578/* Trim the used slices and adjust slice start accordingly */
 579static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 580{
 581	unsigned long time_elapsed;
 582	long long bytes_trim;
 583	int io_trim;
 584
 585	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
 586
 587	/*
 588	 * If bps is unlimited (-1), the time slice doesn't get
 589	 * renewed. Don't try to trim the slice if it has expired; a new
 590	 * slice will start when appropriate.
 591	 */
 592	if (throtl_slice_used(tg, rw))
 593		return;
 594
 595	/*
 596	 * A bio has been dispatched, so also adjust slice_end. It might
 597	 * happen that the cgroup limit was initially very low, resulting in
 598	 * a high slice_end, but the limit was later bumped up and the bio
 599	 * dispatched sooner; then slice_end needs to be reduced. A bogus high
 600	 * slice_end is bad because it prevents a new slice from starting.
 601	 */
 602
 603	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
 604
 605	time_elapsed = rounddown(jiffies - tg->slice_start[rw],
 606				 tg->td->throtl_slice);
 607	if (!time_elapsed)
 608		return;
 609
 610	bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
 611					     time_elapsed) +
 612		     tg->carryover_bytes[rw];
 613	io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
 614		  tg->carryover_ios[rw];
 615	if (bytes_trim <= 0 && io_trim <= 0)
 616		return;
 617
 618	tg->carryover_bytes[rw] = 0;
 619	if ((long long)tg->bytes_disp[rw] >= bytes_trim)
 620		tg->bytes_disp[rw] -= bytes_trim;
 621	else
 622		tg->bytes_disp[rw] = 0;
 623
 624	tg->carryover_ios[rw] = 0;
 625	if ((int)tg->io_disp[rw] >= io_trim)
 626		tg->io_disp[rw] -= io_trim;
 627	else
 628		tg->io_disp[rw] = 0;
 629
 630	tg->slice_start[rw] += time_elapsed;
 631
 632	throtl_log(&tg->service_queue,
 633		   "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
 634		   rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
 635		   bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
 636		   jiffies);
 637}
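/*
 * Worked example for the trim above (hypothetical numbers): with
 * HZ == 1000, throtl_slice == 100 jiffies and a 1MiB/s bps limit,
 * 250 jiffies after slice_start the elapsed time rounds down to 200
 * jiffies, so bytes_trim == 1048576 * 200 / 1000 = 209715. If
 * bytes_disp was 300000 it becomes 90285 and slice_start advances by
 * 200 jiffies, keeping only the unfinished part of the slice.
 */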
 638
 639static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
 640{
 641	unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
 642	u64 bps_limit = tg_bps_limit(tg, rw);
 643	u32 iops_limit = tg_iops_limit(tg, rw);
 644
 645	/*
 646	 * If the config is updated while bios are still throttled, calculate
 647	 * and accumulate how many bytes/ios have been waited for across the
 648	 * change. carryover_bytes/ios will then be used to calculate the new
 649	 * wait time under the new configuration.
 650	 */
 651	if (bps_limit != U64_MAX)
 652		tg->carryover_bytes[rw] +=
 653			calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
 654			tg->bytes_disp[rw];
 655	if (iops_limit != UINT_MAX)
 656		tg->carryover_ios[rw] +=
 657			calculate_io_allowed(iops_limit, jiffy_elapsed) -
 658			tg->io_disp[rw];
 659}
 660
 661static void tg_update_carryover(struct throtl_grp *tg)
 662{
 663	if (tg->service_queue.nr_queued[READ])
 664		__tg_update_carryover(tg, READ);
 665	if (tg->service_queue.nr_queued[WRITE])
 666		__tg_update_carryover(tg, WRITE);
 667
 668	/* see comments in struct throtl_grp for meaning of these fields. */
 669	throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
 670		   tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
 671		   tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
 672}
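/*
 * Worked example for the carryover above (hypothetical numbers): with
 * HZ == 1000, suppose the bps limit is 1MiB/s, 100 jiffies of the
 * current slice have elapsed and only 50000 bytes were dispatched.
 * calculate_bytes_allowed() yields 104857, so carryover_bytes grows by
 * 104857 - 50000 = 54857: unused budget that is credited against the
 * wait time computed under the new configuration.
 */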
 673
 674static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
 675				 u32 iops_limit)
 676{
 677	bool rw = bio_data_dir(bio);
 678	int io_allowed;
 679	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 680
 681	if (iops_limit == UINT_MAX) {
 682		return 0;
 683	}
 684
 685	jiffy_elapsed = jiffies - tg->slice_start[rw];
 686
 687	/* Round up to the next throttle slice, wait time must be nonzero */
 688	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
 689	io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
 690		     tg->carryover_ios[rw];
 691	if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
 692		return 0;
 693
 694	/* Calc approx time to dispatch */
 695	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
 696
 697	/* make sure at least one io can be dispatched after waiting */
 698	jiffy_wait = max(jiffy_wait, HZ / iops_limit + 1);
 699	return jiffy_wait;
 700}
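/*
 * Worked example for the iops check above (hypothetical numbers): with
 * HZ == 1000, throtl_slice == 100 jiffies and iops_limit == 100,
 * suppose 5 jiffies have elapsed and io_disp is already 10. The
 * elapsed time rounds up to 100 jiffies, io_allowed == 10, so the bio
 * must wait max(100 - 5, 1000 / 100 + 1) == 95 jiffies.
 */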
 701
 702static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
 703				u64 bps_limit)
 704{
 705	bool rw = bio_data_dir(bio);
 706	long long bytes_allowed;
 707	u64 extra_bytes;
 708	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 709	unsigned int bio_size = throtl_bio_data_size(bio);
 710
 711	/* no need to throttle if this bio's bytes have been accounted */
 712	if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) {
 713		return 0;
 714	}
 715
 716	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 717
 718	/* Slice has just started. Consider one slice interval */
 719	if (!jiffy_elapsed)
 720		jiffy_elapsed_rnd = tg->td->throtl_slice;
 721
 722	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
 723	bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
 724			tg->carryover_bytes[rw];
 725	if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
 726		return 0;
 727
 728	/* Calc approx time to dispatch */
 729	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
 730	jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);
 731
 732	if (!jiffy_wait)
 733		jiffy_wait = 1;
 734
 735	/*
 736	 * This wait time does not take into consideration the rounding
 737	 * up we did above. Add that time in as well.
 738	 */
 739	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
 740	return jiffy_wait;
 741}
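/*
 * Worked example for the bps check above (hypothetical numbers): with
 * HZ == 1000, throtl_slice == 100 jiffies and bps_limit == 1MiB/s, a
 * 1MiB bio at the start of a slice sees bytes_allowed == 104857, so
 * extra_bytes == 943719 and jiffy_wait == 943719 * 1000 / 1048576 ==
 * 900, plus the 100 jiffies of round-up: roughly one second, as
 * expected for 1MiB at 1MiB/s.
 */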
 742
 743/*
 744 * Returns whether one can dispatch a bio or not. Also returns the approx
 745 * number of jiffies to wait before this bio is within the IO rate and can be dispatched.
 746 */
 747static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 748			    unsigned long *wait)
 749{
 750	bool rw = bio_data_dir(bio);
 751	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
 752	u64 bps_limit = tg_bps_limit(tg, rw);
 753	u32 iops_limit = tg_iops_limit(tg, rw);
 754
 755	/*
 756	 * Currently the whole state machine of the group depends on the
 757	 * first bio queued in the group's bio list. So one should not call
 758	 * this function with a different bio if there are other bios
 759	 * queued.
 760	 */
 761	BUG_ON(tg->service_queue.nr_queued[rw] &&
 762	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
 763
 764	/* BW is unlimited if no limits are set or the group is being canceled */
 765	if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
 766	    tg->flags & THROTL_TG_CANCELING) {
 767		if (wait)
 768			*wait = 0;
 769		return true;
 770	}
 771
 772	/*
 773	 * If the previous slice expired, start a new one; otherwise renew or
 774	 * extend the existing slice to make sure it is at least a throtl_slice
 775	 * interval long from now. A new slice is started only for an empty
 776	 * throttle group. If there is a queued bio, there should be an active
 777	 * slice and it should be extended instead.
 778	 */
 779	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
 780		throtl_start_new_slice(tg, rw, true);
 781	else {
 782		if (time_before(tg->slice_end[rw],
 783		    jiffies + tg->td->throtl_slice))
 784			throtl_extend_slice(tg, rw,
 785				jiffies + tg->td->throtl_slice);
 786	}
 787
 788	bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
 789	iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
 790	if (bps_wait + iops_wait == 0) {
 791		if (wait)
 792			*wait = 0;
 793		return true;
 794	}
 795
 796	max_wait = max(bps_wait, iops_wait);
 797
 798	if (wait)
 799		*wait = max_wait;
 800
 801	if (time_before(tg->slice_end[rw], jiffies + max_wait))
 802		throtl_extend_slice(tg, rw, jiffies + max_wait);
 803
 804	return false;
 805}
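/*
 * A minimal usage sketch (hypothetical, compiled out): ask whether a
 * bio may be dispatched now and, if not, how many jiffies to wait.
 * tg_update_disptime() below uses the same pattern to compute the
 * group's next dispatch time; this simplified fragment just records
 * the earliest time the bio could go.
 */
#if 0
	unsigned long wait;

	if (tg_may_dispatch(tg, bio, &wait))
		tg_dispatch_one_bio(tg, bio_data_dir(bio));
	else
		tg->disptime = jiffies + wait;
#endif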
 806
 807static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 808{
 809	bool rw = bio_data_dir(bio);
 810	unsigned int bio_size = throtl_bio_data_size(bio);
 811
 812	/* Charge the bio to the group */
 813	if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
 814		tg->bytes_disp[rw] += bio_size;
 815		tg->last_bytes_disp[rw] += bio_size;
 816	}
 817
 818	tg->io_disp[rw]++;
 819	tg->last_io_disp[rw]++;
 820}
 821
 822/**
 823 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 824 * @bio: bio to add
 825 * @qn: qnode to use
 826 * @tg: the target throtl_grp
 827 *
 828 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 829 * tg->qnode_on_self[] is used.
 830 */
 831static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
 832			      struct throtl_grp *tg)
 833{
 834	struct throtl_service_queue *sq = &tg->service_queue;
 835	bool rw = bio_data_dir(bio);
 836
 837	if (!qn)
 838		qn = &tg->qnode_on_self[rw];
 839
 840	/*
 841	 * If @tg doesn't currently have any bios queued in the same
 842	 * direction, queueing @bio can change when @tg should be
 843	 * dispatched.  Mark that @tg was empty.  This is automatically
 844	 * cleared on the next tg_update_disptime().
 845	 */
 846	if (!sq->nr_queued[rw])
 847		tg->flags |= THROTL_TG_WAS_EMPTY;
 848
 849	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
 850
 851	sq->nr_queued[rw]++;
 852	throtl_enqueue_tg(tg);
 853}
 854
 855static void tg_update_disptime(struct throtl_grp *tg)
 856{
 857	struct throtl_service_queue *sq = &tg->service_queue;
 858	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
 859	struct bio *bio;
 860
 861	bio = throtl_peek_queued(&sq->queued[READ]);
 862	if (bio)
 863		tg_may_dispatch(tg, bio, &read_wait);
 864
 865	bio = throtl_peek_queued(&sq->queued[WRITE]);
 866	if (bio)
 867		tg_may_dispatch(tg, bio, &write_wait);
 868
 869	min_wait = min(read_wait, write_wait);
 870	disptime = jiffies + min_wait;
 871
 872	/* Update dispatch time */
 873	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
 874	tg->disptime = disptime;
 875	tg_service_queue_add(tg);
 876
 877	/* see throtl_add_bio_tg() */
 878	tg->flags &= ~THROTL_TG_WAS_EMPTY;
 879}
 880
 881static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
 882					struct throtl_grp *parent_tg, bool rw)
 883{
 884	if (throtl_slice_used(parent_tg, rw)) {
 885		throtl_start_new_slice_with_credit(parent_tg, rw,
 886				child_tg->slice_start[rw]);
 887	}
 888
 889}
 890
 891static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
 892{
 893	struct throtl_service_queue *sq = &tg->service_queue;
 894	struct throtl_service_queue *parent_sq = sq->parent_sq;
 895	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
 896	struct throtl_grp *tg_to_put = NULL;
 897	struct bio *bio;
 898
 899	/*
 900	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
 901	 * from @tg may put its reference and @parent_sq might end up
 902	 * getting released prematurely.  Remember the tg to put and put it
 903	 * after @bio is transferred to @parent_sq.
 904	 */
 905	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
 906	sq->nr_queued[rw]--;
 907
 908	throtl_charge_bio(tg, bio);
 909
 910	/*
 911	 * If our parent is another tg, we just need to transfer @bio to
 912	 * the parent using throtl_add_bio_tg().  If our parent is
 913	 * @td->service_queue, @bio is ready to be issued.  Put it on its
 914	 * bio_lists[] and decrease total number queued.  The caller is
 915	 * responsible for issuing these bios.
 916	 */
 917	if (parent_tg) {
 918		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
 919		start_parent_slice_with_credit(tg, parent_tg, rw);
 920	} else {
 921		bio_set_flag(bio, BIO_BPS_THROTTLED);
 922		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
 923				     &parent_sq->queued[rw]);
 924		BUG_ON(tg->td->nr_queued[rw] <= 0);
 925		tg->td->nr_queued[rw]--;
 926	}
 927
 928	throtl_trim_slice(tg, rw);
 929
 930	if (tg_to_put)
 931		blkg_put(tg_to_blkg(tg_to_put));
 932}
 933
 934static int throtl_dispatch_tg(struct throtl_grp *tg)
 935{
 936	struct throtl_service_queue *sq = &tg->service_queue;
 937	unsigned int nr_reads = 0, nr_writes = 0;
 938	unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
 939	unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
 940	struct bio *bio;
 941
 942	/* Try to dispatch 75% READS and 25% WRITES */
 943
 944	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
 945	       tg_may_dispatch(tg, bio, NULL)) {
 946
 947		tg_dispatch_one_bio(tg, READ);
 948		nr_reads++;
 949
 950		if (nr_reads >= max_nr_reads)
 951			break;
 952	}
 953
 954	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
 955	       tg_may_dispatch(tg, bio, NULL)) {
 956
 957		tg_dispatch_one_bio(tg, WRITE);
 958		nr_writes++;
 959
 960		if (nr_writes >= max_nr_writes)
 961			break;
 962	}
 963
 964	return nr_reads + nr_writes;
 965}
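/*
 * With THROTL_GRP_QUANTUM == 8 the split above works out to
 * max_nr_reads == 6 and max_nr_writes == 2 per round, which is how
 * the 75%/25% read/write bias is realized.
 */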
 966
 967static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
 968{
 969	unsigned int nr_disp = 0;
 970
 971	while (1) {
 972		struct throtl_grp *tg;
 973		struct throtl_service_queue *sq;
 974
 975		if (!parent_sq->nr_pending)
 976			break;
 977
 978		tg = throtl_rb_first(parent_sq);
 979		if (!tg)
 980			break;
 981
 982		if (time_before(jiffies, tg->disptime))
 983			break;
 984
 985		nr_disp += throtl_dispatch_tg(tg);
 986
 987		sq = &tg->service_queue;
 988		if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
 989			tg_update_disptime(tg);
 990		else
 991			throtl_dequeue_tg(tg);
 992
 993		if (nr_disp >= THROTL_QUANTUM)
 994			break;
 995	}
 996
 997	return nr_disp;
 998}
 999
1000/**
1001 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1002 * @t: the pending_timer member of the throtl_service_queue being serviced
1003 *
1004 * This timer is armed when a child throtl_grp with active bios becomes
1005 * pending and queued on the service_queue's pending_tree, and expires when
1006 * the first child throtl_grp should be dispatched.  This function
1007 * dispatches bios from the children throtl_grps to the parent
1008 * service_queue.
1009 *
1010 * If the parent's parent is another throtl_grp, dispatching is propagated
1011 * by either arming its pending_timer or repeating dispatch directly.  If
1012 * the top-level service_tree is reached, throtl_data->dispatch_work is
1013 * kicked so that the ready bios are issued.
1014 */
1015static void throtl_pending_timer_fn(struct timer_list *t)
1016{
1017	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1018	struct throtl_grp *tg = sq_to_tg(sq);
1019	struct throtl_data *td = sq_to_td(sq);
1020	struct throtl_service_queue *parent_sq;
1021	struct request_queue *q;
1022	bool dispatched;
1023	int ret;
1024
1025	/* throtl_data may be gone, so figure out request queue by blkg */
1026	if (tg)
1027		q = tg->pd.blkg->q;
1028	else
1029		q = td->queue;
1030
1031	spin_lock_irq(&q->queue_lock);
1032
1033	if (!q->root_blkg)
1034		goto out_unlock;
1035
1036again:
1037	parent_sq = sq->parent_sq;
1038	dispatched = false;
1039
1040	while (true) {
1041		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1042			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
1043			   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1044
1045		ret = throtl_select_dispatch(sq);
1046		if (ret) {
1047			throtl_log(sq, "bios disp=%u", ret);
1048			dispatched = true;
1049		}
1050
1051		if (throtl_schedule_next_dispatch(sq, false))
1052			break;
1053
1054	/* this dispatch window is still open, relax and repeat */
1055		spin_unlock_irq(&q->queue_lock);
1056		cpu_relax();
1057		spin_lock_irq(&q->queue_lock);
1058	}
1059
1060	if (!dispatched)
1061		goto out_unlock;
1062
1063	if (parent_sq) {
1064		/* @parent_sq is another throtl_grp, propagate dispatch */
1065		if (tg->flags & THROTL_TG_WAS_EMPTY) {
1066			tg_update_disptime(tg);
1067			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1068				/* window is already open, repeat dispatching */
1069				sq = parent_sq;
1070				tg = sq_to_tg(sq);
1071				goto again;
1072			}
1073		}
1074	} else {
1075		/* reached the top-level, queue issuing */
1076		queue_work(kthrotld_workqueue, &td->dispatch_work);
1077	}
1078out_unlock:
1079	spin_unlock_irq(&q->queue_lock);
1080}
1081
1082/**
1083 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1084 * @work: work item being executed
1085 *
1086 * This function is queued for execution when bios reach the bio_lists[]
1087 * of throtl_data->service_queue.  Those bios are ready and issued by this
1088 * function.
1089 */
1090static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1091{
1092	struct throtl_data *td = container_of(work, struct throtl_data,
1093					      dispatch_work);
1094	struct throtl_service_queue *td_sq = &td->service_queue;
1095	struct request_queue *q = td->queue;
1096	struct bio_list bio_list_on_stack;
1097	struct bio *bio;
1098	struct blk_plug plug;
1099	int rw;
1100
1101	bio_list_init(&bio_list_on_stack);
1102
1103	spin_lock_irq(&q->queue_lock);
1104	for (rw = READ; rw <= WRITE; rw++)
1105		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1106			bio_list_add(&bio_list_on_stack, bio);
1107	spin_unlock_irq(&q->queue_lock);
1108
1109	if (!bio_list_empty(&bio_list_on_stack)) {
1110		blk_start_plug(&plug);
1111		while ((bio = bio_list_pop(&bio_list_on_stack)))
1112			submit_bio_noacct_nocheck(bio);
1113		blk_finish_plug(&plug);
1114	}
1115}
1116
1117static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1118			      int off)
1119{
1120	struct throtl_grp *tg = pd_to_tg(pd);
1121	u64 v = *(u64 *)((void *)tg + off);
1122
1123	if (v == U64_MAX)
1124		return 0;
1125	return __blkg_prfill_u64(sf, pd, v);
1126}
1127
1128static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1129			       int off)
1130{
1131	struct throtl_grp *tg = pd_to_tg(pd);
1132	unsigned int v = *(unsigned int *)((void *)tg + off);
1133
1134	if (v == UINT_MAX)
1135		return 0;
1136	return __blkg_prfill_u64(sf, pd, v);
1137}
1138
1139static int tg_print_conf_u64(struct seq_file *sf, void *v)
1140{
1141	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1142			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1143	return 0;
1144}
1145
1146static int tg_print_conf_uint(struct seq_file *sf, void *v)
1147{
1148	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1149			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1150	return 0;
1151}
1152
1153static void tg_conf_updated(struct throtl_grp *tg, bool global)
1154{
1155	struct throtl_service_queue *sq = &tg->service_queue;
1156	struct cgroup_subsys_state *pos_css;
1157	struct blkcg_gq *blkg;
1158
1159	throtl_log(&tg->service_queue,
1160		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1161		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1162		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1163
1164	rcu_read_lock();
1165	/*
1166	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
1167	 * considered to have rules if either the tg itself or any of its
1168	 * ancestors has rules.  This identifies groups without any
1169	 * restrictions in the whole hierarchy and allows them to bypass
1170	 * blk-throttle.
1171	 */
1172	blkg_for_each_descendant_pre(blkg, pos_css,
1173			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1174		struct throtl_grp *this_tg = blkg_to_tg(blkg);
1175
1176		tg_update_has_rules(this_tg);
1177		/* ignore root/second level */
1178		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1179		    !blkg->parent->parent)
1180			continue;
1181	}
1182	rcu_read_unlock();
1183
1184	/*
1185	 * We're already holding queue_lock and know @tg is valid.  Let's
1186	 * apply the new config directly.
1187	 *
1188	 * Restart the slices for both READ and WRITE. It might happen
1189	 * that a group's limits are dropped suddenly and we don't want to
1190	 * account recently dispatched IO at the new low rate.
1191	 */
1192	throtl_start_new_slice(tg, READ, false);
1193	throtl_start_new_slice(tg, WRITE, false);
1194
1195	if (tg->flags & THROTL_TG_PENDING) {
1196		tg_update_disptime(tg);
1197		throtl_schedule_next_dispatch(sq->parent_sq, true);
1198	}
1199}
1200
1201static int blk_throtl_init(struct gendisk *disk)
1202{
1203	struct request_queue *q = disk->queue;
1204	struct throtl_data *td;
1205	int ret;
1206
1207	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1208	if (!td)
1209		return -ENOMEM;
1210
1211	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
1212	throtl_service_queue_init(&td->service_queue);
1213
1214	/*
1215	 * Freeze queue before activating policy, to synchronize with IO path,
1216	 * which is protected by 'q_usage_counter'.
1217	 */
1218	blk_mq_freeze_queue(disk->queue);
1219	blk_mq_quiesce_queue(disk->queue);
1220
1221	q->td = td;
1222	td->queue = q;
1223
1224	/* activate policy */
1225	ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
1226	if (ret) {
1227		q->td = NULL;
1228		kfree(td);
1229		goto out;
1230	}
1231
1232	if (blk_queue_nonrot(q))
1233		td->throtl_slice = DFL_THROTL_SLICE_SSD;
1234	else
1235		td->throtl_slice = DFL_THROTL_SLICE_HD;
1236	td->track_bio_latency = !queue_is_mq(q);
1237	if (!td->track_bio_latency)
1238		blk_stat_enable_accounting(q);
1239
1240out:
1241	blk_mq_unquiesce_queue(disk->queue);
1242	blk_mq_unfreeze_queue(disk->queue);
1243
1244	return ret;
1245}
1246
1247
1248static ssize_t tg_set_conf(struct kernfs_open_file *of,
1249			   char *buf, size_t nbytes, loff_t off, bool is_u64)
1250{
1251	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1252	struct blkg_conf_ctx ctx;
1253	struct throtl_grp *tg;
1254	int ret;
1255	u64 v;
1256
1257	blkg_conf_init(&ctx, buf);
1258
1259	ret = blkg_conf_open_bdev(&ctx);
1260	if (ret)
1261		goto out_finish;
1262
1263	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
1264		ret = blk_throtl_init(ctx.bdev->bd_disk);
1265		if (ret)
1266			goto out_finish;
1267	}
1268
1269	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
1270	if (ret)
1271		goto out_finish;
1272
1273	ret = -EINVAL;
1274	if (sscanf(ctx.body, "%llu", &v) != 1)
1275		goto out_finish;
1276	if (!v)
1277		v = U64_MAX;
1278
1279	tg = blkg_to_tg(ctx.blkg);
1280	tg_update_carryover(tg);
1281
1282	if (is_u64)
1283		*(u64 *)((void *)tg + of_cft(of)->private) = v;
1284	else
1285		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1286
1287	tg_conf_updated(tg, false);
1288	ret = 0;
1289out_finish:
1290	blkg_conf_exit(&ctx);
1291	return ret ?: nbytes;
1292}
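/*
 * Example usage of the legacy (cgroup v1) interface above, assuming a
 * hypothetical device with major:minor 8:16. Writing 0 clears the
 * limit, since a zero value is mapped to "unlimited" (U64_MAX) above:
 *
 *   echo "8:16 1048576" > blkio.throttle.read_bps_device
 *   echo "8:16 0" > blkio.throttle.read_bps_device
 */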
1293
1294static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1295			       char *buf, size_t nbytes, loff_t off)
1296{
1297	return tg_set_conf(of, buf, nbytes, off, true);
1298}
1299
1300static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1301				char *buf, size_t nbytes, loff_t off)
1302{
1303	return tg_set_conf(of, buf, nbytes, off, false);
1304}
1305
1306static int tg_print_rwstat(struct seq_file *sf, void *v)
1307{
1308	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1309			  blkg_prfill_rwstat, &blkcg_policy_throtl,
1310			  seq_cft(sf)->private, true);
1311	return 0;
1312}
1313
1314static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
1315				      struct blkg_policy_data *pd, int off)
1316{
1317	struct blkg_rwstat_sample sum;
1318
1319	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
1320				  &sum);
1321	return __blkg_prfill_rwstat(sf, pd, &sum);
1322}
1323
1324static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
1325{
1326	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1327			  tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
1328			  seq_cft(sf)->private, true);
1329	return 0;
1330}
1331
1332static struct cftype throtl_legacy_files[] = {
1333	{
1334		.name = "throttle.read_bps_device",
1335		.private = offsetof(struct throtl_grp, bps[READ]),
1336		.seq_show = tg_print_conf_u64,
1337		.write = tg_set_conf_u64,
1338	},
1339	{
1340		.name = "throttle.write_bps_device",
1341		.private = offsetof(struct throtl_grp, bps[WRITE]),
1342		.seq_show = tg_print_conf_u64,
1343		.write = tg_set_conf_u64,
1344	},
1345	{
1346		.name = "throttle.read_iops_device",
1347		.private = offsetof(struct throtl_grp, iops[READ]),
1348		.seq_show = tg_print_conf_uint,
1349		.write = tg_set_conf_uint,
1350	},
1351	{
1352		.name = "throttle.write_iops_device",
1353		.private = offsetof(struct throtl_grp, iops[WRITE]),
1354		.seq_show = tg_print_conf_uint,
1355		.write = tg_set_conf_uint,
1356	},
1357	{
1358		.name = "throttle.io_service_bytes",
1359		.private = offsetof(struct throtl_grp, stat_bytes),
1360		.seq_show = tg_print_rwstat,
1361	},
1362	{
1363		.name = "throttle.io_service_bytes_recursive",
1364		.private = offsetof(struct throtl_grp, stat_bytes),
1365		.seq_show = tg_print_rwstat_recursive,
1366	},
1367	{
1368		.name = "throttle.io_serviced",
1369		.private = offsetof(struct throtl_grp, stat_ios),
1370		.seq_show = tg_print_rwstat,
1371	},
1372	{
1373		.name = "throttle.io_serviced_recursive",
1374		.private = offsetof(struct throtl_grp, stat_ios),
1375		.seq_show = tg_print_rwstat_recursive,
1376	},
1377	{ }	/* terminate */
1378};
1379
1380static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1381			 int off)
1382{
1383	struct throtl_grp *tg = pd_to_tg(pd);
1384	const char *dname = blkg_dev_name(pd->blkg);
1385	u64 bps_dft;
1386	unsigned int iops_dft;
1387
1388	if (!dname)
1389		return 0;
1390
1391	bps_dft = U64_MAX;
1392	iops_dft = UINT_MAX;
1393
1394	if (tg->bps[READ] == bps_dft &&
1395	    tg->bps[WRITE] == bps_dft &&
1396	    tg->iops[READ] == iops_dft &&
1397	    tg->iops[WRITE] == iops_dft)
1398		return 0;
1399
1400	seq_printf(sf, "%s", dname);
1401	if (tg->bps[READ] == U64_MAX)
1402		seq_printf(sf, " rbps=max");
1403	else
1404		seq_printf(sf, " rbps=%llu", tg->bps[READ]);
1405
1406	if (tg->bps[WRITE] == U64_MAX)
1407		seq_printf(sf, " wbps=max");
1408	else
1409		seq_printf(sf, " wbps=%llu", tg->bps[WRITE]);
1410
1411	if (tg->iops[READ] == UINT_MAX)
1412		seq_printf(sf, " riops=max");
1413	else
1414		seq_printf(sf, " riops=%u", tg->iops[READ]);
1415
1416	if (tg->iops[WRITE] == UINT_MAX)
1417		seq_printf(sf, " wiops=max");
1418	else
1419		seq_printf(sf, " wiops=%u", tg->iops[WRITE]);
1420
1421	seq_printf(sf, "\n");
1422	return 0;
1423}
1424
1425static int tg_print_limit(struct seq_file *sf, void *v)
1426{
1427	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1428			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1429	return 0;
1430}
1431
1432static ssize_t tg_set_limit(struct kernfs_open_file *of,
1433			  char *buf, size_t nbytes, loff_t off)
1434{
1435	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1436	struct blkg_conf_ctx ctx;
1437	struct throtl_grp *tg;
1438	u64 v[4];
1439	int ret;
1440
1441	blkg_conf_init(&ctx, buf);
1442
1443	ret = blkg_conf_open_bdev(&ctx);
1444	if (ret)
1445		goto out_finish;
1446
1447	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
1448		ret = blk_throtl_init(ctx.bdev->bd_disk);
1449		if (ret)
1450			goto out_finish;
1451	}
1452
1453	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
1454	if (ret)
1455		goto out_finish;
1456
1457	tg = blkg_to_tg(ctx.blkg);
1458	tg_update_carryover(tg);
1459
1460	v[0] = tg->bps[READ];
1461	v[1] = tg->bps[WRITE];
1462	v[2] = tg->iops[READ];
1463	v[3] = tg->iops[WRITE];
1464
1465	while (true) {
1466		char tok[27];	/* wiops=18446744073709551616 */
1467		char *p;
1468		u64 val = U64_MAX;
1469		int len;
1470
1471		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1472			break;
1473		if (tok[0] == '\0')
1474			break;
1475		ctx.body += len;
1476
1477		ret = -EINVAL;
1478		p = tok;
1479		strsep(&p, "=");
1480		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1481			goto out_finish;
1482
1483		ret = -ERANGE;
1484		if (!val)
1485			goto out_finish;
1486
1487		ret = -EINVAL;
1488		if (!strcmp(tok, "rbps"))
1489			v[0] = val;
1490		else if (!strcmp(tok, "wbps"))
1491			v[1] = val;
1492		else if (!strcmp(tok, "riops"))
1493			v[2] = min_t(u64, val, UINT_MAX);
1494		else if (!strcmp(tok, "wiops"))
1495			v[3] = min_t(u64, val, UINT_MAX);
1496		else
1497			goto out_finish;
1498	}
1499
1500	tg->bps[READ] = v[0];
1501	tg->bps[WRITE] = v[1];
1502	tg->iops[READ] = v[2];
1503	tg->iops[WRITE] = v[3];
1504
1505	tg_conf_updated(tg, false);
1506	ret = 0;
1507out_finish:
1508	blkg_conf_exit(&ctx);
1509	return ret ?: nbytes;
1510}
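/*
 * Example usage of the cgroup v2 "io.max" interface implemented above,
 * assuming a hypothetical device with major:minor 8:16. Limits not
 * mentioned are left unchanged and "max" removes a limit:
 *
 *   echo "8:16 rbps=2097152 wiops=120" > io.max
 *   echo "8:16 wiops=max" > io.max
 */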
1511
1512static struct cftype throtl_files[] = {
1513	{
1514		.name = "max",
1515		.flags = CFTYPE_NOT_ON_ROOT,
1516		.seq_show = tg_print_limit,
1517		.write = tg_set_limit,
1518	},
1519	{ }	/* terminate */
1520};
1521
1522static void throtl_shutdown_wq(struct request_queue *q)
1523{
1524	struct throtl_data *td = q->td;
1525
1526	cancel_work_sync(&td->dispatch_work);
1527}
1528
1529static void tg_flush_bios(struct throtl_grp *tg)
1530{
1531	struct throtl_service_queue *sq = &tg->service_queue;
1532
1533	if (tg->flags & THROTL_TG_CANCELING)
1534		return;
1535	/*
1536	 * Set the flag to make sure throtl_pending_timer_fn() won't
1537	 * stop until all throttled bios are dispatched.
1538	 */
1539	tg->flags |= THROTL_TG_CANCELING;
1540
1541	/*
1542	 * Do not dispatch a cgroup without THROTL_TG_PENDING, or the cgroup
1543	 * will be inserted into the service queue without THROTL_TG_PENDING
1544	 * set by tg_update_disptime() below. IO dispatched from a child in
1545	 * tg_dispatch_one_bio() would then trigger a double insertion
1546	 * and corrupt the tree.
1547	 */
1548	if (!(tg->flags & THROTL_TG_PENDING))
1549		return;
1550
1551	/*
1552	 * Update disptime after setting the above flag to make sure
1553	 * throtl_select_dispatch() won't exit without dispatching.
1554	 */
1555	tg_update_disptime(tg);
1556
1557	throtl_schedule_pending_timer(sq, jiffies + 1);
1558}
1559
1560static void throtl_pd_offline(struct blkg_policy_data *pd)
1561{
1562	tg_flush_bios(pd_to_tg(pd));
1563}
1564
1565struct blkcg_policy blkcg_policy_throtl = {
1566	.dfl_cftypes		= throtl_files,
1567	.legacy_cftypes		= throtl_legacy_files,
1568
1569	.pd_alloc_fn		= throtl_pd_alloc,
1570	.pd_init_fn		= throtl_pd_init,
1571	.pd_online_fn		= throtl_pd_online,
1572	.pd_offline_fn		= throtl_pd_offline,
1573	.pd_free_fn		= throtl_pd_free,
1574};
1575
1576void blk_throtl_cancel_bios(struct gendisk *disk)
1577{
1578	struct request_queue *q = disk->queue;
1579	struct cgroup_subsys_state *pos_css;
1580	struct blkcg_gq *blkg;
1581
1582	if (!blk_throtl_activated(q))
1583		return;
1584
1585	spin_lock_irq(&q->queue_lock);
1586	/*
1587	 * queue_lock is held, so technically the rcu lock is not needed
1588	 * here. However, the rcu lock is still taken to emphasize that the
1589	 * following path needs RCU protection and to prevent a lockdep warning.
1590	 */
1591	rcu_read_lock();
1592	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
1593		/*
1594		 * disk_release will call pd_offline_fn to cancel bios.
1595		 * However, disk_release can't be called if someone holds
1596		 * a refcount on the device and has issued bios which are
1597		 * still in flight after del_gendisk.
1598		 * Cancel bios here to ensure none are in flight after
1599		 * del_gendisk.
1600		 */
1601		tg_flush_bios(blkg_to_tg(blkg));
1602	}
1603	rcu_read_unlock();
1604	spin_unlock_irq(&q->queue_lock);
1605}
1606
1607static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
1608{
1609	/* throtl is FIFO - if bios are already queued, this one should queue too */
1610	if (tg->service_queue.nr_queued[rw])
1611		return false;
1612
1613	return tg_may_dispatch(tg, bio, NULL);
1614}
1615
1616static void tg_dispatch_in_debt(struct throtl_grp *tg, struct bio *bio, bool rw)
1617{
1618	if (!bio_flagged(bio, BIO_BPS_THROTTLED))
1619		tg->carryover_bytes[rw] -= throtl_bio_data_size(bio);
1620	tg->carryover_ios[rw]--;
1621}
1622
1623bool __blk_throtl_bio(struct bio *bio)
1624{
1625	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1626	struct blkcg_gq *blkg = bio->bi_blkg;
1627	struct throtl_qnode *qn = NULL;
1628	struct throtl_grp *tg = blkg_to_tg(blkg);
1629	struct throtl_service_queue *sq;
1630	bool rw = bio_data_dir(bio);
1631	bool throttled = false;
1632	struct throtl_data *td = tg->td;
1633
1634	rcu_read_lock();
1635	spin_lock_irq(&q->queue_lock);
1636	sq = &tg->service_queue;
1637
1638	while (true) {
1639		if (tg_within_limit(tg, bio, rw)) {
1640			/* within limits, let's charge and dispatch directly */
1641			throtl_charge_bio(tg, bio);
1642
1643			/*
1644			 * We need to trim the slice even when bios are not
1645			 * being queued; otherwise a bio might not be queued
1646			 * for a long time while the slice keeps extending and
1647			 * trim is never called. If limits are then reduced
1648			 * suddenly, all the IO dispatched so far is accounted
1649			 * at the new low rate and newly queued IO gets a
1650			 * really long dispatch time.
1651			 *
1652			 * So keep on trimming slice even if bio is not queued.
1653			 */
1654			throtl_trim_slice(tg, rw);
1655		} else if (bio_issue_as_root_blkg(bio)) {
1656			/*
1657			 * IOs which may cause priority inversions are
1658			 * dispatched directly, even if they're over limit.
1659			 * Debts are handled by carryover_bytes/ios while
1660			 * calculating wait time.
1661			 */
1662			tg_dispatch_in_debt(tg, bio, rw);
1663		} else {
1664			/* if above limits, break to queue */
1665			break;
1666		}
1667
1668		/*
1669		 * @bio passed through this layer without being throttled.
1670		 * Climb up the ladder.  If we're already at the top, it
1671		 * can be executed directly.
1672		 */
1673		qn = &tg->qnode_on_parent[rw];
1674		sq = sq->parent_sq;
1675		tg = sq_to_tg(sq);
1676		if (!tg) {
1677			bio_set_flag(bio, BIO_BPS_THROTTLED);
1678			goto out_unlock;
1679		}
1680	}
1681
1682	/* out-of-limit, queue to @tg */
1683	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1684		   rw == READ ? 'R' : 'W',
1685		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
1686		   tg_bps_limit(tg, rw),
1687		   tg->io_disp[rw], tg_iops_limit(tg, rw),
1688		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1689
1690	td->nr_queued[rw]++;
1691	throtl_add_bio_tg(bio, qn, tg);
1692	throttled = true;
1693
1694	/*
1695	 * Update @tg's dispatch time and force schedule dispatch if @tg
1696	 * was empty before @bio.  The forced scheduling isn't likely to
1697	 * cause undue delay as @bio is likely to be dispatched directly if
1698	 * its @tg's disptime is not in the future.
1699	 */
1700	if (tg->flags & THROTL_TG_WAS_EMPTY) {
1701		tg_update_disptime(tg);
1702		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
1703	}
1704
1705out_unlock:
1706	spin_unlock_irq(&q->queue_lock);
1707
1708	rcu_read_unlock();
1709	return throttled;
1710}
1711
1712void blk_throtl_exit(struct gendisk *disk)
1713{
1714	struct request_queue *q = disk->queue;
1715
1716	if (!blk_throtl_activated(q))
1717		return;
1718
1719	del_timer_sync(&q->td->service_queue.pending_timer);
1720	throtl_shutdown_wq(q);
1721	blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
1722	kfree(q->td);
1723}
1724
1725static int __init throtl_init(void)
1726{
1727	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1728	if (!kthrotld_workqueue)
1729		panic("Failed to create kthrotld\n");
1730
1731	return blkcg_policy_register(&blkcg_policy_throtl);
1732}
1733
1734module_init(throtl_init);