v3.15
 
   1/*
   2 * Interface for controlling IO bandwidth on a request queue
   3 *
   4 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/slab.h>
   9#include <linux/blkdev.h>
  10#include <linux/bio.h>
  11#include <linux/blktrace_api.h>
  12#include "blk-cgroup.h"
  13#include "blk.h"
  14
  15/* Max dispatch from a group in 1 round */
  16static int throtl_grp_quantum = 8;
  17
  18/* Total max dispatch from all groups in one round */
  19static int throtl_quantum = 32;
  20
  21/* Throttling is performed over 100ms slice and after that slice is renewed */
  22static unsigned long throtl_slice = HZ/10;	/* 100 ms */
  23
  24static struct blkcg_policy blkcg_policy_throtl;
  25
  26/* A workqueue to queue throttle related work */
  27static struct workqueue_struct *kthrotld_workqueue;
  28
  29/*
  30 * To implement hierarchical throttling, throtl_grps form a tree and bios
  31 * are dispatched upwards level by level until they reach the top and get
  32 * issued.  When dispatching bios from the children and local group at each
  33 * level, if the bios are dispatched into a single bio_list, there's a
  34 * risk that a local or child group which can queue many bios at once
  35 * fills up the list and starves the others.
  36 *
  37 * To avoid such starvation, dispatched bios are queued separately
  38 * according to where they came from.  When they are again dispatched to
  39 * the parent, they're popped in round-robin order so that no single source
  40 * hogs the dispatch window.
  41 *
  42 * throtl_qnode is used to keep the queued bios separated by their sources.
  43 * Bios are queued to throtl_qnode which in turn is queued to
  44 * throtl_service_queue and then dispatched in round-robin order.
  45 *
  46 * It's also used to track the reference counts on blkg's.  A qnode always
  47 * belongs to a throtl_grp and gets queued on itself or the parent, so
  48 * incrementing the reference of the associated throtl_grp when a qnode is
  49 * queued and decrementing when dequeued is enough to keep the whole blkg
  50 * tree pinned while bios are in flight.
  51 */
  52struct throtl_qnode {
  53	struct list_head	node;		/* service_queue->queued[] */
  54	struct bio_list		bios;		/* queued bios */
  55	struct throtl_grp	*tg;		/* tg this qnode belongs to */
  56};
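
To make the round-robin popping described above concrete, here is a minimal
user-space sketch (illustrative only -- the fixed-size arrays and names are
assumptions, not kernel code).  Each source keeps its own FIFO; the dispatcher
pops one bio from the source at the head of the list, then rotates that source
to the tail unless it has emptied:

	#include <stdio.h>

	#define NSRC 3	/* number of bio sources (qnodes) */
	#define NBIO 2	/* bios queued per source */

	int main(void)
	{
		int bios[NSRC][NBIO] = { { 1, 2 }, { 3, 4 }, { 5, 6 } };
		int head[NSRC] = { 0 };		/* next bio per source */
		int order[NSRC] = { 0, 1, 2 };	/* round-robin order */
		int nactive = NSRC;

		while (nactive) {
			int src = order[0], i;

			printf("dispatch bio %d from source %d\n",
			       bios[src][head[src]++], src);
			for (i = 0; i < nactive - 1; i++)	/* rotate */
				order[i] = order[i + 1];
			if (head[src] < NBIO)	/* still has bios: to tail */
				order[nactive - 1] = src;
			else			/* emptied: drop it */
				nactive--;
		}
		return 0;
	}

No single source hogs the dispatch window: the output interleaves
1, 3, 5, 2, 4, 6 instead of draining one source at a time.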
  57
  58struct throtl_service_queue {
  59	struct throtl_service_queue *parent_sq;	/* the parent service_queue */
  60
  61	/*
  62	 * Bios queued directly to this service_queue or dispatched from
  63	 * children throtl_grp's.
  64	 */
  65	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
  66	unsigned int		nr_queued[2];	/* number of queued bios */
  67
  68	/*
  69	 * RB tree of active children throtl_grp's, which are sorted by
  70	 * their ->disptime.
  71	 */
  72	struct rb_root		pending_tree;	/* RB tree of active tgs */
  73	struct rb_node		*first_pending;	/* first node in the tree */
  74	unsigned int		nr_pending;	/* # queued in the tree */
  75	unsigned long		first_pending_disptime;	/* disptime of the first tg */
  76	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
  77};
  78
  79enum tg_state_flags {
  80	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
  81	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
  82};
  83
  84#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
  85
  86/* Per-cpu group stats */
  87struct tg_stats_cpu {
  88	/* total bytes transferred */
  89	struct blkg_rwstat		service_bytes;
  90	/* total IOs serviced, post merge */
  91	struct blkg_rwstat		serviced;
  92};
  93
  94struct throtl_grp {
  95	/* must be the first member */
  96	struct blkg_policy_data pd;
  97
  98	/* active throtl group service_queue member */
  99	struct rb_node rb_node;
 100
 101	/* throtl_data this group belongs to */
 102	struct throtl_data *td;
 103
 104	/* this group's service queue */
 105	struct throtl_service_queue service_queue;
 106
 107	/*
 108	 * qnode_on_self is used when bios are directly queued to this
 109	 * throtl_grp so that local bios compete fairly with bios
 110	 * dispatched from children.  qnode_on_parent is used when bios are
 111	 * dispatched from this throtl_grp into its parent and will compete
 112	 * with the sibling qnode_on_parents and the parent's
 113	 * qnode_on_self.
 114	 */
 115	struct throtl_qnode qnode_on_self[2];
 116	struct throtl_qnode qnode_on_parent[2];
 117
 118	/*
 119	 * Dispatch time in jiffies. This is the estimated time when group
 120	 * will unthrottle and is ready to dispatch more bio. It is used as
 121	 * key to sort active groups in service tree.
 122	 */
 123	unsigned long disptime;
 124
 125	unsigned int flags;
 126
 127	/* are there any throtl rules between this group and td? */
 128	bool has_rules[2];
 129
 130	/* bytes per second rate limits */
 131	uint64_t bps[2];
 132
 133	/* IOPS limits */
 134	unsigned int iops[2];
 135
 136	/* Number of bytes dispatched in current slice */
 137	uint64_t bytes_disp[2];
 138	/* Number of bio's dispatched in current slice */
 139	unsigned int io_disp[2];
 140
 141	/* When did we start a new slice */
 142	unsigned long slice_start[2];
 143	unsigned long slice_end[2];
 144
 145	/* Per cpu stats pointer */
 146	struct tg_stats_cpu __percpu *stats_cpu;
 147
 148	/* List of tgs waiting for per cpu stats memory to be allocated */
 149	struct list_head stats_alloc_node;
 150};
 151
 152struct throtl_data
 153{
 154	/* service tree for active throtl groups */
 155	struct throtl_service_queue service_queue;
 156
 157	struct request_queue *queue;
 158
 159	/* Total Number of queued bios on READ and WRITE lists */
 160	unsigned int nr_queued[2];
 161
 162	/*
 163	 * number of total undestroyed groups
 164	 */
 165	unsigned int nr_undestroyed_grps;
 166
 167	/* Work for dispatching throttled bios */
 168	struct work_struct dispatch_work;
 169};
 170
 171/* list and work item to allocate percpu group stats */
 172static DEFINE_SPINLOCK(tg_stats_alloc_lock);
 173static LIST_HEAD(tg_stats_alloc_list);
 174
 175static void tg_stats_alloc_fn(struct work_struct *);
 176static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
 177
 178static void throtl_pending_timer_fn(unsigned long arg);
 179
 180static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
 181{
 182	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
 183}
 184
 185static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
 186{
 187	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
 188}
 189
 190static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
 191{
 192	return pd_to_blkg(&tg->pd);
 193}
 194
 195static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
 196{
 197	return blkg_to_tg(td->queue->root_blkg);
 198}
 199
 200/**
 201 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 202 * @sq: the throtl_service_queue of interest
 203 *
 204 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 205 * embedded in throtl_data, %NULL is returned.
 206 */
 207static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
 208{
 209	if (sq && sq->parent_sq)
 210		return container_of(sq, struct throtl_grp, service_queue);
 211	else
 212		return NULL;
 213}
 214
 215/**
 216 * sq_to_td - return throtl_data the specified service queue belongs to
 217 * @sq: the throtl_service_queue of interest
 218 *
 219 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 220 * Determine the associated throtl_data accordingly and return it.
 221 */
 222static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
 223{
 224	struct throtl_grp *tg = sq_to_tg(sq);
 225
 226	if (tg)
 227		return tg->td;
 228	else
 229		return container_of(sq, struct throtl_data, service_queue);
 230}
 231
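
sq_to_tg() and sq_to_td() above both lean on container_of() to step from an
embedded member back to the enclosing structure.  A self-contained user-space
sketch of the same pattern (the struct names here are hypothetical):

	#include <stddef.h>
	#include <stdio.h>

	/* same idea as the kernel's container_of() */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct outer {				/* stands in for throtl_grp */
		int id;
		struct inner { int x; } sq;	/* stands in for service_queue */
	};

	int main(void)
	{
		struct outer o = { .id = 42 };
		struct inner *ip = &o.sq;
		/* recover the enclosing struct from a pointer to its member */
		struct outer *op = container_of(ip, struct outer, sq);

		printf("%d\n", op->id);		/* prints 42 */
		return 0;
	}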
 232/**
 233 * throtl_log - log debug message via blktrace
 234 * @sq: the service_queue being reported
 235 * @fmt: printf format string
 236 * @args: printf args
 237 *
 238 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 239 * throtl_grp; otherwise, just "throtl".
 240 *
 241 * TODO: this should be made a function and name formatting should happen
 242 * after testing whether blktrace is enabled.
 243 */
 244#define throtl_log(sq, fmt, args...)	do {				\
 245	struct throtl_grp *__tg = sq_to_tg((sq));			\
 246	struct throtl_data *__td = sq_to_td((sq));			\
 247									\
 248	(void)__td;							\
 249	if ((__tg)) {							\
 250		char __pbuf[128];					\
 251									\
 252		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
 253		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
 254	} else {							\
 255		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
 256	}								\
 257} while (0)
 258
 259static void tg_stats_init(struct tg_stats_cpu *tg_stats)
 260{
 261	blkg_rwstat_init(&tg_stats->service_bytes);
 262	blkg_rwstat_init(&tg_stats->serviced);
 263}
 264
 265/*
 266 * Worker for allocating per cpu stat for tgs. This is scheduled on the
 267 * system_wq once there are some groups on the alloc_list waiting for
 268 * allocation.
 269 */
 270static void tg_stats_alloc_fn(struct work_struct *work)
 271{
 272	static struct tg_stats_cpu *stats_cpu;	/* this fn is non-reentrant */
 273	struct delayed_work *dwork = to_delayed_work(work);
 274	bool empty = false;
 275
 276alloc_stats:
 277	if (!stats_cpu) {
 278		int cpu;
 279
 280		stats_cpu = alloc_percpu(struct tg_stats_cpu);
 281		if (!stats_cpu) {
 282			/* allocation failed, try again after some time */
 283			schedule_delayed_work(dwork, msecs_to_jiffies(10));
 284			return;
 285		}
 286		for_each_possible_cpu(cpu)
 287			tg_stats_init(per_cpu_ptr(stats_cpu, cpu));
 288	}
 289
 290	spin_lock_irq(&tg_stats_alloc_lock);
 291
 292	if (!list_empty(&tg_stats_alloc_list)) {
 293		struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
 294							 struct throtl_grp,
 295							 stats_alloc_node);
 296		swap(tg->stats_cpu, stats_cpu);
 297		list_del_init(&tg->stats_alloc_node);
 298	}
 299
 300	empty = list_empty(&tg_stats_alloc_list);
 301	spin_unlock_irq(&tg_stats_alloc_lock);
 302	if (!empty)
 303		goto alloc_stats;
 304}
 305
 306static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 307{
 308	INIT_LIST_HEAD(&qn->node);
 309	bio_list_init(&qn->bios);
 310	qn->tg = tg;
 311}
 312
 313/**
 314 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 315 * @bio: bio being added
 316 * @qn: qnode to add bio to
 317 * @queued: the service_queue->queued[] list @qn belongs to
 318 *
 319 * Add @bio to @qn and put @qn on @queued if it's not already on.
 320 * @qn->tg's reference count is bumped when @qn is activated.  See the
 321 * comment on top of throtl_qnode definition for details.
 322 */
 323static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
 324				 struct list_head *queued)
 325{
 326	bio_list_add(&qn->bios, bio);
 327	if (list_empty(&qn->node)) {
 328		list_add_tail(&qn->node, queued);
 329		blkg_get(tg_to_blkg(qn->tg));
 330	}
 331}
 332
 333/**
 334 * throtl_peek_queued - peek the first bio on a qnode list
 335 * @queued: the qnode list to peek
 336 */
 337static struct bio *throtl_peek_queued(struct list_head *queued)
 338{
 339	struct throtl_qnode *qn;
 340	struct bio *bio;
 341
 342	if (list_empty(queued))
 343		return NULL;
 344	qn = list_first_entry(queued, struct throtl_qnode, node);
 345	bio = bio_list_peek(&qn->bios);
 346	WARN_ON_ONCE(!bio);
 347	return bio;
 348}
 349
 350/**
 351 * throtl_pop_queued - pop the first bio from a qnode list
 352 * @queued: the qnode list to pop a bio from
 353 * @tg_to_put: optional out argument for throtl_grp to put
 354 *
 355 * Pop the first bio from the qnode list @queued.  After popping, the first
 356 * qnode is removed from @queued if empty or moved to the end of @queued so
 357 * that the popping order is round-robin.
 358 *
 359 * When the first qnode is removed, its associated throtl_grp should be put
 360 * too.  If @tg_to_put is NULL, this function automatically puts it;
 361 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 362 * responsible for putting it.
 363 */
 364static struct bio *throtl_pop_queued(struct list_head *queued,
 365				     struct throtl_grp **tg_to_put)
 366{
 367	struct throtl_qnode *qn;
 368	struct bio *bio;
 369
 370	if (list_empty(queued))
 371		return NULL;
 372	qn = list_first_entry(queued, struct throtl_qnode, node);
 373	bio = bio_list_pop(&qn->bios);
 374	WARN_ON_ONCE(!bio);
 375
 376	if (bio_list_empty(&qn->bios)) {
 377		list_del_init(&qn->node);
 378		if (tg_to_put)
 379			*tg_to_put = qn->tg;
 380		else
 381			blkg_put(tg_to_blkg(qn->tg));
 382	} else {
 383		list_move_tail(&qn->node, queued);
 384	}
 385
 386	return bio;
 387}
 388
 389/* init a service_queue, assumes the caller zeroed it */
 390static void throtl_service_queue_init(struct throtl_service_queue *sq,
 391				      struct throtl_service_queue *parent_sq)
 392{
 393	INIT_LIST_HEAD(&sq->queued[0]);
 394	INIT_LIST_HEAD(&sq->queued[1]);
 395	sq->pending_tree = RB_ROOT;
 396	sq->parent_sq = parent_sq;
 397	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
 398		    (unsigned long)sq);
 399}
 400
 401static void throtl_service_queue_exit(struct throtl_service_queue *sq)
 402{
 403	del_timer_sync(&sq->pending_timer);
 404}
 405
 406static void throtl_pd_init(struct blkcg_gq *blkg)
 407{
 408	struct throtl_grp *tg = blkg_to_tg(blkg);
 409	struct throtl_data *td = blkg->q->td;
 410	struct throtl_service_queue *parent_sq;
 411	unsigned long flags;
 412	int rw;
 413
 414	/*
 415	 * If sane_hierarchy is enabled, we switch to properly hierarchical
 416	 * behavior where limits on a given throtl_grp are applied to the
 417	 * whole subtree rather than just the group itself.  e.g. If 16M
 418	 * read_bps limit is set on the root group, the whole system can't
 419	 * exceed 16M for the device.
 420	 *
 421	 * If sane_hierarchy is not enabled, the broken flat hierarchy
 422	 * behavior is retained where all throtl_grps are treated as if
 423	 * they're all separate root groups right below throtl_data.
 424	 * Limits of a group don't interact with limits of other groups
 425	 * regardless of the position of the group in the hierarchy.
 426	 */
 427	parent_sq = &td->service_queue;
 428
 429	if (cgroup_sane_behavior(blkg->blkcg->css.cgroup) && blkg->parent)
 430		parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 431
 432	throtl_service_queue_init(&tg->service_queue, parent_sq);
 433
 434	for (rw = READ; rw <= WRITE; rw++) {
 435		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
 436		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
 437	}
 438
 439	RB_CLEAR_NODE(&tg->rb_node);
 440	tg->td = td;
 441
 442	tg->bps[READ] = -1;
 443	tg->bps[WRITE] = -1;
 444	tg->iops[READ] = -1;
 445	tg->iops[WRITE] = -1;
 446
 447	/*
 448	 * Ugh... We need to perform per-cpu allocation for tg->stats_cpu
 449	 * but percpu allocator can't be called from IO path.  Queue tg on
 450	 * tg_stats_alloc_list and allocate from work item.
 451	 */
 452	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 453	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
 454	schedule_delayed_work(&tg_stats_alloc_work, 0);
 455	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 456}
 457
 458/*
 459 * Set has_rules[] if @tg or any of its parents have limits configured.
 460 * This doesn't require walking up to the top of the hierarchy as the
 461 * parent's has_rules[] is guaranteed to be correct.
 462 */
 463static void tg_update_has_rules(struct throtl_grp *tg)
 464{
 465	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
 466	int rw;
 467
 468	for (rw = READ; rw <= WRITE; rw++)
 469		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
 470				    (tg->bps[rw] != -1 || tg->iops[rw] != -1);
 471}
 472
 473static void throtl_pd_online(struct blkcg_gq *blkg)
 474{
 475	/*
 476	 * We don't want new groups to escape the limits of their ancestors.
 477	 * Update has_rules[] after a new group is brought online.
 478	 */
 479	tg_update_has_rules(blkg_to_tg(blkg));
 480}
 481
 482static void throtl_pd_exit(struct blkcg_gq *blkg)
 483{
 484	struct throtl_grp *tg = blkg_to_tg(blkg);
 485	unsigned long flags;
 486
 487	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 488	list_del_init(&tg->stats_alloc_node);
 489	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 490
 491	free_percpu(tg->stats_cpu);
 492
 493	throtl_service_queue_exit(&tg->service_queue);
 494}
 495
 496static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
 497{
 498	struct throtl_grp *tg = blkg_to_tg(blkg);
 499	int cpu;
 500
 501	if (tg->stats_cpu == NULL)
 502		return;
 503
 504	for_each_possible_cpu(cpu) {
 505		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 506
 507		blkg_rwstat_reset(&sc->service_bytes);
 508		blkg_rwstat_reset(&sc->serviced);
 509	}
 510}
 511
 512static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
 513					   struct blkcg *blkcg)
 514{
 515	/*
 516	 * This is the common case when there are no blkcgs.  Avoid lookup
 517	 * in this case
 518	 */
 519	if (blkcg == &blkcg_root)
 520		return td_root_tg(td);
 521
 522	return blkg_to_tg(blkg_lookup(blkcg, td->queue));
 523}
 524
 525static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
 526						  struct blkcg *blkcg)
 527{
 528	struct request_queue *q = td->queue;
 529	struct throtl_grp *tg = NULL;
 530
 531	/*
 532	 * This is the common case when there are no blkcgs.  Avoid lookup
 533	 * in this case
 534	 */
 535	if (blkcg == &blkcg_root) {
 536		tg = td_root_tg(td);
 537	} else {
 538		struct blkcg_gq *blkg;
 539
 540		blkg = blkg_lookup_create(blkcg, q);
 541
 542		/* if %NULL and @q is alive, fall back to root_tg */
 543		if (!IS_ERR(blkg))
 544			tg = blkg_to_tg(blkg);
 545		else if (!blk_queue_dying(q))
 546			tg = td_root_tg(td);
 547	}
 548
 549	return tg;
 550}
 551
 552static struct throtl_grp *
 553throtl_rb_first(struct throtl_service_queue *parent_sq)
 554{
 555	/* Service tree is empty */
 556	if (!parent_sq->nr_pending)
 557		return NULL;
 558
 559	if (!parent_sq->first_pending)
 560		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
 561
 562	if (parent_sq->first_pending)
 563		return rb_entry_tg(parent_sq->first_pending);
 564
 565	return NULL;
 566}
 567
 568static void rb_erase_init(struct rb_node *n, struct rb_root *root)
 569{
 570	rb_erase(n, root);
 571	RB_CLEAR_NODE(n);
 572}
 573
 574static void throtl_rb_erase(struct rb_node *n,
 575			    struct throtl_service_queue *parent_sq)
 576{
 577	if (parent_sq->first_pending == n)
 578		parent_sq->first_pending = NULL;
 579	rb_erase_init(n, &parent_sq->pending_tree);
 580	--parent_sq->nr_pending;
 581}
 582
 583static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
 584{
 585	struct throtl_grp *tg;
 586
 587	tg = throtl_rb_first(parent_sq);
 588	if (!tg)
 589		return;
 590
 591	parent_sq->first_pending_disptime = tg->disptime;
 592}
 593
 594static void tg_service_queue_add(struct throtl_grp *tg)
 595{
 596	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
 597	struct rb_node **node = &parent_sq->pending_tree.rb_node;
 598	struct rb_node *parent = NULL;
 599	struct throtl_grp *__tg;
 600	unsigned long key = tg->disptime;
 601	int left = 1;
 602
 603	while (*node != NULL) {
 604		parent = *node;
 605		__tg = rb_entry_tg(parent);
 606
 607		if (time_before(key, __tg->disptime))
 608			node = &parent->rb_left;
 609		else {
 610			node = &parent->rb_right;
 611			left = 0;
 612		}
 613	}
 614
 615	if (left)
 616		parent_sq->first_pending = &tg->rb_node;
 617
 618	rb_link_node(&tg->rb_node, parent, node);
 619	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
 620}
 621
 622static void __throtl_enqueue_tg(struct throtl_grp *tg)
 623{
 624	tg_service_queue_add(tg);
 625	tg->flags |= THROTL_TG_PENDING;
 626	tg->service_queue.parent_sq->nr_pending++;
 627}
 628
 629static void throtl_enqueue_tg(struct throtl_grp *tg)
 630{
 631	if (!(tg->flags & THROTL_TG_PENDING))
 632		__throtl_enqueue_tg(tg);
 633}
 634
 635static void __throtl_dequeue_tg(struct throtl_grp *tg)
 636{
 637	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
 638	tg->flags &= ~THROTL_TG_PENDING;
 639}
 640
 641static void throtl_dequeue_tg(struct throtl_grp *tg)
 642{
 643	if (tg->flags & THROTL_TG_PENDING)
 644		__throtl_dequeue_tg(tg);
 645}
 646
 647/* Call with queue lock held */
 648static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 649					  unsigned long expires)
 650{
 651	mod_timer(&sq->pending_timer, expires);
 652	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
 653		   expires - jiffies, jiffies);
 654}
 655
 656/**
 657 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 658 * @sq: the service_queue to schedule dispatch for
 659 * @force: force scheduling
 660 *
 661 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 662 * dispatch time of the first pending child.  Returns %true if either timer
 663 * is armed or there's no pending child left.  %false if the current
 664 * dispatch window is still open and the caller should continue
 665 * dispatching.
 666 *
 667 * If @force is %true, the dispatch timer is always scheduled and this
 668 * function is guaranteed to return %true.  This is to be used when the
 669 * caller can't dispatch itself and needs to invoke pending_timer
 670 * unconditionally.  Note that forced scheduling is likely to induce short
 671 * delay before dispatch starts even if @sq->first_pending_disptime is not
 672 * in the future and thus shouldn't be used in hot paths.
 673 */
 674static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
 675					  bool force)
 676{
 677	/* any pending children left? */
 678	if (!sq->nr_pending)
 679		return true;
 680
 681	update_min_dispatch_time(sq);
 682
 683	/* is the next dispatch time in the future? */
 684	if (force || time_after(sq->first_pending_disptime, jiffies)) {
 685		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
 686		return true;
 687	}
 688
 689	/* tell the caller to continue dispatching */
 690	return false;
 691}
 692
 693static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
 694		bool rw, unsigned long start)
 695{
 696	tg->bytes_disp[rw] = 0;
 697	tg->io_disp[rw] = 0;
 698
 699	/*
 700	 * Previous slice has expired. We must have trimmed it after last
 701	 * bio dispatch. That means since start of last slice, we never used
 702	 * that bandwidth. Do try to make use of that bandwidth while giving
 703	 * credit.
 704	 */
 705	if (time_after_eq(start, tg->slice_start[rw]))
 706		tg->slice_start[rw] = start;
 707
 708	tg->slice_end[rw] = jiffies + throtl_slice;
 709	throtl_log(&tg->service_queue,
 710		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
 711		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 712		   tg->slice_end[rw], jiffies);
 713}
 714
 715static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
 716{
 717	tg->bytes_disp[rw] = 0;
 718	tg->io_disp[rw] = 0;
 719	tg->slice_start[rw] = jiffies;
 720	tg->slice_end[rw] = jiffies + throtl_slice;
 721	throtl_log(&tg->service_queue,
 722		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
 723		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 724		   tg->slice_end[rw], jiffies);
 725}
 726
 727static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
 728					unsigned long jiffy_end)
 729{
 730	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 731}
 732
 733static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
 734				       unsigned long jiffy_end)
 735{
 736	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 737	throtl_log(&tg->service_queue,
 738		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
 739		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 740		   tg->slice_end[rw], jiffies);
 741}
 742
 743/* Determine if previously allocated or extended slice is complete or not */
 744static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
 745{
 746	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
 747		return 0;
 748
 749	return 1;
 750}
 751
 752/* Trim the used slices and adjust slice start accordingly */
 753static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 754{
 755	unsigned long nr_slices, time_elapsed, io_trim;
 756	u64 bytes_trim, tmp;
 757
 758	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
 759
 760	/*
 761	 * If bps are unlimited (-1), the time slice doesn't get
 762	 * renewed. Don't try to trim the slice if it has already
 763	 * expired. A new slice will start when appropriate.
 764	 */
 765	if (throtl_slice_used(tg, rw))
 766		return;
 767
 768	/*
 769	 * A bio has been dispatched. Also adjust slice_end. It might happen
 770	 * that initially cgroup limit was very low resulting in high
 771	 * slice_end, but later the limit was bumped up and the bio was dispatched
 772	 * sooner, then we need to reduce slice_end. A high bogus slice_end
 773	 * is bad because it does not allow new slice to start.
 774	 */
 775
 776	throtl_set_slice_end(tg, rw, jiffies + throtl_slice);
 777
 778	time_elapsed = jiffies - tg->slice_start[rw];
 779
 780	nr_slices = time_elapsed / throtl_slice;
 781
 782	if (!nr_slices)
 783		return;
 784	tmp = tg->bps[rw] * throtl_slice * nr_slices;
 785	do_div(tmp, HZ);
 786	bytes_trim = tmp;
 787
 788	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
 789
 790	if (!bytes_trim && !io_trim)
 791		return;
 792
 793	if (tg->bytes_disp[rw] >= bytes_trim)
 794		tg->bytes_disp[rw] -= bytes_trim;
 795	else
 796		tg->bytes_disp[rw] = 0;
 797
 798	if (tg->io_disp[rw] >= io_trim)
 799		tg->io_disp[rw] -= io_trim;
 800	else
 801		tg->io_disp[rw] = 0;
 802
 803	tg->slice_start[rw] += nr_slices * throtl_slice;
 804
 805	throtl_log(&tg->service_queue,
 806		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
 807		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
 808		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
 809}
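
A worked example of the trim arithmetic above, assuming HZ == 1000 and the
100 ms throtl_slice (the limit and elapsed time are made-up values):

	#include <stdio.h>

	int main(void)
	{
		unsigned long hz = 1000, slice = 100;		/* jiffies */
		unsigned long long bps = 1048576;		/* 1 MiB/s */
		unsigned long elapsed = 250;		/* since slice_start */
		unsigned long nr_slices = elapsed / slice;	/* 2 slices */
		unsigned long long bytes_trim = bps * slice * nr_slices / hz;

		/* ~209715 bytes of consumed budget are forgiven and
		 * slice_start advances by nr_slices * slice = 200 jiffies */
		printf("nr_slices=%lu bytes_trim=%llu\n", nr_slices, bytes_trim);
		return 0;
	}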
 810
 811static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
 812				  unsigned long *wait)
 813{
 814	bool rw = bio_data_dir(bio);
 815	unsigned int io_allowed;
 816	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 817	u64 tmp;
 818
 819	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 820
 821	/* Slice has just started. Consider one slice interval */
 822	if (!jiffy_elapsed)
 823		jiffy_elapsed_rnd = throtl_slice;
 824
 825	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 826
 827	/*
 828	 * jiffy_elapsed_rnd should not be a big value: the minimum iops is
 829	 * 1, so at most jiffy_elapsed_rnd is the equivalent of 1 second, as
 830	 * we will allow dispatch after 1 second and after that the slice
 831	 * should have been trimmed.
 832	 */
 833
 834	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
 835	do_div(tmp, HZ);
 836
 837	if (tmp > UINT_MAX)
 838		io_allowed = UINT_MAX;
 839	else
 840		io_allowed = tmp;
 841
 842	if (tg->io_disp[rw] + 1 <= io_allowed) {
 843		if (wait)
 844			*wait = 0;
 845		return 1;
 846	}
 847
 848	/* Calc approx time to dispatch */
 849	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
 850
 851	if (jiffy_wait > jiffy_elapsed)
 852		jiffy_wait = jiffy_wait - jiffy_elapsed;
 853	else
 854		jiffy_wait = 1;
 855
 856	if (wait)
 857		*wait = jiffy_wait;
 858	return 0;
 859}
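
A worked example of the iops arithmetic above, again with illustrative values
(HZ == 1000, a 100 iops limit, one rounded 100 ms slice elapsed, and 10 ios
already dispatched, so the 11th must wait):

	#include <stdio.h>

	int main(void)
	{
		unsigned long hz = 1000, iops = 100;
		unsigned long jiffy_elapsed_rnd = 100;	/* one 100 ms slice */
		unsigned long long io_allowed =
			(unsigned long long)iops * jiffy_elapsed_rnd / hz; /* 10 */
		unsigned long io_disp = 10;	/* dispatched in this slice */
		/* slot of the (io_disp + 1)-th io minus time already elapsed */
		unsigned long jiffy_wait =
			(io_disp + 1) * hz / iops + 1 - jiffy_elapsed_rnd; /* 11 */

		printf("io_allowed=%llu jiffy_wait=%lu\n", io_allowed, jiffy_wait);
		return 0;
	}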
 860
 861static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 862				 unsigned long *wait)
 863{
 864	bool rw = bio_data_dir(bio);
 865	u64 bytes_allowed, extra_bytes, tmp;
 866	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 867
 868	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 869
 870	/* Slice has just started. Consider one slice interval */
 871	if (!jiffy_elapsed)
 872		jiffy_elapsed_rnd = throtl_slice;
 873
 874	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 875
 876	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
 877	do_div(tmp, HZ);
 878	bytes_allowed = tmp;
 879
 880	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
 881		if (wait)
 882			*wait = 0;
 883		return 1;
 884	}
 885
 886	/* Calc approx time to dispatch */
 887	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
 888	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
 889
 890	if (!jiffy_wait)
 891		jiffy_wait = 1;
 892
 893	/*
 894	 * This wait time doesn't take into consideration the rounding
 895	 * up we did. Add that time also.
 896	 */
 897	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
 898	if (wait)
 899		*wait = jiffy_wait;
 900	return 0;
 901}
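
And the corresponding worked example for the bps path above (HZ == 1000; the
dispatched byte count and bio size are illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long hz = 1000;
		unsigned long long bps = 1048576;	/* 1 MiB/s limit */
		unsigned long jiffy_elapsed_rnd = 100;	/* one 100 ms slice */
		unsigned long long bytes_allowed = bps * jiffy_elapsed_rnd / hz;
		unsigned long long bytes_disp = 100000, bio_size = 16384;
		unsigned long long extra_bytes =
			bytes_disp + bio_size - bytes_allowed;	/* 11527 */
		unsigned long jiffy_wait = extra_bytes * hz / bps;

		if (!jiffy_wait)
			jiffy_wait = 1;
		/* 104857 bytes allowed; the 16 KiB bio waits ~10 jiffies */
		printf("bytes_allowed=%llu jiffy_wait=%lu\n",
		       bytes_allowed, jiffy_wait);
		return 0;
	}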
 902
 903/*
 904 * Returns whether one can dispatch a bio or not. Also returns approx number
 905 * of jiffies to wait before this bio is within the IO rate and can be dispatched
 906 */
 907static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 908			    unsigned long *wait)
 909{
 910	bool rw = bio_data_dir(bio);
 911	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
 912
 913	/*
 914	 * Currently the whole state machine of the group depends on the first
 915	 * bio queued in the group's bio list. So one should not be calling
 916	 * this function with a different bio if there are other bios
 917	 * queued.
 918	 */
 919	BUG_ON(tg->service_queue.nr_queued[rw] &&
 920	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
 921
 922	/* If tg->bps = -1, then BW is unlimited */
 923	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
 924		if (wait)
 925			*wait = 0;
 926		return 1;
 927	}
 928
 929	/*
 930	 * If previous slice expired, start a new one otherwise renew/extend
 931	 * existing slice to make sure it is at least throtl_slice interval
 932	 * long since now.
 933	 */
 934	if (throtl_slice_used(tg, rw))
 935		throtl_start_new_slice(tg, rw);
 936	else {
 937		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
 938			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
 939	}
 940
 941	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
 942	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
 943		if (wait)
 944			*wait = 0;
 945		return 1;
 946	}
 947
 948	max_wait = max(bps_wait, iops_wait);
 949
 950	if (wait)
 951		*wait = max_wait;
 952
 953	if (time_before(tg->slice_end[rw], jiffies + max_wait))
 954		throtl_extend_slice(tg, rw, jiffies + max_wait);
 955
 956	return 0;
 957}
 958
 959static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
 960					 int rw)
 961{
 962	struct throtl_grp *tg = blkg_to_tg(blkg);
 963	struct tg_stats_cpu *stats_cpu;
 964	unsigned long flags;
 965
 966	/* If per cpu stats are not allocated yet, don't do any accounting. */
 967	if (tg->stats_cpu == NULL)
 968		return;
 969
 970	/*
 971	 * Disabling interrupts to provide mutual exclusion between two
 972	 * writes on same cpu. It probably is not needed for 64bit. Not
 973	 * optimizing that case yet.
 974	 */
 975	local_irq_save(flags);
 976
 977	stats_cpu = this_cpu_ptr(tg->stats_cpu);
 978
 979	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
 980	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
 981
 982	local_irq_restore(flags);
 983}
 984
 985static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 986{
 987	bool rw = bio_data_dir(bio);
 988
 989	/* Charge the bio to the group */
 990	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
 991	tg->io_disp[rw]++;
 992
 993	/*
 994	 * REQ_THROTTLED is used to prevent the same bio from being throttled
 995	 * more than once, as a throttled bio will go through blk-throtl the
 996	 * second time when it eventually gets issued.  Set it when a bio
 997	 * is being charged to a tg.
 998	 *
 999	 * Dispatch stats aren't recursive and each @bio should only be
1000	 * accounted by the @tg it was originally associated with.  Let's
1001	 * update the stats when setting REQ_THROTTLED for the first time
1002	 * which is guaranteed to be for the @bio's original tg.
1003	 */
1004	if (!(bio->bi_rw & REQ_THROTTLED)) {
1005		bio->bi_rw |= REQ_THROTTLED;
1006		throtl_update_dispatch_stats(tg_to_blkg(tg),
1007					     bio->bi_iter.bi_size, bio->bi_rw);
1008	}
1009}
1010
1011/**
1012 * throtl_add_bio_tg - add a bio to the specified throtl_grp
1013 * @bio: bio to add
1014 * @qn: qnode to use
1015 * @tg: the target throtl_grp
1016 *
1017 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
1018 * tg->qnode_on_self[] is used.
1019 */
1020static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
1021			      struct throtl_grp *tg)
1022{
1023	struct throtl_service_queue *sq = &tg->service_queue;
1024	bool rw = bio_data_dir(bio);
1025
1026	if (!qn)
1027		qn = &tg->qnode_on_self[rw];
1028
1029	/*
1030	 * If @tg doesn't currently have any bios queued in the same
1031	 * direction, queueing @bio can change when @tg should be
1032	 * dispatched.  Mark that @tg was empty.  This is automatically
1033	 * cleared on the next tg_update_disptime().
1034	 */
1035	if (!sq->nr_queued[rw])
1036		tg->flags |= THROTL_TG_WAS_EMPTY;
1037
1038	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1039
1040	sq->nr_queued[rw]++;
1041	throtl_enqueue_tg(tg);
1042}
1043
1044static void tg_update_disptime(struct throtl_grp *tg)
1045{
1046	struct throtl_service_queue *sq = &tg->service_queue;
1047	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1048	struct bio *bio;
1049
1050	if ((bio = throtl_peek_queued(&sq->queued[READ])))
1051		tg_may_dispatch(tg, bio, &read_wait);
1052
1053	if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
1054		tg_may_dispatch(tg, bio, &write_wait);
1055
1056	min_wait = min(read_wait, write_wait);
1057	disptime = jiffies + min_wait;
1058
1059	/* Update dispatch time */
1060	throtl_dequeue_tg(tg);
1061	tg->disptime = disptime;
1062	throtl_enqueue_tg(tg);
1063
1064	/* see throtl_add_bio_tg() */
1065	tg->flags &= ~THROTL_TG_WAS_EMPTY;
1066}
1067
1068static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1069					struct throtl_grp *parent_tg, bool rw)
1070{
1071	if (throtl_slice_used(parent_tg, rw)) {
1072		throtl_start_new_slice_with_credit(parent_tg, rw,
1073				child_tg->slice_start[rw]);
1074	}
1075
1076}
1077
1078static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1079{
1080	struct throtl_service_queue *sq = &tg->service_queue;
1081	struct throtl_service_queue *parent_sq = sq->parent_sq;
1082	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1083	struct throtl_grp *tg_to_put = NULL;
1084	struct bio *bio;
1085
1086	/*
1087	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1088	 * from @tg may put its reference and @parent_sq might end up
1089	 * getting released prematurely.  Remember the tg to put and put it
1090	 * after @bio is transferred to @parent_sq.
1091	 */
1092	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1093	sq->nr_queued[rw]--;
1094
1095	throtl_charge_bio(tg, bio);
1096
1097	/*
1098	 * If our parent is another tg, we just need to transfer @bio to
1099	 * the parent using throtl_add_bio_tg().  If our parent is
1100	 * @td->service_queue, @bio is ready to be issued.  Put it on its
1101	 * bio_lists[] and decrease total number queued.  The caller is
1102	 * responsible for issuing these bios.
1103	 */
1104	if (parent_tg) {
1105		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1106		start_parent_slice_with_credit(tg, parent_tg, rw);
1107	} else {
1108		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1109				     &parent_sq->queued[rw]);
1110		BUG_ON(tg->td->nr_queued[rw] <= 0);
1111		tg->td->nr_queued[rw]--;
1112	}
1113
1114	throtl_trim_slice(tg, rw);
1115
1116	if (tg_to_put)
1117		blkg_put(tg_to_blkg(tg_to_put));
1118}
1119
1120static int throtl_dispatch_tg(struct throtl_grp *tg)
1121{
1122	struct throtl_service_queue *sq = &tg->service_queue;
1123	unsigned int nr_reads = 0, nr_writes = 0;
1124	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
1125	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
1126	struct bio *bio;
1127
1128	/* Try to dispatch 75% READS and 25% WRITES */
1129
1130	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1131	       tg_may_dispatch(tg, bio, NULL)) {
1132
1133		tg_dispatch_one_bio(tg, bio_data_dir(bio));
1134		nr_reads++;
1135
1136		if (nr_reads >= max_nr_reads)
1137			break;
1138	}
1139
1140	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1141	       tg_may_dispatch(tg, bio, NULL)) {
1142
1143		tg_dispatch_one_bio(tg, bio_data_dir(bio));
1144		nr_writes++;
1145
1146		if (nr_writes >= max_nr_writes)
1147			break;
1148	}
1149
1150	return nr_reads + nr_writes;
1151}
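
With throtl_grp_quantum at its default of 8, the 75/25 split above comes out
to at most 6 reads and 2 writes per round; a quick check of the arithmetic:

	#include <assert.h>

	int main(void)
	{
		unsigned int quantum = 8;	/* throtl_grp_quantum */
		unsigned int max_nr_reads = quantum * 3 / 4;
		unsigned int max_nr_writes = quantum - max_nr_reads;

		assert(max_nr_reads == 6 && max_nr_writes == 2);
		return 0;
	}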
1152
1153static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1154{
1155	unsigned int nr_disp = 0;
1156
1157	while (1) {
1158		struct throtl_grp *tg = throtl_rb_first(parent_sq);
1159		struct throtl_service_queue *sq;
1160
1161		if (!tg)
1162			break;
1163		sq = &tg->service_queue;
1164		if (time_before(jiffies, tg->disptime))
1165			break;
1166
1167		throtl_dequeue_tg(tg);
1168
1169		nr_disp += throtl_dispatch_tg(tg);
1170
1171		if (sq->nr_queued[0] || sq->nr_queued[1])
1172			tg_update_disptime(tg);
1173
1174		if (nr_disp >= throtl_quantum)
1175			break;
1176	}
1177
1178	return nr_disp;
1179}
1180
1181/**
1182 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1183 * @arg: the throtl_service_queue being serviced
1184 *
1185 * This timer is armed when a child throtl_grp with active bio's becomes
1186 * pending and queued on the service_queue's pending_tree and expires when
1187 * the first child throtl_grp should be dispatched.  This function
1188 * dispatches bio's from the children throtl_grps to the parent
1189 * service_queue.
1190 *
1191 * If the parent's parent is another throtl_grp, dispatching is propagated
1192 * by either arming its pending_timer or repeating dispatch directly.  If
1193 * the top-level service_tree is reached, throtl_data->dispatch_work is
1194 * kicked so that the ready bio's are issued.
1195 */
1196static void throtl_pending_timer_fn(unsigned long arg)
1197{
1198	struct throtl_service_queue *sq = (void *)arg;
1199	struct throtl_grp *tg = sq_to_tg(sq);
1200	struct throtl_data *td = sq_to_td(sq);
1201	struct request_queue *q = td->queue;
1202	struct throtl_service_queue *parent_sq;
1203	bool dispatched;
1204	int ret;
1205
1206	spin_lock_irq(q->queue_lock);
1207again:
1208	parent_sq = sq->parent_sq;
1209	dispatched = false;
1210
1211	while (true) {
1212		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1213			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
1214			   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1215
1216		ret = throtl_select_dispatch(sq);
1217		if (ret) {
1218			throtl_log(sq, "bios disp=%u", ret);
1219			dispatched = true;
1220		}
1221
1222		if (throtl_schedule_next_dispatch(sq, false))
1223			break;
1224
1225		/* this dispatch window is still open, relax and repeat */
1226		spin_unlock_irq(q->queue_lock);
1227		cpu_relax();
1228		spin_lock_irq(q->queue_lock);
1229	}
1230
1231	if (!dispatched)
1232		goto out_unlock;
1233
1234	if (parent_sq) {
1235		/* @parent_sq is another throtl_grp, propagate dispatch */
1236		if (tg->flags & THROTL_TG_WAS_EMPTY) {
1237			tg_update_disptime(tg);
1238			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1239				/* window is already open, repeat dispatching */
1240				sq = parent_sq;
1241				tg = sq_to_tg(sq);
1242				goto again;
1243			}
1244		}
1245	} else {
1246		/* reached the top-level, queue issuing */
1247		queue_work(kthrotld_workqueue, &td->dispatch_work);
1248	}
1249out_unlock:
1250	spin_unlock_irq(q->queue_lock);
1251}
1252
1253/**
1254 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1255 * @work: work item being executed
1256 *
1257 * This function is queued for execution when bio's reach the bio_lists[]
1258 * of throtl_data->service_queue.  Those bio's are ready and issued by this
1259 * function.
1260 */
1261void blk_throtl_dispatch_work_fn(struct work_struct *work)
1262{
1263	struct throtl_data *td = container_of(work, struct throtl_data,
1264					      dispatch_work);
1265	struct throtl_service_queue *td_sq = &td->service_queue;
1266	struct request_queue *q = td->queue;
1267	struct bio_list bio_list_on_stack;
1268	struct bio *bio;
1269	struct blk_plug plug;
1270	int rw;
1271
1272	bio_list_init(&bio_list_on_stack);
1273
1274	spin_lock_irq(q->queue_lock);
1275	for (rw = READ; rw <= WRITE; rw++)
1276		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1277			bio_list_add(&bio_list_on_stack, bio);
1278	spin_unlock_irq(q->queue_lock);
1279
1280	if (!bio_list_empty(&bio_list_on_stack)) {
1281		blk_start_plug(&plug);
1282		while ((bio = bio_list_pop(&bio_list_on_stack)))
1283			generic_make_request(bio);
1284		blk_finish_plug(&plug);
1285	}
1286}
1287
1288static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
1289				struct blkg_policy_data *pd, int off)
1290{
1291	struct throtl_grp *tg = pd_to_tg(pd);
1292	struct blkg_rwstat rwstat = { }, tmp;
1293	int i, cpu;
1294
1295	for_each_possible_cpu(cpu) {
1296		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
1297
1298		tmp = blkg_rwstat_read((void *)sc + off);
1299		for (i = 0; i < BLKG_RWSTAT_NR; i++)
1300			rwstat.cnt[i] += tmp.cnt[i];
1301	}
1302
1303	return __blkg_prfill_rwstat(sf, pd, &rwstat);
1304}
1305
1306static int tg_print_cpu_rwstat(struct seq_file *sf, void *v)
1307{
1308	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_cpu_rwstat,
1309			  &blkcg_policy_throtl, seq_cft(sf)->private, true);
1310	return 0;
1311}
1312
1313static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1314			      int off)
1315{
1316	struct throtl_grp *tg = pd_to_tg(pd);
1317	u64 v = *(u64 *)((void *)tg + off);
1318
1319	if (v == -1)
1320		return 0;
1321	return __blkg_prfill_u64(sf, pd, v);
1322}
1323
1324static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1325			       int off)
1326{
1327	struct throtl_grp *tg = pd_to_tg(pd);
1328	unsigned int v = *(unsigned int *)((void *)tg + off);
1329
1330	if (v == -1)
1331		return 0;
1332	return __blkg_prfill_u64(sf, pd, v);
1333}
1334
1335static int tg_print_conf_u64(struct seq_file *sf, void *v)
1336{
1337	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1338			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1339	return 0;
1340}
1341
1342static int tg_print_conf_uint(struct seq_file *sf, void *v)
1343{
1344	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1345			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1346	return 0;
1347}
1348
1349static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
1350		       const char *buf, bool is_u64)
1351{
1352	struct blkcg *blkcg = css_to_blkcg(css);
1353	struct blkg_conf_ctx ctx;
1354	struct throtl_grp *tg;
1355	struct throtl_service_queue *sq;
1356	struct blkcg_gq *blkg;
1357	struct cgroup_subsys_state *pos_css;
1358	int ret;
1359
1360	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1361	if (ret)
1362		return ret;
1363
1364	tg = blkg_to_tg(ctx.blkg);
1365	sq = &tg->service_queue;
1366
1367	if (!ctx.v)
1368		ctx.v = -1;
1369
1370	if (is_u64)
1371		*(u64 *)((void *)tg + cft->private) = ctx.v;
1372	else
1373		*(unsigned int *)((void *)tg + cft->private) = ctx.v;
1374
1375	throtl_log(&tg->service_queue,
1376		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1377		   tg->bps[READ], tg->bps[WRITE],
1378		   tg->iops[READ], tg->iops[WRITE]);
1379
1380	/*
1381	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
1382	 * considered to have rules if either the tg itself or any of its
1383	 * ancestors has rules.  This identifies groups without any
1384	 * restrictions in the whole hierarchy and allows them to bypass
1385	 * blk-throttle.
1386	 */
1387	blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
1388		tg_update_has_rules(blkg_to_tg(blkg));
1389
1390	/*
1391	 * We're already holding queue_lock and know @tg is valid.  Let's
1392	 * apply the new config directly.
1393	 *
1394	 * Restart the slices for both READ and WRITES. It might happen
1395	 * that a group's limits are dropped suddenly and we don't want to
1396	 * account recently dispatched IO with new low rate.
1397	 */
1398	throtl_start_new_slice(tg, 0);
1399	throtl_start_new_slice(tg, 1);
1400
1401	if (tg->flags & THROTL_TG_PENDING) {
1402		tg_update_disptime(tg);
1403		throtl_schedule_next_dispatch(sq->parent_sq, true);
1404	}
1405
1406	blkg_conf_finish(&ctx);
1407	return 0;
1408}
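
The cftype table below wires these setters to the blkio cgroup control files.
A user-space sketch of driving tg_set_conf_u64() through one of them -- the
cgroup v1 mount point and the 8:0 device number are assumptions for
illustration:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/fs/cgroup/blkio/grp1/"
				"blkio.throttle.read_bps_device", "w");

		if (!f)
			return 1;
		/* limit READs on device 8:0 to 1 MiB/s; per tg_set_conf(),
		 * writing "8:0 0" would clear the limit again */
		fprintf(f, "8:0 1048576\n");
		return fclose(f) ? 1 : 0;
	}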
1409
1410static int tg_set_conf_u64(struct cgroup_subsys_state *css, struct cftype *cft,
1411			   char *buf)
1412{
1413	return tg_set_conf(css, cft, buf, true);
1414}
1415
1416static int tg_set_conf_uint(struct cgroup_subsys_state *css, struct cftype *cft,
1417			    char *buf)
1418{
1419	return tg_set_conf(css, cft, buf, false);
1420}
1421
1422static struct cftype throtl_files[] = {
1423	{
1424		.name = "throttle.read_bps_device",
1425		.private = offsetof(struct throtl_grp, bps[READ]),
1426		.seq_show = tg_print_conf_u64,
1427		.write_string = tg_set_conf_u64,
1428	},
1429	{
1430		.name = "throttle.write_bps_device",
1431		.private = offsetof(struct throtl_grp, bps[WRITE]),
1432		.seq_show = tg_print_conf_u64,
1433		.write_string = tg_set_conf_u64,
1434	},
1435	{
1436		.name = "throttle.read_iops_device",
1437		.private = offsetof(struct throtl_grp, iops[READ]),
1438		.seq_show = tg_print_conf_uint,
1439		.write_string = tg_set_conf_uint,
1440	},
1441	{
1442		.name = "throttle.write_iops_device",
1443		.private = offsetof(struct throtl_grp, iops[WRITE]),
1444		.seq_show = tg_print_conf_uint,
1445		.write_string = tg_set_conf_uint,
1446	},
1447	{
1448		.name = "throttle.io_service_bytes",
1449		.private = offsetof(struct tg_stats_cpu, service_bytes),
1450		.seq_show = tg_print_cpu_rwstat,
1451	},
1452	{
1453		.name = "throttle.io_serviced",
1454		.private = offsetof(struct tg_stats_cpu, serviced),
1455		.seq_show = tg_print_cpu_rwstat,
1456	},
1457	{ }	/* terminate */
1458};
1459
1460static void throtl_shutdown_wq(struct request_queue *q)
1461{
1462	struct throtl_data *td = q->td;
1463
1464	cancel_work_sync(&td->dispatch_work);
1465}
1466
1467static struct blkcg_policy blkcg_policy_throtl = {
1468	.pd_size		= sizeof(struct throtl_grp),
1469	.cftypes		= throtl_files,
1470
1471	.pd_init_fn		= throtl_pd_init,
1472	.pd_online_fn		= throtl_pd_online,
1473	.pd_exit_fn		= throtl_pd_exit,
1474	.pd_reset_stats_fn	= throtl_pd_reset_stats,
1475};
1476
1477bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
1478{
1479	struct throtl_data *td = q->td;
1480	struct throtl_qnode *qn = NULL;
1481	struct throtl_grp *tg;
1482	struct throtl_service_queue *sq;
1483	bool rw = bio_data_dir(bio);
1484	struct blkcg *blkcg;
1485	bool throttled = false;
1486
1487	/* see throtl_charge_bio() */
1488	if (bio->bi_rw & REQ_THROTTLED)
1489		goto out;
1490
1491	/*
1492	 * A throtl_grp pointer retrieved under rcu can be used to access
1493	 * basic fields like stats and io rates. If a group has no rules,
1494	 * just update the dispatch stats in lockless manner and return.
1495	 */
1496	rcu_read_lock();
1497	blkcg = bio_blkcg(bio);
1498	tg = throtl_lookup_tg(td, blkcg);
1499	if (tg) {
1500		if (!tg->has_rules[rw]) {
1501			throtl_update_dispatch_stats(tg_to_blkg(tg),
1502					bio->bi_iter.bi_size, bio->bi_rw);
1503			goto out_unlock_rcu;
1504		}
1505	}
1506
1507	/*
1508	 * Either group has not been allocated yet or it is not an unlimited
1509	 * IO group
1510	 */
1511	spin_lock_irq(q->queue_lock);
1512	tg = throtl_lookup_create_tg(td, blkcg);
1513	if (unlikely(!tg))
1514		goto out_unlock;
1515
1516	sq = &tg->service_queue;
1517
1518	while (true) {
1519		/* throtl is FIFO - if bios are already queued, should queue */
1520		if (sq->nr_queued[rw])
1521			break;
1522
1523		/* if above limits, break to queue */
1524		if (!tg_may_dispatch(tg, bio, NULL))
1525			break;
1526
1527		/* within limits, let's charge and dispatch directly */
1528		throtl_charge_bio(tg, bio);
1529
1530		/*
1531		 * We need to trim slice even when bios are not being queued
1532		 * otherwise it might happen that a bio is not queued for
1533		 * a long time and slice keeps on extending and trim is not
1534		 * called for a long time. Now if limits are reduced suddenly
1535		 * we take into account all the IO dispatched so far at new
1536		 * low rate and newly queued IO gets a really long dispatch
1537		 * time.
1538		 *
1539		 * So keep on trimming slice even if bio is not queued.
1540		 */
1541		throtl_trim_slice(tg, rw);
1542
1543		/*
1544		 * @bio passed through this layer without being throttled.
1545		 * Climb up the ladder.  If we're already at the top, it
1546		 * can be executed directly.
1547		 */
1548		qn = &tg->qnode_on_parent[rw];
1549		sq = sq->parent_sq;
1550		tg = sq_to_tg(sq);
1551		if (!tg)
1552			goto out_unlock;
1553	}
1554
1555	/* out-of-limit, queue to @tg */
1556	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1557		   rw == READ ? 'R' : 'W',
1558		   tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
1559		   tg->io_disp[rw], tg->iops[rw],
1560		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1561
1562	bio_associate_current(bio);
1563	tg->td->nr_queued[rw]++;
1564	throtl_add_bio_tg(bio, qn, tg);
1565	throttled = true;
1566
1567	/*
1568	 * Update @tg's dispatch time and force schedule dispatch if @tg
1569	 * was empty before @bio.  The forced scheduling isn't likely to
1570	 * cause undue delay as @bio is likely to be dispatched directly if
1571	 * its @tg's disptime is not in the future.
1572	 */
1573	if (tg->flags & THROTL_TG_WAS_EMPTY) {
1574		tg_update_disptime(tg);
1575		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
1576	}
1577
1578out_unlock:
1579	spin_unlock_irq(q->queue_lock);
1580out_unlock_rcu:
1581	rcu_read_unlock();
1582out:
1583	/*
1584	 * As multiple blk-throtls may stack in the same issue path, we
1585	 * don't want bios to leave with the flag set.  Clear the flag if
1586	 * being issued.
1587	 */
1588	if (!throttled)
1589		bio->bi_rw &= ~REQ_THROTTLED;
1590	return throttled;
1591}
1592
1593/*
1594 * Dispatch all bios from all children tg's queued on @parent_sq.  On
1595 * return, @parent_sq is guaranteed to not have any active children tg's
1596 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
1597 */
1598static void tg_drain_bios(struct throtl_service_queue *parent_sq)
1599{
1600	struct throtl_grp *tg;
1601
1602	while ((tg = throtl_rb_first(parent_sq))) {
1603		struct throtl_service_queue *sq = &tg->service_queue;
1604		struct bio *bio;
1605
1606		throtl_dequeue_tg(tg);
1607
1608		while ((bio = throtl_peek_queued(&sq->queued[READ])))
1609			tg_dispatch_one_bio(tg, bio_data_dir(bio));
1610		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
1611			tg_dispatch_one_bio(tg, bio_data_dir(bio));
1612	}
1613}
1614
1615/**
1616 * blk_throtl_drain - drain throttled bios
1617 * @q: request_queue to drain throttled bios for
1618 *
1619 * Dispatch all currently throttled bios on @q through ->make_request_fn().
1620 */
1621void blk_throtl_drain(struct request_queue *q)
1622	__releases(q->queue_lock) __acquires(q->queue_lock)
1623{
1624	struct throtl_data *td = q->td;
1625	struct blkcg_gq *blkg;
1626	struct cgroup_subsys_state *pos_css;
1627	struct bio *bio;
1628	int rw;
1629
1630	queue_lockdep_assert_held(q);
1631	rcu_read_lock();
1632
1633	/*
1634	 * Drain each tg while doing post-order walk on the blkg tree, so
1635	 * that all bios are propagated to td->service_queue.  It'd be
1636	 * better to walk service_queue tree directly but blkg walk is
1637	 * easier.
1638	 */
1639	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
1640		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
1641
1642	/* finally, transfer bios from top-level tg's into the td */
1643	tg_drain_bios(&td->service_queue);
1644
1645	rcu_read_unlock();
1646	spin_unlock_irq(q->queue_lock);
1647
1648	/* all bios now should be in td->service_queue, issue them */
1649	for (rw = READ; rw <= WRITE; rw++)
1650		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
1651						NULL)))
1652			generic_make_request(bio);
1653
1654	spin_lock_irq(q->queue_lock);
1655}
1656
1657int blk_throtl_init(struct request_queue *q)
1658{
1659	struct throtl_data *td;
1660	int ret;
1661
1662	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1663	if (!td)
1664		return -ENOMEM;
1665
1666	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
1667	throtl_service_queue_init(&td->service_queue, NULL);
1668
1669	q->td = td;
1670	td->queue = q;
1671
1672	/* activate policy */
1673	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
1674	if (ret)
1675		kfree(td);
1676	return ret;
1677}
1678
1679void blk_throtl_exit(struct request_queue *q)
1680{
1681	BUG_ON(!q->td);
1682	throtl_shutdown_wq(q);
1683	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
1684	kfree(q->td);
1685}
1686
1687static int __init throtl_init(void)
1688{
1689	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1690	if (!kthrotld_workqueue)
1691		panic("Failed to create kthrotld\n");
1692
1693	return blkcg_policy_register(&blkcg_policy_throtl);
1694}
1695
1696module_init(throtl_init);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Interface for controlling IO bandwidth on a request queue
   4 *
   5 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
   6 */
   7
   8#include <linux/module.h>
   9#include <linux/slab.h>
  10#include <linux/blkdev.h>
  11#include <linux/bio.h>
  12#include <linux/blktrace_api.h>
  13#include "blk.h"
  14#include "blk-cgroup-rwstat.h"
  15#include "blk-stat.h"
  16#include "blk-throttle.h"
  17
  18/* Max dispatch from a group in 1 round */
  19#define THROTL_GRP_QUANTUM 8
  20
  21/* Total max dispatch from all groups in one round */
  22#define THROTL_QUANTUM 32
  23
  24/* Throttling is performed over a slice and after that slice is renewed */
  25#define DFL_THROTL_SLICE_HD (HZ / 10)
  26#define DFL_THROTL_SLICE_SSD (HZ / 50)
  27#define MAX_THROTL_SLICE (HZ)
  28
  29/* A workqueue to queue throttle related work */
  30static struct workqueue_struct *kthrotld_workqueue;
  31
  32#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
  33
  34struct throtl_data
  35{
  36	/* service tree for active throtl groups */
  37	struct throtl_service_queue service_queue;
  38
  39	struct request_queue *queue;
  40
  41	/* Total Number of queued bios on READ and WRITE lists */
  42	unsigned int nr_queued[2];
  43
  44	unsigned int throtl_slice;
  45
  46	/* Work for dispatching throttled bios */
  47	struct work_struct dispatch_work;
  48
  49	bool track_bio_latency;
  50};
  51
  52static void throtl_pending_timer_fn(struct timer_list *t);
  53
  54static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
  55{
  56	return pd_to_blkg(&tg->pd);
  57}
  58
  59/**
  60 * sq_to_tg - return the throtl_grp the specified service queue belongs to
  61 * @sq: the throtl_service_queue of interest
  62 *
  63 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
  64 * embedded in throtl_data, %NULL is returned.
  65 */
  66static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
  67{
  68	if (sq && sq->parent_sq)
  69		return container_of(sq, struct throtl_grp, service_queue);
  70	else
  71		return NULL;
  72}
  73
  74/**
  75 * sq_to_td - return throtl_data the specified service queue belongs to
  76 * @sq: the throtl_service_queue of interest
  77 *
  78 * A service_queue can be embedded in either a throtl_grp or throtl_data.
  79 * Determine the associated throtl_data accordingly and return it.
  80 */
  81static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
  82{
  83	struct throtl_grp *tg = sq_to_tg(sq);
  84
  85	if (tg)
  86		return tg->td;
  87	else
  88		return container_of(sq, struct throtl_data, service_queue);
  89}
  90
  91static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
  92{
  93	struct blkcg_gq *blkg = tg_to_blkg(tg);
  94
  95	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
  96		return U64_MAX;
  97
  98	return tg->bps[rw];
  99}
 100
 101static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 102{
 103	struct blkcg_gq *blkg = tg_to_blkg(tg);
 104
 105	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
 106		return UINT_MAX;
 107
 108	return tg->iops[rw];
 109}
 110
 111/**
 112 * throtl_log - log debug message via blktrace
 113 * @sq: the service_queue being reported
 114 * @fmt: printf format string
 115 * @args: printf args
 116 *
 117 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 118 * throtl_grp; otherwise, just "throtl".
 119 */
 120#define throtl_log(sq, fmt, args...)	do {				\
 121	struct throtl_grp *__tg = sq_to_tg((sq));			\
 122	struct throtl_data *__td = sq_to_td((sq));			\
 123									\
 124	(void)__td;							\
 125	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
 126		break;							\
 127	if ((__tg)) {							\
 128		blk_add_cgroup_trace_msg(__td->queue,			\
 129			&tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\
 130	} else {							\
 131		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
 132	}								\
 133} while (0)
 134
 135static inline unsigned int throtl_bio_data_size(struct bio *bio)
 136{
 137	/* assume it's one sector */
 138	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
 139		return 512;
 140	return bio->bi_iter.bi_size;
 141}
 142
 143static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 144{
 145	INIT_LIST_HEAD(&qn->node);
 146	bio_list_init(&qn->bios);
 147	qn->tg = tg;
 148}
 149
 150/**
 151 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 152 * @bio: bio being added
 153 * @qn: qnode to add bio to
 154 * @queued: the service_queue->queued[] list @qn belongs to
 155 *
 156 * Add @bio to @qn and put @qn on @queued if it's not already on.
 157 * @qn->tg's reference count is bumped when @qn is activated.  See the
 158 * comment on top of throtl_qnode definition for details.
 159 */
 160static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
 161				 struct list_head *queued)
 162{
 163	bio_list_add(&qn->bios, bio);
 164	if (list_empty(&qn->node)) {
 165		list_add_tail(&qn->node, queued);
 166		blkg_get(tg_to_blkg(qn->tg));
 167	}
 168}
 169
 170/**
 171 * throtl_peek_queued - peek the first bio on a qnode list
 172 * @queued: the qnode list to peek
 173 */
 174static struct bio *throtl_peek_queued(struct list_head *queued)
 175{
 176	struct throtl_qnode *qn;
 177	struct bio *bio;
 178
 179	if (list_empty(queued))
 180		return NULL;
 181
 182	qn = list_first_entry(queued, struct throtl_qnode, node);
 183	bio = bio_list_peek(&qn->bios);
 184	WARN_ON_ONCE(!bio);
 185	return bio;
 186}
 187
 188/**
 189 * throtl_pop_queued - pop the first bio from a qnode list
 190 * @queued: the qnode list to pop a bio from
 191 * @tg_to_put: optional out argument for throtl_grp to put
 192 *
 193 * Pop the first bio from the qnode list @queued.  After popping, the first
 194 * qnode is removed from @queued if empty or moved to the end of @queued so
 195 * that the popping order is round-robin.
 196 *
 197 * When the first qnode is removed, its associated throtl_grp should be put
 198 * too.  If @tg_to_put is NULL, this function automatically puts it;
 199 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 200 * responsible for putting it.
 201 */
 202static struct bio *throtl_pop_queued(struct list_head *queued,
 203				     struct throtl_grp **tg_to_put)
 204{
 205	struct throtl_qnode *qn;
 206	struct bio *bio;
 207
 208	if (list_empty(queued))
 209		return NULL;
 210
 211	qn = list_first_entry(queued, struct throtl_qnode, node);
 212	bio = bio_list_pop(&qn->bios);
 213	WARN_ON_ONCE(!bio);
 214
 215	if (bio_list_empty(&qn->bios)) {
 216		list_del_init(&qn->node);
 217		if (tg_to_put)
 218			*tg_to_put = qn->tg;
 219		else
 220			blkg_put(tg_to_blkg(qn->tg));
 221	} else {
 222		list_move_tail(&qn->node, queued);
 223	}
 224
 225	return bio;
 226}
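
As an illustration (not part of the kernel source), the rotate-or-remove discipline above can be modeled in a self-contained userspace sketch. The types are simplified stand-ins (an array ring instead of the qnode list_head, fixed arrays instead of bio_list); only the round-robin pop decision mirrors throtl_pop_queued():

#include <stdio.h>

/* stand-in for a throtl_qnode: a source name plus a queue of bio ids */
struct qnode { const char *src; int bios[4]; int head, len; };

int main(void)
{
	struct qnode a = { "child-A", { 1, 2, 3 }, 0, 3 };
	struct qnode b = { "child-B", { 4, 5 }, 0, 2 };
	struct qnode *ring[2] = { &a, &b };
	int n = 2, i = 0;

	while (n) {
		struct qnode *q = ring[i % n];

		/* pop the first bio of the first qnode */
		printf("dispatch bio %d from %s\n", q->bios[q->head++], q->src);

		if (q->head == q->len) {
			/* emptied: remove it (kernel: list_del_init) */
			ring[i % n] = ring[n - 1];
			n--;
		} else {
			/* rotate to the tail (kernel: list_move_tail) */
			i++;
		}
	}
	return 0;	/* prints bios 1 4 2 5 3: no single source hogs the window */
}
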
 227
 228/* init a service_queue, assumes the caller zeroed it */
 229static void throtl_service_queue_init(struct throtl_service_queue *sq)
 230{
 231	INIT_LIST_HEAD(&sq->queued[READ]);
 232	INIT_LIST_HEAD(&sq->queued[WRITE]);
 233	sq->pending_tree = RB_ROOT_CACHED;
 234	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
 235}
 236
 237static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk,
 238		struct blkcg *blkcg, gfp_t gfp)
 239{
 240	struct throtl_grp *tg;
 241	int rw;
 242
 243	tg = kzalloc_node(sizeof(*tg), gfp, disk->node_id);
 244	if (!tg)
 245		return NULL;
 246
 247	if (blkg_rwstat_init(&tg->stat_bytes, gfp))
 248		goto err_free_tg;
 249
 250	if (blkg_rwstat_init(&tg->stat_ios, gfp))
 251		goto err_exit_stat_bytes;
 252
 253	throtl_service_queue_init(&tg->service_queue);
 254
 255	for (rw = READ; rw <= WRITE; rw++) {
 256		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
 257		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
 258	}
 259
 260	RB_CLEAR_NODE(&tg->rb_node);
 261	tg->bps[READ] = U64_MAX;
 262	tg->bps[WRITE] = U64_MAX;
 263	tg->iops[READ] = UINT_MAX;
 264	tg->iops[WRITE] = UINT_MAX;
 265
 266	return &tg->pd;
 267
 268err_exit_stat_bytes:
 269	blkg_rwstat_exit(&tg->stat_bytes);
 270err_free_tg:
 271	kfree(tg);
 272	return NULL;
 273}
 274
 275static void throtl_pd_init(struct blkg_policy_data *pd)
 276{
 277	struct throtl_grp *tg = pd_to_tg(pd);
 278	struct blkcg_gq *blkg = tg_to_blkg(tg);
 279	struct throtl_data *td = blkg->q->td;
 280	struct throtl_service_queue *sq = &tg->service_queue;
 
 
 281
 282	/*
 283	 * If on the default hierarchy, we switch to properly hierarchical
 284	 * behavior where limits on a given throtl_grp are applied to the
 285	 * whole subtree rather than just the group itself, e.g. if a 16M
 286	 * read_bps limit is set on a parent group, the aggregate bps of the
 287	 * parent group and its subtree groups can't exceed 16M for the
 288	 * device.
 289	 *
 290	 * If not on the default hierarchy, the broken flat hierarchy
 291	 * behavior is retained where all throtl_grps are treated as if
 292	 * they're all separate root groups right below throtl_data.
 293	 * Limits of a group don't interact with limits of other groups
 294	 * regardless of the position of the group in the hierarchy.
 295	 */
 296	sq->parent_sq = &td->service_queue;
 297	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
 298		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 299	tg->td = td;
 300}
 301
 302/*
 303 * Set has_rules[] if @tg or any of its parents have limits configured.
 304 * This doesn't require walking up to the top of the hierarchy as the
 305 * parent's has_rules[] is guaranteed to be correct.
 306 */
 307static void tg_update_has_rules(struct throtl_grp *tg)
 308{
 309	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
 310	int rw;
 311
 312	for (rw = READ; rw <= WRITE; rw++) {
 313		tg->has_rules_iops[rw] =
 314			(parent_tg && parent_tg->has_rules_iops[rw]) ||
 315			tg_iops_limit(tg, rw) != UINT_MAX;
 316		tg->has_rules_bps[rw] =
 317			(parent_tg && parent_tg->has_rules_bps[rw]) ||
 318			tg_bps_limit(tg, rw) != U64_MAX;
 319	}
 320}
 321
 322static void throtl_pd_online(struct blkg_policy_data *pd)
 323{
 324	struct throtl_grp *tg = pd_to_tg(pd);
 325	/*
 326	 * We don't want new groups to escape the limits of their ancestors.
 327	 * Update has_rules[] after a new group is brought online.
 328	 */
 329	tg_update_has_rules(tg);
 330}
 331
 332static void throtl_pd_free(struct blkg_policy_data *pd)
 333{
 334	struct throtl_grp *tg = pd_to_tg(pd);
 335
 336	del_timer_sync(&tg->service_queue.pending_timer);
 337	blkg_rwstat_exit(&tg->stat_bytes);
 338	blkg_rwstat_exit(&tg->stat_ios);
 339	kfree(tg);
 340}
 341
 342static struct throtl_grp *
 343throtl_rb_first(struct throtl_service_queue *parent_sq)
 344{
 345	struct rb_node *n;
 346
 347	n = rb_first_cached(&parent_sq->pending_tree);
 348	WARN_ON_ONCE(!n);
 349	if (!n)
 350		return NULL;
 351	return rb_entry_tg(n);
 352}
 353
 354static void throtl_rb_erase(struct rb_node *n,
 355			    struct throtl_service_queue *parent_sq)
 356{
 357	rb_erase_cached(n, &parent_sq->pending_tree);
 358	RB_CLEAR_NODE(n);
 359}
 360
 361static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
 362{
 363	struct throtl_grp *tg;
 364
 365	tg = throtl_rb_first(parent_sq);
 366	if (!tg)
 367		return;
 368
 369	parent_sq->first_pending_disptime = tg->disptime;
 370}
 371
 372static void tg_service_queue_add(struct throtl_grp *tg)
 373{
 374	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
 375	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
 376	struct rb_node *parent = NULL;
 377	struct throtl_grp *__tg;
 378	unsigned long key = tg->disptime;
 379	bool leftmost = true;
 380
 381	while (*node != NULL) {
 382		parent = *node;
 383		__tg = rb_entry_tg(parent);
 384
 385		if (time_before(key, __tg->disptime))
 386			node = &parent->rb_left;
 387		else {
 388			node = &parent->rb_right;
 389			leftmost = false;
 390		}
 391	}
 392
 393	rb_link_node(&tg->rb_node, parent, node);
 394	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
 395			       leftmost);
 396}
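
Note (editorial, not kernel source) that the pending_tree is keyed by ->disptime in jiffies, so the insertion walk above compares with time_before() rather than a plain '<': signed-subtraction comparisons stay correct across jiffies wraparound as long as the two times are less than half the counter range apart. A standalone sketch, with the macro modeled on the definition in include/linux/jiffies.h:

#include <stdio.h>

/* modeled on the kernel's time_before()/time_after() pair */
#define time_before(a, b)	((long)((a) - (b)) < 0)

int main(void)
{
	unsigned long near_wrap = (unsigned long)-5;	/* jiffies just before wraparound */
	unsigned long after_wrap = 10;			/* jiffies just after wraparound */

	/* a plain '<' gets the ordering wrong here; time_before() does not */
	printf("naive: %d, time_before: %d\n",
	       near_wrap < after_wrap, time_before(near_wrap, after_wrap));
	return 0;	/* prints "naive: 0, time_before: 1" */
}
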
 397
 398static void throtl_enqueue_tg(struct throtl_grp *tg)
 399{
 400	if (!(tg->flags & THROTL_TG_PENDING)) {
 401		tg_service_queue_add(tg);
 402		tg->flags |= THROTL_TG_PENDING;
 403		tg->service_queue.parent_sq->nr_pending++;
 404	}
 405}
 406
 407static void throtl_dequeue_tg(struct throtl_grp *tg)
 408{
 409	if (tg->flags & THROTL_TG_PENDING) {
 410		struct throtl_service_queue *parent_sq =
 411			tg->service_queue.parent_sq;
 412
 413		throtl_rb_erase(&tg->rb_node, parent_sq);
 414		--parent_sq->nr_pending;
 415		tg->flags &= ~THROTL_TG_PENDING;
 416	}
 417}
 418
 419/* Call with queue lock held */
 420static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 421					  unsigned long expires)
 422{
 423	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
 424
 425	/*
 426	 * Since we are adjusting the throttle limit dynamically, the sleep
 427	 * time calculated according to the previous limit might be invalid. It's
 428	 * possible the cgroup sleep time is very long and no other cgroups have
 429	 * IO running, so the limit change would never be noticed. Make sure the
 430	 * cgroup doesn't sleep too long so that the notification isn't missed.
 431	 */
 432	if (time_after(expires, max_expire))
 433		expires = max_expire;
 434	mod_timer(&sq->pending_timer, expires);
 435	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
 436		   expires - jiffies, jiffies);
 437}
 438
 439/**
 440 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 441 * @sq: the service_queue to schedule dispatch for
 442 * @force: force scheduling
 443 *
 444 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 445 * dispatch time of the first pending child.  Returns %true if either timer
 446 * is armed or there's no pending child left.  %false if the current
 447 * dispatch window is still open and the caller should continue
 448 * dispatching.
 449 *
 450 * If @force is %true, the dispatch timer is always scheduled and this
 451 * function is guaranteed to return %true.  This is to be used when the
 452 * caller can't dispatch itself and needs to invoke pending_timer
 453 * unconditionally.  Note that forced scheduling is likely to induce a short
 454 * delay before dispatch starts even if @sq->first_pending_disptime is not
 455 * in the future and thus shouldn't be used in hot paths.
 456 */
 457static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
 458					  bool force)
 459{
 460	/* any pending children left? */
 461	if (!sq->nr_pending)
 462		return true;
 463
 464	update_min_dispatch_time(sq);
 465
 466	/* is the next dispatch time in the future? */
 467	if (force || time_after(sq->first_pending_disptime, jiffies)) {
 468		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
 469		return true;
 470	}
 471
 472	/* tell the caller to continue dispatching */
 473	return false;
 474}
 475
 476static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
 477		bool rw, unsigned long start)
 478{
 479	tg->bytes_disp[rw] = 0;
 480	tg->io_disp[rw] = 0;
 481	tg->carryover_bytes[rw] = 0;
 482	tg->carryover_ios[rw] = 0;
 483
 484	/*
 485	 * The previous slice has expired. We must have trimmed it after the
 486	 * last bio dispatch. That means that since the start of the last
 487	 * slice, we never used that bandwidth. Try to make use of that
 488	 * bandwidth while giving credit.
 489	 */
 490	if (time_after(start, tg->slice_start[rw]))
 491		tg->slice_start[rw] = start;
 492
 493	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
 494	throtl_log(&tg->service_queue,
 495		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
 496		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 497		   tg->slice_end[rw], jiffies);
 498}
 499
 500static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
 501					  bool clear_carryover)
 502{
 503	tg->bytes_disp[rw] = 0;
 504	tg->io_disp[rw] = 0;
 505	tg->slice_start[rw] = jiffies;
 506	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
 507	if (clear_carryover) {
 508		tg->carryover_bytes[rw] = 0;
 509		tg->carryover_ios[rw] = 0;
 510	}
 511
 512	throtl_log(&tg->service_queue,
 513		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
 514		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 515		   tg->slice_end[rw], jiffies);
 516}
 517
 518static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
 519					unsigned long jiffy_end)
 520{
 521	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
 522}
 523
 524static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
 525				       unsigned long jiffy_end)
 526{
 527	throtl_set_slice_end(tg, rw, jiffy_end);
 528	throtl_log(&tg->service_queue,
 529		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
 530		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 531		   tg->slice_end[rw], jiffies);
 532}
 533
 534/* Determine if previously allocated or extended slice is complete or not */
 535static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
 536{
 537	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
 538		return false;
 539
 540	return true;
 541}
 542
 543static unsigned int calculate_io_allowed(u32 iops_limit,
 544					 unsigned long jiffy_elapsed)
 545{
 546	unsigned int io_allowed;
 547	u64 tmp;
 548
 549	/*
 550	 * jiffy_elapsed should not be a big value: the minimum iops can be
 551	 * 1, in which case jiffy_elapsed should at most be equivalent to 1
 552	 * second, as we allow dispatch after 1 second and after that the
 553	 * slice should have been trimmed.
 554	 */
 555
 556	tmp = (u64)iops_limit * jiffy_elapsed;
 557	do_div(tmp, HZ);
 558
 559	if (tmp > UINT_MAX)
 560		io_allowed = UINT_MAX;
 561	else
 562		io_allowed = tmp;
 563
 564	return io_allowed;
 565}
 566
 567static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
 568{
 569	/*
 570	 * Can result be wider than 64 bits?
 571	 * We check against 62, not 64, due to ilog2 truncation.
 572	 */
 573	if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
 574		return U64_MAX;
 575	return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
 576}
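
As a sanity check on the arithmetic above (editorial sketch, assuming HZ = 1000, which varies by kernel config): a 1 MiB/s limit over one 100ms slice allows 104857 bytes. mul_u64_u64_div_u64() is replaced here by plain 64-bit arithmetic, which cannot overflow at these magnitudes:

#include <stdint.h>
#include <stdio.h>

#define HZ 1000ULL

int main(void)
{
	uint64_t bps_limit = 1048576;		/* 1 MiB/s */
	uint64_t jiffy_elapsed = HZ / 10;	/* one 100ms slice */

	/* bytes_allowed = bps_limit * jiffy_elapsed / HZ */
	uint64_t bytes_allowed = bps_limit * jiffy_elapsed / HZ;

	printf("%llu bytes allowed\n", (unsigned long long)bytes_allowed);
	return 0;	/* prints "104857 bytes allowed" */
}
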
 577
 578/* Trim the used slices and adjust slice start accordingly */
 579static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 580{
 581	unsigned long time_elapsed;
 582	long long bytes_trim;
 583	int io_trim;
 584
 585	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
 586
 587	/*
 588	 * If bps is unlimited (-1), then the time slice doesn't get
 589	 * renewed. Don't try to trim the slice if it has expired. A new
 590	 * slice will start when appropriate.
 591	 */
 592	if (throtl_slice_used(tg, rw))
 593		return;
 594
 595	/*
 596	 * A bio has been dispatched, so also adjust slice_end. It might happen
 597	 * that the cgroup limit was initially very low, resulting in a high
 598	 * slice_end, but the limit was later bumped up and the bio was
 599	 * dispatched sooner; then we need to reduce slice_end. A high bogus
 600	 * slice_end is bad because it does not allow a new slice to start.
 601	 */
 602
 603	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
 604
 605	time_elapsed = rounddown(jiffies - tg->slice_start[rw],
 606				 tg->td->throtl_slice);
 607	if (!time_elapsed)
 608		return;
 609
 610	bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
 611					     time_elapsed) +
 612		     tg->carryover_bytes[rw];
 613	io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
 614		  tg->carryover_ios[rw];
 615	if (bytes_trim <= 0 && io_trim <= 0)
 616		return;
 617
 618	tg->carryover_bytes[rw] = 0;
 619	if ((long long)tg->bytes_disp[rw] >= bytes_trim)
 620		tg->bytes_disp[rw] -= bytes_trim;
 621	else
 622		tg->bytes_disp[rw] = 0;
 623
 624	tg->carryover_ios[rw] = 0;
 625	if ((int)tg->io_disp[rw] >= io_trim)
 626		tg->io_disp[rw] -= io_trim;
 627	else
 628		tg->io_disp[rw] = 0;
 629
 630	tg->slice_start[rw] += time_elapsed;
 631
 632	throtl_log(&tg->service_queue,
 633		   "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
 634		   rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
 635		   bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
 636		   jiffies);
 637}
 638
 639static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
 640{
 641	unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
 642	u64 bps_limit = tg_bps_limit(tg, rw);
 643	u32 iops_limit = tg_iops_limit(tg, rw);
 644
 645	/*
 646	 * If the config is updated while bios are still throttled, calculate
 647	 * and accumulate how many bytes/ios have been waited for across the
 648	 * changes. carryover_bytes/ios will then be used to calculate the
 649	 * new wait time under the new configuration.
 650	 */
 651	if (bps_limit != U64_MAX)
 652		tg->carryover_bytes[rw] +=
 653			calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
 654			tg->bytes_disp[rw];
 655	if (iops_limit != UINT_MAX)
 656		tg->carryover_ios[rw] +=
 657			calculate_io_allowed(iops_limit, jiffy_elapsed) -
 658			tg->io_disp[rw];
 659}
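
A worked example of the carryover bookkeeping (editorial sketch, assuming HZ = 1000): a group reconfigured 50 jiffies into a slice under a 1 MiB/s limit with nothing dispatched is owed 52428 bytes under the new config; had it dispatched more than allowed, the carryover would go negative, i.e. a debt. The bps branch only:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HZ 1000ULL

int main(void)
{
	uint64_t bps_limit = 1048576;	/* old limit: 1 MiB/s */
	uint64_t jiffy_elapsed = 50;	/* time waited under the old limit */
	uint64_t bytes_disp = 0;	/* nothing dispatched yet */
	int64_t carryover_bytes = 0;

	/* same shape as the bps branch of __tg_update_carryover() */
	carryover_bytes += (int64_t)(bps_limit * jiffy_elapsed / HZ) -
			   (int64_t)bytes_disp;

	printf("carryover_bytes = %" PRId64 "\n", carryover_bytes);
	return 0;	/* prints "carryover_bytes = 52428" */
}
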
 660
 661static void tg_update_carryover(struct throtl_grp *tg)
 662{
 663	if (tg->service_queue.nr_queued[READ])
 664		__tg_update_carryover(tg, READ);
 665	if (tg->service_queue.nr_queued[WRITE])
 666		__tg_update_carryover(tg, WRITE);
 667
 668	/* see comments in struct throtl_grp for meaning of these fields. */
 669	throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
 670		   tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
 671		   tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
 672}
 673
 674static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
 675				 u32 iops_limit)
 676{
 677	bool rw = bio_data_dir(bio);
 678	int io_allowed;
 679	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 680
 681	if (iops_limit == UINT_MAX) {
 682		return 0;
 683	}
 684
 685	jiffy_elapsed = jiffies - tg->slice_start[rw];
 686
 687	/* Round up to the next throttle slice, wait time must be nonzero */
 688	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
 689	io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
 690		     tg->carryover_ios[rw];
 691	if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
 692		return 0;
 693
 694	/* Calc approx time to dispatch */
 695	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
 696
 697	/* make sure at least one io can be dispatched after waiting */
 698	jiffy_wait = max(jiffy_wait, HZ / iops_limit + 1);
 699	return jiffy_wait;
 700}
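
The max() at the end guarantees forward progress: with HZ = 1000 (an assumption for this editorial sketch) and a 100 iops limit, an over-budget group waits at least HZ/100 + 1 = 11 jiffies, enough for one more IO to become dispatchable. A sketch of just that clamp:

#include <stdio.h>

#define HZ 1000UL

/* mirrors the final max(jiffy_wait, HZ / iops_limit + 1) step above */
static unsigned long clamp_iops_wait(unsigned long jiffy_wait,
				     unsigned int iops_limit)
{
	unsigned long floor = HZ / iops_limit + 1;

	return jiffy_wait > floor ? jiffy_wait : floor;
}

int main(void)
{
	printf("%lu %lu\n", clamp_iops_wait(3, 100),	/* clamped to 11 */
	       clamp_iops_wait(40, 100));		/* stays 40      */
	return 0;
}
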
 701
 702static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
 703				u64 bps_limit)
 704{
 705	bool rw = bio_data_dir(bio);
 706	long long bytes_allowed;
 707	u64 extra_bytes;
 708	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 709	unsigned int bio_size = throtl_bio_data_size(bio);
 710
 711	/* no need to throttle if this bio's bytes have been accounted */
 712	if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) {
 713		return 0;
 714	}
 715
 716	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 717
 718	/* Slice has just started. Consider one slice interval */
 719	if (!jiffy_elapsed)
 720		jiffy_elapsed_rnd = tg->td->throtl_slice;
 721
 722	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
 723	bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
 724			tg->carryover_bytes[rw];
 725	if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
 726		return 0;
 727
 728	/* Calc approx time to dispatch */
 729	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
 730	jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);
 731
 732	if (!jiffy_wait)
 733		jiffy_wait = 1;
 734
 735	/*
 736	 * This wait time is without taking into consideration the rounding
 737	 * up we did. Add that time also.
 738	 */
 739	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
 740	return jiffy_wait;
 741}
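
Worked example for the wait computation above (editorial sketch, assuming HZ = 1000): a bio that puts a 1 MiB/s group 512 KiB over its byte budget must wait extra_bytes * HZ / bps_limit = 500 jiffies, the half second the budget needs to catch up (the rounding-up correction is omitted here):

#include <stdint.h>
#include <stdio.h>

#define HZ 1000ULL

int main(void)
{
	uint64_t bps_limit = 1048576;	/* 1 MiB/s */
	uint64_t extra_bytes = 524288;	/* 512 KiB over budget */
	uint64_t jiffy_wait = extra_bytes * HZ / bps_limit;

	if (!jiffy_wait)
		jiffy_wait = 1;		/* never report a zero wait */

	printf("wait %llu jiffies\n", (unsigned long long)jiffy_wait);
	return 0;	/* prints "wait 500 jiffies" */
}
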
 742
 743/*
 744 * Returns whether one can dispatch a bio or not. Also returns approx number
 745 * of jiffies to wait before this bio is within the IO rate and can be dispatched
 746 */
 747static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 748			    unsigned long *wait)
 749{
 750	bool rw = bio_data_dir(bio);
 751	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
 752	u64 bps_limit = tg_bps_limit(tg, rw);
 753	u32 iops_limit = tg_iops_limit(tg, rw);
 754
 755	/*
 756	 * Currently the whole state machine of the group depends on the
 757	 * first bio queued in the group's bio list. So one should not be
 758	 * calling this function with a different bio if there are other
 759	 * bios queued.
 760	 */
 761	BUG_ON(tg->service_queue.nr_queued[rw] &&
 762	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
 763
 764	/* If tg->bps = -1, then BW is unlimited */
 765	if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
 766	    tg->flags & THROTL_TG_CANCELING) {
 767		if (wait)
 768			*wait = 0;
 769		return true;
 770	}
 771
 772	/*
 773	 * If previous slice expired, start a new one otherwise renew/extend
 774	 * existing slice to make sure it is at least throtl_slice interval
 775	 * long since now. New slice is started only for empty throttle group.
 776	 * If there is queued bio, that means there should be an active
 777	 * slice and it should be extended instead.
 778	 */
 779	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
 780		throtl_start_new_slice(tg, rw, true);
 781	else {
 782		if (time_before(tg->slice_end[rw],
 783		    jiffies + tg->td->throtl_slice))
 784			throtl_extend_slice(tg, rw,
 785				jiffies + tg->td->throtl_slice);
 786	}
 787
 788	bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
 789	iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
 790	if (bps_wait + iops_wait == 0) {
 791		if (wait)
 792			*wait = 0;
 793		return true;
 794	}
 795
 796	max_wait = max(bps_wait, iops_wait);
 797
 798	if (wait)
 799		*wait = max_wait;
 800
 801	if (time_before(tg->slice_end[rw], jiffies + max_wait))
 802		throtl_extend_slice(tg, rw, jiffies + max_wait);
 803
 804	return false;
 805}
 806
 807static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 808{
 809	bool rw = bio_data_dir(bio);
 810	unsigned int bio_size = throtl_bio_data_size(bio);
 811
 812	/* Charge the bio to the group */
 813	if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
 814		tg->bytes_disp[rw] += bio_size;
 815		tg->last_bytes_disp[rw] += bio_size;
 816	}
 817
 818	tg->io_disp[rw]++;
 819	tg->last_io_disp[rw]++;
 820}
 821
 822/**
 823 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 824 * @bio: bio to add
 825 * @qn: qnode to use
 826 * @tg: the target throtl_grp
 827 *
 828 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 829 * tg->qnode_on_self[] is used.
 830 */
 831static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
 832			      struct throtl_grp *tg)
 833{
 834	struct throtl_service_queue *sq = &tg->service_queue;
 835	bool rw = bio_data_dir(bio);
 836
 837	if (!qn)
 838		qn = &tg->qnode_on_self[rw];
 839
 840	/*
 841	 * If @tg doesn't currently have any bios queued in the same
 842	 * direction, queueing @bio can change when @tg should be
 843	 * dispatched.  Mark that @tg was empty.  This is automatically
 844	 * cleared on the next tg_update_disptime().
 845	 */
 846	if (!sq->nr_queued[rw])
 847		tg->flags |= THROTL_TG_WAS_EMPTY;
 848
 849	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
 850
 851	sq->nr_queued[rw]++;
 852	throtl_enqueue_tg(tg);
 853}
 854
 855static void tg_update_disptime(struct throtl_grp *tg)
 856{
 857	struct throtl_service_queue *sq = &tg->service_queue;
 858	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
 859	struct bio *bio;
 860
 861	bio = throtl_peek_queued(&sq->queued[READ]);
 862	if (bio)
 863		tg_may_dispatch(tg, bio, &read_wait);
 864
 865	bio = throtl_peek_queued(&sq->queued[WRITE]);
 866	if (bio)
 867		tg_may_dispatch(tg, bio, &write_wait);
 868
 869	min_wait = min(read_wait, write_wait);
 870	disptime = jiffies + min_wait;
 871
 872	/* Update dispatch time */
 873	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
 874	tg->disptime = disptime;
 875	tg_service_queue_add(tg);
 876
 877	/* see throtl_add_bio_tg() */
 878	tg->flags &= ~THROTL_TG_WAS_EMPTY;
 879}
 880
 881static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
 882					struct throtl_grp *parent_tg, bool rw)
 883{
 884	if (throtl_slice_used(parent_tg, rw)) {
 885		throtl_start_new_slice_with_credit(parent_tg, rw,
 886				child_tg->slice_start[rw]);
 887	}
 888
 889}
 890
 891static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
 892{
 893	struct throtl_service_queue *sq = &tg->service_queue;
 894	struct throtl_service_queue *parent_sq = sq->parent_sq;
 895	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
 896	struct throtl_grp *tg_to_put = NULL;
 897	struct bio *bio;
 898
 899	/*
 900	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
 901	 * from @tg may put its reference and @parent_sq might end up
 902	 * getting released prematurely.  Remember the tg to put and put it
 903	 * after @bio is transferred to @parent_sq.
 904	 */
 905	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
 906	sq->nr_queued[rw]--;
 907
 908	throtl_charge_bio(tg, bio);
 909
 910	/*
 911	 * If our parent is another tg, we just need to transfer @bio to
 912	 * the parent using throtl_add_bio_tg().  If our parent is
 913	 * @td->service_queue, @bio is ready to be issued.  Put it on its
 914	 * bio_lists[] and decrease total number queued.  The caller is
 915	 * responsible for issuing these bios.
 916	 */
 917	if (parent_tg) {
 918		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
 919		start_parent_slice_with_credit(tg, parent_tg, rw);
 920	} else {
 921		bio_set_flag(bio, BIO_BPS_THROTTLED);
 922		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
 923				     &parent_sq->queued[rw]);
 924		BUG_ON(tg->td->nr_queued[rw] <= 0);
 925		tg->td->nr_queued[rw]--;
 926	}
 927
 928	throtl_trim_slice(tg, rw);
 929
 930	if (tg_to_put)
 931		blkg_put(tg_to_blkg(tg_to_put));
 932}
 933
 934static int throtl_dispatch_tg(struct throtl_grp *tg)
 935{
 936	struct throtl_service_queue *sq = &tg->service_queue;
 937	unsigned int nr_reads = 0, nr_writes = 0;
 938	unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
 939	unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
 940	struct bio *bio;
 941
 942	/* Try to dispatch 75% READS and 25% WRITES */
 943
 944	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
 945	       tg_may_dispatch(tg, bio, NULL)) {
 946
 947		tg_dispatch_one_bio(tg, READ);
 948		nr_reads++;
 949
 950		if (nr_reads >= max_nr_reads)
 951			break;
 952	}
 953
 954	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
 955	       tg_may_dispatch(tg, bio, NULL)) {
 956
 957		tg_dispatch_one_bio(tg, WRITE);
 958		nr_writes++;
 959
 960		if (nr_writes >= max_nr_writes)
 961			break;
 962	}
 963
 964	return nr_reads + nr_writes;
 965}
 966
 967static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
 968{
 969	unsigned int nr_disp = 0;
 970
 971	while (1) {
 972		struct throtl_grp *tg;
 973		struct throtl_service_queue *sq;
 974
 975		if (!parent_sq->nr_pending)
 976			break;
 977
 978		tg = throtl_rb_first(parent_sq);
 979		if (!tg)
 980			break;
 981
 982		if (time_before(jiffies, tg->disptime))
 983			break;
 984
 985		nr_disp += throtl_dispatch_tg(tg);
 986
 987		sq = &tg->service_queue;
 988		if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
 989			tg_update_disptime(tg);
 990		else
 991			throtl_dequeue_tg(tg);
 992
 993		if (nr_disp >= THROTL_QUANTUM)
 994			break;
 995	}
 996
 997	return nr_disp;
 998}
 999
1000/**
1001 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1002 * @t: the pending_timer member of the throtl_service_queue being serviced
1003 *
1004 * This timer is armed when a child throtl_grp with active bios becomes
1005 * pending and queued on the service_queue's pending_tree and expires when
1006 * the first child throtl_grp should be dispatched.  This function
1007 * dispatches bios from the children throtl_grps to the parent
1008 * service_queue.
1009 *
1010 * If the parent's parent is another throtl_grp, dispatching is propagated
1011 * by either arming its pending_timer or repeating dispatch directly.  If
1012 * the top-level service_tree is reached, throtl_data->dispatch_work is
1013 * kicked so that the ready bios are issued.
1014 */
1015static void throtl_pending_timer_fn(struct timer_list *t)
1016{
1017	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1018	struct throtl_grp *tg = sq_to_tg(sq);
1019	struct throtl_data *td = sq_to_td(sq);
1020	struct throtl_service_queue *parent_sq;
1021	struct request_queue *q;
1022	bool dispatched;
1023	int ret;
1024
1025	/* throtl_data may be gone, so figure out request queue by blkg */
1026	if (tg)
1027		q = tg->pd.blkg->q;
1028	else
1029		q = td->queue;
1030
1031	spin_lock_irq(&q->queue_lock);
1032
1033	if (!q->root_blkg)
1034		goto out_unlock;
1035
1036again:
1037	parent_sq = sq->parent_sq;
1038	dispatched = false;
1039
1040	while (true) {
1041		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1042			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
1043			   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1044
1045		ret = throtl_select_dispatch(sq);
1046		if (ret) {
1047			throtl_log(sq, "bios disp=%u", ret);
1048			dispatched = true;
1049		}
1050
1051		if (throtl_schedule_next_dispatch(sq, false))
1052			break;
1053
1054		/* this dispatch window is still open, relax and repeat */
1055		spin_unlock_irq(&q->queue_lock);
1056		cpu_relax();
1057		spin_lock_irq(&q->queue_lock);
1058	}
1059
1060	if (!dispatched)
1061		goto out_unlock;
1062
1063	if (parent_sq) {
1064		/* @parent_sq is another throtl_grp, propagate dispatch */
1065		if (tg->flags & THROTL_TG_WAS_EMPTY) {
1066			tg_update_disptime(tg);
1067			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1068				/* window is already open, repeat dispatching */
1069				sq = parent_sq;
1070				tg = sq_to_tg(sq);
1071				goto again;
1072			}
1073		}
1074	} else {
1075		/* reached the top-level, queue issuing */
1076		queue_work(kthrotld_workqueue, &td->dispatch_work);
1077	}
1078out_unlock:
1079	spin_unlock_irq(&q->queue_lock);
1080}
1081
1082/**
1083 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1084 * @work: work item being executed
1085 *
1086 * This function is queued for execution when bios reach the bio_lists[]
1087 * of throtl_data->service_queue.  Those bios are ready and issued by this
1088 * function.
1089 */
1090static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1091{
1092	struct throtl_data *td = container_of(work, struct throtl_data,
1093					      dispatch_work);
1094	struct throtl_service_queue *td_sq = &td->service_queue;
1095	struct request_queue *q = td->queue;
1096	struct bio_list bio_list_on_stack;
1097	struct bio *bio;
1098	struct blk_plug plug;
1099	int rw;
1100
1101	bio_list_init(&bio_list_on_stack);
1102
1103	spin_lock_irq(&q->queue_lock);
1104	for (rw = READ; rw <= WRITE; rw++)
1105		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1106			bio_list_add(&bio_list_on_stack, bio);
1107	spin_unlock_irq(&q->queue_lock);
1108
1109	if (!bio_list_empty(&bio_list_on_stack)) {
1110		blk_start_plug(&plug);
1111		while ((bio = bio_list_pop(&bio_list_on_stack)))
1112			submit_bio_noacct_nocheck(bio);
1113		blk_finish_plug(&plug);
1114	}
1115}
1116
1117static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1118			      int off)
1119{
1120	struct throtl_grp *tg = pd_to_tg(pd);
1121	u64 v = *(u64 *)((void *)tg + off);
1122
1123	if (v == U64_MAX)
1124		return 0;
1125	return __blkg_prfill_u64(sf, pd, v);
1126}
1127
1128static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1129			       int off)
1130{
1131	struct throtl_grp *tg = pd_to_tg(pd);
1132	unsigned int v = *(unsigned int *)((void *)tg + off);
1133
1134	if (v == UINT_MAX)
1135		return 0;
1136	return __blkg_prfill_u64(sf, pd, v);
1137}
1138
1139static int tg_print_conf_u64(struct seq_file *sf, void *v)
1140{
1141	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1142			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1143	return 0;
1144}
1145
1146static int tg_print_conf_uint(struct seq_file *sf, void *v)
1147{
1148	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1149			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1150	return 0;
1151}
1152
1153static void tg_conf_updated(struct throtl_grp *tg, bool global)
1154{
1155	struct throtl_service_queue *sq = &tg->service_queue;
1156	struct cgroup_subsys_state *pos_css;
1157	struct blkcg_gq *blkg;
1158
1159	throtl_log(&tg->service_queue,
1160		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1161		   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1162		   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1163
1164	rcu_read_lock();
1165	/*
1166	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
1167	 * considered to have rules if either the tg itself or any of its
1168	 * ancestors has rules.  This identifies groups without any
1169	 * restrictions in the whole hierarchy and allows them to bypass
1170	 * blk-throttle.
1171	 */
1172	blkg_for_each_descendant_pre(blkg, pos_css,
1173			global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1174		struct throtl_grp *this_tg = blkg_to_tg(blkg);
1175
1176		tg_update_has_rules(this_tg);
1177		/* ignore root/second level */
1178		if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1179		    !blkg->parent->parent)
1180			continue;
1181	}
1182	rcu_read_unlock();
1183
1184	/*
1185	 * We're already holding queue_lock and know @tg is valid.  Let's
1186	 * apply the new config directly.
1187	 *
1188	 * Restart the slices for both READ and WRITE. It might happen
1189	 * that a group's limits are dropped suddenly and we don't want to
1190	 * account recently dispatched IO at the new low rate.
1191	 */
1192	throtl_start_new_slice(tg, READ, false);
1193	throtl_start_new_slice(tg, WRITE, false);
1194
1195	if (tg->flags & THROTL_TG_PENDING) {
1196		tg_update_disptime(tg);
1197		throtl_schedule_next_dispatch(sq->parent_sq, true);
1198	}
1199}
1200
1201static int blk_throtl_init(struct gendisk *disk)
1202{
1203	struct request_queue *q = disk->queue;
1204	struct throtl_data *td;
1205	int ret;
1206
1207	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1208	if (!td)
1209		return -ENOMEM;
1210
1211	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
1212	throtl_service_queue_init(&td->service_queue);
1213
1214	/*
1215	 * Freeze queue before activating policy, to synchronize with IO path,
1216	 * which is protected by 'q_usage_counter'.
1217	 */
1218	blk_mq_freeze_queue(disk->queue);
1219	blk_mq_quiesce_queue(disk->queue);
1220
1221	q->td = td;
1222	td->queue = q;
1223
1224	/* activate policy */
1225	ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
1226	if (ret) {
1227		q->td = NULL;
1228		kfree(td);
1229		goto out;
1230	}
1231
1232	if (blk_queue_nonrot(q))
1233		td->throtl_slice = DFL_THROTL_SLICE_SSD;
1234	else
1235		td->throtl_slice = DFL_THROTL_SLICE_HD;
1236	td->track_bio_latency = !queue_is_mq(q);
1237	if (!td->track_bio_latency)
1238		blk_stat_enable_accounting(q);
1239
1240out:
1241	blk_mq_unquiesce_queue(disk->queue);
1242	blk_mq_unfreeze_queue(disk->queue);
1243
1244	return ret;
1245}
1246
1247
1248static ssize_t tg_set_conf(struct kernfs_open_file *of,
1249			   char *buf, size_t nbytes, loff_t off, bool is_u64)
1250{
1251	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1252	struct blkg_conf_ctx ctx;
1253	struct throtl_grp *tg;
1254	int ret;
1255	u64 v;
1256
1257	blkg_conf_init(&ctx, buf);
1258
1259	ret = blkg_conf_open_bdev(&ctx);
1260	if (ret)
1261		goto out_finish;
1262
1263	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
1264		ret = blk_throtl_init(ctx.bdev->bd_disk);
1265		if (ret)
1266			goto out_finish;
1267	}
1268
1269	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
1270	if (ret)
1271		goto out_finish;
1272
1273	ret = -EINVAL;
1274	if (sscanf(ctx.body, "%llu", &v) != 1)
1275		goto out_finish;
1276	if (!v)
1277		v = U64_MAX;
1278
1279	tg = blkg_to_tg(ctx.blkg);
1280	tg_update_carryover(tg);
1281
1282	if (is_u64)
1283		*(u64 *)((void *)tg + of_cft(of)->private) = v;
1284	else
1285		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1286
1287	tg_conf_updated(tg, false);
1288	ret = 0;
1289out_finish:
1290	blkg_conf_exit(&ctx);
1291	return ret ?: nbytes;
1292}
1293
1294static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1295			       char *buf, size_t nbytes, loff_t off)
1296{
1297	return tg_set_conf(of, buf, nbytes, off, true);
1298}
1299
1300static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1301				char *buf, size_t nbytes, loff_t off)
1302{
1303	return tg_set_conf(of, buf, nbytes, off, false);
1304}
1305
1306static int tg_print_rwstat(struct seq_file *sf, void *v)
1307{
1308	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1309			  blkg_prfill_rwstat, &blkcg_policy_throtl,
1310			  seq_cft(sf)->private, true);
1311	return 0;
1312}
1313
1314static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
1315				      struct blkg_policy_data *pd, int off)
1316{
1317	struct blkg_rwstat_sample sum;
1318
1319	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
1320				  &sum);
1321	return __blkg_prfill_rwstat(sf, pd, &sum);
1322}
1323
1324static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
1325{
1326	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1327			  tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
1328			  seq_cft(sf)->private, true);
1329	return 0;
1330}
1331
1332static struct cftype throtl_legacy_files[] = {
1333	{
1334		.name = "throttle.read_bps_device",
1335		.private = offsetof(struct throtl_grp, bps[READ]),
1336		.seq_show = tg_print_conf_u64,
1337		.write = tg_set_conf_u64,
1338	},
1339	{
1340		.name = "throttle.write_bps_device",
1341		.private = offsetof(struct throtl_grp, bps[WRITE]),
1342		.seq_show = tg_print_conf_u64,
1343		.write = tg_set_conf_u64,
1344	},
1345	{
1346		.name = "throttle.read_iops_device",
1347		.private = offsetof(struct throtl_grp, iops[READ]),
1348		.seq_show = tg_print_conf_uint,
1349		.write = tg_set_conf_uint,
1350	},
1351	{
1352		.name = "throttle.write_iops_device",
1353		.private = offsetof(struct throtl_grp, iops[WRITE]),
1354		.seq_show = tg_print_conf_uint,
1355		.write = tg_set_conf_uint,
1356	},
1357	{
1358		.name = "throttle.io_service_bytes",
1359		.private = offsetof(struct throtl_grp, stat_bytes),
1360		.seq_show = tg_print_rwstat,
1361	},
1362	{
1363		.name = "throttle.io_service_bytes_recursive",
1364		.private = offsetof(struct throtl_grp, stat_bytes),
1365		.seq_show = tg_print_rwstat_recursive,
1366	},
1367	{
1368		.name = "throttle.io_serviced",
1369		.private = offsetof(struct throtl_grp, stat_ios),
1370		.seq_show = tg_print_rwstat,
1371	},
1372	{
1373		.name = "throttle.io_serviced_recursive",
1374		.private = offsetof(struct throtl_grp, stat_ios),
1375		.seq_show = tg_print_rwstat_recursive,
1376	},
1377	{ }	/* terminate */
1378};
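
For illustration (not part of the kernel source), a userspace sketch programming the legacy (cgroup v1) interface declared above. The mount point, group name and 8:16 device number are assumptions; the "MAJ:MIN value" body is what blkg_conf_prep()/tg_set_conf() parse:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* limit READs on device 8:16 to 1 MiB/s for this cgroup */
	const char *path =
		"/sys/fs/cgroup/blkio/mygrp/blkio.throttle.read_bps_device";
	const char *rule = "8:16 1048576\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, rule, strlen(rule)) < 0)
		perror("blkio.throttle.read_bps_device");
	if (fd >= 0)
		close(fd);
	return 0;
}
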
1379
1380static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1381			 int off)
1382{
1383	struct throtl_grp *tg = pd_to_tg(pd);
1384	const char *dname = blkg_dev_name(pd->blkg);
1385	u64 bps_dft;
1386	unsigned int iops_dft;
1387
1388	if (!dname)
1389		return 0;
1390
1391	bps_dft = U64_MAX;
1392	iops_dft = UINT_MAX;
1393
1394	if (tg->bps[READ] == bps_dft &&
1395	    tg->bps[WRITE] == bps_dft &&
1396	    tg->iops[READ] == iops_dft &&
1397	    tg->iops[WRITE] == iops_dft)
1398		return 0;
1399
1400	seq_printf(sf, "%s", dname);
1401	if (tg->bps[READ] == U64_MAX)
1402		seq_printf(sf, " rbps=max");
1403	else
1404		seq_printf(sf, " rbps=%llu", tg->bps[READ]);
1405
1406	if (tg->bps[WRITE] == U64_MAX)
1407		seq_printf(sf, " wbps=max");
1408	else
1409		seq_printf(sf, " wbps=%llu", tg->bps[WRITE]);
1410
1411	if (tg->iops[READ] == UINT_MAX)
1412		seq_printf(sf, " riops=max");
1413	else
1414		seq_printf(sf, " riops=%u", tg->iops[READ]);
1415
1416	if (tg->iops[WRITE] == UINT_MAX)
1417		seq_printf(sf, " wiops=max");
1418	else
1419		seq_printf(sf, " wiops=%u", tg->iops[WRITE]);
1420
1421	seq_printf(sf, "\n");
1422	return 0;
1423}
1424
1425static int tg_print_limit(struct seq_file *sf, void *v)
1426{
1427	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1428			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1429	return 0;
1430}
1431
1432static ssize_t tg_set_limit(struct kernfs_open_file *of,
1433			  char *buf, size_t nbytes, loff_t off)
1434{
1435	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1436	struct blkg_conf_ctx ctx;
1437	struct throtl_grp *tg;
1438	u64 v[4];
1439	int ret;
1440
1441	blkg_conf_init(&ctx, buf);
1442
1443	ret = blkg_conf_open_bdev(&ctx);
1444	if (ret)
1445		goto out_finish;
1446
1447	if (!blk_throtl_activated(ctx.bdev->bd_queue)) {
1448		ret = blk_throtl_init(ctx.bdev->bd_disk);
1449		if (ret)
1450			goto out_finish;
1451	}
1452
1453	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx);
1454	if (ret)
1455		goto out_finish;
1456
1457	tg = blkg_to_tg(ctx.blkg);
1458	tg_update_carryover(tg);
1459
1460	v[0] = tg->bps[READ];
1461	v[1] = tg->bps[WRITE];
1462	v[2] = tg->iops[READ];
1463	v[3] = tg->iops[WRITE];
1464
1465	while (true) {
1466		char tok[27];	/* wiops=18446744073709551616 */
1467		char *p;
1468		u64 val = U64_MAX;
1469		int len;
1470
1471		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1472			break;
1473		if (tok[0] == '\0')
1474			break;
1475		ctx.body += len;
1476
1477		ret = -EINVAL;
1478		p = tok;
1479		strsep(&p, "=");
1480		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1481			goto out_finish;
1482
1483		ret = -ERANGE;
1484		if (!val)
1485			goto out_finish;
1486
1487		ret = -EINVAL;
1488		if (!strcmp(tok, "rbps"))
1489			v[0] = val;
1490		else if (!strcmp(tok, "wbps"))
1491			v[1] = val;
1492		else if (!strcmp(tok, "riops"))
1493			v[2] = min_t(u64, val, UINT_MAX);
1494		else if (!strcmp(tok, "wiops"))
1495			v[3] = min_t(u64, val, UINT_MAX);
1496		else
1497			goto out_finish;
1498	}
1499
1500	tg->bps[READ] = v[0];
1501	tg->bps[WRITE] = v[1];
1502	tg->iops[READ] = v[2];
1503	tg->iops[WRITE] = v[3];
1504
1505	tg_conf_updated(tg, false);
1506	ret = 0;
1507out_finish:
1508	blkg_conf_exit(&ctx);
1509	return ret ?: nbytes;
1510}
1511
1512static struct cftype throtl_files[] = {
1513	{
1514		.name = "max",
1515		.flags = CFTYPE_NOT_ON_ROOT,
1516		.seq_show = tg_print_limit,
1517		.write = tg_set_limit,
1518	},
1519	{ }	/* terminate */
1520};
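
And the cgroup v2 equivalent (again an editorial sketch): the "max" cftype above surfaces as io.max, whose body tg_set_limit() parses as space-separated key=value tokens where "max" means unlimited. Mount point, group name and device number are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/mygrp/io.max";
	/* 1 MiB/s reads, unlimited write bandwidth, 120 write iops */
	const char *rule = "8:16 rbps=1048576 wbps=max wiops=120\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, rule, strlen(rule)) < 0)
		perror("io.max");
	if (fd >= 0)
		close(fd);
	return 0;
}
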
1521
1522static void throtl_shutdown_wq(struct request_queue *q)
1523{
1524	struct throtl_data *td = q->td;
1525
1526	cancel_work_sync(&td->dispatch_work);
1527}
1528
1529static void tg_flush_bios(struct throtl_grp *tg)
1530{
1531	struct throtl_service_queue *sq = &tg->service_queue;
1532
1533	if (tg->flags & THROTL_TG_CANCELING)
1534		return;
1535	/*
1536	 * Set the flag to make sure throtl_pending_timer_fn() won't
1537	 * stop until all throttled bios are dispatched.
1538	 */
1539	tg->flags |= THROTL_TG_CANCELING;
1540
1541	/*
1542	 * Do not dispatch a cgroup without THROTL_TG_PENDING, or the
1543	 * cgroup will be inserted into the service queue without
1544	 * THROTL_TG_PENDING set by tg_update_disptime() below. IO
1545	 * dispatched from a child in tg_dispatch_one_bio() would then
1546	 * trigger a double insertion and corrupt the tree.
1547	 */
1548	if (!(tg->flags & THROTL_TG_PENDING))
1549		return;
1550
1551	/*
1552	 * Update disptime after setting the above flag to make sure
1553	 * throtl_select_dispatch() won't exit without dispatching.
1554	 */
1555	tg_update_disptime(tg);
1556
1557	throtl_schedule_pending_timer(sq, jiffies + 1);
1558}
1559
1560static void throtl_pd_offline(struct blkg_policy_data *pd)
1561{
1562	tg_flush_bios(pd_to_tg(pd));
1563}
1564
1565struct blkcg_policy blkcg_policy_throtl = {
1566	.dfl_cftypes		= throtl_files,
1567	.legacy_cftypes		= throtl_legacy_files,
1568
1569	.pd_alloc_fn		= throtl_pd_alloc,
1570	.pd_init_fn		= throtl_pd_init,
1571	.pd_online_fn		= throtl_pd_online,
1572	.pd_offline_fn		= throtl_pd_offline,
1573	.pd_free_fn		= throtl_pd_free,
1574};
1575
1576void blk_throtl_cancel_bios(struct gendisk *disk)
1577{
1578	struct request_queue *q = disk->queue;
1579	struct cgroup_subsys_state *pos_css;
1580	struct blkcg_gq *blkg;
1581
1582	if (!blk_throtl_activated(q))
1583		return;
1584
1585	spin_lock_irq(&q->queue_lock);
1586	/*
1587	 * queue_lock is held, rcu lock is not needed here technically.
1588	 * However, the rcu lock is still held to emphasize that the
1589	 * following path needs RCU protection and to prevent a lockdep warning.
1590	 */
1591	rcu_read_lock();
1592	blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
1593		/*
1594		 * disk_release will call pd_offline_fn to cancel bios.
1595		 * However, disk_release can't be called if someone gets
1596		 * a refcount on the device and issues bios that are
1597		 * in flight after del_gendisk.
1598		 * Cancel bios here to ensure none are in flight after
1599		 * del_gendisk.
1600		 */
1601		tg_flush_bios(blkg_to_tg(blkg));
1602	}
1603	rcu_read_unlock();
1604	spin_unlock_irq(&q->queue_lock);
1605}
1606
1607static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
1608{
1609	/* throtl is FIFO - if bios are already queued, should queue */
1610	if (tg->service_queue.nr_queued[rw])
1611		return false;
1612
1613	return tg_may_dispatch(tg, bio, NULL);
1614}
1615
1616static void tg_dispatch_in_debt(struct throtl_grp *tg, struct bio *bio, bool rw)
1617{
1618	if (!bio_flagged(bio, BIO_BPS_THROTTLED))
1619		tg->carryover_bytes[rw] -= throtl_bio_data_size(bio);
1620	tg->carryover_ios[rw]--;
1621}
1622
1623bool __blk_throtl_bio(struct bio *bio)
1624{
1625	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1626	struct blkcg_gq *blkg = bio->bi_blkg;
1627	struct throtl_qnode *qn = NULL;
1628	struct throtl_grp *tg = blkg_to_tg(blkg);
1629	struct throtl_service_queue *sq;
1630	bool rw = bio_data_dir(bio);
1631	bool throttled = false;
1632	struct throtl_data *td = tg->td;
1633
1634	rcu_read_lock();
1635	spin_lock_irq(&q->queue_lock);
1636	sq = &tg->service_queue;
1637
1638	while (true) {
1639		if (tg_within_limit(tg, bio, rw)) {
1640			/* within limits, let's charge and dispatch directly */
1641			throtl_charge_bio(tg, bio);
1642
1643			/*
1644			 * We need to trim the slice even when bios are not being
1645			 * queued, otherwise it might happen that a bio is not
1646			 * queued for a long time and the slice keeps on extending
1647			 * while trim is not called. Now if limits are reduced
1648			 * suddenly, we'd account all the IO dispatched so far at
1649			 * the new low rate and newly queued IO would get a really
1650			 * long dispatch time.
1651			 *
1652			 * So keep on trimming slice even if bio is not queued.
1653			 */
1654			throtl_trim_slice(tg, rw);
1655		} else if (bio_issue_as_root_blkg(bio)) {
1656			/*
1657			 * IOs which may cause priority inversions are
1658			 * dispatched directly, even if they're over limit.
1659			 * Debts are handled by carryover_bytes/ios while
1660			 * calculating wait time.
1661			 */
1662			tg_dispatch_in_debt(tg, bio, rw);
1663		} else {
1664			/* if above limits, break to queue */
1665			break;
1666		}
1667
1668		/*
1669		 * @bio passed through this layer without being throttled.
1670		 * Climb up the ladder.  If we're already at the top, it
1671		 * can be executed directly.
1672		 */
1673		qn = &tg->qnode_on_parent[rw];
1674		sq = sq->parent_sq;
1675		tg = sq_to_tg(sq);
1676		if (!tg) {
1677			bio_set_flag(bio, BIO_BPS_THROTTLED);
1678			goto out_unlock;
1679		}
1680	}
1681
1682	/* out-of-limit, queue to @tg */
1683	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1684		   rw == READ ? 'R' : 'W',
1685		   tg->bytes_disp[rw], bio->bi_iter.bi_size,
1686		   tg_bps_limit(tg, rw),
1687		   tg->io_disp[rw], tg_iops_limit(tg, rw),
1688		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1689
1690	td->nr_queued[rw]++;
1691	throtl_add_bio_tg(bio, qn, tg);
1692	throttled = true;
1693
1694	/*
1695	 * Update @tg's dispatch time and force schedule dispatch if @tg
1696	 * was empty before @bio.  The forced scheduling isn't likely to
1697	 * cause undue delay as @bio is likely to be dispatched directly if
1698	 * its @tg's disptime is not in the future.
1699	 */
1700	if (tg->flags & THROTL_TG_WAS_EMPTY) {
1701		tg_update_disptime(tg);
1702		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
1703	}
1704
1705out_unlock:
1706	spin_unlock_irq(&q->queue_lock);
1707
1708	rcu_read_unlock();
1709	return throttled;
1710}
1711
1712void blk_throtl_exit(struct gendisk *disk)
1713{
1714	struct request_queue *q = disk->queue;
1715
1716	if (!blk_throtl_activated(q))
1717		return;
1718
1719	del_timer_sync(&q->td->service_queue.pending_timer);
1720	throtl_shutdown_wq(q);
1721	blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
1722	kfree(q->td);
1723}
1724
1725static int __init throtl_init(void)
1726{
1727	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1728	if (!kthrotld_workqueue)
1729		panic("Failed to create kthrotld\n");
1730
1731	return blkcg_policy_register(&blkcg_policy_throtl);
1732}
1733
1734module_init(throtl_init);