Linux kernel v4.6 - block/blk-throttle.c
   1/*
   2 * Interface for controlling IO bandwidth on a request queue
   3 *
   4 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/slab.h>
   9#include <linux/blkdev.h>
  10#include <linux/bio.h>
  11#include <linux/blktrace_api.h>
  12#include <linux/blk-cgroup.h>
  13#include "blk.h"
  14
  15/* Max dispatch from a group in 1 round */
  16static int throtl_grp_quantum = 8;
  17
  18/* Total max dispatch from all groups in one round */
  19static int throtl_quantum = 32;
  20
   21/* Throttling is performed over a 100ms slice, after which the slice is renewed */
  22static unsigned long throtl_slice = HZ/10;	/* 100 ms */
  23
  24static struct blkcg_policy blkcg_policy_throtl;
  25
  26/* A workqueue to queue throttle related work */
  27static struct workqueue_struct *kthrotld_workqueue;
  28
  29/*
  30 * To implement hierarchical throttling, throtl_grps form a tree and bios
  31 * are dispatched upwards level by level until they reach the top and get
  32 * issued.  When dispatching bios from the children and local group at each
  33 * level, if the bios are dispatched into a single bio_list, there's a risk
   34 * that a local or child group which can queue many bios at once fills
   35 * up the list, starving the others.
  36 *
  37 * To avoid such starvation, dispatched bios are queued separately
  38 * according to where they came from.  When they are again dispatched to
  39 * the parent, they're popped in round-robin order so that no single source
  40 * hogs the dispatch window.
  41 *
  42 * throtl_qnode is used to keep the queued bios separated by their sources.
  43 * Bios are queued to throtl_qnode which in turn is queued to
  44 * throtl_service_queue and then dispatched in round-robin order.
  45 *
  46 * It's also used to track the reference counts on blkg's.  A qnode always
  47 * belongs to a throtl_grp and gets queued on itself or the parent, so
  48 * incrementing the reference of the associated throtl_grp when a qnode is
  49 * queued and decrementing when dequeued is enough to keep the whole blkg
  50 * tree pinned while bios are in flight.
  51 */
  52struct throtl_qnode {
  53	struct list_head	node;		/* service_queue->queued[] */
  54	struct bio_list		bios;		/* queued bios */
  55	struct throtl_grp	*tg;		/* tg this qnode belongs to */
  56};
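
/*
 * A worked example of the round-robin order described above (the group
 * names are hypothetical): children A and B are queued on the same
 * parent, A's qnode holding bios a1 and a2, B's qnode holding b1.
 * throtl_pop_queued() returns a1 and rotates A's qnode to the tail of
 * the list, so the next two pops return b1 and then a2; A cannot hog
 * the parent's dispatch window even though it queued first.
 */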
  57
  58struct throtl_service_queue {
  59	struct throtl_service_queue *parent_sq;	/* the parent service_queue */
  60
  61	/*
  62	 * Bios queued directly to this service_queue or dispatched from
  63	 * children throtl_grp's.
  64	 */
  65	struct list_head	queued[2];	/* throtl_qnode [READ/WRITE] */
  66	unsigned int		nr_queued[2];	/* number of queued bios */
  67
  68	/*
  69	 * RB tree of active children throtl_grp's, which are sorted by
  70	 * their ->disptime.
  71	 */
  72	struct rb_root		pending_tree;	/* RB tree of active tgs */
  73	struct rb_node		*first_pending;	/* first node in the tree */
  74	unsigned int		nr_pending;	/* # queued in the tree */
  75	unsigned long		first_pending_disptime;	/* disptime of the first tg */
  76	struct timer_list	pending_timer;	/* fires on first_pending_disptime */
  77};
  78
  79enum tg_state_flags {
  80	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
  81	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
  82};
  83
  84#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
  85
  86struct throtl_grp {
  87	/* must be the first member */
  88	struct blkg_policy_data pd;
  89
  90	/* active throtl group service_queue member */
  91	struct rb_node rb_node;
  92
  93	/* throtl_data this group belongs to */
  94	struct throtl_data *td;
  95
  96	/* this group's service queue */
  97	struct throtl_service_queue service_queue;
  98
  99	/*
 100	 * qnode_on_self is used when bios are directly queued to this
 101	 * throtl_grp so that local bios compete fairly with bios
 102	 * dispatched from children.  qnode_on_parent is used when bios are
 103	 * dispatched from this throtl_grp into its parent and will compete
 104	 * with the sibling qnode_on_parents and the parent's
 105	 * qnode_on_self.
 106	 */
 107	struct throtl_qnode qnode_on_self[2];
 108	struct throtl_qnode qnode_on_parent[2];
 109
 110	/*
  111	 * Dispatch time in jiffies. This is the estimated time when the
  112	 * group will unthrottle and be ready to dispatch more bios. It is
  113	 * used as the key to sort active groups in the service tree.
 114	 */
 115	unsigned long disptime;
 116
 117	unsigned int flags;
 118
 119	/* are there any throtl rules between this group and td? */
 120	bool has_rules[2];
 121
 122	/* bytes per second rate limits */
 123	uint64_t bps[2];
 124
 125	/* IOPS limits */
 126	unsigned int iops[2];
 127
  128	/* Number of bytes dispatched in current slice */
  129	uint64_t bytes_disp[2];
  130	/* Number of bios dispatched in current slice */
  131	unsigned int io_disp[2];
 132
 133	/* When did we start a new slice */
 134	unsigned long slice_start[2];
 135	unsigned long slice_end[2];
 136};
 137
 138struct throtl_data
 139{
 140	/* service tree for active throtl groups */
 141	struct throtl_service_queue service_queue;
 142
 143	struct request_queue *queue;
 144
  145	/* Total number of queued bios on READ and WRITE lists */
 146	unsigned int nr_queued[2];
 147
 148	/*
 149	 * number of total undestroyed groups
 150	 */
 151	unsigned int nr_undestroyed_grps;
 152
 153	/* Work for dispatching throttled bios */
 154	struct work_struct dispatch_work;
 155};
 156
 157static void throtl_pending_timer_fn(unsigned long arg);
 158
 159static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
 160{
 161	return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
 162}
 163
 164static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
 165{
 166	return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
 167}
 168
 169static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
 170{
 171	return pd_to_blkg(&tg->pd);
 172}
 173
 174/**
  175 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 176 * @sq: the throtl_service_queue of interest
 177 *
 178 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 179 * embedded in throtl_data, %NULL is returned.
 180 */
 181static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
 182{
 183	if (sq && sq->parent_sq)
 184		return container_of(sq, struct throtl_grp, service_queue);
 185	else
 186		return NULL;
 187}
 188
 189/**
 190 * sq_to_td - return throtl_data the specified service queue belongs to
 191 * @sq: the throtl_service_queue of interest
 192 *
  193 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 194 * Determine the associated throtl_data accordingly and return it.
 195 */
 196static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
 197{
 198	struct throtl_grp *tg = sq_to_tg(sq);
 199
 200	if (tg)
 201		return tg->td;
 202	else
 203		return container_of(sq, struct throtl_data, service_queue);
 204}
 205
 206/**
 207 * throtl_log - log debug message via blktrace
 208 * @sq: the service_queue being reported
 209 * @fmt: printf format string
 210 * @args: printf args
 211 *
 212 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 213 * throtl_grp; otherwise, just "throtl".
 214 *
 215 * TODO: this should be made a function and name formatting should happen
 216 * after testing whether blktrace is enabled.
 217 */
 218#define throtl_log(sq, fmt, args...)	do {				\
 219	struct throtl_grp *__tg = sq_to_tg((sq));			\
 220	struct throtl_data *__td = sq_to_td((sq));			\
 221									\
 222	(void)__td;							\
 223	if ((__tg)) {							\
 224		char __pbuf[128];					\
 225									\
 226		blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));	\
 227		blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
 228	} else {							\
 229		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
 230	}								\
 231} while (0)
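
/*
 * With blktrace running on the queue, these messages appear with the
 * blkg's cgroup path interpolated.  For example, the dispatch logging
 * in throtl_pending_timer_fn() below would surface as (path and counts
 * are illustrative):
 *
 *	throtl /grp1 dispatch nr_queued=3 read=2 write=1
 */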
 232
 233static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 234{
 235	INIT_LIST_HEAD(&qn->node);
 236	bio_list_init(&qn->bios);
 237	qn->tg = tg;
 238}
 239
 240/**
 241 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 242 * @bio: bio being added
 243 * @qn: qnode to add bio to
 244 * @queued: the service_queue->queued[] list @qn belongs to
 245 *
 246 * Add @bio to @qn and put @qn on @queued if it's not already on.
 247 * @qn->tg's reference count is bumped when @qn is activated.  See the
 248 * comment on top of throtl_qnode definition for details.
 249 */
 250static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
 251				 struct list_head *queued)
 252{
 253	bio_list_add(&qn->bios, bio);
 254	if (list_empty(&qn->node)) {
 255		list_add_tail(&qn->node, queued);
 256		blkg_get(tg_to_blkg(qn->tg));
 257	}
 258}
 259
 260/**
 261 * throtl_peek_queued - peek the first bio on a qnode list
 262 * @queued: the qnode list to peek
 263 */
 264static struct bio *throtl_peek_queued(struct list_head *queued)
 265{
 266	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
 267	struct bio *bio;
 268
 269	if (list_empty(queued))
 270		return NULL;
 271
 272	bio = bio_list_peek(&qn->bios);
 273	WARN_ON_ONCE(!bio);
 274	return bio;
 275}
 276
 277/**
  278 * throtl_pop_queued - pop the first bio from a qnode list
 279 * @queued: the qnode list to pop a bio from
 280 * @tg_to_put: optional out argument for throtl_grp to put
 281 *
 282 * Pop the first bio from the qnode list @queued.  After popping, the first
 283 * qnode is removed from @queued if empty or moved to the end of @queued so
 284 * that the popping order is round-robin.
 285 *
 286 * When the first qnode is removed, its associated throtl_grp should be put
 287 * too.  If @tg_to_put is NULL, this function automatically puts it;
 288 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 289 * responsible for putting it.
 290 */
 291static struct bio *throtl_pop_queued(struct list_head *queued,
 292				     struct throtl_grp **tg_to_put)
 293{
 294	struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
 295	struct bio *bio;
 296
 297	if (list_empty(queued))
 298		return NULL;
 299
 300	bio = bio_list_pop(&qn->bios);
 301	WARN_ON_ONCE(!bio);
 302
 303	if (bio_list_empty(&qn->bios)) {
 304		list_del_init(&qn->node);
 305		if (tg_to_put)
 306			*tg_to_put = qn->tg;
 307		else
 308			blkg_put(tg_to_blkg(qn->tg));
 309	} else {
 310		list_move_tail(&qn->node, queued);
 311	}
 312
 313	return bio;
 314}
 315
 316/* init a service_queue, assumes the caller zeroed it */
 317static void throtl_service_queue_init(struct throtl_service_queue *sq)
 318{
 319	INIT_LIST_HEAD(&sq->queued[0]);
 320	INIT_LIST_HEAD(&sq->queued[1]);
 321	sq->pending_tree = RB_ROOT;
 322	setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
 323		    (unsigned long)sq);
 324}
 325
 326static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
 327{
 328	struct throtl_grp *tg;
 329	int rw;
 330
 331	tg = kzalloc_node(sizeof(*tg), gfp, node);
 332	if (!tg)
 333		return NULL;
 334
 335	throtl_service_queue_init(&tg->service_queue);
 336
 337	for (rw = READ; rw <= WRITE; rw++) {
 338		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
 339		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
 340	}
 341
 342	RB_CLEAR_NODE(&tg->rb_node);
 343	tg->bps[READ] = -1;
 344	tg->bps[WRITE] = -1;
 345	tg->iops[READ] = -1;
 346	tg->iops[WRITE] = -1;
 347
 348	return &tg->pd;
 349}
 350
 351static void throtl_pd_init(struct blkg_policy_data *pd)
 352{
 353	struct throtl_grp *tg = pd_to_tg(pd);
 354	struct blkcg_gq *blkg = tg_to_blkg(tg);
 355	struct throtl_data *td = blkg->q->td;
 356	struct throtl_service_queue *sq = &tg->service_queue;
 357
 358	/*
 359	 * If on the default hierarchy, we switch to properly hierarchical
 360	 * behavior where limits on a given throtl_grp are applied to the
 361	 * whole subtree rather than just the group itself.  e.g. If 16M
 362	 * read_bps limit is set on the root group, the whole system can't
 363	 * exceed 16M for the device.
 364	 *
 365	 * If not on the default hierarchy, the broken flat hierarchy
 366	 * behavior is retained where all throtl_grps are treated as if
 367	 * they're all separate root groups right below throtl_data.
 368	 * Limits of a group don't interact with limits of other groups
 369	 * regardless of the position of the group in the hierarchy.
 370	 */
 371	sq->parent_sq = &td->service_queue;
 372	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
 373		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 374	tg->td = td;
 375}
 376
 377/*
 378 * Set has_rules[] if @tg or any of its parents have limits configured.
 379 * This doesn't require walking up to the top of the hierarchy as the
 380 * parent's has_rules[] is guaranteed to be correct.
 381 */
 382static void tg_update_has_rules(struct throtl_grp *tg)
 383{
 384	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
 385	int rw;
 386
 387	for (rw = READ; rw <= WRITE; rw++)
 388		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
 389				    (tg->bps[rw] != -1 || tg->iops[rw] != -1);
 390}
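
/*
 * For example, in a hypothetical hierarchy where a parent group
 * carries a read bps limit and its child has nothing configured, the
 * child still ends up with has_rules[READ] set because the parent's
 * has_rules[READ] is true: the child's bios cannot bypass blk-throttle
 * and are walked up through the throttled parent.
 */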
 391
 392static void throtl_pd_online(struct blkg_policy_data *pd)
 393{
 394	/*
  395	 * We don't want new groups to escape the limits of their ancestors.
 396	 * Update has_rules[] after a new group is brought online.
 397	 */
 398	tg_update_has_rules(pd_to_tg(pd));
 399}
 400
 401static void throtl_pd_free(struct blkg_policy_data *pd)
 402{
 403	struct throtl_grp *tg = pd_to_tg(pd);
 404
 405	del_timer_sync(&tg->service_queue.pending_timer);
 406	kfree(tg);
 407}
 408
 409static struct throtl_grp *
 410throtl_rb_first(struct throtl_service_queue *parent_sq)
 411{
 412	/* Service tree is empty */
 413	if (!parent_sq->nr_pending)
 414		return NULL;
 415
 416	if (!parent_sq->first_pending)
 417		parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
 418
 419	if (parent_sq->first_pending)
 420		return rb_entry_tg(parent_sq->first_pending);
 421
 422	return NULL;
 423}
 424
 425static void rb_erase_init(struct rb_node *n, struct rb_root *root)
 426{
 427	rb_erase(n, root);
 428	RB_CLEAR_NODE(n);
 429}
 430
 431static void throtl_rb_erase(struct rb_node *n,
 432			    struct throtl_service_queue *parent_sq)
 433{
 434	if (parent_sq->first_pending == n)
 435		parent_sq->first_pending = NULL;
 436	rb_erase_init(n, &parent_sq->pending_tree);
 437	--parent_sq->nr_pending;
 438}
 439
 440static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
 441{
 442	struct throtl_grp *tg;
 443
 444	tg = throtl_rb_first(parent_sq);
 445	if (!tg)
 446		return;
 447
 448	parent_sq->first_pending_disptime = tg->disptime;
 449}
 450
 451static void tg_service_queue_add(struct throtl_grp *tg)
 452{
 453	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
 454	struct rb_node **node = &parent_sq->pending_tree.rb_node;
 455	struct rb_node *parent = NULL;
 456	struct throtl_grp *__tg;
 457	unsigned long key = tg->disptime;
 458	int left = 1;
 459
 460	while (*node != NULL) {
 461		parent = *node;
 462		__tg = rb_entry_tg(parent);
 463
 464		if (time_before(key, __tg->disptime))
 465			node = &parent->rb_left;
 466		else {
 467			node = &parent->rb_right;
 468			left = 0;
 469		}
 470	}
 471
 472	if (left)
 473		parent_sq->first_pending = &tg->rb_node;
 474
 475	rb_link_node(&tg->rb_node, parent, node);
 476	rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
 477}
 478
 479static void __throtl_enqueue_tg(struct throtl_grp *tg)
 480{
 481	tg_service_queue_add(tg);
 482	tg->flags |= THROTL_TG_PENDING;
 483	tg->service_queue.parent_sq->nr_pending++;
 484}
 485
 486static void throtl_enqueue_tg(struct throtl_grp *tg)
 487{
 488	if (!(tg->flags & THROTL_TG_PENDING))
 489		__throtl_enqueue_tg(tg);
 490}
 491
 492static void __throtl_dequeue_tg(struct throtl_grp *tg)
 493{
 494	throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
 495	tg->flags &= ~THROTL_TG_PENDING;
 496}
 497
 498static void throtl_dequeue_tg(struct throtl_grp *tg)
 499{
 500	if (tg->flags & THROTL_TG_PENDING)
 501		__throtl_dequeue_tg(tg);
 502}
 503
 504/* Call with queue lock held */
 505static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 506					  unsigned long expires)
 507{
 508	mod_timer(&sq->pending_timer, expires);
 509	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
 510		   expires - jiffies, jiffies);
 511}
 512
 513/**
 514 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 515 * @sq: the service_queue to schedule dispatch for
 516 * @force: force scheduling
 517 *
 518 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 519 * dispatch time of the first pending child.  Returns %true if either timer
 520 * is armed or there's no pending child left.  %false if the current
 521 * dispatch window is still open and the caller should continue
 522 * dispatching.
 523 *
 524 * If @force is %true, the dispatch timer is always scheduled and this
 525 * function is guaranteed to return %true.  This is to be used when the
 526 * caller can't dispatch itself and needs to invoke pending_timer
  527 * unconditionally.  Note that forced scheduling is likely to induce a short
 528 * delay before dispatch starts even if @sq->first_pending_disptime is not
 529 * in the future and thus shouldn't be used in hot paths.
 530 */
 531static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
 532					  bool force)
 533{
 534	/* any pending children left? */
 535	if (!sq->nr_pending)
 536		return true;
 537
 538	update_min_dispatch_time(sq);
 539
 540	/* is the next dispatch time in the future? */
 541	if (force || time_after(sq->first_pending_disptime, jiffies)) {
 542		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
 543		return true;
 544	}
 545
 546	/* tell the caller to continue dispatching */
 547	return false;
 548}
 549
 550static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
 551		bool rw, unsigned long start)
 552{
 553	tg->bytes_disp[rw] = 0;
 554	tg->io_disp[rw] = 0;
 555
 556	/*
 557	 * Previous slice has expired. We must have trimmed it after last
 558	 * bio dispatch. That means since start of last slice, we never used
 559	 * that bandwidth. Do try to make use of that bandwidth while giving
 560	 * credit.
 561	 */
 562	if (time_after_eq(start, tg->slice_start[rw]))
 563		tg->slice_start[rw] = start;
 564
 565	tg->slice_end[rw] = jiffies + throtl_slice;
 566	throtl_log(&tg->service_queue,
 567		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
 568		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 569		   tg->slice_end[rw], jiffies);
 570}
 571
 572static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
 573{
 574	tg->bytes_disp[rw] = 0;
 575	tg->io_disp[rw] = 0;
 576	tg->slice_start[rw] = jiffies;
 577	tg->slice_end[rw] = jiffies + throtl_slice;
 578	throtl_log(&tg->service_queue,
 579		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
 580		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 581		   tg->slice_end[rw], jiffies);
 582}
 583
 584static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
 585					unsigned long jiffy_end)
 586{
 587	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 588}
 589
 590static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
 591				       unsigned long jiffy_end)
 592{
 593	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 594	throtl_log(&tg->service_queue,
 595		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
 596		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 597		   tg->slice_end[rw], jiffies);
 598}
 599
 600/* Determine if previously allocated or extended slice is complete or not */
 601static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
 602{
 603	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
 604		return false;
 605
  606	return true;
 607}
 608
 609/* Trim the used slices and adjust slice start accordingly */
 610static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 611{
 612	unsigned long nr_slices, time_elapsed, io_trim;
 613	u64 bytes_trim, tmp;
 614
 615	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
 616
 617	/*
  618	 * If bps is unlimited (-1), then the time slice doesn't get
  619	 * renewed. Don't try to trim an already expired slice. A new
 620	 * slice will start when appropriate.
 621	 */
 622	if (throtl_slice_used(tg, rw))
 623		return;
 624
 625	/*
  626	 * A bio has been dispatched. Also adjust slice_end. It might happen
  627	 * that the cgroup limit was initially very low, resulting in a high
  628	 * slice_end, but the limit was later bumped up and the bio was
  629	 * dispatched sooner; then we need to reduce slice_end. A bogus high
  630	 * slice_end is bad because it does not allow a new slice to start.
 631	 */
 632
 633	throtl_set_slice_end(tg, rw, jiffies + throtl_slice);
 634
 635	time_elapsed = jiffies - tg->slice_start[rw];
 636
 637	nr_slices = time_elapsed / throtl_slice;
 638
 639	if (!nr_slices)
 640		return;
 641	tmp = tg->bps[rw] * throtl_slice * nr_slices;
 642	do_div(tmp, HZ);
 643	bytes_trim = tmp;
 644
 645	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
 646
 647	if (!bytes_trim && !io_trim)
 648		return;
 649
 650	if (tg->bytes_disp[rw] >= bytes_trim)
 651		tg->bytes_disp[rw] -= bytes_trim;
 652	else
 653		tg->bytes_disp[rw] = 0;
 654
 655	if (tg->io_disp[rw] >= io_trim)
 656		tg->io_disp[rw] -= io_trim;
 657	else
 658		tg->io_disp[rw] = 0;
 659
 660	tg->slice_start[rw] += nr_slices * throtl_slice;
 661
 662	throtl_log(&tg->service_queue,
 663		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
 664		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
 665		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
 666}
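
/*
 * A numeric sketch of the trim above, assuming HZ=1000 and a group
 * limited to bps=1048576 (1MB/s): throtl_slice is then 100 jiffies, so
 * each fully elapsed slice trims bps * throtl_slice / HZ = 104857
 * bytes (and iops * throtl_slice / HZ ios) off the dispatched
 * counters while slice_start advances by 100 jiffies, keeping the
 * accounting window anchored to the most recent 100ms.
 */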
 667
 668static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
 669				  unsigned long *wait)
 670{
 671	bool rw = bio_data_dir(bio);
 672	unsigned int io_allowed;
 673	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 674	u64 tmp;
 675
 676	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 677
 678	/* Slice has just started. Consider one slice interval */
 679	if (!jiffy_elapsed)
 680		jiffy_elapsed_rnd = throtl_slice;
 681
 682	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 683
 684	/*
  685	 * jiffy_elapsed_rnd should not be a big value: since the minimum
  686	 * iops is 1, jiffy_elapsed should be at most the equivalent of 1
  687	 * second, as we will allow dispatch after 1 second and after that
  688	 * the slice should have been trimmed.
 689	 */
 690
 691	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
 692	do_div(tmp, HZ);
 693
 694	if (tmp > UINT_MAX)
 695		io_allowed = UINT_MAX;
 696	else
 697		io_allowed = tmp;
 698
 699	if (tg->io_disp[rw] + 1 <= io_allowed) {
 700		if (wait)
 701			*wait = 0;
 702		return true;
 703	}
 704
 705	/* Calc approx time to dispatch */
 706	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
 707
 708	if (jiffy_wait > jiffy_elapsed)
 709		jiffy_wait = jiffy_wait - jiffy_elapsed;
 710	else
 711		jiffy_wait = 1;
 712
 713	if (wait)
 714		*wait = jiffy_wait;
  715	return false;
 716}
 717
 718static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 719				 unsigned long *wait)
 720{
 721	bool rw = bio_data_dir(bio);
 722	u64 bytes_allowed, extra_bytes, tmp;
 723	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 724
 725	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 726
 727	/* Slice has just started. Consider one slice interval */
 728	if (!jiffy_elapsed)
 729		jiffy_elapsed_rnd = throtl_slice;
 730
 731	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 732
 733	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
 734	do_div(tmp, HZ);
 735	bytes_allowed = tmp;
 736
 737	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
 738		if (wait)
 739			*wait = 0;
 740		return true;
 741	}
 742
 743	/* Calc approx time to dispatch */
 744	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
 745	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
 746
 747	if (!jiffy_wait)
 748		jiffy_wait = 1;
 749
 750	/*
 751	 * This wait time is without taking into consideration the rounding
 752	 * up we did. Add that time also.
 753	 */
 754	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
 755	if (wait)
 756		*wait = jiffy_wait;
  757	return false;
 758}
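
/*
 * A worked example of the bps check, with illustrative numbers: take
 * HZ=1000, bps=1048576 (1MB/s) and a fresh slice, so jiffy_elapsed_rnd
 * is 100 and bytes_allowed = 1048576 * 100 / 1000 = 104857.  A 256KB
 * (262144-byte) bio exceeds that: extra_bytes = 157287 and the caller
 * is told to wait div64_u64(157287 * 1000, 1048576) = 150 jiffies,
 * the time the excess bytes need to fit under the configured rate.
 */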
 759
 760/*
 761 * Returns whether one can dispatch a bio or not. Also returns approx number
  762 * of jiffies to wait before this bio is within the IO rate and can be dispatched
 763 */
 764static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 765			    unsigned long *wait)
 766{
 767	bool rw = bio_data_dir(bio);
 768	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
 769
 770	/*
  771	 * Currently the whole state machine of the group depends on the
  772	 * first bio queued in the group's bio list. So one should not be
  773	 * calling this function with a different bio if there are other
  774	 * bios queued.
 775	 */
 776	BUG_ON(tg->service_queue.nr_queued[rw] &&
 777	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
 778
 779	/* If tg->bps = -1, then BW is unlimited */
 780	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
 781		if (wait)
 782			*wait = 0;
 783		return true;
 784	}
 785
 786	/*
 787	 * If previous slice expired, start a new one otherwise renew/extend
 788	 * existing slice to make sure it is at least throtl_slice interval
 789	 * long since now.
 790	 */
 791	if (throtl_slice_used(tg, rw))
 792		throtl_start_new_slice(tg, rw);
 793	else {
 794		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
 795			throtl_extend_slice(tg, rw, jiffies + throtl_slice);
 796	}
 797
 798	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
 799	    tg_with_in_iops_limit(tg, bio, &iops_wait)) {
 800		if (wait)
 801			*wait = 0;
  802		return true;
 803	}
 804
 805	max_wait = max(bps_wait, iops_wait);
 806
 807	if (wait)
 808		*wait = max_wait;
 809
 810	if (time_before(tg->slice_end[rw], jiffies + max_wait))
 811		throtl_extend_slice(tg, rw, jiffies + max_wait);
 812
  813	return false;
 814}
 815
 816static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 817{
 818	bool rw = bio_data_dir(bio);
 819
 820	/* Charge the bio to the group */
 821	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
 822	tg->io_disp[rw]++;
 823
 824	/*
  825	 * REQ_THROTTLED is used to prevent the same bio from being
  826	 * throttled more than once, as a throttled bio will go through
  827	 * blk-throtl a second time when it eventually gets issued.  Set it
  828	 * when a bio is being charged to a tg.
 829	 */
 830	if (!(bio->bi_rw & REQ_THROTTLED))
 831		bio->bi_rw |= REQ_THROTTLED;
 832}
 833
 834/**
 835 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 836 * @bio: bio to add
 837 * @qn: qnode to use
 838 * @tg: the target throtl_grp
 839 *
 840 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 841 * tg->qnode_on_self[] is used.
 842 */
 843static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
 844			      struct throtl_grp *tg)
 845{
 846	struct throtl_service_queue *sq = &tg->service_queue;
 847	bool rw = bio_data_dir(bio);
 848
 849	if (!qn)
 850		qn = &tg->qnode_on_self[rw];
 851
 852	/*
 853	 * If @tg doesn't currently have any bios queued in the same
 854	 * direction, queueing @bio can change when @tg should be
 855	 * dispatched.  Mark that @tg was empty.  This is automatically
  856	 * cleared on the next tg_update_disptime().
 857	 */
 858	if (!sq->nr_queued[rw])
 859		tg->flags |= THROTL_TG_WAS_EMPTY;
 860
 861	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
 862
 863	sq->nr_queued[rw]++;
 864	throtl_enqueue_tg(tg);
 865}
 866
 867static void tg_update_disptime(struct throtl_grp *tg)
 868{
 869	struct throtl_service_queue *sq = &tg->service_queue;
 870	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
 871	struct bio *bio;
 872
 873	if ((bio = throtl_peek_queued(&sq->queued[READ])))
 874		tg_may_dispatch(tg, bio, &read_wait);
 875
 876	if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
 877		tg_may_dispatch(tg, bio, &write_wait);
 878
 879	min_wait = min(read_wait, write_wait);
 880	disptime = jiffies + min_wait;
 881
 882	/* Update dispatch time */
 883	throtl_dequeue_tg(tg);
 884	tg->disptime = disptime;
 885	throtl_enqueue_tg(tg);
 886
 887	/* see throtl_add_bio_tg() */
 888	tg->flags &= ~THROTL_TG_WAS_EMPTY;
 889}
 890
 891static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
 892					struct throtl_grp *parent_tg, bool rw)
 893{
 894	if (throtl_slice_used(parent_tg, rw)) {
 895		throtl_start_new_slice_with_credit(parent_tg, rw,
 896				child_tg->slice_start[rw]);
 897	}
 898
 899}
 900
 901static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
 902{
 903	struct throtl_service_queue *sq = &tg->service_queue;
 904	struct throtl_service_queue *parent_sq = sq->parent_sq;
 905	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
 906	struct throtl_grp *tg_to_put = NULL;
 907	struct bio *bio;
 908
 909	/*
 910	 * @bio is being transferred from @tg to @parent_sq.  Popping a bio
 911	 * from @tg may put its reference and @parent_sq might end up
 912	 * getting released prematurely.  Remember the tg to put and put it
 913	 * after @bio is transferred to @parent_sq.
 914	 */
 915	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
 916	sq->nr_queued[rw]--;
 917
 918	throtl_charge_bio(tg, bio);
 919
 920	/*
 921	 * If our parent is another tg, we just need to transfer @bio to
 922	 * the parent using throtl_add_bio_tg().  If our parent is
 923	 * @td->service_queue, @bio is ready to be issued.  Put it on its
 924	 * bio_lists[] and decrease total number queued.  The caller is
 925	 * responsible for issuing these bios.
 926	 */
 927	if (parent_tg) {
 928		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
 929		start_parent_slice_with_credit(tg, parent_tg, rw);
 930	} else {
 931		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
 932				     &parent_sq->queued[rw]);
 933		BUG_ON(tg->td->nr_queued[rw] <= 0);
 934		tg->td->nr_queued[rw]--;
 935	}
 936
 937	throtl_trim_slice(tg, rw);
 938
 939	if (tg_to_put)
 940		blkg_put(tg_to_blkg(tg_to_put));
 941}
 942
 943static int throtl_dispatch_tg(struct throtl_grp *tg)
 944{
 945	struct throtl_service_queue *sq = &tg->service_queue;
 946	unsigned int nr_reads = 0, nr_writes = 0;
 947	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
 948	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
 949	struct bio *bio;
 950
 951	/* Try to dispatch 75% READS and 25% WRITES */
 952
 953	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
 954	       tg_may_dispatch(tg, bio, NULL)) {
 955
 956		tg_dispatch_one_bio(tg, bio_data_dir(bio));
 957		nr_reads++;
 958
 959		if (nr_reads >= max_nr_reads)
 960			break;
 961	}
 962
 963	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
 964	       tg_may_dispatch(tg, bio, NULL)) {
 965
 966		tg_dispatch_one_bio(tg, bio_data_dir(bio));
 967		nr_writes++;
 968
 969		if (nr_writes >= max_nr_writes)
 970			break;
 971	}
 972
 973	return nr_reads + nr_writes;
 974}
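
/*
 * With the defaults above, throtl_grp_quantum = 8 gives max_nr_reads =
 * 8 * 3 / 4 = 6 and max_nr_writes = 8 - 6 = 2 per group and round,
 * while throtl_select_dispatch() below ends a round once a total of
 * throtl_quantum = 32 bios has been moved.
 */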
 975
 976static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
 977{
 978	unsigned int nr_disp = 0;
 979
 980	while (1) {
 981		struct throtl_grp *tg = throtl_rb_first(parent_sq);
 982		struct throtl_service_queue *sq = &tg->service_queue;
 983
 984		if (!tg)
 985			break;
 986
 987		if (time_before(jiffies, tg->disptime))
 988			break;
 989
 990		throtl_dequeue_tg(tg);
 991
 992		nr_disp += throtl_dispatch_tg(tg);
 993
 994		if (sq->nr_queued[0] || sq->nr_queued[1])
 995			tg_update_disptime(tg);
 996
 997		if (nr_disp >= throtl_quantum)
 998			break;
 999	}
1000
1001	return nr_disp;
1002}
1003
1004/**
1005 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1006 * @arg: the throtl_service_queue being serviced
1007 *
 1008 * This timer is armed when a child throtl_grp with active bios becomes
 1009 * pending and queued on the service_queue's pending_tree and expires when
 1010 * the first child throtl_grp should be dispatched.  This function
 1011 * dispatches bios from the children throtl_grps to the parent
 1012 * service_queue.
 1013 *
 1014 * If the parent's parent is another throtl_grp, dispatching is propagated
 1015 * by either arming its pending_timer or repeating dispatch directly.  If
 1016 * the top-level service_tree is reached, throtl_data->dispatch_work is
 1017 * kicked so that the ready bios are issued.
1018 */
1019static void throtl_pending_timer_fn(unsigned long arg)
1020{
1021	struct throtl_service_queue *sq = (void *)arg;
1022	struct throtl_grp *tg = sq_to_tg(sq);
1023	struct throtl_data *td = sq_to_td(sq);
1024	struct request_queue *q = td->queue;
1025	struct throtl_service_queue *parent_sq;
1026	bool dispatched;
1027	int ret;
1028
1029	spin_lock_irq(q->queue_lock);
1030again:
1031	parent_sq = sq->parent_sq;
1032	dispatched = false;
1033
1034	while (true) {
1035		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1036			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
1037			   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1038
1039		ret = throtl_select_dispatch(sq);
1040		if (ret) {
1041			throtl_log(sq, "bios disp=%u", ret);
1042			dispatched = true;
1043		}
1044
1045		if (throtl_schedule_next_dispatch(sq, false))
1046			break;
1047
 1048		/* this dispatch window is still open, relax and repeat */
1049		spin_unlock_irq(q->queue_lock);
1050		cpu_relax();
1051		spin_lock_irq(q->queue_lock);
1052	}
1053
1054	if (!dispatched)
1055		goto out_unlock;
1056
1057	if (parent_sq) {
 1058		/* @parent_sq is another throtl_grp, propagate dispatch */
1059		if (tg->flags & THROTL_TG_WAS_EMPTY) {
1060			tg_update_disptime(tg);
1061			if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1062				/* window is already open, repeat dispatching */
1063				sq = parent_sq;
1064				tg = sq_to_tg(sq);
1065				goto again;
1066			}
1067		}
1068	} else {
 1069		/* reached the top-level, queue issuing */
1070		queue_work(kthrotld_workqueue, &td->dispatch_work);
1071	}
1072out_unlock:
1073	spin_unlock_irq(q->queue_lock);
1074}
1075
1076/**
1077 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1078 * @work: work item being executed
1079 *
 1080 * This function is queued for execution when bios reach the bio_lists[]
 1081 * of throtl_data->service_queue.  Those bios are ready and are issued
 1082 * by this function.
1083 */
1084static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1085{
1086	struct throtl_data *td = container_of(work, struct throtl_data,
1087					      dispatch_work);
1088	struct throtl_service_queue *td_sq = &td->service_queue;
1089	struct request_queue *q = td->queue;
1090	struct bio_list bio_list_on_stack;
1091	struct bio *bio;
1092	struct blk_plug plug;
1093	int rw;
1094
1095	bio_list_init(&bio_list_on_stack);
1096
1097	spin_lock_irq(q->queue_lock);
1098	for (rw = READ; rw <= WRITE; rw++)
1099		while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1100			bio_list_add(&bio_list_on_stack, bio);
1101	spin_unlock_irq(q->queue_lock);
1102
1103	if (!bio_list_empty(&bio_list_on_stack)) {
1104		blk_start_plug(&plug);
 1105		while ((bio = bio_list_pop(&bio_list_on_stack)))
1106			generic_make_request(bio);
1107		blk_finish_plug(&plug);
1108	}
1109}
1110
1111static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1112			      int off)
1113{
1114	struct throtl_grp *tg = pd_to_tg(pd);
1115	u64 v = *(u64 *)((void *)tg + off);
1116
1117	if (v == -1)
1118		return 0;
1119	return __blkg_prfill_u64(sf, pd, v);
1120}
1121
1122static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1123			       int off)
1124{
1125	struct throtl_grp *tg = pd_to_tg(pd);
1126	unsigned int v = *(unsigned int *)((void *)tg + off);
1127
1128	if (v == -1)
1129		return 0;
1130	return __blkg_prfill_u64(sf, pd, v);
1131}
1132
1133static int tg_print_conf_u64(struct seq_file *sf, void *v)
1134{
1135	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1136			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1137	return 0;
1138}
1139
1140static int tg_print_conf_uint(struct seq_file *sf, void *v)
1141{
1142	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1143			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1144	return 0;
1145}
1146
1147static void tg_conf_updated(struct throtl_grp *tg)
1148{
1149	struct throtl_service_queue *sq = &tg->service_queue;
1150	struct cgroup_subsys_state *pos_css;
1151	struct blkcg_gq *blkg;
1152
1153	throtl_log(&tg->service_queue,
1154		   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1155		   tg->bps[READ], tg->bps[WRITE],
1156		   tg->iops[READ], tg->iops[WRITE]);
1157
1158	/*
1159	 * Update has_rules[] flags for the updated tg's subtree.  A tg is
1160	 * considered to have rules if either the tg itself or any of its
1161	 * ancestors has rules.  This identifies groups without any
1162	 * restrictions in the whole hierarchy and allows them to bypass
1163	 * blk-throttle.
1164	 */
1165	blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
1166		tg_update_has_rules(blkg_to_tg(blkg));
1167
1168	/*
1169	 * We're already holding queue_lock and know @tg is valid.  Let's
1170	 * apply the new config directly.
1171	 *
 1172	 * Restart the slices for both READ and WRITE. It might happen
 1173	 * that a group's limits are dropped suddenly and we don't want to
 1174	 * account recently dispatched IO at the new low rate.
1175	 */
1176	throtl_start_new_slice(tg, 0);
1177	throtl_start_new_slice(tg, 1);
1178
1179	if (tg->flags & THROTL_TG_PENDING) {
1180		tg_update_disptime(tg);
1181		throtl_schedule_next_dispatch(sq->parent_sq, true);
1182	}
1183}
1184
1185static ssize_t tg_set_conf(struct kernfs_open_file *of,
1186			   char *buf, size_t nbytes, loff_t off, bool is_u64)
1187{
1188	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1189	struct blkg_conf_ctx ctx;
1190	struct throtl_grp *tg;
1191	int ret;
1192	u64 v;
1193
1194	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1195	if (ret)
1196		return ret;
1197
1198	ret = -EINVAL;
1199	if (sscanf(ctx.body, "%llu", &v) != 1)
1200		goto out_finish;
1201	if (!v)
1202		v = -1;
1203
1204	tg = blkg_to_tg(ctx.blkg);
1205
1206	if (is_u64)
1207		*(u64 *)((void *)tg + of_cft(of)->private) = v;
1208	else
1209		*(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1210
1211	tg_conf_updated(tg);
1212	ret = 0;
1213out_finish:
1214	blkg_conf_finish(&ctx);
1215	return ret ?: nbytes;
1216}
1217
1218static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1219			       char *buf, size_t nbytes, loff_t off)
1220{
1221	return tg_set_conf(of, buf, nbytes, off, true);
1222}
1223
1224static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1225				char *buf, size_t nbytes, loff_t off)
1226{
1227	return tg_set_conf(of, buf, nbytes, off, false);
1228}
1229
1230static struct cftype throtl_legacy_files[] = {
1231	{
1232		.name = "throttle.read_bps_device",
1233		.private = offsetof(struct throtl_grp, bps[READ]),
1234		.seq_show = tg_print_conf_u64,
1235		.write = tg_set_conf_u64,
1236	},
1237	{
1238		.name = "throttle.write_bps_device",
1239		.private = offsetof(struct throtl_grp, bps[WRITE]),
1240		.seq_show = tg_print_conf_u64,
1241		.write = tg_set_conf_u64,
1242	},
1243	{
1244		.name = "throttle.read_iops_device",
1245		.private = offsetof(struct throtl_grp, iops[READ]),
1246		.seq_show = tg_print_conf_uint,
1247		.write = tg_set_conf_uint,
1248	},
1249	{
1250		.name = "throttle.write_iops_device",
1251		.private = offsetof(struct throtl_grp, iops[WRITE]),
1252		.seq_show = tg_print_conf_uint,
1253		.write = tg_set_conf_uint,
1254	},
1255	{
1256		.name = "throttle.io_service_bytes",
1257		.private = (unsigned long)&blkcg_policy_throtl,
1258		.seq_show = blkg_print_stat_bytes,
1259	},
1260	{
1261		.name = "throttle.io_serviced",
1262		.private = (unsigned long)&blkcg_policy_throtl,
1263		.seq_show = blkg_print_stat_ios,
1264	},
1265	{ }	/* terminate */
1266};
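
/*
 * A minimal userspace sketch of driving the legacy files above; the
 * cgroup path and device numbers are illustrative and assume the blkio
 * controller is mounted at /sys/fs/cgroup/blkio.  Each file takes
 * "MAJ:MIN value" per line and writing 0 removes the limit; the write
 * below caps reads from device 8:16 at 1MB/s for cgroup "g1":
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/fs/cgroup/blkio/g1/"
 *			      "blkio.throttle.read_bps_device", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		dprintf(fd, "8:16 1048576\n");
 *		close(fd);
 *		return 0;
 *	}
 */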
1267
1268static u64 tg_prfill_max(struct seq_file *sf, struct blkg_policy_data *pd,
1269			 int off)
1270{
1271	struct throtl_grp *tg = pd_to_tg(pd);
1272	const char *dname = blkg_dev_name(pd->blkg);
1273	char bufs[4][21] = { "max", "max", "max", "max" };
1274
1275	if (!dname)
1276		return 0;
1277	if (tg->bps[READ] == -1 && tg->bps[WRITE] == -1 &&
1278	    tg->iops[READ] == -1 && tg->iops[WRITE] == -1)
1279		return 0;
1280
1281	if (tg->bps[READ] != -1)
1282		snprintf(bufs[0], sizeof(bufs[0]), "%llu", tg->bps[READ]);
1283	if (tg->bps[WRITE] != -1)
1284		snprintf(bufs[1], sizeof(bufs[1]), "%llu", tg->bps[WRITE]);
1285	if (tg->iops[READ] != -1)
1286		snprintf(bufs[2], sizeof(bufs[2]), "%u", tg->iops[READ]);
1287	if (tg->iops[WRITE] != -1)
1288		snprintf(bufs[3], sizeof(bufs[3]), "%u", tg->iops[WRITE]);
1289
1290	seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s\n",
1291		   dname, bufs[0], bufs[1], bufs[2], bufs[3]);
1292	return 0;
1293}
1294
1295static int tg_print_max(struct seq_file *sf, void *v)
1296{
1297	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_max,
1298			  &blkcg_policy_throtl, seq_cft(sf)->private, false);
1299	return 0;
1300}
1301
1302static ssize_t tg_set_max(struct kernfs_open_file *of,
1303			  char *buf, size_t nbytes, loff_t off)
1304{
1305	struct blkcg *blkcg = css_to_blkcg(of_css(of));
1306	struct blkg_conf_ctx ctx;
1307	struct throtl_grp *tg;
1308	u64 v[4];
1309	int ret;
1310
1311	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1312	if (ret)
1313		return ret;
1314
1315	tg = blkg_to_tg(ctx.blkg);
1316
1317	v[0] = tg->bps[READ];
1318	v[1] = tg->bps[WRITE];
1319	v[2] = tg->iops[READ];
1320	v[3] = tg->iops[WRITE];
1321
1322	while (true) {
1323		char tok[27];	/* wiops=18446744073709551616 */
1324		char *p;
1325		u64 val = -1;
1326		int len;
1327
1328		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1329			break;
1330		if (tok[0] == '\0')
1331			break;
1332		ctx.body += len;
1333
1334		ret = -EINVAL;
1335		p = tok;
1336		strsep(&p, "=");
1337		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1338			goto out_finish;
1339
1340		ret = -ERANGE;
1341		if (!val)
1342			goto out_finish;
1343
1344		ret = -EINVAL;
1345		if (!strcmp(tok, "rbps"))
1346			v[0] = val;
1347		else if (!strcmp(tok, "wbps"))
1348			v[1] = val;
1349		else if (!strcmp(tok, "riops"))
1350			v[2] = min_t(u64, val, UINT_MAX);
1351		else if (!strcmp(tok, "wiops"))
1352			v[3] = min_t(u64, val, UINT_MAX);
1353		else
1354			goto out_finish;
1355	}
1356
1357	tg->bps[READ] = v[0];
1358	tg->bps[WRITE] = v[1];
1359	tg->iops[READ] = v[2];
1360	tg->iops[WRITE] = v[3];
1361
1362	tg_conf_updated(tg);
1363	ret = 0;
1364out_finish:
1365	blkg_conf_finish(&ctx);
1366	return ret ?: nbytes;
1367}
1368
1369static struct cftype throtl_files[] = {
1370	{
1371		.name = "max",
1372		.flags = CFTYPE_NOT_ON_ROOT,
1373		.seq_show = tg_print_max,
1374		.write = tg_set_max,
1375	},
1376	{ }	/* terminate */
1377};
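
/*
 * On the default (v2) hierarchy the single "max" file above appears as
 * "io.max".  A usage sketch matching the tg_set_max() parser, with an
 * illustrative cgroup path and device: keys may appear in any order,
 * omitted keys keep their current values, and "max" clears a limit.
 *
 *	echo "8:16 rbps=1048576 wiops=max" > /sys/fs/cgroup/g1/io.max
 */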
1378
1379static void throtl_shutdown_wq(struct request_queue *q)
1380{
1381	struct throtl_data *td = q->td;
1382
1383	cancel_work_sync(&td->dispatch_work);
1384}
1385
1386static struct blkcg_policy blkcg_policy_throtl = {
1387	.dfl_cftypes		= throtl_files,
1388	.legacy_cftypes		= throtl_legacy_files,
1389
1390	.pd_alloc_fn		= throtl_pd_alloc,
1391	.pd_init_fn		= throtl_pd_init,
1392	.pd_online_fn		= throtl_pd_online,
1393	.pd_free_fn		= throtl_pd_free,
 
 
 
 
 
1394};
1395
1396bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
1397		    struct bio *bio)
1398{
1399	struct throtl_qnode *qn = NULL;
1400	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
1401	struct throtl_service_queue *sq;
1402	bool rw = bio_data_dir(bio);
1403	bool throttled = false;
1404
1405	WARN_ON_ONCE(!rcu_read_lock_held());
1406
1407	/* see throtl_charge_bio() */
1408	if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
1409		goto out;
1410
1411	spin_lock_irq(q->queue_lock);
1412
1413	if (unlikely(blk_queue_bypass(q)))
1414		goto out_unlock;
1415
1416	sq = &tg->service_queue;
1417
1418	while (true) {
1419		/* throtl is FIFO - if bios are already queued, should queue */
1420		if (sq->nr_queued[rw])
1421			break;
1422
1423		/* if above limits, break to queue */
1424		if (!tg_may_dispatch(tg, bio, NULL))
1425			break;
1426
1427		/* within limits, let's charge and dispatch directly */
1428		throtl_charge_bio(tg, bio);
1429
1430		/*
 1431		 * We need to trim the slice even when bios are not being
 1432		 * queued, otherwise it might happen that a bio is not queued
 1433		 * for a long time and the slice keeps on extending and trim
 1434		 * is not called for a long time. Now if limits are reduced
 1435		 * suddenly, we take into account all the IO dispatched so far
 1436		 * at the new low rate and newly queued IO gets a really long
 1437		 * dispatch time.
1438		 *
1439		 * So keep on trimming slice even if bio is not queued.
1440		 */
1441		throtl_trim_slice(tg, rw);
1442
1443		/*
1444		 * @bio passed through this layer without being throttled.
 1445		 * Climb up the ladder.  If we're already at the top, it
1446		 * can be executed directly.
1447		 */
1448		qn = &tg->qnode_on_parent[rw];
1449		sq = sq->parent_sq;
1450		tg = sq_to_tg(sq);
1451		if (!tg)
1452			goto out_unlock;
1453	}
1454
1455	/* out-of-limit, queue to @tg */
1456	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1457		   rw == READ ? 'R' : 'W',
1458		   tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
1459		   tg->io_disp[rw], tg->iops[rw],
1460		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1461
1462	bio_associate_current(bio);
1463	tg->td->nr_queued[rw]++;
1464	throtl_add_bio_tg(bio, qn, tg);
1465	throttled = true;
1466
1467	/*
1468	 * Update @tg's dispatch time and force schedule dispatch if @tg
1469	 * was empty before @bio.  The forced scheduling isn't likely to
1470	 * cause undue delay as @bio is likely to be dispatched directly if
1471	 * its @tg's disptime is not in the future.
1472	 */
1473	if (tg->flags & THROTL_TG_WAS_EMPTY) {
1474		tg_update_disptime(tg);
1475		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
1476	}
1477
1478out_unlock:
1479	spin_unlock_irq(q->queue_lock);
1480out:
1481	/*
1482	 * As multiple blk-throtls may stack in the same issue path, we
1483	 * don't want bios to leave with the flag set.  Clear the flag if
1484	 * being issued.
1485	 */
1486	if (!throttled)
1487		bio->bi_rw &= ~REQ_THROTTLED;
1488	return throttled;
1489}
1490
1491/*
1492 * Dispatch all bios from all children tg's queued on @parent_sq.  On
1493 * return, @parent_sq is guaranteed to not have any active children tg's
1494 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
1495 */
1496static void tg_drain_bios(struct throtl_service_queue *parent_sq)
1497{
1498	struct throtl_grp *tg;
1499
1500	while ((tg = throtl_rb_first(parent_sq))) {
1501		struct throtl_service_queue *sq = &tg->service_queue;
1502		struct bio *bio;
1503
1504		throtl_dequeue_tg(tg);
1505
1506		while ((bio = throtl_peek_queued(&sq->queued[READ])))
1507			tg_dispatch_one_bio(tg, bio_data_dir(bio));
1508		while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
1509			tg_dispatch_one_bio(tg, bio_data_dir(bio));
1510	}
1511}
1512
1513/**
1514 * blk_throtl_drain - drain throttled bios
1515 * @q: request_queue to drain throttled bios for
1516 *
1517 * Dispatch all currently throttled bios on @q through ->make_request_fn().
1518 */
1519void blk_throtl_drain(struct request_queue *q)
1520	__releases(q->queue_lock) __acquires(q->queue_lock)
1521{
1522	struct throtl_data *td = q->td;
1523	struct blkcg_gq *blkg;
1524	struct cgroup_subsys_state *pos_css;
1525	struct bio *bio;
1526	int rw;
1527
1528	queue_lockdep_assert_held(q);
1529	rcu_read_lock();
1530
1531	/*
1532	 * Drain each tg while doing post-order walk on the blkg tree, so
1533	 * that all bios are propagated to td->service_queue.  It'd be
1534	 * better to walk service_queue tree directly but blkg walk is
1535	 * easier.
1536	 */
1537	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
1538		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
1539
1540	/* finally, transfer bios from top-level tg's into the td */
1541	tg_drain_bios(&td->service_queue);
1542
1543	rcu_read_unlock();
1544	spin_unlock_irq(q->queue_lock);
1545
1546	/* all bios now should be in td->service_queue, issue them */
1547	for (rw = READ; rw <= WRITE; rw++)
1548		while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
1549						NULL)))
1550			generic_make_request(bio);
1551
1552	spin_lock_irq(q->queue_lock);
1553}
1554
1555int blk_throtl_init(struct request_queue *q)
1556{
1557	struct throtl_data *td;
1558	int ret;
1559
1560	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1561	if (!td)
1562		return -ENOMEM;
1563
1564	INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
1565	throtl_service_queue_init(&td->service_queue);
1566
1567	q->td = td;
1568	td->queue = q;
1569
1570	/* activate policy */
1571	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
1572	if (ret)
1573		kfree(td);
1574	return ret;
1575}
1576
1577void blk_throtl_exit(struct request_queue *q)
1578{
1579	BUG_ON(!q->td);
1580	throtl_shutdown_wq(q);
1581	blkcg_deactivate_policy(q, &blkcg_policy_throtl);
1582	kfree(q->td);
1583}
1584
1585static int __init throtl_init(void)
1586{
1587	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1588	if (!kthrotld_workqueue)
1589		panic("Failed to create kthrotld\n");
1590
1591	return blkcg_policy_register(&blkcg_policy_throtl);
1592}
1593
1594module_init(throtl_init);
v3.1
   1/*
   2 * Interface for controlling IO bandwidth on a request queue
   3 *
   4 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/slab.h>
   9#include <linux/blkdev.h>
  10#include <linux/bio.h>
  11#include <linux/blktrace_api.h>
  12#include "blk-cgroup.h"
 
  13
  14/* Max dispatch from a group in 1 round */
  15static int throtl_grp_quantum = 8;
  16
  17/* Total max dispatch from all groups in one round */
  18static int throtl_quantum = 32;
  19
  20/* Throttling is performed over 100ms slice and after that slice is renewed */
  21static unsigned long throtl_slice = HZ/10;	/* 100 ms */
  22
 
 
  23/* A workqueue to queue throttle related work */
  24static struct workqueue_struct *kthrotld_workqueue;
  25static void throtl_schedule_delayed_work(struct throtl_data *td,
  26				unsigned long delay);
  27
  28struct throtl_rb_root {
  29	struct rb_root rb;
  30	struct rb_node *left;
  31	unsigned int count;
  32	unsigned long min_disptime;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  33};
  34
  35#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
  36			.count = 0, .min_disptime = 0}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  37
  38#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
  39
  40struct throtl_grp {
  41	/* List of throtl groups on the request queue*/
  42	struct hlist_node tg_node;
  43
  44	/* active throtl group service_tree member */
  45	struct rb_node rb_node;
  46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  47	/*
  48	 * Dispatch time in jiffies. This is the estimated time when group
  49	 * will unthrottle and is ready to dispatch more bio. It is used as
  50	 * key to sort active groups in service tree.
  51	 */
  52	unsigned long disptime;
  53
  54	struct blkio_group blkg;
  55	atomic_t ref;
  56	unsigned int flags;
  57
  58	/* Two lists for READ and WRITE */
  59	struct bio_list bio_lists[2];
  60
  61	/* Number of queued bios on READ and WRITE lists */
  62	unsigned int nr_queued[2];
  63
  64	/* bytes per second rate limits */
  65	uint64_t bps[2];
  66
  67	/* IOPS limits */
  68	unsigned int iops[2];
  69
  70	/* Number of bytes disptached in current slice */
  71	uint64_t bytes_disp[2];
  72	/* Number of bio's dispatched in current slice */
  73	unsigned int io_disp[2];
  74
  75	/* When did we start a new slice */
  76	unsigned long slice_start[2];
  77	unsigned long slice_end[2];
  78
  79	/* Some throttle limits got updated for the group */
  80	int limits_changed;
  81
  82	struct rcu_head rcu_head;
  83};
  84
  85struct throtl_data
  86{
  87	/* List of throtl groups */
  88	struct hlist_head tg_list;
  89
  90	/* service tree for active throtl groups */
  91	struct throtl_rb_root tg_service_tree;
  92
  93	struct throtl_grp *root_tg;
  94	struct request_queue *queue;
  95
  96	/* Total Number of queued bios on READ and WRITE lists */
  97	unsigned int nr_queued[2];
  98
  99	/*
 100	 * number of total undestroyed groups
 101	 */
 102	unsigned int nr_undestroyed_grps;
 103
 104	/* Work for dispatching throttled bios */
 105	struct delayed_work throtl_work;
 106
 107	int limits_changed;
 108};
 109
 110enum tg_state_flags {
 111	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
 112};
 113
 114#define THROTL_TG_FNS(name)						\
 115static inline void throtl_mark_tg_##name(struct throtl_grp *tg)		\
 116{									\
 117	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
 118}									\
 119static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
 120{									\
 121	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
 122}									\
 123static inline int throtl_tg_##name(const struct throtl_grp *tg)		\
 124{									\
 125	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
 126}
 127
 128THROTL_TG_FNS(on_rr);
 129
 130#define throtl_log_tg(td, tg, fmt, args...)				\
 131	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
 132				blkg_path(&(tg)->blkg), ##args);      	\
 133
 134#define throtl_log(td, fmt, args...)	\
 135	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
 
 
 136
 137static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
 138{
 139	if (blkg)
 140		return container_of(blkg, struct throtl_grp, blkg);
 141
 142	return NULL;
 143}
 144
 145static inline unsigned int total_nr_queued(struct throtl_data *td)
 146{
 147	return td->nr_queued[0] + td->nr_queued[1];
 148}
 149
 150static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
 
 
 
 
 
 
 
 151{
 152	atomic_inc(&tg->ref);
 153	return tg;
 
 
 154}
 155
 156static void throtl_free_tg(struct rcu_head *head)
 
 
 
 
 
 
 
 157{
 158	struct throtl_grp *tg;
 159
 160	tg = container_of(head, struct throtl_grp, rcu_head);
 161	free_percpu(tg->blkg.stats_cpu);
 162	kfree(tg);
 
 163}
 164
 165static void throtl_put_tg(struct throtl_grp *tg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 166{
 167	BUG_ON(atomic_read(&tg->ref) <= 0);
 168	if (!atomic_dec_and_test(&tg->ref))
 169		return;
 170
 171	/*
 172	 * A group is freed in rcu manner. But having an rcu lock does not
 173	 * mean that one can access all the fields of blkg and assume these
 174	 * are valid. For example, don't try to follow throtl_data and
 175	 * request queue links.
 176	 *
 177	 * Having a reference to blkg under an rcu allows acess to only
 178	 * values local to groups like group stats and group rate limits
 179	 */
 180	call_rcu(&tg->rcu_head, throtl_free_tg);
 181}
 182
 183static void throtl_init_group(struct throtl_grp *tg)
 
 
 
 
 184{
 185	INIT_HLIST_NODE(&tg->tg_node);
 186	RB_CLEAR_NODE(&tg->rb_node);
 187	bio_list_init(&tg->bio_lists[0]);
 188	bio_list_init(&tg->bio_lists[1]);
 189	tg->limits_changed = false;
 190
 191	/* Practically unlimited BW */
 192	tg->bps[0] = tg->bps[1] = -1;
 193	tg->iops[0] = tg->iops[1] = -1;
 194
 195	/*
 196	 * Take the initial reference that will be released on destroy
 197	 * This can be thought of a joint reference by cgroup and
 198	 * request queue which will be dropped by either request queue
 199	 * exit or cgroup deletion path depending on who is exiting first.
 200	 */
 201	atomic_set(&tg->ref, 1);
 202}
 203
 204/* Should be called with rcu read lock held (needed for blkcg) */
 205static void
 206throtl_add_group_to_td_list(struct throtl_data *td, struct throtl_grp *tg)
 207{
 208	hlist_add_head(&tg->tg_node, &td->tg_list);
 209	td->nr_undestroyed_grps++;
 210}
 211
 212static void
 213__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
 214{
 215	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
 216	unsigned int major, minor;
 217
 218	if (!tg || tg->blkg.dev)
 219		return;
 220
 221	/*
 222	 * Fill in device details for a group which might not have been
 223	 * filled at group creation time as queue was being instantiated
 224	 * and driver had not attached a device yet
 225	 */
 226	if (bdi->dev && dev_name(bdi->dev)) {
 227		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
 228		tg->blkg.dev = MKDEV(major, minor);
 229	}
 230}
 231
 232/*
 233 * Should be called without queue lock held. Here queue lock will be
 234 * taken rarely. It will be taken at most once during the lifetime
 235 * of a group, if need be.
 236 */
 237static void
 238throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
 239{
 240	if (!tg || tg->blkg.dev)
 241		return;
 242
 243	spin_lock_irq(td->queue->queue_lock);
 244	__throtl_tg_fill_dev_details(td, tg);
 245	spin_unlock_irq(td->queue->queue_lock);
 246}
 247
 248static void throtl_init_add_tg_lists(struct throtl_data *td,
 249			struct throtl_grp *tg, struct blkio_cgroup *blkcg)
 250{
 251	__throtl_tg_fill_dev_details(td, tg);
 252
 253	/* Add group onto cgroup list */
 254	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
 255				tg->blkg.dev, BLKIO_POLICY_THROTL);
 256
 257	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
 258	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
 259	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
 260	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);
 261
 262	throtl_add_group_to_td_list(td, tg);
 263}
 264
 265/* Should be called without queue lock and outside of rcu period */
 266static struct throtl_grp *throtl_alloc_tg(struct throtl_data *td)
 267{
 268	struct throtl_grp *tg = NULL;
 269	int ret;
 270
 271	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
 272	if (!tg)
 273		return NULL;
 274
 275	ret = blkio_alloc_blkg_stats(&tg->blkg);
 276
 277	if (ret) {
 278		kfree(tg);
 279		return NULL;
 280	}
 281
 282	throtl_init_group(tg);
 283	return tg;
 284}
 285
 286static struct
 287throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 288{
 289	struct throtl_grp *tg = NULL;
 290	void *key = td;
 291
 292	/*
 293	 * This is the common case when there are no blkio cgroups.
 294	 * Avoid lookup in this case.
 295	 */
 296	if (blkcg == &blkio_root_cgroup)
 297		tg = td->root_tg;
 298	else
 299		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
 300
 301	__throtl_tg_fill_dev_details(td, tg);
 302	return tg;
 303}
 304
 305/*
 306 * This function returns with queue lock released in case of an error,
 307 * e.g. when the request queue is gone.
 308 */
 309static struct throtl_grp *throtl_get_tg(struct throtl_data *td)
 310{
 311	struct throtl_grp *tg = NULL, *__tg = NULL;
 312	struct blkio_cgroup *blkcg;
 313	struct request_queue *q = td->queue;
 314
 315	rcu_read_lock();
 316	blkcg = task_blkio_cgroup(current);
 317	tg = throtl_find_tg(td, blkcg);
 318	if (tg) {
 319		rcu_read_unlock();
 320		return tg;
 321	}
 322
 323	/*
 324	 * Need to allocate a group. Allocating the group also needs
 325	 * allocating per-cpu stats, which in turn takes a mutex and can
 326	 * block. Hence drop the rcu lock and queue_lock before calling alloc.
 327	 *
 328	 * Take the request queue reference to make sure queue does not
 329	 * go away once we return from allocation.
 330	 */
 331	blk_get_queue(q);
 332	rcu_read_unlock();
 333	spin_unlock_irq(q->queue_lock);
 334
 335	tg = throtl_alloc_tg(td);
 336	/*
 337	 * We might have slept in group allocation. Make sure queue is not
 338	 * dead
 339	 */
 340	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
 341		blk_put_queue(q);
 342		if (tg)
 343			kfree(tg);
 344
 345		return ERR_PTR(-ENODEV);
 346	}
 347	blk_put_queue(q);
 348
 349	/* Group allocated and queue is still alive. Take the lock. */
 350	spin_lock_irq(q->queue_lock);
 351
 352	/*
 353	 * Initialize the new group. After sleeping, read the blkcg again.
 354	 */
 355	rcu_read_lock();
 356	blkcg = task_blkio_cgroup(current);
 357
 358	/*
 359	 * If some other thread already allocated the group while we were
 360	 * not holding queue lock, free up the group
 361	 */
 362	__tg = throtl_find_tg(td, blkcg);
 363
 364	if (__tg) {
 365		kfree(tg);
 366		rcu_read_unlock();
 367		return __tg;
 368	}
 369
 370	/* Group allocation failed. Account the IO to root group */
 371	if (!tg) {
 372		tg = td->root_tg;
 373		return tg;
 374	}
 375
 376	throtl_init_add_tg_lists(td, tg, blkcg);
 377	rcu_read_unlock();
 378	return tg;
 379}
 380
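    /*
     * st->left caches the leftmost node of the service tree, i.e. the
     * group with the earliest disptime, so the next group to dispatch
     * can be found without walking down from the root each time.
     */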
 381static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
 382{
 383	/* Service tree is empty */
 384	if (!root->count)
 385		return NULL;
 386
 387	if (!root->left)
 388		root->left = rb_first(&root->rb);
 389
 390	if (root->left)
 391		return rb_entry_tg(root->left);
 392
 393	return NULL;
 394}
 395
 396static void rb_erase_init(struct rb_node *n, struct rb_root *root)
 397{
 398	rb_erase(n, root);
 399	RB_CLEAR_NODE(n);
 400}
 401
 402static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
 403{
 404	if (root->left == n)
 405		root->left = NULL;
 406	rb_erase_init(n, &root->rb);
 407	--root->count;
 408}
 409
 410static void update_min_dispatch_time(struct throtl_rb_root *st)
 411{
 412	struct throtl_grp *tg;
 413
 414	tg = throtl_rb_first(st);
 415	if (!tg)
 416		return;
 417
 418	st->min_disptime = tg->disptime;
 419}
 420
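    /*
     * Insert tg into the service tree keyed by ->disptime.  Equal keys
     * go to the right so that groups with the same disptime dispatch in
     * FIFO order; if the new node lands leftmost, the cached st->left
     * is updated to point at it.
     */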
 421static void
 422tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
 423{
 424	struct rb_node **node = &st->rb.rb_node;
 425	struct rb_node *parent = NULL;
 426	struct throtl_grp *__tg;
 427	unsigned long key = tg->disptime;
 428	int left = 1;
 429
 430	while (*node != NULL) {
 431		parent = *node;
 432		__tg = rb_entry_tg(parent);
 433
 434		if (time_before(key, __tg->disptime))
 435			node = &parent->rb_left;
 436		else {
 437			node = &parent->rb_right;
 438			left = 0;
 439		}
 440	}
 441
 442	if (left)
 443		st->left = &tg->rb_node;
 444
 445	rb_link_node(&tg->rb_node, parent, node);
 446	rb_insert_color(&tg->rb_node, &st->rb);
 447}
 448
 449static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
 450{
 451	struct throtl_rb_root *st = &td->tg_service_tree;
 452
 453	tg_service_tree_add(st, tg);
 454	throtl_mark_tg_on_rr(tg);
 455	st->count++;
 456}
 457
 458static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
 459{
 460	if (!throtl_tg_on_rr(tg))
 461		__throtl_enqueue_tg(td, tg);
 462}
 463
 464static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
 465{
 466	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
 467	throtl_clear_tg_on_rr(tg);
 468}
 469
 470static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
 471{
 472	if (throtl_tg_on_rr(tg))
 473		__throtl_dequeue_tg(td, tg);
 474}
 475
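    /*
     * Arm the dispatch work for the first pending group: schedule with
     * zero delay if its disptime has already passed, otherwise delay by
     * (min_disptime - jiffies).  Nothing is scheduled while no bios are
     * queued.
     */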
 476static void throtl_schedule_next_dispatch(struct throtl_data *td)
 477{
 478	struct throtl_rb_root *st = &td->tg_service_tree;
 479
 480	/*
 481	 * If there are more bios pending, schedule more work.
 482	 */
 483	if (!total_nr_queued(td))
 484		return;
 485
 486	BUG_ON(!st->count);
 487
 488	update_min_dispatch_time(st);
 489
 490	if (time_before_eq(st->min_disptime, jiffies))
 491		throtl_schedule_delayed_work(td, 0);
 492	else
 493		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
 494}
 495
 496static inline void
 497throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 498{
 499	tg->bytes_disp[rw] = 0;
 500	tg->io_disp[rw] = 0;
 501	tg->slice_start[rw] = jiffies;
 502	tg->slice_end[rw] = jiffies + throtl_slice;
 503	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
 504			rw == READ ? 'R' : 'W', tg->slice_start[rw],
 505			tg->slice_end[rw], jiffies);
 506}
 507
 508static inline void throtl_set_slice_end(struct throtl_data *td,
 509		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
 510{
 511	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 512}
 513
 514static inline void throtl_extend_slice(struct throtl_data *td,
 515		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
 516{
 517	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 518	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
 519			rw == READ ? 'R' : 'W', tg->slice_start[rw],
 520			tg->slice_end[rw], jiffies);
 521}
 522
 523/* Determine if previously allocated or extended slice is complete or not */
 524static bool
 525throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 526{
 527	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
 528		return 0;
 529
 530	return 1;
 531}
 532
 533/* Trim the used slices and adjust slice start accordingly */
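    /*
     * Worked example, assuming HZ=1000 (throtl_slice = 100 jiffies) and
     * tg->bps[rw] = 1048576 (1 MiB/s): if two full slices (200ms) have
     * elapsed, nr_slices = 2 and bytes_trim = 1048576 * 100 * 2 / 1000
     * = 209715, i.e. the byte budget of the trimmed 200ms is subtracted
     * from bytes_disp and slice_start moves forward by 200 jiffies.
     */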
 534static inline void
 535throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 536{
 537	unsigned long nr_slices, time_elapsed, io_trim;
 538	u64 bytes_trim, tmp;
 539
 540	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
 541
 542	/*
 543	 * If bps are unlimited (-1), then the time slice doesn't get
 544	 * renewed. Don't try to trim the slice if it has been used up. A
 545	 * new slice will start when appropriate.
 546	 */
 547	if (throtl_slice_used(td, tg, rw))
 548		return;
 549
 550	/*
 551	 * A bio has been dispatched. Also adjust slice_end. It might happen
 552	 * that initially cgroup limit was very low resulting in high
 553	 * slice_end, but later the limit was bumped up and the bio was
 554	 * dispatched sooner; then we need to reduce slice_end. A high bogus
 555	 * slice_end is bad because it does not allow a new slice to start.
 556	 */
 557
 558	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
 559
 560	time_elapsed = jiffies - tg->slice_start[rw];
 561
 562	nr_slices = time_elapsed / throtl_slice;
 563
 564	if (!nr_slices)
 565		return;
 566	tmp = tg->bps[rw] * throtl_slice * nr_slices;
 567	do_div(tmp, HZ);
 568	bytes_trim = tmp;
 569
 570	io_trim = (tg->iops[rw] * throtl_slice * nr_slices) / HZ;
 571
 572	if (!bytes_trim && !io_trim)
 573		return;
 574
 575	if (tg->bytes_disp[rw] >= bytes_trim)
 576		tg->bytes_disp[rw] -= bytes_trim;
 577	else
 578		tg->bytes_disp[rw] = 0;
 579
 580	if (tg->io_disp[rw] >= io_trim)
 581		tg->io_disp[rw] -= io_trim;
 582	else
 583		tg->io_disp[rw] = 0;
 584
 585	tg->slice_start[rw] += nr_slices * throtl_slice;
 586
 587	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
 588			" start=%lu end=%lu jiffies=%lu",
 589			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
 590			tg->slice_start[rw], tg->slice_end[rw], jiffies);
 591}
 592
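    /*
     * Worked example, assuming HZ=1000 (throtl_slice = 100 jiffies) and
     * tg->iops[rw] = 50: right after a slice starts, jiffy_elapsed_rnd
     * is rounded up to one slice, so io_allowed = 50 * 100 / 1000 = 5.
     * The sixth IO in that window must wait roughly (6 * 1000) / 50 + 1
     * jiffies minus the jiffies already elapsed.
     */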
 593static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
 594		struct bio *bio, unsigned long *wait)
 595{
 596	bool rw = bio_data_dir(bio);
 597	unsigned int io_allowed;
 598	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 599	u64 tmp;
 600
 601	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 602
 603	/* Slice has just started. Consider one slice interval */
 604	if (!jiffy_elapsed)
 605		jiffy_elapsed_rnd = throtl_slice;
 606
 607	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 608
 609	/*
 610	 * jiffy_elapsed_rnd should not be a big value: the minimum iops is
 611	 * 1, so at most the elapsed jiffies should be equivalent to 1 second,
 612	 * as we will allow a dispatch after 1 second and by then the slice
 613	 * should have been trimmed.
 614	 */
 615
 616	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
 617	do_div(tmp, HZ);
 618
 619	if (tmp > UINT_MAX)
 620		io_allowed = UINT_MAX;
 621	else
 622		io_allowed = tmp;
 623
 624	if (tg->io_disp[rw] + 1 <= io_allowed) {
 625		if (wait)
 626			*wait = 0;
 627		return 1;
 628	}
 629
 630	/* Calc approx time to dispatch */
 631	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg->iops[rw] + 1;
 632
 633	if (jiffy_wait > jiffy_elapsed)
 634		jiffy_wait = jiffy_wait - jiffy_elapsed;
 635	else
 636		jiffy_wait = 1;
 637
 638	if (wait)
 639		*wait = jiffy_wait;
 640	return 0;
 641}
 642
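    /*
     * Worked example, assuming HZ=1000 (throtl_slice = 100 jiffies) and
     * tg->bps[rw] = 1048576 (1 MiB/s): within the first slice interval
     * bytes_allowed = 1048576 * 100 / 1000 = 104857.  A bio exceeding
     * the budget by extra_bytes waits about extra_bytes * 1000 / 1048576
     * jiffies, e.g. 524288 extra bytes wait ~500 jiffies (500ms).
     */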
 643static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
 644		struct bio *bio, unsigned long *wait)
 645{
 646	bool rw = bio_data_dir(bio);
 647	u64 bytes_allowed, extra_bytes, tmp;
 648	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 649
 650	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 651
 652	/* Slice has just started. Consider one slice interval */
 653	if (!jiffy_elapsed)
 654		jiffy_elapsed_rnd = throtl_slice;
 655
 656	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 657
 658	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
 659	do_div(tmp, HZ);
 660	bytes_allowed = tmp;
 661
 662	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
 663		if (wait)
 664			*wait = 0;
 665		return 1;
 666	}
 667
 668	/* Calc approx time to dispatch */
 669	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
 670	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
 671
 672	if (!jiffy_wait)
 673		jiffy_wait = 1;
 674
 675	/*
 676	 * This wait time is without taking into consideration the rounding
 677	 * up we did. Add that time also.
 678	 */
 679	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
 680	if (wait)
 681		*wait = jiffy_wait;
 682	return 0;
 683}
 684
 685static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
 686	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
 687		return 1;
 688	return 0;
 689}
 690
 691/*
 692 * Returns whether one can dispatch a bio or not. Also returns approx number
 693 * of jiffies to wait before this bio is within the IO rate and can be dispatched.
 694 */
 695static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 696				struct bio *bio, unsigned long *wait)
 697{
 698	bool rw = bio_data_dir(bio);
 699	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
 700
 701	/*
 702	 * Currently the whole state machine of the group depends on the
 703	 * first bio queued in the group bio list. So one should not be calling
 704	 * this function with a different bio if there are other bios
 705	 * queued.
 706	 */
 707	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
 708
 709	/* If tg->bps = -1, then BW is unlimited */
 710	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
 711		if (wait)
 712			*wait = 0;
 713		return 1;
 714	}
 715
 716	/*
 717	 * If previous slice expired, start a new one otherwise renew/extend
 718	 * existing slice to make sure it is at least throtl_slice interval
 719	 * long since now.
 720	 */
 721	if (throtl_slice_used(td, tg, rw))
 722		throtl_start_new_slice(td, tg, rw);
 723	else {
 724		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
 725			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
 726	}
 727
 728	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
 729	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
 730		if (wait)
 731			*wait = 0;
 732		return 1;
 733	}
 734
 735	max_wait = max(bps_wait, iops_wait);
 736
 737	if (wait)
 738		*wait = max_wait;
 739
 740	if (time_before(tg->slice_end[rw], jiffies + max_wait))
 741		throtl_extend_slice(td, tg, rw, jiffies + max_wait);
 742
 743	return 0;
 744}
 745
 746static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 747{
 748	bool rw = bio_data_dir(bio);
 749	bool sync = rw_is_sync(bio->bi_rw);
 750
 751	/* Charge the bio to the group */
 752	tg->bytes_disp[rw] += bio->bi_size;
 753	tg->io_disp[rw]++;
 754
 755	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
 756}
 757
 758static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
 759			struct bio *bio)
 760{
 761	bool rw = bio_data_dir(bio);
 762
 763	bio_list_add(&tg->bio_lists[rw], bio);
 764	/* Take a bio reference on tg */
 765	throtl_ref_get_tg(tg);
 766	tg->nr_queued[rw]++;
 767	td->nr_queued[rw]++;
 768	throtl_enqueue_tg(td, tg);
 769}
 770
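    /*
     * Recompute when this group may dispatch next: peek the head bio of
     * each direction, take the smaller of the two waits, and requeue the
     * group with disptime = jiffies + min_wait so the service tree stays
     * sorted by dispatch time.
     */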
 771static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
 772{
 773	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
 774	struct bio *bio;
 775
 776	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
 777		tg_may_dispatch(td, tg, bio, &read_wait);
 778
 779	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
 780		tg_may_dispatch(td, tg, bio, &write_wait);
 781
 782	min_wait = min(read_wait, write_wait);
 783	disptime = jiffies + min_wait;
 784
 785	/* Update dispatch time */
 786	throtl_dequeue_tg(td, tg);
 787	tg->disptime = disptime;
 788	throtl_enqueue_tg(td, tg);
 789}
 790
 791static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
 792				bool rw, struct bio_list *bl)
 793{
 794	struct bio *bio;
 795
 796	bio = bio_list_pop(&tg->bio_lists[rw]);
 797	tg->nr_queued[rw]--;
 798	/* Drop bio reference on tg */
 799	throtl_put_tg(tg);
 800
 801	BUG_ON(td->nr_queued[rw] <= 0);
 802	td->nr_queued[rw]--;
 803
 804	throtl_charge_bio(tg, bio);
 805	bio_list_add(bl, bio);
 806	bio->bi_rw |= REQ_THROTTLED;
 807
 808	throtl_trim_slice(td, tg, rw);
 809}
 810
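    /*
     * With the default throtl_grp_quantum of 8, max_nr_reads works out
     * to 8 * 3 / 4 = 6 and max_nr_writes to 8 - 6 = 2, giving the
     * 75%/25% read/write split noted below.
     */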
 811static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
 812				struct bio_list *bl)
 813{
 814	unsigned int nr_reads = 0, nr_writes = 0;
 815	unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
 816	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
 817	struct bio *bio;
 818
 819	/* Try to dispatch 75% READS and 25% WRITES */
 820
 821	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
 822		&& tg_may_dispatch(td, tg, bio, NULL)) {
 823
 824		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
 825		nr_reads++;
 826
 827		if (nr_reads >= max_nr_reads)
 828			break;
 829	}
 830
 831	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
 832		&& tg_may_dispatch(td, tg, bio, NULL)) {
 833
 834		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
 835		nr_writes++;
 836
 837		if (nr_writes >= max_nr_writes)
 838			break;
 839	}
 840
 841	return nr_reads + nr_writes;
 842}
 843
 844static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
 845{
 846	unsigned int nr_disp = 0;
 847	struct throtl_grp *tg;
 848	struct throtl_rb_root *st = &td->tg_service_tree;
 849
 850	while (1) {
 851		tg = throtl_rb_first(st);
 852
 853		if (!tg)
 854			break;
 855
 856		if (time_before(jiffies, tg->disptime))
 857			break;
 858
 859		throtl_dequeue_tg(td, tg);
 860
 861		nr_disp += throtl_dispatch_tg(td, tg, bl);
 862
 863		if (tg->nr_queued[0] || tg->nr_queued[1]) {
 864			tg_update_disptime(td, tg);
 865			throtl_enqueue_tg(td, tg);
 866		}
 867
 868		if (nr_disp >= throtl_quantum)
 869			break;
 870	}
 871
 872	return nr_disp;
 873}
 874
 875static void throtl_process_limit_change(struct throtl_data *td)
 876{
 877	struct throtl_grp *tg;
 878	struct hlist_node *pos, *n;
 879
 880	if (!td->limits_changed)
 881		return;
 882
 883	xchg(&td->limits_changed, false);
 884
 885	throtl_log(td, "limits changed");
 886
 887	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
 888		if (!tg->limits_changed)
 889			continue;
 890
 891		if (!xchg(&tg->limits_changed, false))
 892			continue;
 893
 894		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
 895			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
 896			tg->iops[READ], tg->iops[WRITE]);
 897
 898		/*
 899		 * Restart the slices for both READ and WRITE. It
 900		 * might happen that a group's limits are dropped
 901		 * suddenly and we don't want to account recently
 902		 * dispatched IO at the new low rate.
 903		 */
 904		throtl_start_new_slice(td, tg, 0);
 905		throtl_start_new_slice(td, tg, 1);
 906
 907		if (throtl_tg_on_rr(tg))
 908			tg_update_disptime(td, tg);
 909	}
 910}
 911
 912/* Dispatch throttled bios. Should be called without queue lock held. */
 913static int throtl_dispatch(struct request_queue *q)
 914{
 915	struct throtl_data *td = q->td;
 916	unsigned int nr_disp = 0;
 917	struct bio_list bio_list_on_stack;
 918	struct bio *bio;
 919	struct blk_plug plug;
 920
 921	spin_lock_irq(q->queue_lock);
 922
 923	throtl_process_limit_change(td);
 924
 925	if (!total_nr_queued(td))
 926		goto out;
 927
 928	bio_list_init(&bio_list_on_stack);
 929
 930	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
 931			total_nr_queued(td), td->nr_queued[READ],
 932			td->nr_queued[WRITE]);
 933
 934	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
 935
 936	if (nr_disp)
 937		throtl_log(td, "bios disp=%u", nr_disp);
 938
 939	throtl_schedule_next_dispatch(td);
 940out:
 941	spin_unlock_irq(q->queue_lock);
 942
 943	/*
 944	 * If we dispatched some requests, unplug the queue to make sure
 945	 * they are dispatched immediately.
 946	 */
 947	if (nr_disp) {
 948		blk_start_plug(&plug);
 949		while ((bio = bio_list_pop(&bio_list_on_stack)))
 950			generic_make_request(bio);
 951		blk_finish_plug(&plug);
 952	}
 953	return nr_disp;
 954}
 955
 956void blk_throtl_work(struct work_struct *work)
 957{
 958	struct throtl_data *td = container_of(work, struct throtl_data,
 959					throtl_work.work);
 960	struct request_queue *q = td->queue;
 961
 962	throtl_dispatch(q);
 963}
 964
 965/* Call with queue lock held */
 966static void
 967throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 968{
 969
 970	struct delayed_work *dwork = &td->throtl_work;
 971
 972	/* schedule work if limits changed even if no bio is queued */
 973	if (total_nr_queued(td) || td->limits_changed) {
 974		/*
 975		 * We might have a work scheduled to be executed in future.
 976		 * Cancel that and schedule a new one.
 977		 */
 978		__cancel_delayed_work(dwork);
 979		queue_delayed_work(kthrotld_workqueue, dwork, delay);
 980		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 981				delay, jiffies);
 982	}
 983}
 984
 985static void
 986throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
 987{
 988	/* Something is wrong if we are trying to remove the same group twice */
 989	BUG_ON(hlist_unhashed(&tg->tg_node));
 990
 991	hlist_del_init(&tg->tg_node);
 992
 993	/*
 994	 * Put the reference taken at the time of creation so that when all
 995	 * queues are gone, group can be destroyed.
 996	 */
 997	throtl_put_tg(tg);
 998	td->nr_undestroyed_grps--;
 999}
1000
1001static void throtl_release_tgs(struct throtl_data *td)
1002{
1003	struct hlist_node *pos, *n;
1004	struct throtl_grp *tg;
1005
1006	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
1007		/*
1008		 * If cgroup removal path got to blk_group first and removed
1009		 * it from cgroup list, then it will take care of destroying
1010		 * the throtl_grp too.
1011		 */
1012		if (!blkiocg_del_blkio_group(&tg->blkg))
1013			throtl_destroy_tg(td, tg);
1014	}
1015}
1016
1017static void throtl_td_free(struct throtl_data *td)
1018{
1019	kfree(td);
1020}
1021
1022/*
1023 * Blk cgroup controller notification saying that blkio_group object is being
1024 * delinked as associated cgroup object is going away. That also means that
1025 * no new IO will come into this group. So get rid of this group as soon as
1026 * any pending IO in the group is finished.
1027 *
1028 * This function is called under rcu_read_lock(). key is the rcu protected
1029 * pointer. That means "key" is a valid throtl_data pointer as long as we
1030 * are under rcu read lock.
1031 *
1032 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1033 * it should not be NULL as even if the queue was going away, cgroup deletion
1034 * path got to it first.
1035 */
1036void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
1037{
1038	unsigned long flags;
1039	struct throtl_data *td = key;
1040
1041	spin_lock_irqsave(td->queue->queue_lock, flags);
1042	throtl_destroy_tg(td, tg_of_blkg(blkg));
1043	spin_unlock_irqrestore(td->queue->queue_lock, flags);
1044}
1045
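    /*
     * The xchg()s below atomically set the dirty flags without holding
     * the queue lock (see the locking comment further down); the
     * dispatch work later clears them with xchg(..., false), so a
     * concurrent update is not lost.
     */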
1046static void throtl_update_blkio_group_common(struct throtl_data *td,
1047				struct throtl_grp *tg)
1048{
1049	xchg(&tg->limits_changed, true);
1050	xchg(&td->limits_changed, true);
1051	/* Schedule a work now to process the limit change */
1052	throtl_schedule_delayed_work(td, 0);
1053}
1054
1055/*
1056 * For all update functions, key should be a valid pointer because these
1057 * update functions are called under blkcg_lock; that means blkg is
1058 * valid and in turn key is valid. The queue exit path cannot race
1059 * because of blkcg_lock.
1060 *
1061 * Cannot take queue lock in update functions, as queue lock under blkcg_lock
1062 * is not allowed. Under other paths we take blkcg_lock under queue_lock.
1063 */
1064static void throtl_update_blkio_group_read_bps(void *key,
1065				struct blkio_group *blkg, u64 read_bps)
1066{
1067	struct throtl_data *td = key;
1068	struct throtl_grp *tg = tg_of_blkg(blkg);
1069
1070	tg->bps[READ] = read_bps;
1071	throtl_update_blkio_group_common(td, tg);
1072}
1073
1074static void throtl_update_blkio_group_write_bps(void *key,
1075				struct blkio_group *blkg, u64 write_bps)
1076{
1077	struct throtl_data *td = key;
1078	struct throtl_grp *tg = tg_of_blkg(blkg);
1079
1080	tg->bps[WRITE] = write_bps;
1081	throtl_update_blkio_group_common(td, tg);
1082}
1083
1084static void throtl_update_blkio_group_read_iops(void *key,
1085			struct blkio_group *blkg, unsigned int read_iops)
1086{
1087	struct throtl_data *td = key;
1088	struct throtl_grp *tg = tg_of_blkg(blkg);
1089
1090	tg->iops[READ] = read_iops;
1091	throtl_update_blkio_group_common(td, tg);
1092}
1093
1094static void throtl_update_blkio_group_write_iops(void *key,
1095			struct blkio_group *blkg, unsigned int write_iops)
1096{
1097	struct throtl_data *td = key;
1098	struct throtl_grp *tg = tg_of_blkg(blkg);
1099
1100	tg->iops[WRITE] = write_iops;
1101	throtl_update_blkio_group_common(td, tg);
1102}
1103
1104static void throtl_shutdown_wq(struct request_queue *q)
1105{
1106	struct throtl_data *td = q->td;
1107
1108	cancel_delayed_work_sync(&td->throtl_work);
1109}
1110
1111static struct blkio_policy_type blkio_policy_throtl = {
1112	.ops = {
1113		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
1114		.blkio_update_group_read_bps_fn =
1115					throtl_update_blkio_group_read_bps,
1116		.blkio_update_group_write_bps_fn =
1117					throtl_update_blkio_group_write_bps,
1118		.blkio_update_group_read_iops_fn =
1119					throtl_update_blkio_group_read_iops,
1120		.blkio_update_group_write_iops_fn =
1121					throtl_update_blkio_group_write_iops,
1122	},
1123	.plid = BLKIO_POLICY_THROTL,
1124};
1125
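    /*
     * Main entry point from the block layer.  Returns 0 when the caller
     * may go on to submit the bio; if the bio got throttled and queued,
     * *biop is set to NULL so the caller must not submit it.  Returns
     * -ENODEV when the queue is gone (queue lock is no longer held in
     * that case).
     */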
1126int blk_throtl_bio(struct request_queue *q, struct bio **biop)
1127{
1128	struct throtl_data *td = q->td;
1129	struct throtl_grp *tg;
1130	struct bio *bio = *biop;
1131	bool rw = bio_data_dir(bio), update_disptime = true;
1132	struct blkio_cgroup *blkcg;
1133
1134	if (bio->bi_rw & REQ_THROTTLED) {
1135		bio->bi_rw &= ~REQ_THROTTLED;
1136		return 0;
1137	}
1138
1139	/*
1140	 * A throtl_grp pointer retrieved under rcu can be used to access
1141	 * basic fields like stats and io rates. If a group has no rules,
1142	 * just update the dispatch stats in lockless manner and return.
1143	 */
1144
1145	rcu_read_lock();
1146	blkcg = task_blkio_cgroup(current);
1147	tg = throtl_find_tg(td, blkcg);
1148	if (tg) {
1149		throtl_tg_fill_dev_details(td, tg);
1150
1151		if (tg_no_rule_group(tg, rw)) {
1152			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
1153					rw, rw_is_sync(bio->bi_rw));
1154			rcu_read_unlock();
1155			return 0;
1156		}
1157	}
1158	rcu_read_unlock();
1159
1160	/*
1161	 * Either group has not been allocated yet or it is not an unlimited
1162	 * IO group
1163	 */
1164
1165	spin_lock_irq(q->queue_lock);
1166	tg = throtl_get_tg(td);
1167
1168	if (IS_ERR(tg)) {
1169		if (PTR_ERR(tg) == -ENODEV) {
1170			/*
1171			 * Queue is gone. No queue lock held here.
1172			 */
1173			return -ENODEV;
1174		}
1175	}
1176
1177	if (tg->nr_queued[rw]) {
1178		/*
1179		 * There is already another bio queued in same dir. No
1180		 * need to update dispatch time.
1181		 */
1182		update_disptime = false;
1183		goto queue_bio;
1184
1185	}
1186
1187	/* Bio is within the rate limit of the group */
1188	if (tg_may_dispatch(td, tg, bio, NULL)) {
1189		throtl_charge_bio(tg, bio);
1190
1191		/*
1192		 * We need to trim slice even when bios are not being queued
1193		 * otherwise it might happen that a bio is not queued for
1194		 * a long time and slice keeps on extending and trim is not
1195		 * called for a long time. Now if limits are reduced suddenly
1196		 * we take into account all the IO dispatched so far at the new
1197		 * low rate and newly queued IO gets a really long dispatch
1198		 * time.
1199		 *
1200		 * So keep on trimming slice even if bio is not queued.
1201		 */
1202		throtl_trim_slice(td, tg, rw);
1203		goto out;
1204	}
1205
1206queue_bio:
1207	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
1208			" iodisp=%u iops=%u queued=%d/%d",
1209			rw == READ ? 'R' : 'W',
1210			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
1211			tg->io_disp[rw], tg->iops[rw],
1212			tg->nr_queued[READ], tg->nr_queued[WRITE]);
1213
1214	throtl_add_bio_tg(q->td, tg, bio);
1215	*biop = NULL;
1216
1217	if (update_disptime) {
1218		tg_update_disptime(td, tg);
1219		throtl_schedule_next_dispatch(td);
1220	}
1221
1222out:
1223	spin_unlock_irq(q->queue_lock);
1224	return 0;
1225}
1226
1227int blk_throtl_init(struct request_queue *q)
1228{
1229	struct throtl_data *td;
1230	struct throtl_grp *tg;
1231
1232	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1233	if (!td)
1234		return -ENOMEM;
1235
1236	INIT_HLIST_HEAD(&td->tg_list);
1237	td->tg_service_tree = THROTL_RB_ROOT;
1238	td->limits_changed = false;
1239	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
1240
1241	/* Alloc and init root group. */
1242	td->queue = q;
1243	tg = throtl_alloc_tg(td);
1244
1245	if (!tg) {
1246		kfree(td);
1247		return -ENOMEM;
1248	}
1249
1250	td->root_tg = tg;
1251
1252	rcu_read_lock();
1253	throtl_init_add_tg_lists(td, tg, &blkio_root_cgroup);
1254	rcu_read_unlock();
1255
1256	/* Attach throtl data to request queue */
1257	q->td = td;
1258	return 0;
1259}
1260
1261void blk_throtl_exit(struct request_queue *q)
1262{
1263	struct throtl_data *td = q->td;
1264	bool wait = false;
1265
1266	BUG_ON(!td);
1267
1268	throtl_shutdown_wq(q);
1269
1270	spin_lock_irq(q->queue_lock);
1271	throtl_release_tgs(td);
1272
1273	/* If there are other groups */
1274	if (td->nr_undestroyed_grps > 0)
1275		wait = true;
1276
1277	spin_unlock_irq(q->queue_lock);
1278
1279	/*
1280	 * Wait for tg->blkg->key accessors to exit their grace periods.
1281	 * Do this wait only if there are other undestroyed groups out
1282	 * there (other than root group). This can happen if cgroup deletion
1283	 * path claimed the responsibility of cleaning up a group before
1284	 * queue cleanup code got to the group.
1285	 *
1286	 * Do not call synchronize_rcu() unconditionally as there are drivers
1287	 * which create/delete request queue hundreds of times during scan/boot
1288	 * and synchronize_rcu() can take significant time and slow down boot.
1289	 */
1290	if (wait)
1291		synchronize_rcu();
1292
1293	/*
1294	 * Just to be safe: if somebody updated limits through the cgroup
1295	 * after the previous flush and another work got queued, cancel
1296	 * it.
1297	 */
1298	throtl_shutdown_wq(q);
1299	throtl_td_free(td);
1300}
1301
1302static int __init throtl_init(void)
1303{
1304	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1305	if (!kthrotld_workqueue)
1306		panic("Failed to create kthrotld\n");
1307
1308	blkio_policy_register(&blkio_policy_throtl);
1309	return 0;
1310}
1311
1312module_init(throtl_init);