v4.6
 
   1/*
   2 * Common Block IO controller cgroup interface
   3 *
   4 * Based on ideas and code from CFQ, CFS and BFQ:
   5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   6 *
   7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
   8 *		      Paolo Valente <paolo.valente@unimore.it>
   9 *
  10 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
  11 * 	              Nauman Rafique <nauman@google.com>
  12 *
  13 * For policy-specific per-blkcg data:
  14 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
  15 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
  16 */
  17#include <linux/ioprio.h>
  18#include <linux/kdev_t.h>
  19#include <linux/module.h>
  20#include <linux/err.h>
  21#include <linux/blkdev.h>
  22#include <linux/backing-dev.h>
  23#include <linux/slab.h>
  24#include <linux/genhd.h>
  25#include <linux/delay.h>
  26#include <linux/atomic.h>
  27#include <linux/ctype.h>
  28#include <linux/blk-cgroup.h>
  29#include "blk.h"
  30
  31#define MAX_KEY_LEN 100
  32
  33/*
  34 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
  35 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
  36 * policy [un]register operations including cgroup file additions /
  37 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
  38 * allows grabbing it from cgroup callbacks.
  39 */
  40static DEFINE_MUTEX(blkcg_pol_register_mutex);
  41static DEFINE_MUTEX(blkcg_pol_mutex);
  42
  43struct blkcg blkcg_root;
  44EXPORT_SYMBOL_GPL(blkcg_root);
  45
  46struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
  47
  48static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
  49
  50static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */
  51
  52static bool blkcg_policy_enabled(struct request_queue *q,
  53				 const struct blkcg_policy *pol)
  54{
  55	return pol && test_bit(pol->plid, q->blkcg_pols);
  56}
  57
  58/**
  59 * blkg_free - free a blkg
  60 * @blkg: blkg to free
  61 *
  62 * Free @blkg which may be partially allocated.
  63 */
  64static void blkg_free(struct blkcg_gq *blkg)
  65{
  66	int i;
  67
  68	if (!blkg)
  69		return;
  70
  71	for (i = 0; i < BLKCG_MAX_POLS; i++)
  72		if (blkg->pd[i])
  73			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
  74
  75	if (blkg->blkcg != &blkcg_root)
  76		blk_exit_rl(&blkg->rl);
  77
  78	blkg_rwstat_exit(&blkg->stat_ios);
  79	blkg_rwstat_exit(&blkg->stat_bytes);
  80	kfree(blkg);
  81}
  82
  83/**
  84 * blkg_alloc - allocate a blkg
  85 * @blkcg: block cgroup the new blkg is associated with
  86 * @q: request_queue the new blkg is associated with
  87 * @gfp_mask: allocation mask to use
  88 *
   89 * Allocate a new blkg associating @blkcg and @q.
  90 */
  91static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
  92				   gfp_t gfp_mask)
  93{
  94	struct blkcg_gq *blkg;
  95	int i;
  96
  97	/* alloc and init base part */
  98	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
  99	if (!blkg)
 100		return NULL;
 101
 102	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
 103	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
 104		goto err_free;
 105
 106	blkg->q = q;
 107	INIT_LIST_HEAD(&blkg->q_node);
 108	blkg->blkcg = blkcg;
 109	atomic_set(&blkg->refcnt, 1);
 110
 111	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
 112	if (blkcg != &blkcg_root) {
 113		if (blk_init_rl(&blkg->rl, q, gfp_mask))
 114			goto err_free;
 115		blkg->rl.blkg = blkg;
 116	}
 117
 118	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 119		struct blkcg_policy *pol = blkcg_policy[i];
 120		struct blkg_policy_data *pd;
 121
 122		if (!blkcg_policy_enabled(q, pol))
 123			continue;
 124
 125		/* alloc per-policy data and attach it to blkg */
 126		pd = pol->pd_alloc_fn(gfp_mask, q->node);
 127		if (!pd)
 128			goto err_free;
 129
 130		blkg->pd[i] = pd;
 131		pd->blkg = blkg;
 132		pd->plid = i;
 133	}
 134
 135	return blkg;
 136
 137err_free:
 138	blkg_free(blkg);
 139	return NULL;
 140}
 141
 142struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 143				      struct request_queue *q, bool update_hint)
 144{
 145	struct blkcg_gq *blkg;
 146
 147	/*
 148	 * Hint didn't match.  Look up from the radix tree.  Note that the
 149	 * hint can only be updated under queue_lock as otherwise @blkg
 150	 * could have already been removed from blkg_tree.  The caller is
 151	 * responsible for grabbing queue_lock if @update_hint.
 152	 */
 153	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
 154	if (blkg && blkg->q == q) {
 155		if (update_hint) {
 156			lockdep_assert_held(q->queue_lock);
 157			rcu_assign_pointer(blkcg->blkg_hint, blkg);
 158		}
 159		return blkg;
 160	}
 161
 162	return NULL;
 163}
 164EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
 165
 166/*
 167 * If @new_blkg is %NULL, this function tries to allocate a new one as
 168 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 169 */
 170static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 171				    struct request_queue *q,
 172				    struct blkcg_gq *new_blkg)
 173{
 174	struct blkcg_gq *blkg;
 175	struct bdi_writeback_congested *wb_congested;
 176	int i, ret;
 177
 178	WARN_ON_ONCE(!rcu_read_lock_held());
 179	lockdep_assert_held(q->queue_lock);
 180
 181	/* blkg holds a reference to blkcg */
 182	if (!css_tryget_online(&blkcg->css)) {
 183		ret = -ENODEV;
 184		goto err_free_blkg;
 185	}
 186
 187	wb_congested = wb_congested_get_create(&q->backing_dev_info,
 188					       blkcg->css.id, GFP_NOWAIT);
 189	if (!wb_congested) {
 190		ret = -ENOMEM;
 191		goto err_put_css;
 192	}
 193
 194	/* allocate */
 195	if (!new_blkg) {
 196		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
 197		if (unlikely(!new_blkg)) {
 198			ret = -ENOMEM;
 199			goto err_put_congested;
 200		}
 201	}
 202	blkg = new_blkg;
 203	blkg->wb_congested = wb_congested;
 204
 205	/* link parent */
 206	if (blkcg_parent(blkcg)) {
 207		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
 208		if (WARN_ON_ONCE(!blkg->parent)) {
 209			ret = -ENODEV;
 210			goto err_put_congested;
 211		}
 212		blkg_get(blkg->parent);
 213	}
 214
 215	/* invoke per-policy init */
 216	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 217		struct blkcg_policy *pol = blkcg_policy[i];
 218
 219		if (blkg->pd[i] && pol->pd_init_fn)
 220			pol->pd_init_fn(blkg->pd[i]);
 221	}
 222
 223	/* insert */
 224	spin_lock(&blkcg->lock);
 225	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
 226	if (likely(!ret)) {
 227		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
 228		list_add(&blkg->q_node, &q->blkg_list);
 229
 230		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 231			struct blkcg_policy *pol = blkcg_policy[i];
 232
 233			if (blkg->pd[i] && pol->pd_online_fn)
 234				pol->pd_online_fn(blkg->pd[i]);
 235		}
 236	}
 237	blkg->online = true;
 238	spin_unlock(&blkcg->lock);
 239
 240	if (!ret)
 241		return blkg;
 242
  243	/* @blkg failed to be fully initialized, use the usual release path */
 244	blkg_put(blkg);
 245	return ERR_PTR(ret);
 246
 247err_put_congested:
 248	wb_congested_put(wb_congested);
 249err_put_css:
 250	css_put(&blkcg->css);
 251err_free_blkg:
 252	blkg_free(new_blkg);
 253	return ERR_PTR(ret);
 254}
 255
 256/**
 257 * blkg_lookup_create - lookup blkg, try to create one if not there
 258 * @blkcg: blkcg of interest
 259 * @q: request_queue of interest
 260 *
 261 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 262 * create one.  blkg creation is performed recursively from blkcg_root such
 263 * that all non-root blkg's have access to the parent blkg.  This function
 264 * should be called under RCU read lock and @q->queue_lock.
 265 *
 266 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 267 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 268 * dead and bypassing, returns ERR_PTR(-EBUSY).
 269 */
 270struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 271				    struct request_queue *q)
 272{
 273	struct blkcg_gq *blkg;
 274
 275	WARN_ON_ONCE(!rcu_read_lock_held());
 276	lockdep_assert_held(q->queue_lock);
 277
 278	/*
 279	 * This could be the first entry point of blkcg implementation and
 280	 * we shouldn't allow anything to go through for a bypassing queue.
 281	 */
 282	if (unlikely(blk_queue_bypass(q)))
 283		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
 284
 285	blkg = __blkg_lookup(blkcg, q, true);
 286	if (blkg)
 287		return blkg;
 288
 289	/*
 290	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
 291	 * non-root blkgs have access to their parents.
 292	 */
 293	while (true) {
 294		struct blkcg *pos = blkcg;
 295		struct blkcg *parent = blkcg_parent(blkcg);
 296
 297		while (parent && !__blkg_lookup(parent, q, false)) {
 298			pos = parent;
 299			parent = blkcg_parent(parent);
 300		}
 301
 302		blkg = blkg_create(pos, q, NULL);
 303		if (pos == blkcg || IS_ERR(blkg))
 304			return blkg;
 305	}
 306}
 307
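/*
 * Editorial example, not part of blk-cgroup.c: a minimal sketch of the
 * locking the comment above requires of blkg_lookup_create() callers in
 * v4.6 -- the RCU read lock plus @q->queue_lock.  The wrapper name is
 * hypothetical; blkg_get()/blkg_put() are the refcount helpers from
 * blk-cgroup.h.
 */
static struct blkcg_gq *example_get_blkg(struct blkcg *blkcg,
					 struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(blkcg, q);
	if (!IS_ERR(blkg))
		blkg_get(blkg);		/* caller must blkg_put() later */

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	return blkg;
}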
 308static void blkg_destroy(struct blkcg_gq *blkg)
 309{
 310	struct blkcg *blkcg = blkg->blkcg;
 311	struct blkcg_gq *parent = blkg->parent;
 312	int i;
 313
 314	lockdep_assert_held(blkg->q->queue_lock);
 315	lockdep_assert_held(&blkcg->lock);
 316
 317	/* Something wrong if we are trying to remove same group twice */
 318	WARN_ON_ONCE(list_empty(&blkg->q_node));
 319	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 320
 321	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 322		struct blkcg_policy *pol = blkcg_policy[i];
 323
 324		if (blkg->pd[i] && pol->pd_offline_fn)
 325			pol->pd_offline_fn(blkg->pd[i]);
 326	}
 327
 328	if (parent) {
 329		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
 330		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
 331	}
 332
 333	blkg->online = false;
 334
 335	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
 336	list_del_init(&blkg->q_node);
 337	hlist_del_init_rcu(&blkg->blkcg_node);
 338
 339	/*
 340	 * Both setting lookup hint to and clearing it from @blkg are done
 341	 * under queue_lock.  If it's not pointing to @blkg now, it never
 342	 * will.  Hint assignment itself can race safely.
 343	 */
 344	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
 345		rcu_assign_pointer(blkcg->blkg_hint, NULL);
 346
 347	/*
 348	 * Put the reference taken at the time of creation so that when all
 349	 * queues are gone, group can be destroyed.
 350	 */
 351	blkg_put(blkg);
 352}
 353
 354/**
 355 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 356 * @q: request_queue of interest
 357 *
 358 * Destroy all blkgs associated with @q.
 359 */
 360static void blkg_destroy_all(struct request_queue *q)
 361{
 362	struct blkcg_gq *blkg, *n;
 363
 364	lockdep_assert_held(q->queue_lock);
 365
 366	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 367		struct blkcg *blkcg = blkg->blkcg;
 368
 369		spin_lock(&blkcg->lock);
 370		blkg_destroy(blkg);
 371		spin_unlock(&blkcg->lock);
 372	}
 373
 374	q->root_blkg = NULL;
 375	q->root_rl.blkg = NULL;
 376}
 377
 378/*
 379 * A group is RCU protected, but having an rcu lock does not mean that one
 380 * can access all the fields of blkg and assume these are valid.  For
 381 * example, don't try to follow throtl_data and request queue links.
 382 *
 383 * Having a reference to blkg under an rcu allows accesses to only values
 384 * local to groups like group stats and group rate limits.
 385 */
 386void __blkg_release_rcu(struct rcu_head *rcu_head)
 387{
 388	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
 389
 390	/* release the blkcg and parent blkg refs this blkg has been holding */
 391	css_put(&blkg->blkcg->css);
 392	if (blkg->parent)
 393		blkg_put(blkg->parent);
 394
 395	wb_congested_put(blkg->wb_congested);
 396
 397	blkg_free(blkg);
 398}
 399EXPORT_SYMBOL_GPL(__blkg_release_rcu);
 400
 401/*
  402 * The next function is used by blk_queue_for_each_rl().  It's a bit tricky
 403 * because the root blkg uses @q->root_rl instead of its own rl.
 404 */
 405struct request_list *__blk_queue_next_rl(struct request_list *rl,
 406					 struct request_queue *q)
 407{
 408	struct list_head *ent;
 409	struct blkcg_gq *blkg;
 410
 411	/*
 412	 * Determine the current blkg list_head.  The first entry is
 413	 * root_rl which is off @q->blkg_list and mapped to the head.
 414	 */
 415	if (rl == &q->root_rl) {
 416		ent = &q->blkg_list;
 417		/* There are no more block groups, hence no request lists */
 418		if (list_empty(ent))
 419			return NULL;
 420	} else {
 421		blkg = container_of(rl, struct blkcg_gq, rl);
 422		ent = &blkg->q_node;
 423	}
 424
 425	/* walk to the next list_head, skip root blkcg */
 426	ent = ent->next;
 427	if (ent == &q->root_blkg->q_node)
 428		ent = ent->next;
 429	if (ent == &q->blkg_list)
 430		return NULL;
 431
 432	blkg = container_of(ent, struct blkcg_gq, q_node);
 433	return &blkg->rl;
 434}
 435
 436static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 437			     struct cftype *cftype, u64 val)
 438{
 439	struct blkcg *blkcg = css_to_blkcg(css);
 440	struct blkcg_gq *blkg;
 441	int i;
 442
 443	mutex_lock(&blkcg_pol_mutex);
 444	spin_lock_irq(&blkcg->lock);
 445
 446	/*
 447	 * Note that stat reset is racy - it doesn't synchronize against
 448	 * stat updates.  This is a debug feature which shouldn't exist
 449	 * anyway.  If you get hit by a race, retry.
 450	 */
 451	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 452		blkg_rwstat_reset(&blkg->stat_bytes);
 453		blkg_rwstat_reset(&blkg->stat_ios);
 454
 455		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 456			struct blkcg_policy *pol = blkcg_policy[i];
 457
 458			if (blkg->pd[i] && pol->pd_reset_stats_fn)
 459				pol->pd_reset_stats_fn(blkg->pd[i]);
 460		}
 461	}
 462
 463	spin_unlock_irq(&blkcg->lock);
 464	mutex_unlock(&blkcg_pol_mutex);
 465	return 0;
 466}
 467
 468const char *blkg_dev_name(struct blkcg_gq *blkg)
 469{
 470	/* some drivers (floppy) instantiate a queue w/o disk registered */
 471	if (blkg->q->backing_dev_info.dev)
 472		return dev_name(blkg->q->backing_dev_info.dev);
 473	return NULL;
 474}
 475EXPORT_SYMBOL_GPL(blkg_dev_name);
 476
 477/**
 478 * blkcg_print_blkgs - helper for printing per-blkg data
 479 * @sf: seq_file to print to
 480 * @blkcg: blkcg of interest
 481 * @prfill: fill function to print out a blkg
 482 * @pol: policy in question
 483 * @data: data to be passed to @prfill
 484 * @show_total: to print out sum of prfill return values or not
 485 *
 486 * This function invokes @prfill on each blkg of @blkcg if pd for the
 487 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 488 * policy data and @data and the matching queue lock held.  If @show_total
 489 * is %true, the sum of the return values from @prfill is printed with
 490 * "Total" label at the end.
 491 *
 492 * This is to be used to construct print functions for
 493 * cftype->read_seq_string method.
 494 */
 495void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 496		       u64 (*prfill)(struct seq_file *,
 497				     struct blkg_policy_data *, int),
 498		       const struct blkcg_policy *pol, int data,
 499		       bool show_total)
 500{
 501	struct blkcg_gq *blkg;
 502	u64 total = 0;
 503
 504	rcu_read_lock();
 505	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 506		spin_lock_irq(blkg->q->queue_lock);
 507		if (blkcg_policy_enabled(blkg->q, pol))
 508			total += prfill(sf, blkg->pd[pol->plid], data);
 509		spin_unlock_irq(blkg->q->queue_lock);
 510	}
 511	rcu_read_unlock();
 512
 513	if (show_total)
 514		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
 515}
 516EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
 517
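/*
 * Editorial example, not part of blk-cgroup.c: the pattern described above.
 * A prfill callback prints one value per blkg and a seq_show wrapper hands
 * it to blkcg_print_blkgs().  "struct example_pd", example_prfill() and
 * blkcg_policy_example are hypothetical policy-side names.
 */
struct example_pd {
	struct blkg_policy_data pd;
	u64 nr_dispatched;
};

static u64 example_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			  int off)
{
	struct example_pd *epd = container_of(pd, struct example_pd, pd);

	return __blkg_prfill_u64(sf, pd, epd->nr_dispatched);
}

static int example_seq_show(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), example_prfill,
			  &blkcg_policy_example, 0, true);
	return 0;
}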
 518/**
 519 * __blkg_prfill_u64 - prfill helper for a single u64 value
 520 * @sf: seq_file to print to
 521 * @pd: policy private data of interest
 522 * @v: value to print
 523 *
  524 * Print @v to @sf for the device associated with @pd.
 525 */
 526u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
 527{
 528	const char *dname = blkg_dev_name(pd->blkg);
 529
 530	if (!dname)
 531		return 0;
 532
 533	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
 534	return v;
 535}
 536EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
 537
 538/**
 539 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 540 * @sf: seq_file to print to
 541 * @pd: policy private data of interest
 542 * @rwstat: rwstat to print
 543 *
  544 * Print @rwstat to @sf for the device associated with @pd.
 545 */
 546u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 547			 const struct blkg_rwstat *rwstat)
 548{
 549	static const char *rwstr[] = {
 550		[BLKG_RWSTAT_READ]	= "Read",
 551		[BLKG_RWSTAT_WRITE]	= "Write",
 552		[BLKG_RWSTAT_SYNC]	= "Sync",
 553		[BLKG_RWSTAT_ASYNC]	= "Async",
 554	};
 555	const char *dname = blkg_dev_name(pd->blkg);
 556	u64 v;
 557	int i;
 558
 559	if (!dname)
 560		return 0;
 561
 562	for (i = 0; i < BLKG_RWSTAT_NR; i++)
 563		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
 564			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
 565
 566	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
 567		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
 568	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
 569	return v;
 570}
 571EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
 572
 573/**
 574 * blkg_prfill_stat - prfill callback for blkg_stat
 575 * @sf: seq_file to print to
 576 * @pd: policy private data of interest
 577 * @off: offset to the blkg_stat in @pd
 578 *
 579 * prfill callback for printing a blkg_stat.
 580 */
 581u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
 582{
 583	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
 584}
 585EXPORT_SYMBOL_GPL(blkg_prfill_stat);
 586
 587/**
 588 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 589 * @sf: seq_file to print to
 590 * @pd: policy private data of interest
 591 * @off: offset to the blkg_rwstat in @pd
 592 *
 593 * prfill callback for printing a blkg_rwstat.
 594 */
 595u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 596		       int off)
 597{
 598	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
 599
 600	return __blkg_prfill_rwstat(sf, pd, &rwstat);
 601}
 602EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
 603
 604static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
 605				    struct blkg_policy_data *pd, int off)
 606{
 607	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);
 608
 609	return __blkg_prfill_rwstat(sf, pd, &rwstat);
 610}
 611
 612/**
 613 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 614 * @sf: seq_file to print to
 615 * @v: unused
 616 *
 617 * To be used as cftype->seq_show to print blkg->stat_bytes.
 618 * cftype->private must be set to the blkcg_policy.
 619 */
 620int blkg_print_stat_bytes(struct seq_file *sf, void *v)
 621{
 622	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 623			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
 624			  offsetof(struct blkcg_gq, stat_bytes), true);
 625	return 0;
 626}
 627EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
 628
 629/**
  630 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 631 * @sf: seq_file to print to
 632 * @v: unused
 633 *
 634 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 635 * must be set to the blkcg_policy.
 636 */
 637int blkg_print_stat_ios(struct seq_file *sf, void *v)
 638{
 639	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 640			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
 641			  offsetof(struct blkcg_gq, stat_ios), true);
 642	return 0;
 643}
 644EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
 645
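/*
 * Editorial example, not part of blk-cgroup.c: wiring the two seq_show
 * helpers above into a policy's legacy cgroup files.  As the comments note,
 * ->private must carry the owning blkcg_policy; blkcg_policy_example and
 * the file names are illustrative (cfq uses the same pattern).
 */
static struct cftype example_legacy_files[] = {
	{
		.name = "io_service_bytes",
		.private = (unsigned long)&blkcg_policy_example,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "io_serviced",
		.private = (unsigned long)&blkcg_policy_example,
		.seq_show = blkg_print_stat_ios,
	},
	{ }	/* terminate */
};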
 646static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
 647					      struct blkg_policy_data *pd,
 648					      int off)
 649{
 650	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
 651							      NULL, off);
 652	return __blkg_prfill_rwstat(sf, pd, &rwstat);
 653}
 654
 655/**
 656 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 657 * @sf: seq_file to print to
 658 * @v: unused
 659 */
 660int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
 661{
 662	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 663			  blkg_prfill_rwstat_field_recursive,
 664			  (void *)seq_cft(sf)->private,
 665			  offsetof(struct blkcg_gq, stat_bytes), true);
 666	return 0;
 667}
 668EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
 669
 670/**
 671 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 672 * @sf: seq_file to print to
 673 * @v: unused
 674 */
 675int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
 676{
 677	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 678			  blkg_prfill_rwstat_field_recursive,
 679			  (void *)seq_cft(sf)->private,
 680			  offsetof(struct blkcg_gq, stat_ios), true);
 681	return 0;
 682}
 683EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
 684
 685/**
 686 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 687 * @blkg: blkg of interest
 688 * @pol: blkcg_policy which contains the blkg_stat
 689 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 690 *
 691 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 692 * online descendants and their aux counts.  The caller must be holding the
 693 * queue lock for online tests.
 694 *
 695 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 696 * at @off bytes into @blkg's blkg_policy_data of the policy.
 697 */
 698u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
 699			    struct blkcg_policy *pol, int off)
 700{
 701	struct blkcg_gq *pos_blkg;
 702	struct cgroup_subsys_state *pos_css;
 703	u64 sum = 0;
 704
 705	lockdep_assert_held(blkg->q->queue_lock);
 706
 707	rcu_read_lock();
 708	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
 709		struct blkg_stat *stat;
 710
 711		if (!pos_blkg->online)
 712			continue;
 713
 714		if (pol)
 715			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
 716		else
 717			stat = (void *)blkg + off;
 718
 719		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
 720	}
 721	rcu_read_unlock();
 722
 723	return sum;
 724}
 725EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
 726
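/*
 * Editorial example, not part of blk-cgroup.c: a hierarchical prfill
 * callback built on blkg_stat_recursive_sum(), analogous to the rwstat
 * variant used by the _recursive printing helpers above.  The policy
 * pointer (blkcg_policy_example) and the offset passed in @off are assumed
 * to come from the caller's cftype setup.
 */
static u64 example_prfill_stat_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	u64 sum = blkg_stat_recursive_sum(pd->blkg, &blkcg_policy_example, off);

	return __blkg_prfill_u64(sf, pd, sum);
}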
 727/**
 728 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 729 * @blkg: blkg of interest
 730 * @pol: blkcg_policy which contains the blkg_rwstat
 731 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 732 *
 733 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 734 * online descendants and their aux counts.  The caller must be holding the
 735 * queue lock for online tests.
 736 *
 737 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 738 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 739 */
 740struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
 741					     struct blkcg_policy *pol, int off)
 742{
 743	struct blkcg_gq *pos_blkg;
 744	struct cgroup_subsys_state *pos_css;
 745	struct blkg_rwstat sum = { };
 746	int i;
 747
 748	lockdep_assert_held(blkg->q->queue_lock);
 749
 750	rcu_read_lock();
 751	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
 752		struct blkg_rwstat *rwstat;
 753
 754		if (!pos_blkg->online)
 755			continue;
 756
 757		if (pol)
 758			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
 759		else
 760			rwstat = (void *)pos_blkg + off;
 761
 762		for (i = 0; i < BLKG_RWSTAT_NR; i++)
 763			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
 764				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
 765				&sum.aux_cnt[i]);
 766	}
 767	rcu_read_unlock();
 768
 769	return sum;
 770}
 771EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
 772
 773/**
 774 * blkg_conf_prep - parse and prepare for per-blkg config update
 775 * @blkcg: target block cgroup
 776 * @pol: target policy
 777 * @input: input string
 778 * @ctx: blkg_conf_ctx to be filled
 779 *
 780 * Parse per-blkg config update from @input and initialize @ctx with the
 781 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 782 * part of @input following MAJ:MIN.  This function returns with RCU read
 783 * lock and queue lock held and must be paired with blkg_conf_finish().
 784 */
 785int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 786		   char *input, struct blkg_conf_ctx *ctx)
 787	__acquires(rcu) __acquires(disk->queue->queue_lock)
 788{
 789	struct gendisk *disk;
 790	struct blkcg_gq *blkg;
 791	struct module *owner;
 792	unsigned int major, minor;
 793	int key_len, part, ret;
 794	char *body;
 795
 796	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
 797		return -EINVAL;
 798
 799	body = input + key_len;
 800	if (!isspace(*body))
 801		return -EINVAL;
 802	body = skip_spaces(body);
 803
 804	disk = get_gendisk(MKDEV(major, minor), &part);
 805	if (!disk)
 806		return -ENODEV;
 807	if (part) {
 808		owner = disk->fops->owner;
 809		put_disk(disk);
 810		module_put(owner);
 811		return -ENODEV;
 812	}
 813
 814	rcu_read_lock();
 815	spin_lock_irq(disk->queue->queue_lock);
 816
 817	if (blkcg_policy_enabled(disk->queue, pol))
 818		blkg = blkg_lookup_create(blkcg, disk->queue);
 819	else
 820		blkg = ERR_PTR(-EOPNOTSUPP);
 821
 822	if (IS_ERR(blkg)) {
 823		ret = PTR_ERR(blkg);
 824		rcu_read_unlock();
 825		spin_unlock_irq(disk->queue->queue_lock);
 826		owner = disk->fops->owner;
 827		put_disk(disk);
 828		module_put(owner);
 829		/*
 830		 * If queue was bypassing, we should retry.  Do so after a
 831		 * short msleep().  It isn't strictly necessary but queue
 832		 * can be bypassing for some time and it's always nice to
 833		 * avoid busy looping.
 834		 */
 835		if (ret == -EBUSY) {
 836			msleep(10);
 837			ret = restart_syscall();
 838		}
 839		return ret;
 840	}
 841
 842	ctx->disk = disk;
 843	ctx->blkg = blkg;
 844	ctx->body = body;
 845	return 0;
 846}
 847EXPORT_SYMBOL_GPL(blkg_conf_prep);
 848
 849/**
 850 * blkg_conf_finish - finish up per-blkg config update
  851 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 852 *
 853 * Finish up after per-blkg config update.  This function must be paired
 854 * with blkg_conf_prep().
 855 */
 856void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 857	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
 858{
 859	struct module *owner;
 860
 861	spin_unlock_irq(ctx->disk->queue->queue_lock);
 862	rcu_read_unlock();
 863	owner = ctx->disk->fops->owner;
 864	put_disk(ctx->disk);
 865	module_put(owner);
 866}
 867EXPORT_SYMBOL_GPL(blkg_conf_finish);
 868
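/*
 * Editorial example, not part of blk-cgroup.c: the prep/finish pairing the
 * comments above require, as seen from a policy's cftype write handler.
 * The handler name, blkcg_policy_example and what is done with ctx.body
 * are illustrative assumptions.
 */
static ssize_t example_conf_write(struct kernfs_open_file *of, char *buf,
				  size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	u64 val;
	int ret;

	/* parses "MAJ:MIN ..." and returns with RCU and queue_lock held */
	ret = blkg_conf_prep(blkcg, &blkcg_policy_example, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &val) == 1) {
		/* ctx.blkg is the blkg for the device named in @buf;
		 * update its per-policy data here */
		ret = 0;
	}

	blkg_conf_finish(&ctx);	/* drops queue_lock and RCU */
	return ret ?: nbytes;
}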
 869static int blkcg_print_stat(struct seq_file *sf, void *v)
 870{
 871	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
 872	struct blkcg_gq *blkg;
 873
 874	rcu_read_lock();
 875
 876	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 877		const char *dname;
 878		struct blkg_rwstat rwstat;
 879		u64 rbytes, wbytes, rios, wios;
 880
 881		dname = blkg_dev_name(blkg);
 882		if (!dname)
 883			continue;
 884
 885		spin_lock_irq(blkg->q->queue_lock);
 886
 887		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
 888					offsetof(struct blkcg_gq, stat_bytes));
 889		rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
 890		wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
 891
 892		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
 893					offsetof(struct blkcg_gq, stat_ios));
 894		rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
 895		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
 896
 897		spin_unlock_irq(blkg->q->queue_lock);
 898
 899		if (rbytes || wbytes || rios || wios)
 900			seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
 901				   dname, rbytes, wbytes, rios, wios);
 902	}
 903
 904	rcu_read_unlock();
 905	return 0;
 906}
 907
 908struct cftype blkcg_files[] = {
 909	{
 910		.name = "stat",
 911		.flags = CFTYPE_NOT_ON_ROOT,
 912		.seq_show = blkcg_print_stat,
 913	},
 914	{ }	/* terminate */
 915};
 916
 917struct cftype blkcg_legacy_files[] = {
 918	{
 919		.name = "reset_stats",
 920		.write_u64 = blkcg_reset_stats,
 921	},
 922	{ }	/* terminate */
 923};
 924
 925/**
 926 * blkcg_css_offline - cgroup css_offline callback
 927 * @css: css of interest
 928 *
  929 * This function is called when @css is about to go away and is responsible
 930 * for shooting down all blkgs associated with @css.  blkgs should be
 931 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 932 * inside q lock, this function performs reverse double lock dancing.
 933 *
 934 * This is the blkcg counterpart of ioc_release_fn().
 935 */
 936static void blkcg_css_offline(struct cgroup_subsys_state *css)
 937{
 938	struct blkcg *blkcg = css_to_blkcg(css);
 939
 940	spin_lock_irq(&blkcg->lock);
 941
 942	while (!hlist_empty(&blkcg->blkg_list)) {
 943		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
 944						struct blkcg_gq, blkcg_node);
 945		struct request_queue *q = blkg->q;
 946
 947		if (spin_trylock(q->queue_lock)) {
 948			blkg_destroy(blkg);
 949			spin_unlock(q->queue_lock);
 950		} else {
 951			spin_unlock_irq(&blkcg->lock);
 952			cpu_relax();
 953			spin_lock_irq(&blkcg->lock);
 954		}
 955	}
 956
 957	spin_unlock_irq(&blkcg->lock);
 958
 959	wb_blkcg_offline(blkcg);
 960}
 961
 962static void blkcg_css_free(struct cgroup_subsys_state *css)
 963{
 964	struct blkcg *blkcg = css_to_blkcg(css);
 965	int i;
 966
 967	mutex_lock(&blkcg_pol_mutex);
 968
 969	list_del(&blkcg->all_blkcgs_node);
 970
 971	for (i = 0; i < BLKCG_MAX_POLS; i++)
 972		if (blkcg->cpd[i])
 973			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
 974
 975	mutex_unlock(&blkcg_pol_mutex);
 976
 977	kfree(blkcg);
 978}
 979
 980static struct cgroup_subsys_state *
 981blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 982{
 983	struct blkcg *blkcg;
 984	struct cgroup_subsys_state *ret;
 985	int i;
 986
 987	mutex_lock(&blkcg_pol_mutex);
 988
 989	if (!parent_css) {
 990		blkcg = &blkcg_root;
 991	} else {
 992		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
 993		if (!blkcg) {
 994			ret = ERR_PTR(-ENOMEM);
 995			goto free_blkcg;
 996		}
 997	}
 998
 999	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
1000		struct blkcg_policy *pol = blkcg_policy[i];
1001		struct blkcg_policy_data *cpd;
1002
1003		/*
1004		 * If the policy hasn't been attached yet, wait for it
1005		 * to be attached before doing anything else. Otherwise,
1006		 * check if the policy requires any specific per-cgroup
1007		 * data: if it does, allocate and initialize it.
1008		 */
1009		if (!pol || !pol->cpd_alloc_fn)
1010			continue;
1011
1012		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1013		if (!cpd) {
1014			ret = ERR_PTR(-ENOMEM);
1015			goto free_pd_blkcg;
1016		}
1017		blkcg->cpd[i] = cpd;
1018		cpd->blkcg = blkcg;
1019		cpd->plid = i;
1020		if (pol->cpd_init_fn)
1021			pol->cpd_init_fn(cpd);
1022	}
1023
1024	spin_lock_init(&blkcg->lock);
1025	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
1026	INIT_HLIST_HEAD(&blkcg->blkg_list);
1027#ifdef CONFIG_CGROUP_WRITEBACK
1028	INIT_LIST_HEAD(&blkcg->cgwb_list);
1029#endif
1030	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1031
1032	mutex_unlock(&blkcg_pol_mutex);
1033	return &blkcg->css;
1034
1035free_pd_blkcg:
1036	for (i--; i >= 0; i--)
1037		if (blkcg->cpd[i])
1038			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1039free_blkcg:
1040	kfree(blkcg);
1041	mutex_unlock(&blkcg_pol_mutex);
1042	return ret;
1043}
1044
1045/**
1046 * blkcg_init_queue - initialize blkcg part of request queue
1047 * @q: request_queue to initialize
1048 *
1049 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1050 * part of new request_queue @q.
1051 *
1052 * RETURNS:
1053 * 0 on success, -errno on failure.
1054 */
1055int blkcg_init_queue(struct request_queue *q)
1056{
1057	struct blkcg_gq *new_blkg, *blkg;
1058	bool preloaded;
1059	int ret;
1060
1061	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1062	if (!new_blkg)
1063		return -ENOMEM;
1064
1065	preloaded = !radix_tree_preload(GFP_KERNEL);
1066
1067	/*
1068	 * Make sure the root blkg exists and count the existing blkgs.  As
1069	 * @q is bypassing at this point, blkg_lookup_create() can't be
1070	 * used.  Open code insertion.
1071	 */
1072	rcu_read_lock();
1073	spin_lock_irq(q->queue_lock);
1074	blkg = blkg_create(&blkcg_root, q, new_blkg);
1075	spin_unlock_irq(q->queue_lock);
1076	rcu_read_unlock();
1077
1078	if (preloaded)
1079		radix_tree_preload_end();
1080
1081	if (IS_ERR(blkg)) {
1082		blkg_free(new_blkg);
1083		return PTR_ERR(blkg);
1084	}
1085
1086	q->root_blkg = blkg;
1087	q->root_rl.blkg = blkg;
1088
1089	ret = blk_throtl_init(q);
1090	if (ret) {
1091		spin_lock_irq(q->queue_lock);
1092		blkg_destroy_all(q);
1093		spin_unlock_irq(q->queue_lock);
1094	}
1095	return ret;
1096}
1097
1098/**
1099 * blkcg_drain_queue - drain blkcg part of request_queue
1100 * @q: request_queue to drain
1101 *
1102 * Called from blk_drain_queue().  Responsible for draining blkcg part.
1103 */
1104void blkcg_drain_queue(struct request_queue *q)
1105{
1106	lockdep_assert_held(q->queue_lock);
1107
1108	/*
1109	 * @q could be exiting and already have destroyed all blkgs as
1110	 * indicated by NULL root_blkg.  If so, don't confuse policies.
1111	 */
1112	if (!q->root_blkg)
1113		return;
1114
1115	blk_throtl_drain(q);
1116}
1117
1118/**
1119 * blkcg_exit_queue - exit and release blkcg part of request_queue
1120 * @q: request_queue being released
1121 *
1122 * Called from blk_release_queue().  Responsible for exiting blkcg part.
1123 */
1124void blkcg_exit_queue(struct request_queue *q)
1125{
1126	spin_lock_irq(q->queue_lock);
1127	blkg_destroy_all(q);
1128	spin_unlock_irq(q->queue_lock);
1129
1130	blk_throtl_exit(q);
1131}
1132
1133/*
 1134 * We cannot support shared io contexts, as we have no means to support
1135 * two tasks with the same ioc in two different groups without major rework
1136 * of the main cic data structures.  For now we allow a task to change
1137 * its cgroup only if it's the only owner of its ioc.
1138 */
1139static int blkcg_can_attach(struct cgroup_taskset *tset)
1140{
1141	struct task_struct *task;
1142	struct cgroup_subsys_state *dst_css;
1143	struct io_context *ioc;
1144	int ret = 0;
1145
1146	/* task_lock() is needed to avoid races with exit_io_context() */
1147	cgroup_taskset_for_each(task, dst_css, tset) {
1148		task_lock(task);
1149		ioc = task->io_context;
1150		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1151			ret = -EINVAL;
1152		task_unlock(task);
1153		if (ret)
1154			break;
1155	}
1156	return ret;
1157}
1158
1159static void blkcg_bind(struct cgroup_subsys_state *root_css)
1160{
1161	int i;
1162
1163	mutex_lock(&blkcg_pol_mutex);
1164
1165	for (i = 0; i < BLKCG_MAX_POLS; i++) {
1166		struct blkcg_policy *pol = blkcg_policy[i];
1167		struct blkcg *blkcg;
1168
1169		if (!pol || !pol->cpd_bind_fn)
1170			continue;
1171
1172		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1173			if (blkcg->cpd[pol->plid])
1174				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1175	}
1176	mutex_unlock(&blkcg_pol_mutex);
1177}
1178
1179struct cgroup_subsys io_cgrp_subsys = {
1180	.css_alloc = blkcg_css_alloc,
1181	.css_offline = blkcg_css_offline,
1182	.css_free = blkcg_css_free,
1183	.can_attach = blkcg_can_attach,
1184	.bind = blkcg_bind,
1185	.dfl_cftypes = blkcg_files,
1186	.legacy_cftypes = blkcg_legacy_files,
1187	.legacy_name = "blkio",
1188#ifdef CONFIG_MEMCG
1189	/*
1190	 * This ensures that, if available, memcg is automatically enabled
1191	 * together on the default hierarchy so that the owner cgroup can
1192	 * be retrieved from writeback pages.
1193	 */
1194	.depends_on = 1 << memory_cgrp_id,
1195#endif
1196};
1197EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1198
1199/**
1200 * blkcg_activate_policy - activate a blkcg policy on a request_queue
1201 * @q: request_queue of interest
1202 * @pol: blkcg policy to activate
1203 *
1204 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
1205 * bypass mode to populate its blkgs with policy_data for @pol.
1206 *
1207 * Activation happens with @q bypassed, so nobody would be accessing blkgs
1208 * from IO path.  Update of each blkg is protected by both queue and blkcg
1209 * locks so that holding either lock and testing blkcg_policy_enabled() is
1210 * always enough for dereferencing policy data.
1211 *
1212 * The caller is responsible for synchronizing [de]activations and policy
 1213 * [un]registrations.  Returns 0 on success, -errno on failure.
1214 */
1215int blkcg_activate_policy(struct request_queue *q,
1216			  const struct blkcg_policy *pol)
1217{
1218	struct blkg_policy_data *pd_prealloc = NULL;
1219	struct blkcg_gq *blkg;
1220	int ret;
1221
1222	if (blkcg_policy_enabled(q, pol))
1223		return 0;
1224
1225	blk_queue_bypass_start(q);
1226pd_prealloc:
1227	if (!pd_prealloc) {
1228		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
1229		if (!pd_prealloc) {
1230			ret = -ENOMEM;
1231			goto out_bypass_end;
1232		}
1233	}
1234
1235	spin_lock_irq(q->queue_lock);
1236
1237	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1238		struct blkg_policy_data *pd;
1239
1240		if (blkg->pd[pol->plid])
1241			continue;
1242
1243		pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
1244		if (!pd)
1245			swap(pd, pd_prealloc);
1246		if (!pd) {
1247			spin_unlock_irq(q->queue_lock);
1248			goto pd_prealloc;
1249		}
1250
1251		blkg->pd[pol->plid] = pd;
1252		pd->blkg = blkg;
1253		pd->plid = pol->plid;
1254		if (pol->pd_init_fn)
1255			pol->pd_init_fn(pd);
1256	}
1257
1258	__set_bit(pol->plid, q->blkcg_pols);
1259	ret = 0;
1260
1261	spin_unlock_irq(q->queue_lock);
1262out_bypass_end:
1263	blk_queue_bypass_end(q);
1264	if (pd_prealloc)
1265		pol->pd_free_fn(pd_prealloc);
1266	return ret;
1267}
1268EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1269
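/*
 * Editorial example, not part of blk-cgroup.c: a sketch of activation from
 * a policy's per-queue setup, the pattern blk-throttle and cfq follow.  The
 * function name and blkcg_policy_example are hypothetical; per-queue
 * allocation done before activation is elided.
 */
static int example_init_queue(struct request_queue *q)
{
	int ret;

	ret = blkcg_activate_policy(q, &blkcg_policy_example);
	if (ret)
		return ret;	/* no blkg carries this policy's pd */

	/*
	 * From here until blkcg_deactivate_policy(), every blkg of @q has
	 * policy data reachable via blkg_to_pd(blkg, &blkcg_policy_example).
	 */
	return 0;
}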
1270/**
1271 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1272 * @q: request_queue of interest
1273 * @pol: blkcg policy to deactivate
1274 *
1275 * Deactivate @pol on @q.  Follows the same synchronization rules as
1276 * blkcg_activate_policy().
1277 */
1278void blkcg_deactivate_policy(struct request_queue *q,
1279			     const struct blkcg_policy *pol)
1280{
1281	struct blkcg_gq *blkg;
1282
1283	if (!blkcg_policy_enabled(q, pol))
1284		return;
1285
1286	blk_queue_bypass_start(q);
1287	spin_lock_irq(q->queue_lock);
1288
1289	__clear_bit(pol->plid, q->blkcg_pols);
1290
1291	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1292		/* grab blkcg lock too while removing @pd from @blkg */
1293		spin_lock(&blkg->blkcg->lock);
1294
1295		if (blkg->pd[pol->plid]) {
1296			if (pol->pd_offline_fn)
1297				pol->pd_offline_fn(blkg->pd[pol->plid]);
1298			pol->pd_free_fn(blkg->pd[pol->plid]);
1299			blkg->pd[pol->plid] = NULL;
1300		}
1301
1302		spin_unlock(&blkg->blkcg->lock);
1303	}
1304
1305	spin_unlock_irq(q->queue_lock);
1306	blk_queue_bypass_end(q);
1307}
1308EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1309
1310/**
1311 * blkcg_policy_register - register a blkcg policy
1312 * @pol: blkcg policy to register
1313 *
1314 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1315 * successful registration.  Returns 0 on success and -errno on failure.
1316 */
1317int blkcg_policy_register(struct blkcg_policy *pol)
1318{
1319	struct blkcg *blkcg;
1320	int i, ret;
1321
1322	mutex_lock(&blkcg_pol_register_mutex);
1323	mutex_lock(&blkcg_pol_mutex);
1324
1325	/* find an empty slot */
1326	ret = -ENOSPC;
1327	for (i = 0; i < BLKCG_MAX_POLS; i++)
1328		if (!blkcg_policy[i])
1329			break;
1330	if (i >= BLKCG_MAX_POLS)
1331		goto err_unlock;
1332
1333	/* register @pol */
1334	pol->plid = i;
1335	blkcg_policy[pol->plid] = pol;
1336
1337	/* allocate and install cpd's */
1338	if (pol->cpd_alloc_fn) {
1339		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1340			struct blkcg_policy_data *cpd;
1341
1342			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1343			if (!cpd) {
1344				mutex_unlock(&blkcg_pol_mutex);
1345				goto err_free_cpds;
1346			}
1347
1348			blkcg->cpd[pol->plid] = cpd;
1349			cpd->blkcg = blkcg;
1350			cpd->plid = pol->plid;
1351			pol->cpd_init_fn(cpd);
1352		}
1353	}
1354
1355	mutex_unlock(&blkcg_pol_mutex);
1356
1357	/* everything is in place, add intf files for the new policy */
1358	if (pol->dfl_cftypes)
1359		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1360					       pol->dfl_cftypes));
1361	if (pol->legacy_cftypes)
1362		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1363						  pol->legacy_cftypes));
1364	mutex_unlock(&blkcg_pol_register_mutex);
1365	return 0;
1366
1367err_free_cpds:
1368	if (pol->cpd_alloc_fn) {
1369		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1370			if (blkcg->cpd[pol->plid]) {
1371				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1372				blkcg->cpd[pol->plid] = NULL;
1373			}
1374		}
1375	}
1376	blkcg_policy[pol->plid] = NULL;
1377err_unlock:
1378	mutex_unlock(&blkcg_pol_mutex);
1379	mutex_unlock(&blkcg_pol_register_mutex);
1380	return ret;
1381}
1382EXPORT_SYMBOL_GPL(blkcg_policy_register);
1383
1384/**
1385 * blkcg_policy_unregister - unregister a blkcg policy
1386 * @pol: blkcg policy to unregister
1387 *
1388 * Undo blkcg_policy_register(@pol).  Might sleep.
1389 */
1390void blkcg_policy_unregister(struct blkcg_policy *pol)
1391{
1392	struct blkcg *blkcg;
1393
1394	mutex_lock(&blkcg_pol_register_mutex);
1395
1396	if (WARN_ON(blkcg_policy[pol->plid] != pol))
1397		goto out_unlock;
1398
1399	/* kill the intf files first */
1400	if (pol->dfl_cftypes)
1401		cgroup_rm_cftypes(pol->dfl_cftypes);
1402	if (pol->legacy_cftypes)
1403		cgroup_rm_cftypes(pol->legacy_cftypes);
1404
1405	/* remove cpds and unregister */
1406	mutex_lock(&blkcg_pol_mutex);
1407
1408	if (pol->cpd_alloc_fn) {
1409		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1410			if (blkcg->cpd[pol->plid]) {
1411				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1412				blkcg->cpd[pol->plid] = NULL;
1413			}
1414		}
1415	}
1416	blkcg_policy[pol->plid] = NULL;
1417
1418	mutex_unlock(&blkcg_pol_mutex);
1419out_unlock:
1420	mutex_unlock(&blkcg_pol_register_mutex);
1421}
1422EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
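/*
 * Editorial example, not part of blk-cgroup.c: a skeleton showing how a
 * policy module pairs the register/unregister calls above in its module
 * init/exit, the way blk-throttle and cfq do.  blkcg_policy_example and
 * its callbacks are assumed to be defined elsewhere.
 */
static int __init example_policy_init(void)
{
	return blkcg_policy_register(&blkcg_policy_example);
}

static void __exit example_policy_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_example);
}

module_init(example_policy_init);
module_exit(example_policy_exit);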
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Common Block IO controller cgroup interface
   4 *
   5 * Based on ideas and code from CFQ, CFS and BFQ:
   6 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   7 *
   8 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
   9 *		      Paolo Valente <paolo.valente@unimore.it>
  10 *
  11 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
  12 * 	              Nauman Rafique <nauman@google.com>
  13 *
  14 * For policy-specific per-blkcg data:
  15 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
  16 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
  17 */
  18#include <linux/ioprio.h>
  19#include <linux/kdev_t.h>
  20#include <linux/module.h>
  21#include <linux/sched/signal.h>
  22#include <linux/err.h>
  23#include <linux/blkdev.h>
  24#include <linux/backing-dev.h>
  25#include <linux/slab.h>
  26#include <linux/genhd.h>
  27#include <linux/delay.h>
  28#include <linux/atomic.h>
  29#include <linux/ctype.h>
  30#include <linux/blk-cgroup.h>
  31#include <linux/tracehook.h>
  32#include <linux/psi.h>
  33#include "blk.h"
  34
  35#define MAX_KEY_LEN 100
  36
  37/*
  38 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
  39 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
  40 * policy [un]register operations including cgroup file additions /
  41 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
  42 * allows grabbing it from cgroup callbacks.
  43 */
  44static DEFINE_MUTEX(blkcg_pol_register_mutex);
  45static DEFINE_MUTEX(blkcg_pol_mutex);
  46
  47struct blkcg blkcg_root;
  48EXPORT_SYMBOL_GPL(blkcg_root);
  49
  50struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
  51EXPORT_SYMBOL_GPL(blkcg_root_css);
  52
  53static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
  54
  55static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */
  56
  57bool blkcg_debug_stats = false;
  58static struct workqueue_struct *blkcg_punt_bio_wq;
  59
  60static bool blkcg_policy_enabled(struct request_queue *q,
  61				 const struct blkcg_policy *pol)
  62{
  63	return pol && test_bit(pol->plid, q->blkcg_pols);
  64}
  65
  66/**
  67 * blkg_free - free a blkg
  68 * @blkg: blkg to free
  69 *
  70 * Free @blkg which may be partially allocated.
  71 */
  72static void blkg_free(struct blkcg_gq *blkg)
  73{
  74	int i;
  75
  76	if (!blkg)
  77		return;
  78
  79	for (i = 0; i < BLKCG_MAX_POLS; i++)
  80		if (blkg->pd[i])
  81			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
  82
  83	blkg_rwstat_exit(&blkg->stat_ios);
  84	blkg_rwstat_exit(&blkg->stat_bytes);
  85	percpu_ref_exit(&blkg->refcnt);
  86	kfree(blkg);
  87}
  88
  89static void __blkg_release(struct rcu_head *rcu)
  90{
  91	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
  92
  93	WARN_ON(!bio_list_empty(&blkg->async_bios));
  94
  95	/* release the blkcg and parent blkg refs this blkg has been holding */
  96	css_put(&blkg->blkcg->css);
  97	if (blkg->parent)
  98		blkg_put(blkg->parent);
  99
 100	wb_congested_put(blkg->wb_congested);
 101
 102	blkg_free(blkg);
 103}
 104
 105/*
 106 * A group is RCU protected, but having an rcu lock does not mean that one
 107 * can access all the fields of blkg and assume these are valid.  For
 108 * example, don't try to follow throtl_data and request queue links.
 109 *
 110 * Having a reference to blkg under an rcu allows accesses to only values
 111 * local to groups like group stats and group rate limits.
 112 */
 113static void blkg_release(struct percpu_ref *ref)
 114{
 115	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
 116
 117	call_rcu(&blkg->rcu_head, __blkg_release);
 118}
 119
 120static void blkg_async_bio_workfn(struct work_struct *work)
 121{
 122	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
 123					     async_bio_work);
 124	struct bio_list bios = BIO_EMPTY_LIST;
 125	struct bio *bio;
 126
 127	/* as long as there are pending bios, @blkg can't go away */
 128	spin_lock_bh(&blkg->async_bio_lock);
 129	bio_list_merge(&bios, &blkg->async_bios);
 130	bio_list_init(&blkg->async_bios);
 131	spin_unlock_bh(&blkg->async_bio_lock);
 132
 133	while ((bio = bio_list_pop(&bios)))
 134		submit_bio(bio);
 135}
 136
 137/**
 138 * blkg_alloc - allocate a blkg
 139 * @blkcg: block cgroup the new blkg is associated with
 140 * @q: request_queue the new blkg is associated with
 141 * @gfp_mask: allocation mask to use
 142 *
  143 * Allocate a new blkg associating @blkcg and @q.
 144 */
 145static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
 146				   gfp_t gfp_mask)
 147{
 148	struct blkcg_gq *blkg;
 149	int i;
 150
 151	/* alloc and init base part */
 152	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
 153	if (!blkg)
 154		return NULL;
 155
 156	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
 157		goto err_free;
 158
 159	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
 160	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
 161		goto err_free;
 162
 163	blkg->q = q;
 164	INIT_LIST_HEAD(&blkg->q_node);
 165	spin_lock_init(&blkg->async_bio_lock);
 166	bio_list_init(&blkg->async_bios);
 167	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
 168	blkg->blkcg = blkcg;
 169
 170	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 171		struct blkcg_policy *pol = blkcg_policy[i];
 172		struct blkg_policy_data *pd;
 173
 174		if (!blkcg_policy_enabled(q, pol))
 175			continue;
 176
 177		/* alloc per-policy data and attach it to blkg */
 178		pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
 179		if (!pd)
 180			goto err_free;
 181
 182		blkg->pd[i] = pd;
 183		pd->blkg = blkg;
 184		pd->plid = i;
 185	}
 186
 187	return blkg;
 188
 189err_free:
 190	blkg_free(blkg);
 191	return NULL;
 192}
 193
 194struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 195				      struct request_queue *q, bool update_hint)
 196{
 197	struct blkcg_gq *blkg;
 198
 199	/*
 200	 * Hint didn't match.  Look up from the radix tree.  Note that the
 201	 * hint can only be updated under queue_lock as otherwise @blkg
 202	 * could have already been removed from blkg_tree.  The caller is
 203	 * responsible for grabbing queue_lock if @update_hint.
 204	 */
 205	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
 206	if (blkg && blkg->q == q) {
 207		if (update_hint) {
 208			lockdep_assert_held(&q->queue_lock);
 209			rcu_assign_pointer(blkcg->blkg_hint, blkg);
 210		}
 211		return blkg;
 212	}
 213
 214	return NULL;
 215}
 216EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
 217
 218/*
 219 * If @new_blkg is %NULL, this function tries to allocate a new one as
 220 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 221 */
 222static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 223				    struct request_queue *q,
 224				    struct blkcg_gq *new_blkg)
 225{
 226	struct blkcg_gq *blkg;
 227	struct bdi_writeback_congested *wb_congested;
 228	int i, ret;
 229
 230	WARN_ON_ONCE(!rcu_read_lock_held());
 231	lockdep_assert_held(&q->queue_lock);
 232
 233	/* request_queue is dying, do not create/recreate a blkg */
 234	if (blk_queue_dying(q)) {
 235		ret = -ENODEV;
 236		goto err_free_blkg;
 237	}
 238
 239	/* blkg holds a reference to blkcg */
 240	if (!css_tryget_online(&blkcg->css)) {
 241		ret = -ENODEV;
 242		goto err_free_blkg;
 243	}
 244
 245	wb_congested = wb_congested_get_create(q->backing_dev_info,
 246					       blkcg->css.id,
 247					       GFP_NOWAIT | __GFP_NOWARN);
 248	if (!wb_congested) {
 249		ret = -ENOMEM;
 250		goto err_put_css;
 251	}
 252
 253	/* allocate */
 254	if (!new_blkg) {
 255		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
 256		if (unlikely(!new_blkg)) {
 257			ret = -ENOMEM;
 258			goto err_put_congested;
 259		}
 260	}
 261	blkg = new_blkg;
 262	blkg->wb_congested = wb_congested;
 263
 264	/* link parent */
 265	if (blkcg_parent(blkcg)) {
 266		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
 267		if (WARN_ON_ONCE(!blkg->parent)) {
 268			ret = -ENODEV;
 269			goto err_put_congested;
 270		}
 271		blkg_get(blkg->parent);
 272	}
 273
 274	/* invoke per-policy init */
 275	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 276		struct blkcg_policy *pol = blkcg_policy[i];
 277
 278		if (blkg->pd[i] && pol->pd_init_fn)
 279			pol->pd_init_fn(blkg->pd[i]);
 280	}
 281
 282	/* insert */
 283	spin_lock(&blkcg->lock);
 284	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
 285	if (likely(!ret)) {
 286		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
 287		list_add(&blkg->q_node, &q->blkg_list);
 288
 289		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 290			struct blkcg_policy *pol = blkcg_policy[i];
 291
 292			if (blkg->pd[i] && pol->pd_online_fn)
 293				pol->pd_online_fn(blkg->pd[i]);
 294		}
 295	}
 296	blkg->online = true;
 297	spin_unlock(&blkcg->lock);
 298
 299	if (!ret)
 300		return blkg;
 301
  302	/* @blkg failed to be fully initialized, use the usual release path */
 303	blkg_put(blkg);
 304	return ERR_PTR(ret);
 305
 306err_put_congested:
 307	wb_congested_put(wb_congested);
 308err_put_css:
 309	css_put(&blkcg->css);
 310err_free_blkg:
 311	blkg_free(new_blkg);
 312	return ERR_PTR(ret);
 313}
 314
 315/**
 316 * __blkg_lookup_create - lookup blkg, try to create one if not there
 317 * @blkcg: blkcg of interest
 318 * @q: request_queue of interest
 319 *
 320 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 321 * create one.  blkg creation is performed recursively from blkcg_root such
 322 * that all non-root blkg's have access to the parent blkg.  This function
 323 * should be called under RCU read lock and @q->queue_lock.
 324 *
 325 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 326 * down from root.
 327 */
 328struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 329				      struct request_queue *q)
 330{
 331	struct blkcg_gq *blkg;
 332
 333	WARN_ON_ONCE(!rcu_read_lock_held());
 334	lockdep_assert_held(&q->queue_lock);
 335
 336	blkg = __blkg_lookup(blkcg, q, true);
 337	if (blkg)
 338		return blkg;
 339
 340	/*
 341	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
 342	 * non-root blkgs have access to their parents.  Returns the closest
 343	 * blkg to the intended blkg should blkg_create() fail.
 344	 */
 345	while (true) {
 346		struct blkcg *pos = blkcg;
 347		struct blkcg *parent = blkcg_parent(blkcg);
 348		struct blkcg_gq *ret_blkg = q->root_blkg;
 349
 350		while (parent) {
 351			blkg = __blkg_lookup(parent, q, false);
 352			if (blkg) {
 353				/* remember closest blkg */
 354				ret_blkg = blkg;
 355				break;
 356			}
 357			pos = parent;
 358			parent = blkcg_parent(parent);
 359		}
 360
 361		blkg = blkg_create(pos, q, NULL);
 362		if (IS_ERR(blkg))
 363			return ret_blkg;
 364		if (pos == blkcg)
 365			return blkg;
 366	}
 367}
 368
 369/**
 370 * blkg_lookup_create - find or create a blkg
 371 * @blkcg: target block cgroup
 372 * @q: target request_queue
 373 *
 374 * This looks up or creates the blkg representing the unique pair
 375 * of the blkcg and the request_queue.
 376 */
 377struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 378				    struct request_queue *q)
 379{
 380	struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
 381
 382	if (unlikely(!blkg)) {
 383		unsigned long flags;
 384
 385		spin_lock_irqsave(&q->queue_lock, flags);
 386		blkg = __blkg_lookup_create(blkcg, q);
 387		spin_unlock_irqrestore(&q->queue_lock, flags);
 388	}
 389
 390	return blkg;
 391}
 392
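/*
 * Editorial example, not part of blk-cgroup.c: unlike the v4.6 variant, the
 * v5.4 blkg_lookup_create() above takes @q->queue_lock itself, so a caller
 * on the bio submission path only needs the RCU read lock.  A minimal
 * sketch with the hypothetical per-policy use elided.
 */
static void example_lookup_from_bio_path(struct blkcg *blkcg,
					 struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg = blkg_lookup_create(blkcg, q);
	/* use @blkg (or take a reference) while still under RCU */
	rcu_read_unlock();
}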
 393static void blkg_destroy(struct blkcg_gq *blkg)
 394{
 395	struct blkcg *blkcg = blkg->blkcg;
 396	struct blkcg_gq *parent = blkg->parent;
 397	int i;
 398
 399	lockdep_assert_held(&blkg->q->queue_lock);
 400	lockdep_assert_held(&blkcg->lock);
 401
 402	/* Something wrong if we are trying to remove same group twice */
 403	WARN_ON_ONCE(list_empty(&blkg->q_node));
 404	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 405
 406	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 407		struct blkcg_policy *pol = blkcg_policy[i];
 408
 409		if (blkg->pd[i] && pol->pd_offline_fn)
 410			pol->pd_offline_fn(blkg->pd[i]);
 411	}
 412
 413	if (parent) {
 414		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
 415		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
 416	}
 417
 418	blkg->online = false;
 419
 420	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
 421	list_del_init(&blkg->q_node);
 422	hlist_del_init_rcu(&blkg->blkcg_node);
 423
 424	/*
 425	 * Both setting lookup hint to and clearing it from @blkg are done
 426	 * under queue_lock.  If it's not pointing to @blkg now, it never
 427	 * will.  Hint assignment itself can race safely.
 428	 */
 429	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
 430		rcu_assign_pointer(blkcg->blkg_hint, NULL);
 431
 432	/*
 433	 * Put the reference taken at the time of creation so that when all
 434	 * queues are gone, group can be destroyed.
 435	 */
 436	percpu_ref_kill(&blkg->refcnt);
 437}
 438
 439/**
 440 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 441 * @q: request_queue of interest
 442 *
 443 * Destroy all blkgs associated with @q.
 444 */
 445static void blkg_destroy_all(struct request_queue *q)
 446{
 447	struct blkcg_gq *blkg, *n;
 448
 449	spin_lock_irq(&q->queue_lock);
 450	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 451		struct blkcg *blkcg = blkg->blkcg;
 452
 453		spin_lock(&blkcg->lock);
 454		blkg_destroy(blkg);
 455		spin_unlock(&blkcg->lock);
 456	}
 457
 458	q->root_blkg = NULL;
 459	spin_unlock_irq(&q->queue_lock);
 460}
 461
 462static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 463			     struct cftype *cftype, u64 val)
 464{
 465	struct blkcg *blkcg = css_to_blkcg(css);
 466	struct blkcg_gq *blkg;
 467	int i;
 468
 469	mutex_lock(&blkcg_pol_mutex);
 470	spin_lock_irq(&blkcg->lock);
 471
 472	/*
 473	 * Note that stat reset is racy - it doesn't synchronize against
 474	 * stat updates.  This is a debug feature which shouldn't exist
 475	 * anyway.  If you get hit by a race, retry.
 476	 */
 477	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 478		blkg_rwstat_reset(&blkg->stat_bytes);
 479		blkg_rwstat_reset(&blkg->stat_ios);
 480
 481		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 482			struct blkcg_policy *pol = blkcg_policy[i];
 483
 484			if (blkg->pd[i] && pol->pd_reset_stats_fn)
 485				pol->pd_reset_stats_fn(blkg->pd[i]);
 486		}
 487	}
 488
 489	spin_unlock_irq(&blkcg->lock);
 490	mutex_unlock(&blkcg_pol_mutex);
 491	return 0;
 492}
 493
 494const char *blkg_dev_name(struct blkcg_gq *blkg)
 495{
 496	/* some drivers (floppy) instantiate a queue w/o disk registered */
 497	if (blkg->q->backing_dev_info->dev)
 498		return dev_name(blkg->q->backing_dev_info->dev);
 499	return NULL;
 500}
 501
 502/**
 503 * blkcg_print_blkgs - helper for printing per-blkg data
 504 * @sf: seq_file to print to
 505 * @blkcg: blkcg of interest
 506 * @prfill: fill function to print out a blkg
 507 * @pol: policy in question
 508 * @data: data to be passed to @prfill
 509 * @show_total: to print out sum of prfill return values or not
 510 *
 511 * This function invokes @prfill on each blkg of @blkcg if pd for the
 512 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 513 * policy data and @data and the matching queue lock held.  If @show_total
 514 * is %true, the sum of the return values from @prfill is printed with
 515 * "Total" label at the end.
 516 *
 517 * This is to be used to construct print functions for the
 518 * cftype->seq_show method.
 519 */
 520void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 521		       u64 (*prfill)(struct seq_file *,
 522				     struct blkg_policy_data *, int),
 523		       const struct blkcg_policy *pol, int data,
 524		       bool show_total)
 525{
 526	struct blkcg_gq *blkg;
 527	u64 total = 0;
 528
 529	rcu_read_lock();
 530	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 531		spin_lock_irq(&blkg->q->queue_lock);
 532		if (blkcg_policy_enabled(blkg->q, pol))
 533			total += prfill(sf, blkg->pd[pol->plid], data);
 534		spin_unlock_irq(&blkg->q->queue_lock);
 535	}
 536	rcu_read_unlock();
 537
 538	if (show_total)
 539		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
 540}
 541EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
 542
 543/**
 544 * __blkg_prfill_u64 - prfill helper for a single u64 value
 545 * @sf: seq_file to print to
 546 * @pd: policy private data of interest
 547 * @v: value to print
 548 *
 549 * Print @v to @sf for the device associated with @pd.
 550 */
 551u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
 552{
 553	const char *dname = blkg_dev_name(pd->blkg);
 554
 555	if (!dname)
 556		return 0;
 557
 558	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
 559	return v;
 560}
 561EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
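
/*
 * Illustrative sketch: pairing blkcg_print_blkgs() with __blkg_prfill_u64()
 * to print one u64 per device from a policy's per-blkg data.  "example_pol",
 * "struct example_pd" and the nr_dispatched field are placeholders, not
 * symbols defined in this file.
 */
static struct blkcg_policy example_pol;

struct example_pd {
	struct blkg_policy_data pd;	/* recovered below via container_of() */
	u64 nr_dispatched;
};

static u64 example_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			  int off)
{
	struct example_pd *epd = container_of(pd, struct example_pd, pd);

	return __blkg_prfill_u64(sf, pd, epd->nr_dispatched);
}

static int example_show(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), example_prfill,
			  &example_pol, 0, true);
	return 0;
}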
 562
 563/**
 564 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 565 * @sf: seq_file to print to
 566 * @pd: policy private data of interest
 567 * @rwstat: rwstat to print
 568 *
 569 * Print @rwstat to @sf for the device associated with @pd.
 570 */
 571u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 572			 const struct blkg_rwstat_sample *rwstat)
 573{
 574	static const char *rwstr[] = {
 575		[BLKG_RWSTAT_READ]	= "Read",
 576		[BLKG_RWSTAT_WRITE]	= "Write",
 577		[BLKG_RWSTAT_SYNC]	= "Sync",
 578		[BLKG_RWSTAT_ASYNC]	= "Async",
 579		[BLKG_RWSTAT_DISCARD]	= "Discard",
 580	};
 581	const char *dname = blkg_dev_name(pd->blkg);
 582	u64 v;
 583	int i;
 584
 585	if (!dname)
 586		return 0;
 587
 588	for (i = 0; i < BLKG_RWSTAT_NR; i++)
 589		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
 590			   rwstat->cnt[i]);
 591
 592	v = rwstat->cnt[BLKG_RWSTAT_READ] +
 593		rwstat->cnt[BLKG_RWSTAT_WRITE] +
 594		rwstat->cnt[BLKG_RWSTAT_DISCARD];
 595	seq_printf(sf, "%s Total %llu\n", dname, v);
 596	return v;
 597}
 598EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
 599
 600/**
 601 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 602 * @sf: seq_file to print to
 603 * @pd: policy private data of interest
 604 * @off: offset to the blkg_rwstat in @pd
 605 *
 606 * prfill callback for printing a blkg_rwstat.
 607 */
 608u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 609		       int off)
 610{
 611	struct blkg_rwstat_sample rwstat = { };
 612
 613	blkg_rwstat_read((void *)pd + off, &rwstat);
 614	return __blkg_prfill_rwstat(sf, pd, &rwstat);
 615}
 616EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
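
/*
 * Illustrative sketch: blkg_prfill_rwstat() interprets @off relative to the
 * blkg_policy_data pointer, so a policy that embeds blkg_policy_data as the
 * first member of its pd can pass a plain offsetof().  The "example_rw_*"
 * names and the "served" rwstat are placeholders.
 */
static struct blkcg_policy example_rw_pol;

struct example_rw_pd {
	struct blkg_policy_data pd;	/* first, so pd-relative offsets line up */
	struct blkg_rwstat served;
};

static int example_rw_show(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &example_rw_pol,
			  offsetof(struct example_rw_pd, served), true);
	return 0;
}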
 617
 618static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
 619				    struct blkg_policy_data *pd, int off)
 620{
 621	struct blkg_rwstat_sample rwstat = { };
 622
 623	blkg_rwstat_read((void *)pd->blkg + off, &rwstat);
 624	return __blkg_prfill_rwstat(sf, pd, &rwstat);
 625}
 626
 627/**
 628 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 629 * @sf: seq_file to print to
 630 * @v: unused
 631 *
 632 * To be used as cftype->seq_show to print blkg->stat_bytes.
 633 * cftype->private must be set to the blkcg_policy.
 634 */
 635int blkg_print_stat_bytes(struct seq_file *sf, void *v)
 636{
 637	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 638			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
 639			  offsetof(struct blkcg_gq, stat_bytes), true);
 640	return 0;
 641}
 642EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
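
/*
 * Illustrative sketch: hooking blkg_print_stat_bytes() into a policy's
 * legacy cftype table.  As required above, cftype->private carries the
 * blkcg_policy; "example_stat_pol" and the file name are placeholders.
 */
static struct blkcg_policy example_stat_pol;

static struct cftype example_stat_files[] = {
	{
		.name = "example.io_service_bytes",
		.private = (unsigned long)&example_stat_pol,
		.seq_show = blkg_print_stat_bytes,
	},
	{ }	/* terminate */
};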
 643
 644/**
 645 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 646 * @sf: seq_file to print to
 647 * @v: unused
 648 *
 649 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 650 * must be set to the blkcg_policy.
 651 */
 652int blkg_print_stat_ios(struct seq_file *sf, void *v)
 653{
 654	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 655			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
 656			  offsetof(struct blkcg_gq, stat_ios), true);
 657	return 0;
 658}
 659EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
 660
 661static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
 662					      struct blkg_policy_data *pd,
 663					      int off)
 664{
 665	struct blkg_rwstat_sample rwstat;
 666
 667	blkg_rwstat_recursive_sum(pd->blkg, NULL, off, &rwstat);
 668	return __blkg_prfill_rwstat(sf, pd, &rwstat);
 669}
 670
 671/**
 672 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 673 * @sf: seq_file to print to
 674 * @v: unused
 675 */
 676int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
 677{
 678	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 679			  blkg_prfill_rwstat_field_recursive,
 680			  (void *)seq_cft(sf)->private,
 681			  offsetof(struct blkcg_gq, stat_bytes), true);
 682	return 0;
 683}
 684EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
 685
 686/**
 687 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 688 * @sf: seq_file to print to
 689 * @v: unused
 690 */
 691int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
 692{
 693	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 694			  blkg_prfill_rwstat_field_recursive,
 695			  (void *)seq_cft(sf)->private,
 696			  offsetof(struct blkcg_gq, stat_ios), true);
 697	return 0;
 698}
 699EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
 700
 701/**
 702 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 703 * @blkg: blkg of interest
 704 * @pol: blkcg_policy which contains the blkg_rwstat
 705 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 706 * @sum: blkg_rwstat_sample structure containing the results
 707 *
 708 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 709 * online descendants and their aux counts.  The caller must be holding the
 710 * queue lock for online tests.
 711 *
 712 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 713 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 714 */
 715void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
 716		int off, struct blkg_rwstat_sample *sum)
 717{
 718	struct blkcg_gq *pos_blkg;
 719	struct cgroup_subsys_state *pos_css;
 720	unsigned int i;
 721
 722	lockdep_assert_held(&blkg->q->queue_lock);
 723
 724	rcu_read_lock();
 725	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
 726		struct blkg_rwstat *rwstat;
 727
 728		if (!pos_blkg->online)
 729			continue;
 730
 731		if (pol)
 732			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
 733		else
 734			rwstat = (void *)pos_blkg + off;
 735
 736		for (i = 0; i < BLKG_RWSTAT_NR; i++)
 737			sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
 738	}
 739	rcu_read_unlock();
 740}
 741EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
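
/*
 * Illustrative sketch of the @pol != NULL case: summing a rwstat kept in a
 * policy's pd across @blkg and its online descendants.  "example_sum_*" and
 * the "served" field are placeholders; the offset is pd-relative, which
 * works here because blkg_policy_data is the first member.
 */
static struct blkcg_policy example_sum_pol;

struct example_sum_pd {
	struct blkg_policy_data pd;
	struct blkg_rwstat served;
};

static u64 example_total_served(struct blkcg_gq *blkg)
{
	struct blkg_rwstat_sample sum = { };
	u64 total = 0;
	int i;

	lockdep_assert_held(&blkg->q->queue_lock);

	blkg_rwstat_recursive_sum(blkg, &example_sum_pol,
				  offsetof(struct example_sum_pd, served),
				  &sum);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		total += sum.cnt[i];
	return total;
}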
 742
 743/* Performs queue bypass and policy enabled checks then looks up blkg. */
 744static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
 745					  const struct blkcg_policy *pol,
 746					  struct request_queue *q)
 747{
 748	WARN_ON_ONCE(!rcu_read_lock_held());
 749	lockdep_assert_held(&q->queue_lock);
 750
 751	if (!blkcg_policy_enabled(q, pol))
 752		return ERR_PTR(-EOPNOTSUPP);
 753	return __blkg_lookup(blkcg, q, true /* update_hint */);
 754}
 755
 756/**
 757 * blkcg_conf_get_disk - parse MAJ:MIN from a per-blkg config string and get the disk
 758 * @inputp: input string pointer
 759 *
 760 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
 761 * from @input and get and return the matching gendisk.  *@inputp is
 762 * updated to point past the device node prefix.  Returns an ERR_PTR()
 763 * value on error.
 764 *
 765 * Use this function iff blkg_conf_prep() can't be used for some reason.
 766 */
 767struct gendisk *blkcg_conf_get_disk(char **inputp)
 768{
 769	char *input = *inputp;
 770	unsigned int major, minor;
 771	struct gendisk *disk;
 772	int key_len, part;
 773
 774	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
 775		return ERR_PTR(-EINVAL);
 776
 777	input += key_len;
 778	if (!isspace(*input))
 779		return ERR_PTR(-EINVAL);
 780	input = skip_spaces(input);
 781
 782	disk = get_gendisk(MKDEV(major, minor), &part);
 783	if (!disk)
 784		return ERR_PTR(-ENODEV);
 785	if (part) {
 786		put_disk_and_module(disk);
 787		return ERR_PTR(-ENODEV);
 788	}
 789
 790	*inputp = input;
 791	return disk;
 792}
 793
 794/**
 795 * blkg_conf_prep - parse and prepare for per-blkg config update
 796 * @blkcg: target block cgroup
 797 * @pol: target policy
 798 * @input: input string
 799 * @ctx: blkg_conf_ctx to be filled
 800 *
 801 * Parse per-blkg config update from @input and initialize @ctx with the
 802 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 803 * part of @input following MAJ:MIN.  This function returns with RCU read
 804 * lock and queue lock held and must be paired with blkg_conf_finish().
 805 */
 806int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 807		   char *input, struct blkg_conf_ctx *ctx)
 808	__acquires(rcu) __acquires(&disk->queue->queue_lock)
 809{
 810	struct gendisk *disk;
 811	struct request_queue *q;
 812	struct blkcg_gq *blkg;
 813	int ret;
 814
 815	disk = blkcg_conf_get_disk(&input);
 816	if (IS_ERR(disk))
 817		return PTR_ERR(disk);
 818
 819	q = disk->queue;
 820
 821	rcu_read_lock();
 822	spin_lock_irq(&q->queue_lock);
 823
 824	blkg = blkg_lookup_check(blkcg, pol, q);
 825	if (IS_ERR(blkg)) {
 826		ret = PTR_ERR(blkg);
 827		goto fail_unlock;
 828	}
 829
 830	if (blkg)
 831		goto success;
 832
 833	/*
 834	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
 835	 * non-root blkgs have access to their parents.
 836	 */
 837	while (true) {
 838		struct blkcg *pos = blkcg;
 839		struct blkcg *parent;
 840		struct blkcg_gq *new_blkg;
 841
 842		parent = blkcg_parent(blkcg);
 843		while (parent && !__blkg_lookup(parent, q, false)) {
 844			pos = parent;
 845			parent = blkcg_parent(parent);
 846		}
 847
 848		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
 849		spin_unlock_irq(&q->queue_lock);
 850		rcu_read_unlock();
 851
 852		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
 853		if (unlikely(!new_blkg)) {
 854			ret = -ENOMEM;
 855			goto fail;
 856		}
 857
 858		rcu_read_lock();
 859		spin_lock_irq(&q->queue_lock);
 860
 861		blkg = blkg_lookup_check(pos, pol, q);
 862		if (IS_ERR(blkg)) {
 863			ret = PTR_ERR(blkg);
 864			goto fail_unlock;
 865		}
 866
 867		if (blkg) {
 868			blkg_free(new_blkg);
 869		} else {
 870			blkg = blkg_create(pos, q, new_blkg);
 871			if (IS_ERR(blkg)) {
 872				ret = PTR_ERR(blkg);
 873				goto fail_unlock;
 874			}
 875		}
 876
 877		if (pos == blkcg)
 878			goto success;
 879	}
 880success:
 881	ctx->disk = disk;
 882	ctx->blkg = blkg;
 883	ctx->body = input;
 884	return 0;
 885
 886fail_unlock:
 887	spin_unlock_irq(&q->queue_lock);
 888	rcu_read_unlock();
 889fail:
 890	put_disk_and_module(disk);
 891	/*
 892	 * If queue was bypassing, we should retry.  Do so after a
 893	 * short msleep().  It isn't strictly necessary but queue
 894	 * can be bypassing for some time and it's always nice to
 895	 * avoid busy looping.
 896	 */
 897	if (ret == -EBUSY) {
 898		msleep(10);
 899		ret = restart_syscall();
 900	}
 901	return ret;
 902}
 903EXPORT_SYMBOL_GPL(blkg_conf_prep);
 904
 905/**
 906 * blkg_conf_finish - finish up per-blkg config update
 907 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 908 *
 909 * Finish up after per-blkg config update.  This function must be paired
 910 * with blkg_conf_prep().
 911 */
 912void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 913	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
 914{
 915	spin_unlock_irq(&ctx->disk->queue->queue_lock);
 916	rcu_read_unlock();
 917	put_disk_and_module(ctx->disk);
 918}
 919EXPORT_SYMBOL_GPL(blkg_conf_finish);
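
/*
 * Illustrative sketch: the usual shape of a policy's cgroup write handler
 * built on blkg_conf_prep()/blkg_conf_finish().  Everything named
 * "example_conf_*" is a placeholder, and a real policy would store the
 * parsed value in its own pd for ctx.blkg instead of discarding it.
 */
static struct blkcg_policy example_conf_pol;

static ssize_t example_conf_write(struct kernfs_open_file *of, char *buf,
				  size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	u64 limit;
	int ret;

	ret = blkg_conf_prep(blkcg, &example_conf_pol, buf, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg is the target blkg, ctx.body the text after MAJ:MIN */
	ret = kstrtou64(strim(ctx.body), 0, &limit);

	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}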
 920
 921static int blkcg_print_stat(struct seq_file *sf, void *v)
 922{
 923	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
 924	struct blkcg_gq *blkg;
 925
 926	rcu_read_lock();
 927
 928	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 929		const char *dname;
 930		char *buf;
 931		struct blkg_rwstat_sample rwstat;
 932		u64 rbytes, wbytes, rios, wios, dbytes, dios;
 933		size_t size = seq_get_buf(sf, &buf), off = 0;
 934		int i;
 935		bool has_stats = false;
 936
 937		spin_lock_irq(&blkg->q->queue_lock);
 938
 939		if (!blkg->online)
 940			goto skip;
 941
 942		dname = blkg_dev_name(blkg);
 943		if (!dname)
 944			goto skip;
 945
 946		/*
 947		 * Hooray string manipulation, count is the size written NOT
 948		 * INCLUDING THE \0, so size is now count+1 less than what we
 949		 * had before, but we want to start writing the next bit from
 950		 * the \0 so we only add count to buf.
 951		 */
 952		off += scnprintf(buf+off, size-off, "%s ", dname);
 953
 954		blkg_rwstat_recursive_sum(blkg, NULL,
 955				offsetof(struct blkcg_gq, stat_bytes), &rwstat);
 956		rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
 957		wbytes = rwstat.cnt[BLKG_RWSTAT_WRITE];
 958		dbytes = rwstat.cnt[BLKG_RWSTAT_DISCARD];
 959
 960		blkg_rwstat_recursive_sum(blkg, NULL,
 961					offsetof(struct blkcg_gq, stat_ios), &rwstat);
 962		rios = rwstat.cnt[BLKG_RWSTAT_READ];
 963		wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
 964		dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];
 965
 966		if (rbytes || wbytes || rios || wios) {
 967			has_stats = true;
 968			off += scnprintf(buf+off, size-off,
 969					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
 970					 rbytes, wbytes, rios, wios,
 971					 dbytes, dios);
 972		}
 973
 974		if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
 975			has_stats = true;
 976			off += scnprintf(buf+off, size-off,
 977					 " use_delay=%d delay_nsec=%llu",
 978					 atomic_read(&blkg->use_delay),
 979					(unsigned long long)atomic64_read(&blkg->delay_nsec));
 980		}
 981
 982		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 983			struct blkcg_policy *pol = blkcg_policy[i];
 984			size_t written;
 985
 986			if (!blkg->pd[i] || !pol->pd_stat_fn)
 987				continue;
 988
 989			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
 990			if (written)
 991				has_stats = true;
 992			off += written;
 993		}
 994
 995		if (has_stats) {
 996			if (off < size - 1) {
 997				off += scnprintf(buf+off, size-off, "\n");
 998				seq_commit(sf, off);
 999			} else {
1000				seq_commit(sf, -1);
1001			}
1002		}
1003	skip:
1004		spin_unlock_irq(&blkg->q->queue_lock);
1005	}
1006
1007	rcu_read_unlock();
1008	return 0;
1009}
1010
1011static struct cftype blkcg_files[] = {
1012	{
1013		.name = "stat",
1014		.flags = CFTYPE_NOT_ON_ROOT,
1015		.seq_show = blkcg_print_stat,
1016	},
1017	{ }	/* terminate */
1018};
1019
1020static struct cftype blkcg_legacy_files[] = {
1021	{
1022		.name = "reset_stats",
1023		.write_u64 = blkcg_reset_stats,
1024	},
1025	{ }	/* terminate */
1026};
1027
1028/*
1029 * blkcg destruction is a three-stage process.
1030 *
1031 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
1032 *    which offlines writeback.  Here we tie the next stage of blkg destruction
1033 *    to the completion of writeback associated with the blkcg.  This lets us
1034 *    avoid punting potentially large amounts of outstanding writeback to root
1035 *    while maintaining any ongoing policies.  The next stage is triggered when
1036 *    the nr_cgwbs count goes to zero.
1037 *
1038 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
1039 *    and handles the destruction of blkgs.  Here the css reference held by
1040 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
1041 *    This work may occur in cgwb_release_workfn() on the cgwb_release
1042 *    workqueue.  Any submitted ios that fail to get the blkg ref will be
1043 *    punted to the root_blkg.
1044 *
1045 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1046 *    This finally frees the blkcg.
1047 */
1048
1049/**
1050 * blkcg_css_offline - cgroup css_offline callback
1051 * @css: css of interest
1052 *
1053 * This function is called when @css is about to go away.  Here the cgwbs are
1054 * offlined first and only once writeback associated with the blkcg has
1055 * finished do we start step 2 (see above).
1056 */
1057static void blkcg_css_offline(struct cgroup_subsys_state *css)
1058{
1059	struct blkcg *blkcg = css_to_blkcg(css);
1060
1061	/* this prevents anyone from attaching or migrating to this blkcg */
1062	wb_blkcg_offline(blkcg);
1063
1064	/* put the base cgwb reference allowing step 2 to be triggered */
1065	blkcg_cgwb_put(blkcg);
1066}
1067
1068/**
1069 * blkcg_destroy_blkgs - responsible for shooting down blkgs
1070 * @blkcg: blkcg of interest
1071 *
1072 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
1073 * is nested inside q lock, this function performs reverse double lock dancing.
1074 * Destroying the blkgs releases the reference held on the blkcg's css allowing
1075 * blkcg_css_free to eventually be called.
1076 *
1077 * This is the blkcg counterpart of ioc_release_fn().
1078 */
1079void blkcg_destroy_blkgs(struct blkcg *blkcg)
1080{
1081	spin_lock_irq(&blkcg->lock);
1082
1083	while (!hlist_empty(&blkcg->blkg_list)) {
1084		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1085						struct blkcg_gq, blkcg_node);
1086		struct request_queue *q = blkg->q;
1087
1088		if (spin_trylock(&q->queue_lock)) {
1089			blkg_destroy(blkg);
1090			spin_unlock(&q->queue_lock);
1091		} else {
1092			spin_unlock_irq(&blkcg->lock);
1093			cpu_relax();
1094			spin_lock_irq(&blkcg->lock);
1095		}
1096	}
1097
1098	spin_unlock_irq(&blkcg->lock);
1099}
1100
1101static void blkcg_css_free(struct cgroup_subsys_state *css)
1102{
1103	struct blkcg *blkcg = css_to_blkcg(css);
1104	int i;
1105
1106	mutex_lock(&blkcg_pol_mutex);
1107
1108	list_del(&blkcg->all_blkcgs_node);
1109
1110	for (i = 0; i < BLKCG_MAX_POLS; i++)
1111		if (blkcg->cpd[i])
1112			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1113
1114	mutex_unlock(&blkcg_pol_mutex);
1115
1116	kfree(blkcg);
1117}
1118
1119static struct cgroup_subsys_state *
1120blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
1121{
1122	struct blkcg *blkcg;
1123	struct cgroup_subsys_state *ret;
1124	int i;
1125
1126	mutex_lock(&blkcg_pol_mutex);
1127
1128	if (!parent_css) {
1129		blkcg = &blkcg_root;
1130	} else {
1131		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1132		if (!blkcg) {
1133			ret = ERR_PTR(-ENOMEM);
1134			goto unlock;
1135		}
1136	}
1137
1138	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
1139		struct blkcg_policy *pol = blkcg_policy[i];
1140		struct blkcg_policy_data *cpd;
1141
1142		/*
1143		 * If the policy hasn't been attached yet, wait for it
1144		 * to be attached before doing anything else. Otherwise,
1145		 * check if the policy requires any specific per-cgroup
1146		 * data: if it does, allocate and initialize it.
1147		 */
1148		if (!pol || !pol->cpd_alloc_fn)
1149			continue;
1150
1151		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1152		if (!cpd) {
1153			ret = ERR_PTR(-ENOMEM);
1154			goto free_pd_blkcg;
1155		}
1156		blkcg->cpd[i] = cpd;
1157		cpd->blkcg = blkcg;
1158		cpd->plid = i;
1159		if (pol->cpd_init_fn)
1160			pol->cpd_init_fn(cpd);
1161	}
1162
1163	spin_lock_init(&blkcg->lock);
1164	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1165	INIT_HLIST_HEAD(&blkcg->blkg_list);
1166#ifdef CONFIG_CGROUP_WRITEBACK
1167	INIT_LIST_HEAD(&blkcg->cgwb_list);
1168	refcount_set(&blkcg->cgwb_refcnt, 1);
1169#endif
1170	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1171
1172	mutex_unlock(&blkcg_pol_mutex);
1173	return &blkcg->css;
1174
1175free_pd_blkcg:
1176	for (i--; i >= 0; i--)
1177		if (blkcg->cpd[i])
1178			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1179
1180	if (blkcg != &blkcg_root)
1181		kfree(blkcg);
1182unlock:
1183	mutex_unlock(&blkcg_pol_mutex);
1184	return ret;
1185}
1186
1187/**
1188 * blkcg_init_queue - initialize blkcg part of request queue
1189 * @q: request_queue to initialize
1190 *
1191 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1192 * part of new request_queue @q.
1193 *
1194 * RETURNS:
1195 * 0 on success, -errno on failure.
1196 */
1197int blkcg_init_queue(struct request_queue *q)
1198{
1199	struct blkcg_gq *new_blkg, *blkg;
1200	bool preloaded;
1201	int ret;
1202
1203	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1204	if (!new_blkg)
1205		return -ENOMEM;
1206
1207	preloaded = !radix_tree_preload(GFP_KERNEL);
1208
1209	/* Make sure the root blkg exists. */
1210	rcu_read_lock();
1211	spin_lock_irq(&q->queue_lock);
1212	blkg = blkg_create(&blkcg_root, q, new_blkg);
1213	if (IS_ERR(blkg))
1214		goto err_unlock;
1215	q->root_blkg = blkg;
1216	spin_unlock_irq(&q->queue_lock);
1217	rcu_read_unlock();
1218
1219	if (preloaded)
1220		radix_tree_preload_end();
1221
1222	ret = blk_iolatency_init(q);
1223	if (ret)
1224		goto err_destroy_all;
1225
1226	ret = blk_throtl_init(q);
1227	if (ret)
1228		goto err_destroy_all;
1229	return 0;
1230
1231err_destroy_all:
1232	blkg_destroy_all(q);
1233	return ret;
1234err_unlock:
1235	spin_unlock_irq(&q->queue_lock);
1236	rcu_read_unlock();
1237	if (preloaded)
1238		radix_tree_preload_end();
1239	return PTR_ERR(blkg);
1240}
1241
1242/**
1243 * blkcg_drain_queue - drain blkcg part of request_queue
1244 * @q: request_queue to drain
1245 *
1246 * Called from blk_drain_queue().  Responsible for draining blkcg part.
1247 */
1248void blkcg_drain_queue(struct request_queue *q)
1249{
1250	lockdep_assert_held(&q->queue_lock);
1251
1252	/*
1253	 * @q could be exiting and already have destroyed all blkgs as
1254	 * indicated by NULL root_blkg.  If so, don't confuse policies.
1255	 */
1256	if (!q->root_blkg)
1257		return;
1258
1259	blk_throtl_drain(q);
1260}
1261
1262/**
1263 * blkcg_exit_queue - exit and release blkcg part of request_queue
1264 * @q: request_queue being released
1265 *
1266 * Called from blk_exit_queue().  Responsible for exiting blkcg part.
1267 */
1268void blkcg_exit_queue(struct request_queue *q)
1269{
1270	blkg_destroy_all(q);
1271	blk_throtl_exit(q);
1272}
1273
1274/*
1275 * We cannot support shared io contexts, as we have no means to support
1276 * two tasks with the same ioc in two different groups without major rework
1277 * of the main cic data structures.  For now we allow a task to change
1278 * its cgroup only if it's the only owner of its ioc.
1279 */
1280static int blkcg_can_attach(struct cgroup_taskset *tset)
1281{
1282	struct task_struct *task;
1283	struct cgroup_subsys_state *dst_css;
1284	struct io_context *ioc;
1285	int ret = 0;
1286
1287	/* task_lock() is needed to avoid races with exit_io_context() */
1288	cgroup_taskset_for_each(task, dst_css, tset) {
1289		task_lock(task);
1290		ioc = task->io_context;
1291		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1292			ret = -EINVAL;
1293		task_unlock(task);
1294		if (ret)
1295			break;
1296	}
1297	return ret;
1298}
1299
1300static void blkcg_bind(struct cgroup_subsys_state *root_css)
1301{
1302	int i;
1303
1304	mutex_lock(&blkcg_pol_mutex);
1305
1306	for (i = 0; i < BLKCG_MAX_POLS; i++) {
1307		struct blkcg_policy *pol = blkcg_policy[i];
1308		struct blkcg *blkcg;
1309
1310		if (!pol || !pol->cpd_bind_fn)
1311			continue;
1312
1313		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1314			if (blkcg->cpd[pol->plid])
1315				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1316	}
1317	mutex_unlock(&blkcg_pol_mutex);
1318}
1319
1320static void blkcg_exit(struct task_struct *tsk)
1321{
1322	if (tsk->throttle_queue)
1323		blk_put_queue(tsk->throttle_queue);
1324	tsk->throttle_queue = NULL;
1325}
1326
1327struct cgroup_subsys io_cgrp_subsys = {
1328	.css_alloc = blkcg_css_alloc,
1329	.css_offline = blkcg_css_offline,
1330	.css_free = blkcg_css_free,
1331	.can_attach = blkcg_can_attach,
1332	.bind = blkcg_bind,
1333	.dfl_cftypes = blkcg_files,
1334	.legacy_cftypes = blkcg_legacy_files,
1335	.legacy_name = "blkio",
1336	.exit = blkcg_exit,
1337#ifdef CONFIG_MEMCG
1338	/*
1339	 * This ensures that, if available, memcg is automatically enabled
1340	 * together on the default hierarchy so that the owner cgroup can
1341	 * be retrieved from writeback pages.
1342	 */
1343	.depends_on = 1 << memory_cgrp_id,
1344#endif
1345};
1346EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1347
1348/**
1349 * blkcg_activate_policy - activate a blkcg policy on a request_queue
1350 * @q: request_queue of interest
1351 * @pol: blkcg policy to activate
1352 *
1353 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
1354 * bypass mode to populate its blkgs with policy_data for @pol.
1355 *
1356 * Activation happens with @q bypassed, so nobody would be accessing blkgs
1357 * from IO path.  Update of each blkg is protected by both queue and blkcg
1358 * locks so that holding either lock and testing blkcg_policy_enabled() is
1359 * always enough for dereferencing policy data.
1360 *
1361 * The caller is responsible for synchronizing [de]activations and policy
1362 * [un]registerations.  Returns 0 on success, -errno on failure.
1363 */
1364int blkcg_activate_policy(struct request_queue *q,
1365			  const struct blkcg_policy *pol)
1366{
1367	struct blkg_policy_data *pd_prealloc = NULL;
1368	struct blkcg_gq *blkg, *pinned_blkg = NULL;
1369	int ret;
1370
1371	if (blkcg_policy_enabled(q, pol))
1372		return 0;
1373
1374	if (queue_is_mq(q))
1375		blk_mq_freeze_queue(q);
1376retry:
1377	spin_lock_irq(&q->queue_lock);
1378
1379	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
1380	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
1381		struct blkg_policy_data *pd;
1382
1383		if (blkg->pd[pol->plid])
1384			continue;
1385
1386		/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
1387		if (blkg == pinned_blkg) {
1388			pd = pd_prealloc;
1389			pd_prealloc = NULL;
1390		} else {
1391			pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
1392					      blkg->blkcg);
1393		}
1394
1395		if (!pd) {
1396			/*
1397			 * GFP_NOWAIT failed.  Free the existing one and
1398			 * prealloc for @blkg w/ GFP_KERNEL.
1399			 */
1400			if (pinned_blkg)
1401				blkg_put(pinned_blkg);
1402			blkg_get(blkg);
1403			pinned_blkg = blkg;
1404
1405			spin_unlock_irq(&q->queue_lock);
1406
1407			if (pd_prealloc)
1408				pol->pd_free_fn(pd_prealloc);
1409			pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
1410						       blkg->blkcg);
1411			if (pd_prealloc)
1412				goto retry;
1413			else
1414				goto enomem;
1415		}
1416
1417		blkg->pd[pol->plid] = pd;
1418		pd->blkg = blkg;
1419		pd->plid = pol->plid;
1420	}
1421
1422	/* all allocated, init in the same order */
1423	if (pol->pd_init_fn)
1424		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
1425			pol->pd_init_fn(blkg->pd[pol->plid]);
1426
1427	__set_bit(pol->plid, q->blkcg_pols);
1428	ret = 0;
1429
1430	spin_unlock_irq(&q->queue_lock);
1431out:
1432	if (queue_is_mq(q))
1433		blk_mq_unfreeze_queue(q);
1434	if (pinned_blkg)
1435		blkg_put(pinned_blkg);
1436	if (pd_prealloc)
1437		pol->pd_free_fn(pd_prealloc);
1438	return ret;
1439
1440enomem:
1441	/* alloc failed, nothing's initialized yet, free everything */
1442	spin_lock_irq(&q->queue_lock);
1443	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1444		if (blkg->pd[pol->plid]) {
1445			pol->pd_free_fn(blkg->pd[pol->plid]);
1446			blkg->pd[pol->plid] = NULL;
1447		}
1448	}
1449	spin_unlock_irq(&q->queue_lock);
1450	ret = -ENOMEM;
1451	goto out;
1452}
1453EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1454
1455/**
1456 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1457 * @q: request_queue of interest
1458 * @pol: blkcg policy to deactivate
1459 *
1460 * Deactivate @pol on @q.  Follows the same synchronization rules as
1461 * blkcg_activate_policy().
1462 */
1463void blkcg_deactivate_policy(struct request_queue *q,
1464			     const struct blkcg_policy *pol)
1465{
1466	struct blkcg_gq *blkg;
1467
1468	if (!blkcg_policy_enabled(q, pol))
1469		return;
1470
1471	if (queue_is_mq(q))
1472		blk_mq_freeze_queue(q);
1473
1474	spin_lock_irq(&q->queue_lock);
1475
1476	__clear_bit(pol->plid, q->blkcg_pols);
1477
1478	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1479		if (blkg->pd[pol->plid]) {
1480			if (pol->pd_offline_fn)
1481				pol->pd_offline_fn(blkg->pd[pol->plid]);
1482			pol->pd_free_fn(blkg->pd[pol->plid]);
1483			blkg->pd[pol->plid] = NULL;
1484		}
1485	}
1486
1487	spin_unlock_irq(&q->queue_lock);
1488
1489	if (queue_is_mq(q))
1490		blk_mq_unfreeze_queue(q);
1491}
1492EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
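
/*
 * Illustrative sketch: policies normally activate themselves on a queue when
 * their elevator/rq-qos instance is set up and deactivate on teardown, so
 * pd allocations follow the policy's lifetime on that queue.  The
 * "example_act_*" functions and policy are placeholders.
 */
static struct blkcg_policy example_act_pol;

static int example_act_init_queue(struct request_queue *q)
{
	return blkcg_activate_policy(q, &example_act_pol);
}

static void example_act_exit_queue(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &example_act_pol);
}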
1493
1494/**
1495 * blkcg_policy_register - register a blkcg policy
1496 * @pol: blkcg policy to register
1497 *
1498 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1499 * successful registration.  Returns 0 on success and -errno on failure.
1500 */
1501int blkcg_policy_register(struct blkcg_policy *pol)
1502{
1503	struct blkcg *blkcg;
1504	int i, ret;
1505
1506	mutex_lock(&blkcg_pol_register_mutex);
1507	mutex_lock(&blkcg_pol_mutex);
1508
1509	/* find an empty slot */
1510	ret = -ENOSPC;
1511	for (i = 0; i < BLKCG_MAX_POLS; i++)
1512		if (!blkcg_policy[i])
1513			break;
1514	if (i >= BLKCG_MAX_POLS) {
1515		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
1516		goto err_unlock;
1517	}
1518
1519	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
1520	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1521		(!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1522		goto err_unlock;
1523
1524	/* register @pol */
1525	pol->plid = i;
1526	blkcg_policy[pol->plid] = pol;
1527
1528	/* allocate and install cpd's */
1529	if (pol->cpd_alloc_fn) {
1530		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1531			struct blkcg_policy_data *cpd;
1532
1533			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1534			if (!cpd)
1535				goto err_free_cpds;
1536
1537			blkcg->cpd[pol->plid] = cpd;
1538			cpd->blkcg = blkcg;
1539			cpd->plid = pol->plid;
1540			if (pol->cpd_init_fn)
1541				pol->cpd_init_fn(cpd);
1542		}
1543	}
1544
1545	mutex_unlock(&blkcg_pol_mutex);
1546
1547	/* everything is in place, add intf files for the new policy */
1548	if (pol->dfl_cftypes)
1549		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1550					       pol->dfl_cftypes));
1551	if (pol->legacy_cftypes)
1552		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1553						  pol->legacy_cftypes));
1554	mutex_unlock(&blkcg_pol_register_mutex);
1555	return 0;
1556
1557err_free_cpds:
1558	if (pol->cpd_free_fn) {
1559		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1560			if (blkcg->cpd[pol->plid]) {
1561				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1562				blkcg->cpd[pol->plid] = NULL;
1563			}
1564		}
1565	}
1566	blkcg_policy[pol->plid] = NULL;
1567err_unlock:
1568	mutex_unlock(&blkcg_pol_mutex);
1569	mutex_unlock(&blkcg_pol_register_mutex);
1570	return ret;
1571}
1572EXPORT_SYMBOL_GPL(blkcg_policy_register);
1573
1574/**
1575 * blkcg_policy_unregister - unregister a blkcg policy
1576 * @pol: blkcg policy to unregister
1577 *
1578 * Undo blkcg_policy_register(@pol).  Might sleep.
1579 */
1580void blkcg_policy_unregister(struct blkcg_policy *pol)
1581{
1582	struct blkcg *blkcg;
1583
1584	mutex_lock(&blkcg_pol_register_mutex);
1585
1586	if (WARN_ON(blkcg_policy[pol->plid] != pol))
1587		goto out_unlock;
1588
1589	/* kill the intf files first */
1590	if (pol->dfl_cftypes)
1591		cgroup_rm_cftypes(pol->dfl_cftypes);
1592	if (pol->legacy_cftypes)
1593		cgroup_rm_cftypes(pol->legacy_cftypes);
1594
1595	/* remove cpds and unregister */
1596	mutex_lock(&blkcg_pol_mutex);
1597
1598	if (pol->cpd_free_fn) {
1599		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1600			if (blkcg->cpd[pol->plid]) {
1601				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1602				blkcg->cpd[pol->plid] = NULL;
1603			}
1604		}
1605	}
1606	blkcg_policy[pol->plid] = NULL;
1607
1608	mutex_unlock(&blkcg_pol_mutex);
1609out_unlock:
1610	mutex_unlock(&blkcg_pol_register_mutex);
1611}
1612EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
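
/*
 * Illustrative sketch of a minimal policy module lifecycle.
 * blkcg_policy_register() insists that pd_alloc_fn/pd_free_fn (and
 * cpd_alloc_fn/cpd_free_fn) come in pairs; everything named "example_mod_*"
 * is a placeholder and a real policy would embed blkg_policy_data in a
 * larger per-blkg structure.
 */
static struct blkg_policy_data *example_mod_pd_alloc(gfp_t gfp,
						     struct request_queue *q,
						     struct blkcg *blkcg)
{
	return kzalloc_node(sizeof(struct blkg_policy_data), gfp, q->node);
}

static void example_mod_pd_free(struct blkg_policy_data *pd)
{
	kfree(pd);
}

static struct blkcg_policy example_mod_pol = {
	.pd_alloc_fn	= example_mod_pd_alloc,
	.pd_free_fn	= example_mod_pd_free,
};

static int __init example_mod_init(void)
{
	return blkcg_policy_register(&example_mod_pol);
}

static void __exit example_mod_exit(void)
{
	blkcg_policy_unregister(&example_mod_pol);
}
module_init(example_mod_init);
module_exit(example_mod_exit);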
1613
1614bool __blkcg_punt_bio_submit(struct bio *bio)
1615{
1616	struct blkcg_gq *blkg = bio->bi_blkg;
1617
1618	/* consume the flag first */
1619	bio->bi_opf &= ~REQ_CGROUP_PUNT;
1620
1621	/* never bounce for the root cgroup */
1622	if (!blkg->parent)
1623		return false;
1624
1625	spin_lock_bh(&blkg->async_bio_lock);
1626	bio_list_add(&blkg->async_bios, bio);
1627	spin_unlock_bh(&blkg->async_bio_lock);
1628
1629	queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
1630	return true;
1631}
1632
1633/*
1634 * Scale the accumulated delay based on how long it has been since we updated
1635 * the delay.  We only call this when we are adding delay, in case it's been a
1636 * while since we added delay, and when we are checking to see if we need to
1637 * delay a task, to account for any delays that may have occurred.
1638 */
1639static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1640{
1641	u64 old = atomic64_read(&blkg->delay_start);
1642
1643	/*
1644	 * We only want to scale down every second.  The idea here is that we
1645	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
1646	 * time window.  We only want to throttle tasks for recent delay that
1647	 * has occurred, in 1 second time windows since that's the maximum
1648	 * things can be throttled.  We save the current delay window in
1649	 * blkg->last_delay so we know what amount is still left to be charged
1650	 * to the blkg from this point onward.  blkg->last_use keeps track of
1651	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
1652	 * are ok with whatever is happening now, and we can take away more of
1653	 * the accumulated delay as we've already throttled enough that
1654	 * everybody is happy with their IO latencies.
1655	 */
1656	if (time_before64(old + NSEC_PER_SEC, now) &&
1657	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
1658		u64 cur = atomic64_read(&blkg->delay_nsec);
1659		u64 sub = min_t(u64, blkg->last_delay, now - old);
1660		int cur_use = atomic_read(&blkg->use_delay);
1661
1662		/*
1663		 * We've been unthrottled, subtract a larger chunk of our
1664		 * accumulated delay.
1665		 */
1666		if (cur_use < blkg->last_use)
1667			sub = max_t(u64, sub, blkg->last_delay >> 1);
1668
1669		/*
1670		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
1671		 * should only ever be growing except here where we subtract out
1672		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
1673		 * rather not end up with negative numbers.
1674		 */
1675		if (unlikely(cur < sub)) {
1676			atomic64_set(&blkg->delay_nsec, 0);
1677			blkg->last_delay = 0;
1678		} else {
1679			atomic64_sub(sub, &blkg->delay_nsec);
1680			blkg->last_delay = cur - sub;
1681		}
1682		blkg->last_use = cur_use;
1683	}
1684}
1685
1686/*
1687 * This is called when we want to actually walk up the hierarchy and check to
1688 * see if we need to throttle, and then actually throttle if there is some
1689 * accumulated delay.  This should only be called upon return to user space so
1690 * we're not holding some lock that would induce a priority inversion.
1691 */
1692static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1693{
1694	unsigned long pflags;
1695	u64 now = ktime_to_ns(ktime_get());
1696	u64 exp;
1697	u64 delay_nsec = 0;
1698	int tok;
1699
1700	while (blkg->parent) {
1701		if (atomic_read(&blkg->use_delay)) {
1702			blkcg_scale_delay(blkg, now);
1703			delay_nsec = max_t(u64, delay_nsec,
1704					   atomic64_read(&blkg->delay_nsec));
1705		}
1706		blkg = blkg->parent;
1707	}
1708
1709	if (!delay_nsec)
1710		return;
1711
1712	/*
1713	 * Let's not sleep for all eternity if we've amassed a huge delay.
1714	 * Swapping or metadata IO can accumulate 10's of seconds worth of
1715	 * delay, and we want userspace to be able to do _something_ so cap the
1716 * delays at 0.25s.  If there's 10's of seconds worth of delay then
1717 * the tasks will be delayed for 0.25s for every syscall.
1718	 */
1719	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
1720
1721	if (use_memdelay)
1722		psi_memstall_enter(&pflags);
1723
1724	exp = ktime_add_ns(now, delay_nsec);
1725	tok = io_schedule_prepare();
1726	do {
1727		__set_current_state(TASK_KILLABLE);
1728		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1729			break;
1730	} while (!fatal_signal_pending(current));
1731	io_schedule_finish(tok);
1732
1733	if (use_memdelay)
1734		psi_memstall_leave(&pflags);
1735}
1736
1737/**
1738 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
1739 *
1740 * This is only called if we've been marked with set_notify_resume().  Obviously
1741 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1742 * check to see if current->throttle_queue is set and if not this doesn't do
1743 * anything.  This should only ever be called by the resume code, it's not meant
1744 * to be called by people willy-nilly as it will actually do the work to
1745 * throttle the task if it is setup for throttling.
1746 */
1747void blkcg_maybe_throttle_current(void)
1748{
1749	struct request_queue *q = current->throttle_queue;
1750	struct cgroup_subsys_state *css;
1751	struct blkcg *blkcg;
1752	struct blkcg_gq *blkg;
1753	bool use_memdelay = current->use_memdelay;
1754
1755	if (!q)
1756		return;
1757
1758	current->throttle_queue = NULL;
1759	current->use_memdelay = false;
1760
1761	rcu_read_lock();
1762	css = kthread_blkcg();
1763	if (css)
1764		blkcg = css_to_blkcg(css);
1765	else
1766		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
1767
1768	if (!blkcg)
1769		goto out;
1770	blkg = blkg_lookup(blkcg, q);
1771	if (!blkg)
1772		goto out;
1773	if (!blkg_tryget(blkg))
1774		goto out;
1775	rcu_read_unlock();
1776
1777	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1778	blkg_put(blkg);
1779	blk_put_queue(q);
1780	return;
1781out:
1782	rcu_read_unlock();
1783	blk_put_queue(q);
1784}
1785
1786/**
1787 * blkcg_schedule_throttle - this task needs to check for throttling
1788 * @q: the request queue IO was submitted on
1789 * @use_memdelay: do we charge this to memory delay for PSI
1790 *
1791 * This is called by the IO controller when we know there's delay accumulated
1792 * for the blkg for this task.  We do not pass the blkg because there are places
1793 * we call this that may not have that information; the swapping code, for
1794 * instance, will only have a request_queue at that point.  This sets the
1795 * notify_resume for the task to check and see if it requires throttling before
1796 * returning to user space.
1797 *
1798 * We will only schedule once per syscall.  You can call this over and over
1799 * again and it will only do the check once upon return to user space, and only
1800 * throttle once.  If the task needs to be throttled again it'll need to be
1801 * re-set at the next time we see the task.
1802 */
1803void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
1804{
1805	if (unlikely(current->flags & PF_KTHREAD))
1806		return;
1807
1808	if (!blk_get_queue(q))
1809		return;
1810
1811	if (current->throttle_queue)
1812		blk_put_queue(current->throttle_queue);
1813	current->throttle_queue = q;
1814	if (use_memdelay)
1815		current->use_memdelay = use_memdelay;
1816	set_notify_resume(current);
1817}
1818
1819/**
1820 * blkcg_add_delay - add delay to this blkg
1821 * @blkg: blkg of interest
1822 * @now: the current time in nanoseconds
1823 * @delta: how many nanoseconds of delay to add
1824 *
1825 * Charge @delta to the blkg's current delay accumulation.  This is used to
1826 * throttle tasks if an IO controller thinks we need more throttling.
1827 */
1828void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1829{
1830	blkcg_scale_delay(blkg, now);
1831	atomic64_add(delta, &blkg->delay_nsec);
1832}
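
/*
 * Illustrative sketch: how an IO controller might charge a latency overrun
 * to a blkg and then ask the offending task to throttle on its way back to
 * userspace.  The helper name and the notion of "overrun" are placeholders;
 * @now is expected in nanoseconds as with blkcg_add_delay() above.
 */
static void example_charge_overrun(struct blkcg_gq *blkg, u64 now, u64 overrun)
{
	blkcg_add_delay(blkg, now, overrun);
	blkcg_schedule_throttle(blkg->q, false /* not a memstall */);
}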
1833
1834static int __init blkcg_init(void)
1835{
1836	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
1837					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
1838					    WQ_UNBOUND | WQ_SYSFS, 0);
1839	if (!blkcg_punt_bio_wq)
1840		return -ENOMEM;
1841	return 0;
1842}
1843subsys_initcall(blkcg_init);
1844
1845module_param(blkcg_debug_stats, bool, 0644);
1846MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");