v3.15
 
   1/*
   2 * Common Block IO controller cgroup interface
   3 *
   4 * Based on ideas and code from CFQ, CFS and BFQ:
   5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   6 *
   7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
   8 *		      Paolo Valente <paolo.valente@unimore.it>
   9 *
  10 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
  11 * 	              Nauman Rafique <nauman@google.com>
  12 */
  13#include <linux/ioprio.h>
  14#include <linux/kdev_t.h>
  15#include <linux/module.h>
 
  16#include <linux/err.h>
  17#include <linux/blkdev.h>
 
  18#include <linux/slab.h>
  19#include <linux/genhd.h>
  20#include <linux/delay.h>
  21#include <linux/atomic.h>
  22#include "blk-cgroup.h"
  23#include "blk.h"
 
  24
  25#define MAX_KEY_LEN 100
  26
  27static DEFINE_MUTEX(blkcg_pol_mutex);
  28
  29struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
  30			    .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
  31EXPORT_SYMBOL_GPL(blkcg_root);
  32
  33static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
  34
  35static bool blkcg_policy_enabled(struct request_queue *q,
  36				 const struct blkcg_policy *pol)
  37{
  38	return pol && test_bit(pol->plid, q->blkcg_pols);
  39}
  40
  41/**
  42 * blkg_free - free a blkg
  43 * @blkg: blkg to free
  44 *
  45 * Free @blkg which may be partially allocated.
  46 */
  47static void blkg_free(struct blkcg_gq *blkg)
  48{
  49	int i;
  50
  51	if (!blkg)
  52		return;
  53
  54	for (i = 0; i < BLKCG_MAX_POLS; i++)
  55		kfree(blkg->pd[i]);
 
  56
  57	blk_exit_rl(&blkg->rl);
 
  58	kfree(blkg);
  59}
  60
  61/**
  62 * blkg_alloc - allocate a blkg
  63 * @blkcg: block cgroup the new blkg is associated with
  64 * @q: request_queue the new blkg is associated with
  65 * @gfp_mask: allocation mask to use
  66 *
  67 * Allocate a new blkg associating @blkcg and @q.
  68 */
  69static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
  70				   gfp_t gfp_mask)
  71{
  72	struct blkcg_gq *blkg;
  73	int i;
  74
  75	/* alloc and init base part */
  76	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
  77	if (!blkg)
  78		return NULL;
  79
  80	blkg->q = q;
  81	INIT_LIST_HEAD(&blkg->q_node);
  82	blkg->blkcg = blkcg;
  83	blkg->refcnt = 1;
  84
  85	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
  86	if (blkcg != &blkcg_root) {
  87		if (blk_init_rl(&blkg->rl, q, gfp_mask))
  88			goto err_free;
  89		blkg->rl.blkg = blkg;
  90	}
  91
  92	for (i = 0; i < BLKCG_MAX_POLS; i++) {
  93		struct blkcg_policy *pol = blkcg_policy[i];
  94		struct blkg_policy_data *pd;
  95
  96		if (!blkcg_policy_enabled(q, pol))
  97			continue;
  98
  99		/* alloc per-policy data and attach it to blkg */
 100		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
 101		if (!pd)
 102			goto err_free;
 103
 104		blkg->pd[i] = pd;
 105		pd->blkg = blkg;
 106		pd->plid = i;
 107	}
 108
 109	return blkg;
 110
 111err_free:
 112	blkg_free(blkg);
 113	return NULL;
 114}
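/*
 * Hedged sketch (not part of this file): blkg_alloc() above sizes each
 * per-policy area by blkcg_policy->pd_size, so a policy embeds
 * struct blkg_policy_data at the start of its own per-blkg structure.
 * Every "example_*" name below is hypothetical.
 */
static struct blkcg_policy example_policy;	/* defined at the end of the sketch */

struct example_blkg_data {
	struct blkg_policy_data pd;	/* must be the first member */
	u64 bytes_dispatched;		/* policy-private state */
};

static void example_pd_init(struct blkcg_gq *blkg)
{
	struct example_blkg_data *ed =
		container_of(blkg_to_pd(blkg, &example_policy),
			     struct example_blkg_data, pd);

	ed->bytes_dispatched = 0;
}

static struct blkcg_policy example_policy = {
	.pd_size	= sizeof(struct example_blkg_data),
	.pd_init_fn	= example_pd_init,
};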
 115
 116/**
 117 * __blkg_lookup - internal version of blkg_lookup()
 118 * @blkcg: blkcg of interest
 119 * @q: request_queue of interest
 120 * @update_hint: whether to update lookup hint with the result or not
 121 *
 122 * This is internal version and shouldn't be used by policy
 123 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 124 * @q's bypass state.  If @update_hint is %true, the caller should be
 125 * holding @q->queue_lock and lookup hint is updated on success.
 126 */
 127struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
 128			       bool update_hint)
 129{
 130	struct blkcg_gq *blkg;
 131
 132	blkg = rcu_dereference(blkcg->blkg_hint);
 133	if (blkg && blkg->q == q)
 134		return blkg;
 135
 136	/*
 137	 * Hint didn't match.  Look up from the radix tree.  Note that the
 138	 * hint can only be updated under queue_lock as otherwise @blkg
 139	 * could have already been removed from blkg_tree.  The caller is
 140	 * responsible for grabbing queue_lock if @update_hint.
 141	 */
 142	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
 143	if (blkg && blkg->q == q) {
 144		if (update_hint) {
 145			lockdep_assert_held(q->queue_lock);
 146			rcu_assign_pointer(blkcg->blkg_hint, blkg);
 147		}
 148		return blkg;
 149	}
 150
 151	return NULL;
 152}
 153
 154/**
 155 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 156 * @blkcg: blkcg of interest
 157 * @q: request_queue of interest
 158 *
 159 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 160 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 161 * - see blk_queue_bypass_start() for details.
 162 */
 163struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
 164{
 165	WARN_ON_ONCE(!rcu_read_lock_held());
 166
 167	if (unlikely(blk_queue_bypass(q)))
 168		return NULL;
 169	return __blkg_lookup(blkcg, q, false);
 170}
 171EXPORT_SYMBOL_GPL(blkg_lookup);
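/*
 * Hedged usage sketch (not part of this file): blkg_lookup() is the fast
 * path and must be called under the RCU read lock; the returned blkg is
 * only guaranteed to stay valid inside that RCU section.  "example_*" is
 * a hypothetical name.
 */
static bool example_blkg_exists(struct blkcg *blkcg, struct request_queue *q)
{
	bool exists;

	rcu_read_lock();
	exists = blkg_lookup(blkcg, q) != NULL;
	rcu_read_unlock();

	return exists;
}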
 172
 173/*
 174 * If @new_blkg is %NULL, this function tries to allocate a new one as
 175 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 176 */
 177static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 178				    struct request_queue *q,
 179				    struct blkcg_gq *new_blkg)
 180{
 181	struct blkcg_gq *blkg;
 182	int i, ret;
 183
 184	WARN_ON_ONCE(!rcu_read_lock_held());
 185	lockdep_assert_held(q->queue_lock);
 186
 187	/* blkg holds a reference to blkcg */
 188	if (!css_tryget(&blkcg->css)) {
 189		ret = -EINVAL;
 190		goto err_free_blkg;
 191	}
 192
 193	/* allocate */
 194	if (!new_blkg) {
 195		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
 196		if (unlikely(!new_blkg)) {
 197			ret = -ENOMEM;
 198			goto err_put_css;
 199		}
 200	}
 201	blkg = new_blkg;
 202
 203	/* link parent */
 204	if (blkcg_parent(blkcg)) {
 205		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
 206		if (WARN_ON_ONCE(!blkg->parent)) {
 207			ret = -EINVAL;
 208			goto err_put_css;
 209		}
 210		blkg_get(blkg->parent);
 211	}
 212
 213	/* invoke per-policy init */
 214	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 215		struct blkcg_policy *pol = blkcg_policy[i];
 216
 217		if (blkg->pd[i] && pol->pd_init_fn)
 218			pol->pd_init_fn(blkg);
 219	}
 220
 221	/* insert */
 222	spin_lock(&blkcg->lock);
 223	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
 224	if (likely(!ret)) {
 225		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
 226		list_add(&blkg->q_node, &q->blkg_list);
 227
 228		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 229			struct blkcg_policy *pol = blkcg_policy[i];
 230
 231			if (blkg->pd[i] && pol->pd_online_fn)
 232				pol->pd_online_fn(blkg);
 233		}
 234	}
 235	blkg->online = true;
 236	spin_unlock(&blkcg->lock);
 237
 238	if (!ret) {
 239		if (blkcg == &blkcg_root) {
 240			q->root_blkg = blkg;
 241			q->root_rl.blkg = blkg;
 242		}
 243		return blkg;
 244	}
 245
  246	/* @blkg failed to be fully initialized, use the usual release path */
 247	blkg_put(blkg);
 248	return ERR_PTR(ret);
 249
 250err_put_css:
 251	css_put(&blkcg->css);
 252err_free_blkg:
 253	blkg_free(new_blkg);
 254	return ERR_PTR(ret);
 255}
 256
 257/**
 258 * blkg_lookup_create - lookup blkg, try to create one if not there
 259 * @blkcg: blkcg of interest
 260 * @q: request_queue of interest
 261 *
 262 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 263 * create one.  blkg creation is performed recursively from blkcg_root such
 264 * that all non-root blkg's have access to the parent blkg.  This function
 265 * should be called under RCU read lock and @q->queue_lock.
 266 *
 267 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 268 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 269 * dead and bypassing, returns ERR_PTR(-EBUSY).
 270 */
 271struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 272				    struct request_queue *q)
 273{
 274	struct blkcg_gq *blkg;
 
 275
 276	WARN_ON_ONCE(!rcu_read_lock_held());
 277	lockdep_assert_held(q->queue_lock);
 278
 279	/*
 280	 * This could be the first entry point of blkcg implementation and
 281	 * we shouldn't allow anything to go through for a bypassing queue.
 282	 */
 283	if (unlikely(blk_queue_bypass(q)))
 284		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
 285
 
 286	blkg = __blkg_lookup(blkcg, q, true);
 287	if (blkg)
 288		return blkg;
 289
 290	/*
 291	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
 292	 * non-root blkgs have access to their parents.
 
 293	 */
 294	while (true) {
 295		struct blkcg *pos = blkcg;
 296		struct blkcg *parent = blkcg_parent(blkcg);
 
 297
 298		while (parent && !__blkg_lookup(parent, q, false)) {
 299			pos = parent;
 300			parent = blkcg_parent(parent);
 301		}
 302
 303		blkg = blkg_create(pos, q, NULL);
 304		if (pos == blkcg || IS_ERR(blkg))
 305			return blkg;
 306	}
 307}
 308EXPORT_SYMBOL_GPL(blkg_lookup_create);
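/*
 * Hedged sketch: a caller of blkg_lookup_create() holds both the RCU read
 * lock and @q->queue_lock, checks for ERR_PTR() and uses the blkg before
 * dropping the locks.  "example_*" names are hypothetical.
 */
static int example_touch_blkg(struct blkcg *blkcg, struct request_queue *q)
{
	struct blkcg_gq *blkg;
	int ret = 0;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(blkcg, q);
	if (IS_ERR(blkg))
		ret = PTR_ERR(blkg);
	/* else: use @blkg here, while the locks are still held */

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	return ret;
}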
 309
 310static void blkg_destroy(struct blkcg_gq *blkg)
 311{
 312	struct blkcg *blkcg = blkg->blkcg;
 313	int i;
 314
 315	lockdep_assert_held(blkg->q->queue_lock);
 316	lockdep_assert_held(&blkcg->lock);
 317
 318	/* Something wrong if we are trying to remove same group twice */
 319	WARN_ON_ONCE(list_empty(&blkg->q_node));
 320	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 321
 322	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 323		struct blkcg_policy *pol = blkcg_policy[i];
 324
 325		if (blkg->pd[i] && pol->pd_offline_fn)
 326			pol->pd_offline_fn(blkg);
 327	}
 
 328	blkg->online = false;
 329
 330	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
 331	list_del_init(&blkg->q_node);
 332	hlist_del_init_rcu(&blkg->blkcg_node);
 333
 334	/*
 335	 * Both setting lookup hint to and clearing it from @blkg are done
 336	 * under queue_lock.  If it's not pointing to @blkg now, it never
 337	 * will.  Hint assignment itself can race safely.
 338	 */
 339	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
 340		rcu_assign_pointer(blkcg->blkg_hint, NULL);
 341
 342	/*
  343	 * If the root blkg is being destroyed, just clear the pointer since
  344	 * root_rl does not take a reference on the root blkg.
 345	 */
 346	if (blkcg == &blkcg_root) {
 347		blkg->q->root_blkg = NULL;
 348		blkg->q->root_rl.blkg = NULL;
 349	}
 350
 351	/*
 352	 * Put the reference taken at the time of creation so that when all
 353	 * queues are gone, group can be destroyed.
 354	 */
 355	blkg_put(blkg);
 356}
 357
 358/**
 359 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 360 * @q: request_queue of interest
 361 *
 362 * Destroy all blkgs associated with @q.
 363 */
 364static void blkg_destroy_all(struct request_queue *q)
 365{
 366	struct blkcg_gq *blkg, *n;
 
 367
 368	lockdep_assert_held(q->queue_lock);
 369
 370	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 371		struct blkcg *blkcg = blkg->blkcg;
 372
 373		spin_lock(&blkcg->lock);
 374		blkg_destroy(blkg);
 375		spin_unlock(&blkcg->lock);
 376	}
 377}
 378
 379/*
 380 * A group is RCU protected, but having an rcu lock does not mean that one
 381 * can access all the fields of blkg and assume these are valid.  For
 382 * example, don't try to follow throtl_data and request queue links.
 383 *
 384 * Having a reference to blkg under an rcu allows accesses to only values
 385 * local to groups like group stats and group rate limits.
 386 */
 387void __blkg_release_rcu(struct rcu_head *rcu_head)
 388{
 389	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
 390	int i;
 391
 392	/* tell policies that this one is being freed */
 393	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 394		struct blkcg_policy *pol = blkcg_policy[i];
 395
 396		if (blkg->pd[i] && pol->pd_exit_fn)
 397			pol->pd_exit_fn(blkg);
 398	}
 399
 400	/* release the blkcg and parent blkg refs this blkg has been holding */
 401	css_put(&blkg->blkcg->css);
 402	if (blkg->parent) {
 403		spin_lock_irq(blkg->q->queue_lock);
 404		blkg_put(blkg->parent);
 405		spin_unlock_irq(blkg->q->queue_lock);
 406	}
 407
 408	blkg_free(blkg);
 409}
 410EXPORT_SYMBOL_GPL(__blkg_release_rcu);
 411
 412/*
  413 * The iterator function used by blk_queue_for_each_rl().  It's a bit tricky
 414 * because the root blkg uses @q->root_rl instead of its own rl.
 415 */
 416struct request_list *__blk_queue_next_rl(struct request_list *rl,
 417					 struct request_queue *q)
 418{
 419	struct list_head *ent;
 420	struct blkcg_gq *blkg;
 421
 422	/*
 423	 * Determine the current blkg list_head.  The first entry is
 424	 * root_rl which is off @q->blkg_list and mapped to the head.
 425	 */
 426	if (rl == &q->root_rl) {
 427		ent = &q->blkg_list;
 428		/* There are no more block groups, hence no request lists */
 429		if (list_empty(ent))
 430			return NULL;
 431	} else {
 432		blkg = container_of(rl, struct blkcg_gq, rl);
 433		ent = &blkg->q_node;
 434	}
 435
 436	/* walk to the next list_head, skip root blkcg */
 437	ent = ent->next;
 438	if (ent == &q->root_blkg->q_node)
 439		ent = ent->next;
 440	if (ent == &q->blkg_list)
 441		return NULL;
 442
 443	blkg = container_of(ent, struct blkcg_gq, q_node);
 444	return &blkg->rl;
 445}
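/*
 * Hedged sketch: the helper above backs the blk_queue_for_each_rl()
 * iterator, which visits @q->root_rl first and then one request_list per
 * non-root blkg.  Walking every request_list of @q (roughly what queue
 * draining does) looks like this; "example_*" is hypothetical.
 */
static void example_wake_all_rls(struct request_queue *q)
{
	struct request_list *rl;
	int i;

	blk_queue_for_each_rl(rl, q)
		for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
			wake_up_all(&rl->wait[i]);
}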
 446
 447static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 448			     struct cftype *cftype, u64 val)
 449{
 450	struct blkcg *blkcg = css_to_blkcg(css);
 451	struct blkcg_gq *blkg;
 452	int i;
 453
 454	/*
 455	 * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
 456	 * which ends up putting cgroup's internal cgroup_tree_mutex under
 457	 * it; however, cgroup_tree_mutex is nested above cgroup file
 458	 * active protection and grabbing blkcg_pol_mutex from a cgroup
 459	 * file operation creates a possible circular dependency.  cgroup
 460	 * internal locking is planned to go through further simplification
 461	 * and this issue should go away soon.  For now, let's trylock
 462	 * blkcg_pol_mutex and restart the write on failure.
 463	 *
 464	 * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
 465	 */
 466	if (!mutex_trylock(&blkcg_pol_mutex))
 467		return restart_syscall();
 468	spin_lock_irq(&blkcg->lock);
 469
 470	/*
 471	 * Note that stat reset is racy - it doesn't synchronize against
 472	 * stat updates.  This is a debug feature which shouldn't exist
 473	 * anyway.  If you get hit by a race, retry.
 474	 */
 475	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 476		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 477			struct blkcg_policy *pol = blkcg_policy[i];
 478
 479			if (blkcg_policy_enabled(blkg->q, pol) &&
 480			    pol->pd_reset_stats_fn)
 481				pol->pd_reset_stats_fn(blkg);
 482		}
 483	}
 484
 485	spin_unlock_irq(&blkcg->lock);
 486	mutex_unlock(&blkcg_pol_mutex);
 487	return 0;
 488}
 489
 490static const char *blkg_dev_name(struct blkcg_gq *blkg)
 491{
 492	/* some drivers (floppy) instantiate a queue w/o disk registered */
 493	if (blkg->q->backing_dev_info.dev)
 494		return dev_name(blkg->q->backing_dev_info.dev);
 495	return NULL;
 496}
 497
 498/**
 499 * blkcg_print_blkgs - helper for printing per-blkg data
 500 * @sf: seq_file to print to
 501 * @blkcg: blkcg of interest
 502 * @prfill: fill function to print out a blkg
 503 * @pol: policy in question
 504 * @data: data to be passed to @prfill
 505 * @show_total: to print out sum of prfill return values or not
 506 *
 507 * This function invokes @prfill on each blkg of @blkcg if pd for the
 508 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 509 * policy data and @data and the matching queue lock held.  If @show_total
 510 * is %true, the sum of the return values from @prfill is printed with
 511 * "Total" label at the end.
 512 *
 513 * This is to be used to construct print functions for
 514 * cftype->read_seq_string method.
 515 */
 516void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 517		       u64 (*prfill)(struct seq_file *,
 518				     struct blkg_policy_data *, int),
 519		       const struct blkcg_policy *pol, int data,
 520		       bool show_total)
 521{
 522	struct blkcg_gq *blkg;
 523	u64 total = 0;
 524
 525	rcu_read_lock();
 526	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 527		spin_lock_irq(blkg->q->queue_lock);
 528		if (blkcg_policy_enabled(blkg->q, pol))
 529			total += prfill(sf, blkg->pd[pol->plid], data);
 530		spin_unlock_irq(blkg->q->queue_lock);
 531	}
 532	rcu_read_unlock();
 533
 534	if (show_total)
 535		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
 536}
 537EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
 538
 539/**
 540 * __blkg_prfill_u64 - prfill helper for a single u64 value
 541 * @sf: seq_file to print to
 542 * @pd: policy private data of interest
 543 * @v: value to print
 544 *
  545 * Print @v to @sf for the device associated with @pd.
 546 */
 547u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
 548{
 549	const char *dname = blkg_dev_name(pd->blkg);
 550
 551	if (!dname)
 552		return 0;
 553
 554	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
 555	return v;
 556}
 557EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
 558
 559/**
 560 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 561 * @sf: seq_file to print to
 562 * @pd: policy private data of interest
 563 * @rwstat: rwstat to print
 564 *
  565 * Print @rwstat to @sf for the device associated with @pd.
 566 */
 567u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 568			 const struct blkg_rwstat *rwstat)
 569{
 570	static const char *rwstr[] = {
 571		[BLKG_RWSTAT_READ]	= "Read",
 572		[BLKG_RWSTAT_WRITE]	= "Write",
 573		[BLKG_RWSTAT_SYNC]	= "Sync",
 574		[BLKG_RWSTAT_ASYNC]	= "Async",
 575	};
 576	const char *dname = blkg_dev_name(pd->blkg);
 577	u64 v;
 578	int i;
 579
 580	if (!dname)
 581		return 0;
 582
 583	for (i = 0; i < BLKG_RWSTAT_NR; i++)
 584		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
 585			   (unsigned long long)rwstat->cnt[i]);
 586
 587	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
 588	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
 589	return v;
 590}
 591EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
 592
 593/**
 594 * blkg_prfill_stat - prfill callback for blkg_stat
 595 * @sf: seq_file to print to
 596 * @pd: policy private data of interest
 597 * @off: offset to the blkg_stat in @pd
 598 *
 599 * prfill callback for printing a blkg_stat.
 600 */
 601u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
 602{
 603	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
 604}
 605EXPORT_SYMBOL_GPL(blkg_prfill_stat);
 606
 607/**
 608 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 609 * @sf: seq_file to print to
 610 * @pd: policy private data of interest
 611 * @off: offset to the blkg_rwstat in @pd
 612 *
 613 * prfill callback for printing a blkg_rwstat.
 614 */
 615u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 616		       int off)
 617{
 618	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
 619
 620	return __blkg_prfill_rwstat(sf, pd, &rwstat);
 621}
 622EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
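/*
 * Hedged sketch: a policy usually wires the prfill helpers above into its
 * cftype callbacks through blkcg_print_blkgs().  seq_css()/seq_cft() are
 * assumed to be the cgroup seq_file accessors of this era, and
 * "example_policy" is the hypothetical policy sketched earlier.
 */
static int example_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &example_policy, seq_cft(sf)->private, true);
	return 0;
}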
 623
 624/**
 625 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 626 * @pd: policy private data of interest
 627 * @off: offset to the blkg_stat in @pd
 628 *
 629 * Collect the blkg_stat specified by @off from @pd and all its online
 630 * descendants and return the sum.  The caller must be holding the queue
 631 * lock for online tests.
 632 */
 633u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
 634{
 635	struct blkcg_policy *pol = blkcg_policy[pd->plid];
 636	struct blkcg_gq *pos_blkg;
 637	struct cgroup_subsys_state *pos_css;
 638	u64 sum = 0;
 639
 640	lockdep_assert_held(pd->blkg->q->queue_lock);
 
 641
 642	rcu_read_lock();
 643	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
 644		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
 645		struct blkg_stat *stat = (void *)pos_pd + off;
 646
 647		if (pos_blkg->online)
 648			sum += blkg_stat_read(stat);
 649	}
 650	rcu_read_unlock();
 651
 652	return sum;
 
 653}
 654EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
 655
 656/**
 657 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 658 * @pd: policy private data of interest
 659 * @off: offset to the blkg_stat in @pd
 660 *
 661 * Collect the blkg_rwstat specified by @off from @pd and all its online
 662 * descendants and return the sum.  The caller must be holding the queue
 663 * lock for online tests.
 664 */
 665struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
 666					     int off)
 667{
 668	struct blkcg_policy *pol = blkcg_policy[pd->plid];
 669	struct blkcg_gq *pos_blkg;
 670	struct cgroup_subsys_state *pos_css;
 671	struct blkg_rwstat sum = { };
 672	int i;
 673
 674	lockdep_assert_held(pd->blkg->q->queue_lock);
 675
 676	rcu_read_lock();
 677	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
 678		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
 679		struct blkg_rwstat *rwstat = (void *)pos_pd + off;
 680		struct blkg_rwstat tmp;
 681
 682		if (!pos_blkg->online)
 683			continue;
 684
 685		tmp = blkg_rwstat_read(rwstat);
 686
 687		for (i = 0; i < BLKG_RWSTAT_NR; i++)
 688			sum.cnt[i] += tmp.cnt[i];
 689	}
 690	rcu_read_unlock();
 691
 692	return sum;
 693}
 694EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
 695
 696/**
 697 * blkg_conf_prep - parse and prepare for per-blkg config update
 698 * @blkcg: target block cgroup
 699 * @pol: target policy
 700 * @input: input string
 701 * @ctx: blkg_conf_ctx to be filled
 702 *
 703 * Parse per-blkg config update from @input and initialize @ctx with the
 704 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 705 * value.  This function returns with RCU read lock and queue lock held and
 706 * must be paired with blkg_conf_finish().
 707 */
 708int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 709		   const char *input, struct blkg_conf_ctx *ctx)
 710	__acquires(rcu) __acquires(disk->queue->queue_lock)
 711{
 712	struct gendisk *disk;
 
 713	struct blkcg_gq *blkg;
 714	unsigned int major, minor;
 715	unsigned long long v;
 716	int part, ret;
 717
 718	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
 719		return -EINVAL;
 
 720
 721	disk = get_gendisk(MKDEV(major, minor), &part);
 722	if (!disk || part)
 723		return -EINVAL;
 724
 725	rcu_read_lock();
 726	spin_lock_irq(disk->queue->queue_lock);
 727
 728	if (blkcg_policy_enabled(disk->queue, pol))
 729		blkg = blkg_lookup_create(blkcg, disk->queue);
 730	else
 731		blkg = ERR_PTR(-EINVAL);
 732
 
 733	if (IS_ERR(blkg)) {
 734		ret = PTR_ERR(blkg);
 735		rcu_read_unlock();
 736		spin_unlock_irq(disk->queue->queue_lock);
 737		put_disk(disk);
 738		/*
 739		 * If queue was bypassing, we should retry.  Do so after a
 740		 * short msleep().  It isn't strictly necessary but queue
 741		 * can be bypassing for some time and it's always nice to
 742		 * avoid busy looping.
 743		 */
 744		if (ret == -EBUSY) {
 745			msleep(10);
 746			ret = restart_syscall();
 747		}
 748		return ret;
 749	}
 750
 751	ctx->disk = disk;
 752	ctx->blkg = blkg;
 753	ctx->v = v;
 754	return 0;
 755}
 756EXPORT_SYMBOL_GPL(blkg_conf_prep);
 757
 758/**
 759 * blkg_conf_finish - finish up per-blkg config update
  760 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 761 *
 762 * Finish up after per-blkg config update.  This function must be paired
 763 * with blkg_conf_prep().
 764 */
 765void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 766	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
 767{
 768	spin_unlock_irq(ctx->disk->queue->queue_lock);
 769	rcu_read_unlock();
 770	put_disk(ctx->disk);
 771}
 772EXPORT_SYMBOL_GPL(blkg_conf_finish);
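/*
 * Hedged sketch: a per-device configuration write pairs blkg_conf_prep()
 * with blkg_conf_finish() and updates the policy data while the RCU read
 * lock and queue lock taken by prep are still held.  "example_*" names are
 * hypothetical.
 */
static int example_set_limit(struct blkcg *blkcg, const char *buf)
{
	struct blkg_conf_ctx ctx;
	struct blkg_policy_data *pd;
	int ret;

	ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
	if (ret)
		return ret;

	/* rcu_read_lock() and the queue lock are held here */
	pd = blkg_to_pd(ctx.blkg, &example_policy);
	/* ... apply ctx.v (the value after "MAJ:MIN") to @pd ... */

	blkg_conf_finish(&ctx);
	return 0;
}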
 773
 774struct cftype blkcg_files[] = {
 775	{
 776		.name = "reset_stats",
 777		.write_u64 = blkcg_reset_stats,
 778	},
 779	{ }	/* terminate */
 780};
 781
 782/**
 783 * blkcg_css_offline - cgroup css_offline callback
 784 * @css: css of interest
 785 *
 786 * This function is called when @css is about to go away and responsible
 787 * for shooting down all blkgs associated with @css.  blkgs should be
 788 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 789 * inside q lock, this function performs reverse double lock dancing.
 790 *
 791 * This is the blkcg counterpart of ioc_release_fn().
 792 */
 793static void blkcg_css_offline(struct cgroup_subsys_state *css)
 794{
 795	struct blkcg *blkcg = css_to_blkcg(css);
 796
 797	spin_lock_irq(&blkcg->lock);
 798
 799	while (!hlist_empty(&blkcg->blkg_list)) {
 800		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
 801						struct blkcg_gq, blkcg_node);
 802		struct request_queue *q = blkg->q;
 803
 804		if (spin_trylock(q->queue_lock)) {
 805			blkg_destroy(blkg);
 806			spin_unlock(q->queue_lock);
 807		} else {
 
 
 808			spin_unlock_irq(&blkcg->lock);
 809			cpu_relax();
 810			spin_lock_irq(&blkcg->lock);
 
 811		}
 812	}
 813
 814	spin_unlock_irq(&blkcg->lock);
 815}
 816
 817static void blkcg_css_free(struct cgroup_subsys_state *css)
 818{
 819	struct blkcg *blkcg = css_to_blkcg(css);
 
 820
 821	if (blkcg != &blkcg_root)
 822		kfree(blkcg);
 823}
 824
 825static struct cgroup_subsys_state *
 826blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 827{
 828	static atomic64_t id_seq = ATOMIC64_INIT(0);
 829	struct blkcg *blkcg;
 830
 831	if (!parent_css) {
 832		blkcg = &blkcg_root;
 833		goto done;
 834	}
 835
 836	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
 837	if (!blkcg)
 838		return ERR_PTR(-ENOMEM);
 839
 840	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
 841	blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
 842	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
 843done:
 844	spin_lock_init(&blkcg->lock);
 845	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
 
 846	INIT_HLIST_HEAD(&blkcg->blkg_list);
 847
 
 848	return &blkcg->css;
 849}
 850
 851/**
 852 * blkcg_init_queue - initialize blkcg part of request queue
 853 * @q: request_queue to initialize
 854 *
 855 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 856 * part of new request_queue @q.
 857 *
 858 * RETURNS:
 859 * 0 on success, -errno on failure.
 860 */
 861int blkcg_init_queue(struct request_queue *q)
 862{
 863	might_sleep();
 
 
 864
 865	return blk_throtl_init(q);
 866}
 
 867
 868/**
 869 * blkcg_drain_queue - drain blkcg part of request_queue
 870 * @q: request_queue to drain
 871 *
 872 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 873 */
 874void blkcg_drain_queue(struct request_queue *q)
 875{
 876	lockdep_assert_held(q->queue_lock);
 877
 878	blk_throtl_drain(q);
 879}
 880
 881/**
 882 * blkcg_exit_queue - exit and release blkcg part of request_queue
 883 * @q: request_queue being released
 884 *
 885 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 886 */
 887void blkcg_exit_queue(struct request_queue *q)
 888{
 889	spin_lock_irq(q->queue_lock);
 890	blkg_destroy_all(q);
 891	spin_unlock_irq(q->queue_lock);
 892
 893	blk_throtl_exit(q);
 894}
 895
 896/*
  897 * We cannot support shared io contexts, as we have no means to support
 898 * two tasks with the same ioc in two different groups without major rework
 899 * of the main cic data structures.  For now we allow a task to change
 900 * its cgroup only if it's the only owner of its ioc.
 901 */
 902static int blkcg_can_attach(struct cgroup_subsys_state *css,
 903			    struct cgroup_taskset *tset)
 904{
 905	struct task_struct *task;
 906	struct io_context *ioc;
 907	int ret = 0;
 908
 909	/* task_lock() is needed to avoid races with exit_io_context() */
 910	cgroup_taskset_for_each(task, tset) {
 911		task_lock(task);
 912		ioc = task->io_context;
 913		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
 914			ret = -EINVAL;
 915		task_unlock(task);
 916		if (ret)
 917			break;
 918	}
 919	return ret;
 920}
 921
 922struct cgroup_subsys blkio_cgrp_subsys = {
 923	.css_alloc = blkcg_css_alloc,
 
 924	.css_offline = blkcg_css_offline,
 925	.css_free = blkcg_css_free,
 926	.can_attach = blkcg_can_attach,
 927	.base_cftypes = blkcg_files,
 928};
 929EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
 930
 931/**
 932 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 933 * @q: request_queue of interest
 934 * @pol: blkcg policy to activate
 935 *
 936 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 937 * bypass mode to populate its blkgs with policy_data for @pol.
 938 *
 939 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 940 * from IO path.  Update of each blkg is protected by both queue and blkcg
 941 * locks so that holding either lock and testing blkcg_policy_enabled() is
 942 * always enough for dereferencing policy data.
 943 *
 944 * The caller is responsible for synchronizing [de]activations and policy
 945 * [un]registerations.  Returns 0 on success, -errno on failure.
 946 */
 947int blkcg_activate_policy(struct request_queue *q,
 948			  const struct blkcg_policy *pol)
 949{
 950	LIST_HEAD(pds);
 951	struct blkcg_gq *blkg, *new_blkg;
 952	struct blkg_policy_data *pd, *n;
 953	int cnt = 0, ret;
 954	bool preloaded;
 955
 956	if (blkcg_policy_enabled(q, pol))
 957		return 0;
 958
 959	/* preallocations for root blkg */
 960	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
 961	if (!new_blkg)
 962		return -ENOMEM;
 963
 964	blk_queue_bypass_start(q);
 965
 966	preloaded = !radix_tree_preload(GFP_KERNEL);
 967
 968	/*
 969	 * Make sure the root blkg exists and count the existing blkgs.  As
 970	 * @q is bypassing at this point, blkg_lookup_create() can't be
 971	 * used.  Open code it.
 972	 */
 973	spin_lock_irq(q->queue_lock);
 974
 975	rcu_read_lock();
 976	blkg = __blkg_lookup(&blkcg_root, q, false);
 977	if (blkg)
 978		blkg_free(new_blkg);
 979	else
 980		blkg = blkg_create(&blkcg_root, q, new_blkg);
 981	rcu_read_unlock();
 982
 983	if (preloaded)
 984		radix_tree_preload_end();
 985
 986	if (IS_ERR(blkg)) {
 987		ret = PTR_ERR(blkg);
 988		goto out_unlock;
 989	}
 990
 991	list_for_each_entry(blkg, &q->blkg_list, q_node)
 992		cnt++;
 993
 994	spin_unlock_irq(q->queue_lock);
 
 995
 996	/* allocate policy_data for all existing blkgs */
 997	while (cnt--) {
 998		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
 999		if (!pd) {
1000			ret = -ENOMEM;
1001			goto out_free;
 
1002		}
1003		list_add_tail(&pd->alloc_node, &pds);
1004	}
1005
1006	/*
1007	 * Install the allocated pds.  With @q bypassing, no new blkg
1008	 * should have been created while the queue lock was dropped.
1009	 */
1010	spin_lock_irq(q->queue_lock);
1011
1012	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1013		if (WARN_ON(list_empty(&pds))) {
1014			/* umm... this shouldn't happen, just abort */
1015			ret = -ENOMEM;
1016			goto out_unlock;
1017		}
1018		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
1019		list_del_init(&pd->alloc_node);
1020
1021		/* grab blkcg lock too while installing @pd on @blkg */
1022		spin_lock(&blkg->blkcg->lock);
1023
1024		blkg->pd[pol->plid] = pd;
1025		pd->blkg = blkg;
1026		pd->plid = pol->plid;
1027		pol->pd_init_fn(blkg);
1028
1029		spin_unlock(&blkg->blkcg->lock);
1030	}
1031
1032	__set_bit(pol->plid, q->blkcg_pols);
1033	ret = 0;
1034out_unlock:
1035	spin_unlock_irq(q->queue_lock);
1036out_free:
1037	blk_queue_bypass_end(q);
1038	list_for_each_entry_safe(pd, n, &pds, alloc_node)
1039		kfree(pd);
1040	return ret;
1041}
1042EXPORT_SYMBOL_GPL(blkcg_activate_policy);
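/*
 * Hedged sketch: activation happens from process context, e.g. when a
 * policy attaches to a queue, and is mirrored by deactivation on teardown.
 * "example_*" names are hypothetical.
 */
static int example_attach_queue(struct request_queue *q)
{
	/* populates every existing blkg of @q with example_policy's pd */
	return blkcg_activate_policy(q, &example_policy);
}

static void example_detach_queue(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &example_policy);
}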
1043
1044/**
1045 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1046 * @q: request_queue of interest
1047 * @pol: blkcg policy to deactivate
1048 *
1049 * Deactivate @pol on @q.  Follows the same synchronization rules as
1050 * blkcg_activate_policy().
1051 */
1052void blkcg_deactivate_policy(struct request_queue *q,
1053			     const struct blkcg_policy *pol)
1054{
1055	struct blkcg_gq *blkg;
1056
1057	if (!blkcg_policy_enabled(q, pol))
1058		return;
1059
1060	blk_queue_bypass_start(q);
1061	spin_lock_irq(q->queue_lock);
1062
1063	__clear_bit(pol->plid, q->blkcg_pols);
1064
1065	/* if no policy is left, no need for blkgs - shoot them down */
1066	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
1067		blkg_destroy_all(q);
1068
1069	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1070		/* grab blkcg lock too while removing @pd from @blkg */
1071		spin_lock(&blkg->blkcg->lock);
1072
1073		if (pol->pd_offline_fn)
1074			pol->pd_offline_fn(blkg);
1075		if (pol->pd_exit_fn)
1076			pol->pd_exit_fn(blkg);
1077
1078		kfree(blkg->pd[pol->plid]);
1079		blkg->pd[pol->plid] = NULL;
1080
1081		spin_unlock(&blkg->blkcg->lock);
1082	}
1083
1084	spin_unlock_irq(q->queue_lock);
1085	blk_queue_bypass_end(q);
 
 
1086}
1087EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1088
1089/**
1090 * blkcg_policy_register - register a blkcg policy
1091 * @pol: blkcg policy to register
1092 *
1093 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1094 * successful registration.  Returns 0 on success and -errno on failure.
1095 */
1096int blkcg_policy_register(struct blkcg_policy *pol)
1097{
 
1098	int i, ret;
1099
1100	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
1101		return -EINVAL;
1102
1103	mutex_lock(&blkcg_pol_mutex);
1104
1105	/* find an empty slot */
1106	ret = -ENOSPC;
1107	for (i = 0; i < BLKCG_MAX_POLS; i++)
1108		if (!blkcg_policy[i])
1109			break;
1110	if (i >= BLKCG_MAX_POLS)
1111		goto out_unlock;
1112
1113	/* register and update blkgs */
1114	pol->plid = i;
1115	blkcg_policy[i] = pol;
1116
1117	/* everything is in place, add intf files for the new policy */
1118	if (pol->cftypes)
1119		WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes));
1120	ret = 0;
1121out_unlock:
1122	mutex_unlock(&blkcg_pol_mutex);
 
1123	return ret;
1124}
1125EXPORT_SYMBOL_GPL(blkcg_policy_register);
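/*
 * Hedged sketch: module init/exit pairing for the registration interface
 * above, reusing the hypothetical example_policy from the earlier sketches.
 */
static int __init example_blkcg_init(void)
{
	return blkcg_policy_register(&example_policy);
}

static void __exit example_blkcg_exit(void)
{
	blkcg_policy_unregister(&example_policy);
}

module_init(example_blkcg_init);
module_exit(example_blkcg_exit);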
1126
1127/**
1128 * blkcg_policy_unregister - unregister a blkcg policy
1129 * @pol: blkcg policy to unregister
1130 *
1131 * Undo blkcg_policy_register(@pol).  Might sleep.
1132 */
1133void blkcg_policy_unregister(struct blkcg_policy *pol)
1134{
1135	mutex_lock(&blkcg_pol_mutex);
 
 
1136
1137	if (WARN_ON(blkcg_policy[pol->plid] != pol))
1138		goto out_unlock;
1139
1140	/* kill the intf files first */
1141	if (pol->cftypes)
1142		cgroup_rm_cftypes(pol->cftypes);
1143
1144	/* unregister and update blkgs */
1145	blkcg_policy[pol->plid] = NULL;
1146out_unlock:
1147	mutex_unlock(&blkcg_pol_mutex);
 
 
1148}
1149EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Common Block IO controller cgroup interface
   4 *
   5 * Based on ideas and code from CFQ, CFS and BFQ:
   6 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   7 *
   8 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
   9 *		      Paolo Valente <paolo.valente@unimore.it>
  10 *
  11 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
  12 * 	              Nauman Rafique <nauman@google.com>
  13 *
  14 * For policy-specific per-blkcg data:
  15 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
  16 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
  17 */
  18#include <linux/ioprio.h>
  19#include <linux/kdev_t.h>
  20#include <linux/module.h>
  21#include <linux/sched/signal.h>
  22#include <linux/err.h>
  23#include <linux/blkdev.h>
  24#include <linux/backing-dev.h>
  25#include <linux/slab.h>
  26#include <linux/genhd.h>
  27#include <linux/delay.h>
  28#include <linux/atomic.h>
  29#include <linux/ctype.h>
  30#include <linux/blk-cgroup.h>
  31#include <linux/tracehook.h>
  32#include <linux/psi.h>
  33#include "blk.h"
  34#include "blk-ioprio.h"
  35
  36/*
  37 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
  38 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
  39 * policy [un]register operations including cgroup file additions /
  40 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
  41 * allows grabbing it from cgroup callbacks.
  42 */
  43static DEFINE_MUTEX(blkcg_pol_register_mutex);
  44static DEFINE_MUTEX(blkcg_pol_mutex);
  45
  46struct blkcg blkcg_root;
 
  47EXPORT_SYMBOL_GPL(blkcg_root);
  48
  49struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
  50EXPORT_SYMBOL_GPL(blkcg_root_css);
  51
  52static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
  53
  54static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */
  55
  56bool blkcg_debug_stats = false;
  57static struct workqueue_struct *blkcg_punt_bio_wq;
  58
  59#define BLKG_DESTROY_BATCH_SIZE  64
  60
  61static bool blkcg_policy_enabled(struct request_queue *q,
  62				 const struct blkcg_policy *pol)
  63{
  64	return pol && test_bit(pol->plid, q->blkcg_pols);
  65}
  66
  67/**
  68 * blkg_free - free a blkg
  69 * @blkg: blkg to free
  70 *
  71 * Free @blkg which may be partially allocated.
  72 */
  73static void blkg_free(struct blkcg_gq *blkg)
  74{
  75	int i;
  76
  77	if (!blkg)
  78		return;
  79
  80	for (i = 0; i < BLKCG_MAX_POLS; i++)
  81		if (blkg->pd[i])
  82			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
  83
  84	free_percpu(blkg->iostat_cpu);
  85	percpu_ref_exit(&blkg->refcnt);
  86	kfree(blkg);
  87}
  88
  89static void __blkg_release(struct rcu_head *rcu)
  90{
  91	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
  92
  93	WARN_ON(!bio_list_empty(&blkg->async_bios));
  94
  95	/* release the blkcg and parent blkg refs this blkg has been holding */
  96	css_put(&blkg->blkcg->css);
  97	if (blkg->parent)
  98		blkg_put(blkg->parent);
  99	blkg_free(blkg);
 100}
 101
 102/*
 103 * A group is RCU protected, but having an rcu lock does not mean that one
 104 * can access all the fields of blkg and assume these are valid.  For
 105 * example, don't try to follow throtl_data and request queue links.
 106 *
 107 * Having a reference to blkg under an rcu allows accesses to only values
 108 * local to groups like group stats and group rate limits.
 109 */
 110static void blkg_release(struct percpu_ref *ref)
 111{
 112	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
 113
 114	call_rcu(&blkg->rcu_head, __blkg_release);
 115}
 116
 117static void blkg_async_bio_workfn(struct work_struct *work)
 118{
 119	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
 120					     async_bio_work);
 121	struct bio_list bios = BIO_EMPTY_LIST;
 122	struct bio *bio;
 123	struct blk_plug plug;
 124	bool need_plug = false;
 125
 126	/* as long as there are pending bios, @blkg can't go away */
 127	spin_lock_bh(&blkg->async_bio_lock);
 128	bio_list_merge(&bios, &blkg->async_bios);
 129	bio_list_init(&blkg->async_bios);
 130	spin_unlock_bh(&blkg->async_bio_lock);
 131
 132	/* start plug only when bio_list contains at least 2 bios */
 133	if (bios.head && bios.head->bi_next) {
 134		need_plug = true;
 135		blk_start_plug(&plug);
 136	}
 137	while ((bio = bio_list_pop(&bios)))
 138		submit_bio(bio);
 139	if (need_plug)
 140		blk_finish_plug(&plug);
 141}
 142
 143/**
 144 * blkg_alloc - allocate a blkg
 145 * @blkcg: block cgroup the new blkg is associated with
 146 * @q: request_queue the new blkg is associated with
 147 * @gfp_mask: allocation mask to use
 148 *
  149 * Allocate a new blkg associating @blkcg and @q.
 150 */
 151static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
 152				   gfp_t gfp_mask)
 153{
 154	struct blkcg_gq *blkg;
 155	int i, cpu;
 156
 157	/* alloc and init base part */
 158	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
 159	if (!blkg)
 160		return NULL;
 161
 162	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
 163		goto err_free;
 164
 165	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
 166	if (!blkg->iostat_cpu)
 167		goto err_free;
 168
 169	blkg->q = q;
 170	INIT_LIST_HEAD(&blkg->q_node);
 171	spin_lock_init(&blkg->async_bio_lock);
 172	bio_list_init(&blkg->async_bios);
 173	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
 174	blkg->blkcg = blkcg;
 
 175
 176	u64_stats_init(&blkg->iostat.sync);
 177	for_each_possible_cpu(cpu)
 178		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
 179
 180	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 181		struct blkcg_policy *pol = blkcg_policy[i];
 182		struct blkg_policy_data *pd;
 183
 184		if (!blkcg_policy_enabled(q, pol))
 185			continue;
 186
 187		/* alloc per-policy data and attach it to blkg */
 188		pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
 189		if (!pd)
 190			goto err_free;
 191
 192		blkg->pd[i] = pd;
 193		pd->blkg = blkg;
 194		pd->plid = i;
 195	}
 196
 197	return blkg;
 198
 199err_free:
 200	blkg_free(blkg);
 201	return NULL;
 202}
 203
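/*
 * Hedged sketch: by this version a policy supplies pd_alloc_fn()/pd_free_fn()
 * (called by blkg_alloc() and blkg_free() above) instead of the old pd_size
 * field.  All "example_*" names are hypothetical.
 */
struct example_blkg_data {
	struct blkg_policy_data pd;	/* must be the first member */
	u64 bytes_dispatched;
};

static struct blkg_policy_data *example_pd_alloc(gfp_t gfp,
						 struct request_queue *q,
						 struct blkcg *blkcg)
{
	struct example_blkg_data *ed;

	ed = kzalloc_node(sizeof(*ed), gfp, q->node);
	return ed ? &ed->pd : NULL;
}

static void example_pd_free(struct blkg_policy_data *pd)
{
	kfree(container_of(pd, struct example_blkg_data, pd));
}

static struct blkcg_policy example_policy = {
	.pd_alloc_fn	= example_pd_alloc,
	.pd_free_fn	= example_pd_free,
};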
 204struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 205				      struct request_queue *q, bool update_hint)
 206{
 207	struct blkcg_gq *blkg;
 208
 209	/*
 210	 * Hint didn't match.  Look up from the radix tree.  Note that the
 211	 * hint can only be updated under queue_lock as otherwise @blkg
 212	 * could have already been removed from blkg_tree.  The caller is
 213	 * responsible for grabbing queue_lock if @update_hint.
 214	 */
 215	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
 216	if (blkg && blkg->q == q) {
 217		if (update_hint) {
 218			lockdep_assert_held(&q->queue_lock);
 219			rcu_assign_pointer(blkcg->blkg_hint, blkg);
 220		}
 221		return blkg;
 222	}
 223
 224	return NULL;
 225}
 226EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
 227
 228/*
 229 * If @new_blkg is %NULL, this function tries to allocate a new one as
 230 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 231 */
 232static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 233				    struct request_queue *q,
 234				    struct blkcg_gq *new_blkg)
 235{
 236	struct blkcg_gq *blkg;
 237	int i, ret;
 238
 239	WARN_ON_ONCE(!rcu_read_lock_held());
 240	lockdep_assert_held(&q->queue_lock);
 241
 242	/* request_queue is dying, do not create/recreate a blkg */
 243	if (blk_queue_dying(q)) {
 244		ret = -ENODEV;
 245		goto err_free_blkg;
 246	}
 247
 248	/* blkg holds a reference to blkcg */
 249	if (!css_tryget_online(&blkcg->css)) {
 250		ret = -ENODEV;
 251		goto err_free_blkg;
 252	}
 253
 254	/* allocate */
 255	if (!new_blkg) {
 256		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
 257		if (unlikely(!new_blkg)) {
 258			ret = -ENOMEM;
 259			goto err_put_css;
 260		}
 261	}
 262	blkg = new_blkg;
 263
 264	/* link parent */
 265	if (blkcg_parent(blkcg)) {
 266		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
 267		if (WARN_ON_ONCE(!blkg->parent)) {
 268			ret = -ENODEV;
 269			goto err_put_css;
 270		}
 271		blkg_get(blkg->parent);
 272	}
 273
 274	/* invoke per-policy init */
 275	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 276		struct blkcg_policy *pol = blkcg_policy[i];
 277
 278		if (blkg->pd[i] && pol->pd_init_fn)
 279			pol->pd_init_fn(blkg->pd[i]);
 280	}
 281
 282	/* insert */
 283	spin_lock(&blkcg->lock);
 284	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
 285	if (likely(!ret)) {
 286		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
 287		list_add(&blkg->q_node, &q->blkg_list);
 288
 289		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 290			struct blkcg_policy *pol = blkcg_policy[i];
 291
 292			if (blkg->pd[i] && pol->pd_online_fn)
 293				pol->pd_online_fn(blkg->pd[i]);
 294		}
 295	}
 296	blkg->online = true;
 297	spin_unlock(&blkcg->lock);
 298
 299	if (!ret)
 300		return blkg;
 
 301
 302	/* @blkg failed to be fully initialized, use the usual release path */
 303	blkg_put(blkg);
 304	return ERR_PTR(ret);
 305
 306err_put_css:
 307	css_put(&blkcg->css);
 308err_free_blkg:
 309	blkg_free(new_blkg);
 310	return ERR_PTR(ret);
 311}
 312
 313/**
 314 * blkg_lookup_create - lookup blkg, try to create one if not there
 315 * @blkcg: blkcg of interest
 316 * @q: request_queue of interest
 317 *
 318 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 319 * create one.  blkg creation is performed recursively from blkcg_root such
 320 * that all non-root blkg's have access to the parent blkg.  This function
 321 * should be called under RCU read lock and takes @q->queue_lock.
 322 *
 323 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 324 * down from root.
 
 325 */
 326static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 327		struct request_queue *q)
 328{
 329	struct blkcg_gq *blkg;
 330	unsigned long flags;
 331
 332	WARN_ON_ONCE(!rcu_read_lock_held());
 
 333
 334	blkg = blkg_lookup(blkcg, q);
 335	if (blkg)
 336		return blkg;
 337
 338	spin_lock_irqsave(&q->queue_lock, flags);
 339	blkg = __blkg_lookup(blkcg, q, true);
 340	if (blkg)
 341		goto found;
 342
 343	/*
 344	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
 345	 * non-root blkgs have access to their parents.  Returns the closest
 346	 * blkg to the intended blkg should blkg_create() fail.
 347	 */
 348	while (true) {
 349		struct blkcg *pos = blkcg;
 350		struct blkcg *parent = blkcg_parent(blkcg);
 351		struct blkcg_gq *ret_blkg = q->root_blkg;
 352
 353		while (parent) {
 354			blkg = __blkg_lookup(parent, q, false);
 355			if (blkg) {
 356				/* remember closest blkg */
 357				ret_blkg = blkg;
 358				break;
 359			}
 360			pos = parent;
 361			parent = blkcg_parent(parent);
 362		}
 363
 364		blkg = blkg_create(pos, q, NULL);
 365		if (IS_ERR(blkg)) {
 366			blkg = ret_blkg;
 367			break;
 368		}
 369		if (pos == blkcg)
 370			break;
 371	}
 372
 373found:
 374	spin_unlock_irqrestore(&q->queue_lock, flags);
 375	return blkg;
 376}
 
 377
 378static void blkg_destroy(struct blkcg_gq *blkg)
 379{
 380	struct blkcg *blkcg = blkg->blkcg;
 381	int i;
 382
 383	lockdep_assert_held(&blkg->q->queue_lock);
 384	lockdep_assert_held(&blkcg->lock);
 385
 386	/* Something wrong if we are trying to remove same group twice */
 387	WARN_ON_ONCE(list_empty(&blkg->q_node));
 388	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 389
 390	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 391		struct blkcg_policy *pol = blkcg_policy[i];
 392
 393		if (blkg->pd[i] && pol->pd_offline_fn)
 394			pol->pd_offline_fn(blkg->pd[i]);
 395	}
 396
 397	blkg->online = false;
 398
 399	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
 400	list_del_init(&blkg->q_node);
 401	hlist_del_init_rcu(&blkg->blkcg_node);
 402
 403	/*
 404	 * Both setting lookup hint to and clearing it from @blkg are done
 405	 * under queue_lock.  If it's not pointing to @blkg now, it never
 406	 * will.  Hint assignment itself can race safely.
 407	 */
 408	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
 409		rcu_assign_pointer(blkcg->blkg_hint, NULL);
 410
 411	/*
 412	 * Put the reference taken at the time of creation so that when all
 413	 * queues are gone, group can be destroyed.
 414	 */
 415	percpu_ref_kill(&blkg->refcnt);
 416}
 417
 418/**
 419 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 420 * @q: request_queue of interest
 421 *
 422 * Destroy all blkgs associated with @q.
 423 */
 424static void blkg_destroy_all(struct request_queue *q)
 425{
 426	struct blkcg_gq *blkg, *n;
 427	int count = BLKG_DESTROY_BATCH_SIZE;
 428
 429restart:
 430	spin_lock_irq(&q->queue_lock);
 431	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 432		struct blkcg *blkcg = blkg->blkcg;
 433
 434		spin_lock(&blkcg->lock);
 435		blkg_destroy(blkg);
 436		spin_unlock(&blkcg->lock);
 437
 438		/*
 439		 * in order to avoid holding the spin lock for too long, release
 440		 * it when a batch of blkgs are destroyed.
 441		 */
 442		if (!(--count)) {
 443			count = BLKG_DESTROY_BATCH_SIZE;
 444			spin_unlock_irq(&q->queue_lock);
 445			cond_resched();
 446			goto restart;
 447		}
 448	}
 449
 450	q->root_blkg = NULL;
 451	spin_unlock_irq(&q->queue_lock);
 452}
 453
 454static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 455			     struct cftype *cftype, u64 val)
 456{
 457	struct blkcg *blkcg = css_to_blkcg(css);
 458	struct blkcg_gq *blkg;
 459	int i, cpu;
 460
 461	mutex_lock(&blkcg_pol_mutex);
 462	spin_lock_irq(&blkcg->lock);
 463
 464	/*
 465	 * Note that stat reset is racy - it doesn't synchronize against
 466	 * stat updates.  This is a debug feature which shouldn't exist
 467	 * anyway.  If you get hit by a race, retry.
 468	 */
 469	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 470		for_each_possible_cpu(cpu) {
 471			struct blkg_iostat_set *bis =
 472				per_cpu_ptr(blkg->iostat_cpu, cpu);
 473			memset(bis, 0, sizeof(*bis));
 474		}
 475		memset(&blkg->iostat, 0, sizeof(blkg->iostat));
 476
 477		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 478			struct blkcg_policy *pol = blkcg_policy[i];
 479
 480			if (blkg->pd[i] && pol->pd_reset_stats_fn)
 481				pol->pd_reset_stats_fn(blkg->pd[i]);
 
 482		}
 483	}
 484
 485	spin_unlock_irq(&blkcg->lock);
 486	mutex_unlock(&blkcg_pol_mutex);
 487	return 0;
 488}
 489
 490const char *blkg_dev_name(struct blkcg_gq *blkg)
 491{
 492	/* some drivers (floppy) instantiate a queue w/o disk registered */
 493	if (blkg->q->backing_dev_info->dev)
 494		return bdi_dev_name(blkg->q->backing_dev_info);
 495	return NULL;
 496}
 497
 498/**
 499 * blkcg_print_blkgs - helper for printing per-blkg data
 500 * @sf: seq_file to print to
 501 * @blkcg: blkcg of interest
 502 * @prfill: fill function to print out a blkg
 503 * @pol: policy in question
 504 * @data: data to be passed to @prfill
 505 * @show_total: to print out sum of prfill return values or not
 506 *
 507 * This function invokes @prfill on each blkg of @blkcg if pd for the
 508 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 509 * policy data and @data and the matching queue lock held.  If @show_total
 510 * is %true, the sum of the return values from @prfill is printed with
 511 * "Total" label at the end.
 512 *
 513 * This is to be used to construct print functions for
 514 * cftype->read_seq_string method.
 515 */
 516void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 517		       u64 (*prfill)(struct seq_file *,
 518				     struct blkg_policy_data *, int),
 519		       const struct blkcg_policy *pol, int data,
 520		       bool show_total)
 521{
 522	struct blkcg_gq *blkg;
 523	u64 total = 0;
 524
 525	rcu_read_lock();
 526	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 527		spin_lock_irq(&blkg->q->queue_lock);
 528		if (blkcg_policy_enabled(blkg->q, pol))
 529			total += prfill(sf, blkg->pd[pol->plid], data);
 530		spin_unlock_irq(&blkg->q->queue_lock);
 531	}
 532	rcu_read_unlock();
 533
 534	if (show_total)
 535		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
 536}
 537EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
 538
 539/**
 540 * __blkg_prfill_u64 - prfill helper for a single u64 value
 541 * @sf: seq_file to print to
 542 * @pd: policy private data of interest
 543 * @v: value to print
 544 *
  545 * Print @v to @sf for the device associated with @pd.
 546 */
 547u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
 548{
 549	const char *dname = blkg_dev_name(pd->blkg);
 550
 551	if (!dname)
 552		return 0;
 553
 554	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
 555	return v;
 556}
 557EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
 558
 559/* Performs queue bypass and policy enabled checks then looks up blkg. */
 560static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
 561					  const struct blkcg_policy *pol,
 562					  struct request_queue *q)
 563{
 564	WARN_ON_ONCE(!rcu_read_lock_held());
 565	lockdep_assert_held(&q->queue_lock);
 566
 567	if (!blkcg_policy_enabled(q, pol))
 568		return ERR_PTR(-EOPNOTSUPP);
 569	return __blkg_lookup(blkcg, q, true /* update_hint */);
 570}
 
 571
 572/**
 573 * blkcg_conf_open_bdev - parse and open bdev for per-blkg config update
 574 * @inputp: input string pointer
 
 
 575 *
 576 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
 577 * from @input and get and return the matching bdev.  *@inputp is
 578 * updated to point past the device node prefix.  Returns an ERR_PTR()
 579 * value on error.
 580 *
 581 * Use this function iff blkg_conf_prep() can't be used for some reason.
 582 */
 583struct block_device *blkcg_conf_open_bdev(char **inputp)
 
 584{
 585	char *input = *inputp;
 586	unsigned int major, minor;
 587	struct block_device *bdev;
 588	int key_len;
 589
 590	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
 591		return ERR_PTR(-EINVAL);
 592
 593	input += key_len;
 594	if (!isspace(*input))
 595		return ERR_PTR(-EINVAL);
 596	input = skip_spaces(input);
 597
 598	bdev = blkdev_get_no_open(MKDEV(major, minor));
 599	if (!bdev)
 600		return ERR_PTR(-ENODEV);
 601	if (bdev_is_partition(bdev)) {
 602		blkdev_put_no_open(bdev);
 603		return ERR_PTR(-ENODEV);
 604	}
 
 605
 606	*inputp = input;
 607	return bdev;
 608}
 609
 610/**
 611 * blkg_conf_prep - parse and prepare for per-blkg config update
 612 * @blkcg: target block cgroup
 613 * @pol: target policy
 614 * @input: input string
 615 * @ctx: blkg_conf_ctx to be filled
 616 *
 617 * Parse per-blkg config update from @input and initialize @ctx with the
 618 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 619 * part of @input following MAJ:MIN.  This function returns with RCU read
 620 * lock and queue lock held and must be paired with blkg_conf_finish().
 621 */
 622int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 623		   char *input, struct blkg_conf_ctx *ctx)
 624	__acquires(rcu) __acquires(&bdev->bd_disk->queue->queue_lock)
 625{
 626	struct block_device *bdev;
 627	struct request_queue *q;
 628	struct blkcg_gq *blkg;
 629	int ret;
 
 
 630
 631	bdev = blkcg_conf_open_bdev(&input);
 632	if (IS_ERR(bdev))
 633		return PTR_ERR(bdev);
 634
 635	q = bdev->bd_disk->queue;
 
 
 636
 637	rcu_read_lock();
 638	spin_lock_irq(&q->queue_lock);
 639
 640	blkg = blkg_lookup_check(blkcg, pol, q);
 641	if (IS_ERR(blkg)) {
 642		ret = PTR_ERR(blkg);
 643		goto fail_unlock;
 644	}
 645
 646	if (blkg)
 647		goto success;
 648
 649	/*
 650	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
 651	 * non-root blkgs have access to their parents.
 652	 */
 653	while (true) {
 654		struct blkcg *pos = blkcg;
 655		struct blkcg *parent;
 656		struct blkcg_gq *new_blkg;
 657
 658		parent = blkcg_parent(blkcg);
 659		while (parent && !__blkg_lookup(parent, q, false)) {
 660			pos = parent;
 661			parent = blkcg_parent(parent);
 662		}
 663
 664		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
 665		spin_unlock_irq(&q->queue_lock);
 666		rcu_read_unlock();
 667
 668		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
 669		if (unlikely(!new_blkg)) {
 670			ret = -ENOMEM;
 671			goto fail;
 672		}
 673
 674		if (radix_tree_preload(GFP_KERNEL)) {
 675			blkg_free(new_blkg);
 676			ret = -ENOMEM;
 677			goto fail;
 678		}
 679
 680		rcu_read_lock();
 681		spin_lock_irq(&q->queue_lock);
 682
 683		blkg = blkg_lookup_check(pos, pol, q);
 684		if (IS_ERR(blkg)) {
 685			ret = PTR_ERR(blkg);
 686			blkg_free(new_blkg);
 687			goto fail_preloaded;
 688		}
 689
 690		if (blkg) {
 691			blkg_free(new_blkg);
 692		} else {
 693			blkg = blkg_create(pos, q, new_blkg);
 694			if (IS_ERR(blkg)) {
 695				ret = PTR_ERR(blkg);
 696				goto fail_preloaded;
 697			}
 698		}
 699
 700		radix_tree_preload_end();
 701
 702		if (pos == blkcg)
 703			goto success;
 704	}
 705success:
 706	ctx->bdev = bdev;
 707	ctx->blkg = blkg;
 708	ctx->body = input;
 709	return 0;
 710
 711fail_preloaded:
 712	radix_tree_preload_end();
 713fail_unlock:
 714	spin_unlock_irq(&q->queue_lock);
 715	rcu_read_unlock();
 716fail:
 717	blkdev_put_no_open(bdev);
 718	/*
 719	 * If queue was bypassing, we should retry.  Do so after a
 720	 * short msleep().  It isn't strictly necessary but queue
 721	 * can be bypassing for some time and it's always nice to
 722	 * avoid busy looping.
 723	 */
 724	if (ret == -EBUSY) {
 725		msleep(10);
 726		ret = restart_syscall();
 727	}
 728	return ret;
 729}
 730EXPORT_SYMBOL_GPL(blkg_conf_prep);
 731
 732/**
 733 * blkg_conf_finish - finish up per-blkg config update
 734 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 735 *
 736 * Finish up after per-blkg config update.  This function must be paired
 737 * with blkg_conf_prep().
 738 */
 739void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 740	__releases(&ctx->bdev->bd_disk->queue->queue_lock) __releases(rcu)
 741{
 742	spin_unlock_irq(&ctx->bdev->bd_disk->queue->queue_lock);
 743	rcu_read_unlock();
 744	blkdev_put_no_open(ctx->bdev);
 745}
 746EXPORT_SYMBOL_GPL(blkg_conf_finish);
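/*
 * Sketch of the typical pairing (the "foo" policy, its blkcg_policy_foo
 * descriptor and the stored limit are hypothetical, not part of this
 * file): a policy's cgroup file write handler hands the raw buffer to
 * blkg_conf_prep(), updates its per-blkg data while the RCU read lock
 * and queue lock that prep() returns with are held, and then releases
 * the locks and the bdev reference with blkg_conf_finish().
 *
 *	static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		struct blkg_policy_data *pd;
 *		unsigned long long limit;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = -EINVAL;
 *		if (sscanf(ctx.body, "%llu", &limit) == 1) {
 *			pd = ctx.blkg->pd[blkcg_policy_foo.plid];
 *			... store @limit in @pd's policy-private fields ...
 *			ret = 0;
 *		}
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */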
 747
 748static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
 749{
 750	int i;
 751
 752	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
 753		dst->bytes[i] = src->bytes[i];
 754		dst->ios[i] = src->ios[i];
 755	}
 756}
 757
 758static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
 759{
 760	int i;
 761
 762	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
 763		dst->bytes[i] += src->bytes[i];
 764		dst->ios[i] += src->ios[i];
 765	}
 766}
 767
 768static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
 769{
 770	int i;
 771
 772	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
 773		dst->bytes[i] -= src->bytes[i];
 774		dst->ios[i] -= src->ios[i];
 775	}
 776}
 777
 778static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 779{
 780	struct blkcg *blkcg = css_to_blkcg(css);
 781	struct blkcg_gq *blkg;
 782
 783	/* Root-level stats are sourced from system-wide IO stats */
 784	if (!cgroup_parent(css->cgroup))
 785		return;
 786
 787	rcu_read_lock();
 788
 789	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 790		struct blkcg_gq *parent = blkg->parent;
 791		struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);
 792		struct blkg_iostat cur, delta;
 793		unsigned long flags;
 794		unsigned int seq;
 795
 796		/* fetch the current per-cpu values */
 797		do {
 798			seq = u64_stats_fetch_begin(&bisc->sync);
 799			blkg_iostat_set(&cur, &bisc->cur);
 800		} while (u64_stats_fetch_retry(&bisc->sync, seq));
 801
 802		/* propagate percpu delta to global */
 803		flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
 804		blkg_iostat_set(&delta, &cur);
 805		blkg_iostat_sub(&delta, &bisc->last);
 806		blkg_iostat_add(&blkg->iostat.cur, &delta);
 807		blkg_iostat_add(&bisc->last, &delta);
 808		u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
 809
 810		/* propagate global delta to parent (unless that's root) */
 811		if (parent && parent->parent) {
 812			flags = u64_stats_update_begin_irqsave(&parent->iostat.sync);
 813			blkg_iostat_set(&delta, &blkg->iostat.cur);
 814			blkg_iostat_sub(&delta, &blkg->iostat.last);
 815			blkg_iostat_add(&parent->iostat.cur, &delta);
 816			blkg_iostat_add(&blkg->iostat.last, &delta);
 817			u64_stats_update_end_irqrestore(&parent->iostat.sync, flags);
 818		}
 819	}
 820
 821	rcu_read_unlock();
 822}
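/*
 * Worked example of the flush above (numbers are illustrative): if a
 * CPU's running read-byte counter for a blkg has grown from 4096 to
 * 12288 since the last flush, the per-cpu delta is 12288 - 4096 = 8192.
 * That delta is added to the blkg's global iostat.cur, bisc->last is
 * advanced to 12288, and in the same pass the blkg-level delta
 * (iostat.cur minus iostat.last) is folded into the parent's iostat.cur
 * unless the parent is the root.
 */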
 823
 824/*
 825 * We source root cgroup stats from the system-wide stats to avoid
 826 * tracking the same information twice and incurring overhead when no
 827 * cgroups are defined. For that reason, cgroup_rstat_flush in
 828 * blkcg_print_stat does not actually fill out the iostat in the root
 829 * cgroup's blkcg_gq.
 830 *
 831 * However, we would like to re-use the printing code between the root and
 832 * non-root cgroups to the extent possible. For that reason, we simulate
 833 * flushing the root cgroup's stats by explicitly filling in the iostat
 834 * with disk level statistics.
 835 */
 836static void blkcg_fill_root_iostats(void)
 837{
 838	struct class_dev_iter iter;
 839	struct device *dev;
 840
 841	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 842	while ((dev = class_dev_iter_next(&iter))) {
 843		struct block_device *bdev = dev_to_bdev(dev);
 844		struct blkcg_gq *blkg =
 845			blk_queue_root_blkg(bdev->bd_disk->queue);
 846		struct blkg_iostat tmp;
 847		int cpu;
 848
 849		memset(&tmp, 0, sizeof(tmp));
 850		for_each_possible_cpu(cpu) {
 851			struct disk_stats *cpu_dkstats;
 852			unsigned long flags;
 853
 854			cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
 855			tmp.ios[BLKG_IOSTAT_READ] +=
 856				cpu_dkstats->ios[STAT_READ];
 857			tmp.ios[BLKG_IOSTAT_WRITE] +=
 858				cpu_dkstats->ios[STAT_WRITE];
 859			tmp.ios[BLKG_IOSTAT_DISCARD] +=
 860				cpu_dkstats->ios[STAT_DISCARD];
 861			/* convert sectors to bytes */
 862			tmp.bytes[BLKG_IOSTAT_READ] +=
 863				cpu_dkstats->sectors[STAT_READ] << 9;
 864			tmp.bytes[BLKG_IOSTAT_WRITE] +=
 865				cpu_dkstats->sectors[STAT_WRITE] << 9;
 866			tmp.bytes[BLKG_IOSTAT_DISCARD] +=
 867				cpu_dkstats->sectors[STAT_DISCARD] << 9;
 868
 869			flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
 870			blkg_iostat_set(&blkg->iostat.cur, &tmp);
 871			u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
 872		}
 873	}
 874}
 875
 876static int blkcg_print_stat(struct seq_file *sf, void *v)
 877{
 878	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
 879	struct blkcg_gq *blkg;
 880
 881	if (!seq_css(sf)->parent)
 882		blkcg_fill_root_iostats();
 883	else
 884		cgroup_rstat_flush(blkcg->css.cgroup);
 885
 886	rcu_read_lock();
 887
 888	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 889		struct blkg_iostat_set *bis = &blkg->iostat;
 890		const char *dname;
 891		char *buf;
 892		u64 rbytes, wbytes, rios, wios, dbytes, dios;
 893		size_t size = seq_get_buf(sf, &buf), off = 0;
 894		int i;
 895		bool has_stats = false;
 896		unsigned seq;
 897
 898		spin_lock_irq(&blkg->q->queue_lock);
 899
 900		if (!blkg->online)
 901			goto skip;
 902
 903		dname = blkg_dev_name(blkg);
 904		if (!dname)
 905			goto skip;
 906
 907		/*
 908		 * Hooray string manipulation, count is the size written NOT
 909		 * INCLUDING THE \0, so size is now count+1 less than what we
 910		 * had before, but we want to start writing the next bit from
 911		 * the \0 so we only add count to buf.
 912		 */
 913		off += scnprintf(buf+off, size-off, "%s ", dname);
 914
 915		do {
 916			seq = u64_stats_fetch_begin(&bis->sync);
 917
 918			rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
 919			wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
 920			dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
 921			rios = bis->cur.ios[BLKG_IOSTAT_READ];
 922			wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
 923			dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
 924		} while (u64_stats_fetch_retry(&bis->sync, seq));
 925
 926		if (rbytes || wbytes || rios || wios) {
 927			has_stats = true;
 928			off += scnprintf(buf+off, size-off,
 929					 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
 930					 rbytes, wbytes, rios, wios,
 931					 dbytes, dios);
 932		}
 933
 934		if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
 935			has_stats = true;
 936			off += scnprintf(buf+off, size-off,
 937					 " use_delay=%d delay_nsec=%llu",
 938					 atomic_read(&blkg->use_delay),
 939					(unsigned long long)atomic64_read(&blkg->delay_nsec));
 940		}
 941
 942		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 943			struct blkcg_policy *pol = blkcg_policy[i];
 944			size_t written;
 945
 946			if (!blkg->pd[i] || !pol->pd_stat_fn)
 947				continue;
 948
 949			written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
 950			if (written)
 951				has_stats = true;
 952			off += written;
 953		}
 954
 955		if (has_stats) {
 956			if (off < size - 1) {
 957				off += scnprintf(buf+off, size-off, "\n");
 958				seq_commit(sf, off);
 959			} else {
 960				seq_commit(sf, -1);
 961			}
 962		}
 963	skip:
 964		spin_unlock_irq(&blkg->q->queue_lock);
 965	}
 966
 967	rcu_read_unlock();
 968	return 0;
 969}
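/*
 * The resulting io.stat line for one device looks like the following
 * (the device number and counters are illustrative):
 *
 *	8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0
 *
 * followed by use_delay/delay_nsec when blkcg_debug_stats is set and by
 * whatever each enabled policy appends through its pd_stat_fn.
 */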
 970
 971static struct cftype blkcg_files[] = {
 972	{
 973		.name = "stat",
 974		.seq_show = blkcg_print_stat,
 975	},
 976	{ }	/* terminate */
 977};
 978
 979static struct cftype blkcg_legacy_files[] = {
 980	{
 981		.name = "reset_stats",
 982		.write_u64 = blkcg_reset_stats,
 983	},
 984	{ }	/* terminate */
 985};
 986
 987/*
 988 * blkcg destruction is a three-stage process.
 989 *
 990 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
 991 *    which offlines writeback.  Here we tie the next stage of blkg destruction
 992 *    to the completion of writeback associated with the blkcg.  This lets us
 993 *    avoid punting potentially large amounts of outstanding writeback to root
 994 *    while maintaining any ongoing policies.  The next stage is triggered when
 995 *    the nr_cgwbs count goes to zero.
 996 *
 997 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
 998 *    and handles the destruction of blkgs.  Here the css reference held by
 999 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
1000 *    This work may occur in cgwb_release_workfn() on the cgwb_release
1001 *    workqueue.  Any submitted ios that fail to get the blkg ref will be
1002 *    punted to the root_blkg.
1003 *
1004 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1005 *    This finally frees the blkcg.
1006 */
1007
1008/**
1009 * blkcg_css_offline - cgroup css_offline callback
1010 * @css: css of interest
1011 *
1012 * This function is called when @css is about to go away.  Here the cgwbs are
1013 * offlined first and only once writeback associated with the blkcg has
1014 * finished do we start step 2 (see above).
1015 */
1016static void blkcg_css_offline(struct cgroup_subsys_state *css)
1017{
1018	struct blkcg *blkcg = css_to_blkcg(css);
1019
1020	/* this prevents anyone from attaching or migrating to this blkcg */
1021	wb_blkcg_offline(blkcg);
1022
1023	/* put the base online pin allowing step 2 to be triggered */
1024	blkcg_unpin_online(blkcg);
1025}
1026
1027/**
1028 * blkcg_destroy_blkgs - responsible for shooting down blkgs
1029 * @blkcg: blkcg of interest
1030 *
1031 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
1032 * is nested inside q lock, this function performs reverse double lock dancing.
1033 * Destroying the blkgs releases the reference held on the blkcg's css allowing
1034 * blkcg_css_free to eventually be called.
1035 *
1036 * This is the blkcg counterpart of ioc_release_fn().
1037 */
1038void blkcg_destroy_blkgs(struct blkcg *blkcg)
1039{
1040	might_sleep();
1041
1042	spin_lock_irq(&blkcg->lock);
1043
1044	while (!hlist_empty(&blkcg->blkg_list)) {
1045		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1046						struct blkcg_gq, blkcg_node);
1047		struct request_queue *q = blkg->q;
1048
1049		if (need_resched() || !spin_trylock(&q->queue_lock)) {
1050			/*
1051			 * Given that the system can accumulate a huge number
1052			 * of blkgs in pathological cases, check to see if we
1053			 * need to reschedule to avoid a softlockup.
1054			 */
1055			spin_unlock_irq(&blkcg->lock);
1056			cond_resched();
1057			spin_lock_irq(&blkcg->lock);
1058			continue;
1059		}
1060
1061		blkg_destroy(blkg);
1062		spin_unlock(&q->queue_lock);
1063	}
1064
1065	spin_unlock_irq(&blkcg->lock);
1066}
1067
1068static void blkcg_css_free(struct cgroup_subsys_state *css)
1069{
1070	struct blkcg *blkcg = css_to_blkcg(css);
1071	int i;
1072
1073	mutex_lock(&blkcg_pol_mutex);
1074
1075	list_del(&blkcg->all_blkcgs_node);
1076
1077	for (i = 0; i < BLKCG_MAX_POLS; i++)
1078		if (blkcg->cpd[i])
1079			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1080
1081	mutex_unlock(&blkcg_pol_mutex);
1082
1083	kfree(blkcg);
1084}
1085
1086static struct cgroup_subsys_state *
1087blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
1088{
1089	struct blkcg *blkcg;
1090	struct cgroup_subsys_state *ret;
1091	int i;
1092
1093	mutex_lock(&blkcg_pol_mutex);
1094
1095	if (!parent_css) {
1096		blkcg = &blkcg_root;
1097	} else {
1098		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1099		if (!blkcg) {
1100			ret = ERR_PTR(-ENOMEM);
1101			goto unlock;
1102		}
1103	}
1104
1105	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
1106		struct blkcg_policy *pol = blkcg_policy[i];
1107		struct blkcg_policy_data *cpd;
1108
1109		/*
1110		 * If the policy hasn't been attached yet, wait for it
1111		 * to be attached before doing anything else. Otherwise,
1112		 * check if the policy requires any specific per-cgroup
1113		 * data: if it does, allocate and initialize it.
1114		 */
1115		if (!pol || !pol->cpd_alloc_fn)
1116			continue;
1117
1118		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1119		if (!cpd) {
1120			ret = ERR_PTR(-ENOMEM);
1121			goto free_pd_blkcg;
1122		}
1123		blkcg->cpd[i] = cpd;
1124		cpd->blkcg = blkcg;
1125		cpd->plid = i;
1126		if (pol->cpd_init_fn)
1127			pol->cpd_init_fn(cpd);
1128	}
1129
1130	spin_lock_init(&blkcg->lock);
1131	refcount_set(&blkcg->online_pin, 1);
1132	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1133	INIT_HLIST_HEAD(&blkcg->blkg_list);
1134#ifdef CONFIG_CGROUP_WRITEBACK
1135	INIT_LIST_HEAD(&blkcg->cgwb_list);
1136#endif
1137	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1138
1139	mutex_unlock(&blkcg_pol_mutex);
1140	return &blkcg->css;
1141
1142free_pd_blkcg:
1143	for (i--; i >= 0; i--)
1144		if (blkcg->cpd[i])
1145			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1146
1147	if (blkcg != &blkcg_root)
1148		kfree(blkcg);
1149unlock:
1150	mutex_unlock(&blkcg_pol_mutex);
1151	return ret;
1152}
1153
1154static int blkcg_css_online(struct cgroup_subsys_state *css)
1155{
1156	struct blkcg *blkcg = css_to_blkcg(css);
1157	struct blkcg *parent = blkcg_parent(blkcg);
1158
1159	/*
1160	 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
1161	 * don't go offline while cgwbs are still active on them.  Pin the
1162	 * parent so that offline always happens towards the root.
1163	 */
1164	if (parent)
1165		blkcg_pin_online(parent);
1166	return 0;
1167}
1168
1169/**
1170 * blkcg_init_queue - initialize blkcg part of request queue
1171 * @q: request_queue to initialize
1172 *
1173 * Called from blk_alloc_queue(). Responsible for initializing blkcg
1174 * part of new request_queue @q.
1175 *
1176 * RETURNS:
1177 * 0 on success, -errno on failure.
1178 */
1179int blkcg_init_queue(struct request_queue *q)
1180{
1181	struct blkcg_gq *new_blkg, *blkg;
1182	bool preloaded;
1183	int ret;
1184
1185	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1186	if (!new_blkg)
1187		return -ENOMEM;
1188
1189	preloaded = !radix_tree_preload(GFP_KERNEL);
1190
1191	/* Make sure the root blkg exists. */
1192	rcu_read_lock();
1193	spin_lock_irq(&q->queue_lock);
1194	blkg = blkg_create(&blkcg_root, q, new_blkg);
1195	if (IS_ERR(blkg))
1196		goto err_unlock;
1197	q->root_blkg = blkg;
1198	spin_unlock_irq(&q->queue_lock);
1199	rcu_read_unlock();
1200
1201	if (preloaded)
1202		radix_tree_preload_end();
1203
1204	ret = blk_ioprio_init(q);
1205	if (ret)
1206		goto err_destroy_all;
1207
1208	ret = blk_throtl_init(q);
1209	if (ret)
1210		goto err_destroy_all;
1211
1212	ret = blk_iolatency_init(q);
1213	if (ret) {
1214		blk_throtl_exit(q);
1215		goto err_destroy_all;
1216	}
1217
1218	return 0;
1219
1220err_destroy_all:
1221	blkg_destroy_all(q);
1222	return ret;
1223err_unlock:
1224	spin_unlock_irq(&q->queue_lock);
1225	rcu_read_unlock();
1226	if (preloaded)
1227		radix_tree_preload_end();
1228	return PTR_ERR(blkg);
1229}
1230
1231/**
1232 * blkcg_exit_queue - exit and release blkcg part of request_queue
1233 * @q: request_queue being released
1234 *
1235 * Called from blk_exit_queue().  Responsible for exiting blkcg part.
1236 */
1237void blkcg_exit_queue(struct request_queue *q)
1238{
1239	blkg_destroy_all(q);
1240	blk_throtl_exit(q);
1241}
1242
1243static void blkcg_bind(struct cgroup_subsys_state *root_css)
1244{
1245	int i;
1246
1247	mutex_lock(&blkcg_pol_mutex);
1248
1249	for (i = 0; i < BLKCG_MAX_POLS; i++) {
1250		struct blkcg_policy *pol = blkcg_policy[i];
1251		struct blkcg *blkcg;
1252
1253		if (!pol || !pol->cpd_bind_fn)
1254			continue;
1255
1256		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1257			if (blkcg->cpd[pol->plid])
1258				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1259	}
1260	mutex_unlock(&blkcg_pol_mutex);
1261}
1262
1263static void blkcg_exit(struct task_struct *tsk)
1264{
1265	if (tsk->throttle_queue)
1266		blk_put_queue(tsk->throttle_queue);
1267	tsk->throttle_queue = NULL;
1268}
1269
1270struct cgroup_subsys io_cgrp_subsys = {
1271	.css_alloc = blkcg_css_alloc,
1272	.css_online = blkcg_css_online,
1273	.css_offline = blkcg_css_offline,
1274	.css_free = blkcg_css_free,
1275	.css_rstat_flush = blkcg_rstat_flush,
1276	.bind = blkcg_bind,
1277	.dfl_cftypes = blkcg_files,
1278	.legacy_cftypes = blkcg_legacy_files,
1279	.legacy_name = "blkio",
1280	.exit = blkcg_exit,
1281#ifdef CONFIG_MEMCG
1282	/*
1283	 * This ensures that, if available, memcg is automatically enabled
1284	 * together on the default hierarchy so that the owner cgroup can
1285	 * be retrieved from writeback pages.
1286	 */
1287	.depends_on = 1 << memory_cgrp_id,
1288#endif
1289};
1290EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1291
1292/**
1293 * blkcg_activate_policy - activate a blkcg policy on a request_queue
1294 * @q: request_queue of interest
1295 * @pol: blkcg policy to activate
1296 *
1297 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
1298 * bypass mode to populate its blkgs with policy_data for @pol.
1299 *
1300 * Activation happens with @q bypassed, so nobody would be accessing blkgs
1301 * from IO path.  Update of each blkg is protected by both queue and blkcg
1302 * locks so that holding either lock and testing blkcg_policy_enabled() is
1303 * always enough for dereferencing policy data.
1304 *
1305 * The caller is responsible for synchronizing [de]activations and policy
1306 * [un]registrations.  Returns 0 on success, -errno on failure.
1307 */
1308int blkcg_activate_policy(struct request_queue *q,
1309			  const struct blkcg_policy *pol)
1310{
1311	struct blkg_policy_data *pd_prealloc = NULL;
1312	struct blkcg_gq *blkg, *pinned_blkg = NULL;
1313	int ret;
1314
1315	if (blkcg_policy_enabled(q, pol))
1316		return 0;
1317
1318	if (queue_is_mq(q))
1319		blk_mq_freeze_queue(q);
1320retry:
1321	spin_lock_irq(&q->queue_lock);
1322
1323	/* blkg_list is pushed at the head, reverse walk to allocate parents first */
1324	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
1325		struct blkg_policy_data *pd;
1326
1327		if (blkg->pd[pol->plid])
1328			continue;
1329
1330		/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
1331		if (blkg == pinned_blkg) {
1332			pd = pd_prealloc;
1333			pd_prealloc = NULL;
1334		} else {
1335			pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
1336					      blkg->blkcg);
1337		}
1338
1339		if (!pd) {
1340			/*
1341			 * GFP_NOWAIT failed.  Free the existing one and
1342			 * prealloc for @blkg w/ GFP_KERNEL.
1343			 */
1344			if (pinned_blkg)
1345				blkg_put(pinned_blkg);
1346			blkg_get(blkg);
1347			pinned_blkg = blkg;
1348
1349			spin_unlock_irq(&q->queue_lock);
1350
1351			if (pd_prealloc)
1352				pol->pd_free_fn(pd_prealloc);
1353			pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
1354						       blkg->blkcg);
1355			if (pd_prealloc)
1356				goto retry;
1357			else
1358				goto enomem;
1359		}
1360
1361		blkg->pd[pol->plid] = pd;
1362		pd->blkg = blkg;
1363		pd->plid = pol->plid;
1364	}
1365
1366	/* all allocated, init in the same order */
1367	if (pol->pd_init_fn)
1368		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
1369			pol->pd_init_fn(blkg->pd[pol->plid]);
1370
1371	__set_bit(pol->plid, q->blkcg_pols);
1372	ret = 0;
1373
1374	spin_unlock_irq(&q->queue_lock);
1375out:
1376	if (queue_is_mq(q))
1377		blk_mq_unfreeze_queue(q);
1378	if (pinned_blkg)
1379		blkg_put(pinned_blkg);
1380	if (pd_prealloc)
1381		pol->pd_free_fn(pd_prealloc);
1382	return ret;
1383
1384enomem:
1385	/* alloc failed, nothing's initialized yet, free everything */
1386	spin_lock_irq(&q->queue_lock);
1387	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1388		struct blkcg *blkcg = blkg->blkcg;
1389
1390		spin_lock(&blkcg->lock);
1391		if (blkg->pd[pol->plid]) {
1392			pol->pd_free_fn(blkg->pd[pol->plid]);
1393			blkg->pd[pol->plid] = NULL;
1394		}
1395		spin_unlock(&blkcg->lock);
1396	}
1397	spin_unlock_irq(&q->queue_lock);
1398	ret = -ENOMEM;
1399	goto out;
1400}
1401EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1402
1403/**
1404 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1405 * @q: request_queue of interest
1406 * @pol: blkcg policy to deactivate
1407 *
1408 * Deactivate @pol on @q.  Follows the same synchronization rules as
1409 * blkcg_activate_policy().
1410 */
1411void blkcg_deactivate_policy(struct request_queue *q,
1412			     const struct blkcg_policy *pol)
1413{
1414	struct blkcg_gq *blkg;
1415
1416	if (!blkcg_policy_enabled(q, pol))
1417		return;
1418
1419	if (queue_is_mq(q))
1420		blk_mq_freeze_queue(q);
1421
1422	spin_lock_irq(&q->queue_lock);
1423
1424	__clear_bit(pol->plid, q->blkcg_pols);
1425
1426	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1427		struct blkcg *blkcg = blkg->blkcg;
1428
1429		spin_lock(&blkcg->lock);
1430		if (blkg->pd[pol->plid]) {
1431			if (pol->pd_offline_fn)
1432				pol->pd_offline_fn(blkg->pd[pol->plid]);
1433			pol->pd_free_fn(blkg->pd[pol->plid]);
1434			blkg->pd[pol->plid] = NULL;
1435		}
1436		spin_unlock(&blkcg->lock);
1437	}
1438
1439	spin_unlock_irq(&q->queue_lock);
1440
1441	if (queue_is_mq(q))
1442		blk_mq_unfreeze_queue(q);
1443}
1444EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
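/*
 * Sketch of the usual call pattern (the "foo" policy and its per-queue
 * setup are hypothetical): a policy activates itself on a queue once its
 * per-queue state exists, and deactivates on teardown, which frees all
 * of its per-blkg policy data on that queue.
 *
 *	static int foo_init_queue(struct request_queue *q)
 *	{
 *		int ret;
 *
 *		... allocate and attach foo's per-queue state to @q ...
 *
 *		ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *		if (ret)
 *			... undo the per-queue setup ...
 *		return ret;
 *	}
 *
 *	static void foo_exit_queue(struct request_queue *q)
 *	{
 *		blkcg_deactivate_policy(q, &blkcg_policy_foo);
 *		... free foo's per-queue state ...
 *	}
 */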
1445
1446/**
1447 * blkcg_policy_register - register a blkcg policy
1448 * @pol: blkcg policy to register
1449 *
1450 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1451 * successful registration.  Returns 0 on success and -errno on failure.
1452 */
1453int blkcg_policy_register(struct blkcg_policy *pol)
1454{
1455	struct blkcg *blkcg;
1456	int i, ret;
1457
1458	mutex_lock(&blkcg_pol_register_mutex);
1459	mutex_lock(&blkcg_pol_mutex);
1460
1461	/* find an empty slot */
1462	ret = -ENOSPC;
1463	for (i = 0; i < BLKCG_MAX_POLS; i++)
1464		if (!blkcg_policy[i])
1465			break;
1466	if (i >= BLKCG_MAX_POLS) {
1467		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
1468		goto err_unlock;
1469	}
1470
1471	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
1472	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1473		(!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1474		goto err_unlock;
1475
1476	/* register @pol */
1477	pol->plid = i;
1478	blkcg_policy[pol->plid] = pol;
1479
1480	/* allocate and install cpd's */
1481	if (pol->cpd_alloc_fn) {
1482		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1483			struct blkcg_policy_data *cpd;
1484
1485			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1486			if (!cpd)
1487				goto err_free_cpds;
1488
1489			blkcg->cpd[pol->plid] = cpd;
1490			cpd->blkcg = blkcg;
1491			cpd->plid = pol->plid;
1492			if (pol->cpd_init_fn)
1493				pol->cpd_init_fn(cpd);
1494		}
1495	}
1496
1497	mutex_unlock(&blkcg_pol_mutex);
1498
1499	/* everything is in place, add intf files for the new policy */
1500	if (pol->dfl_cftypes)
1501		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1502					       pol->dfl_cftypes));
1503	if (pol->legacy_cftypes)
1504		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1505						  pol->legacy_cftypes));
1506	mutex_unlock(&blkcg_pol_register_mutex);
1507	return 0;
1508
1509err_free_cpds:
1510	if (pol->cpd_free_fn) {
1511		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1512			if (blkcg->cpd[pol->plid]) {
1513				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1514				blkcg->cpd[pol->plid] = NULL;
1515			}
1516		}
1517	}
1518	blkcg_policy[pol->plid] = NULL;
1519err_unlock:
1520	mutex_unlock(&blkcg_pol_mutex);
1521	mutex_unlock(&blkcg_pol_register_mutex);
1522	return ret;
1523}
1524EXPORT_SYMBOL_GPL(blkcg_policy_register);
1525
1526/**
1527 * blkcg_policy_unregister - unregister a blkcg policy
1528 * @pol: blkcg policy to unregister
1529 *
1530 * Undo blkcg_policy_register(@pol).  Might sleep.
1531 */
1532void blkcg_policy_unregister(struct blkcg_policy *pol)
1533{
1534	struct blkcg *blkcg;
1535
1536	mutex_lock(&blkcg_pol_register_mutex);
1537
1538	if (WARN_ON(blkcg_policy[pol->plid] != pol))
1539		goto out_unlock;
1540
1541	/* kill the intf files first */
1542	if (pol->dfl_cftypes)
1543		cgroup_rm_cftypes(pol->dfl_cftypes);
1544	if (pol->legacy_cftypes)
1545		cgroup_rm_cftypes(pol->legacy_cftypes);
1546
1547	/* remove cpds and unregister */
1548	mutex_lock(&blkcg_pol_mutex);
1549
1550	if (pol->cpd_free_fn) {
1551		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1552			if (blkcg->cpd[pol->plid]) {
1553				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1554				blkcg->cpd[pol->plid] = NULL;
1555			}
1556		}
1557	}
1558	blkcg_policy[pol->plid] = NULL;
1559
1560	mutex_unlock(&blkcg_pol_mutex);
1561out_unlock:
1562	mutex_unlock(&blkcg_pol_register_mutex);
1563}
1564EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
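/*
 * Registration sketch (the "foo" names, callbacks and cftype arrays are
 * stand-ins, not part of this file): a policy fills in a struct
 * blkcg_policy with its alloc/free callbacks in matching pairs and
 * registers it once at init time; blkcg core then instantiates foo's pd
 * for every blkg on queues where the policy gets activated.
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_files,
 *		.legacy_cftypes	= foo_legacy_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_offline_fn	= foo_pd_offline,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 */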
1565
1566bool __blkcg_punt_bio_submit(struct bio *bio)
1567{
1568	struct blkcg_gq *blkg = bio->bi_blkg;
1569
1570	/* consume the flag first */
1571	bio->bi_opf &= ~REQ_CGROUP_PUNT;
1572
1573	/* never bounce for the root cgroup */
1574	if (!blkg->parent)
1575		return false;
1576
1577	spin_lock_bh(&blkg->async_bio_lock);
1578	bio_list_add(&blkg->async_bios, bio);
1579	spin_unlock_bh(&blkg->async_bio_lock);
1580
1581	queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
1582	return true;
1583}
1584
1585/*
1586 * Scale the accumulated delay based on how long it has been since we updated
1587 * the delay.  We only call this when we are adding delay, in case it's been a
1588 * while since we added delay, and when we are checking to see if we need to
1589 * delay a task, to account for any delays that may have occurred.
1590 */
1591static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1592{
1593	u64 old = atomic64_read(&blkg->delay_start);
1594
1595	/* negative use_delay means no scaling, see blkcg_set_delay() */
1596	if (atomic_read(&blkg->use_delay) < 0)
1597		return;
1598
1599	/*
1600	 * We only want to scale down every second.  The idea here is that we
1601	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
1602	 * time window.  We only want to throttle tasks for recent delay that
1603	 * has occurred, in 1 second time windows since that's the maximum
1604	 * things can be throttled.  We save the current delay window in
1605	 * blkg->last_delay so we know what amount is still left to be charged
1606	 * to the blkg from this point onward.  blkg->last_use keeps track of
1607	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
1608	 * are ok with whatever is happening now, and we can take away more of
1609	 * the accumulated delay as we've already throttled enough that
1610	 * everybody is happy with their IO latencies.
1611	 */
1612	if (time_before64(old + NSEC_PER_SEC, now) &&
1613	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
1614		u64 cur = atomic64_read(&blkg->delay_nsec);
1615		u64 sub = min_t(u64, blkg->last_delay, now - old);
1616		int cur_use = atomic_read(&blkg->use_delay);
1617
1618		/*
1619		 * We've been unthrottled, subtract a larger chunk of our
1620		 * accumulated delay.
1621		 */
1622		if (cur_use < blkg->last_use)
1623			sub = max_t(u64, sub, blkg->last_delay >> 1);
1624
1625		/*
1626		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
1627		 * should only ever be growing except here where we subtract out
1628		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
1629		 * rather not end up with negative numbers.
1630		 */
1631		if (unlikely(cur < sub)) {
1632			atomic64_set(&blkg->delay_nsec, 0);
1633			blkg->last_delay = 0;
1634		} else {
1635			atomic64_sub(sub, &blkg->delay_nsec);
1636			blkg->last_delay = cur - sub;
1637		}
1638		blkg->last_use = cur_use;
1639	}
1640}
1641
1642/*
1643 * This is called when we want to actually walk up the hierarchy and check to
1644 * see if we need to throttle, and then actually throttle if there is some
1645 * accumulated delay.  This should only be called upon return to user space so
1646 * we're not holding some lock that would induce a priority inversion.
1647 */
1648static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1649{
1650	unsigned long pflags;
1651	bool clamp;
1652	u64 now = ktime_to_ns(ktime_get());
1653	u64 exp;
1654	u64 delay_nsec = 0;
1655	int tok;
1656
1657	while (blkg->parent) {
1658		int use_delay = atomic_read(&blkg->use_delay);
1659
1660		if (use_delay) {
1661			u64 this_delay;
1662
1663			blkcg_scale_delay(blkg, now);
1664			this_delay = atomic64_read(&blkg->delay_nsec);
1665			if (this_delay > delay_nsec) {
1666				delay_nsec = this_delay;
1667				clamp = use_delay > 0;
1668			}
1669		}
1670		blkg = blkg->parent;
1671	}
1672
1673	if (!delay_nsec)
1674		return;
1675
1676	/*
1677	 * Let's not sleep for all eternity if we've amassed a huge delay.
1678	 * Swapping or metadata IO can accumulate tens of seconds worth of
1679	 * delay, and we want userspace to be able to do _something_, so cap the
1680	 * delays at 0.25s.  If there are tens of seconds worth of delay then
1681	 * tasks will be delayed by 0.25 seconds for every syscall.  If
1682	 * blkcg_set_delay() was used as indicated by negative use_delay, the
1683	 * caller is responsible for regulating the range.
1684	 */
1685	if (clamp)
1686		delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
1687
1688	if (use_memdelay)
1689		psi_memstall_enter(&pflags);
1690
1691	exp = ktime_add_ns(now, delay_nsec);
1692	tok = io_schedule_prepare();
1693	do {
1694		__set_current_state(TASK_KILLABLE);
1695		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1696			break;
1697	} while (!fatal_signal_pending(current));
1698	io_schedule_finish(tok);
1699
1700	if (use_memdelay)
1701		psi_memstall_leave(&pflags);
1702}
1703
1704/**
1705 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
1706 *
1707 * This is only called if we've been marked with set_notify_resume().  Obviously
1708 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1709 * check to see if current->throttle_queue is set and if not this doesn't do
1710 * anything.  This should only ever be called by the resume code, it's not meant
1711 * to be called by people willy-nilly as it will actually do the work to
1712 * throttle the task if it is setup for throttling.
1713 */
1714void blkcg_maybe_throttle_current(void)
1715{
1716	struct request_queue *q = current->throttle_queue;
1717	struct cgroup_subsys_state *css;
1718	struct blkcg *blkcg;
1719	struct blkcg_gq *blkg;
1720	bool use_memdelay = current->use_memdelay;
1721
1722	if (!q)
1723		return;
1724
1725	current->throttle_queue = NULL;
1726	current->use_memdelay = false;
1727
1728	rcu_read_lock();
1729	css = kthread_blkcg();
1730	if (css)
1731		blkcg = css_to_blkcg(css);
1732	else
1733		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
1734
1735	if (!blkcg)
1736		goto out;
1737	blkg = blkg_lookup(blkcg, q);
1738	if (!blkg)
1739		goto out;
1740	if (!blkg_tryget(blkg))
1741		goto out;
1742	rcu_read_unlock();
1743
1744	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1745	blkg_put(blkg);
1746	blk_put_queue(q);
1747	return;
1748out:
1749	rcu_read_unlock();
1750	blk_put_queue(q);
1751}
1752
1753/**
1754 * blkcg_schedule_throttle - this task needs to check for throttling
1755 * @q: the request queue IO was submitted on
1756 * @use_memdelay: do we charge this to memory delay for PSI
1757 *
1758 * This is called by the IO controller when we know there's delay accumulated
1759 * for the blkg for this task.  We do not pass the blkg because there are places
1760 * we call this that may not have that information; the swapping code, for
1761 * instance, will only have a request_queue at that point.  This sets the
1762 * notify_resume for the task to check and see if it requires throttling before
1763 * returning to user space.
1764 *
1765 * We will only schedule once per syscall.  You can call this over and over
1766 * again and it will only do the check once upon return to user space, and only
1767 * throttle once.  If the task needs to be throttled again it'll need to be
1768 * re-set the next time we see the task.
1769 */
1770void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
1771{
1772	if (unlikely(current->flags & PF_KTHREAD))
1773		return;
1774
1775	if (current->throttle_queue != q) {
1776		if (!blk_get_queue(q))
1777			return;
1778
1779		if (current->throttle_queue)
1780			blk_put_queue(current->throttle_queue);
1781		current->throttle_queue = q;
1782	}
1783
1784	if (use_memdelay)
1785		current->use_memdelay = use_memdelay;
1786	set_notify_resume(current);
1787}
1788
1789/**
1790 * blkcg_add_delay - add delay to this blkg
1791 * @blkg: blkg of interest
1792 * @now: the current time in nanoseconds
1793 * @delta: how many nanoseconds of delay to add
1794 *
1795 * Charge @delta to the blkg's current delay accumulation.  This is used to
1796 * throttle tasks if an IO controller thinks we need more throttling.
1797 */
1798void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1799{
1800	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
1801		return;
1802	blkcg_scale_delay(blkg, now);
1803	atomic64_add(delta, &blkg->delay_nsec);
1804}
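/*
 * Sketch of how an IO controller combines the helpers above
 * (illustrative; @blkg and @delta_ns come from wherever the controller
 * measured the overage): charge the delay to the blkg, then mark the
 * current task so that it throttles itself on its way back to user
 * space.  Passing true to blkcg_schedule_throttle() additionally
 * accounts the stall as memory pressure for PSI.
 *
 *	u64 now = ktime_to_ns(ktime_get());
 *
 *	blkcg_add_delay(blkg, now, delta_ns);
 *	blkcg_schedule_throttle(blkg->q, true);
 */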
1805
1806/**
1807 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
1808 * @bio: target bio
1809 * @css: target css
1810 *
1811 * As the failure mode here is to walk up the blkg tree, this ensures that the
1812 * blkg->parent pointers are always valid.  This returns the blkg that it ended
1813 * up taking a reference on or %NULL if no reference was taken.
1814 */
1815static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
1816		struct cgroup_subsys_state *css)
1817{
1818	struct blkcg_gq *blkg, *ret_blkg = NULL;
1819
1820	rcu_read_lock();
1821	blkg = blkg_lookup_create(css_to_blkcg(css),
1822				  bio->bi_bdev->bd_disk->queue);
1823	while (blkg) {
1824		if (blkg_tryget(blkg)) {
1825			ret_blkg = blkg;
1826			break;
1827		}
1828		blkg = blkg->parent;
1829	}
1830	rcu_read_unlock();
1831
1832	return ret_blkg;
1833}
1834
1835/**
1836 * bio_associate_blkg_from_css - associate a bio with a specified css
1837 * @bio: target bio
1838 * @css: target css
1839 *
1840 * Associate @bio with the blkg found by combining the css's blkg and the
1841 * request_queue of the @bio.  An association failure is handled by walking up
1842 * the blkg tree.  Therefore, the blkg associated can be anything between @blkg
1843 * and q->root_blkg.  This situation only happens when a cgroup is dying and
1844 * then the remaining bios will spill to the closest alive blkg.
1845 *
1846 * A reference will be taken on the blkg and will be released when @bio is
1847 * freed.
1848 */
1849void bio_associate_blkg_from_css(struct bio *bio,
1850				 struct cgroup_subsys_state *css)
1851{
1852	if (bio->bi_blkg)
1853		blkg_put(bio->bi_blkg);
1854
1855	if (css && css->parent) {
1856		bio->bi_blkg = blkg_tryget_closest(bio, css);
1857	} else {
1858		blkg_get(bio->bi_bdev->bd_disk->queue->root_blkg);
1859		bio->bi_blkg = bio->bi_bdev->bd_disk->queue->root_blkg;
1860	}
1861}
1862EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
1863
1864/**
1865 * bio_associate_blkg - associate a bio with a blkg
1866 * @bio: target bio
1867 *
1868 * Associate @bio with the blkg found from the bio's css and request_queue.
1869 * If one is not found, blkg_lookup_create() creates the blkg.  If a blkg is
1870 * already associated, the css is reused and association redone as the
1871 * request_queue may have changed.
1872 */
1873void bio_associate_blkg(struct bio *bio)
1874{
1875	struct cgroup_subsys_state *css;
1876
1877	rcu_read_lock();
1878
1879	if (bio->bi_blkg)
1880		css = &bio_blkcg(bio)->css;
1881	else
1882		css = blkcg_css();
1883
1884	bio_associate_blkg_from_css(bio, css);
1885
1886	rcu_read_unlock();
1887}
1888EXPORT_SYMBOL_GPL(bio_associate_blkg);
1889
1890/**
1891 * bio_clone_blkg_association - clone blkg association from src to dst bio
1892 * @dst: destination bio
1893 * @src: source bio
1894 */
1895void bio_clone_blkg_association(struct bio *dst, struct bio *src)
1896{
1897	if (src->bi_blkg) {
1898		if (dst->bi_blkg)
1899			blkg_put(dst->bi_blkg);
1900		blkg_get(src->bi_blkg);
1901		dst->bi_blkg = src->bi_blkg;
1902	}
1903}
1904EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
1905
1906static int blk_cgroup_io_type(struct bio *bio)
1907{
1908	if (op_is_discard(bio->bi_opf))
1909		return BLKG_IOSTAT_DISCARD;
1910	if (op_is_write(bio->bi_opf))
1911		return BLKG_IOSTAT_WRITE;
1912	return BLKG_IOSTAT_READ;
1913}
1914
1915void blk_cgroup_bio_start(struct bio *bio)
1916{
1917	int rwd = blk_cgroup_io_type(bio), cpu;
1918	struct blkg_iostat_set *bis;
1919	unsigned long flags;
1920
1921	cpu = get_cpu();
1922	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
1923	flags = u64_stats_update_begin_irqsave(&bis->sync);
1924
1925	/*
1926	 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
1927	 * bio and we would have already accounted for the size of the bio.
1928	 */
1929	if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
1930		bio_set_flag(bio, BIO_CGROUP_ACCT);
1931		bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
1932	}
1933	bis->cur.ios[rwd]++;
1934
1935	u64_stats_update_end_irqrestore(&bis->sync, flags);
1936	if (cgroup_subsys_on_dfl(io_cgrp_subsys))
1937		cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
1938	put_cpu();
1939}
1940
1941static int __init blkcg_init(void)
1942{
1943	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
1944					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
1945					    WQ_UNBOUND | WQ_SYSFS, 0);
1946	if (!blkcg_punt_bio_wq)
1947		return -ENOMEM;
1948	return 0;
1949}
1950subsys_initcall(blkcg_init);
1951
1952module_param(blkcg_debug_stats, bool, 0644);
1953MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");