v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Common Block IO controller cgroup interface
   4 *
   5 * Based on ideas and code from CFQ, CFS and BFQ:
   6 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   7 *
   8 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
   9 *		      Paolo Valente <paolo.valente@unimore.it>
  10 *
  11 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
  12 * 	              Nauman Rafique <nauman@google.com>
  13 *
  14 * For policy-specific per-blkcg data:
  15 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
  16 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
  17 */
  18#include <linux/ioprio.h>
  19#include <linux/kdev_t.h>
  20#include <linux/module.h>
  21#include <linux/sched/signal.h>
  22#include <linux/err.h>
  23#include <linux/blkdev.h>
  24#include <linux/backing-dev.h>
  25#include <linux/slab.h>
  26#include <linux/delay.h>
  27#include <linux/atomic.h>
  28#include <linux/ctype.h>
  29#include <linux/resume_user_mode.h>
  30#include <linux/psi.h>
  31#include <linux/part_stat.h>
  32#include "blk.h"
  33#include "blk-cgroup.h"
  34#include "blk-ioprio.h"
  35#include "blk-throttle.h"
  36#include "blk-rq-qos.h"
  37
  38/*
  39 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
  40 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
  41 * policy [un]register operations including cgroup file additions /
  42 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
  43 * allows grabbing it from cgroup callbacks.
  44 */
  45static DEFINE_MUTEX(blkcg_pol_register_mutex);
  46static DEFINE_MUTEX(blkcg_pol_mutex);
  47
  48struct blkcg blkcg_root;
  49EXPORT_SYMBOL_GPL(blkcg_root);
  50
  51struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
  52EXPORT_SYMBOL_GPL(blkcg_root_css);
  53
  54static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
  55
  56static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */
  57
  58bool blkcg_debug_stats = false;
  59static struct workqueue_struct *blkcg_punt_bio_wq;
  60
  61#define BLKG_DESTROY_BATCH_SIZE  64
  62
  63/*
  64 * Lockless lists for tracking IO stats update
  65 *
  66 * New IO stats are stored in the percpu iostat_cpu within blkcg_gq (blkg).
  67 * There are multiple blkg's (one for each block device) attached to each
  68 * blkcg. The rstat code keeps track of which cpu has IO stats updated,
  69 * but it doesn't know which blkg has the updated stats. If there are many
  70 * block devices in a system, the cost of iterating all the blkg's to flush
  71 * out the IO stats can be high. To reduce such overhead, a set of percpu
  72 * lockless lists (lhead) per blkcg are used to track the set of recently
  73 * updated iostat_cpu's since the last flush. An iostat_cpu will be put
  74 * onto the lockless list on the update side [blk_cgroup_bio_start()] if
  75 * not there yet and then removed when being flushed [blkcg_rstat_flush()].
  76 * References to blkg are gotten and then put back in the process to
  77 * protect against blkg removal.
  78 *
  79 * Return: 0 if successful or -ENOMEM if allocation fails.
  80 */
  81static int init_blkcg_llists(struct blkcg *blkcg)
  82{
  83	int cpu;
  84
  85	blkcg->lhead = alloc_percpu_gfp(struct llist_head, GFP_KERNEL);
  86	if (!blkcg->lhead)
  87		return -ENOMEM;
  88
  89	for_each_possible_cpu(cpu)
  90		init_llist_head(per_cpu_ptr(blkcg->lhead, cpu));
  91	return 0;
  92}
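/*
 * A minimal sketch of the update-side pattern described above, as it runs
 * from the bio accounting path (see blk_cgroup_bio_start()): the per-cpu
 * iostat_cpu is queued on the local-cpu lockless list only if it isn't
 * already there, and a blkg reference is taken so the blkg survives until
 * blkcg_rstat_flush() drops it.  "cpu" stands for the local CPU here.
 *
 *	struct blkg_iostat_set *bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
 *
 *	if (!READ_ONCE(bis->lqueued)) {
 *		llist_add(&bis->lnode, per_cpu_ptr(blkg->blkcg->lhead, cpu));
 *		WRITE_ONCE(bis->lqueued, true);
 *		percpu_ref_get(&blkg->refcnt);
 *	}
 */
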
  93
  94/**
  95 * blkcg_css - find the current css
  96 *
  97 * Find the css associated with either the kthread or the current task.
  98 * This may return a dying css, so it is up to the caller to use tryget logic
  99 * to confirm it is alive and well.
 100 */
 101static struct cgroup_subsys_state *blkcg_css(void)
 102{
 103	struct cgroup_subsys_state *css;
 104
 105	css = kthread_blkcg();
 106	if (css)
 107		return css;
 108	return task_css(current, io_cgrp_id);
 109}
 110
 111static bool blkcg_policy_enabled(struct request_queue *q,
 112				 const struct blkcg_policy *pol)
 113{
 114	return pol && test_bit(pol->plid, q->blkcg_pols);
 115}
 116
 117static void blkg_free_workfn(struct work_struct *work)
 118{
 119	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
 120					     free_work);
 121	int i;
 122
 123	for (i = 0; i < BLKCG_MAX_POLS; i++)
 124		if (blkg->pd[i])
 125			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 126
 127	if (blkg->q)
 128		blk_put_queue(blkg->q);
 129	free_percpu(blkg->iostat_cpu);
 130	percpu_ref_exit(&blkg->refcnt);
 131	kfree(blkg);
 132}
 133
 134/**
 135 * blkg_free - free a blkg
 136 * @blkg: blkg to free
 137 *
 138 * Free @blkg which may be partially allocated.
 139 */
 140static void blkg_free(struct blkcg_gq *blkg)
 141{
 142	if (!blkg)
 143		return;
 144
 145	/*
 146	 * Both ->pd_free_fn() and request queue's release handler may
  147	 * sleep, so free the blkg by scheduling a work function
 148	 */
 149	INIT_WORK(&blkg->free_work, blkg_free_workfn);
 150	schedule_work(&blkg->free_work);
 151}
 152
 153static void __blkg_release(struct rcu_head *rcu)
 154{
 155	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
 156
 157	WARN_ON(!bio_list_empty(&blkg->async_bios));
 158
 159	/* release the blkcg and parent blkg refs this blkg has been holding */
 160	css_put(&blkg->blkcg->css);
 161	if (blkg->parent)
 162		blkg_put(blkg->parent);
 163	blkg_free(blkg);
 164}
 165
 166/*
 167 * A group is RCU protected, but having an rcu lock does not mean that one
 168 * can access all the fields of blkg and assume these are valid.  For
 169 * example, don't try to follow throtl_data and request queue links.
 170 *
 171 * Having a reference to blkg under an rcu allows accesses to only values
 172 * local to groups like group stats and group rate limits.
 173 */
 174static void blkg_release(struct percpu_ref *ref)
 175{
 176	struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
 177
 178	call_rcu(&blkg->rcu_head, __blkg_release);
 179}
 180
 181static void blkg_async_bio_workfn(struct work_struct *work)
 182{
 183	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
 184					     async_bio_work);
 185	struct bio_list bios = BIO_EMPTY_LIST;
 186	struct bio *bio;
 187	struct blk_plug plug;
 188	bool need_plug = false;
 189
 190	/* as long as there are pending bios, @blkg can't go away */
 191	spin_lock_bh(&blkg->async_bio_lock);
 192	bio_list_merge(&bios, &blkg->async_bios);
 193	bio_list_init(&blkg->async_bios);
 194	spin_unlock_bh(&blkg->async_bio_lock);
 195
 196	/* start plug only when bio_list contains at least 2 bios */
 197	if (bios.head && bios.head->bi_next) {
 198		need_plug = true;
 199		blk_start_plug(&plug);
 200	}
 201	while ((bio = bio_list_pop(&bios)))
 202		submit_bio(bio);
 203	if (need_plug)
 204		blk_finish_plug(&plug);
 205}
 206
 207/**
 208 * bio_blkcg_css - return the blkcg CSS associated with a bio
 209 * @bio: target bio
 210 *
 211 * This returns the CSS for the blkcg associated with a bio, or %NULL if not
 212 * associated. Callers are expected to either handle %NULL or know association
 213 * has been done prior to calling this.
 214 */
 215struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
 216{
 217	if (!bio || !bio->bi_blkg)
 218		return NULL;
 219	return &bio->bi_blkg->blkcg->css;
 220}
 221EXPORT_SYMBOL_GPL(bio_blkcg_css);
 222
 223/**
 224 * blkcg_parent - get the parent of a blkcg
 225 * @blkcg: blkcg of interest
 226 *
 227 * Return the parent blkcg of @blkcg.  Can be called anytime.
 228 */
 229static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
 230{
 231	return css_to_blkcg(blkcg->css.parent);
 232}
 233
 234/**
 235 * blkg_alloc - allocate a blkg
 236 * @blkcg: block cgroup the new blkg is associated with
 237 * @disk: gendisk the new blkg is associated with
 238 * @gfp_mask: allocation mask to use
 239 *
  240 * Allocate a new blkg associating @blkcg and @disk.
 241 */
 242static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
 243				   gfp_t gfp_mask)
 244{
 245	struct blkcg_gq *blkg;
 246	int i, cpu;
 247
 248	/* alloc and init base part */
 249	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
 250	if (!blkg)
 251		return NULL;
 252
 253	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
 254		goto err_free;
 255
 256	blkg->iostat_cpu = alloc_percpu_gfp(struct blkg_iostat_set, gfp_mask);
 257	if (!blkg->iostat_cpu)
 258		goto err_free;
 259
 260	if (!blk_get_queue(disk->queue))
 261		goto err_free;
 262
 263	blkg->q = disk->queue;
 264	INIT_LIST_HEAD(&blkg->q_node);
 265	spin_lock_init(&blkg->async_bio_lock);
 266	bio_list_init(&blkg->async_bios);
 267	INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
 268	blkg->blkcg = blkcg;
 269
 270	u64_stats_init(&blkg->iostat.sync);
 271	for_each_possible_cpu(cpu) {
 272		u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);
 273		per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg;
 274	}
 275
 276	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 277		struct blkcg_policy *pol = blkcg_policy[i];
 278		struct blkg_policy_data *pd;
 279
 280		if (!blkcg_policy_enabled(disk->queue, pol))
 281			continue;
 282
 283		/* alloc per-policy data and attach it to blkg */
 284		pd = pol->pd_alloc_fn(gfp_mask, disk->queue, blkcg);
 285		if (!pd)
 286			goto err_free;
 287
 288		blkg->pd[i] = pd;
 289		pd->blkg = blkg;
 290		pd->plid = i;
 291	}
 292
 293	return blkg;
 294
 295err_free:
 296	blkg_free(blkg);
 297	return NULL;
 298}
 299
 300/*
 301 * If @new_blkg is %NULL, this function tries to allocate a new one as
 302 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 303 */
 304static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
 305				    struct blkcg_gq *new_blkg)
 306{
 307	struct blkcg_gq *blkg;
 308	int i, ret;
 309
 310	lockdep_assert_held(&disk->queue->queue_lock);
 311
 312	/* request_queue is dying, do not create/recreate a blkg */
 313	if (blk_queue_dying(disk->queue)) {
 314		ret = -ENODEV;
 315		goto err_free_blkg;
 316	}
 317
 318	/* blkg holds a reference to blkcg */
 319	if (!css_tryget_online(&blkcg->css)) {
 320		ret = -ENODEV;
 321		goto err_free_blkg;
 322	}
 323
 324	/* allocate */
 325	if (!new_blkg) {
 326		new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
 327		if (unlikely(!new_blkg)) {
 328			ret = -ENOMEM;
 329			goto err_put_css;
 330		}
 331	}
 332	blkg = new_blkg;
 333
 334	/* link parent */
 335	if (blkcg_parent(blkcg)) {
 336		blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
 337		if (WARN_ON_ONCE(!blkg->parent)) {
 338			ret = -ENODEV;
 339			goto err_put_css;
 340		}
 341		blkg_get(blkg->parent);
 342	}
 343
 344	/* invoke per-policy init */
 345	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 346		struct blkcg_policy *pol = blkcg_policy[i];
 347
 348		if (blkg->pd[i] && pol->pd_init_fn)
 349			pol->pd_init_fn(blkg->pd[i]);
 350	}
 351
 352	/* insert */
 353	spin_lock(&blkcg->lock);
 354	ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
 355	if (likely(!ret)) {
 356		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
 357		list_add(&blkg->q_node, &disk->queue->blkg_list);
 358
 359		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 360			struct blkcg_policy *pol = blkcg_policy[i];
 361
 362			if (blkg->pd[i] && pol->pd_online_fn)
 363				pol->pd_online_fn(blkg->pd[i]);
 364		}
 365	}
 366	blkg->online = true;
 367	spin_unlock(&blkcg->lock);
 368
 369	if (!ret)
 370		return blkg;
 371
  372	/* @blkg failed to be fully initialized, use the usual release path */
 373	blkg_put(blkg);
 374	return ERR_PTR(ret);
 375
 376err_put_css:
 377	css_put(&blkcg->css);
 378err_free_blkg:
 379	blkg_free(new_blkg);
 380	return ERR_PTR(ret);
 381}
 382
 383/**
 384 * blkg_lookup_create - lookup blkg, try to create one if not there
 385 * @blkcg: blkcg of interest
 386 * @disk: gendisk of interest
 387 *
 388 * Lookup blkg for the @blkcg - @disk pair.  If it doesn't exist, try to
 389 * create one.  blkg creation is performed recursively from blkcg_root such
 390 * that all non-root blkg's have access to the parent blkg.  This function
 391 * should be called under RCU read lock and takes @disk->queue->queue_lock.
 392 *
 393 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 394 * down from root.
 395 */
 396static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 397		struct gendisk *disk)
 398{
 399	struct request_queue *q = disk->queue;
 400	struct blkcg_gq *blkg;
 401	unsigned long flags;
 402
 403	WARN_ON_ONCE(!rcu_read_lock_held());
 404
 405	blkg = blkg_lookup(blkcg, q);
 406	if (blkg)
 407		return blkg;
 408
 409	spin_lock_irqsave(&q->queue_lock, flags);
 410	blkg = blkg_lookup(blkcg, q);
 411	if (blkg) {
 412		if (blkcg != &blkcg_root &&
 413		    blkg != rcu_dereference(blkcg->blkg_hint))
 414			rcu_assign_pointer(blkcg->blkg_hint, blkg);
 415		goto found;
 416	}
 417
 418	/*
 419	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
 420	 * non-root blkgs have access to their parents.  Returns the closest
 421	 * blkg to the intended blkg should blkg_create() fail.
 422	 */
 423	while (true) {
 424		struct blkcg *pos = blkcg;
 425		struct blkcg *parent = blkcg_parent(blkcg);
 426		struct blkcg_gq *ret_blkg = q->root_blkg;
 427
 428		while (parent) {
 429			blkg = blkg_lookup(parent, q);
 430			if (blkg) {
 431				/* remember closest blkg */
 432				ret_blkg = blkg;
 433				break;
 434			}
 435			pos = parent;
 436			parent = blkcg_parent(parent);
 437		}
 438
 439		blkg = blkg_create(pos, disk, NULL);
 440		if (IS_ERR(blkg)) {
 441			blkg = ret_blkg;
 442			break;
 443		}
 444		if (pos == blkcg)
 445			break;
 446	}
 447
 448found:
 449	spin_unlock_irqrestore(&q->queue_lock, flags);
 450	return blkg;
 451}
 452
 453static void blkg_destroy(struct blkcg_gq *blkg)
 454{
 455	struct blkcg *blkcg = blkg->blkcg;
 456	int i;
 457
 458	lockdep_assert_held(&blkg->q->queue_lock);
 459	lockdep_assert_held(&blkcg->lock);
 460
 461	/* Something wrong if we are trying to remove same group twice */
 462	WARN_ON_ONCE(list_empty(&blkg->q_node));
 463	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 464
 465	for (i = 0; i < BLKCG_MAX_POLS; i++) {
 466		struct blkcg_policy *pol = blkcg_policy[i];
 467
 468		if (blkg->pd[i] && pol->pd_offline_fn)
 469			pol->pd_offline_fn(blkg->pd[i]);
 470	}
 471
 472	blkg->online = false;
 473
 474	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
 475	list_del_init(&blkg->q_node);
 476	hlist_del_init_rcu(&blkg->blkcg_node);
 477
 478	/*
 479	 * Both setting lookup hint to and clearing it from @blkg are done
 480	 * under queue_lock.  If it's not pointing to @blkg now, it never
 481	 * will.  Hint assignment itself can race safely.
 482	 */
 483	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
 484		rcu_assign_pointer(blkcg->blkg_hint, NULL);
 485
 486	/*
 487	 * Put the reference taken at the time of creation so that when all
 488	 * queues are gone, group can be destroyed.
 489	 */
 490	percpu_ref_kill(&blkg->refcnt);
 491}
 492
 493static void blkg_destroy_all(struct gendisk *disk)
 494{
 495	struct request_queue *q = disk->queue;
 496	struct blkcg_gq *blkg, *n;
 497	int count = BLKG_DESTROY_BATCH_SIZE;
 498
 499restart:
 500	spin_lock_irq(&q->queue_lock);
 501	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 502		struct blkcg *blkcg = blkg->blkcg;
 503
 504		spin_lock(&blkcg->lock);
 505		blkg_destroy(blkg);
 506		spin_unlock(&blkcg->lock);
 507
 508		/*
  509		 * In order to avoid holding the spin lock for too long, release
  510		 * it after a batch of blkgs has been destroyed.
 511		 */
 512		if (!(--count)) {
 513			count = BLKG_DESTROY_BATCH_SIZE;
 514			spin_unlock_irq(&q->queue_lock);
 515			cond_resched();
 516			goto restart;
 517		}
 518	}
 519
 520	q->root_blkg = NULL;
 521	spin_unlock_irq(&q->queue_lock);
 522}
 523
 524static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 525			     struct cftype *cftype, u64 val)
 526{
 527	struct blkcg *blkcg = css_to_blkcg(css);
 528	struct blkcg_gq *blkg;
 529	int i, cpu;
 530
 531	mutex_lock(&blkcg_pol_mutex);
 532	spin_lock_irq(&blkcg->lock);
 533
 534	/*
 535	 * Note that stat reset is racy - it doesn't synchronize against
 536	 * stat updates.  This is a debug feature which shouldn't exist
 537	 * anyway.  If you get hit by a race, retry.
 538	 */
 539	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 540		for_each_possible_cpu(cpu) {
 541			struct blkg_iostat_set *bis =
 542				per_cpu_ptr(blkg->iostat_cpu, cpu);
 543			memset(bis, 0, sizeof(*bis));
 544		}
 545		memset(&blkg->iostat, 0, sizeof(blkg->iostat));
 546
 547		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 548			struct blkcg_policy *pol = blkcg_policy[i];
 549
 550			if (blkg->pd[i] && pol->pd_reset_stats_fn)
 551				pol->pd_reset_stats_fn(blkg->pd[i]);
 552		}
 553	}
 554
 555	spin_unlock_irq(&blkcg->lock);
 556	mutex_unlock(&blkcg_pol_mutex);
 557	return 0;
 558}
 559
 560const char *blkg_dev_name(struct blkcg_gq *blkg)
 561{
 562	if (!blkg->q->disk || !blkg->q->disk->bdi->dev)
 563		return NULL;
 564	return bdi_dev_name(blkg->q->disk->bdi);
 565}
 566
 567/**
 568 * blkcg_print_blkgs - helper for printing per-blkg data
 569 * @sf: seq_file to print to
 570 * @blkcg: blkcg of interest
 571 * @prfill: fill function to print out a blkg
 572 * @pol: policy in question
 573 * @data: data to be passed to @prfill
 574 * @show_total: to print out sum of prfill return values or not
 575 *
 576 * This function invokes @prfill on each blkg of @blkcg if pd for the
 577 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 578 * policy data and @data and the matching queue lock held.  If @show_total
 579 * is %true, the sum of the return values from @prfill is printed with
 580 * "Total" label at the end.
 581 *
 582 * This is to be used to construct print functions for
 583 * cftype->read_seq_string method.
 584 */
 585void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 586		       u64 (*prfill)(struct seq_file *,
 587				     struct blkg_policy_data *, int),
 588		       const struct blkcg_policy *pol, int data,
 589		       bool show_total)
 590{
 591	struct blkcg_gq *blkg;
 592	u64 total = 0;
 593
 594	rcu_read_lock();
 595	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 596		spin_lock_irq(&blkg->q->queue_lock);
 597		if (blkcg_policy_enabled(blkg->q, pol))
 598			total += prfill(sf, blkg->pd[pol->plid], data);
 599		spin_unlock_irq(&blkg->q->queue_lock);
 600	}
 601	rcu_read_unlock();
 602
 603	if (show_total)
 604		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
 605}
 606EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
 607
 608/**
 609 * __blkg_prfill_u64 - prfill helper for a single u64 value
 610 * @sf: seq_file to print to
 611 * @pd: policy private data of interest
 612 * @v: value to print
 613 *
 614 * Print @v to @sf for the device associated with @pd.
 615 */
 616u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
 617{
 618	const char *dname = blkg_dev_name(pd->blkg);
 619
 620	if (!dname)
 621		return 0;
 622
 623	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
 624	return v;
 625}
 626EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
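/*
 * A minimal sketch of the intended usage: a policy's cftype ->seq_show
 * handler pairs blkcg_print_blkgs() with a prfill callback, often built on
 * __blkg_prfill_u64().  The "foo" names below are hypothetical.
 *
 *	static u64 foo_prfill_limit(struct seq_file *sf,
 *				    struct blkg_policy_data *pd, int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd, pd_to_foo(pd)->limit);
 *	}
 *
 *	static int foo_print_limit(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  foo_prfill_limit, &blkcg_policy_foo, 0, false);
 *		return 0;
 *	}
 */
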
 627
 628/**
 629 * blkcg_conf_open_bdev - parse and open bdev for per-blkg config update
 630 * @inputp: input string pointer
 631 *
 632 * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
 633 * from @input and get and return the matching bdev.  *@inputp is
 634 * updated to point past the device node prefix.  Returns an ERR_PTR()
 635 * value on error.
 636 *
 637 * Use this function iff blkg_conf_prep() can't be used for some reason.
 638 */
 639struct block_device *blkcg_conf_open_bdev(char **inputp)
 640{
 641	char *input = *inputp;
 642	unsigned int major, minor;
 643	struct block_device *bdev;
 644	int key_len;
 645
 646	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
 647		return ERR_PTR(-EINVAL);
 648
 649	input += key_len;
 650	if (!isspace(*input))
 651		return ERR_PTR(-EINVAL);
 652	input = skip_spaces(input);
 653
 654	bdev = blkdev_get_no_open(MKDEV(major, minor));
 655	if (!bdev)
 656		return ERR_PTR(-ENODEV);
 657	if (bdev_is_partition(bdev)) {
 658		blkdev_put_no_open(bdev);
 659		return ERR_PTR(-ENODEV);
 660	}
 661
 662	*inputp = input;
 663	return bdev;
 664}
 665
 666/**
 667 * blkg_conf_prep - parse and prepare for per-blkg config update
 668 * @blkcg: target block cgroup
 669 * @pol: target policy
 670 * @input: input string
 671 * @ctx: blkg_conf_ctx to be filled
 672 *
 673 * Parse per-blkg config update from @input and initialize @ctx with the
 674 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 675 * part of @input following MAJ:MIN.  This function returns with RCU read
 676 * lock and queue lock held and must be paired with blkg_conf_finish().
 677 */
 678int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 679		   char *input, struct blkg_conf_ctx *ctx)
 680	__acquires(rcu) __acquires(&bdev->bd_queue->queue_lock)
 681{
 682	struct block_device *bdev;
 683	struct gendisk *disk;
 684	struct request_queue *q;
 685	struct blkcg_gq *blkg;
 686	int ret;
 687
 688	bdev = blkcg_conf_open_bdev(&input);
 689	if (IS_ERR(bdev))
 690		return PTR_ERR(bdev);
 691	disk = bdev->bd_disk;
 692	q = disk->queue;
 693
 694	/*
  695	 * blkcg_deactivate_policy() requires the queue to be frozen; grab
  696	 * q_usage_counter to prevent racing with blkcg_deactivate_policy().
 697	 */
 698	ret = blk_queue_enter(q, 0);
 699	if (ret)
 700		goto fail;
 701
 702	rcu_read_lock();
 703	spin_lock_irq(&q->queue_lock);
 704
 705	if (!blkcg_policy_enabled(q, pol)) {
 706		ret = -EOPNOTSUPP;
 707		goto fail_unlock;
 708	}
 709
 710	blkg = blkg_lookup(blkcg, q);
 711	if (blkg)
 712		goto success;
 713
 714	/*
 715	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
 716	 * non-root blkgs have access to their parents.
 717	 */
 718	while (true) {
 719		struct blkcg *pos = blkcg;
 720		struct blkcg *parent;
 721		struct blkcg_gq *new_blkg;
 722
 723		parent = blkcg_parent(blkcg);
 724		while (parent && !blkg_lookup(parent, q)) {
 725			pos = parent;
 726			parent = blkcg_parent(parent);
 727		}
 728
 729		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
 730		spin_unlock_irq(&q->queue_lock);
 731		rcu_read_unlock();
 732
 733		new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
 734		if (unlikely(!new_blkg)) {
 735			ret = -ENOMEM;
 736			goto fail_exit_queue;
 737		}
 738
 739		if (radix_tree_preload(GFP_KERNEL)) {
 740			blkg_free(new_blkg);
 741			ret = -ENOMEM;
 742			goto fail_exit_queue;
 743		}
 744
 745		rcu_read_lock();
 746		spin_lock_irq(&q->queue_lock);
 747
 748		if (!blkcg_policy_enabled(q, pol)) {
 749			blkg_free(new_blkg);
 750			ret = -EOPNOTSUPP;
 751			goto fail_preloaded;
 752		}
 753
 754		blkg = blkg_lookup(pos, q);
 755		if (blkg) {
 756			blkg_free(new_blkg);
 757		} else {
 758			blkg = blkg_create(pos, disk, new_blkg);
 759			if (IS_ERR(blkg)) {
 760				ret = PTR_ERR(blkg);
 761				goto fail_preloaded;
 762			}
 763		}
 764
 765		radix_tree_preload_end();
 766
 767		if (pos == blkcg)
 768			goto success;
 769	}
 770success:
 771	blk_queue_exit(q);
 772	ctx->bdev = bdev;
 773	ctx->blkg = blkg;
 774	ctx->body = input;
 775	return 0;
 776
 777fail_preloaded:
 778	radix_tree_preload_end();
 779fail_unlock:
 780	spin_unlock_irq(&q->queue_lock);
 781	rcu_read_unlock();
 782fail_exit_queue:
 783	blk_queue_exit(q);
 784fail:
 785	blkdev_put_no_open(bdev);
 786	/*
 787	 * If queue was bypassing, we should retry.  Do so after a
 788	 * short msleep().  It isn't strictly necessary but queue
 789	 * can be bypassing for some time and it's always nice to
 790	 * avoid busy looping.
 791	 */
 792	if (ret == -EBUSY) {
 793		msleep(10);
 794		ret = restart_syscall();
 795	}
 796	return ret;
 797}
 798EXPORT_SYMBOL_GPL(blkg_conf_prep);
 799
 800/**
 801 * blkg_conf_finish - finish up per-blkg config update
 802 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 803 *
 804 * Finish up after per-blkg config update.  This function must be paired
 805 * with blkg_conf_prep().
 806 */
 807void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 808	__releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu)
 809{
 810	spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock);
 811	rcu_read_unlock();
 812	blkdev_put_no_open(ctx->bdev);
 813}
 814EXPORT_SYMBOL_GPL(blkg_conf_finish);
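/*
 * A minimal sketch of the prep/finish pairing from a policy's cftype
 * ->write handler; the "foo" names are hypothetical and error handling is
 * trimmed to the essentials:
 *
 *	static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 v;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = -EINVAL;
 *		if (sscanf(ctx.body, "%llu", &v) == 1) {
 *			pd_to_foo(ctx.blkg->pd[blkcg_policy_foo.plid])->limit = v;
 *			ret = 0;
 *		}
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */
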
 815
 816static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
 817{
 818	int i;
 819
 820	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
 821		dst->bytes[i] = src->bytes[i];
 822		dst->ios[i] = src->ios[i];
 823	}
 824}
 825
 826static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
 827{
 828	int i;
 829
 830	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
 831		dst->bytes[i] += src->bytes[i];
 832		dst->ios[i] += src->ios[i];
 833	}
 834}
 835
 836static void blkg_iostat_sub(struct blkg_iostat *dst, struct blkg_iostat *src)
 837{
 838	int i;
 839
 840	for (i = 0; i < BLKG_IOSTAT_NR; i++) {
 841		dst->bytes[i] -= src->bytes[i];
 842		dst->ios[i] -= src->ios[i];
 843	}
 844}
 845
 846static void blkcg_iostat_update(struct blkcg_gq *blkg, struct blkg_iostat *cur,
 847				struct blkg_iostat *last)
 848{
 849	struct blkg_iostat delta;
 850	unsigned long flags;
 851
 852	/* propagate percpu delta to global */
 853	flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
 854	blkg_iostat_set(&delta, cur);
 855	blkg_iostat_sub(&delta, last);
 856	blkg_iostat_add(&blkg->iostat.cur, &delta);
 857	blkg_iostat_add(last, &delta);
 858	u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
 859}
 860
 861static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 862{
 863	struct blkcg *blkcg = css_to_blkcg(css);
 864	struct llist_head *lhead = per_cpu_ptr(blkcg->lhead, cpu);
 865	struct llist_node *lnode;
 866	struct blkg_iostat_set *bisc, *next_bisc;
 867
 868	/* Root-level stats are sourced from system-wide IO stats */
 869	if (!cgroup_parent(css->cgroup))
 870		return;
 871
 872	rcu_read_lock();
 873
 874	lnode = llist_del_all(lhead);
 875	if (!lnode)
 876		goto out;
 877
 878	/*
 879	 * Iterate only the iostat_cpu's queued in the lockless list.
 880	 */
 881	llist_for_each_entry_safe(bisc, next_bisc, lnode, lnode) {
 882		struct blkcg_gq *blkg = bisc->blkg;
 883		struct blkcg_gq *parent = blkg->parent;
 884		struct blkg_iostat cur;
 885		unsigned int seq;
 886
 887		WRITE_ONCE(bisc->lqueued, false);
 888
 889		/* fetch the current per-cpu values */
 890		do {
 891			seq = u64_stats_fetch_begin(&bisc->sync);
 892			blkg_iostat_set(&cur, &bisc->cur);
 893		} while (u64_stats_fetch_retry(&bisc->sync, seq));
 894
 895		blkcg_iostat_update(blkg, &cur, &bisc->last);
 896
 897		/* propagate global delta to parent (unless that's root) */
 898		if (parent && parent->parent)
 899			blkcg_iostat_update(parent, &blkg->iostat.cur,
 900					    &blkg->iostat.last);
 901		percpu_ref_put(&blkg->refcnt);
 902	}
 903
 904out:
 905	rcu_read_unlock();
 906}
 907
 908/*
 909 * We source root cgroup stats from the system-wide stats to avoid
 910 * tracking the same information twice and incurring overhead when no
 911 * cgroups are defined. For that reason, cgroup_rstat_flush in
 912 * blkcg_print_stat does not actually fill out the iostat in the root
 913 * cgroup's blkcg_gq.
 914 *
 915 * However, we would like to re-use the printing code between the root and
 916 * non-root cgroups to the extent possible. For that reason, we simulate
 917 * flushing the root cgroup's stats by explicitly filling in the iostat
 918 * with disk level statistics.
 919 */
 920static void blkcg_fill_root_iostats(void)
 921{
 922	struct class_dev_iter iter;
 923	struct device *dev;
 924
 925	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 926	while ((dev = class_dev_iter_next(&iter))) {
 927		struct block_device *bdev = dev_to_bdev(dev);
 928		struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
 929		struct blkg_iostat tmp;
 930		int cpu;
 931		unsigned long flags;
 932
 933		memset(&tmp, 0, sizeof(tmp));
 934		for_each_possible_cpu(cpu) {
 935			struct disk_stats *cpu_dkstats;
 936
 937			cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
 938			tmp.ios[BLKG_IOSTAT_READ] +=
 939				cpu_dkstats->ios[STAT_READ];
 940			tmp.ios[BLKG_IOSTAT_WRITE] +=
 941				cpu_dkstats->ios[STAT_WRITE];
 942			tmp.ios[BLKG_IOSTAT_DISCARD] +=
 943				cpu_dkstats->ios[STAT_DISCARD];
 944			// convert sectors to bytes
 945			tmp.bytes[BLKG_IOSTAT_READ] +=
 946				cpu_dkstats->sectors[STAT_READ] << 9;
 947			tmp.bytes[BLKG_IOSTAT_WRITE] +=
 948				cpu_dkstats->sectors[STAT_WRITE] << 9;
 949			tmp.bytes[BLKG_IOSTAT_DISCARD] +=
 950				cpu_dkstats->sectors[STAT_DISCARD] << 9;
 951		}
 952
 953		flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync);
 954		blkg_iostat_set(&blkg->iostat.cur, &tmp);
 955		u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags);
 956	}
 957}
 958
 959static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
 960{
 961	struct blkg_iostat_set *bis = &blkg->iostat;
 962	u64 rbytes, wbytes, rios, wios, dbytes, dios;
 963	const char *dname;
 964	unsigned seq;
 965	int i;
 966
 967	if (!blkg->online)
 968		return;
 969
 970	dname = blkg_dev_name(blkg);
 971	if (!dname)
 972		return;
 973
 974	seq_printf(s, "%s ", dname);
 975
 976	do {
 977		seq = u64_stats_fetch_begin(&bis->sync);
 978
 979		rbytes = bis->cur.bytes[BLKG_IOSTAT_READ];
 980		wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE];
 981		dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD];
 982		rios = bis->cur.ios[BLKG_IOSTAT_READ];
 983		wios = bis->cur.ios[BLKG_IOSTAT_WRITE];
 984		dios = bis->cur.ios[BLKG_IOSTAT_DISCARD];
 985	} while (u64_stats_fetch_retry(&bis->sync, seq));
 986
 987	if (rbytes || wbytes || rios || wios) {
 988		seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
 989			rbytes, wbytes, rios, wios,
 990			dbytes, dios);
 991	}
 992
 993	if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
 994		seq_printf(s, " use_delay=%d delay_nsec=%llu",
 995			atomic_read(&blkg->use_delay),
 996			atomic64_read(&blkg->delay_nsec));
 997	}
 998
 999	for (i = 0; i < BLKCG_MAX_POLS; i++) {
1000		struct blkcg_policy *pol = blkcg_policy[i];
1001
1002		if (!blkg->pd[i] || !pol->pd_stat_fn)
1003			continue;
1004
1005		pol->pd_stat_fn(blkg->pd[i], s);
1006	}
1007
1008	seq_puts(s, "\n");
1009}
1010
1011static int blkcg_print_stat(struct seq_file *sf, void *v)
1012{
1013	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1014	struct blkcg_gq *blkg;
1015
1016	if (!seq_css(sf)->parent)
1017		blkcg_fill_root_iostats();
1018	else
1019		cgroup_rstat_flush(blkcg->css.cgroup);
1020
1021	rcu_read_lock();
1022	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
1023		spin_lock_irq(&blkg->q->queue_lock);
1024		blkcg_print_one_stat(blkg, sf);
1025		spin_unlock_irq(&blkg->q->queue_lock);
1026	}
1027	rcu_read_unlock();
1028	return 0;
1029}
1030
1031static struct cftype blkcg_files[] = {
1032	{
1033		.name = "stat",
1034		.seq_show = blkcg_print_stat,
1035	},
1036	{ }	/* terminate */
1037};
1038
1039static struct cftype blkcg_legacy_files[] = {
1040	{
1041		.name = "reset_stats",
1042		.write_u64 = blkcg_reset_stats,
1043	},
1044	{ }	/* terminate */
1045};
1046
1047#ifdef CONFIG_CGROUP_WRITEBACK
1048struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css)
1049{
1050	return &css_to_blkcg(css)->cgwb_list;
1051}
1052#endif
1053
1054/*
1055 * blkcg destruction is a three-stage process.
1056 *
1057 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
1058 *    which offlines writeback.  Here we tie the next stage of blkg destruction
1059 *    to the completion of writeback associated with the blkcg.  This lets us
1060 *    avoid punting potentially large amounts of outstanding writeback to root
1061 *    while maintaining any ongoing policies.  The next stage is triggered when
1062 *    the nr_cgwbs count goes to zero.
1063 *
1064 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
1065 *    and handles the destruction of blkgs.  Here the css reference held by
1066 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
1067 *    This work may occur in cgwb_release_workfn() on the cgwb_release
1068 *    workqueue.  Any submitted ios that fail to get the blkg ref will be
1069 *    punted to the root_blkg.
1070 *
1071 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1072 *    This finally frees the blkcg.
1073 */
1074
1075/**
1076 * blkcg_destroy_blkgs - responsible for shooting down blkgs
1077 * @blkcg: blkcg of interest
1078 *
1079 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
1080 * is nested inside q lock, this function performs reverse double lock dancing.
1081 * Destroying the blkgs releases the reference held on the blkcg's css allowing
1082 * blkcg_css_free to eventually be called.
1083 *
1084 * This is the blkcg counterpart of ioc_release_fn().
1085 */
1086static void blkcg_destroy_blkgs(struct blkcg *blkcg)
1087{
1088	might_sleep();
1089
1090	spin_lock_irq(&blkcg->lock);
1091
1092	while (!hlist_empty(&blkcg->blkg_list)) {
1093		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1094						struct blkcg_gq, blkcg_node);
1095		struct request_queue *q = blkg->q;
1096
1097		if (need_resched() || !spin_trylock(&q->queue_lock)) {
1098			/*
1099			 * Given that the system can accumulate a huge number
1100			 * of blkgs in pathological cases, check to see if we
1101			 * need to reschedule to avoid a softlockup.
1102			 */
1103			spin_unlock_irq(&blkcg->lock);
1104			cond_resched();
1105			spin_lock_irq(&blkcg->lock);
1106			continue;
1107		}
1108
1109		blkg_destroy(blkg);
1110		spin_unlock(&q->queue_lock);
1111	}
1112
1113	spin_unlock_irq(&blkcg->lock);
1114}
1115
1116/**
1117 * blkcg_pin_online - pin online state
1118 * @blkcg_css: blkcg of interest
1119 *
1120 * While pinned, a blkcg is kept online.  This is primarily used to
1121 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
1122 * while an associated cgwb is still active.
1123 */
1124void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css)
1125{
1126	refcount_inc(&css_to_blkcg(blkcg_css)->online_pin);
1127}
1128
1129/**
1130 * blkcg_unpin_online - unpin online state
1131 * @blkcg_css: blkcg of interest
1132 *
1133 * This is primarily used to impedance-match blkg and cgwb lifetimes so
1134 * that blkg doesn't go offline while an associated cgwb is still active.
1135 * When this count goes to zero, all active cgwbs have finished so the
1136 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
1137 */
1138void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css)
1139{
1140	struct blkcg *blkcg = css_to_blkcg(blkcg_css);
1141
1142	do {
1143		if (!refcount_dec_and_test(&blkcg->online_pin))
1144			break;
1145		blkcg_destroy_blkgs(blkcg);
1146		blkcg = blkcg_parent(blkcg);
1147	} while (blkcg);
1148}
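/*
 * A minimal sketch of how the writeback side pairs these: a cgwb pins the
 * blkcg's online state when it is created and drops the pin from its
 * release path, so blkg destruction only proceeds once all writeback for
 * the blkcg has completed (see cgwb_create() / cgwb_release_workfn()):
 *
 *	blkcg_pin_online(blkcg_css);		at cgwb creation
 *	...
 *	blkcg_unpin_online(blkcg_css);		when the cgwb is released
 */
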
1149
1150/**
1151 * blkcg_css_offline - cgroup css_offline callback
1152 * @css: css of interest
1153 *
1154 * This function is called when @css is about to go away.  Here the cgwbs are
1155 * offlined first and only once writeback associated with the blkcg has
1156 * finished do we start step 2 (see above).
1157 */
1158static void blkcg_css_offline(struct cgroup_subsys_state *css)
1159{
1160	/* this prevents anyone from attaching or migrating to this blkcg */
1161	wb_blkcg_offline(css);
1162
1163	/* put the base online pin allowing step 2 to be triggered */
1164	blkcg_unpin_online(css);
1165}
1166
1167static void blkcg_css_free(struct cgroup_subsys_state *css)
1168{
1169	struct blkcg *blkcg = css_to_blkcg(css);
1170	int i;
1171
1172	mutex_lock(&blkcg_pol_mutex);
1173
1174	list_del(&blkcg->all_blkcgs_node);
1175
1176	for (i = 0; i < BLKCG_MAX_POLS; i++)
1177		if (blkcg->cpd[i])
1178			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1179
1180	mutex_unlock(&blkcg_pol_mutex);
1181
1182	free_percpu(blkcg->lhead);
1183	kfree(blkcg);
1184}
1185
1186static struct cgroup_subsys_state *
1187blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
1188{
1189	struct blkcg *blkcg;
1190	int i;
1191
1192	mutex_lock(&blkcg_pol_mutex);
1193
1194	if (!parent_css) {
1195		blkcg = &blkcg_root;
1196	} else {
1197		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1198		if (!blkcg)
1199			goto unlock;
1200	}
1201
1202	if (init_blkcg_llists(blkcg))
1203		goto free_blkcg;
1204
1205	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
1206		struct blkcg_policy *pol = blkcg_policy[i];
1207		struct blkcg_policy_data *cpd;
1208
1209		/*
1210		 * If the policy hasn't been attached yet, wait for it
1211		 * to be attached before doing anything else. Otherwise,
1212		 * check if the policy requires any specific per-cgroup
1213		 * data: if it does, allocate and initialize it.
1214		 */
1215		if (!pol || !pol->cpd_alloc_fn)
1216			continue;
1217
1218		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1219		if (!cpd)
1220			goto free_pd_blkcg;
1221
1222		blkcg->cpd[i] = cpd;
1223		cpd->blkcg = blkcg;
1224		cpd->plid = i;
1225		if (pol->cpd_init_fn)
1226			pol->cpd_init_fn(cpd);
1227	}
1228
1229	spin_lock_init(&blkcg->lock);
1230	refcount_set(&blkcg->online_pin, 1);
1231	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1232	INIT_HLIST_HEAD(&blkcg->blkg_list);
1233#ifdef CONFIG_CGROUP_WRITEBACK
1234	INIT_LIST_HEAD(&blkcg->cgwb_list);
1235#endif
1236	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1237
1238	mutex_unlock(&blkcg_pol_mutex);
1239	return &blkcg->css;
1240
1241free_pd_blkcg:
1242	for (i--; i >= 0; i--)
1243		if (blkcg->cpd[i])
1244			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1245	free_percpu(blkcg->lhead);
1246free_blkcg:
1247	if (blkcg != &blkcg_root)
1248		kfree(blkcg);
1249unlock:
1250	mutex_unlock(&blkcg_pol_mutex);
1251	return ERR_PTR(-ENOMEM);
1252}
1253
1254static int blkcg_css_online(struct cgroup_subsys_state *css)
1255{
1256	struct blkcg *parent = blkcg_parent(css_to_blkcg(css));
1257
1258	/*
1259	 * blkcg_pin_online() is used to delay blkcg offline so that blkgs
1260	 * don't go offline while cgwbs are still active on them.  Pin the
1261	 * parent so that offline always happens towards the root.
1262	 */
1263	if (parent)
1264		blkcg_pin_online(&parent->css);
1265	return 0;
1266}
1267
1268int blkcg_init_disk(struct gendisk *disk)
1269{
1270	struct request_queue *q = disk->queue;
1271	struct blkcg_gq *new_blkg, *blkg;
1272	bool preloaded;
1273	int ret;
1274
1275	INIT_LIST_HEAD(&q->blkg_list);
1276
1277	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
1278	if (!new_blkg)
1279		return -ENOMEM;
1280
1281	preloaded = !radix_tree_preload(GFP_KERNEL);
1282
1283	/* Make sure the root blkg exists. */
1284	/* spin_lock_irq can serve as RCU read-side critical section. */
1285	spin_lock_irq(&q->queue_lock);
1286	blkg = blkg_create(&blkcg_root, disk, new_blkg);
1287	if (IS_ERR(blkg))
1288		goto err_unlock;
1289	q->root_blkg = blkg;
1290	spin_unlock_irq(&q->queue_lock);
1291
1292	if (preloaded)
1293		radix_tree_preload_end();
1294
1295	ret = blk_ioprio_init(disk);
1296	if (ret)
1297		goto err_destroy_all;
1298
1299	ret = blk_throtl_init(disk);
1300	if (ret)
1301		goto err_ioprio_exit;
1302
1303	ret = blk_iolatency_init(disk);
1304	if (ret)
1305		goto err_throtl_exit;
1306
1307	return 0;
1308
1309err_throtl_exit:
1310	blk_throtl_exit(disk);
1311err_ioprio_exit:
1312	blk_ioprio_exit(disk);
1313err_destroy_all:
1314	blkg_destroy_all(disk);
1315	return ret;
1316err_unlock:
1317	spin_unlock_irq(&q->queue_lock);
1318	if (preloaded)
1319		radix_tree_preload_end();
1320	return PTR_ERR(blkg);
1321}
1322
1323void blkcg_exit_disk(struct gendisk *disk)
1324{
1325	blkg_destroy_all(disk);
1326	rq_qos_exit(disk->queue);
1327	blk_throtl_exit(disk);
1328}
1329
1330static void blkcg_bind(struct cgroup_subsys_state *root_css)
1331{
1332	int i;
1333
1334	mutex_lock(&blkcg_pol_mutex);
1335
1336	for (i = 0; i < BLKCG_MAX_POLS; i++) {
1337		struct blkcg_policy *pol = blkcg_policy[i];
1338		struct blkcg *blkcg;
1339
1340		if (!pol || !pol->cpd_bind_fn)
1341			continue;
1342
1343		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1344			if (blkcg->cpd[pol->plid])
1345				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1346	}
1347	mutex_unlock(&blkcg_pol_mutex);
1348}
1349
1350static void blkcg_exit(struct task_struct *tsk)
1351{
1352	if (tsk->throttle_queue)
1353		blk_put_queue(tsk->throttle_queue);
1354	tsk->throttle_queue = NULL;
1355}
1356
1357struct cgroup_subsys io_cgrp_subsys = {
1358	.css_alloc = blkcg_css_alloc,
1359	.css_online = blkcg_css_online,
1360	.css_offline = blkcg_css_offline,
1361	.css_free = blkcg_css_free,
1362	.css_rstat_flush = blkcg_rstat_flush,
1363	.bind = blkcg_bind,
1364	.dfl_cftypes = blkcg_files,
1365	.legacy_cftypes = blkcg_legacy_files,
1366	.legacy_name = "blkio",
1367	.exit = blkcg_exit,
1368#ifdef CONFIG_MEMCG
1369	/*
1370	 * This ensures that, if available, memcg is automatically enabled
1371	 * together on the default hierarchy so that the owner cgroup can
1372	 * be retrieved from writeback pages.
1373	 */
1374	.depends_on = 1 << memory_cgrp_id,
1375#endif
1376};
1377EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1378
1379/**
1380 * blkcg_activate_policy - activate a blkcg policy on a request_queue
1381 * @q: request_queue of interest
1382 * @pol: blkcg policy to activate
1383 *
1384 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
1385 * bypass mode to populate its blkgs with policy_data for @pol.
1386 *
1387 * Activation happens with @q bypassed, so nobody would be accessing blkgs
1388 * from IO path.  Update of each blkg is protected by both queue and blkcg
1389 * locks so that holding either lock and testing blkcg_policy_enabled() is
1390 * always enough for dereferencing policy data.
1391 *
1392 * The caller is responsible for synchronizing [de]activations and policy
1393 * [un]registerations.  Returns 0 on success, -errno on failure.
1394 */
1395int blkcg_activate_policy(struct request_queue *q,
1396			  const struct blkcg_policy *pol)
1397{
1398	struct blkg_policy_data *pd_prealloc = NULL;
1399	struct blkcg_gq *blkg, *pinned_blkg = NULL;
1400	int ret;
1401
1402	if (blkcg_policy_enabled(q, pol))
1403		return 0;
1404
1405	if (queue_is_mq(q))
1406		blk_mq_freeze_queue(q);
1407retry:
1408	spin_lock_irq(&q->queue_lock);
1409
1410	/* blkg_list entries are pushed at the head; walk in reverse so parents are allocated first */
1411	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
1412		struct blkg_policy_data *pd;
1413
1414		if (blkg->pd[pol->plid])
1415			continue;
1416
1417		/* If prealloc matches, use it; otherwise try GFP_NOWAIT */
1418		if (blkg == pinned_blkg) {
1419			pd = pd_prealloc;
1420			pd_prealloc = NULL;
1421		} else {
1422			pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
1423					      blkg->blkcg);
1424		}
1425
1426		if (!pd) {
1427			/*
1428			 * GFP_NOWAIT failed.  Free the existing one and
1429			 * prealloc for @blkg w/ GFP_KERNEL.
1430			 */
1431			if (pinned_blkg)
1432				blkg_put(pinned_blkg);
1433			blkg_get(blkg);
1434			pinned_blkg = blkg;
1435
1436			spin_unlock_irq(&q->queue_lock);
1437
1438			if (pd_prealloc)
1439				pol->pd_free_fn(pd_prealloc);
1440			pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
1441						       blkg->blkcg);
1442			if (pd_prealloc)
1443				goto retry;
1444			else
1445				goto enomem;
1446		}
1447
1448		blkg->pd[pol->plid] = pd;
1449		pd->blkg = blkg;
1450		pd->plid = pol->plid;
1451	}
1452
1453	/* all allocated, init in the same order */
1454	if (pol->pd_init_fn)
1455		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
1456			pol->pd_init_fn(blkg->pd[pol->plid]);
1457
1458	if (pol->pd_online_fn)
1459		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
1460			pol->pd_online_fn(blkg->pd[pol->plid]);
1461
1462	__set_bit(pol->plid, q->blkcg_pols);
1463	ret = 0;
1464
1465	spin_unlock_irq(&q->queue_lock);
1466out:
1467	if (queue_is_mq(q))
1468		blk_mq_unfreeze_queue(q);
1469	if (pinned_blkg)
1470		blkg_put(pinned_blkg);
1471	if (pd_prealloc)
1472		pol->pd_free_fn(pd_prealloc);
1473	return ret;
1474
1475enomem:
1476	/* alloc failed, nothing's initialized yet, free everything */
1477	spin_lock_irq(&q->queue_lock);
1478	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1479		struct blkcg *blkcg = blkg->blkcg;
1480
1481		spin_lock(&blkcg->lock);
1482		if (blkg->pd[pol->plid]) {
1483			pol->pd_free_fn(blkg->pd[pol->plid]);
1484			blkg->pd[pol->plid] = NULL;
1485		}
1486		spin_unlock(&blkcg->lock);
1487	}
1488	spin_unlock_irq(&q->queue_lock);
1489	ret = -ENOMEM;
1490	goto out;
1491}
1492EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1493
1494/**
1495 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1496 * @q: request_queue of interest
1497 * @pol: blkcg policy to deactivate
1498 *
1499 * Deactivate @pol on @q.  Follows the same synchronization rules as
1500 * blkcg_activate_policy().
1501 */
1502void blkcg_deactivate_policy(struct request_queue *q,
1503			     const struct blkcg_policy *pol)
1504{
1505	struct blkcg_gq *blkg;
1506
1507	if (!blkcg_policy_enabled(q, pol))
1508		return;
1509
1510	if (queue_is_mq(q))
1511		blk_mq_freeze_queue(q);
1512
1513	spin_lock_irq(&q->queue_lock);
1514
1515	__clear_bit(pol->plid, q->blkcg_pols);
1516
1517	list_for_each_entry(blkg, &q->blkg_list, q_node) {
1518		struct blkcg *blkcg = blkg->blkcg;
1519
1520		spin_lock(&blkcg->lock);
1521		if (blkg->pd[pol->plid]) {
1522			if (pol->pd_offline_fn)
1523				pol->pd_offline_fn(blkg->pd[pol->plid]);
1524			pol->pd_free_fn(blkg->pd[pol->plid]);
1525			blkg->pd[pol->plid] = NULL;
1526		}
1527		spin_unlock(&blkcg->lock);
1528	}
1529
1530	spin_unlock_irq(&q->queue_lock);
1531
1532	if (queue_is_mq(q))
1533		blk_mq_unfreeze_queue(q);
1534}
1535EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
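/*
 * A minimal sketch of the activate/deactivate pairing as a policy would
 * use it from its per-queue setup and teardown paths (the "foo" policy is
 * hypothetical; real users call these from e.g. their rq-qos or elevator
 * init/exit):
 *
 *	int foo_init_queue(struct request_queue *q)
 *	{
 *		return blkcg_activate_policy(q, &blkcg_policy_foo);
 *	}
 *
 *	void foo_exit_queue(struct request_queue *q)
 *	{
 *		blkcg_deactivate_policy(q, &blkcg_policy_foo);
 *	}
 */
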
1536
1537static void blkcg_free_all_cpd(struct blkcg_policy *pol)
1538{
1539	struct blkcg *blkcg;
1540
1541	list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1542		if (blkcg->cpd[pol->plid]) {
1543			pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1544			blkcg->cpd[pol->plid] = NULL;
1545		}
1546	}
1547}
1548
1549/**
1550 * blkcg_policy_register - register a blkcg policy
1551 * @pol: blkcg policy to register
1552 *
1553 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1554 * successful registration.  Returns 0 on success and -errno on failure.
1555 */
1556int blkcg_policy_register(struct blkcg_policy *pol)
1557{
1558	struct blkcg *blkcg;
1559	int i, ret;
1560
1561	mutex_lock(&blkcg_pol_register_mutex);
1562	mutex_lock(&blkcg_pol_mutex);
1563
1564	/* find an empty slot */
1565	ret = -ENOSPC;
1566	for (i = 0; i < BLKCG_MAX_POLS; i++)
1567		if (!blkcg_policy[i])
1568			break;
1569	if (i >= BLKCG_MAX_POLS) {
1570		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
1571		goto err_unlock;
1572	}
1573
1574	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
1575	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1576		(!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1577		goto err_unlock;
1578
1579	/* register @pol */
1580	pol->plid = i;
1581	blkcg_policy[pol->plid] = pol;
1582
1583	/* allocate and install cpd's */
1584	if (pol->cpd_alloc_fn) {
1585		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1586			struct blkcg_policy_data *cpd;
1587
1588			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1589			if (!cpd)
1590				goto err_free_cpds;
1591
1592			blkcg->cpd[pol->plid] = cpd;
1593			cpd->blkcg = blkcg;
1594			cpd->plid = pol->plid;
1595			if (pol->cpd_init_fn)
1596				pol->cpd_init_fn(cpd);
1597		}
1598	}
1599
1600	mutex_unlock(&blkcg_pol_mutex);
1601
1602	/* everything is in place, add intf files for the new policy */
1603	if (pol->dfl_cftypes)
1604		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1605					       pol->dfl_cftypes));
1606	if (pol->legacy_cftypes)
1607		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1608						  pol->legacy_cftypes));
1609	mutex_unlock(&blkcg_pol_register_mutex);
1610	return 0;
1611
1612err_free_cpds:
1613	if (pol->cpd_free_fn)
1614		blkcg_free_all_cpd(pol);
1615
1616	blkcg_policy[pol->plid] = NULL;
1617err_unlock:
1618	mutex_unlock(&blkcg_pol_mutex);
1619	mutex_unlock(&blkcg_pol_register_mutex);
1620	return ret;
1621}
1622EXPORT_SYMBOL_GPL(blkcg_policy_register);
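/*
 * A minimal sketch of registration at init time for a hypothetical "foo"
 * policy.  Note the pairing rule enforced above: pd_alloc_fn/pd_free_fn
 * (and cpd_alloc_fn/cpd_free_fn) must be provided together or not at all.
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_files,
 *		.legacy_cftypes	= foo_legacy_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *	module_init(foo_init);
 */
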
1623
1624/**
1625 * blkcg_policy_unregister - unregister a blkcg policy
1626 * @pol: blkcg policy to unregister
1627 *
1628 * Undo blkcg_policy_register(@pol).  Might sleep.
1629 */
1630void blkcg_policy_unregister(struct blkcg_policy *pol)
1631{
1632	mutex_lock(&blkcg_pol_register_mutex);
1633
1634	if (WARN_ON(blkcg_policy[pol->plid] != pol))
1635		goto out_unlock;
1636
1637	/* kill the intf files first */
1638	if (pol->dfl_cftypes)
1639		cgroup_rm_cftypes(pol->dfl_cftypes);
1640	if (pol->legacy_cftypes)
1641		cgroup_rm_cftypes(pol->legacy_cftypes);
1642
1643	/* remove cpds and unregister */
1644	mutex_lock(&blkcg_pol_mutex);
1645
1646	if (pol->cpd_free_fn)
1647		blkcg_free_all_cpd(pol);
1648
1649	blkcg_policy[pol->plid] = NULL;
1650
1651	mutex_unlock(&blkcg_pol_mutex);
1652out_unlock:
1653	mutex_unlock(&blkcg_pol_register_mutex);
1654}
1655EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
1656
1657bool __blkcg_punt_bio_submit(struct bio *bio)
1658{
1659	struct blkcg_gq *blkg = bio->bi_blkg;
1660
1661	/* consume the flag first */
1662	bio->bi_opf &= ~REQ_CGROUP_PUNT;
1663
1664	/* never bounce for the root cgroup */
1665	if (!blkg->parent)
1666		return false;
1667
1668	spin_lock_bh(&blkg->async_bio_lock);
1669	bio_list_add(&blkg->async_bios, bio);
1670	spin_unlock_bh(&blkg->async_bio_lock);
1671
1672	queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
1673	return true;
1674}
1675
1676/*
1677 * Scale the accumulated delay based on how long it has been since we updated
1678 * the delay.  We only call this when we are adding delay, in case it's been a
1679 * while since we added delay, and when we are checking to see if we need to
1680 * delay a task, to account for any delays that may have occurred.
1681 */
1682static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1683{
1684	u64 old = atomic64_read(&blkg->delay_start);
1685
1686	/* negative use_delay means no scaling, see blkcg_set_delay() */
1687	if (atomic_read(&blkg->use_delay) < 0)
1688		return;
1689
1690	/*
1691	 * We only want to scale down every second.  The idea here is that we
1692	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
1693	 * time window.  We only want to throttle tasks for recent delay that
1694	 * has occurred, in 1 second time windows since that's the maximum
1695	 * things can be throttled.  We save the current delay window in
1696	 * blkg->last_delay so we know what amount is still left to be charged
1697	 * to the blkg from this point onward.  blkg->last_use keeps track of
1698	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
1699	 * are ok with whatever is happening now, and we can take away more of
1700	 * the accumulated delay as we've already throttled enough that
1701	 * everybody is happy with their IO latencies.
1702	 */
1703	if (time_before64(old + NSEC_PER_SEC, now) &&
1704	    atomic64_try_cmpxchg(&blkg->delay_start, &old, now)) {
1705		u64 cur = atomic64_read(&blkg->delay_nsec);
1706		u64 sub = min_t(u64, blkg->last_delay, now - old);
1707		int cur_use = atomic_read(&blkg->use_delay);
1708
1709		/*
1710		 * We've been unthrottled, subtract a larger chunk of our
1711		 * accumulated delay.
1712		 */
1713		if (cur_use < blkg->last_use)
1714			sub = max_t(u64, sub, blkg->last_delay >> 1);
1715
1716		/*
1717		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
1718		 * should only ever be growing except here where we subtract out
1719		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
1720		 * rather not end up with negative numbers.
1721		 */
1722		if (unlikely(cur < sub)) {
1723			atomic64_set(&blkg->delay_nsec, 0);
1724			blkg->last_delay = 0;
1725		} else {
1726			atomic64_sub(sub, &blkg->delay_nsec);
1727			blkg->last_delay = cur - sub;
1728		}
1729		blkg->last_use = cur_use;
1730	}
1731}
1732
1733/*
1734 * This is called when we want to actually walk up the hierarchy and check to
1735 * see if we need to throttle, and then actually throttle if there is some
1736 * accumulated delay.  This should only be called upon return to user space so
1737 * we're not holding some lock that would induce a priority inversion.
1738 */
1739static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1740{
1741	unsigned long pflags;
1742	bool clamp;
1743	u64 now = ktime_to_ns(ktime_get());
1744	u64 exp;
1745	u64 delay_nsec = 0;
1746	int tok;
1747
1748	while (blkg->parent) {
1749		int use_delay = atomic_read(&blkg->use_delay);
1750
1751		if (use_delay) {
1752			u64 this_delay;
1753
1754			blkcg_scale_delay(blkg, now);
1755			this_delay = atomic64_read(&blkg->delay_nsec);
1756			if (this_delay > delay_nsec) {
1757				delay_nsec = this_delay;
1758				clamp = use_delay > 0;
1759			}
1760		}
1761		blkg = blkg->parent;
1762	}
1763
1764	if (!delay_nsec)
1765		return;
1766
1767	/*
1768	 * Let's not sleep for all eternity if we've amassed a huge delay.
1769	 * Swapping or metadata IO can accumulate 10's of seconds worth of
1770	 * delay, and we want userspace to be able to do _something_ so cap the
1771	 * delays at 0.25s. If there's 10's of seconds worth of delay then the
1772	 * tasks will be delayed for 0.25 second for every syscall. If
1773	 * blkcg_set_delay() was used as indicated by negative use_delay, the
1774	 * caller is responsible for regulating the range.
1775	 */
1776	if (clamp)
1777		delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
1778
1779	if (use_memdelay)
1780		psi_memstall_enter(&pflags);
1781
1782	exp = ktime_add_ns(now, delay_nsec);
1783	tok = io_schedule_prepare();
1784	do {
1785		__set_current_state(TASK_KILLABLE);
1786		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1787			break;
1788	} while (!fatal_signal_pending(current));
1789	io_schedule_finish(tok);
1790
1791	if (use_memdelay)
1792		psi_memstall_leave(&pflags);
1793}
1794
1795/**
1796 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
1797 *
1798 * This is only called if we've been marked with set_notify_resume().  Obviously
1799 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1800 * check to see if current->throttle_queue is set and if not this doesn't do
1801 * anything.  This should only ever be called by the resume code, it's not meant
1802 * to be called by people willy-nilly as it will actually do the work to
1803 * throttle the task if it is setup for throttling.
1804 */
1805void blkcg_maybe_throttle_current(void)
1806{
1807	struct request_queue *q = current->throttle_queue;
1808	struct blkcg *blkcg;
1809	struct blkcg_gq *blkg;
1810	bool use_memdelay = current->use_memdelay;
1811
1812	if (!q)
1813		return;
1814
1815	current->throttle_queue = NULL;
1816	current->use_memdelay = false;
1817
1818	rcu_read_lock();
1819	blkcg = css_to_blkcg(blkcg_css());
1820	if (!blkcg)
1821		goto out;
1822	blkg = blkg_lookup(blkcg, q);
1823	if (!blkg)
1824		goto out;
1825	if (!blkg_tryget(blkg))
1826		goto out;
1827	rcu_read_unlock();
1828
1829	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1830	blkg_put(blkg);
1831	blk_put_queue(q);
1832	return;
1833out:
1834	rcu_read_unlock();
1835	blk_put_queue(q);
1836}
1837
1838/**
1839 * blkcg_schedule_throttle - this task needs to check for throttling
1840 * @disk: disk to throttle
1841 * @use_memdelay: do we charge this to memory delay for PSI
1842 *
1843 * This is called by the IO controller when we know there's delay accumulated
1844 * for the blkg for this task.  We do not pass the blkg because there are places
1845 * we call this that may not have that information; the swapping code, for
1846 * instance, will only have a block_device at that point.  This sets the
1847 * notify_resume for the task to check and see if it requires throttling before
1848 * returning to user space.
1849 *
1850 * We will only schedule once per syscall.  You can call this over and over
1851 * again and it will only do the check once upon return to user space, and only
1852 * throttle once.  If the task needs to be throttled again it'll need to be
1853 * re-set at the next time we see the task.
1854 * re-set the next time we see the task.
1855void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
1856{
1857	struct request_queue *q = disk->queue;
1858
1859	if (unlikely(current->flags & PF_KTHREAD))
1860		return;
1861
1862	if (current->throttle_queue != q) {
1863		if (!blk_get_queue(q))
1864			return;
1865
1866		if (current->throttle_queue)
1867			blk_put_queue(current->throttle_queue);
1868		current->throttle_queue = q;
1869	}
1870
1871	if (use_memdelay)
1872		current->use_memdelay = use_memdelay;
1873	set_notify_resume(current);
1874}
1875
1876/**
1877 * blkcg_add_delay - add delay to this blkg
1878 * @blkg: blkg of interest
1879 * @now: the current time in nanoseconds
1880 * @delta: how many nanoseconds of delay to add
1881 *
1882 * Charge @delta to the blkg's current delay accumulation.  This is used to
1883 * throttle tasks if an IO controller thinks we need more throttling.
1884 */
1885void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1886{
1887	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
1888		return;
1889	blkcg_scale_delay(blkg, now);
1890	atomic64_add(delta, &blkg->delay_nsec);
1891}
1892
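/*
 * Editor's note: the block below is an illustrative sketch, not part of
 * blk-cgroup.c.  It shows how a hypothetical policy that has decided a
 * group owes debt_ns of extra delay might charge it with blkcg_add_delay()
 * and then arm the resume-time throttle for the submitting task with
 * blkcg_schedule_throttle().  The helper name and the choice to charge the
 * stall to PSI memstall (use_memdelay == true) are assumptions made for
 * the example.
 */
static void example_policy_charge_debt(struct blkcg_gq *blkg,
				       struct gendisk *disk, u64 debt_ns)
{
	/* accumulate the debt against @blkg; scaled by blkcg_scale_delay() */
	blkcg_add_delay(blkg, ktime_to_ns(ktime_get()), debt_ns);

	/* have current pay the delay on its next return to user space */
	blkcg_schedule_throttle(disk, true);
}
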
1893/**
1894 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
1895 * @bio: target bio
1896 * @css: target css
1897 *
1898 * As the failure mode here is to walk up the blkg tree, this ensures that the
1899 * blkg->parent pointers are always valid.  This returns the blkg that it ended
1900 * up taking a reference on or %NULL if no reference was taken.
1901 */
1902static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
1903		struct cgroup_subsys_state *css)
1904{
1905	struct blkcg_gq *blkg, *ret_blkg = NULL;
1906
1907	rcu_read_lock();
1908	blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
1909	while (blkg) {
1910		if (blkg_tryget(blkg)) {
1911			ret_blkg = blkg;
1912			break;
1913		}
1914		blkg = blkg->parent;
1915	}
1916	rcu_read_unlock();
1917
1918	return ret_blkg;
1919}
1920
1921/**
1922 * bio_associate_blkg_from_css - associate a bio with a specified css
1923 * @bio: target bio
1924 * @css: target css
1925 *
1926 * Associate @bio with the blkg found by combining the css's blkg and the
1927 * request_queue of @bio.  An association failure is handled by walking up
1928 * the blkg tree.  Therefore, the blkg associated can be anything between the
1929 * looked-up blkg and q->root_blkg.  This situation only happens when a cgroup
1930 * is dying; the remaining bios then spill to the closest alive blkg.
1931 *
1932 * A reference will be taken on the blkg and will be released when @bio is
1933 * freed.
1934 */
1935void bio_associate_blkg_from_css(struct bio *bio,
1936				 struct cgroup_subsys_state *css)
1937{
1938	if (bio->bi_blkg)
1939		blkg_put(bio->bi_blkg);
1940
1941	if (css && css->parent) {
1942		bio->bi_blkg = blkg_tryget_closest(bio, css);
1943	} else {
1944		blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg);
1945		bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg;
1946	}
1947}
1948EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
1949
1950/**
1951 * bio_associate_blkg - associate a bio with a blkg
1952 * @bio: target bio
1953 *
1954 * Associate @bio with the blkg found from the bio's css and request_queue.
1955 * If one is not found, blkg_lookup_create() creates the blkg.  If a blkg is
1956 * already associated, the css is reused and the association redone as the
1957 * request_queue may have changed.
1958 */
1959void bio_associate_blkg(struct bio *bio)
1960{
1961	struct cgroup_subsys_state *css;
1962
1963	rcu_read_lock();
1964
1965	if (bio->bi_blkg)
1966		css = bio_blkcg_css(bio);
1967	else
1968		css = blkcg_css();
1969
1970	bio_associate_blkg_from_css(bio, css);
1971
1972	rcu_read_unlock();
1973}
1974EXPORT_SYMBOL_GPL(bio_associate_blkg);
1975
1976/**
1977 * bio_clone_blkg_association - clone blkg association from src to dst bio
1978 * @dst: destination bio
1979 * @src: source bio
1980 */
1981void bio_clone_blkg_association(struct bio *dst, struct bio *src)
1982{
1983	if (src->bi_blkg)
1984		bio_associate_blkg_from_css(dst, bio_blkcg_css(src));
1985}
1986EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
1987
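/*
 * Editor's note: the block below is an illustrative sketch, not part of
 * blk-cgroup.c.  A hypothetical submitter that clones a bio would normally
 * carry the blkg association from the original over to the clone before
 * submitting it, so the clone is charged to the same blkcg; the helper
 * exists only for this example.
 */
static void example_submit_clone(struct bio *clone, struct bio *orig)
{
	/* reuse @orig's css and re-associate against @clone's request_queue */
	bio_clone_blkg_association(clone, orig);
	submit_bio(clone);
}
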
1988static int blk_cgroup_io_type(struct bio *bio)
1989{
1990	if (op_is_discard(bio->bi_opf))
1991		return BLKG_IOSTAT_DISCARD;
1992	if (op_is_write(bio->bi_opf))
1993		return BLKG_IOSTAT_WRITE;
1994	return BLKG_IOSTAT_READ;
1995}
1996
1997void blk_cgroup_bio_start(struct bio *bio)
1998{
1999	struct blkcg *blkcg = bio->bi_blkg->blkcg;
2000	int rwd = blk_cgroup_io_type(bio), cpu;
2001	struct blkg_iostat_set *bis;
2002	unsigned long flags;
2003
2004	/* Root-level stats are sourced from system-wide IO stats */
2005	if (!cgroup_parent(blkcg->css.cgroup))
2006		return;
2007
2008	cpu = get_cpu();
2009	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
2010	flags = u64_stats_update_begin_irqsave(&bis->sync);
2011
2012	/*
2013	 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
2014	 * bio and we would have already accounted for the size of the bio.
2015	 */
2016	if (!bio_flagged(bio, BIO_CGROUP_ACCT)) {
2017		bio_set_flag(bio, BIO_CGROUP_ACCT);
2018		bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
2019	}
2020	bis->cur.ios[rwd]++;
2021
2022	/*
2023	 * If the iostat_cpu isn't in a lockless list, put it into the
2024	 * list to indicate that a stat update is pending.
2025	 */
2026	if (!READ_ONCE(bis->lqueued)) {
2027		struct llist_head *lhead = this_cpu_ptr(blkcg->lhead);
2028
2029		llist_add(&bis->lnode, lhead);
2030		WRITE_ONCE(bis->lqueued, true);
2031		percpu_ref_get(&bis->blkg->refcnt);
2032	}
2033
2034	u64_stats_update_end_irqrestore(&bis->sync, flags);
2035	if (cgroup_subsys_on_dfl(io_cgrp_subsys))
2036		cgroup_rstat_updated(blkcg->css.cgroup, cpu);
2037	put_cpu();
2038}
2039
2040bool blk_cgroup_congested(void)
2041{
2042	struct cgroup_subsys_state *css;
2043	bool ret = false;
2044
2045	rcu_read_lock();
2046	for (css = blkcg_css(); css; css = css->parent) {
2047		if (atomic_read(&css->cgroup->congestion_count)) {
2048			ret = true;
2049			break;
2050		}
2051	}
2052	rcu_read_unlock();
2053	return ret;
2054}
2055
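/*
 * Editor's note: the block below is an illustrative sketch, not part of
 * blk-cgroup.c.  A hypothetical best-effort path (speculative readahead,
 * for instance) could consult blk_cgroup_congested() to scale back
 * optional IO while any cgroup in the current task's blkcg hierarchy
 * reports congestion; the helper and the fallback value are assumptions
 * made for the example.
 */
static unsigned int example_readahead_pages(unsigned int want)
{
	/* fall back to a single page while the hierarchy is congested */
	return blk_cgroup_congested() ? 1 : want;
}
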
2056static int __init blkcg_init(void)
2057{
2058	blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
2059					    WQ_MEM_RECLAIM | WQ_FREEZABLE |
2060					    WQ_UNBOUND | WQ_SYSFS, 0);
2061	if (!blkcg_punt_bio_wq)
2062		return -ENOMEM;
2063	return 0;
2064}
2065subsys_initcall(blkcg_init);
2066
2067module_param(blkcg_debug_stats, bool, 0644);
2068MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");