v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2
   3#include <linux/wait.h>
   4#include <linux/rbtree.h>
   5#include <linux/backing-dev.h>
   6#include <linux/kthread.h>
   7#include <linux/freezer.h>
   8#include <linux/fs.h>
   9#include <linux/pagemap.h>
  10#include <linux/mm.h>
  11#include <linux/sched.h>
  12#include <linux/module.h>
  13#include <linux/writeback.h>
  14#include <linux/device.h>
  15#include <trace/events/writeback.h>
  16
  17struct backing_dev_info noop_backing_dev_info = {
  18	.name		= "noop",
  19	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
  20};
  21EXPORT_SYMBOL_GPL(noop_backing_dev_info);
  22
  23static struct class *bdi_class;
  24
  25/*
  26 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
  27 * reader side locking.
  28 */
  29DEFINE_SPINLOCK(bdi_lock);
  30static u64 bdi_id_cursor;
  31static struct rb_root bdi_tree = RB_ROOT;
  32LIST_HEAD(bdi_list);
  33
  34/* bdi_wq serves all asynchronous writeback tasks */
  35struct workqueue_struct *bdi_wq;
  36
  37#ifdef CONFIG_DEBUG_FS
  38#include <linux/debugfs.h>
  39#include <linux/seq_file.h>
  40
  41static struct dentry *bdi_debug_root;
  42
  43static void bdi_debug_init(void)
  44{
  45	bdi_debug_root = debugfs_create_dir("bdi", NULL);
  46}
  47
  48static int bdi_debug_stats_show(struct seq_file *m, void *v)
  49{
  50	struct backing_dev_info *bdi = m->private;
  51	struct bdi_writeback *wb = &bdi->wb;
  52	unsigned long background_thresh;
  53	unsigned long dirty_thresh;
  54	unsigned long wb_thresh;
  55	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
  56	struct inode *inode;
  57
  58	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
  59	spin_lock(&wb->list_lock);
  60	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
  61		nr_dirty++;
  62	list_for_each_entry(inode, &wb->b_io, i_io_list)
  63		nr_io++;
  64	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
  65		nr_more_io++;
  66	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
  67		if (inode->i_state & I_DIRTY_TIME)
  68			nr_dirty_time++;
  69	spin_unlock(&wb->list_lock);
  70
  71	global_dirty_limits(&background_thresh, &dirty_thresh);
  72	wb_thresh = wb_calc_thresh(wb, dirty_thresh);
  73
  74#define K(x) ((x) << (PAGE_SHIFT - 10))
  75	seq_printf(m,
  76		   "BdiWriteback:       %10lu kB\n"
  77		   "BdiReclaimable:     %10lu kB\n"
  78		   "BdiDirtyThresh:     %10lu kB\n"
  79		   "DirtyThresh:        %10lu kB\n"
  80		   "BackgroundThresh:   %10lu kB\n"
  81		   "BdiDirtied:         %10lu kB\n"
  82		   "BdiWritten:         %10lu kB\n"
  83		   "BdiWriteBandwidth:  %10lu kBps\n"
  84		   "b_dirty:            %10lu\n"
  85		   "b_io:               %10lu\n"
  86		   "b_more_io:          %10lu\n"
  87		   "b_dirty_time:       %10lu\n"
  88		   "bdi_list:           %10u\n"
  89		   "state:              %10lx\n",
  90		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
  91		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
  92		   K(wb_thresh),
  93		   K(dirty_thresh),
  94		   K(background_thresh),
  95		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
  96		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
  97		   (unsigned long) K(wb->write_bandwidth),
  98		   nr_dirty,
  99		   nr_io,
 100		   nr_more_io,
 101		   nr_dirty_time,
 102		   !list_empty(&bdi->bdi_list), bdi->wb.state);
 103#undef K
 104
 105	return 0;
 106}
 107DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);
 108
 109static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 110{
 111	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
 112
 113	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
 114			    &bdi_debug_stats_fops);
 115}
 116
 117static void bdi_debug_unregister(struct backing_dev_info *bdi)
 118{
 119	debugfs_remove_recursive(bdi->debug_dir);
 120}
 121#else
 122static inline void bdi_debug_init(void)
 123{
 124}
 125static inline void bdi_debug_register(struct backing_dev_info *bdi,
 126				      const char *name)
 127{
 128}
 129static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 130{
 131}
 132#endif
 133
 134static ssize_t read_ahead_kb_store(struct device *dev,
 135				  struct device_attribute *attr,
 136				  const char *buf, size_t count)
 137{
 138	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 139	unsigned long read_ahead_kb;
 140	ssize_t ret;
 141
 142	ret = kstrtoul(buf, 10, &read_ahead_kb);
 143	if (ret < 0)
 144		return ret;
 145
 146	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
 147
 148	return count;
 149}
 150
 151#define K(pages) ((pages) << (PAGE_SHIFT - 10))
 152
 153#define BDI_SHOW(name, expr)						\
 154static ssize_t name##_show(struct device *dev,				\
 155			   struct device_attribute *attr, char *page)	\
 156{									\
 157	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
 158									\
 159	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
 160}									\
 161static DEVICE_ATTR_RW(name);
 162
 163BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
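/*
 * For reference, the BDI_SHOW() invocation above expands (approximately,
 * per the macro definition) into the following show routine, which pairs
 * with read_ahead_kb_store() to form the read_ahead_kb sysfs attribute:
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *					  struct device_attribute *attr,
 *					  char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 *	static DEVICE_ATTR_RW(read_ahead_kb);
 */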
 164
 165static ssize_t min_ratio_store(struct device *dev,
 166		struct device_attribute *attr, const char *buf, size_t count)
 167{
 168	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 169	unsigned int ratio;
 170	ssize_t ret;
 171
 172	ret = kstrtouint(buf, 10, &ratio);
 173	if (ret < 0)
 174		return ret;
 175
 176	ret = bdi_set_min_ratio(bdi, ratio);
 177	if (!ret)
 178		ret = count;
 179
 180	return ret;
 181}
 182BDI_SHOW(min_ratio, bdi->min_ratio)
 183
 184static ssize_t max_ratio_store(struct device *dev,
 185		struct device_attribute *attr, const char *buf, size_t count)
 186{
 187	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 188	unsigned int ratio;
 189	ssize_t ret;
 190
 191	ret = kstrtouint(buf, 10, &ratio);
 192	if (ret < 0)
 193		return ret;
 194
 195	ret = bdi_set_max_ratio(bdi, ratio);
 196	if (!ret)
 197		ret = count;
 198
 199	return ret;
 200}
 201BDI_SHOW(max_ratio, bdi->max_ratio)
 202
 203static ssize_t stable_pages_required_show(struct device *dev,
 204					  struct device_attribute *attr,
 205					  char *page)
 206{
 207	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 208
 209	return snprintf(page, PAGE_SIZE-1, "%d\n",
 210			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
 211}
 212static DEVICE_ATTR_RO(stable_pages_required);
 213
 214static struct attribute *bdi_dev_attrs[] = {
 215	&dev_attr_read_ahead_kb.attr,
 216	&dev_attr_min_ratio.attr,
 217	&dev_attr_max_ratio.attr,
 218	&dev_attr_stable_pages_required.attr,
 219	NULL,
 220};
 221ATTRIBUTE_GROUPS(bdi_dev);
 222
 223static __init int bdi_class_init(void)
 224{
 225	bdi_class = class_create(THIS_MODULE, "bdi");
 226	if (IS_ERR(bdi_class))
 227		return PTR_ERR(bdi_class);
 228
 229	bdi_class->dev_groups = bdi_dev_groups;
 230	bdi_debug_init();
 231
 232	return 0;
 233}
 234postcore_initcall(bdi_class_init);
 235
 236static int bdi_init(struct backing_dev_info *bdi);
 237
 238static int __init default_bdi_init(void)
 239{
 240	int err;
 241
 242	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
 243				 WQ_SYSFS, 0);
 244	if (!bdi_wq)
 245		return -ENOMEM;
 246
 247	err = bdi_init(&noop_backing_dev_info);
 248
 249	return err;
 250}
 251subsys_initcall(default_bdi_init);
 252
 253/*
 254 * This function is used when the first inode for this wb is marked dirty. It
  255 * wakes up the corresponding bdi thread which should then take care of the
 256 * periodic background write-out of dirty inodes. Since the write-out would
  257 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 258 * set up a timer which wakes the bdi thread up later.
 259 *
 260 * Note, we wouldn't bother setting up the timer, but this function is on the
  261 * fast-path (used by '__mark_inode_dirty()'), so we save a few context switches
 262 * by delaying the wake-up.
 263 *
 264 * We have to be careful not to postpone flush work if it is scheduled for
 265 * earlier. Thus we use queue_delayed_work().
 266 */
 267void wb_wakeup_delayed(struct bdi_writeback *wb)
 268{
 269	unsigned long timeout;
 270
 271	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
 272	spin_lock_bh(&wb->work_lock);
 273	if (test_bit(WB_registered, &wb->state))
 274		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
 275	spin_unlock_bh(&wb->work_lock);
 276}
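/*
 * Caller side, as a rough sketch only (the exact guard in
 * '__mark_inode_dirty()' varies between kernel versions): when the first
 * dirty inode lands on an otherwise idle wb, the dirtying path does
 * something like
 *
 *	if (wakeup_bdi)
 *		wb_wakeup_delayed(wb);
 *
 * and the actual flusher wakeup then happens from the delayed work once
 * 'dirty_writeback_interval' has elapsed.
 */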
 277
 278/*
 279 * Initial write bandwidth: 100 MB/s
 280 */
 281#define INIT_BW		(100 << (20 - PAGE_SHIFT))
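/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12) this evaluates to
 * 100 << 8 == 25600 pages per second, i.e. 25600 * 4 KiB == 100 MiB/s.
 */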
 282
 283static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
 284		   int blkcg_id, gfp_t gfp)
 285{
 286	int i, err;
 287
 288	memset(wb, 0, sizeof(*wb));
 289
 290	if (wb != &bdi->wb)
 291		bdi_get(bdi);
 292	wb->bdi = bdi;
 293	wb->last_old_flush = jiffies;
 294	INIT_LIST_HEAD(&wb->b_dirty);
 295	INIT_LIST_HEAD(&wb->b_io);
 296	INIT_LIST_HEAD(&wb->b_more_io);
 297	INIT_LIST_HEAD(&wb->b_dirty_time);
 298	spin_lock_init(&wb->list_lock);
 299
 300	wb->bw_time_stamp = jiffies;
 301	wb->balanced_dirty_ratelimit = INIT_BW;
 302	wb->dirty_ratelimit = INIT_BW;
 303	wb->write_bandwidth = INIT_BW;
 304	wb->avg_write_bandwidth = INIT_BW;
 305
 306	spin_lock_init(&wb->work_lock);
 307	INIT_LIST_HEAD(&wb->work_list);
 308	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
 309	wb->dirty_sleep = jiffies;
 310
 311	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
 312	if (!wb->congested) {
 313		err = -ENOMEM;
 314		goto out_put_bdi;
 315	}
 316
 317	err = fprop_local_init_percpu(&wb->completions, gfp);
 318	if (err)
 319		goto out_put_cong;
 320
 321	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
 322		err = percpu_counter_init(&wb->stat[i], 0, gfp);
 323		if (err)
 324			goto out_destroy_stat;
 325	}
 326
 327	return 0;
 328
 329out_destroy_stat:
 330	while (i--)
 331		percpu_counter_destroy(&wb->stat[i]);
 332	fprop_local_destroy_percpu(&wb->completions);
 333out_put_cong:
 334	wb_congested_put(wb->congested);
 335out_put_bdi:
 336	if (wb != &bdi->wb)
 337		bdi_put(bdi);
 338	return err;
 339}
 340
 341static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
 342
 343/*
  344 * Remove bdi from the global list and shut down any threads we have running
 345 */
 346static void wb_shutdown(struct bdi_writeback *wb)
 347{
 348	/* Make sure nobody queues further work */
 349	spin_lock_bh(&wb->work_lock);
 350	if (!test_and_clear_bit(WB_registered, &wb->state)) {
 351		spin_unlock_bh(&wb->work_lock);
 352		return;
 353	}
 354	spin_unlock_bh(&wb->work_lock);
 355
 356	cgwb_remove_from_bdi_list(wb);
 357	/*
 358	 * Drain work list and shutdown the delayed_work.  !WB_registered
 359	 * tells wb_workfn() that @wb is dying and its work_list needs to
 360	 * be drained no matter what.
 361	 */
 362	mod_delayed_work(bdi_wq, &wb->dwork, 0);
 363	flush_delayed_work(&wb->dwork);
 364	WARN_ON(!list_empty(&wb->work_list));
 365}
 366
 367static void wb_exit(struct bdi_writeback *wb)
 368{
 369	int i;
 370
 371	WARN_ON(delayed_work_pending(&wb->dwork));
 372
 373	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
 374		percpu_counter_destroy(&wb->stat[i]);
 375
 376	fprop_local_destroy_percpu(&wb->completions);
 377	wb_congested_put(wb->congested);
 378	if (wb != &wb->bdi->wb)
 379		bdi_put(wb->bdi);
 380}
 381
 382#ifdef CONFIG_CGROUP_WRITEBACK
 383
 384#include <linux/memcontrol.h>
 385
 386/*
 387 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 388 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 389 * protected.
 390 */
 391static DEFINE_SPINLOCK(cgwb_lock);
 392static struct workqueue_struct *cgwb_release_wq;
 393
 394/**
 395 * wb_congested_get_create - get or create a wb_congested
 396 * @bdi: associated bdi
 397 * @blkcg_id: ID of the associated blkcg
 398 * @gfp: allocation mask
 399 *
 400 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 401 * The returned wb_congested has its reference count incremented.  Returns
 402 * NULL on failure.
 403 */
 404struct bdi_writeback_congested *
 405wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
 406{
 407	struct bdi_writeback_congested *new_congested = NULL, *congested;
 408	struct rb_node **node, *parent;
 409	unsigned long flags;
 410retry:
 411	spin_lock_irqsave(&cgwb_lock, flags);
 412
 413	node = &bdi->cgwb_congested_tree.rb_node;
 414	parent = NULL;
 415
 416	while (*node != NULL) {
 417		parent = *node;
 418		congested = rb_entry(parent, struct bdi_writeback_congested,
 419				     rb_node);
 420		if (congested->blkcg_id < blkcg_id)
 421			node = &parent->rb_left;
 422		else if (congested->blkcg_id > blkcg_id)
 423			node = &parent->rb_right;
 424		else
 425			goto found;
 426	}
 427
 428	if (new_congested) {
 429		/* !found and storage for new one already allocated, insert */
 430		congested = new_congested;
 431		rb_link_node(&congested->rb_node, parent, node);
 432		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
 433		spin_unlock_irqrestore(&cgwb_lock, flags);
 434		return congested;
 435	}
 436
 437	spin_unlock_irqrestore(&cgwb_lock, flags);
 438
 439	/* allocate storage for new one and retry */
 440	new_congested = kzalloc(sizeof(*new_congested), gfp);
 441	if (!new_congested)
 442		return NULL;
 443
 444	refcount_set(&new_congested->refcnt, 1);
 445	new_congested->__bdi = bdi;
 446	new_congested->blkcg_id = blkcg_id;
 447	goto retry;
 448
 449found:
 450	refcount_inc(&congested->refcnt);
 451	spin_unlock_irqrestore(&cgwb_lock, flags);
 452	kfree(new_congested);
 453	return congested;
 454}
 455
 456/**
 457 * wb_congested_put - put a wb_congested
 458 * @congested: wb_congested to put
 459 *
 460 * Put @congested and destroy it if the refcnt reaches zero.
 461 */
 462void wb_congested_put(struct bdi_writeback_congested *congested)
 463{
 464	unsigned long flags;
 465
 466	if (!refcount_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags))
 467		return;
 468
 469	/* bdi might already have been destroyed leaving @congested unlinked */
 470	if (congested->__bdi) {
 471		rb_erase(&congested->rb_node,
 472			 &congested->__bdi->cgwb_congested_tree);
 473		congested->__bdi = NULL;
 474	}
 475
 476	spin_unlock_irqrestore(&cgwb_lock, flags);
 477	kfree(congested);
 478}
 479
 480static void cgwb_release_workfn(struct work_struct *work)
 481{
 482	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
 483						release_work);
 484	struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);
 485
 486	mutex_lock(&wb->bdi->cgwb_release_mutex);
 487	wb_shutdown(wb);
 488
 489	css_put(wb->memcg_css);
 490	css_put(wb->blkcg_css);
 491	mutex_unlock(&wb->bdi->cgwb_release_mutex);
 492
 493	/* triggers blkg destruction if cgwb_refcnt becomes zero */
 494	blkcg_cgwb_put(blkcg);
 495
 496	fprop_local_destroy_percpu(&wb->memcg_completions);
 497	percpu_ref_exit(&wb->refcnt);
 498	wb_exit(wb);
 499	kfree_rcu(wb, rcu);
 500}
 501
 502static void cgwb_release(struct percpu_ref *refcnt)
 503{
 504	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
 505						refcnt);
 506	queue_work(cgwb_release_wq, &wb->release_work);
 507}
 508
 509static void cgwb_kill(struct bdi_writeback *wb)
 510{
 511	lockdep_assert_held(&cgwb_lock);
 512
 513	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
 514	list_del(&wb->memcg_node);
 515	list_del(&wb->blkcg_node);
 516	percpu_ref_kill(&wb->refcnt);
 517}
 518
 519static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
 520{
 521	spin_lock_irq(&cgwb_lock);
 522	list_del_rcu(&wb->bdi_node);
 523	spin_unlock_irq(&cgwb_lock);
 524}
 525
 526static int cgwb_create(struct backing_dev_info *bdi,
 527		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
 528{
 529	struct mem_cgroup *memcg;
 530	struct cgroup_subsys_state *blkcg_css;
 531	struct blkcg *blkcg;
 532	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
 533	struct bdi_writeback *wb;
 534	unsigned long flags;
 535	int ret = 0;
 536
 537	memcg = mem_cgroup_from_css(memcg_css);
 538	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
 539	blkcg = css_to_blkcg(blkcg_css);
 540	memcg_cgwb_list = &memcg->cgwb_list;
 541	blkcg_cgwb_list = &blkcg->cgwb_list;
 542
 543	/* look up again under lock and discard on blkcg mismatch */
 544	spin_lock_irqsave(&cgwb_lock, flags);
 545	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 546	if (wb && wb->blkcg_css != blkcg_css) {
 547		cgwb_kill(wb);
 548		wb = NULL;
 549	}
 550	spin_unlock_irqrestore(&cgwb_lock, flags);
 551	if (wb)
 552		goto out_put;
 553
 554	/* need to create a new one */
 555	wb = kmalloc(sizeof(*wb), gfp);
 556	if (!wb) {
 557		ret = -ENOMEM;
 558		goto out_put;
 559	}
 560
 561	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
 562	if (ret)
 563		goto err_free;
 564
 565	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
 566	if (ret)
 567		goto err_wb_exit;
 568
 569	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
 570	if (ret)
 571		goto err_ref_exit;
 572
 573	wb->memcg_css = memcg_css;
 574	wb->blkcg_css = blkcg_css;
 575	INIT_WORK(&wb->release_work, cgwb_release_workfn);
 576	set_bit(WB_registered, &wb->state);
 577
 578	/*
 579	 * The root wb determines the registered state of the whole bdi and
 580	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
 581	 * whether they're still online.  Don't link @wb if any is dead.
 582	 * See wb_memcg_offline() and wb_blkcg_offline().
 583	 */
 584	ret = -ENODEV;
 585	spin_lock_irqsave(&cgwb_lock, flags);
 586	if (test_bit(WB_registered, &bdi->wb.state) &&
 587	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
 588		/* we might have raced another instance of this function */
 589		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
 590		if (!ret) {
 591			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
 592			list_add(&wb->memcg_node, memcg_cgwb_list);
 593			list_add(&wb->blkcg_node, blkcg_cgwb_list);
 594			blkcg_cgwb_get(blkcg);
 595			css_get(memcg_css);
 596			css_get(blkcg_css);
 597		}
 598	}
 599	spin_unlock_irqrestore(&cgwb_lock, flags);
 600	if (ret) {
 601		if (ret == -EEXIST)
 602			ret = 0;
 603		goto err_fprop_exit;
 604	}
 605	goto out_put;
 606
 607err_fprop_exit:
 608	fprop_local_destroy_percpu(&wb->memcg_completions);
 609err_ref_exit:
 610	percpu_ref_exit(&wb->refcnt);
 611err_wb_exit:
 612	wb_exit(wb);
 613err_free:
 614	kfree(wb);
 615out_put:
 616	css_put(blkcg_css);
 617	return ret;
 618}
 619
 620/**
 621 * wb_get_lookup - get wb for a given memcg
 622 * @bdi: target bdi
 623 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 624 *
 625 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 626 * refcount incremented.
 627 *
 628 * This function uses css_get() on @memcg_css and thus expects its refcnt
 629 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 630 * @memcg_css isn't enough.  try_get it before calling this function.
 631 *
 632 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 633 * memcg on the default hierarchy, memcg association is guaranteed to be
 634 * more specific (equal or descendant to the associated blkcg) and thus can
 635 * identify both the memcg and blkcg associations.
 636 *
 637 * Because the blkcg associated with a memcg may change as blkcg is enabled
 638 * and disabled closer to root in the hierarchy, each wb keeps track of
 639 * both the memcg and blkcg associated with it and verifies the blkcg on
 640 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 641 * created.
 642 */
 643struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
 644				    struct cgroup_subsys_state *memcg_css)
 645{
 646	struct bdi_writeback *wb;
 647
 648	if (!memcg_css->parent)
 649		return &bdi->wb;
 650
 651	rcu_read_lock();
 652	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 653	if (wb) {
 654		struct cgroup_subsys_state *blkcg_css;
 655
 656		/* see whether the blkcg association has changed */
 657		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
 658		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
 659			wb = NULL;
 660		css_put(blkcg_css);
 661	}
 662	rcu_read_unlock();
 663
 664	return wb;
 665}
 666
 667/**
 668 * wb_get_create - get wb for a given memcg, create if necessary
 669 * @bdi: target bdi
 670 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 671 * @gfp: allocation mask to use
 672 *
 673 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 674 * create one.  See wb_get_lookup() for more details.
 675 */
 676struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
 677				    struct cgroup_subsys_state *memcg_css,
 678				    gfp_t gfp)
 679{
 680	struct bdi_writeback *wb;
 681
 682	might_sleep_if(gfpflags_allow_blocking(gfp));
 683
 684	if (!memcg_css->parent)
 685		return &bdi->wb;
 686
 687	do {
 688		wb = wb_get_lookup(bdi, memcg_css);
 689	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));
 690
 691	return wb;
 692}
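/*
 * Usage sketch (illustrative, not lifted verbatim from any caller): code
 * attaching an inode to the writeback domain of the current task's memcg
 * would pin the css around the call, as required by the comment above:
 *
 *	memcg_css = task_get_css(current, memory_cgrp_id);
 *	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *	css_put(memcg_css);
 */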
 693
 694static int cgwb_bdi_init(struct backing_dev_info *bdi)
 695{
 696	int ret;
 697
 698	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
 699	bdi->cgwb_congested_tree = RB_ROOT;
 700	mutex_init(&bdi->cgwb_release_mutex);
 701	init_rwsem(&bdi->wb_switch_rwsem);
 702
 703	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
 704	if (!ret) {
 705		bdi->wb.memcg_css = &root_mem_cgroup->css;
 706		bdi->wb.blkcg_css = blkcg_root_css;
 707	}
 708	return ret;
 709}
 710
 711static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
 712{
 713	struct radix_tree_iter iter;
 714	void **slot;
 715	struct bdi_writeback *wb;
 716
 717	WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 718
 719	spin_lock_irq(&cgwb_lock);
 720	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 721		cgwb_kill(*slot);
 722	spin_unlock_irq(&cgwb_lock);
 723
 724	mutex_lock(&bdi->cgwb_release_mutex);
 725	spin_lock_irq(&cgwb_lock);
 726	while (!list_empty(&bdi->wb_list)) {
 727		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
 728				      bdi_node);
 729		spin_unlock_irq(&cgwb_lock);
 730		wb_shutdown(wb);
 731		spin_lock_irq(&cgwb_lock);
 732	}
 733	spin_unlock_irq(&cgwb_lock);
 734	mutex_unlock(&bdi->cgwb_release_mutex);
 735}
 736
 737/**
 738 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 739 * @memcg: memcg being offlined
 740 *
 741 * Also prevents creation of any new wb's associated with @memcg.
 742 */
 743void wb_memcg_offline(struct mem_cgroup *memcg)
 744{
 745	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
 746	struct bdi_writeback *wb, *next;
 747
 748	spin_lock_irq(&cgwb_lock);
 749	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
 750		cgwb_kill(wb);
 751	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
 752	spin_unlock_irq(&cgwb_lock);
 753}
 754
 755/**
 756 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 757 * @blkcg: blkcg being offlined
 758 *
 759 * Also prevents creation of any new wb's associated with @blkcg.
 760 */
 761void wb_blkcg_offline(struct blkcg *blkcg)
 762{
 763	struct bdi_writeback *wb, *next;
 764
 765	spin_lock_irq(&cgwb_lock);
 766	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
 767		cgwb_kill(wb);
 768	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
 769	spin_unlock_irq(&cgwb_lock);
 770}
 771
 772static void cgwb_bdi_exit(struct backing_dev_info *bdi)
 773{
 774	struct rb_node *rbn;
 775
 776	spin_lock_irq(&cgwb_lock);
 777	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
 778		struct bdi_writeback_congested *congested =
 779			rb_entry(rbn, struct bdi_writeback_congested, rb_node);
 780
 781		rb_erase(rbn, &bdi->cgwb_congested_tree);
 782		congested->__bdi = NULL;	/* mark @congested unlinked */
 783	}
 784	spin_unlock_irq(&cgwb_lock);
 785}
 786
 787static void cgwb_bdi_register(struct backing_dev_info *bdi)
 788{
 789	spin_lock_irq(&cgwb_lock);
 790	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
 791	spin_unlock_irq(&cgwb_lock);
 792}
 793
 794static int __init cgwb_init(void)
 795{
 796	/*
 797	 * There can be many concurrent release work items overwhelming
 798	 * system_wq.  Put them in a separate wq and limit concurrency.
 799	 * There's no point in executing many of these in parallel.
 800	 */
 801	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
 802	if (!cgwb_release_wq)
 803		return -ENOMEM;
 804
 805	return 0;
 806}
 807subsys_initcall(cgwb_init);
 808
 809#else	/* CONFIG_CGROUP_WRITEBACK */
 810
 811static int cgwb_bdi_init(struct backing_dev_info *bdi)
 812{
 813	int err;
 814
 815	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
 816	if (!bdi->wb_congested)
 817		return -ENOMEM;
 818
 819	refcount_set(&bdi->wb_congested->refcnt, 1);
 820
 821	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
 822	if (err) {
 823		wb_congested_put(bdi->wb_congested);
 824		return err;
 825	}
 826	return 0;
 827}
 828
 829static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }
 830
 831static void cgwb_bdi_exit(struct backing_dev_info *bdi)
 832{
 833	wb_congested_put(bdi->wb_congested);
 834}
 835
 836static void cgwb_bdi_register(struct backing_dev_info *bdi)
 837{
 838	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
 839}
 840
 841static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
 842{
 843	list_del_rcu(&wb->bdi_node);
 844}
 845
 846#endif	/* CONFIG_CGROUP_WRITEBACK */
 847
 848static int bdi_init(struct backing_dev_info *bdi)
 849{
 850	int ret;
 851
 852	bdi->dev = NULL;
 853
 854	kref_init(&bdi->refcnt);
 855	bdi->min_ratio = 0;
 856	bdi->max_ratio = 100;
 857	bdi->max_prop_frac = FPROP_FRAC_BASE;
 858	INIT_LIST_HEAD(&bdi->bdi_list);
 859	INIT_LIST_HEAD(&bdi->wb_list);
 860	init_waitqueue_head(&bdi->wb_waitq);
 861
 862	ret = cgwb_bdi_init(bdi);
 863
 864	return ret;
 865}
 866
 867struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
 868{
 869	struct backing_dev_info *bdi;
 870
 871	bdi = kmalloc_node(sizeof(struct backing_dev_info),
 872			   gfp_mask | __GFP_ZERO, node_id);
 873	if (!bdi)
 874		return NULL;
 875
 876	if (bdi_init(bdi)) {
 877		kfree(bdi);
 878		return NULL;
 879	}
 880	return bdi;
 881}
 882EXPORT_SYMBOL(bdi_alloc_node);
 883
 884static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
 885{
 886	struct rb_node **p = &bdi_tree.rb_node;
 887	struct rb_node *parent = NULL;
 888	struct backing_dev_info *bdi;
 889
 890	lockdep_assert_held(&bdi_lock);
 891
 892	while (*p) {
 893		parent = *p;
 894		bdi = rb_entry(parent, struct backing_dev_info, rb_node);
 895
 896		if (bdi->id > id)
 897			p = &(*p)->rb_left;
 898		else if (bdi->id < id)
 899			p = &(*p)->rb_right;
 900		else
 901			break;
 902	}
 903
 904	if (parentp)
 905		*parentp = parent;
 906	return p;
 907}
 908
 909/**
 910 * bdi_get_by_id - lookup and get bdi from its id
 911 * @id: bdi id to lookup
 912 *
 913 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 914 * doesn't exist or is already unregistered.
 915 */
 916struct backing_dev_info *bdi_get_by_id(u64 id)
 917{
 918	struct backing_dev_info *bdi = NULL;
 919	struct rb_node **p;
 920
 921	spin_lock_bh(&bdi_lock);
 922	p = bdi_lookup_rb_node(id, NULL);
 923	if (*p) {
 924		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
 925		bdi_get(bdi);
 926	}
 927	spin_unlock_bh(&bdi_lock);
 928
 929	return bdi;
 930}
 931
 932int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
 933{
 934	struct device *dev;
 935	struct rb_node *parent, **p;
 936
 937	if (bdi->dev)	/* The driver needs to use separate queues per device */
 938		return 0;
 939
 940	dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
 941	if (IS_ERR(dev))
 942		return PTR_ERR(dev);
 943
 944	cgwb_bdi_register(bdi);
 945	bdi->dev = dev;
 946
 947	bdi_debug_register(bdi, dev_name(dev));
 948	set_bit(WB_registered, &bdi->wb.state);
 949
 950	spin_lock_bh(&bdi_lock);
 951
 952	bdi->id = ++bdi_id_cursor;
 953
 954	p = bdi_lookup_rb_node(bdi->id, &parent);
 955	rb_link_node(&bdi->rb_node, parent, p);
 956	rb_insert_color(&bdi->rb_node, &bdi_tree);
 957
 958	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
 959
 960	spin_unlock_bh(&bdi_lock);
 961
 962	trace_writeback_bdi_register(bdi);
 963	return 0;
 964}
 965EXPORT_SYMBOL(bdi_register_va);
 966
 967int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
 968{
 969	va_list args;
 970	int ret;
 971
 972	va_start(args, fmt);
 973	ret = bdi_register_va(bdi, fmt, args);
 974	va_end(args);
 975	return ret;
 976}
 977EXPORT_SYMBOL(bdi_register);
 978
 979int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
 980{
 981	int rc;
 982
 983	rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
 984	if (rc)
 985		return rc;
 986	/* Leaking owner reference... */
 987	WARN_ON(bdi->owner);
 988	bdi->owner = owner;
 989	get_device(owner);
 990	return 0;
 991}
 992EXPORT_SYMBOL(bdi_register_owner);
 993
 994/*
 995 * Remove bdi from bdi_list, and ensure that it is no longer visible
 996 */
 997static void bdi_remove_from_list(struct backing_dev_info *bdi)
 998{
 999	spin_lock_bh(&bdi_lock);
1000	rb_erase(&bdi->rb_node, &bdi_tree);
1001	list_del_rcu(&bdi->bdi_list);
1002	spin_unlock_bh(&bdi_lock);
1003
1004	synchronize_rcu_expedited();
1005}
1006
1007void bdi_unregister(struct backing_dev_info *bdi)
1008{
1009	/* make sure nobody finds us on the bdi_list anymore */
1010	bdi_remove_from_list(bdi);
1011	wb_shutdown(&bdi->wb);
1012	cgwb_bdi_unregister(bdi);
1013
1014	if (bdi->dev) {
1015		bdi_debug_unregister(bdi);
1016		device_unregister(bdi->dev);
1017		bdi->dev = NULL;
1018	}
1019
1020	if (bdi->owner) {
1021		put_device(bdi->owner);
1022		bdi->owner = NULL;
1023	}
1024}
1025
1026static void release_bdi(struct kref *ref)
1027{
1028	struct backing_dev_info *bdi =
1029			container_of(ref, struct backing_dev_info, refcnt);
1030
1031	if (test_bit(WB_registered, &bdi->wb.state))
1032		bdi_unregister(bdi);
1033	WARN_ON_ONCE(bdi->dev);
1034	wb_exit(&bdi->wb);
1035	cgwb_bdi_exit(bdi);
1036	kfree(bdi);
1037}
1038
1039void bdi_put(struct backing_dev_info *bdi)
1040{
1041	kref_put(&bdi->refcnt, release_bdi);
1042}
1043EXPORT_SYMBOL(bdi_put);
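/*
 * Putting the lifecycle together, a driver-side sketch (illustrative names,
 * error handling trimmed):
 *
 *	bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 *
 * bdi_put() drops the reference taken at allocation; once it is the last
 * one, release_bdi() tears the structure down.
 */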
1044
1045static wait_queue_head_t congestion_wqh[2] = {
1046		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
1047		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
1048	};
1049static atomic_t nr_wb_congested[2];
1050
1051void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
1052{
1053	wait_queue_head_t *wqh = &congestion_wqh[sync];
1054	enum wb_congested_state bit;
1055
1056	bit = sync ? WB_sync_congested : WB_async_congested;
1057	if (test_and_clear_bit(bit, &congested->state))
1058		atomic_dec(&nr_wb_congested[sync]);
1059	smp_mb__after_atomic();
1060	if (waitqueue_active(wqh))
1061		wake_up(wqh);
1062}
1063EXPORT_SYMBOL(clear_wb_congested);
1064
1065void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
1066{
1067	enum wb_congested_state bit;
1068
1069	bit = sync ? WB_sync_congested : WB_async_congested;
1070	if (!test_and_set_bit(bit, &congested->state))
1071		atomic_inc(&nr_wb_congested[sync]);
1072}
1073EXPORT_SYMBOL(set_wb_congested);
1074
1075/**
1076 * congestion_wait - wait for a backing_dev to become uncongested
1077 * @sync: SYNC or ASYNC IO
1078 * @timeout: timeout in jiffies
1079 *
1080 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
1081 * write congestion.  If no backing_devs are congested then just wait for the
1082 * next write to be completed.
1083 */
1084long congestion_wait(int sync, long timeout)
1085{
1086	long ret;
1087	unsigned long start = jiffies;
1088	DEFINE_WAIT(wait);
1089	wait_queue_head_t *wqh = &congestion_wqh[sync];
1090
1091	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1092	ret = io_schedule_timeout(timeout);
1093	finish_wait(wqh, &wait);
1094
1095	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
1096					jiffies_to_usecs(jiffies - start));
1097
1098	return ret;
1099}
1100EXPORT_SYMBOL(congestion_wait);
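/*
 * Representative caller sketch: reclaim-style paths back off for up to a
 * tenth of a second while asynchronous writeback is congested, e.g.
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 */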
1101
1102/**
1103 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
1104 * @sync: SYNC or ASYNC IO
1105 * @timeout: timeout in jiffies
1106 *
1107 * In the event of a congested backing_dev (any backing_dev) this waits
1108 * for up to @timeout jiffies for either a BDI to exit congestion of the
1109 * given @sync queue or a write to complete.
1110 *
1111 * The return value is 0 if the sleep is for the full timeout. Otherwise,
1112 * it is the number of jiffies that were still remaining when the function
1113 * returned. return_value == timeout implies the function did not sleep.
1114 */
1115long wait_iff_congested(int sync, long timeout)
1116{
1117	long ret;
1118	unsigned long start = jiffies;
1119	DEFINE_WAIT(wait);
1120	wait_queue_head_t *wqh = &congestion_wqh[sync];
1121
1122	/*
1123	 * If there is no congestion, yield if necessary instead
1124	 * of sleeping on the congestion queue
1125	 */
1126	if (atomic_read(&nr_wb_congested[sync]) == 0) {
1127		cond_resched();
1128
1129		/* In case we scheduled, work out time remaining */
1130		ret = timeout - (jiffies - start);
1131		if (ret < 0)
1132			ret = 0;
1133
1134		goto out;
1135	}
1136
1137	/* Sleep until uncongested or a write happens */
1138	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1139	ret = io_schedule_timeout(timeout);
1140	finish_wait(wqh, &wait);
1141
1142out:
1143	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
1144					jiffies_to_usecs(jiffies - start));
1145
1146	return ret;
1147}
1148EXPORT_SYMBOL(wait_iff_congested);
v4.6
 
   1
   2#include <linux/wait.h>
 
   3#include <linux/backing-dev.h>
   4#include <linux/kthread.h>
   5#include <linux/freezer.h>
   6#include <linux/fs.h>
   7#include <linux/pagemap.h>
   8#include <linux/mm.h>
   9#include <linux/sched.h>
  10#include <linux/module.h>
  11#include <linux/writeback.h>
  12#include <linux/device.h>
  13#include <trace/events/writeback.h>
  14
  15static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
  16
  17struct backing_dev_info noop_backing_dev_info = {
  18	.name		= "noop",
  19	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
  20};
  21EXPORT_SYMBOL_GPL(noop_backing_dev_info);
  22
  23static struct class *bdi_class;
  24
  25/*
  26 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
  27 * locking.
  28 */
  29DEFINE_SPINLOCK(bdi_lock);
 
 
  30LIST_HEAD(bdi_list);
  31
  32/* bdi_wq serves all asynchronous writeback tasks */
  33struct workqueue_struct *bdi_wq;
  34
  35#ifdef CONFIG_DEBUG_FS
  36#include <linux/debugfs.h>
  37#include <linux/seq_file.h>
  38
  39static struct dentry *bdi_debug_root;
  40
  41static void bdi_debug_init(void)
  42{
  43	bdi_debug_root = debugfs_create_dir("bdi", NULL);
  44}
  45
  46static int bdi_debug_stats_show(struct seq_file *m, void *v)
  47{
  48	struct backing_dev_info *bdi = m->private;
  49	struct bdi_writeback *wb = &bdi->wb;
  50	unsigned long background_thresh;
  51	unsigned long dirty_thresh;
  52	unsigned long wb_thresh;
  53	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
  54	struct inode *inode;
  55
  56	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
  57	spin_lock(&wb->list_lock);
  58	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
  59		nr_dirty++;
  60	list_for_each_entry(inode, &wb->b_io, i_io_list)
  61		nr_io++;
  62	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
  63		nr_more_io++;
  64	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
  65		if (inode->i_state & I_DIRTY_TIME)
  66			nr_dirty_time++;
  67	spin_unlock(&wb->list_lock);
  68
  69	global_dirty_limits(&background_thresh, &dirty_thresh);
  70	wb_thresh = wb_calc_thresh(wb, dirty_thresh);
  71
  72#define K(x) ((x) << (PAGE_SHIFT - 10))
  73	seq_printf(m,
  74		   "BdiWriteback:       %10lu kB\n"
  75		   "BdiReclaimable:     %10lu kB\n"
  76		   "BdiDirtyThresh:     %10lu kB\n"
  77		   "DirtyThresh:        %10lu kB\n"
  78		   "BackgroundThresh:   %10lu kB\n"
  79		   "BdiDirtied:         %10lu kB\n"
  80		   "BdiWritten:         %10lu kB\n"
  81		   "BdiWriteBandwidth:  %10lu kBps\n"
  82		   "b_dirty:            %10lu\n"
  83		   "b_io:               %10lu\n"
  84		   "b_more_io:          %10lu\n"
  85		   "b_dirty_time:       %10lu\n"
  86		   "bdi_list:           %10u\n"
  87		   "state:              %10lx\n",
  88		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
  89		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
  90		   K(wb_thresh),
  91		   K(dirty_thresh),
  92		   K(background_thresh),
  93		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
  94		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
  95		   (unsigned long) K(wb->write_bandwidth),
  96		   nr_dirty,
  97		   nr_io,
  98		   nr_more_io,
  99		   nr_dirty_time,
 100		   !list_empty(&bdi->bdi_list), bdi->wb.state);
 101#undef K
 102
 103	return 0;
 104}
 105
 106static int bdi_debug_stats_open(struct inode *inode, struct file *file)
 107{
 108	return single_open(file, bdi_debug_stats_show, inode->i_private);
 109}
 110
 111static const struct file_operations bdi_debug_stats_fops = {
 112	.open		= bdi_debug_stats_open,
 113	.read		= seq_read,
 114	.llseek		= seq_lseek,
 115	.release	= single_release,
 116};
 117
 118static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 119{
 120	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
 121	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
 122					       bdi, &bdi_debug_stats_fops);
 
 123}
 124
 125static void bdi_debug_unregister(struct backing_dev_info *bdi)
 126{
 127	debugfs_remove(bdi->debug_stats);
 128	debugfs_remove(bdi->debug_dir);
 129}
 130#else
 131static inline void bdi_debug_init(void)
 132{
 133}
 134static inline void bdi_debug_register(struct backing_dev_info *bdi,
 135				      const char *name)
 136{
 137}
 138static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 139{
 140}
 141#endif
 142
 143static ssize_t read_ahead_kb_store(struct device *dev,
 144				  struct device_attribute *attr,
 145				  const char *buf, size_t count)
 146{
 147	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 148	unsigned long read_ahead_kb;
 149	ssize_t ret;
 150
 151	ret = kstrtoul(buf, 10, &read_ahead_kb);
 152	if (ret < 0)
 153		return ret;
 154
 155	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
 156
 157	return count;
 158}
 159
 160#define K(pages) ((pages) << (PAGE_SHIFT - 10))
 161
 162#define BDI_SHOW(name, expr)						\
 163static ssize_t name##_show(struct device *dev,				\
 164			   struct device_attribute *attr, char *page)	\
 165{									\
 166	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
 167									\
 168	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
 169}									\
 170static DEVICE_ATTR_RW(name);
 171
 172BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
 173
 174static ssize_t min_ratio_store(struct device *dev,
 175		struct device_attribute *attr, const char *buf, size_t count)
 176{
 177	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 178	unsigned int ratio;
 179	ssize_t ret;
 180
 181	ret = kstrtouint(buf, 10, &ratio);
 182	if (ret < 0)
 183		return ret;
 184
 185	ret = bdi_set_min_ratio(bdi, ratio);
 186	if (!ret)
 187		ret = count;
 188
 189	return ret;
 190}
 191BDI_SHOW(min_ratio, bdi->min_ratio)
 192
 193static ssize_t max_ratio_store(struct device *dev,
 194		struct device_attribute *attr, const char *buf, size_t count)
 195{
 196	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 197	unsigned int ratio;
 198	ssize_t ret;
 199
 200	ret = kstrtouint(buf, 10, &ratio);
 201	if (ret < 0)
 202		return ret;
 203
 204	ret = bdi_set_max_ratio(bdi, ratio);
 205	if (!ret)
 206		ret = count;
 207
 208	return ret;
 209}
 210BDI_SHOW(max_ratio, bdi->max_ratio)
 211
 212static ssize_t stable_pages_required_show(struct device *dev,
 213					  struct device_attribute *attr,
 214					  char *page)
 215{
 216	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 217
 218	return snprintf(page, PAGE_SIZE-1, "%d\n",
 219			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
 220}
 221static DEVICE_ATTR_RO(stable_pages_required);
 222
 223static struct attribute *bdi_dev_attrs[] = {
 224	&dev_attr_read_ahead_kb.attr,
 225	&dev_attr_min_ratio.attr,
 226	&dev_attr_max_ratio.attr,
 227	&dev_attr_stable_pages_required.attr,
 228	NULL,
 229};
 230ATTRIBUTE_GROUPS(bdi_dev);
 231
 232static __init int bdi_class_init(void)
 233{
 234	bdi_class = class_create(THIS_MODULE, "bdi");
 235	if (IS_ERR(bdi_class))
 236		return PTR_ERR(bdi_class);
 237
 238	bdi_class->dev_groups = bdi_dev_groups;
 239	bdi_debug_init();
 
 240	return 0;
 241}
 242postcore_initcall(bdi_class_init);
 243
 
 
 244static int __init default_bdi_init(void)
 245{
 246	int err;
 247
 248	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
 249					      WQ_UNBOUND | WQ_SYSFS, 0);
 250	if (!bdi_wq)
 251		return -ENOMEM;
 252
 253	err = bdi_init(&noop_backing_dev_info);
 254
 255	return err;
 256}
 257subsys_initcall(default_bdi_init);
 258
 259/*
 260 * This function is used when the first inode for this wb is marked dirty. It
 261 * wakes-up the corresponding bdi thread which should then take care of the
 262 * periodic background write-out of dirty inodes. Since the write-out would
 263 * starts only 'dirty_writeback_interval' centisecs from now anyway, we just
 264 * set up a timer which wakes the bdi thread up later.
 265 *
 266 * Note, we wouldn't bother setting up the timer, but this function is on the
 267 * fast-path (used by '__mark_inode_dirty()'), so we save few context switches
 268 * by delaying the wake-up.
 269 *
 270 * We have to be careful not to postpone flush work if it is scheduled for
 271 * earlier. Thus we use queue_delayed_work().
 272 */
 273void wb_wakeup_delayed(struct bdi_writeback *wb)
 274{
 275	unsigned long timeout;
 276
 277	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
 278	spin_lock_bh(&wb->work_lock);
 279	if (test_bit(WB_registered, &wb->state))
 280		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
 281	spin_unlock_bh(&wb->work_lock);
 282}
 283
 284/*
 285 * Initial write bandwidth: 100 MB/s
 286 */
 287#define INIT_BW		(100 << (20 - PAGE_SHIFT))
 288
 289static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
 290		   int blkcg_id, gfp_t gfp)
 291{
 292	int i, err;
 293
 294	memset(wb, 0, sizeof(*wb));
 295
 
 
 296	wb->bdi = bdi;
 297	wb->last_old_flush = jiffies;
 298	INIT_LIST_HEAD(&wb->b_dirty);
 299	INIT_LIST_HEAD(&wb->b_io);
 300	INIT_LIST_HEAD(&wb->b_more_io);
 301	INIT_LIST_HEAD(&wb->b_dirty_time);
 302	spin_lock_init(&wb->list_lock);
 303
 304	wb->bw_time_stamp = jiffies;
 305	wb->balanced_dirty_ratelimit = INIT_BW;
 306	wb->dirty_ratelimit = INIT_BW;
 307	wb->write_bandwidth = INIT_BW;
 308	wb->avg_write_bandwidth = INIT_BW;
 309
 310	spin_lock_init(&wb->work_lock);
 311	INIT_LIST_HEAD(&wb->work_list);
 312	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
 
 313
 314	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
 315	if (!wb->congested)
 316		return -ENOMEM;
 
 
 317
 318	err = fprop_local_init_percpu(&wb->completions, gfp);
 319	if (err)
 320		goto out_put_cong;
 321
 322	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
 323		err = percpu_counter_init(&wb->stat[i], 0, gfp);
 324		if (err)
 325			goto out_destroy_stat;
 326	}
 327
 328	return 0;
 329
 330out_destroy_stat:
 331	while (i--)
 332		percpu_counter_destroy(&wb->stat[i]);
 333	fprop_local_destroy_percpu(&wb->completions);
 334out_put_cong:
 335	wb_congested_put(wb->congested);
 
 
 
 336	return err;
 337}
 338
 
 
 339/*
 340 * Remove bdi from the global list and shutdown any threads we have running
 341 */
 342static void wb_shutdown(struct bdi_writeback *wb)
 343{
 344	/* Make sure nobody queues further work */
 345	spin_lock_bh(&wb->work_lock);
 346	if (!test_and_clear_bit(WB_registered, &wb->state)) {
 347		spin_unlock_bh(&wb->work_lock);
 348		return;
 349	}
 350	spin_unlock_bh(&wb->work_lock);
 351
 
 352	/*
 353	 * Drain work list and shutdown the delayed_work.  !WB_registered
 354	 * tells wb_workfn() that @wb is dying and its work_list needs to
 355	 * be drained no matter what.
 356	 */
 357	mod_delayed_work(bdi_wq, &wb->dwork, 0);
 358	flush_delayed_work(&wb->dwork);
 359	WARN_ON(!list_empty(&wb->work_list));
 360}
 361
 362static void wb_exit(struct bdi_writeback *wb)
 363{
 364	int i;
 365
 366	WARN_ON(delayed_work_pending(&wb->dwork));
 367
 368	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
 369		percpu_counter_destroy(&wb->stat[i]);
 370
 371	fprop_local_destroy_percpu(&wb->completions);
 372	wb_congested_put(wb->congested);
 
 
 373}
 374
 375#ifdef CONFIG_CGROUP_WRITEBACK
 376
 377#include <linux/memcontrol.h>
 378
 379/*
 380 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 381 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 382 * protected.  cgwb_release_wait is used to wait for the completion of cgwb
 383 * releases from bdi destruction path.
 384 */
 385static DEFINE_SPINLOCK(cgwb_lock);
 386static DECLARE_WAIT_QUEUE_HEAD(cgwb_release_wait);
 387
 388/**
 389 * wb_congested_get_create - get or create a wb_congested
 390 * @bdi: associated bdi
 391 * @blkcg_id: ID of the associated blkcg
 392 * @gfp: allocation mask
 393 *
 394 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 395 * The returned wb_congested has its reference count incremented.  Returns
 396 * NULL on failure.
 397 */
 398struct bdi_writeback_congested *
 399wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
 400{
 401	struct bdi_writeback_congested *new_congested = NULL, *congested;
 402	struct rb_node **node, *parent;
 403	unsigned long flags;
 404retry:
 405	spin_lock_irqsave(&cgwb_lock, flags);
 406
 407	node = &bdi->cgwb_congested_tree.rb_node;
 408	parent = NULL;
 409
 410	while (*node != NULL) {
 411		parent = *node;
 412		congested = container_of(parent, struct bdi_writeback_congested,
 413					 rb_node);
 414		if (congested->blkcg_id < blkcg_id)
 415			node = &parent->rb_left;
 416		else if (congested->blkcg_id > blkcg_id)
 417			node = &parent->rb_right;
 418		else
 419			goto found;
 420	}
 421
 422	if (new_congested) {
 423		/* !found and storage for new one already allocated, insert */
 424		congested = new_congested;
 425		new_congested = NULL;
 426		rb_link_node(&congested->rb_node, parent, node);
 427		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
 428		goto found;
 
 429	}
 430
 431	spin_unlock_irqrestore(&cgwb_lock, flags);
 432
 433	/* allocate storage for new one and retry */
 434	new_congested = kzalloc(sizeof(*new_congested), gfp);
 435	if (!new_congested)
 436		return NULL;
 437
 438	atomic_set(&new_congested->refcnt, 0);
 439	new_congested->bdi = bdi;
 440	new_congested->blkcg_id = blkcg_id;
 441	goto retry;
 442
 443found:
 444	atomic_inc(&congested->refcnt);
 445	spin_unlock_irqrestore(&cgwb_lock, flags);
 446	kfree(new_congested);
 447	return congested;
 448}
 449
 450/**
 451 * wb_congested_put - put a wb_congested
 452 * @congested: wb_congested to put
 453 *
 454 * Put @congested and destroy it if the refcnt reaches zero.
 455 */
 456void wb_congested_put(struct bdi_writeback_congested *congested)
 457{
 458	unsigned long flags;
 459
 460	local_irq_save(flags);
 461	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
 462		local_irq_restore(flags);
 463		return;
 464	}
 465
 466	/* bdi might already have been destroyed leaving @congested unlinked */
 467	if (congested->bdi) {
 468		rb_erase(&congested->rb_node,
 469			 &congested->bdi->cgwb_congested_tree);
 470		congested->bdi = NULL;
 471	}
 472
 473	spin_unlock_irqrestore(&cgwb_lock, flags);
 474	kfree(congested);
 475}
 476
 477static void cgwb_release_workfn(struct work_struct *work)
 478{
 479	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
 480						release_work);
 481	struct backing_dev_info *bdi = wb->bdi;
 482
 483	spin_lock_irq(&cgwb_lock);
 484	list_del_rcu(&wb->bdi_node);
 485	spin_unlock_irq(&cgwb_lock);
 486
 
 487	wb_shutdown(wb);
 488
 489	css_put(wb->memcg_css);
 490	css_put(wb->blkcg_css);
 
 
 
 
 491
 492	fprop_local_destroy_percpu(&wb->memcg_completions);
 493	percpu_ref_exit(&wb->refcnt);
 494	wb_exit(wb);
 495	kfree_rcu(wb, rcu);
 496
 497	if (atomic_dec_and_test(&bdi->usage_cnt))
 498		wake_up_all(&cgwb_release_wait);
 499}
 500
 501static void cgwb_release(struct percpu_ref *refcnt)
 502{
 503	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
 504						refcnt);
 505	schedule_work(&wb->release_work);
 506}
 507
 508static void cgwb_kill(struct bdi_writeback *wb)
 509{
 510	lockdep_assert_held(&cgwb_lock);
 511
 512	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
 513	list_del(&wb->memcg_node);
 514	list_del(&wb->blkcg_node);
 515	percpu_ref_kill(&wb->refcnt);
 516}
 517
 
 
 
 
 
 
 
 518static int cgwb_create(struct backing_dev_info *bdi,
 519		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
 520{
 521	struct mem_cgroup *memcg;
 522	struct cgroup_subsys_state *blkcg_css;
 523	struct blkcg *blkcg;
 524	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
 525	struct bdi_writeback *wb;
 526	unsigned long flags;
 527	int ret = 0;
 528
 529	memcg = mem_cgroup_from_css(memcg_css);
 530	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
 531	blkcg = css_to_blkcg(blkcg_css);
 532	memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
 533	blkcg_cgwb_list = &blkcg->cgwb_list;
 534
 535	/* look up again under lock and discard on blkcg mismatch */
 536	spin_lock_irqsave(&cgwb_lock, flags);
 537	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 538	if (wb && wb->blkcg_css != blkcg_css) {
 539		cgwb_kill(wb);
 540		wb = NULL;
 541	}
 542	spin_unlock_irqrestore(&cgwb_lock, flags);
 543	if (wb)
 544		goto out_put;
 545
 546	/* need to create a new one */
 547	wb = kmalloc(sizeof(*wb), gfp);
 548	if (!wb)
 549		return -ENOMEM;
 
 
 550
 551	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
 552	if (ret)
 553		goto err_free;
 554
 555	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
 556	if (ret)
 557		goto err_wb_exit;
 558
 559	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
 560	if (ret)
 561		goto err_ref_exit;
 562
 563	wb->memcg_css = memcg_css;
 564	wb->blkcg_css = blkcg_css;
 565	INIT_WORK(&wb->release_work, cgwb_release_workfn);
 566	set_bit(WB_registered, &wb->state);
 567
 568	/*
 569	 * The root wb determines the registered state of the whole bdi and
 570	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
 571	 * whether they're still online.  Don't link @wb if any is dead.
 572	 * See wb_memcg_offline() and wb_blkcg_offline().
 573	 */
 574	ret = -ENODEV;
 575	spin_lock_irqsave(&cgwb_lock, flags);
 576	if (test_bit(WB_registered, &bdi->wb.state) &&
 577	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
 578		/* we might have raced another instance of this function */
 579		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
 580		if (!ret) {
 581			atomic_inc(&bdi->usage_cnt);
 582			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
 583			list_add(&wb->memcg_node, memcg_cgwb_list);
 584			list_add(&wb->blkcg_node, blkcg_cgwb_list);
 
 585			css_get(memcg_css);
 586			css_get(blkcg_css);
 587		}
 588	}
 589	spin_unlock_irqrestore(&cgwb_lock, flags);
 590	if (ret) {
 591		if (ret == -EEXIST)
 592			ret = 0;
 593		goto err_fprop_exit;
 594	}
 595	goto out_put;
 596
 597err_fprop_exit:
 598	fprop_local_destroy_percpu(&wb->memcg_completions);
 599err_ref_exit:
 600	percpu_ref_exit(&wb->refcnt);
 601err_wb_exit:
 602	wb_exit(wb);
 603err_free:
 604	kfree(wb);
 605out_put:
 606	css_put(blkcg_css);
 607	return ret;
 608}
 609
 610/**
 611 * wb_get_create - get wb for a given memcg, create if necessary
 612 * @bdi: target bdi
 613 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 614 * @gfp: allocation mask to use
 615 *
 616 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 617 * create one.  The returned wb has its refcount incremented.
 618 *
 619 * This function uses css_get() on @memcg_css and thus expects its refcnt
 620 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 621 * @memcg_css isn't enough.  try_get it before calling this function.
 622 *
 623 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 624 * memcg on the default hierarchy, memcg association is guaranteed to be
 625 * more specific (equal or descendant to the associated blkcg) and thus can
 626 * identify both the memcg and blkcg associations.
 627 *
 628 * Because the blkcg associated with a memcg may change as blkcg is enabled
 629 * and disabled closer to root in the hierarchy, each wb keeps track of
 630 * both the memcg and blkcg associated with it and verifies the blkcg on
 631 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 632 * created.
 633 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 634struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
 635				    struct cgroup_subsys_state *memcg_css,
 636				    gfp_t gfp)
 637{
 638	struct bdi_writeback *wb;
 639
 640	might_sleep_if(gfpflags_allow_blocking(gfp));
 641
 642	if (!memcg_css->parent)
 643		return &bdi->wb;
 644
 645	do {
 646		rcu_read_lock();
 647		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 648		if (wb) {
 649			struct cgroup_subsys_state *blkcg_css;
 650
 651			/* see whether the blkcg association has changed */
 652			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
 653						     &io_cgrp_subsys);
 654			if (unlikely(wb->blkcg_css != blkcg_css ||
 655				     !wb_tryget(wb)))
 656				wb = NULL;
 657			css_put(blkcg_css);
 658		}
 659		rcu_read_unlock();
 660	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));
 661
 662	return wb;
 663}
 664
 665static int cgwb_bdi_init(struct backing_dev_info *bdi)
 666{
 667	int ret;
 668
 669	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
 670	bdi->cgwb_congested_tree = RB_ROOT;
 671	atomic_set(&bdi->usage_cnt, 1);
 
 672
 673	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
 674	if (!ret) {
 675		bdi->wb.memcg_css = &root_mem_cgroup->css;
 676		bdi->wb.blkcg_css = blkcg_root_css;
 677	}
 678	return ret;
 679}
 680
 681static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 682{
 683	struct radix_tree_iter iter;
 684	struct rb_node *rbn;
 685	void **slot;
 
 686
 687	WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 688
 689	spin_lock_irq(&cgwb_lock);
 690
 691	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 692		cgwb_kill(*slot);
 
 693
 694	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
 695		struct bdi_writeback_congested *congested =
 696			rb_entry(rbn, struct bdi_writeback_congested, rb_node);
 697
 698		rb_erase(rbn, &bdi->cgwb_congested_tree);
 699		congested->bdi = NULL;	/* mark @congested unlinked */
 
 
 700	}
 701
 702	spin_unlock_irq(&cgwb_lock);
 703
 704	/*
 705	 * All cgwb's and their congested states must be shutdown and
 706	 * released before returning.  Drain the usage counter to wait for
 707	 * all cgwb's and cgwb_congested's ever created on @bdi.
 708	 */
 709	atomic_dec(&bdi->usage_cnt);
 710	wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
 711}
 712
 713/**
 714 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 715 * @memcg: memcg being offlined
 716 *
 717 * Also prevents creation of any new wb's associated with @memcg.
 718 */
 719void wb_memcg_offline(struct mem_cgroup *memcg)
 720{
 721	LIST_HEAD(to_destroy);
 722	struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
 723	struct bdi_writeback *wb, *next;
 724
 725	spin_lock_irq(&cgwb_lock);
 726	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
 727		cgwb_kill(wb);
 728	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
 729	spin_unlock_irq(&cgwb_lock);
 730}
 731
 732/**
 733 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 734 * @blkcg: blkcg being offlined
 735 *
 736 * Also prevents creation of any new wb's associated with @blkcg.
 737 */
 738void wb_blkcg_offline(struct blkcg *blkcg)
 739{
 740	LIST_HEAD(to_destroy);
 741	struct bdi_writeback *wb, *next;
 742
 743	spin_lock_irq(&cgwb_lock);
 744	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
 745		cgwb_kill(wb);
 746	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
 747	spin_unlock_irq(&cgwb_lock);
 748}
 749
 750#else	/* CONFIG_CGROUP_WRITEBACK */
 751
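/*
 * Without cgroup writeback only the root wb exists; allocate its single
 * congested state and initialize the wb.
 */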
 752static int cgwb_bdi_init(struct backing_dev_info *bdi)
 753{
 754	int err;
 755
 756	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
 757	if (!bdi->wb_congested)
 758		return -ENOMEM;
 759
 760	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
 761	if (err) {
 762		kfree(bdi->wb_congested);
 763		return err;
 764	}
 765	return 0;
 766}
 767
 768static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
 769
 770#endif	/* CONFIG_CGROUP_WRITEBACK */
 771
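/*
 * Initialize @bdi with default dirty ratios, empty bdi/wb lists and the
 * wb waitqueue, then set up cgroup writeback state and add the root wb
 * to the bdi's wb_list.
 */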
 772int bdi_init(struct backing_dev_info *bdi)
 773{
 774	int ret;
 775
 776	bdi->dev = NULL;
 777
 778	bdi->min_ratio = 0;
 779	bdi->max_ratio = 100;
 780	bdi->max_prop_frac = FPROP_FRAC_BASE;
 781	INIT_LIST_HEAD(&bdi->bdi_list);
 782	INIT_LIST_HEAD(&bdi->wb_list);
 783	init_waitqueue_head(&bdi->wb_waitq);
 784
 785	ret = cgwb_bdi_init(bdi);
 786
 787	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
 788
 789	return ret;
 790}
 791EXPORT_SYMBOL(bdi_init);
 792
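/*
 * Register @bdi under a device name formatted from @fmt: create the
 * class device, hook up debugfs, mark the root wb WB_registered and link
 * the bdi onto the global bdi_list.
 */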
 793int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 794		const char *fmt, ...)
 795{
 796	va_list args;
 797	struct device *dev;
 798
 799	if (bdi->dev)	/* The driver needs to use separate queues per device */
 800		return 0;
 801
 802	va_start(args, fmt);
 803	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
 804	va_end(args);
 805	if (IS_ERR(dev))
 806		return PTR_ERR(dev);
 807
 808	bdi->dev = dev;
 809
 810	bdi_debug_register(bdi, dev_name(dev));
 811	set_bit(WB_registered, &bdi->wb.state);
 812
 813	spin_lock_bh(&bdi_lock);
 814	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
 815	spin_unlock_bh(&bdi_lock);
 816
 817	trace_writeback_bdi_register(bdi);
 818	return 0;
 819}
 820EXPORT_SYMBOL(bdi_register);
 821
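/* Convenience wrapper: register @bdi under its "major:minor" device name. */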
 822int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
 823{
 824	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
 825}
 826EXPORT_SYMBOL(bdi_register_dev);
 827
 828/*
 829 * Remove bdi from bdi_list, and ensure that it is no longer visible
 830 */
 831static void bdi_remove_from_list(struct backing_dev_info *bdi)
 832{
 833	spin_lock_bh(&bdi_lock);
 834	list_del_rcu(&bdi->bdi_list);
 835	spin_unlock_bh(&bdi_lock);
 836
 837	synchronize_rcu_expedited();
 838}
 839
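/*
 * Undo bdi_register(): remove @bdi from bdi_list, shut down the root wb,
 * destroy cgroup writeback state and release the class device if one was
 * created.
 */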
 840void bdi_unregister(struct backing_dev_info *bdi)
 841{
 842	/* make sure nobody finds us on the bdi_list anymore */
 843	bdi_remove_from_list(bdi);
 844	wb_shutdown(&bdi->wb);
 845	cgwb_bdi_destroy(bdi);
 846
 847	if (bdi->dev) {
 848		bdi_debug_unregister(bdi);
 849		device_unregister(bdi->dev);
 850		bdi->dev = NULL;
 851	}
 852}
 853
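/* Final teardown of @bdi after unregistration; releases the root wb. */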
 854void bdi_exit(struct backing_dev_info *bdi)
 855{
 856	WARN_ON_ONCE(bdi->dev);
 857	wb_exit(&bdi->wb);
 858}
 859
 860void bdi_destroy(struct backing_dev_info *bdi)
 861{
 862	bdi_unregister(bdi);
 863	bdi_exit(bdi);
 864}
 865EXPORT_SYMBOL(bdi_destroy);
 866
 867/*
 868 * For use from filesystems to quickly init and register a bdi associated
 869 * with dirty writeback
 870 */
 871int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
 872{
 873	int err;
 874
 875	bdi->name = name;
 876	bdi->capabilities = 0;
 877	err = bdi_init(bdi);
 878	if (err)
 879		return err;
 880
 881	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
 882			   atomic_long_inc_return(&bdi_seq));
 883	if (err) {
 884		bdi_destroy(bdi);
 885		return err;
 886	}
 887
 888	return 0;
 889}
 890EXPORT_SYMBOL(bdi_setup_and_register);
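
/*
 * Illustrative use only (not part of this file): a filesystem would
 * typically embed a backing_dev_info in its private superblock info
 * ("sbi" below is hypothetical) and pair this with bdi_destroy() at
 * unmount time:
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "myfs");
 *	if (err)
 *		return err;
 */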
 891
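/*
 * Congestion waitqueues and counters, indexed by the @sync class (async
 * vs sync writes).  Tasks in congestion_wait() and wait_iff_congested()
 * sleep here until a wb clears its congested bit.
 */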
 892static wait_queue_head_t congestion_wqh[2] = {
 893		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
 894		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
 895	};
 896static atomic_t nr_wb_congested[2];
 897
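/*
 * Clear the congested bit for the @sync class on @congested, drop the
 * global congested count and wake any waiters on the matching waitqueue.
 */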
 898void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
 899{
 900	wait_queue_head_t *wqh = &congestion_wqh[sync];
 901	enum wb_congested_state bit;
 902
 903	bit = sync ? WB_sync_congested : WB_async_congested;
 904	if (test_and_clear_bit(bit, &congested->state))
 905		atomic_dec(&nr_wb_congested[sync]);
 906	smp_mb__after_atomic();
 907	if (waitqueue_active(wqh))
 908		wake_up(wqh);
 909}
 910EXPORT_SYMBOL(clear_wb_congested);
 911
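/* Set the congested bit for the @sync class and bump the global count. */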
 912void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
 913{
 914	enum wb_congested_state bit;
 915
 916	bit = sync ? WB_sync_congested : WB_async_congested;
 917	if (!test_and_set_bit(bit, &congested->state))
 918		atomic_inc(&nr_wb_congested[sync]);
 919}
 920EXPORT_SYMBOL(set_wb_congested);
 921
 922/**
 923 * congestion_wait - wait for a backing_dev to become uncongested
 924 * @sync: SYNC or ASYNC IO
 925 * @timeout: timeout in jiffies
 926 *
 927 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 928 * write congestion.  If no backing_devs are congested then just wait for the
 929 * next write to be completed.
 930 */
 931long congestion_wait(int sync, long timeout)
 932{
 933	long ret;
 934	unsigned long start = jiffies;
 935	DEFINE_WAIT(wait);
 936	wait_queue_head_t *wqh = &congestion_wqh[sync];
 937
 938	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
 939	ret = io_schedule_timeout(timeout);
 940	finish_wait(wqh, &wait);
 941
 942	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
 943					jiffies_to_usecs(jiffies - start));
 944
 945	return ret;
 946}
 947EXPORT_SYMBOL(congestion_wait);
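
/*
 * Illustrative caller pattern (not from this file): reclaim-style paths
 * commonly back off for a short period when writeback is congested, e.g.
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 */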
 948
 949/**
 950 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 951 * @zone: A zone to check if it is heavily congested
 952 * @sync: SYNC or ASYNC IO
 953 * @timeout: timeout in jiffies
 954 *
 955 * If any backing_dev is congested and the given @zone has experienced
 956 * recent congestion, this waits for up to @timeout jiffies for either a
 957 * BDI to exit congestion of the given @sync queue or a write to
 958 * complete.
 959 *
 960 * In the absence of zone congestion, a short sleep or a cond_resched is
 961 * performed to yield the processor and to allow other subsystems to make
 962 * forward progress.
 963 *
 964 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 965 * it is the number of jiffies that were still remaining when the function
 966 * returned. return_value == timeout implies the function did not sleep.
 967 */
 968long wait_iff_congested(struct zone *zone, int sync, long timeout)
 969{
 970	long ret;
 971	unsigned long start = jiffies;
 972	DEFINE_WAIT(wait);
 973	wait_queue_head_t *wqh = &congestion_wqh[sync];
 974
 975	/*
 976	 * If there is no congestion, or heavy congestion is not being
 977	 * encountered in the current zone, yield if necessary instead
 978	 * of sleeping on the congestion queue
 979	 */
 980	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
 981	    !test_bit(ZONE_CONGESTED, &zone->flags)) {
 982
 983		/*
 984		 * Memory allocation/reclaim might be called from a WQ
 985		 * context and the current implementation of the WQ
 986		 * concurrency control doesn't recognize that a particular
 987		 * WQ is congested if the worker thread is looping without
 988		 * ever sleeping. Therefore we have to do a short sleep
 989		 * here rather than calling cond_resched().
 990		 */
 991		if (current->flags & PF_WQ_WORKER)
 992			schedule_timeout_uninterruptible(1);
 993		else
 994			cond_resched();
 995
 996		/* In case we scheduled, work out time remaining */
 997		ret = timeout - (jiffies - start);
 998		if (ret < 0)
 999			ret = 0;
1000
1001		goto out;
1002	}
1003
1004	/* Sleep until uncongested or a write happens */
1005	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1006	ret = io_schedule_timeout(timeout);
1007	finish_wait(wqh, &wait);
1008
1009out:
1010	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
1011					jiffies_to_usecs(jiffies - start));
1012
1013	return ret;
1014}
1015EXPORT_SYMBOL(wait_iff_congested);
1016
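/*
 * Handler for the obsolete pdflush sysctl: always reports "0\n" and
 * warns once that the /proc entry is scheduled for removal.
 */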
1017int pdflush_proc_obsolete(struct ctl_table *table, int write,
1018			void __user *buffer, size_t *lenp, loff_t *ppos)
1019{
1020	char kbuf[] = "0\n";
1021
1022	if (*ppos || *lenp < sizeof(kbuf)) {
1023		*lenp = 0;
1024		return 0;
1025	}
1026
1027	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
1028		return -EFAULT;
1029	pr_warn_once("%s exported in /proc is scheduled for removal\n",
1030		     table->procname);
1031
1032	*lenp = 2;
1033	*ppos += *lenp;
1034	return 2;
1035}