v4.17
 
   1
 
   2#include <linux/wait.h>
   3#include <linux/backing-dev.h>
   4#include <linux/kthread.h>
   5#include <linux/freezer.h>
   6#include <linux/fs.h>
   7#include <linux/pagemap.h>
   8#include <linux/mm.h>
   9#include <linux/sched.h>
  10#include <linux/module.h>
  11#include <linux/writeback.h>
  12#include <linux/device.h>
  13#include <trace/events/writeback.h>
  14
  15struct backing_dev_info noop_backing_dev_info = {
  16	.name		= "noop",
  17	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
  18};
  19EXPORT_SYMBOL_GPL(noop_backing_dev_info);
  20
  21static struct class *bdi_class;
  22
  23/*
  24 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
  25 * locking.
  26 */
  27DEFINE_SPINLOCK(bdi_lock);
  28LIST_HEAD(bdi_list);
  29
  30/* bdi_wq serves all asynchronous writeback tasks */
  31struct workqueue_struct *bdi_wq;
  32
  33#ifdef CONFIG_DEBUG_FS
  34#include <linux/debugfs.h>
  35#include <linux/seq_file.h>
  36
  37static struct dentry *bdi_debug_root;
  38
  39static void bdi_debug_init(void)
  40{
  41	bdi_debug_root = debugfs_create_dir("bdi", NULL);
  42}
  43
  44static int bdi_debug_stats_show(struct seq_file *m, void *v)
  45{
  46	struct backing_dev_info *bdi = m->private;
  47	struct bdi_writeback *wb = &bdi->wb;
  48	unsigned long background_thresh;
  49	unsigned long dirty_thresh;
  50	unsigned long wb_thresh;
  51	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
  52	struct inode *inode;
  53
  54	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
  55	spin_lock(&wb->list_lock);
  56	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
  57		nr_dirty++;
  58	list_for_each_entry(inode, &wb->b_io, i_io_list)
  59		nr_io++;
  60	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
  61		nr_more_io++;
  62	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
  63		if (inode->i_state & I_DIRTY_TIME)
  64			nr_dirty_time++;
  65	spin_unlock(&wb->list_lock);
  66
  67	global_dirty_limits(&background_thresh, &dirty_thresh);
  68	wb_thresh = wb_calc_thresh(wb, dirty_thresh);
  69
  70#define K(x) ((x) << (PAGE_SHIFT - 10))
  71	seq_printf(m,
  72		   "BdiWriteback:       %10lu kB\n"
  73		   "BdiReclaimable:     %10lu kB\n"
  74		   "BdiDirtyThresh:     %10lu kB\n"
  75		   "DirtyThresh:        %10lu kB\n"
  76		   "BackgroundThresh:   %10lu kB\n"
  77		   "BdiDirtied:         %10lu kB\n"
  78		   "BdiWritten:         %10lu kB\n"
  79		   "BdiWriteBandwidth:  %10lu kBps\n"
  80		   "b_dirty:            %10lu\n"
  81		   "b_io:               %10lu\n"
  82		   "b_more_io:          %10lu\n"
  83		   "b_dirty_time:       %10lu\n"
  84		   "bdi_list:           %10u\n"
  85		   "state:              %10lx\n",
  86		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
  87		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
  88		   K(wb_thresh),
  89		   K(dirty_thresh),
  90		   K(background_thresh),
  91		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
  92		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
  93		   (unsigned long) K(wb->write_bandwidth),
  94		   nr_dirty,
  95		   nr_io,
  96		   nr_more_io,
  97		   nr_dirty_time,
  98		   !list_empty(&bdi->bdi_list), bdi->wb.state);
  99#undef K
 100
 101	return 0;
 102}
 103DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);
 104
 105static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 106{
 107	if (!bdi_debug_root)
 108		return -ENOMEM;
 109
 110	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
 111	if (!bdi->debug_dir)
 112		return -ENOMEM;
 113
 114	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
 115					       bdi, &bdi_debug_stats_fops);
 116	if (!bdi->debug_stats) {
 117		debugfs_remove(bdi->debug_dir);
 118		bdi->debug_dir = NULL;
 119		return -ENOMEM;
 120	}
 121
 122	return 0;
 123}
 124
 125static void bdi_debug_unregister(struct backing_dev_info *bdi)
 126{
 127	debugfs_remove(bdi->debug_stats);
 128	debugfs_remove(bdi->debug_dir);
 129}
 130#else
 131static inline void bdi_debug_init(void)
 132{
 133}
 134static inline int bdi_debug_register(struct backing_dev_info *bdi,
 135				      const char *name)
 136{
 137	return 0;
 138}
 139static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 140{
 141}
 142#endif
 143
 144static ssize_t read_ahead_kb_store(struct device *dev,
 145				  struct device_attribute *attr,
 146				  const char *buf, size_t count)
 147{
 148	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 149	unsigned long read_ahead_kb;
 150	ssize_t ret;
 151
 152	ret = kstrtoul(buf, 10, &read_ahead_kb);
 153	if (ret < 0)
 154		return ret;
 155
 156	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
 157
 158	return count;
 159}
 160
 161#define K(pages) ((pages) << (PAGE_SHIFT - 10))
 162
 163#define BDI_SHOW(name, expr)						\
 164static ssize_t name##_show(struct device *dev,				\
 165			   struct device_attribute *attr, char *page)	\
 166{									\
 167	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
 168									\
 169	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
 170}									\
 171static DEVICE_ATTR_RW(name);
 172
 173BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
 174
 175static ssize_t min_ratio_store(struct device *dev,
 176		struct device_attribute *attr, const char *buf, size_t count)
 177{
 178	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 179	unsigned int ratio;
 180	ssize_t ret;
 181
 182	ret = kstrtouint(buf, 10, &ratio);
 183	if (ret < 0)
 184		return ret;
 185
 186	ret = bdi_set_min_ratio(bdi, ratio);
 187	if (!ret)
 188		ret = count;
 189
 190	return ret;
 191}
 192BDI_SHOW(min_ratio, bdi->min_ratio)
 193
 194static ssize_t max_ratio_store(struct device *dev,
 195		struct device_attribute *attr, const char *buf, size_t count)
 196{
 197	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 198	unsigned int ratio;
 199	ssize_t ret;
 200
 201	ret = kstrtouint(buf, 10, &ratio);
 202	if (ret < 0)
 203		return ret;
 204
 205	ret = bdi_set_max_ratio(bdi, ratio);
 206	if (!ret)
 207		ret = count;
 208
 209	return ret;
 210}
 211BDI_SHOW(max_ratio, bdi->max_ratio)
 212
 213static ssize_t stable_pages_required_show(struct device *dev,
 214					  struct device_attribute *attr,
 215					  char *page)
 216{
 217	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 218
 219	return snprintf(page, PAGE_SIZE-1, "%d\n",
 220			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
 221}
 222static DEVICE_ATTR_RO(stable_pages_required);
 223
 224static struct attribute *bdi_dev_attrs[] = {
 225	&dev_attr_read_ahead_kb.attr,
 226	&dev_attr_min_ratio.attr,
 227	&dev_attr_max_ratio.attr,
 228	&dev_attr_stable_pages_required.attr,
 229	NULL,
 230};
 231ATTRIBUTE_GROUPS(bdi_dev);
 232
 233static __init int bdi_class_init(void)
 234{
 235	bdi_class = class_create(THIS_MODULE, "bdi");
 236	if (IS_ERR(bdi_class))
 237		return PTR_ERR(bdi_class);
 238
 239	bdi_class->dev_groups = bdi_dev_groups;
 240	bdi_debug_init();
 241
 242	return 0;
 243}
 244postcore_initcall(bdi_class_init);
 245
 246static int bdi_init(struct backing_dev_info *bdi);
 247
 248static int __init default_bdi_init(void)
 249{
 250	int err;
 251
 252	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
 253					      WQ_UNBOUND | WQ_SYSFS, 0);
 254	if (!bdi_wq)
 255		return -ENOMEM;
 256
 257	err = bdi_init(&noop_backing_dev_info);
 258
 259	return err;
 260}
 261subsys_initcall(default_bdi_init);
 262
 263/*
 264 * This function is used when the first inode for this wb is marked dirty. It
 265 * wakes-up the corresponding bdi thread which should then take care of the
 266 * periodic background write-out of dirty inodes. Since the write-out would
 267 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 268 * set up a timer which wakes the bdi thread up later.
 269 *
 270 * Note, we wouldn't bother setting up the timer, but this function is on the
 271 * fast-path (used by '__mark_inode_dirty()'), so we save a few context switches
 272 * by delaying the wake-up.
 273 *
 274 * We have to be careful not to postpone flush work if it is scheduled for
 275 * earlier. Thus we use queue_delayed_work().
 276 */
 277void wb_wakeup_delayed(struct bdi_writeback *wb)
 278{
 279	unsigned long timeout;
 280
 281	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
 282	spin_lock_bh(&wb->work_lock);
 283	if (test_bit(WB_registered, &wb->state))
 284		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
 285	spin_unlock_bh(&wb->work_lock);
 286}
 287
 288/*
 289 * Initial write bandwidth: 100 MB/s
 290 */
 291#define INIT_BW		(100 << (20 - PAGE_SHIFT))
 292
 293static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
 294		   int blkcg_id, gfp_t gfp)
 295{
 296	int i, err;
 297
 298	memset(wb, 0, sizeof(*wb));
 299
 300	if (wb != &bdi->wb)
 301		bdi_get(bdi);
 302	wb->bdi = bdi;
 303	wb->last_old_flush = jiffies;
 304	INIT_LIST_HEAD(&wb->b_dirty);
 305	INIT_LIST_HEAD(&wb->b_io);
 306	INIT_LIST_HEAD(&wb->b_more_io);
 307	INIT_LIST_HEAD(&wb->b_dirty_time);
 308	spin_lock_init(&wb->list_lock);
 309
 
 310	wb->bw_time_stamp = jiffies;
 311	wb->balanced_dirty_ratelimit = INIT_BW;
 312	wb->dirty_ratelimit = INIT_BW;
 313	wb->write_bandwidth = INIT_BW;
 314	wb->avg_write_bandwidth = INIT_BW;
 315
 316	spin_lock_init(&wb->work_lock);
 317	INIT_LIST_HEAD(&wb->work_list);
 318	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
 
 319	wb->dirty_sleep = jiffies;
 320
 321	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
 322	if (!wb->congested) {
 323		err = -ENOMEM;
 324		goto out_put_bdi;
 325	}
 326
 327	err = fprop_local_init_percpu(&wb->completions, gfp);
 328	if (err)
 329		goto out_put_cong;
 330
 331	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
 332		err = percpu_counter_init(&wb->stat[i], 0, gfp);
 333		if (err)
 334			goto out_destroy_stat;
 335	}
 336
 337	return 0;
 338
 339out_destroy_stat:
 340	while (i--)
 341		percpu_counter_destroy(&wb->stat[i]);
 342	fprop_local_destroy_percpu(&wb->completions);
 343out_put_cong:
 344	wb_congested_put(wb->congested);
 345out_put_bdi:
 346	if (wb != &bdi->wb)
 347		bdi_put(bdi);
 348	return err;
 349}
 350
 351static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
 352
 353/*
 354 * Remove bdi from the global list and shutdown any threads we have running
 355 */
 356static void wb_shutdown(struct bdi_writeback *wb)
 357{
 358	/* Make sure nobody queues further work */
 359	spin_lock_bh(&wb->work_lock);
 360	if (!test_and_clear_bit(WB_registered, &wb->state)) {
 361		spin_unlock_bh(&wb->work_lock);
 362		/*
 363		 * Wait for wb shutdown to finish if someone else is just
 364		 * running wb_shutdown(). Otherwise we could proceed to wb /
 365		 * bdi destruction before wb_shutdown() is finished.
 366		 */
 367		wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
 368		return;
 369	}
 370	set_bit(WB_shutting_down, &wb->state);
 371	spin_unlock_bh(&wb->work_lock);
 372
 373	cgwb_remove_from_bdi_list(wb);
 374	/*
 375	 * Drain work list and shutdown the delayed_work.  !WB_registered
 376	 * tells wb_workfn() that @wb is dying and its work_list needs to
 377	 * be drained no matter what.
 378	 */
 379	mod_delayed_work(bdi_wq, &wb->dwork, 0);
 380	flush_delayed_work(&wb->dwork);
 381	WARN_ON(!list_empty(&wb->work_list));
 382	/*
 383	 * Make sure bit gets cleared after shutdown is finished. Matches with
 384	 * the barrier provided by test_and_clear_bit() above.
 385	 */
 386	smp_wmb();
 387	clear_and_wake_up_bit(WB_shutting_down, &wb->state);
 388}
 389
 390static void wb_exit(struct bdi_writeback *wb)
 391{
 392	int i;
 393
 394	WARN_ON(delayed_work_pending(&wb->dwork));
 395
 396	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
 397		percpu_counter_destroy(&wb->stat[i]);
 398
 399	fprop_local_destroy_percpu(&wb->completions);
 400	wb_congested_put(wb->congested);
 401	if (wb != &wb->bdi->wb)
 402		bdi_put(wb->bdi);
 403}
 404
 405#ifdef CONFIG_CGROUP_WRITEBACK
 406
 407#include <linux/memcontrol.h>
 408
 409/*
 410 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 411 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 412 * protected.
 413 */
 414static DEFINE_SPINLOCK(cgwb_lock);
 
 415
 416/**
 417 * wb_congested_get_create - get or create a wb_congested
 418 * @bdi: associated bdi
 419 * @blkcg_id: ID of the associated blkcg
 420 * @gfp: allocation mask
 421 *
 422 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 423 * The returned wb_congested has its reference count incremented.  Returns
 424 * NULL on failure.
 425 */
 426struct bdi_writeback_congested *
 427wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
 428{
 429	struct bdi_writeback_congested *new_congested = NULL, *congested;
 430	struct rb_node **node, *parent;
 431	unsigned long flags;
 432retry:
 433	spin_lock_irqsave(&cgwb_lock, flags);
 434
 435	node = &bdi->cgwb_congested_tree.rb_node;
 436	parent = NULL;
 437
 438	while (*node != NULL) {
 439		parent = *node;
 440		congested = rb_entry(parent, struct bdi_writeback_congested,
 441				     rb_node);
 442		if (congested->blkcg_id < blkcg_id)
 443			node = &parent->rb_left;
 444		else if (congested->blkcg_id > blkcg_id)
 445			node = &parent->rb_right;
 446		else
 447			goto found;
 448	}
 449
 450	if (new_congested) {
 451		/* !found and storage for new one already allocated, insert */
 452		congested = new_congested;
 453		new_congested = NULL;
 454		rb_link_node(&congested->rb_node, parent, node);
 455		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
 456		goto found;
 457	}
 458
 459	spin_unlock_irqrestore(&cgwb_lock, flags);
 460
 461	/* allocate storage for new one and retry */
 462	new_congested = kzalloc(sizeof(*new_congested), gfp);
 463	if (!new_congested)
 464		return NULL;
 465
 466	atomic_set(&new_congested->refcnt, 0);
 467	new_congested->__bdi = bdi;
 468	new_congested->blkcg_id = blkcg_id;
 469	goto retry;
 470
 471found:
 472	atomic_inc(&congested->refcnt);
 473	spin_unlock_irqrestore(&cgwb_lock, flags);
 474	kfree(new_congested);
 475	return congested;
 476}
 477
 478/**
 479 * wb_congested_put - put a wb_congested
 480 * @congested: wb_congested to put
 481 *
 482 * Put @congested and destroy it if the refcnt reaches zero.
 483 */
 484void wb_congested_put(struct bdi_writeback_congested *congested)
 485{
 486	unsigned long flags;
 487
 488	local_irq_save(flags);
 489	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
 490		local_irq_restore(flags);
 491		return;
 492	}
 493
 494	/* bdi might already have been destroyed leaving @congested unlinked */
 495	if (congested->__bdi) {
 496		rb_erase(&congested->rb_node,
 497			 &congested->__bdi->cgwb_congested_tree);
 498		congested->__bdi = NULL;
 499	}
 500
 501	spin_unlock_irqrestore(&cgwb_lock, flags);
 502	kfree(congested);
 503}
 504
 505static void cgwb_release_workfn(struct work_struct *work)
 506{
 507	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
 508						release_work);
 
 509
 
 510	wb_shutdown(wb);
 511
 512	css_put(wb->memcg_css);
 513	css_put(wb->blkcg_css);
 514
 515	fprop_local_destroy_percpu(&wb->memcg_completions);
 516	percpu_ref_exit(&wb->refcnt);
 517	wb_exit(wb);
 518	kfree_rcu(wb, rcu);
 519}
 520
 521static void cgwb_release(struct percpu_ref *refcnt)
 522{
 523	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
 524						refcnt);
 525	schedule_work(&wb->release_work);
 526}
 527
 528static void cgwb_kill(struct bdi_writeback *wb)
 529{
 530	lockdep_assert_held(&cgwb_lock);
 531
 532	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
 533	list_del(&wb->memcg_node);
 534	list_del(&wb->blkcg_node);
 
 535	percpu_ref_kill(&wb->refcnt);
 536}
 537
 538static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
 539{
 540	spin_lock_irq(&cgwb_lock);
 541	list_del_rcu(&wb->bdi_node);
 542	spin_unlock_irq(&cgwb_lock);
 543}
 544
 545static int cgwb_create(struct backing_dev_info *bdi,
 546		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
 547{
 548	struct mem_cgroup *memcg;
 549	struct cgroup_subsys_state *blkcg_css;
 550	struct blkcg *blkcg;
 551	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
 552	struct bdi_writeback *wb;
 553	unsigned long flags;
 554	int ret = 0;
 555
 556	memcg = mem_cgroup_from_css(memcg_css);
 557	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
 558	blkcg = css_to_blkcg(blkcg_css);
 559	memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
 560	blkcg_cgwb_list = &blkcg->cgwb_list;
 561
 562	/* look up again under lock and discard on blkcg mismatch */
 563	spin_lock_irqsave(&cgwb_lock, flags);
 564	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 565	if (wb && wb->blkcg_css != blkcg_css) {
 566		cgwb_kill(wb);
 567		wb = NULL;
 568	}
 569	spin_unlock_irqrestore(&cgwb_lock, flags);
 570	if (wb)
 571		goto out_put;
 572
 573	/* need to create a new one */
 574	wb = kmalloc(sizeof(*wb), gfp);
 575	if (!wb) {
 576		ret = -ENOMEM;
 577		goto out_put;
 578	}
 579
 580	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
 581	if (ret)
 582		goto err_free;
 583
 584	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
 585	if (ret)
 586		goto err_wb_exit;
 587
 588	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
 589	if (ret)
 590		goto err_ref_exit;
 591
 592	wb->memcg_css = memcg_css;
 593	wb->blkcg_css = blkcg_css;
 
 594	INIT_WORK(&wb->release_work, cgwb_release_workfn);
 595	set_bit(WB_registered, &wb->state);
 
 596
 597	/*
 598	 * The root wb determines the registered state of the whole bdi and
 599	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
 600	 * whether they're still online.  Don't link @wb if any is dead.
 601	 * See wb_memcg_offline() and wb_blkcg_offline().
 602	 */
 603	ret = -ENODEV;
 604	spin_lock_irqsave(&cgwb_lock, flags);
 605	if (test_bit(WB_registered, &bdi->wb.state) &&
 606	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
 607		/* we might have raced another instance of this function */
 608		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
 609		if (!ret) {
 610			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
 611			list_add(&wb->memcg_node, memcg_cgwb_list);
 612			list_add(&wb->blkcg_node, blkcg_cgwb_list);
 
 613			css_get(memcg_css);
 614			css_get(blkcg_css);
 615		}
 616	}
 617	spin_unlock_irqrestore(&cgwb_lock, flags);
 618	if (ret) {
 619		if (ret == -EEXIST)
 620			ret = 0;
 621		goto err_fprop_exit;
 622	}
 623	goto out_put;
 624
 625err_fprop_exit:
 
 626	fprop_local_destroy_percpu(&wb->memcg_completions);
 627err_ref_exit:
 628	percpu_ref_exit(&wb->refcnt);
 629err_wb_exit:
 630	wb_exit(wb);
 631err_free:
 632	kfree(wb);
 633out_put:
 634	css_put(blkcg_css);
 635	return ret;
 636}
 637
 638/**
 639 * wb_get_create - get wb for a given memcg, create if necessary
 640 * @bdi: target bdi
 641 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 642 * @gfp: allocation mask to use
 643 *
 644 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 645 * create one.  The returned wb has its refcount incremented.
 646 *
 647 * This function uses css_get() on @memcg_css and thus expects its refcnt
 648 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 649 * @memcg_css isn't enough.  try_get it before calling this function.
 650 *
 651 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 652 * memcg on the default hierarchy, memcg association is guaranteed to be
 653 * more specific (equal or descendant to the associated blkcg) and thus can
 654 * identify both the memcg and blkcg associations.
 655 *
 656 * Because the blkcg associated with a memcg may change as blkcg is enabled
 657 * and disabled closer to root in the hierarchy, each wb keeps track of
 658 * both the memcg and blkcg associated with it and verifies the blkcg on
 659 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 660 * created.
 661 */
 662struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
 663				    struct cgroup_subsys_state *memcg_css,
 664				    gfp_t gfp)
 665{
 666	struct bdi_writeback *wb;
 667
 668	might_sleep_if(gfpflags_allow_blocking(gfp));
 669
 670	if (!memcg_css->parent)
 671		return &bdi->wb;
 672
 673	do {
 674		rcu_read_lock();
 675		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 676		if (wb) {
 677			struct cgroup_subsys_state *blkcg_css;
 678
 679			/* see whether the blkcg association has changed */
 680			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
 681						     &io_cgrp_subsys);
 682			if (unlikely(wb->blkcg_css != blkcg_css ||
 683				     !wb_tryget(wb)))
 684				wb = NULL;
 685			css_put(blkcg_css);
 686		}
 687		rcu_read_unlock();
 688	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));
 689
 690	return wb;
 691}
 692
 693static int cgwb_bdi_init(struct backing_dev_info *bdi)
 694{
 695	int ret;
 696
 697	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
 698	bdi->cgwb_congested_tree = RB_ROOT;
 
 699
 700	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
 701	if (!ret) {
 702		bdi->wb.memcg_css = &root_mem_cgroup->css;
 703		bdi->wb.blkcg_css = blkcg_root_css;
 704	}
 705	return ret;
 706}
 707
 708static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
 709{
 710	struct radix_tree_iter iter;
 711	void **slot;
 712	struct bdi_writeback *wb;
 713
 714	WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 715
 716	spin_lock_irq(&cgwb_lock);
 717	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 718		cgwb_kill(*slot);
 
 719
 720	while (!list_empty(&bdi->wb_list)) {
 721		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
 722				      bdi_node);
 723		spin_unlock_irq(&cgwb_lock);
 724		wb_shutdown(wb);
 725		spin_lock_irq(&cgwb_lock);
 726	}
 727	spin_unlock_irq(&cgwb_lock);
 728}
 729
 730/**
 731 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 732 * @memcg: memcg being offlined
 733 *
 734 * Also prevents creation of any new wb's associated with @memcg.
 735 */
 736void wb_memcg_offline(struct mem_cgroup *memcg)
 737{
 738	struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
 739	struct bdi_writeback *wb, *next;
 740
 741	spin_lock_irq(&cgwb_lock);
 742	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
 743		cgwb_kill(wb);
 744	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
 745	spin_unlock_irq(&cgwb_lock);
 746}
 747
 748/**
 749 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 750 * @blkcg: blkcg being offlined
 751 *
 752 * Also prevents creation of any new wb's associated with @blkcg.
 753 */
 754void wb_blkcg_offline(struct blkcg *blkcg)
 755{
 756	struct bdi_writeback *wb, *next;
 
 757
 758	spin_lock_irq(&cgwb_lock);
 759	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
 760		cgwb_kill(wb);
 761	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
 762	spin_unlock_irq(&cgwb_lock);
 763}
 764
 765static void cgwb_bdi_exit(struct backing_dev_info *bdi)
 766{
 767	struct rb_node *rbn;
 768
 769	spin_lock_irq(&cgwb_lock);
 770	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
 771		struct bdi_writeback_congested *congested =
 772			rb_entry(rbn, struct bdi_writeback_congested, rb_node);
 773
 774		rb_erase(rbn, &bdi->cgwb_congested_tree);
 775		congested->__bdi = NULL;	/* mark @congested unlinked */
 776	}
 777	spin_unlock_irq(&cgwb_lock);
 778}
 779
 780static void cgwb_bdi_register(struct backing_dev_info *bdi)
 781{
 782	spin_lock_irq(&cgwb_lock);
 783	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
 784	spin_unlock_irq(&cgwb_lock);
 785}
 786
 787#else	/* CONFIG_CGROUP_WRITEBACK */
 788
 789static int cgwb_bdi_init(struct backing_dev_info *bdi)
 790{
 791	int err;
 792
 793	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
 794	if (!bdi->wb_congested)
 795		return -ENOMEM;
 796
 797	atomic_set(&bdi->wb_congested->refcnt, 1);
 798
 799	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
 800	if (err) {
 801		wb_congested_put(bdi->wb_congested);
 802		return err;
 803	}
 804	return 0;
 805}
 
 806
 807static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }
 808
 809static void cgwb_bdi_exit(struct backing_dev_info *bdi)
 810{
 811	wb_congested_put(bdi->wb_congested);
 812}
 813
 814static void cgwb_bdi_register(struct backing_dev_info *bdi)
 815{
 816	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
 817}
 818
 819static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
 820{
 821	list_del_rcu(&wb->bdi_node);
 822}
 823
 824#endif	/* CONFIG_CGROUP_WRITEBACK */
 825
 826static int bdi_init(struct backing_dev_info *bdi)
 827{
 828	int ret;
 829
 830	bdi->dev = NULL;
 831
 832	kref_init(&bdi->refcnt);
 833	bdi->min_ratio = 0;
 834	bdi->max_ratio = 100;
 835	bdi->max_prop_frac = FPROP_FRAC_BASE;
 836	INIT_LIST_HEAD(&bdi->bdi_list);
 837	INIT_LIST_HEAD(&bdi->wb_list);
 838	init_waitqueue_head(&bdi->wb_waitq);
 839
 840	ret = cgwb_bdi_init(bdi);
 841
 842	return ret;
 843}
 844
 845struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
 846{
 847	struct backing_dev_info *bdi;
 848
 849	bdi = kmalloc_node(sizeof(struct backing_dev_info),
 850			   gfp_mask | __GFP_ZERO, node_id);
 851	if (!bdi)
 852		return NULL;
 853
 854	if (bdi_init(bdi)) {
 855		kfree(bdi);
 856		return NULL;
 857	}
 858	return bdi;
 859}
 860EXPORT_SYMBOL(bdi_alloc_node);
 861
 862int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
 863{
 864	struct device *dev;
 
 865
 866	if (bdi->dev)	/* The driver needs to use separate queues per device */
 867		return 0;
 868
 869	dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
 
 870	if (IS_ERR(dev))
 871		return PTR_ERR(dev);
 872
 873	cgwb_bdi_register(bdi);
 874	bdi->dev = dev;
 875
 876	bdi_debug_register(bdi, dev_name(dev));
 877	set_bit(WB_registered, &bdi->wb.state);
 878
 879	spin_lock_bh(&bdi_lock);
 880	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
 881	spin_unlock_bh(&bdi_lock);
 882
 883	trace_writeback_bdi_register(bdi);
 884	return 0;
 885}
 886EXPORT_SYMBOL(bdi_register_va);
 887
 888int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
 889{
 890	va_list args;
 891	int ret;
 892
 893	va_start(args, fmt);
 894	ret = bdi_register_va(bdi, fmt, args);
 895	va_end(args);
 896	return ret;
 897}
 898EXPORT_SYMBOL(bdi_register);
 899
 900int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
 901{
 902	int rc;
 903
 904	rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
 905	if (rc)
 906		return rc;
 907	/* Leaking owner reference... */
 908	WARN_ON(bdi->owner);
 909	bdi->owner = owner;
 910	get_device(owner);
 911	return 0;
 912}
 913EXPORT_SYMBOL(bdi_register_owner);
 914
 915/*
 916 * Remove bdi from bdi_list, and ensure that it is no longer visible
 917 */
 918static void bdi_remove_from_list(struct backing_dev_info *bdi)
 919{
 920	spin_lock_bh(&bdi_lock);
 
 921	list_del_rcu(&bdi->bdi_list);
 922	spin_unlock_bh(&bdi_lock);
 923
 924	synchronize_rcu_expedited();
 925}
 926
 927void bdi_unregister(struct backing_dev_info *bdi)
 928{
 929	/* make sure nobody finds us on the bdi_list anymore */
 930	bdi_remove_from_list(bdi);
 931	wb_shutdown(&bdi->wb);
 932	cgwb_bdi_unregister(bdi);
 933
 934	if (bdi->dev) {
 935		bdi_debug_unregister(bdi);
 936		device_unregister(bdi->dev);
 937		bdi->dev = NULL;
 938	}
 939
 940	if (bdi->owner) {
 941		put_device(bdi->owner);
 942		bdi->owner = NULL;
 943	}
 944}
 
 945
 946static void release_bdi(struct kref *ref)
 947{
 948	struct backing_dev_info *bdi =
 949			container_of(ref, struct backing_dev_info, refcnt);
 950
 951	if (test_bit(WB_registered, &bdi->wb.state))
 952		bdi_unregister(bdi);
 953	WARN_ON_ONCE(bdi->dev);
 954	wb_exit(&bdi->wb);
 955	cgwb_bdi_exit(bdi);
 956	kfree(bdi);
 957}
 958
 959void bdi_put(struct backing_dev_info *bdi)
 960{
 961	kref_put(&bdi->refcnt, release_bdi);
 962}
 963EXPORT_SYMBOL(bdi_put);
 964
 965static wait_queue_head_t congestion_wqh[2] = {
 966		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
 967		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
 968	};
 969static atomic_t nr_wb_congested[2];
 970
 971void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
 972{
 973	wait_queue_head_t *wqh = &congestion_wqh[sync];
 974	enum wb_congested_state bit;
 975
 976	bit = sync ? WB_sync_congested : WB_async_congested;
 977	if (test_and_clear_bit(bit, &congested->state))
 978		atomic_dec(&nr_wb_congested[sync]);
 979	smp_mb__after_atomic();
 980	if (waitqueue_active(wqh))
 981		wake_up(wqh);
 982}
 983EXPORT_SYMBOL(clear_wb_congested);
 984
 985void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
 986{
 987	enum wb_congested_state bit;
 988
 989	bit = sync ? WB_sync_congested : WB_async_congested;
 990	if (!test_and_set_bit(bit, &congested->state))
 991		atomic_inc(&nr_wb_congested[sync]);
 992}
 993EXPORT_SYMBOL(set_wb_congested);
 994
 995/**
 996 * congestion_wait - wait for a backing_dev to become uncongested
 997 * @sync: SYNC or ASYNC IO
 998 * @timeout: timeout in jiffies
 999 *
1000 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
1001 * write congestion.  If no backing_devs are congested then just wait for the
1002 * next write to be completed.
1003 */
1004long congestion_wait(int sync, long timeout)
1005{
1006	long ret;
1007	unsigned long start = jiffies;
1008	DEFINE_WAIT(wait);
1009	wait_queue_head_t *wqh = &congestion_wqh[sync];
1010
1011	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1012	ret = io_schedule_timeout(timeout);
1013	finish_wait(wqh, &wait);
1014
1015	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
1016					jiffies_to_usecs(jiffies - start));
1017
1018	return ret;
1019}
1020EXPORT_SYMBOL(congestion_wait);
1021
1022/**
1023 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
1024 * @sync: SYNC or ASYNC IO
1025 * @timeout: timeout in jiffies
1026 *
1027 * In the event of a congested backing_dev (any backing_dev) this waits
1028 * for up to @timeout jiffies for either a BDI to exit congestion of the
1029 * given @sync queue or a write to complete.
1030 *
1031 * The return value is 0 if the sleep is for the full timeout. Otherwise,
1032 * it is the number of jiffies that were still remaining when the function
1033 * returned. return_value == timeout implies the function did not sleep.
1034 */
1035long wait_iff_congested(int sync, long timeout)
1036{
1037	long ret;
1038	unsigned long start = jiffies;
1039	DEFINE_WAIT(wait);
1040	wait_queue_head_t *wqh = &congestion_wqh[sync];
1041
1042	/*
1043	 * If there is no congestion, yield if necessary instead
1044	 * of sleeping on the congestion queue
1045	 */
1046	if (atomic_read(&nr_wb_congested[sync]) == 0) {
1047		cond_resched();
1048
1049		/* In case we scheduled, work out time remaining */
1050		ret = timeout - (jiffies - start);
1051		if (ret < 0)
1052			ret = 0;
1053
1054		goto out;
1055	}
1056
1057	/* Sleep until uncongested or a write happens */
1058	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1059	ret = io_schedule_timeout(timeout);
1060	finish_wait(wqh, &wait);
1061
1062out:
1063	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
1064					jiffies_to_usecs(jiffies - start));
1065
1066	return ret;
1067}
1068EXPORT_SYMBOL(wait_iff_congested);
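
The listings on this page appear to be mm/backing-dev.c, at v4.17 above and v6.2 below. As a quick orientation to the API exported above, here is a minimal, hypothetical driver-side sketch of the v4.17 bdi lifecycle (allocate, register, unregister, drop the reference) using only functions from the file: bdi_alloc_node(), bdi_register(), bdi_unregister() and bdi_put(). The my_bdi_setup()/my_bdi_teardown() names and the "my_bdi" device name are illustrative assumptions, not kernel code.

/* Minimal, hypothetical usage sketch; not part of the kernel file above. */
#include <linux/backing-dev.h>

static struct backing_dev_info *my_bdi;

static int my_bdi_setup(void)
{
	int err;

	/* allocated with a single kref reference and an initialised root wb */
	my_bdi = bdi_alloc_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!my_bdi)
		return -ENOMEM;

	/* creates the /sys/class/bdi/my_bdi device, the debugfs dir and links the bdi into bdi_list */
	err = bdi_register(my_bdi, "my_bdi");
	if (err) {
		bdi_put(my_bdi);	/* drops the only reference; release_bdi() frees it */
		return err;
	}
	return 0;
}

static void my_bdi_teardown(void)
{
	bdi_unregister(my_bdi);	/* removes it from bdi_list and shuts down its writeback */
	bdi_put(my_bdi);	/* final reference; release_bdi() runs wb_exit()/cgwb_bdi_exit() */
}

In-tree callers rarely do this by hand; in this era the block layer allocates and registers one bdi per request_queue and drops it when the queue goes away, so the sketch is purely for reading the code above.
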
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2
   3#include <linux/blkdev.h>
   4#include <linux/wait.h>
   5#include <linux/rbtree.h>
   6#include <linux/kthread.h>
   7#include <linux/backing-dev.h>
   8#include <linux/blk-cgroup.h>
   9#include <linux/freezer.h>
  10#include <linux/fs.h>
  11#include <linux/pagemap.h>
  12#include <linux/mm.h>
  13#include <linux/sched/mm.h>
  14#include <linux/sched.h>
  15#include <linux/module.h>
  16#include <linux/writeback.h>
  17#include <linux/device.h>
  18#include <trace/events/writeback.h>
  19
  20struct backing_dev_info noop_backing_dev_info;
  21EXPORT_SYMBOL_GPL(noop_backing_dev_info);
  22
  23static struct class *bdi_class;
  24static const char *bdi_unknown_name = "(unknown)";
  25
  26/*
  27 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
  28 * reader side locking.
  29 */
  30DEFINE_SPINLOCK(bdi_lock);
  31static u64 bdi_id_cursor;
  32static struct rb_root bdi_tree = RB_ROOT;
  33LIST_HEAD(bdi_list);
  34
  35/* bdi_wq serves all asynchronous writeback tasks */
  36struct workqueue_struct *bdi_wq;
  37
  38#define K(x) ((x) << (PAGE_SHIFT - 10))
  39
  40#ifdef CONFIG_DEBUG_FS
  41#include <linux/debugfs.h>
  42#include <linux/seq_file.h>
  43
  44static struct dentry *bdi_debug_root;
  45
  46static void bdi_debug_init(void)
  47{
  48	bdi_debug_root = debugfs_create_dir("bdi", NULL);
  49}
  50
  51static int bdi_debug_stats_show(struct seq_file *m, void *v)
  52{
  53	struct backing_dev_info *bdi = m->private;
  54	struct bdi_writeback *wb = &bdi->wb;
  55	unsigned long background_thresh;
  56	unsigned long dirty_thresh;
  57	unsigned long wb_thresh;
  58	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
  59	struct inode *inode;
  60
  61	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
  62	spin_lock(&wb->list_lock);
  63	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
  64		nr_dirty++;
  65	list_for_each_entry(inode, &wb->b_io, i_io_list)
  66		nr_io++;
  67	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
  68		nr_more_io++;
  69	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
  70		if (inode->i_state & I_DIRTY_TIME)
  71			nr_dirty_time++;
  72	spin_unlock(&wb->list_lock);
  73
  74	global_dirty_limits(&background_thresh, &dirty_thresh);
  75	wb_thresh = wb_calc_thresh(wb, dirty_thresh);
  76
 
  77	seq_printf(m,
  78		   "BdiWriteback:       %10lu kB\n"
  79		   "BdiReclaimable:     %10lu kB\n"
  80		   "BdiDirtyThresh:     %10lu kB\n"
  81		   "DirtyThresh:        %10lu kB\n"
  82		   "BackgroundThresh:   %10lu kB\n"
  83		   "BdiDirtied:         %10lu kB\n"
  84		   "BdiWritten:         %10lu kB\n"
  85		   "BdiWriteBandwidth:  %10lu kBps\n"
  86		   "b_dirty:            %10lu\n"
  87		   "b_io:               %10lu\n"
  88		   "b_more_io:          %10lu\n"
  89		   "b_dirty_time:       %10lu\n"
  90		   "bdi_list:           %10u\n"
  91		   "state:              %10lx\n",
  92		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
  93		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
  94		   K(wb_thresh),
  95		   K(dirty_thresh),
  96		   K(background_thresh),
  97		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
  98		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
  99		   (unsigned long) K(wb->write_bandwidth),
 100		   nr_dirty,
 101		   nr_io,
 102		   nr_more_io,
 103		   nr_dirty_time,
 104		   !list_empty(&bdi->bdi_list), bdi->wb.state);
 
 105
 106	return 0;
 107}
 108DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);
 109
 110static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 111{
 112	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
 113
 114	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
 115			    &bdi_debug_stats_fops);
 116}
 117
 118static void bdi_debug_unregister(struct backing_dev_info *bdi)
 119{
 120	debugfs_remove_recursive(bdi->debug_dir);
 
 121}
 122#else
 123static inline void bdi_debug_init(void)
 124{
 125}
 126static inline void bdi_debug_register(struct backing_dev_info *bdi,
 127				      const char *name)
 128{
 
 129}
 130static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 131{
 132}
 133#endif
 134
 135static ssize_t read_ahead_kb_store(struct device *dev,
 136				  struct device_attribute *attr,
 137				  const char *buf, size_t count)
 138{
 139	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 140	unsigned long read_ahead_kb;
 141	ssize_t ret;
 142
 143	ret = kstrtoul(buf, 10, &read_ahead_kb);
 144	if (ret < 0)
 145		return ret;
 146
 147	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
 148
 149	return count;
 150}
 151
 152#define BDI_SHOW(name, expr)						\
 153static ssize_t name##_show(struct device *dev,				\
 154			   struct device_attribute *attr, char *buf)	\
 155{									\
 156	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
 157									\
 158	return sysfs_emit(buf, "%lld\n", (long long)expr);		\
 159}									\
 160static DEVICE_ATTR_RW(name);
 161
 162BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
 163
 164static ssize_t min_ratio_store(struct device *dev,
 165		struct device_attribute *attr, const char *buf, size_t count)
 166{
 167	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 168	unsigned int ratio;
 169	ssize_t ret;
 170
 171	ret = kstrtouint(buf, 10, &ratio);
 172	if (ret < 0)
 173		return ret;
 174
 175	ret = bdi_set_min_ratio(bdi, ratio);
 176	if (!ret)
 177		ret = count;
 178
 179	return ret;
 180}
 181BDI_SHOW(min_ratio, bdi->min_ratio / BDI_RATIO_SCALE)
 182
 183static ssize_t min_ratio_fine_store(struct device *dev,
 184		struct device_attribute *attr, const char *buf, size_t count)
 185{
 186	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 187	unsigned int ratio;
 188	ssize_t ret;
 189
 190	ret = kstrtouint(buf, 10, &ratio);
 191	if (ret < 0)
 192		return ret;
 193
 194	ret = bdi_set_min_ratio_no_scale(bdi, ratio);
 195	if (!ret)
 196		ret = count;
 197
 198	return ret;
 199}
 200BDI_SHOW(min_ratio_fine, bdi->min_ratio)
 201
 202static ssize_t max_ratio_store(struct device *dev,
 203		struct device_attribute *attr, const char *buf, size_t count)
 204{
 205	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 206	unsigned int ratio;
 207	ssize_t ret;
 208
 209	ret = kstrtouint(buf, 10, &ratio);
 210	if (ret < 0)
 211		return ret;
 212
 213	ret = bdi_set_max_ratio(bdi, ratio);
 214	if (!ret)
 215		ret = count;
 216
 217	return ret;
 218}
 219BDI_SHOW(max_ratio, bdi->max_ratio / BDI_RATIO_SCALE)
 220
 221static ssize_t max_ratio_fine_store(struct device *dev,
 222		struct device_attribute *attr, const char *buf, size_t count)
 223{
 224	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 225	unsigned int ratio;
 226	ssize_t ret;
 227
 228	ret = kstrtouint(buf, 10, &ratio);
 229	if (ret < 0)
 230		return ret;
 231
 232	ret = bdi_set_max_ratio_no_scale(bdi, ratio);
 233	if (!ret)
 234		ret = count;
 235
 236	return ret;
 237}
 238BDI_SHOW(max_ratio_fine, bdi->max_ratio)
 239
 240static ssize_t min_bytes_show(struct device *dev,
 241			      struct device_attribute *attr,
 242			      char *buf)
 243{
 244	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 245
 246	return sysfs_emit(buf, "%llu\n", bdi_get_min_bytes(bdi));
 247}
 248
 249static ssize_t min_bytes_store(struct device *dev,
 250		struct device_attribute *attr, const char *buf, size_t count)
 251{
 252	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 253	u64 bytes;
 254	ssize_t ret;
 255
 256	ret = kstrtoull(buf, 10, &bytes);
 257	if (ret < 0)
 258		return ret;
 259
 260	ret = bdi_set_min_bytes(bdi, bytes);
 261	if (!ret)
 262		ret = count;
 263
 264	return ret;
 265}
 266DEVICE_ATTR_RW(min_bytes);
 267
 268static ssize_t max_bytes_show(struct device *dev,
 269			      struct device_attribute *attr,
 270			      char *buf)
 271{
 272	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 273
 274	return sysfs_emit(buf, "%llu\n", bdi_get_max_bytes(bdi));
 275}
 276
 277static ssize_t max_bytes_store(struct device *dev,
 278		struct device_attribute *attr, const char *buf, size_t count)
 279{
 280	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 281	u64 bytes;
 282	ssize_t ret;
 283
 284	ret = kstrtoull(buf, 10, &bytes);
 285	if (ret < 0)
 286		return ret;
 287
 288	ret = bdi_set_max_bytes(bdi, bytes);
 289	if (!ret)
 290		ret = count;
 291
 292	return ret;
 293}
 294DEVICE_ATTR_RW(max_bytes);
 295
 296static ssize_t stable_pages_required_show(struct device *dev,
 297					  struct device_attribute *attr,
 298					  char *buf)
 299{
 300	dev_warn_once(dev,
 301		"the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
 302	return sysfs_emit(buf, "%d\n", 0);
 303}
 304static DEVICE_ATTR_RO(stable_pages_required);
 305
 306static ssize_t strict_limit_store(struct device *dev,
 307		struct device_attribute *attr, const char *buf, size_t count)
 308{
 309	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 310	unsigned int strict_limit;
 311	ssize_t ret;
 312
 313	ret = kstrtouint(buf, 10, &strict_limit);
 314	if (ret < 0)
 315		return ret;
 316
 317	ret = bdi_set_strict_limit(bdi, strict_limit);
 318	if (!ret)
 319		ret = count;
 320
 321	return ret;
 322}
 323
 324static ssize_t strict_limit_show(struct device *dev,
 325		struct device_attribute *attr, char *buf)
 326{
 327	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 328
 329	return sysfs_emit(buf, "%d\n",
 330			!!(bdi->capabilities & BDI_CAP_STRICTLIMIT));
 331}
 332static DEVICE_ATTR_RW(strict_limit);
 333
 334static struct attribute *bdi_dev_attrs[] = {
 335	&dev_attr_read_ahead_kb.attr,
 336	&dev_attr_min_ratio.attr,
 337	&dev_attr_min_ratio_fine.attr,
 338	&dev_attr_max_ratio.attr,
 339	&dev_attr_max_ratio_fine.attr,
 340	&dev_attr_min_bytes.attr,
 341	&dev_attr_max_bytes.attr,
 342	&dev_attr_stable_pages_required.attr,
 343	&dev_attr_strict_limit.attr,
 344	NULL,
 345};
 346ATTRIBUTE_GROUPS(bdi_dev);
 347
 348static __init int bdi_class_init(void)
 349{
 350	bdi_class = class_create(THIS_MODULE, "bdi");
 351	if (IS_ERR(bdi_class))
 352		return PTR_ERR(bdi_class);
 353
 354	bdi_class->dev_groups = bdi_dev_groups;
 355	bdi_debug_init();
 356
 357	return 0;
 358}
 359postcore_initcall(bdi_class_init);
 360
 361static int __init default_bdi_init(void)
 362{
 363	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
 364				 WQ_SYSFS, 0);
 365	if (!bdi_wq)
 366		return -ENOMEM;
 367	return 0;
 368}
 369subsys_initcall(default_bdi_init);
 370
 371/*
 372 * This function is used when the first inode for this wb is marked dirty. It
 373 * wakes-up the corresponding bdi thread which should then take care of the
 374 * periodic background write-out of dirty inodes. Since the write-out would
 375 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 376 * set up a timer which wakes the bdi thread up later.
 377 *
 378 * Note, we wouldn't bother setting up the timer, but this function is on the
 379 * fast-path (used by '__mark_inode_dirty()'), so we save a few context switches
 380 * by delaying the wake-up.
 381 *
 382 * We have to be careful not to postpone flush work if it is scheduled for
 383 * earlier. Thus we use queue_delayed_work().
 384 */
 385void wb_wakeup_delayed(struct bdi_writeback *wb)
 386{
 387	unsigned long timeout;
 388
 389	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
 390	spin_lock_irq(&wb->work_lock);
 391	if (test_bit(WB_registered, &wb->state))
 392		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
 393	spin_unlock_irq(&wb->work_lock);
 394}
 395
 396static void wb_update_bandwidth_workfn(struct work_struct *work)
 397{
 398	struct bdi_writeback *wb = container_of(to_delayed_work(work),
 399						struct bdi_writeback, bw_dwork);
 400
 401	wb_update_bandwidth(wb);
 402}
 403
 404/*
 405 * Initial write bandwidth: 100 MB/s
 406 */
 407#define INIT_BW		(100 << (20 - PAGE_SHIFT))
 408
 409static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
 410		   gfp_t gfp)
 411{
 412	int i, err;
 413
 414	memset(wb, 0, sizeof(*wb));
 415
 416	wb->bdi = bdi;
 417	wb->last_old_flush = jiffies;
 418	INIT_LIST_HEAD(&wb->b_dirty);
 419	INIT_LIST_HEAD(&wb->b_io);
 420	INIT_LIST_HEAD(&wb->b_more_io);
 421	INIT_LIST_HEAD(&wb->b_dirty_time);
 422	spin_lock_init(&wb->list_lock);
 423
 424	atomic_set(&wb->writeback_inodes, 0);
 425	wb->bw_time_stamp = jiffies;
 426	wb->balanced_dirty_ratelimit = INIT_BW;
 427	wb->dirty_ratelimit = INIT_BW;
 428	wb->write_bandwidth = INIT_BW;
 429	wb->avg_write_bandwidth = INIT_BW;
 430
 431	spin_lock_init(&wb->work_lock);
 432	INIT_LIST_HEAD(&wb->work_list);
 433	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
 434	INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);
 435	wb->dirty_sleep = jiffies;
 436
 437	err = fprop_local_init_percpu(&wb->completions, gfp);
 438	if (err)
 439		return err;
 440
 441	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
 442		err = percpu_counter_init(&wb->stat[i], 0, gfp);
 443		if (err)
 444			goto out_destroy_stat;
 445	}
 446
 447	return 0;
 448
 449out_destroy_stat:
 450	while (i--)
 451		percpu_counter_destroy(&wb->stat[i]);
 452	fprop_local_destroy_percpu(&wb->completions);
 453	return err;
 454}
 455
 456static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
 457
 458/*
 459 * Remove bdi from the global list and shutdown any threads we have running
 460 */
 461static void wb_shutdown(struct bdi_writeback *wb)
 462{
 463	/* Make sure nobody queues further work */
 464	spin_lock_irq(&wb->work_lock);
 465	if (!test_and_clear_bit(WB_registered, &wb->state)) {
 466		spin_unlock_irq(&wb->work_lock);
 467		return;
 468	}
 469	spin_unlock_irq(&wb->work_lock);
 
 470
 471	cgwb_remove_from_bdi_list(wb);
 472	/*
 473	 * Drain work list and shutdown the delayed_work.  !WB_registered
 474	 * tells wb_workfn() that @wb is dying and its work_list needs to
 475	 * be drained no matter what.
 476	 */
 477	mod_delayed_work(bdi_wq, &wb->dwork, 0);
 478	flush_delayed_work(&wb->dwork);
 479	WARN_ON(!list_empty(&wb->work_list));
 480	flush_delayed_work(&wb->bw_dwork);
 481}
 482
 483static void wb_exit(struct bdi_writeback *wb)
 484{
 485	int i;
 486
 487	WARN_ON(delayed_work_pending(&wb->dwork));
 488
 489	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
 490		percpu_counter_destroy(&wb->stat[i]);
 491
 492	fprop_local_destroy_percpu(&wb->completions);
 493}
 494
 495#ifdef CONFIG_CGROUP_WRITEBACK
 496
 497#include <linux/memcontrol.h>
 498
 499/*
 500 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
 501 * memcg->cgwb_list.  bdi->cgwb_tree is also RCU protected.
 
 502 */
 503static DEFINE_SPINLOCK(cgwb_lock);
 504static struct workqueue_struct *cgwb_release_wq;
 505
 506static LIST_HEAD(offline_cgwbs);
 507static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
 508static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);
 509
 510static void cgwb_release_workfn(struct work_struct *work)
 511{
 512	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
 513						release_work);
 514	struct backing_dev_info *bdi = wb->bdi;
 515
 516	mutex_lock(&wb->bdi->cgwb_release_mutex);
 517	wb_shutdown(wb);
 518
 519	css_put(wb->memcg_css);
 520	css_put(wb->blkcg_css);
 521	mutex_unlock(&wb->bdi->cgwb_release_mutex);
 522
 523	/* triggers blkg destruction if no online users left */
 524	blkcg_unpin_online(wb->blkcg_css);
 525
 526	fprop_local_destroy_percpu(&wb->memcg_completions);
 527
 528	spin_lock_irq(&cgwb_lock);
 529	list_del(&wb->offline_node);
 530	spin_unlock_irq(&cgwb_lock);
 531
 532	percpu_ref_exit(&wb->refcnt);
 533	wb_exit(wb);
 534	bdi_put(bdi);
 535	WARN_ON_ONCE(!list_empty(&wb->b_attached));
 536	kfree_rcu(wb, rcu);
 537}
 538
 539static void cgwb_release(struct percpu_ref *refcnt)
 540{
 541	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
 542						refcnt);
 543	queue_work(cgwb_release_wq, &wb->release_work);
 544}
 545
 546static void cgwb_kill(struct bdi_writeback *wb)
 547{
 548	lockdep_assert_held(&cgwb_lock);
 549
 550	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
 551	list_del(&wb->memcg_node);
 552	list_del(&wb->blkcg_node);
 553	list_add(&wb->offline_node, &offline_cgwbs);
 554	percpu_ref_kill(&wb->refcnt);
 555}
 556
 557static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
 558{
 559	spin_lock_irq(&cgwb_lock);
 560	list_del_rcu(&wb->bdi_node);
 561	spin_unlock_irq(&cgwb_lock);
 562}
 563
 564static int cgwb_create(struct backing_dev_info *bdi,
 565		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
 566{
 567	struct mem_cgroup *memcg;
 568	struct cgroup_subsys_state *blkcg_css;
 
 569	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
 570	struct bdi_writeback *wb;
 571	unsigned long flags;
 572	int ret = 0;
 573
 574	memcg = mem_cgroup_from_css(memcg_css);
 575	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
 576	memcg_cgwb_list = &memcg->cgwb_list;
 577	blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css);
 
 578
 579	/* look up again under lock and discard on blkcg mismatch */
 580	spin_lock_irqsave(&cgwb_lock, flags);
 581	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 582	if (wb && wb->blkcg_css != blkcg_css) {
 583		cgwb_kill(wb);
 584		wb = NULL;
 585	}
 586	spin_unlock_irqrestore(&cgwb_lock, flags);
 587	if (wb)
 588		goto out_put;
 589
 590	/* need to create a new one */
 591	wb = kmalloc(sizeof(*wb), gfp);
 592	if (!wb) {
 593		ret = -ENOMEM;
 594		goto out_put;
 595	}
 596
 597	ret = wb_init(wb, bdi, gfp);
 598	if (ret)
 599		goto err_free;
 600
 601	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
 602	if (ret)
 603		goto err_wb_exit;
 604
 605	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
 606	if (ret)
 607		goto err_ref_exit;
 608
 609	wb->memcg_css = memcg_css;
 610	wb->blkcg_css = blkcg_css;
 611	INIT_LIST_HEAD(&wb->b_attached);
 612	INIT_WORK(&wb->release_work, cgwb_release_workfn);
 613	set_bit(WB_registered, &wb->state);
 614	bdi_get(bdi);
 615
 616	/*
 617	 * The root wb determines the registered state of the whole bdi and
 618	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
 619	 * whether they're still online.  Don't link @wb if any is dead.
 620	 * See wb_memcg_offline() and wb_blkcg_offline().
 621	 */
 622	ret = -ENODEV;
 623	spin_lock_irqsave(&cgwb_lock, flags);
 624	if (test_bit(WB_registered, &bdi->wb.state) &&
 625	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
 626		/* we might have raced another instance of this function */
 627		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
 628		if (!ret) {
 629			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
 630			list_add(&wb->memcg_node, memcg_cgwb_list);
 631			list_add(&wb->blkcg_node, blkcg_cgwb_list);
 632			blkcg_pin_online(blkcg_css);
 633			css_get(memcg_css);
 634			css_get(blkcg_css);
 635		}
 636	}
 637	spin_unlock_irqrestore(&cgwb_lock, flags);
 638	if (ret) {
 639		if (ret == -EEXIST)
 640			ret = 0;
 641		goto err_fprop_exit;
 642	}
 643	goto out_put;
 644
 645err_fprop_exit:
 646	bdi_put(bdi);
 647	fprop_local_destroy_percpu(&wb->memcg_completions);
 648err_ref_exit:
 649	percpu_ref_exit(&wb->refcnt);
 650err_wb_exit:
 651	wb_exit(wb);
 652err_free:
 653	kfree(wb);
 654out_put:
 655	css_put(blkcg_css);
 656	return ret;
 657}
 658
 659/**
 660 * wb_get_lookup - get wb for a given memcg
 661 * @bdi: target bdi
 662 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 
 663 *
 664 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 665 * refcount incremented.
 666 *
 667 * This function uses css_get() on @memcg_css and thus expects its refcnt
 668 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 669 * @memcg_css isn't enough.  try_get it before calling this function.
 670 *
 671 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 672 * memcg on the default hierarchy, memcg association is guaranteed to be
 673 * more specific (equal or descendant to the associated blkcg) and thus can
 674 * identify both the memcg and blkcg associations.
 675 *
 676 * Because the blkcg associated with a memcg may change as blkcg is enabled
 677 * and disabled closer to root in the hierarchy, each wb keeps track of
 678 * both the memcg and blkcg associated with it and verifies the blkcg on
 679 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 680 * created.
 681 */
 682struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
 683				    struct cgroup_subsys_state *memcg_css)
 684{
 685	struct bdi_writeback *wb;
 686
 687	if (!memcg_css->parent)
 688		return &bdi->wb;
 689
 690	rcu_read_lock();
 691	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 692	if (wb) {
 693		struct cgroup_subsys_state *blkcg_css;
 694
 695		/* see whether the blkcg association has changed */
 696		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
 697		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
 698			wb = NULL;
 699		css_put(blkcg_css);
 700	}
 701	rcu_read_unlock();
 702
 703	return wb;
 704}
 705
 706/**
 707 * wb_get_create - get wb for a given memcg, create if necessary
 708 * @bdi: target bdi
 709 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 710 * @gfp: allocation mask to use
 711 *
 712 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 713 * create one.  See wb_get_lookup() for more details.
 714 */
 715struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
 716				    struct cgroup_subsys_state *memcg_css,
 717				    gfp_t gfp)
 718{
 719	struct bdi_writeback *wb;
 720
 721	might_alloc(gfp);
 722
 723	if (!memcg_css->parent)
 724		return &bdi->wb;
 725
 726	do {
 727		wb = wb_get_lookup(bdi, memcg_css);
 728	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));
 729
 730	return wb;
 731}
 732
 733static int cgwb_bdi_init(struct backing_dev_info *bdi)
 734{
 735	int ret;
 736
 737	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
 738	mutex_init(&bdi->cgwb_release_mutex);
 739	init_rwsem(&bdi->wb_switch_rwsem);
 740
 741	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
 742	if (!ret) {
 743		bdi->wb.memcg_css = &root_mem_cgroup->css;
 744		bdi->wb.blkcg_css = blkcg_root_css;
 745	}
 746	return ret;
 747}
 748
 749static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
 750{
 751	struct radix_tree_iter iter;
 752	void **slot;
 753	struct bdi_writeback *wb;
 754
 755	WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 756
 757	spin_lock_irq(&cgwb_lock);
 758	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 759		cgwb_kill(*slot);
 760	spin_unlock_irq(&cgwb_lock);
 761
 762	mutex_lock(&bdi->cgwb_release_mutex);
 763	spin_lock_irq(&cgwb_lock);
 764	while (!list_empty(&bdi->wb_list)) {
 765		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
 766				      bdi_node);
 767		spin_unlock_irq(&cgwb_lock);
 768		wb_shutdown(wb);
 769		spin_lock_irq(&cgwb_lock);
 770	}
 771	spin_unlock_irq(&cgwb_lock);
 772	mutex_unlock(&bdi->cgwb_release_mutex);
 773}
 774
 775/*
 776 * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
 777 *
 778 * Try to release dying cgwbs by switching attached inodes to the nearest
 779 * living ancestor's writeback. Processed wbs are placed at the end
 780 * of the list to guarantee the forward progress.
 781 */
 782static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
 783{
 784	struct bdi_writeback *wb;
 785	LIST_HEAD(processed);
 786
 787	spin_lock_irq(&cgwb_lock);
 788
 789	while (!list_empty(&offline_cgwbs)) {
 790		wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
 791				      offline_node);
 792		list_move(&wb->offline_node, &processed);
 793
 794		/*
 795		 * If wb is dirty, cleaning up the writeback by switching
 796		 * attached inodes will result in an effective removal of any
 797		 * bandwidth restrictions, which isn't the goal.  Instead,
 798		 * it can be postponed until the next time, when all io
 799		 * will be likely completed.  If in the meantime some inodes
 800		 * will get re-dirtied, they should be eventually switched to
 801		 * a new cgwb.
 802		 */
 803		if (wb_has_dirty_io(wb))
 804			continue;
 805
 806		if (!wb_tryget(wb))
 807			continue;
 808
 809		spin_unlock_irq(&cgwb_lock);
 810		while (cleanup_offline_cgwb(wb))
 811			cond_resched();
 812		spin_lock_irq(&cgwb_lock);
 813
 814		wb_put(wb);
 815	}
 816
 817	if (!list_empty(&processed))
 818		list_splice_tail(&processed, &offline_cgwbs);
 819
 820	spin_unlock_irq(&cgwb_lock);
 821}
 822
 823/**
 824 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 825 * @memcg: memcg being offlined
 826 *
 827 * Also prevents creation of any new wb's associated with @memcg.
 828 */
 829void wb_memcg_offline(struct mem_cgroup *memcg)
 830{
 831	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
 832	struct bdi_writeback *wb, *next;
 833
 834	spin_lock_irq(&cgwb_lock);
 835	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
 836		cgwb_kill(wb);
 837	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
 838	spin_unlock_irq(&cgwb_lock);
 839
 840	queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
 841}
 842
 843/**
 844 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 845 * @css: blkcg being offlined
 846 *
 847 * Also prevents creation of any new wb's associated with @css.
 848 */
 849void wb_blkcg_offline(struct cgroup_subsys_state *css)
 850{
 851	struct bdi_writeback *wb, *next;
 852	struct list_head *list = blkcg_get_cgwb_list(css);
 853
 854	spin_lock_irq(&cgwb_lock);
 855	list_for_each_entry_safe(wb, next, list, blkcg_node)
 856		cgwb_kill(wb);
 857	list->next = NULL;	/* prevent new wb's */
 858	spin_unlock_irq(&cgwb_lock);
 859}
 860
 861static void cgwb_bdi_register(struct backing_dev_info *bdi)
 862{
 863	spin_lock_irq(&cgwb_lock);
 864	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
 865	spin_unlock_irq(&cgwb_lock);
 866}
 867
 868static int __init cgwb_init(void)
 869{
 870	/*
 871	 * There can be many concurrent release work items overwhelming
 872	 * system_wq.  Put them in a separate wq and limit concurrency.
 873	 * There's no point in executing many of these in parallel.
 874	 */
 875	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
 876	if (!cgwb_release_wq)
 877		return -ENOMEM;
 878
 879	return 0;
 880}
 881subsys_initcall(cgwb_init);
 882
 883#else	/* CONFIG_CGROUP_WRITEBACK */
 884
 885static int cgwb_bdi_init(struct backing_dev_info *bdi)
 886{
 887	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
 888}
 889
 890static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }
 891
 892static void cgwb_bdi_register(struct backing_dev_info *bdi)
 893{
 894	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
 895}
 896
 897static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
 898{
 899	list_del_rcu(&wb->bdi_node);
 900}
 901
 902#endif	/* CONFIG_CGROUP_WRITEBACK */
 903
 904int bdi_init(struct backing_dev_info *bdi)
 905{
 906	bdi->dev = NULL;
 907
 908	kref_init(&bdi->refcnt);
 909	bdi->min_ratio = 0;
 910	bdi->max_ratio = 100 * BDI_RATIO_SCALE;
 911	bdi->max_prop_frac = FPROP_FRAC_BASE;
 912	INIT_LIST_HEAD(&bdi->bdi_list);
 913	INIT_LIST_HEAD(&bdi->wb_list);
 914	init_waitqueue_head(&bdi->wb_waitq);
 915
 916	return cgwb_bdi_init(bdi);
 917}
 918
 919struct backing_dev_info *bdi_alloc(int node_id)
 920{
 921	struct backing_dev_info *bdi;
 922
 923	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
 924	if (!bdi)
 925		return NULL;
 926
 927	if (bdi_init(bdi)) {
 928		kfree(bdi);
 929		return NULL;
 930	}
 931	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
 932	bdi->ra_pages = VM_READAHEAD_PAGES;
 933	bdi->io_pages = VM_READAHEAD_PAGES;
 934	timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0);
 935	return bdi;
 936}
 937EXPORT_SYMBOL(bdi_alloc);
 938
 939static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
 940{
 941	struct rb_node **p = &bdi_tree.rb_node;
 942	struct rb_node *parent = NULL;
 943	struct backing_dev_info *bdi;
 944
 945	lockdep_assert_held(&bdi_lock);
 946
 947	while (*p) {
 948		parent = *p;
 949		bdi = rb_entry(parent, struct backing_dev_info, rb_node);
 950
 951		if (bdi->id > id)
 952			p = &(*p)->rb_left;
 953		else if (bdi->id < id)
 954			p = &(*p)->rb_right;
 955		else
 956			break;
 957	}
 958
 959	if (parentp)
 960		*parentp = parent;
 961	return p;
 962}
 963
 964/**
 965 * bdi_get_by_id - lookup and get bdi from its id
 966 * @id: bdi id to lookup
 967 *
 968 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 969 * doesn't exist or is already unregistered.
 970 */
 971struct backing_dev_info *bdi_get_by_id(u64 id)
 972{
 973	struct backing_dev_info *bdi = NULL;
 974	struct rb_node **p;
 975
 976	spin_lock_bh(&bdi_lock);
 977	p = bdi_lookup_rb_node(id, NULL);
 978	if (*p) {
 979		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
 980		bdi_get(bdi);
 981	}
 982	spin_unlock_bh(&bdi_lock);
 983
 984	return bdi;
 985}
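/*
 * Illustrative sketch, not part of the original source: a successful
 * lookup returns the bdi with an elevated reference count, so a caller
 * would typically drop it again with bdi_put() (defined below):
 *
 *	struct backing_dev_info *bdi;
 *
 *	bdi = bdi_get_by_id(id);
 *	if (bdi) {
 *		...
 *		bdi_put(bdi);
 *	}
 */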
 986
 987int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
 988{
 989	struct device *dev;
 990	struct rb_node *parent, **p;
 991
 992	if (bdi->dev)	/* The driver needs to use separate queues per device */
 993		return 0;
 994
 995	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
 996	dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
 997	if (IS_ERR(dev))
 998		return PTR_ERR(dev);
 999
1000	cgwb_bdi_register(bdi);
1001	bdi->dev = dev;
1002
1003	bdi_debug_register(bdi, dev_name(dev));
1004	set_bit(WB_registered, &bdi->wb.state);
1005
1006	spin_lock_bh(&bdi_lock);
1007
1008	bdi->id = ++bdi_id_cursor;
1009
1010	p = bdi_lookup_rb_node(bdi->id, &parent);
1011	rb_link_node(&bdi->rb_node, parent, p);
1012	rb_insert_color(&bdi->rb_node, &bdi_tree);
1013
1014	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
1015
1016	spin_unlock_bh(&bdi_lock);
1017
1018	trace_writeback_bdi_register(bdi);
1019	return 0;
1020}
1021
1022int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
1023{
1024	va_list args;
1025	int ret;
1026
1027	va_start(args, fmt);
1028	ret = bdi_register_va(bdi, fmt, args);
1029	va_end(args);
1030	return ret;
1031}
1032EXPORT_SYMBOL(bdi_register);
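/*
 * Illustrative sketch, not part of the original source: most drivers get
 * their bdi implicitly through the block layer, but using the helpers in
 * this file directly, the lifecycle pairs up as below (the "mydev%d"
 * name and NUMA_NO_NODE are assumptions for the example):
 *
 *	struct backing_dev_info *bdi;
 *	int err;
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *
 *	err = bdi_register(bdi, "mydev%d", 0);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *
 *	...
 *
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */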
1033
1034void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
1035{
1036	WARN_ON_ONCE(bdi->owner);
1037	bdi->owner = owner;
1038	get_device(owner);
1039}
1040
1041/*
1042 * Remove bdi from bdi_list, and ensure that it is no longer visible
1043 */
1044static void bdi_remove_from_list(struct backing_dev_info *bdi)
1045{
1046	spin_lock_bh(&bdi_lock);
1047	rb_erase(&bdi->rb_node, &bdi_tree);
1048	list_del_rcu(&bdi->bdi_list);
1049	spin_unlock_bh(&bdi_lock);
1050
1051	synchronize_rcu_expedited();
1052}
1053
1054void bdi_unregister(struct backing_dev_info *bdi)
1055{
1056	del_timer_sync(&bdi->laptop_mode_wb_timer);
1057
1058	/* make sure nobody finds us on the bdi_list anymore */
1059	bdi_remove_from_list(bdi);
1060	wb_shutdown(&bdi->wb);
1061	cgwb_bdi_unregister(bdi);
1062
1063	/*
1064	 * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
1065	 * update the global bdi_min_ratio.
1066	 */
1067	if (bdi->min_ratio)
1068		bdi_set_min_ratio(bdi, 0);
1069
1070	if (bdi->dev) {
1071		bdi_debug_unregister(bdi);
1072		device_unregister(bdi->dev);
1073		bdi->dev = NULL;
1074	}
1075
1076	if (bdi->owner) {
1077		put_device(bdi->owner);
1078		bdi->owner = NULL;
1079	}
1080}
1081EXPORT_SYMBOL(bdi_unregister);
1082
1083static void release_bdi(struct kref *ref)
1084{
1085	struct backing_dev_info *bdi =
1086			container_of(ref, struct backing_dev_info, refcnt);
1087
1088	WARN_ON_ONCE(test_bit(WB_registered, &bdi->wb.state));
1089	WARN_ON_ONCE(bdi->dev);
1090	wb_exit(&bdi->wb);
1091	kfree(bdi);
1092}
1093
1094void bdi_put(struct backing_dev_info *bdi)
1095{
1096	kref_put(&bdi->refcnt, release_bdi);
1097}
1098EXPORT_SYMBOL(bdi_put);
1099
1100struct backing_dev_info *inode_to_bdi(struct inode *inode)
1101{
1102	struct super_block *sb;
1103
1104	if (!inode)
1105		return &noop_backing_dev_info;
1106
1107	sb = inode->i_sb;
1108#ifdef CONFIG_BLOCK
1109	if (sb_is_blkdev_sb(sb))
1110		return I_BDEV(inode)->bd_disk->bdi;
1111#endif
1112	return sb->s_bdi;
1113}
1114EXPORT_SYMBOL(inode_to_bdi);
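/*
 * Illustrative sketch, not part of the original source: writeback paths
 * commonly use inode_to_bdi() to decide whether dirty accounting applies
 * at all, e.g. by checking the capability bits set up in bdi_alloc():
 *
 *	struct backing_dev_info *bdi = inode_to_bdi(inode);
 *
 *	if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
 *		return;
 */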
1115
1116const char *bdi_dev_name(struct backing_dev_info *bdi)
1117{
1118	if (!bdi || !bdi->dev)
1119		return bdi_unknown_name;
1120	return bdi->dev_name;
1121}
1122EXPORT_SYMBOL_GPL(bdi_dev_name);