Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2
3#include <linux/wait.h>
4#include <linux/rbtree.h>
5#include <linux/backing-dev.h>
6#include <linux/kthread.h>
7#include <linux/freezer.h>
8#include <linux/fs.h>
9#include <linux/pagemap.h>
10#include <linux/mm.h>
11#include <linux/sched.h>
12#include <linux/module.h>
13#include <linux/writeback.h>
14#include <linux/device.h>
15#include <trace/events/writeback.h>
16
17struct backing_dev_info noop_backing_dev_info = {
18 .name = "noop",
19 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
20};
21EXPORT_SYMBOL_GPL(noop_backing_dev_info);
22
23static struct class *bdi_class;
24
25/*
26 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
27 * reader side locking.
28 */
29DEFINE_SPINLOCK(bdi_lock);
30static u64 bdi_id_cursor;
31static struct rb_root bdi_tree = RB_ROOT;
32LIST_HEAD(bdi_list);
33
34/* bdi_wq serves all asynchronous writeback tasks */
35struct workqueue_struct *bdi_wq;
36
37#ifdef CONFIG_DEBUG_FS
38#include <linux/debugfs.h>
39#include <linux/seq_file.h>
40
41static struct dentry *bdi_debug_root;
42
43static void bdi_debug_init(void)
44{
45 bdi_debug_root = debugfs_create_dir("bdi", NULL);
46}
47
48static int bdi_debug_stats_show(struct seq_file *m, void *v)
49{
50 struct backing_dev_info *bdi = m->private;
51 struct bdi_writeback *wb = &bdi->wb;
52 unsigned long background_thresh;
53 unsigned long dirty_thresh;
54 unsigned long wb_thresh;
55 unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
56 struct inode *inode;
57
58 nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
59 spin_lock(&wb->list_lock);
60 list_for_each_entry(inode, &wb->b_dirty, i_io_list)
61 nr_dirty++;
62 list_for_each_entry(inode, &wb->b_io, i_io_list)
63 nr_io++;
64 list_for_each_entry(inode, &wb->b_more_io, i_io_list)
65 nr_more_io++;
66 list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
67 if (inode->i_state & I_DIRTY_TIME)
68 nr_dirty_time++;
69 spin_unlock(&wb->list_lock);
70
71 global_dirty_limits(&background_thresh, &dirty_thresh);
72 wb_thresh = wb_calc_thresh(wb, dirty_thresh);
73
74#define K(x) ((x) << (PAGE_SHIFT - 10))
75 seq_printf(m,
76 "BdiWriteback: %10lu kB\n"
77 "BdiReclaimable: %10lu kB\n"
78 "BdiDirtyThresh: %10lu kB\n"
79 "DirtyThresh: %10lu kB\n"
80 "BackgroundThresh: %10lu kB\n"
81 "BdiDirtied: %10lu kB\n"
82 "BdiWritten: %10lu kB\n"
83 "BdiWriteBandwidth: %10lu kBps\n"
84 "b_dirty: %10lu\n"
85 "b_io: %10lu\n"
86 "b_more_io: %10lu\n"
87 "b_dirty_time: %10lu\n"
88 "bdi_list: %10u\n"
89 "state: %10lx\n",
90 (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
91 (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
92 K(wb_thresh),
93 K(dirty_thresh),
94 K(background_thresh),
95 (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
96 (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
97 (unsigned long) K(wb->write_bandwidth),
98 nr_dirty,
99 nr_io,
100 nr_more_io,
101 nr_dirty_time,
102 !list_empty(&bdi->bdi_list), bdi->wb.state);
103#undef K
104
105 return 0;
106}
107DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);
108
109static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
110{
111 bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
112
113 debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
114 &bdi_debug_stats_fops);
115}
116
117static void bdi_debug_unregister(struct backing_dev_info *bdi)
118{
119 debugfs_remove_recursive(bdi->debug_dir);
120}
121#else
122static inline void bdi_debug_init(void)
123{
124}
125static inline void bdi_debug_register(struct backing_dev_info *bdi,
126 const char *name)
127{
128}
129static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
130{
131}
132#endif
133
134static ssize_t read_ahead_kb_store(struct device *dev,
135 struct device_attribute *attr,
136 const char *buf, size_t count)
137{
138 struct backing_dev_info *bdi = dev_get_drvdata(dev);
139 unsigned long read_ahead_kb;
140 ssize_t ret;
141
142 ret = kstrtoul(buf, 10, &read_ahead_kb);
143 if (ret < 0)
144 return ret;
145
146 bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
147
148 return count;
149}
150
151#define K(pages) ((pages) << (PAGE_SHIFT - 10))
152
153#define BDI_SHOW(name, expr) \
154static ssize_t name##_show(struct device *dev, \
155 struct device_attribute *attr, char *page) \
156{ \
157 struct backing_dev_info *bdi = dev_get_drvdata(dev); \
158 \
159 return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr); \
160} \
161static DEVICE_ATTR_RW(name);
162
163BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
164
165static ssize_t min_ratio_store(struct device *dev,
166 struct device_attribute *attr, const char *buf, size_t count)
167{
168 struct backing_dev_info *bdi = dev_get_drvdata(dev);
169 unsigned int ratio;
170 ssize_t ret;
171
172 ret = kstrtouint(buf, 10, &ratio);
173 if (ret < 0)
174 return ret;
175
176 ret = bdi_set_min_ratio(bdi, ratio);
177 if (!ret)
178 ret = count;
179
180 return ret;
181}
182BDI_SHOW(min_ratio, bdi->min_ratio)
183
184static ssize_t max_ratio_store(struct device *dev,
185 struct device_attribute *attr, const char *buf, size_t count)
186{
187 struct backing_dev_info *bdi = dev_get_drvdata(dev);
188 unsigned int ratio;
189 ssize_t ret;
190
191 ret = kstrtouint(buf, 10, &ratio);
192 if (ret < 0)
193 return ret;
194
195 ret = bdi_set_max_ratio(bdi, ratio);
196 if (!ret)
197 ret = count;
198
199 return ret;
200}
201BDI_SHOW(max_ratio, bdi->max_ratio)
202
203static ssize_t stable_pages_required_show(struct device *dev,
204 struct device_attribute *attr,
205 char *page)
206{
207 struct backing_dev_info *bdi = dev_get_drvdata(dev);
208
209 return snprintf(page, PAGE_SIZE-1, "%d\n",
210 bdi_cap_stable_pages_required(bdi) ? 1 : 0);
211}
212static DEVICE_ATTR_RO(stable_pages_required);
213
214static struct attribute *bdi_dev_attrs[] = {
215 &dev_attr_read_ahead_kb.attr,
216 &dev_attr_min_ratio.attr,
217 &dev_attr_max_ratio.attr,
218 &dev_attr_stable_pages_required.attr,
219 NULL,
220};
221ATTRIBUTE_GROUPS(bdi_dev);
222
223static __init int bdi_class_init(void)
224{
225 bdi_class = class_create(THIS_MODULE, "bdi");
226 if (IS_ERR(bdi_class))
227 return PTR_ERR(bdi_class);
228
229 bdi_class->dev_groups = bdi_dev_groups;
230 bdi_debug_init();
231
232 return 0;
233}
234postcore_initcall(bdi_class_init);
235
236static int bdi_init(struct backing_dev_info *bdi);
237
238static int __init default_bdi_init(void)
239{
240 int err;
241
242 bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
243 WQ_SYSFS, 0);
244 if (!bdi_wq)
245 return -ENOMEM;
246
247 err = bdi_init(&noop_backing_dev_info);
248
249 return err;
250}
251subsys_initcall(default_bdi_init);
252
253/*
254 * This function is used when the first inode for this wb is marked dirty. It
255 * wakes-up the corresponding bdi thread which should then take care of the
256 * periodic background write-out of dirty inodes. Since the write-out would
257 * starts only 'dirty_writeback_interval' centisecs from now anyway, we just
258 * set up a timer which wakes the bdi thread up later.
259 *
260 * Note, we wouldn't bother setting up the timer, but this function is on the
261 * fast-path (used by '__mark_inode_dirty()'), so we save few context switches
262 * by delaying the wake-up.
263 *
264 * We have to be careful not to postpone flush work if it is scheduled for
265 * earlier. Thus we use queue_delayed_work().
266 */
267void wb_wakeup_delayed(struct bdi_writeback *wb)
268{
269 unsigned long timeout;
270
271 timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
272 spin_lock_bh(&wb->work_lock);
273 if (test_bit(WB_registered, &wb->state))
274 queue_delayed_work(bdi_wq, &wb->dwork, timeout);
275 spin_unlock_bh(&wb->work_lock);
276}
277
278/*
279 * Initial write bandwidth: 100 MB/s
280 */
281#define INIT_BW (100 << (20 - PAGE_SHIFT))
282
283static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
284 int blkcg_id, gfp_t gfp)
285{
286 int i, err;
287
288 memset(wb, 0, sizeof(*wb));
289
290 if (wb != &bdi->wb)
291 bdi_get(bdi);
292 wb->bdi = bdi;
293 wb->last_old_flush = jiffies;
294 INIT_LIST_HEAD(&wb->b_dirty);
295 INIT_LIST_HEAD(&wb->b_io);
296 INIT_LIST_HEAD(&wb->b_more_io);
297 INIT_LIST_HEAD(&wb->b_dirty_time);
298 spin_lock_init(&wb->list_lock);
299
300 wb->bw_time_stamp = jiffies;
301 wb->balanced_dirty_ratelimit = INIT_BW;
302 wb->dirty_ratelimit = INIT_BW;
303 wb->write_bandwidth = INIT_BW;
304 wb->avg_write_bandwidth = INIT_BW;
305
306 spin_lock_init(&wb->work_lock);
307 INIT_LIST_HEAD(&wb->work_list);
308 INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
309 wb->dirty_sleep = jiffies;
310
311 wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
312 if (!wb->congested) {
313 err = -ENOMEM;
314 goto out_put_bdi;
315 }
316
317 err = fprop_local_init_percpu(&wb->completions, gfp);
318 if (err)
319 goto out_put_cong;
320
321 for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
322 err = percpu_counter_init(&wb->stat[i], 0, gfp);
323 if (err)
324 goto out_destroy_stat;
325 }
326
327 return 0;
328
329out_destroy_stat:
330 while (i--)
331 percpu_counter_destroy(&wb->stat[i]);
332 fprop_local_destroy_percpu(&wb->completions);
333out_put_cong:
334 wb_congested_put(wb->congested);
335out_put_bdi:
336 if (wb != &bdi->wb)
337 bdi_put(bdi);
338 return err;
339}
340
341static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
342
343/*
344 * Remove bdi from the global list and shutdown any threads we have running
345 */
346static void wb_shutdown(struct bdi_writeback *wb)
347{
348 /* Make sure nobody queues further work */
349 spin_lock_bh(&wb->work_lock);
350 if (!test_and_clear_bit(WB_registered, &wb->state)) {
351 spin_unlock_bh(&wb->work_lock);
352 return;
353 }
354 spin_unlock_bh(&wb->work_lock);
355
356 cgwb_remove_from_bdi_list(wb);
357 /*
358 * Drain work list and shutdown the delayed_work. !WB_registered
359 * tells wb_workfn() that @wb is dying and its work_list needs to
360 * be drained no matter what.
361 */
362 mod_delayed_work(bdi_wq, &wb->dwork, 0);
363 flush_delayed_work(&wb->dwork);
364 WARN_ON(!list_empty(&wb->work_list));
365}
366
367static void wb_exit(struct bdi_writeback *wb)
368{
369 int i;
370
371 WARN_ON(delayed_work_pending(&wb->dwork));
372
373 for (i = 0; i < NR_WB_STAT_ITEMS; i++)
374 percpu_counter_destroy(&wb->stat[i]);
375
376 fprop_local_destroy_percpu(&wb->completions);
377 wb_congested_put(wb->congested);
378 if (wb != &wb->bdi->wb)
379 bdi_put(wb->bdi);
380}
381
382#ifdef CONFIG_CGROUP_WRITEBACK
383
384#include <linux/memcontrol.h>
385
386/*
387 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
388 * blkcg->cgwb_list, and memcg->cgwb_list. bdi->cgwb_tree is also RCU
389 * protected.
390 */
391static DEFINE_SPINLOCK(cgwb_lock);
392static struct workqueue_struct *cgwb_release_wq;
393
394/**
395 * wb_congested_get_create - get or create a wb_congested
396 * @bdi: associated bdi
397 * @blkcg_id: ID of the associated blkcg
398 * @gfp: allocation mask
399 *
400 * Look up the wb_congested for @blkcg_id on @bdi. If missing, create one.
401 * The returned wb_congested has its reference count incremented. Returns
402 * NULL on failure.
403 */
404struct bdi_writeback_congested *
405wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
406{
407 struct bdi_writeback_congested *new_congested = NULL, *congested;
408 struct rb_node **node, *parent;
409 unsigned long flags;
410retry:
411 spin_lock_irqsave(&cgwb_lock, flags);
412
413 node = &bdi->cgwb_congested_tree.rb_node;
414 parent = NULL;
415
416 while (*node != NULL) {
417 parent = *node;
418 congested = rb_entry(parent, struct bdi_writeback_congested,
419 rb_node);
420 if (congested->blkcg_id < blkcg_id)
421 node = &parent->rb_left;
422 else if (congested->blkcg_id > blkcg_id)
423 node = &parent->rb_right;
424 else
425 goto found;
426 }
427
428 if (new_congested) {
429 /* !found and storage for new one already allocated, insert */
430 congested = new_congested;
431 rb_link_node(&congested->rb_node, parent, node);
432 rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
433 spin_unlock_irqrestore(&cgwb_lock, flags);
434 return congested;
435 }
436
437 spin_unlock_irqrestore(&cgwb_lock, flags);
438
439 /* allocate storage for new one and retry */
440 new_congested = kzalloc(sizeof(*new_congested), gfp);
441 if (!new_congested)
442 return NULL;
443
444 refcount_set(&new_congested->refcnt, 1);
445 new_congested->__bdi = bdi;
446 new_congested->blkcg_id = blkcg_id;
447 goto retry;
448
449found:
450 refcount_inc(&congested->refcnt);
451 spin_unlock_irqrestore(&cgwb_lock, flags);
452 kfree(new_congested);
453 return congested;
454}
455
456/**
457 * wb_congested_put - put a wb_congested
458 * @congested: wb_congested to put
459 *
460 * Put @congested and destroy it if the refcnt reaches zero.
461 */
462void wb_congested_put(struct bdi_writeback_congested *congested)
463{
464 unsigned long flags;
465
466 if (!refcount_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags))
467 return;
468
469 /* bdi might already have been destroyed leaving @congested unlinked */
470 if (congested->__bdi) {
471 rb_erase(&congested->rb_node,
472 &congested->__bdi->cgwb_congested_tree);
473 congested->__bdi = NULL;
474 }
475
476 spin_unlock_irqrestore(&cgwb_lock, flags);
477 kfree(congested);
478}
479
480static void cgwb_release_workfn(struct work_struct *work)
481{
482 struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
483 release_work);
484 struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);
485
486 mutex_lock(&wb->bdi->cgwb_release_mutex);
487 wb_shutdown(wb);
488
489 css_put(wb->memcg_css);
490 css_put(wb->blkcg_css);
491 mutex_unlock(&wb->bdi->cgwb_release_mutex);
492
493 /* triggers blkg destruction if cgwb_refcnt becomes zero */
494 blkcg_cgwb_put(blkcg);
495
496 fprop_local_destroy_percpu(&wb->memcg_completions);
497 percpu_ref_exit(&wb->refcnt);
498 wb_exit(wb);
499 kfree_rcu(wb, rcu);
500}
501
502static void cgwb_release(struct percpu_ref *refcnt)
503{
504 struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
505 refcnt);
506 queue_work(cgwb_release_wq, &wb->release_work);
507}
508
509static void cgwb_kill(struct bdi_writeback *wb)
510{
511 lockdep_assert_held(&cgwb_lock);
512
513 WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
514 list_del(&wb->memcg_node);
515 list_del(&wb->blkcg_node);
516 percpu_ref_kill(&wb->refcnt);
517}
518
519static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
520{
521 spin_lock_irq(&cgwb_lock);
522 list_del_rcu(&wb->bdi_node);
523 spin_unlock_irq(&cgwb_lock);
524}
525
526static int cgwb_create(struct backing_dev_info *bdi,
527 struct cgroup_subsys_state *memcg_css, gfp_t gfp)
528{
529 struct mem_cgroup *memcg;
530 struct cgroup_subsys_state *blkcg_css;
531 struct blkcg *blkcg;
532 struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
533 struct bdi_writeback *wb;
534 unsigned long flags;
535 int ret = 0;
536
537 memcg = mem_cgroup_from_css(memcg_css);
538 blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
539 blkcg = css_to_blkcg(blkcg_css);
540 memcg_cgwb_list = &memcg->cgwb_list;
541 blkcg_cgwb_list = &blkcg->cgwb_list;
542
543 /* look up again under lock and discard on blkcg mismatch */
544 spin_lock_irqsave(&cgwb_lock, flags);
545 wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
546 if (wb && wb->blkcg_css != blkcg_css) {
547 cgwb_kill(wb);
548 wb = NULL;
549 }
550 spin_unlock_irqrestore(&cgwb_lock, flags);
551 if (wb)
552 goto out_put;
553
554 /* need to create a new one */
555 wb = kmalloc(sizeof(*wb), gfp);
556 if (!wb) {
557 ret = -ENOMEM;
558 goto out_put;
559 }
560
561 ret = wb_init(wb, bdi, blkcg_css->id, gfp);
562 if (ret)
563 goto err_free;
564
565 ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
566 if (ret)
567 goto err_wb_exit;
568
569 ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
570 if (ret)
571 goto err_ref_exit;
572
573 wb->memcg_css = memcg_css;
574 wb->blkcg_css = blkcg_css;
575 INIT_WORK(&wb->release_work, cgwb_release_workfn);
576 set_bit(WB_registered, &wb->state);
577
578 /*
579 * The root wb determines the registered state of the whole bdi and
580 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
581 * whether they're still online. Don't link @wb if any is dead.
582 * See wb_memcg_offline() and wb_blkcg_offline().
583 */
584 ret = -ENODEV;
585 spin_lock_irqsave(&cgwb_lock, flags);
586 if (test_bit(WB_registered, &bdi->wb.state) &&
587 blkcg_cgwb_list->next && memcg_cgwb_list->next) {
588 /* we might have raced another instance of this function */
589 ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
590 if (!ret) {
591 list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
592 list_add(&wb->memcg_node, memcg_cgwb_list);
593 list_add(&wb->blkcg_node, blkcg_cgwb_list);
594 blkcg_cgwb_get(blkcg);
595 css_get(memcg_css);
596 css_get(blkcg_css);
597 }
598 }
599 spin_unlock_irqrestore(&cgwb_lock, flags);
600 if (ret) {
601 if (ret == -EEXIST)
602 ret = 0;
603 goto err_fprop_exit;
604 }
605 goto out_put;
606
607err_fprop_exit:
608 fprop_local_destroy_percpu(&wb->memcg_completions);
609err_ref_exit:
610 percpu_ref_exit(&wb->refcnt);
611err_wb_exit:
612 wb_exit(wb);
613err_free:
614 kfree(wb);
615out_put:
616 css_put(blkcg_css);
617 return ret;
618}
619
620/**
621 * wb_get_lookup - get wb for a given memcg
622 * @bdi: target bdi
623 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
624 *
625 * Try to get the wb for @memcg_css on @bdi. The returned wb has its
626 * refcount incremented.
627 *
628 * This function uses css_get() on @memcg_css and thus expects its refcnt
629 * to be positive on invocation. IOW, rcu_read_lock() protection on
630 * @memcg_css isn't enough. try_get it before calling this function.
631 *
632 * A wb is keyed by its associated memcg. As blkcg implicitly enables
633 * memcg on the default hierarchy, memcg association is guaranteed to be
634 * more specific (equal or descendant to the associated blkcg) and thus can
635 * identify both the memcg and blkcg associations.
636 *
637 * Because the blkcg associated with a memcg may change as blkcg is enabled
638 * and disabled closer to root in the hierarchy, each wb keeps track of
639 * both the memcg and blkcg associated with it and verifies the blkcg on
640 * each lookup. On mismatch, the existing wb is discarded and a new one is
641 * created.
642 */
643struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
644 struct cgroup_subsys_state *memcg_css)
645{
646 struct bdi_writeback *wb;
647
648 if (!memcg_css->parent)
649 return &bdi->wb;
650
651 rcu_read_lock();
652 wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
653 if (wb) {
654 struct cgroup_subsys_state *blkcg_css;
655
656 /* see whether the blkcg association has changed */
657 blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
658 if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
659 wb = NULL;
660 css_put(blkcg_css);
661 }
662 rcu_read_unlock();
663
664 return wb;
665}
666
667/**
668 * wb_get_create - get wb for a given memcg, create if necessary
669 * @bdi: target bdi
670 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
671 * @gfp: allocation mask to use
672 *
673 * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to
674 * create one. See wb_get_lookup() for more details.
675 */
676struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
677 struct cgroup_subsys_state *memcg_css,
678 gfp_t gfp)
679{
680 struct bdi_writeback *wb;
681
682 might_sleep_if(gfpflags_allow_blocking(gfp));
683
684 if (!memcg_css->parent)
685 return &bdi->wb;
686
687 do {
688 wb = wb_get_lookup(bdi, memcg_css);
689 } while (!wb && !cgwb_create(bdi, memcg_css, gfp));
690
691 return wb;
692}
693
694static int cgwb_bdi_init(struct backing_dev_info *bdi)
695{
696 int ret;
697
698 INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
699 bdi->cgwb_congested_tree = RB_ROOT;
700 mutex_init(&bdi->cgwb_release_mutex);
701 init_rwsem(&bdi->wb_switch_rwsem);
702
703 ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
704 if (!ret) {
705 bdi->wb.memcg_css = &root_mem_cgroup->css;
706 bdi->wb.blkcg_css = blkcg_root_css;
707 }
708 return ret;
709}
710
711static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
712{
713 struct radix_tree_iter iter;
714 void **slot;
715 struct bdi_writeback *wb;
716
717 WARN_ON(test_bit(WB_registered, &bdi->wb.state));
718
719 spin_lock_irq(&cgwb_lock);
720 radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
721 cgwb_kill(*slot);
722 spin_unlock_irq(&cgwb_lock);
723
724 mutex_lock(&bdi->cgwb_release_mutex);
725 spin_lock_irq(&cgwb_lock);
726 while (!list_empty(&bdi->wb_list)) {
727 wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
728 bdi_node);
729 spin_unlock_irq(&cgwb_lock);
730 wb_shutdown(wb);
731 spin_lock_irq(&cgwb_lock);
732 }
733 spin_unlock_irq(&cgwb_lock);
734 mutex_unlock(&bdi->cgwb_release_mutex);
735}
736
737/**
738 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
739 * @memcg: memcg being offlined
740 *
741 * Also prevents creation of any new wb's associated with @memcg.
742 */
743void wb_memcg_offline(struct mem_cgroup *memcg)
744{
745 struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
746 struct bdi_writeback *wb, *next;
747
748 spin_lock_irq(&cgwb_lock);
749 list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
750 cgwb_kill(wb);
751 memcg_cgwb_list->next = NULL; /* prevent new wb's */
752 spin_unlock_irq(&cgwb_lock);
753}
754
755/**
756 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
757 * @blkcg: blkcg being offlined
758 *
759 * Also prevents creation of any new wb's associated with @blkcg.
760 */
761void wb_blkcg_offline(struct blkcg *blkcg)
762{
763 struct bdi_writeback *wb, *next;
764
765 spin_lock_irq(&cgwb_lock);
766 list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
767 cgwb_kill(wb);
768 blkcg->cgwb_list.next = NULL; /* prevent new wb's */
769 spin_unlock_irq(&cgwb_lock);
770}
771
772static void cgwb_bdi_exit(struct backing_dev_info *bdi)
773{
774 struct rb_node *rbn;
775
776 spin_lock_irq(&cgwb_lock);
777 while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
778 struct bdi_writeback_congested *congested =
779 rb_entry(rbn, struct bdi_writeback_congested, rb_node);
780
781 rb_erase(rbn, &bdi->cgwb_congested_tree);
782 congested->__bdi = NULL; /* mark @congested unlinked */
783 }
784 spin_unlock_irq(&cgwb_lock);
785}
786
787static void cgwb_bdi_register(struct backing_dev_info *bdi)
788{
789 spin_lock_irq(&cgwb_lock);
790 list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
791 spin_unlock_irq(&cgwb_lock);
792}
793
794static int __init cgwb_init(void)
795{
796 /*
797 * There can be many concurrent release work items overwhelming
798 * system_wq. Put them in a separate wq and limit concurrency.
799 * There's no point in executing many of these in parallel.
800 */
801 cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
802 if (!cgwb_release_wq)
803 return -ENOMEM;
804
805 return 0;
806}
807subsys_initcall(cgwb_init);
808
809#else /* CONFIG_CGROUP_WRITEBACK */
810
811static int cgwb_bdi_init(struct backing_dev_info *bdi)
812{
813 int err;
814
815 bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
816 if (!bdi->wb_congested)
817 return -ENOMEM;
818
819 refcount_set(&bdi->wb_congested->refcnt, 1);
820
821 err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
822 if (err) {
823 wb_congested_put(bdi->wb_congested);
824 return err;
825 }
826 return 0;
827}
828
829static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }
830
831static void cgwb_bdi_exit(struct backing_dev_info *bdi)
832{
833 wb_congested_put(bdi->wb_congested);
834}
835
836static void cgwb_bdi_register(struct backing_dev_info *bdi)
837{
838 list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
839}
840
841static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
842{
843 list_del_rcu(&wb->bdi_node);
844}
845
846#endif /* CONFIG_CGROUP_WRITEBACK */
847
848static int bdi_init(struct backing_dev_info *bdi)
849{
850 int ret;
851
852 bdi->dev = NULL;
853
854 kref_init(&bdi->refcnt);
855 bdi->min_ratio = 0;
856 bdi->max_ratio = 100;
857 bdi->max_prop_frac = FPROP_FRAC_BASE;
858 INIT_LIST_HEAD(&bdi->bdi_list);
859 INIT_LIST_HEAD(&bdi->wb_list);
860 init_waitqueue_head(&bdi->wb_waitq);
861
862 ret = cgwb_bdi_init(bdi);
863
864 return ret;
865}
866
867struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
868{
869 struct backing_dev_info *bdi;
870
871 bdi = kmalloc_node(sizeof(struct backing_dev_info),
872 gfp_mask | __GFP_ZERO, node_id);
873 if (!bdi)
874 return NULL;
875
876 if (bdi_init(bdi)) {
877 kfree(bdi);
878 return NULL;
879 }
880 return bdi;
881}
882EXPORT_SYMBOL(bdi_alloc_node);
883
884static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
885{
886 struct rb_node **p = &bdi_tree.rb_node;
887 struct rb_node *parent = NULL;
888 struct backing_dev_info *bdi;
889
890 lockdep_assert_held(&bdi_lock);
891
892 while (*p) {
893 parent = *p;
894 bdi = rb_entry(parent, struct backing_dev_info, rb_node);
895
896 if (bdi->id > id)
897 p = &(*p)->rb_left;
898 else if (bdi->id < id)
899 p = &(*p)->rb_right;
900 else
901 break;
902 }
903
904 if (parentp)
905 *parentp = parent;
906 return p;
907}
908
909/**
910 * bdi_get_by_id - lookup and get bdi from its id
911 * @id: bdi id to lookup
912 *
913 * Find bdi matching @id and get it. Returns NULL if the matching bdi
914 * doesn't exist or is already unregistered.
915 */
916struct backing_dev_info *bdi_get_by_id(u64 id)
917{
918 struct backing_dev_info *bdi = NULL;
919 struct rb_node **p;
920
921 spin_lock_bh(&bdi_lock);
922 p = bdi_lookup_rb_node(id, NULL);
923 if (*p) {
924 bdi = rb_entry(*p, struct backing_dev_info, rb_node);
925 bdi_get(bdi);
926 }
927 spin_unlock_bh(&bdi_lock);
928
929 return bdi;
930}
931
932int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
933{
934 struct device *dev;
935 struct rb_node *parent, **p;
936
937 if (bdi->dev) /* The driver needs to use separate queues per device */
938 return 0;
939
940 dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
941 if (IS_ERR(dev))
942 return PTR_ERR(dev);
943
944 cgwb_bdi_register(bdi);
945 bdi->dev = dev;
946
947 bdi_debug_register(bdi, dev_name(dev));
948 set_bit(WB_registered, &bdi->wb.state);
949
950 spin_lock_bh(&bdi_lock);
951
952 bdi->id = ++bdi_id_cursor;
953
954 p = bdi_lookup_rb_node(bdi->id, &parent);
955 rb_link_node(&bdi->rb_node, parent, p);
956 rb_insert_color(&bdi->rb_node, &bdi_tree);
957
958 list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
959
960 spin_unlock_bh(&bdi_lock);
961
962 trace_writeback_bdi_register(bdi);
963 return 0;
964}
965EXPORT_SYMBOL(bdi_register_va);
966
967int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
968{
969 va_list args;
970 int ret;
971
972 va_start(args, fmt);
973 ret = bdi_register_va(bdi, fmt, args);
974 va_end(args);
975 return ret;
976}
977EXPORT_SYMBOL(bdi_register);
978
979int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
980{
981 int rc;
982
983 rc = bdi_register(bdi, "%u:%u", MAJOR(owner->devt), MINOR(owner->devt));
984 if (rc)
985 return rc;
986 /* Leaking owner reference... */
987 WARN_ON(bdi->owner);
988 bdi->owner = owner;
989 get_device(owner);
990 return 0;
991}
992EXPORT_SYMBOL(bdi_register_owner);
993
994/*
995 * Remove bdi from bdi_list, and ensure that it is no longer visible
996 */
997static void bdi_remove_from_list(struct backing_dev_info *bdi)
998{
999 spin_lock_bh(&bdi_lock);
1000 rb_erase(&bdi->rb_node, &bdi_tree);
1001 list_del_rcu(&bdi->bdi_list);
1002 spin_unlock_bh(&bdi_lock);
1003
1004 synchronize_rcu_expedited();
1005}
1006
1007void bdi_unregister(struct backing_dev_info *bdi)
1008{
1009 /* make sure nobody finds us on the bdi_list anymore */
1010 bdi_remove_from_list(bdi);
1011 wb_shutdown(&bdi->wb);
1012 cgwb_bdi_unregister(bdi);
1013
1014 if (bdi->dev) {
1015 bdi_debug_unregister(bdi);
1016 device_unregister(bdi->dev);
1017 bdi->dev = NULL;
1018 }
1019
1020 if (bdi->owner) {
1021 put_device(bdi->owner);
1022 bdi->owner = NULL;
1023 }
1024}
1025
1026static void release_bdi(struct kref *ref)
1027{
1028 struct backing_dev_info *bdi =
1029 container_of(ref, struct backing_dev_info, refcnt);
1030
1031 if (test_bit(WB_registered, &bdi->wb.state))
1032 bdi_unregister(bdi);
1033 WARN_ON_ONCE(bdi->dev);
1034 wb_exit(&bdi->wb);
1035 cgwb_bdi_exit(bdi);
1036 kfree(bdi);
1037}
1038
1039void bdi_put(struct backing_dev_info *bdi)
1040{
1041 kref_put(&bdi->refcnt, release_bdi);
1042}
1043EXPORT_SYMBOL(bdi_put);
1044
1045static wait_queue_head_t congestion_wqh[2] = {
1046 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
1047 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
1048 };
1049static atomic_t nr_wb_congested[2];
1050
1051void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
1052{
1053 wait_queue_head_t *wqh = &congestion_wqh[sync];
1054 enum wb_congested_state bit;
1055
1056 bit = sync ? WB_sync_congested : WB_async_congested;
1057 if (test_and_clear_bit(bit, &congested->state))
1058 atomic_dec(&nr_wb_congested[sync]);
1059 smp_mb__after_atomic();
1060 if (waitqueue_active(wqh))
1061 wake_up(wqh);
1062}
1063EXPORT_SYMBOL(clear_wb_congested);
1064
1065void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
1066{
1067 enum wb_congested_state bit;
1068
1069 bit = sync ? WB_sync_congested : WB_async_congested;
1070 if (!test_and_set_bit(bit, &congested->state))
1071 atomic_inc(&nr_wb_congested[sync]);
1072}
1073EXPORT_SYMBOL(set_wb_congested);
1074
1075/**
1076 * congestion_wait - wait for a backing_dev to become uncongested
1077 * @sync: SYNC or ASYNC IO
1078 * @timeout: timeout in jiffies
1079 *
1080 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
1081 * write congestion. If no backing_devs are congested then just wait for the
1082 * next write to be completed.
1083 */
1084long congestion_wait(int sync, long timeout)
1085{
1086 long ret;
1087 unsigned long start = jiffies;
1088 DEFINE_WAIT(wait);
1089 wait_queue_head_t *wqh = &congestion_wqh[sync];
1090
1091 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1092 ret = io_schedule_timeout(timeout);
1093 finish_wait(wqh, &wait);
1094
1095 trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
1096 jiffies_to_usecs(jiffies - start));
1097
1098 return ret;
1099}
1100EXPORT_SYMBOL(congestion_wait);
1101
1102/**
1103 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
1104 * @sync: SYNC or ASYNC IO
1105 * @timeout: timeout in jiffies
1106 *
1107 * In the event of a congested backing_dev (any backing_dev) this waits
1108 * for up to @timeout jiffies for either a BDI to exit congestion of the
1109 * given @sync queue or a write to complete.
1110 *
1111 * The return value is 0 if the sleep is for the full timeout. Otherwise,
1112 * it is the number of jiffies that were still remaining when the function
1113 * returned. return_value == timeout implies the function did not sleep.
1114 */
1115long wait_iff_congested(int sync, long timeout)
1116{
1117 long ret;
1118 unsigned long start = jiffies;
1119 DEFINE_WAIT(wait);
1120 wait_queue_head_t *wqh = &congestion_wqh[sync];
1121
1122 /*
1123 * If there is no congestion, yield if necessary instead
1124 * of sleeping on the congestion queue
1125 */
1126 if (atomic_read(&nr_wb_congested[sync]) == 0) {
1127 cond_resched();
1128
1129 /* In case we scheduled, work out time remaining */
1130 ret = timeout - (jiffies - start);
1131 if (ret < 0)
1132 ret = 0;
1133
1134 goto out;
1135 }
1136
1137 /* Sleep until uncongested or a write happens */
1138 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1139 ret = io_schedule_timeout(timeout);
1140 finish_wait(wqh, &wait);
1141
1142out:
1143 trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
1144 jiffies_to_usecs(jiffies - start));
1145
1146 return ret;
1147}
1148EXPORT_SYMBOL(wait_iff_congested);
1
2#include <linux/wait.h>
3#include <linux/backing-dev.h>
4#include <linux/kthread.h>
5#include <linux/freezer.h>
6#include <linux/fs.h>
7#include <linux/pagemap.h>
8#include <linux/mm.h>
9#include <linux/sched.h>
10#include <linux/module.h>
11#include <linux/writeback.h>
12#include <linux/device.h>
13#include <trace/events/writeback.h>
14
15static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
16
17struct backing_dev_info default_backing_dev_info = {
18 .name = "default",
19 .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
20 .state = 0,
21 .capabilities = BDI_CAP_MAP_COPY,
22};
23EXPORT_SYMBOL_GPL(default_backing_dev_info);
24
25struct backing_dev_info noop_backing_dev_info = {
26 .name = "noop",
27 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
28};
29EXPORT_SYMBOL_GPL(noop_backing_dev_info);
30
31static struct class *bdi_class;
32
33/*
34 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
35 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
36 * locking.
37 */
38DEFINE_SPINLOCK(bdi_lock);
39LIST_HEAD(bdi_list);
40LIST_HEAD(bdi_pending_list);
41
42static struct task_struct *sync_supers_tsk;
43static struct timer_list sync_supers_timer;
44
45static int bdi_sync_supers(void *);
46static void sync_supers_timer_fn(unsigned long);
47
48void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
49{
50 if (wb1 < wb2) {
51 spin_lock(&wb1->list_lock);
52 spin_lock_nested(&wb2->list_lock, 1);
53 } else {
54 spin_lock(&wb2->list_lock);
55 spin_lock_nested(&wb1->list_lock, 1);
56 }
57}
58
59#ifdef CONFIG_DEBUG_FS
60#include <linux/debugfs.h>
61#include <linux/seq_file.h>
62
63static struct dentry *bdi_debug_root;
64
65static void bdi_debug_init(void)
66{
67 bdi_debug_root = debugfs_create_dir("bdi", NULL);
68}
69
70static int bdi_debug_stats_show(struct seq_file *m, void *v)
71{
72 struct backing_dev_info *bdi = m->private;
73 struct bdi_writeback *wb = &bdi->wb;
74 unsigned long background_thresh;
75 unsigned long dirty_thresh;
76 unsigned long bdi_thresh;
77 unsigned long nr_dirty, nr_io, nr_more_io;
78 struct inode *inode;
79
80 nr_dirty = nr_io = nr_more_io = 0;
81 spin_lock(&wb->list_lock);
82 list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
83 nr_dirty++;
84 list_for_each_entry(inode, &wb->b_io, i_wb_list)
85 nr_io++;
86 list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
87 nr_more_io++;
88 spin_unlock(&wb->list_lock);
89
90 global_dirty_limits(&background_thresh, &dirty_thresh);
91 bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
92
93#define K(x) ((x) << (PAGE_SHIFT - 10))
94 seq_printf(m,
95 "BdiWriteback: %10lu kB\n"
96 "BdiReclaimable: %10lu kB\n"
97 "BdiDirtyThresh: %10lu kB\n"
98 "DirtyThresh: %10lu kB\n"
99 "BackgroundThresh: %10lu kB\n"
100 "BdiDirtied: %10lu kB\n"
101 "BdiWritten: %10lu kB\n"
102 "BdiWriteBandwidth: %10lu kBps\n"
103 "b_dirty: %10lu\n"
104 "b_io: %10lu\n"
105 "b_more_io: %10lu\n"
106 "bdi_list: %10u\n"
107 "state: %10lx\n",
108 (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
109 (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
110 K(bdi_thresh),
111 K(dirty_thresh),
112 K(background_thresh),
113 (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
114 (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
115 (unsigned long) K(bdi->write_bandwidth),
116 nr_dirty,
117 nr_io,
118 nr_more_io,
119 !list_empty(&bdi->bdi_list), bdi->state);
120#undef K
121
122 return 0;
123}
124
125static int bdi_debug_stats_open(struct inode *inode, struct file *file)
126{
127 return single_open(file, bdi_debug_stats_show, inode->i_private);
128}
129
130static const struct file_operations bdi_debug_stats_fops = {
131 .open = bdi_debug_stats_open,
132 .read = seq_read,
133 .llseek = seq_lseek,
134 .release = single_release,
135};
136
137static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
138{
139 bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
140 bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
141 bdi, &bdi_debug_stats_fops);
142}
143
144static void bdi_debug_unregister(struct backing_dev_info *bdi)
145{
146 debugfs_remove(bdi->debug_stats);
147 debugfs_remove(bdi->debug_dir);
148}
149#else
150static inline void bdi_debug_init(void)
151{
152}
153static inline void bdi_debug_register(struct backing_dev_info *bdi,
154 const char *name)
155{
156}
157static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
158{
159}
160#endif
161
162static ssize_t read_ahead_kb_store(struct device *dev,
163 struct device_attribute *attr,
164 const char *buf, size_t count)
165{
166 struct backing_dev_info *bdi = dev_get_drvdata(dev);
167 char *end;
168 unsigned long read_ahead_kb;
169 ssize_t ret = -EINVAL;
170
171 read_ahead_kb = simple_strtoul(buf, &end, 10);
172 if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
173 bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
174 ret = count;
175 }
176 return ret;
177}
178
179#define K(pages) ((pages) << (PAGE_SHIFT - 10))
180
181#define BDI_SHOW(name, expr) \
182static ssize_t name##_show(struct device *dev, \
183 struct device_attribute *attr, char *page) \
184{ \
185 struct backing_dev_info *bdi = dev_get_drvdata(dev); \
186 \
187 return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr); \
188}
189
190BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
191
192static ssize_t min_ratio_store(struct device *dev,
193 struct device_attribute *attr, const char *buf, size_t count)
194{
195 struct backing_dev_info *bdi = dev_get_drvdata(dev);
196 char *end;
197 unsigned int ratio;
198 ssize_t ret = -EINVAL;
199
200 ratio = simple_strtoul(buf, &end, 10);
201 if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
202 ret = bdi_set_min_ratio(bdi, ratio);
203 if (!ret)
204 ret = count;
205 }
206 return ret;
207}
208BDI_SHOW(min_ratio, bdi->min_ratio)
209
210static ssize_t max_ratio_store(struct device *dev,
211 struct device_attribute *attr, const char *buf, size_t count)
212{
213 struct backing_dev_info *bdi = dev_get_drvdata(dev);
214 char *end;
215 unsigned int ratio;
216 ssize_t ret = -EINVAL;
217
218 ratio = simple_strtoul(buf, &end, 10);
219 if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
220 ret = bdi_set_max_ratio(bdi, ratio);
221 if (!ret)
222 ret = count;
223 }
224 return ret;
225}
226BDI_SHOW(max_ratio, bdi->max_ratio)
227
228#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
229
230static struct device_attribute bdi_dev_attrs[] = {
231 __ATTR_RW(read_ahead_kb),
232 __ATTR_RW(min_ratio),
233 __ATTR_RW(max_ratio),
234 __ATTR_NULL,
235};
236
237static __init int bdi_class_init(void)
238{
239 bdi_class = class_create(THIS_MODULE, "bdi");
240 if (IS_ERR(bdi_class))
241 return PTR_ERR(bdi_class);
242
243 bdi_class->dev_attrs = bdi_dev_attrs;
244 bdi_debug_init();
245 return 0;
246}
247postcore_initcall(bdi_class_init);
248
249static int __init default_bdi_init(void)
250{
251 int err;
252
253 sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
254 BUG_ON(IS_ERR(sync_supers_tsk));
255
256 setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
257 bdi_arm_supers_timer();
258
259 err = bdi_init(&default_backing_dev_info);
260 if (!err)
261 bdi_register(&default_backing_dev_info, NULL, "default");
262 err = bdi_init(&noop_backing_dev_info);
263
264 return err;
265}
266subsys_initcall(default_bdi_init);
267
268int bdi_has_dirty_io(struct backing_dev_info *bdi)
269{
270 return wb_has_dirty_io(&bdi->wb);
271}
272
273/*
274 * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
275 * or we risk deadlocking on ->s_umount. The longer term solution would be
276 * to implement sync_supers_bdi() or similar and simply do it from the
277 * bdi writeback thread individually.
278 */
279static int bdi_sync_supers(void *unused)
280{
281 set_user_nice(current, 0);
282
283 while (!kthread_should_stop()) {
284 set_current_state(TASK_INTERRUPTIBLE);
285 schedule();
286
287 /*
288 * Do this periodically, like kupdated() did before.
289 */
290 sync_supers();
291 }
292
293 return 0;
294}
295
296void bdi_arm_supers_timer(void)
297{
298 unsigned long next;
299
300 if (!dirty_writeback_interval)
301 return;
302
303 next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
304 mod_timer(&sync_supers_timer, round_jiffies_up(next));
305}
306
307static void sync_supers_timer_fn(unsigned long unused)
308{
309 wake_up_process(sync_supers_tsk);
310 bdi_arm_supers_timer();
311}
312
313static void wakeup_timer_fn(unsigned long data)
314{
315 struct backing_dev_info *bdi = (struct backing_dev_info *)data;
316
317 spin_lock_bh(&bdi->wb_lock);
318 if (bdi->wb.task) {
319 trace_writeback_wake_thread(bdi);
320 wake_up_process(bdi->wb.task);
321 } else if (bdi->dev) {
322 /*
323 * When bdi tasks are inactive for long time, they are killed.
324 * In this case we have to wake-up the forker thread which
325 * should create and run the bdi thread.
326 */
327 trace_writeback_wake_forker_thread(bdi);
328 wake_up_process(default_backing_dev_info.wb.task);
329 }
330 spin_unlock_bh(&bdi->wb_lock);
331}
332
333/*
334 * This function is used when the first inode for this bdi is marked dirty. It
335 * wakes-up the corresponding bdi thread which should then take care of the
336 * periodic background write-out of dirty inodes. Since the write-out would
337 * starts only 'dirty_writeback_interval' centisecs from now anyway, we just
338 * set up a timer which wakes the bdi thread up later.
339 *
340 * Note, we wouldn't bother setting up the timer, but this function is on the
341 * fast-path (used by '__mark_inode_dirty()'), so we save few context switches
342 * by delaying the wake-up.
343 */
344void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
345{
346 unsigned long timeout;
347
348 timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
349 mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
350}
351
352/*
353 * Calculate the longest interval (jiffies) bdi threads are allowed to be
354 * inactive.
355 */
356static unsigned long bdi_longest_inactive(void)
357{
358 unsigned long interval;
359
360 interval = msecs_to_jiffies(dirty_writeback_interval * 10);
361 return max(5UL * 60 * HZ, interval);
362}
363
364/*
365 * Clear pending bit and wakeup anybody waiting for flusher thread creation or
366 * shutdown
367 */
368static void bdi_clear_pending(struct backing_dev_info *bdi)
369{
370 clear_bit(BDI_pending, &bdi->state);
371 smp_mb__after_clear_bit();
372 wake_up_bit(&bdi->state, BDI_pending);
373}
374
375static int bdi_forker_thread(void *ptr)
376{
377 struct bdi_writeback *me = ptr;
378
379 current->flags |= PF_SWAPWRITE;
380 set_freezable();
381
382 /*
383 * Our parent may run at a different priority, just set us to normal
384 */
385 set_user_nice(current, 0);
386
387 for (;;) {
388 struct task_struct *task = NULL;
389 struct backing_dev_info *bdi;
390 enum {
391 NO_ACTION, /* Nothing to do */
392 FORK_THREAD, /* Fork bdi thread */
393 KILL_THREAD, /* Kill inactive bdi thread */
394 } action = NO_ACTION;
395
396 /*
397 * Temporary measure, we want to make sure we don't see
398 * dirty data on the default backing_dev_info
399 */
400 if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
401 del_timer(&me->wakeup_timer);
402 wb_do_writeback(me, 0);
403 }
404
405 spin_lock_bh(&bdi_lock);
406 /*
407 * In the following loop we are going to check whether we have
408 * some work to do without any synchronization with tasks
409 * waking us up to do work for them. Set the task state here
410 * so that we don't miss wakeups after verifying conditions.
411 */
412 set_current_state(TASK_INTERRUPTIBLE);
413
414 list_for_each_entry(bdi, &bdi_list, bdi_list) {
415 bool have_dirty_io;
416
417 if (!bdi_cap_writeback_dirty(bdi) ||
418 bdi_cap_flush_forker(bdi))
419 continue;
420
421 WARN(!test_bit(BDI_registered, &bdi->state),
422 "bdi %p/%s is not registered!\n", bdi, bdi->name);
423
424 have_dirty_io = !list_empty(&bdi->work_list) ||
425 wb_has_dirty_io(&bdi->wb);
426
427 /*
428 * If the bdi has work to do, but the thread does not
429 * exist - create it.
430 */
431 if (!bdi->wb.task && have_dirty_io) {
432 /*
433 * Set the pending bit - if someone will try to
434 * unregister this bdi - it'll wait on this bit.
435 */
436 set_bit(BDI_pending, &bdi->state);
437 action = FORK_THREAD;
438 break;
439 }
440
441 spin_lock(&bdi->wb_lock);
442
443 /*
444 * If there is no work to do and the bdi thread was
445 * inactive long enough - kill it. The wb_lock is taken
446 * to make sure no-one adds more work to this bdi and
447 * wakes the bdi thread up.
448 */
449 if (bdi->wb.task && !have_dirty_io &&
450 time_after(jiffies, bdi->wb.last_active +
451 bdi_longest_inactive())) {
452 task = bdi->wb.task;
453 bdi->wb.task = NULL;
454 spin_unlock(&bdi->wb_lock);
455 set_bit(BDI_pending, &bdi->state);
456 action = KILL_THREAD;
457 break;
458 }
459 spin_unlock(&bdi->wb_lock);
460 }
461 spin_unlock_bh(&bdi_lock);
462
463 /* Keep working if default bdi still has things to do */
464 if (!list_empty(&me->bdi->work_list))
465 __set_current_state(TASK_RUNNING);
466
467 switch (action) {
468 case FORK_THREAD:
469 __set_current_state(TASK_RUNNING);
470 task = kthread_create(bdi_writeback_thread, &bdi->wb,
471 "flush-%s", dev_name(bdi->dev));
472 if (IS_ERR(task)) {
473 /*
474 * If thread creation fails, force writeout of
475 * the bdi from the thread. Hopefully 1024 is
476 * large enough for efficient IO.
477 */
478 writeback_inodes_wb(&bdi->wb, 1024,
479 WB_REASON_FORKER_THREAD);
480 } else {
481 /*
482 * The spinlock makes sure we do not lose
483 * wake-ups when racing with 'bdi_queue_work()'.
484 * And as soon as the bdi thread is visible, we
485 * can start it.
486 */
487 spin_lock_bh(&bdi->wb_lock);
488 bdi->wb.task = task;
489 spin_unlock_bh(&bdi->wb_lock);
490 wake_up_process(task);
491 }
492 bdi_clear_pending(bdi);
493 break;
494
495 case KILL_THREAD:
496 __set_current_state(TASK_RUNNING);
497 kthread_stop(task);
498 bdi_clear_pending(bdi);
499 break;
500
501 case NO_ACTION:
502 if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
503 /*
504 * There are no dirty data. The only thing we
505 * should now care about is checking for
506 * inactive bdi threads and killing them. Thus,
507 * let's sleep for longer time, save energy and
508 * be friendly for battery-driven devices.
509 */
510 schedule_timeout(bdi_longest_inactive());
511 else
512 schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
513 try_to_freeze();
514 break;
515 }
516 }
517
518 return 0;
519}
520
521/*
522 * Remove bdi from bdi_list, and ensure that it is no longer visible
523 */
524static void bdi_remove_from_list(struct backing_dev_info *bdi)
525{
526 spin_lock_bh(&bdi_lock);
527 list_del_rcu(&bdi->bdi_list);
528 spin_unlock_bh(&bdi_lock);
529
530 synchronize_rcu_expedited();
531}
532
533int bdi_register(struct backing_dev_info *bdi, struct device *parent,
534 const char *fmt, ...)
535{
536 va_list args;
537 struct device *dev;
538
539 if (bdi->dev) /* The driver needs to use separate queues per device */
540 return 0;
541
542 va_start(args, fmt);
543 dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
544 va_end(args);
545 if (IS_ERR(dev))
546 return PTR_ERR(dev);
547
548 bdi->dev = dev;
549
550 /*
551 * Just start the forker thread for our default backing_dev_info,
552 * and add other bdi's to the list. They will get a thread created
553 * on-demand when they need it.
554 */
555 if (bdi_cap_flush_forker(bdi)) {
556 struct bdi_writeback *wb = &bdi->wb;
557
558 wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
559 dev_name(dev));
560 if (IS_ERR(wb->task))
561 return PTR_ERR(wb->task);
562 }
563
564 bdi_debug_register(bdi, dev_name(dev));
565 set_bit(BDI_registered, &bdi->state);
566
567 spin_lock_bh(&bdi_lock);
568 list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
569 spin_unlock_bh(&bdi_lock);
570
571 trace_writeback_bdi_register(bdi);
572 return 0;
573}
574EXPORT_SYMBOL(bdi_register);
575
576int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
577{
578 return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
579}
580EXPORT_SYMBOL(bdi_register_dev);
581
582/*
583 * Remove bdi from the global list and shutdown any threads we have running
584 */
585static void bdi_wb_shutdown(struct backing_dev_info *bdi)
586{
587 struct task_struct *task;
588
589 if (!bdi_cap_writeback_dirty(bdi))
590 return;
591
592 /*
593 * Make sure nobody finds us on the bdi_list anymore
594 */
595 bdi_remove_from_list(bdi);
596
597 /*
598 * If setup is pending, wait for that to complete first
599 */
600 wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
601 TASK_UNINTERRUPTIBLE);
602
603 /*
604 * Finally, kill the kernel thread. We don't need to be RCU
605 * safe anymore, since the bdi is gone from visibility.
606 */
607 spin_lock_bh(&bdi->wb_lock);
608 task = bdi->wb.task;
609 bdi->wb.task = NULL;
610 spin_unlock_bh(&bdi->wb_lock);
611
612 if (task)
613 kthread_stop(task);
614}
615
616/*
617 * This bdi is going away now, make sure that no super_blocks point to it
618 */
619static void bdi_prune_sb(struct backing_dev_info *bdi)
620{
621 struct super_block *sb;
622
623 spin_lock(&sb_lock);
624 list_for_each_entry(sb, &super_blocks, s_list) {
625 if (sb->s_bdi == bdi)
626 sb->s_bdi = &default_backing_dev_info;
627 }
628 spin_unlock(&sb_lock);
629}
630
631void bdi_unregister(struct backing_dev_info *bdi)
632{
633 struct device *dev = bdi->dev;
634
635 if (dev) {
636 bdi_set_min_ratio(bdi, 0);
637 trace_writeback_bdi_unregister(bdi);
638 bdi_prune_sb(bdi);
639 del_timer_sync(&bdi->wb.wakeup_timer);
640
641 if (!bdi_cap_flush_forker(bdi))
642 bdi_wb_shutdown(bdi);
643 bdi_debug_unregister(bdi);
644
645 spin_lock_bh(&bdi->wb_lock);
646 bdi->dev = NULL;
647 spin_unlock_bh(&bdi->wb_lock);
648
649 device_unregister(dev);
650 }
651}
652EXPORT_SYMBOL(bdi_unregister);
653
654static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
655{
656 memset(wb, 0, sizeof(*wb));
657
658 wb->bdi = bdi;
659 wb->last_old_flush = jiffies;
660 INIT_LIST_HEAD(&wb->b_dirty);
661 INIT_LIST_HEAD(&wb->b_io);
662 INIT_LIST_HEAD(&wb->b_more_io);
663 spin_lock_init(&wb->list_lock);
664 setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
665}
666
667/*
668 * Initial write bandwidth: 100 MB/s
669 */
670#define INIT_BW (100 << (20 - PAGE_SHIFT))
671
672int bdi_init(struct backing_dev_info *bdi)
673{
674 int i, err;
675
676 bdi->dev = NULL;
677
678 bdi->min_ratio = 0;
679 bdi->max_ratio = 100;
680 bdi->max_prop_frac = PROP_FRAC_BASE;
681 spin_lock_init(&bdi->wb_lock);
682 INIT_LIST_HEAD(&bdi->bdi_list);
683 INIT_LIST_HEAD(&bdi->work_list);
684
685 bdi_wb_init(&bdi->wb, bdi);
686
687 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
688 err = percpu_counter_init(&bdi->bdi_stat[i], 0);
689 if (err)
690 goto err;
691 }
692
693 bdi->dirty_exceeded = 0;
694
695 bdi->bw_time_stamp = jiffies;
696 bdi->written_stamp = 0;
697
698 bdi->balanced_dirty_ratelimit = INIT_BW;
699 bdi->dirty_ratelimit = INIT_BW;
700 bdi->write_bandwidth = INIT_BW;
701 bdi->avg_write_bandwidth = INIT_BW;
702
703 err = prop_local_init_percpu(&bdi->completions);
704
705 if (err) {
706err:
707 while (i--)
708 percpu_counter_destroy(&bdi->bdi_stat[i]);
709 }
710
711 return err;
712}
713EXPORT_SYMBOL(bdi_init);
714
715void bdi_destroy(struct backing_dev_info *bdi)
716{
717 int i;
718
719 /*
720 * Splice our entries to the default_backing_dev_info, if this
721 * bdi disappears
722 */
723 if (bdi_has_dirty_io(bdi)) {
724 struct bdi_writeback *dst = &default_backing_dev_info.wb;
725
726 bdi_lock_two(&bdi->wb, dst);
727 list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
728 list_splice(&bdi->wb.b_io, &dst->b_io);
729 list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
730 spin_unlock(&bdi->wb.list_lock);
731 spin_unlock(&dst->list_lock);
732 }
733
734 bdi_unregister(bdi);
735
736 /*
737 * If bdi_unregister() had already been called earlier, the
738 * wakeup_timer could still be armed because bdi_prune_sb()
739 * can race with the bdi_wakeup_thread_delayed() calls from
740 * __mark_inode_dirty().
741 */
742 del_timer_sync(&bdi->wb.wakeup_timer);
743
744 for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
745 percpu_counter_destroy(&bdi->bdi_stat[i]);
746
747 prop_local_destroy_percpu(&bdi->completions);
748}
749EXPORT_SYMBOL(bdi_destroy);
750
751/*
752 * For use from filesystems to quickly init and register a bdi associated
753 * with dirty writeback
754 */
755int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
756 unsigned int cap)
757{
758 char tmp[32];
759 int err;
760
761 bdi->name = name;
762 bdi->capabilities = cap;
763 err = bdi_init(bdi);
764 if (err)
765 return err;
766
767 sprintf(tmp, "%.28s%s", name, "-%d");
768 err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
769 if (err) {
770 bdi_destroy(bdi);
771 return err;
772 }
773
774 return 0;
775}
776EXPORT_SYMBOL(bdi_setup_and_register);
777
778static wait_queue_head_t congestion_wqh[2] = {
779 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
780 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
781 };
782static atomic_t nr_bdi_congested[2];
783
784void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
785{
786 enum bdi_state bit;
787 wait_queue_head_t *wqh = &congestion_wqh[sync];
788
789 bit = sync ? BDI_sync_congested : BDI_async_congested;
790 if (test_and_clear_bit(bit, &bdi->state))
791 atomic_dec(&nr_bdi_congested[sync]);
792 smp_mb__after_clear_bit();
793 if (waitqueue_active(wqh))
794 wake_up(wqh);
795}
796EXPORT_SYMBOL(clear_bdi_congested);
797
798void set_bdi_congested(struct backing_dev_info *bdi, int sync)
799{
800 enum bdi_state bit;
801
802 bit = sync ? BDI_sync_congested : BDI_async_congested;
803 if (!test_and_set_bit(bit, &bdi->state))
804 atomic_inc(&nr_bdi_congested[sync]);
805}
806EXPORT_SYMBOL(set_bdi_congested);
807
808/**
809 * congestion_wait - wait for a backing_dev to become uncongested
810 * @sync: SYNC or ASYNC IO
811 * @timeout: timeout in jiffies
812 *
813 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
814 * write congestion. If no backing_devs are congested then just wait for the
815 * next write to be completed.
816 */
817long congestion_wait(int sync, long timeout)
818{
819 long ret;
820 unsigned long start = jiffies;
821 DEFINE_WAIT(wait);
822 wait_queue_head_t *wqh = &congestion_wqh[sync];
823
824 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
825 ret = io_schedule_timeout(timeout);
826 finish_wait(wqh, &wait);
827
828 trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
829 jiffies_to_usecs(jiffies - start));
830
831 return ret;
832}
833EXPORT_SYMBOL(congestion_wait);
834
835/**
836 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
837 * @zone: A zone to check if it is heavily congested
838 * @sync: SYNC or ASYNC IO
839 * @timeout: timeout in jiffies
840 *
841 * In the event of a congested backing_dev (any backing_dev) and the given
842 * @zone has experienced recent congestion, this waits for up to @timeout
843 * jiffies for either a BDI to exit congestion of the given @sync queue
844 * or a write to complete.
845 *
846 * In the absence of zone congestion, cond_resched() is called to yield
847 * the processor if necessary but otherwise does not sleep.
848 *
849 * The return value is 0 if the sleep is for the full timeout. Otherwise,
850 * it is the number of jiffies that were still remaining when the function
851 * returned. return_value == timeout implies the function did not sleep.
852 */
853long wait_iff_congested(struct zone *zone, int sync, long timeout)
854{
855 long ret;
856 unsigned long start = jiffies;
857 DEFINE_WAIT(wait);
858 wait_queue_head_t *wqh = &congestion_wqh[sync];
859
860 /*
861 * If there is no congestion, or heavy congestion is not being
862 * encountered in the current zone, yield if necessary instead
863 * of sleeping on the congestion queue
864 */
865 if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
866 !zone_is_reclaim_congested(zone)) {
867 cond_resched();
868
869 /* In case we scheduled, work out time remaining */
870 ret = timeout - (jiffies - start);
871 if (ret < 0)
872 ret = 0;
873
874 goto out;
875 }
876
877 /* Sleep until uncongested or a write happens */
878 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
879 ret = io_schedule_timeout(timeout);
880 finish_wait(wqh, &wait);
881
882out:
883 trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
884 jiffies_to_usecs(jiffies - start));
885
886 return ret;
887}
888EXPORT_SYMBOL(wait_iff_congested);