  1
  2#include <linux/wait.h>
  3#include <linux/backing-dev.h>
  4#include <linux/kthread.h>
  5#include <linux/freezer.h>
  6#include <linux/fs.h>
  7#include <linux/pagemap.h>
  8#include <linux/mm.h>
  9#include <linux/sched.h>
 10#include <linux/module.h>
 11#include <linux/writeback.h>
 12#include <linux/device.h>
 13#include <trace/events/writeback.h>
 14
 15static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
 16
 17struct backing_dev_info default_backing_dev_info = {
 18	.name		= "default",
 19	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
 20	.state		= 0,
 21	.capabilities	= BDI_CAP_MAP_COPY,
 22};
 23EXPORT_SYMBOL_GPL(default_backing_dev_info);
 24
 25struct backing_dev_info noop_backing_dev_info = {
 26	.name		= "noop",
 27	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
 28};
 29EXPORT_SYMBOL_GPL(noop_backing_dev_info);
 30
 31static struct class *bdi_class;
 32
 33/*
 34 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 35 * locking.
 36 */
 37DEFINE_SPINLOCK(bdi_lock);
 38LIST_HEAD(bdi_list);
 39
 40/* bdi_wq serves all asynchronous writeback tasks */
 41struct workqueue_struct *bdi_wq;
 42
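/*
 * Take both list_locks in a fixed (lowest address first) order so that
 * two concurrent callers can never deadlock against each other.
 */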
 43void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
 44{
 45	if (wb1 < wb2) {
 46		spin_lock(&wb1->list_lock);
 47		spin_lock_nested(&wb2->list_lock, 1);
 48	} else {
 49		spin_lock(&wb2->list_lock);
 50		spin_lock_nested(&wb1->list_lock, 1);
 51	}
 52}
 53
 54#ifdef CONFIG_DEBUG_FS
 55#include <linux/debugfs.h>
 56#include <linux/seq_file.h>
 57
 58static struct dentry *bdi_debug_root;
 59
 60static void bdi_debug_init(void)
 61{
 62	bdi_debug_root = debugfs_create_dir("bdi", NULL);
 63}
 64
 65static int bdi_debug_stats_show(struct seq_file *m, void *v)
 66{
 67	struct backing_dev_info *bdi = m->private;
 68	struct bdi_writeback *wb = &bdi->wb;
 69	unsigned long background_thresh;
 70	unsigned long dirty_thresh;
 71	unsigned long bdi_thresh;
 72	unsigned long nr_dirty, nr_io, nr_more_io;
 73	struct inode *inode;
 74
 75	nr_dirty = nr_io = nr_more_io = 0;
 76	spin_lock(&wb->list_lock);
 77	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
 78		nr_dirty++;
 79	list_for_each_entry(inode, &wb->b_io, i_wb_list)
 80		nr_io++;
 81	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
 82		nr_more_io++;
 83	spin_unlock(&wb->list_lock);
 84
 85	global_dirty_limits(&background_thresh, &dirty_thresh);
 86	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 87
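/* K(x) reports a page count in kilobytes: pages << (PAGE_SHIFT - 10) */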
 88#define K(x) ((x) << (PAGE_SHIFT - 10))
 89	seq_printf(m,
 90		   "BdiWriteback:       %10lu kB\n"
 91		   "BdiReclaimable:     %10lu kB\n"
 92		   "BdiDirtyThresh:     %10lu kB\n"
 93		   "DirtyThresh:        %10lu kB\n"
 94		   "BackgroundThresh:   %10lu kB\n"
 95		   "BdiDirtied:         %10lu kB\n"
 96		   "BdiWritten:         %10lu kB\n"
 97		   "BdiWriteBandwidth:  %10lu kBps\n"
 98		   "b_dirty:            %10lu\n"
 99		   "b_io:               %10lu\n"
100		   "b_more_io:          %10lu\n"
101		   "bdi_list:           %10u\n"
102		   "state:              %10lx\n",
103		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
104		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
105		   K(bdi_thresh),
106		   K(dirty_thresh),
107		   K(background_thresh),
108		   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
109		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
110		   (unsigned long) K(bdi->write_bandwidth),
111		   nr_dirty,
112		   nr_io,
113		   nr_more_io,
114		   !list_empty(&bdi->bdi_list), bdi->state);
115#undef K
116
117	return 0;
118}
119
120static int bdi_debug_stats_open(struct inode *inode, struct file *file)
121{
122	return single_open(file, bdi_debug_stats_show, inode->i_private);
123}
124
125static const struct file_operations bdi_debug_stats_fops = {
126	.open		= bdi_debug_stats_open,
127	.read		= seq_read,
128	.llseek		= seq_lseek,
129	.release	= single_release,
130};
131
132static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
133{
134	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
135	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
136					       bdi, &bdi_debug_stats_fops);
137}
138
139static void bdi_debug_unregister(struct backing_dev_info *bdi)
140{
141	debugfs_remove(bdi->debug_stats);
142	debugfs_remove(bdi->debug_dir);
143}
144#else
145static inline void bdi_debug_init(void)
146{
147}
148static inline void bdi_debug_register(struct backing_dev_info *bdi,
149				      const char *name)
150{
151}
152static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
153{
154}
155#endif
156
157static ssize_t read_ahead_kb_store(struct device *dev,
158				  struct device_attribute *attr,
159				  const char *buf, size_t count)
160{
161	struct backing_dev_info *bdi = dev_get_drvdata(dev);
162	unsigned long read_ahead_kb;
163	ssize_t ret;
164
165	ret = kstrtoul(buf, 10, &read_ahead_kb);
166	if (ret < 0)
167		return ret;
168
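	/* read_ahead_kb is in KiB; shift by (PAGE_SHIFT - 10) to convert it to pages */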
169	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
170
171	return count;
172}
173
174#define K(pages) ((pages) << (PAGE_SHIFT - 10))
175
176#define BDI_SHOW(name, expr)						\
177static ssize_t name##_show(struct device *dev,				\
178			   struct device_attribute *attr, char *page)	\
179{									\
180	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
181									\
182	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
183}									\
184static DEVICE_ATTR_RW(name);
185
186BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
187
188static ssize_t min_ratio_store(struct device *dev,
189		struct device_attribute *attr, const char *buf, size_t count)
190{
191	struct backing_dev_info *bdi = dev_get_drvdata(dev);
192	unsigned int ratio;
193	ssize_t ret;
194
195	ret = kstrtouint(buf, 10, &ratio);
196	if (ret < 0)
197		return ret;
198
199	ret = bdi_set_min_ratio(bdi, ratio);
200	if (!ret)
201		ret = count;
202
203	return ret;
204}
205BDI_SHOW(min_ratio, bdi->min_ratio)
206
207static ssize_t max_ratio_store(struct device *dev,
208		struct device_attribute *attr, const char *buf, size_t count)
209{
210	struct backing_dev_info *bdi = dev_get_drvdata(dev);
211	unsigned int ratio;
212	ssize_t ret;
213
214	ret = kstrtouint(buf, 10, &ratio);
215	if (ret < 0)
216		return ret;
217
218	ret = bdi_set_max_ratio(bdi, ratio);
219	if (!ret)
220		ret = count;
221
222	return ret;
223}
224BDI_SHOW(max_ratio, bdi->max_ratio)
225
226static ssize_t stable_pages_required_show(struct device *dev,
227					  struct device_attribute *attr,
228					  char *page)
229{
230	struct backing_dev_info *bdi = dev_get_drvdata(dev);
231
232	return snprintf(page, PAGE_SIZE-1, "%d\n",
233			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
234}
235static DEVICE_ATTR_RO(stable_pages_required);
236
237static struct attribute *bdi_dev_attrs[] = {
238	&dev_attr_read_ahead_kb.attr,
239	&dev_attr_min_ratio.attr,
240	&dev_attr_max_ratio.attr,
241	&dev_attr_stable_pages_required.attr,
242	NULL,
243};
244ATTRIBUTE_GROUPS(bdi_dev);
245
246static __init int bdi_class_init(void)
247{
248	bdi_class = class_create(THIS_MODULE, "bdi");
249	if (IS_ERR(bdi_class))
250		return PTR_ERR(bdi_class);
251
252	bdi_class->dev_groups = bdi_dev_groups;
253	bdi_debug_init();
254	return 0;
255}
256postcore_initcall(bdi_class_init);
257
258static int __init default_bdi_init(void)
259{
260	int err;
261
262	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
263					      WQ_UNBOUND | WQ_SYSFS, 0);
264	if (!bdi_wq)
265		return -ENOMEM;
266
267	err = bdi_init(&default_backing_dev_info);
268	if (!err)
269		bdi_register(&default_backing_dev_info, NULL, "default");
270	err = bdi_init(&noop_backing_dev_info);
271
272	return err;
273}
274subsys_initcall(default_bdi_init);
275
276int bdi_has_dirty_io(struct backing_dev_info *bdi)
277{
278	return wb_has_dirty_io(&bdi->wb);
279}
280
281/*
282 * This function is used when the first inode for this bdi is marked dirty. It
 283 * wakes up the corresponding bdi thread, which should then take care of the
 284 * periodic background write-out of dirty inodes. Since the write-out would
 285 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 286 * set up a timer which wakes the bdi thread up later.
 287 *
 288 * Note, we wouldn't bother setting up the timer, but this function is on the
 289 * fast-path (used by '__mark_inode_dirty()'), so we save a few context switches
290 * by delaying the wake-up.
291 *
292 * We have to be careful not to postpone flush work if it is scheduled for
293 * earlier. Thus we use queue_delayed_work().
294 */
295void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
296{
297	unsigned long timeout;
298
299	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
300	spin_lock_bh(&bdi->wb_lock);
301	if (test_bit(BDI_registered, &bdi->state))
302		queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
303	spin_unlock_bh(&bdi->wb_lock);
304}
305
306/*
307 * Remove bdi from bdi_list, and ensure that it is no longer visible
308 */
309static void bdi_remove_from_list(struct backing_dev_info *bdi)
310{
311	spin_lock_bh(&bdi_lock);
312	list_del_rcu(&bdi->bdi_list);
313	spin_unlock_bh(&bdi_lock);
314
315	synchronize_rcu_expedited();
316}
317
318int bdi_register(struct backing_dev_info *bdi, struct device *parent,
319		const char *fmt, ...)
320{
321	va_list args;
322	struct device *dev;
323
324	if (bdi->dev)	/* The driver needs to use separate queues per device */
325		return 0;
326
327	va_start(args, fmt);
328	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
329	va_end(args);
330	if (IS_ERR(dev))
331		return PTR_ERR(dev);
332
333	bdi->dev = dev;
334
335	bdi_debug_register(bdi, dev_name(dev));
336	set_bit(BDI_registered, &bdi->state);
337
338	spin_lock_bh(&bdi_lock);
339	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
340	spin_unlock_bh(&bdi_lock);
341
342	trace_writeback_bdi_register(bdi);
343	return 0;
344}
345EXPORT_SYMBOL(bdi_register);
346
347int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
348{
349	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
350}
351EXPORT_SYMBOL(bdi_register_dev);
352
353/*
354 * Remove bdi from the global list and shutdown any threads we have running
355 */
356static void bdi_wb_shutdown(struct backing_dev_info *bdi)
357{
358	if (!bdi_cap_writeback_dirty(bdi))
359		return;
360
361	/*
362	 * Make sure nobody finds us on the bdi_list anymore
363	 */
364	bdi_remove_from_list(bdi);
365
366	/* Make sure nobody queues further work */
367	spin_lock_bh(&bdi->wb_lock);
368	clear_bit(BDI_registered, &bdi->state);
369	spin_unlock_bh(&bdi->wb_lock);
370
371	/*
372	 * Drain work list and shutdown the delayed_work.  At this point,
 373	 * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi
374	 * is dying and its work_list needs to be drained no matter what.
375	 */
376	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
377	flush_delayed_work(&bdi->wb.dwork);
378	WARN_ON(!list_empty(&bdi->work_list));
379
380	/*
381	 * This shouldn't be necessary unless @bdi for some reason has
382	 * unflushed dirty IO after work_list is drained.  Do it anyway
383	 * just in case.
384	 */
385	cancel_delayed_work_sync(&bdi->wb.dwork);
386}
387
388/*
389 * This bdi is going away now, make sure that no super_blocks point to it
390 */
391static void bdi_prune_sb(struct backing_dev_info *bdi)
392{
393	struct super_block *sb;
394
395	spin_lock(&sb_lock);
396	list_for_each_entry(sb, &super_blocks, s_list) {
397		if (sb->s_bdi == bdi)
398			sb->s_bdi = &default_backing_dev_info;
399	}
400	spin_unlock(&sb_lock);
401}
402
403void bdi_unregister(struct backing_dev_info *bdi)
404{
405	struct device *dev = bdi->dev;
406
407	if (dev) {
408		bdi_set_min_ratio(bdi, 0);
409		trace_writeback_bdi_unregister(bdi);
410		bdi_prune_sb(bdi);
411
412		bdi_wb_shutdown(bdi);
413		bdi_debug_unregister(bdi);
414
415		spin_lock_bh(&bdi->wb_lock);
416		bdi->dev = NULL;
417		spin_unlock_bh(&bdi->wb_lock);
418
419		device_unregister(dev);
420	}
421}
422EXPORT_SYMBOL(bdi_unregister);
423
424static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
425{
426	memset(wb, 0, sizeof(*wb));
427
428	wb->bdi = bdi;
429	wb->last_old_flush = jiffies;
430	INIT_LIST_HEAD(&wb->b_dirty);
431	INIT_LIST_HEAD(&wb->b_io);
432	INIT_LIST_HEAD(&wb->b_more_io);
433	spin_lock_init(&wb->list_lock);
434	INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);
435}
436
437/*
438 * Initial write bandwidth: 100 MB/s
439 */
440#define INIT_BW		(100 << (20 - PAGE_SHIFT))
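/* e.g. with 4 KiB pages (PAGE_SHIFT == 12) this is 100 << 8 = 25600 pages/s */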
441
442int bdi_init(struct backing_dev_info *bdi)
443{
444	int i, err;
445
446	bdi->dev = NULL;
447
448	bdi->min_ratio = 0;
449	bdi->max_ratio = 100;
450	bdi->max_prop_frac = FPROP_FRAC_BASE;
451	spin_lock_init(&bdi->wb_lock);
452	INIT_LIST_HEAD(&bdi->bdi_list);
453	INIT_LIST_HEAD(&bdi->work_list);
454
455	bdi_wb_init(&bdi->wb, bdi);
456
457	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
458		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
459		if (err)
460			goto err;
461	}
462
463	bdi->dirty_exceeded = 0;
464
465	bdi->bw_time_stamp = jiffies;
466	bdi->written_stamp = 0;
467
468	bdi->balanced_dirty_ratelimit = INIT_BW;
469	bdi->dirty_ratelimit = INIT_BW;
470	bdi->write_bandwidth = INIT_BW;
471	bdi->avg_write_bandwidth = INIT_BW;
472
473	err = fprop_local_init_percpu(&bdi->completions);
474
475	if (err) {
476err:
477		while (i--)
478			percpu_counter_destroy(&bdi->bdi_stat[i]);
479	}
480
481	return err;
482}
483EXPORT_SYMBOL(bdi_init);
484
485void bdi_destroy(struct backing_dev_info *bdi)
486{
487	int i;
488
489	/*
 490	 * Splice our entries over to the default_backing_dev_info, since
 491	 * this bdi is going away
492	 */
493	if (bdi_has_dirty_io(bdi)) {
494		struct bdi_writeback *dst = &default_backing_dev_info.wb;
495
496		bdi_lock_two(&bdi->wb, dst);
497		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
498		list_splice(&bdi->wb.b_io, &dst->b_io);
499		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
500		spin_unlock(&bdi->wb.list_lock);
501		spin_unlock(&dst->list_lock);
502	}
503
504	bdi_unregister(bdi);
505
506	/*
507	 * If bdi_unregister() had already been called earlier, the dwork
508	 * could still be pending because bdi_prune_sb() can race with the
509	 * bdi_wakeup_thread_delayed() calls from __mark_inode_dirty().
510	 */
511	cancel_delayed_work_sync(&bdi->wb.dwork);
512
513	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
514		percpu_counter_destroy(&bdi->bdi_stat[i]);
515
516	fprop_local_destroy_percpu(&bdi->completions);
517}
518EXPORT_SYMBOL(bdi_destroy);
519
520/*
521 * For use from filesystems to quickly init and register a bdi associated
522 * with dirty writeback
523 */
524int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
525			   unsigned int cap)
526{
527	int err;
528
529	bdi->name = name;
530	bdi->capabilities = cap;
531	err = bdi_init(bdi);
532	if (err)
533		return err;
534
535	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
536			   atomic_long_inc_return(&bdi_seq));
537	if (err) {
538		bdi_destroy(bdi);
539		return err;
540	}
541
542	return 0;
543}
544EXPORT_SYMBOL(bdi_setup_and_register);
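/*
 * Illustrative use from a filesystem's fill_super() (sketch only; "sbi",
 * "myfs" and the error label below are hypothetical, not taken from any
 * particular caller):
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "myfs", BDI_CAP_MAP_COPY);
 *	if (err)
 *		goto out_free_sbi;
 *	sb->s_bdi = &sbi->bdi;
 */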
545
546static wait_queue_head_t congestion_wqh[2] = {
547		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
548		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
549	};
550static atomic_t nr_bdi_congested[2];
551
552void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
553{
554	enum bdi_state bit;
555	wait_queue_head_t *wqh = &congestion_wqh[sync];
556
557	bit = sync ? BDI_sync_congested : BDI_async_congested;
558	if (test_and_clear_bit(bit, &bdi->state))
559		atomic_dec(&nr_bdi_congested[sync]);
560	smp_mb__after_clear_bit();
561	if (waitqueue_active(wqh))
562		wake_up(wqh);
563}
564EXPORT_SYMBOL(clear_bdi_congested);
565
566void set_bdi_congested(struct backing_dev_info *bdi, int sync)
567{
568	enum bdi_state bit;
569
570	bit = sync ? BDI_sync_congested : BDI_async_congested;
571	if (!test_and_set_bit(bit, &bdi->state))
572		atomic_inc(&nr_bdi_congested[sync]);
573}
574EXPORT_SYMBOL(set_bdi_congested);
575
576/**
577 * congestion_wait - wait for a backing_dev to become uncongested
578 * @sync: SYNC or ASYNC IO
579 * @timeout: timeout in jiffies
580 *
581 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
582 * write congestion.  If no backing_devs are congested then just wait for the
583 * next write to be completed.
584 */
585long congestion_wait(int sync, long timeout)
586{
587	long ret;
588	unsigned long start = jiffies;
589	DEFINE_WAIT(wait);
590	wait_queue_head_t *wqh = &congestion_wqh[sync];
591
592	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
593	ret = io_schedule_timeout(timeout);
594	finish_wait(wqh, &wait);
595
596	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
597					jiffies_to_usecs(jiffies - start));
598
599	return ret;
600}
601EXPORT_SYMBOL(congestion_wait);
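/*
 * Typical caller pattern (sketch only, the predicate name is hypothetical):
 * back off briefly while any backing device is write-congested, e.g. from
 * reclaim or allocation slow paths:
 *
 *	while (still_too_much_dirty_io())
 *		congestion_wait(BLK_RW_ASYNC, HZ / 10);
 */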
602
603/**
604 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
605 * @zone: A zone to check if it is heavily congested
606 * @sync: SYNC or ASYNC IO
607 * @timeout: timeout in jiffies
608 *
 609 * If a backing_dev is congested (any backing_dev) and the given @zone has
 610 * experienced recent congestion, this waits for up to @timeout jiffies for
 611 * either a BDI to exit congestion of the given @sync queue or a write to
 612 * complete.
 613 *
 614 * In the absence of zone congestion, this calls cond_resched() to yield
 615 * the processor if necessary but otherwise does not sleep.
616 *
617 * The return value is 0 if the sleep is for the full timeout. Otherwise,
618 * it is the number of jiffies that were still remaining when the function
619 * returned. return_value == timeout implies the function did not sleep.
620 */
621long wait_iff_congested(struct zone *zone, int sync, long timeout)
622{
623	long ret;
624	unsigned long start = jiffies;
625	DEFINE_WAIT(wait);
626	wait_queue_head_t *wqh = &congestion_wqh[sync];
627
628	/*
629	 * If there is no congestion, or heavy congestion is not being
630	 * encountered in the current zone, yield if necessary instead
631	 * of sleeping on the congestion queue
632	 */
633	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
634			!zone_is_reclaim_congested(zone)) {
635		cond_resched();
636
637		/* In case we scheduled, work out time remaining */
638		ret = timeout - (jiffies - start);
639		if (ret < 0)
640			ret = 0;
641
642		goto out;
643	}
644
645	/* Sleep until uncongested or a write happens */
646	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
647	ret = io_schedule_timeout(timeout);
648	finish_wait(wqh, &wait);
649
650out:
651	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
652					jiffies_to_usecs(jiffies - start));
653
654	return ret;
655}
656EXPORT_SYMBOL(wait_iff_congested);
657
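/*
 * Handler for obsolete writeback sysctls (e.g. vm.nr_pdflush_threads):
 * always reports "0\n" and warns once that the /proc entry is scheduled
 * for removal.
 */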
658int pdflush_proc_obsolete(struct ctl_table *table, int write,
659			void __user *buffer, size_t *lenp, loff_t *ppos)
660{
661	char kbuf[] = "0\n";
662
663	if (*ppos || *lenp < sizeof(kbuf)) {
664		*lenp = 0;
665		return 0;
666	}
667
668	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
669		return -EFAULT;
670	printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n",
671			table->procname);
672
673	*lenp = 2;
674	*ppos += *lenp;
675	return 2;
676}
   1// SPDX-License-Identifier: GPL-2.0-only
   2
   3#include <linux/wait.h>
   4#include <linux/rbtree.h>
   5#include <linux/backing-dev.h>
   6#include <linux/kthread.h>
   7#include <linux/freezer.h>
   8#include <linux/fs.h>
   9#include <linux/pagemap.h>
  10#include <linux/mm.h>
  11#include <linux/sched.h>
  12#include <linux/module.h>
  13#include <linux/writeback.h>
  14#include <linux/device.h>
  15#include <trace/events/writeback.h>
  16
  17struct backing_dev_info noop_backing_dev_info = {
  18	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
  19};
  20EXPORT_SYMBOL_GPL(noop_backing_dev_info);
  21
  22static struct class *bdi_class;
  23static const char *bdi_unknown_name = "(unknown)";
  24
  25/*
  26 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
  27 * reader side locking.
  28 */
  29DEFINE_SPINLOCK(bdi_lock);
  30static u64 bdi_id_cursor;
  31static struct rb_root bdi_tree = RB_ROOT;
  32LIST_HEAD(bdi_list);
  33
  34/* bdi_wq serves all asynchronous writeback tasks */
  35struct workqueue_struct *bdi_wq;
  36
  37#ifdef CONFIG_DEBUG_FS
  38#include <linux/debugfs.h>
  39#include <linux/seq_file.h>
  40
  41static struct dentry *bdi_debug_root;
  42
  43static void bdi_debug_init(void)
  44{
  45	bdi_debug_root = debugfs_create_dir("bdi", NULL);
  46}
  47
  48static int bdi_debug_stats_show(struct seq_file *m, void *v)
  49{
  50	struct backing_dev_info *bdi = m->private;
  51	struct bdi_writeback *wb = &bdi->wb;
  52	unsigned long background_thresh;
  53	unsigned long dirty_thresh;
  54	unsigned long wb_thresh;
  55	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
  56	struct inode *inode;
  57
  58	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
  59	spin_lock(&wb->list_lock);
  60	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
  61		nr_dirty++;
  62	list_for_each_entry(inode, &wb->b_io, i_io_list)
  63		nr_io++;
  64	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
  65		nr_more_io++;
  66	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
  67		if (inode->i_state & I_DIRTY_TIME)
  68			nr_dirty_time++;
  69	spin_unlock(&wb->list_lock);
  70
  71	global_dirty_limits(&background_thresh, &dirty_thresh);
  72	wb_thresh = wb_calc_thresh(wb, dirty_thresh);
  73
  74#define K(x) ((x) << (PAGE_SHIFT - 10))
  75	seq_printf(m,
  76		   "BdiWriteback:       %10lu kB\n"
  77		   "BdiReclaimable:     %10lu kB\n"
  78		   "BdiDirtyThresh:     %10lu kB\n"
  79		   "DirtyThresh:        %10lu kB\n"
  80		   "BackgroundThresh:   %10lu kB\n"
  81		   "BdiDirtied:         %10lu kB\n"
  82		   "BdiWritten:         %10lu kB\n"
  83		   "BdiWriteBandwidth:  %10lu kBps\n"
  84		   "b_dirty:            %10lu\n"
  85		   "b_io:               %10lu\n"
  86		   "b_more_io:          %10lu\n"
  87		   "b_dirty_time:       %10lu\n"
  88		   "bdi_list:           %10u\n"
  89		   "state:              %10lx\n",
  90		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
  91		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
  92		   K(wb_thresh),
  93		   K(dirty_thresh),
  94		   K(background_thresh),
  95		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
  96		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
  97		   (unsigned long) K(wb->write_bandwidth),
  98		   nr_dirty,
  99		   nr_io,
 100		   nr_more_io,
 101		   nr_dirty_time,
 102		   !list_empty(&bdi->bdi_list), bdi->wb.state);
 103#undef K
 104
 105	return 0;
 106}
 107DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);
 108
 109static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 110{
 111	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
 112
 113	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
 114			    &bdi_debug_stats_fops);
 115}
 116
 117static void bdi_debug_unregister(struct backing_dev_info *bdi)
 118{
 119	debugfs_remove_recursive(bdi->debug_dir);
 120}
 121#else
 122static inline void bdi_debug_init(void)
 123{
 124}
 125static inline void bdi_debug_register(struct backing_dev_info *bdi,
 126				      const char *name)
 127{
 128}
 129static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 130{
 131}
 132#endif
 133
 134static ssize_t read_ahead_kb_store(struct device *dev,
 135				  struct device_attribute *attr,
 136				  const char *buf, size_t count)
 137{
 138	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 139	unsigned long read_ahead_kb;
 140	ssize_t ret;
 141
 142	ret = kstrtoul(buf, 10, &read_ahead_kb);
 143	if (ret < 0)
 144		return ret;
 145
 146	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
 147
 148	return count;
 149}
 150
 151#define K(pages) ((pages) << (PAGE_SHIFT - 10))
 152
 153#define BDI_SHOW(name, expr)						\
 154static ssize_t name##_show(struct device *dev,				\
 155			   struct device_attribute *attr, char *page)	\
 156{									\
 157	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
 158									\
 159	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
 160}									\
 161static DEVICE_ATTR_RW(name);
 162
 163BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
 164
 165static ssize_t min_ratio_store(struct device *dev,
 166		struct device_attribute *attr, const char *buf, size_t count)
 167{
 168	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 169	unsigned int ratio;
 170	ssize_t ret;
 171
 172	ret = kstrtouint(buf, 10, &ratio);
 173	if (ret < 0)
 174		return ret;
 175
 176	ret = bdi_set_min_ratio(bdi, ratio);
 177	if (!ret)
 178		ret = count;
 179
 180	return ret;
 181}
 182BDI_SHOW(min_ratio, bdi->min_ratio)
 183
 184static ssize_t max_ratio_store(struct device *dev,
 185		struct device_attribute *attr, const char *buf, size_t count)
 186{
 187	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 188	unsigned int ratio;
 189	ssize_t ret;
 190
 191	ret = kstrtouint(buf, 10, &ratio);
 192	if (ret < 0)
 193		return ret;
 194
 195	ret = bdi_set_max_ratio(bdi, ratio);
 196	if (!ret)
 197		ret = count;
 198
 199	return ret;
 200}
 201BDI_SHOW(max_ratio, bdi->max_ratio)
 202
 203static ssize_t stable_pages_required_show(struct device *dev,
 204					  struct device_attribute *attr,
 205					  char *page)
 206{
 207	struct backing_dev_info *bdi = dev_get_drvdata(dev);
 208
 209	return snprintf(page, PAGE_SIZE-1, "%d\n",
 210			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
 211}
 212static DEVICE_ATTR_RO(stable_pages_required);
 213
 214static struct attribute *bdi_dev_attrs[] = {
 215	&dev_attr_read_ahead_kb.attr,
 216	&dev_attr_min_ratio.attr,
 217	&dev_attr_max_ratio.attr,
 218	&dev_attr_stable_pages_required.attr,
 219	NULL,
 220};
 221ATTRIBUTE_GROUPS(bdi_dev);
 222
 223static __init int bdi_class_init(void)
 224{
 225	bdi_class = class_create(THIS_MODULE, "bdi");
 226	if (IS_ERR(bdi_class))
 227		return PTR_ERR(bdi_class);
 228
 229	bdi_class->dev_groups = bdi_dev_groups;
 230	bdi_debug_init();
 231
 232	return 0;
 233}
 234postcore_initcall(bdi_class_init);
 235
 236static int bdi_init(struct backing_dev_info *bdi);
 237
 238static int __init default_bdi_init(void)
 239{
 240	int err;
 241
 242	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
 243				 WQ_SYSFS, 0);
 244	if (!bdi_wq)
 245		return -ENOMEM;
 246
 247	err = bdi_init(&noop_backing_dev_info);
 248
 249	return err;
 250}
 251subsys_initcall(default_bdi_init);
 252
 253/*
 254 * This function is used when the first inode for this wb is marked dirty. It
  255 * wakes up the corresponding bdi thread, which should then take care of the
  256 * periodic background write-out of dirty inodes. Since the write-out would
  257 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
  258 * set up a timer which wakes the bdi thread up later.
  259 *
  260 * Note, we wouldn't bother setting up the timer, but this function is on the
  261 * fast-path (used by '__mark_inode_dirty()'), so we save a few context switches
 262 * by delaying the wake-up.
 263 *
 264 * We have to be careful not to postpone flush work if it is scheduled for
 265 * earlier. Thus we use queue_delayed_work().
 266 */
 267void wb_wakeup_delayed(struct bdi_writeback *wb)
 268{
 269	unsigned long timeout;
 270
 271	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
 272	spin_lock_bh(&wb->work_lock);
 273	if (test_bit(WB_registered, &wb->state))
 274		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
 275	spin_unlock_bh(&wb->work_lock);
 276}
 277
 278/*
 279 * Initial write bandwidth: 100 MB/s
 280 */
 281#define INIT_BW		(100 << (20 - PAGE_SHIFT))
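/* e.g. with 4 KiB pages (PAGE_SHIFT == 12) this is 100 << 8 = 25600 pages/s */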
 282
 283static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
 284		   gfp_t gfp)
 285{
 286	int i, err;
 287
 288	memset(wb, 0, sizeof(*wb));
 289
 290	if (wb != &bdi->wb)
 291		bdi_get(bdi);
 292	wb->bdi = bdi;
 293	wb->last_old_flush = jiffies;
 294	INIT_LIST_HEAD(&wb->b_dirty);
 295	INIT_LIST_HEAD(&wb->b_io);
 296	INIT_LIST_HEAD(&wb->b_more_io);
 297	INIT_LIST_HEAD(&wb->b_dirty_time);
 298	spin_lock_init(&wb->list_lock);
 299
 300	wb->bw_time_stamp = jiffies;
 301	wb->balanced_dirty_ratelimit = INIT_BW;
 302	wb->dirty_ratelimit = INIT_BW;
 303	wb->write_bandwidth = INIT_BW;
 304	wb->avg_write_bandwidth = INIT_BW;
 305
 306	spin_lock_init(&wb->work_lock);
 307	INIT_LIST_HEAD(&wb->work_list);
 308	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
 309	wb->dirty_sleep = jiffies;
 310
 311	err = fprop_local_init_percpu(&wb->completions, gfp);
 312	if (err)
 313		goto out_put_bdi;
 314
 315	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
 316		err = percpu_counter_init(&wb->stat[i], 0, gfp);
 317		if (err)
 318			goto out_destroy_stat;
 319	}
 320
 321	return 0;
 322
 323out_destroy_stat:
 324	while (i--)
 325		percpu_counter_destroy(&wb->stat[i]);
 326	fprop_local_destroy_percpu(&wb->completions);
 327out_put_bdi:
 328	if (wb != &bdi->wb)
 329		bdi_put(bdi);
 330	return err;
 331}
 332
 333static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
 334
 335/*
 336 * Remove bdi from the global list and shutdown any threads we have running
 337 */
 338static void wb_shutdown(struct bdi_writeback *wb)
 339{
 340	/* Make sure nobody queues further work */
 341	spin_lock_bh(&wb->work_lock);
 342	if (!test_and_clear_bit(WB_registered, &wb->state)) {
 343		spin_unlock_bh(&wb->work_lock);
 344		return;
 345	}
 346	spin_unlock_bh(&wb->work_lock);
 347
 348	cgwb_remove_from_bdi_list(wb);
 349	/*
 350	 * Drain work list and shutdown the delayed_work.  !WB_registered
 351	 * tells wb_workfn() that @wb is dying and its work_list needs to
 352	 * be drained no matter what.
 353	 */
 354	mod_delayed_work(bdi_wq, &wb->dwork, 0);
 355	flush_delayed_work(&wb->dwork);
 356	WARN_ON(!list_empty(&wb->work_list));
 357}
 358
 359static void wb_exit(struct bdi_writeback *wb)
 360{
 361	int i;
 362
 363	WARN_ON(delayed_work_pending(&wb->dwork));
 364
 365	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
 366		percpu_counter_destroy(&wb->stat[i]);
 367
 368	fprop_local_destroy_percpu(&wb->completions);
 369	if (wb != &wb->bdi->wb)
 370		bdi_put(wb->bdi);
 371}
 372
 373#ifdef CONFIG_CGROUP_WRITEBACK
 374
 375#include <linux/memcontrol.h>
 376
 377/*
 378 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, and memcg->cgwb_list.
 379 * bdi->cgwb_tree is also RCU protected.
 380 */
 381static DEFINE_SPINLOCK(cgwb_lock);
 382static struct workqueue_struct *cgwb_release_wq;
 383
 384static void cgwb_release_workfn(struct work_struct *work)
 385{
 386	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
 387						release_work);
 388	struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);
 389
 390	mutex_lock(&wb->bdi->cgwb_release_mutex);
 391	wb_shutdown(wb);
 392
 393	css_put(wb->memcg_css);
 394	css_put(wb->blkcg_css);
 395	mutex_unlock(&wb->bdi->cgwb_release_mutex);
 396
 397	/* triggers blkg destruction if no online users left */
 398	blkcg_unpin_online(blkcg);
 399
 400	fprop_local_destroy_percpu(&wb->memcg_completions);
 401	percpu_ref_exit(&wb->refcnt);
 402	wb_exit(wb);
 403	kfree_rcu(wb, rcu);
 404}
 405
 406static void cgwb_release(struct percpu_ref *refcnt)
 407{
 408	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
 409						refcnt);
 410	queue_work(cgwb_release_wq, &wb->release_work);
 411}
 412
 413static void cgwb_kill(struct bdi_writeback *wb)
 414{
 415	lockdep_assert_held(&cgwb_lock);
 416
 417	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
 418	list_del(&wb->memcg_node);
 419	list_del(&wb->blkcg_node);
 420	percpu_ref_kill(&wb->refcnt);
 421}
 422
 423static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
 424{
 425	spin_lock_irq(&cgwb_lock);
 426	list_del_rcu(&wb->bdi_node);
 427	spin_unlock_irq(&cgwb_lock);
 428}
 429
 430static int cgwb_create(struct backing_dev_info *bdi,
 431		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
 432{
 433	struct mem_cgroup *memcg;
 434	struct cgroup_subsys_state *blkcg_css;
 435	struct blkcg *blkcg;
 436	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
 437	struct bdi_writeback *wb;
 438	unsigned long flags;
 439	int ret = 0;
 440
 441	memcg = mem_cgroup_from_css(memcg_css);
 442	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
 443	blkcg = css_to_blkcg(blkcg_css);
 444	memcg_cgwb_list = &memcg->cgwb_list;
 445	blkcg_cgwb_list = &blkcg->cgwb_list;
 446
 447	/* look up again under lock and discard on blkcg mismatch */
 448	spin_lock_irqsave(&cgwb_lock, flags);
 449	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 450	if (wb && wb->blkcg_css != blkcg_css) {
 451		cgwb_kill(wb);
 452		wb = NULL;
 453	}
 454	spin_unlock_irqrestore(&cgwb_lock, flags);
 455	if (wb)
 456		goto out_put;
 457
 458	/* need to create a new one */
 459	wb = kmalloc(sizeof(*wb), gfp);
 460	if (!wb) {
 461		ret = -ENOMEM;
 462		goto out_put;
 463	}
 464
 465	ret = wb_init(wb, bdi, gfp);
 466	if (ret)
 467		goto err_free;
 468
 469	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
 470	if (ret)
 471		goto err_wb_exit;
 472
 473	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
 474	if (ret)
 475		goto err_ref_exit;
 476
 477	wb->memcg_css = memcg_css;
 478	wb->blkcg_css = blkcg_css;
 479	INIT_WORK(&wb->release_work, cgwb_release_workfn);
 480	set_bit(WB_registered, &wb->state);
 481
 482	/*
  483	 * The root wb determines the registered state of the whole bdi, and the
  484	 * next pointers of memcg_cgwb_list and blkcg_cgwb_list indicate whether
  485	 * those lists are still online.  Don't link @wb if either is dead.
 486	 * See wb_memcg_offline() and wb_blkcg_offline().
 487	 */
 488	ret = -ENODEV;
 489	spin_lock_irqsave(&cgwb_lock, flags);
 490	if (test_bit(WB_registered, &bdi->wb.state) &&
 491	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
 492		/* we might have raced another instance of this function */
 493		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
 494		if (!ret) {
 495			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
 496			list_add(&wb->memcg_node, memcg_cgwb_list);
 497			list_add(&wb->blkcg_node, blkcg_cgwb_list);
 498			blkcg_pin_online(blkcg);
 499			css_get(memcg_css);
 500			css_get(blkcg_css);
 501		}
 502	}
 503	spin_unlock_irqrestore(&cgwb_lock, flags);
 504	if (ret) {
 505		if (ret == -EEXIST)
 506			ret = 0;
 507		goto err_fprop_exit;
 508	}
 509	goto out_put;
 510
 511err_fprop_exit:
 512	fprop_local_destroy_percpu(&wb->memcg_completions);
 513err_ref_exit:
 514	percpu_ref_exit(&wb->refcnt);
 515err_wb_exit:
 516	wb_exit(wb);
 517err_free:
 518	kfree(wb);
 519out_put:
 520	css_put(blkcg_css);
 521	return ret;
 522}
 523
 524/**
 525 * wb_get_lookup - get wb for a given memcg
 526 * @bdi: target bdi
 527 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 528 *
 529 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 530 * refcount incremented.
 531 *
 532 * This function uses css_get() on @memcg_css and thus expects its refcnt
 533 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 534 * @memcg_css isn't enough.  try_get it before calling this function.
 535 *
 536 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 537 * memcg on the default hierarchy, memcg association is guaranteed to be
 538 * more specific (equal or descendant to the associated blkcg) and thus can
 539 * identify both the memcg and blkcg associations.
 540 *
 541 * Because the blkcg associated with a memcg may change as blkcg is enabled
 542 * and disabled closer to root in the hierarchy, each wb keeps track of
 543 * both the memcg and blkcg associated with it and verifies the blkcg on
 544 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 545 * created.
 546 */
 547struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
 548				    struct cgroup_subsys_state *memcg_css)
 549{
 550	struct bdi_writeback *wb;
 551
 552	if (!memcg_css->parent)
 553		return &bdi->wb;
 554
 555	rcu_read_lock();
 556	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 557	if (wb) {
 558		struct cgroup_subsys_state *blkcg_css;
 559
 560		/* see whether the blkcg association has changed */
 561		blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
 562		if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
 563			wb = NULL;
 564		css_put(blkcg_css);
 565	}
 566	rcu_read_unlock();
 567
 568	return wb;
 569}
 570
 571/**
 572 * wb_get_create - get wb for a given memcg, create if necessary
 573 * @bdi: target bdi
 574 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 575 * @gfp: allocation mask to use
 576 *
 577 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 578 * create one.  See wb_get_lookup() for more details.
 579 */
 580struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
 581				    struct cgroup_subsys_state *memcg_css,
 582				    gfp_t gfp)
 583{
 584	struct bdi_writeback *wb;
 585
 586	might_sleep_if(gfpflags_allow_blocking(gfp));
 587
 588	if (!memcg_css->parent)
 589		return &bdi->wb;
 590
 591	do {
 592		wb = wb_get_lookup(bdi, memcg_css);
 593	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));
 594
 595	return wb;
 596}
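/*
 * Illustrative only: a caller that holds a reference on @memcg_css might do
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
 *	if (wb) {
 *		... use wb for writeback accounting ...
 *		wb_put(wb);
 *	}
 *
 * assuming wb_put() from <linux/backing-dev.h> as the matching release.
 */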
 597
 598static int cgwb_bdi_init(struct backing_dev_info *bdi)
 599{
 600	int ret;
 601
 602	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
 603	mutex_init(&bdi->cgwb_release_mutex);
 604	init_rwsem(&bdi->wb_switch_rwsem);
 605
 606	ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
 607	if (!ret) {
 608		bdi->wb.memcg_css = &root_mem_cgroup->css;
 609		bdi->wb.blkcg_css = blkcg_root_css;
 610	}
 611	return ret;
 612}
 613
 614static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
 615{
 616	struct radix_tree_iter iter;
 617	void **slot;
 618	struct bdi_writeback *wb;
 619
 620	WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 621
 622	spin_lock_irq(&cgwb_lock);
 623	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 624		cgwb_kill(*slot);
 625	spin_unlock_irq(&cgwb_lock);
 626
 627	mutex_lock(&bdi->cgwb_release_mutex);
 628	spin_lock_irq(&cgwb_lock);
 629	while (!list_empty(&bdi->wb_list)) {
 630		wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
 631				      bdi_node);
 632		spin_unlock_irq(&cgwb_lock);
 633		wb_shutdown(wb);
 634		spin_lock_irq(&cgwb_lock);
 635	}
 636	spin_unlock_irq(&cgwb_lock);
 637	mutex_unlock(&bdi->cgwb_release_mutex);
 638}
 639
 640/**
 641 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 642 * @memcg: memcg being offlined
 643 *
 644 * Also prevents creation of any new wb's associated with @memcg.
 645 */
 646void wb_memcg_offline(struct mem_cgroup *memcg)
 647{
 648	struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
 649	struct bdi_writeback *wb, *next;
 650
 651	spin_lock_irq(&cgwb_lock);
 652	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
 653		cgwb_kill(wb);
 654	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
 655	spin_unlock_irq(&cgwb_lock);
 656}
 657
 658/**
 659 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 660 * @blkcg: blkcg being offlined
 661 *
 662 * Also prevents creation of any new wb's associated with @blkcg.
 663 */
 664void wb_blkcg_offline(struct blkcg *blkcg)
 665{
 666	struct bdi_writeback *wb, *next;
 667
 668	spin_lock_irq(&cgwb_lock);
 669	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
 670		cgwb_kill(wb);
 671	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
 672	spin_unlock_irq(&cgwb_lock);
 673}
 674
 675static void cgwb_bdi_register(struct backing_dev_info *bdi)
 676{
 677	spin_lock_irq(&cgwb_lock);
 678	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
 679	spin_unlock_irq(&cgwb_lock);
 680}
 681
 682static int __init cgwb_init(void)
 683{
 684	/*
 685	 * There can be many concurrent release work items overwhelming
 686	 * system_wq.  Put them in a separate wq and limit concurrency.
 687	 * There's no point in executing many of these in parallel.
 688	 */
 689	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
 690	if (!cgwb_release_wq)
 691		return -ENOMEM;
 692
 693	return 0;
 694}
 695subsys_initcall(cgwb_init);
 696
 697#else	/* CONFIG_CGROUP_WRITEBACK */
 698
 699static int cgwb_bdi_init(struct backing_dev_info *bdi)
 700{
 701	return wb_init(&bdi->wb, bdi, GFP_KERNEL);
 702}
 703
 704static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }
 705
 706static void cgwb_bdi_register(struct backing_dev_info *bdi)
 707{
 708	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
 709}
 710
 711static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
 712{
 713	list_del_rcu(&wb->bdi_node);
 714}
 715
 716#endif	/* CONFIG_CGROUP_WRITEBACK */
 717
 718static int bdi_init(struct backing_dev_info *bdi)
 719{
 720	int ret;
 721
 722	bdi->dev = NULL;
 723
 724	kref_init(&bdi->refcnt);
 725	bdi->min_ratio = 0;
 726	bdi->max_ratio = 100;
 727	bdi->max_prop_frac = FPROP_FRAC_BASE;
 728	INIT_LIST_HEAD(&bdi->bdi_list);
 729	INIT_LIST_HEAD(&bdi->wb_list);
 730	init_waitqueue_head(&bdi->wb_waitq);
 731
 732	ret = cgwb_bdi_init(bdi);
 733
 734	return ret;
 735}
 736
 737struct backing_dev_info *bdi_alloc(int node_id)
 738{
 739	struct backing_dev_info *bdi;
 740
 741	bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
 742	if (!bdi)
 743		return NULL;
 744
 745	if (bdi_init(bdi)) {
 746		kfree(bdi);
 747		return NULL;
 748	}
 749	return bdi;
 750}
 751EXPORT_SYMBOL(bdi_alloc);
 752
 753static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
 754{
 755	struct rb_node **p = &bdi_tree.rb_node;
 756	struct rb_node *parent = NULL;
 757	struct backing_dev_info *bdi;
 758
 759	lockdep_assert_held(&bdi_lock);
 760
 761	while (*p) {
 762		parent = *p;
 763		bdi = rb_entry(parent, struct backing_dev_info, rb_node);
 764
 765		if (bdi->id > id)
 766			p = &(*p)->rb_left;
 767		else if (bdi->id < id)
 768			p = &(*p)->rb_right;
 769		else
 770			break;
 771	}
 772
 773	if (parentp)
 774		*parentp = parent;
 775	return p;
 776}
 777
 778/**
 779 * bdi_get_by_id - lookup and get bdi from its id
 780 * @id: bdi id to lookup
 781 *
 782 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 783 * doesn't exist or is already unregistered.
 784 */
 785struct backing_dev_info *bdi_get_by_id(u64 id)
 786{
 787	struct backing_dev_info *bdi = NULL;
 788	struct rb_node **p;
 789
 790	spin_lock_bh(&bdi_lock);
 791	p = bdi_lookup_rb_node(id, NULL);
 792	if (*p) {
 793		bdi = rb_entry(*p, struct backing_dev_info, rb_node);
 794		bdi_get(bdi);
 795	}
 796	spin_unlock_bh(&bdi_lock);
 797
 798	return bdi;
 799}
 800
 801int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
 802{
 803	struct device *dev;
 804	struct rb_node *parent, **p;
 805
 806	if (bdi->dev)	/* The driver needs to use separate queues per device */
 807		return 0;
 808
 809	vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
 810	dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
 811	if (IS_ERR(dev))
 812		return PTR_ERR(dev);
 813
 814	cgwb_bdi_register(bdi);
 815	bdi->dev = dev;
 816
 817	bdi_debug_register(bdi, dev_name(dev));
 818	set_bit(WB_registered, &bdi->wb.state);
 819
 820	spin_lock_bh(&bdi_lock);
 821
 822	bdi->id = ++bdi_id_cursor;
 823
 824	p = bdi_lookup_rb_node(bdi->id, &parent);
 825	rb_link_node(&bdi->rb_node, parent, p);
 826	rb_insert_color(&bdi->rb_node, &bdi_tree);
 827
 828	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
 829
 830	spin_unlock_bh(&bdi_lock);
 831
 832	trace_writeback_bdi_register(bdi);
 833	return 0;
 834}
 835
 836int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
 837{
 838	va_list args;
 839	int ret;
 840
 841	va_start(args, fmt);
 842	ret = bdi_register_va(bdi, fmt, args);
 843	va_end(args);
 844	return ret;
 845}
 846EXPORT_SYMBOL(bdi_register);
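/*
 * Block drivers typically pair these along the following lines (sketch,
 * not a verbatim caller):
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	...
 *	bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
 *	...
 *	bdi_put(bdi);		// final reference drop on teardown
 */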
 847
 848void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
 849{
 850	WARN_ON_ONCE(bdi->owner);
 851	bdi->owner = owner;
 852	get_device(owner);
 853}
 854
 855/*
 856 * Remove bdi from bdi_list, and ensure that it is no longer visible
 857 */
 858static void bdi_remove_from_list(struct backing_dev_info *bdi)
 859{
 860	spin_lock_bh(&bdi_lock);
 861	rb_erase(&bdi->rb_node, &bdi_tree);
 862	list_del_rcu(&bdi->bdi_list);
 863	spin_unlock_bh(&bdi_lock);
 864
 865	synchronize_rcu_expedited();
 866}
 867
 868void bdi_unregister(struct backing_dev_info *bdi)
 869{
 870	/* make sure nobody finds us on the bdi_list anymore */
 871	bdi_remove_from_list(bdi);
 872	wb_shutdown(&bdi->wb);
 873	cgwb_bdi_unregister(bdi);
 874
 875	if (bdi->dev) {
 876		bdi_debug_unregister(bdi);
 877		device_unregister(bdi->dev);
 878		bdi->dev = NULL;
 879	}
 880
 881	if (bdi->owner) {
 882		put_device(bdi->owner);
 883		bdi->owner = NULL;
 884	}
 885}
 886
 887static void release_bdi(struct kref *ref)
 888{
 889	struct backing_dev_info *bdi =
 890			container_of(ref, struct backing_dev_info, refcnt);
 891
 892	if (test_bit(WB_registered, &bdi->wb.state))
 893		bdi_unregister(bdi);
 894	WARN_ON_ONCE(bdi->dev);
 895	wb_exit(&bdi->wb);
 896	kfree(bdi);
 897}
 898
 899void bdi_put(struct backing_dev_info *bdi)
 900{
 901	kref_put(&bdi->refcnt, release_bdi);
 902}
 903EXPORT_SYMBOL(bdi_put);
 904
 905const char *bdi_dev_name(struct backing_dev_info *bdi)
 906{
 907	if (!bdi || !bdi->dev)
 908		return bdi_unknown_name;
 909	return bdi->dev_name;
 910}
 911EXPORT_SYMBOL_GPL(bdi_dev_name);
 912
 913static wait_queue_head_t congestion_wqh[2] = {
 914		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
 915		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
 916	};
 917static atomic_t nr_wb_congested[2];
 918
 919void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 920{
 921	wait_queue_head_t *wqh = &congestion_wqh[sync];
 922	enum wb_congested_state bit;
 923
 924	bit = sync ? WB_sync_congested : WB_async_congested;
 925	if (test_and_clear_bit(bit, &bdi->wb.congested))
 926		atomic_dec(&nr_wb_congested[sync]);
 927	smp_mb__after_atomic();
 928	if (waitqueue_active(wqh))
 929		wake_up(wqh);
 930}
 931EXPORT_SYMBOL(clear_bdi_congested);
 932
 933void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 934{
 935	enum wb_congested_state bit;
 936
 937	bit = sync ? WB_sync_congested : WB_async_congested;
 938	if (!test_and_set_bit(bit, &bdi->wb.congested))
 939		atomic_inc(&nr_wb_congested[sync]);
 940}
 941EXPORT_SYMBOL(set_bdi_congested);
 942
 943/**
 944 * congestion_wait - wait for a backing_dev to become uncongested
 945 * @sync: SYNC or ASYNC IO
 946 * @timeout: timeout in jiffies
 947 *
 948 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 949 * write congestion.  If no backing_devs are congested then just wait for the
 950 * next write to be completed.
 951 */
 952long congestion_wait(int sync, long timeout)
 953{
 954	long ret;
 955	unsigned long start = jiffies;
 956	DEFINE_WAIT(wait);
 957	wait_queue_head_t *wqh = &congestion_wqh[sync];
 958
 959	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
 960	ret = io_schedule_timeout(timeout);
 961	finish_wait(wqh, &wait);
 962
 963	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
 964					jiffies_to_usecs(jiffies - start));
 965
 966	return ret;
 967}
 968EXPORT_SYMBOL(congestion_wait);
 969
 970/**
  971 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested
 972 * @sync: SYNC or ASYNC IO
 973 * @timeout: timeout in jiffies
 974 *
 975 * In the event of a congested backing_dev (any backing_dev) this waits
 976 * for up to @timeout jiffies for either a BDI to exit congestion of the
 977 * given @sync queue or a write to complete.
 978 *
 979 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 980 * it is the number of jiffies that were still remaining when the function
 981 * returned. return_value == timeout implies the function did not sleep.
 982 */
 983long wait_iff_congested(int sync, long timeout)
 984{
 985	long ret;
 986	unsigned long start = jiffies;
 987	DEFINE_WAIT(wait);
 988	wait_queue_head_t *wqh = &congestion_wqh[sync];
 989
 990	/*
 991	 * If there is no congestion, yield if necessary instead
 992	 * of sleeping on the congestion queue
 993	 */
 994	if (atomic_read(&nr_wb_congested[sync]) == 0) {
 995		cond_resched();
 996
 997		/* In case we scheduled, work out time remaining */
 998		ret = timeout - (jiffies - start);
 999		if (ret < 0)
1000			ret = 0;
1001
1002		goto out;
1003	}
1004
1005	/* Sleep until uncongested or a write happens */
1006	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1007	ret = io_schedule_timeout(timeout);
1008	finish_wait(wqh, &wait);
1009
1010out:
1011	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
1012					jiffies_to_usecs(jiffies - start));
1013
1014	return ret;
1015}
1016EXPORT_SYMBOL(wait_iff_congested);