/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);
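
/*
 * Usage sketch: bdi_get()/bdi_put() follow the usual kref pattern, and
 * bdi_get_by_id() returns its result with a reference already held (or
 * NULL), so every successful lookup must be balanced by a bdi_put():
 *
 *	struct backing_dev_info *bdi = bdi_get_by_id(id);
 *
 *	if (bdi) {
 *		// ... use bdi ...
 *		bdi_put(bdi);		// drop the lookup's reference
 *	}
 */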

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs. See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}
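
/*
 * A brief sketch of what the helpers above buy us: __add_wb_stat()
 * batches updates into a percpu counter, so inc_wb_stat() and
 * dec_wb_stat() stay cheap on hot paths.  wb_stat() reads the possibly
 * stale global approximation, while wb_stat_sum() folds in every CPU's
 * local delta and is exact but much more expensive:
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);	// page entered writeback
 *	...
 *	dec_wb_stat(wb, WB_WRITEBACK);	// writeback completed
 *	nr = wb_stat(wb, WB_WRITEBACK);	// fast, approximate read
 */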

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
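
/*
 * Sketch of how the error bound is intended to be used (this mirrors
 * the pattern in mm/page-writeback.c; "limit" is a placeholder): a
 * wb_stat() read can be off by up to wb_stat_error(), so only pay for
 * the exact wb_stat_sum() once the estimate gets within that margin of
 * a limit:
 *
 *	if (wb_stat(wb, WB_RECLAIMABLE) < limit + wb_stat_error())
 *		reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
 */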

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately. BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:  Don't write pages back
 * BDI_CAP_NO_ACCT_WB:    Don't automatically account writeback pages
 * BDI_CAP_STABLE_WRITES: Device requires that pages under writeback stay
 *			  unmodified until the I/O completes (stable pages).
 * BDI_CAP_STRICTLIMIT:   Keep number of dirty pages below bdi threshold.
 *
 * BDI_CAP_CGROUP_WRITEBACK: Supports cgroup-aware writeback.
 * BDI_CAP_SYNCHRONOUS_IO: Device is so fast that asynchronous IO would be
 *			   inefficient.
 */
#define BDI_CAP_NO_ACCT_DIRTY		0x00000001
#define BDI_CAP_NO_WRITEBACK		0x00000002
#define BDI_CAP_NO_ACCT_WB		0x00000004
#define BDI_CAP_STABLE_WRITES		0x00000008
#define BDI_CAP_STRICTLIMIT		0x00000010
#define BDI_CAP_CGROUP_WRITEBACK	0x00000020
#define BDI_CAP_SYNCHRONOUS_IO		0x00000040

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
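
/*
 * Illustrative example (hypothetical): a RAM-backed filesystem whose
 * pages can never be cleaned by the flusher would combine all three
 * flags via the convenience macro when setting up its bdi:
 *
 *	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
 *
 * so its dirty pages neither enter the VM's dirty accounting nor have
 * writepages() invoked on them.
 */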

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	if (sb_is_blkdev_sb(sb))
		return I_BDEV(inode)->bd_bdi;
#endif
	return sb->s_bdi;
}

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	return wb->congested & cong_bits;
}

long congestion_wait(int sync, long timeout);
long wait_iff_congested(int sync, long timeout);
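
/*
 * Typical back-off pattern built on congestion_wait() (a sketch;
 * try_to_clean_pages() is a placeholder): rather than busy-retrying
 * while the device's write queue is saturated, sleep for a bounded
 * interval or until congestion clears, whichever comes first:
 *
 *	while (!try_to_clean_pages())
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 */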

static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO;
}

static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_STABLE_WRITES;
}

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * cgroup writeback requires support from both the bdi and filesystem.
 * Also, both memcg and iocg have to be on the default hierarchy. Test
 * whether all conditions are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		bdi_cap_account_dirty(bdi) &&
		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock(), which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg. No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg. This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}
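
/*
 * Usage sketch: the wb returned by wb_get_create_current() carries a
 * reference (taken via wb_tryget()/wb_get_create()), which the caller
 * must drop with wb_put() from backing-dev-defs.h once done:
 *
 *	struct bdi_writeback *wb = wb_get_create_current(bdi, GFP_NOIO);
 *
 *	if (wb) {
 *		// ... queue work against wb ...
 *		wb_put(wb);
 *	}
 */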

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated. May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with. The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock. This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction. IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock; inode_to_wb()'s lockdep check would bark here, so
	 * dereference i_wb directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}
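
/*
 * A sketch of the transaction pair in use (mirroring callers such as
 * those in mm/page-writeback.c): the reader holds none of the locks
 * required by inode_to_wb(), brackets the access with begin/end, and
 * must not sleep in between:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	// ... read wb state; no sleeping, IRQs may be disabled ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */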

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */
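
/*
 * An earlier revision of include/linux/backing-dev.h follows, dating
 * from before writeback was reworked around per-bdi workqueues and
 * cgroup-aware bdi_writeback structures.
 */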

/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <linux/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_writeback_running,	/* Writeback is in progress */
	BDI_unused,		/* Available bits start here */
};

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	BDI_DIRTIED,
	BDI_WRITTEN,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
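
/*
 * Worked example of the batch size (assuming a 4-CPU system): with
 * nr_cpu_ids == 4, BDI_STAT_BATCH = 8 * (1 + ilog2(4)) = 8 * 3 = 24,
 * so each CPU may accumulate up to 24 events locally before folding
 * them into the global counter, and bdi_stat_error() below bounds the
 * total drift at 4 * 24 = 96.
 */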

struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */
	unsigned int nr;

	unsigned long last_old_flush;	/* last old data flush */
	unsigned long last_active;	/* last time bdi thread was active */

	struct task_struct *task;	/* writeback thread */
	struct timer_list wakeup_timer;	/* used for delayed bdi thread wakeup */
	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	spinlock_t list_lock;		/* protects the b_* lists */
};

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All the bdi tasks' dirty rate will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much more smooth/stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct prop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* default writeback info for this bdi */
	spinlock_t wb_lock;	  /* protects work_list */

	struct list_head work_list;

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_arm_supers_timer(void);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct list_head bdi_pending_list;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately. BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:  Don't write pages back
 * BDI_CAP_NO_ACCT_WB:    Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:      Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:    Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:      Can be mapped for reading
 * BDI_CAP_WRITE_MAP:     Can be mapped for writing
 * BDI_CAP_EXEC_MAP:      Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:   Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
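
/*
 * The #error check above pins BDI_CAP_{READ,WRITE,EXEC}_MAP to the
 * matching VM_MAY* bits so that !MMU mmap() can derive the permitted
 * mapping flags with a plain mask.  A sketch of the idea (mm/nommu.c
 * does something very similar when setting up a shared mapping):
 *
 *	vm_flags |= VM_MAYSHARE | (bdi->capabilities & BDI_CAP_VMFLAGS);
 */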

extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}
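
/*
 * Sketch of a congested_fn implementation (struct my_dev and its fields
 * are hypothetical): stacked drivers such as md/dm have no request
 * queue of their own, so they report congestion by aggregating the
 * state of their component devices:
 *
 *	static int my_congested(void *data, int bdi_bits)
 *	{
 *		struct my_dev *dev = data;
 *		int ret = 0, i;
 *
 *		for (i = 0; i < dev->nr_disks; i++)
 *			ret |= bdi_congested(dev->disk_bdi[i], bdi_bits);
 *		return ret;
 *	}
 */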

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#endif	/* _LINUX_BACKING_DEV_H */