/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

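/*
 * CUTOFF_WRITEBACK{,_SYNC} are the default cut offs, as a percentage
 * of cache in use: above bch_cutoff_writeback only sync, metadata and
 * priority writes are written back, and above bch_cutoff_writeback_sync
 * nothing is written back at all (see should_writeback() below).  The
 * _MAX values bound the corresponding tunables.
 */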
#define CUTOFF_WRITEBACK		40
#define CUTOFF_WRITEBACK_SYNC		70

#define CUTOFF_WRITEBACK_MAX		70
#define CUTOFF_WRITEBACK_SYNC_MAX	90

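/*
 * Per-pass limits for the writeback thread: at most this many keys,
 * and at most this many 512-byte sectors, are written back in one
 * pass before yielding, so a single pass cannot hog the device.
 */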
#define MAX_WRITEBACKS_IN_PASS	5
#define MAX_WRITESIZE_IN_PASS	5000	/* *512b */

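/*
 * Default and upper bound for writeback_rate_update_seconds, the
 * interval at which the writeback rate controller recomputes its
 * target rate.
 */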
#define WRITEBACK_RATE_UPDATE_SECS_MAX		60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5

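/*
 * Threshold (percent of cache in use) above which the writeback
 * thread kicks garbage collection once all dirty data has been
 * flushed; see the writeback thread in writeback.c.
 */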
#define BCH_AUTO_GC_DIRTY_THRESHOLD	50

/*
 * 14 (16384ths) is chosen so that each backing device gets a
 * reasonable fraction of the total share, and so that the computation
 * does not overflow until individual backing devices reach a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT	14

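/*
 * Total dirty sectors on a backing device.  The device is divided
 * into stripes, each with its own atomic dirty-sector count, so the
 * total is simply the sum over all stripes.
 */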
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

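/*
 * Map a sector offset on the backing device to its stripe index.
 * do_div() divides in place, leaving the quotient in offset and
 * returning the remainder, so this returns offset / stripe_size.
 */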
static inline unsigned int offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

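/*
 * True if any stripe overlapped by the nr_sectors starting at offset
 * already holds dirty data.  Used to keep writes to partially dirty
 * stripes in writeback mode when partial stripe writes are expensive
 * on the backing device (e.g. RAID5/6).  The caller must ensure the
 * range lies within the device; the loop does not bounds-check stripe.
 */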
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned int nr_sectors)
{
	unsigned int stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

extern unsigned int bch_cutoff_writeback;
extern unsigned int bch_cutoff_writeback_sync;

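/*
 * Decide whether a write should be written back (cached as dirty data)
 * instead of being written around the cache: never when not in
 * writeback mode, while detaching, or when the cache is more than
 * bch_cutoff_writeback_sync percent in use, and never for discards.
 * Otherwise, prefer writeback for writes touching already-dirty
 * stripes, and, unless the request would be skipped anyway, for sync,
 * metadata and priority writes or a mostly idle cache.
 *
 * A caller on the write path would use it roughly like this (a
 * sketch, not a verbatim caller):
 *
 *	if (should_writeback(dc, bio, cache_mode, bypass)) {
 *		bypass = false;
 *		writeback = true;
 *	}
 */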
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{
	unsigned int in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > bch_cutoff_writeback_sync)
		return false;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= bch_cutoff_writeback);
}

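/*
 * Wake the per-device writeback thread, if it was created successfully;
 * IS_ERR_OR_NULL() covers both a thread that was never started and a
 * failed kthread_create().
 */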
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

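/*
 * Note that the device now holds dirty data.  The atomic_read() is a
 * cheap fast path; the atomic_xchg() guarantees that exactly one
 * caller wins the 0 -> 1 transition, and that caller marks the backing
 * superblock dirty (so the device is recoverable after a crash) and
 * wakes the writeback thread.
 */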
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

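/* Non-inline writeback machinery, implemented in writeback.c. */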
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

#endif