// SPDX-License-Identifier: GPL-2.0
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

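/*
 * Tracks a single extent being copied by moving GC: the closure that drives
 * the read/write state machine, the keybuf entry being moved, the data
 * insert operation for the rewrite, and the bio (with inline vecs) that is
 * reused for both the read and the write.
 */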
struct moving_io {
	struct closure cl;
	struct keybuf_key *w;
	struct data_insert_op op;
	struct bbio bio;
};

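/*
 * Keybuf predicate: select keys that have at least one pointer into a
 * bucket that the last moving GC pass flagged with GC_MOVE.
 */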
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    GC_MOVE(PTR_BUCKET(c, k, i)))
			return true;

	return false;
}

/* Moving GC - IO loop */

static CLOSURE_CALLBACK(moving_io_destructor)
{
	closure_type(io, struct moving_io, cl);

	kfree(io);
}

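/*
 * Final step of a move: free the pages backing the bio, drop the key from
 * the moving GC keybuf, and release a slot in the in-flight semaphore
 * before the moving_io is freed by its destructor.
 */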
static CLOSURE_CALLBACK(write_moving_finish)
{
	closure_type(io, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bio_free_pages(bio);

	if (io->op.replace_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

	up(&io->op.c->moving_in_flight);

	closure_return_with_destructor(cl, moving_io_destructor);
}

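/*
 * Read completion: record any IO error, and treat a now-stale pointer on a
 * clean key as an error so the rewrite is skipped (the bucket has been
 * reused and the data is no longer worth copying).
 */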
static void read_moving_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, cl);

	if (bio->bi_status)
		io->op.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(io->op.c, &b->key, 0)) {
		io->op.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
}

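/*
 * (Re)initialize the embedded bio to cover exactly the extent described by
 * the key being moved, using the inline bio_vecs allocated alongside
 * struct moving_io. Called once before the read and again before the write,
 * since the same bio is reused for both directions.
 */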
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio, NULL, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
	bio->bi_private = &io->cl;
	bch_bio_map(bio, NULL);
}

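/*
 * Write the data back out through the normal insert path. The original key
 * is installed as a replace key, so the insert only succeeds if the extent
 * hasn't been overwritten in the meantime; a collision simply drops the
 * copy.
 */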
static CLOSURE_CALLBACK(write_moving)
{
	closure_type(io, struct moving_io, cl);
	struct data_insert_op *op = &io->op;

	if (!op->status) {
		moving_init(io);

		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
		op->write_prio = 1;
		op->bio = &io->bio.bio;

		op->writeback = KEY_DIRTY(&io->w->key);
		op->csum = KEY_CSUM(&io->w->key);

		bkey_copy(&op->replace_key, &io->w->key);
		op->replace = true;

		closure_call(&op->cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, write_moving_finish, op->wq);
}

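/*
 * Submit the read for the extent being moved; when it completes, continue
 * on the moving GC workqueue with the write.
 */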
static CLOSURE_CALLBACK(read_moving_submit)
{
	closure_type(io, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, io->op.wq);
}

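/*
 * Main moving GC IO loop: scan moving_gc_keys for extents in buckets marked
 * GC_MOVE, allocate a moving_io per extent, read the data and rewrite it
 * elsewhere. The moving_in_flight semaphore bounds the number of concurrent
 * moves.
 */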
static void read_moving(struct cache_set *c)
{
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;
	struct closure cl;

	closure_init_stack(&cl);

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		if (ptr_stale(c, &w->key, 0)) {
			bch_keybuf_del(&c->moving_gc_keys, w);
			continue;
		}

		io = kzalloc(struct_size(io, bio.bio.bi_inline_vecs,
					 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private = io;
		io->w = w;
		io->op.inode = KEY_INODE(&w->key);
		io->op.c = c;
		io->op.wq = c->moving_gc_wq;

		moving_init(io);
		bio = &io->bio.bio;

		bio->bi_opf = REQ_OP_READ;
		bio->bi_end_io = read_moving_endio;

		if (bch_bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		down(&c->moving_in_flight);
		closure_call(&io->cl, read_moving_submit, NULL, &cl);
	}

	if (0) {
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_sync(&cl);
}

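/*
 * Heap comparator: with this ordering the heap's top is the bucket with the
 * most live sectors among those currently selected, so it is the first to
 * be evicted when the candidate set must shrink. bucket_heap_top() reports
 * that bucket's live sector count (or 0 if the heap is empty).
 */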
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned int bucket_heap_top(struct cache *ca)
{
	struct bucket *b;

	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}

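/*
 * Pick the buckets to move: scan every bucket, keep the ones with the least
 * live data in a heap, drop candidates until the total data to be copied
 * fits in the moving GC reserve, then flag the survivors with GC_MOVE and
 * kick off read_moving() to copy their data elsewhere.
 */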
void bch_moving_gc(struct cache_set *c)
{
	struct cache *ca = c->cache;
	struct bucket *b;
	unsigned long sectors_to_move, reserve_sectors;

	if (!c->copy_gc_enabled)
		return;

	mutex_lock(&c->bucket_lock);

	sectors_to_move = 0;
	reserve_sectors = ca->sb.bucket_size *
			  fifo_used(&ca->free[RESERVE_MOVINGGC]);

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (GC_MARK(b) == GC_MARK_METADATA ||
		    !GC_SECTORS_USED(b) ||
		    GC_SECTORS_USED(b) == ca->sb.bucket_size ||
		    atomic_read(&b->pin))
			continue;

		if (!heap_full(&ca->heap)) {
			sectors_to_move += GC_SECTORS_USED(b);
			heap_add(&ca->heap, b, bucket_cmp);
		} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
			sectors_to_move -= bucket_heap_top(ca);
			sectors_to_move += GC_SECTORS_USED(b);

			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_cmp);
		}
	}

	while (sectors_to_move > reserve_sectors) {
		heap_pop(&ca->heap, b, bucket_cmp);
		sectors_to_move -= GC_SECTORS_USED(b);
	}

	while (heap_pop(&ca->heap, b, bucket_cmp))
		SET_GC_MOVE(b, 1);

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	read_moving(c);
}

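/*
 * One-time setup at cache set registration: initialize the keybuf that
 * feeds read_moving() and allow up to 64 moves in flight at once.
 */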
void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
	sema_init(&c->moving_in_flight, 64);
}