1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4 *
5 * Uses a block device as cache for other block devices; optimized for SSDs.
6 * All allocation is done in buckets, which should match the erase block size
7 * of the device.
8 *
9 * Buckets containing cached data are kept on a heap sorted by priority;
10 * bucket priority is increased on cache hit, and periodically all the buckets
11 * on the heap have their priority scaled down. This currently is just used as
12 * an LRU but in the future should allow for more intelligent heuristics.
13 *
14 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15 * counter. Garbage collection is used to remove stale pointers.
16 *
17 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18 * as keys are inserted we only sort the pages that have not yet been written.
19 * When garbage collection is run, we resort the entire node.
20 *
21 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22 */
23
24#include "bcache.h"
25#include "btree.h"
26#include "debug.h"
27#include "extents.h"
28
29#include <linux/slab.h>
30#include <linux/bitops.h>
31#include <linux/hash.h>
32#include <linux/kthread.h>
33#include <linux/prefetch.h>
34#include <linux/random.h>
35#include <linux/rcupdate.h>
36#include <linux/sched/clock.h>
37#include <linux/rculist.h>
38#include <linux/delay.h>
39#include <trace/events/bcache.h>
40
41/*
42 * Todo:
43 * register_bcache: Return errors out to userspace correctly
44 *
45 * Writeback: don't undirty key until after a cache flush
46 *
47 * Create an iterator for key pointers
48 *
49 * On btree write error, mark bucket such that it won't be freed from the cache
50 *
51 * Journalling:
52 * Check for bad keys in replay
53 * Propagate barriers
54 * Refcount journal entries in journal_replay
55 *
56 * Garbage collection:
57 * Finish incremental gc
58 * Gc should free old UUIDs, data for invalid UUIDs
59 *
60 * Provide a way to list backing device UUIDs we have data cached for, and
61 * probably how long it's been since we've seen them, and a way to invalidate
62 * dirty data for devices that will never be attached again
63 *
64 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65 * that based on that and how much dirty data we have we can keep writeback
66 * from being starved
67 *
68 * Add a tracepoint or somesuch to watch for writeback starvation
69 *
70 * When btree depth > 1 and splitting an interior node, we have to make sure
71 * alloc_bucket() cannot fail. This should be true but is not completely
72 * obvious.
73 *
74 * Plugging?
75 *
76 * If data write is less than hard sector size of ssd, round up offset in open
77 * bucket to the next whole sector
78 *
79 * Superblock needs to be fleshed out for multiple cache devices
80 *
81 * Add a sysfs tunable for the number of writeback IOs in flight
82 *
83 * Add a sysfs tunable for the number of open data buckets
84 *
85 * IO tracking: Can we track when one process is doing io on behalf of another?
86 * IO tracking: Don't use just an average, weigh more recent stuff higher
87 *
88 * Test module load/unload
89 */
90
91#define MAX_NEED_GC 64
92#define MAX_SAVE_PRIO 72
93#define MAX_GC_TIMES 100
94#define MIN_GC_NODES 100
95#define GC_SLEEP_MS 100
96
97#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
98
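/*
 * PTR_HASH() folds the bucket number and generation of a key's first
 * pointer into a single value, used to hash btree nodes into the
 * in-memory cache hash table (see mca_hash()/mca_find() below).
 */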
99#define PTR_HASH(c, k) \
100 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
101
102static struct workqueue_struct *btree_io_wq;
103
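/*
 * insert_lock() - true when the operation holds node (b) write locked:
 * nodes at level <= op->lock are taken with write locks.
 */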
104#define insert_lock(s, b) ((b)->level <= (s)->lock)
105
106
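/*
 * write_block() points at the first block of the node's buffer that has
 * not yet been written to disk; this is where the next bset starts.
 */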
107static inline struct bset *write_block(struct btree *b)
108{
109 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
110}
111
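/*
 * After a node has been written out: sort the in-memory bsets (always
 * for interior nodes, lazily for leaves) and, if there is still room in
 * the node, start a fresh bset at write_block().
 */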
112static void bch_btree_init_next(struct btree *b)
113{
114 /* If not a leaf node, always sort */
115 if (b->level && b->keys.nsets)
116 bch_btree_sort(&b->keys, &b->c->sort);
117 else
118 bch_btree_sort_lazy(&b->keys, &b->c->sort);
119
120 if (b->written < btree_blocks(b))
121 bch_bset_init_next(&b->keys, write_block(b),
122 bset_magic(&b->c->cache->sb));
123
124}
125
126/* Btree key manipulation */
127
128void bkey_put(struct cache_set *c, struct bkey *k)
129{
130 unsigned int i;
131
132 for (i = 0; i < KEY_PTRS(k); i++)
133 if (ptr_available(c, k, i))
134 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
135}
136
137/* Btree IO */
138
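/*
 * Checksum of a bset, seeded with the node's first pointer and skipping
 * the first 8 bytes of the bset (the csum field itself).
 */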
139static uint64_t btree_csum_set(struct btree *b, struct bset *i)
140{
141 uint64_t crc = b->key.ptr[0];
142 void *data = (void *) i + 8, *end = bset_bkey_last(i);
143
144 crc = crc64_be(crc, data, end - data);
145 return crc ^ 0xffffffffffffffffULL;
146}
147
148void bch_btree_node_read_done(struct btree *b)
149{
150 const char *err = "bad btree header";
151 struct bset *i = btree_bset_first(b);
152 struct btree_iter *iter;
153
154 /*
155 * c->fill_iter can allocate an iterator with more memory space
156 * than static MAX_BSETS.
157	 * See the comment around cache_set->fill_iter.
158 */
159 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
160 iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
161 iter->used = 0;
162
163#ifdef CONFIG_BCACHE_DEBUG
164 iter->b = &b->keys;
165#endif
166
167 if (!i->seq)
168 goto err;
169
170 for (;
171 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
172 i = write_block(b)) {
173 err = "unsupported bset version";
174 if (i->version > BCACHE_BSET_VERSION)
175 goto err;
176
177 err = "bad btree header";
178 if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
179 btree_blocks(b))
180 goto err;
181
182 err = "bad magic";
183 if (i->magic != bset_magic(&b->c->cache->sb))
184 goto err;
185
186 err = "bad checksum";
187 switch (i->version) {
188 case 0:
189 if (i->csum != csum_set(i))
190 goto err;
191 break;
192 case BCACHE_BSET_VERSION:
193 if (i->csum != btree_csum_set(b, i))
194 goto err;
195 break;
196 }
197
198 err = "empty set";
199 if (i != b->keys.set[0].data && !i->keys)
200 goto err;
201
202 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
203
204 b->written += set_blocks(i, block_bytes(b->c->cache));
205 }
206
207 err = "corrupted btree";
208 for (i = write_block(b);
209 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
210 i = ((void *) i) + block_bytes(b->c->cache))
211 if (i->seq == b->keys.set[0].data->seq)
212 goto err;
213
214 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
215
216 i = b->keys.set[0].data;
217 err = "short btree key";
218 if (b->keys.set[0].size &&
219 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
220 goto err;
221
222 if (b->written < btree_blocks(b))
223 bch_bset_init_next(&b->keys, write_block(b),
224 bset_magic(&b->c->cache->sb));
225out:
226 mempool_free(iter, &b->c->fill_iter);
227 return;
228err:
229 set_btree_node_io_error(b);
230 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
231 err, PTR_BUCKET_NR(b->c, &b->key, 0),
232 bset_block_offset(b, i), i->keys);
233 goto out;
234}
235
236static void btree_node_read_endio(struct bio *bio)
237{
238 struct closure *cl = bio->bi_private;
239
240 closure_put(cl);
241}
242
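/*
 * Synchronously read a btree node from disk: issue one bio covering the
 * whole node, wait for it with an on-stack closure, then parse and sort
 * the bsets via bch_btree_node_read_done().
 */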
243static void bch_btree_node_read(struct btree *b)
244{
245 uint64_t start_time = local_clock();
246 struct closure cl;
247 struct bio *bio;
248
249 trace_bcache_btree_read(b);
250
251 closure_init_stack(&cl);
252
253 bio = bch_bbio_alloc(b->c);
254 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
255 bio->bi_end_io = btree_node_read_endio;
256 bio->bi_private = &cl;
257 bio->bi_opf = REQ_OP_READ | REQ_META;
258
259 bch_bio_map(bio, b->keys.set[0].data);
260
261 bch_submit_bbio(bio, b->c, &b->key, 0);
262 closure_sync(&cl);
263
264 if (bio->bi_status)
265 set_btree_node_io_error(b);
266
267 bch_bbio_free(bio, b->c);
268
269 if (btree_node_io_error(b))
270 goto err;
271
272 bch_btree_node_read_done(b);
273 bch_time_stats_update(&b->c->btree_read_time, start_time);
274
275 return;
276err:
277 bch_cache_set_error(b->c, "io error reading bucket %zu",
278 PTR_BUCKET_NR(b->c, &b->key, 0));
279}
280
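/*
 * Called when a btree node write completes: release the prio_blocked
 * references that were keeping buckets from being reused, and drop the
 * journal pin taken in bch_btree_leaf_dirty(), waking up any waiters.
 */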
281static void btree_complete_write(struct btree *b, struct btree_write *w)
282{
283 if (w->prio_blocked &&
284 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
285 wake_up_allocators(b->c);
286
287 if (w->journal) {
288 atomic_dec_bug(w->journal);
289 __closure_wake_up(&b->c->journal.wait);
290 }
291
292 w->prio_blocked = 0;
293 w->journal = NULL;
294}
295
296static void btree_node_write_unlock(struct closure *cl)
297{
298 struct btree *b = container_of(cl, struct btree, io);
299
300 up(&b->io_mutex);
301}
302
303static void __btree_node_write_done(struct closure *cl)
304{
305 struct btree *b = container_of(cl, struct btree, io);
306 struct btree_write *w = btree_prev_write(b);
307
308 bch_bbio_free(b->bio, b->c);
309 b->bio = NULL;
310 btree_complete_write(b, w);
311
312 if (btree_node_dirty(b))
313 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
314
315 closure_return_with_destructor(cl, btree_node_write_unlock);
316}
317
318static void btree_node_write_done(struct closure *cl)
319{
320 struct btree *b = container_of(cl, struct btree, io);
321
322 bio_free_pages(b->bio);
323 __btree_node_write_done(cl);
324}
325
326static void btree_node_write_endio(struct bio *bio)
327{
328 struct closure *cl = bio->bi_private;
329 struct btree *b = container_of(cl, struct btree, io);
330
331 if (bio->bi_status)
332 set_btree_node_io_error(b);
333
334 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
335 closure_put(cl);
336}
337
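/*
 * Build and submit the bio for the bset being written: checksum it,
 * point a copy of the node's key at the bset's sector offset within the
 * node, and issue the write with REQ_FUA (see the comment below on why
 * FUA is always used).  If bounce pages can be allocated the write
 * completes asynchronously; otherwise we write directly from the node's
 * memory and wait for completion here.
 */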
338static void do_btree_node_write(struct btree *b)
339{
340 struct closure *cl = &b->io;
341 struct bset *i = btree_bset_last(b);
342 BKEY_PADDED(key) k;
343
344 i->version = BCACHE_BSET_VERSION;
345 i->csum = btree_csum_set(b, i);
346
347 BUG_ON(b->bio);
348 b->bio = bch_bbio_alloc(b->c);
349
350 b->bio->bi_end_io = btree_node_write_endio;
351 b->bio->bi_private = cl;
352 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
353 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
354 bch_bio_map(b->bio, i);
355
356 /*
357 * If we're appending to a leaf node, we don't technically need FUA -
358 * this write just needs to be persisted before the next journal write,
359 * which will be marked FLUSH|FUA.
360 *
361 * Similarly if we're writing a new btree root - the pointer is going to
362 * be in the next journal entry.
363 *
364 * But if we're writing a new btree node (that isn't a root) or
365 * appending to a non leaf btree node, we need either FUA or a flush
366 * when we write the parent with the new pointer. FUA is cheaper than a
367 * flush, and writes appending to leaf nodes aren't blocking anything so
368 * just make all btree node writes FUA to keep things sane.
369 */
370
371 bkey_copy(&k.key, &b->key);
372 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
373 bset_sector_offset(&b->keys, i));
374
375 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
376 struct bio_vec *bv;
377 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
378 struct bvec_iter_all iter_all;
379
380 bio_for_each_segment_all(bv, b->bio, iter_all) {
381 memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
382 addr += PAGE_SIZE;
383 }
384
385 bch_submit_bbio(b->bio, b->c, &k.key, 0);
386
387 continue_at(cl, btree_node_write_done, NULL);
388 } else {
389 /*
390 * No problem for multipage bvec since the bio is
391 * just allocated
392 */
393 b->bio->bi_vcnt = 0;
394 bch_bio_map(b->bio, i);
395
396 bch_submit_bbio(b->bio, b->c, &k.key, 0);
397
398 closure_sync(cl);
399 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
400 }
401}
402
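/*
 * Write out the bset currently being inserted into.  Caller must hold
 * b->write_lock; the dirty bit is cleared and the write index flipped
 * before the I/O is issued, so new insertions go to the next
 * btree_write while this one is in flight.
 */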
403void __bch_btree_node_write(struct btree *b, struct closure *parent)
404{
405 struct bset *i = btree_bset_last(b);
406
407 lockdep_assert_held(&b->write_lock);
408
409 trace_bcache_btree_write(b);
410
411 BUG_ON(current->bio_list);
412 BUG_ON(b->written >= btree_blocks(b));
413 BUG_ON(b->written && !i->keys);
414 BUG_ON(btree_bset_first(b)->seq != i->seq);
415 bch_check_keys(&b->keys, "writing");
416
417 cancel_delayed_work(&b->work);
418
419 /* If caller isn't waiting for write, parent refcount is cache set */
420 down(&b->io_mutex);
421 closure_init(&b->io, parent ?: &b->c->cl);
422
423 clear_bit(BTREE_NODE_dirty, &b->flags);
424 change_bit(BTREE_NODE_write_idx, &b->flags);
425
426 do_btree_node_write(b);
427
428 atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
429 &b->c->cache->btree_sectors_written);
430
431 b->written += set_blocks(i, block_bytes(b->c->cache));
432}
433
434void bch_btree_node_write(struct btree *b, struct closure *parent)
435{
436 unsigned int nsets = b->keys.nsets;
437
438 lockdep_assert_held(&b->lock);
439
440 __bch_btree_node_write(b, parent);
441
442 /*
443 * do verify if there was more than one set initially (i.e. we did a
444 * sort) and we sorted down to a single set:
445 */
446 if (nsets && !b->keys.nsets)
447 bch_btree_verify(b);
448
449 bch_btree_init_next(b);
450}
451
452static void bch_btree_node_write_sync(struct btree *b)
453{
454 struct closure cl;
455
456 closure_init_stack(&cl);
457
458 mutex_lock(&b->write_lock);
459 bch_btree_node_write(b, &cl);
460 mutex_unlock(&b->write_lock);
461
462 closure_sync(&cl);
463}
464
465static void btree_node_write_work(struct work_struct *w)
466{
467 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
468
469 mutex_lock(&b->write_lock);
470 if (btree_node_dirty(b))
471 __bch_btree_node_write(b, NULL);
472 mutex_unlock(&b->write_lock);
473}
474
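/*
 * Mark a leaf node dirty after an insert and schedule a delayed write.
 * @journal_ref pins the journal entry the inserted keys were logged in;
 * the oldest such pin is kept in the node's current btree_write so the
 * journal can't be reclaimed before the node is written.
 */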
475static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
476{
477 struct bset *i = btree_bset_last(b);
478 struct btree_write *w = btree_current_write(b);
479
480 lockdep_assert_held(&b->write_lock);
481
482 BUG_ON(!b->written);
483 BUG_ON(!i->keys);
484
485 if (!btree_node_dirty(b))
486 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
487
488 set_btree_node_dirty(b);
489
490 /*
491 * w->journal is always the oldest journal pin of all bkeys
492 * in the leaf node, to make sure the oldest jset seq won't
493 * be increased before this btree node is flushed.
494 */
495 if (journal_ref) {
496 if (w->journal &&
497 journal_pin_cmp(b->c, w->journal, journal_ref)) {
498 atomic_dec_bug(w->journal);
499 w->journal = NULL;
500 }
501
502 if (!w->journal) {
503 w->journal = journal_ref;
504 atomic_inc(w->journal);
505 }
506 }
507
508 /* Force write if set is too big */
509 if (set_bytes(i) > PAGE_SIZE - 48 &&
510 !current->bio_list)
511 bch_btree_node_write(b, NULL);
512}
513
514/*
515 * Btree in memory cache - allocation/freeing
516 * mca -> memory cache
517 */
518
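/*
 * mca_reserve() is the number of cached btree nodes we must always keep
 * around so that allocating a node (and hence inserting keys) can make
 * forward progress; mca_can_free() is how many nodes beyond that
 * reserve the shrinker is allowed to reclaim.
 */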
519#define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \
520 ? c->root->level : 1) * 8 + 16)
521#define mca_can_free(c) \
522 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
523
524static void mca_data_free(struct btree *b)
525{
526 BUG_ON(b->io_mutex.count != 1);
527
528 bch_btree_keys_free(&b->keys);
529
530 b->c->btree_cache_used--;
531 list_move(&b->list, &b->c->btree_cache_freed);
532}
533
534static void mca_bucket_free(struct btree *b)
535{
536 BUG_ON(btree_node_dirty(b));
537
538 b->key.ptr[0] = 0;
539 hlist_del_init_rcu(&b->hash);
540 list_move(&b->list, &b->c->btree_cache_freeable);
541}
542
543static unsigned int btree_order(struct bkey *k)
544{
545 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
546}
547
548static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
549{
550 if (!bch_btree_keys_alloc(&b->keys,
551 max_t(unsigned int,
552 ilog2(b->c->btree_pages),
553 btree_order(k)),
554 gfp)) {
555 b->c->btree_cache_used++;
556 list_move(&b->list, &b->c->btree_cache);
557 } else {
558 list_move(&b->list, &b->c->btree_cache_freed);
559 }
560}
561
562static struct btree *mca_bucket_alloc(struct cache_set *c,
563 struct bkey *k, gfp_t gfp)
564{
565 /*
566 * kzalloc() is necessary here for initialization,
567 * see code comments in bch_btree_keys_init().
568 */
569 struct btree *b = kzalloc(sizeof(struct btree), gfp);
570
571 if (!b)
572 return NULL;
573
574 init_rwsem(&b->lock);
575 lockdep_set_novalidate_class(&b->lock);
576 mutex_init(&b->write_lock);
577 lockdep_set_novalidate_class(&b->write_lock);
578 INIT_LIST_HEAD(&b->list);
579 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
580 b->c = c;
581 sema_init(&b->io_mutex, 1);
582
583 mca_data_alloc(b, k, gfp);
584 return b;
585}
586
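/*
 * Try to take a node out of the btree cache for reuse: the write lock
 * must be available without blocking and the node's allocation order
 * must be at least @min_order; unless @flush is set the node must also
 * be clean with no write in flight.  Returns 0 with the node write
 * locked, or -ENOMEM if it can't be reaped right now.
 */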
587static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
588{
589 struct closure cl;
590
591 closure_init_stack(&cl);
592 lockdep_assert_held(&b->c->bucket_lock);
593
594 if (!down_write_trylock(&b->lock))
595 return -ENOMEM;
596
597 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
598
599 if (b->keys.page_order < min_order)
600 goto out_unlock;
601
602 if (!flush) {
603 if (btree_node_dirty(b))
604 goto out_unlock;
605
606 if (down_trylock(&b->io_mutex))
607 goto out_unlock;
608 up(&b->io_mutex);
609 }
610
611retry:
612 /*
613	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
614	 * __bch_btree_node_write(). To avoid an extra flush, acquire
615	 * b->write_lock before checking the BTREE_NODE_dirty bit.
616 */
617 mutex_lock(&b->write_lock);
618 /*
619	 * If this btree node has been selected by btree_flush_write() in the
620	 * journal code, delay and retry until the journal code has flushed
621	 * the node and cleared the BTREE_NODE_journal_flush bit.
622 */
623 if (btree_node_journal_flush(b)) {
624 pr_debug("bnode %p is flushing by journal, retry\n", b);
625 mutex_unlock(&b->write_lock);
626 udelay(1);
627 goto retry;
628 }
629
630 if (btree_node_dirty(b))
631 __bch_btree_node_write(b, &cl);
632 mutex_unlock(&b->write_lock);
633
634 closure_sync(&cl);
635
636 /* wait for any in flight btree write */
637 down(&b->io_mutex);
638 up(&b->io_mutex);
639
640 return 0;
641out_unlock:
642 rw_unlock(true, b);
643 return -ENOMEM;
644}
645
646static unsigned long bch_mca_scan(struct shrinker *shrink,
647 struct shrink_control *sc)
648{
649 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
650 struct btree *b, *t;
651 unsigned long i, nr = sc->nr_to_scan;
652 unsigned long freed = 0;
653 unsigned int btree_cache_used;
654
655 if (c->shrinker_disabled)
656 return SHRINK_STOP;
657
658 if (c->btree_cache_alloc_lock)
659 return SHRINK_STOP;
660
661 /* Return -1 if we can't do anything right now */
662 if (sc->gfp_mask & __GFP_IO)
663 mutex_lock(&c->bucket_lock);
664 else if (!mutex_trylock(&c->bucket_lock))
665 return -1;
666
667 /*
668 * It's _really_ critical that we don't free too many btree nodes - we
669 * have to always leave ourselves a reserve. The reserve is how we
670 * guarantee that allocating memory for a new btree node can always
671 * succeed, so that inserting keys into the btree can always succeed and
672 * IO can always make forward progress:
673 */
674 nr /= c->btree_pages;
675 if (nr == 0)
676 nr = 1;
677 nr = min_t(unsigned long, nr, mca_can_free(c));
678
679 i = 0;
680 btree_cache_used = c->btree_cache_used;
681 list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
682 if (nr <= 0)
683 goto out;
684
685 if (!mca_reap(b, 0, false)) {
686 mca_data_free(b);
687 rw_unlock(true, b);
688 freed++;
689 }
690 nr--;
691 i++;
692 }
693
694 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
695 if (nr <= 0 || i >= btree_cache_used)
696 goto out;
697
698 if (!mca_reap(b, 0, false)) {
699 mca_bucket_free(b);
700 mca_data_free(b);
701 rw_unlock(true, b);
702 freed++;
703 }
704
705 nr--;
706 i++;
707 }
708out:
709 mutex_unlock(&c->bucket_lock);
710 return freed * c->btree_pages;
711}
712
713static unsigned long bch_mca_count(struct shrinker *shrink,
714 struct shrink_control *sc)
715{
716 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
717
718 if (c->shrinker_disabled)
719 return 0;
720
721 if (c->btree_cache_alloc_lock)
722 return 0;
723
724 return mca_can_free(c) * c->btree_pages;
725}
726
727void bch_btree_cache_free(struct cache_set *c)
728{
729 struct btree *b;
730 struct closure cl;
731
732 closure_init_stack(&cl);
733
734 if (c->shrink.list.next)
735 unregister_shrinker(&c->shrink);
736
737 mutex_lock(&c->bucket_lock);
738
739#ifdef CONFIG_BCACHE_DEBUG
740 if (c->verify_data)
741 list_move(&c->verify_data->list, &c->btree_cache);
742
743 free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
744#endif
745
746 list_splice(&c->btree_cache_freeable,
747 &c->btree_cache);
748
749 while (!list_empty(&c->btree_cache)) {
750 b = list_first_entry(&c->btree_cache, struct btree, list);
751
752 /*
753	 * This function is called by cache_set_free(); there is no I/O
754	 * on the cache at this point, so acquiring b->write_lock before
755	 * clearing BTREE_NODE_dirty is no longer necessary.
756 */
757 if (btree_node_dirty(b)) {
758 btree_complete_write(b, btree_current_write(b));
759 clear_bit(BTREE_NODE_dirty, &b->flags);
760 }
761 mca_data_free(b);
762 }
763
764 while (!list_empty(&c->btree_cache_freed)) {
765 b = list_first_entry(&c->btree_cache_freed,
766 struct btree, list);
767 list_del(&b->list);
768 cancel_delayed_work_sync(&b->work);
769 kfree(b);
770 }
771
772 mutex_unlock(&c->bucket_lock);
773}
774
775int bch_btree_cache_alloc(struct cache_set *c)
776{
777 unsigned int i;
778
779 for (i = 0; i < mca_reserve(c); i++)
780 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
781 return -ENOMEM;
782
783 list_splice_init(&c->btree_cache,
784 &c->btree_cache_freeable);
785
786#ifdef CONFIG_BCACHE_DEBUG
787 mutex_init(&c->verify_lock);
788
789 c->verify_ondisk = (void *)
790 __get_free_pages(GFP_KERNEL|__GFP_COMP,
791 ilog2(meta_bucket_pages(&c->cache->sb)));
792 if (!c->verify_ondisk) {
793 /*
794	 * Don't worry about the mca_reserve() buckets
795	 * allocated in the previous for-loop; they will be
796 * handled properly in bch_cache_set_unregister().
797 */
798 return -ENOMEM;
799 }
800
801 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
802
803 if (c->verify_data &&
804 c->verify_data->keys.set->data)
805 list_del_init(&c->verify_data->list);
806 else
807 c->verify_data = NULL;
808#endif
809
810 c->shrink.count_objects = bch_mca_count;
811 c->shrink.scan_objects = bch_mca_scan;
812 c->shrink.seeks = 4;
813 c->shrink.batch = c->btree_pages * 2;
814
815 if (register_shrinker(&c->shrink, "md-bcache:%pU", c->set_uuid))
816 pr_warn("bcache: %s: could not register shrinker\n",
817 __func__);
818
819 return 0;
820}
821
822/* Btree in memory cache - hash table */
823
824static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
825{
826 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
827}
828
829static struct btree *mca_find(struct cache_set *c, struct bkey *k)
830{
831 struct btree *b;
832
833 rcu_read_lock();
834 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
835 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
836 goto out;
837 b = NULL;
838out:
839 rcu_read_unlock();
840 return b;
841}
842
843static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
844{
845 spin_lock(&c->btree_cannibalize_lock);
846 if (likely(c->btree_cache_alloc_lock == NULL)) {
847 c->btree_cache_alloc_lock = current;
848 } else if (c->btree_cache_alloc_lock != current) {
849 if (op)
850 prepare_to_wait(&c->btree_cache_wait, &op->wait,
851 TASK_UNINTERRUPTIBLE);
852 spin_unlock(&c->btree_cannibalize_lock);
853 return -EINTR;
854 }
855 spin_unlock(&c->btree_cannibalize_lock);
856
857 return 0;
858}
859
860static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
861 struct bkey *k)
862{
863 struct btree *b;
864
865 trace_bcache_btree_cache_cannibalize(c);
866
867 if (mca_cannibalize_lock(c, op))
868 return ERR_PTR(-EINTR);
869
870 list_for_each_entry_reverse(b, &c->btree_cache, list)
871 if (!mca_reap(b, btree_order(k), false))
872 return b;
873
874 list_for_each_entry_reverse(b, &c->btree_cache, list)
875 if (!mca_reap(b, btree_order(k), true))
876 return b;
877
878 WARN(1, "btree cache cannibalize failed\n");
879 return ERR_PTR(-ENOMEM);
880}
881
882/*
883 * We can only have one thread cannibalizing other cached btree nodes at a time,
884 * or we'll deadlock. We use an open coded mutex to ensure that, which
885 * mca_cannibalize_lock() takes. This means every time we unlock the root of
886 * the btree, we need to release this lock if we have it held.
887 */
888static void bch_cannibalize_unlock(struct cache_set *c)
889{
890 spin_lock(&c->btree_cannibalize_lock);
891 if (c->btree_cache_alloc_lock == current) {
892 c->btree_cache_alloc_lock = NULL;
893 wake_up(&c->btree_cache_wait);
894 }
895 spin_unlock(&c->btree_cannibalize_lock);
896}
897
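/*
 * Allocate an in-memory btree node for the bucket @k points to: reuse a
 * node from the freeable/freed lists if possible, otherwise allocate a
 * new one, cannibalizing another cached node as a last resort.  Returns
 * the node write locked, NULL if the node is already in the cache, or
 * an ERR_PTR on failure.
 */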
898static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
899 struct bkey *k, int level)
900{
901 struct btree *b;
902
903 BUG_ON(current->bio_list);
904
905 lockdep_assert_held(&c->bucket_lock);
906
907 if (mca_find(c, k))
908 return NULL;
909
910 /* btree_free() doesn't free memory; it sticks the node on the end of
911	 * the list. Check if there are any freed nodes there:
912 */
913 list_for_each_entry(b, &c->btree_cache_freeable, list)
914 if (!mca_reap(b, btree_order(k), false))
915 goto out;
916
917 /* We never free struct btree itself, just the memory that holds the on
918 * disk node. Check the freed list before allocating a new one:
919 */
920 list_for_each_entry(b, &c->btree_cache_freed, list)
921 if (!mca_reap(b, 0, false)) {
922 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
923 if (!b->keys.set[0].data)
924 goto err;
925 else
926 goto out;
927 }
928
929 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
930 if (!b)
931 goto err;
932
933 BUG_ON(!down_write_trylock(&b->lock));
934 if (!b->keys.set->data)
935 goto err;
936out:
937 BUG_ON(b->io_mutex.count != 1);
938
939 bkey_copy(&b->key, k);
940 list_move(&b->list, &c->btree_cache);
941 hlist_del_init_rcu(&b->hash);
942 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
943
944 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
945 b->parent = (void *) ~0UL;
946 b->flags = 0;
947 b->written = 0;
948 b->level = level;
949
950 if (!b->level)
951 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
952 &b->c->expensive_debug_checks);
953 else
954 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
955 &b->c->expensive_debug_checks);
956
957 return b;
958err:
959 if (b)
960 rw_unlock(true, b);
961
962 b = mca_cannibalize(c, op, k);
963 if (!IS_ERR(b))
964 goto out;
965
966 return b;
967}
968
969/*
970 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
971 * in from disk if necessary.
972 *
973 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
974 *
975 * The btree node will have either a read or a write lock held, depending on
976 * level and op->lock.
977 */
978struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
979 struct bkey *k, int level, bool write,
980 struct btree *parent)
981{
982 int i = 0;
983 struct btree *b;
984
985 BUG_ON(level < 0);
986retry:
987 b = mca_find(c, k);
988
989 if (!b) {
990 if (current->bio_list)
991 return ERR_PTR(-EAGAIN);
992
993 mutex_lock(&c->bucket_lock);
994 b = mca_alloc(c, op, k, level);
995 mutex_unlock(&c->bucket_lock);
996
997 if (!b)
998 goto retry;
999 if (IS_ERR(b))
1000 return b;
1001
1002 bch_btree_node_read(b);
1003
1004 if (!write)
1005 downgrade_write(&b->lock);
1006 } else {
1007 rw_lock(write, b, level);
1008 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1009 rw_unlock(write, b);
1010 goto retry;
1011 }
1012 BUG_ON(b->level != level);
1013 }
1014
1015 if (btree_node_io_error(b)) {
1016 rw_unlock(write, b);
1017 return ERR_PTR(-EIO);
1018 }
1019
1020 BUG_ON(!b->written);
1021
1022 b->parent = parent;
1023
1024 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1025 prefetch(b->keys.set[i].tree);
1026 prefetch(b->keys.set[i].data);
1027 }
1028
1029 for (; i <= b->keys.nsets; i++)
1030 prefetch(b->keys.set[i].data);
1031
1032 return b;
1033}
1034
1035static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1036{
1037 struct btree *b;
1038
1039 mutex_lock(&parent->c->bucket_lock);
1040 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1041 mutex_unlock(&parent->c->bucket_lock);
1042
1043 if (!IS_ERR_OR_NULL(b)) {
1044 b->parent = parent;
1045 bch_btree_node_read(b);
1046 rw_unlock(true, b);
1047 }
1048}
1049
1050/* Btree alloc */
1051
1052static void btree_node_free(struct btree *b)
1053{
1054 trace_bcache_btree_node_free(b);
1055
1056 BUG_ON(b == b->c->root);
1057
1058retry:
1059 mutex_lock(&b->write_lock);
1060 /*
1061	 * If this btree node has been selected for flushing in btree_flush_write(),
1062	 * delay and retry until the BTREE_NODE_journal_flush bit is cleared;
1063	 * only then is it safe to free the btree node here. Otherwise freeing
1064	 * it would race with the journal flush.
1065 */
1066 if (btree_node_journal_flush(b)) {
1067 mutex_unlock(&b->write_lock);
1068 pr_debug("bnode %p journal_flush set, retry\n", b);
1069 udelay(1);
1070 goto retry;
1071 }
1072
1073 if (btree_node_dirty(b)) {
1074 btree_complete_write(b, btree_current_write(b));
1075 clear_bit(BTREE_NODE_dirty, &b->flags);
1076 }
1077
1078 mutex_unlock(&b->write_lock);
1079
1080 cancel_delayed_work(&b->work);
1081
1082 mutex_lock(&b->c->bucket_lock);
1083 bch_bucket_free(b->c, &b->key);
1084 mca_bucket_free(b);
1085 mutex_unlock(&b->c->bucket_lock);
1086}
1087
1088struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1089 int level, bool wait,
1090 struct btree *parent)
1091{
1092 BKEY_PADDED(key) k;
1093 struct btree *b = ERR_PTR(-EAGAIN);
1094
1095 mutex_lock(&c->bucket_lock);
1096retry:
1097 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
1098 goto err;
1099
1100 bkey_put(c, &k.key);
1101 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1102
1103 b = mca_alloc(c, op, &k.key, level);
1104 if (IS_ERR(b))
1105 goto err_free;
1106
1107 if (!b) {
1108 cache_bug(c,
1109 "Tried to allocate bucket that was in btree cache");
1110 goto retry;
1111 }
1112
1113 b->parent = parent;
1114 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
1115
1116 mutex_unlock(&c->bucket_lock);
1117
1118 trace_bcache_btree_node_alloc(b);
1119 return b;
1120err_free:
1121 bch_bucket_free(c, &k.key);
1122err:
1123 mutex_unlock(&c->bucket_lock);
1124
1125 trace_bcache_btree_node_alloc_fail(c);
1126 return b;
1127}
1128
1129static struct btree *bch_btree_node_alloc(struct cache_set *c,
1130 struct btree_op *op, int level,
1131 struct btree *parent)
1132{
1133 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1134}
1135
1136static struct btree *btree_node_alloc_replacement(struct btree *b,
1137 struct btree_op *op)
1138{
1139 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1140
1141 if (!IS_ERR_OR_NULL(n)) {
1142 mutex_lock(&n->write_lock);
1143 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1144 bkey_copy_key(&n->key, &b->key);
1145 mutex_unlock(&n->write_lock);
1146 }
1147
1148 return n;
1149}
1150
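/*
 * Build the key that records @b's removal in its parent: a copy of @b's
 * pointers with the bucket generations bumped (so the old pointers
 * become stale) and the key itself set to ZERO_KEY.  A reference on
 * c->prio_blocked is taken to keep the buckets from being reused until
 * the update has been written out.
 */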
1151static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1152{
1153 unsigned int i;
1154
1155 mutex_lock(&b->c->bucket_lock);
1156
1157 atomic_inc(&b->c->prio_blocked);
1158
1159 bkey_copy(k, &b->key);
1160 bkey_copy_key(k, &ZERO_KEY);
1161
1162 for (i = 0; i < KEY_PTRS(k); i++)
1163 SET_PTR_GEN(k, i,
1164 bch_inc_gen(b->c->cache,
1165 PTR_BUCKET(b->c, &b->key, i)));
1166
1167 mutex_unlock(&b->c->bucket_lock);
1168}
1169
1170static int btree_check_reserve(struct btree *b, struct btree_op *op)
1171{
1172 struct cache_set *c = b->c;
1173 struct cache *ca = c->cache;
1174 unsigned int reserve = (c->root->level - b->level) * 2 + 1;
1175
1176 mutex_lock(&c->bucket_lock);
1177
1178 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1179 if (op)
1180 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1181 TASK_UNINTERRUPTIBLE);
1182 mutex_unlock(&c->bucket_lock);
1183 return -EINTR;
1184 }
1185
1186 mutex_unlock(&c->bucket_lock);
1187
1188 return mca_cannibalize_lock(b->c, op);
1189}
1190
1191/* Garbage collection */
1192
1193static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1194 struct bkey *k)
1195{
1196 uint8_t stale = 0;
1197 unsigned int i;
1198 struct bucket *g;
1199
1200 /*
1201 * ptr_invalid() can't return true for the keys that mark btree nodes as
1202 * freed, but since ptr_bad() returns true we'll never actually use them
1203	 * for anything, and thus we don't want to mark their pointers here
1204 */
1205 if (!bkey_cmp(k, &ZERO_KEY))
1206 return stale;
1207
1208 for (i = 0; i < KEY_PTRS(k); i++) {
1209 if (!ptr_available(c, k, i))
1210 continue;
1211
1212 g = PTR_BUCKET(c, k, i);
1213
1214 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1215 g->last_gc = PTR_GEN(k, i);
1216
1217 if (ptr_stale(c, k, i)) {
1218 stale = max(stale, ptr_stale(c, k, i));
1219 continue;
1220 }
1221
1222 cache_bug_on(GC_MARK(g) &&
1223 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1224 c, "inconsistent ptrs: mark = %llu, level = %i",
1225 GC_MARK(g), level);
1226
1227 if (level)
1228 SET_GC_MARK(g, GC_MARK_METADATA);
1229 else if (KEY_DIRTY(k))
1230 SET_GC_MARK(g, GC_MARK_DIRTY);
1231 else if (!GC_MARK(g))
1232 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1233
1234 /* guard against overflow */
1235 SET_GC_SECTORS_USED(g, min_t(unsigned int,
1236 GC_SECTORS_USED(g) + KEY_SIZE(k),
1237 MAX_GC_SECTORS_USED));
1238
1239 BUG_ON(!GC_SECTORS_USED(g));
1240 }
1241
1242 return stale;
1243}
1244
1245#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1246
1247void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1248{
1249 unsigned int i;
1250
1251 for (i = 0; i < KEY_PTRS(k); i++)
1252 if (ptr_available(c, k, i) &&
1253 !ptr_stale(c, k, i)) {
1254 struct bucket *b = PTR_BUCKET(c, k, i);
1255
1256 b->gen = PTR_GEN(k, i);
1257
1258 if (level && bkey_cmp(k, &ZERO_KEY))
1259 b->prio = BTREE_PRIO;
1260 else if (!level && b->prio == BTREE_PRIO)
1261 b->prio = INITIAL_PRIO;
1262 }
1263
1264 __bch_btree_mark_key(c, level, k);
1265}
1266
1267void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1268{
1269 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1270}
1271
1272static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1273{
1274 uint8_t stale = 0;
1275 unsigned int keys = 0, good_keys = 0;
1276 struct bkey *k;
1277 struct btree_iter iter;
1278 struct bset_tree *t;
1279
1280 gc->nodes++;
1281
1282 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1283 stale = max(stale, btree_mark_key(b, k));
1284 keys++;
1285
1286 if (bch_ptr_bad(&b->keys, k))
1287 continue;
1288
1289 gc->key_bytes += bkey_u64s(k);
1290 gc->nkeys++;
1291 good_keys++;
1292
1293 gc->data += KEY_SIZE(k);
1294 }
1295
1296 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1297 btree_bug_on(t->size &&
1298 bset_written(&b->keys, t) &&
1299 bkey_cmp(&b->key, &t->end) < 0,
1300 b, "found short btree key in gc");
1301
1302 if (b->c->gc_always_rewrite)
1303 return true;
1304
1305 if (stale > 10)
1306 return true;
1307
1308 if ((keys - good_keys) * 2 > keys)
1309 return true;
1310
1311 return false;
1312}
1313
1314#define GC_MERGE_NODES 4U
1315
1316struct gc_merge_info {
1317 struct btree *b;
1318 unsigned int keys;
1319};
1320
1321static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1322 struct keylist *insert_keys,
1323 atomic_t *journal_ref,
1324 struct bkey *replace_key);
1325
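/*
 * Try to merge up to GC_MERGE_NODES adjacent sibling nodes into fewer
 * nodes during gc: allocate replacement nodes, redistribute keys so the
 * first replacement ends up empty and can be freed, then update the
 * parent with the new pointers and freeing keys for the old nodes.
 * Returns -EINTR if we coalesced (the caller's iterator is now invalid)
 * and 0 if nothing was done.
 */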
1326static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1327 struct gc_stat *gc, struct gc_merge_info *r)
1328{
1329 unsigned int i, nodes = 0, keys = 0, blocks;
1330 struct btree *new_nodes[GC_MERGE_NODES];
1331 struct keylist keylist;
1332 struct closure cl;
1333 struct bkey *k;
1334
1335 bch_keylist_init(&keylist);
1336
1337 if (btree_check_reserve(b, NULL))
1338 return 0;
1339
1340 memset(new_nodes, 0, sizeof(new_nodes));
1341 closure_init_stack(&cl);
1342
1343 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1344 keys += r[nodes++].keys;
1345
1346 blocks = btree_default_blocks(b->c) * 2 / 3;
1347
1348 if (nodes < 2 ||
1349 __set_blocks(b->keys.set[0].data, keys,
1350 block_bytes(b->c->cache)) > blocks * (nodes - 1))
1351 return 0;
1352
1353 for (i = 0; i < nodes; i++) {
1354 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1355 if (IS_ERR_OR_NULL(new_nodes[i]))
1356 goto out_nocoalesce;
1357 }
1358
1359 /*
1360 * We have to check the reserve here, after we've allocated our new
1361 * nodes, to make sure the insert below will succeed - we also check
1362 * before as an optimization to potentially avoid a bunch of expensive
1363 * allocs/sorts
1364 */
1365 if (btree_check_reserve(b, NULL))
1366 goto out_nocoalesce;
1367
1368 for (i = 0; i < nodes; i++)
1369 mutex_lock(&new_nodes[i]->write_lock);
1370
1371 for (i = nodes - 1; i > 0; --i) {
1372 struct bset *n1 = btree_bset_first(new_nodes[i]);
1373 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1374 struct bkey *k, *last = NULL;
1375
1376 keys = 0;
1377
1378 if (i > 1) {
1379 for (k = n2->start;
1380 k < bset_bkey_last(n2);
1381 k = bkey_next(k)) {
1382 if (__set_blocks(n1, n1->keys + keys +
1383 bkey_u64s(k),
1384 block_bytes(b->c->cache)) > blocks)
1385 break;
1386
1387 last = k;
1388 keys += bkey_u64s(k);
1389 }
1390 } else {
1391 /*
1392 * Last node we're not getting rid of - we're getting
1393 * rid of the node at r[0]. Have to try and fit all of
1394 * the remaining keys into this node; we can't ensure
1395 * they will always fit due to rounding and variable
1396 * length keys (shouldn't be possible in practice,
1397 * though)
1398 */
1399 if (__set_blocks(n1, n1->keys + n2->keys,
1400 block_bytes(b->c->cache)) >
1401 btree_blocks(new_nodes[i]))
1402 goto out_unlock_nocoalesce;
1403
1404 keys = n2->keys;
1405 /* Take the key of the node we're getting rid of */
1406 last = &r->b->key;
1407 }
1408
1409 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
1410 btree_blocks(new_nodes[i]));
1411
1412 if (last)
1413 bkey_copy_key(&new_nodes[i]->key, last);
1414
1415 memcpy(bset_bkey_last(n1),
1416 n2->start,
1417 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1418
1419 n1->keys += keys;
1420 r[i].keys = n1->keys;
1421
1422 memmove(n2->start,
1423 bset_bkey_idx(n2, keys),
1424 (void *) bset_bkey_last(n2) -
1425 (void *) bset_bkey_idx(n2, keys));
1426
1427 n2->keys -= keys;
1428
1429 if (__bch_keylist_realloc(&keylist,
1430 bkey_u64s(&new_nodes[i]->key)))
1431 goto out_unlock_nocoalesce;
1432
1433 bch_btree_node_write(new_nodes[i], &cl);
1434 bch_keylist_add(&keylist, &new_nodes[i]->key);
1435 }
1436
1437 for (i = 0; i < nodes; i++)
1438 mutex_unlock(&new_nodes[i]->write_lock);
1439
1440 closure_sync(&cl);
1441
1442 /* We emptied out this node */
1443 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1444 btree_node_free(new_nodes[0]);
1445 rw_unlock(true, new_nodes[0]);
1446 new_nodes[0] = NULL;
1447
1448 for (i = 0; i < nodes; i++) {
1449 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1450 goto out_nocoalesce;
1451
1452 make_btree_freeing_key(r[i].b, keylist.top);
1453 bch_keylist_push(&keylist);
1454 }
1455
1456 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1457 BUG_ON(!bch_keylist_empty(&keylist));
1458
1459 for (i = 0; i < nodes; i++) {
1460 btree_node_free(r[i].b);
1461 rw_unlock(true, r[i].b);
1462
1463 r[i].b = new_nodes[i];
1464 }
1465
1466 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1467 r[nodes - 1].b = ERR_PTR(-EINTR);
1468
1469 trace_bcache_btree_gc_coalesce(nodes);
1470 gc->nodes--;
1471
1472 bch_keylist_free(&keylist);
1473
1474 /* Invalidated our iterator */
1475 return -EINTR;
1476
1477out_unlock_nocoalesce:
1478 for (i = 0; i < nodes; i++)
1479 mutex_unlock(&new_nodes[i]->write_lock);
1480
1481out_nocoalesce:
1482 closure_sync(&cl);
1483
1484 while ((k = bch_keylist_pop(&keylist)))
1485 if (!bkey_cmp(k, &ZERO_KEY))
1486 atomic_dec(&b->c->prio_blocked);
1487 bch_keylist_free(&keylist);
1488
1489 for (i = 0; i < nodes; i++)
1490 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1491 btree_node_free(new_nodes[i]);
1492 rw_unlock(true, new_nodes[i]);
1493 }
1494 return 0;
1495}
1496
1497static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1498 struct btree *replace)
1499{
1500 struct keylist keys;
1501 struct btree *n;
1502
1503 if (btree_check_reserve(b, NULL))
1504 return 0;
1505
1506 n = btree_node_alloc_replacement(replace, NULL);
1507
1508 /* recheck reserve after allocating replacement node */
1509 if (btree_check_reserve(b, NULL)) {
1510 btree_node_free(n);
1511 rw_unlock(true, n);
1512 return 0;
1513 }
1514
1515 bch_btree_node_write_sync(n);
1516
1517 bch_keylist_init(&keys);
1518 bch_keylist_add(&keys, &n->key);
1519
1520 make_btree_freeing_key(replace, keys.top);
1521 bch_keylist_push(&keys);
1522
1523 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1524 BUG_ON(!bch_keylist_empty(&keys));
1525
1526 btree_node_free(replace);
1527 rw_unlock(true, n);
1528
1529 /* Invalidated our iterator */
1530 return -EINTR;
1531}
1532
1533static unsigned int btree_gc_count_keys(struct btree *b)
1534{
1535 struct bkey *k;
1536 struct btree_iter iter;
1537 unsigned int ret = 0;
1538
1539 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1540 ret += bkey_u64s(k);
1541
1542 return ret;
1543}
1544
1545static size_t btree_gc_min_nodes(struct cache_set *c)
1546{
1547 size_t min_nodes;
1548
1549 /*
1550	 * Incremental GC pauses for 100ms whenever front-end
1551	 * I/O arrives, so if GC only processed a constant
1552	 * number (100) of nodes per pass, a large btree would
1553	 * keep GC running for a very long time, and front-end
1554	 * I/O would run out of buckets (no new bucket can be
1555	 * allocated during GC) and be blocked again.
1556	 * So GC should not process a constant number of nodes,
1557	 * but a number that scales with the size of the btree:
1558	 * GC is divided into at most MAX_GC_TIMES (100) passes,
1559	 * so when there are many btree nodes each pass handles
1560	 * more of them, and when there are few it handles fewer
1561	 * (but never less than MIN_GC_NODES).
1562 */
1563 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1564 if (min_nodes < MIN_GC_NODES)
1565 min_nodes = MIN_GC_NODES;
1566
1567 return min_nodes;
1568}
1569
1570
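/*
 * Walk one level of the btree for gc: mark each child's keys, coalesce
 * or rewrite children where worthwhile, recurse into interior children,
 * and record progress in c->gc_done so an interrupted gc can resume.
 * Returns -EAGAIN when gc should yield to foreground I/O, or another
 * error to abort.
 */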
1571static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1572 struct closure *writes, struct gc_stat *gc)
1573{
1574 int ret = 0;
1575 bool should_rewrite;
1576 struct bkey *k;
1577 struct btree_iter iter;
1578 struct gc_merge_info r[GC_MERGE_NODES];
1579 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1580
1581 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1582
1583 for (i = r; i < r + ARRAY_SIZE(r); i++)
1584 i->b = ERR_PTR(-EINTR);
1585
1586 while (1) {
1587 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1588 if (k) {
1589 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1590 true, b);
1591 if (IS_ERR(r->b)) {
1592 ret = PTR_ERR(r->b);
1593 break;
1594 }
1595
1596 r->keys = btree_gc_count_keys(r->b);
1597
1598 ret = btree_gc_coalesce(b, op, gc, r);
1599 if (ret)
1600 break;
1601 }
1602
1603 if (!last->b)
1604 break;
1605
1606 if (!IS_ERR(last->b)) {
1607 should_rewrite = btree_gc_mark_node(last->b, gc);
1608 if (should_rewrite) {
1609 ret = btree_gc_rewrite_node(b, op, last->b);
1610 if (ret)
1611 break;
1612 }
1613
1614 if (last->b->level) {
1615 ret = btree_gc_recurse(last->b, op, writes, gc);
1616 if (ret)
1617 break;
1618 }
1619
1620 bkey_copy_key(&b->c->gc_done, &last->b->key);
1621
1622 /*
1623 * Must flush leaf nodes before gc ends, since replace
1624 * operations aren't journalled
1625 */
1626 mutex_lock(&last->b->write_lock);
1627 if (btree_node_dirty(last->b))
1628 bch_btree_node_write(last->b, writes);
1629 mutex_unlock(&last->b->write_lock);
1630 rw_unlock(true, last->b);
1631 }
1632
1633 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1634 r->b = NULL;
1635
1636 if (atomic_read(&b->c->search_inflight) &&
1637 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1638 gc->nodes_pre = gc->nodes;
1639 ret = -EAGAIN;
1640 break;
1641 }
1642
1643 if (need_resched()) {
1644 ret = -EAGAIN;
1645 break;
1646 }
1647 }
1648
1649 for (i = r; i < r + ARRAY_SIZE(r); i++)
1650 if (!IS_ERR_OR_NULL(i->b)) {
1651 mutex_lock(&i->b->write_lock);
1652 if (btree_node_dirty(i->b))
1653 bch_btree_node_write(i->b, writes);
1654 mutex_unlock(&i->b->write_lock);
1655 rw_unlock(true, i->b);
1656 }
1657
1658 return ret;
1659}
1660
1661static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1662 struct closure *writes, struct gc_stat *gc)
1663{
1664 struct btree *n = NULL;
1665 int ret = 0;
1666 bool should_rewrite;
1667
1668 should_rewrite = btree_gc_mark_node(b, gc);
1669 if (should_rewrite) {
1670 n = btree_node_alloc_replacement(b, NULL);
1671
1672 if (!IS_ERR_OR_NULL(n)) {
1673 bch_btree_node_write_sync(n);
1674
1675 bch_btree_set_root(n);
1676 btree_node_free(b);
1677 rw_unlock(true, n);
1678
1679 return -EINTR;
1680 }
1681 }
1682
1683 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1684
1685 if (b->level) {
1686 ret = btree_gc_recurse(b, op, writes, gc);
1687 if (ret)
1688 return ret;
1689 }
1690
1691 bkey_copy_key(&b->c->gc_done, &b->key);
1692
1693 return ret;
1694}
1695
1696static void btree_gc_start(struct cache_set *c)
1697{
1698 struct cache *ca;
1699 struct bucket *b;
1700
1701 if (!c->gc_mark_valid)
1702 return;
1703
1704 mutex_lock(&c->bucket_lock);
1705
1706 c->gc_mark_valid = 0;
1707 c->gc_done = ZERO_KEY;
1708
1709 ca = c->cache;
1710 for_each_bucket(b, ca) {
1711 b->last_gc = b->gen;
1712 if (!atomic_read(&b->pin)) {
1713 SET_GC_MARK(b, 0);
1714 SET_GC_SECTORS_USED(b, 0);
1715 }
1716 }
1717
1718 mutex_unlock(&c->bucket_lock);
1719}
1720
1721static void bch_btree_gc_finish(struct cache_set *c)
1722{
1723 struct bucket *b;
1724 struct cache *ca;
1725 unsigned int i, j;
1726 uint64_t *k;
1727
1728 mutex_lock(&c->bucket_lock);
1729
1730 set_gc_sectors(c);
1731 c->gc_mark_valid = 1;
1732 c->need_gc = 0;
1733
1734 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1735 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1736 GC_MARK_METADATA);
1737
1738 /* don't reclaim buckets to which writeback keys point */
1739 rcu_read_lock();
1740 for (i = 0; i < c->devices_max_used; i++) {
1741 struct bcache_device *d = c->devices[i];
1742 struct cached_dev *dc;
1743 struct keybuf_key *w, *n;
1744
1745 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1746 continue;
1747 dc = container_of(d, struct cached_dev, disk);
1748
1749 spin_lock(&dc->writeback_keys.lock);
1750 rbtree_postorder_for_each_entry_safe(w, n,
1751 &dc->writeback_keys.keys, node)
1752 for (j = 0; j < KEY_PTRS(&w->key); j++)
1753 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1754 GC_MARK_DIRTY);
1755 spin_unlock(&dc->writeback_keys.lock);
1756 }
1757 rcu_read_unlock();
1758
1759 c->avail_nbuckets = 0;
1760
1761 ca = c->cache;
1762 ca->invalidate_needs_gc = 0;
1763
1764 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
1765 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1766
1767 for (k = ca->prio_buckets;
1768 k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
1769 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1770
1771 for_each_bucket(b, ca) {
1772 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1773
1774 if (atomic_read(&b->pin))
1775 continue;
1776
1777 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1778
1779 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1780 c->avail_nbuckets++;
1781 }
1782
1783 mutex_unlock(&c->bucket_lock);
1784}
1785
1786static void bch_btree_gc(struct cache_set *c)
1787{
1788 int ret;
1789 struct gc_stat stats;
1790 struct closure writes;
1791 struct btree_op op;
1792 uint64_t start_time = local_clock();
1793
1794 trace_bcache_gc_start(c);
1795
1796 memset(&stats, 0, sizeof(struct gc_stat));
1797 closure_init_stack(&writes);
1798 bch_btree_op_init(&op, SHRT_MAX);
1799
1800 btree_gc_start(c);
1801
1802 /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1803 do {
1804 ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
1805 closure_sync(&writes);
1806 cond_resched();
1807
1808 if (ret == -EAGAIN)
1809 schedule_timeout_interruptible(msecs_to_jiffies
1810 (GC_SLEEP_MS));
1811 else if (ret)
1812 pr_warn("gc failed!\n");
1813 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1814
1815 bch_btree_gc_finish(c);
1816 wake_up_allocators(c);
1817
1818 bch_time_stats_update(&c->btree_gc_time, start_time);
1819
1820 stats.key_bytes *= sizeof(uint64_t);
1821 stats.data <<= 9;
1822 bch_update_bucket_in_use(c, &stats);
1823 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1824
1825 trace_bcache_gc_end(c);
1826
1827 bch_moving_gc(c);
1828}
1829
1830static bool gc_should_run(struct cache_set *c)
1831{
1832 struct cache *ca = c->cache;
1833
1834 if (ca->invalidate_needs_gc)
1835 return true;
1836
1837 if (atomic_read(&c->sectors_to_gc) < 0)
1838 return true;
1839
1840 return false;
1841}
1842
1843static int bch_gc_thread(void *arg)
1844{
1845 struct cache_set *c = arg;
1846
1847 while (1) {
1848 wait_event_interruptible(c->gc_wait,
1849 kthread_should_stop() ||
1850 test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1851 gc_should_run(c));
1852
1853 if (kthread_should_stop() ||
1854 test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1855 break;
1856
1857 set_gc_sectors(c);
1858 bch_btree_gc(c);
1859 }
1860
1861 wait_for_kthread_stop();
1862 return 0;
1863}
1864
1865int bch_gc_thread_start(struct cache_set *c)
1866{
1867 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1868 return PTR_ERR_OR_ZERO(c->gc_thread);
1869}
1870
1871/* Initial partial gc */
1872
1873static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1874{
1875 int ret = 0;
1876 struct bkey *k, *p = NULL;
1877 struct btree_iter iter;
1878
1879 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1880 bch_initial_mark_key(b->c, b->level, k);
1881
1882 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1883
1884 if (b->level) {
1885 bch_btree_iter_init(&b->keys, &iter, NULL);
1886
1887 do {
1888 k = bch_btree_iter_next_filter(&iter, &b->keys,
1889 bch_ptr_bad);
1890 if (k) {
1891 btree_node_prefetch(b, k);
1892 /*
1893	 * initialize c->gc_stats.nodes
1894 * for incremental GC
1895 */
1896 b->c->gc_stats.nodes++;
1897 }
1898
1899 if (p)
1900 ret = bcache_btree(check_recurse, p, b, op);
1901
1902 p = k;
1903 } while (p && !ret);
1904 }
1905
1906 return ret;
1907}
1908
1909
1910static int bch_btree_check_thread(void *arg)
1911{
1912 int ret;
1913 struct btree_check_info *info = arg;
1914 struct btree_check_state *check_state = info->state;
1915 struct cache_set *c = check_state->c;
1916 struct btree_iter iter;
1917 struct bkey *k, *p;
1918 int cur_idx, prev_idx, skip_nr;
1919
1920 k = p = NULL;
1921 cur_idx = prev_idx = 0;
1922 ret = 0;
1923
1924 /* root node keys are checked before thread created */
1925 bch_btree_iter_init(&c->root->keys, &iter, NULL);
1926 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
1927 BUG_ON(!k);
1928
1929 p = k;
1930 while (k) {
1931 /*
1932 * Fetch a root node key index, skip the keys which
1933 * should be fetched by other threads, then check the
1934 * sub-tree indexed by the fetched key.
1935 */
1936 spin_lock(&check_state->idx_lock);
1937 cur_idx = check_state->key_idx;
1938 check_state->key_idx++;
1939 spin_unlock(&check_state->idx_lock);
1940
1941 skip_nr = cur_idx - prev_idx;
1942
1943 while (skip_nr) {
1944 k = bch_btree_iter_next_filter(&iter,
1945 &c->root->keys,
1946 bch_ptr_bad);
1947 if (k)
1948 p = k;
1949 else {
1950 /*
1951	 * No more keys to check in the root node;
1952	 * the currently running check threads are
1953	 * enough, so stop creating more.
1954 */
1955 atomic_set(&check_state->enough, 1);
1956 /* Update check_state->enough earlier */
1957 smp_mb__after_atomic();
1958 goto out;
1959 }
1960 skip_nr--;
1961 cond_resched();
1962 }
1963
1964 if (p) {
1965 struct btree_op op;
1966
1967 btree_node_prefetch(c->root, p);
1968 c->gc_stats.nodes++;
1969 bch_btree_op_init(&op, 0);
1970 ret = bcache_btree(check_recurse, p, c->root, &op);
1971 if (ret)
1972 goto out;
1973 }
1974 p = NULL;
1975 prev_idx = cur_idx;
1976 cond_resched();
1977 }
1978
1979out:
1980 info->result = ret;
1981 /* update check_state->started among all CPUs */
1982 smp_mb__before_atomic();
1983 if (atomic_dec_and_test(&check_state->started))
1984 wake_up(&check_state->wait);
1985
1986 return ret;
1987}
1988
1989
1990
1991static int bch_btree_chkthread_nr(void)
1992{
1993 int n = num_online_cpus()/2;
1994
1995 if (n == 0)
1996 n = 1;
1997 else if (n > BCH_BTR_CHKTHREAD_MAX)
1998 n = BCH_BTR_CHKTHREAD_MAX;
1999
2000 return n;
2001}
2002
2003int bch_btree_check(struct cache_set *c)
2004{
2005 int ret = 0;
2006 int i;
2007 struct bkey *k = NULL;
2008 struct btree_iter iter;
2009 struct btree_check_state check_state;
2010
2011 /* check and mark root node keys */
2012 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
2013 bch_initial_mark_key(c, c->root->level, k);
2014
2015 bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
2016
2017 if (c->root->level == 0)
2018 return 0;
2019
2020 memset(&check_state, 0, sizeof(struct btree_check_state));
2021 check_state.c = c;
2022 check_state.total_threads = bch_btree_chkthread_nr();
2023 check_state.key_idx = 0;
2024 spin_lock_init(&check_state.idx_lock);
2025 atomic_set(&check_state.started, 0);
2026 atomic_set(&check_state.enough, 0);
2027 init_waitqueue_head(&check_state.wait);
2028
2029 rw_lock(0, c->root, c->root->level);
2030 /*
2031	 * Run multiple threads to check btree nodes in parallel;
2032	 * if check_state.enough is non-zero, the currently running
2033	 * check threads are sufficient and it is unnecessary to
2034	 * create more.
2035 */
2036 for (i = 0; i < check_state.total_threads; i++) {
2037 /* fetch latest check_state.enough earlier */
2038 smp_mb__before_atomic();
2039 if (atomic_read(&check_state.enough))
2040 break;
2041
2042 check_state.infos[i].result = 0;
2043 check_state.infos[i].state = &check_state;
2044
2045 check_state.infos[i].thread =
2046 kthread_run(bch_btree_check_thread,
2047 &check_state.infos[i],
2048 "bch_btrchk[%d]", i);
2049 if (IS_ERR(check_state.infos[i].thread)) {
2050 pr_err("fails to run thread bch_btrchk[%d]\n", i);
2051 for (--i; i >= 0; i--)
2052 kthread_stop(check_state.infos[i].thread);
2053 ret = -ENOMEM;
2054 goto out;
2055 }
2056 atomic_inc(&check_state.started);
2057 }
2058
2059 /*
2060 * Must wait for all threads to stop.
2061 */
2062 wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
2063
2064 for (i = 0; i < check_state.total_threads; i++) {
2065 if (check_state.infos[i].result) {
2066 ret = check_state.infos[i].result;
2067 goto out;
2068 }
2069 }
2070
2071out:
2072 rw_unlock(0, c->root);
2073 return ret;
2074}
2075
2076void bch_initial_gc_finish(struct cache_set *c)
2077{
2078 struct cache *ca = c->cache;
2079 struct bucket *b;
2080
2081 bch_btree_gc_finish(c);
2082
2083 mutex_lock(&c->bucket_lock);
2084
2085 /*
2086 * We need to put some unused buckets directly on the prio freelist in
2087 * order to get the allocator thread started - it needs freed buckets in
2088 * order to rewrite the prios and gens, and it needs to rewrite prios
2089 * and gens in order to free buckets.
2090 *
2091 * This is only safe for buckets that have no live data in them, which
2092 * there should always be some of.
2093 */
2094 for_each_bucket(b, ca) {
2095 if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2096 fifo_full(&ca->free[RESERVE_BTREE]))
2097 break;
2098
2099 if (bch_can_invalidate_bucket(ca, b) &&
2100 !GC_MARK(b)) {
2101 __bch_invalidate_one_bucket(ca, b);
2102 if (!fifo_push(&ca->free[RESERVE_PRIO],
2103 b - ca->buckets))
2104 fifo_push(&ca->free[RESERVE_BTREE],
2105 b - ca->buckets);
2106 }
2107 }
2108
2109 mutex_unlock(&c->bucket_lock);
2110}
2111
2112/* Btree insertion */
2113
2114static bool btree_insert_key(struct btree *b, struct bkey *k,
2115 struct bkey *replace_key)
2116{
2117 unsigned int status;
2118
2119 BUG_ON(bkey_cmp(k, &b->key) > 0);
2120
2121 status = bch_btree_insert_key(&b->keys, k, replace_key);
2122 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2123 bch_check_keys(&b->keys, "%u for %s", status,
2124 replace_key ? "replace" : "insert");
2125
2126 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2127 status);
2128 return true;
2129 } else
2130 return false;
2131}
2132
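/*
 * How many u64s worth of keys can still be inserted into the node's
 * current bset; extent nodes leave room for one extra key, since an
 * insert may land in the middle of an existing extent and split it.
 */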
2133static size_t insert_u64s_remaining(struct btree *b)
2134{
2135 long ret = bch_btree_keys_u64s_remaining(&b->keys);
2136
2137 /*
2138 * Might land in the middle of an existing extent and have to split it
2139 */
2140 if (b->keys.ops->is_extents)
2141 ret -= KEY_MAX_U64S;
2142
2143 return max(ret, 0L);
2144}
2145
2146static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2147 struct keylist *insert_keys,
2148 struct bkey *replace_key)
2149{
2150 bool ret = false;
2151 int oldsize = bch_count_data(&b->keys);
2152
2153 while (!bch_keylist_empty(insert_keys)) {
2154 struct bkey *k = insert_keys->keys;
2155
2156 if (bkey_u64s(k) > insert_u64s_remaining(b))
2157 break;
2158
2159 if (bkey_cmp(k, &b->key) <= 0) {
2160 if (!b->level)
2161 bkey_put(b->c, k);
2162
2163 ret |= btree_insert_key(b, k, replace_key);
2164 bch_keylist_pop_front(insert_keys);
2165 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
2166 BKEY_PADDED(key) temp;
2167 bkey_copy(&temp.key, insert_keys->keys);
2168
2169 bch_cut_back(&b->key, &temp.key);
2170 bch_cut_front(&b->key, insert_keys->keys);
2171
2172 ret |= btree_insert_key(b, &temp.key, replace_key);
2173 break;
2174 } else {
2175 break;
2176 }
2177 }
2178
2179 if (!ret)
2180 op->insert_collision = true;
2181
2182 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2183
2184 BUG_ON(bch_count_data(&b->keys) < oldsize);
2185 return ret;
2186}
2187
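/*
 * Rewrite @b to make room for @insert_keys: allocate a replacement node
 * n1 and, if the result would be more than ~80% full, split it into a
 * second node n2 (plus a new root n3 when @b is the root). The keys are
 * inserted into the new node(s), pointers to them are inserted into the
 * parent, and @b is freed. Returns 0 on success, -EAGAIN or -ENOMEM on
 * allocation failure.
 */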
2188static int btree_split(struct btree *b, struct btree_op *op,
2189 struct keylist *insert_keys,
2190 struct bkey *replace_key)
2191{
2192 bool split;
2193 struct btree *n1, *n2 = NULL, *n3 = NULL;
2194 uint64_t start_time = local_clock();
2195 struct closure cl;
2196 struct keylist parent_keys;
2197
2198 closure_init_stack(&cl);
2199 bch_keylist_init(&parent_keys);
2200
2201 if (btree_check_reserve(b, op)) {
2202 if (!b->level)
2203 return -EINTR;
2204 else
2205 WARN(1, "insufficient reserve for split\n");
2206 }
2207
2208 n1 = btree_node_alloc_replacement(b, op);
2209 if (IS_ERR(n1))
2210 goto err;
2211
2212 split = set_blocks(btree_bset_first(n1),
2213 block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
2214
2215 if (split) {
2216 unsigned int keys = 0;
2217
2218 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2219
2220 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2221 if (IS_ERR(n2))
2222 goto err_free1;
2223
2224 if (!b->parent) {
2225 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2226 if (IS_ERR(n3))
2227 goto err_free2;
2228 }
2229
2230 mutex_lock(&n1->write_lock);
2231 mutex_lock(&n2->write_lock);
2232
2233 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2234
2235 /*
2236 * Has to be a linear search because we don't have an auxiliary
2237 * search tree yet
2238 */
2239
2240 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2241 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2242 keys));
2243
2244 bkey_copy_key(&n1->key,
2245 bset_bkey_idx(btree_bset_first(n1), keys));
2246 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2247
2248 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2249 btree_bset_first(n1)->keys = keys;
2250
2251 memcpy(btree_bset_first(n2)->start,
2252 bset_bkey_last(btree_bset_first(n1)),
2253 btree_bset_first(n2)->keys * sizeof(uint64_t));
2254
2255 bkey_copy_key(&n2->key, &b->key);
2256
2257 bch_keylist_add(&parent_keys, &n2->key);
2258 bch_btree_node_write(n2, &cl);
2259 mutex_unlock(&n2->write_lock);
2260 rw_unlock(true, n2);
2261 } else {
2262 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2263
2264 mutex_lock(&n1->write_lock);
2265 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2266 }
2267
2268 bch_keylist_add(&parent_keys, &n1->key);
2269 bch_btree_node_write(n1, &cl);
2270 mutex_unlock(&n1->write_lock);
2271
2272 if (n3) {
2273 /* Depth increases, make a new root */
2274 mutex_lock(&n3->write_lock);
2275 bkey_copy_key(&n3->key, &MAX_KEY);
2276 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2277 bch_btree_node_write(n3, &cl);
2278 mutex_unlock(&n3->write_lock);
2279
2280 closure_sync(&cl);
2281 bch_btree_set_root(n3);
2282 rw_unlock(true, n3);
2283 } else if (!b->parent) {
2284 /* Root filled up but didn't need to be split */
2285 closure_sync(&cl);
2286 bch_btree_set_root(n1);
2287 } else {
2288 /* Split a non root node */
2289 closure_sync(&cl);
2290 make_btree_freeing_key(b, parent_keys.top);
2291 bch_keylist_push(&parent_keys);
2292
2293 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2294 BUG_ON(!bch_keylist_empty(&parent_keys));
2295 }
2296
2297 btree_node_free(b);
2298 rw_unlock(true, n1);
2299
2300 bch_time_stats_update(&b->c->btree_split_time, start_time);
2301
2302 return 0;
2303err_free2:
2304 bkey_put(b->c, &n2->key);
2305 btree_node_free(n2);
2306 rw_unlock(true, n2);
2307err_free1:
2308 bkey_put(b->c, &n1->key);
2309 btree_node_free(n1);
2310 rw_unlock(true, n1);
2311err:
2312 WARN(1, "bcache: btree split failed (level %u)", b->level);
2313
2314 if (n3 == ERR_PTR(-EAGAIN) ||
2315 n2 == ERR_PTR(-EAGAIN) ||
2316 n1 == ERR_PTR(-EAGAIN))
2317 return -EAGAIN;
2318
2319 return -ENOMEM;
2320}
2321
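/*
 * Insert @insert_keys into @b, splitting the node first if they don't
 * fit. Returns 0 on success; -EAGAIN if a split is needed while we're
 * running under generic_make_request(); -EINTR if the operation must be
 * retried with a write lock higher up the tree (op->lock is updated
 * accordingly).
 */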
2322static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2323 struct keylist *insert_keys,
2324 atomic_t *journal_ref,
2325 struct bkey *replace_key)
2326{
2327 struct closure cl;
2328
2329 BUG_ON(b->level && replace_key);
2330
2331 closure_init_stack(&cl);
2332
2333 mutex_lock(&b->write_lock);
2334
2335 if (write_block(b) != btree_bset_last(b) &&
2336 b->keys.last_set_unwritten)
2337 bch_btree_init_next(b); /* just wrote a set */
2338
2339 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2340 mutex_unlock(&b->write_lock);
2341 goto split;
2342 }
2343
2344 BUG_ON(write_block(b) != btree_bset_last(b));
2345
2346 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2347 if (!b->level)
2348 bch_btree_leaf_dirty(b, journal_ref);
2349 else
2350 bch_btree_node_write(b, &cl);
2351 }
2352
2353 mutex_unlock(&b->write_lock);
2354
2355 /* wait for btree node write if necessary, after unlock */
2356 closure_sync(&cl);
2357
2358 return 0;
2359split:
2360 if (current->bio_list) {
2361 op->lock = b->c->root->level + 1;
2362 return -EAGAIN;
2363 } else if (op->lock <= b->c->root->level) {
2364 op->lock = b->c->root->level + 1;
2365 return -EINTR;
2366 } else {
2367 /* Invalidated all iterators */
2368 int ret = btree_split(b, op, insert_keys, replace_key);
2369
2370 if (bch_keylist_empty(insert_keys))
2371 return 0;
2372 else if (!ret)
2373 return -EINTR;
2374 return ret;
2375 }
2376}
2377
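/*
 * Insert a "check" key: a single-pointer key with random pointer
 * contents on the special PTR_CHECK_DEV device. Takes the write lock
 * (upgrading from a read lock when op->lock == -1) and returns -EINTR
 * if the node changed underneath us during the upgrade.
 */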
2378int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2379 struct bkey *check_key)
2380{
2381 int ret = -EINTR;
2382 uint64_t btree_ptr = b->key.ptr[0];
2383 unsigned long seq = b->seq;
2384 struct keylist insert;
2385 bool upgrade = op->lock == -1;
2386
2387 bch_keylist_init(&insert);
2388
2389 if (upgrade) {
2390 rw_unlock(false, b);
2391 rw_lock(true, b, b->level);
2392
2393 if (b->key.ptr[0] != btree_ptr ||
2394 b->seq != seq + 1) {
2395 op->lock = b->level;
2396 goto out;
2397 }
2398 }
2399
2400 SET_KEY_PTRS(check_key, 1);
2401 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2402
2403 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2404
2405 bch_keylist_add(&insert, check_key);
2406
2407 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2408
2409 BUG_ON(!ret && !bch_keylist_empty(&insert));
2410out:
2411 if (upgrade)
2412 downgrade_write(&b->lock);
2413 return ret;
2414}
2415
2416struct btree_insert_op {
2417 struct btree_op op;
2418 struct keylist *keys;
2419 atomic_t *journal_ref;
2420 struct bkey *replace_key;
2421};
2422
2423static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2424{
2425 struct btree_insert_op *op = container_of(b_op,
2426 struct btree_insert_op, op);
2427
2428 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2429 op->journal_ref, op->replace_key);
2430 if (ret && !bch_keylist_empty(op->keys))
2431 return ret;
2432 else
2433 return MAP_DONE;
2434}
2435
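/*
 * Insert every key on @keys into the btree, mapping across leaf nodes
 * starting from the list's first key. On error the remaining keys are
 * dropped and their pointers put; -ESRCH is returned if an insert
 * collided with existing keys and nothing was inserted.
 */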
2436int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2437 atomic_t *journal_ref, struct bkey *replace_key)
2438{
2439 struct btree_insert_op op;
2440 int ret = 0;
2441
2442 BUG_ON(current->bio_list);
2443 BUG_ON(bch_keylist_empty(keys));
2444
2445 bch_btree_op_init(&op.op, 0);
2446 op.keys = keys;
2447 op.journal_ref = journal_ref;
2448 op.replace_key = replace_key;
2449
2450 while (!ret && !bch_keylist_empty(keys)) {
2451 op.op.lock = 0;
2452 ret = bch_btree_map_leaf_nodes(&op.op, c,
2453 &START_KEY(keys->keys),
2454 btree_insert_fn);
2455 }
2456
2457 if (ret) {
2458 struct bkey *k;
2459
2460		pr_err("btree insert error %i\n", ret);
2461
2462 while ((k = bch_keylist_pop(keys)))
2463 bkey_put(c, k);
2464 } else if (op.op.insert_collision)
2465 ret = -ESRCH;
2466
2467 return ret;
2468}
2469
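/*
 * Make @b the new btree root: the node must already be written and its
 * buckets must carry BTREE_PRIO; the change is made persistent by
 * forcing a journal meta write before returning.
 */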
2470void bch_btree_set_root(struct btree *b)
2471{
2472 unsigned int i;
2473 struct closure cl;
2474
2475 closure_init_stack(&cl);
2476
2477 trace_bcache_btree_set_root(b);
2478
2479 BUG_ON(!b->written);
2480
2481 for (i = 0; i < KEY_PTRS(&b->key); i++)
2482 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2483
2484 mutex_lock(&b->c->bucket_lock);
2485 list_del_init(&b->list);
2486 mutex_unlock(&b->c->bucket_lock);
2487
2488 b->c->root = b;
2489
2490 bch_journal_meta(b->c, &cl);
2491 closure_sync(&cl);
2492}
2493
2494/* Map across nodes or keys */
2495
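/*
 * The map functions recursively walk the btree from @from, invoking the
 * callback on each node (or key) visited. The callback returns
 * MAP_CONTINUE to keep going or MAP_DONE to stop; MAP_ALL_NODES visits
 * interior nodes as well as leaves, and MAP_END_KEY additionally passes
 * a final zero-size key at the end of each leaf.
 */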
2496static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2497 struct bkey *from,
2498 btree_map_nodes_fn *fn, int flags)
2499{
2500 int ret = MAP_CONTINUE;
2501
2502 if (b->level) {
2503 struct bkey *k;
2504 struct btree_iter iter;
2505
2506 bch_btree_iter_init(&b->keys, &iter, from);
2507
2508 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2509 bch_ptr_bad))) {
2510 ret = bcache_btree(map_nodes_recurse, k, b,
2511 op, from, fn, flags);
2512 from = NULL;
2513
2514 if (ret != MAP_CONTINUE)
2515 return ret;
2516 }
2517 }
2518
2519 if (!b->level || flags == MAP_ALL_NODES)
2520 ret = fn(op, b);
2521
2522 return ret;
2523}
2524
2525int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2526 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2527{
2528 return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
2529}
2530
2531int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2532 struct bkey *from, btree_map_keys_fn *fn,
2533 int flags)
2534{
2535 int ret = MAP_CONTINUE;
2536 struct bkey *k;
2537 struct btree_iter iter;
2538
2539 bch_btree_iter_init(&b->keys, &iter, from);
2540
2541 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2542 ret = !b->level
2543 ? fn(op, b, k)
2544 : bcache_btree(map_keys_recurse, k,
2545 b, op, from, fn, flags);
2546 from = NULL;
2547
2548 if (ret != MAP_CONTINUE)
2549 return ret;
2550 }
2551
2552 if (!b->level && (flags & MAP_END_KEY))
2553 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2554 KEY_OFFSET(&b->key), 0));
2555
2556 return ret;
2557}
2558
2559int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2560 struct bkey *from, btree_map_keys_fn *fn, int flags)
2561{
2562 return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
2563}
2564
2565/* Keybuf code */
2566
2567static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2568{
2569 /* Overlapping keys compare equal */
2570 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2571 return -1;
2572 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2573 return 1;
2574 return 0;
2575}
2576
2577static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2578 struct keybuf_key *r)
2579{
2580 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2581}
2582
2583struct refill {
2584 struct btree_op op;
2585 unsigned int nr_found;
2586 struct keybuf *buf;
2587 struct bkey *end;
2588 keybuf_pred_fn *pred;
2589};
2590
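/*
 * Called for each key while refilling a keybuf: a key past refill->end
 * terminates the scan, zero-size keys are skipped, and keys matching
 * the predicate are copied into the buffer until its freelist runs out.
 */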
2591static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2592 struct bkey *k)
2593{
2594 struct refill *refill = container_of(op, struct refill, op);
2595 struct keybuf *buf = refill->buf;
2596 int ret = MAP_CONTINUE;
2597
2598 if (bkey_cmp(k, refill->end) > 0) {
2599 ret = MAP_DONE;
2600 goto out;
2601 }
2602
2603 if (!KEY_SIZE(k)) /* end key */
2604 goto out;
2605
2606 if (refill->pred(buf, k)) {
2607 struct keybuf_key *w;
2608
2609 spin_lock(&buf->lock);
2610
2611 w = array_alloc(&buf->freelist);
2612 if (!w) {
2613 spin_unlock(&buf->lock);
2614 return MAP_DONE;
2615 }
2616
2617 w->private = NULL;
2618 bkey_copy(&w->key, k);
2619
2620 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2621 array_free(&buf->freelist, w);
2622 else
2623 refill->nr_found++;
2624
2625 if (array_freelist_empty(&buf->freelist))
2626 ret = MAP_DONE;
2627
2628 spin_unlock(&buf->lock);
2629 }
2630out:
2631 buf->last_scanned = *k;
2632 return ret;
2633}
2634
2635void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2636 struct bkey *end, keybuf_pred_fn *pred)
2637{
2638 struct bkey start = buf->last_scanned;
2639 struct refill refill;
2640
2641 cond_resched();
2642
2643 bch_btree_op_init(&refill.op, -1);
2644 refill.nr_found = 0;
2645 refill.buf = buf;
2646 refill.end = end;
2647 refill.pred = pred;
2648
2649 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2650 refill_keybuf_fn, MAP_END_KEY);
2651
2652 trace_bcache_keyscan(refill.nr_found,
2653 KEY_INODE(&start), KEY_OFFSET(&start),
2654 KEY_INODE(&buf->last_scanned),
2655 KEY_OFFSET(&buf->last_scanned));
2656
2657 spin_lock(&buf->lock);
2658
2659 if (!RB_EMPTY_ROOT(&buf->keys)) {
2660 struct keybuf_key *w;
2661
2662 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2663 buf->start = START_KEY(&w->key);
2664
2665 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2666 buf->end = w->key;
2667 } else {
2668 buf->start = MAX_KEY;
2669 buf->end = MAX_KEY;
2670 }
2671
2672 spin_unlock(&buf->lock);
2673}
2674
2675static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2676{
2677 rb_erase(&w->node, &buf->keys);
2678 array_free(&buf->freelist, w);
2679}
2680
2681void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2682{
2683 spin_lock(&buf->lock);
2684 __bch_keybuf_del(buf, w);
2685 spin_unlock(&buf->lock);
2686}
2687
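/*
 * Check whether any keys in the buffer overlap [start, end). Overlapping
 * keys that are not in use are dropped from the buffer; returns true if
 * an overlapping key is currently in use (w->private set).
 */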
2688bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2689 struct bkey *end)
2690{
2691 bool ret = false;
2692 struct keybuf_key *p, *w, s;
2693
2694 s.key = *start;
2695
2696 if (bkey_cmp(end, &buf->start) <= 0 ||
2697 bkey_cmp(start, &buf->end) >= 0)
2698 return false;
2699
2700 spin_lock(&buf->lock);
2701 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2702
2703 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2704 p = w;
2705 w = RB_NEXT(w, node);
2706
2707 if (p->private)
2708 ret = true;
2709 else
2710 __bch_keybuf_del(buf, p);
2711 }
2712
2713 spin_unlock(&buf->lock);
2714 return ret;
2715}
2716
2717struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2718{
2719 struct keybuf_key *w;
2720
2721 spin_lock(&buf->lock);
2722
2723 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2724
2725 while (w && w->private)
2726 w = RB_NEXT(w, node);
2727
2728 if (w)
2729 w->private = ERR_PTR(-EINTR);
2730
2731 spin_unlock(&buf->lock);
2732 return w;
2733}
2734
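/*
 * Return the next unused key in the buffer, refilling it from the btree
 * (up to @end, filtered by @pred) whenever it runs dry; returns NULL
 * once the scan has reached @end and no keys remain.
 */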
2735struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2736 struct keybuf *buf,
2737 struct bkey *end,
2738 keybuf_pred_fn *pred)
2739{
2740 struct keybuf_key *ret;
2741
2742 while (1) {
2743 ret = bch_keybuf_next(buf);
2744 if (ret)
2745 break;
2746
2747 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2748 pr_debug("scan finished\n");
2749 break;
2750 }
2751
2752 bch_refill_keybuf(c, buf, end, pred);
2753 }
2754
2755 return ret;
2756}
2757
2758void bch_keybuf_init(struct keybuf *buf)
2759{
2760 buf->last_scanned = MAX_KEY;
2761 buf->keys = RB_ROOT;
2762
2763 spin_lock_init(&buf->lock);
2764 array_allocator_init(&buf->freelist);
2765}
2766
2767void bch_btree_exit(void)
2768{
2769 if (btree_io_wq)
2770 destroy_workqueue(btree_io_wq);
2771}
2772
2773int __init bch_btree_init(void)
2774{
2775 btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
2776 if (!btree_io_wq)
2777 return -ENOMEM;
2778
2779 return 0;
2780}
1/*
2 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
3 *
4 * Uses a block device as cache for other block devices; optimized for SSDs.
5 * All allocation is done in buckets, which should match the erase block size
6 * of the device.
7 *
8 * Buckets containing cached data are kept on a heap sorted by priority;
9 * bucket priority is increased on cache hit, and periodically all the buckets
10 * on the heap have their priority scaled down. This currently is just used as
11 * an LRU but in the future should allow for more intelligent heuristics.
12 *
13 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
14 * counter. Garbage collection is used to remove stale pointers.
15 *
16 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
17 * as keys are inserted we only sort the pages that have not yet been written.
18 * When garbage collection is run, we resort the entire node.
19 *
20 * All configuration is done via sysfs; see Documentation/bcache.txt.
21 */
22
23#include "bcache.h"
24#include "btree.h"
25#include "debug.h"
26#include "extents.h"
27
28#include <linux/slab.h>
29#include <linux/bitops.h>
30#include <linux/freezer.h>
31#include <linux/hash.h>
32#include <linux/kthread.h>
33#include <linux/prefetch.h>
34#include <linux/random.h>
35#include <linux/rcupdate.h>
36#include <trace/events/bcache.h>
37
38/*
39 * Todo:
40 * register_bcache: Return errors out to userspace correctly
41 *
42 * Writeback: don't undirty key until after a cache flush
43 *
44 * Create an iterator for key pointers
45 *
46 * On btree write error, mark bucket such that it won't be freed from the cache
47 *
48 * Journalling:
49 * Check for bad keys in replay
50 * Propagate barriers
51 * Refcount journal entries in journal_replay
52 *
53 * Garbage collection:
54 * Finish incremental gc
55 * Gc should free old UUIDs, data for invalid UUIDs
56 *
57 * Provide a way to list backing device UUIDs we have data cached for, and
58 * probably how long it's been since we've seen them, and a way to invalidate
59 * dirty data for devices that will never be attached again
60 *
61 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
62 * that based on that and how much dirty data we have we can keep writeback
63 * from being starved
64 *
65 * Add a tracepoint or somesuch to watch for writeback starvation
66 *
67 * When btree depth > 1 and splitting an interior node, we have to make sure
68 * alloc_bucket() cannot fail. This should be true but is not completely
69 * obvious.
70 *
71 * Plugging?
72 *
73 * If data write is less than hard sector size of ssd, round up offset in open
74 * bucket to the next whole sector
75 *
76 * Superblock needs to be fleshed out for multiple cache devices
77 *
78 * Add a sysfs tunable for the number of writeback IOs in flight
79 *
80 * Add a sysfs tunable for the number of open data buckets
81 *
82 * IO tracking: Can we track when one process is doing io on behalf of another?
83 * IO tracking: Don't use just an average, weigh more recent stuff higher
84 *
85 * Test module load/unload
86 */
87
88#define MAX_NEED_GC 64
89#define MAX_SAVE_PRIO 72
90
91#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
92
93#define PTR_HASH(c, k) \
94 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
95
96#define insert_lock(s, b) ((b)->level <= (s)->lock)
97
98/*
99 * These macros are for recursing down the btree - they handle the details of
100 * locking and looking up nodes in the cache for you. They're best treated as
101 * mere syntax when reading code that uses them.
102 *
103 * op->lock determines whether we take a read or a write lock at a given depth.
104 * If you've got a read lock and find that you need a write lock (i.e. you're
105 * going to have to split), set op->lock and return -EINTR; btree_root() will
106 * call you again and you'll have the correct lock.
107 */
108
109/**
110 * btree - recurse down the btree on a specified key
111 * @fn: function to call, which will be passed the child node
112 * @key: key to recurse on
113 * @b: parent btree node
114 * @op: pointer to struct btree_op
115 */
116#define btree(fn, key, b, op, ...) \
117({ \
118 int _r, l = (b)->level - 1; \
119 bool _w = l <= (op)->lock; \
120 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
121 _w, b); \
122 if (!IS_ERR(_child)) { \
123 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
124 rw_unlock(_w, _child); \
125 } else \
126 _r = PTR_ERR(_child); \
127 _r; \
128})
129
130/**
131 * btree_root - call a function on the root of the btree
132 * @fn: function to call, which will be passed the child node
133 * @c: cache set
134 * @op: pointer to struct btree_op
135 */
136#define btree_root(fn, c, op, ...) \
137({ \
138 int _r = -EINTR; \
139 do { \
140 struct btree *_b = (c)->root; \
141 bool _w = insert_lock(op, _b); \
142 rw_lock(_w, _b, _b->level); \
143 if (_b == (c)->root && \
144 _w == insert_lock(op, _b)) { \
145 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
146 } \
147 rw_unlock(_w, _b); \
148 bch_cannibalize_unlock(c); \
149 if (_r == -EINTR) \
150 schedule(); \
151 } while (_r == -EINTR); \
152 \
153 finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
154 _r; \
155})
156
157static inline struct bset *write_block(struct btree *b)
158{
159 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
160}
161
162static void bch_btree_init_next(struct btree *b)
163{
164 /* If not a leaf node, always sort */
165 if (b->level && b->keys.nsets)
166 bch_btree_sort(&b->keys, &b->c->sort);
167 else
168 bch_btree_sort_lazy(&b->keys, &b->c->sort);
169
170 if (b->written < btree_blocks(b))
171 bch_bset_init_next(&b->keys, write_block(b),
172 bset_magic(&b->c->sb));
173
174}
175
176/* Btree key manipulation */
177
178void bkey_put(struct cache_set *c, struct bkey *k)
179{
180 unsigned i;
181
182 for (i = 0; i < KEY_PTRS(k); i++)
183 if (ptr_available(c, k, i))
184 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
185}
186
187/* Btree IO */
188
189static uint64_t btree_csum_set(struct btree *b, struct bset *i)
190{
191 uint64_t crc = b->key.ptr[0];
192 void *data = (void *) i + 8, *end = bset_bkey_last(i);
193
194 crc = bch_crc64_update(crc, data, end - data);
195 return crc ^ 0xffffffffffffffffULL;
196}
197
198void bch_btree_node_read_done(struct btree *b)
199{
200 const char *err = "bad btree header";
201 struct bset *i = btree_bset_first(b);
202 struct btree_iter *iter;
203
204 iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
205 iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
206 iter->used = 0;
207
208#ifdef CONFIG_BCACHE_DEBUG
209 iter->b = &b->keys;
210#endif
211
212 if (!i->seq)
213 goto err;
214
215 for (;
216 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
217 i = write_block(b)) {
218 err = "unsupported bset version";
219 if (i->version > BCACHE_BSET_VERSION)
220 goto err;
221
222 err = "bad btree header";
223 if (b->written + set_blocks(i, block_bytes(b->c)) >
224 btree_blocks(b))
225 goto err;
226
227 err = "bad magic";
228 if (i->magic != bset_magic(&b->c->sb))
229 goto err;
230
231 err = "bad checksum";
232 switch (i->version) {
233 case 0:
234 if (i->csum != csum_set(i))
235 goto err;
236 break;
237 case BCACHE_BSET_VERSION:
238 if (i->csum != btree_csum_set(b, i))
239 goto err;
240 break;
241 }
242
243 err = "empty set";
244 if (i != b->keys.set[0].data && !i->keys)
245 goto err;
246
247 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
248
249 b->written += set_blocks(i, block_bytes(b->c));
250 }
251
252 err = "corrupted btree";
253 for (i = write_block(b);
254 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
255 i = ((void *) i) + block_bytes(b->c))
256 if (i->seq == b->keys.set[0].data->seq)
257 goto err;
258
259 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
260
261 i = b->keys.set[0].data;
262 err = "short btree key";
263 if (b->keys.set[0].size &&
264 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
265 goto err;
266
267 if (b->written < btree_blocks(b))
268 bch_bset_init_next(&b->keys, write_block(b),
269 bset_magic(&b->c->sb));
270out:
271 mempool_free(iter, b->c->fill_iter);
272 return;
273err:
274 set_btree_node_io_error(b);
275 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
276 err, PTR_BUCKET_NR(b->c, &b->key, 0),
277 bset_block_offset(b, i), i->keys);
278 goto out;
279}
280
281static void btree_node_read_endio(struct bio *bio)
282{
283 struct closure *cl = bio->bi_private;
284 closure_put(cl);
285}
286
287static void bch_btree_node_read(struct btree *b)
288{
289 uint64_t start_time = local_clock();
290 struct closure cl;
291 struct bio *bio;
292
293 trace_bcache_btree_read(b);
294
295 closure_init_stack(&cl);
296
297 bio = bch_bbio_alloc(b->c);
298 bio->bi_rw = REQ_META|READ_SYNC;
299 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
300 bio->bi_end_io = btree_node_read_endio;
301 bio->bi_private = &cl;
302
303 bch_bio_map(bio, b->keys.set[0].data);
304
305 bch_submit_bbio(bio, b->c, &b->key, 0);
306 closure_sync(&cl);
307
308 if (bio->bi_error)
309 set_btree_node_io_error(b);
310
311 bch_bbio_free(bio, b->c);
312
313 if (btree_node_io_error(b))
314 goto err;
315
316 bch_btree_node_read_done(b);
317 bch_time_stats_update(&b->c->btree_read_time, start_time);
318
319 return;
320err:
321 bch_cache_set_error(b->c, "io error reading bucket %zu",
322 PTR_BUCKET_NR(b->c, &b->key, 0));
323}
324
325static void btree_complete_write(struct btree *b, struct btree_write *w)
326{
327 if (w->prio_blocked &&
328 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
329 wake_up_allocators(b->c);
330
331 if (w->journal) {
332 atomic_dec_bug(w->journal);
333 __closure_wake_up(&b->c->journal.wait);
334 }
335
336 w->prio_blocked = 0;
337 w->journal = NULL;
338}
339
340static void btree_node_write_unlock(struct closure *cl)
341{
342 struct btree *b = container_of(cl, struct btree, io);
343
344 up(&b->io_mutex);
345}
346
347static void __btree_node_write_done(struct closure *cl)
348{
349 struct btree *b = container_of(cl, struct btree, io);
350 struct btree_write *w = btree_prev_write(b);
351
352 bch_bbio_free(b->bio, b->c);
353 b->bio = NULL;
354 btree_complete_write(b, w);
355
356 if (btree_node_dirty(b))
357 schedule_delayed_work(&b->work, 30 * HZ);
358
359 closure_return_with_destructor(cl, btree_node_write_unlock);
360}
361
362static void btree_node_write_done(struct closure *cl)
363{
364 struct btree *b = container_of(cl, struct btree, io);
365 struct bio_vec *bv;
366 int n;
367
368 bio_for_each_segment_all(bv, b->bio, n)
369 __free_page(bv->bv_page);
370
371 __btree_node_write_done(cl);
372}
373
374static void btree_node_write_endio(struct bio *bio)
375{
376 struct closure *cl = bio->bi_private;
377 struct btree *b = container_of(cl, struct btree, io);
378
379 if (bio->bi_error)
380 set_btree_node_io_error(b);
381
382 bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
383 closure_put(cl);
384}
385
386static void do_btree_node_write(struct btree *b)
387{
388 struct closure *cl = &b->io;
389 struct bset *i = btree_bset_last(b);
390 BKEY_PADDED(key) k;
391
392 i->version = BCACHE_BSET_VERSION;
393 i->csum = btree_csum_set(b, i);
394
395 BUG_ON(b->bio);
396 b->bio = bch_bbio_alloc(b->c);
397
398 b->bio->bi_end_io = btree_node_write_endio;
399 b->bio->bi_private = cl;
400 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
401 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
402 bch_bio_map(b->bio, i);
403
404 /*
405 * If we're appending to a leaf node, we don't technically need FUA -
406 * this write just needs to be persisted before the next journal write,
407 * which will be marked FLUSH|FUA.
408 *
409 * Similarly if we're writing a new btree root - the pointer is going to
410 * be in the next journal entry.
411 *
412 * But if we're writing a new btree node (that isn't a root) or
413 * appending to a non leaf btree node, we need either FUA or a flush
414 * when we write the parent with the new pointer. FUA is cheaper than a
415 * flush, and writes appending to leaf nodes aren't blocking anything so
416 * just make all btree node writes FUA to keep things sane.
417 */
418
419 bkey_copy(&k.key, &b->key);
420 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
421 bset_sector_offset(&b->keys, i));
422
423 if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
424 int j;
425 struct bio_vec *bv;
426 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
427
428 bio_for_each_segment_all(bv, b->bio, j)
429 memcpy(page_address(bv->bv_page),
430 base + j * PAGE_SIZE, PAGE_SIZE);
431
432 bch_submit_bbio(b->bio, b->c, &k.key, 0);
433
434 continue_at(cl, btree_node_write_done, NULL);
435 } else {
436 b->bio->bi_vcnt = 0;
437 bch_bio_map(b->bio, i);
438
439 bch_submit_bbio(b->bio, b->c, &k.key, 0);
440
441 closure_sync(cl);
442 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
443 }
444}
445
446void __bch_btree_node_write(struct btree *b, struct closure *parent)
447{
448 struct bset *i = btree_bset_last(b);
449
450 lockdep_assert_held(&b->write_lock);
451
452 trace_bcache_btree_write(b);
453
454 BUG_ON(current->bio_list);
455 BUG_ON(b->written >= btree_blocks(b));
456 BUG_ON(b->written && !i->keys);
457 BUG_ON(btree_bset_first(b)->seq != i->seq);
458 bch_check_keys(&b->keys, "writing");
459
460 cancel_delayed_work(&b->work);
461
462 /* If caller isn't waiting for write, parent refcount is cache set */
463 down(&b->io_mutex);
464 closure_init(&b->io, parent ?: &b->c->cl);
465
466 clear_bit(BTREE_NODE_dirty, &b->flags);
467 change_bit(BTREE_NODE_write_idx, &b->flags);
468
469 do_btree_node_write(b);
470
471 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
472 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
473
474 b->written += set_blocks(i, block_bytes(b->c));
475}
476
477void bch_btree_node_write(struct btree *b, struct closure *parent)
478{
479 unsigned nsets = b->keys.nsets;
480
481 lockdep_assert_held(&b->lock);
482
483 __bch_btree_node_write(b, parent);
484
485 /*
486 * do verify if there was more than one set initially (i.e. we did a
487 * sort) and we sorted down to a single set:
488 */
489 if (nsets && !b->keys.nsets)
490 bch_btree_verify(b);
491
492 bch_btree_init_next(b);
493}
494
495static void bch_btree_node_write_sync(struct btree *b)
496{
497 struct closure cl;
498
499 closure_init_stack(&cl);
500
501 mutex_lock(&b->write_lock);
502 bch_btree_node_write(b, &cl);
503 mutex_unlock(&b->write_lock);
504
505 closure_sync(&cl);
506}
507
508static void btree_node_write_work(struct work_struct *w)
509{
510 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
511
512 mutex_lock(&b->write_lock);
513 if (btree_node_dirty(b))
514 __bch_btree_node_write(b, NULL);
515 mutex_unlock(&b->write_lock);
516}
517
518static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
519{
520 struct bset *i = btree_bset_last(b);
521 struct btree_write *w = btree_current_write(b);
522
523 lockdep_assert_held(&b->write_lock);
524
525 BUG_ON(!b->written);
526 BUG_ON(!i->keys);
527
528 if (!btree_node_dirty(b))
529 schedule_delayed_work(&b->work, 30 * HZ);
530
531 set_btree_node_dirty(b);
532
533 if (journal_ref) {
534 if (w->journal &&
535 journal_pin_cmp(b->c, w->journal, journal_ref)) {
536 atomic_dec_bug(w->journal);
537 w->journal = NULL;
538 }
539
540 if (!w->journal) {
541 w->journal = journal_ref;
542 atomic_inc(w->journal);
543 }
544 }
545
546 /* Force write if set is too big */
547 if (set_bytes(i) > PAGE_SIZE - 48 &&
548 !current->bio_list)
549 bch_btree_node_write(b, NULL);
550}
551
552/*
553 * Btree in memory cache - allocation/freeing
554 * mca -> memory cache
555 */
556
557#define mca_reserve(c) (((c->root && c->root->level) \
558 ? c->root->level : 1) * 8 + 16)
559#define mca_can_free(c) \
560 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
561
562static void mca_data_free(struct btree *b)
563{
564 BUG_ON(b->io_mutex.count != 1);
565
566 bch_btree_keys_free(&b->keys);
567
568 b->c->btree_cache_used--;
569 list_move(&b->list, &b->c->btree_cache_freed);
570}
571
572static void mca_bucket_free(struct btree *b)
573{
574 BUG_ON(btree_node_dirty(b));
575
576 b->key.ptr[0] = 0;
577 hlist_del_init_rcu(&b->hash);
578 list_move(&b->list, &b->c->btree_cache_freeable);
579}
580
581static unsigned btree_order(struct bkey *k)
582{
583 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
584}
585
586static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
587{
588 if (!bch_btree_keys_alloc(&b->keys,
589 max_t(unsigned,
590 ilog2(b->c->btree_pages),
591 btree_order(k)),
592 gfp)) {
593 b->c->btree_cache_used++;
594 list_move(&b->list, &b->c->btree_cache);
595 } else {
596 list_move(&b->list, &b->c->btree_cache_freed);
597 }
598}
599
600static struct btree *mca_bucket_alloc(struct cache_set *c,
601 struct bkey *k, gfp_t gfp)
602{
603 struct btree *b = kzalloc(sizeof(struct btree), gfp);
604 if (!b)
605 return NULL;
606
607 init_rwsem(&b->lock);
608 lockdep_set_novalidate_class(&b->lock);
609 mutex_init(&b->write_lock);
610 lockdep_set_novalidate_class(&b->write_lock);
611 INIT_LIST_HEAD(&b->list);
612 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
613 b->c = c;
614 sema_init(&b->io_mutex, 1);
615
616 mca_data_alloc(b, k, gfp);
617 return b;
618}
619
620static int mca_reap(struct btree *b, unsigned min_order, bool flush)
621{
622 struct closure cl;
623
624 closure_init_stack(&cl);
625 lockdep_assert_held(&b->c->bucket_lock);
626
627 if (!down_write_trylock(&b->lock))
628 return -ENOMEM;
629
630 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
631
632 if (b->keys.page_order < min_order)
633 goto out_unlock;
634
635 if (!flush) {
636 if (btree_node_dirty(b))
637 goto out_unlock;
638
639 if (down_trylock(&b->io_mutex))
640 goto out_unlock;
641 up(&b->io_mutex);
642 }
643
644 mutex_lock(&b->write_lock);
645 if (btree_node_dirty(b))
646 __bch_btree_node_write(b, &cl);
647 mutex_unlock(&b->write_lock);
648
649 closure_sync(&cl);
650
651 /* wait for any in flight btree write */
652 down(&b->io_mutex);
653 up(&b->io_mutex);
654
655 return 0;
656out_unlock:
657 rw_unlock(true, b);
658 return -ENOMEM;
659}
660
661static unsigned long bch_mca_scan(struct shrinker *shrink,
662 struct shrink_control *sc)
663{
664 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
665 struct btree *b, *t;
666 unsigned long i, nr = sc->nr_to_scan;
667 unsigned long freed = 0;
668
669 if (c->shrinker_disabled)
670 return SHRINK_STOP;
671
672 if (c->btree_cache_alloc_lock)
673 return SHRINK_STOP;
674
675 /* Return -1 if we can't do anything right now */
676 if (sc->gfp_mask & __GFP_IO)
677 mutex_lock(&c->bucket_lock);
678 else if (!mutex_trylock(&c->bucket_lock))
679 return -1;
680
681 /*
682 * It's _really_ critical that we don't free too many btree nodes - we
683 * have to always leave ourselves a reserve. The reserve is how we
684 * guarantee that allocating memory for a new btree node can always
685 * succeed, so that inserting keys into the btree can always succeed and
686 * IO can always make forward progress:
687 */
688 nr /= c->btree_pages;
689 nr = min_t(unsigned long, nr, mca_can_free(c));
690
691 i = 0;
692 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
693 if (freed >= nr)
694 break;
695
696 if (++i > 3 &&
697 !mca_reap(b, 0, false)) {
698 mca_data_free(b);
699 rw_unlock(true, b);
700 freed++;
701 }
702 }
703
704 for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
705 if (list_empty(&c->btree_cache))
706 goto out;
707
708 b = list_first_entry(&c->btree_cache, struct btree, list);
709 list_rotate_left(&c->btree_cache);
710
711 if (!b->accessed &&
712 !mca_reap(b, 0, false)) {
713 mca_bucket_free(b);
714 mca_data_free(b);
715 rw_unlock(true, b);
716 freed++;
717 } else
718 b->accessed = 0;
719 }
720out:
721 mutex_unlock(&c->bucket_lock);
722 return freed;
723}
724
725static unsigned long bch_mca_count(struct shrinker *shrink,
726 struct shrink_control *sc)
727{
728 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
729
730 if (c->shrinker_disabled)
731 return 0;
732
733 if (c->btree_cache_alloc_lock)
734 return 0;
735
736 return mca_can_free(c) * c->btree_pages;
737}
738
739void bch_btree_cache_free(struct cache_set *c)
740{
741 struct btree *b;
742 struct closure cl;
743 closure_init_stack(&cl);
744
745 if (c->shrink.list.next)
746 unregister_shrinker(&c->shrink);
747
748 mutex_lock(&c->bucket_lock);
749
750#ifdef CONFIG_BCACHE_DEBUG
751 if (c->verify_data)
752 list_move(&c->verify_data->list, &c->btree_cache);
753
754 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
755#endif
756
757 list_splice(&c->btree_cache_freeable,
758 &c->btree_cache);
759
760 while (!list_empty(&c->btree_cache)) {
761 b = list_first_entry(&c->btree_cache, struct btree, list);
762
763 if (btree_node_dirty(b))
764 btree_complete_write(b, btree_current_write(b));
765 clear_bit(BTREE_NODE_dirty, &b->flags);
766
767 mca_data_free(b);
768 }
769
770 while (!list_empty(&c->btree_cache_freed)) {
771 b = list_first_entry(&c->btree_cache_freed,
772 struct btree, list);
773 list_del(&b->list);
774 cancel_delayed_work_sync(&b->work);
775 kfree(b);
776 }
777
778 mutex_unlock(&c->bucket_lock);
779}
780
781int bch_btree_cache_alloc(struct cache_set *c)
782{
783 unsigned i;
784
785 for (i = 0; i < mca_reserve(c); i++)
786 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
787 return -ENOMEM;
788
789 list_splice_init(&c->btree_cache,
790 &c->btree_cache_freeable);
791
792#ifdef CONFIG_BCACHE_DEBUG
793 mutex_init(&c->verify_lock);
794
795 c->verify_ondisk = (void *)
796 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
797
798 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
799
800 if (c->verify_data &&
801 c->verify_data->keys.set->data)
802 list_del_init(&c->verify_data->list);
803 else
804 c->verify_data = NULL;
805#endif
806
807 c->shrink.count_objects = bch_mca_count;
808 c->shrink.scan_objects = bch_mca_scan;
809 c->shrink.seeks = 4;
810 c->shrink.batch = c->btree_pages * 2;
811 register_shrinker(&c->shrink);
812
813 return 0;
814}
815
816/* Btree in memory cache - hash table */
817
818static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
819{
820 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
821}
822
823static struct btree *mca_find(struct cache_set *c, struct bkey *k)
824{
825 struct btree *b;
826
827 rcu_read_lock();
828 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
829 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
830 goto out;
831 b = NULL;
832out:
833 rcu_read_unlock();
834 return b;
835}
836
837static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
838{
839 struct task_struct *old;
840
841 old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
842 if (old && old != current) {
843 if (op)
844 prepare_to_wait(&c->btree_cache_wait, &op->wait,
845 TASK_UNINTERRUPTIBLE);
846 return -EINTR;
847 }
848
849 return 0;
850}
851
852static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
853 struct bkey *k)
854{
855 struct btree *b;
856
857 trace_bcache_btree_cache_cannibalize(c);
858
859 if (mca_cannibalize_lock(c, op))
860 return ERR_PTR(-EINTR);
861
862 list_for_each_entry_reverse(b, &c->btree_cache, list)
863 if (!mca_reap(b, btree_order(k), false))
864 return b;
865
866 list_for_each_entry_reverse(b, &c->btree_cache, list)
867 if (!mca_reap(b, btree_order(k), true))
868 return b;
869
870 WARN(1, "btree cache cannibalize failed\n");
871 return ERR_PTR(-ENOMEM);
872}
873
874/*
875 * We can only have one thread cannibalizing other cached btree nodes at a time,
876 * or we'll deadlock. We use an open coded mutex to ensure that, which a
877 * cannibalize_bucket() will take. This means every time we unlock the root of
878 * the btree, we need to release this lock if we have it held.
879 */
880static void bch_cannibalize_unlock(struct cache_set *c)
881{
882 if (c->btree_cache_alloc_lock == current) {
883 c->btree_cache_alloc_lock = NULL;
884 wake_up(&c->btree_cache_wait);
885 }
886}
887
888static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
889 struct bkey *k, int level)
890{
891 struct btree *b;
892
893 BUG_ON(current->bio_list);
894
895 lockdep_assert_held(&c->bucket_lock);
896
897 if (mca_find(c, k))
898 return NULL;
899
900 /* btree_free() doesn't free memory; it sticks the node on the end of
901 * the list. Check if there's any freed nodes there:
902 */
903 list_for_each_entry(b, &c->btree_cache_freeable, list)
904 if (!mca_reap(b, btree_order(k), false))
905 goto out;
906
907 /* We never free struct btree itself, just the memory that holds the on
908 * disk node. Check the freed list before allocating a new one:
909 */
910 list_for_each_entry(b, &c->btree_cache_freed, list)
911 if (!mca_reap(b, 0, false)) {
912 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
913 if (!b->keys.set[0].data)
914 goto err;
915 else
916 goto out;
917 }
918
919 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
920 if (!b)
921 goto err;
922
923 BUG_ON(!down_write_trylock(&b->lock));
924 if (!b->keys.set->data)
925 goto err;
926out:
927 BUG_ON(b->io_mutex.count != 1);
928
929 bkey_copy(&b->key, k);
930 list_move(&b->list, &c->btree_cache);
931 hlist_del_init_rcu(&b->hash);
932 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
933
934 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
935 b->parent = (void *) ~0UL;
936 b->flags = 0;
937 b->written = 0;
938 b->level = level;
939
940 if (!b->level)
941 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
942 &b->c->expensive_debug_checks);
943 else
944 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
945 &b->c->expensive_debug_checks);
946
947 return b;
948err:
949 if (b)
950 rw_unlock(true, b);
951
952 b = mca_cannibalize(c, op, k);
953 if (!IS_ERR(b))
954 goto out;
955
956 return b;
957}
958
959/**
960 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
961 * in from disk if necessary.
962 *
963 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
964 *
965 * The btree node will have either a read or a write lock held, depending on
966 * level and op->lock.
967 */
968struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
969 struct bkey *k, int level, bool write,
970 struct btree *parent)
971{
972 int i = 0;
973 struct btree *b;
974
975 BUG_ON(level < 0);
976retry:
977 b = mca_find(c, k);
978
979 if (!b) {
980 if (current->bio_list)
981 return ERR_PTR(-EAGAIN);
982
983 mutex_lock(&c->bucket_lock);
984 b = mca_alloc(c, op, k, level);
985 mutex_unlock(&c->bucket_lock);
986
987 if (!b)
988 goto retry;
989 if (IS_ERR(b))
990 return b;
991
992 bch_btree_node_read(b);
993
994 if (!write)
995 downgrade_write(&b->lock);
996 } else {
997 rw_lock(write, b, level);
998 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
999 rw_unlock(write, b);
1000 goto retry;
1001 }
1002 BUG_ON(b->level != level);
1003 }
1004
1005 b->parent = parent;
1006 b->accessed = 1;
1007
1008 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1009 prefetch(b->keys.set[i].tree);
1010 prefetch(b->keys.set[i].data);
1011 }
1012
1013 for (; i <= b->keys.nsets; i++)
1014 prefetch(b->keys.set[i].data);
1015
1016 if (btree_node_io_error(b)) {
1017 rw_unlock(write, b);
1018 return ERR_PTR(-EIO);
1019 }
1020
1021 BUG_ON(!b->written);
1022
1023 return b;
1024}
1025
1026static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1027{
1028 struct btree *b;
1029
1030 mutex_lock(&parent->c->bucket_lock);
1031 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1032 mutex_unlock(&parent->c->bucket_lock);
1033
1034 if (!IS_ERR_OR_NULL(b)) {
1035 b->parent = parent;
1036 bch_btree_node_read(b);
1037 rw_unlock(true, b);
1038 }
1039}
1040
1041/* Btree alloc */
1042
1043static void btree_node_free(struct btree *b)
1044{
1045 trace_bcache_btree_node_free(b);
1046
1047 BUG_ON(b == b->c->root);
1048
1049 mutex_lock(&b->write_lock);
1050
1051 if (btree_node_dirty(b))
1052 btree_complete_write(b, btree_current_write(b));
1053 clear_bit(BTREE_NODE_dirty, &b->flags);
1054
1055 mutex_unlock(&b->write_lock);
1056
1057 cancel_delayed_work(&b->work);
1058
1059 mutex_lock(&b->c->bucket_lock);
1060 bch_bucket_free(b->c, &b->key);
1061 mca_bucket_free(b);
1062 mutex_unlock(&b->c->bucket_lock);
1063}
1064
1065struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1066 int level, bool wait,
1067 struct btree *parent)
1068{
1069 BKEY_PADDED(key) k;
1070 struct btree *b = ERR_PTR(-EAGAIN);
1071
1072 mutex_lock(&c->bucket_lock);
1073retry:
1074 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1075 goto err;
1076
1077 bkey_put(c, &k.key);
1078 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1079
1080 b = mca_alloc(c, op, &k.key, level);
1081 if (IS_ERR(b))
1082 goto err_free;
1083
1084 if (!b) {
1085 cache_bug(c,
1086 "Tried to allocate bucket that was in btree cache");
1087 goto retry;
1088 }
1089
1090 b->accessed = 1;
1091 b->parent = parent;
1092 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1093
1094 mutex_unlock(&c->bucket_lock);
1095
1096 trace_bcache_btree_node_alloc(b);
1097 return b;
1098err_free:
1099 bch_bucket_free(c, &k.key);
1100err:
1101 mutex_unlock(&c->bucket_lock);
1102
1103 trace_bcache_btree_node_alloc_fail(c);
1104 return b;
1105}
1106
1107static struct btree *bch_btree_node_alloc(struct cache_set *c,
1108 struct btree_op *op, int level,
1109 struct btree *parent)
1110{
1111 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1112}
1113
1114static struct btree *btree_node_alloc_replacement(struct btree *b,
1115 struct btree_op *op)
1116{
1117 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1118 if (!IS_ERR_OR_NULL(n)) {
1119 mutex_lock(&n->write_lock);
1120 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1121 bkey_copy_key(&n->key, &b->key);
1122 mutex_unlock(&n->write_lock);
1123 }
1124
1125 return n;
1126}
1127
1128static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1129{
1130 unsigned i;
1131
1132 mutex_lock(&b->c->bucket_lock);
1133
1134 atomic_inc(&b->c->prio_blocked);
1135
1136 bkey_copy(k, &b->key);
1137 bkey_copy_key(k, &ZERO_KEY);
1138
1139 for (i = 0; i < KEY_PTRS(k); i++)
1140 SET_PTR_GEN(k, i,
1141 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1142 PTR_BUCKET(b->c, &b->key, i)));
1143
1144 mutex_unlock(&b->c->bucket_lock);
1145}
1146
1147static int btree_check_reserve(struct btree *b, struct btree_op *op)
1148{
1149 struct cache_set *c = b->c;
1150 struct cache *ca;
1151 unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
1152
1153 mutex_lock(&c->bucket_lock);
1154
1155 for_each_cache(ca, c, i)
1156 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1157 if (op)
1158 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1159 TASK_UNINTERRUPTIBLE);
1160 mutex_unlock(&c->bucket_lock);
1161 return -EINTR;
1162 }
1163
1164 mutex_unlock(&c->bucket_lock);
1165
1166 return mca_cannibalize_lock(b->c, op);
1167}
1168
1169/* Garbage collection */
1170
1171static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1172 struct bkey *k)
1173{
1174 uint8_t stale = 0;
1175 unsigned i;
1176 struct bucket *g;
1177
1178 /*
1179 * ptr_invalid() can't return true for the keys that mark btree nodes as
1180 * freed, but since ptr_bad() returns true we'll never actually use them
1181 * for anything and thus we don't want mark their pointers here
1182 */
1183 if (!bkey_cmp(k, &ZERO_KEY))
1184 return stale;
1185
1186 for (i = 0; i < KEY_PTRS(k); i++) {
1187 if (!ptr_available(c, k, i))
1188 continue;
1189
1190 g = PTR_BUCKET(c, k, i);
1191
1192 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1193 g->last_gc = PTR_GEN(k, i);
1194
1195 if (ptr_stale(c, k, i)) {
1196 stale = max(stale, ptr_stale(c, k, i));
1197 continue;
1198 }
1199
1200 cache_bug_on(GC_MARK(g) &&
1201 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1202 c, "inconsistent ptrs: mark = %llu, level = %i",
1203 GC_MARK(g), level);
1204
1205 if (level)
1206 SET_GC_MARK(g, GC_MARK_METADATA);
1207 else if (KEY_DIRTY(k))
1208 SET_GC_MARK(g, GC_MARK_DIRTY);
1209 else if (!GC_MARK(g))
1210 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1211
1212 /* guard against overflow */
1213 SET_GC_SECTORS_USED(g, min_t(unsigned,
1214 GC_SECTORS_USED(g) + KEY_SIZE(k),
1215 MAX_GC_SECTORS_USED));
1216
1217 BUG_ON(!GC_SECTORS_USED(g));
1218 }
1219
1220 return stale;
1221}
1222
1223#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1224
1225void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1226{
1227 unsigned i;
1228
1229 for (i = 0; i < KEY_PTRS(k); i++)
1230 if (ptr_available(c, k, i) &&
1231 !ptr_stale(c, k, i)) {
1232 struct bucket *b = PTR_BUCKET(c, k, i);
1233
1234 b->gen = PTR_GEN(k, i);
1235
1236 if (level && bkey_cmp(k, &ZERO_KEY))
1237 b->prio = BTREE_PRIO;
1238 else if (!level && b->prio == BTREE_PRIO)
1239 b->prio = INITIAL_PRIO;
1240 }
1241
1242 __bch_btree_mark_key(c, level, k);
1243}
1244
1245static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1246{
1247 uint8_t stale = 0;
1248 unsigned keys = 0, good_keys = 0;
1249 struct bkey *k;
1250 struct btree_iter iter;
1251 struct bset_tree *t;
1252
1253 gc->nodes++;
1254
1255 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1256 stale = max(stale, btree_mark_key(b, k));
1257 keys++;
1258
1259 if (bch_ptr_bad(&b->keys, k))
1260 continue;
1261
1262 gc->key_bytes += bkey_u64s(k);
1263 gc->nkeys++;
1264 good_keys++;
1265
1266 gc->data += KEY_SIZE(k);
1267 }
1268
1269 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1270 btree_bug_on(t->size &&
1271 bset_written(&b->keys, t) &&
1272 bkey_cmp(&b->key, &t->end) < 0,
1273 b, "found short btree key in gc");
1274
1275 if (b->c->gc_always_rewrite)
1276 return true;
1277
1278 if (stale > 10)
1279 return true;
1280
1281 if ((keys - good_keys) * 2 > keys)
1282 return true;
1283
1284 return false;
1285}
1286
1287#define GC_MERGE_NODES 4U
1288
1289struct gc_merge_info {
1290 struct btree *b;
1291 unsigned keys;
1292};
1293
1294static int bch_btree_insert_node(struct btree *, struct btree_op *,
1295 struct keylist *, atomic_t *, struct bkey *);
1296
1297static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1298 struct gc_stat *gc, struct gc_merge_info *r)
1299{
1300 unsigned i, nodes = 0, keys = 0, blocks;
1301 struct btree *new_nodes[GC_MERGE_NODES];
1302 struct keylist keylist;
1303 struct closure cl;
1304 struct bkey *k;
1305
1306 bch_keylist_init(&keylist);
1307
1308 if (btree_check_reserve(b, NULL))
1309 return 0;
1310
1311 memset(new_nodes, 0, sizeof(new_nodes));
1312 closure_init_stack(&cl);
1313
1314 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1315 keys += r[nodes++].keys;
1316
1317 blocks = btree_default_blocks(b->c) * 2 / 3;
1318
1319 if (nodes < 2 ||
1320 __set_blocks(b->keys.set[0].data, keys,
1321 block_bytes(b->c)) > blocks * (nodes - 1))
1322 return 0;
1323
1324 for (i = 0; i < nodes; i++) {
1325 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1326 if (IS_ERR_OR_NULL(new_nodes[i]))
1327 goto out_nocoalesce;
1328 }
1329
1330 /*
1331 * We have to check the reserve here, after we've allocated our new
1332 * nodes, to make sure the insert below will succeed - we also check
1333 * before as an optimization to potentially avoid a bunch of expensive
1334 * allocs/sorts
1335 */
1336 if (btree_check_reserve(b, NULL))
1337 goto out_nocoalesce;
1338
1339 for (i = 0; i < nodes; i++)
1340 mutex_lock(&new_nodes[i]->write_lock);
1341
1342 for (i = nodes - 1; i > 0; --i) {
1343 struct bset *n1 = btree_bset_first(new_nodes[i]);
1344 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1345 struct bkey *k, *last = NULL;
1346
1347 keys = 0;
1348
1349 if (i > 1) {
1350 for (k = n2->start;
1351 k < bset_bkey_last(n2);
1352 k = bkey_next(k)) {
1353 if (__set_blocks(n1, n1->keys + keys +
1354 bkey_u64s(k),
1355 block_bytes(b->c)) > blocks)
1356 break;
1357
1358 last = k;
1359 keys += bkey_u64s(k);
1360 }
1361 } else {
1362 /*
1363 * Last node we're not getting rid of - we're getting
1364 * rid of the node at r[0]. Have to try and fit all of
1365 * the remaining keys into this node; we can't ensure
1366 * they will always fit due to rounding and variable
1367 * length keys (shouldn't be possible in practice,
1368 * though)
1369 */
1370 if (__set_blocks(n1, n1->keys + n2->keys,
1371 block_bytes(b->c)) >
1372 btree_blocks(new_nodes[i]))
1373 goto out_nocoalesce;
1374
1375 keys = n2->keys;
1376 /* Take the key of the node we're getting rid of */
1377 last = &r->b->key;
1378 }
1379
1380 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1381 btree_blocks(new_nodes[i]));
1382
1383 if (last)
1384 bkey_copy_key(&new_nodes[i]->key, last);
1385
1386 memcpy(bset_bkey_last(n1),
1387 n2->start,
1388 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1389
1390 n1->keys += keys;
1391 r[i].keys = n1->keys;
1392
1393 memmove(n2->start,
1394 bset_bkey_idx(n2, keys),
1395 (void *) bset_bkey_last(n2) -
1396 (void *) bset_bkey_idx(n2, keys));
1397
1398 n2->keys -= keys;
1399
1400 if (__bch_keylist_realloc(&keylist,
1401 bkey_u64s(&new_nodes[i]->key)))
1402 goto out_nocoalesce;
1403
1404 bch_btree_node_write(new_nodes[i], &cl);
1405 bch_keylist_add(&keylist, &new_nodes[i]->key);
1406 }
1407
1408 for (i = 0; i < nodes; i++)
1409 mutex_unlock(&new_nodes[i]->write_lock);
1410
1411 closure_sync(&cl);
1412
1413 /* We emptied out this node */
1414 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1415 btree_node_free(new_nodes[0]);
1416 rw_unlock(true, new_nodes[0]);
1417 new_nodes[0] = NULL;
1418
1419 for (i = 0; i < nodes; i++) {
1420 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1421 goto out_nocoalesce;
1422
1423 make_btree_freeing_key(r[i].b, keylist.top);
1424 bch_keylist_push(&keylist);
1425 }
1426
1427 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1428 BUG_ON(!bch_keylist_empty(&keylist));
1429
1430 for (i = 0; i < nodes; i++) {
1431 btree_node_free(r[i].b);
1432 rw_unlock(true, r[i].b);
1433
1434 r[i].b = new_nodes[i];
1435 }
1436
1437 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1438 r[nodes - 1].b = ERR_PTR(-EINTR);
1439
1440 trace_bcache_btree_gc_coalesce(nodes);
1441 gc->nodes--;
1442
1443 bch_keylist_free(&keylist);
1444
1445 /* Invalidated our iterator */
1446 return -EINTR;
1447
1448out_nocoalesce:
1449 closure_sync(&cl);
1450 bch_keylist_free(&keylist);
1451
1452 while ((k = bch_keylist_pop(&keylist)))
1453 if (!bkey_cmp(k, &ZERO_KEY))
1454 atomic_dec(&b->c->prio_blocked);
1455
1456 for (i = 0; i < nodes; i++)
1457 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1458 btree_node_free(new_nodes[i]);
1459 rw_unlock(true, new_nodes[i]);
1460 }
1461 return 0;
1462}
1463
1464static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1465 struct btree *replace)
1466{
1467 struct keylist keys;
1468 struct btree *n;
1469
1470 if (btree_check_reserve(b, NULL))
1471 return 0;
1472
1473 n = btree_node_alloc_replacement(replace, NULL);
1474
1475 /* recheck reserve after allocating replacement node */
1476 if (btree_check_reserve(b, NULL)) {
1477 btree_node_free(n);
1478 rw_unlock(true, n);
1479 return 0;
1480 }
1481
1482 bch_btree_node_write_sync(n);
1483
1484 bch_keylist_init(&keys);
1485 bch_keylist_add(&keys, &n->key);
1486
1487 make_btree_freeing_key(replace, keys.top);
1488 bch_keylist_push(&keys);
1489
1490 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1491 BUG_ON(!bch_keylist_empty(&keys));
1492
1493 btree_node_free(replace);
1494 rw_unlock(true, n);
1495
1496 /* Invalidated our iterator */
1497 return -EINTR;
1498}
1499
1500static unsigned btree_gc_count_keys(struct btree *b)
1501{
1502 struct bkey *k;
1503 struct btree_iter iter;
1504 unsigned ret = 0;
1505
1506 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1507 ret += bkey_u64s(k);
1508
1509 return ret;
1510}
1511
1512static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1513 struct closure *writes, struct gc_stat *gc)
1514{
1515 int ret = 0;
1516 bool should_rewrite;
1517 struct bkey *k;
1518 struct btree_iter iter;
1519 struct gc_merge_info r[GC_MERGE_NODES];
1520 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1521
1522 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1523
1524 for (i = r; i < r + ARRAY_SIZE(r); i++)
1525 i->b = ERR_PTR(-EINTR);
1526
1527 while (1) {
1528 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1529 if (k) {
1530 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1531 true, b);
1532 if (IS_ERR(r->b)) {
1533 ret = PTR_ERR(r->b);
1534 break;
1535 }
1536
1537 r->keys = btree_gc_count_keys(r->b);
1538
1539 ret = btree_gc_coalesce(b, op, gc, r);
1540 if (ret)
1541 break;
1542 }
1543
1544 if (!last->b)
1545 break;
1546
1547 if (!IS_ERR(last->b)) {
1548 should_rewrite = btree_gc_mark_node(last->b, gc);
1549 if (should_rewrite) {
1550 ret = btree_gc_rewrite_node(b, op, last->b);
1551 if (ret)
1552 break;
1553 }
1554
1555 if (last->b->level) {
1556 ret = btree_gc_recurse(last->b, op, writes, gc);
1557 if (ret)
1558 break;
1559 }
1560
1561 bkey_copy_key(&b->c->gc_done, &last->b->key);
1562
1563 /*
1564 * Must flush leaf nodes before gc ends, since replace
1565 * operations aren't journalled
1566 */
1567 mutex_lock(&last->b->write_lock);
1568 if (btree_node_dirty(last->b))
1569 bch_btree_node_write(last->b, writes);
1570 mutex_unlock(&last->b->write_lock);
1571 rw_unlock(true, last->b);
1572 }
1573
1574 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1575 r->b = NULL;
1576
1577 if (need_resched()) {
1578 ret = -EAGAIN;
1579 break;
1580 }
1581 }
1582
1583 for (i = r; i < r + ARRAY_SIZE(r); i++)
1584 if (!IS_ERR_OR_NULL(i->b)) {
1585 mutex_lock(&i->b->write_lock);
1586 if (btree_node_dirty(i->b))
1587 bch_btree_node_write(i->b, writes);
1588 mutex_unlock(&i->b->write_lock);
1589 rw_unlock(true, i->b);
1590 }
1591
1592 return ret;
1593}
1594
1595static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1596 struct closure *writes, struct gc_stat *gc)
1597{
1598 struct btree *n = NULL;
1599 int ret = 0;
1600 bool should_rewrite;
1601
1602 should_rewrite = btree_gc_mark_node(b, gc);
1603 if (should_rewrite) {
1604 n = btree_node_alloc_replacement(b, NULL);
1605
1606 if (!IS_ERR_OR_NULL(n)) {
1607 bch_btree_node_write_sync(n);
1608
1609 bch_btree_set_root(n);
1610 btree_node_free(b);
1611 rw_unlock(true, n);
1612
1613 return -EINTR;
1614 }
1615 }
1616
1617 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1618
1619 if (b->level) {
1620 ret = btree_gc_recurse(b, op, writes, gc);
1621 if (ret)
1622 return ret;
1623 }
1624
1625 bkey_copy_key(&b->c->gc_done, &b->key);
1626
1627 return ret;
1628}
1629
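/*
 * Prepare a gc pass: under bucket_lock, invalidate the existing marks and
 * reset per-bucket state so the btree walk can recompute mark and
 * sectors-used counts from scratch.
 */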
1630static void btree_gc_start(struct cache_set *c)
1631{
1632 struct cache *ca;
1633 struct bucket *b;
1634 unsigned i;
1635
1636 if (!c->gc_mark_valid)
1637 return;
1638
1639 mutex_lock(&c->bucket_lock);
1640
1641 c->gc_mark_valid = 0;
1642 c->gc_done = ZERO_KEY;
1643
1644 for_each_cache(ca, c, i)
1645 for_each_bucket(b, ca) {
1646 b->last_gc = b->gen;
1647 if (!atomic_read(&b->pin)) {
1648 SET_GC_MARK(b, 0);
1649 SET_GC_SECTORS_USED(b, 0);
1650 }
1651 }
1652
1653 mutex_unlock(&c->bucket_lock);
1654}
1655
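/*
 * Finish a gc pass: re-mark the buckets holding metadata (uuids, journal
 * buckets listed in the superblock, prio buckets) and the buckets pointed
 * to by dirty writeback keys, then count how many buckets are unmarked or
 * reclaimable.  Returns the number of available buckets, which the caller
 * uses for the in_use statistic.
 */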
1656static size_t bch_btree_gc_finish(struct cache_set *c)
1657{
1658 size_t available = 0;
1659 struct bucket *b;
1660 struct cache *ca;
1661 unsigned i;
1662
1663 mutex_lock(&c->bucket_lock);
1664
1665 set_gc_sectors(c);
1666 c->gc_mark_valid = 1;
1667 c->need_gc = 0;
1668
1669 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1670 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1671 GC_MARK_METADATA);
1672
1673 /* don't reclaim buckets to which writeback keys point */
1674 rcu_read_lock();
1675 for (i = 0; i < c->nr_uuids; i++) {
1676 struct bcache_device *d = c->devices[i];
1677 struct cached_dev *dc;
1678 struct keybuf_key *w, *n;
1679 unsigned j;
1680
1681 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1682 continue;
1683 dc = container_of(d, struct cached_dev, disk);
1684
1685 spin_lock(&dc->writeback_keys.lock);
1686 rbtree_postorder_for_each_entry_safe(w, n,
1687 &dc->writeback_keys.keys, node)
1688 for (j = 0; j < KEY_PTRS(&w->key); j++)
1689 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1690 GC_MARK_DIRTY);
1691 spin_unlock(&dc->writeback_keys.lock);
1692 }
1693 rcu_read_unlock();
1694
1695 for_each_cache(ca, c, i) {
1696 uint64_t *i;
1697
1698 ca->invalidate_needs_gc = 0;
1699
1700 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1701 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1702
1703 for (i = ca->prio_buckets;
1704 i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1705 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1706
1707 for_each_bucket(b, ca) {
1708 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1709
1710 if (atomic_read(&b->pin))
1711 continue;
1712
1713 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1714
1715 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1716 available++;
1717 }
1718 }
1719
1720 mutex_unlock(&c->bucket_lock);
1721 return available;
1722}
1723
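/*
 * Top level gc entry point: reset per-bucket state, walk the btree from the
 * root (retrying if the walk is interrupted), recompute bucket marks, wake
 * the allocators and kick off moving gc.
 */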
1724static void bch_btree_gc(struct cache_set *c)
1725{
1726 int ret;
1727 unsigned long available;
1728 struct gc_stat stats;
1729 struct closure writes;
1730 struct btree_op op;
1731 uint64_t start_time = local_clock();
1732
1733 trace_bcache_gc_start(c);
1734
1735 memset(&stats, 0, sizeof(struct gc_stat));
1736 closure_init_stack(&writes);
1737 bch_btree_op_init(&op, SHRT_MAX);
1738
1739 btree_gc_start(c);
1740
1741 do {
1742 ret = btree_root(gc_root, c, &op, &writes, &stats);
1743 closure_sync(&writes);
1744 cond_resched();
1745
1746 if (ret && ret != -EAGAIN)
1747 pr_warn("gc failed!");
1748 } while (ret);
1749
1750 available = bch_btree_gc_finish(c);
1751 wake_up_allocators(c);
1752
1753 bch_time_stats_update(&c->btree_gc_time, start_time);
1754
1755 stats.key_bytes *= sizeof(uint64_t);
1756 stats.data <<= 9;
1757 stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
1758 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1759
1760 trace_bcache_gc_end(c);
1761
1762 bch_moving_gc(c);
1763}
1764
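/*
 * gc runs from its own kthread: after each pass it reruns immediately if any
 * cache still has invalidate_needs_gc set, otherwise it sleeps until woken.
 */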
1765static int bch_gc_thread(void *arg)
1766{
1767 struct cache_set *c = arg;
1768 struct cache *ca;
1769 unsigned i;
1770
1771 while (1) {
1772again:
1773 bch_btree_gc(c);
1774
1775 set_current_state(TASK_INTERRUPTIBLE);
1776 if (kthread_should_stop())
1777 break;
1778
1779 mutex_lock(&c->bucket_lock);
1780
1781 for_each_cache(ca, c, i)
1782 if (ca->invalidate_needs_gc) {
1783 mutex_unlock(&c->bucket_lock);
1784 set_current_state(TASK_RUNNING);
1785 goto again;
1786 }
1787
1788 mutex_unlock(&c->bucket_lock);
1789
1790 try_to_freeze();
1791 schedule();
1792 }
1793
1794 return 0;
1795}
1796
1797int bch_gc_thread_start(struct cache_set *c)
1798{
1799 c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
1800 if (IS_ERR(c->gc_thread))
1801 return PTR_ERR(c->gc_thread);
1802
1803 set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
1804 return 0;
1805}
1806
1807/* Initial partial gc */
1808
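/*
 * At registration time the whole btree is walked once, marking every valid
 * key so that bucket gens and sector counts are consistent before the cache
 * starts servicing requests.  Child nodes are prefetched one step ahead of
 * the recursion.
 */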
1809static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1810{
1811 int ret = 0;
1812 struct bkey *k, *p = NULL;
1813 struct btree_iter iter;
1814
1815 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1816 bch_initial_mark_key(b->c, b->level, k);
1817
1818 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1819
1820 if (b->level) {
1821 bch_btree_iter_init(&b->keys, &iter, NULL);
1822
1823 do {
1824 k = bch_btree_iter_next_filter(&iter, &b->keys,
1825 bch_ptr_bad);
1826 if (k)
1827 btree_node_prefetch(b, k);
1828
1829 if (p)
1830 ret = btree(check_recurse, p, b, op);
1831
1832 p = k;
1833 } while (p && !ret);
1834 }
1835
1836 return ret;
1837}
1838
1839int bch_btree_check(struct cache_set *c)
1840{
1841 struct btree_op op;
1842
1843 bch_btree_op_init(&op, SHRT_MAX);
1844
1845 return btree_root(check_recurse, c, &op);
1846}
1847
1848void bch_initial_gc_finish(struct cache_set *c)
1849{
1850 struct cache *ca;
1851 struct bucket *b;
1852 unsigned i;
1853
1854 bch_btree_gc_finish(c);
1855
1856 mutex_lock(&c->bucket_lock);
1857
1858 /*
1859 * We need to put some unused buckets directly on the prio freelist in
1860 * order to get the allocator thread started - it needs freed buckets in
1861 * order to rewrite the prios and gens, and it needs to rewrite prios
1862 * and gens in order to free buckets.
1863 *
1864	 * This is only safe for buckets that contain no live data, and there
1865	 * should always be some of those.
1866 */
1867 for_each_cache(ca, c, i) {
1868 for_each_bucket(b, ca) {
1869 if (fifo_full(&ca->free[RESERVE_PRIO]))
1870 break;
1871
1872 if (bch_can_invalidate_bucket(ca, b) &&
1873 !GC_MARK(b)) {
1874 __bch_invalidate_one_bucket(ca, b);
1875 fifo_push(&ca->free[RESERVE_PRIO],
1876 b - ca->buckets);
1877 }
1878 }
1879 }
1880
1881 mutex_unlock(&c->bucket_lock);
1882}
1883
1884/* Btree insertion */
1885
1886static bool btree_insert_key(struct btree *b, struct bkey *k,
1887 struct bkey *replace_key)
1888{
1889 unsigned status;
1890
1891 BUG_ON(bkey_cmp(k, &b->key) > 0);
1892
1893 status = bch_btree_insert_key(&b->keys, k, replace_key);
1894 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1895 bch_check_keys(&b->keys, "%u for %s", status,
1896 replace_key ? "replace" : "insert");
1897
1898 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1899 status);
1900 return true;
1901 } else
1902 return false;
1903}
1904
1905static size_t insert_u64s_remaining(struct btree *b)
1906{
1907 long ret = bch_btree_keys_u64s_remaining(&b->keys);
1908
1909 /*
1910 * Might land in the middle of an existing extent and have to split it
1911 */
1912 if (b->keys.ops->is_extents)
1913 ret -= KEY_MAX_U64S;
1914
1915 return max(ret, 0L);
1916}
1917
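/*
 * Insert as many keys from the keylist as fit in this node.  An extent that
 * straddles the end of the node is split: the part up to b->key is inserted
 * here and the remainder is left on the keylist for the next node.  If
 * nothing at all was inserted, op->insert_collision is set.
 */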
1918static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1919 struct keylist *insert_keys,
1920 struct bkey *replace_key)
1921{
1922 bool ret = false;
1923 int oldsize = bch_count_data(&b->keys);
1924
1925 while (!bch_keylist_empty(insert_keys)) {
1926 struct bkey *k = insert_keys->keys;
1927
1928 if (bkey_u64s(k) > insert_u64s_remaining(b))
1929 break;
1930
1931 if (bkey_cmp(k, &b->key) <= 0) {
1932 if (!b->level)
1933 bkey_put(b->c, k);
1934
1935 ret |= btree_insert_key(b, k, replace_key);
1936 bch_keylist_pop_front(insert_keys);
1937 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1938 BKEY_PADDED(key) temp;
1939 bkey_copy(&temp.key, insert_keys->keys);
1940
1941 bch_cut_back(&b->key, &temp.key);
1942 bch_cut_front(&b->key, insert_keys->keys);
1943
1944 ret |= btree_insert_key(b, &temp.key, replace_key);
1945 break;
1946 } else {
1947 break;
1948 }
1949 }
1950
1951 if (!ret)
1952 op->insert_collision = true;
1953
1954 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1955
1956 BUG_ON(bch_count_data(&b->keys) < oldsize);
1957 return ret;
1958}
1959
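/*
 * Replace a node that has filled up.  If the replacement would be more than
 * about 4/5 full it is split in two (roughly the first 3/5 of the keys stay
 * in n1, the rest go to n2); otherwise the node is simply rewritten.  If the
 * node being split was the root, a new root n3 is allocated one level up.
 * Pointers to the new node(s) are then inserted into the parent.
 */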
1960static int btree_split(struct btree *b, struct btree_op *op,
1961 struct keylist *insert_keys,
1962 struct bkey *replace_key)
1963{
1964 bool split;
1965 struct btree *n1, *n2 = NULL, *n3 = NULL;
1966 uint64_t start_time = local_clock();
1967 struct closure cl;
1968 struct keylist parent_keys;
1969
1970 closure_init_stack(&cl);
1971 bch_keylist_init(&parent_keys);
1972
1973 if (btree_check_reserve(b, op)) {
1974 if (!b->level)
1975 return -EINTR;
1976 else
1977 WARN(1, "insufficient reserve for split\n");
1978 }
1979
1980 n1 = btree_node_alloc_replacement(b, op);
1981 if (IS_ERR(n1))
1982 goto err;
1983
1984 split = set_blocks(btree_bset_first(n1),
1985 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
1986
1987 if (split) {
1988 unsigned keys = 0;
1989
1990 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
1991
1992 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1993 if (IS_ERR(n2))
1994 goto err_free1;
1995
1996 if (!b->parent) {
1997 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
1998 if (IS_ERR(n3))
1999 goto err_free2;
2000 }
2001
2002 mutex_lock(&n1->write_lock);
2003 mutex_lock(&n2->write_lock);
2004
2005 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2006
2007 /*
2008 * Has to be a linear search because we don't have an auxiliary
2009 * search tree yet
2010 */
2011
2012 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2013 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2014 keys));
2015
2016 bkey_copy_key(&n1->key,
2017 bset_bkey_idx(btree_bset_first(n1), keys));
2018 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2019
2020 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2021 btree_bset_first(n1)->keys = keys;
2022
2023 memcpy(btree_bset_first(n2)->start,
2024 bset_bkey_last(btree_bset_first(n1)),
2025 btree_bset_first(n2)->keys * sizeof(uint64_t));
2026
2027 bkey_copy_key(&n2->key, &b->key);
2028
2029 bch_keylist_add(&parent_keys, &n2->key);
2030 bch_btree_node_write(n2, &cl);
2031 mutex_unlock(&n2->write_lock);
2032 rw_unlock(true, n2);
2033 } else {
2034 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2035
2036 mutex_lock(&n1->write_lock);
2037 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2038 }
2039
2040 bch_keylist_add(&parent_keys, &n1->key);
2041 bch_btree_node_write(n1, &cl);
2042 mutex_unlock(&n1->write_lock);
2043
2044 if (n3) {
2045 /* Depth increases, make a new root */
2046 mutex_lock(&n3->write_lock);
2047 bkey_copy_key(&n3->key, &MAX_KEY);
2048 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2049 bch_btree_node_write(n3, &cl);
2050 mutex_unlock(&n3->write_lock);
2051
2052 closure_sync(&cl);
2053 bch_btree_set_root(n3);
2054 rw_unlock(true, n3);
2055 } else if (!b->parent) {
2056 /* Root filled up but didn't need to be split */
2057 closure_sync(&cl);
2058 bch_btree_set_root(n1);
2059 } else {
2060 /* Split a non root node */
2061 closure_sync(&cl);
2062 make_btree_freeing_key(b, parent_keys.top);
2063 bch_keylist_push(&parent_keys);
2064
2065 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2066 BUG_ON(!bch_keylist_empty(&parent_keys));
2067 }
2068
2069 btree_node_free(b);
2070 rw_unlock(true, n1);
2071
2072 bch_time_stats_update(&b->c->btree_split_time, start_time);
2073
2074 return 0;
2075err_free2:
2076 bkey_put(b->c, &n2->key);
2077 btree_node_free(n2);
2078 rw_unlock(true, n2);
2079err_free1:
2080 bkey_put(b->c, &n1->key);
2081 btree_node_free(n1);
2082 rw_unlock(true, n1);
2083err:
2084 WARN(1, "bcache: btree split failed (level %u)", b->level);
2085
2086 if (n3 == ERR_PTR(-EAGAIN) ||
2087 n2 == ERR_PTR(-EAGAIN) ||
2088 n1 == ERR_PTR(-EAGAIN))
2089 return -EAGAIN;
2090
2091 return -ENOMEM;
2092}
2093
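/*
 * Insert keys into a single btree node under its write lock.  If they don't
 * fit, the node has to be split: -EAGAIN is returned when current->bio_list
 * is set (we're on the IO submission path and must not block on the split),
 * -EINTR when the caller has to retake locks further up the tree, and 0 once
 * every key belonging to this node has been inserted.
 */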
2094static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2095 struct keylist *insert_keys,
2096 atomic_t *journal_ref,
2097 struct bkey *replace_key)
2098{
2099 struct closure cl;
2100
2101 BUG_ON(b->level && replace_key);
2102
2103 closure_init_stack(&cl);
2104
2105 mutex_lock(&b->write_lock);
2106
2107 if (write_block(b) != btree_bset_last(b) &&
2108 b->keys.last_set_unwritten)
2109 bch_btree_init_next(b); /* just wrote a set */
2110
2111 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2112 mutex_unlock(&b->write_lock);
2113 goto split;
2114 }
2115
2116 BUG_ON(write_block(b) != btree_bset_last(b));
2117
2118 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2119 if (!b->level)
2120 bch_btree_leaf_dirty(b, journal_ref);
2121 else
2122 bch_btree_node_write(b, &cl);
2123 }
2124
2125 mutex_unlock(&b->write_lock);
2126
2127 /* wait for btree node write if necessary, after unlock */
2128 closure_sync(&cl);
2129
2130 return 0;
2131split:
2132 if (current->bio_list) {
2133 op->lock = b->c->root->level + 1;
2134 return -EAGAIN;
2135 } else if (op->lock <= b->c->root->level) {
2136 op->lock = b->c->root->level + 1;
2137 return -EINTR;
2138 } else {
2139 /* Invalidated all iterators */
2140 int ret = btree_split(b, op, insert_keys, replace_key);
2141
2142 if (bch_keylist_empty(insert_keys))
2143 return 0;
2144 else if (!ret)
2145 return -EINTR;
2146 return ret;
2147 }
2148}
2149
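/*
 * Insert a placeholder key for a cache miss: KEY_PTRS is set to 1 but the
 * pointer is random bits on device PTR_CHECK_DEV, so it never references
 * real data.  If the caller only held a read lock (op->lock == -1) it is
 * upgraded to a write lock, and the insert is abandoned if the node changed
 * underneath us.
 */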
2150int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2151 struct bkey *check_key)
2152{
2153 int ret = -EINTR;
2154 uint64_t btree_ptr = b->key.ptr[0];
2155 unsigned long seq = b->seq;
2156 struct keylist insert;
2157 bool upgrade = op->lock == -1;
2158
2159 bch_keylist_init(&insert);
2160
2161 if (upgrade) {
2162 rw_unlock(false, b);
2163 rw_lock(true, b, b->level);
2164
2165 if (b->key.ptr[0] != btree_ptr ||
2166 b->seq != seq + 1) {
2167 op->lock = b->level;
2168 goto out;
2169 }
2170 }
2171
2172 SET_KEY_PTRS(check_key, 1);
2173 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2174
2175 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2176
2177 bch_keylist_add(&insert, check_key);
2178
2179 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2180
2181 BUG_ON(!ret && !bch_keylist_empty(&insert));
2182out:
2183 if (upgrade)
2184 downgrade_write(&b->lock);
2185 return ret;
2186}
2187
2188struct btree_insert_op {
2189 struct btree_op op;
2190 struct keylist *keys;
2191 atomic_t *journal_ref;
2192 struct bkey *replace_key;
2193};
2194
2195static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2196{
2197 struct btree_insert_op *op = container_of(b_op,
2198 struct btree_insert_op, op);
2199
2200 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2201 op->journal_ref, op->replace_key);
2202 if (ret && !bch_keylist_empty(op->keys))
2203 return ret;
2204 else
2205 return MAP_DONE;
2206}
2207
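/*
 * Insert a list of keys, mapping over the leaf nodes that cover them.  On
 * error any remaining keys are dropped and their pointers released; -ESRCH
 * reports an insert collision (used by replace operations to detect that the
 * key they meant to replace is gone).
 */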
2208int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2209 atomic_t *journal_ref, struct bkey *replace_key)
2210{
2211 struct btree_insert_op op;
2212 int ret = 0;
2213
2214 BUG_ON(current->bio_list);
2215 BUG_ON(bch_keylist_empty(keys));
2216
2217 bch_btree_op_init(&op.op, 0);
2218 op.keys = keys;
2219 op.journal_ref = journal_ref;
2220 op.replace_key = replace_key;
2221
2222 while (!ret && !bch_keylist_empty(keys)) {
2223 op.op.lock = 0;
2224 ret = bch_btree_map_leaf_nodes(&op.op, c,
2225 &START_KEY(keys->keys),
2226 btree_insert_fn);
2227 }
2228
2229 if (ret) {
2230 struct bkey *k;
2231
2232 pr_err("error %i", ret);
2233
2234 while ((k = bch_keylist_pop(keys)))
2235 bkey_put(c, k);
2236 } else if (op.op.insert_collision)
2237 ret = -ESRCH;
2238
2239 return ret;
2240}
2241
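/*
 * Make b the new root: it must already be written, and its buckets must
 * still have btree priority.  The node is pulled off the btree cache's LRU
 * list, c->root is updated, and a journal meta entry is written so the new
 * root is recorded on disk before returning.
 */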
2242void bch_btree_set_root(struct btree *b)
2243{
2244 unsigned i;
2245 struct closure cl;
2246
2247 closure_init_stack(&cl);
2248
2249 trace_bcache_btree_set_root(b);
2250
2251 BUG_ON(!b->written);
2252
2253 for (i = 0; i < KEY_PTRS(&b->key); i++)
2254 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2255
2256 mutex_lock(&b->c->bucket_lock);
2257 list_del_init(&b->list);
2258 mutex_unlock(&b->c->bucket_lock);
2259
2260 b->c->root = b;
2261
2262 bch_journal_meta(b->c, &cl);
2263 closure_sync(&cl);
2264}
2265
2266/* Map across nodes or keys */
2267
2268static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2269 struct bkey *from,
2270 btree_map_nodes_fn *fn, int flags)
2271{
2272 int ret = MAP_CONTINUE;
2273
2274 if (b->level) {
2275 struct bkey *k;
2276 struct btree_iter iter;
2277
2278 bch_btree_iter_init(&b->keys, &iter, from);
2279
2280 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2281 bch_ptr_bad))) {
2282 ret = btree(map_nodes_recurse, k, b,
2283 op, from, fn, flags);
2284 from = NULL;
2285
2286 if (ret != MAP_CONTINUE)
2287 return ret;
2288 }
2289 }
2290
2291 if (!b->level || flags == MAP_ALL_NODES)
2292 ret = fn(op, b);
2293
2294 return ret;
2295}
2296
2297int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2298 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2299{
2300 return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2301}
2302
2303static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2304 struct bkey *from, btree_map_keys_fn *fn,
2305 int flags)
2306{
2307 int ret = MAP_CONTINUE;
2308 struct bkey *k;
2309 struct btree_iter iter;
2310
2311 bch_btree_iter_init(&b->keys, &iter, from);
2312
2313 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2314 ret = !b->level
2315 ? fn(op, b, k)
2316 : btree(map_keys_recurse, k, b, op, from, fn, flags);
2317 from = NULL;
2318
2319 if (ret != MAP_CONTINUE)
2320 return ret;
2321 }
2322
2323 if (!b->level && (flags & MAP_END_KEY))
2324 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2325 KEY_OFFSET(&b->key), 0));
2326
2327 return ret;
2328}
2329
2330int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2331 struct bkey *from, btree_map_keys_fn *fn, int flags)
2332{
2333 return btree_root(map_keys_recurse, c, op, from, fn, flags);
2334}
2335
2336/* Keybuf code */
2337
2338static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2339{
2340 /* Overlapping keys compare equal */
2341 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2342 return -1;
2343 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2344 return 1;
2345 return 0;
2346}
2347
2348static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2349 struct keybuf_key *r)
2350{
2351 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2352}
2353
2354struct refill {
2355 struct btree_op op;
2356 unsigned nr_found;
2357 struct keybuf *buf;
2358 struct bkey *end;
2359 keybuf_pred_fn *pred;
2360};
2361
2362static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2363 struct bkey *k)
2364{
2365 struct refill *refill = container_of(op, struct refill, op);
2366 struct keybuf *buf = refill->buf;
2367 int ret = MAP_CONTINUE;
2368
2369 if (bkey_cmp(k, refill->end) >= 0) {
2370 ret = MAP_DONE;
2371 goto out;
2372 }
2373
2374 if (!KEY_SIZE(k)) /* end key */
2375 goto out;
2376
2377 if (refill->pred(buf, k)) {
2378 struct keybuf_key *w;
2379
2380 spin_lock(&buf->lock);
2381
2382 w = array_alloc(&buf->freelist);
2383 if (!w) {
2384 spin_unlock(&buf->lock);
2385 return MAP_DONE;
2386 }
2387
2388 w->private = NULL;
2389 bkey_copy(&w->key, k);
2390
2391 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2392 array_free(&buf->freelist, w);
2393 else
2394 refill->nr_found++;
2395
2396 if (array_freelist_empty(&buf->freelist))
2397 ret = MAP_DONE;
2398
2399 spin_unlock(&buf->lock);
2400 }
2401out:
2402 buf->last_scanned = *k;
2403 return ret;
2404}
2405
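/*
 * Refill a keybuf by scanning the btree from last_scanned towards end,
 * adding keys that match pred to the buffer's rbtree until either the
 * freelist runs out or end is reached.  buf->start and buf->end are updated
 * to the range currently buffered so overlap checks stay cheap.
 */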
2406void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2407 struct bkey *end, keybuf_pred_fn *pred)
2408{
2409 struct bkey start = buf->last_scanned;
2410 struct refill refill;
2411
2412 cond_resched();
2413
2414 bch_btree_op_init(&refill.op, -1);
2415 refill.nr_found = 0;
2416 refill.buf = buf;
2417 refill.end = end;
2418 refill.pred = pred;
2419
2420 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2421 refill_keybuf_fn, MAP_END_KEY);
2422
2423 trace_bcache_keyscan(refill.nr_found,
2424 KEY_INODE(&start), KEY_OFFSET(&start),
2425 KEY_INODE(&buf->last_scanned),
2426 KEY_OFFSET(&buf->last_scanned));
2427
2428 spin_lock(&buf->lock);
2429
2430 if (!RB_EMPTY_ROOT(&buf->keys)) {
2431 struct keybuf_key *w;
2432 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2433 buf->start = START_KEY(&w->key);
2434
2435 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2436 buf->end = w->key;
2437 } else {
2438 buf->start = MAX_KEY;
2439 buf->end = MAX_KEY;
2440 }
2441
2442 spin_unlock(&buf->lock);
2443}
2444
2445static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2446{
2447 rb_erase(&w->node, &buf->keys);
2448 array_free(&buf->freelist, w);
2449}
2450
2451void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2452{
2453 spin_lock(&buf->lock);
2454 __bch_keybuf_del(buf, w);
2455 spin_unlock(&buf->lock);
2456}
2457
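/*
 * Drop buffered keys overlapping [start, end) that aren't currently in use;
 * returns true if any overlapping key was in use (w->private set) and so
 * could not be removed.
 */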
2458bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2459 struct bkey *end)
2460{
2461 bool ret = false;
2462 struct keybuf_key *p, *w, s;
2463 s.key = *start;
2464
2465 if (bkey_cmp(end, &buf->start) <= 0 ||
2466 bkey_cmp(start, &buf->end) >= 0)
2467 return false;
2468
2469 spin_lock(&buf->lock);
2470 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2471
2472 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2473 p = w;
2474 w = RB_NEXT(w, node);
2475
2476 if (p->private)
2477 ret = true;
2478 else
2479 __bch_keybuf_del(buf, p);
2480 }
2481
2482 spin_unlock(&buf->lock);
2483 return ret;
2484}
2485
2486struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2487{
2488 struct keybuf_key *w;
2489 spin_lock(&buf->lock);
2490
2491 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2492
2493 while (w && w->private)
2494 w = RB_NEXT(w, node);
2495
2496 if (w)
2497 w->private = ERR_PTR(-EINTR);
2498
2499 spin_unlock(&buf->lock);
2500 return w;
2501}
2502
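/*
 * Return the next key to operate on, refilling the buffer from the btree as
 * needed; returns NULL once last_scanned has passed end and the buffer is
 * empty.
 */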
2503struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2504 struct keybuf *buf,
2505 struct bkey *end,
2506 keybuf_pred_fn *pred)
2507{
2508 struct keybuf_key *ret;
2509
2510 while (1) {
2511 ret = bch_keybuf_next(buf);
2512 if (ret)
2513 break;
2514
2515 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2516 pr_debug("scan finished");
2517 break;
2518 }
2519
2520 bch_refill_keybuf(c, buf, end, pred);
2521 }
2522
2523 return ret;
2524}
2525
2526void bch_keybuf_init(struct keybuf *buf)
2527{
2528 buf->last_scanned = MAX_KEY;
2529 buf->keys = RB_ROOT;
2530
2531 spin_lock_init(&buf->lock);
2532 array_allocator_init(&buf->freelist);
2533}