// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

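/*
 * Read the journal entries held in one journal bucket into @list, keeping
 * the list sorted by sequence number and dropping list entries that are
 * older than the newest last_seq seen so far. Returns 1 if at least one
 * entry was added, 0 if none were, or a negative error code.
 */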
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned int bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned int len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u\n", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio, ca->bdev, REQ_OP_READ);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic\n", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u\n",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u\n",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca));

			/*
			 * Nodes in 'list' are in linear increasing order of
			 * i->j.seq, the node on head has the smallest (oldest)
			 * journal seq, the node on tail has the biggest
			 * (latest) journal seq.
			 */

			/*
			 * Check from the oldest jset for last_seq. If
			 * i->j.seq < j->last_seq, it means the oldest jset
			 * in list is expired and useless, remove it from
			 * this list. Otherwise, j is a candidate jset for
			 * further following checks.
			 */
			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			/* iterate list in reverse order (from latest jset) */
			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				/*
				 * if j->seq is less than any i->j.last_seq
				 * in list, j is an expired and useless jset.
				 */
				if (j->seq < i->j.last_seq)
					goto next_set;

				/*
				 * 'where' points to the first jset in the
				 * list which is older than j.
				 */
				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			unsafe_memcpy(&i->j, j, bytes,
				/* "bytes" was calculated by set_bytes() above */);
			/* Add to the location after 'where' points to */
			list_add(&i->list, where);
			ret = 1;

			if (j->seq > ja->seq[bucket_index])
				ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

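/*
 * Read every journal entry on the cache device into @list: probe the
 * journal buckets by golden ratio hash, binary search for the bucket
 * holding the newest entries, then walk backwards until no more entries
 * are found. Also records the newest seq per bucket in ja->seq[] and sets
 * ja->cur_idx/last_idx/discard_idx for journal_reclaim().
 */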
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		ret = journal_read_bucket(ca, list, b);			\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca = c->cache;
	int ret = 0;
	struct journal_device *ja = &ca->journal;
	DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
	unsigned int i, l, r, m;
	uint64_t seq;

	bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
	pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);

	/*
	 * Read journal buckets ordered by golden ratio hash to quickly
	 * find a sequence of buckets with valid journal entries
	 */
	for (i = 0; i < ca->sb.njournal_buckets; i++) {
		/*
		 * We must try index l with value zero first, for
		 * correctness: the journal buckets form a circular
		 * buffer which might have wrapped around.
		 */
		l = (i * 2654435769U) % ca->sb.njournal_buckets;

		if (test_bit(l, bitmap))
			break;

		if (read_bucket(l))
			goto bsearch;
	}

	/*
	 * If that fails, check all the buckets we haven't checked
	 * already
	 */
	pr_debug("falling back to linear search\n");

	for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
		if (read_bucket(l))
			goto bsearch;

	/* no journal entries on this device? */
	if (l == ca->sb.njournal_buckets)
		goto out;
bsearch:
	BUG_ON(list_empty(list));

	/* Binary search */
	m = l;
	r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
	pr_debug("starting binary search, l %u r %u\n", l, r);

	while (l + 1 < r) {
		seq = list_entry(list->prev, struct journal_replay,
				 list)->j.seq;

		m = (l + r) >> 1;
		read_bucket(m);

		if (seq != list_entry(list->prev, struct journal_replay,
				      list)->j.seq)
			l = m;
		else
			r = m;
	}

	/*
	 * Read buckets in reverse order until we stop finding more
	 * journal entries
	 */
	pr_debug("finishing up: m %u njournal_buckets %u\n",
		 m, ca->sb.njournal_buckets);
	l = m;

	while (1) {
		if (!l--)
			l = ca->sb.njournal_buckets - 1;

		if (l == m)
			break;

		if (test_bit(l, bitmap))
			continue;

		if (!read_bucket(l))
			break;
	}

	seq = 0;

	for (i = 0; i < ca->sb.njournal_buckets; i++)
		if (ja->seq[i] > seq) {
			seq = ja->seq[i];
			/*
			 * When journal_reclaim() goes to allocate for
			 * the first time, it'll use the bucket after
			 * ja->cur_idx
			 */
			ja->cur_idx = i;
			ja->last_idx = ja->discard_idx = (i + 1) %
				ca->sb.njournal_buckets;

		}

out:
	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

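/*
 * Before replay: set up journal.pin refcounts for the entries in @list,
 * and mark the keys they contain (bumping the pin count of each referenced
 * bucket) so those buckets can't be reused before the keys are reinserted.
 */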
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned int j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}

static bool is_discard_enabled(struct cache_set *s)
{
	struct cache *ca = s->cache;

	if (ca->discard)
		return true;

	return false;
}

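/*
 * Reinsert the keys from every journal entry in @list into the btree, in
 * the order they were journalled. A gap in the sequence numbers is fatal
 * (-EIO) unless discards are enabled, in which case the missing entries
 * may simply have been discarded already.
 */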
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq) {
			if (n == start && is_discard_enabled(s))
				pr_info("journal entries %llu-%llu may be discarded! (replaying %llu-%llu)\n",
					n, i->j.seq - 1, start, end);
			else {
				pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
					n, i->j.seq - 1, start, end);
				ret = -EIO;
				goto err;
			}
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu\n",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

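/* Have free_journal_buckets() keep one extra bucket in reserve from now on */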
void bch_journal_space_reserve(struct journal *j)
{
	j->do_reserve = true;
}

/* Journalling */

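/*
 * Write out up to BTREE_FLUSH_NR dirty btree nodes whose oldest unwritten
 * keys went into the oldest journal entry, so that entry's refcount can
 * drop to zero and journal_reclaim() can free its bucket.
 */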
static void btree_flush_write(struct cache_set *c)
{
	struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
	unsigned int i, nr;
	int ref_nr;
	atomic_t *fifo_front_p, *now_fifo_front_p;
	size_t mask;

	if (c->journal.btree_flushing)
		return;

	spin_lock(&c->journal.flush_write_lock);
	if (c->journal.btree_flushing) {
		spin_unlock(&c->journal.flush_write_lock);
		return;
	}
	c->journal.btree_flushing = true;
	spin_unlock(&c->journal.flush_write_lock);

	/* get the oldest journal entry and check its refcount */
	spin_lock(&c->journal.lock);
	fifo_front_p = &fifo_front(&c->journal.pin);
	ref_nr = atomic_read(fifo_front_p);
	if (ref_nr <= 0) {
		/*
		 * do nothing if no btree node references
		 * the oldest journal entry
		 */
		spin_unlock(&c->journal.lock);
		goto out;
	}
	spin_unlock(&c->journal.lock);

	mask = c->journal.pin.mask;
	nr = 0;
	atomic_long_inc(&c->flush_write);
	memset(btree_nodes, 0, sizeof(btree_nodes));

	mutex_lock(&c->bucket_lock);
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		/*
		 * It is safe to get now_fifo_front_p without holding
		 * c->journal.lock here, because we don't need to know
		 * the exact value, just whether the front pointer of
		 * c->journal.pin has changed.
		 */
		now_fifo_front_p = &fifo_front(&c->journal.pin);
		/*
		 * If the oldest journal entry is reclaimed and the front
		 * pointer of c->journal.pin changes, it is unnecessary
		 * to scan c->btree_cache anymore, just quit the loop and
		 * flush out what we have already.
		 */
		if (now_fifo_front_p != fifo_front_p)
			break;
		/*
		 * quit this loop if all matching btree nodes are
		 * scanned and recorded in btree_nodes[] already.
		 */
		ref_nr = atomic_read(fifo_front_p);
		if (nr >= ref_nr)
			break;

		if (btree_node_journal_flush(b))
			pr_err("BUG: flush_write bit should not be set here!\n");

		mutex_lock(&b->write_lock);

		if (!btree_node_dirty(b)) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		/*
		 * Only select the btree node which exactly references
		 * the oldest journal entry.
		 *
		 * If the journal entry pointed to by fifo_front_p is
		 * reclaimed in parallel, don't worry:
		 * - the list_for_each_xxx loop will quit when checking
		 *   next now_fifo_front_p.
		 * - If there are matched nodes recorded in btree_nodes[],
		 *   they are clean now (this is why and how the oldest
		 *   journal entry can be reclaimed). These selected nodes
		 *   will be ignored and skipped in the following for-loop.
		 */
		if (((btree_current_write(b)->journal - fifo_front_p) &
		     mask) != 0) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		set_btree_node_journal_flush(b);

		mutex_unlock(&b->write_lock);

		btree_nodes[nr++] = b;
		/*
		 * To avoid holding c->bucket_lock for too long, only
		 * scan for BTREE_FLUSH_NR matched btree nodes at most.
		 * If there are more btree nodes referencing the oldest
		 * journal entry, try to flush them next time when
		 * btree_flush_write() is called.
		 */
		if (nr == BTREE_FLUSH_NR)
			break;
	}
	mutex_unlock(&c->bucket_lock);

	for (i = 0; i < nr; i++) {
		b = btree_nodes[i];
		if (!b) {
			pr_err("BUG: btree_nodes[%d] is NULL\n", i);
			continue;
		}

		/* safe to check without holding b->write_lock */
		if (!btree_node_journal_flush(b)) {
			pr_err("BUG: bnode %p: journal_flush bit cleaned\n", b);
			continue;
		}

		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: written by others\n", b);
			continue;
		}

		if (!btree_node_dirty(b)) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: dirty bit cleaned by others\n", b);
			continue;
		}

		__bch_btree_node_write(b, NULL);
		clear_bit(BTREE_NODE_journal_flush, &b->flags);
		mutex_unlock(&b->write_lock);
	}

out:
	spin_lock(&c->journal.flush_write_lock);
	c->journal.btree_flushing = false;
	spin_unlock(&c->journal.flush_write_lock);
}

#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}

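/*
 * Advance ja->discard_idx towards ja->last_idx, issuing a discard for each
 * reclaimed journal bucket; if discards are disabled just catch the index
 * up. Progress is tracked by the ja->discard_in_flight state machine.
 */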
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		fallthrough;

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		queue_work(bch_journal_wq, &ja->discard_work);
	}
}

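/*
 * Number of journal buckets available for allocation: the distance from
 * ja->cur_idx forward to ja->discard_idx, minus one bucket always held
 * back (two once bch_journal_space_reserve() has been called).
 */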
static unsigned int free_journal_buckets(struct cache_set *c)
{
	struct journal *j = &c->journal;
	struct cache *ca = c->cache;
	struct journal_device *ja = &c->cache->journal;
	unsigned int n;

	/* In case njournal_buckets is not power of 2 */
	if (ja->cur_idx >= ja->discard_idx)
		n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
	else
		n = ja->discard_idx - ja->cur_idx;

	if (n > (1 + j->do_reserve))
		return n - (1 + j->do_reserve);

	return 0;
}

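/*
 * Drop journal pins whose refcount has hit zero, advance last_idx past
 * buckets that only contain flushed entries (and discard them), then
 * allocate the next journal bucket if the current one is out of space.
 */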
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca = c->cache;
	uint64_t last_seq;
	struct journal_device *ja = &ca->journal;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	while (ja->last_idx != ja->cur_idx &&
	       ja->seq[ja->last_idx] < last_seq)
		ja->last_idx = (ja->last_idx + 1) %
			ca->sb.njournal_buckets;

	do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	if (!free_journal_buckets(c))
		goto out;

	ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
	k->ptr[0] = MAKE_PTR(0,
			     bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
			     ca->sb.nr_this_dev);
	atomic_long_inc(&c->reclaimed_journal_buckets);

	bkey_init(k);
	SET_KEY_PTRS(k, 1);
	c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;

out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

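/*
 * Switch to the other in-memory journal write buffer and push a new pin
 * (initialized to 1) onto journal.pin for the new journal entry.
 */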
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)\n", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *cl);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}

static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}

static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca = c->cache;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
		ca->sb.block_size;

	struct bio *bio;
	struct bio_list list;

	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, bch_journal_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
	w->data->magic		= jset_magic(&ca->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = c->cache;
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio, ca->bdev, REQ_OP_WRITE |
			  REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio->bi_iter.bi_size = sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio, w->data->keys);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	/* If KEY_PTRS(k) == 0, this jset gets lost in air */
	BUG_ON(i == 0);

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight	= 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}

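/*
 * Wait until the current in-memory journal entry has room for @nkeys more
 * keys, kicking off journal writes, reclaim and btree flushes as needed.
 * Returns with c->journal.lock held.
 */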
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned int nkeys)
	__acquires(&c->journal.lock)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;
	struct cache *ca = c->cache;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(ca)) * ca->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * ca->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

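/*
 * Delayed work, scheduled from bch_journal(): write out the current
 * journal entry if it is still dirty.
 */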
static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	/* No journaling if CACHE_SET_IO_DISABLE set already */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return NULL;

	if (!CACHE_SYNC(&c->cache->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		queue_delayed_work(bch_flush_wq, &c->journal.work,
				   msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

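/*
 * Journal an empty keylist - used when only the metadata carried by every
 * journal entry (btree root, uuid bucket, etc.) needs to be journalled,
 * with no keys to insert.
 */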
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	spin_lock_init(&j->flush_write_lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
		return -ENOMEM;

	return 0;
}