/* NOTE(review): stray "Loading..." viewer artifact removed; this file appears to be two concatenated revisions of the same header. */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_WRITE_BUFFER_H
#define _BCACHEFS_BTREE_WRITE_BUFFER_H

#include "bkey.h"

7static inline bool bch2_btree_write_buffer_should_flush(struct bch_fs *c)
8{
9 struct btree_write_buffer *wb = &c->btree_write_buffer;
10
11 return wb->inc.keys.nr + wb->flushing.keys.nr > wb->inc.keys.size / 4;
12}
14static inline bool bch2_btree_write_buffer_must_wait(struct bch_fs *c)
15{
16 struct btree_write_buffer *wb = &c->btree_write_buffer;
17
18 return wb->inc.keys.nr > wb->inc.keys.size * 3 / 4;
19}

struct btree_trans;
int bch2_btree_write_buffer_flush_sync(struct btree_trans *);
int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *);
int bch2_btree_write_buffer_tryflush(struct btree_trans *);

26struct journal_keys_to_wb {
27 struct btree_write_buffer_keys *wb;
28 size_t room;
29 u64 seq;
30};

int bch2_journal_key_to_wb_slowpath(struct bch_fs *,
			 struct journal_keys_to_wb *,
			 enum btree_id, struct bkey_i *);

36static inline int bch2_journal_key_to_wb(struct bch_fs *c,
37 struct journal_keys_to_wb *dst,
38 enum btree_id btree, struct bkey_i *k)
39{
40 EBUG_ON(!dst->seq);
41
42 if (unlikely(!dst->room))
43 return bch2_journal_key_to_wb_slowpath(c, dst, btree, k);
44
45 struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys);
46 wb_k->journal_seq = dst->seq;
47 wb_k->btree = btree;
48 bkey_copy(&wb_k->k, k);
49 dst->wb->keys.nr++;
50 dst->room--;
51 return 0;
52}

void bch2_journal_keys_to_write_buffer_start(struct bch_fs *, struct journal_keys_to_wb *, u64);
void bch2_journal_keys_to_write_buffer_end(struct bch_fs *, struct journal_keys_to_wb *);

int bch2_btree_write_buffer_resize(struct bch_fs *, size_t);
void bch2_fs_btree_write_buffer_exit(struct bch_fs *);
int bch2_fs_btree_write_buffer_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_WRITE_BUFFER_H */
/* SPDX-License-Identifier: GPL-2.0 */
/* NOTE(review): second concatenated revision of this header begins here;
 * the include guard above makes this copy dead when compiled as-is. */
#ifndef _BCACHEFS_BTREE_WRITE_BUFFER_H
#define _BCACHEFS_BTREE_WRITE_BUFFER_H

#include "bkey.h"
#include "disk_accounting.h"

8static inline bool bch2_btree_write_buffer_should_flush(struct bch_fs *c)
9{
10 struct btree_write_buffer *wb = &c->btree_write_buffer;
11
12 return wb->inc.keys.nr + wb->flushing.keys.nr > wb->inc.keys.size / 4;
13}
15static inline bool bch2_btree_write_buffer_must_wait(struct bch_fs *c)
16{
17 struct btree_write_buffer *wb = &c->btree_write_buffer;
18
19 return wb->inc.keys.nr > wb->inc.keys.size * 3 / 4;
20}

struct btree_trans;
int bch2_btree_write_buffer_flush_sync(struct btree_trans *);
bool bch2_btree_write_buffer_flush_going_ro(struct bch_fs *);
int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *);
int bch2_btree_write_buffer_tryflush(struct btree_trans *);

struct bkey_buf;
int bch2_btree_write_buffer_maybe_flush(struct btree_trans *, struct bkey_s_c, struct bkey_buf *);

31struct journal_keys_to_wb {
32 struct btree_write_buffer_keys *wb;
33 size_t room;
34 u64 seq;
35};
37static inline int wb_key_cmp(const void *_l, const void *_r)
38{
39 const struct btree_write_buffered_key *l = _l;
40 const struct btree_write_buffered_key *r = _r;
41
42 return cmp_int(l->btree, r->btree) ?: bpos_cmp(l->k.k.p, r->k.k.p);
43}

int bch2_accounting_key_to_wb_slowpath(struct bch_fs *,
			   enum btree_id, struct bkey_i_accounting *);

48static inline int bch2_accounting_key_to_wb(struct bch_fs *c,
49 enum btree_id btree, struct bkey_i_accounting *k)
50{
51 struct btree_write_buffer *wb = &c->btree_write_buffer;
52 struct btree_write_buffered_key search;
53 search.btree = btree;
54 search.k.k.p = k->k.p;
55
56 unsigned idx = eytzinger0_find(wb->accounting.data, wb->accounting.nr,
57 sizeof(wb->accounting.data[0]),
58 wb_key_cmp, &search);
59
60 if (idx >= wb->accounting.nr)
61 return bch2_accounting_key_to_wb_slowpath(c, btree, k);
62
63 struct bkey_i_accounting *dst = bkey_i_to_accounting(&wb->accounting.data[idx].k);
64 bch2_accounting_accumulate(dst, accounting_i_to_s_c(k));
65 return 0;
66}

int bch2_journal_key_to_wb_slowpath(struct bch_fs *,
			 struct journal_keys_to_wb *,
			 enum btree_id, struct bkey_i *);

72static inline int __bch2_journal_key_to_wb(struct bch_fs *c,
73 struct journal_keys_to_wb *dst,
74 enum btree_id btree, struct bkey_i *k)
75{
76 if (unlikely(!dst->room))
77 return bch2_journal_key_to_wb_slowpath(c, dst, btree, k);
78
79 struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys);
80 wb_k->journal_seq = dst->seq;
81 wb_k->btree = btree;
82 bkey_copy(&wb_k->k, k);
83 dst->wb->keys.nr++;
84 dst->room--;
85 return 0;
86}
88static inline int bch2_journal_key_to_wb(struct bch_fs *c,
89 struct journal_keys_to_wb *dst,
90 enum btree_id btree, struct bkey_i *k)
91{
92 EBUG_ON(!dst->seq);
93
94 return k->k.type == KEY_TYPE_accounting
95 ? bch2_accounting_key_to_wb(c, btree, bkey_i_to_accounting(k))
96 : __bch2_journal_key_to_wb(c, dst, btree, k);
97}

void bch2_journal_keys_to_write_buffer_start(struct bch_fs *, struct journal_keys_to_wb *, u64);
int bch2_journal_keys_to_write_buffer_end(struct bch_fs *, struct journal_keys_to_wb *);

int bch2_btree_write_buffer_resize(struct bch_fs *, size_t);
void bch2_fs_btree_write_buffer_exit(struct bch_fs *);
int bch2_fs_btree_write_buffer_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_WRITE_BUFFER_H */