// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "disk_accounting.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "snapshot.h"

#include <linux/prefetch.h>

static const char * const trans_commit_flags_strs[] = {
#define x(n, ...) #n,
	BCH_TRANS_COMMIT_FLAGS()
#undef x
	NULL
};

void bch2_trans_commit_flags_to_text(struct printbuf *out, enum bch_trans_commit_flags flags)
{
	enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;

	prt_printf(out, "watermark=%s", bch2_watermarks[watermark]);

	flags >>= BCH_WATERMARK_BITS;
	if (flags) {
		prt_char(out, ' ');
		bch2_prt_bitflags(out, trans_commit_flags_strs, flags);
	}
}

static void verify_update_old_key(struct btree_trans *trans, struct btree_insert_entry *i)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bch_fs *c = trans->c;
	struct bkey u;
	struct bkey_s_c k = bch2_btree_path_peek_slot_exact(trans->paths + i->path, &u);

	if (unlikely(trans->journal_replay_not_finished)) {
		struct bkey_i *j_k =
			bch2_journal_keys_peek_slot(c, i->btree_id, i->level, i->k->k.p);

		if (j_k)
			k = bkey_i_to_s_c(j_k);
	}

	u = *k.k;
	u.needs_whiteout = i->old_k.needs_whiteout;

	BUG_ON(memcmp(&i->old_k, &u, sizeof(struct bkey)));
	BUG_ON(i->old_v != k.v);
#endif
}

static inline struct btree_path_level *insert_l(struct btree_trans *trans, struct btree_insert_entry *i)
{
	return (trans->paths + i->path)->l + i->level;
}

static inline bool same_leaf_as_prev(struct btree_trans *trans,
				     struct btree_insert_entry *i)
{
	return i != trans->updates &&
		insert_l(trans, &i[0])->b == insert_l(trans, &i[-1])->b;
}

static inline bool same_leaf_as_next(struct btree_trans *trans,
				     struct btree_insert_entry *i)
{
	return i + 1 < trans->updates + trans->nr_updates &&
		insert_l(trans, &i[0])->b == insert_l(trans, &i[1])->b;
}

inline void bch2_btree_node_prep_for_write(struct btree_trans *trans,
					   struct btree_path *path,
					   struct btree *b)
{
	struct bch_fs *c = trans->c;

	if (unlikely(btree_node_just_written(b)) &&
	    bch2_btree_post_write_cleanup(c, b))
		bch2_trans_node_reinit_iter(trans, b);

	/*
	 * If the last bset has been written, or if it's gotten too big - start
	 * a new bset to insert into:
	 */
	if (want_new_bset(c, b))
		bch2_btree_init_next(trans, b);
}

static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btree_insert_entry *i)
{
	while (--i >= trans->updates) {
		if (same_leaf_as_prev(trans, i))
			continue;

		bch2_btree_node_unlock_write(trans, trans->paths + i->path, insert_l(trans, i)->b);
	}

	trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
}

static inline int bch2_trans_lock_write(struct btree_trans *trans)
{
	EBUG_ON(trans->write_locked);

	trans_for_each_update(trans, i) {
		if (same_leaf_as_prev(trans, i))
			continue;

		if (bch2_btree_node_lock_write(trans, trans->paths + i->path, &insert_l(trans, i)->b->c))
			return trans_lock_write_fail(trans, i);

		if (!i->cached)
			bch2_btree_node_prep_for_write(trans, trans->paths + i->path, insert_l(trans, i)->b);
	}

	trans->write_locked = true;
	return 0;
}

static inline void bch2_trans_unlock_write(struct btree_trans *trans)
{
	if (likely(trans->write_locked)) {
		trans_for_each_update(trans, i)
			if (btree_node_locked_type(trans->paths + i->path, i->level) ==
			    BTREE_NODE_WRITE_LOCKED)
				bch2_btree_node_unlock_write_inlined(trans,
						trans->paths + i->path, insert_l(trans, i)->b);
		trans->write_locked = false;
	}
}

/* Inserting into a given leaf node (last stage of insert): */

/* Handle overwrites and do insert, for non extents: */
bool bch2_btree_bset_insert_key(struct btree_trans *trans,
				struct btree_path *path,
				struct btree *b,
				struct btree_node_iter *node_iter,
				struct bkey_i *insert)
{
	struct bkey_packed *k;
	unsigned clobber_u64s = 0, new_u64s = 0;

	EBUG_ON(btree_node_just_written(b));
	EBUG_ON(bset_written(b, btree_bset_last(b)));
	EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
	EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
	EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
	EBUG_ON(insert->k.u64s > bch2_btree_keys_u64s_remaining(b));
	EBUG_ON(!b->c.level && !bpos_eq(insert->k.p, path->pos));

	k = bch2_btree_node_iter_peek_all(node_iter, b);
	if (k && bkey_cmp_left_packed(b, k, &insert->k.p))
		k = NULL;

	/* @k is the key being overwritten/deleted, if any: */
	EBUG_ON(k && bkey_deleted(k));

	/* Deleting, but not found? nothing to do: */
	if (bkey_deleted(&insert->k) && !k)
		return false;

	if (bkey_deleted(&insert->k)) {
		/* Deleting: */
		btree_account_key_drop(b, k);
		k->type = KEY_TYPE_deleted;

		if (k->needs_whiteout)
			push_whiteout(b, insert->k.p);
		k->needs_whiteout = false;

		if (k >= btree_bset_last(b)->start) {
			clobber_u64s = k->u64s;
			bch2_bset_delete(b, k, clobber_u64s);
			goto fix_iter;
		} else {
			bch2_btree_path_fix_key_modified(trans, b, k);
		}

		return true;
	}

	if (k) {
		/* Overwriting: */
		btree_account_key_drop(b, k);
		k->type = KEY_TYPE_deleted;

		insert->k.needs_whiteout = k->needs_whiteout;
		k->needs_whiteout = false;

		if (k >= btree_bset_last(b)->start) {
			clobber_u64s = k->u64s;
			goto overwrite;
		} else {
			bch2_btree_path_fix_key_modified(trans, b, k);
		}
	}

	k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
overwrite:
	bch2_bset_insert(b, k, insert, clobber_u64s);
	new_u64s = k->u64s;
fix_iter:
	if (clobber_u64s != new_u64s)
		bch2_btree_node_iter_fix(trans, path, b, node_iter, k,
					 clobber_u64s, new_u64s);
	return true;
}

static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
			      unsigned i, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write *w = container_of(pin, struct btree_write, journal);
	struct btree *b = container_of(w, struct btree, writes[i]);
	struct btree_trans *trans = bch2_trans_get(c);
	unsigned long old, new;
	unsigned idx = w - b->writes;

	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);

	old = READ_ONCE(b->flags);
	do {
		new = old;

		if (!(old & (1 << BTREE_NODE_dirty)) ||
		    !!(old & (1 << BTREE_NODE_write_idx)) != idx ||
		    w->journal.seq != seq)
			break;

		new &= ~BTREE_WRITE_TYPE_MASK;
		new |= BTREE_WRITE_journal_reclaim;
		new |= 1 << BTREE_NODE_need_write;
	} while (!try_cmpxchg(&b->flags, &old, new));

	btree_node_write_if_need(c, b, SIX_LOCK_read);
	six_unlock_read(&b->c.lock);

	bch2_trans_put(trans);
	return 0;
}

int bch2_btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 0, seq);
}

int bch2_btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 1, seq);
}

inline void bch2_btree_add_journal_pin(struct bch_fs *c,
				       struct btree *b, u64 seq)
{
	struct btree_write *w = btree_current_write(b);

	bch2_journal_pin_add(&c->journal, seq, &w->journal,
			     btree_node_write_idx(b) == 0
			     ? bch2_btree_node_flush0
			     : bch2_btree_node_flush1);
}

/**
 * bch2_btree_insert_key_leaf() - insert one key into a leaf node
 * @trans:		btree transaction object
 * @path:		path pointing to @insert's pos
 * @insert:		key to insert
 * @journal_seq:	sequence number of journal reservation
 */
inline void bch2_btree_insert_key_leaf(struct btree_trans *trans,
				       struct btree_path *path,
				       struct bkey_i *insert,
				       u64 journal_seq)
{
	struct bch_fs *c = trans->c;
	struct btree *b = path_l(path)->b;
	struct bset_tree *t = bset_tree_last(b);
	struct bset *i = bset(b, t);
	int old_u64s = bset_u64s(t);
	int old_live_u64s = b->nr.live_u64s;
	int live_u64s_added, u64s_added;

	if (unlikely(!bch2_btree_bset_insert_key(trans, path, b,
					&path_l(path)->iter, insert)))
		return;

	i->journal_seq = cpu_to_le64(max(journal_seq, le64_to_cpu(i->journal_seq)));

	bch2_btree_add_journal_pin(c, b, journal_seq);

	if (unlikely(!btree_node_dirty(b))) {
		EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
		set_btree_node_dirty_acct(c, b);
	}

	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
	u64s_added = (int) bset_u64s(t) - old_u64s;

	if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
	if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);

	if (u64s_added > live_u64s_added &&
	    bch2_maybe_compact_whiteouts(c, b))
		bch2_trans_node_reinit_iter(trans, b);
}

/* Cached btree updates: */

/* Normal update interface: */

static inline void btree_insert_entry_checks(struct btree_trans *trans,
					     struct btree_insert_entry *i)
{
	struct btree_path *path = trans->paths + i->path;

	BUG_ON(!bpos_eq(i->k->k.p, path->pos));
	BUG_ON(i->cached != path->cached);
	BUG_ON(i->level != path->level);
	BUG_ON(i->btree_id != path->btree_id);
	EBUG_ON(!i->level &&
		btree_type_has_snapshots(i->btree_id) &&
		!(i->flags & BTREE_UPDATE_internal_snapshot_node) &&
		test_bit(JOURNAL_replay_done, &trans->c->journal.flags) &&
		i->k->k.p.snapshot &&
		bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot) > 0);
}

static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
						      unsigned flags)
{
	return bch2_journal_res_get(&trans->c->journal, &trans->journal_res,
				    trans->journal_u64s, flags);
}

#define JSET_ENTRY_LOG_U64s		4

static noinline void journal_transaction_name(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct jset_entry *entry =
		bch2_journal_add_entry(j, &trans->journal_res,
				       BCH_JSET_ENTRY_log, 0, 0,
				       JSET_ENTRY_LOG_U64s);
	struct jset_entry_log *l =
		container_of(entry, struct jset_entry_log, entry);

	strncpy(l->d, trans->fn, JSET_ENTRY_LOG_U64s * sizeof(u64));
}

static inline int btree_key_can_insert(struct btree_trans *trans,
				       struct btree *b, unsigned u64s)
{
	if (!bch2_btree_node_insert_fits(b, u64s))
		return -BCH_ERR_btree_insert_btree_node_full;

	return 0;
}

noinline static int
btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags,
				     struct btree_path *path, unsigned new_u64s)
{
	struct bkey_cached *ck = (void *) path->l[0].b;
	struct bkey_i *new_k;
	int ret;

	bch2_trans_unlock_write(trans);
	bch2_trans_unlock(trans);

	new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
	if (!new_k) {
		bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
			bch2_btree_id_str(path->btree_id), new_u64s);
		return -BCH_ERR_ENOMEM_btree_key_cache_insert;
	}

	ret = bch2_trans_relock(trans) ?:
		bch2_trans_lock_write(trans);
	if (unlikely(ret)) {
		kfree(new_k);
		return ret;
	}

	memcpy(new_k, ck->k, ck->u64s * sizeof(u64));

	trans_for_each_update(trans, i)
		if (i->old_v == &ck->k->v)
			i->old_v = &new_k->v;

	kfree(ck->k);
	ck->u64s = new_u64s;
	ck->k = new_k;
	return 0;
}

static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags,
				       struct btree_path *path, unsigned u64s)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) path->l[0].b;
	unsigned new_u64s;
	struct bkey_i *new_k;
	unsigned watermark = flags & BCH_WATERMARK_MASK;

	EBUG_ON(path->level);

	if (watermark < BCH_WATERMARK_reclaim &&
	    !test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
	    bch2_btree_key_cache_must_wait(c))
		return -BCH_ERR_btree_insert_need_journal_reclaim;

	/*
	 * bch2_varint_decode can read past the end of the buffer by at most 7
	 * bytes (it won't be used):
	 */
	u64s += 1;

	if (u64s <= ck->u64s)
		return 0;

	new_u64s	= roundup_pow_of_two(u64s);
	new_k		= krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOWAIT|__GFP_NOWARN);
	if (unlikely(!new_k))
		return btree_key_can_insert_cached_slowpath(trans, flags, path, new_u64s);

	trans_for_each_update(trans, i)
		if (i->old_v == &ck->k->v)
			i->old_v = &new_k->v;

	ck->u64s	= new_u64s;
	ck->k		= new_k;
	return 0;
}

/* Triggers: */

static int run_one_mem_trigger(struct btree_trans *trans,
			       struct btree_insert_entry *i,
			       unsigned flags)
{
	verify_update_old_key(trans, i);

	if (unlikely(flags & BTREE_TRIGGER_norun))
		return 0;

	struct bkey_s_c old = { &i->old_k, i->old_v };
	struct bkey_i *new = i->k;
	const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
	const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);

	if (old_ops->trigger == new_ops->trigger)
		return bch2_key_trigger(trans, i->btree_id, i->level,
					old, bkey_i_to_s(new),
					BTREE_TRIGGER_insert|BTREE_TRIGGER_overwrite|flags);
	else
		return bch2_key_trigger_new(trans, i->btree_id, i->level,
					    bkey_i_to_s(new), flags) ?:
		       bch2_key_trigger_old(trans, i->btree_id, i->level,
					    old, flags);
}

static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_entry *i,
				 bool overwrite)
{
	verify_update_old_key(trans, i);

	if ((i->flags & BTREE_TRIGGER_norun) ||
	    !btree_node_type_has_trans_triggers(i->bkey_type))
		return 0;

	/*
	 * Transactional triggers create new btree_insert_entries, so we can't
	 * pass them a pointer to a btree_insert_entry, that memory is going to
	 * move:
	 */
	struct bkey old_k = i->old_k;
	struct bkey_s_c old = { &old_k, i->old_v };
	const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
	const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
	unsigned flags = i->flags|BTREE_TRIGGER_transactional;

	if (!i->insert_trigger_run &&
	    !i->overwrite_trigger_run &&
	    old_ops->trigger == new_ops->trigger) {
		i->overwrite_trigger_run = true;
		i->insert_trigger_run = true;
		return bch2_key_trigger(trans, i->btree_id, i->level, old, bkey_i_to_s(i->k),
					BTREE_TRIGGER_insert|
					BTREE_TRIGGER_overwrite|flags) ?: 1;
	} else if (overwrite && !i->overwrite_trigger_run) {
		i->overwrite_trigger_run = true;
		return bch2_key_trigger_old(trans, i->btree_id, i->level, old, flags) ?: 1;
	} else if (!overwrite && !i->insert_trigger_run) {
		i->insert_trigger_run = true;
		return bch2_key_trigger_new(trans, i->btree_id, i->level, bkey_i_to_s(i->k), flags) ?: 1;
	} else {
		return 0;
	}
}

static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
			      unsigned btree_id_start)
{
	for (int overwrite = 1; overwrite >= 0; --overwrite) {
		bool trans_trigger_run;

		/*
		 * Running triggers will append more updates to the list of updates as
		 * we're walking it:
		 */
		do {
			trans_trigger_run = false;

			for (unsigned i = btree_id_start;
			     i < trans->nr_updates && trans->updates[i].btree_id <= btree_id;
			     i++) {
				if (trans->updates[i].btree_id != btree_id)
					continue;

				int ret = run_one_trans_trigger(trans, trans->updates + i, overwrite);
				if (ret < 0)
					return ret;
				if (ret)
					trans_trigger_run = true;
			}
		} while (trans_trigger_run);
	}

	return 0;
}

static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
{
	unsigned btree_id = 0, btree_id_start = 0;
	int ret = 0;

	/*
	 * For a given btree, this algorithm runs insert triggers before
	 * overwrite triggers: this is so that when extents are being moved
	 * (e.g. by FALLOCATE_FL_INSERT_RANGE), we don't drop references before
	 * they are re-added.
	 */
	for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
		if (btree_id == BTREE_ID_alloc)
			continue;

		while (btree_id_start < trans->nr_updates &&
		       trans->updates[btree_id_start].btree_id < btree_id)
			btree_id_start++;

		ret = run_btree_triggers(trans, btree_id, btree_id_start);
		if (ret)
			return ret;
	}

	for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
		struct btree_insert_entry *i = trans->updates + idx;

		if (i->btree_id > BTREE_ID_alloc)
			break;
		if (i->btree_id == BTREE_ID_alloc) {
			ret = run_btree_triggers(trans, BTREE_ID_alloc, idx);
			if (ret)
				return ret;
			break;
		}
	}

#ifdef CONFIG_BCACHEFS_DEBUG
	trans_for_each_update(trans, i)
		BUG_ON(!(i->flags & BTREE_TRIGGER_norun) &&
		       btree_node_type_has_trans_triggers(i->bkey_type) &&
		       (!i->insert_trigger_run || !i->overwrite_trigger_run));
#endif
	return 0;
}

static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
{
	trans_for_each_update(trans, i)
		if (btree_node_type_has_triggers(i->bkey_type) &&
		    gc_visited(trans->c, gc_pos_btree(i->btree_id, i->level, i->k->k.p))) {
			int ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_gc);
			if (ret)
				return ret;
		}

	return 0;
}

static struct bversion journal_pos_to_bversion(struct journal_res *res, unsigned offset)
{
	return (struct bversion) {
		.hi = res->seq >> 32,
		.lo = (res->seq << 32) | (res->offset + offset),
	};
}
619
620static inline int
621bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
622 struct btree_insert_entry **stopped_at,
623 unsigned long trace_ip)
624{
625 struct bch_fs *c = trans->c;
626 struct btree_trans_commit_hook *h;
627 unsigned u64s = 0;
628 int ret = 0;
629
630 bch2_trans_verify_not_unlocked(trans);
631 bch2_trans_verify_not_in_restart(trans);
632
633 if (race_fault()) {
634 trace_and_count(c, trans_restart_fault_inject, trans, trace_ip);
635 return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
636 }
637
638 /*
639 * Check if the insert will fit in the leaf node with the write lock
640 * held, otherwise another thread could write the node changing the
641 * amount of space available:
642 */
643
644 prefetch(&trans->c->journal.flags);
645
646 trans_for_each_update(trans, i) {
647 /* Multiple inserts might go to same leaf: */
648 if (!same_leaf_as_prev(trans, i))
649 u64s = 0;
650
651 u64s += i->k->k.u64s;
652 ret = !i->cached
653 ? btree_key_can_insert(trans, insert_l(trans, i)->b, u64s)
654 : btree_key_can_insert_cached(trans, flags, trans->paths + i->path, u64s);
655 if (ret) {
656 *stopped_at = i;
657 return ret;
658 }
659
660 i->k->k.needs_whiteout = false;
661 }
662
663 /*
664 * Don't get journal reservation until after we know insert will
665 * succeed:
666 */
667 if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) {
668 ret = bch2_trans_journal_res_get(trans,
669 (flags & BCH_WATERMARK_MASK)|
670 JOURNAL_RES_GET_NONBLOCK);
671 if (ret)
672 return ret;
673
674 if (unlikely(trans->journal_transaction_names))
675 journal_transaction_name(trans);
676 }
677
678 /*
679 * Not allowed to fail after we've gotten our journal reservation - we
680 * have to use it:
681 */
682
683 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
684 !(flags & BCH_TRANS_COMMIT_no_journal_res)) {
685 if (bch2_journal_seq_verify)
686 trans_for_each_update(trans, i)
687 i->k->k.bversion.lo = trans->journal_res.seq;
688 else if (bch2_inject_invalid_keys)
689 trans_for_each_update(trans, i)
690 i->k->k.bversion = MAX_VERSION;
691 }
692
693 h = trans->hooks;
694 while (h) {
695 ret = h->fn(trans, h);
696 if (ret)
697 return ret;
698 h = h->next;
699 }
700
701 struct jset_entry *entry = trans->journal_entries;
702
703 percpu_down_read(&c->mark_lock);
704
705 for (entry = trans->journal_entries;
706 entry != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
707 entry = vstruct_next(entry))
708 if (entry->type == BCH_JSET_ENTRY_write_buffer_keys &&
709 entry->start->k.type == KEY_TYPE_accounting) {
710 BUG_ON(!trans->journal_res.ref);
711
712 struct bkey_i_accounting *a = bkey_i_to_accounting(entry->start);
713
714 a->k.bversion = journal_pos_to_bversion(&trans->journal_res,
715 (u64 *) entry - (u64 *) trans->journal_entries);
716 BUG_ON(bversion_zero(a->k.bversion));
717
718 if (likely(!(flags & BCH_TRANS_COMMIT_skip_accounting_apply))) {
719 ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), BCH_ACCOUNTING_normal);
720 if (ret)
721 goto revert_fs_usage;
722 }
723 }
724 percpu_up_read(&c->mark_lock);
725
726 /* XXX: we only want to run this if deltas are nonzero */
727 bch2_trans_account_disk_usage_change(trans);
728
729 trans_for_each_update(trans, i)
730 if (btree_node_type_has_atomic_triggers(i->bkey_type)) {
731 ret = run_one_mem_trigger(trans, i, BTREE_TRIGGER_atomic|i->flags);
732 if (ret)
733 goto fatal_err;
734 }
735
736 if (unlikely(c->gc_pos.phase)) {
737 ret = bch2_trans_commit_run_gc_triggers(trans);
738 if (ret)
739 goto fatal_err;
740 }
741
742 trans_for_each_update(trans, i) {
743 enum bch_validate_flags invalid_flags = 0;
744
745 if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
746 invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
747
748 ret = bch2_bkey_validate(c, bkey_i_to_s_c(i->k),
749 i->bkey_type, invalid_flags);
750 if (unlikely(ret)){
751 bch2_trans_inconsistent(trans, "invalid bkey on insert from %s -> %ps\n",
752 trans->fn, (void *) i->ip_allocated);
753 goto fatal_err;
754 }
755 btree_insert_entry_checks(trans, i);
756 }
757
758 for (struct jset_entry *i = trans->journal_entries;
759 i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
760 i = vstruct_next(i)) {
761 enum bch_validate_flags invalid_flags = 0;
762
763 if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
764 invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
765
766 ret = bch2_journal_entry_validate(c, NULL, i,
767 bcachefs_metadata_version_current,
768 CPU_BIG_ENDIAN, invalid_flags);
769 if (unlikely(ret)) {
770 bch2_trans_inconsistent(trans, "invalid journal entry on insert from %s\n",
771 trans->fn);
772 goto fatal_err;
773 }
774 }
775
	if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) {
		struct journal *j = &c->journal;
		struct jset_entry *entry;

		trans_for_each_update(trans, i) {
			if (i->key_cache_already_flushed)
				continue;

			if (i->flags & BTREE_UPDATE_nojournal)
				continue;

			verify_update_old_key(trans, i);

			if (trans->journal_transaction_names) {
				entry = bch2_journal_add_entry(j, &trans->journal_res,
						       BCH_JSET_ENTRY_overwrite,
						       i->btree_id, i->level,
						       i->old_k.u64s);
				bkey_reassemble((struct bkey_i *) entry->start,
						(struct bkey_s_c) { &i->old_k, i->old_v });
			}

			entry = bch2_journal_add_entry(j, &trans->journal_res,
					       BCH_JSET_ENTRY_btree_keys,
					       i->btree_id, i->level,
					       i->k->k.u64s);
			bkey_copy((struct bkey_i *) entry->start, i->k);
		}

		memcpy_u64s_small(journal_res_entry(&c->journal, &trans->journal_res),
				  trans->journal_entries,
				  trans->journal_entries_u64s);

		trans->journal_res.offset += trans->journal_entries_u64s;
		trans->journal_res.u64s	  -= trans->journal_entries_u64s;

		if (trans->journal_seq)
			*trans->journal_seq = trans->journal_res.seq;
	}

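	/*
	 * Do the actual inserts: to the leaf node, to the key cache, or -
	 * for an update flushing a key cache entry back to the btree - drop
	 * the now-stale cached key:
	 */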
	trans_for_each_update(trans, i) {
		struct btree_path *path = trans->paths + i->path;

		if (!i->cached)
			bch2_btree_insert_key_leaf(trans, path, i->k, trans->journal_res.seq);
		else if (!i->key_cache_already_flushed)
			bch2_btree_insert_key_cached(trans, flags, i);
		else
			bch2_btree_key_cache_drop(trans, path);
	}

	return 0;
fatal_err:
	bch2_fs_fatal_error(c, "fatal error in transaction commit: %s", bch2_err_str(ret));
	percpu_down_read(&c->mark_lock);
revert_fs_usage:
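	/*
	 * Undo the accounting that was already applied: negate each delta,
	 * re-apply it, then negate again so the key itself is unmodified:
	 */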
	for (struct jset_entry *entry2 = trans->journal_entries;
	     entry2 != entry;
	     entry2 = vstruct_next(entry2))
		if (entry2->type == BCH_JSET_ENTRY_write_buffer_keys &&
		    entry2->start->k.type == KEY_TYPE_accounting) {
			struct bkey_s_accounting a = bkey_i_to_s_accounting(entry2->start);

			bch2_accounting_neg(a);
			bch2_accounting_mem_mod_locked(trans, a.c, BCH_ACCOUNTING_normal);
			bch2_accounting_neg(a);
		}
	percpu_up_read(&c->mark_lock);
	return ret;
}

static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans)
{
	/*
	 * Accounting keys are skipped here because they aren't deduped against
	 * the journal: accounting updates don't overwrite, they're deltas that
	 * accumulate, so each one has to be compared against what's in the
	 * btree individually to tell whether it's been applied yet.
	 */
	trans_for_each_update(trans, i)
		if (i->k->k.type != KEY_TYPE_accounting)
			bch2_journal_key_overwritten(trans->c, i->btree_id, i->level, i->k->k.p);
}

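/*
 * No-op flush callback for the caller-supplied journal pin
 * (trans->journal_pin); there's nothing for the journal to flush here:
 */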
static int bch2_trans_commit_journal_pin_flush(struct journal *j,
					       struct journal_entry_pin *_pin, u64 seq)
{
	return 0;
}

/*
 * Get journal reservation, take write locks, and attempt to do btree update(s):
 */
static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags,
				       struct btree_insert_entry **stopped_at,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	int ret = 0, u64s_delta = 0;

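	/*
	 * If these updates shrink any leaf nodes (u64s_delta <= 0), check
	 * whether those leaves should be merged with a sibling before we
	 * take write locks:
	 */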
	for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
		struct btree_insert_entry *i = trans->updates + idx;
		if (i->cached)
			continue;

		u64s_delta += !bkey_deleted(&i->k->k) ? i->k->k.u64s : 0;
		u64s_delta -= i->old_btree_u64s;

		if (!same_leaf_as_next(trans, i)) {
			if (u64s_delta <= 0) {
				ret = bch2_foreground_maybe_merge(trans, i->path,
								  i->level, flags);
				if (unlikely(ret))
					return ret;
			}

			u64s_delta = 0;
		}
	}

	ret = bch2_trans_lock_write(trans);
	if (unlikely(ret))
		return ret;

	ret = bch2_trans_commit_write_locked(trans, flags, stopped_at, trace_ip);

	if (!ret && unlikely(trans->journal_replay_not_finished))
		bch2_drop_overwrites_from_journal(trans);

	bch2_trans_unlock_write(trans);

	if (!ret && trans->journal_pin)
		bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
				     trans->journal_pin,
				     bch2_trans_commit_journal_pin_flush);

	/*
	 * Drop journal reservation after dropping write locks, since dropping
	 * the journal reservation may kick off a journal write:
	 */
	if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res)))
		bch2_journal_res_put(&c->journal, &trans->journal_res);

	return ret;
}

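/*
 * Used when blocked on journal reclaim: we're done waiting once the journal
 * has hit an error or the key cache has made enough progress; until then,
 * keep kicking journal reclaim:
 */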
static int journal_reclaim_wait_done(struct bch_fs *c)
{
	int ret = bch2_journal_error(&c->journal) ?:
		bch2_btree_key_cache_wait_done(c);

	if (!ret)
		journal_reclaim_kick(&c->journal);
	return ret;
}

static noinline
int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
			    struct btree_insert_entry *i,
			    int ret, unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;

	switch (ret) {
	case -BCH_ERR_btree_insert_btree_node_full:
		ret = bch2_btree_split_leaf(trans, i->path, flags);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			trace_and_count(c, trans_restart_btree_node_split, trans,
					trace_ip, trans->paths + i->path);
		break;
	case -BCH_ERR_btree_insert_need_mark_replicas:
		ret = drop_locks_do(trans,
			bch2_accounting_update_sb(trans));
		break;
	case -BCH_ERR_journal_res_get_blocked:
		/*
		 * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK
		 * flag
		 */
		if ((flags & BCH_TRANS_COMMIT_journal_reclaim) &&
		    watermark < BCH_WATERMARK_reclaim) {
			ret = -BCH_ERR_journal_reclaim_would_deadlock;
			break;
		}

		ret = drop_locks_do(trans,
			bch2_trans_journal_res_get(trans,
					(flags & BCH_WATERMARK_MASK)|
					JOURNAL_RES_GET_CHECK));
		break;
	case -BCH_ERR_btree_insert_need_journal_reclaim:
		bch2_trans_unlock(trans);

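		/*
		 * Wait for journal reclaim and key cache flushing to make
		 * progress, accounting the time we're blocked:
		 */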
		trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);
		track_event_change(&c->times[BCH_TIME_blocked_key_cache_flush], true);

		wait_event_freezable(c->journal.reclaim_wait,
				     (ret = journal_reclaim_wait_done(c)));

		track_event_change(&c->times[BCH_TIME_blocked_key_cache_flush], false);

		if (ret < 0)
			break;

		ret = bch2_trans_relock(trans);
		break;
	default:
		BUG_ON(ret >= 0);
		break;
	}

	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);

	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOSPC) &&
				(flags & BCH_TRANS_COMMIT_no_enospc), c,
				"%s: incorrectly got %s\n", __func__, bch2_err_str(ret));

	return ret;
}

static noinline int
bch2_trans_commit_get_rw_cold(struct btree_trans *trans, unsigned flags)
{
	struct bch_fs *c = trans->c;
	int ret;

	if (likely(!(flags & BCH_TRANS_COMMIT_lazy_rw)) ||
	    test_bit(BCH_FS_started, &c->flags))
		return -BCH_ERR_erofs_trans_commit;

	ret = drop_locks_do(trans, bch2_fs_read_write_early(c));
	if (ret)
		return ret;

	bch2_write_ref_get(c, BCH_WRITE_REF_trans);
	return 0;
}

/*
 * This is for updates done in the early part of fsck - btree_gc - before we've
 * gone RW. We only add the new key to the list of keys for journal replay to
 * apply.
 */
static noinline int
do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	trans_for_each_update(trans, i) {
		int ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->k);
		if (ret)
			return ret;
	}

	for (struct jset_entry *i = trans->journal_entries;
	     i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
	     i = vstruct_next(i))
		if (i->type == BCH_JSET_ENTRY_btree_keys ||
		    i->type == BCH_JSET_ENTRY_write_buffer_keys) {
			int ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->start);
			if (ret)
				return ret;
		}

	return 0;
}

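/*
 * Example usage (a sketch, not code from this file - assumes the commit_do()
 * helper from btree_update.h): callers don't normally call
 * __bch2_trans_commit() directly, but go through the bch2_trans_commit()
 * wrapper or a convenience macro that retries on transaction restart:
 *
 *	ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 *			bch2_btree_delete_at(trans, &iter, 0));
 */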
int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
{
	struct btree_insert_entry *errored_at = NULL;
	struct bch_fs *c = trans->c;
	int ret = 0;

	bch2_trans_verify_not_unlocked(trans);
	bch2_trans_verify_not_in_restart(trans);

	if (!trans->nr_updates &&
	    !trans->journal_entries_u64s)
		goto out_reset;

	ret = bch2_trans_commit_run_triggers(trans);
	if (ret)
		goto out_reset;

	if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) {
		ret = do_bch2_trans_commit_to_journal_replay(trans);
		goto out_reset;
	}

	if (!(flags & BCH_TRANS_COMMIT_no_check_rw) &&
	    unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))) {
		ret = bch2_trans_commit_get_rw_cold(trans, flags);
		if (ret)
			goto out_reset;
	}

	EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));

	trans->journal_u64s = trans->journal_entries_u64s;
	trans->journal_transaction_names = READ_ONCE(c->opts.journal_transaction_names);
	if (trans->journal_transaction_names)
		trans->journal_u64s += jset_u64s(JSET_ENTRY_LOG_U64s);

	trans_for_each_update(trans, i) {
		struct btree_path *path = trans->paths + i->path;

		EBUG_ON(!path->should_be_locked);

		ret = bch2_btree_path_upgrade(trans, path, i->level + 1);
		if (unlikely(ret))
			goto out;

		EBUG_ON(!btree_node_intent_locked(path, i->level));

		if (i->key_cache_already_flushed)
			continue;

		if (i->flags & BTREE_UPDATE_nojournal)
			continue;

		/* we're going to journal the key being updated: */
		trans->journal_u64s += jset_u64s(i->k->k.u64s);

		/* and we're also going to log the overwrite: */
		if (trans->journal_transaction_names)
			trans->journal_u64s += jset_u64s(i->old_k.u64s);
	}

	if (trans->extra_disk_res) {
		ret = bch2_disk_reservation_add(c, trans->disk_res,
						trans->extra_disk_res,
						(flags & BCH_TRANS_COMMIT_no_enospc)
						? BCH_DISK_RESERVATION_NOFAIL : 0);
		if (ret)
			goto err;
	}
retry:
	errored_at = NULL;
	bch2_trans_verify_not_unlocked(trans);
	bch2_trans_verify_not_in_restart(trans);
	if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res)))
		memset(&trans->journal_res, 0, sizeof(trans->journal_res));
	memset(&trans->fs_usage_delta, 0, sizeof(trans->fs_usage_delta));

	ret = do_bch2_trans_commit(trans, flags, &errored_at, _RET_IP_);

	/* make sure we didn't drop or screw up locks: */
	bch2_trans_verify_locks(trans);

	if (ret)
		goto err;

	trace_and_count(c, transaction_commit, trans, _RET_IP_);
out:
	if (likely(!(flags & BCH_TRANS_COMMIT_no_check_rw)))
		bch2_write_ref_put(c, BCH_WRITE_REF_trans);
out_reset:
	if (!ret)
		bch2_trans_downgrade(trans);
	bch2_trans_reset_updates(trans);

	return ret;
err:
	ret = bch2_trans_commit_error(trans, flags, errored_at, ret, _RET_IP_);
	if (ret)
		goto out;

	/*
	 * The error path may have done another transaction commit - i.e. a
	 * btree write buffer flush - which will have used trans->journal_res.
	 * With BCH_TRANS_COMMIT_no_journal_res, journal_res is how the caller
	 * passes in the journal sequence number to pin, so we can't simply
	 * retry - we must restart:
	 */
	if (flags & BCH_TRANS_COMMIT_no_journal_res) {
		ret = -BCH_ERR_transaction_restart_nested;
		goto out;
	}

	goto retry;
}