// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_update.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_locking.h"
#include "buckets.h"
#include "debug.h"
#include "errcode.h"
#include "error.h"
#include "extents.h"
#include "keylist.h"
#include "snapshot.h"
#include "trace.h"

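/*
 * Sort order for the transaction's list of pending updates: by btree ID,
 * then cached vs. uncached, then descending level, then key position.
 */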
static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
					 const struct btree_insert_entry *r)
{
	return   cmp_int(l->btree_id,	r->btree_id) ?:
		 cmp_int(l->cached,	r->cached) ?:
		 -cmp_int(l->level,	r->level) ?:
		 bpos_cmp(l->k->k.p,	r->k->k.p);
}

static int __must_check
bch2_trans_update_by_path(struct btree_trans *, btree_path_idx_t,
			  struct bkey_i *, enum btree_update_flags,
			  unsigned long ip);

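/*
 * Try to merge the existing extent @k with the front of @*insert: if the
 * merge succeeds, @k is deleted and @*insert is replaced with the merged
 * key. Skipped when either position has been overwritten in another
 * snapshot, since merging would change what those snapshots see.
 */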
static noinline int extent_front_merge(struct btree_trans *trans,
				       struct btree_iter *iter,
				       struct bkey_s_c k,
				       struct bkey_i **insert,
				       enum btree_update_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *update;
	int ret;

	update = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(update);
	if (ret)
		return ret;

	if (!bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(*insert)))
		return 0;

	ret =   bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p) ?:
		bch2_key_has_snapshot_overwrites(trans, iter->btree_id, (*insert)->k.p);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	ret = bch2_btree_delete_at(trans, iter, flags);
	if (ret)
		return ret;

	*insert = update;
	return 0;
}

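/*
 * Try to extend @insert by merging the following extent @k into it. Since
 * the merged key ends at @k's position, it replaces @k when inserted.
 * Skipped when either position has been overwritten in another snapshot.
 */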
static noinline int extent_back_merge(struct btree_trans *trans,
				      struct btree_iter *iter,
				      struct bkey_i *insert,
				      struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	int ret;

	ret =   bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?:
		bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	bch2_bkey_merge(c, bkey_i_to_s(insert), k);
	return 0;
}

/*
 * When deleting, check if we need to emit a whiteout (because we're overwriting
 * something in an ancestor snapshot)
 */
static int need_whiteout_for_snapshot(struct btree_trans *trans,
				      enum btree_id btree_id, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 snapshot = pos.snapshot;
	int ret;

	if (!bch2_snapshot_parent(trans->c, pos.snapshot))
		return 0;

	pos.snapshot++;

	for_each_btree_key_norestart(trans, iter, btree_id, pos,
			   BTREE_ITER_ALL_SNAPSHOTS|
			   BTREE_ITER_NOPRESERVE, k, ret) {
		if (!bkey_eq(k.k->p, pos))
			break;

		if (bch2_snapshot_is_ancestor(trans->c, snapshot,
					      k.k->p.snapshot)) {
			ret = !bkey_whiteout(k.k);
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

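/*
 * When a key moves from @old_pos to @new_pos (e.g. when an extent is
 * split), snapshots descended from @old_pos.snapshot that had overwritten
 * the key at @old_pos would otherwise newly see it at @new_pos: insert
 * whiteouts at @new_pos in those snapshots so their visible data is
 * unchanged.
 */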
int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
				   enum btree_id id,
				   struct bpos old_pos,
				   struct bpos new_pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter old_iter, new_iter = { NULL };
	struct bkey_s_c old_k, new_k;
	snapshot_id_list s;
	struct bkey_i *update;
	int ret = 0;

	if (!bch2_snapshot_has_children(c, old_pos.snapshot))
		return 0;

	darray_init(&s);

	bch2_trans_iter_init(trans, &old_iter, id, old_pos,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_ALL_SNAPSHOTS);
	while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
	       !(ret = bkey_err(old_k)) &&
	       bkey_eq(old_pos, old_k.k->p)) {
		struct bpos whiteout_pos =
			SPOS(new_pos.inode, new_pos.offset, old_k.k->p.snapshot);

		if (!bch2_snapshot_is_ancestor(c, old_k.k->p.snapshot, old_pos.snapshot) ||
		    snapshot_list_has_ancestor(c, &s, old_k.k->p.snapshot))
			continue;

		new_k = bch2_bkey_get_iter(trans, &new_iter, id, whiteout_pos,
					   BTREE_ITER_NOT_EXTENTS|
					   BTREE_ITER_INTENT);
		ret = bkey_err(new_k);
		if (ret)
			break;

		if (new_k.k->type == KEY_TYPE_deleted) {
			update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
			ret = PTR_ERR_OR_ZERO(update);
			if (ret)
				break;

			bkey_init(&update->k);
			update->k.p		= whiteout_pos;
			update->k.type		= KEY_TYPE_whiteout;

			ret = bch2_trans_update(trans, &new_iter, update,
						BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
		}
		bch2_trans_iter_exit(trans, &new_iter);

		ret = snapshot_list_add(c, &s, old_k.k->p.snapshot);
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &new_iter);
	bch2_trans_iter_exit(trans, &old_iter);
	darray_exit(&s);

	return ret;
}

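/*
 * Handle a single existing extent @old that overlaps the extent being
 * inserted, @new: emit the surviving front and/or back parts of @old, and
 * delete (or whiteout, when other snapshots depend on it) the overwritten
 * portion.
 */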
int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
				       struct btree_iter *iter,
				       enum btree_update_flags flags,
				       struct bkey_s_c old,
				       struct bkey_s_c new)
{
	enum btree_id btree_id = iter->btree_id;
	struct bkey_i *update;
	struct bpos new_start = bkey_start_pos(new.k);
	unsigned front_split = bkey_lt(bkey_start_pos(old.k), new_start);
	unsigned back_split  = bkey_gt(old.k->p, new.k->p);
	unsigned middle_split = (front_split || back_split) &&
		old.k->p.snapshot != new.k->p.snapshot;
	unsigned nr_splits = front_split + back_split + middle_split;
	int ret = 0, compressed_sectors;

	/*
	 * If we're going to be splitting a compressed extent, note it
	 * so that __bch2_trans_commit() can increase our disk
	 * reservation:
	 */
	if (nr_splits > 1 &&
	    (compressed_sectors = bch2_bkey_sectors_compressed(old)))
		trans->extra_disk_res += compressed_sectors * (nr_splits - 1);

	if (front_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_back(new_start, update);

		ret =   bch2_insert_snapshot_whiteouts(trans, btree_id,
					old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
		if (ret)
			return ret;
	}

	/* If we're overwriting in a different snapshot - middle split: */
	if (middle_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_front(new_start, update);
		bch2_cut_back(new.k->p, update);

		ret =   bch2_insert_snapshot_whiteouts(trans, btree_id,
					old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
					  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
		if (ret)
			return ret;
	}

	if (bkey_le(old.k->p, new.k->p)) {
		update = bch2_trans_kmalloc(trans, sizeof(*update));
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bkey_init(&update->k);
		update->k.p = old.k->p;
		update->k.p.snapshot = new.k->p.snapshot;

		if (new.k->p.snapshot != old.k->p.snapshot) {
			update->k.type = KEY_TYPE_whiteout;
		} else if (btree_type_has_snapshots(btree_id)) {
			ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p);
			if (ret < 0)
				return ret;
			if (ret)
				update->k.type = KEY_TYPE_whiteout;
		}

		ret = bch2_btree_insert_nonextent(trans, btree_id, update,
					  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
		if (ret)
			return ret;
	}

	if (back_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_front(new.k->p, update);

		ret = bch2_trans_update_by_path(trans, iter->path, update,
					  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
					  flags, _RET_IP_);
		if (ret)
			return ret;
	}

	return 0;
}

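/*
 * Extent updates: walk every existing extent that overlaps @insert,
 * resolving each overlap with bch2_trans_update_extent_overwrite(), and
 * try to merge @insert with its immediate neighbours before finally
 * inserting it.
 */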
static int bch2_trans_update_extent(struct btree_trans *trans,
				    struct btree_iter *orig_iter,
				    struct bkey_i *insert,
				    enum btree_update_flags flags)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	enum btree_id btree_id = orig_iter->btree_id;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
			     BTREE_ITER_INTENT|
			     BTREE_ITER_WITH_UPDATES|
			     BTREE_ITER_NOT_EXTENTS);
	k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
	if ((ret = bkey_err(k)))
		goto err;
	if (!k.k)
		goto out;

	if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
		if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
			ret = extent_front_merge(trans, &iter, k, &insert, flags);
			if (ret)
				goto err;
		}

		goto next;
	}

	while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
		bool done = bkey_lt(insert->k.p, k.k->p);

		ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert));
		if (ret)
			goto err;

		if (done)
			goto out;
next:
		bch2_btree_iter_advance(&iter);
		k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
		if ((ret = bkey_err(k)))
			goto err;
		if (!k.k)
			goto out;
	}

	if (bch2_bkey_maybe_mergable(&insert->k, k.k)) {
		ret = extent_back_merge(trans, &iter, insert, k);
		if (ret)
			goto err;
	}
out:
	if (!bkey_deleted(&insert->k))
		ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags);
err:
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

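/*
 * A new key is being created in the key cache but doesn't yet exist in
 * the btree proper: insert it into the underlying btree as well, so the
 * cache stays coherent (see the comment in bch2_trans_update_by_path()).
 */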
static noinline int flush_new_cached_update(struct btree_trans *trans,
					    struct btree_insert_entry *i,
					    enum btree_update_flags flags,
					    unsigned long ip)
{
	struct bkey k;
	int ret;

	btree_path_idx_t path_idx =
		bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
			      BTREE_ITER_INTENT, _THIS_IP_);
	ret = bch2_btree_path_traverse(trans, path_idx, 0);
	if (ret)
		goto out;

	struct btree_path *btree_path = trans->paths + path_idx;

	/*
	 * The old key in the insert entry might actually refer to an existing
	 * key in the btree that has been deleted from cache and not yet
	 * flushed. Check for this and skip the flush so we don't run triggers
	 * against a stale key.
	 */
	bch2_btree_path_peek_slot_exact(btree_path, &k);
	if (!bkey_deleted(&k))
		goto out;

	i->key_cache_already_flushed = true;
	i->flags |= BTREE_TRIGGER_NORUN;

	btree_path_set_should_be_locked(btree_path);
	ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
out:
	bch2_path_put(trans, path_idx, true);
	return ret;
}

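/*
 * Record an update in the transaction's sorted list of pending updates;
 * an existing update for the same key is replaced.
 */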
static int __must_check
bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
			  struct bkey_i *k, enum btree_update_flags flags,
			  unsigned long ip)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i, n;
	int cmp;

	struct btree_path *path = trans->paths + path_idx;
	EBUG_ON(!path->should_be_locked);
	EBUG_ON(trans->nr_updates >= trans->nr_paths);
	EBUG_ON(!bpos_eq(k->k.p, path->pos));

	n = (struct btree_insert_entry) {
		.flags		= flags,
		.bkey_type	= __btree_node_type(path->level, path->btree_id),
		.btree_id	= path->btree_id,
		.level		= path->level,
		.cached		= path->cached,
		.path		= path_idx,
		.k		= k,
		.ip_allocated	= ip,
	};

#ifdef CONFIG_BCACHEFS_DEBUG
	trans_for_each_update(trans, i)
		BUG_ON(i != trans->updates &&
		       btree_insert_entry_cmp(i - 1, i) >= 0);
#endif

	/*
	 * Pending updates are kept sorted: first, find position of new update,
	 * then delete/trim any updates the new update overwrites:
	 */
	for (i = trans->updates; i < trans->updates + trans->nr_updates; i++) {
		cmp = btree_insert_entry_cmp(&n, i);
		if (cmp <= 0)
			break;
	}

	if (!cmp && i < trans->updates + trans->nr_updates) {
		EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);

		bch2_path_put(trans, i->path, true);
		i->flags	= n.flags;
		i->cached	= n.cached;
		i->k		= n.k;
		i->path		= n.path;
		i->ip_allocated	= n.ip_allocated;
	} else {
		array_insert_item(trans->updates, trans->nr_updates,
				  i - trans->updates, n);

		i->old_v = bch2_btree_path_peek_slot_exact(path, &i->old_k).v;
		i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;

		if (unlikely(trans->journal_replay_not_finished)) {
			struct bkey_i *j_k =
				bch2_journal_keys_peek_slot(c, n.btree_id, n.level, k->k.p);

			if (j_k) {
				i->old_k = j_k->k;
				i->old_v = &j_k->v;
			}
		}
	}

	__btree_path_get(trans->paths + i->path, true);

	/*
	 * If a key is present in the key cache, it must also exist in the
	 * btree - this is necessary for cache coherency. When iterating over
	 * a btree that's cached in the key cache, the btree iter code checks
	 * the key cache - but the key has to exist in the btree for that to
	 * work:
	 */
	if (path->cached && bkey_deleted(&i->old_k))
		return flush_new_cached_update(trans, i, flags, ip);

	return 0;
}

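/*
 * Make sure @iter has a traversed and locked key cache path for the
 * position being updated, restarting the transaction if the cached key
 * was dirtied by a racing update.
 */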
static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
						    struct btree_iter *iter,
						    struct btree_path *path)
{
	struct btree_path *key_cache_path = btree_iter_key_cache_path(trans, iter);

	if (!key_cache_path ||
	    !key_cache_path->should_be_locked ||
	    !bpos_eq(key_cache_path->pos, iter->pos)) {
		struct bkey_cached *ck;
		int ret;

		if (!iter->key_cache_path)
			iter->key_cache_path =
				bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
					      BTREE_ITER_INTENT|
					      BTREE_ITER_CACHED, _THIS_IP_);

		iter->key_cache_path =
			bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
						iter->flags & BTREE_ITER_INTENT,
						_THIS_IP_);

		ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_CACHED);
		if (unlikely(ret))
			return ret;

		ck = (void *) trans->paths[iter->key_cache_path].l[0].b;

		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
		}

		btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
	}

	return 0;
}

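/*
 * bch2_trans_update - queue an update to be applied when the current
 * transaction commits. Extent updates are routed through
 * bch2_trans_update_extent(), and updates to cached btrees go through the
 * key cache.
 *
 * A minimal usage sketch (assuming the caller has set up @trans and
 * positioned @iter):
 *
 *	ret =   bch2_btree_iter_traverse(&iter) ?:
 *		bch2_trans_update(trans, &iter, k, 0) ?:
 *		bch2_trans_commit(trans, NULL, NULL, 0);
 */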
int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
				   struct bkey_i *k, enum btree_update_flags flags)
{
	btree_path_idx_t path_idx = iter->update_path ?: iter->path;
	int ret;

	if (iter->flags & BTREE_ITER_IS_EXTENTS)
		return bch2_trans_update_extent(trans, iter, k, flags);

	if (bkey_deleted(&k->k) &&
	    !(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
	    (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) {
		ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
		if (unlikely(ret < 0))
			return ret;

		if (ret)
			k->k.type = KEY_TYPE_whiteout;
	}

	/*
	 * Ensure that updates to cached btrees go to the key cache:
	 */
	struct btree_path *path = trans->paths + path_idx;
	if (!(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
	    !path->cached &&
	    !path->level &&
	    btree_id_cached(trans->c, path->btree_id)) {
		ret = bch2_trans_update_get_key_cache(trans, iter, path);
		if (ret)
			return ret;

		path_idx = iter->key_cache_path;
	}

	return bch2_trans_update_by_path(trans, path_idx, k, flags, _RET_IP_);
}

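/*
 * Insert a copy of @k allocated from the transaction's memory pool, so
 * the caller's key doesn't have to stay valid until commit.
 */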
int bch2_btree_insert_clone_trans(struct btree_trans *trans,
				  enum btree_id btree,
				  struct bkey_i *k)
{
	struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(&k->k));
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	bkey_copy(n, k);
	return bch2_btree_insert_trans(trans, btree, n, 0);
}

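/*
 * Reserve @u64s worth of space in the transaction's journal entry buffer,
 * growing and reallocating the buffer as needed, and return a pointer to
 * the new entry.
 */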
struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
{
	unsigned new_top = trans->journal_entries_u64s + u64s;
	unsigned old_size = trans->journal_entries_size;

	if (new_top > trans->journal_entries_size) {
		trans->journal_entries_size = roundup_pow_of_two(new_top);

		btree_trans_stats(trans)->journal_entries_size = trans->journal_entries_size;
	}

	struct jset_entry *n =
		bch2_trans_kmalloc_nomemzero(trans,
				trans->journal_entries_size * sizeof(u64));
	if (IS_ERR(n))
		return ERR_CAST(n);

	if (trans->journal_entries)
		memcpy(n, trans->journal_entries, old_size * sizeof(u64));
	trans->journal_entries = n;

	struct jset_entry *e = btree_trans_journal_entries_top(trans);
	trans->journal_entries_u64s = new_top;
	return e;
}

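/*
 * Find an empty slot in @btree: returns, via @iter, the position
 * immediately after the last key in the btree, or
 * -BCH_ERR_ENOSPC_btree_slot if that position is past @end.
 */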
int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
			     enum btree_id btree, struct bpos end)
{
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_INTENT);
	k = bch2_btree_iter_prev(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	bch2_btree_iter_advance(iter);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	BUG_ON(k.k->type != KEY_TYPE_deleted);

	if (bkey_gt(k.k->p, end)) {
		ret = -BCH_ERR_ENOSPC_btree_slot;
		goto err;
	}

	return 0;
err:
	bch2_trans_iter_exit(trans, iter);
	return ret;
}

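/*
 * Register a hook to be run when the current transaction commits; hooks
 * are chained onto the transaction's hook list.
 */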
void bch2_trans_commit_hook(struct btree_trans *trans,
			    struct btree_trans_commit_hook *h)
{
	h->next = trans->hooks;
	trans->hooks = h;
}

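/*
 * Insert @k at its exact position in @btree, bypassing the extent
 * overwrite and merge paths.
 */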
int bch2_btree_insert_nonextent(struct btree_trans *trans,
				enum btree_id btree, struct bkey_i *k,
				enum btree_update_flags flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, k->k.p,
			     BTREE_ITER_CACHED|
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_INTENT);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

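/*
 * Queue an insert of @k into @id as part of the current transaction; for
 * extents btrees, the usual overwrite and merge handling applies.
 */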
int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
			    struct bkey_i *k, enum btree_update_flags flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/**
 * bch2_btree_insert - insert a key into a btree, in a single transaction
 * @c:			pointer to struct bch_fs
 * @id:			btree to insert into
 * @k:			key to insert
 * @disk_res:		must be non-NULL whenever inserting or potentially
 *			splitting data extents
 * @flags:		transaction commit flags
 *
 * Returns:		0 on success, error code on failure
 */
int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k,
		      struct disk_reservation *disk_res, int flags)
{
	return bch2_trans_do(c, disk_res, NULL, flags,
			     bch2_btree_insert_trans(trans, id, k, 0));
}

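/*
 * Delete the key at @iter's current position; in extents btrees, @len is
 * the number of sectors to delete.
 */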
int bch2_btree_delete_extent_at(struct btree_trans *trans, struct btree_iter *iter,
				unsigned len, unsigned update_flags)
{
	struct bkey_i *k;

	k = bch2_trans_kmalloc(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.p = iter->pos;
	bch2_key_resize(&k->k, len);
	return bch2_trans_update(trans, iter, k, update_flags);
}

int bch2_btree_delete_at(struct btree_trans *trans,
			 struct btree_iter *iter, unsigned update_flags)
{
	return bch2_btree_delete_extent_at(trans, iter, 0, update_flags);
}

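/*
 * Delete the key at @pos in @btree, going through the key cache where
 * possible.
 */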
int bch2_btree_delete(struct btree_trans *trans,
		      enum btree_id btree, struct bpos pos,
		      unsigned update_flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, pos,
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(trans, &iter, update_flags);
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

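/*
 * Delete every key in [@start, @end), committing after each deletion;
 * extents that cross the range boundaries are trimmed rather than deleted
 * whole.
 */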
int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
				  struct bpos start, struct bpos end,
				  unsigned update_flags,
				  u64 *journal_seq)
{
	u32 restart_count = trans->restart_count;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_INTENT);
	while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(trans->c, 0);
		struct bkey_i delete;

		ret = bkey_err(k);
		if (ret)
			goto err;

		bkey_init(&delete.k);

		/*
		 * This could probably be more efficient for extents:
		 */

		/*
		 * For extents, iter.pos won't necessarily be the same as
		 * bkey_start_pos(k.k) (for non extents they always will be the
		 * same). It's important that we delete starting from iter.pos
		 * because the range we want to delete could start in the middle
		 * of k.
		 *
		 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
		 * bkey_start_pos(k.k)).
		 */
		delete.k.p = iter.pos;

		if (iter.flags & BTREE_ITER_IS_EXTENTS)
			bch2_key_resize(&delete.k,
					bpos_min(end, k.k->p).offset -
					iter.pos.offset);

		ret   = bch2_trans_update(trans, &iter, &delete, update_flags) ?:
			bch2_trans_commit(trans, &disk_res, journal_seq,
					  BCH_TRANS_COMMIT_no_enospc);
		bch2_disk_reservation_put(trans->c, &disk_res);
err:
		/*
		 * the bch2_trans_begin() call is in a weird place because we
		 * need to call it after every transaction commit, to avoid path
		 * overflow, but don't want to call it if the delete operation
		 * is a no-op and we have no work to do:
		 */
		bch2_trans_begin(trans);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret ?: trans_was_restarted(trans, restart_count);
}

/*
 * bch2_btree_delete_range - delete everything within a given range
 *
 * Range is a half open interval: [start, end)
 */
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
			    struct bpos start, struct bpos end,
			    unsigned update_flags,
			    u64 *journal_seq)
{
	int ret = bch2_trans_run(c,
			bch2_btree_delete_range_trans(trans, id, start, end,
						      update_flags, journal_seq));
	if (ret == -BCH_ERR_transaction_restart_nested)
		ret = 0;
	return ret;
}

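/*
 * Set or clear the key at @pos in a bitset btree by writing a
 * KEY_TYPE_set or KEY_TYPE_deleted key through the btree write buffer. A
 * hedged usage sketch, with "BTREE_ID_example" standing in for a real
 * bitset btree ID:
 *
 *	ret = bch2_btree_bit_mod(trans, BTREE_ID_example, pos, true);
 */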
int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
		       struct bpos pos, bool set)
{
	struct bkey_i k;

	bkey_init(&k.k);
	k.k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
	k.k.p = pos;

	return bch2_trans_update_buffered(trans, btree, &k);
}

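/*
 * Append the message in @buf to the transaction's journal entries as a
 * BCH_JSET_ENTRY_log entry, so it appears in the journal when the
 * transaction commits.
 */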
static int __bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf, unsigned u64s)
{
	struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s));
	int ret = PTR_ERR_OR_ZERO(e);
	if (ret)
		return ret;

	struct jset_entry_log *l = container_of(e, struct jset_entry_log, entry);
	journal_entry_init(e, BCH_JSET_ENTRY_log, 0, 1, u64s);
	memcpy(l->d, buf->buf, buf->pos);
	return 0;
}

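/*
 * Log a printf-formatted message to the journal: before the journal has
 * started, messages are queued in early_journal_entries and written with
 * the first journal write; afterwards they go through a normal btree
 * transaction commit. A usage sketch (hypothetical message):
 *
 *	bch2_fs_log_msg(c, "running recovery pass %u", pass_nr);
 */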
__printf(3, 0)
static int
__bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
		  va_list args)
{
	struct printbuf buf = PRINTBUF;
	prt_vprintf(&buf, fmt, args);

	unsigned u64s = DIV_ROUND_UP(buf.pos, sizeof(u64));
	prt_chars(&buf, '\0', u64s * sizeof(u64) - buf.pos);

	int ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
	if (ret)
		goto err;

	if (!test_bit(JOURNAL_STARTED, &c->journal.flags)) {
		ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s));
		if (ret)
			goto err;

		struct jset_entry_log *l = (void *) &darray_top(c->journal.early_journal_entries);
		journal_entry_init(&l->entry, BCH_JSET_ENTRY_log, 0, 1, u64s);
		memcpy(l->d, buf.buf, buf.pos);
		c->journal.early_journal_entries.nr += jset_u64s(u64s);
	} else {
		ret = bch2_trans_do(c, NULL, NULL,
			BCH_TRANS_COMMIT_lazy_rw|commit_flags,
			__bch2_trans_log_msg(trans, &buf, u64s));
	}
err:
	printbuf_exit(&buf);
	return ret;
}

__printf(2, 3)
int bch2_fs_log_msg(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = __bch2_fs_log_msg(c, 0, fmt, args);
	va_end(args);
	return ret;
}

/*
 * Use for logging messages during recovery to enable reserved space and avoid
 * blocking.
 */
__printf(2, 3)
int bch2_journal_log_msg(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = __bch2_fs_log_msg(c, BCH_WATERMARK_reclaim, fmt, args);
	va_end(args);
	return ret;
}