/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"
#include "trace.h"

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);

static inline int __bkey_err(const struct bkey *k)
{
	return PTR_ERR_OR_ZERO(k);
}

#define bkey_err(_k)	__bkey_err((_k).k)

static inline void __btree_path_get(struct btree_trans *trans, struct btree_path *path, bool intent)
{
	unsigned idx = path - trans->paths;

	EBUG_ON(!test_bit(idx, trans->paths_allocated));
	if (unlikely(path->ref == U8_MAX)) {
		bch2_dump_trans_paths_updates(trans);
		panic("path %u refcount overflow\n", idx);
	}

	path->ref++;
	path->intent_ref += intent;
	trace_btree_path_get_ll(trans, path);
}

static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
{
	EBUG_ON(!test_bit(path - trans->paths, trans->paths_allocated));
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);

	trace_btree_path_put_ll(trans, path);
	path->intent_ref -= intent;
	return --path->ref == 0;
}
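
/*
 * btree_paths are refcounted: __btree_path_get()/__btree_path_put() are the
 * low-level helpers behind bch2_path_get()/bch2_path_put(), declared below.
 * An illustrative sketch of the usual pairing (error handling omitted):
 *
 *	btree_path_idx_t idx = bch2_path_get(trans, btree_id, pos,
 *					     locks_want, level, flags, _THIS_IP_);
 *	...
 *	bch2_path_put(trans, idx, intent);
 */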

static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					const struct btree *b, unsigned level)
{
	return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
}

static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}

/* Iterate over paths within a transaction: */

void __bch2_btree_trans_sort_paths(struct btree_trans *);

static inline void btree_trans_sort_paths(struct btree_trans *trans)
{
	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    trans->paths_sorted)
		return;
	__bch2_btree_trans_sort_paths(trans);
}

static inline unsigned long *trans_paths_nr(struct btree_path *paths)
{
	return &container_of(paths, struct btree_trans_paths, paths[0])->nr_paths;
}

static inline unsigned long *trans_paths_allocated(struct btree_path *paths)
{
	unsigned long *v = trans_paths_nr(paths);
	return v - BITS_TO_LONGS(*v);
}

#define trans_for_each_path_idx_from(_paths_allocated, _nr, _idx, _start)\
	for (_idx = _start;						\
	     (_idx = find_next_bit(_paths_allocated, _nr, _idx)) < _nr;	\
	     _idx++)

static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned *idx)
{
	unsigned long *w = trans->paths_allocated + *idx / BITS_PER_LONG;
	/*
	 * Open-coded find_next_bit(), because
	 *  - this is a fast path, so we can't afford the function call, and
	 *  - we know that nr_paths is a multiple of BITS_PER_LONG
	 */
	while (*idx < trans->nr_paths) {
		unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
		if (v) {
			*idx += __ffs(v);
			return trans->paths + *idx;
		}

		*idx += BITS_PER_LONG;
		*idx &= ~(BITS_PER_LONG - 1);
		w++;
	}

	return NULL;
}

/*
 * This version is intended to be safe for use on a btree_trans that is owned by
 * another thread, e.g. for bch2_btree_trans_to_text().
 */
#define trans_for_each_path_from(_trans, _path, _idx, _start)		\
	for (_idx = _start;						\
	     (_path = __trans_next_path((_trans), &_idx));		\
	     _idx++)

#define trans_for_each_path(_trans, _path, _idx)			\
	trans_for_each_path_from(_trans, _path, _idx, 1)
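
/*
 * Illustrative sketch: walking every btree_path held by a transaction.
 * Iteration starts at index 1 because path index 0 is used as a "none"
 * sentinel (cf. iter->update_path = 0 in bch2_btree_iter_set_pos() below):
 *
 *	struct btree_path *path;
 *	unsigned i;
 *
 *	trans_for_each_path(trans, path, i)
 *		if (path->should_be_locked)
 *			...;
 */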

static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx : trans->nr_sorted;

	return idx
		? trans->paths + trans->sorted[idx - 1]
		: NULL;
}

#define trans_for_each_path_idx_inorder(_trans, _iter)			\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],	\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

struct trans_for_each_path_inorder_iter {
	btree_path_idx_t	sorted_idx;
	btree_path_idx_t	path_idx;
};

#define trans_for_each_path_inorder(_trans, _path, _iter)		\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],	\
	      _path = (_trans)->paths + _iter.path_idx,			\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

#define trans_for_each_path_inorder_reverse(_trans, _path, _i)		\
	for (_i = (_trans)->nr_sorted - 1;				\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) >= 0;\
	     --_i)

static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned *idx)
{
	struct btree_path *path;

	while ((path = __trans_next_path(trans, idx)) &&
		!__path_has_node(path, b))
	       (*idx)++;

	return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path, _iter)		\
	for (_iter = 1;							\
	     (_path = __trans_next_path_with_node((_trans), (_b), &_iter));\
	     _iter++)

btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *, btree_path_idx_t,
					    bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 btree_path_idx_t path, bool intent,
			 unsigned long ip)
{
	if (trans->paths[path].ref > 1 ||
	    trans->paths[path].preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	trans->paths[path].should_be_locked = false;
	return path;
}

btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *, btree_path_idx_t,
			  struct bpos, bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
			btree_path_idx_t path, struct bpos new_pos,
			bool intent, unsigned long ip)
{
	return !bpos_eq(new_pos, trans->paths[path].pos)
		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip)
		: path;
}

int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
					      btree_path_idx_t,
					      unsigned, unsigned long);

static inline void bch2_trans_verify_not_unlocked(struct btree_trans *);

static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
					  btree_path_idx_t path, unsigned flags)
{
	bch2_trans_verify_not_unlocked(trans);

	if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
}

btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
				 unsigned, unsigned, unsigned, unsigned long);
btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id,
					    unsigned, struct bpos);

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

/*
 * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
 * different snapshot:
 */
static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
{
	struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);

	if (k.k && bpos_eq(path->pos, k.k->p))
		return k;

	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					struct btree_iter *, struct bpos);

void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);

int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);

static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
{
	return mutex_trylock(lock)
		? 0
		: __bch2_trans_mutex_lock(trans, lock);
}

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id, struct bpos);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos) {}
#endif

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, btree_path_idx_t, bool);

int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
void bch2_trans_unlock_long(struct btree_trans *);

static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	return restart_count != trans->restart_count
		? -BCH_ERR_transaction_restart_nested
		: 0;
}

void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);

static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
						   u32 restart_count)
{
	if (trans_was_restarted(trans, restart_count))
		bch2_trans_restart_error(trans, restart_count);
}

void __noreturn bch2_trans_in_restart_error(struct btree_trans *);

static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
{
	if (trans->restarted)
		bch2_trans_in_restart_error(trans);
}

void __noreturn bch2_trans_unlocked_error(struct btree_trans *);

static inline void bch2_trans_verify_not_unlocked(struct btree_trans *trans)
{
	if (!trans->locked)
		bch2_trans_unlocked_error(trans);
}

__always_inline
static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
	BUG_ON(err <= 0);
	BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	trans->last_restarted_ip = _THIS_IP_;
	return -err;
}

__always_inline
static int btree_trans_restart(struct btree_trans *trans, int err)
{
	btree_trans_restart_nounlock(trans, err);
	return -err;
}
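
/*
 * Illustrative sketch: code that discovers it must give up its locks and
 * retry signals this by returning a transaction restart error, which
 * lockrestart_do() and friends (below) catch and handle by restarting the
 * transaction. The specific errcode here is just an example; any positive
 * BCH_ERR_transaction_restart_* subtype may be passed:
 *
 *	if (race_detected)
 *		return btree_trans_restart(trans,
 *				BCH_ERR_transaction_restart_nested);
 */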

bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);

static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size = 0;
}

static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	struct btree_trans *trans = iter->trans;

	if (unlikely(iter->update_path))
		bch2_path_put(trans, iter->update_path,
			      iter->flags & BTREE_ITER_intent);
	iter->update_path = 0;

	if (!(iter->flags & BTREE_ITER_all_snapshots))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_is_extents));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}
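
/*
 * Illustrative sketch: repositioning an existing iterator instead of
 * allocating a new one (POS() arguments here are made up for the example):
 *
 *	bch2_btree_iter_set_pos(&iter, POS(inum, offset));
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 */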

void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);

static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
					       unsigned btree_id,
					       unsigned flags)
{
	if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) &&
	    btree_id_is_extents(btree_id))
		flags |= BTREE_ITER_is_extents;

	if (!(flags & BTREE_ITER_snapshot_field) &&
	    !btree_type_has_snapshot_field(btree_id))
		flags &= ~BTREE_ITER_all_snapshots;

	if (!(flags & BTREE_ITER_all_snapshots) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_filter_snapshots;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_with_journal;

	return flags;
}

static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
					     unsigned btree_id,
					     unsigned flags)
{
	if (!btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_cached;
		flags &= ~BTREE_ITER_with_key_cache;
	} else if (!(flags & BTREE_ITER_cached))
		flags |= BTREE_ITER_with_key_cache;

	return __bch2_btree_iter_flags(trans, btree_id, flags);
}

static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
					  struct btree_iter *iter,
					  unsigned btree_id, struct bpos pos,
					  unsigned locks_want,
					  unsigned depth,
					  unsigned flags,
					  unsigned long ip)
{
	iter->trans		= trans;
	iter->update_path	= 0;
	iter->key_cache_path	= 0;
	iter->btree_id		= btree_id;
	iter->min_depth		= 0;
	iter->flags		= flags;
	iter->snapshot		= pos.snapshot;
	iter->pos		= pos;
	iter->k			= POS_KEY(pos);
	iter->journal_idx	= 0;
#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif
	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}

void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
			  enum btree_id, struct bpos, unsigned);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  unsigned btree_id, struct bpos pos,
			  unsigned flags)
{
	if (__builtin_constant_p(btree_id) &&
	    __builtin_constant_p(flags))
		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				bch2_btree_iter_flags(trans, btree_id, flags),
				_THIS_IP_);
	else
		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}
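
/*
 * Illustrative sketch of the basic iterator lifecycle: init, peek, exit.
 * Every bch2_trans_iter_init() must be paired with bch2_trans_iter_exit();
 * BTREE_ID_xattrs and the surrounding names are just example choices:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos, 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret) {
 *		... use k ...
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */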

void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);

void bch2_set_btree_iter_dontneed(struct btree_iter *);

void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

/**
 * bch2_trans_kmalloc - allocate zeroed memory for use by the current transaction
 *
 * Must be called after bch2_trans_begin(), which on second and subsequent
 * calls frees all memory allocated in this transaction.
 *
 * Returns: pointer to the allocation, or an ERR_PTR on failure.
 */
static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		memset(p, 0, size);
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}
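
/*
 * Illustrative sketch: transaction-lifetime allocation of an update buffer.
 * The memory is reclaimed when the transaction is restarted or put, so no
 * explicit free is needed:
 *
 *	struct bkey_i *update = bch2_trans_kmalloc(trans, sizeof(*update));
 *	int ret = PTR_ERR_OR_ZERO(update);
 *	if (ret)
 *		return ret;
 */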

static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
	size = round_up(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}

static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type)
{
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
	k = bch2_btree_iter_peek_slot(iter);

	if (!bkey_err(k) && type && k.k->type != type)
		k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
	if (unlikely(bkey_err(k)))
		bch2_trans_iter_exit(trans, iter);
	return k;
}

static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags)
{
	return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
}

#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,			\
				       _btree_id, _pos, _flags, KEY_TYPE_##_type))
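
/*
 * Illustrative sketch: looking up a key of a known type. On success the
 * iterator is left live and must be exited; on error it has already been
 * exited by __bch2_bkey_get_iter(). BTREE_ID_inodes/inode_v3 are just
 * example choices:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c_inode_v3 inode =
 *		bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_inodes,
 *					 pos, 0, inode_v3);
 *	int ret = bkey_err(inode);
 *	if (ret)
 *		return ret;
 *	... use inode.v ...
 *	bch2_trans_iter_exit(trans, &iter);
 */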

#define bkey_val_copy(_dst_v, _src_k)					\
do {									\
	unsigned b = min_t(unsigned, sizeof(*_dst_v),			\
			   bkey_val_bytes(_src_k.k));			\
	memcpy(_dst_v, _src_k.v, b);					\
	if (b < sizeof(*_dst_v))					\
		memset((void *) (_dst_v) + b, 0, sizeof(*_dst_v) - b);	\
} while (0)

static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type,
				unsigned val_size, void *val)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
	ret = bkey_err(k);
	if (!ret) {
		unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size);

		memcpy(val, k.v, b);
		/* zero the remainder of the caller's buffer, if any: */
		if (unlikely(b < val_size))
			memset((void *) val + b, 0, val_size - b);
		bch2_trans_iter_exit(trans, &iter);
	}

	return ret;
}

#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
	__bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,	\
				  KEY_TYPE_##_type, sizeof(*_val), _val)

void bch2_trans_srcu_unlock(struct btree_trans *);

u32 bch2_trans_begin(struct btree_trans *);

#define __for_each_btree_node(_trans, _iter, _btree_id, _start,			\
			      _locks_want, _depth, _flags, _b, _do)		\
({										\
	bch2_trans_begin((_trans));						\
										\
	struct btree_iter _iter;						\
	bch2_trans_node_iter_init((_trans), &_iter, (_btree_id),		\
				  _start, _locks_want, _depth, _flags);		\
	int _ret3 = 0;								\
	do {									\
		_ret3 = lockrestart_do((_trans), ({				\
			struct btree *_b = bch2_btree_iter_peek_node(&_iter);	\
			if (!_b)						\
				break;						\
										\
			PTR_ERR_OR_ZERO(_b) ?: (_do);				\
		})) ?:								\
		lockrestart_do((_trans),					\
			PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter)));	\
	} while (!_ret3);							\
										\
	bch2_trans_iter_exit((_trans), &(_iter));				\
	_ret3;									\
})

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _do)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      0, 0, _flags, _b, _do)
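
/*
 * Illustrative sketch: walking whole btree nodes rather than keys, e.g. for
 * debug or fsck-style scans. The body is an expression evaluating to int;
 * the names in the body are example choices:
 *
 *	struct btree_iter iter;
 *	int ret = for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN,
 *				      0, b, ({
 *		pr_info("node at level %u", b->c.level);
 *		0;
 *	}));
 */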

static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
							     unsigned flags)
{
	return  flags & BTREE_ITER_slots      ? bch2_btree_iter_peek_slot(iter) :
						bch2_btree_iter_peek_prev(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
							unsigned flags)
{
	return  flags & BTREE_ITER_slots      ? bch2_btree_iter_peek_slot(iter) :
						bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
							     struct bpos end,
							     unsigned flags)
{
	if (!(flags & BTREE_ITER_slots))
		return bch2_btree_iter_peek_upto(iter, end);

	if (bkey_gt(iter->pos, end))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

int __bch2_btree_trans_too_many_iters(struct btree_trans *);

static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_NORMAL_LIMIT - 8)
		return __bch2_btree_trans_too_many_iters(trans);

	return 0;
}

/*
 * We use a goto instead of a loop so that break/continue in _do work
 * correctly when this is used inside the for_each_btree_key() macros.
 */
#define lockrestart_do(_trans, _do)					\
({									\
	__label__ transaction_restart;					\
	u32 _restart_count;						\
	int _ret2;							\
transaction_restart:							\
	_restart_count = bch2_trans_begin(_trans);			\
	_ret2 = (_do);							\
									\
	if (bch2_err_matches(_ret2, BCH_ERR_transaction_restart))	\
		goto transaction_restart;				\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
	_ret2;								\
})
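
/*
 * Illustrative sketch: retrying a transactional operation until it completes
 * without a transaction restart (bch2_btree_delete_at() here is just an
 * example body):
 *
 *	ret = lockrestart_do(trans,
 *			     bch2_btree_delete_at(trans, &iter, 0));
 */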

/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 *  - We don't call bch2_trans_begin() unless we had a transaction restart
 *  - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *  transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret2;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;	\
									\
	while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret2 ?: trans_was_restarted(_trans, _orig_restart_count);	\
})

#define for_each_btree_key_upto_continue(_trans, _iter,			\
					 _end, _flags, _k, _do)		\
({									\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_upto_type(&(_iter),	\
						_end, (_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_advance(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do)	\
	for_each_btree_key_upto_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do)

#define for_each_btree_key_upto(_trans, _iter, _btree_id,		\
				_start, _end, _flags, _k, _do)		\
({									\
	bch2_trans_begin((_trans));					\
									\
	struct btree_iter _iter;					\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	for_each_btree_key_upto_continue(_trans, _iter, _end, _flags, _k, _do);\
})

#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _do)			\
	for_each_btree_key_upto(_trans, _iter, _btree_id, _start,	\
				 SPOS_MAX, _flags, _k, _do)
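
/*
 * Illustrative sketch: iterating every key in a btree. _do is an expression
 * evaluating to int; the ({ ... 0; }) statement-expression idiom supplies the
 * "no error" result (btree id and flags here are example choices):
 *
 *	ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
 *				 BTREE_ITER_prefetch, k, ({
 *		pr_info("key at %llu:%llu", k.k->p.inode, k.k->p.offset);
 *		0;
 *	}));
 */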

#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	struct btree_iter _iter;					\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_prev_type(&(_iter),	\
							(_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_rewind(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id,	\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,	\
				  _start, _end, _iter_flags, _k,	\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);

#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
			   _start, _end, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
	for (;									\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;				\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
			   _start, _flags, _k, _ret)			\
	for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, _start,\
					  SPOS_MAX, _flags, _k, _ret)

#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id,	\
					     _start, _flags, _k, _ret)	\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_prev_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_rewind(&(_iter)))

#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)

/*
 * This should not be used in a fastpath without first trying _do in
 * nonblocking mode: it will cause excessive transaction restarts and
 * potentially livelock.
 */
#define drop_locks_do(_trans, _do)					\
({									\
	bch2_trans_unlock(_trans);					\
	(_do) ?: bch2_trans_relock(_trans);				\
})
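
/*
 * Illustrative sketch: performing a blocking operation with btree locks
 * dropped, then relocking. This is the pattern behind
 * __bch2_trans_mutex_lock() above; the (expr, 0) comma operator turns a
 * void expression into the int that drop_locks_do() expects:
 *
 *	ret = drop_locks_do(trans, (mutex_lock(lock), 0));
 */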

#define allocate_dropping_locks_errcode(_trans, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	int _ret = _do;							\
									\
	if (bch2_err_matches(_ret, ENOMEM)) {				\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, _do);			\
	}								\
	_ret;								\
})

#define allocate_dropping_locks(_trans, _ret, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	typeof(_do) _p = _do;						\
									\
	_ret = 0;							\
	if (unlikely(!_p)) {						\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, ((_p = _do), 0));		\
	}								\
	_p;								\
})
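
/*
 * Illustrative sketch: _do must reference _gfp, so the allocation is first
 * attempted with GFP_NOWAIT while locks are held, then retried with
 * GFP_KERNEL after dropping locks (struct foo is hypothetical):
 *
 *	struct foo *d = allocate_dropping_locks(trans, ret,
 *				kzalloc(sizeof(*d), _gfp));
 *	if (!d && !ret)
 *		ret = -ENOMEM;
 *	if (ret)
 *		return ret;
 */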

#define bch2_trans_run(_c, _do)						\
({									\
	struct btree_trans *trans = bch2_trans_get(_c);			\
	int _ret = (_do);						\
	bch2_trans_put(trans);						\
	_ret;								\
})

#define bch2_trans_do(_c, _do)	bch2_trans_run(_c, lockrestart_do(trans, _do))
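
/*
 * Illustrative sketch: bch2_trans_run() binds a transaction named 'trans'
 * around _do; bch2_trans_do() additionally retries _do on transaction
 * restart. check_inode() is a hypothetical helper returning an int errcode:
 *
 *	ret = bch2_trans_run(c,
 *		for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
 *				   BTREE_ITER_prefetch, k,
 *			check_inode(trans, &iter, k)));
 */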

struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
void bch2_trans_put(struct btree_trans *);

bool bch2_current_has_btree_trans(struct bch_fs *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);

#define bch2_trans_get(_c)						\
({									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
	__bch2_trans_get(_c, trans_fn_idx);				\
})

void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
void bch2_fs_btree_iter_init_early(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */