/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_TYPES_H
#define _BCACHEFS_BTREE_TYPES_H

#include <linux/list.h>
#include <linux/rhashtable.h>

#include "btree_key_cache_types.h"
#include "buckets_types.h"
#include "darray.h"
#include "errcode.h"
#include "journal_types.h"
#include "replicas_types.h"
#include "six.h"

struct open_bucket;
struct btree_update;
struct btree_trans;

#define MAX_BSETS		3U

struct btree_nr_keys {

	/*
	 * Amount of live metadata (i.e. size of node after a compaction) in
	 * units of u64s
	 */
	u16			live_u64s;
	u16			bset_u64s[MAX_BSETS];

	/* live keys only: */
	u16			packed_keys;
	u16			unpacked_keys;
};
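
/*
 * A minimal sketch of the invariant these counters appear to maintain (an
 * assumption from context, not stated in this header): live_u64s should be
 * the sum of the per-bset live counts.
 */
static inline bool btree_nr_keys_consistent(const struct btree_nr_keys *nr)
{
	unsigned i, sum = 0;

	for (i = 0; i < MAX_BSETS; i++)
		sum += nr->bset_u64s[i];

	/* hypothetical helper, for illustration only */
	return nr->live_u64s == sum;
}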

struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details
	 */

	/* size of the binary tree and prev array */
	u16			size;

	/* function of size - precalculated for to_inorder() */
	u16			extra;

	u16			data_offset;
	u16			aux_data_offset;
	u16			end_offset;
};
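
/*
 * Illustrative sketch of the 1-based binary-tree-in-an-array layout described
 * above (hypothetical helpers, not the bset.c implementation): with the root
 * at index 1, the children of node j sit at 2*j and 2*j + 1 and its parent at
 * j/2, which is what makes things line up on cachelines better than a 0-based
 * layout.
 */
static inline unsigned __bset_tree_child(unsigned j, unsigned right)
{
	return j * 2 + right;
}

static inline unsigned __bset_tree_parent(unsigned j)
{
	return j / 2;
}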

struct btree_write {
	struct journal_entry_pin	journal;
};

struct btree_alloc {
	struct open_buckets	ob;
	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
};

struct btree_bkey_cached_common {
	struct six_lock		lock;
	u8			level;
	u8			btree_id;
	bool			cached;
};

struct btree {
	struct btree_bkey_cached_common c;

	struct rhash_head	hash;
	u64			hash_val;

	unsigned long		flags;
	u16			written;
	u8			nsets;
	u8			nr_key_bits;
	u16			version_ondisk;

	struct bkey_format	format;

	struct btree_node	*data;
	void			*aux_data;

	/*
	 * Sets of sorted keys - the real btree node - plus a binary search tree
	 *
	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
	 * to the memory we have allocated for this btree node. Additionally,
	 * set[0]->data points to the entire btree node as it exists on disk.
	 */
	struct bset_tree	set[MAX_BSETS];

	struct btree_nr_keys	nr;
	u16			sib_u64s[2];
	u16			whiteout_u64s;
	u8			byte_order;
	u8			unpack_fn_len;

	struct btree_write	writes[2];

	/* Key/pointer for this btree node */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);

	/*
	 * XXX: add a delete sequence number, so when bch2_btree_node_relock()
	 * fails because the lock sequence number has changed - i.e. the
	 * contents were modified - we can still relock the node if it's still
	 * the one we want, without redoing the traversal
	 */

	/*
	 * For asynchronous splits/interior node updates:
	 * When we do a split, we allocate new child nodes and update the parent
	 * node to point to them: we update the parent in memory immediately,
	 * but then we must wait until the children have been written out before
	 * the update to the parent can be written - this is a list of the
	 * btree_updates that are blocking this node from being
	 * written:
	 */
	struct list_head	write_blocked;

	/*
	 * Also for asynchronous splits/interior node updates:
	 * If a btree node isn't reachable yet, we don't want to kick off
	 * another write - because that write also won't yet be reachable and
	 * marking it as completed before it's reachable would be incorrect:
	 */
	unsigned long		will_make_reachable;

	struct open_buckets	ob;

	/* lru list */
	struct list_head	list;
};

struct btree_cache {
	struct rhashtable	table;
	bool			table_init_done;
	/*
	 * We never free a struct btree, except on shutdown - we just put it on
	 * the btree_cache_freed list and reuse it later. This simplifies the
	 * code, and it doesn't cost us much memory as the memory usage is
	 * dominated by buffers that hold the actual btree node data and those
	 * can be freed - and the number of struct btrees allocated is
	 * effectively bounded.
	 *
	 * btree_cache_freeable effectively is a small cache - we use it because
	 * high order page allocations can be rather expensive, and it's quite
	 * common to delete and allocate btree nodes in quick succession. It
	 * should never grow past ~2-3 nodes in practice.
	 */
	struct mutex		lock;
	struct list_head	live;
	struct list_head	freeable;
	struct list_head	freed_pcpu;
	struct list_head	freed_nonpcpu;

	/* Number of elements in live + freeable lists */
	unsigned		used;
	unsigned		reserve;
	atomic_t		dirty;
	struct shrinker		*shrink;

	/*
	 * If we need to allocate memory for a new btree node and that
	 * allocation fails, we can cannibalize another node in the btree cache
	 * to satisfy the allocation - lock to guarantee only one thread does
	 * this at a time:
	 */
	struct task_struct	*alloc_lock;
	struct closure_waitlist	alloc_wait;
};

struct btree_node_iter {
	struct btree_node_iter_set {
		u16	k, end;
	} data[MAX_BSETS];
};
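
/*
 * Sketch of how such an iterator advances (an assumption modelled on the
 * iterators in bset.c, not a copy of them): each data[i] tracks a position
 * within one sorted bset as (current offset, end offset), and "peek" returns
 * the smallest current key across all the sets, e.g.:
 *
 *	for (set = iter->data; set < iter->data + MAX_BSETS; set++)
 *		if (set->k != set->end &&
 *		    (!best || cmp(b, set->k, best->k) < 0))
 *			best = set;
 *
 * where cmp() stands in for the real packed-key comparison.
 */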

/*
 * Iterate over all possible positions, synthesizing deleted keys for holes:
 */
static const __maybe_unused u16 BTREE_ITER_SLOTS		= 1 << 0;
/*
 * Indicates that intent locks should be taken on leaf nodes, because we expect
 * to be doing updates:
 */
static const __maybe_unused u16 BTREE_ITER_INTENT		= 1 << 1;
/*
 * Causes the btree iterator code to prefetch additional btree nodes from disk:
 */
static const __maybe_unused u16 BTREE_ITER_PREFETCH		= 1 << 2;
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
static const __maybe_unused u16 BTREE_ITER_IS_EXTENTS		= 1 << 3;
static const __maybe_unused u16 BTREE_ITER_NOT_EXTENTS		= 1 << 4;
static const __maybe_unused u16 BTREE_ITER_CACHED		= 1 << 5;
static const __maybe_unused u16 BTREE_ITER_WITH_KEY_CACHE	= 1 << 6;
static const __maybe_unused u16 BTREE_ITER_WITH_UPDATES	= 1 << 7;
static const __maybe_unused u16 BTREE_ITER_WITH_JOURNAL	= 1 << 8;
static const __maybe_unused u16 __BTREE_ITER_ALL_SNAPSHOTS	= 1 << 9;
static const __maybe_unused u16 BTREE_ITER_ALL_SNAPSHOTS	= 1 << 10;
static const __maybe_unused u16 BTREE_ITER_FILTER_SNAPSHOTS	= 1 << 11;
static const __maybe_unused u16 BTREE_ITER_NOPRESERVE		= 1 << 12;
static const __maybe_unused u16 BTREE_ITER_CACHED_NOFILL	= 1 << 13;
static const __maybe_unused u16 BTREE_ITER_KEY_CACHE_FILL	= 1 << 14;
#define __BTREE_ITER_FLAGS_END					       15
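
/*
 * These flags are OR'd together when an iterator is initialized - e.g. an
 * update path that wants to see every slot, take intent locks on leaves and
 * prefetch might pass (an illustrative combination, not taken from this
 * header):
 *
 *	u16 flags = BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH;
 */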

enum btree_path_uptodate {
	BTREE_ITER_UPTODATE		= 0,
	BTREE_ITER_NEED_RELOCK		= 1,
	BTREE_ITER_NEED_TRAVERSE	= 2,
};

#if defined(CONFIG_BCACHEFS_LOCK_TIME_STATS) || defined(CONFIG_BCACHEFS_DEBUG)
#define TRACK_PATH_ALLOCATED
#endif

typedef u16 btree_path_idx_t;

struct btree_path {
	btree_path_idx_t	sorted_idx;
	u8			ref;
	u8			intent_ref;

	/* btree_iter_copy starts here: */
	struct bpos		pos;

	enum btree_id		btree_id:5;
	bool			cached:1;
	bool			preserve:1;
	enum btree_path_uptodate uptodate:2;
	/*
	 * When true, failing to relock this path will cause the transaction to
	 * restart:
	 */
	bool			should_be_locked:1;
	unsigned		level:3,
				locks_want:3;
	u8			nodes_locked;

	struct btree_path_level {
		struct btree	*b;
		struct btree_node_iter iter;
		u32		lock_seq;
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		u64		lock_taken_time;
#endif
	}			l[BTREE_MAX_DEPTH];
#ifdef TRACK_PATH_ALLOCATED
	unsigned long		ip_allocated;
#endif
};

static inline struct btree_path_level *path_l(struct btree_path *path)
{
	return path->l + path->level;
}

static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
{
#ifdef TRACK_PATH_ALLOCATED
	return path->ip_allocated;
#else
	return _THIS_IP_;
#endif
}

/*
 * @pos		- iterator's current position
 *
 * The btree level, locks_want and per-node lock state for an iterator live
 * in its struct btree_path - see above.
 */
struct btree_iter {
	struct btree_trans	*trans;
	btree_path_idx_t	path;
	btree_path_idx_t	update_path;
	btree_path_idx_t	key_cache_path;

	enum btree_id		btree_id:8;
	u8			min_depth;

	/* btree_iter_copy starts here: */
	u16			flags;

	/* When we're filtering by snapshot, the snapshot ID we're looking for: */
	unsigned		snapshot;

	struct bpos		pos;
	/*
	 * Current unpacked key - so that bch2_btree_iter_next()/
	 * bch2_btree_iter_next_slot() can correctly advance pos.
	 */
	struct bkey		k;

	/* BTREE_ITER_WITH_JOURNAL: */
	size_t			journal_idx;
#ifdef TRACK_PATH_ALLOCATED
	unsigned long		ip_allocated;
#endif
};
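
/*
 * Illustrative iterator lifecycle (a sketch only - the real declarations live
 * in btree_iter.h and exact signatures may differ):
 *
 *	struct btree_trans *trans = bch2_trans_get(c);
 *	struct btree_iter iter;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, pos,
 *			     BTREE_ITER_SLOTS);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	...
 *	bch2_trans_iter_exit(trans, &iter);
 *	bch2_trans_put(trans);
 */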

#define BKEY_CACHED_ACCESSED		0
#define BKEY_CACHED_DIRTY		1

struct bkey_cached {
	struct btree_bkey_cached_common c;

	unsigned long		flags;
	u16			u64s;
	bool			valid;
	u32			btree_trans_barrier_seq;
	struct bkey_cached_key	key;

	struct rhash_head	hash;
	struct list_head	list;

	struct journal_entry_pin journal;
	u64			seq;

	struct bkey_i		*k;
};

static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
{
	return !b->cached
		? container_of(b, struct btree, c)->key.k.p
		: container_of(b, struct bkey_cached, c)->key.pos;
}

struct btree_insert_entry {
	unsigned		flags;
	u8			bkey_type;
	enum btree_id		btree_id:8;
	u8			level:4;
	bool			cached:1;
	bool			insert_trigger_run:1;
	bool			overwrite_trigger_run:1;
	bool			key_cache_already_flushed:1;
	/*
	 * @old_k may be a key from the journal; @old_btree_u64s always refers
	 * to the size of the key being overwritten in the btree:
	 */
	u8			old_btree_u64s;
	btree_path_idx_t	path;
	struct bkey_i		*k;
	/* key being overwritten: */
	struct bkey		old_k;
	const struct bch_val	*old_v;
	unsigned long		ip_allocated;
};

#define BTREE_ITER_INITIAL		64
#define BTREE_ITER_MAX			(1U << 10)

struct btree_trans_commit_hook;
typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);

struct btree_trans_commit_hook {
	btree_trans_commit_hook_fn	*fn;
	struct btree_trans_commit_hook	*next;
};
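
/*
 * A minimal sketch of a commit hook (hypothetical names): hooks are chained
 * onto trans->hooks, below, and each fn is invoked during transaction commit:
 *
 *	static int my_hook_fn(struct btree_trans *trans,
 *			      struct btree_trans_commit_hook *h)
 *	{
 *		return 0;
 *	}
 *
 *	struct btree_trans_commit_hook hook = { .fn = my_hook_fn };
 */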

#define BTREE_TRANS_MEM_MAX	(1U << 16)

#define BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS	10000

struct btree_trans_paths {
	unsigned long		nr_paths;
	struct btree_path	paths[];
};

struct btree_trans {
	struct bch_fs		*c;

	unsigned long		*paths_allocated;
	struct btree_path	*paths;
	btree_path_idx_t	*sorted;
	struct btree_insert_entry *updates;

	void			*mem;
	unsigned		mem_top;
	unsigned		mem_bytes;

	btree_path_idx_t	nr_sorted;
	btree_path_idx_t	nr_paths;
	btree_path_idx_t	nr_paths_max;
	u8			fn_idx;
	u8			nr_updates;
	u8			lock_must_abort;
	bool			lock_may_not_fail:1;
	bool			srcu_held:1;
	bool			used_mempool:1;
	bool			in_traverse_all:1;
	bool			paths_sorted:1;
	bool			memory_allocation_failure:1;
	bool			journal_transaction_names:1;
	bool			journal_replay_not_finished:1;
	bool			notrace_relock_fail:1;
	bool			write_locked:1;
	enum bch_errcode	restarted:16;
	u32			restart_count;

	u64			last_begin_time;
	unsigned long		last_begin_ip;
	unsigned long		last_restarted_ip;
	unsigned long		srcu_lock_time;

	const char		*fn;
	struct btree_bkey_cached_common *locking;
	struct six_lock_waiter	locking_wait;
	int			srcu_idx;

	/* update path: */
	u16			journal_entries_u64s;
	u16			journal_entries_size;
	struct jset_entry	*journal_entries;

	struct btree_trans_commit_hook *hooks;
	struct journal_entry_pin *journal_pin;

	struct journal_res	journal_res;
	u64			*journal_seq;
	struct disk_reservation *disk_res;

	struct bch_fs_usage_base fs_usage_delta;

	unsigned		journal_u64s;
	unsigned		extra_disk_res; /* XXX kill */
	struct replicas_delta_list *fs_usage_deltas;

	/* Entries before this are zeroed out on every bch2_trans_get() call */

	struct list_head	list;
	struct closure		ref;

	unsigned long		_paths_allocated[BITS_TO_LONGS(BTREE_ITER_INITIAL)];
	struct btree_trans_paths trans_paths;
	struct btree_path	_paths[BTREE_ITER_INITIAL];
	btree_path_idx_t	_sorted[BTREE_ITER_INITIAL + 4];
	struct btree_insert_entry _updates[BTREE_ITER_INITIAL];
};
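
/*
 * Note (an inference from the layout above, not stated in this header):
 * paths_allocated, paths, sorted and updates initially point at
 * _paths_allocated, _paths, _sorted and _updates, giving every transaction
 * BTREE_ITER_INITIAL inline paths; they can later be repointed at a larger
 * allocation, bounded by BTREE_ITER_MAX.
 */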

static inline struct btree_path *btree_iter_path(struct btree_trans *trans, struct btree_iter *iter)
{
	return trans->paths + iter->path;
}

static inline struct btree_path *btree_iter_key_cache_path(struct btree_trans *trans, struct btree_iter *iter)
{
	return iter->key_cache_path
		? trans->paths + iter->key_cache_path
		: NULL;
}

#define BCH_BTREE_WRITE_TYPES()						\
	x(initial,		0)					\
	x(init_next_bset,	1)					\
	x(cache_reclaim,	2)					\
	x(journal_reclaim,	3)					\
	x(interior,		4)

enum btree_write_type {
#define x(t, n) BTREE_WRITE_##t,
	BCH_BTREE_WRITE_TYPES()
#undef x
	BTREE_WRITE_TYPE_NR,
};
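
/*
 * The same x-macro list can be expanded again to keep a string table in sync
 * with the enum - a sketch (this table is not part of the original header):
 */
static const char * const btree_write_type_strs[] __maybe_unused = {
#define x(t, n) [BTREE_WRITE_##t] = #t,
	BCH_BTREE_WRITE_TYPES()
#undef x
};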

#define BTREE_WRITE_TYPE_MASK	(roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)
#define BTREE_WRITE_TYPE_BITS	ilog2(roundup_pow_of_two(BTREE_WRITE_TYPE_NR))

#define BTREE_FLAGS()							\
	x(read_in_flight)						\
	x(read_error)							\
	x(dirty)							\
	x(need_write)							\
	x(write_blocked)						\
	x(will_make_reachable)						\
	x(noevict)							\
	x(write_idx)							\
	x(accessed)							\
	x(write_in_flight)						\
	x(write_in_flight_inner)					\
	x(just_written)							\
	x(dying)							\
	x(fake)								\
	x(need_rewrite)							\
	x(never_write)

enum btree_flags {
	/* First bits for btree node write type */
	BTREE_NODE_FLAGS_START = BTREE_WRITE_TYPE_BITS - 1,
#define x(flag)	BTREE_NODE_##flag,
	BTREE_FLAGS()
#undef x
};

#define x(flag)								\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
									\
static inline void clear_btree_node_ ## flag(struct btree *b)		\
{	clear_bit(BTREE_NODE_ ## flag, &b->flags); }

BTREE_FLAGS()
#undef x
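
/*
 * The generated helpers wrap test_bit()/set_bit()/clear_bit() on b->flags -
 * e.g. btree_node_dirty(b) and set_btree_node_dirty(b). A hypothetical
 * predicate composed from them, for illustration only:
 */
static inline bool btree_node_wants_write(struct btree *b)
{
	return btree_node_dirty(b) && !btree_node_write_in_flight(b);
}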

static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline struct bset_tree *bset_tree_last(struct btree *b)
{
	EBUG_ON(!b->nsets);
	return b->set + b->nsets - 1;
}

static inline void *
__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
{
	return (void *) ((u64 *) b->data + 1 + offset);
}

static inline u16
__btree_node_ptr_to_offset(const struct btree *b, const void *p)
{
	u16 ret = (u64 *) p - 1 - (u64 *) b->data;

	EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
	return ret;
}

static inline struct bset *bset(const struct btree *b,
				const struct bset_tree *t)
{
	return __btree_node_offset_to_ptr(b, t->data_offset);
}

static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
{
	t->end_offset =
		__btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
}

static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
				  const struct bset *i)
{
	t->data_offset = __btree_node_ptr_to_offset(b, i);
	set_btree_bset_end(b, t);
}

static inline struct bset *btree_bset_first(struct btree *b)
{
	return bset(b, b->set);
}

static inline struct bset *btree_bset_last(struct btree *b)
{
	return bset(b, bset_tree_last(b));
}

static inline u16
__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
{
	return __btree_node_ptr_to_offset(b, k);
}

static inline struct bkey_packed *
__btree_node_offset_to_key(const struct btree *b, u16 k)
{
	return __btree_node_offset_to_ptr(b, k);
}

static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
{
	return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
}

#define btree_bkey_first(_b, _t)					\
({									\
	EBUG_ON(bset(_b, _t)->start !=					\
		__btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
									\
	bset(_b, _t)->start;						\
})

#define btree_bkey_last(_b, _t)						\
({									\
	EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) !=	\
		vstruct_last(bset(_b, _t)));				\
									\
	__btree_node_offset_to_key(_b, (_t)->end_offset);		\
})

static inline unsigned bset_u64s(struct bset_tree *t)
{
	return t->end_offset - t->data_offset -
		sizeof(struct bset) / sizeof(u64);
}

static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
{
	return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
}

static inline unsigned bset_byte_offset(struct btree *b, void *i)
{
	return i - (void *) b->data;
}

enum btree_node_type {
	BKEY_TYPE_btree,
#define x(kwd, val, ...) BKEY_TYPE_##kwd = val + 1,
	BCH_BTREE_IDS()
#undef x
	BKEY_TYPE_NR
};

/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
	return level ? BKEY_TYPE_btree : (unsigned) id + 1;
}
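
/*
 * e.g. __btree_node_type(0, BTREE_ID_extents) == BKEY_TYPE_extents, while any
 * interior node (level > 0) holds btree pointers, i.e. BKEY_TYPE_btree.
 */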

/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
	return __btree_node_type(b->c.level, b->c.btree_id);
}

const char *bch2_btree_node_type_str(enum btree_node_type);

#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS		\
	(BIT_ULL(BKEY_TYPE_extents)|			\
	 BIT_ULL(BKEY_TYPE_alloc)|			\
	 BIT_ULL(BKEY_TYPE_inodes)|			\
	 BIT_ULL(BKEY_TYPE_stripes)|			\
	 BIT_ULL(BKEY_TYPE_reflink)|			\
	 BIT_ULL(BKEY_TYPE_btree))

#define BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS		\
	(BIT_ULL(BKEY_TYPE_alloc)|			\
	 BIT_ULL(BKEY_TYPE_inodes)|			\
	 BIT_ULL(BKEY_TYPE_stripes)|			\
	 BIT_ULL(BKEY_TYPE_snapshots))

#define BTREE_NODE_TYPE_HAS_TRIGGERS			\
	(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|		\
	 BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS)

static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
	return BTREE_NODE_TYPE_HAS_TRIGGERS & BIT_ULL(type);
}

static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
	const unsigned mask = 0
#define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1))
	BCH_BTREE_IDS()
#undef x
	;

	return (1U << type) & mask;
}
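
/*
 * In the mask above each x() contributes one bit, shifted by nr + 1 because
 * btree_node_type values are offset from btree_id values by one
 * (BKEY_TYPE_btree occupies bit 0). The helpers below build their masks the
 * same way but shift by nr, since they take a btree_id directly.
 */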

static inline bool btree_id_is_extents(enum btree_id btree)
{
	return btree_node_type_is_extents(__btree_node_type(0, btree));
}

static inline bool btree_type_has_snapshots(enum btree_id id)
{
	const unsigned mask = 0
#define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr)
	BCH_BTREE_IDS()
#undef x
	;

	return (1U << id) & mask;
}

static inline bool btree_type_has_snapshot_field(enum btree_id id)
{
	const unsigned mask = 0
#define x(name, nr, flags, ...)	|((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr)
	BCH_BTREE_IDS()
#undef x
	;

	return (1U << id) & mask;
}

static inline bool btree_type_has_ptrs(enum btree_id id)
{
	const unsigned mask = 0
#define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_DATA)) << nr)
	BCH_BTREE_IDS()
#undef x
	;

	return (1U << id) & mask;
}

struct btree_root {
	struct btree		*b;

	/* On disk root - see async splits: */
	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
	u8			level;
	u8			alive;
	s8			error;
};

enum btree_gc_coalesce_fail_reason {
	BTREE_GC_COALESCE_FAIL_RESERVE_GET,
	BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
	BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
};

enum btree_node_sibling {
	btree_prev_sib,
	btree_next_sib,
};

struct get_locks_fail {
	unsigned	l;
	struct btree	*b;
};

#endif /* _BCACHEFS_BTREE_TYPES_H */