/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EXTENTS_H
#define _BCACHEFS_EXTENTS_H

#include "bcachefs.h"
#include "bkey.h"
#include "extents_types.h"

struct bch_fs;
struct btree_trans;
enum bkey_invalid_flags;

/* extent entries: */

#define extent_entry_last(_e)						\
	((typeof(&(_e).v->start[0])) bkey_val_end(_e))

#define entry_to_ptr(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_ptr(_entry));		\
									\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const struct bch_extent_ptr *) (_entry),		\
		(struct bch_extent_ptr *) (_entry));			\
})

/* downcast, preserves const */
#define to_entry(_entry)						\
({									\
	BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) &&	\
		     !type_is(_entry, struct bch_extent_ptr *) &&	\
		     !type_is(_entry, struct bch_extent_stripe_ptr *));	\
									\
	__builtin_choose_expr(						\
		(type_is_exact(_entry, const union bch_extent_crc *) ||	\
		 type_is_exact(_entry, const struct bch_extent_ptr *) ||\
		 type_is_exact(_entry, const struct bch_extent_stripe_ptr *)),\
		(const union bch_extent_entry *) (_entry),		\
		(union bch_extent_entry *) (_entry));			\
})

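/*
 * Illustrative sketch (not used by the code below): entry_to_ptr() and
 * to_entry() convert between the generic union bch_extent_entry and the
 * specific entry structs, and use __builtin_choose_expr()/type_is_exact()
 * (assumed to come from the util headers) so that constness is preserved.
 * Assuming @entry already points at an entry inside some key's value:
 *
 *	const union bch_extent_entry *entry;
 *	const struct bch_extent_ptr *ptr;
 *
 *	if (extent_entry_is_ptr(entry))
 *		ptr = entry_to_ptr(entry);
 *
 * A const entry yields a const bch_extent_ptr; a non-const entry would yield
 * a non-const one.
 */
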
#define extent_entry_next(_entry)					\
	((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))

static inline unsigned
__extent_entry_type(const union bch_extent_entry *e)
{
	return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
}

static inline enum bch_extent_entry_type
extent_entry_type(const union bch_extent_entry *e)
{
	int ret = __ffs(e->type);

	EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);

	return ret;
}

static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
{
	switch (extent_entry_type(entry)) {
#define x(f, n)						\
	case BCH_EXTENT_ENTRY_##f:			\
		return sizeof(struct bch_extent_##f);
	BCH_EXTENT_ENTRY_TYPES()
#undef x
	default:
		BUG();
	}
}

static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
{
	return extent_entry_bytes(entry) / sizeof(u64);
}

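/*
 * Sketch of the encoding, derived from the helpers above: each entry begins
 * with a type field in which a single set bit identifies the entry, and
 * __ffs() recovers the enum value. For a plain pointer entry:
 *
 *	entry->type == 1U << BCH_EXTENT_ENTRY_ptr
 *	extent_entry_type(entry) == BCH_EXTENT_ENTRY_ptr
 *	extent_entry_bytes(entry) == sizeof(struct bch_extent_ptr)
 *
 * extent_entry_next() then advances by that many bytes to the next entry.
 */
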
static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}

static inline void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
{
	union bch_extent_entry *next = extent_entry_next(entry);

	/* stripes have ptrs, but their layout doesn't work with this code */
	BUG_ON(k.k->type == KEY_TYPE_stripe);

	memmove_u64s_down(entry, next,
			  (u64 *) bkey_val_end(k) - (u64 *) next);
	k.k->u64s -= (u64 *) next - (u64 *) entry;
}

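/*
 * Both helpers above keep the key's value contiguous: __extent_entry_insert()
 * shifts everything from @dst onwards up by extent_entry_u64s(new) words and
 * grows k->k.u64s, while extent_entry_drop() shifts the following entries
 * down and shrinks k.k->u64s by the size of the dropped entry. A minimal
 * sketch, assuming @entry points into @k's value:
 *
 *	extent_entry_drop(k, entry);
 *
 * After the call, walking the value from bch2_bkey_ptrs(k).start no longer
 * visits the dropped entry.
 */
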
static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
{
	return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
}

static inline bool extent_entry_is_stripe_ptr(const union bch_extent_entry *e)
{
	return extent_entry_type(e) == BCH_EXTENT_ENTRY_stripe_ptr;
}

static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
{
	switch (extent_entry_type(e)) {
	case BCH_EXTENT_ENTRY_crc32:
	case BCH_EXTENT_ENTRY_crc64:
	case BCH_EXTENT_ENTRY_crc128:
		return true;
	default:
		return false;
	}
}

union bch_extent_crc {
	u8				type;
	struct bch_extent_crc32		crc32;
	struct bch_extent_crc64		crc64;
	struct bch_extent_crc128	crc128;
};

#define __entry_to_crc(_entry)						\
	__builtin_choose_expr(						\
		type_is_exact(_entry, const union bch_extent_entry *),	\
		(const union bch_extent_crc *) (_entry),		\
		(union bch_extent_crc *) (_entry))

#define entry_to_crc(_entry)						\
({									\
	EBUG_ON((_entry) && !extent_entry_is_crc(_entry));		\
									\
	__entry_to_crc(_entry);						\
})

static inline struct bch_extent_crc_unpacked
bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
{
#define common_fields(_crc)						\
		.csum_type		= _crc.csum_type,		\
		.compression_type	= _crc.compression_type,	\
		.compressed_size	= _crc._compressed_size + 1,	\
		.uncompressed_size	= _crc._uncompressed_size + 1,	\
		.offset			= _crc.offset,			\
		.live_size		= k->size

	if (!crc)
		return (struct bch_extent_crc_unpacked) {
			.compressed_size	= k->size,
			.uncompressed_size	= k->size,
			.live_size		= k->size,
		};

	switch (extent_entry_type(to_entry(crc))) {
	case BCH_EXTENT_ENTRY_crc32: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc32),
		};

		*((__le32 *) &ret.csum.lo) = (__le32 __force) crc->crc32.csum;
		return ret;
	}
	case BCH_EXTENT_ENTRY_crc64: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc64),
			.nonce			= crc->crc64.nonce,
			.csum.lo		= (__force __le64) crc->crc64.csum_lo,
		};

		*((__le16 *) &ret.csum.hi) = (__le16 __force) crc->crc64.csum_hi;

		return ret;
	}
	case BCH_EXTENT_ENTRY_crc128: {
		struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
			common_fields(crc->crc128),
			.nonce			= crc->crc128.nonce,
			.csum			= crc->crc128.csum,
		};

		return ret;
	}
	default:
		BUG();
	}
#undef common_fields
}

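/*
 * Usage sketch, assuming @k is a struct bkey_s_c and @entry a checksum entry
 * found while walking its value. Note the "+ 1" in common_fields(): the
 * packed entries store sizes minus one, so callers should always go through
 * bch2_extent_crc_unpack() rather than reading the raw fields:
 *
 *	if (extent_entry_is_crc(entry)) {
 *		struct bch_extent_crc_unpacked crc =
 *			bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
 *
 *		if (crc_is_compressed(crc))
 *			sectors += crc.compressed_size;
 *	}
 *
 * crc_is_compressed() is defined just below; sectors is a caller-side
 * accumulator shown only for illustration.
 */
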
static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
{
	return (crc.compression_type != BCH_COMPRESSION_TYPE_none &&
		crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
}

static inline bool crc_is_encoded(struct bch_extent_crc_unpacked crc)
{
	return crc.csum_type != BCH_CSUM_none || crc_is_compressed(crc);
}

/* bkey_ptrs: generically over any key type that has ptrs */

struct bkey_ptrs_c {
	const union bch_extent_entry	*start;
	const union bch_extent_entry	*end;
};

struct bkey_ptrs {
	union bch_extent_entry	*start;
	union bch_extent_entry	*end;
};

static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr: {
		struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	case KEY_TYPE_extent: {
		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);

		return (struct bkey_ptrs_c) {
			e.v->start,
			extent_entry_last(e)
		};
	}
	case KEY_TYPE_stripe: {
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		return (struct bkey_ptrs_c) {
			to_entry(&s.v->ptrs[0]),
			to_entry(&s.v->ptrs[s.v->nr_blocks]),
		};
	}
	case KEY_TYPE_reflink_v: {
		struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);

		return (struct bkey_ptrs_c) {
			r.v->start,
			bkey_val_end(r),
		};
	}
	case KEY_TYPE_btree_ptr_v2: {
		struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);

		return (struct bkey_ptrs_c) {
			to_entry(&e.v->start[0]),
			to_entry(extent_entry_last(e))
		};
	}
	default:
		return (struct bkey_ptrs_c) { NULL, NULL };
	}
}

static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
{
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);

	return (struct bkey_ptrs) {
		(void *) p.start,
		(void *) p.end
	};
}

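/*
 * Usage sketch: bch2_bkey_ptrs_c()/bch2_bkey_ptrs() are the entry point for
 * the generic pointer walking below. Key types without pointers yield an
 * empty range, so callers don't need to special-case them:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *
 *	if (ptrs.start == ptrs.end)
 *		return 0;
 *
 * k is assumed to be a struct bkey_s_c obtained from a btree iterator.
 */
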
#define __bkey_extent_entry_for_each_from(_start, _end, _entry)		\
	for ((_entry) = (_start);					\
	     (_entry) < (_end);						\
	     (_entry) = extent_entry_next(_entry))

#define __bkey_ptr_next(_ptr, _end)					\
({									\
	typeof(_end) _entry;						\
									\
	__bkey_extent_entry_for_each_from(to_entry(_ptr), _end, _entry)	\
		if (extent_entry_is_ptr(_entry))			\
			break;						\
									\
	_entry < (_end) ? entry_to_ptr(_entry) : NULL;			\
})

#define bkey_extent_entry_for_each_from(_p, _entry, _start)		\
	__bkey_extent_entry_for_each_from(_start, (_p).end, _entry)

#define bkey_extent_entry_for_each(_p, _entry)				\
	bkey_extent_entry_for_each_from(_p, _entry, _p.start)

#define __bkey_for_each_ptr(_start, _end, _ptr)				\
	for (typeof(_start) (_ptr) = (_start);				\
	     ((_ptr) = __bkey_ptr_next(_ptr, _end));			\
	     (_ptr)++)

#define bkey_ptr_next(_p, _ptr)						\
	__bkey_ptr_next(_ptr, (_p).end)

#define bkey_for_each_ptr(_p, _ptr)					\
	__bkey_for_each_ptr(&(_p).start->ptr, (_p).end, _ptr)

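/*
 * Usage sketch, counting cached pointers in a key. bkey_for_each_ptr()
 * declares the iteration variable itself (see the declaration inside
 * __bkey_for_each_ptr()), so only the name is supplied:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	unsigned nr_cached = 0;
 *
 *	bkey_for_each_ptr(ptrs, ptr)
 *		nr_cached += ptr->cached;
 *
 * bkey_extent_is_unwritten(), further down, uses the same pattern.
 */
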
#define __bkey_ptr_next_decode(_k, _end, _ptr, _entry)			\
({									\
	__label__ out;							\
									\
	(_ptr).idx	= 0;						\
	(_ptr).has_ec	= false;					\
									\
	__bkey_extent_entry_for_each_from(_entry, _end, _entry)		\
		switch (extent_entry_type(_entry)) {			\
		case BCH_EXTENT_ENTRY_ptr:				\
			(_ptr).ptr		= _entry->ptr;		\
			goto out;					\
		case BCH_EXTENT_ENTRY_crc32:				\
		case BCH_EXTENT_ENTRY_crc64:				\
		case BCH_EXTENT_ENTRY_crc128:				\
			(_ptr).crc = bch2_extent_crc_unpack(_k,		\
					entry_to_crc(_entry));		\
			break;						\
		case BCH_EXTENT_ENTRY_stripe_ptr:			\
			(_ptr).ec = _entry->stripe_ptr;			\
			(_ptr).has_ec	= true;				\
			break;						\
		default:						\
			/* nothing */					\
			break;						\
		}							\
out:									\
	_entry < (_end);						\
})

#define __bkey_for_each_ptr_decode(_k, _start, _end, _ptr, _entry)	\
	for ((_ptr).crc = bch2_extent_crc_unpack(_k, NULL),		\
	     (_entry) = _start;						\
	     __bkey_ptr_next_decode(_k, _end, _ptr, _entry);		\
	     (_entry) = extent_entry_next(_entry))

#define bkey_for_each_ptr_decode(_k, _p, _ptr, _entry)			\
	__bkey_for_each_ptr_decode(_k, (_p).start, (_p).end,		\
				   _ptr, _entry)

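/*
 * Usage sketch: bkey_for_each_ptr_decode() walks the pointers while carrying
 * the checksum/compression state and stripe pointer that apply to each one,
 * so per-pointer code doesn't have to track crc entries by hand:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	const union bch_extent_entry *entry;
 *	struct extent_ptr_decoded p;
 *
 *	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
 *		if (!p.ptr.cached && crc_is_compressed(p.crc))
 *			return true;
 *
 * Here p.ptr is the raw pointer, p.crc the unpacked checksum entry covering
 * it, and p.has_ec/p.ec describe an erasure-coded stripe pointer, if any.
 */
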
#define bkey_crc_next(_k, _start, _end, _crc, _iter)			\
({									\
	__bkey_extent_entry_for_each_from(_iter, _end, _iter)		\
		if (extent_entry_is_crc(_iter)) {			\
			(_crc) = bch2_extent_crc_unpack(_k,		\
						entry_to_crc(_iter));	\
			break;						\
		}							\
									\
	(_iter) < (_end);						\
})

#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter)		\
	for ((_crc) = bch2_extent_crc_unpack(_k, NULL),			\
	     (_iter) = (_start);					\
	     bkey_crc_next(_k, _start, _end, _crc, _iter);		\
	     (_iter) = extent_entry_next(_iter))

#define bkey_for_each_crc(_k, _p, _crc, _iter)				\
	__bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)

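/*
 * Usage sketch: iterating only the checksum entries of a key, e.g. to decide
 * whether any of its data is checksummed or compressed:
 *
 *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 *	const union bch_extent_entry *entry;
 *	struct bch_extent_crc_unpacked crc;
 *
 *	bkey_for_each_crc(k.k, ptrs, crc, entry)
 *		if (crc_is_encoded(crc))
 *			return true;
 */
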
/* Iterate over pointers in KEY_TYPE_extent: */

#define extent_for_each_entry_from(_e, _entry, _start)			\
	__bkey_extent_entry_for_each_from(_start,			\
				extent_entry_last(_e), _entry)

#define extent_for_each_entry(_e, _entry)				\
	extent_for_each_entry_from(_e, _entry, (_e).v->start)

#define extent_ptr_next(_e, _ptr)					\
	__bkey_ptr_next(_ptr, extent_entry_last(_e))

#define extent_for_each_ptr(_e, _ptr)					\
	__bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)

#define extent_for_each_ptr_decode(_e, _ptr, _entry)			\
	__bkey_for_each_ptr_decode((_e).k, (_e).v->start,		\
				   extent_entry_last(_e), _ptr, _entry)

/* utility code common to all keys with pointers: */

void bch2_mark_io_failure(struct bch_io_failures *,
			  struct extent_ptr_decoded *);
int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
			       struct bch_io_failures *,
			       struct extent_ptr_decoded *);

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(struct bch_fs *, struct bkey_s_c,
			   enum bkey_invalid_flags, struct printbuf *);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);

int bch2_btree_ptr_v2_invalid(struct bch_fs *, struct bkey_s_c,
			      enum bkey_invalid_flags, struct printbuf *);
void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
			      int, struct bkey_s);

#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) {		\
	.key_invalid	= bch2_btree_ptr_invalid,		\
	.val_to_text	= bch2_btree_ptr_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.trigger	= bch2_trigger_extent,			\
})

#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) {		\
	.key_invalid	= bch2_btree_ptr_v2_invalid,		\
	.val_to_text	= bch2_btree_ptr_v2_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.compat		= bch2_btree_ptr_v2_compat,		\
	.trigger	= bch2_trigger_extent,			\
	.min_val_size	= 40,					\
})

/* KEY_TYPE_extent: */

bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_extent ((struct bkey_ops) {		\
	.key_invalid	= bch2_bkey_ptrs_invalid,		\
	.val_to_text	= bch2_bkey_ptrs_to_text,		\
	.swab		= bch2_ptr_swab,			\
	.key_normalize	= bch2_extent_normalize,		\
	.key_merge	= bch2_extent_merge,			\
	.trigger	= bch2_trigger_extent,			\
})

/* KEY_TYPE_reservation: */

int bch2_reservation_invalid(struct bch_fs *, struct bkey_s_c,
			     enum bkey_invalid_flags, struct printbuf *);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);

#define bch2_bkey_ops_reservation ((struct bkey_ops) {		\
	.key_invalid	= bch2_reservation_invalid,		\
	.val_to_text	= bch2_reservation_to_text,		\
	.key_merge	= bch2_reservation_merge,		\
	.trigger	= bch2_trigger_reservation,		\
	.min_val_size	= 8,					\
})

/* Extent checksum entries: */

bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
				 struct bch_extent_crc_unpacked);
bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
void bch2_extent_crc_append(struct bkey_i *,
			    struct bch_extent_crc_unpacked);

/* Generic code for keys with pointers: */

static inline bool bkey_is_btree_ptr(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_direct_data(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_inline_data(const struct bkey *k)
{
	return  k->type == KEY_TYPE_inline_data ||
		k->type == KEY_TYPE_indirect_inline_data;
}

static inline unsigned bkey_inline_data_offset(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_inline_data:
		return sizeof(struct bch_inline_data);
	case KEY_TYPE_indirect_inline_data:
		return sizeof(struct bch_indirect_inline_data);
	default:
		BUG();
	}
}

static inline unsigned bkey_inline_data_bytes(const struct bkey *k)
{
	return bkey_val_bytes(k) - bkey_inline_data_offset(k);
}

#define bkey_inline_data_p(_k)	(((void *) (_k).v) + bkey_inline_data_offset((_k).k))

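/*
 * Usage sketch for the inline-data helpers above: the data payload starts
 * bkey_inline_data_offset() bytes into the value, so copying it out of a
 * struct bkey_s_c looks like:
 *
 *	if (bkey_extent_is_inline_data(k.k)) {
 *		unsigned bytes = bkey_inline_data_bytes(k.k);
 *		const void *data = bkey_inline_data_p(k);
 *
 *		memcpy(buf, data, min(bytes, buf_len));
 *	}
 *
 * buf and buf_len are a hypothetical destination buffer and its size.
 */
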
static inline bool bkey_extent_is_data(const struct bkey *k)
{
	return  bkey_extent_is_direct_data(k) ||
		bkey_extent_is_inline_data(k) ||
		k->type == KEY_TYPE_reflink_p;
}

/*
 * Should extent be counted under inode->i_sectors?
 */
static inline bool bkey_extent_is_allocation(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reservation:
	case KEY_TYPE_reflink_p:
	case KEY_TYPE_reflink_v:
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
	case KEY_TYPE_error:
		return true;
	default:
		return false;
	}
}

static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->unwritten)
			return true;
	return false;
}

static inline bool bkey_extent_is_reservation(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation ||
		bkey_extent_is_unwritten(k);
}

static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(p, ptr)
		ret.data[ret.nr++] = ptr->dev;

	return ret;
}

static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(p, ptr)
		if (!ptr->cached)
			ret.data[ret.nr++] = ptr->dev;

	return ret;
}

static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
{
	struct bch_devs_list ret = (struct bch_devs_list) { 0 };
	struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(p, ptr)
		if (ptr->cached)
			ret.data[ret.nr++] = ptr->dev;

	return ret;
}

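/*
 * Usage sketch: the three helpers above return the device indices referenced
 * by a key as a small fixed-size list, e.g. to do something per device:
 *
 *	struct bch_devs_list devs = bch2_bkey_dirty_devs(k);
 *
 *	for (unsigned i = 0; i < devs.nr; i++)
 *		mark_dev_in_use(c, devs.data[i]);
 *
 * mark_dev_in_use() is a hypothetical per-device callback; devs.data[] holds
 * the device indices of the non-cached pointers.
 */
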
static inline unsigned bch2_bkey_ptr_data_type(struct bkey_s_c k, const struct bch_extent_ptr *ptr)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return BCH_DATA_btree;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return BCH_DATA_user;
	case KEY_TYPE_stripe: {
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		BUG_ON(ptr < s.v->ptrs ||
		       ptr >= s.v->ptrs + s.v->nr_blocks);

		return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
			? BCH_DATA_parity
			: BCH_DATA_user;
	}
	default:
		BUG();
	}
}

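/*
 * Worked example for the stripe case above: a stripe's pointers are laid out
 * with the data blocks first and the parity blocks last, so with
 * nr_blocks == 6 and nr_redundant == 2, ptrs[0..3] report BCH_DATA_user and
 * ptrs[4..5] report BCH_DATA_parity.
 */
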
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
bool bch2_bkey_is_incompressible(struct bkey_s_c);
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);

unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);

void bch2_bkey_drop_device(struct bkey_s, unsigned);
void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c, unsigned);

static inline struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s k, unsigned dev)
{
	return (void *) bch2_bkey_has_device_c(k.s_c, dev);
}

bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);

void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);

static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr ptr)
{
	struct bch_extent_ptr *dest;

	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
		dest = (struct bch_extent_ptr *)((void *) &k->v + bkey_val_bytes(&k->k));
		*dest = ptr;
		k->k.u64s++;
		break;
	default:
		BUG();
	}
}

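/*
 * Usage sketch for bch2_bkey_append_ptr(), assuming the caller has already
 * allocated space on device @dev at sector @offset with bucket generation
 * @gen (all three are placeholders here):
 *
 *	struct bch_extent_ptr ptr = {
 *		.dev	= dev,
 *		.offset	= offset,
 *		.gen	= gen,
 *	};
 *
 *	bch2_bkey_append_ptr(&e->k_i, ptr);
 *
 * The helper fills in ptr.type and extends the key's value; @e is a
 * struct bkey_i_extent being constructed.
 */
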
void bch2_extent_ptr_decoded_append(struct bkey_i *,
				    struct extent_ptr_decoded *);
union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s,
						   struct bch_extent_ptr *);
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
					   struct bch_extent_ptr *);

#define bch2_bkey_drop_ptrs(_k, _ptr, _cond)				\
do {									\
	struct bkey_ptrs _ptrs = bch2_bkey_ptrs(_k);			\
									\
	_ptr = &_ptrs.start->ptr;					\
									\
	while ((_ptr = bkey_ptr_next(_ptrs, _ptr))) {			\
		if (_cond) {						\
			_ptr = (void *) bch2_bkey_drop_ptr(_k, _ptr);	\
			_ptrs = bch2_bkey_ptrs(_k);			\
			continue;					\
		}							\
									\
		(_ptr)++;						\
	}								\
} while (0)

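/*
 * Usage sketch: bch2_bkey_drop_ptrs() re-fetches the pointer range after each
 * drop because dropping a pointer moves the entries that follow it. For
 * example, dropping every cached pointer from a key:
 *
 *	struct bch_extent_ptr *ptr;
 *
 *	bch2_bkey_drop_ptrs(k, ptr, ptr->cached);
 *
 * k is a struct bkey_s here; the condition is evaluated once per remaining
 * pointer.
 */
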
bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
			   struct bch_extent_ptr, u64);
bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s);

void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *);

bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);
int bch2_bkey_ptrs_invalid(struct bch_fs *, struct bkey_s_c,
			   enum bkey_invalid_flags, struct printbuf *);

void bch2_ptr_swab(struct bkey_s);

const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c);
unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c,
				       unsigned, unsigned);
bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c);

int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *,
				  struct bch_io_opts *);

/* Generic extent code: */

enum bch_extent_overlap {
	BCH_EXTENT_OVERLAP_ALL		= 0,
	BCH_EXTENT_OVERLAP_BACK		= 1,
	BCH_EXTENT_OVERLAP_FRONT	= 2,
	BCH_EXTENT_OVERLAP_MIDDLE	= 3,
};

/* Returns how k overlaps with m */
static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
							  const struct bkey *m)
{
	int cmp1 = bkey_lt(k->p, m->p);
	int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));

	return (cmp1 << 1) + cmp2;
}

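/*
 * Worked example for bch2_extent_overlap(): cmp1 is set when k ends before m
 * does, cmp2 when k starts after m does, and (cmp1 << 1) + cmp2 maps straight
 * onto the enum above. With m covering sectors [10, 20) and k covering
 * [12, 16) (same inode assumed), cmp1 = 1 and cmp2 = 1, giving
 * BCH_EXTENT_OVERLAP_MIDDLE; with k covering [5, 25), both are 0 and the
 * result is BCH_EXTENT_OVERLAP_ALL.
 */
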
int bch2_cut_front_s(struct bpos, struct bkey_s);
int bch2_cut_back_s(struct bpos, struct bkey_s);

static inline void bch2_cut_front(struct bpos where, struct bkey_i *k)
{
	bch2_cut_front_s(where, bkey_i_to_s(k));
}

static inline void bch2_cut_back(struct bpos where, struct bkey_i *k)
{
	bch2_cut_back_s(where, bkey_i_to_s(k));
}

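/*
 * Usage sketch: bch2_cut_front()/bch2_cut_back() trim an extent at a split
 * point, e.g. when splitting a key around a range [split_start, split_end):
 *
 *	bch2_cut_back(split_start, front);
 *	bch2_cut_front(split_end, back);
 *
 * front and back are hypothetical copies (struct bkey_i *) of the original
 * key: front keeps the part before split_start, back keeps the part from
 * split_end onwards.
 */
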
/**
 * bch2_key_resize - adjust size of @k
 *
 * bkey_start_offset(k) is preserved; only where the extent ends is modified
 */
static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
{
	k->p.offset -= k->size;
	k->p.offset += new_size;
	k->size = new_size;
}

#endif /* _BCACHEFS_EXTENTS_H */