fs/bcachefs/backpointers.h, v6.8
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
#define _BCACHEFS_BACKPOINTERS_BACKGROUND_H

#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "super.h"

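/* Byte-swap a 40-bit value stored in the low 40 bits of a u64: */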
static inline u64 swab40(u64 x)
{
	return (((x & 0x00000000ffULL) << 32)|
		((x & 0x000000ff00ULL) << 16)|
		((x & 0x0000ff0000ULL) >>  0)|
		((x & 0x00ff000000ULL) >> 16)|
		((x & 0xff00000000ULL) >> 32));
}

int bch2_backpointer_invalid(struct bch_fs *, struct bkey_s_c k,
			     enum bkey_invalid_flags, struct printbuf *);
void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_backpointer_swab(struct bkey_s);

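/* bkey_ops for KEY_TYPE_backpointer keys: validation, printing, byte swabbing: */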
#define bch2_bkey_ops_backpointer ((struct bkey_ops) {	\
	.key_invalid	= bch2_backpointer_invalid,	\
	.val_to_text	= bch2_backpointer_k_to_text,	\
	.swab		= bch2_backpointer_swab,	\
	.min_val_size	= 32,				\
})

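/*
 * Backpointer btree keys are ordered by bucket start sector shifted left by
 * this amount, plus an offset within the bucket - see bucket_pos_to_bp():
 */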
#define MAX_EXTENT_COMPRESS_RATIO_SHIFT		10

/*
 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
 * btree:
 */
static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
					   struct bpos bp_pos)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
	u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;

	return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
}

/*
 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
 */
static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
					   struct bpos bucket,
					   u64 bucket_offset)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
	struct bpos ret;

	ret = POS(bucket.inode,
		  (bucket_to_sector(ca, bucket.offset) <<
		   MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);

	EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));

	return ret;
}

int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bpos bucket,
				struct bch_backpointer, struct bkey_s_c, bool);

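/*
 * Insert or delete the backpointer for @bucket: falls back to
 * bch2_bucket_backpointer_mod_nowritebuffer() when
 * bch2_backpointers_no_use_write_buffer is set, otherwise the update is queued
 * through the btree write buffer:
 */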
static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
				struct bpos bucket,
				struct bch_backpointer bp,
				struct bkey_s_c orig_k,
				bool insert)
{
	if (unlikely(bch2_backpointers_no_use_write_buffer))
		return bch2_bucket_backpointer_mod_nowritebuffer(trans, bucket, bp, orig_k, insert);

	struct bkey_i_backpointer bp_k;

	bkey_backpointer_init(&bp_k.k_i);
	bp_k.k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
	bp_k.v = bp;

	if (!insert) {
		bp_k.k.type = KEY_TYPE_deleted;
		set_bkey_val_u64s(&bp_k.k, 0);
	}

	return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i);
}

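/*
 * Classify a pointer for backpointer accounting: btree node, part of an
 * erasure coded stripe, or plain user data:
 */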
static inline enum bch_data_type bkey_ptr_data_type(enum btree_id btree_id, unsigned level,
						    struct bkey_s_c k, struct extent_ptr_decoded p)
{
	return  level		? BCH_DATA_btree :
		p.has_ec	? BCH_DATA_stripe :
				  BCH_DATA_user;
}

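/*
 * Fill in @bucket_pos and @bp with the bucket and backpointer corresponding to
 * extent pointer @p of key @k:
 */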
static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
			   enum btree_id btree_id, unsigned level,
			   struct bkey_s_c k, struct extent_ptr_decoded p,
			   struct bpos *bucket_pos, struct bch_backpointer *bp)
{
	enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
	s64 sectors = level ? btree_sectors(c) : k.k->size;
	u32 bucket_offset;

	*bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
	*bp = (struct bch_backpointer) {
		.btree_id	= btree_id,
		.level		= level,
		.data_type	= data_type,
		.bucket_offset	= ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
			p.crc.offset,
		.bucket_len	= ptr_disk_sectors(sectors, p),
		.pos		= k.k->p,
	};
}

int bch2_get_next_backpointer(struct btree_trans *, struct bpos, int,
			      struct bpos *, struct bch_backpointer *, unsigned);
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
					 struct bpos, struct bch_backpointer,
					 unsigned);
struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *,
					struct bpos, struct bch_backpointer);

int bch2_check_btree_backpointers(struct bch_fs *);
int bch2_check_extents_to_backpointers(struct bch_fs *);
int bch2_check_backpointers_to_extents(struct bch_fs *);

#endif /* _BCACHEFS_BACKPOINTERS_BACKGROUND_H */
fs/bcachefs/backpointers.h, v6.9.4
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
#define _BCACHEFS_BACKPOINTERS_BACKGROUND_H

#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "super.h"

static inline u64 swab40(u64 x)
{
	return (((x & 0x00000000ffULL) << 32)|
		((x & 0x000000ff00ULL) << 16)|
		((x & 0x0000ff0000ULL) >>  0)|
		((x & 0x00ff000000ULL) >> 16)|
		((x & 0xff00000000ULL) >> 32));
}

int bch2_backpointer_invalid(struct bch_fs *, struct bkey_s_c k,
			     enum bkey_invalid_flags, struct printbuf *);
void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_backpointer_swab(struct bkey_s);

#define bch2_bkey_ops_backpointer ((struct bkey_ops) {	\
	.key_invalid	= bch2_backpointer_invalid,	\
	.val_to_text	= bch2_backpointer_k_to_text,	\
	.swab		= bch2_backpointer_swab,	\
	.min_val_size	= 32,				\
})

#define MAX_EXTENT_COMPRESS_RATIO_SHIFT		10

/*
 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
 * btree:
 */
static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
					   struct bpos bp_pos)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
	u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;

	return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
}

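/*
 * As bucket_pos_to_bp(), but without the debug-mode assertion that the result
 * round trips back to @bucket:
 */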
static inline struct bpos bucket_pos_to_bp_noerror(const struct bch_dev *ca,
						   struct bpos bucket,
						   u64 bucket_offset)
{
	return POS(bucket.inode,
		   (bucket_to_sector(ca, bucket.offset) <<
		    MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
}

/*
 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
 */
static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
					   struct bpos bucket,
					   u64 bucket_offset)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
	struct bpos ret = bucket_pos_to_bp_noerror(ca, bucket, bucket_offset);
	EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
	return ret;
}

int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bpos bucket,
				struct bch_backpointer, struct bkey_s_c, bool);

static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
				struct bpos bucket,
				struct bch_backpointer bp,
				struct bkey_s_c orig_k,
				bool insert)
{
	if (unlikely(bch2_backpointers_no_use_write_buffer))
		return bch2_bucket_backpointer_mod_nowritebuffer(trans, bucket, bp, orig_k, insert);

	struct bkey_i_backpointer bp_k;

	bkey_backpointer_init(&bp_k.k_i);
	bp_k.k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
	bp_k.v = bp;

	if (!insert) {
		bp_k.k.type = KEY_TYPE_deleted;
		set_bkey_val_u64s(&bp_k.k, 0);
	}

	return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i);
}

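/*
 * Data type for a pointer, derived from the key type; for stripe keys this
 * distinguishes user data blocks from parity blocks:
 */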
static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
							 struct extent_ptr_decoded p,
							 const union bch_extent_entry *entry)
{
	switch (k.k->type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
		return BCH_DATA_btree;
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		return p.has_ec ? BCH_DATA_stripe : BCH_DATA_user;
	case KEY_TYPE_stripe: {
		const struct bch_extent_ptr *ptr = &entry->ptr;
		struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

		BUG_ON(ptr < s.v->ptrs ||
		       ptr >= s.v->ptrs + s.v->nr_blocks);

		return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
			? BCH_DATA_parity
			: BCH_DATA_user;
	}
	default:
		BUG();
	}
}

static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
			   enum btree_id btree_id, unsigned level,
			   struct bkey_s_c k, struct extent_ptr_decoded p,
			   const union bch_extent_entry *entry,
			   struct bpos *bucket_pos, struct bch_backpointer *bp)
{
	enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
	s64 sectors = level ? btree_sectors(c) : k.k->size;
	u32 bucket_offset;

	*bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
	*bp = (struct bch_backpointer) {
		.btree_id	= btree_id,
		.level		= level,
		.data_type	= data_type,
		.bucket_offset	= ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
			p.crc.offset,
		.bucket_len	= ptr_disk_sectors(sectors, p),
		.pos		= k.k->p,
	};
}

int bch2_get_next_backpointer(struct btree_trans *, struct bpos, int,
			      struct bpos *, struct bch_backpointer *, unsigned);
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
					 struct bpos, struct bch_backpointer,
					 unsigned);
struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *,
					struct bpos, struct bch_backpointer);

int bch2_check_btree_backpointers(struct bch_fs *);
int bch2_check_extents_to_backpointers(struct bch_fs *);
int bch2_check_backpointers_to_extents(struct bch_fs *);

#endif /* _BCACHEFS_BACKPOINTERS_BACKGROUND_H */