v6.13.7
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/wait.h>
#include "misc.h"

struct extent_changeset;
struct btrfs_fs_info;
struct btrfs_inode;

/* Bits for the extent state */
enum {
	ENUM_BIT(EXTENT_DIRTY),
	ENUM_BIT(EXTENT_UPTODATE),
	ENUM_BIT(EXTENT_LOCKED),
	ENUM_BIT(EXTENT_DIO_LOCKED),
	ENUM_BIT(EXTENT_NEW),
	ENUM_BIT(EXTENT_DELALLOC),
	ENUM_BIT(EXTENT_DEFRAG),
	ENUM_BIT(EXTENT_BOUNDARY),
	ENUM_BIT(EXTENT_NODATASUM),
	ENUM_BIT(EXTENT_CLEAR_META_RESV),
	ENUM_BIT(EXTENT_NEED_WAIT),
	ENUM_BIT(EXTENT_NORESERVE),
	ENUM_BIT(EXTENT_QGROUP_RESERVED),
	ENUM_BIT(EXTENT_CLEAR_DATA_RESV),
	/*
	 * Must be cleared only during ordered extent completion or on error
	 * paths if we did not manage to submit bios and create the ordered
	 * extents for the range.  Should not be cleared during page release
	 * and page invalidation (if there is an ordered extent in flight),
	 * that is left for the ordered extent completion.
	 */
	ENUM_BIT(EXTENT_DELALLOC_NEW),
	/*
	 * When an ordered extent successfully completes for a region marked as
	 * a new delalloc range, use this flag when clearing a new delalloc
	 * range to indicate that the VFS' inode number of bytes should be
	 * incremented and the inode's new delalloc bytes decremented, in an
	 * atomic way to prevent races with stat(2).
	 */
	ENUM_BIT(EXTENT_ADD_INODE_BYTES),
	/*
	 * Set during truncate when we're clearing an entire range and we just
	 * want the extent states to go away.
	 */
	ENUM_BIT(EXTENT_CLEAR_ALL_BITS),

	/*
	 * This must be last.
	 *
	 * Bit not representing a state but a request for NOWAIT semantics,
	 * e.g. when allocating memory, and must be masked out from the other
	 * bits.
	 */
	ENUM_BIT(EXTENT_NOWAIT)
};

#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES | \
				 EXTENT_CLEAR_ALL_BITS)

#define EXTENT_LOCK_BITS	(EXTENT_LOCKED | EXTENT_DIO_LOCKED)

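The last enum entry above is a request flag, not a stored state: callers may OR EXTENT_NOWAIT into a bit mask to ask the tree code for non-blocking (GFP_NOWAIT-style) allocations, and it is masked out before any state is recorded. A minimal caller-side sketch, assuming set_extent_bit() as declared later in this header; the function name and the choice of EXTENT_DELALLOC are illustrative only.

/* Illustrative sketch only -- not part of the kernel header. */
static inline int example_mark_delalloc_nowait(struct extent_io_tree *tree,
					       u64 start, u64 end)
{
	/*
	 * EXTENT_NOWAIT only changes allocation behaviour inside the tree
	 * code; it is stripped there and never appears in extent_state::state.
	 */
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_NOWAIT, NULL);
}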
/*
 * Redefined bits above which are used only in the device allocation tree,
 * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
 * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
 * manipulation functions
 */
#define CHUNK_ALLOCATED				EXTENT_DIRTY
#define CHUNK_TRIMMED				EXTENT_DEFRAG
#define CHUNK_STATE_MASK			(CHUNK_ALLOCATED |		\
						 CHUNK_TRIMMED)

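The CHUNK_* aliases are meant only for the device allocation state tree (owner IO_TREE_DEVICE_ALLOC_STATE), as the comment above explains. A hedged sketch of how such a tree could mark a device range as allocated; the function and parameter names are hypothetical and this is not a quote of the btrfs volume code.

/* Illustrative sketch only -- not part of the kernel header. */
static inline int example_mark_chunk_allocated(struct extent_io_tree *alloc_state,
					       u64 physical, u64 num_bytes)
{
	/* This tree only uses the CHUNK_* aliases, never the lock/boundary bits. */
	return set_extent_bit(alloc_state, physical, physical + num_bytes - 1,
			      CHUNK_ALLOCATED, NULL);
}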
enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
	struct rb_root state;
	/*
	 * The fs_info is needed for trace points, a tree attached to an inode
	 * needs the inode.
	 *
	 * owner == IO_TREE_INODE_IO - then inode is valid and fs_info can be
	 *                             accessed as inode->root->fs_info
	 */
	union {
		struct btrfs_fs_info *fs_info;
		struct btrfs_inode *inode;
	};

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
};

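Because fs_info and inode share a union, the owner field decides which member may be dereferenced; that is what the extent_io_tree_to_inode()/extent_io_tree_to_fs_info() accessors declared below encapsulate. A minimal sketch of the rule stated in the comment above (an inode-owned tree reaches fs_info via inode->root->fs_info); this is an illustration, not the accessor's actual body.

/* Illustrative sketch only -- not part of the kernel header. */
static inline const struct btrfs_fs_info *
example_tree_to_fs_info(const struct extent_io_tree *tree)
{
	if (tree->owner == IO_TREE_INODE_IO)
		return tree->inode->root->fs_info;	/* the inode member is valid */
	return tree->fs_info;				/* the fs_info member is valid */
}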
struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	u32 state;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

struct btrfs_inode *extent_io_tree_to_inode(struct extent_io_tree *tree);
const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_tree *tree);
const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tree *tree);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner);
void extent_io_tree_release(struct extent_io_tree *tree);
int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		  struct extent_state **cached);
bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		       struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
			      struct extent_state **cached)
{
	return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
}

static inline bool try_lock_extent(struct extent_io_tree *tree, u64 start,
				   u64 end, struct extent_state **cached)
{
	return __try_lock_extent(tree, start, end, EXTENT_LOCKED, cached);
}

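lock_extent() blocks until no conflicting EXTENT_LOCKED range remains and can hand back a cached extent_state so the matching unlock avoids a second tree search. A typical (illustrative) pairing with unlock_extent(), which is declared further below; the range bounds are placeholders.

/* Illustrative sketch only -- not part of the kernel header. */
static inline void example_locked_range_op(struct extent_io_tree *io_tree,
					   u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	lock_extent(io_tree, start, end, &cached);

	/* ... operate on [start, end] while the range is locked ... */

	unlock_extent(io_tree, start, end, &cached);	/* hands back the cached state */
}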
int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig,
		     struct extent_state **cached_state);

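count_range_bits() reports how many bytes between *start and search_end carry the given bits, capped at max_bytes and restricted to one contiguous run when contig is non-zero. A hedged example of counting delalloc bytes in a range; the exact handling of *start and the choice of EXTENT_DELALLOC here are assumptions for illustration.

/* Illustrative sketch only -- not part of the kernel header. */
static inline u64 example_count_delalloc_bytes(struct extent_io_tree *io_tree,
					       u64 start, u64 end)
{
	u64 search_start = start;	/* may be advanced by the search */

	return count_range_bits(io_tree, &search_start, end, (u64)-1,
				EXTENT_DELALLOC, 0, NULL);
}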
void free_extent_state(struct extent_state *state);
bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
		    struct extent_state *cached_state);
bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, struct extent_state **cached,
		       struct extent_changeset *changeset);

static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
				   u64 end, u32 bits,
				   struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, bits, cached, NULL);
}

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
				struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL);
}

static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, u32 bits)
{
	return clear_extent_bit(tree, start, end, bits, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, struct extent_state **cached_state);

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
				  cached_state, NULL);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);

bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			   u64 *start_ret, u64 *end_ret, u32 bits,
			   struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
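find_first_extent_bit() above returns true when a range at or after the given start has any of the requested bits set, reporting its boundaries through start_ret/end_ret, so visiting all such ranges is a simple loop. An illustrative walk over EXTENT_DIRTY ranges; the per-range work is a placeholder.

/* Illustrative sketch only -- not part of the kernel header. */
static inline void example_walk_dirty_ranges(struct extent_io_tree *tree)
{
	u64 cur = 0;
	u64 found_start, found_end;

	while (find_first_extent_bit(tree, cur, &found_start, &found_end,
				     EXTENT_DIRTY, NULL)) {
		/* ... process the range [found_start, found_end] ... */
		cur = found_end + 1;	/* continue searching after this range */
	}
}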
static inline int lock_dio_extent(struct extent_io_tree *tree, u64 start,
				  u64 end, struct extent_state **cached)
{
	return __lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
}

static inline bool try_lock_dio_extent(struct extent_io_tree *tree, u64 start,
				       u64 end, struct extent_state **cached)
{
	return __try_lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
}

static inline int unlock_dio_extent(struct extent_io_tree *tree, u64 start,
				    u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_DIO_LOCKED, cached, NULL);
}

#endif /* BTRFS_EXTENT_IO_TREE_H */
v5.14.15
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

struct extent_changeset;
struct io_failure_record;

/* Bits for the extent state */
#define EXTENT_DIRTY		(1U << 0)
#define EXTENT_UPTODATE		(1U << 1)
#define EXTENT_LOCKED		(1U << 2)
#define EXTENT_NEW		(1U << 3)
#define EXTENT_DELALLOC		(1U << 4)
#define EXTENT_DEFRAG		(1U << 5)
#define EXTENT_BOUNDARY		(1U << 6)
#define EXTENT_NODATASUM	(1U << 7)
#define EXTENT_CLEAR_META_RESV	(1U << 8)
#define EXTENT_NEED_WAIT	(1U << 9)
#define EXTENT_DAMAGED		(1U << 10)
#define EXTENT_NORESERVE	(1U << 11)
#define EXTENT_QGROUP_RESERVED	(1U << 12)
#define EXTENT_CLEAR_DATA_RESV	(1U << 13)
/*
 * Must be cleared only during ordered extent completion or on error paths if we
 * did not manage to submit bios and create the ordered extents for the range.
 * Should not be cleared during page release and page invalidation (if there is
 * an ordered extent in flight), that is left for the ordered extent completion.
 */
#define EXTENT_DELALLOC_NEW	(1U << 14)
/*
 * When an ordered extent successfully completes for a region marked as a new
 * delalloc range, use this flag when clearing a new delalloc range to indicate
 * that the VFS' inode number of bytes should be incremented and the inode's new
 * delalloc bytes decremented, in an atomic way to prevent races with stat(2).
 */
#define EXTENT_ADD_INODE_BYTES  (1U << 15)
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES)

/*
 * Redefined bits above which are used only in the device allocation tree,
 * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
 * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
 * manipulation functions
 */
#define CHUNK_ALLOCATED				EXTENT_DIRTY
#define CHUNK_TRIMMED				EXTENT_DEFRAG
#define CHUNK_STATE_MASK			(CHUNK_ALLOCATED |		\
						 CHUNK_TRIMMED)

enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_INODE_IO_FAILURE,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};

struct extent_io_tree {
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	void *private_data;
	u64 dirty_bytes;
	bool track_uptodate;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	spinlock_t lock;
};

struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	wait_queue_head_t wq;
	refcount_t refs;
	u32 state;

	struct io_failure_record *failrec;

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

int __init extent_state_cache_init(void);
void __cold extent_state_cache_exit(void);

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);

int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     struct extent_state **cached);

static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return lock_extent_bits(tree, start, end, NULL);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);

int __init extent_io_init(void);
void __cold extent_io_exit(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, int filled, struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     u32 bits, int wake, int delete,
		     struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     u32 bits, int wake, int delete,
		     struct extent_state **cached, gfp_t mask,
		     struct extent_changeset *changeset);

static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}

static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_NOFS, NULL);
}

static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
		u64 start, u64 end, struct extent_state **cached)
{
	return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				GFP_ATOMIC, NULL);
}

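In this older version the cached-state handling is split across separate helpers, so the usual pattern is to lock with lock_extent_bits() and release with unlock_extent_cached() using the same cached pointer. An illustrative pairing; the range bounds are placeholders.

/* Illustrative sketch only -- not part of the kernel header. */
static inline void example_locked_range_op_v5(struct extent_io_tree *io_tree,
					      u64 start, u64 end)
{
	struct extent_state *cached = NULL;

	lock_extent_bits(io_tree, start, end, &cached);

	/* ... operate on [start, end] while the range is locked ... */

	unlock_extent_cached(io_tree, start, end, &cached);
}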
static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
				    u64 end, u32 bits)
{
	int wake = 0;

	if (bits & EXTENT_LOCKED)
		wake = 1;

	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}

int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, unsigned exclusive_bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask,
		   struct extent_changeset *changeset);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits);

static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
		u64 end, u32 bits)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
			      NULL);
}

static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
		u64 end, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, NULL,
			      mask, NULL);
}

static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
				     u64 end, struct extent_state **cached)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, cached);
}

int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);

static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
				      u64 end, u32 extra_bits,
				      struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
			      0, NULL, cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
			      0, NULL, cached_state, GFP_NOFS, NULL);
}

static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
		u64 end)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL, NULL,
			      GFP_NOFS, NULL);
}

static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
		u64 end, struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      cached_state, mask, NULL);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, u32 bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
int extent_invalidatepage(struct extent_io_tree *tree,
			  struct page *page, unsigned long offset);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);

/* This should be reworked in the future and put elsewhere. */
struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start);
int set_state_failrec(struct extent_io_tree *tree, u64 start,
		      struct io_failure_record *failrec);
void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
		u64 end);
int free_io_failure(struct extent_io_tree *failure_tree,
		    struct extent_io_tree *io_tree,
		    struct io_failure_record *rec);
int clean_io_failure(struct btrfs_fs_info *fs_info,
		     struct extent_io_tree *failure_tree,
		     struct extent_io_tree *io_tree, u64 start,
		     struct page *page, u64 ino, unsigned int pg_offset);

#endif /* BTRFS_EXTENT_IO_TREE_H */