Loading...
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef BTRFS_EXTENT_IO_TREE_H
4#define BTRFS_EXTENT_IO_TREE_H
5
6#include "misc.h"
7
8struct extent_changeset;
9struct io_failure_record;
10
11/* Bits for the extent state */
/*
 * Bits for the extent state.
 *
 * ENUM_BIT() comes from "misc.h"; it is expected to give each name a distinct
 * single-bit value so the flags can be OR-ed together -- confirm against the
 * ENUM_BIT() definition.
 */
enum {
	ENUM_BIT(EXTENT_DIRTY),
	ENUM_BIT(EXTENT_UPTODATE),
	ENUM_BIT(EXTENT_LOCKED),
	ENUM_BIT(EXTENT_NEW),
	ENUM_BIT(EXTENT_DELALLOC),
	ENUM_BIT(EXTENT_DEFRAG),
	ENUM_BIT(EXTENT_BOUNDARY),
	ENUM_BIT(EXTENT_NODATASUM),
	ENUM_BIT(EXTENT_CLEAR_META_RESV),
	ENUM_BIT(EXTENT_NEED_WAIT),
	ENUM_BIT(EXTENT_NORESERVE),
	ENUM_BIT(EXTENT_QGROUP_RESERVED),
	ENUM_BIT(EXTENT_CLEAR_DATA_RESV),
	/*
	 * Must be cleared only during ordered extent completion or on error
	 * paths if we did not manage to submit bios and create the ordered
	 * extents for the range. Should not be cleared during page release
	 * and page invalidation (if there is an ordered extent in flight),
	 * that is left for the ordered extent completion.
	 */
	ENUM_BIT(EXTENT_DELALLOC_NEW),
	/*
	 * When an ordered extent successfully completes for a region marked as
	 * a new delalloc range, use this flag when clearing a new delalloc
	 * range to indicate that the VFS' inode number of bytes should be
	 * incremented and the inode's new delalloc bytes decremented, in an
	 * atomic way to prevent races with stat(2).
	 */
	ENUM_BIT(EXTENT_ADD_INODE_BYTES),
	/*
	 * Set during truncate when we're clearing an entire range and we just
	 * want the extent states to go away.
	 */
	ENUM_BIT(EXTENT_CLEAR_ALL_BITS),
};
48
/*
 * Both reservation-clearing bits together; the accounting performed when they
 * are cleared lives in the .c implementation.
 */
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)

/*
 * Mask of "control" flags that adjust how an operation behaves rather than
 * describing persistent extent state -- NOTE(review): inferred from the name,
 * confirm in extent-io-tree.c.
 */
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES | \
				 EXTENT_CLEAR_ALL_BITS)

/*
 * Redefined bits above which are used only in the device allocation tree,
 * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
 * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
 * manipulation functions
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG
#define CHUNK_STATE_MASK	(CHUNK_ALLOCATED | \
				 CHUNK_TRIMMED)
65
/*
 * Identifiers for struct extent_io_tree::owner, naming which subsystem a
 * given tree instance belongs to (used e.g. to tell apart the different
 * trees; see the owner field below).
 */
enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};
79
/*
 * A tree of extent_state records keyed by byte range, protected by @lock.
 */
struct extent_io_tree {
	/* Red-black tree of struct extent_state, ordered by range. */
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	/* Inode associated with this tree, or NULL. */
	struct btrfs_inode *inode;

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	/* Protects the rb-tree and the states within it. */
	spinlock_t lock;
};
91
/*
 * One contiguous range [start, end] in an extent_io_tree together with the
 * EXTENT_* bits currently set on it.
 */
struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	/* Waiters blocked on bits of this state (see wait_extent_bit()). */
	wait_queue_head_t wq;
	refcount_t refs;
	/* Bitmask of EXTENT_* flags set on this range. */
	u32 state;

#ifdef CONFIG_BTRFS_DEBUG
	/* Tracks leaked states for debug builds. */
	struct list_head leak_list;
#endif
};
106
/* Initialize / tear down a tree; @owner is one of IO_TREE_*. */
void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner);
void extent_io_tree_release(struct extent_io_tree *tree);

/* Blocking and non-blocking range locking (EXTENT_LOCKED). */
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		struct extent_state **cached);

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    struct extent_state **cached);

/* Slab cache for struct extent_state, set up at module init. */
int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig,
		     struct extent_state **cached_state);

void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, int filled, struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
/*
 * Core clearing primitive; the inline wrappers below pass fixed @mask and
 * NULL @changeset.
 */
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);
133
134static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
135 u64 end, u32 bits,
136 struct extent_state **cached)
137{
138 return __clear_extent_bit(tree, start, end, bits, cached,
139 GFP_NOFS, NULL);
140}
141
142static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
143 struct extent_state **cached)
144{
145 return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached,
146 GFP_NOFS, NULL);
147}
148
149static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
150 u64 end, u32 bits)
151{
152 return clear_extent_bit(tree, start, end, bits, NULL);
153}
154
/* Set bits, optionally recording the change into @changeset. */
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
/* Core setting primitive used by the inline wrappers below. */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, struct extent_state **cached_state, gfp_t mask);
159
160static inline int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start,
161 u64 end, u32 bits)
162{
163 return set_extent_bit(tree, start, end, bits, NULL, GFP_NOWAIT);
164}
165
166static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
167 u64 end, u32 bits)
168{
169 return set_extent_bit(tree, start, end, bits, NULL, GFP_NOFS);
170}
171
172static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
173 u64 end, struct extent_state **cached_state)
174{
175 return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
176 cached_state, GFP_NOFS, NULL);
177}
178
179static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
180 u64 end, gfp_t mask)
181{
182 return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, mask);
183}
184
185static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
186 u64 end, struct extent_state **cached)
187{
188 return clear_extent_bit(tree, start, end,
189 EXTENT_DIRTY | EXTENT_DELALLOC |
190 EXTENT_DO_ACCOUNTING, cached);
191}
192
/* Atomically set @bits and clear @clear_bits over the range. */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);
196
197static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
198 u64 end, u32 extra_bits,
199 struct extent_state **cached_state)
200{
201 return set_extent_bit(tree, start, end,
202 EXTENT_DELALLOC | extra_bits,
203 cached_state, GFP_NOFS);
204}
205
206static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
207 u64 end, struct extent_state **cached_state)
208{
209 return set_extent_bit(tree, start, end,
210 EXTENT_DELALLOC | EXTENT_DEFRAG,
211 cached_state, GFP_NOFS);
212}
213
214static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
215 u64 end)
216{
217 return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, GFP_NOFS);
218}
219
/* Range searches over set (or clear) bits; results via *start_ret/*end_ret. */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, u32 bits,
			  struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
/* Locate a delalloc range; @start/@end are in/out parameters. */
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
/* Sleep until @bits are cleared on the whole range. */
void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		     struct extent_state **cached_state);
232
233#endif /* BTRFS_EXTENT_IO_TREE_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef BTRFS_EXTENT_IO_TREE_H
4#define BTRFS_EXTENT_IO_TREE_H
5
6#include <linux/rbtree.h>
7#include <linux/spinlock.h>
8#include <linux/refcount.h>
9#include <linux/list.h>
10#include <linux/wait.h>
11#include "misc.h"
12
13struct extent_changeset;
14struct btrfs_fs_info;
15struct btrfs_inode;
16
17/* Bits for the extent state */
/*
 * Bits for the extent state.
 *
 * ENUM_BIT() comes from "misc.h"; it is expected to give each name a distinct
 * single-bit value so the flags can be OR-ed together -- confirm against the
 * ENUM_BIT() definition.
 */
enum {
	ENUM_BIT(EXTENT_DIRTY),
	ENUM_BIT(EXTENT_UPTODATE),
	ENUM_BIT(EXTENT_LOCKED),
	/* Separate lock bit used by the direct-IO helpers below. */
	ENUM_BIT(EXTENT_DIO_LOCKED),
	ENUM_BIT(EXTENT_NEW),
	ENUM_BIT(EXTENT_DELALLOC),
	ENUM_BIT(EXTENT_DEFRAG),
	ENUM_BIT(EXTENT_BOUNDARY),
	ENUM_BIT(EXTENT_NODATASUM),
	ENUM_BIT(EXTENT_CLEAR_META_RESV),
	ENUM_BIT(EXTENT_NEED_WAIT),
	ENUM_BIT(EXTENT_NORESERVE),
	ENUM_BIT(EXTENT_QGROUP_RESERVED),
	ENUM_BIT(EXTENT_CLEAR_DATA_RESV),
	/*
	 * Must be cleared only during ordered extent completion or on error
	 * paths if we did not manage to submit bios and create the ordered
	 * extents for the range. Should not be cleared during page release
	 * and page invalidation (if there is an ordered extent in flight),
	 * that is left for the ordered extent completion.
	 */
	ENUM_BIT(EXTENT_DELALLOC_NEW),
	/*
	 * When an ordered extent successfully completes for a region marked as
	 * a new delalloc range, use this flag when clearing a new delalloc
	 * range to indicate that the VFS' inode number of bytes should be
	 * incremented and the inode's new delalloc bytes decremented, in an
	 * atomic way to prevent races with stat(2).
	 */
	ENUM_BIT(EXTENT_ADD_INODE_BYTES),
	/*
	 * Set during truncate when we're clearing an entire range and we just
	 * want the extent states to go away.
	 */
	ENUM_BIT(EXTENT_CLEAR_ALL_BITS),

	/*
	 * This must be last.
	 *
	 * Bit not representing a state but a request for NOWAIT semantics,
	 * e.g. when allocating memory, and must be masked out from the other
	 * bits.
	 */
	ENUM_BIT(EXTENT_NOWAIT)
};
64
/*
 * Both reservation-clearing bits together; the accounting performed when they
 * are cleared lives in the .c implementation.
 */
#define EXTENT_DO_ACCOUNTING    (EXTENT_CLEAR_META_RESV | \
				 EXTENT_CLEAR_DATA_RESV)

/*
 * Mask of "control" flags that adjust how an operation behaves rather than
 * describing persistent extent state -- NOTE(review): inferred from the name,
 * confirm in extent-io-tree.c.
 */
#define EXTENT_CTLBITS		(EXTENT_DO_ACCOUNTING | \
				 EXTENT_ADD_INODE_BYTES | \
				 EXTENT_CLEAR_ALL_BITS)

/* Both range-lock bits: the regular one and the direct-IO one. */
#define EXTENT_LOCK_BITS	(EXTENT_LOCKED | EXTENT_DIO_LOCKED)

/*
 * Redefined bits above which are used only in the device allocation tree,
 * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
 * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
 * manipulation functions
 */
#define CHUNK_ALLOCATED		EXTENT_DIRTY
#define CHUNK_TRIMMED		EXTENT_DEFRAG
#define CHUNK_STATE_MASK	(CHUNK_ALLOCATED | \
				 CHUNK_TRIMMED)
83
/*
 * Identifiers for struct extent_io_tree::owner, naming which subsystem a
 * given tree instance belongs to (see the owner field below).
 */
enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};
97
/*
 * A tree of extent_state records keyed by byte range, protected by @lock.
 */
struct extent_io_tree {
	/* Red-black tree of struct extent_state, ordered by range. */
	struct rb_root state;
	/*
	 * The fs_info is needed for trace points, a tree attached to an inode
	 * needs the inode.
	 *
	 * owner == IO_TREE_INODE_IO - then inode is valid and fs_info can be
	 * accessed as inode->root->fs_info
	 */
	union {
		struct btrfs_fs_info *fs_info;
		struct btrfs_inode *inode;
	};

	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;

	/* Protects the rb-tree and the states within it. */
	spinlock_t lock;
};
117
/*
 * One contiguous range [start, end] in an extent_io_tree together with the
 * EXTENT_* bits currently set on it.
 */
struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;

	/* ADD NEW ELEMENTS AFTER THIS */
	/* Waiters blocked on bits of this state. */
	wait_queue_head_t wq;
	refcount_t refs;
	/* Bitmask of EXTENT_* flags set on this range. */
	u32 state;

#ifdef CONFIG_BTRFS_DEBUG
	/* Tracks leaked states for debug builds. */
	struct list_head leak_list;
#endif
};
132
/* Accessors resolving the fs_info/inode union based on the tree's owner. */
struct btrfs_inode *extent_io_tree_to_inode(struct extent_io_tree *tree);
const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_tree *tree);
const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tree *tree);

/* Initialize / tear down a tree; @owner is one of IO_TREE_*. */
void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner);
void extent_io_tree_release(struct extent_io_tree *tree);
/*
 * Core locking primitives; @bits selects EXTENT_LOCKED or EXTENT_DIO_LOCKED
 * (see the inline wrappers below).
 */
int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		  struct extent_state **cached);
bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
		       struct extent_state **cached);
144
145static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
146 struct extent_state **cached)
147{
148 return __lock_extent(tree, start, end, EXTENT_LOCKED, cached);
149}
150
151static inline bool try_lock_extent(struct extent_io_tree *tree, u64 start,
152 u64 end, struct extent_state **cached)
153{
154 return __try_lock_extent(tree, start, end, EXTENT_LOCKED, cached);
155}
156
/* Slab cache for struct extent_state, set up at module init. */
int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);

u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig,
		     struct extent_state **cached_state);

void free_extent_state(struct extent_state *state);
/* Range queries for a single @bit (whole-range vs any-part-of-range). */
bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
		    struct extent_state *cached_state);
bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
/*
 * Core clearing primitive; unlike the older API there is no gfp_t argument,
 * NOWAIT semantics are requested via EXTENT_NOWAIT in @bits (see the enum).
 */
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, struct extent_state **cached,
		       struct extent_changeset *changeset);
174
175static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
176 u64 end, u32 bits,
177 struct extent_state **cached)
178{
179 return __clear_extent_bit(tree, start, end, bits, cached, NULL);
180}
181
182static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
183 struct extent_state **cached)
184{
185 return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL);
186}
187
188static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
189 u64 end, u32 bits)
190{
191 return clear_extent_bit(tree, start, end, bits, NULL);
192}
193
/* Set bits, optionally recording the change into @changeset. */
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
/*
 * Core setting primitive; NOWAIT allocation semantics are requested via
 * EXTENT_NOWAIT in @bits rather than a gfp_t argument.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, struct extent_state **cached_state);
198
199static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
200 u64 end, struct extent_state **cached_state)
201{
202 return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
203 cached_state, NULL);
204}
205
206static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
207 u64 end, struct extent_state **cached)
208{
209 return clear_extent_bit(tree, start, end,
210 EXTENT_DIRTY | EXTENT_DELALLOC |
211 EXTENT_DO_ACCOUNTING, cached);
212}
213
/* Atomically set @bits and clear @clear_bits over the range. */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);

/* Range searches over set (or clear) bits; results via *start_ret/*end_ret. */
bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			   u64 *start_ret, u64 *end_ret, u32 bits,
			   struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
/* Locate a delalloc range; @start/@end are in/out parameters. */
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
228static inline int lock_dio_extent(struct extent_io_tree *tree, u64 start,
229 u64 end, struct extent_state **cached)
230{
231 return __lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
232}
233
234static inline bool try_lock_dio_extent(struct extent_io_tree *tree, u64 start,
235 u64 end, struct extent_state **cached)
236{
237 return __try_lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached);
238}
239
240static inline int unlock_dio_extent(struct extent_io_tree *tree, u64 start,
241 u64 end, struct extent_state **cached)
242{
243 return __clear_extent_bit(tree, start, end, EXTENT_DIO_LOCKED, cached, NULL);
244}
245
246#endif /* BTRFS_EXTENT_IO_TREE_H */