/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

/*
 * This describes the state of the block_group for async discard. This is
 * needed because of the two-pass nature of async discard, where extent
 * discarding is prioritized over bitmap discarding. BTRFS_DISCARD_RESET_CURSOR
 * is set when we are resetting between lists to prevent contention for discard
 * state variables (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few
 * chunks already allocated. This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without filling
 * the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_FORCE_FOR_EXTENT is like CHUNK_ALLOC_FORCE, but is called from
 * find_free_extent() and also activates the zone.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
	CHUNK_ALLOC_FORCE_FOR_EXTENT,
};

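/*
 * Usage sketch (illustrative, not from the kernel sources; assumes a
 * transaction handle 'trans' and 'fs_info' in scope): a caller that would
 * like more metadata space but can tolerate not getting it would typically
 * pass CHUNK_ALLOC_NO_FORCE:
 *
 *	u64 flags = btrfs_metadata_alloc_profile(fs_info);
 *	int ret;
 *
 *	ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_NO_FORCE);
 *	if (ret < 0)
 *		return ret;	/* e.g. -ENOSPC or a transaction error */
 */
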
/* Block group flags set at runtime */
enum btrfs_block_group_flags {
	BLOCK_GROUP_FLAG_IREF,
	BLOCK_GROUP_FLAG_REMOVED,
	BLOCK_GROUP_FLAG_TO_COPY,
	BLOCK_GROUP_FLAG_RELOCATING_REPAIR,
	BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
	BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
	BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
	/* Does the block group need to be added to the free space tree? */
	BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
	/* Indicate that the block group is placed on a sequential zone */
	BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE,
};

enum btrfs_caching_type {
	BTRFS_CACHE_NO,
	BTRFS_CACHE_STARTED,
	BTRFS_CACHE_FINISHED,
	BTRFS_CACHE_ERROR,
};

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M

/*
 * Tree to record all locked full stripes of a RAID5/6 block group
 */
struct btrfs_full_stripe_locks_tree {
	struct rb_root root;
	struct mutex lock;
};

struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 start;
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;
	u64 global_root_id;

	/*
	 * The last committed used bytes of this block group; if the above
	 * @used is still the same as @commit_used, we don't need to update
	 * the block group item of this block group.
	 */
	u64 commit_used;

	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * It is only used for the delayed data space allocation, because
	 * only the data space allocation and the relative metadata update
	 * can be done across transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;
	unsigned long runtime_flags;

	unsigned int ro;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	refcount_t refs;

	/*
	 * List of struct btrfs_free_cluster for this block group.
	 * Today it will only have one thing on it, but that may change.
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents cannot be reused for future block group allocations
	 * until the counter goes down to 0. This is to prevent them from being
	 * reused while some task is still using the block group after it was
	 * deleted - we want to make sure they can only be reused for new block
	 * groups after that task is done with the deleted block group.
	 */
	atomic_t frozen;

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0). Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that step.
	 * This is to prevent races between block group relocation and nocow
	 * writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Number of extents in this block group used for swap files.
	 * All accesses protected by the spinlock 'lock'.
	 */
	int swap_extents;

	/* Record locked full stripes for RAID5/6 block group */
	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;

	/*
	 * Allocation offset for the block group to implement sequential
	 * allocation. This is used only on a zoned filesystem.
	 */
	u64 alloc_offset;
	u64 zone_unusable;
	u64 zone_capacity;
	u64 meta_write_pointer;
	struct map_lookup *physical_map;
	struct list_head active_bg_list;
	struct work_struct zone_finish_work;
	struct extent_buffer *last_eb;
};

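/*
 * Illustrative note (a sketch, not from the kernel sources): the
 * runtime_flags member holds the enum btrfs_block_group_flags bits defined
 * above and is meant to be accessed with the standard atomic bit helpers,
 * e.g.:
 *
 *	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags))
 *		return;
 *	set_bit(BLOCK_GROUP_FLAG_TO_COPY, &block_group->runtime_flags);
 */
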
static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}

static inline bool btrfs_is_block_group_data_only(
					struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering the
	 * efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group);
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
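/*
 * Usage sketch (illustrative; assumes 'fs_info' and 'bytenr' in scope): a
 * lookup returns a referenced block group that must be released with
 * btrfs_put_block_group() when the caller is done:
 *
 *	struct btrfs_block_group *bg;
 *
 *	bg = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (!bg)
 *		return -ENOENT;
 *	... use bg ...
 *	btrfs_put_block_group(bg);
 */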
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
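/*
 * Usage sketch (illustrative): a nocow writer bumps the counter before
 * writing and drops it once its ordered extent exists (or on error), per
 * the 'nocow_writers' comment in struct btrfs_block_group:
 *
 *	struct btrfs_block_group *bg;
 *
 *	bg = btrfs_inc_nocow_writers(fs_info, bytenr);
 *	if (!bg)
 *		... fall back to a COW write ...
 *	... create the ordered extent ...
 *	btrfs_dec_nocow_writers(bg);
 */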
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes);
int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
u64 add_new_free_space(struct btrfs_block_group *block_group,
		       u64 start, u64 end);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
						 u64 bytes_used, u64 type,
						 u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, bool alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     struct block_device *bdev, u64 physical, u64 **logical,
		     int *naddrs, int *stripe_len);

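/*
 * Usage sketch (illustrative; assumes 'chunk_start' and 'physical' in
 * scope): btrfs_rmap_block() maps a physical disk address back to the
 * logical addresses that reference it; the returned array is allocated by
 * the callee and, on success, must be freed by the caller:
 *
 *	u64 *logical = NULL;
 *	int naddrs = 0, stripe_len = 0;
 *	int ret;
 *
 *	ret = btrfs_rmap_block(fs_info, chunk_start, NULL, physical,
 *			       &logical, &naddrs, &stripe_len);
 *	if (!ret) {
 *		... walk logical[0 .. naddrs - 1] ...
 *		kfree(logical);
 *	}
 */
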
static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
	       cache->cached == BTRFS_CACHE_ERROR;
}

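/*
 * Usage sketch (illustrative): callers that need a block group's free
 * space fully loaded can start caching and wait for it to finish, then
 * confirm completion with btrfs_block_group_done():
 *
 *	ret = btrfs_cache_block_group(cache, true);
 *	if (!ret && btrfs_block_group_done(cache))
 *		... the group's free space is now usable ...
 */
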
void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);

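/*
 * Usage sketch (illustrative): a task that must keep using a block group
 * across a possible deletion brackets the access with freeze/unfreeze,
 * which drives the 'frozen' counter documented in struct btrfs_block_group:
 *
 *	btrfs_freeze_block_group(bg);
 *	... operate on the block group, e.g. discard/trim ...
 *	btrfs_unfreeze_block_group(bg);
 */
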
bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);
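/*
 * Usage sketch (illustrative): activating a swapfile extent pins the block
 * group by bumping its swap extent count, and deactivation releases it;
 * the inc helper returns false if the group cannot hold swap extents
 * (e.g. it is being relocated):
 *
 *	if (!btrfs_inc_block_group_swap_extents(bg))
 *		return -EINVAL;
 *	...
 *	btrfs_dec_block_group_swap_extents(bg, 1);
 */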

#endif /* BTRFS_BLOCK_GROUP_H */