1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef BTRFS_BLOCK_RSV_H
4#define BTRFS_BLOCK_RSV_H
5
6#include <linux/types.h>
7#include <linux/compiler.h>
8#include <linux/spinlock.h>
9
10struct btrfs_trans_handle;
11struct btrfs_root;
12struct btrfs_space_info;
13struct btrfs_block_rsv;
14struct btrfs_fs_info;
15enum btrfs_reserve_flush_enum;
16
/*
 * Types of block reserves.
 *
 * Stored in btrfs_block_rsv::type; identifies which kind of reservation a
 * block reserve represents (names suggest: the global pool, delalloc,
 * transaction, chunk allocation, delayed inode ops, delayed refs, plus an
 * empty and a temporary kind — exact semantics live in block-rsv.c).
 */
enum btrfs_rsv_type {
	BTRFS_BLOCK_RSV_GLOBAL,
	BTRFS_BLOCK_RSV_DELALLOC,
	BTRFS_BLOCK_RSV_TRANS,
	BTRFS_BLOCK_RSV_CHUNK,
	BTRFS_BLOCK_RSV_DELOPS,
	BTRFS_BLOCK_RSV_DELREFS,
	BTRFS_BLOCK_RSV_EMPTY,
	BTRFS_BLOCK_RSV_TEMP,
};
30
struct btrfs_block_rsv {
	/* Target size of the reserve, in bytes */
	u64 size;
	/* Bytes currently reserved */
	u64 reserved;
	/* Associated space info (presumably the accounting target; see block-rsv.c) */
	struct btrfs_space_info *space_info;
	/* Protects @size and @reserved (cf. the locked accessors in this header) */
	spinlock_t lock;
	/*
	 * Whether the reserve is considered full; read locklessly via
	 * data_race() in btrfs_block_rsv_full().
	 */
	bool full;
	/*
	 * NOTE(review): semantics not visible in this header; presumably makes
	 * reservation attempts give up early instead of flushing — confirm in
	 * block-rsv.c.
	 */
	bool failfast;
	/* Block reserve type, one of BTRFS_BLOCK_RSV_* */
	enum btrfs_rsv_type type:8;

	/*
	 * Qgroup equivalent for @size @reserved
	 *
	 * Unlike normal @size/@reserved for inode rsv, qgroup doesn't care
	 * about things like csum size nor how many tree blocks it will need to
	 * reserve.
	 *
	 * Qgroup cares more about net change of the extent usage.
	 *
	 * So for one newly inserted file extent, in worst case it will cause
	 * leaf split and level increase, nodesize for each file extent is
	 * already too much.
	 *
	 * In short, qgroup_size/reserved is the upper limit of possible needed
	 * qgroup metadata reservation.
	 */
	u64 qgroup_rsv_size;
	u64 qgroup_rsv_reserved;
};
60
/* Initialization and teardown of block reserves (implemented in block-rsv.c). */
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type);
void btrfs_init_root_block_rsv(struct btrfs_root *root);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      enum btrfs_rsv_type type);
void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   enum btrfs_rsv_type type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv);
/* Reserving, checking, moving and releasing reserved space. */
int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent);
int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			   enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
			    bool update_size);
int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes);
void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size);
u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv,
			    u64 num_bytes, u64 *qgroup_to_release);
/* Management of the fs-wide global block reserve. */
void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info);
/* Reserve selection for tree block allocation — see block-rsv.c. */
struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize);
int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv);
/*
 * Hand back a tree block's worth of space that was taken from @block_rsv but
 * ended up unused.
 *
 * Re-adds @blocksize to the reserve's reserved bytes without bumping its
 * target size, then calls btrfs_block_rsv_release() with 0 bytes —
 * presumably so any excess over the reserve's size is returned to the
 * space_info; confirm against the release implementation in block-rsv.c.
 */
static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
					 struct btrfs_block_rsv *block_rsv,
					 u32 blocksize)
{
	btrfs_block_rsv_add_bytes(block_rsv, blocksize, false);
	btrfs_block_rsv_release(fs_info, block_rsv, 0, NULL);
}
101
102/*
103 * Fast path to check if the reserve is full, may be carefully used outside of
104 * locks.
105 */
106static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
107{
108 return data_race(rsv->full);
109}
110
/*
 * Get the reserved amount of a block reserve in a context where getting a
 * stale value is acceptable, instead of accessing it directly and triggering
 * a data race warning from KCSAN.
 */
static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
{
	u64 ret;

	spin_lock(&rsv->lock);
	ret = rsv->reserved;
	spin_unlock(&rsv->lock);

	return ret;
}
126
/*
 * Get the size of a block reserve in a context where getting a stale value is
 * acceptable, instead of accessing it directly and triggering a data race
 * warning from KCSAN.
 */
static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
{
	u64 ret;

	spin_lock(&rsv->lock);
	ret = rsv->size;
	spin_unlock(&rsv->lock);

	return ret;
}
142
143#endif /* BTRFS_BLOCK_RSV_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef BTRFS_BLOCK_RSV_H
4#define BTRFS_BLOCK_RSV_H
5
6struct btrfs_trans_handle;
7struct btrfs_root;
8enum btrfs_reserve_flush_enum;
9
/*
 * Types of block reserves.
 *
 * Stored in btrfs_block_rsv::type; identifies which kind of reservation a
 * block reserve represents (names suggest: the global pool, delalloc,
 * transaction, chunk allocation, delayed inode ops, delayed refs, plus an
 * empty and a temporary kind — exact semantics live in block-rsv.c).
 */
enum btrfs_rsv_type {
	BTRFS_BLOCK_RSV_GLOBAL,
	BTRFS_BLOCK_RSV_DELALLOC,
	BTRFS_BLOCK_RSV_TRANS,
	BTRFS_BLOCK_RSV_CHUNK,
	BTRFS_BLOCK_RSV_DELOPS,
	BTRFS_BLOCK_RSV_DELREFS,
	BTRFS_BLOCK_RSV_EMPTY,
	BTRFS_BLOCK_RSV_TEMP,
};
23
struct btrfs_block_rsv {
	/* Target size of the reserve, in bytes */
	u64 size;
	/* Bytes currently reserved */
	u64 reserved;
	/* Associated space info (presumably the accounting target; see block-rsv.c) */
	struct btrfs_space_info *space_info;
	/*
	 * NOTE(review): lock usage is not visible in this header — presumably
	 * protects @size and @reserved; confirm in block-rsv.c.
	 */
	spinlock_t lock;
	/*
	 * Whether the reserve is considered full; read locklessly via
	 * data_race() in btrfs_block_rsv_full().
	 */
	bool full;
	/*
	 * NOTE(review): semantics not visible in this header; presumably makes
	 * reservation attempts give up early instead of flushing — confirm in
	 * block-rsv.c.
	 */
	bool failfast;
	/* Block reserve type, one of BTRFS_BLOCK_RSV_* */
	enum btrfs_rsv_type type:8;

	/*
	 * Qgroup equivalent for @size @reserved
	 *
	 * Unlike normal @size/@reserved for inode rsv, qgroup doesn't care
	 * about things like csum size nor how many tree blocks it will need to
	 * reserve.
	 *
	 * Qgroup cares more about net change of the extent usage.
	 *
	 * So for one newly inserted file extent, in worst case it will cause
	 * leaf split and level increase, nodesize for each file extent is
	 * already too much.
	 *
	 * In short, qgroup_size/reserved is the upper limit of possible needed
	 * qgroup metadata reservation.
	 */
	u64 qgroup_rsv_size;
	u64 qgroup_rsv_reserved;
};
53
/* Initialization and teardown of block reserves (implemented in block-rsv.c). */
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type);
void btrfs_init_root_block_rsv(struct btrfs_root *root);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      enum btrfs_rsv_type type);
void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   enum btrfs_rsv_type type);
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv);
/* Reserving, checking, moving and releasing reserved space. */
int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent);
int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
			    bool update_size);
int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes);
void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size);
u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv,
			    u64 num_bytes, u64 *qgroup_to_release);
/* Management of the fs-wide global block reserve. */
void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info);
void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info);
/* Reserve selection for tree block allocation — see block-rsv.c. */
struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize);
/*
 * Hand back a tree block's worth of space that was taken from @block_rsv but
 * ended up unused.
 *
 * Re-adds @blocksize to the reserve's reserved bytes without bumping its
 * target size, then calls btrfs_block_rsv_release() with 0 bytes —
 * presumably so any excess over the reserve's size is returned to the
 * space_info; confirm against the release implementation in block-rsv.c.
 */
static inline void btrfs_unuse_block_rsv(struct btrfs_fs_info *fs_info,
					 struct btrfs_block_rsv *block_rsv,
					 u32 blocksize)
{
	btrfs_block_rsv_add_bytes(block_rsv, blocksize, false);
	btrfs_block_rsv_release(fs_info, block_rsv, 0, NULL);
}
92
/*
 * Fast path to check if the reserve is full, may be carefully used outside of
 * locks.
 *
 * The read is annotated with data_race(), so the returned value may be
 * stale; callers must tolerate that.
 */
static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
{
	return data_race(rsv->full);
}
101
102#endif /* BTRFS_BLOCK_RSV_H */