/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014 Facebook. All rights reserved.
 */

#ifndef BTRFS_QGROUP_H
#define BTRFS_QGROUP_H

#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/kobject.h>
#include "ulist.h"
#include "delayed-ref.h"
#include "misc.h"

/*
 * Btrfs qgroup overview
 *
 * Btrfs qgroup splits into 3 main parts:
 * 1) Reserve
 *    Reserve metadata/data space for incoming operations.
 *    This affects how the qgroup limit works.
 *
 * 2) Trace
 *    Tell btrfs qgroup to trace dirty extents.
 *
 *    Dirty extents include:
 *    - Newly allocated extents
 *    - Extents going to be deleted (in this trans)
 *    - Extents whose owner is going to be modified
 *
 *    This is the main part that affects whether qgroup numbers will stay
 *    consistent.
 *    Btrfs qgroup can trace clean extents without causing any problem,
 *    but it will consume extra CPU time, so it should be avoided if possible.
 *
 * 3) Account
 *    Btrfs qgroup updates its numbers based on the dirty extents traced
 *    in the previous step.
 *
 *    This normally happens at qgroup rescan and transaction commit time.
 */

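/*
 * Illustrative sketch only (not part of the API contract of this header): how
 * the three stages above typically line up for a buffered write.  The helpers
 * named here are the ones declared later in this file; error handling and
 * locking are omitted.
 *
 *	struct extent_changeset *reserved = NULL;
 *	u64 freed = 0;
 *
 *	// 1) Reserve: charge qgroup data space before dirtying pages.
 *	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
 *
 *	// 2) Trace: the extent allocation/delayed ref path records the new
 *	//    extent via btrfs_qgroup_trace_extent(), making it "dirty".
 *
 *	// 3) Account: at transaction commit, btrfs_qgroup_account_extents()
 *	//    turns the traced extents into rfer/excl updates.
 *
 *	// If the write is aborted before the data sticks, return the
 *	// reservation instead:
 *	btrfs_qgroup_free_data(inode, reserved, start, len, &freed);
 */
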
/*
 * Special performance optimization for balance.
 *
 * For balance, we need to swap the subtrees of the subvolume and reloc trees.
 * In theory, we need to trace all subtree blocks of both the subvolume and
 * reloc trees, since their owners have changed during such a swap.
 *
 * However, since balance has ensured that both subtrees contain the same
 * contents and have the same tree structure, such a swap won't cause a
 * qgroup number change.
 *
 * But there is a race window between the subtree swap and transaction commit;
 * during that window, if we increase/decrease the tree level or merge/split
 * tree blocks, we still need to trace the original subtrees.
 *
 * So for balance, we use delayed subtree tracing, whose workflow is:
 *
 * 1) Record the subtree root blocks getting swapped.
 *
 *    During the subtree swap:
 *    O = Old tree blocks
 *    N = New tree blocks
 *          reloc tree                     subvolume tree X
 *             Root                               Root
 *            /    \                             /    \
 *          NA     OB                          OA      OB
 *        /  |     |  \                      /  |      |  \
 *      NC  ND     OE  OF                   OC  OD     OE  OF
 *
 *    In this case, NA and OA are going to be swapped, so record (NA, OA)
 *    into subvolume tree X.
 *
 * 2) After the subtree swap.
 *          reloc tree                     subvolume tree X
 *             Root                               Root
 *            /    \                             /    \
 *          OA     OB                          NA      OB
 *        /  |     |  \                      /  |      |  \
 *      OC  OD     OE  OF                   NC  ND     OE  OF
 *
 * 3a) COW happens for OB
 *     If we are going to COW tree block OB, we check OB's bytenr against
 *     tree X's swapped_blocks structure.
 *     If it doesn't match any record, nothing happens.
 *
 * 3b) COW happens for NA
 *     Check NA's bytenr against tree X's swapped_blocks and get a hit.
 *     Then we do a subtree scan on both subtrees OA and NA, resulting in
 *     6 tree blocks to be scanned (OA, OC, OD, NA, NC, ND).
 *
 *     Then no matter what we do to subvolume tree X, the qgroup numbers will
 *     still be correct.
 *     Finally NA's record gets removed from X's swapped_blocks.
 *
 * 4) Transaction commit
 *    Any record left in X's swapped_blocks gets removed, since there was no
 *    modification to the swapped subtrees and thus no need to trigger a
 *    heavy qgroup subtree rescan for them.
 */

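/*
 * Rough mapping of the workflow above onto the swapped_blocks helpers
 * declared near the bottom of this file (illustrative sketch only; the real
 * call sites are in the relocation and COW paths, error handling omitted):
 *
 *	// Step 1: while swapping subtrees during balance.
 *	ret = btrfs_qgroup_add_swapped_blocks(trans, subvol_root, bg,
 *					      subvol_parent, subvol_slot,
 *					      reloc_parent, reloc_slot,
 *					      last_snapshot);
 *
 *	// Step 3: whenever a tree block of subvolume tree X is COWed.
 *	ret = btrfs_qgroup_trace_subtree_after_cow(trans, root, eb);
 *
 *	// Step 4: records still present at transaction commit are dropped,
 *	// e.g. via btrfs_qgroup_clean_swapped_blocks(root).
 */
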
/*
 * These flags share the flags field of the btrfs_qgroup_status_item with the
 * persisted flags defined in btrfs_tree.h.
 *
 * To minimize the chance of collision with new persisted status flags, these
 * count backwards from the MSB.
 */
#define BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN		(1ULL << 63)
#define BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING		(1ULL << 62)

/*
 * Record a dirty extent, and inform qgroup to update quota on it.
 * TODO: Use kmem cache to alloc it.
 */
struct btrfs_qgroup_extent_record {
	struct rb_node node;
	u64 bytenr;
	u64 num_bytes;

	/*
	 * For qgroup reserved data space freeing.
	 *
	 * @data_rsv_refroot and @data_rsv will be recorded after
	 * BTRFS_ADD_DELAYED_EXTENT is called.
	 * They will be used to free reserved qgroup space at
	 * transaction commit time.
	 */
	u32 data_rsv;		/* reserved data space needs to be freed */
	u64 data_rsv_refroot;	/* which root the reserved data belongs to */
	struct ulist *old_roots;
};

struct btrfs_qgroup_swapped_block {
	struct rb_node node;

	int level;
	bool trace_leaf;

	/* bytenr/generation of the tree block in subvolume tree after swap */
	u64 subvol_bytenr;
	u64 subvol_generation;

	/* bytenr/generation of the tree block in reloc tree after swap */
	u64 reloc_bytenr;
	u64 reloc_generation;

	u64 last_snapshot;
	struct btrfs_key first_key;
};

/*
 * Qgroup reservation types:
 *
 * DATA:
 *	Space reserved for data.
 *
 * META_PERTRANS:
 *	Space reserved for metadata (per-transaction).
 *	Since qgroup numbers are only updated at transaction commit time,
 *	reserved space for metadata must be kept until the transaction
 *	commits.
 *	Any metadata reservation used in btrfs_start_transaction() should
 *	be of this type.
 *
 * META_PREALLOC:
 *	There are cases where metadata space is reserved before starting a
 *	transaction, and btrfs_join_transaction() is used later to get a
 *	trans handle.
 *	Any metadata reserved for such usage should be of this type.
 *	After join_transaction(), part (or all) of such a reservation should
 *	be converted into META_PERTRANS (see the usage sketch after the enum
 *	below).
 */
enum btrfs_qgroup_rsv_type {
	BTRFS_QGROUP_RSV_DATA,
	BTRFS_QGROUP_RSV_META_PERTRANS,
	BTRFS_QGROUP_RSV_META_PREALLOC,
	BTRFS_QGROUP_RSV_LAST,
};

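/*
 * Usage sketch for the two metadata reservation types (illustrative only;
 * the helpers named here are declared further down in this file, error
 * handling omitted):
 *
 *	// Reserve before we hold a transaction handle.
 *	ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true, false);
 *
 *	trans = btrfs_join_transaction(root);
 *
 *	// Once the bytes are actually used inside the transaction, convert
 *	// the PREALLOC reservation to PERTRANS so it is released at commit.
 *	btrfs_qgroup_convert_reserved_meta(root, num_bytes);
 *
 *	// Any unused part of the prealloc reservation can be returned early:
 *	btrfs_qgroup_free_meta_prealloc(root, unused_bytes);
 */
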
/*
 * Represents how many bytes we have reserved for this qgroup.
 *
 * Each type should have a different reservation behavior.
 * E.g. data follows its io_tree flag modification, while
 * *currently* meta is just reserve-and-clear during a transaction.
 *
 * TODO: Add a new type for reservations that can survive transaction commit.
 * The current metadata reservation behavior is not suitable for such a case.
 */
struct btrfs_qgroup_rsv {
	u64 values[BTRFS_QGROUP_RSV_LAST];
};

/*
 * One struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	struct btrfs_qgroup_rsv rsv;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is a member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */

	/*
	 * For qgroup iteration usage.
	 *
	 * The iteration list should always be empty until qgroup_iterator_add()
	 * is called.  And it should be reset to empty after the iteration is
	 * finished.
	 */
	struct list_head iterator;

	/*
	 * For nested iterator usage.
	 *
	 * Here we support at most one level of nested iterator calls like:
	 *
	 *	LIST_HEAD(all_qgroups);
	 *	{
	 *		LIST_HEAD(local_qgroups);
	 *		qgroup_iterator_add(local_qgroups, qg);
	 *		qgroup_iterator_nested_add(all_qgroups, qg);
	 *		do_some_work(local_qgroups);
	 *		qgroup_iterator_clean(local_qgroups);
	 *	}
	 *	do_some_work(all_qgroups);
	 *	qgroup_iterator_nested_clean(all_qgroups);
	 */
	struct list_head nested_iterator;
	struct rb_node node;	/* tree of qgroups */

	/*
	 * Temp variables for accounting operations.
	 * Refer to qgroup_shared_accounting() for details.
	 */
	u64 old_refcnt;
	u64 new_refcnt;

	/*
	 * Sysfs kobjectid
	 */
	struct kobject kobj;
};

struct btrfs_squota_delta {
	/* The fstree root this delta counts against. */
	u64 root;
	/* The number of bytes in the extent being counted. */
	u64 num_bytes;
	/* The generation the extent was created in. */
	u64 generation;
	/* Whether we are using or freeing the extent. */
	bool is_inc;
	/* Whether the extent is data or metadata. */
	bool is_data;
};

static inline u64 btrfs_qgroup_subvolid(u64 qgroupid)
{
	return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
}

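/*
 * Example (assuming BTRFS_QGROUP_LEVEL_SHIFT is 48, as defined in
 * btrfs_tree.h): a qgroupid packs the level into the top bits and the
 * subvolume/object id into the low bits, so for qgroup "1/256":
 *
 *	u64 qgroupid = (1ULL << BTRFS_QGROUP_LEVEL_SHIFT) | 256;
 *
 *	// btrfs_qgroup_subvolid(qgroupid) == 256
 */
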
/*
 * For qgroup event trace points only
 */
enum {
	ENUM_BIT(QGROUP_RESERVE),
	ENUM_BIT(QGROUP_RELEASE),
	ENUM_BIT(QGROUP_FREE),
};

enum btrfs_qgroup_mode {
	BTRFS_QGROUP_MODE_DISABLED,
	BTRFS_QGROUP_MODE_FULL,
	BTRFS_QGROUP_MODE_SIMPLE
};

enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info);
bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info);
bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info);
int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
		       struct btrfs_ioctl_quota_ctl_args *quota_ctl_args);
int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
				     bool interruptible);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit);
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
struct btrfs_delayed_extent_op;

int btrfs_qgroup_trace_extent_nolock(
		struct btrfs_fs_info *fs_info,
		struct btrfs_delayed_ref_root *delayed_refs,
		struct btrfs_qgroup_extent_record *record);
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup_extent_record *qrecord);
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
			      u64 num_bytes);
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
				  struct extent_buffer *eb);
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
			       struct extent_buffer *root_eb,
			       u64 root_gen, int root_level);
int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
				u64 num_bytes, struct ulist *old_roots,
				struct ulist *new_roots);
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
int btrfs_run_qgroups(struct btrfs_trans_handle *trans);
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
			 u64 objectid, u64 inode_rootid,
			 struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl);
#endif

/* New io_tree based accurate qgroup reserve API */
int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
			      struct extent_changeset **reserved, u64 start, u64 len);
int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len, u64 *released);
int btrfs_qgroup_free_data(struct btrfs_inode *inode,
			   struct extent_changeset *reserved, u64 start,
			   u64 len, u64 *freed);
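
/*
 * Rough distinction between the two paths above (illustrative sketch only):
 *
 *	// The data made it to disk: the range is no longer "reserved", and
 *	// the space is handed over to be accounted/freed at commit time.
 *	ret = btrfs_qgroup_release_data(inode, start, len, &released);
 *
 *	// The data never made it (failed write, truncated range, ...):
 *	// hand the reservation straight back to the qgroup.
 *	ret = btrfs_qgroup_free_data(inode, reserved, start, len, &freed);
 */
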
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type, bool enforce);
int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
				enum btrfs_qgroup_rsv_type type, bool enforce,
				bool noflush);
/* Reserve metadata space for pertrans and prealloc type */
static inline int btrfs_qgroup_reserve_meta_pertrans(struct btrfs_root *root,
						     int num_bytes, bool enforce)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
					   BTRFS_QGROUP_RSV_META_PERTRANS,
					   enforce, false);
}
static inline int btrfs_qgroup_reserve_meta_prealloc(struct btrfs_root *root,
						      int num_bytes, bool enforce,
						      bool noflush)
{
	return __btrfs_qgroup_reserve_meta(root, num_bytes,
					   BTRFS_QGROUP_RSV_META_PREALLOC,
					   enforce, noflush);
}

void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
			      enum btrfs_qgroup_rsv_type type);

/* Free per-transaction meta reservation for error handling */
static inline void btrfs_qgroup_free_meta_pertrans(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
				 BTRFS_QGROUP_RSV_META_PERTRANS);
}

/* Pre-allocated meta reservation can be freed when needed */
static inline void btrfs_qgroup_free_meta_prealloc(struct btrfs_root *root,
						   int num_bytes)
{
	__btrfs_qgroup_free_meta(root, num_bytes,
				 BTRFS_QGROUP_RSV_META_PREALLOC);
}

void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode);

/* btrfs_qgroup_swapped_blocks related functions */
void btrfs_qgroup_init_swapped_blocks(
	struct btrfs_qgroup_swapped_blocks *swapped_blocks);

void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root);
int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
		struct btrfs_root *subvol_root,
		struct btrfs_block_group *bg,
		struct extent_buffer *subvol_parent, int subvol_slot,
		struct extent_buffer *reloc_parent, int reloc_slot,
		u64 last_snapshot);
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
		struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);
void btrfs_free_squota_rsv(struct btrfs_fs_info *fs_info, u64 root, u64 rsv_bytes);
int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
			      struct btrfs_squota_delta *delta);

#endif