/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_FS_H
#define BTRFS_FS_H

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/btrfs_tree.h>
#include <linux/sizes.h>
#include "extent-io-tree.h"
#include "extent_map.h"
#include "async-thread.h"
#include "block-rsv.h"

#define BTRFS_MAX_EXTENT_SIZE SZ_128M

#define BTRFS_OLDEST_GENERATION 0ULL

#define BTRFS_EMPTY_DIR_SIZE 0

#define BTRFS_DIRTY_METADATA_THRESH SZ_32M

#define BTRFS_SUPER_INFO_OFFSET SZ_64K
#define BTRFS_SUPER_INFO_SIZE 4096
static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);

/*
 * Number of metadata items necessary for an unlink operation:
 *
 * 1 for the possible orphan item
 * 1 for the dir item
 * 1 for the dir index
 * 1 for the inode ref
 * 1 for the inode
 * 1 for the parent inode
 */
#define BTRFS_UNLINK_METADATA_UNITS 6
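
/*
 * Illustrative note (not part of the original header): each of these units
 * stands for one b-tree item update, and reservation code turns an item
 * count into bytes with the btrfs_calc_*_metadata_size() helpers defined
 * later in this file, so an unlink reserves space for six item updates.
 */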

/*
 * The reserved space at the beginning of each device. It covers the primary
 * super block and leaves space for potential use by other tools like
 * bootloaders or to lower potential damage of accidental overwrite.
 */
#define BTRFS_DEVICE_RANGE_RESERVED (SZ_1M)
/*
 * Runtime (in-memory) states of filesystem
 */
enum {
	/*
	 * Filesystem is being remounted, allow to skip some operations, like
	 * defrag
	 */
	BTRFS_FS_STATE_REMOUNTING,
	/* Filesystem in RO mode */
	BTRFS_FS_STATE_RO,
	/* Track if a transaction abort has been reported on this filesystem */
	BTRFS_FS_STATE_TRANS_ABORTED,
	/*
	 * Bio operations should be blocked on this filesystem because a source
	 * or target device is being destroyed as part of a device replace
	 */
	BTRFS_FS_STATE_DEV_REPLACING,
	/* The btrfs_fs_info created for self-tests */
	BTRFS_FS_STATE_DUMMY_FS_INFO,

	/* Checksum errors are ignored. */
	BTRFS_FS_STATE_NO_CSUMS,

	/* Indicates there was an error cleaning up a log tree. */
	BTRFS_FS_STATE_LOG_CLEANUP_ERROR,

	BTRFS_FS_STATE_COUNT
};

enum {
	BTRFS_FS_CLOSING_START,
	BTRFS_FS_CLOSING_DONE,
	BTRFS_FS_LOG_RECOVERING,
	BTRFS_FS_OPEN,
	BTRFS_FS_QUOTA_ENABLED,
	BTRFS_FS_UPDATE_UUID_TREE_GEN,
	BTRFS_FS_CREATING_FREE_SPACE_TREE,
	BTRFS_FS_BTREE_ERR,
	BTRFS_FS_LOG1_ERR,
	BTRFS_FS_LOG2_ERR,
	BTRFS_FS_QUOTA_OVERRIDE,
	/* Used to record internally whether fs has been frozen */
	BTRFS_FS_FROZEN,
	/*
	 * Indicate that balance has been set up from the ioctl and is in the
	 * main phase. The fs_info::balance_ctl is initialized.
	 */
	BTRFS_FS_BALANCE_RUNNING,

	/*
	 * Indicate that relocation of a chunk has started, it's set per chunk
	 * and is toggled between chunks.
	 */
	BTRFS_FS_RELOC_RUNNING,

	/* Indicate that the cleaner thread is awake and doing something. */
	BTRFS_FS_CLEANER_RUNNING,

	/*
	 * The checksumming has an optimized version and is considered fast,
	 * so we don't need to offload checksums to workqueues.
	 */
	BTRFS_FS_CSUM_IMPL_FAST,

	/* Indicate that the discard workqueue can service discards. */
	BTRFS_FS_DISCARD_RUNNING,

	/* Indicate that we need to cleanup space cache v1 */
	BTRFS_FS_CLEANUP_SPACE_CACHE_V1,

	/* Indicate that we can't trust the free space tree for caching yet */
	BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,

	/* Indicate whether there are any tree modification log users */
	BTRFS_FS_TREE_MOD_LOG_USERS,

	/* Indicate that we want the transaction kthread to commit right now. */
	BTRFS_FS_COMMIT_TRANS,

	/* Indicate we have half completed snapshot deletions pending. */
	BTRFS_FS_UNFINISHED_DROPS,

	/* Indicate we have to finish a zone to do next allocation. */
	BTRFS_FS_NEED_ZONE_FINISH,

	/* Indicate that we want to commit the transaction. */
	BTRFS_FS_NEED_TRANS_COMMIT,

	/* This is set when active zone tracking is needed. */
	BTRFS_FS_ACTIVE_ZONE_TRACKING,

	/*
	 * Indicate if we have some features changed, this is mostly for
	 * cleaner thread to update the sysfs interface.
	 */
	BTRFS_FS_FEATURE_CHANGED,

	/*
	 * Indicate that we have found a tree block which is only aligned to
	 * sectorsize, but not to nodesize. This should be rare nowadays.
	 */
	BTRFS_FS_UNALIGNED_TREE_BLOCK,

#if BITS_PER_LONG == 32
	/* Indicate if we have error/warn message printed on 32bit systems */
	BTRFS_FS_32BIT_ERROR,
	BTRFS_FS_32BIT_WARN,
#endif
};

/*
 * Flags for mount options.
 *
 * Note: don't forget to add new options to btrfs_show_options()
 */
enum {
	BTRFS_MOUNT_NODATASUM = (1UL << 0),
	BTRFS_MOUNT_NODATACOW = (1UL << 1),
	BTRFS_MOUNT_NOBARRIER = (1UL << 2),
	BTRFS_MOUNT_SSD = (1UL << 3),
	BTRFS_MOUNT_DEGRADED = (1UL << 4),
	BTRFS_MOUNT_COMPRESS = (1UL << 5),
	BTRFS_MOUNT_NOTREELOG = (1UL << 6),
	BTRFS_MOUNT_FLUSHONCOMMIT = (1UL << 7),
	BTRFS_MOUNT_SSD_SPREAD = (1UL << 8),
	BTRFS_MOUNT_NOSSD = (1UL << 9),
	BTRFS_MOUNT_DISCARD_SYNC = (1UL << 10),
	BTRFS_MOUNT_FORCE_COMPRESS = (1UL << 11),
	BTRFS_MOUNT_SPACE_CACHE = (1UL << 12),
	BTRFS_MOUNT_CLEAR_CACHE = (1UL << 13),
	BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = (1UL << 14),
	BTRFS_MOUNT_ENOSPC_DEBUG = (1UL << 15),
	BTRFS_MOUNT_AUTO_DEFRAG = (1UL << 16),
	BTRFS_MOUNT_USEBACKUPROOT = (1UL << 17),
	BTRFS_MOUNT_SKIP_BALANCE = (1UL << 18),
	BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1UL << 19),
	BTRFS_MOUNT_RESCAN_UUID_TREE = (1UL << 20),
	BTRFS_MOUNT_FRAGMENT_DATA = (1UL << 21),
	BTRFS_MOUNT_FRAGMENT_METADATA = (1UL << 22),
	BTRFS_MOUNT_FREE_SPACE_TREE = (1UL << 23),
	BTRFS_MOUNT_NOLOGREPLAY = (1UL << 24),
	BTRFS_MOUNT_REF_VERIFY = (1UL << 25),
	BTRFS_MOUNT_DISCARD_ASYNC = (1UL << 26),
	BTRFS_MOUNT_IGNOREBADROOTS = (1UL << 27),
	BTRFS_MOUNT_IGNOREDATACSUMS = (1UL << 28),
	BTRFS_MOUNT_NODISCARD = (1UL << 29),
	BTRFS_MOUNT_NOSPACECACHE = (1UL << 30),
};

/*
 * Compat flags that we support. If any incompat flags are set other than the
 * ones specified below then we will fail to mount
 */
#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_SET 0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL

#define BTRFS_FEATURE_COMPAT_RO_SUPP \
	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE | \
	 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID | \
	 BTRFS_FEATURE_COMPAT_RO_VERITY | \
	 BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE)

#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL

#define BTRFS_FEATURE_INCOMPAT_SUPP_STABLE \
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \
	 BTRFS_FEATURE_INCOMPAT_RAID56 | \
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
	 BTRFS_FEATURE_INCOMPAT_RAID1C34 | \
	 BTRFS_FEATURE_INCOMPAT_ZONED | \
	 BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA)

#ifdef CONFIG_BTRFS_DEBUG
/*
 * Features under development, like extent tree v2, are enabled only under
 * CONFIG_BTRFS_DEBUG.
 */
#define BTRFS_FEATURE_INCOMPAT_SUPP \
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE | \
	 BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE | \
	 BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)

#else

#define BTRFS_FEATURE_INCOMPAT_SUPP \
	(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE)

#endif

#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR 0ULL

#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
#define BTRFS_DEFAULT_MAX_INLINE (2048)

struct btrfs_dev_replace {
	/* See #define above */
	u64 replace_state;
	/* Seconds since 1-Jan-1970 */
	time64_t time_started;
	/* Seconds since 1-Jan-1970 */
	time64_t time_stopped;
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	/* See #define above */
	u64 cont_reading_from_srcdev_mode;

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	struct mutex lock_finishing_cancel_unmount;
	struct rw_semaphore rwsem;

	struct btrfs_scrub_progress scrub_progress;

	struct percpu_counter bio_counter;
	wait_queue_head_t replace_wait;
};

/*
 * Free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes. They are used for all metadata
 * allocations. In ssd_spread mode they are also used for data allocations.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* Largest extent in this cluster */
	u64 max_size;

	/* First extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group *block_group;
	/*
	 * When a cluster is allocated from a block group, we put the cluster
	 * onto a list in the block group so that it can be freed before the
	 * block group is freed.
	 */
	struct list_head block_group_list;
};

/* Discard control. */
/*
 * Async discard uses multiple lists to differentiate the discard filter
 * parameters. Index 0 is for completely free block groups where we need to
 * ensure the entire block group is trimmed without being lossy. Indices
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS 3
#define BTRFS_DISCARD_INDEX_UNUSED 0
#define BTRFS_DISCARD_INDEX_START 1

struct btrfs_discard_ctl {
	struct workqueue_struct *discard_workers;
	struct delayed_work work;
	spinlock_t lock;
	struct btrfs_block_group *block_group;
	struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
	u64 prev_discard;
	u64 prev_discard_time;
	atomic_t discardable_extents;
	atomic64_t discardable_bytes;
	u64 max_discard_size;
	u64 delay_ms;
	u32 iops_limit;
	u32 kbps_limit;
	u64 discard_extent_bytes;
	u64 discard_bitmap_bytes;
	atomic64_t discard_bytes_saved;
};

/*
 * Exclusive operations (device replace, resize, device add/remove, balance)
 */
enum btrfs_exclusive_operation {
	BTRFS_EXCLOP_NONE,
	BTRFS_EXCLOP_BALANCE_PAUSED,
	BTRFS_EXCLOP_BALANCE,
	BTRFS_EXCLOP_DEV_ADD,
	BTRFS_EXCLOP_DEV_REMOVE,
	BTRFS_EXCLOP_DEV_REPLACE,
	BTRFS_EXCLOP_RESIZE,
	BTRFS_EXCLOP_SWAP_ACTIVATE,
};

/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {
	/* Total number of commits */
	u64 commit_count;
	/* The maximum commit duration so far in ns */
	u64 max_commit_dur;
	/* The last commit duration in ns */
	u64 last_commit_dur;
	/* The total commit duration in ns */
	u64 total_commit_dur;
};

struct btrfs_fs_info {
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	unsigned long flags;
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *data_reloc_root;
	struct btrfs_root *block_group_root;
	struct btrfs_root *stripe_root;

	/* The log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	/* The tree that holds the global roots (csum, extent, etc) */
	rwlock_t global_root_lock;
	struct rb_root global_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* Block group cache stuff */
	rwlock_t block_group_cache_lock;
	struct rb_root_cached block_group_cache_tree;

	/* Keep track of unallocated space */
	atomic64_t free_chunk_space;

	/* Track ranges which are used by log trees blocks/logged data extents */
	struct extent_io_tree excluded_extents;

	/* logical->physical extent mapping */
	struct rb_root_cached mapping_tree;
	rwlock_t mapping_tree_lock;

	/*
	 * Block reservation for extent, checksum, root tree and delayed dir
	 * index item.
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* Block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* Block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* Block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;
	/* Block reservation for delayed refs */
	struct btrfs_block_rsv delayed_refs_rsv;

	struct btrfs_block_rsv empty_block_rsv;

	/*
	 * Updated while holding the lock 'trans_lock'. Due to the life cycle of
	 * a transaction, it can be directly read while holding a transaction
	 * handle, everywhere else must be read with btrfs_get_fs_generation().
	 * Should always be updated using btrfs_set_fs_generation().
	 */
	u64 generation;
	/*
	 * Always use btrfs_get_last_trans_committed() and
	 * btrfs_set_last_trans_committed() to read and update this field.
	 */
	u64 last_trans_committed;
	/*
	 * Generation of the last transaction used for block group relocation
	 * since the filesystem was last mounted (or 0 if none happened yet).
	 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
	 */
	u64 last_reloc_trans;

	/*
	 * This is updated to the current trans every time a full commit is
	 * required instead of the faster short fsync log commits
	 */
	u64 last_trans_log_full_commit;
	unsigned long mount_opt;

	unsigned long compress_type:4;
	unsigned int compress_level;
	u32 commit_interval;
	/*
	 * This is an advisory value; the read side is safe even if it sees a
	 * stale number, because we will write the data out as a regular extent
	 * anyway. The write side (mount/remount) is under the ->s_umount lock,
	 * so it is safe as well.
	 */
	u64 max_inline;

	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;

	/*
	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
	 * when they are updated.
	 *
	 * Because the flags are never cleared, we don't need to take the lock
	 * on the read side.
	 *
	 * We also don't need the lock when we mount the fs, because there is
	 * no other task which can update the flags at that point.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct super_block *sb;
	struct inode *btree_inode;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;

	/*
	 * This is taken to make sure we don't set block groups ro after the
	 * free space cache has been allocated on them.
	 */
	struct mutex ro_block_group_mutex;

	/*
	 * This is used during read/modify/write to make sure no two ios are
	 * trying to mod the same stripe at the same time.
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * This protects the ordered operations list only while we are
	 * processing all of the entries on it. This way we make sure the
	 * commit code doesn't find the list temporarily empty because another
	 * function happens to be doing non-waiting preflush before jumping
	 * into the main commit.
	 */
	struct mutex ordered_operations_mutex;

	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;

	spinlock_t trans_lock;
	/*
	 * The reloc mutex goes with the trans lock, it is taken during commit
	 * to protect us from the relocation code.
	 */
	struct mutex reloc_mutex;

	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;
	atomic_t nr_delayed_iputs;
	wait_queue_head_t delayed_iputs_wait;

	atomic64_t tree_mod_seq;

	/* This protects tree_mod_log and tree_mod_seq_list */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;
	struct list_head tree_mod_seq_list;

	atomic_t async_delalloc_pages;

	/* This is used to protect the following list -- ordered_roots. */
	spinlock_t ordered_root_lock;

	/*
	 * All fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * These can span multiple transactions and basically include every
	 * dirty data page that isn't from nodatacow.
	 */
	struct list_head ordered_roots;

	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* All fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;

	/*
	 * There is a pool of worker threads for checksumming during writes and
	 * a pool for checksumming after reads. This is because readers can
	 * run with FS locks held, and the writers may be waiting for those
	 * locks. We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other two.
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct workqueue_struct *endio_workers;
	struct workqueue_struct *endio_meta_workers;
	struct workqueue_struct *rmw_workers;
	struct workqueue_struct *compressed_write_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *caching_workers;

	/*
	 * Fixup workers take dirty pages that didn't properly go through the
	 * cow mechanism and make them safe to write. It happens for the
	 * sys_munmap function call path.
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	u32 thread_pool_size;

	struct kobject *space_info_kobj;
	struct kobject *qgroups_kobj;
	struct kobject *discard_kobj;

	/* Used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	struct percpu_counter ordered_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;

	/* Protected by 'trans_lock'. */
	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;

	/*
	 * The space_info list is effectively read only after initial setup.
	 * It is populated at mount time and cleaned up after all block groups
	 * are removed. RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd_spread mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* All metadata allocations go through this cluster. */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* Auto defrag inodes go here. */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * These three are in extended format (availability of single chunks is
	 * denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other types are denoted
	 * by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;

	/* Balance state */
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	/* Cancellation requests for chunk relocation */
	atomic_t reloc_cancel_req;

	u32 data_chunk_allocations;
	u32 metadata_ratio;

	void *bdev_holder;

	/* Private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	/*
	 * The worker pointers are NULL iff the refcount is 0, i.e. scrub is
	 * not running.
	 */
	refcount_t scrub_workers_refcnt;
	struct workqueue_struct *scrub_workers;
	struct btrfs_subpage_info *subpage_info;

	struct btrfs_discard_ctl discard_ctl;

	/* Is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* Holds configuration and tracking. Protected by qgroup_lock. */
	struct rb_root qgroup_tree;
	spinlock_t qgroup_lock;

	/*
	 * Used to avoid frequently calling ulist_alloc()/ulist_free()
	 * when doing qgroup accounting, it must be protected by qgroup_lock.
	 */
	struct ulist *qgroup_ulist;

	/*
	 * Protect user change for quota operations. If a transaction is needed,
	 * it must be started before locking this lock.
	 */
	struct mutex qgroup_ioctl_lock;

	/* List of dirty qgroups to be written at next commit. */
	struct list_head dirty_qgroups;

	/* Used by qgroup for an efficient tree traversal. */
	u64 qgroup_seq;

	/* Qgroup rescan items. */
	/* Protects the progress item */
	struct mutex qgroup_rescan_lock;
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;
	/* Protected by qgroup_rescan_lock */
	bool qgroup_rescan_running;
	u8 qgroup_drop_subtree_thres;
	u64 qgroup_enable_gen;

	/*
	 * If this is not 0, then it indicates a serious filesystem error has
	 * happened and it contains that error (negative errno value).
	 */
	int fs_error;

	/* Filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

	/* Extent buffer radix tree */
	spinlock_t buffer_lock;
	/* Entries are eb->start / sectorsize */
	struct radix_tree_root buffer_radix;

	/* Next backup root to be overwritten */
	int backup_root_index;

	/* Device replace state */
	struct btrfs_dev_replace dev_replace;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;
	struct work_struct async_data_reclaim_work;
	struct work_struct preempt_reclaim_work;

	/* Reclaim partially filled block groups in the background */
	struct work_struct reclaim_bgs_work;
	struct list_head reclaim_bgs;
	int bg_reclaim_threshold;

	spinlock_t unused_bgs_lock;
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;
	/* Protect block groups that are going to be deleted */
	struct mutex reclaim_bgs_lock;

	/* Cached block sizes */
	u32 nodesize;
	u32 sectorsize;
	/* ilog2 of sectorsize, use to avoid 64bit division */
	u32 sectorsize_bits;
	u32 csum_size;
	u32 csums_per_leaf;
	u32 stripesize;

	/*
	 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on regular
	 * filesystem, on zoned it depends on the device constraints.
	 */
	u64 max_extent_size;

	/* Block groups and devices containing active swapfiles. */
	spinlock_t swapfile_pins_lock;
	struct rb_root swapfile_pins;

	struct crypto_shash *csum_shash;

	/* Type of exclusive operation running, protected by super_lock */
	enum btrfs_exclusive_operation exclusive_operation;

	/*
	 * Zone size > 0 when in ZONED mode, otherwise it's used for a check
	 * if the mode is enabled
	 */
	u64 zone_size;

	/* Constraints for ZONE_APPEND commands: */
	struct queue_limits limits;
	u64 max_zone_append_size;

	struct mutex zoned_meta_io_lock;
	spinlock_t treelog_bg_lock;
	u64 treelog_bg;

	/*
	 * Start of the dedicated data relocation block group, protected by
	 * relocation_bg_lock.
	 */
	spinlock_t relocation_bg_lock;
	u64 data_reloc_bg;
	struct mutex zoned_data_reloc_io_lock;

	struct btrfs_block_group *active_meta_bg;
	struct btrfs_block_group *active_system_bg;

	u64 nr_global_roots;

	spinlock_t zone_active_bgs_lock;
	struct list_head zone_active_bgs;

	/* Updates are not protected by any lock */
	struct btrfs_commit_stats commit_stats;

	/*
	 * Last generation where we dropped a non-relocation root.
	 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
	 * to change it and to read it, respectively.
	 */
	u64 last_root_drop_gen;

	/*
	 * Annotations for transaction events (structures are empty when
	 * compiled without lockdep).
	 */
	struct lockdep_map btrfs_trans_num_writers_map;
	struct lockdep_map btrfs_trans_num_extwriters_map;
	struct lockdep_map btrfs_state_change_map[4];
	struct lockdep_map btrfs_trans_pending_ordered_map;
	struct lockdep_map btrfs_ordered_extent_map;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	spinlock_t ref_verify_lock;
	struct rb_root block_tree;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct kobject *debug_kobj;
	struct list_head allocated_roots;

	spinlock_t eb_leak_lock;
	struct list_head allocated_ebs;
#endif
};

static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->generation);
}

static inline void btrfs_set_fs_generation(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->generation, gen);
}

static inline u64 btrfs_get_last_trans_committed(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_trans_committed);
}

static inline void btrfs_set_last_trans_committed(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->last_trans_committed, gen);
}

static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{
	WRITE_ONCE(fs_info->last_root_drop_gen, gen);
}

static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_root_drop_gen);
}

/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * it would require to store the csums for that many bytes.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;

	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
}
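
/*
 * Worked example (hypothetical numbers, for illustration only): with a 4KiB
 * sectorsize (sectorsize_bits == 12) and csums_per_leaf == 504, checksumming
 * 1MiB of data gives num_csums = SZ_1M >> 12 = 256 checksums, so
 * DIV_ROUND_UP_ULL(256, 504) == 1 leaf is required.
 */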

/*
 * Use this if we would be adding new items, as we could split nodes as we cow
 * down the tree.
 */
static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}
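
/*
 * Worked example (hypothetical numbers, assuming BTRFS_MAX_LEVEL == 8): with
 * a 16KiB nodesize, one inserted item reserves 16KiB * 8 * 2 == 256KiB,
 * enough to COW one full root-to-leaf path and also split every node on it.
 */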

/*
 * Doing a truncate or a modification won't result in new nodes or leaves, just
 * what we need for COW.
 */
static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
					   unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
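
/*
 * Worked example (same hypothetical numbers as above): a plain modification
 * reserves half of the insertion estimate, 16KiB * 8 == 128KiB per item. An
 * unlink, for instance, would size its reservation by passing
 * BTRFS_UNLINK_METADATA_UNITS from the top of this file as num_items.
 */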

#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
					sizeof(struct btrfs_item))

static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && fs_info->zone_size > 0;
}

/*
 * Count how many fs_info->max_extent_size units cover the given @size.
 */
static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (!fs_info)
		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif

	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}
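
/*
 * Worked example (illustrative): on a regular (non-zoned) filesystem
 * max_extent_size is BTRFS_MAX_EXTENT_SIZE (128MiB), so a 300MiB delalloc
 * range counts as div_u64(300MiB + 128MiB - 1, 128MiB) == 3 max-sized
 * extents.
 */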

bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type);
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op);
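
/*
 * Usage sketch (illustrative, not a real call site): an operation such as a
 * device resize could guard itself with the exclusive-op API like this,
 * where do_resize() is a hypothetical helper:
 *
 *	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_RESIZE))
 *		return -EBUSY;
 *	ret = do_resize(fs_info);
 *	btrfs_exclop_finish(fs_info);
 */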

/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			     const char *name);
void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			       const char *name);
void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
			      const char *name);
void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
				const char *name);

#define __btrfs_fs_incompat(fs_info, flags) \
	(!!(btrfs_super_incompat_flags((fs_info)->super_copy) & (flags)))

#define __btrfs_fs_compat_ro(fs_info, flags) \
	(!!(btrfs_super_compat_ro_flags((fs_info)->super_copy) & (flags)))

#define btrfs_set_fs_incompat(__fs_info, opt) \
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_clear_fs_incompat(__fs_info, opt) \
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

#define btrfs_set_fs_compat_ro(__fs_info, opt) \
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_fs_compat_ro(fs_info, opt) \
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
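
/*
 * Expansion example (illustrative): the token-pasting wrappers let callers
 * use the short feature name, so btrfs_set_fs_incompat(fs_info, RAID1C34)
 * expands to
 *
 *	__btrfs_set_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_RAID1C34,
 *				"RAID1C34");
 *
 * and btrfs_fs_incompat(fs_info, ZONED) tests the matching bit in the
 * superblock copy.
 */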

#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(fs_info, opt) ((fs_info)->mount_opt & \
				      BTRFS_MOUNT_##opt)
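
/*
 * Usage example (illustrative): mount option tests follow the same pattern,
 * e.g. btrfs_test_opt(fs_info, SSD_SPREAD) checks BTRFS_MOUNT_SSD_SPREAD in
 * fs_info->mount_opt, while btrfs_set_opt(fs_info->mount_opt, DISCARD_ASYNC)
 * sets the corresponding bit in the raw option word.
 */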

static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
{
	/* Do it this way so we only ever do one test_bit in the normal case. */
	if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
		if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
			return 2;
		return 1;
	}
	return 0;
}
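
/*
 * Illustrative reading of the return values (inferred from the flag names):
 * 0 means the filesystem is not closing, 1 means closing has started, 2 means
 * closing is essentially done. A long-running loop could use it to bail out
 * early, e.g.:
 *
 *	if (btrfs_fs_closing(fs_info))
 *		break;
 */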

/*
 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
 * anything except sleeping. This function is used to check the status of
 * the fs.
 * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
 * since setting and checking for SB_RDONLY in the superblock's flags is not
 * atomic.
 */
static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
	       btrfs_fs_closing(fs_info);
}

static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
}

#define BTRFS_FS_ERROR(fs_info) (READ_ONCE((fs_info)->fs_error))

#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info) \
	(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, \
			   &(fs_info)->fs_state)))

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS

#define EXPORT_FOR_TESTS

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
}

void btrfs_test_destroy_inode(struct inode *inode);

#else

#define EXPORT_FOR_TESTS static

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
	return 0;
}
#endif

#endif
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef BTRFS_FS_H
4#define BTRFS_FS_H
5
6#include <linux/blkdev.h>
7#include <linux/sizes.h>
8#include <linux/time64.h>
9#include <linux/compiler.h>
10#include <linux/math.h>
11#include <linux/atomic.h>
12#include <linux/percpu_counter.h>
13#include <linux/completion.h>
14#include <linux/lockdep.h>
15#include <linux/spinlock.h>
16#include <linux/mutex.h>
17#include <linux/rwlock_types.h>
18#include <linux/rwsem.h>
19#include <linux/semaphore.h>
20#include <linux/list.h>
21#include <linux/radix-tree.h>
22#include <linux/workqueue.h>
23#include <linux/wait.h>
24#include <linux/wait_bit.h>
25#include <linux/sched.h>
26#include <linux/rbtree.h>
27#include <uapi/linux/btrfs.h>
28#include <uapi/linux/btrfs_tree.h>
29#include "extent-io-tree.h"
30#include "async-thread.h"
31#include "block-rsv.h"
32
33struct inode;
34struct super_block;
35struct kobject;
36struct reloc_control;
37struct crypto_shash;
38struct ulist;
39struct btrfs_device;
40struct btrfs_block_group;
41struct btrfs_root;
42struct btrfs_fs_devices;
43struct btrfs_transaction;
44struct btrfs_delayed_root;
45struct btrfs_balance_control;
46struct btrfs_subpage_info;
47struct btrfs_stripe_hash_table;
48struct btrfs_space_info;
49
50#define BTRFS_MAX_EXTENT_SIZE SZ_128M
51
52#define BTRFS_OLDEST_GENERATION 0ULL
53
54#define BTRFS_EMPTY_DIR_SIZE 0
55
56#define BTRFS_DIRTY_METADATA_THRESH SZ_32M
57
58#define BTRFS_SUPER_INFO_OFFSET SZ_64K
59#define BTRFS_SUPER_INFO_SIZE 4096
60static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);
61
62/*
63 * Number of metadata items necessary for an unlink operation:
64 *
65 * 1 for the possible orphan item
66 * 1 for the dir item
67 * 1 for the dir index
68 * 1 for the inode ref
69 * 1 for the inode
70 * 1 for the parent inode
71 */
72#define BTRFS_UNLINK_METADATA_UNITS 6
73
74/*
75 * The reserved space at the beginning of each device. It covers the primary
76 * super block and leaves space for potential use by other tools like
77 * bootloaders or to lower potential damage of accidental overwrite.
78 */
79#define BTRFS_DEVICE_RANGE_RESERVED (SZ_1M)
80/*
81 * Runtime (in-memory) states of filesystem
82 */
83enum {
84 /*
85 * Filesystem is being remounted, allow to skip some operations, like
86 * defrag
87 */
88 BTRFS_FS_STATE_REMOUNTING,
89 /* Filesystem in RO mode */
90 BTRFS_FS_STATE_RO,
91 /* Track if a transaction abort has been reported on this filesystem */
92 BTRFS_FS_STATE_TRANS_ABORTED,
93 /*
94 * Bio operations should be blocked on this filesystem because a source
95 * or target device is being destroyed as part of a device replace
96 */
97 BTRFS_FS_STATE_DEV_REPLACING,
98 /* The btrfs_fs_info created for self-tests */
99 BTRFS_FS_STATE_DUMMY_FS_INFO,
100
101 /* Checksum errors are ignored. */
102 BTRFS_FS_STATE_NO_DATA_CSUMS,
103 BTRFS_FS_STATE_SKIP_META_CSUMS,
104
105 /* Indicates there was an error cleaning up a log tree. */
106 BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
107
108 BTRFS_FS_STATE_COUNT
109};
110
111enum {
112 BTRFS_FS_CLOSING_START,
113 BTRFS_FS_CLOSING_DONE,
114 BTRFS_FS_LOG_RECOVERING,
115 BTRFS_FS_OPEN,
116 BTRFS_FS_QUOTA_ENABLED,
117 BTRFS_FS_UPDATE_UUID_TREE_GEN,
118 BTRFS_FS_CREATING_FREE_SPACE_TREE,
119 BTRFS_FS_BTREE_ERR,
120 BTRFS_FS_LOG1_ERR,
121 BTRFS_FS_LOG2_ERR,
122 BTRFS_FS_QUOTA_OVERRIDE,
123 /* Used to record internally whether fs has been frozen */
124 BTRFS_FS_FROZEN,
125 /*
126 * Indicate that balance has been set up from the ioctl and is in the
127 * main phase. The fs_info::balance_ctl is initialized.
128 */
129 BTRFS_FS_BALANCE_RUNNING,
130
131 /*
132 * Indicate that relocation of a chunk has started, it's set per chunk
133 * and is toggled between chunks.
134 */
135 BTRFS_FS_RELOC_RUNNING,
136
137 /* Indicate that the cleaner thread is awake and doing something. */
138 BTRFS_FS_CLEANER_RUNNING,
139
140 /*
141 * The checksumming has an optimized version and is considered fast,
142 * so we don't need to offload checksums to workqueues.
143 */
144 BTRFS_FS_CSUM_IMPL_FAST,
145
146 /* Indicate that the discard workqueue can service discards. */
147 BTRFS_FS_DISCARD_RUNNING,
148
149 /* Indicate that we need to cleanup space cache v1 */
150 BTRFS_FS_CLEANUP_SPACE_CACHE_V1,
151
152 /* Indicate that we can't trust the free space tree for caching yet */
153 BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,
154
155 /* Indicate whether there are any tree modification log users */
156 BTRFS_FS_TREE_MOD_LOG_USERS,
157
158 /* Indicate that we want the transaction kthread to commit right now. */
159 BTRFS_FS_COMMIT_TRANS,
160
161 /* Indicate we have half completed snapshot deletions pending. */
162 BTRFS_FS_UNFINISHED_DROPS,
163
164 /* Indicate we have to finish a zone to do next allocation. */
165 BTRFS_FS_NEED_ZONE_FINISH,
166
167 /* Indicate that we want to commit the transaction. */
168 BTRFS_FS_NEED_TRANS_COMMIT,
169
170 /* This is set when active zone tracking is needed. */
171 BTRFS_FS_ACTIVE_ZONE_TRACKING,
172
173 /*
174 * Indicate if we have some features changed, this is mostly for
175 * cleaner thread to update the sysfs interface.
176 */
177 BTRFS_FS_FEATURE_CHANGED,
178
179 /*
180 * Indicate that we have found a tree block which is only aligned to
181 * sectorsize, but not to nodesize. This should be rare nowadays.
182 */
183 BTRFS_FS_UNALIGNED_TREE_BLOCK,
184
185#if BITS_PER_LONG == 32
186 /* Indicate if we have error/warn message printed on 32bit systems */
187 BTRFS_FS_32BIT_ERROR,
188 BTRFS_FS_32BIT_WARN,
189#endif
190};
191
192/*
193 * Flags for mount options.
194 *
195 * Note: don't forget to add new options to btrfs_show_options()
196 */
197enum {
198 BTRFS_MOUNT_NODATASUM = (1ULL << 0),
199 BTRFS_MOUNT_NODATACOW = (1ULL << 1),
200 BTRFS_MOUNT_NOBARRIER = (1ULL << 2),
201 BTRFS_MOUNT_SSD = (1ULL << 3),
202 BTRFS_MOUNT_DEGRADED = (1ULL << 4),
203 BTRFS_MOUNT_COMPRESS = (1ULL << 5),
204 BTRFS_MOUNT_NOTREELOG = (1ULL << 6),
205 BTRFS_MOUNT_FLUSHONCOMMIT = (1ULL << 7),
206 BTRFS_MOUNT_SSD_SPREAD = (1ULL << 8),
207 BTRFS_MOUNT_NOSSD = (1ULL << 9),
208 BTRFS_MOUNT_DISCARD_SYNC = (1ULL << 10),
209 BTRFS_MOUNT_FORCE_COMPRESS = (1ULL << 11),
210 BTRFS_MOUNT_SPACE_CACHE = (1ULL << 12),
211 BTRFS_MOUNT_CLEAR_CACHE = (1ULL << 13),
212 BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED = (1ULL << 14),
213 BTRFS_MOUNT_ENOSPC_DEBUG = (1ULL << 15),
214 BTRFS_MOUNT_AUTO_DEFRAG = (1ULL << 16),
215 BTRFS_MOUNT_USEBACKUPROOT = (1ULL << 17),
216 BTRFS_MOUNT_SKIP_BALANCE = (1ULL << 18),
217 BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1ULL << 19),
218 BTRFS_MOUNT_RESCAN_UUID_TREE = (1ULL << 20),
219 BTRFS_MOUNT_FRAGMENT_DATA = (1ULL << 21),
220 BTRFS_MOUNT_FRAGMENT_METADATA = (1ULL << 22),
221 BTRFS_MOUNT_FREE_SPACE_TREE = (1ULL << 23),
222 BTRFS_MOUNT_NOLOGREPLAY = (1ULL << 24),
223 BTRFS_MOUNT_REF_VERIFY = (1ULL << 25),
224 BTRFS_MOUNT_DISCARD_ASYNC = (1ULL << 26),
225 BTRFS_MOUNT_IGNOREBADROOTS = (1ULL << 27),
226 BTRFS_MOUNT_IGNOREDATACSUMS = (1ULL << 28),
227 BTRFS_MOUNT_NODISCARD = (1ULL << 29),
228 BTRFS_MOUNT_NOSPACECACHE = (1ULL << 30),
229 BTRFS_MOUNT_IGNOREMETACSUMS = (1ULL << 31),
230 BTRFS_MOUNT_IGNORESUPERFLAGS = (1ULL << 32),
231};
232
233/*
234 * Compat flags that we support. If any incompat flags are set other than the
235 * ones specified below then we will fail to mount
236 */
237#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
238#define BTRFS_FEATURE_COMPAT_SAFE_SET 0ULL
239#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL
240
241#define BTRFS_FEATURE_COMPAT_RO_SUPP \
242 (BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE | \
243 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID | \
244 BTRFS_FEATURE_COMPAT_RO_VERITY | \
245 BTRFS_FEATURE_COMPAT_RO_BLOCK_GROUP_TREE)
246
247#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL
248#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL
249
250#define BTRFS_FEATURE_INCOMPAT_SUPP_STABLE \
251 (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
252 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
253 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
254 BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
255 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
256 BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \
257 BTRFS_FEATURE_INCOMPAT_RAID56 | \
258 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
259 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
260 BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
261 BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
262 BTRFS_FEATURE_INCOMPAT_RAID1C34 | \
263 BTRFS_FEATURE_INCOMPAT_ZONED | \
264 BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA)
265
266#ifdef CONFIG_BTRFS_EXPERIMENTAL
267 /*
268 * Features under developmen like Extent tree v2 support is enabled
269 * only under CONFIG_BTRFS_EXPERIMENTAL
270 */
271#define BTRFS_FEATURE_INCOMPAT_SUPP \
272 (BTRFS_FEATURE_INCOMPAT_SUPP_STABLE | \
273 BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE | \
274 BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)
275
276#else
277
278#define BTRFS_FEATURE_INCOMPAT_SUPP \
279 (BTRFS_FEATURE_INCOMPAT_SUPP_STABLE)
280
281#endif
282
283#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
284 (BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
285#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR 0ULL
286
287#define BTRFS_DEFAULT_COMMIT_INTERVAL (30)
288#define BTRFS_DEFAULT_MAX_INLINE (2048)
289
290struct btrfs_dev_replace {
291 /* See #define above */
292 u64 replace_state;
293 /* Seconds since 1-Jan-1970 */
294 time64_t time_started;
295 /* Seconds since 1-Jan-1970 */
296 time64_t time_stopped;
297 atomic64_t num_write_errors;
298 atomic64_t num_uncorrectable_read_errors;
299
300 u64 cursor_left;
301 u64 committed_cursor_left;
302 u64 cursor_left_last_write_of_item;
303 u64 cursor_right;
304
305 /* See #define above */
306 u64 cont_reading_from_srcdev_mode;
307
308 int is_valid;
309 int item_needs_writeback;
310 struct btrfs_device *srcdev;
311 struct btrfs_device *tgtdev;
312
313 struct mutex lock_finishing_cancel_unmount;
314 struct rw_semaphore rwsem;
315
316 struct btrfs_scrub_progress scrub_progress;
317
318 struct percpu_counter bio_counter;
319 wait_queue_head_t replace_wait;
320
321 struct task_struct *replace_task;
322};
323
324/*
325 * Free clusters are used to claim free space in relatively large chunks,
326 * allowing us to do less seeky writes. They are used for all metadata
327 * allocations. In ssd_spread mode they are also used for data allocations.
328 */
329struct btrfs_free_cluster {
330 spinlock_t lock;
331 spinlock_t refill_lock;
332 struct rb_root root;
333
334 /* Largest extent in this cluster */
335 u64 max_size;
336
337 /* First extent starting offset */
338 u64 window_start;
339
340 /* We did a full search and couldn't create a cluster */
341 bool fragmented;
342
343 struct btrfs_block_group *block_group;
344 /*
345 * When a cluster is allocated from a block group, we put the cluster
346 * onto a list in the block group so that it can be freed before the
347 * block group is freed.
348 */
349 struct list_head block_group_list;
350};
351
352/* Discard control. */
353/*
354 * Async discard uses multiple lists to differentiate the discard filter
355 * parameters. Index 0 is for completely free block groups where we need to
356 * ensure the entire block group is trimmed without being lossy. Indices
357 * afterwards represent monotonically decreasing discard filter sizes to
358 * prioritize what should be discarded next.
359 */
360#define BTRFS_NR_DISCARD_LISTS 3
361#define BTRFS_DISCARD_INDEX_UNUSED 0
362#define BTRFS_DISCARD_INDEX_START 1
363
364struct btrfs_discard_ctl {
365 struct workqueue_struct *discard_workers;
366 struct delayed_work work;
367 spinlock_t lock;
368 struct btrfs_block_group *block_group;
369 struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
370 u64 prev_discard;
371 u64 prev_discard_time;
372 atomic_t discardable_extents;
373 atomic64_t discardable_bytes;
374 u64 max_discard_size;
375 u64 delay_ms;
376 u32 iops_limit;
377 u32 kbps_limit;
378 u64 discard_extent_bytes;
379 u64 discard_bitmap_bytes;
380 atomic64_t discard_bytes_saved;
381};
382
383/*
384 * Exclusive operations (device replace, resize, device add/remove, balance)
385 */
386enum btrfs_exclusive_operation {
387 BTRFS_EXCLOP_NONE,
388 BTRFS_EXCLOP_BALANCE_PAUSED,
389 BTRFS_EXCLOP_BALANCE,
390 BTRFS_EXCLOP_DEV_ADD,
391 BTRFS_EXCLOP_DEV_REMOVE,
392 BTRFS_EXCLOP_DEV_REPLACE,
393 BTRFS_EXCLOP_RESIZE,
394 BTRFS_EXCLOP_SWAP_ACTIVATE,
395};
396
397/* Store data about transaction commits, exported via sysfs. */
398struct btrfs_commit_stats {
399 /* Total number of commits */
400 u64 commit_count;
401 /* The maximum commit duration so far in ns */
402 u64 max_commit_dur;
403 /* The last commit duration in ns */
404 u64 last_commit_dur;
405 /* The total commit duration in ns */
406 u64 total_commit_dur;
407};
408
409struct btrfs_fs_info {
410 u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
411 unsigned long flags;
412 struct btrfs_root *tree_root;
413 struct btrfs_root *chunk_root;
414 struct btrfs_root *dev_root;
415 struct btrfs_root *fs_root;
416 struct btrfs_root *quota_root;
417 struct btrfs_root *uuid_root;
418 struct btrfs_root *data_reloc_root;
419 struct btrfs_root *block_group_root;
420 struct btrfs_root *stripe_root;
421
422 /* The log root tree is a directory of all the other log roots */
423 struct btrfs_root *log_root_tree;
424
425 /* The tree that holds the global roots (csum, extent, etc) */
426 rwlock_t global_root_lock;
427 struct rb_root global_root_tree;
428
429 spinlock_t fs_roots_radix_lock;
430 struct radix_tree_root fs_roots_radix;
431
432 /* Block group cache stuff */
433 rwlock_t block_group_cache_lock;
434 struct rb_root_cached block_group_cache_tree;
435
436 /* Keep track of unallocated space */
437 atomic64_t free_chunk_space;
438
439 /* Track ranges which are used by log trees blocks/logged data extents */
440 struct extent_io_tree excluded_extents;
441
442 /* logical->physical extent mapping */
443 struct rb_root_cached mapping_tree;
444 rwlock_t mapping_tree_lock;
445
446 /*
447 * Block reservation for extent, checksum, root tree and delayed dir
448 * index item.
449 */
450 struct btrfs_block_rsv global_block_rsv;
451 /* Block reservation for metadata operations */
452 struct btrfs_block_rsv trans_block_rsv;
453 /* Block reservation for chunk tree */
454 struct btrfs_block_rsv chunk_block_rsv;
455 /* Block reservation for delayed operations */
456 struct btrfs_block_rsv delayed_block_rsv;
457 /* Block reservation for delayed refs */
458 struct btrfs_block_rsv delayed_refs_rsv;
459
460 struct btrfs_block_rsv empty_block_rsv;
461
462 /*
463 * Updated while holding the lock 'trans_lock'. Due to the life cycle of
464 * a transaction, it can be directly read while holding a transaction
465 * handle, everywhere else must be read with btrfs_get_fs_generation().
466 * Should always be updated using btrfs_set_fs_generation().
467 */
468 u64 generation;
469 /*
470 * Always use btrfs_get_last_trans_committed() and
471 * btrfs_set_last_trans_committed() to read and update this field.
472 */
473 u64 last_trans_committed;
474 /*
475 * Generation of the last transaction used for block group relocation
476 * since the filesystem was last mounted (or 0 if none happened yet).
477 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
478 */
479 u64 last_reloc_trans;
480
481 /*
482 * This is updated to the current trans every time a full commit is
483 * required instead of the faster short fsync log commits
484 */
485 u64 last_trans_log_full_commit;
486 unsigned long long mount_opt;
487
488 unsigned long compress_type:4;
489 unsigned int compress_level;
490 u32 commit_interval;
491 /*
492 * It is a suggestive number, the read side is safe even it gets a
493 * wrong number because we will write out the data into a regular
494 * extent. The write side(mount/remount) is under ->s_umount lock,
495 * so it is also safe.
496 */
497 u64 max_inline;
498
499 struct btrfs_transaction *running_transaction;
500 wait_queue_head_t transaction_throttle;
501 wait_queue_head_t transaction_wait;
502 wait_queue_head_t transaction_blocked_wait;
503 wait_queue_head_t async_submit_wait;
504
505 /*
506 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
507 * when they are updated.
508 *
509 * Because we do not clear the flags for ever, so we needn't use
510 * the lock on the read side.
511 *
512 * We also needn't use the lock when we mount the fs, because
513 * there is no other task which will update the flag.
514 */
515 spinlock_t super_lock;
516 struct btrfs_super_block *super_copy;
517 struct btrfs_super_block *super_for_commit;
518 struct super_block *sb;
519 struct inode *btree_inode;
520 struct mutex tree_log_mutex;
521 struct mutex transaction_kthread_mutex;
522 struct mutex cleaner_mutex;
523 struct mutex chunk_mutex;
524
525 /*
526 * This is taken to make sure we don't set block groups ro after the
527 * free space cache has been allocated on them.
528 */
529 struct mutex ro_block_group_mutex;
530
531 /*
532 * This is used during read/modify/write to make sure no two ios are
533 * trying to mod the same stripe at the same time.
534 */
535 struct btrfs_stripe_hash_table *stripe_hash_table;
536
537 /*
538 * This protects the ordered operations list only while we are
539 * processing all of the entries on it. This way we make sure the
540 * commit code doesn't find the list temporarily empty because another
541 * function happens to be doing non-waiting preflush before jumping
542 * into the main commit.
543 */
544 struct mutex ordered_operations_mutex;
545
546 struct rw_semaphore commit_root_sem;
547
548 struct rw_semaphore cleanup_work_sem;
549
550 struct rw_semaphore subvol_sem;
551
552 spinlock_t trans_lock;
553 /*
554 * The reloc mutex goes with the trans lock, it is taken during commit
555 * to protect us from the relocation code.
556 */
557 struct mutex reloc_mutex;
558
559 struct list_head trans_list;
560 struct list_head dead_roots;
561 struct list_head caching_block_groups;
562
563 spinlock_t delayed_iput_lock;
564 struct list_head delayed_iputs;
565 atomic_t nr_delayed_iputs;
566 wait_queue_head_t delayed_iputs_wait;
567
568 atomic64_t tree_mod_seq;
569
570 /* This protects tree_mod_log and tree_mod_seq_list */
571 rwlock_t tree_mod_log_lock;
572 struct rb_root tree_mod_log;
573 struct list_head tree_mod_seq_list;
574
575 atomic_t async_delalloc_pages;
576
577 /* This is used to protect the following list -- ordered_roots. */
578 spinlock_t ordered_root_lock;
579
580 /*
581 * All fs/file tree roots in which there are data=ordered extents
582 * pending writeback are added into this list.
583 *
584 * These can span multiple transactions and basically include every
585 * dirty data page that isn't from nodatacow.
586 */
587 struct list_head ordered_roots;
588
589 struct mutex delalloc_root_mutex;
590 spinlock_t delalloc_root_lock;
591 /* All fs/file tree roots that have delalloc inodes. */
592 struct list_head delalloc_roots;
593
594 /*
595 * There is a pool of worker threads for checksumming during writes and
596 * a pool for checksumming after reads. This is because readers can
597 * run with FS locks held, and the writers may be waiting for those
598 * locks. We don't want ordering in the pending list to cause
599 * deadlocks, and so the two are serviced separately.
600 *
601 * A third pool does submit_bio to avoid deadlocking with the other two.
602 */
603 struct btrfs_workqueue *workers;
604 struct btrfs_workqueue *delalloc_workers;
605 struct btrfs_workqueue *flush_workers;
606 struct workqueue_struct *endio_workers;
607 struct workqueue_struct *endio_meta_workers;
608 struct workqueue_struct *rmw_workers;
609 struct workqueue_struct *compressed_write_workers;
610 struct btrfs_workqueue *endio_write_workers;
611 struct btrfs_workqueue *endio_freespace_worker;
612 struct btrfs_workqueue *caching_workers;
613
614 /*
615 * Fixup workers take dirty pages that didn't properly go through the
616 * cow mechanism and make them safe to write. It happens for the
617 * sys_munmap function call path.
618 */
619 struct btrfs_workqueue *fixup_workers;
620 struct btrfs_workqueue *delayed_workers;
621
622 struct task_struct *transaction_kthread;
623 struct task_struct *cleaner_kthread;
624 u32 thread_pool_size;
625
626 struct kobject *space_info_kobj;
627 struct kobject *qgroups_kobj;
628 struct kobject *discard_kobj;
629
630 /* Used to keep from writing metadata until there is a nice batch */
631 struct percpu_counter dirty_metadata_bytes;
632 struct percpu_counter delalloc_bytes;
633 struct percpu_counter ordered_bytes;
634 s32 dirty_metadata_batch;
635 s32 delalloc_batch;
636
637 struct percpu_counter evictable_extent_maps;
638 u64 em_shrinker_last_root;
639 u64 em_shrinker_last_ino;
640 atomic64_t em_shrinker_nr_to_scan;
641 struct work_struct em_shrinker_work;
642
643 /* Protected by 'trans_lock'. */
644 struct list_head dirty_cowonly_roots;
645
646 struct btrfs_fs_devices *fs_devices;
647
648 /*
649 * The space_info list is effectively read only after initial setup.
650 * It is populated at mount time and cleaned up after all block groups
651 * are removed. RCU is used to protect it.
652 */
653 struct list_head space_info;
654
655 struct btrfs_space_info *data_sinfo;
656
657 struct reloc_control *reloc_ctl;
658
659 /* data_alloc_cluster is only used in ssd_spread mode */
660 struct btrfs_free_cluster data_alloc_cluster;
661
662 /* All metadata allocations go through this cluster. */
663 struct btrfs_free_cluster meta_alloc_cluster;
664
665 /* Auto defrag inodes go here. */
666 spinlock_t defrag_inodes_lock;
667 struct rb_root defrag_inodes;
668 atomic_t defrag_running;
669
670 /* Used to protect avail_{data, metadata, system}_alloc_bits */
671 seqlock_t profiles_lock;
672 /*
673 * These three are in extended format (availability of single chunks is
674 * denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other types are denoted
675 * by corresponding BTRFS_BLOCK_GROUP_* bits)
676 */
677 u64 avail_data_alloc_bits;
678 u64 avail_metadata_alloc_bits;
679 u64 avail_system_alloc_bits;
680
681 /* Balance state */
682 spinlock_t balance_lock;
683 struct mutex balance_mutex;
684 atomic_t balance_pause_req;
685 atomic_t balance_cancel_req;
686 struct btrfs_balance_control *balance_ctl;
687 wait_queue_head_t balance_wait_q;
688
689 /* Cancellation requests for chunk relocation */
690 atomic_t reloc_cancel_req;
691
692 u32 data_chunk_allocations;
693 u32 metadata_ratio;
694
695 void *bdev_holder;
696
697 /* Private scrub information */
698 struct mutex scrub_lock;
699 atomic_t scrubs_running;
700 atomic_t scrub_pause_req;
701 atomic_t scrubs_paused;
702 atomic_t scrub_cancel_req;
703 wait_queue_head_t scrub_pause_wait;
704 /*
705 * The worker pointers are NULL iff the refcount is 0, ie. scrub is not
706 * running.
707 */
708 refcount_t scrub_workers_refcnt;
709 u32 sectors_per_page;
710 struct workqueue_struct *scrub_workers;
711
712 struct btrfs_discard_ctl discard_ctl;
713
714 /* Is qgroup tracking in a consistent state? */
715 u64 qgroup_flags;
716
717 /* Holds configuration and tracking. Protected by qgroup_lock. */
718 struct rb_root qgroup_tree;
719 spinlock_t qgroup_lock;
720
721 /*
722 * Used to avoid frequently calling ulist_alloc()/ulist_free()
723 * when doing qgroup accounting, it must be protected by qgroup_lock.
724 */
725 struct ulist *qgroup_ulist;
726
727 /*
728 * Protect user change for quota operations. If a transaction is needed,
729 * it must be started before locking this lock.
730 */
731 struct mutex qgroup_ioctl_lock;
732
733 /* List of dirty qgroups to be written at next commit. */
734 struct list_head dirty_qgroups;
735
736 /* Used by qgroup for an efficient tree traversal. */
737 u64 qgroup_seq;
738
739 /* Qgroup rescan items. */
740 /* Protects the progress item */
741 struct mutex qgroup_rescan_lock;
742 struct btrfs_key qgroup_rescan_progress;
743 struct btrfs_workqueue *qgroup_rescan_workers;
744 struct completion qgroup_rescan_completion;
745 struct btrfs_work qgroup_rescan_work;
746 /* Protected by qgroup_rescan_lock */
747 bool qgroup_rescan_running;
748 u8 qgroup_drop_subtree_thres;
749 u64 qgroup_enable_gen;
750
751 /*
752 * If this is not 0, then it indicates a serious filesystem error has
753 * happened and it contains that error (negative errno value).
754 */
755 int fs_error;
756
	/* Filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

	/* Extent buffer radix tree */
	spinlock_t buffer_lock;
	/* Entries are eb->start / sectorsize */
	struct radix_tree_root buffer_radix;

	/* Next backup root to be overwritten */
	int backup_root_index;

	/* Device replace state */
	struct btrfs_dev_replace dev_replace;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;
	struct work_struct async_data_reclaim_work;
	struct work_struct preempt_reclaim_work;

	/* Reclaim partially filled block groups in the background */
	struct work_struct reclaim_bgs_work;
	/* Protected by unused_bgs_lock. */
	struct list_head reclaim_bgs;
	int bg_reclaim_threshold;

	/* Protects the lists unused_bgs and reclaim_bgs. */
	spinlock_t unused_bgs_lock;
	/* Protected by unused_bgs_lock. */
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;
	/* Protect block groups that are going to be deleted */
	struct mutex reclaim_bgs_lock;

	/* Cached block sizes */
	u32 nodesize;
	u32 sectorsize;
	/* ilog2 of sectorsize, used to avoid 64bit division */
	u32 sectorsize_bits;
	u32 csum_size;
	u32 csums_per_leaf;
	u32 stripesize;

	/*
	 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on a regular
	 * filesystem; on zoned it depends on the device constraints.
	 */
	u64 max_extent_size;

	/* Block groups and devices containing active swapfiles. */
	spinlock_t swapfile_pins_lock;
	struct rb_root swapfile_pins;

	struct crypto_shash *csum_shash;

	/* Type of exclusive operation running, protected by super_lock */
	enum btrfs_exclusive_operation exclusive_operation;

	/*
	 * Zone size > 0 when in ZONED mode, otherwise 0. Also doubles as a
	 * quick check for whether zoned mode is enabled.
	 */
	u64 zone_size;

	/* Constraints for ZONE_APPEND commands: */
	struct queue_limits limits;
	u64 max_zone_append_size;

	struct mutex zoned_meta_io_lock;
	spinlock_t treelog_bg_lock;
	u64 treelog_bg;

	/*
	 * Start of the dedicated data relocation block group, protected by
	 * relocation_bg_lock.
	 */
	spinlock_t relocation_bg_lock;
	u64 data_reloc_bg;
	struct mutex zoned_data_reloc_io_lock;

	struct btrfs_block_group *active_meta_bg;
	struct btrfs_block_group *active_system_bg;

	u64 nr_global_roots;

	spinlock_t zone_active_bgs_lock;
	struct list_head zone_active_bgs;

	/* Updates are not protected by any lock */
	struct btrfs_commit_stats commit_stats;

	/*
	 * Last generation where we dropped a non-relocation root.
	 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
	 * to change it and to read it, respectively.
	 */
	u64 last_root_drop_gen;

	/*
	 * Annotations for transaction events (structures are empty when
	 * compiled without lockdep).
	 */
	struct lockdep_map btrfs_trans_num_writers_map;
	struct lockdep_map btrfs_trans_num_extwriters_map;
	struct lockdep_map btrfs_state_change_map[4];
	struct lockdep_map btrfs_trans_pending_ordered_map;
	struct lockdep_map btrfs_ordered_extent_map;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	spinlock_t ref_verify_lock;
	struct rb_root block_tree;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct kobject *debug_kobj;
	struct list_head allocated_roots;

	spinlock_t eb_leak_lock;
	struct list_head allocated_ebs;
#endif
};

#define folio_to_inode(_folio)	(BTRFS_I(_Generic((_folio),		\
					  struct folio *: (_folio))->mapping->host))

#define folio_to_fs_info(_folio)	(folio_to_inode(_folio)->root->fs_info)

#define inode_to_fs_info(_inode)	(BTRFS_I(_Generic((_inode),	\
					   struct inode *: (_inode)))->root->fs_info)
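
/*
 * Minimal usage sketch for the converters above, assuming @inode and @folio
 * belong to a btrfs filesystem. The _Generic() selections make the macros
 * type safe: passing any other pointer type is a compile-time error.
 *
 *	struct btrfs_fs_info *a = inode_to_fs_info(inode); // struct inode *
 *	struct btrfs_fs_info *b = folio_to_fs_info(folio); // struct folio *
 */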

static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->generation);
}

static inline void btrfs_set_fs_generation(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->generation, gen);
}

static inline u64 btrfs_get_last_trans_committed(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_trans_committed);
}

static inline void btrfs_set_last_trans_committed(struct btrfs_fs_info *fs_info, u64 gen)
{
	WRITE_ONCE(fs_info->last_trans_committed, gen);
}

static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{
	WRITE_ONCE(fs_info->last_root_drop_gen, gen);
}

static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_root_drop_gen);
}

/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * it would require to store the csums for that many bytes.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;

	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
}

/*
 * Use this if we would be adding new items, as we could split nodes as we cow
 * down the tree.
 */
static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}

/*
 * Doing a truncate or a modification won't result in new nodes or leaves, just
 * what we need for COW.
 */
static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
					   unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}

#define BTRFS_MAX_EXTENT_ITEM_SIZE(r)	((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
					 sizeof(struct btrfs_item))

static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && fs_info->zone_size > 0;
}

961/*
962 * Count how many fs_info->max_extent_size cover the @size
963 */
static inline u32 count_max_extents(const struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (!fs_info)
		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif

	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}

bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
			enum btrfs_exclusive_operation type);
bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
				 enum btrfs_exclusive_operation type);
void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
			  enum btrfs_exclusive_operation op);
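
/*
 * Typical calling pattern for the exclusive operation API, sketched for a
 * hypothetical ioctl handler with BTRFS_EXCLOP_BALANCE as the example type:
 *
 *	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
 *		return -EINPROGRESS;	// another exclusive op is running
 *	// ... perform the operation ...
 *	btrfs_exclop_finish(fs_info);
 */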

int btrfs_check_ioctl_vol_args_path(const struct btrfs_ioctl_vol_args *vol_args);

/* Compatibility and incompatibility defines */
void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			     const char *name);
void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
			       const char *name);
void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
			      const char *name);
void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
				const char *name);

#define __btrfs_fs_incompat(fs_info, flags) \
	(!!(btrfs_super_incompat_flags((fs_info)->super_copy) & (flags)))

#define __btrfs_fs_compat_ro(fs_info, flags) \
	(!!(btrfs_super_compat_ro_flags((fs_info)->super_copy) & (flags)))

#define btrfs_set_fs_incompat(__fs_info, opt) \
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_clear_fs_incompat(__fs_info, opt) \
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, #opt)

#define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

#define btrfs_set_fs_compat_ro(__fs_info, opt) \
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, #opt)

#define btrfs_fs_compat_ro(fs_info, opt) \
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
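
/*
 * The wrappers above paste the short feature name onto the full flag name,
 * so callers pass only the suffix. A sketch, using the RAID56 incompat bit
 * as the example:
 *
 *	if (!btrfs_fs_incompat(fs_info, RAID56))	// tests ..._INCOMPAT_RAID56
 *		btrfs_set_fs_incompat(fs_info, RAID56);	// #opt supplies "RAID56"
 */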

#define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(fs_info, opt)	((fs_info)->mount_opt & \
					 BTRFS_MOUNT_##opt)
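
/*
 * The mount option helpers use the same token-pasting scheme; a sketch with
 * the SSD option as the example:
 *
 *	if (btrfs_test_opt(fs_info, SSD))	// mount_opt & BTRFS_MOUNT_SSD
 *		...;
 */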

static inline int btrfs_fs_closing(const struct btrfs_fs_info *fs_info)
{
	/* Do it this way so we only ever do one test_bit in the normal case. */
	if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) {
		if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags))
			return 2;
		return 1;
	}
	return 0;
}

/*
 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
 * anything except sleep. This function is used to check that status.
 * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
 * since setting and checking for SB_RDONLY in the superblock's flags is not
 * atomic.
 */
static inline int btrfs_need_cleaner_sleep(const struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
	       btrfs_fs_closing(fs_info);
}

static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
}

#define BTRFS_FS_ERROR(fs_info)	(READ_ONCE((fs_info)->fs_error))

#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info)				\
	(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,		\
			   &(fs_info)->fs_state)))

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS

#define EXPORT_FOR_TESTS

static inline int btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
	return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
}

void btrfs_test_destroy_inode(struct inode *inode);

#else

#define EXPORT_FOR_TESTS static

static inline int btrfs_is_testing(const struct btrfs_fs_info *fs_info)
{
	return 0;
}
#endif

#endif