fs/btrfs/transaction.h (v6.8)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * Copyright (C) 2007 Oracle.  All rights reserved.
  4 */
  5
  6#ifndef BTRFS_TRANSACTION_H
  7#define BTRFS_TRANSACTION_H
  8
  9#include <linux/refcount.h>
 10#include "btrfs_inode.h"
 11#include "delayed-ref.h"
 12#include "ctree.h"
 13#include "misc.h"
 14
 15/* Radix-tree tag for roots that are part of the transaction. */
 16#define BTRFS_ROOT_TRANS_TAG			0
 17
 18enum btrfs_trans_state {
 19	TRANS_STATE_RUNNING,
 20	TRANS_STATE_COMMIT_PREP,
 21	TRANS_STATE_COMMIT_START,
 22	TRANS_STATE_COMMIT_DOING,
 23	TRANS_STATE_UNBLOCKED,
 24	TRANS_STATE_SUPER_COMMITTED,
 25	TRANS_STATE_COMPLETED,
 26	TRANS_STATE_MAX,
 27};
 28
 29#define BTRFS_TRANS_HAVE_FREE_BGS	0
 30#define BTRFS_TRANS_DIRTY_BG_RUN	1
 31#define BTRFS_TRANS_CACHE_ENOSPC	2
 32
 33struct btrfs_transaction {
 34	u64 transid;
 35	/*
 36	 * Total number of external writers (USERSPACE/START/ATTACH) in this
 37	 * transaction. It must be zero before the transaction can be
 38	 * committed.
 39	 */
 40	atomic_t num_extwriters;
 41	/*
 42	 * Total number of writers in this transaction. It must be zero
 43	 * before the transaction can end.
 44	 */
 45	atomic_t num_writers;
 46	refcount_t use_count;
 47
 48	unsigned long flags;
 49
 50	/* Protected by fs_info->trans_lock when changing it. */
 51	enum btrfs_trans_state state;
 52	int aborted;
 53	struct list_head list;
 54	struct extent_io_tree dirty_pages;
 55	time64_t start_time;
 56	wait_queue_head_t writer_wait;
 57	wait_queue_head_t commit_wait;
 58	struct list_head pending_snapshots;
 59	struct list_head dev_update_list;
 60	struct list_head switch_commits;
 61	struct list_head dirty_bgs;
 62
 63	/*
 64	 * There is no explicit lock which protects io_bgs, rather its
 65	 * consistency is implied by the fact that all the sites which modify
 66	 * it do so under some form of transaction critical section, namely:
 67	 *
 68	 * - btrfs_start_dirty_block_groups - This function can only ever be
 69	 *   run by one of the transaction committers. Refer to
 70	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction
 71	 *
 72	 * - btrfs_write_dirty_blockgroups - this is called by
 73	 *   commit_cowonly_roots from transaction critical section
 74	 *   (TRANS_STATE_COMMIT_DOING)
 75	 *
 76	 * - btrfs_cleanup_dirty_bgs - called on transaction abort
 77	 */
 78	struct list_head io_bgs;
 79	struct list_head dropped_roots;
 80	struct extent_io_tree pinned_extents;
 81
 82	/*
 83	 * we need to make sure block group deletion doesn't race with
 84	 * free space cache writeout.  This mutex keeps them from stomping
 85	 * on each other
 86	 */
 87	struct mutex cache_write_mutex;
 88	spinlock_t dirty_bgs_lock;
 89	/* Protected by spin lock fs_info->unused_bgs_lock. */
 90	struct list_head deleted_bgs;
 91	spinlock_t dropped_roots_lock;
 92	struct btrfs_delayed_ref_root delayed_refs;
 93	struct btrfs_fs_info *fs_info;
 94
 95	/*
 96	 * Number of ordered extents the transaction must wait for before
 97	 * committing. These are ordered extents started by a fast fsync.
 98	 */
 99	atomic_t pending_ordered;
100	wait_queue_head_t pending_wait;
101};
102
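/*
 * Illustrative sketch, not part of this header: the counters above are
 * drained by the commit path in fs/btrfs/transaction.c, roughly along the
 * lines below (the committing task itself holds one writer reference, hence
 * the "== 1"). "cur_trans" stands for a struct btrfs_transaction.
 */
#if 0	/* example only */
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_extwriters) == 0);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);
#endif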
103enum {
104	ENUM_BIT(__TRANS_FREEZABLE),
105	ENUM_BIT(__TRANS_START),
106	ENUM_BIT(__TRANS_ATTACH),
107	ENUM_BIT(__TRANS_JOIN),
108	ENUM_BIT(__TRANS_JOIN_NOLOCK),
109	ENUM_BIT(__TRANS_DUMMY),
110	ENUM_BIT(__TRANS_JOIN_NOSTART),
111};
112
113#define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
114#define TRANS_ATTACH		(__TRANS_ATTACH)
115#define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
116#define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)
117#define TRANS_JOIN_NOSTART	(__TRANS_JOIN_NOSTART)
118
119#define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)
120
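/*
 * Illustrative sketch, not part of this header: a handle's type is one of
 * the composite TRANS_* values above, so callers can test the underlying
 * bits. "trans" stands for the struct btrfs_trans_handle declared below;
 * reading __TRANS_FREEZABLE as "takes sb freeze protection" is an
 * assumption based on its use in TRANS_START and TRANS_JOIN.
 */
#if 0	/* example only */
	bool is_extwriter = (trans->type & TRANS_EXTWRITERS) != 0;
	bool freezable = (trans->type & __TRANS_FREEZABLE) != 0;
#endif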
121struct btrfs_trans_handle {
122	u64 transid;
123	u64 bytes_reserved;
124	u64 delayed_refs_bytes_reserved;
125	u64 chunk_bytes_reserved;
126	unsigned long delayed_ref_updates;
127	unsigned long delayed_ref_csum_deletions;
128	struct btrfs_transaction *transaction;
129	struct btrfs_block_rsv *block_rsv;
130	struct btrfs_block_rsv *orig_rsv;
131	/* Set by a task that wants to create a snapshot. */
132	struct btrfs_pending_snapshot *pending_snapshot;
133	refcount_t use_count;
134	unsigned int type;
135	/*
136	 * Error code of a transaction abort. It is set outside of locks and
137	 * must be accessed with READ_ONCE/WRITE_ONCE.
138	 */
139	short aborted;
140	bool adding_csums;
141	bool allocating_chunk;
142	bool removing_chunk;
143	bool reloc_reserved;
144	bool in_fsync;
145	struct btrfs_fs_info *fs_info;
146	struct list_head new_bgs;
147	struct btrfs_block_rsv delayed_rsv;
148};
149
150/*
151 * The abort status can be changed between calls and is not protected by locks.
152 * This accepts btrfs_transaction and btrfs_trans_handle as types. Once it's
153 * set to a non-zero value it does not change, so the macro should be used in
154 * checks, but it is not necessary for further reads of the value.
155 */
156#define TRANS_ABORTED(trans)		(unlikely(READ_ONCE((trans)->aborted)))
157
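/*
 * Illustrative sketch, not part of this header: per the comment above,
 * TRANS_ABORTED() is used for the check itself; once it returns true the
 * value no longer changes, so a plain read of ->aborted afterwards is fine.
 */
#if 0	/* example only */
	if (TRANS_ABORTED(trans)) {
		ret = trans->aborted;
		goto out;
	}
#endif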
158struct btrfs_pending_snapshot {
159	struct dentry *dentry;
160	struct inode *dir;
161	struct btrfs_root *root;
162	struct btrfs_root_item *root_item;
163	struct btrfs_root *snap;
164	struct btrfs_qgroup_inherit *inherit;
165	struct btrfs_path *path;
166	/* block reservation for the operation */
167	struct btrfs_block_rsv block_rsv;
168	/* extra metadata reservation for relocation */
169	int error;
170	/* Preallocated anonymous block device number */
171	dev_t anon_dev;
172	bool readonly;
173	struct list_head list;
174};
175
176static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
177					      struct btrfs_inode *inode)
178{
179	spin_lock(&inode->lock);
180	inode->last_trans = trans->transaction->transid;
181	inode->last_sub_trans = btrfs_get_root_log_transid(inode->root);
182	inode->last_log_commit = inode->last_sub_trans - 1;
183	spin_unlock(&inode->lock);
184}
185
186/*
187 * Make the qgroup code skip the given qgroupid, so that the old/new_roots
188 * for the qgroup won't contain that qgroupid.
189 */
190static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
191					 u64 qgroupid)
192{
193	struct btrfs_delayed_ref_root *delayed_refs;
194
195	delayed_refs = &trans->transaction->delayed_refs;
196	WARN_ON(delayed_refs->qgroup_to_skip);
197	delayed_refs->qgroup_to_skip = qgroupid;
198}
199
200static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
201{
202	struct btrfs_delayed_ref_root *delayed_refs;
203
204	delayed_refs = &trans->transaction->delayed_refs;
205	WARN_ON(!delayed_refs->qgroup_to_skip);
206	delayed_refs->qgroup_to_skip = 0;
207}
208
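/*
 * Illustrative sketch, not part of this header: the two helpers above are
 * used as a bracketing pair around work whose delayed refs must not be
 * accounted to the given qgroup ("objectid" is whatever qgroupid the caller
 * wants excluded).
 */
#if 0	/* example only */
	btrfs_set_skip_qgroup(trans, objectid);
	/* ... queue delayed refs that should skip this qgroup ... */
	btrfs_clear_skip_qgroup(trans);
#endif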
209bool __cold abort_should_print_stack(int error);
210
211/*
212 * Call btrfs_abort_transaction as early as possible when an error condition is
213 * detected, that way the exact stack trace is reported for some errors.
214 */
215#define btrfs_abort_transaction(trans, error)		\
216do {								\
217	bool first = false;					\
218	/* Report first abort since mount */			\
219	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,	\
220			&((trans)->fs_info->fs_state))) {	\
221		first = true;					\
222		if (WARN(abort_should_print_stack(error),	\
223			KERN_ERR				\
224			"BTRFS: Transaction aborted (error %d)\n",	\
225			(error))) {					\
226			/* Stack trace printed. */			\
227		} else {						\
228			btrfs_err((trans)->fs_info,			\
229				  "Transaction aborted (error %d)",	\
230				  (error));			\
231		}						\
232	}							\
233	__btrfs_abort_transaction((trans), __func__,		\
234				  __LINE__, (error), first);	\
235} while (0)
236
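/*
 * Illustrative sketch, not part of this header: the macro is invoked right
 * where the failure is detected so the reported location points at the
 * culprit. "do_something()" is a made-up helper.
 */
#if 0	/* example only */
	ret = do_something(trans);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}
#endif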
237int btrfs_end_transaction(struct btrfs_trans_handle *trans);
238struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
239						   unsigned int num_items);
240struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
241					struct btrfs_root *root,
242					unsigned int num_items);
243struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
244struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
245struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
246struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
247struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
248					struct btrfs_root *root);
249int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
250
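/*
 * Illustrative sketch, not part of this header: the usual lifecycle built
 * from the declarations above. num_items sizes the metadata reservation;
 * btrfs_end_transaction() drops the handle without forcing a commit, while
 * btrfs_commit_transaction() commits and releases the handle itself.
 */
#if 0	/* example only */
	struct btrfs_trans_handle *trans;

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* ... modify metadata under the running transaction ... */

	return btrfs_end_transaction(trans);
#endif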
251void btrfs_add_dead_root(struct btrfs_root *root);
252void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
253int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info);
254int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
255void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
256int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
257bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
258void btrfs_throttle(struct btrfs_fs_info *fs_info);
259int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
260				struct btrfs_root *root);
261int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
262				struct extent_io_tree *dirty_pages, int mark);
263int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
264int btrfs_transaction_blocked(struct btrfs_fs_info *info);
265int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
266void btrfs_put_transaction(struct btrfs_transaction *transaction);
267void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
268			    struct btrfs_root *root);
269void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
270void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
271				      const char *function,
272				      unsigned int line, int error, bool first_hit);
273
274int __init btrfs_transaction_init(void);
275void __cold btrfs_transaction_exit(void);
276
277#endif
fs/btrfs/transaction.h (v5.9)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * Copyright (C) 2007 Oracle.  All rights reserved.
  4 */
  5
  6#ifndef BTRFS_TRANSACTION_H
  7#define BTRFS_TRANSACTION_H
  8
  9#include <linux/refcount.h>
 10#include "btrfs_inode.h"
 11#include "delayed-ref.h"
 12#include "ctree.h"
 13
 14enum btrfs_trans_state {
 15	TRANS_STATE_RUNNING,
 16	TRANS_STATE_COMMIT_START,
 17	TRANS_STATE_COMMIT_DOING,
 18	TRANS_STATE_UNBLOCKED,
 19	TRANS_STATE_COMPLETED,
 20	TRANS_STATE_MAX,
 21};
 22
 23#define BTRFS_TRANS_HAVE_FREE_BGS	0
 24#define BTRFS_TRANS_DIRTY_BG_RUN	1
 25#define BTRFS_TRANS_CACHE_ENOSPC	2
 26
 27struct btrfs_transaction {
 28	u64 transid;
 29	/*
 30	 * Total number of external writers (USERSPACE/START/ATTACH) in this
 31	 * transaction. It must be zero before the transaction can be
 32	 * committed.
 33	 */
 34	atomic_t num_extwriters;
 35	/*
 36	 * Total number of writers in this transaction. It must be zero
 37	 * before the transaction can end.
 38	 */
 39	atomic_t num_writers;
 40	refcount_t use_count;
 41
 42	unsigned long flags;
 43
 44	/* Protected by fs_info->trans_lock when changing it. */
 45	enum btrfs_trans_state state;
 46	int aborted;
 47	struct list_head list;
 48	struct extent_io_tree dirty_pages;
 49	time64_t start_time;
 50	wait_queue_head_t writer_wait;
 51	wait_queue_head_t commit_wait;
 52	struct list_head pending_snapshots;
 53	struct list_head dev_update_list;
 54	struct list_head switch_commits;
 55	struct list_head dirty_bgs;
 56
 57	/*
 58	 * There is no explicit lock which protects io_bgs, rather its
 59	 * consistency is implied by the fact that all the sites which modify
 60	 * it do so under some form of transaction critical section, namely:
 61	 *
 62	 * - btrfs_start_dirty_block_groups - This function can only ever be
 63	 *   run by one of the transaction committers. Refer to
 64	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction
 65	 *
 66	 * - btrfs_write_dirty_blockgroups - this is called by
 67	 *   commit_cowonly_roots from transaction critical section
 68	 *   (TRANS_STATE_COMMIT_DOING)
 69	 *
 70	 * - btrfs_cleanup_dirty_bgs - called on transaction abort
 71	 */
 72	struct list_head io_bgs;
 73	struct list_head dropped_roots;
 74	struct extent_io_tree pinned_extents;
 75
 76	/*
 77	 * we need to make sure block group deletion doesn't race with
 78	 * free space cache writeout.  This mutex keeps them from stomping
 79	 * on each other
 80	 */
 81	struct mutex cache_write_mutex;
 82	spinlock_t dirty_bgs_lock;
 83	/* Protected by spin lock fs_info->unused_bgs_lock. */
 84	struct list_head deleted_bgs;
 85	spinlock_t dropped_roots_lock;
 86	struct btrfs_delayed_ref_root delayed_refs;
 87	struct btrfs_fs_info *fs_info;
 88};
 89
 90#define __TRANS_FREEZABLE	(1U << 0)
 91
 92#define __TRANS_START		(1U << 9)
 93#define __TRANS_ATTACH		(1U << 10)
 94#define __TRANS_JOIN		(1U << 11)
 95#define __TRANS_JOIN_NOLOCK	(1U << 12)
 96#define __TRANS_DUMMY		(1U << 13)
 97#define __TRANS_JOIN_NOSTART	(1U << 14)
 98
 99#define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
100#define TRANS_ATTACH		(__TRANS_ATTACH)
101#define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
102#define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)
103#define TRANS_JOIN_NOSTART	(__TRANS_JOIN_NOSTART)
104
105#define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)
106
107#define BTRFS_SEND_TRANS_STUB	((void *)1)
108
109struct btrfs_trans_handle {
110	u64 transid;
111	u64 bytes_reserved;
112	u64 chunk_bytes_reserved;
113	unsigned long delayed_ref_updates;
114	struct btrfs_transaction *transaction;
115	struct btrfs_block_rsv *block_rsv;
116	struct btrfs_block_rsv *orig_rsv;
117	refcount_t use_count;
118	unsigned int type;
119	/*
120	 * Error code of a transaction abort. It is set outside of locks and
121	 * must be accessed with READ_ONCE/WRITE_ONCE.
122	 */
123	short aborted;
124	bool adding_csums;
125	bool allocating_chunk;
126	bool can_flush_pending_bgs;
127	bool reloc_reserved;
128	bool dirty;
129	struct btrfs_root *root;
130	struct btrfs_fs_info *fs_info;
131	struct list_head new_bgs;
132};
133
134/*
135 * The abort status can be changed between calls and is not protected by locks.
136 * This accepts btrfs_transaction and btrfs_trans_handle as types. Once it's
137 * set to a non-zero value it does not change, so the macro should be used in
138 * checks, but it is not necessary for further reads of the value.
139 */
140#define TRANS_ABORTED(trans)		(unlikely(READ_ONCE((trans)->aborted)))
141
142struct btrfs_pending_snapshot {
143	struct dentry *dentry;
144	struct inode *dir;
145	struct btrfs_root *root;
146	struct btrfs_root_item *root_item;
147	struct btrfs_root *snap;
148	struct btrfs_qgroup_inherit *inherit;
149	struct btrfs_path *path;
150	/* block reservation for the operation */
151	struct btrfs_block_rsv block_rsv;
152	/* extra metadata reservation for relocation */
153	int error;
154	/* Preallocated anonymous block device number */
155	dev_t anon_dev;
156	bool readonly;
157	struct list_head list;
158};
159
160static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
161					      struct btrfs_inode *inode)
162{
163	spin_lock(&inode->lock);
164	inode->last_trans = trans->transaction->transid;
165	inode->last_sub_trans = inode->root->log_transid;
166	inode->last_log_commit = inode->root->last_log_commit;
167	spin_unlock(&inode->lock);
168}
169
170/*
171 * Make the qgroup code skip the given qgroupid, so that the old/new_roots
172 * for the qgroup won't contain that qgroupid.
173 */
174static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
175					 u64 qgroupid)
176{
177	struct btrfs_delayed_ref_root *delayed_refs;
178
179	delayed_refs = &trans->transaction->delayed_refs;
180	WARN_ON(delayed_refs->qgroup_to_skip);
181	delayed_refs->qgroup_to_skip = qgroupid;
182}
183
184static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
185{
186	struct btrfs_delayed_ref_root *delayed_refs;
187
188	delayed_refs = &trans->transaction->delayed_refs;
189	WARN_ON(!delayed_refs->qgroup_to_skip);
190	delayed_refs->qgroup_to_skip = 0;
191}
192
193int btrfs_end_transaction(struct btrfs_trans_handle *trans);
194struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
195						   unsigned int num_items);
196struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
197					struct btrfs_root *root,
198					unsigned int num_items);
199struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
200struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
201struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
202struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
203struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
204					struct btrfs_root *root);
205int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
206
207void btrfs_add_dead_root(struct btrfs_root *root);
208int btrfs_defrag_root(struct btrfs_root *root);
209int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
210int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
211int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
212				   int wait_for_unblock);
213int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
214int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
215void btrfs_throttle(struct btrfs_fs_info *fs_info);
216int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
217				struct btrfs_root *root);
218int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
219				struct extent_io_tree *dirty_pages, int mark);
220int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
221int btrfs_transaction_blocked(struct btrfs_fs_info *info);
222int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
223void btrfs_put_transaction(struct btrfs_transaction *transaction);
224void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
225void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
226			    struct btrfs_root *root);
227void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
228
229#endif