v5.4
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#ifndef BTRFS_DELAYED_REF_H
#define BTRFS_DELAYED_REF_H

#include <linux/refcount.h>

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF    1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF   2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

struct btrfs_delayed_ref_node {
	struct rb_node ref_node;
	/*
	 * If action is BTRFS_ADD_DELAYED_REF, also link this node to
	 * ref_head->ref_add_list, then we do not need to iterate the
	 * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
	 */
	struct list_head add_list;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	refcount_t refs;

	/*
	 * how many refs is this entry adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	/* is this node still in the rbtree? */
	unsigned int is_head:1;
	unsigned int in_tree:1;
};

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u8 level;
	bool update_key;
	bool update_flags;
	bool is_data;
	u64 flags_to_set;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	u64 bytenr;
	u64 num_bytes;
	refcount_t refs;
	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	spinlock_t lock;
	struct rb_root_cached ref_tree;
	/* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
	struct list_head ref_add_list;

	struct rb_node href_node;

	struct btrfs_delayed_extent_op *extent_op;

	/*
	 * This is used to track the final ref_mod from all the refs associated
	 * with this head ref, this is not adjusted as delayed refs are run,
	 * this is meant to track if we need to do the csum accounting or not.
	 */
	int total_ref_mod;

	/*
	 * This is the current outstanding mod references for this bytenr.  This
	 * is used with lookup_extent_info to get an accurate reference count
	 * for a bytenr, so it is adjusted as delayed refs are run so that any
	 * on disk reference count + ref_mod is accurate.
	 */
	int ref_mod;

	/*
	 * when a new extent is allocated, it is just reserved in memory
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in ram accounting to properly reflect
	 * the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
	unsigned int is_system:1;
	unsigned int processing:1;
};

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root_cached href_root;

	/* dirty extent records */
	struct rb_root dirty_extent_root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/* how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	u64 pending_csums;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	u64 run_delayed_start;

	/*
	 * To make qgroup to skip given root.
	 * This is for snapshot, as btrfs_qgroup_inherit() will manually
	 * modify counters for snapshot and its source, so we should skip
	 * the snapshot in new_root/old_roots or it will get calculated twice
	 */
	u64 qgroup_to_skip;
};

enum btrfs_ref_type {
	BTRFS_REF_NOT_SET,
	BTRFS_REF_DATA,
	BTRFS_REF_METADATA,
	BTRFS_REF_LAST,
};

struct btrfs_data_ref {
	/* For EXTENT_DATA_REF */

	/* Root which refers to this data extent */
	u64 ref_root;

	/* Inode which refers to this data extent */
	u64 ino;

	/*
	 * file_offset - extent_offset
	 *
	 * file_offset is the key.offset of the EXTENT_DATA key.
	 * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
	 */
	u64 offset;
};

struct btrfs_tree_ref {
	/*
	 * Level of this tree block
	 *
	 * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
	 */
	int level;

	/*
	 * Root which refers to this tree block.
	 *
	 * For TREE_BLOCK_REF (skinny metadata, either inline or keyed)
	 */
	u64 root;

	/* For non-skinny metadata, no special member needed */
};

struct btrfs_ref {
	enum btrfs_ref_type type;
	int action;

	/*
	 * Whether this extent should go through qgroup record.
	 *
	 * Normally false, but for certain cases like delayed subtree scan,
	 * setting this flag can hugely reduce qgroup overhead.
	 */
	bool skip_qgroup;

	/*
	 * Optional. For which root is this modification.
	 * Mostly used for qgroup optimization.
	 *
	 * When unset, data/tree ref init code will populate it.
	 * In certain cases, we're modifying reference for a different root.
	 * E.g. COW fs tree blocks for balance.
	 * In that case, tree_ref::root will be fs tree, but we're doing this
	 * for reloc tree, then we should set @real_root to reloc tree.
	 */
	u64 real_root;
	u64 bytenr;
	u64 len;

	/* Bytenr of the parent tree block */
	u64 parent;
	union {
		struct btrfs_data_ref data_ref;
		struct btrfs_tree_ref tree_ref;
	};
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int __init btrfs_delayed_ref_init(void);
void __cold btrfs_delayed_ref_exit(void);

static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
				int action, u64 bytenr, u64 len, u64 parent)
{
	generic_ref->action = action;
	generic_ref->bytenr = bytenr;
	generic_ref->len = len;
	generic_ref->parent = parent;
}

static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref,
				int level, u64 root)
{
	/* If @real_root not set, use @root as fallback */
	if (!generic_ref->real_root)
		generic_ref->real_root = root;
	generic_ref->tree_ref.level = level;
	generic_ref->tree_ref.root = root;
	generic_ref->type = BTRFS_REF_METADATA;
}

static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
				u64 ref_root, u64 ino, u64 offset)
{
	/* If @real_root not set, use @root as fallback */
	if (!generic_ref->real_root)
		generic_ref->real_root = ref_root;
	generic_ref->data_ref.ref_root = ref_root;
	generic_ref->data_ref.ino = ino;
	generic_ref->data_ref.offset = offset;
	generic_ref->type = BTRFS_REF_DATA;
}

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(refcount_read(&ref->refs) == 0);
	if (refcount_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}

static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
{
	if (refcount_dec_and_test(&head->refs))
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
}

int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod);
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved, int *old_ref_mod,
			       int *new_ref_mod);
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);

void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *src,
				       u64 num_bytes);
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

#endif
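
A minimal usage sketch of the v5.4 interface (not part of the header): a caller fills a generic btrfs_ref, specializes it as a tree or data ref, and queues the delayed ref against the running transaction; a delayed extent op can carry a flags update for the same extent. The two wrapper functions below and their arguments are hypothetical illustrations; only the btrfs_* helpers they call are declared above, and a data extent would use btrfs_init_data_ref() and btrfs_add_delayed_data_ref() instead.

/* Illustrative only: queue one delayed metadata backref via the v5.4 API. */
static int example_queue_tree_backref(struct btrfs_trans_handle *trans,
				      u64 bytenr, u64 num_bytes, u64 parent,
				      u64 root_objectid, int level)
{
	struct btrfs_ref ref = { 0 };
	int old_ref_mod, new_ref_mod;

	/* Common fields: action, extent start, extent length, parent block. */
	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr, num_bytes,
			       parent);
	/* Metadata specialization; real_root falls back to @root when unset. */
	btrfs_init_tree_ref(&ref, level, root_objectid);

	return btrfs_add_delayed_tree_ref(trans, &ref, NULL,
					  &old_ref_mod, &new_ref_mod);
}

/* Illustrative only: attach a flags update to the extent's delayed ref head. */
static int example_set_extent_flags(struct btrfs_trans_handle *trans,
				    u64 bytenr, u64 num_bytes, u64 flags)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = btrfs_alloc_delayed_extent_op();
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = true;
	extent_op->update_key = false;
	extent_op->is_data = false;
	extent_op->level = 0;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	/* Assumed pattern: the caller frees the op if it was not queued. */
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);
	return ret;
}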
v3.15
 
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF    1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF   2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */

struct btrfs_delayed_ref_node {
	struct rb_node rb_node;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the size of the extent */
	u64 num_bytes;

	/* seq number to keep track of insertion order */
	u64 seq;

	/* ref count on this data structure */
	atomic_t refs;

	/*
	 * how many refs is this entry adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent
	 */
	int ref_mod;

	unsigned int action:8;
	unsigned int type:8;
	/* is this node still in the rbtree? */
	unsigned int is_head:1;
	unsigned int in_tree:1;
};

struct btrfs_delayed_extent_op {
	struct btrfs_disk_key key;
	u64 flags_to_set;
	int level;
	unsigned int update_key:1;
	unsigned int update_flags:1;
	unsigned int is_data:1;
};

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	spinlock_t lock;
	struct rb_root ref_root;

	struct rb_node href_node;

	struct btrfs_delayed_extent_op *extent_op;
	/*
	 * when a new extent is allocated, it is just reserved in memory
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in ram accounting to properly reflect
	 * the free has happened.
	 */
	unsigned int must_insert_reserved:1;
	unsigned int is_data:1;
	unsigned int processing:1;
};

struct btrfs_delayed_tree_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	int level;
};

struct btrfs_delayed_data_ref {
	struct btrfs_delayed_ref_node node;
	u64 root;
	u64 parent;
	u64 objectid;
	u64 offset;
};

struct btrfs_delayed_ref_root {
	/* head ref rbtree */
	struct rb_root href_root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/* how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	atomic_t num_entries;

	/* total number of head nodes in tree */
	unsigned long num_heads;

	/* total number of head nodes ready for processing */
	unsigned long num_heads_ready;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;

	u64 run_delayed_start;
};

extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;

int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);

static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
	return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
}

static inline void
btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
{
	if (op)
		kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
}

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(atomic_read(&ref->refs) == 0);
	if (atomic_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		switch (ref->type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
		case BTRFS_SHARED_BLOCK_REF_KEY:
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
		case BTRFS_SHARED_DATA_REF_KEY:
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			break;
		case 0:
			kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
			break;
		default:
			BUG();
		}
	}
}

int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow);
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op);
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head);

struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head);
static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
	mutex_unlock(&head->mutex);
}


struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans);

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq);

/*
 * delayed refs with a ref_seq > 0 must be held back during backref walking.
 * this only applies to items in one of the fs-trees. for_cow items never need
 * to be held back, so they won't get a ref_seq number.
 */
static inline int need_ref_seq(int for_cow, u64 rootid)
{
	if (for_cow)
		return 0;

	if (rootid == BTRFS_FS_TREE_OBJECTID)
		return 1;

	if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	return 0;
}

/*
 * a node might live in a head or a regular ref, this lets you
 * test for the proper type to use.
 */
static int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
	return node->is_head;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_tree_ref, node);
}

static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_data_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(!btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref_head, node);
}
#endif
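
For comparison, a sketch of the older v3.15 interface (again not part of the header): every field is passed as a separate argument together with a for_cow flag, and need_ref_seq() above decides whether the ref will later receive a sequence number that backref walking must hold back. The wrapper function below is hypothetical; only the declarations it uses appear in the header.

/* Illustrative only: queue one delayed metadata backref via the v3.15 API. */
static int example_queue_tree_backref_v3(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int for_cow)
{
	/*
	 * Refs that are not for_cow and that target an fs tree get a
	 * ref_seq (see need_ref_seq() above) and are held back during
	 * backref walking.
	 */
	return btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
					  parent, ref_root, level,
					  BTRFS_ADD_DELAYED_REF, NULL, for_cow);
}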