v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2009 Oracle.  All rights reserved.
  4 */
  5
  6#include <linux/sched.h>
  7#include <linux/slab.h>
  8#include <linux/sort.h>
  9#include "ctree.h"
 10#include "delayed-ref.h"
 11#include "transaction.h"
 12#include "qgroup.h"
 13
 14struct kmem_cache *btrfs_delayed_ref_head_cachep;
 15struct kmem_cache *btrfs_delayed_tree_ref_cachep;
 16struct kmem_cache *btrfs_delayed_data_ref_cachep;
 17struct kmem_cache *btrfs_delayed_extent_op_cachep;
 18/*
 19 * delayed back reference update tracking.  For subvolume trees
 20 * we queue up extent allocations and backref maintenance for
 21 * delayed processing.   This avoids deep call chains where we
 22 * add extents in the middle of btrfs_search_slot, and it allows
 23 * us to buffer up frequently modified backrefs in an rb tree instead
 24 * of hammering updates on the extent allocation tree.
 25 */
 26
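/*
 * A minimal userspace sketch of the buffering described above: ref count
 * changes are coalesced in memory, keyed by extent start, and the net
 * delta is applied once instead of updating the extent allocation tree
 * for every modification.  All names here are illustrative only, not
 * kernel API.
 */
#include <stdio.h>

struct pending_mod {
	unsigned long long bytenr;	/* extent start, the lookup key */
	int delta;			/* net ref count change so far */
};

/* Coalesce a new +1/-1 modification into the pending set. */
static void queue_mod(struct pending_mod *set, int *n,
		      unsigned long long bytenr, int delta)
{
	for (int i = 0; i < *n; i++) {
		if (set[i].bytenr == bytenr) {
			set[i].delta += delta;	/* merge, don't append */
			return;
		}
	}
	set[*n].bytenr = bytenr;
	set[(*n)++].delta = delta;
}

int main(void)
{
	struct pending_mod set[8];
	int n = 0;

	/* three updates to one extent collapse into a single entry */
	queue_mod(set, &n, 4096, +1);
	queue_mod(set, &n, 4096, +1);
	queue_mod(set, &n, 4096, -1);
	queue_mod(set, &n, 8192, -1);

	for (int i = 0; i < n; i++)	/* "flush": one write per extent */
		printf("bytenr %llu: net %+d\n", set[i].bytenr, set[i].delta);
	return 0;
}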
 27/*
 28 * compare two delayed tree backrefs with same bytenr and type
 29 */
 30static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
 31			  struct btrfs_delayed_tree_ref *ref2)
 32{
 33	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
 34		if (ref1->root < ref2->root)
 35			return -1;
 36		if (ref1->root > ref2->root)
 37			return 1;
 38	} else {
 39		if (ref1->parent < ref2->parent)
 40			return -1;
 41		if (ref1->parent > ref2->parent)
 42			return 1;
 43	}
 44	return 0;
 45}
 46
 47/*
 48 * compare two delayed data backrefs with same bytenr and type
 49 */
 50static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
 51			  struct btrfs_delayed_data_ref *ref2)
 52{
 53	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
 54		if (ref1->root < ref2->root)
 55			return -1;
 56		if (ref1->root > ref2->root)
 57			return 1;
 58		if (ref1->objectid < ref2->objectid)
 59			return -1;
 60		if (ref1->objectid > ref2->objectid)
 61			return 1;
 62		if (ref1->offset < ref2->offset)
 63			return -1;
 64		if (ref1->offset > ref2->offset)
 65			return 1;
 66	} else {
 67		if (ref1->parent < ref2->parent)
 68			return -1;
 69		if (ref1->parent > ref2->parent)
 70			return 1;
 71	}
 72	return 0;
 73}
 74
 75static int comp_refs(struct btrfs_delayed_ref_node *ref1,
 76		     struct btrfs_delayed_ref_node *ref2,
 77		     bool check_seq)
 78{
 79	int ret = 0;
 80
 81	if (ref1->type < ref2->type)
 82		return -1;
 83	if (ref1->type > ref2->type)
 84		return 1;
 85	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
 86	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
 87		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
 88				     btrfs_delayed_node_to_tree_ref(ref2));
 89	else
 90		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
 91				     btrfs_delayed_node_to_data_ref(ref2));
 92	if (ret)
 93		return ret;
 94	if (check_seq) {
 95		if (ref1->seq < ref2->seq)
 96			return -1;
 97		if (ref1->seq > ref2->seq)
 98			return 1;
 99	}
100	return 0;
101}
102
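/*
 * The three comparators above define a strict ordering: first by ref
 * type, then by the per-type keys (root for keyed refs, parent for
 * shared refs, plus objectid/offset for data refs), and finally by seq
 * when check_seq is set.  A hedged userspace sketch of the same pattern,
 * reduced to (type, root, seq) and usable with qsort(); the struct and
 * names are invented for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_ref {
	int type;			/* stands in for BTRFS_*_REF_KEY */
	unsigned long long root;
	unsigned long long seq;
};

static int comp_demo_refs(const void *a, const void *b)
{
	const struct demo_ref *r1 = a, *r2 = b;

	if (r1->type != r2->type)
		return r1->type < r2->type ? -1 : 1;
	if (r1->root != r2->root)
		return r1->root < r2->root ? -1 : 1;
	if (r1->seq != r2->seq)
		return r1->seq < r2->seq ? -1 : 1;
	return 0;
}

int main(void)
{
	struct demo_ref refs[] = { { 2, 5, 9 }, { 1, 7, 3 }, { 1, 7, 1 } };

	qsort(refs, 3, sizeof(refs[0]), comp_demo_refs);
	for (int i = 0; i < 3; i++)	/* prints type 1 (seq 1, then 3), then type 2 */
		printf("type=%d root=%llu seq=%llu\n",
		       refs[i].type, refs[i].root, refs[i].seq);
	return 0;
}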
103/* insert a new ref to head ref rbtree */
104static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
105						   struct rb_node *node)
106{
107	struct rb_node **p = &root->rb_node;
108	struct rb_node *parent_node = NULL;
109	struct btrfs_delayed_ref_head *entry;
110	struct btrfs_delayed_ref_head *ins;
111	u64 bytenr;
112
113	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
114	bytenr = ins->bytenr;
115	while (*p) {
116		parent_node = *p;
117		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
118				 href_node);
119
120		if (bytenr < entry->bytenr)
121			p = &(*p)->rb_left;
122		else if (bytenr > entry->bytenr)
123			p = &(*p)->rb_right;
124		else
125			return entry;
126	}
127
128	rb_link_node(node, parent_node, p);
129	rb_insert_color(node, root);
130	return NULL;
131}
132
133static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
134		struct btrfs_delayed_ref_node *ins)
135{
136	struct rb_node **p = &root->rb_node;
137	struct rb_node *node = &ins->ref_node;
138	struct rb_node *parent_node = NULL;
139	struct btrfs_delayed_ref_node *entry;
140
141	while (*p) {
142		int comp;
143
144		parent_node = *p;
145		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
146				 ref_node);
147		comp = comp_refs(ins, entry, true);
148		if (comp < 0)
149			p = &(*p)->rb_left;
150		else if (comp > 0)
151			p = &(*p)->rb_right;
152		else
153			return entry;
154	}
155
156	rb_link_node(node, parent_node, p);
157	rb_insert_color(node, root);
158	return NULL;
159}
160
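/*
 * Both insert helpers above share one idiom: walk down the tree, and on
 * a key collision return the existing node instead of linking the new
 * one, so the caller can merge into it.  A hedged sketch of that shape
 * with a plain unbalanced binary tree standing in for the kernel rbtree;
 * all names are invented.
 */
#include <stdlib.h>

struct demo_node {
	unsigned long long key;
	struct demo_node *left, *right;
};

/* Returns NULL on a successful insert, or the clashing node. */
static struct demo_node *demo_insert(struct demo_node **root,
				     struct demo_node *ins)
{
	struct demo_node **p = root;

	while (*p) {
		if (ins->key < (*p)->key)
			p = &(*p)->left;
		else if (ins->key > (*p)->key)
			p = &(*p)->right;
		else
			return *p;	/* duplicate: caller merges */
	}
	*p = ins;	/* the rbtree does rb_link_node() + rb_insert_color() */
	return NULL;
}

int main(void)
{
	struct demo_node a = { .key = 10 }, b = { .key = 10 };
	struct demo_node *root = NULL;

	demo_insert(&root, &a);				/* inserted, returns NULL */
	return demo_insert(&root, &b) == &a ? 0 : 1;	/* clash returns a */
}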
161/*
162 * find a head entry based on bytenr. This returns the delayed ref
163 * head if it was able to find one, or NULL if nothing was in that spot.
164 * If return_bigger is given, the next bigger entry is returned if no exact
165 * match is found.
166 */
167static struct btrfs_delayed_ref_head *
168find_ref_head(struct rb_root *root, u64 bytenr,
169	      int return_bigger)
170{
171	struct rb_node *n;
172	struct btrfs_delayed_ref_head *entry;
173
174	n = root->rb_node;
175	entry = NULL;
176	while (n) {
177		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
178
179		if (bytenr < entry->bytenr)
180			n = n->rb_left;
181		else if (bytenr > entry->bytenr)
182			n = n->rb_right;
183		else
184			return entry;
185	}
186	if (entry && return_bigger) {
187		if (bytenr > entry->bytenr) {
188			n = rb_next(&entry->href_node);
189			if (!n)
190				n = rb_first(root);
191			entry = rb_entry(n, struct btrfs_delayed_ref_head,
192					 href_node);
193			return entry;
194		}
195		return entry;
196	}
197	return NULL;
198}
199
200int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
201			   struct btrfs_delayed_ref_head *head)
202{
203	struct btrfs_delayed_ref_root *delayed_refs;
204
205	delayed_refs = &trans->transaction->delayed_refs;
206	lockdep_assert_held(&delayed_refs->lock);
207	if (mutex_trylock(&head->mutex))
208		return 0;
209
210	refcount_inc(&head->refs);
211	spin_unlock(&delayed_refs->lock);
212
213	mutex_lock(&head->mutex);
214	spin_lock(&delayed_refs->lock);
215	if (RB_EMPTY_NODE(&head->href_node)) {
216		mutex_unlock(&head->mutex);
217		btrfs_put_delayed_ref_head(head);
218		return -EAGAIN;
219	}
220	btrfs_put_delayed_ref_head(head);
221	return 0;
222}
223
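/*
 * Note on btrfs_delayed_ref_lock() above: head->mutex must not be
 * blocked on while delayed_refs->lock (a spinlock) is held, so on
 * trylock failure the head is pinned with refcount_inc(), the spinlock
 * is dropped, the thread sleeps on the mutex, and the spinlock is
 * retaken.  If the head was removed from the rbtree in the meantime
 * (RB_EMPTY_NODE), -EAGAIN tells the caller to look the head up again.
 */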
224static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
225				    struct btrfs_delayed_ref_root *delayed_refs,
226				    struct btrfs_delayed_ref_head *head,
227				    struct btrfs_delayed_ref_node *ref)
228{
229	lockdep_assert_held(&head->lock);
230	rb_erase(&ref->ref_node, &head->ref_tree);
231	RB_CLEAR_NODE(&ref->ref_node);
232	if (!list_empty(&ref->add_list))
233		list_del(&ref->add_list);
234	ref->in_tree = 0;
235	btrfs_put_delayed_ref(ref);
236	atomic_dec(&delayed_refs->num_entries);
237	if (trans->delayed_ref_updates)
238		trans->delayed_ref_updates--;
239}
240
241static bool merge_ref(struct btrfs_trans_handle *trans,
242		      struct btrfs_delayed_ref_root *delayed_refs,
243		      struct btrfs_delayed_ref_head *head,
244		      struct btrfs_delayed_ref_node *ref,
245		      u64 seq)
246{
247	struct btrfs_delayed_ref_node *next;
248	struct rb_node *node = rb_next(&ref->ref_node);
249	bool done = false;
250
251	while (!done && node) {
252		int mod;
253
254		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
255		node = rb_next(node);
256		if (seq && next->seq >= seq)
257			break;
258		if (comp_refs(ref, next, false))
259			break;
260
261		if (ref->action == next->action) {
262			mod = next->ref_mod;
263		} else {
264			if (ref->ref_mod < next->ref_mod) {
265				swap(ref, next);
266				done = true;
267			}
268			mod = -next->ref_mod;
269		}
270
271		drop_delayed_ref(trans, delayed_refs, head, next);
272		ref->ref_mod += mod;
273		if (ref->ref_mod == 0) {
274			drop_delayed_ref(trans, delayed_refs, head, ref);
275			done = true;
276		} else {
277			/*
278			 * Can't have multiples of the same ref on a tree block.
279			 */
280			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
281				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
282		}
283	}
284
285	return done;
286}
287
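/*
 * The merge arithmetic in merge_ref() above reduces to signed addition
 * of ref_mod counts, with the opposite action contributing negatively.
 * For example, merging ADD(ref_mod=2) with DROP(ref_mod=1) keeps the
 * ADD with ref_mod 2 - 1 = 1; ADD(1) against DROP(1) reaches zero and
 * both refs are dropped; two ADDs simply accumulate, 1 + 1 = 2.
 */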
288void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
289			      struct btrfs_fs_info *fs_info,
290			      struct btrfs_delayed_ref_root *delayed_refs,
291			      struct btrfs_delayed_ref_head *head)
292{
293	struct btrfs_delayed_ref_node *ref;
294	struct rb_node *node;
295	u64 seq = 0;
296
297	lockdep_assert_held(&head->lock);
298
299	if (RB_EMPTY_ROOT(&head->ref_tree))
300		return;
301
302	/* We don't have too many refs to merge for data. */
303	if (head->is_data)
304		return;
305
306	spin_lock(&fs_info->tree_mod_seq_lock);
307	if (!list_empty(&fs_info->tree_mod_seq_list)) {
308		struct seq_list *elem;
309
310		elem = list_first_entry(&fs_info->tree_mod_seq_list,
311					struct seq_list, list);
312		seq = elem->seq;
313	}
314	spin_unlock(&fs_info->tree_mod_seq_lock);
315
316again:
317	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
318		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
319		if (seq && ref->seq >= seq)
320			continue;
321		if (merge_ref(trans, delayed_refs, head, ref, seq))
322			goto again;
323	}
324}
325
326int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
327			    struct btrfs_delayed_ref_root *delayed_refs,
328			    u64 seq)
329{
330	struct seq_list *elem;
331	int ret = 0;
332
333	spin_lock(&fs_info->tree_mod_seq_lock);
334	if (!list_empty(&fs_info->tree_mod_seq_list)) {
335		elem = list_first_entry(&fs_info->tree_mod_seq_list,
336					struct seq_list, list);
337		if (seq >= elem->seq) {
338			btrfs_debug(fs_info,
339				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
340				(u32)(seq >> 32), (u32)seq,
341				(u32)(elem->seq >> 32), (u32)elem->seq,
342				delayed_refs);
343			ret = 1;
344		}
345	}
346
347	spin_unlock(&fs_info->tree_mod_seq_lock);
348	return ret;
349}
350
351struct btrfs_delayed_ref_head *
352btrfs_select_ref_head(struct btrfs_trans_handle *trans)
353{
354	struct btrfs_delayed_ref_root *delayed_refs;
355	struct btrfs_delayed_ref_head *head;
356	u64 start;
357	bool loop = false;
358
359	delayed_refs = &trans->transaction->delayed_refs;
360
361again:
362	start = delayed_refs->run_delayed_start;
363	head = find_ref_head(&delayed_refs->href_root, start, 1);
364	if (!head && !loop) {
365		delayed_refs->run_delayed_start = 0;
366		start = 0;
367		loop = true;
368		head = find_ref_head(&delayed_refs->href_root, start, 1);
369		if (!head)
370			return NULL;
371	} else if (!head && loop) {
372		return NULL;
373	}
374
375	while (head->processing) {
376		struct rb_node *node;
377
378		node = rb_next(&head->href_node);
379		if (!node) {
380			if (loop)
381				return NULL;
382			delayed_refs->run_delayed_start = 0;
383			start = 0;
384			loop = true;
385			goto again;
386		}
387		head = rb_entry(node, struct btrfs_delayed_ref_head,
388				href_node);
389	}
390
391	head->processing = 1;
392	WARN_ON(delayed_refs->num_heads_ready == 0);
393	delayed_refs->num_heads_ready--;
394	delayed_refs->run_delayed_start = head->bytenr +
395		head->num_bytes;
396	return head;
397}
398
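/*
 * btrfs_select_ref_head() above scans from run_delayed_start to the end
 * of the rbtree and, on a miss, wraps to offset 0 at most once -- the
 * 'loop' flag prevents a second full pass -- while skipping heads that
 * another thread already marked as processing.
 */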
399/*
400 * Helper to insert the ref_node to the tail or merge with tail.
401 *
402 * Return 0 for insert.
403 * Return >0 for merge.
404 */
405static int insert_delayed_ref(struct btrfs_trans_handle *trans,
406			      struct btrfs_delayed_ref_root *root,
407			      struct btrfs_delayed_ref_head *href,
408			      struct btrfs_delayed_ref_node *ref)
409{
410	struct btrfs_delayed_ref_node *exist;
411	int mod;
412	int ret = 0;
413
414	spin_lock(&href->lock);
415	exist = tree_insert(&href->ref_tree, ref);
416	if (!exist)
417		goto inserted;
418
419	/* Now we are sure we can merge */
420	ret = 1;
421	if (exist->action == ref->action) {
422		mod = ref->ref_mod;
423	} else {
424		/* Need to change action */
425		if (exist->ref_mod < ref->ref_mod) {
426			exist->action = ref->action;
427			mod = -exist->ref_mod;
428			exist->ref_mod = ref->ref_mod;
429			if (ref->action == BTRFS_ADD_DELAYED_REF)
430				list_add_tail(&exist->add_list,
431					      &href->ref_add_list);
432			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
433				ASSERT(!list_empty(&exist->add_list));
434				list_del(&exist->add_list);
435			} else {
436				ASSERT(0);
437			}
438		} else
439			mod = -ref->ref_mod;
440	}
441	exist->ref_mod += mod;
442
443	/* remove existing tail if its ref_mod is zero */
444	if (exist->ref_mod == 0)
445		drop_delayed_ref(trans, root, href, exist);
446	spin_unlock(&href->lock);
447	return ret;
448inserted:
449	if (ref->action == BTRFS_ADD_DELAYED_REF)
450		list_add_tail(&ref->add_list, &href->ref_add_list);
451	atomic_inc(&root->num_entries);
452	trans->delayed_ref_updates++;
453	spin_unlock(&href->lock);
454	return ret;
455}
456
457/*
458 * helper function to update the accounting in the head ref
459 * existing and update must have the same bytenr
460 */
461static noinline void
462update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
463			 struct btrfs_delayed_ref_head *existing,
464			 struct btrfs_delayed_ref_head *update,
465			 int *old_ref_mod_ret)
466{
467	int old_ref_mod;
468
469	BUG_ON(existing->is_data != update->is_data);
470
471	spin_lock(&existing->lock);
472	if (update->must_insert_reserved) {
473		/* if the extent was freed and then
474		 * reallocated before the delayed ref
475		 * entries were processed, we can end up
476		 * with an existing head ref without
477		 * the must_insert_reserved flag set.
478		 * Set it again here
479		 */
480		existing->must_insert_reserved = update->must_insert_reserved;
481
482		/*
483		 * update the num_bytes so we make sure the accounting
484		 * is done correctly
485		 */
486		existing->num_bytes = update->num_bytes;
487
488	}
489
490	if (update->extent_op) {
491		if (!existing->extent_op) {
492			existing->extent_op = update->extent_op;
493		} else {
494			if (update->extent_op->update_key) {
495				memcpy(&existing->extent_op->key,
496				       &update->extent_op->key,
497				       sizeof(update->extent_op->key));
498				existing->extent_op->update_key = true;
499			}
500			if (update->extent_op->update_flags) {
501				existing->extent_op->flags_to_set |=
502					update->extent_op->flags_to_set;
503				existing->extent_op->update_flags = true;
504			}
505			btrfs_free_delayed_extent_op(update->extent_op);
506		}
507	}
508	/*
509	 * update the reference mod on the head to reflect this new operation,
510	 * only need the lock for this case because we could be processing it
511	 * currently, for refs we just added we know we're a-ok.
512	 */
513	old_ref_mod = existing->total_ref_mod;
514	if (old_ref_mod_ret)
515		*old_ref_mod_ret = old_ref_mod;
516	existing->ref_mod += update->ref_mod;
517	existing->total_ref_mod += update->ref_mod;
518
519	/*
520	 * If we are going from a positive ref mod to a negative or vice
521	 * versa we need to make sure to adjust pending_csums accordingly.
522	 */
523	if (existing->is_data) {
524		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
525			delayed_refs->pending_csums -= existing->num_bytes;
526		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
527			delayed_refs->pending_csums += existing->num_bytes;
528	}
529	spin_unlock(&existing->lock);
530}
531
532/*
533 * helper function to actually insert a head node into the rbtree.
534 * this does all the dirty work in terms of maintaining the correct
535 * overall modification count.
536 */
537static noinline struct btrfs_delayed_ref_head *
538add_delayed_ref_head(struct btrfs_fs_info *fs_info,
539		     struct btrfs_trans_handle *trans,
540		     struct btrfs_delayed_ref_head *head_ref,
541		     struct btrfs_qgroup_extent_record *qrecord,
542		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
543		     int action, int is_data, int is_system,
544		     int *qrecord_inserted_ret,
545		     int *old_ref_mod, int *new_ref_mod)
546
547{
548	struct btrfs_delayed_ref_head *existing;
549	struct btrfs_delayed_ref_root *delayed_refs;
550	int count_mod = 1;
551	int must_insert_reserved = 0;
552	int qrecord_inserted = 0;
553
554	/* If reserved is provided, it must be a data extent. */
555	BUG_ON(!is_data && reserved);
556
557	/*
558	 * the head node stores the sum of all the mods, so dropping a ref
559	 * should drop the sum in the head node by one.
560	 */
561	if (action == BTRFS_UPDATE_DELAYED_HEAD)
562		count_mod = 0;
563	else if (action == BTRFS_DROP_DELAYED_REF)
564		count_mod = -1;
565
566	/*
567	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
568	 * the reserved accounting when the extent is finally added, or
569	 * if a later modification deletes the delayed ref without ever
570	 * inserting the extent into the extent allocation tree.
571	 * ref->must_insert_reserved is the flag used to record
572	 * that accounting mods are required.
573	 *
574	 * Once we record must_insert_reserved, switch the action to
575	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
576	 */
577	if (action == BTRFS_ADD_DELAYED_EXTENT)
578		must_insert_reserved = 1;
579	else
580		must_insert_reserved = 0;
581
582	delayed_refs = &trans->transaction->delayed_refs;
583
584	refcount_set(&head_ref->refs, 1);
585	head_ref->bytenr = bytenr;
586	head_ref->num_bytes = num_bytes;
587	head_ref->ref_mod = count_mod;
588	head_ref->must_insert_reserved = must_insert_reserved;
589	head_ref->is_data = is_data;
590	head_ref->is_system = is_system;
591	head_ref->ref_tree = RB_ROOT;
592	INIT_LIST_HEAD(&head_ref->ref_add_list);
593	RB_CLEAR_NODE(&head_ref->href_node);
594	head_ref->processing = 0;
595	head_ref->total_ref_mod = count_mod;
596	head_ref->qgroup_reserved = 0;
597	head_ref->qgroup_ref_root = 0;
598	spin_lock_init(&head_ref->lock);
599	mutex_init(&head_ref->mutex);
600
601	/* Record qgroup extent info if provided */
602	if (qrecord) {
603		if (ref_root && reserved) {
604			head_ref->qgroup_ref_root = ref_root;
605			head_ref->qgroup_reserved = reserved;
606		}
607
608		qrecord->bytenr = bytenr;
609		qrecord->num_bytes = num_bytes;
610		qrecord->old_roots = NULL;
611
612		if (btrfs_qgroup_trace_extent_nolock(fs_info,
613					delayed_refs, qrecord))
614			kfree(qrecord);
615		else
616			qrecord_inserted = 1;
617	}
618
619	trace_add_delayed_ref_head(fs_info, head_ref, action);
620
621	existing = htree_insert(&delayed_refs->href_root,
622				&head_ref->href_node);
623	if (existing) {
624		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
625			&& existing->qgroup_reserved);
626		update_existing_head_ref(delayed_refs, existing, head_ref,
627					 old_ref_mod);
628		/*
629		 * we've updated the existing ref, free the newly
630		 * allocated ref
631		 */
632		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
633		head_ref = existing;
634	} else {
635		if (old_ref_mod)
636			*old_ref_mod = 0;
637		if (is_data && count_mod < 0)
638			delayed_refs->pending_csums += num_bytes;
639		delayed_refs->num_heads++;
640		delayed_refs->num_heads_ready++;
641		atomic_inc(&delayed_refs->num_entries);
642		trans->delayed_ref_updates++;
643	}
644	if (qrecord_inserted_ret)
645		*qrecord_inserted_ret = qrecord_inserted;
646	if (new_ref_mod)
647		*new_ref_mod = head_ref->total_ref_mod;
648	return head_ref;
649}
650
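/*
 * The ref_mod bookkeeping above maps actions as follows:
 * BTRFS_UPDATE_DELAYED_HEAD -> 0, BTRFS_DROP_DELAYED_REF -> -1, and
 * BTRFS_ADD_DELAYED_REF / BTRFS_ADD_DELAYED_EXTENT -> +1.  A head
 * created by an add and then updated by two drops therefore ends with
 * total_ref_mod = 1 - 1 - 1 = -1, which for data extents turns on the
 * pending_csums accounting seen in update_existing_head_ref().
 */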
651/*
652 * helper to insert a delayed tree ref into the rbtree.
653 */
654static noinline void
655add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
656		     struct btrfs_trans_handle *trans,
657		     struct btrfs_delayed_ref_head *head_ref,
658		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
659		     u64 num_bytes, u64 parent, u64 ref_root, int level,
660		     int action)
661{
662	struct btrfs_delayed_tree_ref *full_ref;
663	struct btrfs_delayed_ref_root *delayed_refs;
664	u64 seq = 0;
665	int ret;
666
667	if (action == BTRFS_ADD_DELAYED_EXTENT)
668		action = BTRFS_ADD_DELAYED_REF;
669
670	if (is_fstree(ref_root))
671		seq = atomic64_read(&fs_info->tree_mod_seq);
672	delayed_refs = &trans->transaction->delayed_refs;
673
674	/* first set the basic ref node struct up */
675	refcount_set(&ref->refs, 1);
676	ref->bytenr = bytenr;
677	ref->num_bytes = num_bytes;
678	ref->ref_mod = 1;
679	ref->action = action;
680	ref->is_head = 0;
681	ref->in_tree = 1;
682	ref->seq = seq;
683	RB_CLEAR_NODE(&ref->ref_node);
684	INIT_LIST_HEAD(&ref->add_list);
685
686	full_ref = btrfs_delayed_node_to_tree_ref(ref);
687	full_ref->parent = parent;
688	full_ref->root = ref_root;
689	if (parent)
690		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
691	else
692		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
693	full_ref->level = level;
694
695	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
696
697	ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
698
699	/*
700	 * XXX: memory should be freed at the same level it was allocated,
701	 * but this bad practice exists elsewhere.  Follow it for now; needs cleanup.
702	 */
703	if (ret > 0)
704		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
705}
706
707/*
708 * helper to insert a delayed data ref into the rbtree.
709 */
710static noinline void
711add_delayed_data_ref(struct btrfs_fs_info *fs_info,
712		     struct btrfs_trans_handle *trans,
713		     struct btrfs_delayed_ref_head *head_ref,
714		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
715		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
716		     u64 offset, int action)
717{
718	struct btrfs_delayed_data_ref *full_ref;
719	struct btrfs_delayed_ref_root *delayed_refs;
720	u64 seq = 0;
721	int ret;
722
723	if (action == BTRFS_ADD_DELAYED_EXTENT)
724		action = BTRFS_ADD_DELAYED_REF;
725
726	delayed_refs = &trans->transaction->delayed_refs;
727
728	if (is_fstree(ref_root))
729		seq = atomic64_read(&fs_info->tree_mod_seq);
730
731	/* first set the basic ref node struct up */
732	refcount_set(&ref->refs, 1);
733	ref->bytenr = bytenr;
734	ref->num_bytes = num_bytes;
735	ref->ref_mod = 1;
736	ref->action = action;
737	ref->is_head = 0;
738	ref->in_tree = 1;
739	ref->seq = seq;
740	RB_CLEAR_NODE(&ref->ref_node);
741	INIT_LIST_HEAD(&ref->add_list);
742
743	full_ref = btrfs_delayed_node_to_data_ref(ref);
744	full_ref->parent = parent;
745	full_ref->root = ref_root;
746	if (parent)
747		ref->type = BTRFS_SHARED_DATA_REF_KEY;
748	else
749		ref->type = BTRFS_EXTENT_DATA_REF_KEY;
750
751	full_ref->objectid = owner;
752	full_ref->offset = offset;
753
754	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
755
756	ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
757	if (ret > 0)
758		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
759}
760
761/*
762 * add a delayed tree ref.  This does all of the accounting required
763 * to make sure the delayed ref is eventually processed before this
764 * transaction commits.
765 */
766int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
767			       struct btrfs_trans_handle *trans,
768			       u64 bytenr, u64 num_bytes, u64 parent,
769			       u64 ref_root,  int level, int action,
770			       struct btrfs_delayed_extent_op *extent_op,
771			       int *old_ref_mod, int *new_ref_mod)
772{
773	struct btrfs_delayed_tree_ref *ref;
774	struct btrfs_delayed_ref_head *head_ref;
775	struct btrfs_delayed_ref_root *delayed_refs;
776	struct btrfs_qgroup_extent_record *record = NULL;
777	int qrecord_inserted;
778	int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
779
780	BUG_ON(extent_op && extent_op->is_data);
781	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
782	if (!ref)
783		return -ENOMEM;
784
785	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
786	if (!head_ref)
787		goto free_ref;
788
789	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
790	    is_fstree(ref_root)) {
791		record = kmalloc(sizeof(*record), GFP_NOFS);
792		if (!record)
793			goto free_head_ref;
794	}
795
796	head_ref->extent_op = extent_op;
797
798	delayed_refs = &trans->transaction->delayed_refs;
799	spin_lock(&delayed_refs->lock);
800
801	/*
802	 * insert both the head node and the new ref without dropping
803	 * the spin lock
804	 */
805	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
806					bytenr, num_bytes, 0, 0, action, 0,
807					is_system, &qrecord_inserted,
808					old_ref_mod, new_ref_mod);
809
810	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
811			     num_bytes, parent, ref_root, level, action);
812	spin_unlock(&delayed_refs->lock);
813
814	if (qrecord_inserted)
815		btrfs_qgroup_trace_extent_post(fs_info, record);
816
817	return 0;
818
819free_head_ref:
820	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
821free_ref:
822	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
823
824	return -ENOMEM;
825}
826
827/*
828 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
829 */
830int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
831			       struct btrfs_trans_handle *trans,
832			       u64 bytenr, u64 num_bytes,
833			       u64 parent, u64 ref_root,
834			       u64 owner, u64 offset, u64 reserved, int action,
835			       int *old_ref_mod, int *new_ref_mod)
836{
837	struct btrfs_delayed_data_ref *ref;
838	struct btrfs_delayed_ref_head *head_ref;
839	struct btrfs_delayed_ref_root *delayed_refs;
840	struct btrfs_qgroup_extent_record *record = NULL;
841	int qrecord_inserted;
842
843	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
844	if (!ref)
845		return -ENOMEM;
846
847	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
848	if (!head_ref) {
849		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
850		return -ENOMEM;
851	}
852
853	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
854	    is_fstree(ref_root)) {
855		record = kmalloc(sizeof(*record), GFP_NOFS);
856		if (!record) {
857			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
858			kmem_cache_free(btrfs_delayed_ref_head_cachep,
859					head_ref);
860			return -ENOMEM;
861		}
862	}
863
864	head_ref->extent_op = NULL;
865
866	delayed_refs = &trans->transaction->delayed_refs;
867	spin_lock(&delayed_refs->lock);
868
869	/*
870	 * insert both the head node and the new ref without dropping
871	 * the spin lock
872	 */
873	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
874					bytenr, num_bytes, ref_root, reserved,
875					action, 1, 0, &qrecord_inserted,
876					old_ref_mod, new_ref_mod);
877
878	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
879				   num_bytes, parent, ref_root, owner, offset,
880				   action);
881	spin_unlock(&delayed_refs->lock);
882
883	if (qrecord_inserted)
884		return btrfs_qgroup_trace_extent_post(fs_info, record);
885	return 0;
886}
887
888int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
889				struct btrfs_trans_handle *trans,
890				u64 bytenr, u64 num_bytes,
891				struct btrfs_delayed_extent_op *extent_op)
892{
893	struct btrfs_delayed_ref_head *head_ref;
894	struct btrfs_delayed_ref_root *delayed_refs;
895
896	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
897	if (!head_ref)
898		return -ENOMEM;
899
900	head_ref->extent_op = extent_op;
901
902	delayed_refs = &trans->transaction->delayed_refs;
903	spin_lock(&delayed_refs->lock);
904
905	/*
906	 * extent_ops just modify the flags of an extent and they don't result
907	 * in ref count changes, hence it's safe to pass false/0 for is_system
908	 * argument
909	 */
910	add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
911			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
912			     extent_op->is_data, 0, NULL, NULL, NULL);
913
914	spin_unlock(&delayed_refs->lock);
915	return 0;
916}
917
918/*
919 * this does a simple search for the head node for a given extent.
920 * It must be called with the delayed ref spinlock held, and it returns
921 * the head node if one was found, or NULL if not.
922 */
923struct btrfs_delayed_ref_head *
924btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
925{
926	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
927}
928
929void __cold btrfs_delayed_ref_exit(void)
930{
931	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
932	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
933	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
934	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
935}
936
937int __init btrfs_delayed_ref_init(void)
938{
939	btrfs_delayed_ref_head_cachep = kmem_cache_create(
940				"btrfs_delayed_ref_head",
941				sizeof(struct btrfs_delayed_ref_head), 0,
942				SLAB_MEM_SPREAD, NULL);
943	if (!btrfs_delayed_ref_head_cachep)
944		goto fail;
945
946	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
947				"btrfs_delayed_tree_ref",
948				sizeof(struct btrfs_delayed_tree_ref), 0,
949				SLAB_MEM_SPREAD, NULL);
950	if (!btrfs_delayed_tree_ref_cachep)
951		goto fail;
952
953	btrfs_delayed_data_ref_cachep = kmem_cache_create(
954				"btrfs_delayed_data_ref",
955				sizeof(struct btrfs_delayed_data_ref), 0,
956				SLAB_MEM_SPREAD, NULL);
957	if (!btrfs_delayed_data_ref_cachep)
958		goto fail;
959
960	btrfs_delayed_extent_op_cachep = kmem_cache_create(
961				"btrfs_delayed_extent_op",
962				sizeof(struct btrfs_delayed_extent_op), 0,
963				SLAB_MEM_SPREAD, NULL);
964	if (!btrfs_delayed_extent_op_cachep)
965		goto fail;
966
967	return 0;
968fail:
969	btrfs_delayed_ref_exit();
970	return -ENOMEM;
971}
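/*
 * The init/exit pair above relies on the usual kernel "goto fail"
 * unwind: kmem_cache_destroy() is a no-op on NULL, so one call to
 * btrfs_delayed_ref_exit() safely tears down however many caches were
 * created before the failing allocation.
 */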
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2009 Oracle.  All rights reserved.
   4 */
   5
   6#include <linux/sched.h>
   7#include <linux/slab.h>
   8#include <linux/sort.h>
   9#include "ctree.h"
  10#include "delayed-ref.h"
  11#include "transaction.h"
  12#include "qgroup.h"
  13#include "space-info.h"
  14#include "tree-mod-log.h"
  15
  16struct kmem_cache *btrfs_delayed_ref_head_cachep;
  17struct kmem_cache *btrfs_delayed_tree_ref_cachep;
  18struct kmem_cache *btrfs_delayed_data_ref_cachep;
  19struct kmem_cache *btrfs_delayed_extent_op_cachep;
  20/*
  21 * delayed back reference update tracking.  For subvolume trees
  22 * we queue up extent allocations and backref maintenance for
  23 * delayed processing.   This avoids deep call chains where we
  24 * add extents in the middle of btrfs_search_slot, and it allows
  25 * us to buffer up frequently modified backrefs in an rb tree instead
  26 * of hammering updates on the extent allocation tree.
  27 */
  28
  29bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
  30{
  31	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
  32	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
  33	bool ret = false;
  34	u64 reserved;
  35
  36	spin_lock(&global_rsv->lock);
  37	reserved = global_rsv->reserved;
  38	spin_unlock(&global_rsv->lock);
  39
  40	/*
  41	 * Since the global reserve is just kind of magic we don't really want
  42	 * to rely on it to save our bacon, so if our size is more than the
  43	 * delayed_refs_rsv and the global rsv then it's time to think about
  44	 * bailing.
  45	 */
  46	spin_lock(&delayed_refs_rsv->lock);
  47	reserved += delayed_refs_rsv->reserved;
  48	if (delayed_refs_rsv->size >= reserved)
  49		ret = true;
  50	spin_unlock(&delayed_refs_rsv->lock);
  51	return ret;
  52}
  53
  54int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
  55{
  56	u64 num_entries =
  57		atomic_read(&trans->transaction->delayed_refs.num_entries);
  58	u64 avg_runtime;
  59	u64 val;
  60
  61	smp_mb();
  62	avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
  63	val = num_entries * avg_runtime;
  64	if (val >= NSEC_PER_SEC)
  65		return 1;
  66	if (val >= NSEC_PER_SEC / 2)
  67		return 2;
  68
  69	return btrfs_check_space_for_delayed_refs(trans->fs_info);
  70}
  71
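/*
 * A worked example of the throttle check above, assuming an
 * avg_delayed_ref_runtime of 500000 ns: 2500 pending entries give
 * val = 1.25e9 >= NSEC_PER_SEC, so the caller must throttle (1);
 * 1200 entries give 6e8, past NSEC_PER_SEC / 2, returning 2; anything
 * smaller defers to the space check.
 */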
  72/**
  73 * Release a ref head's reservation
  74 *
  75 * @fs_info:  the filesystem
  76 * @nr:       number of items to drop
  77 *
  78 * This drops the delayed ref head's count from the delayed refs rsv and frees
  79 * any excess reservation we had.
  80 */
  81void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
  82{
  83	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
  84	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
  85	u64 released = 0;
  86
  87	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
  88	if (released)
  89		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
  90					      0, released, 0);
  91}
  92
  93/*
  94 * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
  95 * @trans - the trans that may have generated delayed refs
  96 *
  97 * This is to be called anytime we may have adjusted trans->delayed_ref_updates,
  98 * it'll calculate the additional size and add it to the delayed_refs_rsv.
  99 */
 100void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
 101{
 102	struct btrfs_fs_info *fs_info = trans->fs_info;
 103	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
 104	u64 num_bytes;
 105
 106	if (!trans->delayed_ref_updates)
 107		return;
 108
 109	num_bytes = btrfs_calc_insert_metadata_size(fs_info,
 110						    trans->delayed_ref_updates);
 111	spin_lock(&delayed_rsv->lock);
 112	delayed_rsv->size += num_bytes;
 113	delayed_rsv->full = 0;
 114	spin_unlock(&delayed_rsv->lock);
 115	trans->delayed_ref_updates = 0;
 116}
 117
 118/**
 119 * Transfer bytes to our delayed refs rsv
 120 *
 121 * @fs_info:   the filesystem
 122 * @src:       source block rsv to transfer from
 123 * @num_bytes: number of bytes to transfer
 124 *
 125 * This transfers up to the num_bytes amount from the src rsv to the
 126 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 127 */
 128void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
 129				       struct btrfs_block_rsv *src,
 130				       u64 num_bytes)
 131{
 132	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
 133	u64 to_free = 0;
 134
 135	spin_lock(&src->lock);
 136	src->reserved -= num_bytes;
 137	src->size -= num_bytes;
 138	spin_unlock(&src->lock);
 139
 140	spin_lock(&delayed_refs_rsv->lock);
 141	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
 142		u64 delta = delayed_refs_rsv->size -
 143			delayed_refs_rsv->reserved;
 144		if (num_bytes > delta) {
 145			to_free = num_bytes - delta;
 146			num_bytes = delta;
 147		}
 148	} else {
 149		to_free = num_bytes;
 150		num_bytes = 0;
 151	}
 152
 153	if (num_bytes)
 154		delayed_refs_rsv->reserved += num_bytes;
 155	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
 156		delayed_refs_rsv->full = 1;
 157	spin_unlock(&delayed_refs_rsv->lock);
 158
 159	if (num_bytes)
 160		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
 161					      0, num_bytes, 1);
 162	if (to_free)
 163		btrfs_space_info_free_bytes_may_use(fs_info,
 164				delayed_refs_rsv->space_info, to_free);
 165}
 166
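/*
 * A worked example of the transfer above: with rsv size 100 and
 * reserved 60 (a deficit of 40), migrating num_bytes = 50 moves 40
 * bytes into the rsv, marks it full, and returns to_free = 10 to the
 * space info; migrating only 30 moves all 30 and frees nothing.
 */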
 167/**
 168 * Refill based on our delayed refs usage
 169 *
 170 * @fs_info: the filesystem
 171 * @flush:   control how we can flush for this reservation.
 172 *
 173 * This will refill the delayed block_rsv up to 1 items size worth of space and
 174 * will return -ENOSPC if we can't make the reservation.
 175 */
 176int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
 177				  enum btrfs_reserve_flush_enum flush)
 178{
 179	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
 180	u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
 181	u64 num_bytes = 0;
 182	int ret = -ENOSPC;
 183
 184	spin_lock(&block_rsv->lock);
 185	if (block_rsv->reserved < block_rsv->size) {
 186		num_bytes = block_rsv->size - block_rsv->reserved;
 187		num_bytes = min(num_bytes, limit);
 188	}
 189	spin_unlock(&block_rsv->lock);
 190
 191	if (!num_bytes)
 192		return 0;
 193
 194	ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv,
 195					   num_bytes, flush);
 196	if (ret)
 197		return ret;
 198	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
 199	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
 200				      0, num_bytes, 1);
 201	return 0;
 202}
 203
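/*
 * The refill above is deliberately incremental: it tops the rsv up by
 * at most one insert-item's worth of metadata space
 * (btrfs_calc_insert_metadata_size(fs_info, 1)) per call, and only
 * propagates the reservation error if even that much cannot be found.
 */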
 204/*
 205 * compare two delayed tree backrefs with same bytenr and type
 206 */
 207static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
 208			  struct btrfs_delayed_tree_ref *ref2)
 209{
 210	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
 211		if (ref1->root < ref2->root)
 212			return -1;
 213		if (ref1->root > ref2->root)
 214			return 1;
 215	} else {
 216		if (ref1->parent < ref2->parent)
 217			return -1;
 218		if (ref1->parent > ref2->parent)
 219			return 1;
 220	}
 221	return 0;
 222}
 223
 224/*
 225 * compare two delayed data backrefs with same bytenr and type
 226 */
 227static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
 228			  struct btrfs_delayed_data_ref *ref2)
 229{
 230	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
 231		if (ref1->root < ref2->root)
 232			return -1;
 233		if (ref1->root > ref2->root)
 234			return 1;
 235		if (ref1->objectid < ref2->objectid)
 236			return -1;
 237		if (ref1->objectid > ref2->objectid)
 238			return 1;
 239		if (ref1->offset < ref2->offset)
 240			return -1;
 241		if (ref1->offset > ref2->offset)
 242			return 1;
 243	} else {
 244		if (ref1->parent < ref2->parent)
 245			return -1;
 246		if (ref1->parent > ref2->parent)
 247			return 1;
 248	}
 249	return 0;
 250}
 251
 252static int comp_refs(struct btrfs_delayed_ref_node *ref1,
 253		     struct btrfs_delayed_ref_node *ref2,
 254		     bool check_seq)
 255{
 256	int ret = 0;
 257
 258	if (ref1->type < ref2->type)
 259		return -1;
 260	if (ref1->type > ref2->type)
 261		return 1;
 262	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
 263	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
 264		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
 265				     btrfs_delayed_node_to_tree_ref(ref2));
 266	else
 267		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
 268				     btrfs_delayed_node_to_data_ref(ref2));
 269	if (ret)
 270		return ret;
 271	if (check_seq) {
 272		if (ref1->seq < ref2->seq)
 273			return -1;
 274		if (ref1->seq > ref2->seq)
 275			return 1;
 276	}
 277	return 0;
 278}
 279
 280/* insert a new ref to head ref rbtree */
 281static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
 282						   struct rb_node *node)
 283{
 284	struct rb_node **p = &root->rb_root.rb_node;
 285	struct rb_node *parent_node = NULL;
 286	struct btrfs_delayed_ref_head *entry;
 287	struct btrfs_delayed_ref_head *ins;
 288	u64 bytenr;
 289	bool leftmost = true;
 290
 291	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
 292	bytenr = ins->bytenr;
 293	while (*p) {
 294		parent_node = *p;
 295		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
 296				 href_node);
 297
 298		if (bytenr < entry->bytenr) {
 299			p = &(*p)->rb_left;
 300		} else if (bytenr > entry->bytenr) {
 301			p = &(*p)->rb_right;
 302			leftmost = false;
 303		} else {
 304			return entry;
 305		}
 306	}
 307
 308	rb_link_node(node, parent_node, p);
 309	rb_insert_color_cached(node, root, leftmost);
 310	return NULL;
 311}
 312
 313static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
 314		struct btrfs_delayed_ref_node *ins)
 315{
 316	struct rb_node **p = &root->rb_root.rb_node;
 317	struct rb_node *node = &ins->ref_node;
 318	struct rb_node *parent_node = NULL;
 319	struct btrfs_delayed_ref_node *entry;
 320	bool leftmost = true;
 321
 322	while (*p) {
 323		int comp;
 324
 325		parent_node = *p;
 326		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
 327				 ref_node);
 328		comp = comp_refs(ins, entry, true);
 329		if (comp < 0) {
 330			p = &(*p)->rb_left;
 331		} else if (comp > 0) {
 332			p = &(*p)->rb_right;
 333			leftmost = false;
 334		} else {
 335			return entry;
 336		}
 337	}
 338
 339	rb_link_node(node, parent_node, p);
 340	rb_insert_color_cached(node, root, leftmost);
 341	return NULL;
 342}
 343
 344static struct btrfs_delayed_ref_head *find_first_ref_head(
 345		struct btrfs_delayed_ref_root *dr)
 346{
 347	struct rb_node *n;
 348	struct btrfs_delayed_ref_head *entry;
 349
 350	n = rb_first_cached(&dr->href_root);
 351	if (!n)
 352		return NULL;
 353
 354	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
 355
 356	return entry;
 357}
 358
 359/*
 360 * Find a head entry based on bytenr. This returns the delayed ref head if it
 361 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 362 * is given, the next bigger entry is returned if no exact match is found.
 363 */
 364static struct btrfs_delayed_ref_head *find_ref_head(
 365		struct btrfs_delayed_ref_root *dr, u64 bytenr,
 366		bool return_bigger)
 367{
 368	struct rb_root *root = &dr->href_root.rb_root;
 369	struct rb_node *n;
 370	struct btrfs_delayed_ref_head *entry;
 371
 372	n = root->rb_node;
 373	entry = NULL;
 374	while (n) {
 375		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
 376
 377		if (bytenr < entry->bytenr)
 378			n = n->rb_left;
 379		else if (bytenr > entry->bytenr)
 380			n = n->rb_right;
 381		else
 382			return entry;
 383	}
 384	if (entry && return_bigger) {
 385		if (bytenr > entry->bytenr) {
 386			n = rb_next(&entry->href_node);
 387			if (!n)
 388				return NULL;
 389			entry = rb_entry(n, struct btrfs_delayed_ref_head,
 390					 href_node);
 391		}
 392		return entry;
 393	}
 394	return NULL;
 395}
 396
 397int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
 398			   struct btrfs_delayed_ref_head *head)
 399{
 400	lockdep_assert_held(&delayed_refs->lock);
 401	if (mutex_trylock(&head->mutex))
 402		return 0;
 403
 404	refcount_inc(&head->refs);
 405	spin_unlock(&delayed_refs->lock);
 406
 407	mutex_lock(&head->mutex);
 408	spin_lock(&delayed_refs->lock);
 409	if (RB_EMPTY_NODE(&head->href_node)) {
 410		mutex_unlock(&head->mutex);
 411		btrfs_put_delayed_ref_head(head);
 412		return -EAGAIN;
 413	}
 414	btrfs_put_delayed_ref_head(head);
 415	return 0;
 416}
 417
 418static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
 419				    struct btrfs_delayed_ref_root *delayed_refs,
 420				    struct btrfs_delayed_ref_head *head,
 421				    struct btrfs_delayed_ref_node *ref)
 422{
 423	lockdep_assert_held(&head->lock);
 424	rb_erase_cached(&ref->ref_node, &head->ref_tree);
 425	RB_CLEAR_NODE(&ref->ref_node);
 426	if (!list_empty(&ref->add_list))
 427		list_del(&ref->add_list);
 428	ref->in_tree = 0;
 429	btrfs_put_delayed_ref(ref);
 430	atomic_dec(&delayed_refs->num_entries);
 431}
 432
 433static bool merge_ref(struct btrfs_trans_handle *trans,
 434		      struct btrfs_delayed_ref_root *delayed_refs,
 435		      struct btrfs_delayed_ref_head *head,
 436		      struct btrfs_delayed_ref_node *ref,
 437		      u64 seq)
 438{
 439	struct btrfs_delayed_ref_node *next;
 440	struct rb_node *node = rb_next(&ref->ref_node);
 441	bool done = false;
 442
 443	while (!done && node) {
 444		int mod;
 445
 446		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
 447		node = rb_next(node);
 448		if (seq && next->seq >= seq)
 449			break;
 450		if (comp_refs(ref, next, false))
 451			break;
 452
 453		if (ref->action == next->action) {
 454			mod = next->ref_mod;
 455		} else {
 456			if (ref->ref_mod < next->ref_mod) {
 457				swap(ref, next);
 458				done = true;
 459			}
 460			mod = -next->ref_mod;
 461		}
 462
 463		drop_delayed_ref(trans, delayed_refs, head, next);
 464		ref->ref_mod += mod;
 465		if (ref->ref_mod == 0) {
 466			drop_delayed_ref(trans, delayed_refs, head, ref);
 467			done = true;
 468		} else {
 469			/*
 470			 * Can't have multiples of the same ref on a tree block.
 471			 */
 472			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
 473				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
 474		}
 475	}
 476
 477	return done;
 478}
 479
 480void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
 481			      struct btrfs_delayed_ref_root *delayed_refs,
 482			      struct btrfs_delayed_ref_head *head)
 483{
 484	struct btrfs_fs_info *fs_info = trans->fs_info;
 485	struct btrfs_delayed_ref_node *ref;
 486	struct rb_node *node;
 487	u64 seq = 0;
 488
 489	lockdep_assert_held(&head->lock);
 490
 491	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
 492		return;
 493
 494	/* We don't have too many refs to merge for data. */
 495	if (head->is_data)
 496		return;
 497
 498	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
 499again:
 500	for (node = rb_first_cached(&head->ref_tree); node;
 501	     node = rb_next(node)) {
 502		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
 503		if (seq && ref->seq >= seq)
 504			continue;
 505		if (merge_ref(trans, delayed_refs, head, ref, seq))
 506			goto again;
 507	}
 508}
 509
 510int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
511{
 512	int ret = 0;
 513	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);
 514
 515	if (min_seq != 0 && seq >= min_seq) {
 516		btrfs_debug(fs_info,
 517			    "holding back delayed_ref %llu, lowest is %llu",
 518			    seq, min_seq);
 519		ret = 1;
 520	}
 521
 522	return ret;
 523}
 524
 525struct btrfs_delayed_ref_head *btrfs_select_ref_head(
 526		struct btrfs_delayed_ref_root *delayed_refs)
 527{
528	struct btrfs_delayed_ref_head *head;
 529
 530again:
 531	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
 532			     true);
 533	if (!head && delayed_refs->run_delayed_start != 0) {
 534		delayed_refs->run_delayed_start = 0;
 535		head = find_first_ref_head(delayed_refs);
 536	}
 537	if (!head)
 538		return NULL;
 539
 540	while (head->processing) {
 541		struct rb_node *node;
 542
 543		node = rb_next(&head->href_node);
 544		if (!node) {
 545			if (delayed_refs->run_delayed_start == 0)
 546				return NULL;
 547			delayed_refs->run_delayed_start = 0;
 548			goto again;
 549		}
 550		head = rb_entry(node, struct btrfs_delayed_ref_head,
 551				href_node);
 552	}
 553
 554	head->processing = 1;
 555	WARN_ON(delayed_refs->num_heads_ready == 0);
 556	delayed_refs->num_heads_ready--;
 557	delayed_refs->run_delayed_start = head->bytenr +
 558		head->num_bytes;
 559	return head;
 560}
 561
 562void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
 563			   struct btrfs_delayed_ref_head *head)
 564{
 565	lockdep_assert_held(&delayed_refs->lock);
 566	lockdep_assert_held(&head->lock);
 567
 568	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
 569	RB_CLEAR_NODE(&head->href_node);
 570	atomic_dec(&delayed_refs->num_entries);
 571	delayed_refs->num_heads--;
 572	if (head->processing == 0)
 573		delayed_refs->num_heads_ready--;
 574}
 575
 576/*
 577 * Helper to insert the ref_node to the tail or merge with tail.
 578 *
 579 * Return 0 for insert.
 580 * Return >0 for merge.
 581 */
 582static int insert_delayed_ref(struct btrfs_trans_handle *trans,
 583			      struct btrfs_delayed_ref_root *root,
 584			      struct btrfs_delayed_ref_head *href,
 585			      struct btrfs_delayed_ref_node *ref)
 586{
 587	struct btrfs_delayed_ref_node *exist;
 588	int mod;
 589	int ret = 0;
 590
 591	spin_lock(&href->lock);
 592	exist = tree_insert(&href->ref_tree, ref);
 593	if (!exist)
 594		goto inserted;
 595
 596	/* Now we are sure we can merge */
 597	ret = 1;
 598	if (exist->action == ref->action) {
 599		mod = ref->ref_mod;
 600	} else {
 601		/* Need to change action */
 602		if (exist->ref_mod < ref->ref_mod) {
 603			exist->action = ref->action;
 604			mod = -exist->ref_mod;
 605			exist->ref_mod = ref->ref_mod;
 606			if (ref->action == BTRFS_ADD_DELAYED_REF)
 607				list_add_tail(&exist->add_list,
 608					      &href->ref_add_list);
 609			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
 610				ASSERT(!list_empty(&exist->add_list));
 611				list_del(&exist->add_list);
 612			} else {
 613				ASSERT(0);
 614			}
 615		} else
 616			mod = -ref->ref_mod;
 617	}
 618	exist->ref_mod += mod;
 619
 620	/* remove existing tail if its ref_mod is zero */
 621	if (exist->ref_mod == 0)
 622		drop_delayed_ref(trans, root, href, exist);
 623	spin_unlock(&href->lock);
 624	return ret;
 625inserted:
 626	if (ref->action == BTRFS_ADD_DELAYED_REF)
 627		list_add_tail(&ref->add_list, &href->ref_add_list);
 628	atomic_inc(&root->num_entries);
 629	spin_unlock(&href->lock);
 630	return ret;
 631}
 632
 633/*
 634 * helper function to update the accounting in the head ref
 635 * existing and update must have the same bytenr
 636 */
 637static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
 638			 struct btrfs_delayed_ref_head *existing,
 639			 struct btrfs_delayed_ref_head *update)
 640{
 641	struct btrfs_delayed_ref_root *delayed_refs =
 642		&trans->transaction->delayed_refs;
 643	struct btrfs_fs_info *fs_info = trans->fs_info;
 644	int old_ref_mod;
 645
 646	BUG_ON(existing->is_data != update->is_data);
 647
 648	spin_lock(&existing->lock);
 649	if (update->must_insert_reserved) {
 650		/* if the extent was freed and then
 651		 * reallocated before the delayed ref
 652		 * entries were processed, we can end up
 653		 * with an existing head ref without
 654		 * the must_insert_reserved flag set.
 655		 * Set it again here
 656		 */
 657		existing->must_insert_reserved = update->must_insert_reserved;
 658
 659		/*
 660		 * update the num_bytes so we make sure the accounting
 661		 * is done correctly
 662		 */
 663		existing->num_bytes = update->num_bytes;
 664
 665	}
 666
 667	if (update->extent_op) {
 668		if (!existing->extent_op) {
 669			existing->extent_op = update->extent_op;
 670		} else {
 671			if (update->extent_op->update_key) {
 672				memcpy(&existing->extent_op->key,
 673				       &update->extent_op->key,
 674				       sizeof(update->extent_op->key));
 675				existing->extent_op->update_key = true;
 676			}
 677			if (update->extent_op->update_flags) {
 678				existing->extent_op->flags_to_set |=
 679					update->extent_op->flags_to_set;
 680				existing->extent_op->update_flags = true;
 681			}
 682			btrfs_free_delayed_extent_op(update->extent_op);
 683		}
 684	}
 685	/*
 686	 * update the reference mod on the head to reflect this new operation,
687	 * only need the lock for this case because we could be processing it
 688	 * currently, for refs we just added we know we're a-ok.
 689	 */
 690	old_ref_mod = existing->total_ref_mod;
 691	existing->ref_mod += update->ref_mod;
 692	existing->total_ref_mod += update->ref_mod;
 693
 694	/*
695	 * If we are going from a positive ref mod to a negative or vice
 696	 * versa we need to make sure to adjust pending_csums accordingly.
 697	 */
 698	if (existing->is_data) {
 699		u64 csum_leaves =
 700			btrfs_csum_bytes_to_leaves(fs_info,
 701						   existing->num_bytes);
 702
 703		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
 704			delayed_refs->pending_csums -= existing->num_bytes;
 705			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
 706		}
 707		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
 708			delayed_refs->pending_csums += existing->num_bytes;
 709			trans->delayed_ref_updates += csum_leaves;
 710		}
 711	}
 712
 713	spin_unlock(&existing->lock);
 714}
 715
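/*
 * The sign-crossing logic above keeps pending_csums and the delayed
 * refs rsv in step: when total_ref_mod crosses from >= 0 to < 0 the
 * head's bytes start counting as pending csum deletions and
 * trans->delayed_ref_updates grows by the matching csum_leaves; when
 * it crosses back, the same amount is released from the rsv.
 */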
 716static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
 717				  struct btrfs_qgroup_extent_record *qrecord,
 718				  u64 bytenr, u64 num_bytes, u64 ref_root,
 719				  u64 reserved, int action, bool is_data,
 720				  bool is_system)
721{
 722	int count_mod = 1;
 723	int must_insert_reserved = 0;
 724
 725	/* If reserved is provided, it must be a data extent. */
 726	BUG_ON(!is_data && reserved);
 727
 728	/*
 729	 * The head node stores the sum of all the mods, so dropping a ref
 730	 * should drop the sum in the head node by one.
 731	 */
 732	if (action == BTRFS_UPDATE_DELAYED_HEAD)
 733		count_mod = 0;
 734	else if (action == BTRFS_DROP_DELAYED_REF)
 735		count_mod = -1;
 736
 737	/*
 738	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
 739	 * accounting when the extent is finally added, or if a later
 740	 * modification deletes the delayed ref without ever inserting the
 741	 * extent into the extent allocation tree.  ref->must_insert_reserved
 742	 * is the flag used to record that accounting mods are required.
 743	 *
 744	 * Once we record must_insert_reserved, switch the action to
 745	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
 746	 */
 747	if (action == BTRFS_ADD_DELAYED_EXTENT)
 748		must_insert_reserved = 1;
 749	else
 750		must_insert_reserved = 0;
 751
 752	refcount_set(&head_ref->refs, 1);
 753	head_ref->bytenr = bytenr;
 754	head_ref->num_bytes = num_bytes;
 755	head_ref->ref_mod = count_mod;
 756	head_ref->must_insert_reserved = must_insert_reserved;
 757	head_ref->is_data = is_data;
 758	head_ref->is_system = is_system;
 759	head_ref->ref_tree = RB_ROOT_CACHED;
 760	INIT_LIST_HEAD(&head_ref->ref_add_list);
 761	RB_CLEAR_NODE(&head_ref->href_node);
 762	head_ref->processing = 0;
 763	head_ref->total_ref_mod = count_mod;
 764	spin_lock_init(&head_ref->lock);
 765	mutex_init(&head_ref->mutex);
 766
 767	if (qrecord) {
 768		if (ref_root && reserved) {
 769			qrecord->data_rsv = reserved;
 770			qrecord->data_rsv_refroot = ref_root;
 771		}
 772		qrecord->bytenr = bytenr;
 773		qrecord->num_bytes = num_bytes;
 774		qrecord->old_roots = NULL;
 775	}
 776}
 777
 778/*
 779 * helper function to actually insert a head node into the rbtree.
 780 * this does all the dirty work in terms of maintaining the correct
 781 * overall modification count.
 782 */
 783static noinline struct btrfs_delayed_ref_head *
 784add_delayed_ref_head(struct btrfs_trans_handle *trans,
 785		     struct btrfs_delayed_ref_head *head_ref,
 786		     struct btrfs_qgroup_extent_record *qrecord,
 787		     int action, int *qrecord_inserted_ret)
 788{
 789	struct btrfs_delayed_ref_head *existing;
 790	struct btrfs_delayed_ref_root *delayed_refs;
 791	int qrecord_inserted = 0;
 792
 793	delayed_refs = &trans->transaction->delayed_refs;
 794
 795	/* Record qgroup extent info if provided */
 796	if (qrecord) {
 797		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
 798					delayed_refs, qrecord))
 799			kfree(qrecord);
 800		else
 801			qrecord_inserted = 1;
 802	}
 803
 804	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);
 805
 806	existing = htree_insert(&delayed_refs->href_root,
 807				&head_ref->href_node);
 808	if (existing) {
 809		update_existing_head_ref(trans, existing, head_ref);
 810		/*
 811		 * we've updated the existing ref, free the newly
 812		 * allocated ref
 813		 */
 814		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 815		head_ref = existing;
 816	} else {
 817		if (head_ref->is_data && head_ref->ref_mod < 0) {
 818			delayed_refs->pending_csums += head_ref->num_bytes;
 819			trans->delayed_ref_updates +=
 820				btrfs_csum_bytes_to_leaves(trans->fs_info,
 821							   head_ref->num_bytes);
 822		}
 823		delayed_refs->num_heads++;
 824		delayed_refs->num_heads_ready++;
 825		atomic_inc(&delayed_refs->num_entries);
 826		trans->delayed_ref_updates++;
 827	}
 828	if (qrecord_inserted_ret)
 829		*qrecord_inserted_ret = qrecord_inserted;
 830
 831	return head_ref;
 832}
 833
 834/*
 835 * init_delayed_ref_common - Initialize the structure which represents a
836 *			     modification to an extent.
 837 *
 838 * @fs_info:    Internal to the mounted filesystem mount structure.
 839 *
 840 * @ref:	The structure which is going to be initialized.
 841 *
 842 * @bytenr:	The logical address of the extent for which a modification is
 843 *		going to be recorded.
 844 *
 845 * @num_bytes:  Size of the extent whose modification is being recorded.
 846 *
 847 * @ref_root:	The id of the root where this modification has originated, this
 848 *		can be either one of the well-known metadata trees or the
 849 *		subvolume id which references this extent.
 850 *
 851 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 852 *		BTRFS_ADD_DELAYED_EXTENT
 853 *
 854 * @ref_type:	Holds the type of the extent which is being recorded; can be
 855 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 856 *		when recording a metadata extent, or BTRFS_SHARED_DATA_REF_KEY/
 857 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent.
 858 */
 859static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
 860				    struct btrfs_delayed_ref_node *ref,
 861				    u64 bytenr, u64 num_bytes, u64 ref_root,
 862				    int action, u8 ref_type)
 863{
 864	u64 seq = 0;
 865
 866	if (action == BTRFS_ADD_DELAYED_EXTENT)
 867		action = BTRFS_ADD_DELAYED_REF;
 868
 869	if (is_fstree(ref_root))
 870		seq = atomic64_read(&fs_info->tree_mod_seq);
 871
 872	refcount_set(&ref->refs, 1);
 873	ref->bytenr = bytenr;
 874	ref->num_bytes = num_bytes;
 875	ref->ref_mod = 1;
 876	ref->action = action;
 877	ref->is_head = 0;
 878	ref->in_tree = 1;
 879	ref->seq = seq;
 880	ref->type = ref_type;
 881	RB_CLEAR_NODE(&ref->ref_node);
 882	INIT_LIST_HEAD(&ref->add_list);
 883}
 884
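/*
 * A minimal sketch of how the helpers below consume this: each ref flavour
 * embeds a struct btrfs_delayed_ref_node, initializes the common part here,
 * then fills in its type-specific fields, e.g. for a shared tree block:
 *
 *	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
 *				root, action, BTRFS_SHARED_BLOCK_REF_KEY);
 *	ref->root = root;
 *	ref->parent = parent;
 *	ref->level = level;
 *
 * Note that BTRFS_ADD_DELAYED_EXTENT is folded into BTRFS_ADD_DELAYED_REF
 * above, and seq is only sampled from tree_mod_seq for fs-tree roots.
 */
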
 885/*
 886 * add a delayed tree ref.  This does all of the accounting required
 887 * to make sure the delayed ref is eventually processed before this
 888 * transaction commits.
 889 */
 890int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 891			       struct btrfs_ref *generic_ref,
 892			       struct btrfs_delayed_extent_op *extent_op)
 893{
 894	struct btrfs_fs_info *fs_info = trans->fs_info;
 895	struct btrfs_delayed_tree_ref *ref;
 896	struct btrfs_delayed_ref_head *head_ref;
 897	struct btrfs_delayed_ref_root *delayed_refs;
 898	struct btrfs_qgroup_extent_record *record = NULL;
 899	int qrecord_inserted;
 900	bool is_system;
 901	int action = generic_ref->action;
 902	int level = generic_ref->tree_ref.level;
 903	int ret;
 904	u64 bytenr = generic_ref->bytenr;
 905	u64 num_bytes = generic_ref->len;
 906	u64 parent = generic_ref->parent;
 907	u8 ref_type;
 908
 909	is_system = (generic_ref->real_root == BTRFS_CHUNK_TREE_OBJECTID);
 910
 911	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
 912	BUG_ON(extent_op && extent_op->is_data);
 913	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
 914	if (!ref)
 915		return -ENOMEM;
 916
 917	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
 918	if (!head_ref) {
 919		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 920		return -ENOMEM;
 921	}
 922
 923	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
 924	    is_fstree(generic_ref->real_root) &&
 925	    is_fstree(generic_ref->tree_ref.root) &&
 926	    !generic_ref->skip_qgroup) {
 927		record = kzalloc(sizeof(*record), GFP_NOFS);
 928		if (!record) {
 929			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 930			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
 931			return -ENOMEM;
 932		}
 933	}
 934
 935	if (parent)
 936		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
 937	else
 938		ref_type = BTRFS_TREE_BLOCK_REF_KEY;
 939
 940	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
 941				generic_ref->tree_ref.root, action, ref_type);
 942	ref->root = generic_ref->tree_ref.root;
 943	ref->parent = parent;
 944	ref->level = level;
 945
 946	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
 947			      generic_ref->tree_ref.root, 0, action, false,
 948			      is_system);
 949	head_ref->extent_op = extent_op;
 950
 951	delayed_refs = &trans->transaction->delayed_refs;
 952	spin_lock(&delayed_refs->lock);
 953
 954	/*
 955	 * insert both the head node and the new ref without dropping
 956	 * the spin lock
 957	 */
 958	head_ref = add_delayed_ref_head(trans, head_ref, record,
 959					action, &qrecord_inserted);
 960
 961	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
 962	spin_unlock(&delayed_refs->lock);
 963
 964	/*
 965	 * Need to update the delayed_refs_rsv with any changes we may have
 966	 * made.
 967	 */
 968	btrfs_update_delayed_refs_rsv(trans);
 969
 970	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
 971				   action == BTRFS_ADD_DELAYED_EXTENT ?
 972				   BTRFS_ADD_DELAYED_REF : action);
 973	if (ret > 0)
 974		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 975
 976	if (qrecord_inserted)
 977		return btrfs_qgroup_trace_extent_post(trans, record);
 978
 979	return 0;
 980}
 981
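/*
 * Sketch of a metadata call site (the struct btrfs_ref fields are the ones
 * read above; bytenr, parent, level and root_objectid are assumed to come
 * from the caller's context and are shown only for illustration):
 *
 *	struct btrfs_ref generic_ref = {};
 *
 *	generic_ref.type = BTRFS_REF_METADATA;
 *	generic_ref.action = BTRFS_ADD_DELAYED_REF;
 *	generic_ref.bytenr = bytenr;
 *	generic_ref.len = fs_info->nodesize;
 *	generic_ref.parent = parent;
 *	generic_ref.real_root = root_objectid;
 *	generic_ref.tree_ref.root = root_objectid;
 *	generic_ref.tree_ref.level = level;
 *	ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
 */
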
 982/*
 983 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 984 */
 985int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
 986			       struct btrfs_ref *generic_ref,
 987			       u64 reserved)
 988{
 989	struct btrfs_fs_info *fs_info = trans->fs_info;
 990	struct btrfs_delayed_data_ref *ref;
 991	struct btrfs_delayed_ref_head *head_ref;
 992	struct btrfs_delayed_ref_root *delayed_refs;
 993	struct btrfs_qgroup_extent_record *record = NULL;
 994	int qrecord_inserted;
 995	int action = generic_ref->action;
 996	int ret;
 997	u64 bytenr = generic_ref->bytenr;
 998	u64 num_bytes = generic_ref->len;
 999	u64 parent = generic_ref->parent;
1000	u64 ref_root = generic_ref->data_ref.ref_root;
1001	u64 owner = generic_ref->data_ref.ino;
1002	u64 offset = generic_ref->data_ref.offset;
1003	u8 ref_type;
1004
1005	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
1006	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
1007	if (!ref)
1008		return -ENOMEM;
1009
1010	if (parent)
1011		ref_type = BTRFS_SHARED_DATA_REF_KEY;
1012	else
1013		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
1014	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
1015				ref_root, action, ref_type);
1016	ref->root = ref_root;
1017	ref->parent = parent;
1018	ref->objectid = owner;
1019	ref->offset = offset;
1020
1021
1022	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1023	if (!head_ref) {
1024		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1025		return -ENOMEM;
1026	}
1027
1028	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
1029	    is_fstree(ref_root) &&
1030	    is_fstree(generic_ref->real_root) &&
1031	    !generic_ref->skip_qgroup) {
1032		record = kzalloc(sizeof(*record), GFP_NOFS);
1033		if (!record) {
1034			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1035			kmem_cache_free(btrfs_delayed_ref_head_cachep,
1036					head_ref);
1037			return -ENOMEM;
1038		}
1039	}
1040
1041	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
1042			      reserved, action, true, false);
1043	head_ref->extent_op = NULL;
1044
1045	delayed_refs = &trans->transaction->delayed_refs;
1046	spin_lock(&delayed_refs->lock);
1047
1048	/*
1049	 * insert both the head node and the new ref without dropping
1050	 * the spin lock
1051	 */
1052	head_ref = add_delayed_ref_head(trans, head_ref, record,
1053					action, &qrecord_inserted);
1054
1055	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
1056	spin_unlock(&delayed_refs->lock);
1057
1058	/*
1059	 * Need to update the delayed_refs_rsv with any changes we may have
1060	 * made.
1061	 */
1062	btrfs_update_delayed_refs_rsv(trans);
1063
1064	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
1065				   action == BTRFS_ADD_DELAYED_EXTENT ?
1066				   BTRFS_ADD_DELAYED_REF : action);
1067	if (ret > 0)
1068		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1069
1070
1071	if (qrecord_inserted)
1072		return btrfs_qgroup_trace_extent_post(trans, record);
1073	return 0;
1074}
1075
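/*
 * Sketch of a data call site (fields as consumed above; 'reserved' is the
 * qgroup data reservation handed over to the delayed ref head, and the
 * other variables are assumed from the caller's context for illustration):
 *
 *	struct btrfs_ref generic_ref = {};
 *
 *	generic_ref.type = BTRFS_REF_DATA;
 *	generic_ref.action = BTRFS_ADD_DELAYED_REF;
 *	generic_ref.bytenr = bytenr;
 *	generic_ref.len = num_bytes;
 *	generic_ref.parent = parent;
 *	generic_ref.real_root = root_objectid;
 *	generic_ref.data_ref.ref_root = root_objectid;
 *	generic_ref.data_ref.ino = ino;
 *	generic_ref.data_ref.offset = offset;
 *	ret = btrfs_add_delayed_data_ref(trans, &generic_ref, reserved);
 */
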
1076int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
1077				u64 bytenr, u64 num_bytes,
1078				struct btrfs_delayed_extent_op *extent_op)
1079{
1080	struct btrfs_delayed_ref_head *head_ref;
1081	struct btrfs_delayed_ref_root *delayed_refs;
1082
1083	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1084	if (!head_ref)
1085		return -ENOMEM;
1086
1087	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
1088			      BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
1089			      false);
1090	head_ref->extent_op = extent_op;
1091
1092	delayed_refs = &trans->transaction->delayed_refs;
1093	spin_lock(&delayed_refs->lock);
1094
1095	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
1096			     NULL);
1097
1098	spin_unlock(&delayed_refs->lock);
1099
1100	/*
1101	 * Need to update the delayed_refs_rsv with any changes we may have
1102	 * made.
1103	 */
1104	btrfs_update_delayed_refs_rsv(trans);
1105	return 0;
1106}
1107
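/*
 * Sketch of how a caller might queue an extent op (assumes allocation from
 * btrfs_delayed_extent_op_cachep; on success, ownership of 'extent_op'
 * passes to the delayed ref head via head_ref->extent_op):
 *
 *	struct btrfs_delayed_extent_op *extent_op;
 *
 *	extent_op = kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
 *	if (!extent_op)
 *		return -ENOMEM;
 *	extent_op->is_data = false;
 *	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
 */
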
1108/*
1109 * This does a simple search for the head node for a given extent.  Returns the
1110 * head node if found, or NULL if not.
1111 */
1112struct btrfs_delayed_ref_head *
1113btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
1114{
1115	lockdep_assert_held(&delayed_refs->lock);
1116
1117	return find_ref_head(delayed_refs, bytenr, false);
1118}
1119
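/*
 * Usage sketch: per the lockdep assertion, callers must hold
 * delayed_refs->lock around the lookup and any use of the returned head:
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
 *	if (head)
 *		head->processing = 1;
 *	spin_unlock(&delayed_refs->lock);
 *
 * (head->processing is only a stand-in for whatever the caller does with
 * the head while the lock is held.)
 */
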
1120void __cold btrfs_delayed_ref_exit(void)
1121{
1122	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
1123	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
1124	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
1125	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
1126}
1127
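/*
 * Note on the cleanup pattern below: kmem_cache_destroy() is a no-op for a
 * NULL cache, so a partial failure in btrfs_delayed_ref_init() can simply
 * jump to 'fail' and reuse btrfs_delayed_ref_exit() to tear down whatever
 * was created.
 */
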
1128int __init btrfs_delayed_ref_init(void)
1129{
1130	btrfs_delayed_ref_head_cachep = kmem_cache_create(
1131				"btrfs_delayed_ref_head",
1132				sizeof(struct btrfs_delayed_ref_head), 0,
1133				SLAB_MEM_SPREAD, NULL);
1134	if (!btrfs_delayed_ref_head_cachep)
1135		goto fail;
1136
1137	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
1138				"btrfs_delayed_tree_ref",
1139				sizeof(struct btrfs_delayed_tree_ref), 0,
1140				SLAB_MEM_SPREAD, NULL);
1141	if (!btrfs_delayed_tree_ref_cachep)
1142		goto fail;
1143
1144	btrfs_delayed_data_ref_cachep = kmem_cache_create(
1145				"btrfs_delayed_data_ref",
1146				sizeof(struct btrfs_delayed_data_ref), 0,
1147				SLAB_MEM_SPREAD, NULL);
1148	if (!btrfs_delayed_data_ref_cachep)
1149		goto fail;
1150
1151	btrfs_delayed_extent_op_cachep = kmem_cache_create(
1152				"btrfs_delayed_extent_op",
1153				sizeof(struct btrfs_delayed_extent_op), 0,
1154				SLAB_MEM_SPREAD, NULL);
1155	if (!btrfs_delayed_extent_op_cachep)
1156		goto fail;
1157
1158	return 0;
1159fail:
1160	btrfs_delayed_ref_exit();
1161	return -ENOMEM;
1162}