v3.5.6 (fs/btrfs/delayed-ref.c)
  1/*
  2 * Copyright (C) 2009 Oracle.  All rights reserved.
  3 *
  4 * This program is free software; you can redistribute it and/or
  5 * modify it under the terms of the GNU General Public
  6 * License v2 as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope that it will be useful,
  9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 11 * General Public License for more details.
 12 *
 13 * You should have received a copy of the GNU General Public
 14 * License along with this program; if not, write to the
 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 16 * Boston, MA 021110-1307, USA.
 17 */
 18
 19#include <linux/sched.h>
 20#include <linux/slab.h>
 21#include <linux/sort.h>
 22#include "ctree.h"
 23#include "delayed-ref.h"
 24#include "transaction.h"
 25
 26/*
 27 * delayed back reference update tracking.  For subvolume trees
 28 * we queue up extent allocations and backref maintenance for
 29 * delayed processing.   This avoids deep call chains where we
 30 * add extents in the middle of btrfs_search_slot, and it allows
 31 * us to buffer up frequently modified backrefs in an rb tree instead
 32 * of hammering updates on the extent allocation tree.
 33 */
 34
 35/*
 36 * compare two delayed tree backrefs with same bytenr and type
 37 */
 38static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
 39			  struct btrfs_delayed_tree_ref *ref1)
 40{
 41	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
 42		if (ref1->root < ref2->root)
 43			return -1;
 44		if (ref1->root > ref2->root)
 45			return 1;
 46	} else {
 47		if (ref1->parent < ref2->parent)
 48			return -1;
 49		if (ref1->parent > ref2->parent)
 50			return 1;
 51	}
 52	return 0;
 53}
 54
 55/*
 56 * compare two delayed data backrefs with same bytenr and type
 57 */
 58static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
 59			  struct btrfs_delayed_data_ref *ref1)
 60{
 61	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
 62		if (ref1->root < ref2->root)
 63			return -1;
 64		if (ref1->root > ref2->root)
 65			return 1;
 66		if (ref1->objectid < ref2->objectid)
 67			return -1;
 68		if (ref1->objectid > ref2->objectid)
 69			return 1;
 70		if (ref1->offset < ref2->offset)
 71			return -1;
 72		if (ref1->offset > ref2->offset)
 73			return 1;
 74	} else {
 75		if (ref1->parent < ref2->parent)
 76			return -1;
 77		if (ref1->parent > ref2->parent)
 78			return 1;
 79	}
 80	return 0;
 81}
 82
 83/*
 84 * entries in the rb tree are ordered by the byte number of the extent,
 85 * type of the delayed backrefs and content of delayed backrefs.
 86 */
 87static int comp_entry(struct btrfs_delayed_ref_node *ref2,
 88		      struct btrfs_delayed_ref_node *ref1)
 89{
 90	if (ref1->bytenr < ref2->bytenr)
 91		return -1;
 92	if (ref1->bytenr > ref2->bytenr)
 93		return 1;
 94	if (ref1->is_head && ref2->is_head)
 95		return 0;
 96	if (ref2->is_head)
 97		return -1;
 98	if (ref1->is_head)
 99		return 1;
100	if (ref1->type < ref2->type)
101		return -1;
102	if (ref1->type > ref2->type)
103		return 1;
104	/* merging of sequenced refs is not allowed */
105	if (ref1->seq < ref2->seq)
106		return -1;
107	if (ref1->seq > ref2->seq)
108		return 1;
109	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
110	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
111		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
112				      btrfs_delayed_node_to_tree_ref(ref1));
113	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
114		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
115		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
116				      btrfs_delayed_node_to_data_ref(ref1));
117	}
118	BUG();
119	return 0;
120}
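The comparator layers its keys: bytenr first, then head-ness (the head node sorts after every other ref on its bytenr, which is why find_ref_head() below keeps walking right past non-head entries), then type, seq, and finally the type-specific fields. Note that comp_entry() takes its arguments as (ref2, ref1) and reports the ordering of ref1 relative to ref2. A standalone sketch of the same multi-key pattern, using toy structs rather than the btrfs types:

#include <stdio.h>

/* toy stand-in for a delayed ref node, for illustration only */
struct demo_ref {
	unsigned long long bytenr;
	int is_head;
	int type;
	unsigned long long seq;
};

/* returns <0, 0 or >0 for a before, equal to, or after b */
static int demo_comp(const struct demo_ref *a, const struct demo_ref *b)
{
	if (a->bytenr != b->bytenr)
		return a->bytenr < b->bytenr ? -1 : 1;
	/* the head ref sorts after all other refs on the same bytenr */
	if (a->is_head && b->is_head)
		return 0;
	if (a->is_head)
		return 1;
	if (b->is_head)
		return -1;
	if (a->type != b->type)
		return a->type < b->type ? -1 : 1;
	if (a->seq != b->seq)
		return a->seq < b->seq ? -1 : 1;
	return 0;	/* type-specific fields would be compared here */
}

int main(void)
{
	struct demo_ref head = { 4096, 1, 0, 0 };
	struct demo_ref ref  = { 4096, 0, 176, 10 };

	printf("%d\n", demo_comp(&head, &ref));	/* prints 1: head orders last */
	return 0;
}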
121
122/*
123 * insert a new ref into the rbtree.  This returns any existing refs
124 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
125 * inserted.
126 */
127static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
128						  struct rb_node *node)
129{
130	struct rb_node **p = &root->rb_node;
131	struct rb_node *parent_node = NULL;
132	struct btrfs_delayed_ref_node *entry;
133	struct btrfs_delayed_ref_node *ins;
134	int cmp;
135
136	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
137	while (*p) {
138		parent_node = *p;
139		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
140				 rb_node);
141
142		cmp = comp_entry(entry, ins);
143		if (cmp < 0)
144			p = &(*p)->rb_left;
145		else if (cmp > 0)
146			p = &(*p)->rb_right;
147		else
148			return entry;
149	}
150
151	rb_link_node(node, parent_node, p);
152	rb_insert_color(node, root);
153	return NULL;
154}
155
156/*
 157 * find a head entry based on bytenr. This returns the delayed ref
158 * head if it was able to find one, or NULL if nothing was in that spot.
159 * If return_bigger is given, the next bigger entry is returned if no exact
160 * match is found.
161 */
162static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
163				  u64 bytenr,
164				  struct btrfs_delayed_ref_node **last,
165				  int return_bigger)
166{
167	struct rb_node *n;
168	struct btrfs_delayed_ref_node *entry;
169	int cmp = 0;
170
171again:
172	n = root->rb_node;
173	entry = NULL;
174	while (n) {
175		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
176		WARN_ON(!entry->in_tree);
177		if (last)
178			*last = entry;
179
180		if (bytenr < entry->bytenr)
181			cmp = -1;
182		else if (bytenr > entry->bytenr)
183			cmp = 1;
184		else if (!btrfs_delayed_ref_is_head(entry))
185			cmp = 1;
186		else
187			cmp = 0;
188
189		if (cmp < 0)
190			n = n->rb_left;
191		else if (cmp > 0)
192			n = n->rb_right;
193		else
194			return entry;
195	}
196	if (entry && return_bigger) {
197		if (cmp > 0) {
198			n = rb_next(&entry->rb_node);
199			if (!n)
200				n = rb_first(root);
201			entry = rb_entry(n, struct btrfs_delayed_ref_node,
202					 rb_node);
203			bytenr = entry->bytenr;
204			return_bigger = 0;
205			goto again;
206		}
207		return entry;
208	}
209	return NULL;
210}
211
212int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
213			   struct btrfs_delayed_ref_head *head)
214{
215	struct btrfs_delayed_ref_root *delayed_refs;
216
217	delayed_refs = &trans->transaction->delayed_refs;
218	assert_spin_locked(&delayed_refs->lock);
219	if (mutex_trylock(&head->mutex))
220		return 0;
221
222	atomic_inc(&head->node.refs);
223	spin_unlock(&delayed_refs->lock);
224
225	mutex_lock(&head->mutex);
226	spin_lock(&delayed_refs->lock);
227	if (!head->node.in_tree) {
228		mutex_unlock(&head->mutex);
229		btrfs_put_delayed_ref(&head->node);
230		return -EAGAIN;
231	}
232	btrfs_put_delayed_ref(&head->node);
233	return 0;
234}
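Callers of btrfs_delayed_ref_lock() must handle -EAGAIN, which means the head was processed and unlinked while the spinlock was dropped to sleep on the mutex. A hedged caller-side sketch (function name and -ENOENT path hypothetical; on success the head's mutex is held and must be released later):

static int demo_lock_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_delayed_ref_head *head;
	int ret;

again:
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return -ENOENT;
	}
	/* returns 0 with head->mutex held, or -EAGAIN if the head vanished */
	ret = btrfs_delayed_ref_lock(trans, head);
	spin_unlock(&delayed_refs->lock);
	if (ret == -EAGAIN)
		goto again;
	return ret;
}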
235
236int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
237			    u64 seq)
238{
239	struct seq_list *elem;
240
241	assert_spin_locked(&delayed_refs->lock);
242	if (list_empty(&delayed_refs->seq_head))
243		return 0;
244
245	elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
246	if (seq >= elem->seq) {
247		pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
248			 seq, elem->seq, delayed_refs);
249		return 1;
250	}
251	return 0;
252}
253
254int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
255			   struct list_head *cluster, u64 start)
256{
257	int count = 0;
258	struct btrfs_delayed_ref_root *delayed_refs;
259	struct rb_node *node;
260	struct btrfs_delayed_ref_node *ref;
261	struct btrfs_delayed_ref_head *head;
262
263	delayed_refs = &trans->transaction->delayed_refs;
264	if (start == 0) {
265		node = rb_first(&delayed_refs->root);
266	} else {
267		ref = NULL;
268		find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
269		if (ref) {
270			node = &ref->rb_node;
271		} else
272			node = rb_first(&delayed_refs->root);
273	}
274again:
275	while (node && count < 32) {
276		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
277		if (btrfs_delayed_ref_is_head(ref)) {
278			head = btrfs_delayed_node_to_head(ref);
279			if (list_empty(&head->cluster)) {
280				list_add_tail(&head->cluster, cluster);
281				delayed_refs->run_delayed_start =
282					head->node.bytenr;
283				count++;
284
285				WARN_ON(delayed_refs->num_heads_ready == 0);
286				delayed_refs->num_heads_ready--;
287			} else if (count) {
288				/* the goal of the clustering is to find extents
289				 * that are likely to end up in the same extent
290				 * leaf on disk.  So, we don't want them spread
291				 * all over the tree.  Stop now if we've hit
292				 * a head that was already in use
293				 */
294				break;
295			}
296		}
297		node = rb_next(node);
298	}
299	if (count) {
300		return 0;
301	} else if (start) {
302		/*
303		 * we've gone to the end of the rbtree without finding any
304		 * clusters.  start from the beginning and try again
305		 */
306		start = 0;
307		node = rb_first(&delayed_refs->root);
308		goto again;
309	}
310	return 1;
311}
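A hedged, simplified sketch of the consumer side (modeled on the btrfs_run_delayed_refs() loop in extent-tree.c; the real loop drops the spinlock while it runs each head, which is elided here):

static void demo_run_clusters(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_delayed_ref_head *head;
	LIST_HEAD(cluster);

	spin_lock(&delayed_refs->lock);
	/* returns 0 while it can fill the cluster, 1 when nothing is left */
	while (!btrfs_find_ref_cluster(trans, &cluster,
				       delayed_refs->run_delayed_start)) {
		while (!list_empty(&cluster)) {
			head = list_first_entry(&cluster,
					struct btrfs_delayed_ref_head,
					cluster);
			list_del_init(&head->cluster);
			/* ... run this head's delayed refs here ... */
		}
	}
	spin_unlock(&delayed_refs->lock);
}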
312
313/*
314 * helper function to update an extent delayed ref in the
315 * rbtree.  existing and update must both have the same
316 * bytenr and parent
317 *
318 * This may free existing if the update cancels out whatever
319 * operation it was doing.
320 */
321static noinline void
322update_existing_ref(struct btrfs_trans_handle *trans,
323		    struct btrfs_delayed_ref_root *delayed_refs,
324		    struct btrfs_delayed_ref_node *existing,
325		    struct btrfs_delayed_ref_node *update)
326{
327	if (update->action != existing->action) {
328		/*
329		 * this is effectively undoing either an add or a
330		 * drop.  We decrement the ref_mod, and if it goes
331		 * down to zero we just delete the entry without
332		 * every changing the extent allocation tree.
333		 */
334		existing->ref_mod--;
335		if (existing->ref_mod == 0) {
336			rb_erase(&existing->rb_node,
337				 &delayed_refs->root);
338			existing->in_tree = 0;
339			btrfs_put_delayed_ref(existing);
340			delayed_refs->num_entries--;
341			if (trans->delayed_ref_updates)
342				trans->delayed_ref_updates--;
343		} else {
344			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
345				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
346		}
347	} else {
348		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
349			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
350		/*
351		 * the action on the existing ref matches
352		 * the action on the ref we're trying to add.
353		 * Bump the ref_mod by one so the backref that
354		 * is eventually added/removed has the correct
355		 * reference count
356		 */
357		existing->ref_mod += update->ref_mod;
358	}
359}
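The cancellation above is plain ref_mod arithmetic: non-head refs carry ref_mod == 1 here, so an opposite action subtracts one and a matching action accumulates. A standalone toy model of that bookkeeping (plain ints, not the btrfs types):

#include <assert.h>

/* toy model of update_existing_ref()'s ref_mod bookkeeping */
static int apply_update(int existing_ref_mod, int same_action, int update_ref_mod)
{
	if (!same_action)
		return existing_ref_mod - 1;	/* opposite action cancels one mod */
	return existing_ref_mod + update_ref_mod;	/* same action accumulates */
}

int main(void)
{
	/* a pending add (+1) followed by a drop of the same ref nets to 0,
	 * so the rb-tree entry is deleted without touching the extent tree */
	assert(apply_update(1, 0, 1) == 0);
	/* two adds for the same data backref accumulate to ref_mod == 2 */
	assert(apply_update(1, 1, 1) == 2);
	return 0;
}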
360
361/*
362 * helper function to update the accounting in the head ref
363 * existing and update must have the same bytenr
364 */
365static noinline void
366update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
367			 struct btrfs_delayed_ref_node *update)
368{
369	struct btrfs_delayed_ref_head *existing_ref;
370	struct btrfs_delayed_ref_head *ref;
371
372	existing_ref = btrfs_delayed_node_to_head(existing);
373	ref = btrfs_delayed_node_to_head(update);
374	BUG_ON(existing_ref->is_data != ref->is_data);
375
376	if (ref->must_insert_reserved) {
377		/* if the extent was freed and then
378		 * reallocated before the delayed ref
379		 * entries were processed, we can end up
380		 * with an existing head ref without
381		 * the must_insert_reserved flag set.
382		 * Set it again here
383		 */
384		existing_ref->must_insert_reserved = ref->must_insert_reserved;
385
386		/*
387		 * update the num_bytes so we make sure the accounting
388		 * is done correctly
389		 */
390		existing->num_bytes = update->num_bytes;
391
392	}
393
394	if (ref->extent_op) {
395		if (!existing_ref->extent_op) {
396			existing_ref->extent_op = ref->extent_op;
397		} else {
398			if (ref->extent_op->update_key) {
399				memcpy(&existing_ref->extent_op->key,
400				       &ref->extent_op->key,
401				       sizeof(ref->extent_op->key));
402				existing_ref->extent_op->update_key = 1;
403			}
404			if (ref->extent_op->update_flags) {
405				existing_ref->extent_op->flags_to_set |=
406					ref->extent_op->flags_to_set;
407				existing_ref->extent_op->update_flags = 1;
408			}
409			kfree(ref->extent_op);
410		}
411	}
412	/*
413	 * update the reference mod on the head to reflect this new operation
 414	 */
415	existing->ref_mod += update->ref_mod;
416}
417
418/*
419 * helper function to actually insert a head node into the rbtree.
420 * this does all the dirty work in terms of maintaining the correct
421 * overall modification count.
422 */
423static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
424					struct btrfs_trans_handle *trans,
425					struct btrfs_delayed_ref_node *ref,
426					u64 bytenr, u64 num_bytes,
427					int action, int is_data)
428{
429	struct btrfs_delayed_ref_node *existing;
430	struct btrfs_delayed_ref_head *head_ref = NULL;
431	struct btrfs_delayed_ref_root *delayed_refs;
432	int count_mod = 1;
433	int must_insert_reserved = 0;
434
435	/*
436	 * the head node stores the sum of all the mods, so dropping a ref
437	 * should drop the sum in the head node by one.
438	 */
439	if (action == BTRFS_UPDATE_DELAYED_HEAD)
440		count_mod = 0;
441	else if (action == BTRFS_DROP_DELAYED_REF)
442		count_mod = -1;
443
444	/*
445	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
446	 * the reserved accounting when the extent is finally added, or
447	 * if a later modification deletes the delayed ref without ever
448	 * inserting the extent into the extent allocation tree.
449	 * ref->must_insert_reserved is the flag used to record
450	 * that accounting mods are required.
451	 *
452	 * Once we record must_insert_reserved, switch the action to
453	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
454	 */
455	if (action == BTRFS_ADD_DELAYED_EXTENT)
456		must_insert_reserved = 1;
457	else
458		must_insert_reserved = 0;
459
460	delayed_refs = &trans->transaction->delayed_refs;
461
462	/* first set the basic ref node struct up */
463	atomic_set(&ref->refs, 1);
464	ref->bytenr = bytenr;
465	ref->num_bytes = num_bytes;
466	ref->ref_mod = count_mod;
467	ref->type  = 0;
468	ref->action  = 0;
469	ref->is_head = 1;
470	ref->in_tree = 1;
471	ref->seq = 0;
472
473	head_ref = btrfs_delayed_node_to_head(ref);
474	head_ref->must_insert_reserved = must_insert_reserved;
475	head_ref->is_data = is_data;
476
477	INIT_LIST_HEAD(&head_ref->cluster);
478	mutex_init(&head_ref->mutex);
 479
 480	trace_btrfs_delayed_ref_head(ref, head_ref, action);
481
482	existing = tree_insert(&delayed_refs->root, &ref->rb_node);
 483
484	if (existing) {
485		update_existing_head_ref(existing, ref);
486		/*
487		 * we've updated the existing ref, free the newly
488		 * allocated ref
489		 */
490		kfree(head_ref);
 491	} else {
492		delayed_refs->num_heads++;
493		delayed_refs->num_heads_ready++;
494		delayed_refs->num_entries++;
495		trans->delayed_ref_updates++;
496	}
497}
498
499/*
500 * helper to insert a delayed tree ref into the rbtree.
501 */
502static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
503					 struct btrfs_trans_handle *trans,
504					 struct btrfs_delayed_ref_node *ref,
505					 u64 bytenr, u64 num_bytes, u64 parent,
506					 u64 ref_root, int level, int action,
507					 int for_cow)
508{
509	struct btrfs_delayed_ref_node *existing;
510	struct btrfs_delayed_tree_ref *full_ref;
511	struct btrfs_delayed_ref_root *delayed_refs;
512	u64 seq = 0;
513
514	if (action == BTRFS_ADD_DELAYED_EXTENT)
515		action = BTRFS_ADD_DELAYED_REF;
516
517	delayed_refs = &trans->transaction->delayed_refs;
518
519	/* first set the basic ref node struct up */
520	atomic_set(&ref->refs, 1);
521	ref->bytenr = bytenr;
522	ref->num_bytes = num_bytes;
523	ref->ref_mod = 1;
524	ref->action = action;
525	ref->is_head = 0;
526	ref->in_tree = 1;
527
528	if (is_fstree(ref_root))
529		seq = inc_delayed_seq(delayed_refs);
530	ref->seq = seq;
531
532	full_ref = btrfs_delayed_node_to_tree_ref(ref);
533	full_ref->parent = parent;
534	full_ref->root = ref_root;
535	if (parent)
536		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
537	else
538		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
539	full_ref->level = level;
540
541	trace_btrfs_delayed_tree_ref(ref, full_ref, action);
542
543	existing = tree_insert(&delayed_refs->root, &ref->rb_node);
544
545	if (existing) {
546		update_existing_ref(trans, delayed_refs, existing, ref);
547		/*
548		 * we've updated the existing ref, free the newly
549		 * allocated ref
550		 */
551		kfree(full_ref);
552	} else {
553		delayed_refs->num_entries++;
554		trans->delayed_ref_updates++;
555	}
556}
557
558/*
559 * helper to insert a delayed data ref into the rbtree.
560 */
561static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
562					 struct btrfs_trans_handle *trans,
563					 struct btrfs_delayed_ref_node *ref,
564					 u64 bytenr, u64 num_bytes, u64 parent,
565					 u64 ref_root, u64 owner, u64 offset,
566					 int action, int for_cow)
567{
568	struct btrfs_delayed_ref_node *existing;
569	struct btrfs_delayed_data_ref *full_ref;
570	struct btrfs_delayed_ref_root *delayed_refs;
571	u64 seq = 0;
572
573	if (action == BTRFS_ADD_DELAYED_EXTENT)
574		action = BTRFS_ADD_DELAYED_REF;
575
576	delayed_refs = &trans->transaction->delayed_refs;
577
578	/* first set the basic ref node struct up */
579	atomic_set(&ref->refs, 1);
580	ref->bytenr = bytenr;
581	ref->num_bytes = num_bytes;
582	ref->ref_mod = 1;
583	ref->action = action;
584	ref->is_head = 0;
585	ref->in_tree = 1;
586
587	if (is_fstree(ref_root))
588		seq = inc_delayed_seq(delayed_refs);
589	ref->seq = seq;
590
591	full_ref = btrfs_delayed_node_to_data_ref(ref);
592	full_ref->parent = parent;
593	full_ref->root = ref_root;
594	if (parent)
595		ref->type = BTRFS_SHARED_DATA_REF_KEY;
596	else
597		ref->type = BTRFS_EXTENT_DATA_REF_KEY;
598
599	full_ref->objectid = owner;
600	full_ref->offset = offset;
601
602	trace_btrfs_delayed_data_ref(ref, full_ref, action);
603
604	existing = tree_insert(&delayed_refs->root, &ref->rb_node);
605
606	if (existing) {
607		update_existing_ref(trans, delayed_refs, existing, ref);
608		/*
609		 * we've updated the existing ref, free the newly
610		 * allocated ref
611		 */
612		kfree(full_ref);
613	} else {
614		delayed_refs->num_entries++;
615		trans->delayed_ref_updates++;
616	}
617}
618
619/*
620 * add a delayed tree ref.  This does all of the accounting required
621 * to make sure the delayed ref is eventually processed before this
622 * transaction commits.
623 */
624int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
625			       struct btrfs_trans_handle *trans,
626			       u64 bytenr, u64 num_bytes, u64 parent,
627			       u64 ref_root,  int level, int action,
628			       struct btrfs_delayed_extent_op *extent_op,
629			       int for_cow)
630{
631	struct btrfs_delayed_tree_ref *ref;
632	struct btrfs_delayed_ref_head *head_ref;
633	struct btrfs_delayed_ref_root *delayed_refs;
634
635	BUG_ON(extent_op && extent_op->is_data);
636	ref = kmalloc(sizeof(*ref), GFP_NOFS);
637	if (!ref)
638		return -ENOMEM;
639
640	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
641	if (!head_ref) {
642		kfree(ref);
643		return -ENOMEM;
644	}
645
646	head_ref->extent_op = extent_op;
647
648	delayed_refs = &trans->transaction->delayed_refs;
649	spin_lock(&delayed_refs->lock);
650
651	/*
652	 * insert both the head node and the new ref without dropping
653	 * the spin lock
654	 */
655	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
656				   num_bytes, action, 0);
657
658	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
659				   num_bytes, parent, ref_root, level, action,
660				   for_cow);
661	if (!is_fstree(ref_root) &&
662	    waitqueue_active(&delayed_refs->seq_wait))
663		wake_up(&delayed_refs->seq_wait);
664	spin_unlock(&delayed_refs->lock);
665
666	return 0;
667}
668
669/*
670 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
671 */
672int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
673			       struct btrfs_trans_handle *trans,
674			       u64 bytenr, u64 num_bytes,
675			       u64 parent, u64 ref_root,
676			       u64 owner, u64 offset, int action,
677			       struct btrfs_delayed_extent_op *extent_op,
678			       int for_cow)
679{
680	struct btrfs_delayed_data_ref *ref;
681	struct btrfs_delayed_ref_head *head_ref;
682	struct btrfs_delayed_ref_root *delayed_refs;
683
684	BUG_ON(extent_op && !extent_op->is_data);
685	ref = kmalloc(sizeof(*ref), GFP_NOFS);
686	if (!ref)
687		return -ENOMEM;
688
689	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
690	if (!head_ref) {
691		kfree(ref);
692		return -ENOMEM;
693	}
694
695	head_ref->extent_op = extent_op;
696
697	delayed_refs = &trans->transaction->delayed_refs;
698	spin_lock(&delayed_refs->lock);
699
700	/*
701	 * insert both the head node and the new ref without dropping
702	 * the spin lock
703	 */
704	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
705				   num_bytes, action, 1);
706
707	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
708				   num_bytes, parent, ref_root, owner, offset,
709				   action, for_cow);
710	if (!is_fstree(ref_root) &&
711	    waitqueue_active(&delayed_refs->seq_wait))
712		wake_up(&delayed_refs->seq_wait);
713	spin_unlock(&delayed_refs->lock);
714
715	return 0;
716}
717
718int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
719				struct btrfs_trans_handle *trans,
720				u64 bytenr, u64 num_bytes,
721				struct btrfs_delayed_extent_op *extent_op)
722{
723	struct btrfs_delayed_ref_head *head_ref;
724	struct btrfs_delayed_ref_root *delayed_refs;
725
726	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
727	if (!head_ref)
728		return -ENOMEM;
729
730	head_ref->extent_op = extent_op;
731
732	delayed_refs = &trans->transaction->delayed_refs;
733	spin_lock(&delayed_refs->lock);
734
735	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
736				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
737				   extent_op->is_data);
738
739	if (waitqueue_active(&delayed_refs->seq_wait))
740		wake_up(&delayed_refs->seq_wait);
741	spin_unlock(&delayed_refs->lock);
742	return 0;
743}
744
745/*
746 * this does a simple search for the head node for a given extent.
747 * It must be called with the delayed ref spinlock held, and it returns
 748 * the head node if one was found, or NULL if not.
749 */
750struct btrfs_delayed_ref_head *
751btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
752{
753	struct btrfs_delayed_ref_node *ref;
754	struct btrfs_delayed_ref_root *delayed_refs;
755
756	delayed_refs = &trans->transaction->delayed_refs;
757	ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
758	if (ref)
759		return btrfs_delayed_node_to_head(ref);
760	return NULL;
761}
v4.6 (fs/btrfs/delayed-ref.c)
  1/*
  2 * Copyright (C) 2009 Oracle.  All rights reserved.
  3 *
  4 * This program is free software; you can redistribute it and/or
  5 * modify it under the terms of the GNU General Public
  6 * License v2 as published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope that it will be useful,
  9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 11 * General Public License for more details.
 12 *
 13 * You should have received a copy of the GNU General Public
 14 * License along with this program; if not, write to the
 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 16 * Boston, MA 021110-1307, USA.
 17 */
 18
 19#include <linux/sched.h>
 20#include <linux/slab.h>
 21#include <linux/sort.h>
 22#include "ctree.h"
 23#include "delayed-ref.h"
 24#include "transaction.h"
 25#include "qgroup.h"
 26
 27struct kmem_cache *btrfs_delayed_ref_head_cachep;
 28struct kmem_cache *btrfs_delayed_tree_ref_cachep;
 29struct kmem_cache *btrfs_delayed_data_ref_cachep;
 30struct kmem_cache *btrfs_delayed_extent_op_cachep;
 31/*
 32 * delayed back reference update tracking.  For subvolume trees
 33 * we queue up extent allocations and backref maintenance for
 34 * delayed processing.   This avoids deep call chains where we
 35 * add extents in the middle of btrfs_search_slot, and it allows
 36 * us to buffer up frequently modified backrefs in an rb tree instead
 37 * of hammering updates on the extent allocation tree.
 38 */
 39
 40/*
 41 * compare two delayed tree backrefs with same bytenr and type
 42 */
 43static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
 44			  struct btrfs_delayed_tree_ref *ref1, int type)
 45{
 46	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
 47		if (ref1->root < ref2->root)
 48			return -1;
 49		if (ref1->root > ref2->root)
 50			return 1;
 51	} else {
 52		if (ref1->parent < ref2->parent)
 53			return -1;
 54		if (ref1->parent > ref2->parent)
 55			return 1;
 56	}
 57	return 0;
 58}
 59
 60/*
 61 * compare two delayed data backrefs with same bytenr and type
 62 */
 63static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
 64			  struct btrfs_delayed_data_ref *ref1)
 65{
 66	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
 67		if (ref1->root < ref2->root)
 68			return -1;
 69		if (ref1->root > ref2->root)
 70			return 1;
 71		if (ref1->objectid < ref2->objectid)
 72			return -1;
 73		if (ref1->objectid > ref2->objectid)
 74			return 1;
 75		if (ref1->offset < ref2->offset)
 76			return -1;
 77		if (ref1->offset > ref2->offset)
 78			return 1;
 79	} else {
 80		if (ref1->parent < ref2->parent)
 81			return -1;
 82		if (ref1->parent > ref2->parent)
 83			return 1;
 84	}
 85	return 0;
 86}
 87
  88/* insert a new ref into the head ref rbtree */
 89static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
 90						   struct rb_node *node)
 91{
 92	struct rb_node **p = &root->rb_node;
 93	struct rb_node *parent_node = NULL;
 94	struct btrfs_delayed_ref_head *entry;
 95	struct btrfs_delayed_ref_head *ins;
 96	u64 bytenr;
 97
 98	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
 99	bytenr = ins->node.bytenr;
100	while (*p) {
101		parent_node = *p;
102		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
103				 href_node);
104
105		if (bytenr < entry->node.bytenr)
106			p = &(*p)->rb_left;
107		else if (bytenr > entry->node.bytenr)
108			p = &(*p)->rb_right;
109		else
110			return entry;
111	}
112
113	rb_link_node(node, parent_node, p);
114	rb_insert_color(node, root);
115	return NULL;
116}
117
118/*
 119 * find a head entry based on bytenr. This returns the delayed ref
120 * head if it was able to find one, or NULL if nothing was in that spot.
121 * If return_bigger is given, the next bigger entry is returned if no exact
122 * match is found.
123 */
124static struct btrfs_delayed_ref_head *
125find_ref_head(struct rb_root *root, u64 bytenr,
126	      int return_bigger)
127{
128	struct rb_node *n;
129	struct btrfs_delayed_ref_head *entry;
 130
131	n = root->rb_node;
132	entry = NULL;
133	while (n) {
134		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
135
136		if (bytenr < entry->node.bytenr)
137			n = n->rb_left;
138		else if (bytenr > entry->node.bytenr)
139			n = n->rb_right;
140		else
141			return entry;
142	}
143	if (entry && return_bigger) {
144		if (bytenr > entry->node.bytenr) {
145			n = rb_next(&entry->href_node);
146			if (!n)
147				n = rb_first(root);
148			entry = rb_entry(n, struct btrfs_delayed_ref_head,
149					 href_node);
150			return entry;
151		}
152		return entry;
153	}
154	return NULL;
155}
156
157int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
158			   struct btrfs_delayed_ref_head *head)
159{
160	struct btrfs_delayed_ref_root *delayed_refs;
161
162	delayed_refs = &trans->transaction->delayed_refs;
163	assert_spin_locked(&delayed_refs->lock);
164	if (mutex_trylock(&head->mutex))
165		return 0;
166
167	atomic_inc(&head->node.refs);
168	spin_unlock(&delayed_refs->lock);
169
170	mutex_lock(&head->mutex);
171	spin_lock(&delayed_refs->lock);
172	if (!head->node.in_tree) {
173		mutex_unlock(&head->mutex);
174		btrfs_put_delayed_ref(&head->node);
175		return -EAGAIN;
176	}
177	btrfs_put_delayed_ref(&head->node);
178	return 0;
179}
180
181static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
182				    struct btrfs_delayed_ref_root *delayed_refs,
183				    struct btrfs_delayed_ref_head *head,
184				    struct btrfs_delayed_ref_node *ref)
185{
186	if (btrfs_delayed_ref_is_head(ref)) {
187		head = btrfs_delayed_node_to_head(ref);
188		rb_erase(&head->href_node, &delayed_refs->href_root);
189	} else {
190		assert_spin_locked(&head->lock);
191		list_del(&ref->list);
192	}
193	ref->in_tree = 0;
194	btrfs_put_delayed_ref(ref);
195	atomic_dec(&delayed_refs->num_entries);
196	if (trans->delayed_ref_updates)
197		trans->delayed_ref_updates--;
198}
199
200static bool merge_ref(struct btrfs_trans_handle *trans,
201		      struct btrfs_delayed_ref_root *delayed_refs,
202		      struct btrfs_delayed_ref_head *head,
203		      struct btrfs_delayed_ref_node *ref,
204		      u64 seq)
205{
206	struct btrfs_delayed_ref_node *next;
207	bool done = false;
208
209	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
210				list);
211	while (!done && &next->list != &head->ref_list) {
212		int mod;
213		struct btrfs_delayed_ref_node *next2;
214
215		next2 = list_next_entry(next, list);
216
217		if (next == ref)
218			goto next;
219
220		if (seq && next->seq >= seq)
221			goto next;
222
223		if (next->type != ref->type)
224			goto next;
225
226		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
227		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
228		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
229				   btrfs_delayed_node_to_tree_ref(next),
230				   ref->type))
231			goto next;
232		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
233		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
234		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
235				   btrfs_delayed_node_to_data_ref(next)))
236			goto next;
237
238		if (ref->action == next->action) {
239			mod = next->ref_mod;
240		} else {
241			if (ref->ref_mod < next->ref_mod) {
242				swap(ref, next);
243				done = true;
244			}
245			mod = -next->ref_mod;
246		}
247
248		drop_delayed_ref(trans, delayed_refs, head, next);
249		ref->ref_mod += mod;
250		if (ref->ref_mod == 0) {
251			drop_delayed_ref(trans, delayed_refs, head, ref);
252			done = true;
253		} else {
254			/*
255			 * Can't have multiples of the same ref on a tree block.
256			 */
257			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
258				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
259		}
260next:
261		next = next2;
262	}
263
264	return done;
265}
266
267void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
268			      struct btrfs_fs_info *fs_info,
269			      struct btrfs_delayed_ref_root *delayed_refs,
270			      struct btrfs_delayed_ref_head *head)
271{
272	struct btrfs_delayed_ref_node *ref;
273	u64 seq = 0;
274
275	assert_spin_locked(&head->lock);
276
277	if (list_empty(&head->ref_list))
278		return;
279
280	/* We don't have too many refs to merge for data. */
281	if (head->is_data)
282		return;
283
284	spin_lock(&fs_info->tree_mod_seq_lock);
285	if (!list_empty(&fs_info->tree_mod_seq_list)) {
286		struct seq_list *elem;
287
288		elem = list_first_entry(&fs_info->tree_mod_seq_list,
289					struct seq_list, list);
290		seq = elem->seq;
291	}
292	spin_unlock(&fs_info->tree_mod_seq_lock);
293
294	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
295			       list);
296	while (&ref->list != &head->ref_list) {
297		if (seq && ref->seq >= seq)
298			goto next;
299
300		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
301			if (list_empty(&head->ref_list))
302				break;
303			ref = list_first_entry(&head->ref_list,
304					       struct btrfs_delayed_ref_node,
305					       list);
306			continue;
307		}
308next:
309		ref = list_next_entry(ref, list);
310	}
311}
312
313int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
314			    struct btrfs_delayed_ref_root *delayed_refs,
315			    u64 seq)
316{
317	struct seq_list *elem;
318	int ret = 0;
319
320	spin_lock(&fs_info->tree_mod_seq_lock);
321	if (!list_empty(&fs_info->tree_mod_seq_list)) {
322		elem = list_first_entry(&fs_info->tree_mod_seq_list,
323					struct seq_list, list);
324		if (seq >= elem->seq) {
325			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
326				 (u32)(seq >> 32), (u32)seq,
327				 (u32)(elem->seq >> 32), (u32)elem->seq,
328				 delayed_refs);
329			ret = 1;
330		}
331	}
332
333	spin_unlock(&fs_info->tree_mod_seq_lock);
334	return ret;
335}
336
337struct btrfs_delayed_ref_head *
338btrfs_select_ref_head(struct btrfs_trans_handle *trans)
339{
 340	struct btrfs_delayed_ref_root *delayed_refs;
341	struct btrfs_delayed_ref_head *head;
342	u64 start;
343	bool loop = false;
344
345	delayed_refs = &trans->transaction->delayed_refs;
346
347again:
348	start = delayed_refs->run_delayed_start;
349	head = find_ref_head(&delayed_refs->href_root, start, 1);
350	if (!head && !loop) {
351		delayed_refs->run_delayed_start = 0;
352		start = 0;
353		loop = true;
354		head = find_ref_head(&delayed_refs->href_root, start, 1);
355		if (!head)
356			return NULL;
357	} else if (!head && loop) {
358		return NULL;
359	}
360
361	while (head->processing) {
362		struct rb_node *node;
363
364		node = rb_next(&head->href_node);
365		if (!node) {
366			if (loop)
367				return NULL;
368			delayed_refs->run_delayed_start = 0;
369			start = 0;
370			loop = true;
371			goto again;
372		}
373		head = rb_entry(node, struct btrfs_delayed_ref_head,
374				href_node);
375	}
376
377	head->processing = 1;
378	WARN_ON(delayed_refs->num_heads_ready == 0);
379	delayed_refs->num_heads_ready--;
380	delayed_refs->run_delayed_start = head->node.bytenr +
381		head->node.num_bytes;
382	return head;
383}
384
385/*
 386 * Helper to insert the ref_node at the tail, or merge it with the tail.
387 *
388 * Return 0 for insert.
389 * Return >0 for merge.
390 */
391static int
392add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
393			   struct btrfs_delayed_ref_root *root,
394			   struct btrfs_delayed_ref_head *href,
395			   struct btrfs_delayed_ref_node *ref)
396{
397	struct btrfs_delayed_ref_node *exist;
398	int mod;
399	int ret = 0;
400
401	spin_lock(&href->lock);
402	/* Check whether we can merge the tail node with ref */
403	if (list_empty(&href->ref_list))
404		goto add_tail;
405	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
406			   list);
407	/* No need to compare bytenr nor is_head */
408	if (exist->type != ref->type || exist->seq != ref->seq)
409		goto add_tail;
410
411	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
412	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
413	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
414			   btrfs_delayed_node_to_tree_ref(ref),
415			   ref->type))
416		goto add_tail;
417	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
418	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
419	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
420			   btrfs_delayed_node_to_data_ref(ref)))
421		goto add_tail;
422
423	/* Now we are sure we can merge */
424	ret = 1;
425	if (exist->action == ref->action) {
426		mod = ref->ref_mod;
427	} else {
428		/* Need to change action */
429		if (exist->ref_mod < ref->ref_mod) {
430			exist->action = ref->action;
431			mod = -exist->ref_mod;
432			exist->ref_mod = ref->ref_mod;
433		} else
434			mod = -ref->ref_mod;
435	}
436	exist->ref_mod += mod;
437
438	/* remove existing tail if its ref_mod is zero */
439	if (exist->ref_mod == 0)
440		drop_delayed_ref(trans, root, href, exist);
441	spin_unlock(&href->lock);
442	return ret;
443
444add_tail:
445	list_add_tail(&ref->list, &href->ref_list);
446	atomic_inc(&root->num_entries);
447	trans->delayed_ref_updates++;
448	spin_unlock(&href->lock);
449	return ret;
450}
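The merge arithmetic here mirrors merge_ref(): identical actions accumulate, opposite actions subtract the smaller ref_mod, and the action flips when the incoming ref dominates. A standalone toy model (plain ints, not the btrfs types):

#include <assert.h>

struct toy_ref { int action; int ref_mod; };

/* toy model of add_delayed_ref_tail_merge()'s ref_mod arithmetic */
static void toy_merge(struct toy_ref *exist, const struct toy_ref *ref)
{
	int mod;

	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else if (exist->ref_mod < ref->ref_mod) {
		exist->action = ref->action;	/* incoming ref dominates: flip */
		mod = -exist->ref_mod;
		exist->ref_mod = ref->ref_mod;
	} else {
		mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;			/* 0 means drop the tail entry */
}

int main(void)
{
	struct toy_ref add2  = { 1 /* ADD */,  2 };
	struct toy_ref drop1 = { 2 /* DROP */, 1 };

	toy_merge(&add2, &drop1);		/* two adds minus one drop */
	assert(add2.action == 1 && add2.ref_mod == 1);
	return 0;
}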
451
452/*
453 * helper function to update the accounting in the head ref
454 * existing and update must have the same bytenr
455 */
456static noinline void
457update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
458			 struct btrfs_delayed_ref_node *existing,
459			 struct btrfs_delayed_ref_node *update)
460{
461	struct btrfs_delayed_ref_head *existing_ref;
462	struct btrfs_delayed_ref_head *ref;
463	int old_ref_mod;
464
465	existing_ref = btrfs_delayed_node_to_head(existing);
466	ref = btrfs_delayed_node_to_head(update);
467	BUG_ON(existing_ref->is_data != ref->is_data);
468
469	spin_lock(&existing_ref->lock);
470	if (ref->must_insert_reserved) {
471		/* if the extent was freed and then
472		 * reallocated before the delayed ref
473		 * entries were processed, we can end up
474		 * with an existing head ref without
475		 * the must_insert_reserved flag set.
476		 * Set it again here
477		 */
478		existing_ref->must_insert_reserved = ref->must_insert_reserved;
479
480		/*
481		 * update the num_bytes so we make sure the accounting
482		 * is done correctly
483		 */
484		existing->num_bytes = update->num_bytes;
485
486	}
487
488	if (ref->extent_op) {
489		if (!existing_ref->extent_op) {
490			existing_ref->extent_op = ref->extent_op;
491		} else {
492			if (ref->extent_op->update_key) {
493				memcpy(&existing_ref->extent_op->key,
494				       &ref->extent_op->key,
495				       sizeof(ref->extent_op->key));
496				existing_ref->extent_op->update_key = true;
497			}
498			if (ref->extent_op->update_flags) {
499				existing_ref->extent_op->flags_to_set |=
500					ref->extent_op->flags_to_set;
501				existing_ref->extent_op->update_flags = true;
502			}
503			btrfs_free_delayed_extent_op(ref->extent_op);
504		}
505	}
506	/*
507	 * update the reference mod on the head to reflect this new operation,
 508	 * only need the lock for this case because we could be processing it
509	 * currently, for refs we just added we know we're a-ok.
510	 */
511	old_ref_mod = existing_ref->total_ref_mod;
512	existing->ref_mod += update->ref_mod;
513	existing_ref->total_ref_mod += update->ref_mod;
514
515	/*
 516	 * If we are going from a positive ref mod to a negative or vice
517	 * versa we need to make sure to adjust pending_csums accordingly.
518	 */
519	if (existing_ref->is_data) {
520		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
521			delayed_refs->pending_csums -= existing->num_bytes;
522		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
523			delayed_refs->pending_csums += existing->num_bytes;
524	}
525	spin_unlock(&existing_ref->lock);
526}
527
528/*
529 * helper function to actually insert a head node into the rbtree.
530 * this does all the dirty work in terms of maintaining the correct
531 * overall modification count.
532 */
533static noinline struct btrfs_delayed_ref_head *
534add_delayed_ref_head(struct btrfs_fs_info *fs_info,
535		     struct btrfs_trans_handle *trans,
536		     struct btrfs_delayed_ref_node *ref,
537		     struct btrfs_qgroup_extent_record *qrecord,
538		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
539		     int action, int is_data)
540{
541	struct btrfs_delayed_ref_head *existing;
542	struct btrfs_delayed_ref_head *head_ref = NULL;
543	struct btrfs_delayed_ref_root *delayed_refs;
544	struct btrfs_qgroup_extent_record *qexisting;
545	int count_mod = 1;
546	int must_insert_reserved = 0;
547
548	/* If reserved is provided, it must be a data extent. */
549	BUG_ON(!is_data && reserved);
550
551	/*
552	 * the head node stores the sum of all the mods, so dropping a ref
553	 * should drop the sum in the head node by one.
554	 */
555	if (action == BTRFS_UPDATE_DELAYED_HEAD)
556		count_mod = 0;
557	else if (action == BTRFS_DROP_DELAYED_REF)
558		count_mod = -1;
559
560	/*
561	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
562	 * the reserved accounting when the extent is finally added, or
563	 * if a later modification deletes the delayed ref without ever
564	 * inserting the extent into the extent allocation tree.
565	 * ref->must_insert_reserved is the flag used to record
566	 * that accounting mods are required.
567	 *
568	 * Once we record must_insert_reserved, switch the action to
569	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
570	 */
571	if (action == BTRFS_ADD_DELAYED_EXTENT)
572		must_insert_reserved = 1;
573	else
574		must_insert_reserved = 0;
575
576	delayed_refs = &trans->transaction->delayed_refs;
577
578	/* first set the basic ref node struct up */
579	atomic_set(&ref->refs, 1);
580	ref->bytenr = bytenr;
581	ref->num_bytes = num_bytes;
582	ref->ref_mod = count_mod;
583	ref->type  = 0;
584	ref->action  = 0;
585	ref->is_head = 1;
586	ref->in_tree = 1;
587	ref->seq = 0;
588
589	head_ref = btrfs_delayed_node_to_head(ref);
590	head_ref->must_insert_reserved = must_insert_reserved;
591	head_ref->is_data = is_data;
592	INIT_LIST_HEAD(&head_ref->ref_list);
593	head_ref->processing = 0;
594	head_ref->total_ref_mod = count_mod;
595	head_ref->qgroup_reserved = 0;
596	head_ref->qgroup_ref_root = 0;
597
598	/* Record qgroup extent info if provided */
599	if (qrecord) {
600		if (ref_root && reserved) {
601			head_ref->qgroup_ref_root = ref_root;
602			head_ref->qgroup_reserved = reserved;
603		}
604
605		qrecord->bytenr = bytenr;
606		qrecord->num_bytes = num_bytes;
607		qrecord->old_roots = NULL;
608
609		qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
610							     qrecord);
611		if (qexisting)
612			kfree(qrecord);
613	}
614
615	spin_lock_init(&head_ref->lock);
616	mutex_init(&head_ref->mutex);
617
618	trace_add_delayed_ref_head(ref, head_ref, action);
619
620	existing = htree_insert(&delayed_refs->href_root,
621				&head_ref->href_node);
622	if (existing) {
623		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
624			&& existing->qgroup_reserved);
625		update_existing_head_ref(delayed_refs, &existing->node, ref);
626		/*
627		 * we've updated the existing ref, free the newly
628		 * allocated ref
629		 */
630		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
631		head_ref = existing;
632	} else {
633		if (is_data && count_mod < 0)
634			delayed_refs->pending_csums += num_bytes;
635		delayed_refs->num_heads++;
636		delayed_refs->num_heads_ready++;
637		atomic_inc(&delayed_refs->num_entries);
638		trans->delayed_ref_updates++;
639	}
640	return head_ref;
641}
642
643/*
644 * helper to insert a delayed tree ref into the rbtree.
645 */
646static noinline void
647add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
648		     struct btrfs_trans_handle *trans,
649		     struct btrfs_delayed_ref_head *head_ref,
650		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
651		     u64 num_bytes, u64 parent, u64 ref_root, int level,
652		     int action)
653{
654	struct btrfs_delayed_tree_ref *full_ref;
655	struct btrfs_delayed_ref_root *delayed_refs;
656	u64 seq = 0;
657	int ret;
658
659	if (action == BTRFS_ADD_DELAYED_EXTENT)
660		action = BTRFS_ADD_DELAYED_REF;
661
662	if (is_fstree(ref_root))
663		seq = atomic64_read(&fs_info->tree_mod_seq);
664	delayed_refs = &trans->transaction->delayed_refs;
665
666	/* first set the basic ref node struct up */
667	atomic_set(&ref->refs, 1);
668	ref->bytenr = bytenr;
669	ref->num_bytes = num_bytes;
670	ref->ref_mod = 1;
671	ref->action = action;
672	ref->is_head = 0;
673	ref->in_tree = 1;
674	ref->seq = seq;
675
676	full_ref = btrfs_delayed_node_to_tree_ref(ref);
677	full_ref->parent = parent;
678	full_ref->root = ref_root;
679	if (parent)
680		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
681	else
682		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
683	full_ref->level = level;
684
685	trace_add_delayed_tree_ref(ref, full_ref, action);
686
687	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
688
689	/*
 690	 * XXX: memory should be freed at the same level it was allocated,
 691	 * but this bad practice is everywhere; follow it for now. Needs cleanup.
692	 */
693	if (ret > 0)
694		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
695}
696
697/*
698 * helper to insert a delayed data ref into the rbtree.
699 */
700static noinline void
701add_delayed_data_ref(struct btrfs_fs_info *fs_info,
702		     struct btrfs_trans_handle *trans,
703		     struct btrfs_delayed_ref_head *head_ref,
704		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
705		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
706		     u64 offset, int action)
707{
708	struct btrfs_delayed_data_ref *full_ref;
709	struct btrfs_delayed_ref_root *delayed_refs;
710	u64 seq = 0;
711	int ret;
712
713	if (action == BTRFS_ADD_DELAYED_EXTENT)
714		action = BTRFS_ADD_DELAYED_REF;
715
716	delayed_refs = &trans->transaction->delayed_refs;
717
718	if (is_fstree(ref_root))
719		seq = atomic64_read(&fs_info->tree_mod_seq);
720
721	/* first set the basic ref node struct up */
722	atomic_set(&ref->refs, 1);
723	ref->bytenr = bytenr;
724	ref->num_bytes = num_bytes;
725	ref->ref_mod = 1;
726	ref->action = action;
727	ref->is_head = 0;
728	ref->in_tree = 1;
729	ref->seq = seq;
730
731	full_ref = btrfs_delayed_node_to_data_ref(ref);
732	full_ref->parent = parent;
733	full_ref->root = ref_root;
734	if (parent)
735		ref->type = BTRFS_SHARED_DATA_REF_KEY;
736	else
737		ref->type = BTRFS_EXTENT_DATA_REF_KEY;
738
739	full_ref->objectid = owner;
740	full_ref->offset = offset;
741
742	trace_add_delayed_data_ref(ref, full_ref, action);
743
744	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
745
746	if (ret > 0)
747		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
748}
749
750/*
751 * add a delayed tree ref.  This does all of the accounting required
752 * to make sure the delayed ref is eventually processed before this
753 * transaction commits.
754 */
755int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
756			       struct btrfs_trans_handle *trans,
757			       u64 bytenr, u64 num_bytes, u64 parent,
758			       u64 ref_root,  int level, int action,
759			       struct btrfs_delayed_extent_op *extent_op)
760{
761	struct btrfs_delayed_tree_ref *ref;
762	struct btrfs_delayed_ref_head *head_ref;
763	struct btrfs_delayed_ref_root *delayed_refs;
764	struct btrfs_qgroup_extent_record *record = NULL;
765
766	BUG_ON(extent_op && extent_op->is_data);
767	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
768	if (!ref)
769		return -ENOMEM;
770
771	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
772	if (!head_ref)
773		goto free_ref;
774
775	if (fs_info->quota_enabled && is_fstree(ref_root)) {
776		record = kmalloc(sizeof(*record), GFP_NOFS);
777		if (!record)
778			goto free_head_ref;
779	}
780
781	head_ref->extent_op = extent_op;
782
783	delayed_refs = &trans->transaction->delayed_refs;
784	spin_lock(&delayed_refs->lock);
785
786	/*
787	 * insert both the head node and the new ref without dropping
788	 * the spin lock
789	 */
790	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
791					bytenr, num_bytes, 0, 0, action, 0);
792
793	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
794			     num_bytes, parent, ref_root, level, action);
795	spin_unlock(&delayed_refs->lock);
796
797	return 0;
798
799free_head_ref:
800	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
801free_ref:
802	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
803
804	return -ENOMEM;
805}
806
807/*
808 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
809 */
810int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
811			       struct btrfs_trans_handle *trans,
812			       u64 bytenr, u64 num_bytes,
813			       u64 parent, u64 ref_root,
814			       u64 owner, u64 offset, u64 reserved, int action,
815			       struct btrfs_delayed_extent_op *extent_op)
816{
817	struct btrfs_delayed_data_ref *ref;
818	struct btrfs_delayed_ref_head *head_ref;
819	struct btrfs_delayed_ref_root *delayed_refs;
820	struct btrfs_qgroup_extent_record *record = NULL;
821
822	BUG_ON(extent_op && !extent_op->is_data);
823	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
824	if (!ref)
825		return -ENOMEM;
826
827	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
828	if (!head_ref) {
829		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
830		return -ENOMEM;
831	}
832
833	if (fs_info->quota_enabled && is_fstree(ref_root)) {
834		record = kmalloc(sizeof(*record), GFP_NOFS);
835		if (!record) {
836			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
837			kmem_cache_free(btrfs_delayed_ref_head_cachep,
838					head_ref);
839			return -ENOMEM;
840		}
841	}
842
843	head_ref->extent_op = extent_op;
844
845	delayed_refs = &trans->transaction->delayed_refs;
846	spin_lock(&delayed_refs->lock);
847
848	/*
849	 * insert both the head node and the new ref without dropping
850	 * the spin lock
851	 */
852	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
853					bytenr, num_bytes, ref_root, reserved,
854					action, 1);
855
856	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
857				   num_bytes, parent, ref_root, owner, offset,
858				   action);
859	spin_unlock(&delayed_refs->lock);
860
861	return 0;
862}
863
864int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
865				     struct btrfs_trans_handle *trans,
866				     u64 ref_root, u64 bytenr, u64 num_bytes)
867{
868	struct btrfs_delayed_ref_root *delayed_refs;
869	struct btrfs_delayed_ref_head *ref_head;
870	int ret = 0;
871
872	if (!fs_info->quota_enabled || !is_fstree(ref_root))
873		return 0;
874
875	delayed_refs = &trans->transaction->delayed_refs;
876
877	spin_lock(&delayed_refs->lock);
878	ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
879	if (!ref_head) {
880		ret = -ENOENT;
881		goto out;
882	}
883	WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
884	ref_head->qgroup_ref_root = ref_root;
885	ref_head->qgroup_reserved = num_bytes;
886out:
887	spin_unlock(&delayed_refs->lock);
888	return ret;
889}
890
891int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
892				struct btrfs_trans_handle *trans,
893				u64 bytenr, u64 num_bytes,
894				struct btrfs_delayed_extent_op *extent_op)
895{
896	struct btrfs_delayed_ref_head *head_ref;
897	struct btrfs_delayed_ref_root *delayed_refs;
898
899	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
900	if (!head_ref)
901		return -ENOMEM;
902
903	head_ref->extent_op = extent_op;
904
905	delayed_refs = &trans->transaction->delayed_refs;
906	spin_lock(&delayed_refs->lock);
907
908	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
909			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
910			     extent_op->is_data);
911
912	spin_unlock(&delayed_refs->lock);
913	return 0;
914}
915
916/*
917 * this does a simple search for the head node for a given extent.
918 * It must be called with the delayed ref spinlock held, and it returns
 919 * the head node if one was found, or NULL if not.
920 */
921struct btrfs_delayed_ref_head *
922btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
923{
924	struct btrfs_delayed_ref_root *delayed_refs;
925
926	delayed_refs = &trans->transaction->delayed_refs;
927	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
928}
929
930void btrfs_delayed_ref_exit(void)
931{
932	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
933	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
934	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
935	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
936}
937
938int btrfs_delayed_ref_init(void)
939{
940	btrfs_delayed_ref_head_cachep = kmem_cache_create(
941				"btrfs_delayed_ref_head",
942				sizeof(struct btrfs_delayed_ref_head), 0,
943				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
944	if (!btrfs_delayed_ref_head_cachep)
945		goto fail;
946
947	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
948				"btrfs_delayed_tree_ref",
949				sizeof(struct btrfs_delayed_tree_ref), 0,
950				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
951	if (!btrfs_delayed_tree_ref_cachep)
952		goto fail;
953
954	btrfs_delayed_data_ref_cachep = kmem_cache_create(
955				"btrfs_delayed_data_ref",
956				sizeof(struct btrfs_delayed_data_ref), 0,
957				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
958	if (!btrfs_delayed_data_ref_cachep)
959		goto fail;
960
961	btrfs_delayed_extent_op_cachep = kmem_cache_create(
962				"btrfs_delayed_extent_op",
963				sizeof(struct btrfs_delayed_extent_op), 0,
964				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
965	if (!btrfs_delayed_extent_op_cachep)
966		goto fail;
967
968	return 0;
969fail:
970	btrfs_delayed_ref_exit();
971	return -ENOMEM;
972}
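These slab caches are created once at module load and torn down on unload; a hedged sketch of the pairing (the real call site is btrfs's module init; names here illustrative):

static int __init demo_module_init(void)
{
	int ret;

	ret = btrfs_delayed_ref_init();	/* create the four slab caches */
	if (ret)
		return ret;
	/* ... register the filesystem, etc. ... */
	return 0;
}

static void __exit demo_module_exit(void)
{
	/* the fail path above relies on kmem_cache_destroy() accepting NULL */
	btrfs_delayed_ref_exit();
}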