v4.10.11
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

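/*
 * Rough life cycle (a sketch; the processing side lives in
 * extent-tree.c): callers queue a modification with
 * btrfs_add_delayed_tree_ref() or btrfs_add_delayed_data_ref(); the
 * refs are grouped under a btrfs_delayed_ref_head keyed by bytenr;
 * later, btrfs_run_delayed_refs() picks heads via
 * btrfs_select_ref_head() and applies the accumulated mods to the
 * extent allocation tree.
 */
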
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

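/*
 * Try to take the mutex on a delayed ref head.  Called with
 * delayed_refs->lock held; if the trylock fails we grab a reference on
 * the head, drop the spinlock to sleep on the mutex, then retake the
 * lock.  Returns -EAGAIN if the head was run and removed from the tree
 * while we slept, in which case the caller must retry.
 */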
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

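/*
 * Remove a ref node and drop its reference: a head is erased from the
 * href rbtree, an ordinary ref is unlinked from its head's lists.
 * The entry and update counters are adjusted to match.
 */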
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		list_del(&ref->list);
		if (!list_empty(&ref->add_list))
			list_del(&ref->add_list);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

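/*
 * Fold refs that describe the same tree block or extent (same type,
 * root/parent, owner and offset) into @ref, cancelling opposing
 * add/drop pairs.  Refs at or above @seq may still be needed by a tree
 * mod log user and are skipped.  Returns true once @ref itself was
 * dropped or swapped away, telling the caller to restart the scan.
 */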
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		next2 = list_next_entry(next, list);

		if (next == ref)
			goto next;

		if (seq && next->seq >= seq)
			goto next;

		if (next->type != ref->type)
			goto next;

		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next),
				   ref->type))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}

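/*
 * Returns 1 if @seq is still held back by an active tree mod log user,
 * i.e. some blocker registered a lower sequence number, in which case
 * the delayed ref must not be processed yet.
 */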
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq,
				delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

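/*
 * Pick the next head to run: find the first unprocessed head at or
 * after run_delayed_start, wrapping around to the start of the rbtree
 * at most once, and mark it as being processed.
 */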
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;
	int old_ref_mod;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = true;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation.
	 * We only need the lock for this case because we could be processing
	 * it currently; for refs we just added we know we're okay.
	 */
	old_ref_mod = existing_ref->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing_ref->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing_ref->is_data) {
		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		if (btrfs_qgroup_trace_extent_nolock(fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
	}

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(fs_info, ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, &existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated,
	 * but this bad practice is everywhere; follow it for now. Needs
	 * cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, 0, 0, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

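/*
 * Queue a bare head update (BTRFS_UPDATE_DELAYED_HEAD) that only
 * carries an extent_op, e.g. to set flags or a key on an extent item
 * without changing its reference count.
 */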
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}